// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Intel Corporation
#include "ipu3-tables.h"
#define X 0 /* Don't care value */
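/*
 * Reader's note on the table layout below. This is inferred from the data
 * itself, not from hardware documentation, so treat it as an observation
 * rather than a specification:
 *
 * - Entry i of the array configures the bayer down-scaler for the scale
 *   factor 32 / (32 + i); the comment above each entry shows that
 *   arithmetic, e.g. 32 / (32 + 1) = 0.969697.
 *
 * - Each row in hor_phase_arr / ver_phase_arr appears to hold six signed
 *   FIR tap coefficients with a normalization exponent in the fourth slot:
 *   the taps of every row sum to 1 << exponent. For example,
 *   { 0, 3, 122, 7, 3, 0, 0 } gives 3 + 122 + 3 = 128 = 1 << 7, and the
 *   1:1 case { 0, 0, 64, 6, 0, 0, 0 } gives 64 = 1 << 6.
 *
 * - With d = gcd(32, i), each .even/.odd array lists 32 / d distinct
 *   phases and sample_patrn_length = 2 * (32 + i) / d. ptrn_arr looks
 *   like a bitmask over those sample_patrn_length input positions marking
 *   which of them produce an output sample (the number of set bits matches
 *   sample_patrn_length * 32 / (32 + i) in every entry shown here).
 */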
const struct imgu_css_bds_config
imgu_css_bds_configs[IMGU_BDS_CONFIG_LEN] = { {
/* Scale factor 32 / (32 + 0) = 1 */
.hor_phase_arr = {
.even = { { 0, 0, 64, 6, 0, 0, 0 } },
.odd = { { 0, 0, 64, 6, 0, 0, 0 } } },
.ver_phase_arr = {
.even = { { 0, 0, 64, 6, 0, 0, 0 } },
.odd = { { 0, 0, 64, 6, 0, 0, 0 } } },
.ptrn_arr = { { 0x3 } },
.sample_patrn_length = 2,
.hor_ds_en = 0,
.ver_ds_en = 0
}, {
/* Scale factor 32 / (32 + 1) = 0.969697 */
.hor_phase_arr = {
.even = { { 0, 3, 122, 7, 3, 0, 0 },
{ 0, 0, 122, 7, 7, -1, 0 },
{ 0, -3, 122, 7, 10, -1, 0 },
{ 0, -5, 121, 7, 14, -2, 0 },
{ 0, -7, 120, 7, 18, -3, 0 },
{ 0, -9, 118, 7, 23, -4, 0 },
{ 0, -11, 116, 7, 27, -4, 0 },
{ 0, -12, 113, 7, 32, -5, 0 },
{ 0, -13, 110, 7, 37, -6, 0 },
{ 0, -14, 107, 7, 42, -7, 0 },
{ 0, -14, 103, 7, 47, -8, 0 },
{ 0, -15, 100, 7, 52, -9, 0 },
{ 0, -15, 96, 7, 57, -10, 0 },
{ 0, -15, 92, 7, 62, -11, 0 },
{ 0, -14, 86, 7, 68, -12, 0 },
{ 0, -14, 82, 7, 73, -13, 0 },
{ 0, -14, 78, 7, 78, -14, 0 },
{ 0, -13, 73, 7, 82, -14, 0 },
{ 0, -12, 68, 7, 86, -14, 0 },
{ 0, -11, 62, 7, 92, -15, 0 },
{ 0, -10, 57, 7, 96, -15, 0 },
{ 0, -9, 52, 7, 100, -15, 0 },
{ 0, -8, 47, 7, 103, -14, 0 },
{ 0, -7, 42, 7, 107, -14, 0 },
{ 0, -6, 37, 7, 110, -13, 0 },
{ 0, -5, 32, 7, 113, -12, 0 },
{ 0, -4, 27, 7, 116, -11, 0 },
{ 0, -4, 23, 7, 118, -9, 0 },
{ 0, -3, 18, 7, 120, -7, 0 },
{ 0, -2, 14, 7, 121, -5, 0 },
{ 0, -1, 10, 7, 122, -3, 0 },
{ 0, -1, 7, 7, 122, 0, 0 } },
.odd = { { 0, 2, 122, 7, 5, -1, 0 },
{ 0, -1, 122, 7, 8, -1, 0 },
{ 0, -4, 122, 7, 12, -2, 0 },
{ 0, -6, 120, 7, 16, -2, 0 },
{ 0, -8, 118, 7, 21, -3, 0 },
{ 0, -10, 117, 7, 25, -4, 0 },
{ 0, -11, 114, 7, 30, -5, 0 },
{ 0, -13, 112, 7, 35, -6, 0 },
{ 0, -14, 109, 7, 40, -7, 0 },
{ 0, -14, 105, 7, 45, -8, 0 },
{ 0, -15, 102, 7, 50, -9, 0 },
{ 0, -15, 98, 7, 55, -10, 0 },
{ 0, -15, 94, 7, 60, -11, 0 },
{ 0, -15, 90, 7, 65, -12, 0 },
{ 0, -14, 85, 7, 70, -13, 0 },
{ 0, -14, 80, 7, 75, -13, 0 },
{ 0, -13, 75, 7, 80, -14, 0 },
{ 0, -13, 70, 7, 85, -14, 0 },
{ 0, -12, 65, 7, 90, -15, 0 },
{ 0, -11, 60, 7, 94, -15, 0 },
{ 0, -10, 55, 7, 98, -15, 0 },
{ 0, -9, 50, 7, 102, -15, 0 },
{ 0, -8, 45, 7, 105, -14, 0 },
{ 0, -7, 40, 7, 109, -14, 0 },
{ 0, -6, 35, 7, 112, -13, 0 },
{ 0, -5, 30, 7, 114, -11, 0 },
{ 0, -4, 25, 7, 117, -10, 0 },
{ 0, -3, 21, 7, 118, -8, 0 },
{ 0, -2, 16, 7, 120, -6, 0 },
{ 0, -2, 12, 7, 122, -4, 0 },
{ 0, -1, 8, 7, 122, -1, 0 },
{ 0, -1, 5, 7, 122, 2, 0 } } },
.ver_phase_arr = {
.even = { { 0, 3, 122, 7, 3, 0, 0 },
{ 0, 0, 122, 7, 7, -1, 0 },
{ 0, -3, 122, 7, 10, -1, 0 },
{ 0, -5, 121, 7, 14, -2, 0 },
{ 0, -7, 120, 7, 18, -3, 0 },
{ 0, -9, 118, 7, 23, -4, 0 },
{ 0, -11, 116, 7, 27, -4, 0 },
{ 0, -12, 113, 7, 32, -5, 0 },
{ 0, -13, 110, 7, 37, -6, 0 },
{ 0, -14, 107, 7, 42, -7, 0 },
{ 0, -14, 103, 7, 47, -8, 0 },
{ 0, -15, 100, 7, 52, -9, 0 },
{ 0, -15, 96, 7, 57, -10, 0 },
{ 0, -15, 92, 7, 62, -11, 0 },
{ 0, -14, 86, 7, 68, -12, 0 },
{ 0, -14, 82, 7, 73, -13, 0 },
{ 0, -14, 78, 7, 78, -14, 0 },
{ 0, -13, 73, 7, 82, -14, 0 },
{ 0, -12, 68, 7, 86, -14, 0 },
{ 0, -11, 62, 7, 92, -15, 0 },
{ 0, -10, 57, 7, 96, -15, 0 },
{ 0, -9, 52, 7, 100, -15, 0 },
{ 0, -8, 47, 7, 103, -14, 0 },
{ 0, -7, 42, 7, 107, -14, 0 },
{ 0, -6, 37, 7, 110, -13, 0 },
{ 0, -5, 32, 7, 113, -12, 0 },
{ 0, -4, 27, 7, 116, -11, 0 },
{ 0, -4, 23, 7, 118, -9, 0 },
{ 0, -3, 18, 7, 120, -7, 0 },
{ 0, -2, 14, 7, 121, -5, 0 },
{ 0, -1, 10, 7, 122, -3, 0 },
{ 0, -1, 7, 7, 122, 0, 0 } },
.odd = { { 0, 2, 122, 7, 5, -1, 0 },
{ 0, -1, 122, 7, 8, -1, 0 },
{ 0, -4, 122, 7, 12, -2, 0 },
{ 0, -6, 120, 7, 16, -2, 0 },
{ 0, -8, 118, 7, 21, -3, 0 },
{ 0, -10, 117, 7, 25, -4, 0 },
{ 0, -11, 114, 7, 30, -5, 0 },
{ 0, -13, 112, 7, 35, -6, 0 },
{ 0, -14, 109, 7, 40, -7, 0 },
{ 0, -14, 105, 7, 45, -8, 0 },
{ 0, -15, 102, 7, 50, -9, 0 },
{ 0, -15, 98, 7, 55, -10, 0 },
{ 0, -15, 94, 7, 60, -11, 0 },
{ 0, -15, 90, 7, 65, -12, 0 },
{ 0, -14, 85, 7, 70, -13, 0 },
{ 0, -14, 80, 7, 75, -13, 0 },
{ 0, -13, 75, 7, 80, -14, 0 },
{ 0, -13, 70, 7, 85, -14, 0 },
{ 0, -12, 65, 7, 90, -15, 0 },
{ 0, -11, 60, 7, 94, -15, 0 },
{ 0, -10, 55, 7, 98, -15, 0 },
{ 0, -9, 50, 7, 102, -15, 0 },
{ 0, -8, 45, 7, 105, -14, 0 },
{ 0, -7, 40, 7, 109, -14, 0 },
{ 0, -6, 35, 7, 112, -13, 0 },
{ 0, -5, 30, 7, 114, -11, 0 },
{ 0, -4, 25, 7, 117, -10, 0 },
{ 0, -3, 21, 7, 118, -8, 0 },
{ 0, -2, 16, 7, 120, -6, 0 },
{ 0, -2, 12, 7, 122, -4, 0 },
{ 0, -1, 8, 7, 122, -1, 0 },
{ 0, -1, 5, 7, 122, 2, 0 } } },
.ptrn_arr = { { 0xffffffff, 0xffffffff } },
.sample_patrn_length = 66,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 2) = 0.941176 */
.hor_phase_arr = {
.even = { { -1, 6, 118, 7, 6, -1, 0 },
{ 0, 0, 117, 7, 13, -2, 0 },
{ 0, -5, 116, 7, 21, -4, 0 },
{ 0, -9, 113, 7, 30, -6, 0 },
{ 0, -12, 109, 7, 39, -8, 0 },
{ 0, -13, 102, 7, 49, -10, 0 },
{ 0, -14, 94, 7, 59, -11, 0 },
{ 0, -14, 86, 7, 69, -13, 0 },
{ 0, -14, 78, 7, 78, -14, 0 },
{ 0, -13, 69, 7, 86, -14, 0 },
{ 0, -11, 59, 7, 94, -14, 0 },
{ 0, -10, 49, 7, 102, -13, 0 },
{ 0, -8, 39, 7, 109, -12, 0 },
{ 0, -6, 30, 7, 113, -9, 0 },
{ 0, -4, 21, 7, 116, -5, 0 },
{ 0, -2, 13, 7, 117, 0, 0 } },
.odd = { { -1, 3, 118, 7, 10, -2, 0 },
{ 0, -3, 117, 7, 17, -3, 0 },
{ 0, -7, 114, 7, 26, -5, 0 },
{ 0, -10, 110, 7, 35, -7, 0 },
{ 0, -13, 106, 7, 44, -9, 0 },
{ 0, -14, 99, 7, 54, -11, 0 },
{ 0, -14, 90, 7, 64, -12, 0 },
{ 0, -14, 82, 7, 73, -13, 0 },
{ 0, -13, 73, 7, 82, -14, 0 },
{ 0, -12, 64, 7, 90, -14, 0 },
{ 0, -11, 54, 7, 99, -14, 0 },
{ 0, -9, 44, 7, 106, -13, 0 },
{ 0, -7, 35, 7, 110, -10, 0 },
{ 0, -5, 26, 7, 114, -7, 0 },
{ 0, -3, 17, 7, 117, -3, 0 },
{ 0, -2, 10, 7, 118, 3, -1 } } },
.ver_phase_arr = {
.even = { { -1, 6, 118, 7, 6, -1, 0 },
{ 0, 0, 117, 7, 13, -2, 0 },
{ 0, -5, 116, 7, 21, -4, 0 },
{ 0, -9, 113, 7, 30, -6, 0 },
{ 0, -12, 109, 7, 39, -8, 0 },
{ 0, -13, 102, 7, 49, -10, 0 },
{ 0, -14, 94, 7, 59, -11, 0 },
{ 0, -14, 86, 7, 69, -13, 0 },
{ 0, -14, 78, 7, 78, -14, 0 },
{ 0, -13, 69, 7, 86, -14, 0 },
{ 0, -11, 59, 7, 94, -14, 0 },
{ 0, -10, 49, 7, 102, -13, 0 },
{ 0, -8, 39, 7, 109, -12, 0 },
{ 0, -6, 30, 7, 113, -9, 0 },
{ 0, -4, 21, 7, 116, -5, 0 },
{ 0, -2, 13, 7, 117, 0, 0 } },
.odd = { { -1, 3, 118, 7, 10, -2, 0 },
{ 0, -3, 117, 7, 17, -3, 0 },
{ 0, -7, 114, 7, 26, -5, 0 },
{ 0, -10, 110, 7, 35, -7, 0 },
{ 0, -13, 106, 7, 44, -9, 0 },
{ 0, -14, 99, 7, 54, -11, 0 },
{ 0, -14, 90, 7, 64, -12, 0 },
{ 0, -14, 82, 7, 73, -13, 0 },
{ 0, -13, 73, 7, 82, -14, 0 },
{ 0, -12, 64, 7, 90, -14, 0 },
{ 0, -11, 54, 7, 99, -14, 0 },
{ 0, -9, 44, 7, 106, -13, 0 },
{ 0, -7, 35, 7, 110, -10, 0 },
{ 0, -5, 26, 7, 114, -7, 0 },
{ 0, -3, 17, 7, 117, -3, 0 },
{ 0, -2, 10, 7, 118, 3, -1 } } },
.ptrn_arr = { { 0xffffffff } },
.sample_patrn_length = 34,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 3) = 0.914286 */
.hor_phase_arr = {
.even = { { -2, 9, 114, 7, 9, -2, 0 },
{ -1, 0, 114, 7, 20, -5, 0 },
{ 0, -7, 110, 7, 32, -7, 0 },
{ 0, -11, 103, 7, 46, -10, 0 },
{ 0, -13, 93, 7, 60, -12, 0 },
{ 0, -14, 82, 7, 74, -14, 0 },
{ 0, -13, 69, 7, 86, -14, 0 },
{ 0, -11, 55, 7, 97, -13, 0 },
{ 0, -9, 41, 7, 106, -10, 0 },
{ 0, -6, 28, 7, 111, -5, 0 },
{ 0, -4, 16, 7, 114, 3, -1 },
{ -2, 6, 115, 7, 12, -3, 0 },
{ 0, -2, 111, 7, 24, -5, 0 },
{ 0, -8, 107, 7, 37, -8, 0 },
{ 0, -12, 100, 7, 51, -11, 0 },
{ 0, -14, 90, 7, 65, -13, 0 },
{ 0, -14, 78, 7, 78, -14, 0 },
{ 0, -13, 65, 7, 90, -14, 0 },
{ 0, -11, 51, 7, 100, -12, 0 },
{ 0, -8, 37, 7, 107, -8, 0 },
{ 0, -5, 24, 7, 111, -2, 0 },
{ 0, -3, 12, 7, 115, 6, -2 },
{ -1, 3, 114, 7, 16, -4, 0 },
{ 0, -5, 111, 7, 28, -6, 0 },
{ 0, -10, 106, 7, 41, -9, 0 },
{ 0, -13, 97, 7, 55, -11, 0 },
{ 0, -14, 86, 7, 69, -13, 0 },
{ 0, -14, 74, 7, 82, -14, 0 },
{ 0, -12, 60, 7, 93, -13, 0 },
{ 0, -10, 46, 7, 103, -11, 0 },
{ 0, -7, 32, 7, 110, -7, 0 },
{ 0, -5, 20, 7, 114, 0, -1 } },
.odd = { { -1, 4, 114, 7, 14, -3, 0 },
{ 0, -4, 112, 7, 26, -6, 0 },
{ 0, -9, 107, 7, 39, -9, 0 },
{ 0, -13, 99, 7, 53, -11, 0 },
{ 0, -14, 88, 7, 67, -13, 0 },
{ 0, -14, 76, 7, 80, -14, 0 },
{ 0, -13, 62, 7, 93, -14, 0 },
{ 0, -10, 48, 7, 102, -12, 0 },
{ 0, -8, 35, 7, 109, -8, 0 },
{ 0, -5, 22, 7, 112, -1, 0 },
{ 0, -3, 11, 7, 115, 7, -2 },
{ -1, 1, 114, 7, 18, -4, 0 },
{ 0, -6, 111, 7, 30, -7, 0 },
{ 0, -10, 103, 7, 44, -9, 0 },
{ 0, -13, 95, 7, 58, -12, 0 },
{ 0, -14, 85, 7, 71, -14, 0 },
{ 0, -14, 71, 7, 85, -14, 0 },
{ 0, -12, 58, 7, 95, -13, 0 },
{ 0, -9, 44, 7, 103, -10, 0 },
{ 0, -7, 30, 7, 111, -6, 0 },
{ 0, -4, 18, 7, 114, 1, -1 },
{ -2, 7, 115, 7, 11, -3, 0 },
{ 0, -1, 112, 7, 22, -5, 0 },
{ 0, -8, 109, 7, 35, -8, 0 },
{ 0, -12, 102, 7, 48, -10, 0 },
{ 0, -14, 93, 7, 62, -13, 0 },
{ 0, -14, 80, 7, 76, -14, 0 },
{ 0, -13, 67, 7, 88, -14, 0 },
{ 0, -11, 53, 7, 99, -13, 0 },
{ 0, -9, 39, 7, 107, -9, 0 },
{ 0, -6, 26, 7, 112, -4, 0 },
{ 0, -3, 14, 7, 114, 4, -1 } } },
.ver_phase_arr = {
.even = { { -2, 9, 114, 7, 9, -2, 0 },
{ -1, 0, 114, 7, 20, -5, 0 },
{ 0, -7, 110, 7, 32, -7, 0 },
{ 0, -11, 103, 7, 46, -10, 0 },
{ 0, -13, 93, 7, 60, -12, 0 },
{ 0, -14, 82, 7, 74, -14, 0 },
{ 0, -13, 69, 7, 86, -14, 0 },
{ 0, -11, 55, 7, 97, -13, 0 },
{ 0, -9, 41, 7, 106, -10, 0 },
{ 0, -6, 28, 7, 111, -5, 0 },
{ 0, -4, 16, 7, 114, 3, -1 },
{ -2, 6, 115, 7, 12, -3, 0 },
{ 0, -2, 111, 7, 24, -5, 0 },
{ 0, -8, 107, 7, 37, -8, 0 },
{ 0, -12, 100, 7, 51, -11, 0 },
{ 0, -14, 90, 7, 65, -13, 0 },
{ 0, -14, 78, 7, 78, -14, 0 },
{ 0, -13, 65, 7, 90, -14, 0 },
{ 0, -11, 51, 7, 100, -12, 0 },
{ 0, -8, 37, 7, 107, -8, 0 },
{ 0, -5, 24, 7, 111, -2, 0 },
{ 0, -3, 12, 7, 115, 6, -2 },
{ -1, 3, 114, 7, 16, -4, 0 },
{ 0, -5, 111, 7, 28, -6, 0 },
{ 0, -10, 106, 7, 41, -9, 0 },
{ 0, -13, 97, 7, 55, -11, 0 },
{ 0, -14, 86, 7, 69, -13, 0 },
{ 0, -14, 74, 7, 82, -14, 0 },
{ 0, -12, 60, 7, 93, -13, 0 },
{ 0, -10, 46, 7, 103, -11, 0 },
{ 0, -7, 32, 7, 110, -7, 0 },
{ 0, -5, 20, 7, 114, 0, -1 } },
.odd = { { -1, 4, 114, 7, 14, -3, 0 },
{ 0, -4, 112, 7, 26, -6, 0 },
{ 0, -9, 107, 7, 39, -9, 0 },
{ 0, -13, 99, 7, 53, -11, 0 },
{ 0, -14, 88, 7, 67, -13, 0 },
{ 0, -14, 76, 7, 80, -14, 0 },
{ 0, -13, 62, 7, 93, -14, 0 },
{ 0, -10, 48, 7, 102, -12, 0 },
{ 0, -8, 35, 7, 109, -8, 0 },
{ 0, -5, 22, 7, 112, -1, 0 },
{ 0, -3, 11, 7, 115, 7, -2 },
{ -1, 1, 114, 7, 18, -4, 0 },
{ 0, -6, 111, 7, 30, -7, 0 },
{ 0, -10, 103, 7, 44, -9, 0 },
{ 0, -13, 95, 7, 58, -12, 0 },
{ 0, -14, 85, 7, 71, -14, 0 },
{ 0, -14, 71, 7, 85, -14, 0 },
{ 0, -12, 58, 7, 95, -13, 0 },
{ 0, -9, 44, 7, 103, -10, 0 },
{ 0, -7, 30, 7, 111, -6, 0 },
{ 0, -4, 18, 7, 114, 1, -1 },
{ -2, 7, 115, 7, 11, -3, 0 },
{ 0, -1, 112, 7, 22, -5, 0 },
{ 0, -8, 109, 7, 35, -8, 0 },
{ 0, -12, 102, 7, 48, -10, 0 },
{ 0, -14, 93, 7, 62, -13, 0 },
{ 0, -14, 80, 7, 76, -14, 0 },
{ 0, -13, 67, 7, 88, -14, 0 },
{ 0, -11, 53, 7, 99, -13, 0 },
{ 0, -9, 39, 7, 107, -9, 0 },
{ 0, -6, 26, 7, 112, -4, 0 },
{ 0, -3, 14, 7, 114, 4, -1 } } },
.ptrn_arr = { { 0xff3fffff, 0xffff9fff, 0xf } },
.sample_patrn_length = 70,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 4) = 0.888889 */
.hor_phase_arr = {
.even = { { -3, 12, 110, 7, 12, -3, 0 },
{ -1, 0, 110, 7, 26, -7, 0 },
{ 0, -8, 103, 7, 43, -10, 0 },
{ 0, -12, 92, 7, 61, -13, 0 },
{ 0, -14, 78, 7, 78, -14, 0 },
{ 0, -13, 61, 7, 92, -12, 0 },
{ 0, -10, 43, 7, 103, -8, 0 },
{ 0, -7, 26, 7, 110, 0, -1 } },
.odd = { { -2, 5, 111, 7, 19, -5, 0 },
{ 0, -4, 106, 7, 34, -8, 0 },
{ 0, -11, 98, 7, 52, -11, 0 },
{ 0, -13, 85, 7, 69, -13, 0 },
{ 0, -13, 69, 7, 85, -13, 0 },
{ 0, -11, 52, 7, 98, -11, 0 },
{ 0, -8, 34, 7, 106, -4, 0 },
{ 0, -5, 19, 7, 111, 5, -2 } } },
.ver_phase_arr = {
.even = { { -3, 12, 110, 7, 12, -3, 0 },
{ -1, 0, 110, 7, 26, -7, 0 },
{ 0, -8, 103, 7, 43, -10, 0 },
{ 0, -12, 92, 7, 61, -13, 0 },
{ 0, -14, 78, 7, 78, -14, 0 },
{ 0, -13, 61, 7, 92, -12, 0 },
{ 0, -10, 43, 7, 103, -8, 0 },
{ 0, -7, 26, 7, 110, 0, -1 } },
.odd = { { -2, 5, 111, 7, 19, -5, 0 },
{ 0, -4, 106, 7, 34, -8, 0 },
{ 0, -11, 98, 7, 52, -11, 0 },
{ 0, -13, 85, 7, 69, -13, 0 },
{ 0, -13, 69, 7, 85, -13, 0 },
{ 0, -11, 52, 7, 98, -11, 0 },
{ 0, -8, 34, 7, 106, -4, 0 },
{ 0, -5, 19, 7, 111, 5, -2 } } },
.ptrn_arr = { { 0xffff } },
.sample_patrn_length = 18,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 5) = 0.864865 */
.hor_phase_arr = {
.even = { { -5, 14, 110, 7, 14, -5, 0 },
{ -1, 0, 106, 7, 32, -9, 0 },
{ 0, -9, 96, 7, 53, -12, 0 },
{ 0, -13, 81, 7, 73, -13, 0 },
{ 0, -13, 61, 7, 91, -11, 0 },
{ 0, -10, 40, 7, 103, -4, -1 },
{ 0, -6, 21, 7, 108, 8, -3 },
{ -3, 5, 108, 7, 25, -7, 0 },
{ 0, -6, 101, 7, 44, -11, 0 },
{ 0, -12, 88, 7, 65, -13, 0 },
{ 0, -13, 69, 7, 85, -13, 0 },
{ 0, -11, 49, 7, 98, -8, 0 },
{ 0, -8, 28, 7, 108, 2, -2 },
{ -4, 11, 108, 7, 18, -5, 0 },
{ -1, -2, 104, 7, 36, -9, 0 },
{ 0, -10, 93, 7, 57, -12, 0 },
{ 0, -13, 77, 7, 77, -13, 0 },
{ 0, -12, 57, 7, 93, -10, 0 },
{ 0, -9, 36, 7, 104, -2, -1 },
{ 0, -5, 18, 7, 108, 11, -4 },
{ -2, 2, 108, 7, 28, -8, 0 },
{ 0, -8, 98, 7, 49, -11, 0 },
{ 0, -13, 85, 7, 69, -13, 0 },
{ 0, -13, 65, 7, 88, -12, 0 },
{ 0, -11, 44, 7, 101, -6, 0 },
{ 0, -7, 25, 7, 108, 5, -3 },
{ -3, 8, 108, 7, 21, -6, 0 },
{ -1, -4, 103, 7, 40, -10, 0 },
{ 0, -11, 91, 7, 61, -13, 0 },
{ 0, -13, 73, 7, 81, -13, 0 },
{ 0, -12, 53, 7, 96, -9, 0 },
{ 0, -9, 32, 7, 106, 0, -1 } },
.odd = { { -3, 7, 108, 7, 23, -7, 0 },
{ 0, -5, 101, 7, 42, -10, 0 },
{ 0, -12, 90, 7, 63, -13, 0 },
{ 0, -13, 71, 7, 83, -13, 0 },
{ 0, -12, 51, 7, 97, -8, 0 },
{ 0, -8, 30, 7, 107, 1, -2 },
{ -4, 13, 108, 7, 16, -5, 0 },
{ -1, -1, 105, 7, 34, -9, 0 },
{ 0, -10, 95, 7, 55, -12, 0 },
{ 0, -13, 79, 7, 75, -13, 0 },
{ 0, -13, 59, 7, 93, -11, 0 },
{ 0, -10, 38, 7, 104, -3, -1 },
{ 0, -6, 19, 7, 110, 9, -4 },
{ -2, 4, 106, 7, 27, -7, 0 },
{ 0, -7, 99, 7, 47, -11, 0 },
{ 0, -12, 86, 7, 67, -13, 0 },
{ 0, -13, 67, 7, 86, -12, 0 },
{ 0, -11, 47, 7, 99, -7, 0 },
{ 0, -7, 27, 7, 106, 4, -2 },
{ -4, 9, 110, 7, 19, -6, 0 },
{ -1, -3, 104, 7, 38, -10, 0 },
{ 0, -11, 93, 7, 59, -13, 0 },
{ 0, -13, 75, 7, 79, -13, 0 },
{ 0, -12, 55, 7, 95, -10, 0 },
{ 0, -9, 34, 7, 105, -1, -1 },
{ 0, -5, 16, 7, 108, 13, -4 },
{ -2, 1, 107, 7, 30, -8, 0 },
{ 0, -8, 97, 7, 51, -12, 0 },
{ 0, -13, 83, 7, 71, -13, 0 },
{ 0, -13, 63, 7, 90, -12, 0 },
{ 0, -10, 42, 7, 101, -5, 0 },
{ 0, -7, 23, 7, 108, 7, -3 } } },
.ver_phase_arr = {
.even = { { -5, 14, 110, 7, 14, -5, 0 },
{ -1, 0, 106, 7, 32, -9, 0 },
{ 0, -9, 96, 7, 53, -12, 0 },
{ 0, -13, 81, 7, 73, -13, 0 },
{ 0, -13, 61, 7, 91, -11, 0 },
{ 0, -10, 40, 7, 103, -4, -1 },
{ 0, -6, 21, 7, 108, 8, -3 },
{ -3, 5, 108, 7, 25, -7, 0 },
{ 0, -6, 101, 7, 44, -11, 0 },
{ 0, -12, 88, 7, 65, -13, 0 },
{ 0, -13, 69, 7, 85, -13, 0 },
{ 0, -11, 49, 7, 98, -8, 0 },
{ 0, -8, 28, 7, 108, 2, -2 },
{ -4, 11, 108, 7, 18, -5, 0 },
{ -1, -2, 104, 7, 36, -9, 0 },
{ 0, -10, 93, 7, 57, -12, 0 },
{ 0, -13, 77, 7, 77, -13, 0 },
{ 0, -12, 57, 7, 93, -10, 0 },
{ 0, -9, 36, 7, 104, -2, -1 },
{ 0, -5, 18, 7, 108, 11, -4 },
{ -2, 2, 108, 7, 28, -8, 0 },
{ 0, -8, 98, 7, 49, -11, 0 },
{ 0, -13, 85, 7, 69, -13, 0 },
{ 0, -13, 65, 7, 88, -12, 0 },
{ 0, -11, 44, 7, 101, -6, 0 },
{ 0, -7, 25, 7, 108, 5, -3 },
{ -3, 8, 108, 7, 21, -6, 0 },
{ -1, -4, 103, 7, 40, -10, 0 },
{ 0, -11, 91, 7, 61, -13, 0 },
{ 0, -13, 73, 7, 81, -13, 0 },
{ 0, -12, 53, 7, 96, -9, 0 },
{ 0, -9, 32, 7, 106, 0, -1 } },
.odd = { { -3, 7, 108, 7, 23, -7, 0 },
{ 0, -5, 101, 7, 42, -10, 0 },
{ 0, -12, 90, 7, 63, -13, 0 },
{ 0, -13, 71, 7, 83, -13, 0 },
{ 0, -12, 51, 7, 97, -8, 0 },
{ 0, -8, 30, 7, 107, 1, -2 },
{ -4, 13, 108, 7, 16, -5, 0 },
{ -1, -1, 105, 7, 34, -9, 0 },
{ 0, -10, 95, 7, 55, -12, 0 },
{ 0, -13, 79, 7, 75, -13, 0 },
{ 0, -13, 59, 7, 93, -11, 0 },
{ 0, -10, 38, 7, 104, -3, -1 },
{ 0, -6, 19, 7, 110, 9, -4 },
{ -2, 4, 106, 7, 27, -7, 0 },
{ 0, -7, 99, 7, 47, -11, 0 },
{ 0, -12, 86, 7, 67, -13, 0 },
{ 0, -13, 67, 7, 86, -12, 0 },
{ 0, -11, 47, 7, 99, -7, 0 },
{ 0, -7, 27, 7, 106, 4, -2 },
{ -4, 9, 110, 7, 19, -6, 0 },
{ -1, -3, 104, 7, 38, -10, 0 },
{ 0, -11, 93, 7, 59, -13, 0 },
{ 0, -13, 75, 7, 79, -13, 0 },
{ 0, -12, 55, 7, 95, -10, 0 },
{ 0, -9, 34, 7, 105, -1, -1 },
{ 0, -5, 16, 7, 108, 13, -4 },
{ -2, 1, 107, 7, 30, -8, 0 },
{ 0, -8, 97, 7, 51, -12, 0 },
{ 0, -13, 83, 7, 71, -13, 0 },
{ 0, -13, 63, 7, 90, -12, 0 },
{ 0, -10, 42, 7, 101, -5, 0 },
{ 0, -7, 23, 7, 108, 7, -3 } } },
.ptrn_arr = { { 0xcfff9fff, 0xf3ffe7ff, 0xff } },
.sample_patrn_length = 74,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 6) = 0.842105 */
.hor_phase_arr = {
.even = { { -6, 17, 106, 7, 17, -6, 0 },
{ -2, 0, 102, 7, 38, -10, 0 },
{ 0, -10, 89, 7, 62, -13, 0 },
{ 0, -13, 69, 7, 83, -11, 0 },
{ 0, -11, 46, 7, 98, -4, -1 },
{ 0, -7, 23, 7, 106, 10, -4 },
{ -3, 5, 104, 7, 31, -9, 0 },
{ 0, -7, 93, 7, 54, -12, 0 },
{ 0, -12, 76, 7, 76, -12, 0 },
{ 0, -12, 54, 7, 93, -7, 0 },
{ 0, -9, 31, 7, 104, 5, -3 },
{ -4, 10, 106, 7, 23, -7, 0 },
{ -1, -4, 98, 7, 46, -11, 0 },
{ 0, -11, 83, 7, 69, -13, 0 },
{ 0, -13, 62, 7, 89, -10, 0 },
{ 0, -10, 38, 7, 102, 0, -2 } },
.odd = { { -4, 8, 105, 7, 27, -8, 0 },
{ 0, -6, 96, 7, 50, -12, 0 },
{ 0, -12, 80, 7, 73, -13, 0 },
{ 0, -13, 58, 7, 92, -9, 0 },
{ 0, -9, 34, 7, 103, 2, -2 },
{ -5, 13, 107, 7, 20, -7, 0 },
{ -1, -2, 100, 7, 42, -11, 0 },
{ 0, -11, 87, 7, 65, -13, 0 },
{ 0, -13, 65, 7, 87, -11, 0 },
{ 0, -11, 42, 7, 100, -2, -1 },
{ 0, -7, 20, 7, 107, 13, -5 },
{ -2, 2, 103, 7, 34, -9, 0 },
{ 0, -9, 92, 7, 58, -13, 0 },
{ 0, -13, 73, 7, 80, -12, 0 },
{ 0, -12, 50, 7, 96, -6, 0 },
{ 0, -8, 27, 7, 105, 8, -4 } } },
.ver_phase_arr = {
.even = { { -6, 17, 106, 7, 17, -6, 0 },
{ -2, 0, 102, 7, 38, -10, 0 },
{ 0, -10, 89, 7, 62, -13, 0 },
{ 0, -13, 69, 7, 83, -11, 0 },
{ 0, -11, 46, 7, 98, -4, -1 },
{ 0, -7, 23, 7, 106, 10, -4 },
{ -3, 5, 104, 7, 31, -9, 0 },
{ 0, -7, 93, 7, 54, -12, 0 },
{ 0, -12, 76, 7, 76, -12, 0 },
{ 0, -12, 54, 7, 93, -7, 0 },
{ 0, -9, 31, 7, 104, 5, -3 },
{ -4, 10, 106, 7, 23, -7, 0 },
{ -1, -4, 98, 7, 46, -11, 0 },
{ 0, -11, 83, 7, 69, -13, 0 },
{ 0, -13, 62, 7, 89, -10, 0 },
{ 0, -10, 38, 7, 102, 0, -2 } },
.odd = { { -4, 8, 105, 7, 27, -8, 0 },
{ 0, -6, 96, 7, 50, -12, 0 },
{ 0, -12, 80, 7, 73, -13, 0 },
{ 0, -13, 58, 7, 92, -9, 0 },
{ 0, -9, 34, 7, 103, 2, -2 },
{ -5, 13, 107, 7, 20, -7, 0 },
{ -1, -2, 100, 7, 42, -11, 0 },
{ 0, -11, 87, 7, 65, -13, 0 },
{ 0, -13, 65, 7, 87, -11, 0 },
{ 0, -11, 42, 7, 100, -2, -1 },
{ 0, -7, 20, 7, 107, 13, -5 },
{ -2, 2, 103, 7, 34, -9, 0 },
{ 0, -9, 92, 7, 58, -13, 0 },
{ 0, -13, 73, 7, 80, -12, 0 },
{ 0, -12, 50, 7, 96, -6, 0 },
{ 0, -8, 27, 7, 105, 8, -4 } } },
.ptrn_arr = { { 0xfcffe7ff, 0xf } },
.sample_patrn_length = 38,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 7) = 0.820513 */
.hor_phase_arr = {
.even = { { -7, 19, 104, 7, 19, -7, 0 },
{ -2, 0, 98, 7, 43, -11, 0 },
{ 0, -10, 81, 7, 69, -12, 0 },
{ 0, -12, 58, 7, 89, -7, 0 },
{ 0, -10, 32, 7, 103, 7, -4 },
{ -5, 10, 103, 7, 29, -9, 0 },
{ -1, -6, 93, 7, 54, -12, 0 },
{ 0, -12, 72, 7, 79, -11, 0 },
{ 0, -12, 47, 7, 97, -2, -2 },
{ 0, -8, 22, 7, 104, 16, -6 },
{ -3, 2, 100, 7, 40, -11, 0 },
{ 0, -9, 84, 7, 65, -12, 0 },
{ 0, -13, 62, 7, 87, -8, 0 },
{ 0, -10, 36, 7, 100, 5, -3 },
{ -5, 13, 103, 7, 25, -8, 0 },
{ -1, -4, 94, 7, 51, -12, 0 },
{ 0, -12, 76, 7, 76, -12, 0 },
{ 0, -12, 51, 7, 94, -4, -1 },
{ 0, -8, 25, 7, 103, 13, -5 },
{ -3, 5, 100, 7, 36, -10, 0 },
{ 0, -8, 87, 7, 62, -13, 0 },
{ 0, -12, 65, 7, 84, -9, 0 },
{ 0, -11, 40, 7, 100, 2, -3 },
{ -6, 16, 104, 7, 22, -8, 0 },
{ -2, -2, 97, 7, 47, -12, 0 },
{ 0, -11, 79, 7, 72, -12, 0 },
{ 0, -12, 54, 7, 93, -6, -1 },
{ 0, -9, 29, 7, 103, 10, -5 },
{ -4, 7, 103, 7, 32, -10, 0 },
{ 0, -7, 89, 7, 58, -12, 0 },
{ 0, -12, 69, 7, 81, -10, 0 },
{ 0, -11, 43, 7, 98, 0, -2 } },
.odd = { { -4, 9, 101, 7, 31, -9, 0 },
{ -1, -6, 91, 7, 56, -12, 0 },
{ 0, -12, 71, 7, 80, -11, 0 },
{ 0, -11, 45, 7, 97, -1, -2 },
{ 0, -7, 20, 7, 105, 17, -7 },
{ -3, 1, 100, 7, 41, -11, 0 },
{ 0, -10, 83, 7, 67, -12, 0 },
{ 0, -13, 60, 7, 89, -8, 0 },
{ 0, -10, 34, 7, 102, 6, -4 },
{ -5, 11, 104, 7, 27, -9, 0 },
{ -1, -5, 94, 7, 52, -12, 0 },
{ 0, -12, 74, 7, 77, -11, 0 },
{ 0, -12, 49, 7, 95, -3, -1 },
{ 0, -8, 24, 7, 104, 14, -6 },
{ -3, 3, 100, 7, 38, -10, 0 },
{ 0, -9, 87, 7, 63, -13, 0 },
{ 0, -13, 63, 7, 87, -9, 0 },
{ 0, -10, 38, 7, 100, 3, -3 },
{ -6, 14, 104, 7, 24, -8, 0 },
{ -1, -3, 95, 7, 49, -12, 0 },
{ 0, -11, 77, 7, 74, -12, 0 },
{ 0, -12, 52, 7, 94, -5, -1 },
{ 0, -9, 27, 7, 104, 11, -5 },
{ -4, 6, 102, 7, 34, -10, 0 },
{ 0, -8, 89, 7, 60, -13, 0 },
{ 0, -12, 67, 7, 83, -10, 0 },
{ 0, -11, 41, 7, 100, 1, -3 },
{ -7, 17, 105, 7, 20, -7, 0 },
{ -2, -1, 97, 7, 45, -11, 0 },
{ 0, -11, 80, 7, 71, -12, 0 },
{ 0, -12, 56, 7, 91, -6, -1 },
{ 0, -9, 31, 7, 101, 9, -4 } } },
.ver_phase_arr = {
.even = { { -7, 19, 104, 7, 19, -7, 0 },
{ -2, 0, 98, 7, 43, -11, 0 },
{ 0, -10, 81, 7, 69, -12, 0 },
{ 0, -12, 58, 7, 89, -7, 0 },
{ 0, -10, 32, 7, 103, 7, -4 },
{ -5, 10, 103, 7, 29, -9, 0 },
{ -1, -6, 93, 7, 54, -12, 0 },
{ 0, -12, 72, 7, 79, -11, 0 },
{ 0, -12, 47, 7, 97, -2, -2 },
{ 0, -8, 22, 7, 104, 16, -6 },
{ -3, 2, 100, 7, 40, -11, 0 },
{ 0, -9, 84, 7, 65, -12, 0 },
{ 0, -13, 62, 7, 87, -8, 0 },
{ 0, -10, 36, 7, 100, 5, -3 },
{ -5, 13, 103, 7, 25, -8, 0 },
{ -1, -4, 94, 7, 51, -12, 0 },
{ 0, -12, 76, 7, 76, -12, 0 },
{ 0, -12, 51, 7, 94, -4, -1 },
{ 0, -8, 25, 7, 103, 13, -5 },
{ -3, 5, 100, 7, 36, -10, 0 },
{ 0, -8, 87, 7, 62, -13, 0 },
{ 0, -12, 65, 7, 84, -9, 0 },
{ 0, -11, 40, 7, 100, 2, -3 },
{ -6, 16, 104, 7, 22, -8, 0 },
{ -2, -2, 97, 7, 47, -12, 0 },
{ 0, -11, 79, 7, 72, -12, 0 },
{ 0, -12, 54, 7, 93, -6, -1 },
{ 0, -9, 29, 7, 103, 10, -5 },
{ -4, 7, 103, 7, 32, -10, 0 },
{ 0, -7, 89, 7, 58, -12, 0 },
{ 0, -12, 69, 7, 81, -10, 0 },
{ 0, -11, 43, 7, 98, 0, -2 } },
.odd = { { -4, 9, 101, 7, 31, -9, 0 },
{ -1, -6, 91, 7, 56, -12, 0 },
{ 0, -12, 71, 7, 80, -11, 0 },
{ 0, -11, 45, 7, 97, -1, -2 },
{ 0, -7, 20, 7, 105, 17, -7 },
{ -3, 1, 100, 7, 41, -11, 0 },
{ 0, -10, 83, 7, 67, -12, 0 },
{ 0, -13, 60, 7, 89, -8, 0 },
{ 0, -10, 34, 7, 102, 6, -4 },
{ -5, 11, 104, 7, 27, -9, 0 },
{ -1, -5, 94, 7, 52, -12, 0 },
{ 0, -12, 74, 7, 77, -11, 0 },
{ 0, -12, 49, 7, 95, -3, -1 },
{ 0, -8, 24, 7, 104, 14, -6 },
{ -3, 3, 100, 7, 38, -10, 0 },
{ 0, -9, 87, 7, 63, -13, 0 },
{ 0, -13, 63, 7, 87, -9, 0 },
{ 0, -10, 38, 7, 100, 3, -3 },
{ -6, 14, 104, 7, 24, -8, 0 },
{ -1, -3, 95, 7, 49, -12, 0 },
{ 0, -11, 77, 7, 74, -12, 0 },
{ 0, -12, 52, 7, 94, -5, -1 },
{ 0, -9, 27, 7, 104, 11, -5 },
{ -4, 6, 102, 7, 34, -10, 0 },
{ 0, -8, 89, 7, 60, -13, 0 },
{ 0, -12, 67, 7, 83, -10, 0 },
{ 0, -11, 41, 7, 100, 1, -3 },
{ -7, 17, 105, 7, 20, -7, 0 },
{ -2, -1, 97, 7, 45, -11, 0 },
{ 0, -11, 80, 7, 71, -12, 0 },
{ 0, -12, 56, 7, 91, -6, -1 },
{ 0, -9, 31, 7, 101, 9, -4 } } },
.ptrn_arr = { { 0xff9ff3ff, 0xff3fe7fc, 0xff9 } },
.sample_patrn_length = 78,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 8) = 0.8 */
.hor_phase_arr = {
.even = { { -8, 21, 102, 7, 21, -8, 0 },
{ -3, 0, 95, 7, 48, -12, 0 },
{ 0, -11, 75, 7, 75, -11, 0 },
{ 0, -12, 48, 7, 95, 0, -3 } },
.odd = { { -5, 9, 100, 7, 34, -10, 0 },
{ -1, -7, 86, 7, 62, -12, 0 },
{ 0, -12, 62, 7, 86, -7, -1 },
{ 0, -10, 34, 7, 100, 9, -5 } } },
.ver_phase_arr = {
.even = { { -8, 21, 102, 7, 21, -8, 0 },
{ -3, 0, 95, 7, 48, -12, 0 },
{ 0, -11, 75, 7, 75, -11, 0 },
{ 0, -12, 48, 7, 95, 0, -3 } },
.odd = { { -5, 9, 100, 7, 34, -10, 0 },
{ -1, -7, 86, 7, 62, -12, 0 },
{ 0, -12, 62, 7, 86, -7, -1 },
{ 0, -10, 34, 7, 100, 9, -5 } } },
.ptrn_arr = { { 0xff } },
.sample_patrn_length = 10,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 9) = 0.780488 */
.hor_phase_arr = {
.even = { { -9, 23, 100, 7, 23, -9, 0 },
{ -3, 0, 91, 7, 52, -12, 0 },
{ 0, -11, 68, 7, 80, -8, -1 },
{ 0, -11, 39, 7, 96, 9, -5 },
{ -6, 12, 98, 7, 35, -11, 0 },
{ -1, -6, 81, 7, 65, -11, 0 },
{ 0, -12, 55, 7, 89, -2, -2 },
{ 0, -9, 26, 7, 99, 20, -8 },
{ -4, 2, 93, 7, 49, -12, 0 },
{ 0, -10, 71, 7, 76, -9, 0 },
{ 0, -11, 42, 7, 95, 7, -5 },
{ -7, 14, 99, 7, 32, -10, 0 },
{ -1, -5, 84, 7, 62, -12, 0 },
{ 0, -12, 59, 7, 87, -4, -2 },
{ 0, -10, 29, 7, 99, 17, -7 },
{ -4, 4, 95, 7, 45, -12, 0 },
{ 0, -9, 72, 7, 74, -9, 0 },
{ 0, -12, 45, 7, 95, 4, -4 },
{ -7, 17, 99, 7, 29, -10, 0 },
{ -2, -4, 87, 7, 59, -12, 0 },
{ 0, -12, 62, 7, 84, -5, -1 },
{ 0, -10, 32, 7, 99, 14, -7 },
{ -5, 7, 95, 7, 42, -11, 0 },
{ 0, -9, 76, 7, 71, -10, 0 },
{ 0, -12, 49, 7, 93, 2, -4 },
{ -8, 20, 99, 7, 26, -9, 0 },
{ -2, -2, 89, 7, 55, -12, 0 },
{ 0, -11, 65, 7, 81, -6, -1 },
{ 0, -11, 35, 7, 98, 12, -6 },
{ -5, 9, 96, 7, 39, -11, 0 },
{ -1, -8, 80, 7, 68, -11, 0 },
{ 0, -12, 52, 7, 91, 0, -3 } },
.odd = { { -6, 10, 98, 7, 37, -11, 0 },
{ -1, -7, 81, 7, 66, -11, 0 },
{ 0, -12, 54, 7, 90, -1, -3 },
{ 0, -9, 24, 7, 100, 21, -8 },
{ -3, 1, 92, 7, 50, -12, 0 },
{ 0, -10, 69, 7, 78, -8, -1 },
{ 0, -11, 40, 7, 96, 8, -5 },
{ -6, 13, 97, 7, 34, -10, 0 },
{ -1, -6, 83, 7, 63, -11, 0 },
{ 0, -12, 57, 7, 88, -3, -2 },
{ 0, -9, 27, 7, 100, 18, -8 },
{ -4, 3, 94, 7, 47, -12, 0 },
{ 0, -10, 72, 7, 75, -9, 0 },
{ 0, -11, 44, 7, 95, 5, -5 },
{ -7, 16, 98, 7, 31, -10, 0 },
{ -2, -4, 86, 7, 60, -12, 0 },
{ 0, -12, 60, 7, 86, -4, -2 },
{ 0, -10, 31, 7, 98, 16, -7 },
{ -5, 5, 95, 7, 44, -11, 0 },
{ 0, -9, 75, 7, 72, -10, 0 },
{ 0, -12, 47, 7, 94, 3, -4 },
{ -8, 18, 100, 7, 27, -9, 0 },
{ -2, -3, 88, 7, 57, -12, 0 },
{ 0, -11, 63, 7, 83, -6, -1 },
{ 0, -10, 34, 7, 97, 13, -6 },
{ -5, 8, 96, 7, 40, -11, 0 },
{ -1, -8, 78, 7, 69, -10, 0 },
{ 0, -12, 50, 7, 92, 1, -3 },
{ -8, 21, 100, 7, 24, -9, 0 },
{ -3, -1, 90, 7, 54, -12, 0 },
{ 0, -11, 66, 7, 81, -7, -1 },
{ 0, -11, 37, 7, 98, 10, -6 } } },
.ver_phase_arr = {
.even = { { -9, 23, 100, 7, 23, -9, 0 },
{ -3, 0, 91, 7, 52, -12, 0 },
{ 0, -11, 68, 7, 80, -8, -1 },
{ 0, -11, 39, 7, 96, 9, -5 },
{ -6, 12, 98, 7, 35, -11, 0 },
{ -1, -6, 81, 7, 65, -11, 0 },
{ 0, -12, 55, 7, 89, -2, -2 },
{ 0, -9, 26, 7, 99, 20, -8 },
{ -4, 2, 93, 7, 49, -12, 0 },
{ 0, -10, 71, 7, 76, -9, 0 },
{ 0, -11, 42, 7, 95, 7, -5 },
{ -7, 14, 99, 7, 32, -10, 0 },
{ -1, -5, 84, 7, 62, -12, 0 },
{ 0, -12, 59, 7, 87, -4, -2 },
{ 0, -10, 29, 7, 99, 17, -7 },
{ -4, 4, 95, 7, 45, -12, 0 },
{ 0, -9, 72, 7, 74, -9, 0 },
{ 0, -12, 45, 7, 95, 4, -4 },
{ -7, 17, 99, 7, 29, -10, 0 },
{ -2, -4, 87, 7, 59, -12, 0 },
{ 0, -12, 62, 7, 84, -5, -1 },
{ 0, -10, 32, 7, 99, 14, -7 },
{ -5, 7, 95, 7, 42, -11, 0 },
{ 0, -9, 76, 7, 71, -10, 0 },
{ 0, -12, 49, 7, 93, 2, -4 },
{ -8, 20, 99, 7, 26, -9, 0 },
{ -2, -2, 89, 7, 55, -12, 0 },
{ 0, -11, 65, 7, 81, -6, -1 },
{ 0, -11, 35, 7, 98, 12, -6 },
{ -5, 9, 96, 7, 39, -11, 0 },
{ -1, -8, 80, 7, 68, -11, 0 },
{ 0, -12, 52, 7, 91, 0, -3 } },
.odd = { { -6, 10, 98, 7, 37, -11, 0 },
{ -1, -7, 81, 7, 66, -11, 0 },
{ 0, -12, 54, 7, 90, -1, -3 },
{ 0, -9, 24, 7, 100, 21, -8 },
{ -3, 1, 92, 7, 50, -12, 0 },
{ 0, -10, 69, 7, 78, -8, -1 },
{ 0, -11, 40, 7, 96, 8, -5 },
{ -6, 13, 97, 7, 34, -10, 0 },
{ -1, -6, 83, 7, 63, -11, 0 },
{ 0, -12, 57, 7, 88, -3, -2 },
{ 0, -9, 27, 7, 100, 18, -8 },
{ -4, 3, 94, 7, 47, -12, 0 },
{ 0, -10, 72, 7, 75, -9, 0 },
{ 0, -11, 44, 7, 95, 5, -5 },
{ -7, 16, 98, 7, 31, -10, 0 },
{ -2, -4, 86, 7, 60, -12, 0 },
{ 0, -12, 60, 7, 86, -4, -2 },
{ 0, -10, 31, 7, 98, 16, -7 },
{ -5, 5, 95, 7, 44, -11, 0 },
{ 0, -9, 75, 7, 72, -10, 0 },
{ 0, -12, 47, 7, 94, 3, -4 },
{ -8, 18, 100, 7, 27, -9, 0 },
{ -2, -3, 88, 7, 57, -12, 0 },
{ 0, -11, 63, 7, 83, -6, -1 },
{ 0, -10, 34, 7, 97, 13, -6 },
{ -5, 8, 96, 7, 40, -11, 0 },
{ -1, -8, 78, 7, 69, -10, 0 },
{ 0, -12, 50, 7, 92, 1, -3 },
{ -8, 21, 100, 7, 24, -9, 0 },
{ -3, -1, 90, 7, 54, -12, 0 },
{ 0, -11, 66, 7, 81, -7, -1 },
{ 0, -11, 37, 7, 98, 10, -6 } } },
.ptrn_arr = { { 0xf3f9fcff, 0x3f9fcfe7, 0xfe7f } },
.sample_patrn_length = 82,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 10) = 0.761905 */
.hor_phase_arr = {
.even = { { -9, 25, 96, 7, 25, -9, 0 },
{ -3, 0, 86, 7, 56, -11, 0 },
{ 0, -11, 62, 7, 82, -3, -2 },
{ 0, -10, 31, 7, 96, 19, -8 },
{ -5, 4, 92, 7, 49, -12, 0 },
{ 0, -10, 67, 7, 78, -6, -1 },
{ 0, -11, 37, 7, 95, 14, -7 },
{ -6, 9, 93, 7, 43, -11, 0 },
{ -1, -8, 73, 7, 73, -8, -1 },
{ 0, -11, 43, 7, 93, 9, -6 },
{ -7, 14, 95, 7, 37, -11, 0 },
{ -1, -6, 78, 7, 67, -10, 0 },
{ 0, -12, 49, 7, 92, 4, -5 },
{ -8, 19, 96, 7, 31, -10, 0 },
{ -2, -3, 82, 7, 62, -11, 0 },
{ 0, -11, 56, 7, 86, 0, -3 } },
.odd = { { -6, 11, 94, 7, 40, -11, 0 },
{ -1, -7, 75, 7, 70, -9, 0 },
{ 0, -12, 46, 7, 93, 6, -5 },
{ -8, 16, 97, 7, 34, -11, 0 },
{ -2, -5, 81, 7, 64, -10, 0 },
{ 0, -12, 53, 7, 89, 2, -4 },
{ -9, 22, 97, 7, 28, -10, 0 },
{ -3, -2, 85, 7, 59, -11, 0 },
{ 0, -11, 59, 7, 85, -2, -3 },
{ 0, -10, 28, 7, 97, 22, -9 },
{ -4, 2, 89, 7, 53, -12, 0 },
{ 0, -10, 64, 7, 81, -5, -2 },
{ 0, -11, 34, 7, 97, 16, -8 },
{ -5, 6, 93, 7, 46, -12, 0 },
{ 0, -9, 70, 7, 75, -7, -1 },
{ 0, -11, 40, 7, 94, 11, -6 } } },
.ver_phase_arr = {
.even = { { -9, 25, 96, 7, 25, -9, 0 },
{ -3, 0, 86, 7, 56, -11, 0 },
{ 0, -11, 62, 7, 82, -3, -2 },
{ 0, -10, 31, 7, 96, 19, -8 },
{ -5, 4, 92, 7, 49, -12, 0 },
{ 0, -10, 67, 7, 78, -6, -1 },
{ 0, -11, 37, 7, 95, 14, -7 },
{ -6, 9, 93, 7, 43, -11, 0 },
{ -1, -8, 73, 7, 73, -8, -1 },
{ 0, -11, 43, 7, 93, 9, -6 },
{ -7, 14, 95, 7, 37, -11, 0 },
{ -1, -6, 78, 7, 67, -10, 0 },
{ 0, -12, 49, 7, 92, 4, -5 },
{ -8, 19, 96, 7, 31, -10, 0 },
{ -2, -3, 82, 7, 62, -11, 0 },
{ 0, -11, 56, 7, 86, 0, -3 } },
.odd = { { -6, 11, 94, 7, 40, -11, 0 },
{ -1, -7, 75, 7, 70, -9, 0 },
{ 0, -12, 46, 7, 93, 6, -5 },
{ -8, 16, 97, 7, 34, -11, 0 },
{ -2, -5, 81, 7, 64, -10, 0 },
{ 0, -12, 53, 7, 89, 2, -4 },
{ -9, 22, 97, 7, 28, -10, 0 },
{ -3, -2, 85, 7, 59, -11, 0 },
{ 0, -11, 59, 7, 85, -2, -3 },
{ 0, -10, 28, 7, 97, 22, -9 },
{ -4, 2, 89, 7, 53, -12, 0 },
{ 0, -10, 64, 7, 81, -5, -2 },
{ 0, -11, 34, 7, 97, 16, -8 },
{ -5, 6, 93, 7, 46, -12, 0 },
{ 0, -9, 70, 7, 75, -7, -1 },
{ 0, -11, 40, 7, 94, 11, -6 } } },
.ptrn_arr = { { 0xfcfe7e7f, 0xfc } },
.sample_patrn_length = 42,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 11) = 0.744186 */
.hor_phase_arr = {
.even = { { -10, 26, 96, 7, 26, -10, 0 },
{ -4, 0, 83, 7, 59, -10, 0 },
{ 0, -11, 56, 7, 85, 2, -4 },
{ -9, 23, 95, 7, 29, -10, 0 },
{ -3, -2, 82, 7, 61, -10, 0 },
{ 0, -11, 53, 7, 87, 4, -5 },
{ -9, 21, 94, 7, 32, -10, 0 },
{ -3, -3, 79, 7, 64, -9, 0 },
{ 0, -11, 50, 7, 88, 6, -5 },
{ -8, 18, 94, 7, 35, -11, 0 },
{ -2, -5, 78, 7, 67, -9, -1 },
{ 0, -11, 47, 7, 90, 8, -6 },
{ -8, 15, 94, 7, 38, -11, 0 },
{ -2, -6, 75, 7, 70, -8, -1 },
{ 0, -11, 44, 7, 92, 10, -7 },
{ -7, 13, 92, 7, 41, -11, 0 },
{ -1, -7, 72, 7, 72, -7, -1 },
{ 0, -11, 41, 7, 92, 13, -7 },
{ -7, 10, 92, 7, 44, -11, 0 },
{ -1, -8, 70, 7, 75, -6, -2 },
{ 0, -11, 38, 7, 94, 15, -8 },
{ -6, 8, 90, 7, 47, -11, 0 },
{ -1, -9, 67, 7, 78, -5, -2 },
{ 0, -11, 35, 7, 94, 18, -8 },
{ -5, 6, 88, 7, 50, -11, 0 },
{ 0, -9, 64, 7, 79, -3, -3 },
{ 0, -10, 32, 7, 94, 21, -9 },
{ -5, 4, 87, 7, 53, -11, 0 },
{ 0, -10, 61, 7, 82, -2, -3 },
{ 0, -10, 29, 7, 95, 23, -9 },
{ -4, 2, 85, 7, 56, -11, 0 },
{ 0, -10, 59, 7, 83, 0, -4 } },
.odd = { { -7, 12, 92, 7, 42, -11, 0 },
{ -1, -7, 71, 7, 72, -6, -1 },
{ 0, -11, 39, 7, 93, 14, -7 },
{ -6, 9, 91, 7, 45, -11, 0 },
{ -1, -8, 68, 7, 76, -5, -2 },
{ 0, -11, 36, 7, 94, 17, -8 },
{ -6, 7, 90, 7, 48, -11, 0 },
{ 0, -9, 66, 7, 77, -4, -2 },
{ 0, -11, 33, 7, 96, 19, -9 },
{ -5, 5, 88, 7, 51, -11, 0 },
{ 0, -10, 63, 7, 80, -2, -3 },
{ 0, -10, 31, 7, 94, 22, -9 },
{ -5, 3, 87, 7, 54, -11, 0 },
{ 0, -10, 60, 7, 82, -1, -3 },
{ 0, -10, 28, 7, 94, 25, -9 },
{ -4, 1, 85, 7, 57, -11, 0 },
{ 0, -11, 57, 7, 85, 1, -4 },
{ -9, 25, 94, 7, 28, -10, 0 },
{ -3, -1, 82, 7, 60, -10, 0 },
{ 0, -11, 54, 7, 87, 3, -5 },
{ -9, 22, 94, 7, 31, -10, 0 },
{ -3, -2, 80, 7, 63, -10, 0 },
{ 0, -11, 51, 7, 88, 5, -5 },
{ -9, 19, 96, 7, 33, -11, 0 },
{ -2, -4, 77, 7, 66, -9, 0 },
{ 0, -11, 48, 7, 90, 7, -6 },
{ -8, 17, 94, 7, 36, -11, 0 },
{ -2, -5, 76, 7, 68, -8, -1 },
{ 0, -11, 45, 7, 91, 9, -6 },
{ -7, 14, 93, 7, 39, -11, 0 },
{ -1, -6, 72, 7, 71, -7, -1 },
{ 0, -11, 42, 7, 92, 12, -7 } } },
.ver_phase_arr = {
.even = { { -10, 26, 96, 7, 26, -10, 0 },
{ -4, 0, 83, 7, 59, -10, 0 },
{ 0, -11, 56, 7, 85, 2, -4 },
{ -9, 23, 95, 7, 29, -10, 0 },
{ -3, -2, 82, 7, 61, -10, 0 },
{ 0, -11, 53, 7, 87, 4, -5 },
{ -9, 21, 94, 7, 32, -10, 0 },
{ -3, -3, 79, 7, 64, -9, 0 },
{ 0, -11, 50, 7, 88, 6, -5 },
{ -8, 18, 94, 7, 35, -11, 0 },
{ -2, -5, 78, 7, 67, -9, -1 },
{ 0, -11, 47, 7, 90, 8, -6 },
{ -8, 15, 94, 7, 38, -11, 0 },
{ -2, -6, 75, 7, 70, -8, -1 },
{ 0, -11, 44, 7, 92, 10, -7 },
{ -7, 13, 92, 7, 41, -11, 0 },
{ -1, -7, 72, 7, 72, -7, -1 },
{ 0, -11, 41, 7, 92, 13, -7 },
{ -7, 10, 92, 7, 44, -11, 0 },
{ -1, -8, 70, 7, 75, -6, -2 },
{ 0, -11, 38, 7, 94, 15, -8 },
{ -6, 8, 90, 7, 47, -11, 0 },
{ -1, -9, 67, 7, 78, -5, -2 },
{ 0, -11, 35, 7, 94, 18, -8 },
{ -5, 6, 88, 7, 50, -11, 0 },
{ 0, -9, 64, 7, 79, -3, -3 },
{ 0, -10, 32, 7, 94, 21, -9 },
{ -5, 4, 87, 7, 53, -11, 0 },
{ 0, -10, 61, 7, 82, -2, -3 },
{ 0, -10, 29, 7, 95, 23, -9 },
{ -4, 2, 85, 7, 56, -11, 0 },
{ 0, -10, 59, 7, 83, 0, -4 } },
.odd = { { -7, 12, 92, 7, 42, -11, 0 },
{ -1, -7, 71, 7, 72, -6, -1 },
{ 0, -11, 39, 7, 93, 14, -7 },
{ -6, 9, 91, 7, 45, -11, 0 },
{ -1, -8, 68, 7, 76, -5, -2 },
{ 0, -11, 36, 7, 94, 17, -8 },
{ -6, 7, 90, 7, 48, -11, 0 },
{ 0, -9, 66, 7, 77, -4, -2 },
{ 0, -11, 33, 7, 96, 19, -9 },
{ -5, 5, 88, 7, 51, -11, 0 },
{ 0, -10, 63, 7, 80, -2, -3 },
{ 0, -10, 31, 7, 94, 22, -9 },
{ -5, 3, 87, 7, 54, -11, 0 },
{ 0, -10, 60, 7, 82, -1, -3 },
{ 0, -10, 28, 7, 94, 25, -9 },
{ -4, 1, 85, 7, 57, -11, 0 },
{ 0, -11, 57, 7, 85, 1, -4 },
{ -9, 25, 94, 7, 28, -10, 0 },
{ -3, -1, 82, 7, 60, -10, 0 },
{ 0, -11, 54, 7, 87, 3, -5 },
{ -9, 22, 94, 7, 31, -10, 0 },
{ -3, -2, 80, 7, 63, -10, 0 },
{ 0, -11, 51, 7, 88, 5, -5 },
{ -9, 19, 96, 7, 33, -11, 0 },
{ -2, -4, 77, 7, 66, -9, 0 },
{ 0, -11, 48, 7, 90, 7, -6 },
{ -8, 17, 94, 7, 36, -11, 0 },
{ -2, -5, 76, 7, 68, -8, -1 },
{ 0, -11, 45, 7, 91, 9, -6 },
{ -7, 14, 93, 7, 39, -11, 0 },
{ -1, -6, 72, 7, 71, -7, -1 },
{ 0, -11, 42, 7, 92, 12, -7 } } },
.ptrn_arr = { { 0x3f3f3f3f, 0x9f9f9f3f, 0xf9f9f } },
.sample_patrn_length = 86,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 12) = 0.727273 */
.hor_phase_arr = {
.even = { { -10, 28, 92, 7, 28, -10, 0 },
{ -4, 0, 81, 7, 61, -9, -1 },
{ 0, -11, 50, 7, 87, 8, -6 },
{ -8, 17, 91, 7, 39, -11, 0 },
{ -2, -6, 72, 7, 72, -6, -2 },
{ 0, -11, 39, 7, 91, 17, -8 },
{ -6, 8, 87, 7, 50, -11, 0 },
{ -1, -9, 61, 7, 81, 0, -4 } },
.odd = { { -7, 12, 89, 7, 45, -11, 0 },
{ -1, -8, 67, 7, 76, -3, -3 },
{ 0, -11, 33, 7, 93, 22, -9 },
{ -5, 4, 83, 7, 56, -10, 0 },
{ 0, -10, 56, 7, 83, 4, -5 },
{ -9, 22, 93, 7, 33, -11, 0 },
{ -3, -3, 76, 7, 67, -8, -1 },
{ 0, -11, 45, 7, 89, 12, -7 } } },
.ver_phase_arr = {
.even = { { -10, 28, 92, 7, 28, -10, 0 },
{ -4, 0, 81, 7, 61, -9, -1 },
{ 0, -11, 50, 7, 87, 8, -6 },
{ -8, 17, 91, 7, 39, -11, 0 },
{ -2, -6, 72, 7, 72, -6, -2 },
{ 0, -11, 39, 7, 91, 17, -8 },
{ -6, 8, 87, 7, 50, -11, 0 },
{ -1, -9, 61, 7, 81, 0, -4 } },
.odd = { { -7, 12, 89, 7, 45, -11, 0 },
{ -1, -8, 67, 7, 76, -3, -3 },
{ 0, -11, 33, 7, 93, 22, -9 },
{ -5, 4, 83, 7, 56, -10, 0 },
{ 0, -10, 56, 7, 83, 4, -5 },
{ -9, 22, 93, 7, 33, -11, 0 },
{ -3, -3, 76, 7, 67, -8, -1 },
{ 0, -11, 45, 7, 89, 12, -7 } } },
.ptrn_arr = { { 0xf9f3f } },
.sample_patrn_length = 22,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 13) = 0.711111 */
.hor_phase_arr = {
.even = { { -10, 29, 90, 7, 29, -10, 0 },
{ -4, 0, 76, 7, 64, -7, -1 },
{ 0, -11, 45, 7, 88, 14, -8 },
{ -7, 12, 85, 7, 48, -10, 0 },
{ -1, -8, 61, 7, 79, 2, -5 },
{ -10, 26, 90, 7, 32, -10, 0 },
{ -4, -2, 76, 7, 66, -6, -2 },
{ 0, -11, 42, 7, 89, 16, -8 },
{ -7, 10, 84, 7, 51, -10, 0 },
{ -1, -9, 59, 7, 81, 3, -5 },
{ -10, 24, 91, 7, 34, -11, 0 },
{ -3, -3, 72, 7, 69, -5, -2 },
{ 0, -11, 40, 7, 89, 19, -9 },
{ -6, 7, 84, 7, 53, -10, 0 },
{ -1, -9, 56, 7, 83, 5, -6 },
{ -9, 21, 90, 7, 37, -11, 0 },
{ -3, -4, 71, 7, 71, -4, -3 },
{ 0, -11, 37, 7, 90, 21, -9 },
{ -6, 5, 83, 7, 56, -9, -1 },
{ 0, -10, 53, 7, 84, 7, -6 },
{ -9, 19, 89, 7, 40, -11, 0 },
{ -2, -5, 69, 7, 72, -3, -3 },
{ 0, -11, 34, 7, 91, 24, -10 },
{ -5, 3, 81, 7, 59, -9, -1 },
{ 0, -10, 51, 7, 84, 10, -7 },
{ -8, 16, 89, 7, 42, -11, 0 },
{ -2, -6, 66, 7, 76, -2, -4 },
{ 0, -10, 32, 7, 90, 26, -10 },
{ -5, 2, 79, 7, 61, -8, -1 },
{ 0, -10, 48, 7, 85, 12, -7 },
{ -8, 14, 88, 7, 45, -11, 0 },
{ -1, -7, 64, 7, 76, 0, -4 } },
.odd = { { -8, 13, 88, 7, 46, -11, 0 },
{ -1, -8, 63, 7, 78, 1, -5 },
{ -10, 28, 90, 7, 30, -10, 0 },
{ -4, -1, 77, 7, 65, -7, -2 },
{ 0, -11, 44, 7, 88, 15, -8 },
{ -7, 11, 85, 7, 49, -10, 0 },
{ -1, -8, 60, 7, 79, 3, -5 },
{ -10, 25, 91, 7, 33, -11, 0 },
{ -4, -2, 74, 7, 68, -6, -2 },
{ 0, -11, 41, 7, 89, 18, -9 },
{ -7, 8, 85, 7, 52, -10, 0 },
{ -1, -9, 57, 7, 83, 4, -6 },
{ -9, 22, 90, 7, 36, -11, 0 },
{ -3, -4, 73, 7, 70, -5, -3 },
{ 0, -11, 38, 7, 90, 20, -9 },
{ -6, 6, 83, 7, 55, -10, 0 },
{ 0, -10, 55, 7, 83, 6, -6 },
{ -9, 20, 90, 7, 38, -11, 0 },
{ -3, -5, 70, 7, 73, -4, -3 },
{ 0, -11, 36, 7, 90, 22, -9 },
{ -6, 4, 83, 7, 57, -9, -1 },
{ 0, -10, 52, 7, 85, 8, -7 },
{ -9, 18, 89, 7, 41, -11, 0 },
{ -2, -6, 68, 7, 74, -2, -4 },
{ 0, -11, 33, 7, 91, 25, -10 },
{ -5, 3, 79, 7, 60, -8, -1 },
{ 0, -10, 49, 7, 85, 11, -7 },
{ -8, 15, 88, 7, 44, -11, 0 },
{ -2, -7, 65, 7, 77, -1, -4 },
{ 0, -10, 30, 7, 90, 28, -10 },
{ -5, 1, 78, 7, 63, -8, -1 },
{ 0, -11, 46, 7, 88, 13, -8 } } },
.ver_phase_arr = {
.even = { { -10, 29, 90, 7, 29, -10, 0 },
{ -4, 0, 76, 7, 64, -7, -1 },
{ 0, -11, 45, 7, 88, 14, -8 },
{ -7, 12, 85, 7, 48, -10, 0 },
{ -1, -8, 61, 7, 79, 2, -5 },
{ -10, 26, 90, 7, 32, -10, 0 },
{ -4, -2, 76, 7, 66, -6, -2 },
{ 0, -11, 42, 7, 89, 16, -8 },
{ -7, 10, 84, 7, 51, -10, 0 },
{ -1, -9, 59, 7, 81, 3, -5 },
{ -10, 24, 91, 7, 34, -11, 0 },
{ -3, -3, 72, 7, 69, -5, -2 },
{ 0, -11, 40, 7, 89, 19, -9 },
{ -6, 7, 84, 7, 53, -10, 0 },
{ -1, -9, 56, 7, 83, 5, -6 },
{ -9, 21, 90, 7, 37, -11, 0 },
{ -3, -4, 71, 7, 71, -4, -3 },
{ 0, -11, 37, 7, 90, 21, -9 },
{ -6, 5, 83, 7, 56, -9, -1 },
{ 0, -10, 53, 7, 84, 7, -6 },
{ -9, 19, 89, 7, 40, -11, 0 },
{ -2, -5, 69, 7, 72, -3, -3 },
{ 0, -11, 34, 7, 91, 24, -10 },
{ -5, 3, 81, 7, 59, -9, -1 },
{ 0, -10, 51, 7, 84, 10, -7 },
{ -8, 16, 89, 7, 42, -11, 0 },
{ -2, -6, 66, 7, 76, -2, -4 },
{ 0, -10, 32, 7, 90, 26, -10 },
{ -5, 2, 79, 7, 61, -8, -1 },
{ 0, -10, 48, 7, 85, 12, -7 },
{ -8, 14, 88, 7, 45, -11, 0 },
{ -1, -7, 64, 7, 76, 0, -4 } },
.odd = { { -8, 13, 88, 7, 46, -11, 0 },
{ -1, -8, 63, 7, 78, 1, -5 },
{ -10, 28, 90, 7, 30, -10, 0 },
{ -4, -1, 77, 7, 65, -7, -2 },
{ 0, -11, 44, 7, 88, 15, -8 },
{ -7, 11, 85, 7, 49, -10, 0 },
{ -1, -8, 60, 7, 79, 3, -5 },
{ -10, 25, 91, 7, 33, -11, 0 },
{ -4, -2, 74, 7, 68, -6, -2 },
{ 0, -11, 41, 7, 89, 18, -9 },
{ -7, 8, 85, 7, 52, -10, 0 },
{ -1, -9, 57, 7, 83, 4, -6 },
{ -9, 22, 90, 7, 36, -11, 0 },
{ -3, -4, 73, 7, 70, -5, -3 },
{ 0, -11, 38, 7, 90, 20, -9 },
{ -6, 6, 83, 7, 55, -10, 0 },
{ 0, -10, 55, 7, 83, 6, -6 },
{ -9, 20, 90, 7, 38, -11, 0 },
{ -3, -5, 70, 7, 73, -4, -3 },
{ 0, -11, 36, 7, 90, 22, -9 },
{ -6, 4, 83, 7, 57, -9, -1 },
{ 0, -10, 52, 7, 85, 8, -7 },
{ -9, 18, 89, 7, 41, -11, 0 },
{ -2, -6, 68, 7, 74, -2, -4 },
{ 0, -11, 33, 7, 91, 25, -10 },
{ -5, 3, 79, 7, 60, -8, -1 },
{ 0, -10, 49, 7, 85, 11, -7 },
{ -8, 15, 88, 7, 44, -11, 0 },
{ -2, -7, 65, 7, 77, -1, -4 },
{ 0, -10, 30, 7, 90, 28, -10 },
{ -5, 1, 78, 7, 63, -8, -1 },
{ 0, -11, 46, 7, 88, 13, -8 } } },
.ptrn_arr = { { 0xf3e7cf9f, 0x9f3e7cf9, 0xf3e7cf } },
.sample_patrn_length = 90,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 14) = 0.695652 */
.hor_phase_arr = {
.even = { { -10, 30, 88, 7, 30, -10, 0 },
{ -5, 0, 75, 7, 66, -5, -3 },
{ 0, -10, 40, 7, 87, 20, -9 },
{ -7, 7, 81, 7, 56, -8, -1 },
{ 0, -9, 51, 7, 83, 11, -8 },
{ -8, 16, 84, 7, 46, -10, 0 },
{ -2, -7, 61, 7, 79, 3, -6 },
{ -10, 25, 88, 7, 35, -10, 0 },
{ -4, -3, 72, 7, 70, -3, -4 },
{ 0, -10, 35, 7, 88, 25, -10 },
{ -6, 3, 79, 7, 61, -7, -2 },
{ 0, -10, 46, 7, 84, 16, -8 },
{ -8, 11, 83, 7, 51, -9, 0 },
{ -1, -8, 56, 7, 81, 7, -7 },
{ -9, 20, 87, 7, 40, -10, 0 },
{ -3, -5, 66, 7, 75, 0, -5 } },
.odd = { { -8, 13, 85, 7, 48, -10, 0 },
{ -1, -8, 59, 7, 79, 5, -6 },
{ -10, 23, 87, 7, 38, -10, 0 },
{ -3, -4, 68, 7, 72, -1, -4 },
{ 0, -10, 33, 7, 87, 28, -10 },
{ -5, 2, 75, 7, 64, -6, -2 },
{ 0, -10, 43, 7, 86, 18, -9 },
{ -7, 9, 83, 7, 53, -9, -1 },
{ -1, -9, 53, 7, 83, 9, -7 },
{ -9, 18, 86, 7, 43, -10, 0 },
{ -2, -6, 64, 7, 75, 2, -5 },
{ -10, 28, 87, 7, 33, -10, 0 },
{ -4, -1, 72, 7, 68, -4, -3 },
{ 0, -10, 38, 7, 87, 23, -10 },
{ -6, 5, 79, 7, 59, -8, -1 },
{ 0, -10, 48, 7, 85, 13, -8 } } },
.ver_phase_arr = {
.even = { { -10, 30, 88, 7, 30, -10, 0 },
{ -5, 0, 75, 7, 66, -5, -3 },
{ 0, -10, 40, 7, 87, 20, -9 },
{ -7, 7, 81, 7, 56, -8, -1 },
{ 0, -9, 51, 7, 83, 11, -8 },
{ -8, 16, 84, 7, 46, -10, 0 },
{ -2, -7, 61, 7, 79, 3, -6 },
{ -10, 25, 88, 7, 35, -10, 0 },
{ -4, -3, 72, 7, 70, -3, -4 },
{ 0, -10, 35, 7, 88, 25, -10 },
{ -6, 3, 79, 7, 61, -7, -2 },
{ 0, -10, 46, 7, 84, 16, -8 },
{ -8, 11, 83, 7, 51, -9, 0 },
{ -1, -8, 56, 7, 81, 7, -7 },
{ -9, 20, 87, 7, 40, -10, 0 },
{ -3, -5, 66, 7, 75, 0, -5 } },
.odd = { { -8, 13, 85, 7, 48, -10, 0 },
{ -1, -8, 59, 7, 79, 5, -6 },
{ -10, 23, 87, 7, 38, -10, 0 },
{ -3, -4, 68, 7, 72, -1, -4 },
{ 0, -10, 33, 7, 87, 28, -10 },
{ -5, 2, 75, 7, 64, -6, -2 },
{ 0, -10, 43, 7, 86, 18, -9 },
{ -7, 9, 83, 7, 53, -9, -1 },
{ -1, -9, 53, 7, 83, 9, -7 },
{ -9, 18, 86, 7, 43, -10, 0 },
{ -2, -6, 64, 7, 75, 2, -5 },
{ -10, 28, 87, 7, 33, -10, 0 },
{ -4, -1, 72, 7, 68, -4, -3 },
{ 0, -10, 38, 7, 87, 23, -10 },
{ -6, 5, 79, 7, 59, -8, -1 },
{ 0, -10, 48, 7, 85, 13, -8 } } },
.ptrn_arr = { { 0x79f3cf9f, 0xf3e } },
.sample_patrn_length = 46,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 15) = 0.680851 */
.hor_phase_arr = {
.even = { { -10, 31, 86, 7, 31, -10, 0 },
{ -5, 0, 72, 7, 68, -3, -4 },
{ 0, -10, 36, 7, 86, 26, -10 },
{ -6, 3, 76, 7, 63, -5, -3 },
{ 0, -10, 41, 7, 85, 21, -9 },
{ -7, 7, 78, 7, 59, -7, -2 },
{ 0, -10, 46, 7, 84, 17, -9 },
{ -8, 11, 80, 7, 54, -8, -1 },
{ -1, -9, 51, 7, 82, 13, -8 },
{ -9, 15, 83, 7, 49, -9, -1 },
{ -2, -8, 56, 7, 80, 9, -7 },
{ -9, 19, 85, 7, 43, -10, 0 },
{ -3, -6, 61, 7, 77, 5, -6 },
{ -10, 24, 86, 7, 38, -10, 0 },
{ -3, -4, 66, 7, 72, 2, -5 },
{ -10, 29, 86, 7, 33, -10, 0 },
{ -4, -1, 68, 7, 70, -1, -4 },
{ 0, -10, 33, 7, 86, 29, -10 },
{ -5, 2, 72, 7, 66, -4, -3 },
{ 0, -10, 38, 7, 86, 24, -10 },
{ -6, 5, 77, 7, 61, -6, -3 },
{ 0, -10, 43, 7, 85, 19, -9 },
{ -7, 9, 80, 7, 56, -8, -2 },
{ -1, -9, 49, 7, 83, 15, -9 },
{ -8, 13, 82, 7, 51, -9, -1 },
{ -1, -8, 54, 7, 80, 11, -8 },
{ -9, 17, 84, 7, 46, -10, 0 },
{ -2, -7, 59, 7, 78, 7, -7 },
{ -9, 21, 85, 7, 41, -10, 0 },
{ -3, -5, 63, 7, 76, 3, -6 },
{ -10, 26, 86, 7, 36, -10, 0 },
{ -4, -3, 68, 7, 72, 0, -5 } },
.odd = { { -8, 14, 82, 7, 50, -9, -1 },
{ -1, -8, 55, 7, 79, 10, -7 },
{ -9, 18, 84, 7, 45, -10, 0 },
{ -2, -6, 60, 7, 77, 6, -7 },
{ -10, 23, 85, 7, 40, -10, 0 },
{ -3, -4, 64, 7, 75, 2, -6 },
{ -10, 27, 86, 7, 35, -10, 0 },
{ -4, -2, 69, 7, 71, -1, -5 },
{ 0, -10, 32, 7, 86, 30, -10 },
{ -5, 1, 72, 7, 67, -3, -4 },
{ 0, -10, 37, 7, 86, 25, -10 },
{ -6, 4, 77, 7, 62, -6, -3 },
{ 0, -10, 42, 7, 85, 20, -9 },
{ -7, 8, 79, 7, 57, -7, -2 },
{ -1, -9, 47, 7, 84, 16, -9 },
{ -8, 12, 81, 7, 52, -8, -1 },
{ -1, -8, 52, 7, 81, 12, -8 },
{ -9, 16, 84, 7, 47, -9, -1 },
{ -2, -7, 57, 7, 79, 8, -7 },
{ -9, 20, 85, 7, 42, -10, 0 },
{ -3, -6, 62, 7, 77, 4, -6 },
{ -10, 25, 86, 7, 37, -10, 0 },
{ -4, -3, 67, 7, 72, 1, -5 },
{ -10, 30, 86, 7, 32, -10, 0 },
{ -5, -1, 71, 7, 69, -2, -4 },
{ 0, -10, 35, 7, 86, 27, -10 },
{ -6, 2, 75, 7, 64, -4, -3 },
{ 0, -10, 40, 7, 85, 23, -10 },
{ -7, 6, 77, 7, 60, -6, -2 },
{ 0, -10, 45, 7, 84, 18, -9 },
{ -7, 10, 79, 7, 55, -8, -1 },
{ -1, -9, 50, 7, 82, 14, -8 } } },
.ver_phase_arr = {
.even = { { -10, 31, 86, 7, 31, -10, 0 },
{ -5, 0, 72, 7, 68, -3, -4 },
{ 0, -10, 36, 7, 86, 26, -10 },
{ -6, 3, 76, 7, 63, -5, -3 },
{ 0, -10, 41, 7, 85, 21, -9 },
{ -7, 7, 78, 7, 59, -7, -2 },
{ 0, -10, 46, 7, 84, 17, -9 },
{ -8, 11, 80, 7, 54, -8, -1 },
{ -1, -9, 51, 7, 82, 13, -8 },
{ -9, 15, 83, 7, 49, -9, -1 },
{ -2, -8, 56, 7, 80, 9, -7 },
{ -9, 19, 85, 7, 43, -10, 0 },
{ -3, -6, 61, 7, 77, 5, -6 },
{ -10, 24, 86, 7, 38, -10, 0 },
{ -3, -4, 66, 7, 72, 2, -5 },
{ -10, 29, 86, 7, 33, -10, 0 },
{ -4, -1, 68, 7, 70, -1, -4 },
{ 0, -10, 33, 7, 86, 29, -10 },
{ -5, 2, 72, 7, 66, -4, -3 },
{ 0, -10, 38, 7, 86, 24, -10 },
{ -6, 5, 77, 7, 61, -6, -3 },
{ 0, -10, 43, 7, 85, 19, -9 },
{ -7, 9, 80, 7, 56, -8, -2 },
{ -1, -9, 49, 7, 83, 15, -9 },
{ -8, 13, 82, 7, 51, -9, -1 },
{ -1, -8, 54, 7, 80, 11, -8 },
{ -9, 17, 84, 7, 46, -10, 0 },
{ -2, -7, 59, 7, 78, 7, -7 },
{ -9, 21, 85, 7, 41, -10, 0 },
{ -3, -5, 63, 7, 76, 3, -6 },
{ -10, 26, 86, 7, 36, -10, 0 },
{ -4, -3, 68, 7, 72, 0, -5 } },
.odd = { { -8, 14, 82, 7, 50, -9, -1 },
{ -1, -8, 55, 7, 79, 10, -7 },
{ -9, 18, 84, 7, 45, -10, 0 },
{ -2, -6, 60, 7, 77, 6, -7 },
{ -10, 23, 85, 7, 40, -10, 0 },
{ -3, -4, 64, 7, 75, 2, -6 },
{ -10, 27, 86, 7, 35, -10, 0 },
{ -4, -2, 69, 7, 71, -1, -5 },
{ 0, -10, 32, 7, 86, 30, -10 },
{ -5, 1, 72, 7, 67, -3, -4 },
{ 0, -10, 37, 7, 86, 25, -10 },
{ -6, 4, 77, 7, 62, -6, -3 },
{ 0, -10, 42, 7, 85, 20, -9 },
{ -7, 8, 79, 7, 57, -7, -2 },
{ -1, -9, 47, 7, 84, 16, -9 },
{ -8, 12, 81, 7, 52, -8, -1 },
{ -1, -8, 52, 7, 81, 12, -8 },
{ -9, 16, 84, 7, 47, -9, -1 },
{ -2, -7, 57, 7, 79, 8, -7 },
{ -9, 20, 85, 7, 42, -10, 0 },
{ -3, -6, 62, 7, 77, 4, -6 },
{ -10, 25, 86, 7, 37, -10, 0 },
{ -4, -3, 67, 7, 72, 1, -5 },
{ -10, 30, 86, 7, 32, -10, 0 },
{ -5, -1, 71, 7, 69, -2, -4 },
{ 0, -10, 35, 7, 86, 27, -10 },
{ -6, 2, 75, 7, 64, -4, -3 },
{ 0, -10, 40, 7, 85, 23, -10 },
{ -7, 6, 77, 7, 60, -6, -2 },
{ 0, -10, 45, 7, 84, 18, -9 },
{ -7, 10, 79, 7, 55, -8, -1 },
{ -1, -9, 50, 7, 82, 14, -8 } } },
.ptrn_arr = { { 0x3cf9e79f, 0x9e79f3cf, 0xf3cf3e7 } },
.sample_patrn_length = 94,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 16) = 0.666667 */
.hor_phase_arr = {
.even = { { -10, 32, 84, 7, 32, -10, 0 },
{ -5, 0, 69, 7, 69, 0, -5 } },
.odd = { { -9, 14, 82, 7, 51, -8, -2 },
{ -2, -8, 51, 7, 82, 14, -9 } } },
.ver_phase_arr = {
.even = { { -10, 32, 84, 7, 32, -10, 0 },
{ -5, 0, 69, 7, 69, 0, -5 } },
.odd = { { -9, 14, 82, 7, 51, -8, -2 },
{ -2, -8, 51, 7, 82, 14, -9 } } },
.ptrn_arr = { { 0xf } },
.sample_patrn_length = 6,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 17) = 0.653061 */
.hor_phase_arr = {
.even = { { -10, 33, 82, 7, 33, -10, 0 },
{ -5, 0, 66, 7, 70, 3, -6 },
{ -10, 28, 82, 7, 37, -9, 0 },
{ -4, -3, 62, 7, 74, 6, -7 },
{ -10, 24, 82, 7, 42, -9, -1 },
{ -3, -5, 58, 7, 76, 10, -8 },
{ -9, 20, 79, 7, 47, -8, -1 },
{ -3, -6, 54, 7, 78, 14, -9 },
{ -9, 16, 79, 7, 51, -7, -2 },
{ -2, -8, 49, 7, 80, 18, -9 },
{ -8, 12, 77, 7, 56, -6, -3 },
{ -1, -9, 44, 7, 81, 22, -9 },
{ -7, 8, 75, 7, 60, -4, -4 },
{ -1, -9, 40, 7, 82, 26, -10 },
{ -7, 5, 71, 7, 65, -1, -5 },
{ 0, -10, 35, 7, 83, 30, -10 },
{ -6, 1, 70, 7, 68, 1, -6 },
{ -10, 30, 83, 7, 35, -10, 0 },
{ -5, -1, 65, 7, 71, 5, -7 },
{ -10, 26, 82, 7, 40, -9, -1 },
{ -4, -4, 60, 7, 75, 8, -7 },
{ -9, 22, 81, 7, 44, -9, -1 },
{ -3, -6, 56, 7, 77, 12, -8 },
{ -9, 18, 80, 7, 49, -8, -2 },
{ -2, -7, 51, 7, 79, 16, -9 },
{ -9, 14, 78, 7, 54, -6, -3 },
{ -1, -8, 47, 7, 79, 20, -9 },
{ -8, 10, 76, 7, 58, -5, -3 },
{ -1, -9, 42, 7, 82, 24, -10 },
{ -7, 6, 74, 7, 62, -3, -4 },
{ 0, -9, 37, 7, 82, 28, -10 },
{ -6, 3, 70, 7, 66, 0, -5 } },
.odd = { { -9, 15, 79, 7, 52, -7, -2 },
{ -2, -8, 48, 7, 80, 19, -9 },
{ -8, 11, 76, 7, 57, -5, -3 },
{ -1, -9, 43, 7, 82, 23, -10 },
{ -7, 7, 74, 7, 61, -3, -4 },
{ -1, -9, 38, 7, 83, 27, -10 },
{ -6, 4, 70, 7, 66, -1, -5 },
{ 0, -10, 34, 7, 83, 31, -10 },
{ -6, 1, 67, 7, 70, 2, -6 },
{ -10, 29, 83, 7, 36, -10, 0 },
{ -5, -2, 64, 7, 73, 5, -7 },
{ -10, 25, 82, 7, 41, -9, -1 },
{ -4, -4, 59, 7, 76, 9, -8 },
{ -9, 21, 80, 7, 45, -8, -1 },
{ -3, -6, 55, 7, 77, 13, -8 },
{ -9, 17, 79, 7, 50, -7, -2 },
{ -2, -7, 50, 7, 79, 17, -9 },
{ -8, 13, 77, 7, 55, -6, -3 },
{ -1, -8, 45, 7, 80, 21, -9 },
{ -8, 9, 76, 7, 59, -4, -4 },
{ -1, -9, 41, 7, 82, 25, -10 },
{ -7, 5, 73, 7, 64, -2, -5 },
{ 0, -10, 36, 7, 83, 29, -10 },
{ -6, 2, 70, 7, 67, 1, -6 },
{ -10, 31, 83, 7, 34, -10, 0 },
{ -5, -1, 66, 7, 70, 4, -6 },
{ -10, 27, 83, 7, 38, -9, -1 },
{ -4, -3, 61, 7, 74, 7, -7 },
{ -10, 23, 82, 7, 43, -9, -1 },
{ -3, -5, 57, 7, 76, 11, -8 },
{ -9, 19, 80, 7, 48, -8, -2 },
{ -2, -7, 52, 7, 79, 15, -9 } } },
.ver_phase_arr = {
.even = { { -10, 33, 82, 7, 33, -10, 0 },
{ -5, 0, 66, 7, 70, 3, -6 },
{ -10, 28, 82, 7, 37, -9, 0 },
{ -4, -3, 62, 7, 74, 6, -7 },
{ -10, 24, 82, 7, 42, -9, -1 },
{ -3, -5, 58, 7, 76, 10, -8 },
{ -9, 20, 79, 7, 47, -8, -1 },
{ -3, -6, 54, 7, 78, 14, -9 },
{ -9, 16, 79, 7, 51, -7, -2 },
{ -2, -8, 49, 7, 80, 18, -9 },
{ -8, 12, 77, 7, 56, -6, -3 },
{ -1, -9, 44, 7, 81, 22, -9 },
{ -7, 8, 75, 7, 60, -4, -4 },
{ -1, -9, 40, 7, 82, 26, -10 },
{ -7, 5, 71, 7, 65, -1, -5 },
{ 0, -10, 35, 7, 83, 30, -10 },
{ -6, 1, 70, 7, 68, 1, -6 },
{ -10, 30, 83, 7, 35, -10, 0 },
{ -5, -1, 65, 7, 71, 5, -7 },
{ -10, 26, 82, 7, 40, -9, -1 },
{ -4, -4, 60, 7, 75, 8, -7 },
{ -9, 22, 81, 7, 44, -9, -1 },
{ -3, -6, 56, 7, 77, 12, -8 },
{ -9, 18, 80, 7, 49, -8, -2 },
{ -2, -7, 51, 7, 79, 16, -9 },
{ -9, 14, 78, 7, 54, -6, -3 },
{ -1, -8, 47, 7, 79, 20, -9 },
{ -8, 10, 76, 7, 58, -5, -3 },
{ -1, -9, 42, 7, 82, 24, -10 },
{ -7, 6, 74, 7, 62, -3, -4 },
{ 0, -9, 37, 7, 82, 28, -10 },
{ -6, 3, 70, 7, 66, 0, -5 } },
.odd = { { -9, 15, 79, 7, 52, -7, -2 },
{ -2, -8, 48, 7, 80, 19, -9 },
{ -8, 11, 76, 7, 57, -5, -3 },
{ -1, -9, 43, 7, 82, 23, -10 },
{ -7, 7, 74, 7, 61, -3, -4 },
{ -1, -9, 38, 7, 83, 27, -10 },
{ -6, 4, 70, 7, 66, -1, -5 },
{ 0, -10, 34, 7, 83, 31, -10 },
{ -6, 1, 67, 7, 70, 2, -6 },
{ -10, 29, 83, 7, 36, -10, 0 },
{ -5, -2, 64, 7, 73, 5, -7 },
{ -10, 25, 82, 7, 41, -9, -1 },
{ -4, -4, 59, 7, 76, 9, -8 },
{ -9, 21, 80, 7, 45, -8, -1 },
{ -3, -6, 55, 7, 77, 13, -8 },
{ -9, 17, 79, 7, 50, -7, -2 },
{ -2, -7, 50, 7, 79, 17, -9 },
{ -8, 13, 77, 7, 55, -6, -3 },
{ -1, -8, 45, 7, 80, 21, -9 },
{ -8, 9, 76, 7, 59, -4, -4 },
{ -1, -9, 41, 7, 82, 25, -10 },
{ -7, 5, 73, 7, 64, -2, -5 },
{ 0, -10, 36, 7, 83, 29, -10 },
{ -6, 2, 70, 7, 67, 1, -6 },
{ -10, 31, 83, 7, 34, -10, 0 },
{ -5, -1, 66, 7, 70, 4, -6 },
{ -10, 27, 83, 7, 38, -9, -1 },
{ -4, -3, 61, 7, 74, 7, -7 },
{ -10, 23, 82, 7, 43, -9, -1 },
{ -3, -5, 57, 7, 76, 11, -8 },
{ -9, 19, 80, 7, 48, -8, -2 },
{ -2, -7, 52, 7, 79, 15, -9 } } },
.ptrn_arr = { { 0xe73cf3cf, 0x3cf39e79, 0xe79e79cf } },
.sample_patrn_length = 98,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 18) = 0.64 */
.hor_phase_arr = {
.even = { { -9, 33, 80, 7, 33, -9, 0 },
{ -6, 0, 64, 7, 71, 6, -7 },
{ -10, 25, 80, 7, 42, -8, -1 },
{ -4, -4, 56, 7, 76, 13, -9 },
{ -9, 17, 78, 7, 51, -6, -3 },
{ -2, -7, 47, 7, 78, 21, -9 },
{ -8, 9, 74, 7, 60, -2, -5 },
{ -1, -9, 38, 7, 81, 29, -10 },
{ -6, 3, 66, 7, 68, 3, -6 },
{ -10, 29, 81, 7, 38, -9, -1 },
{ -5, -2, 60, 7, 74, 9, -8 },
{ -9, 21, 78, 7, 47, -7, -2 },
{ -3, -6, 51, 7, 78, 17, -9 },
{ -9, 13, 76, 7, 56, -4, -4 },
{ -1, -8, 42, 7, 80, 25, -10 },
{ -7, 6, 71, 7, 64, 0, -6 } },
.odd = { { -9, 15, 76, 7, 54, -5, -3 },
{ -2, -8, 45, 7, 80, 23, -10 },
{ -8, 8, 72, 7, 62, -1, -5 },
{ -1, -9, 36, 7, 80, 31, -9 },
{ -6, 1, 66, 7, 70, 4, -7 },
{ -10, 27, 81, 7, 40, -9, -1 },
{ -4, -4, 58, 7, 75, 11, -8 },
{ -9, 19, 78, 7, 49, -7, -2 },
{ -2, -7, 49, 7, 78, 19, -9 },
{ -8, 11, 75, 7, 58, -4, -4 },
{ -1, -9, 40, 7, 81, 27, -10 },
{ -7, 4, 70, 7, 66, 1, -6 },
{ -9, 31, 80, 7, 36, -9, -1 },
{ -5, -1, 62, 7, 72, 8, -8 },
{ -10, 23, 80, 7, 45, -8, -2 },
{ -3, -5, 54, 7, 76, 15, -9 } } },
.ver_phase_arr = {
.even = { { -9, 33, 80, 7, 33, -9, 0 },
{ -6, 0, 64, 7, 71, 6, -7 },
{ -10, 25, 80, 7, 42, -8, -1 },
{ -4, -4, 56, 7, 76, 13, -9 },
{ -9, 17, 78, 7, 51, -6, -3 },
{ -2, -7, 47, 7, 78, 21, -9 },
{ -8, 9, 74, 7, 60, -2, -5 },
{ -1, -9, 38, 7, 81, 29, -10 },
{ -6, 3, 66, 7, 68, 3, -6 },
{ -10, 29, 81, 7, 38, -9, -1 },
{ -5, -2, 60, 7, 74, 9, -8 },
{ -9, 21, 78, 7, 47, -7, -2 },
{ -3, -6, 51, 7, 78, 17, -9 },
{ -9, 13, 76, 7, 56, -4, -4 },
{ -1, -8, 42, 7, 80, 25, -10 },
{ -7, 6, 71, 7, 64, 0, -6 } },
.odd = { { -9, 15, 76, 7, 54, -5, -3 },
{ -2, -8, 45, 7, 80, 23, -10 },
{ -8, 8, 72, 7, 62, -1, -5 },
{ -1, -9, 36, 7, 80, 31, -9 },
{ -6, 1, 66, 7, 70, 4, -7 },
{ -10, 27, 81, 7, 40, -9, -1 },
{ -4, -4, 58, 7, 75, 11, -8 },
{ -9, 19, 78, 7, 49, -7, -2 },
{ -2, -7, 49, 7, 78, 19, -9 },
{ -8, 11, 75, 7, 58, -4, -4 },
{ -1, -9, 40, 7, 81, 27, -10 },
{ -7, 4, 70, 7, 66, 1, -6 },
{ -9, 31, 80, 7, 36, -9, -1 },
{ -5, -1, 62, 7, 72, 8, -8 },
{ -10, 23, 80, 7, 45, -8, -2 },
{ -3, -5, 54, 7, 76, 15, -9 } } },
.ptrn_arr = { { 0xf39e73cf, 0xe79c } },
.sample_patrn_length = 50,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 19) = 0.627451 */
.hor_phase_arr = {
.even = { { -9, 34, 79, 7, 34, -9, -1 },
{ -6, 0, 61, 7, 72, 9, -8 },
{ -9, 22, 78, 7, 47, -7, -3 },
{ -3, -6, 49, 7, 77, 20, -9 },
{ -8, 11, 72, 7, 59, -1, -5 },
{ -1, -9, 36, 7, 79, 32, -9 },
{ -6, 1, 63, 7, 71, 7, -8 },
{ -9, 24, 77, 7, 45, -7, -2 },
{ -4, -5, 51, 7, 77, 18, -9 },
{ -9, 13, 73, 7, 58, -2, -5 },
{ -1, -8, 38, 7, 78, 30, -9 },
{ -6, 3, 65, 7, 67, 6, -7 },
{ -9, 26, 78, 7, 43, -8, -2 },
{ -4, -4, 53, 7, 76, 16, -9 },
{ -9, 14, 75, 7, 55, -3, -4 },
{ -2, -8, 40, 7, 79, 28, -9 },
{ -7, 4, 67, 7, 67, 4, -7 },
{ -9, 28, 79, 7, 40, -8, -2 },
{ -4, -3, 55, 7, 75, 14, -9 },
{ -9, 16, 76, 7, 53, -4, -4 },
{ -2, -8, 43, 7, 78, 26, -9 },
{ -7, 6, 67, 7, 65, 3, -6 },
{ -9, 30, 78, 7, 38, -8, -1 },
{ -5, -2, 58, 7, 73, 13, -9 },
{ -9, 18, 77, 7, 51, -5, -4 },
{ -2, -7, 45, 7, 77, 24, -9 },
{ -8, 7, 71, 7, 63, 1, -6 },
{ -9, 32, 79, 7, 36, -9, -1 },
{ -5, -1, 59, 7, 72, 11, -8 },
{ -9, 20, 77, 7, 49, -6, -3 },
{ -3, -7, 47, 7, 78, 22, -9 },
{ -8, 9, 72, 7, 61, 0, -6 } },
.odd = { { -9, 15, 76, 7, 54, -4, -4 },
{ -2, -8, 41, 7, 79, 27, -9 },
{ -7, 5, 68, 7, 66, 3, -7 },
{ -9, 29, 78, 7, 39, -8, -1 },
{ -5, -3, 56, 7, 76, 13, -9 },
{ -9, 17, 77, 7, 52, -5, -4 },
{ -2, -7, 44, 7, 77, 25, -9 },
{ -7, 7, 68, 7, 64, 2, -6 },
{ -9, 31, 79, 7, 37, -9, -1 },
{ -5, -2, 59, 7, 72, 12, -8 },
{ -9, 19, 77, 7, 50, -6, -3 },
{ -3, -7, 46, 7, 78, 23, -9 },
{ -8, 8, 71, 7, 62, 1, -6 },
{ -9, 33, 79, 7, 35, -9, -1 },
{ -5, -1, 60, 7, 72, 10, -8 },
{ -9, 21, 77, 7, 48, -6, -3 },
{ -3, -6, 48, 7, 77, 21, -9 },
{ -8, 10, 72, 7, 60, -1, -5 },
{ -1, -9, 35, 7, 79, 33, -9 },
{ -6, 1, 62, 7, 71, 8, -8 },
{ -9, 23, 78, 7, 46, -7, -3 },
{ -3, -6, 50, 7, 77, 19, -9 },
{ -8, 12, 72, 7, 59, -2, -5 },
{ -1, -9, 37, 7, 79, 31, -9 },
{ -6, 2, 64, 7, 68, 7, -7 },
{ -9, 25, 77, 7, 44, -7, -2 },
{ -4, -5, 52, 7, 77, 17, -9 },
{ -9, 13, 76, 7, 56, -3, -5 },
{ -1, -8, 39, 7, 78, 29, -9 },
{ -7, 3, 66, 7, 68, 5, -7 },
{ -9, 27, 79, 7, 41, -8, -2 },
{ -4, -4, 54, 7, 76, 15, -9 } } },
.ver_phase_arr = {
.even = { { -9, 34, 79, 7, 34, -9, -1 },
{ -6, 0, 61, 7, 72, 9, -8 },
{ -9, 22, 78, 7, 47, -7, -3 },
{ -3, -6, 49, 7, 77, 20, -9 },
{ -8, 11, 72, 7, 59, -1, -5 },
{ -1, -9, 36, 7, 79, 32, -9 },
{ -6, 1, 63, 7, 71, 7, -8 },
{ -9, 24, 77, 7, 45, -7, -2 },
{ -4, -5, 51, 7, 77, 18, -9 },
{ -9, 13, 73, 7, 58, -2, -5 },
{ -1, -8, 38, 7, 78, 30, -9 },
{ -6, 3, 65, 7, 67, 6, -7 },
{ -9, 26, 78, 7, 43, -8, -2 },
{ -4, -4, 53, 7, 76, 16, -9 },
{ -9, 14, 75, 7, 55, -3, -4 },
{ -2, -8, 40, 7, 79, 28, -9 },
{ -7, 4, 67, 7, 67, 4, -7 },
{ -9, 28, 79, 7, 40, -8, -2 },
{ -4, -3, 55, 7, 75, 14, -9 },
{ -9, 16, 76, 7, 53, -4, -4 },
{ -2, -8, 43, 7, 78, 26, -9 },
{ -7, 6, 67, 7, 65, 3, -6 },
{ -9, 30, 78, 7, 38, -8, -1 },
{ -5, -2, 58, 7, 73, 13, -9 },
{ -9, 18, 77, 7, 51, -5, -4 },
{ -2, -7, 45, 7, 77, 24, -9 },
{ -8, 7, 71, 7, 63, 1, -6 },
{ -9, 32, 79, 7, 36, -9, -1 },
{ -5, -1, 59, 7, 72, 11, -8 },
{ -9, 20, 77, 7, 49, -6, -3 },
{ -3, -7, 47, 7, 78, 22, -9 },
{ -8, 9, 72, 7, 61, 0, -6 } },
.odd = { { -9, 15, 76, 7, 54, -4, -4 },
{ -2, -8, 41, 7, 79, 27, -9 },
{ -7, 5, 68, 7, 66, 3, -7 },
{ -9, 29, 78, 7, 39, -8, -1 },
{ -5, -3, 56, 7, 76, 13, -9 },
{ -9, 17, 77, 7, 52, -5, -4 },
{ -2, -7, 44, 7, 77, 25, -9 },
{ -7, 7, 68, 7, 64, 2, -6 },
{ -9, 31, 79, 7, 37, -9, -1 },
{ -5, -2, 59, 7, 72, 12, -8 },
{ -9, 19, 77, 7, 50, -6, -3 },
{ -3, -7, 46, 7, 78, 23, -9 },
{ -8, 8, 71, 7, 62, 1, -6 },
{ -9, 33, 79, 7, 35, -9, -1 },
{ -5, -1, 60, 7, 72, 10, -8 },
{ -9, 21, 77, 7, 48, -6, -3 },
{ -3, -6, 48, 7, 77, 21, -9 },
{ -8, 10, 72, 7, 60, -1, -5 },
{ -1, -9, 35, 7, 79, 33, -9 },
{ -6, 1, 62, 7, 71, 8, -8 },
{ -9, 23, 78, 7, 46, -7, -3 },
{ -3, -6, 50, 7, 77, 19, -9 },
{ -8, 12, 72, 7, 59, -2, -5 },
{ -1, -9, 37, 7, 79, 31, -9 },
{ -6, 2, 64, 7, 68, 7, -7 },
{ -9, 25, 77, 7, 44, -7, -2 },
{ -4, -5, 52, 7, 77, 17, -9 },
{ -9, 13, 76, 7, 56, -3, -5 },
{ -1, -8, 39, 7, 78, 29, -9 },
{ -7, 3, 66, 7, 68, 5, -7 },
{ -9, 27, 79, 7, 41, -8, -2 },
{ -4, -4, 54, 7, 76, 15, -9 } } },
.ptrn_arr = { { 0x79ce79cf, 0x73ce79ce, 0x73ce73ce, 0xe } },
.sample_patrn_length = 102,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 20) = 0.615385 */
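	/*
	 * In every coefficient row below the six tap values sum to
	 * 2^(4th field), e.g. -8 + 34 + 77 + 34 - 8 - 1 = 128 = 2^7, so
	 * the 4th field is best read as the normalization shift of a
	 * 6-tap polyphase filter.  Purely as an illustration (tap order
	 * and alignment are assumptions, not taken from the driver), one
	 * row would then be applied roughly as
	 *   out = (t0*in[-2] + t1*in[-1] + t2*in[0] +
	 *          t3*in[1]  + t4*in[2]  + t5*in[3]) >> shift;
	 */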
.hor_phase_arr = {
.even = { { -8, 34, 77, 7, 34, -8, -1 },
{ -6, 0, 59, 7, 71, 12, -8 },
{ -9, 19, 75, 7, 51, -4, -4 },
{ -3, -7, 43, 7, 77, 27, -9 },
{ -7, 6, 64, 7, 66, 6, -7 },
{ -9, 27, 77, 7, 43, -7, -3 },
{ -4, -4, 51, 7, 75, 19, -9 },
{ -8, 12, 71, 7, 59, 0, -6 } },
.odd = { { -9, 16, 73, 7, 55, -2, -5 },
{ -2, -8, 39, 7, 77, 31, -9 },
{ -7, 3, 63, 7, 68, 9, -8 },
{ -9, 23, 76, 7, 47, -6, -3 },
{ -3, -6, 47, 7, 76, 23, -9 },
{ -8, 9, 68, 7, 63, 3, -7 },
{ -9, 31, 77, 7, 39, -8, -2 },
{ -5, -2, 55, 7, 73, 16, -9 } } },
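	/*
	 * The vertical tables in these entries repeat the horizontal
	 * ones verbatim; the same phase coefficients are reused for both
	 * directions.
	 */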
.ver_phase_arr = {
.even = { { -8, 34, 77, 7, 34, -8, -1 },
{ -6, 0, 59, 7, 71, 12, -8 },
{ -9, 19, 75, 7, 51, -4, -4 },
{ -3, -7, 43, 7, 77, 27, -9 },
{ -7, 6, 64, 7, 66, 6, -7 },
{ -9, 27, 77, 7, 43, -7, -3 },
{ -4, -4, 51, 7, 75, 19, -9 },
{ -8, 12, 71, 7, 59, 0, -6 } },
.odd = { { -9, 16, 73, 7, 55, -2, -5 },
{ -2, -8, 39, 7, 77, 31, -9 },
{ -7, 3, 63, 7, 68, 9, -8 },
{ -9, 23, 76, 7, 47, -6, -3 },
{ -3, -6, 47, 7, 76, 23, -9 },
{ -8, 9, 68, 7, 63, 3, -7 },
{ -9, 31, 77, 7, 39, -8, -2 },
{ -5, -2, 55, 7, 73, 16, -9 } } },
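	/*
	 * The sample pattern appears to mark which input positions
	 * produce an output: 0xe739cf has 16 bits set out of the 26
	 * covered by sample_patrn_length, matching this entry's
	 * 16/26 = 32/52 ratio.  (Observation from the table values; the
	 * exact bit semantics are not documented here.)
	 */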
.ptrn_arr = { { 0xe739cf } },
.sample_patrn_length = 26,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 21) = 0.603774 */
.hor_phase_arr = {
.even = { { -8, 35, 76, 7, 35, -8, -2 },
{ -6, 0, 57, 7, 71, 15, -9 },
{ -9, 17, 71, 7, 55, -1, -5 },
{ -2, -8, 37, 7, 76, 33, -8 },
{ -6, 1, 58, 7, 71, 13, -9 },
{ -9, 18, 73, 7, 53, -2, -5 },
{ -2, -7, 39, 7, 75, 31, -8 },
{ -7, 2, 60, 7, 69, 12, -8 },
{ -9, 20, 74, 7, 51, -3, -5 },
{ -3, -7, 41, 7, 77, 29, -9 },
{ -7, 4, 62, 7, 67, 10, -8 },
{ -9, 22, 74, 7, 49, -4, -4 },
{ -3, -6, 43, 7, 75, 28, -9 },
{ -7, 5, 63, 7, 67, 8, -8 },
{ -9, 24, 75, 7, 47, -5, -4 },
{ -4, -5, 45, 7, 75, 26, -9 },
{ -8, 7, 65, 7, 65, 7, -8 },
{ -9, 26, 75, 7, 45, -5, -4 },
{ -4, -5, 47, 7, 75, 24, -9 },
{ -8, 8, 67, 7, 63, 5, -7 },
{ -9, 28, 75, 7, 43, -6, -3 },
{ -4, -4, 49, 7, 74, 22, -9 },
{ -8, 10, 67, 7, 62, 4, -7 },
{ -9, 29, 77, 7, 41, -7, -3 },
{ -5, -3, 51, 7, 74, 20, -9 },
{ -8, 12, 69, 7, 60, 2, -7 },
{ -8, 31, 75, 7, 39, -7, -2 },
{ -5, -2, 53, 7, 73, 18, -9 },
{ -9, 13, 71, 7, 58, 1, -6 },
{ -8, 33, 76, 7, 37, -8, -2 },
{ -5, -1, 55, 7, 71, 17, -9 },
{ -9, 15, 71, 7, 57, 0, -6 } },
.odd = { { -9, 16, 72, 7, 56, -1, -6 },
{ -2, -8, 36, 7, 76, 34, -8 },
{ -6, 1, 58, 7, 70, 14, -9 },
{ -9, 18, 72, 7, 54, -2, -5 },
{ -2, -7, 38, 7, 75, 32, -8 },
{ -6, 2, 59, 7, 70, 12, -9 },
{ -9, 19, 74, 7, 52, -3, -5 },
{ -3, -7, 40, 7, 77, 30, -9 },
{ -7, 3, 61, 7, 68, 11, -8 },
{ -9, 21, 75, 7, 50, -4, -5 },
{ -3, -6, 42, 7, 75, 29, -9 },
{ -7, 5, 63, 7, 66, 9, -8 },
{ -9, 23, 74, 7, 48, -4, -4 },
{ -3, -6, 44, 7, 75, 27, -9 },
{ -7, 6, 64, 7, 65, 8, -8 },
{ -9, 25, 75, 7, 46, -5, -4 },
{ -4, -5, 46, 7, 75, 25, -9 },
{ -8, 8, 65, 7, 64, 6, -7 },
{ -9, 27, 75, 7, 44, -6, -3 },
{ -4, -4, 48, 7, 74, 23, -9 },
{ -8, 9, 66, 7, 63, 5, -7 },
{ -9, 29, 75, 7, 42, -6, -3 },
{ -5, -4, 50, 7, 75, 21, -9 },
{ -8, 11, 68, 7, 61, 3, -7 },
{ -9, 30, 77, 7, 40, -7, -3 },
{ -5, -3, 52, 7, 74, 19, -9 },
{ -9, 12, 70, 7, 59, 2, -6 },
{ -8, 32, 75, 7, 38, -7, -2 },
{ -5, -2, 54, 7, 72, 18, -9 },
{ -9, 14, 70, 7, 58, 1, -6 },
{ -8, 34, 76, 7, 36, -8, -2 },
{ -6, -1, 56, 7, 72, 16, -9 } } },
.ver_phase_arr = {
.even = { { -8, 35, 76, 7, 35, -8, -2 },
{ -6, 0, 57, 7, 71, 15, -9 },
{ -9, 17, 71, 7, 55, -1, -5 },
{ -2, -8, 37, 7, 76, 33, -8 },
{ -6, 1, 58, 7, 71, 13, -9 },
{ -9, 18, 73, 7, 53, -2, -5 },
{ -2, -7, 39, 7, 75, 31, -8 },
{ -7, 2, 60, 7, 69, 12, -8 },
{ -9, 20, 74, 7, 51, -3, -5 },
{ -3, -7, 41, 7, 77, 29, -9 },
{ -7, 4, 62, 7, 67, 10, -8 },
{ -9, 22, 74, 7, 49, -4, -4 },
{ -3, -6, 43, 7, 75, 28, -9 },
{ -7, 5, 63, 7, 67, 8, -8 },
{ -9, 24, 75, 7, 47, -5, -4 },
{ -4, -5, 45, 7, 75, 26, -9 },
{ -8, 7, 65, 7, 65, 7, -8 },
{ -9, 26, 75, 7, 45, -5, -4 },
{ -4, -5, 47, 7, 75, 24, -9 },
{ -8, 8, 67, 7, 63, 5, -7 },
{ -9, 28, 75, 7, 43, -6, -3 },
{ -4, -4, 49, 7, 74, 22, -9 },
{ -8, 10, 67, 7, 62, 4, -7 },
{ -9, 29, 77, 7, 41, -7, -3 },
{ -5, -3, 51, 7, 74, 20, -9 },
{ -8, 12, 69, 7, 60, 2, -7 },
{ -8, 31, 75, 7, 39, -7, -2 },
{ -5, -2, 53, 7, 73, 18, -9 },
{ -9, 13, 71, 7, 58, 1, -6 },
{ -8, 33, 76, 7, 37, -8, -2 },
{ -5, -1, 55, 7, 71, 17, -9 },
{ -9, 15, 71, 7, 57, 0, -6 } },
.odd = { { -9, 16, 72, 7, 56, -1, -6 },
{ -2, -8, 36, 7, 76, 34, -8 },
{ -6, 1, 58, 7, 70, 14, -9 },
{ -9, 18, 72, 7, 54, -2, -5 },
{ -2, -7, 38, 7, 75, 32, -8 },
{ -6, 2, 59, 7, 70, 12, -9 },
{ -9, 19, 74, 7, 52, -3, -5 },
{ -3, -7, 40, 7, 77, 30, -9 },
{ -7, 3, 61, 7, 68, 11, -8 },
{ -9, 21, 75, 7, 50, -4, -5 },
{ -3, -6, 42, 7, 75, 29, -9 },
{ -7, 5, 63, 7, 66, 9, -8 },
{ -9, 23, 74, 7, 48, -4, -4 },
{ -3, -6, 44, 7, 75, 27, -9 },
{ -7, 6, 64, 7, 65, 8, -8 },
{ -9, 25, 75, 7, 46, -5, -4 },
{ -4, -5, 46, 7, 75, 25, -9 },
{ -8, 8, 65, 7, 64, 6, -7 },
{ -9, 27, 75, 7, 44, -6, -3 },
{ -4, -4, 48, 7, 74, 23, -9 },
{ -8, 9, 66, 7, 63, 5, -7 },
{ -9, 29, 75, 7, 42, -6, -3 },
{ -5, -4, 50, 7, 75, 21, -9 },
{ -8, 11, 68, 7, 61, 3, -7 },
{ -9, 30, 77, 7, 40, -7, -3 },
{ -5, -3, 52, 7, 74, 19, -9 },
{ -9, 12, 70, 7, 59, 2, -6 },
{ -8, 32, 75, 7, 38, -7, -2 },
{ -5, -2, 54, 7, 72, 18, -9 },
{ -9, 14, 70, 7, 58, 1, -6 },
{ -8, 34, 76, 7, 36, -8, -2 },
{ -6, -1, 56, 7, 72, 16, -9 } } },
.ptrn_arr = { { 0x9ce739cf, 0xe739ce73, 0x39ce739c, 0xe7 } },
.sample_patrn_length = 106,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 22) = 0.592593 */
.hor_phase_arr = {
.even = { { -7, 35, 74, 7, 35, -7, -2 },
{ -6, 0, 54, 7, 71, 18, -9 },
{ -9, 14, 70, 7, 58, 2, -7 },
{ -8, 32, 74, 7, 39, -6, -3 },
{ -5, -2, 51, 7, 72, 21, -9 },
{ -8, 11, 66, 7, 61, 5, -7 },
{ -9, 28, 75, 7, 43, -5, -4 },
{ -4, -4, 47, 7, 73, 25, -9 },
{ -8, 8, 64, 7, 64, 8, -8 },
{ -9, 25, 73, 7, 47, -4, -4 },
{ -4, -5, 43, 7, 75, 28, -9 },
{ -7, 5, 61, 7, 66, 11, -8 },
{ -9, 21, 72, 7, 51, -2, -5 },
{ -3, -6, 39, 7, 74, 32, -8 },
{ -7, 2, 58, 7, 70, 14, -9 },
{ -9, 18, 71, 7, 54, 0, -6 } },
.odd = { { -9, 16, 70, 7, 56, 1, -6 },
{ -8, 34, 75, 7, 37, -7, -3 },
{ -6, -1, 53, 7, 72, 19, -9 },
{ -9, 13, 68, 7, 59, 4, -7 },
{ -8, 30, 74, 7, 41, -6, -3 },
{ -5, -3, 49, 7, 73, 23, -9 },
{ -8, 10, 66, 7, 62, 6, -8 },
{ -9, 27, 74, 7, 45, -5, -4 },
{ -4, -5, 45, 7, 74, 27, -9 },
{ -8, 6, 62, 7, 66, 10, -8 },
{ -9, 23, 73, 7, 49, -3, -5 },
{ -3, -6, 41, 7, 74, 30, -8 },
{ -7, 4, 59, 7, 68, 13, -9 },
{ -9, 19, 72, 7, 53, -1, -6 },
{ -3, -7, 37, 7, 75, 34, -8 },
{ -6, 1, 56, 7, 70, 16, -9 } } },
.ver_phase_arr = {
.even = { { -7, 35, 74, 7, 35, -7, -2 },
{ -6, 0, 54, 7, 71, 18, -9 },
{ -9, 14, 70, 7, 58, 2, -7 },
{ -8, 32, 74, 7, 39, -6, -3 },
{ -5, -2, 51, 7, 72, 21, -9 },
{ -8, 11, 66, 7, 61, 5, -7 },
{ -9, 28, 75, 7, 43, -5, -4 },
{ -4, -4, 47, 7, 73, 25, -9 },
{ -8, 8, 64, 7, 64, 8, -8 },
{ -9, 25, 73, 7, 47, -4, -4 },
{ -4, -5, 43, 7, 75, 28, -9 },
{ -7, 5, 61, 7, 66, 11, -8 },
{ -9, 21, 72, 7, 51, -2, -5 },
{ -3, -6, 39, 7, 74, 32, -8 },
{ -7, 2, 58, 7, 70, 14, -9 },
{ -9, 18, 71, 7, 54, 0, -6 } },
.odd = { { -9, 16, 70, 7, 56, 1, -6 },
{ -8, 34, 75, 7, 37, -7, -3 },
{ -6, -1, 53, 7, 72, 19, -9 },
{ -9, 13, 68, 7, 59, 4, -7 },
{ -8, 30, 74, 7, 41, -6, -3 },
{ -5, -3, 49, 7, 73, 23, -9 },
{ -8, 10, 66, 7, 62, 6, -8 },
{ -9, 27, 74, 7, 45, -5, -4 },
{ -4, -5, 45, 7, 74, 27, -9 },
{ -8, 6, 62, 7, 66, 10, -8 },
{ -9, 23, 73, 7, 49, -3, -5 },
{ -3, -6, 41, 7, 74, 30, -8 },
{ -7, 4, 59, 7, 68, 13, -9 },
{ -9, 19, 72, 7, 53, -1, -6 },
{ -3, -7, 37, 7, 75, 34, -8 },
{ -6, 1, 56, 7, 70, 16, -9 } } },
.ptrn_arr = { { 0xce739ce7, 0xce739 } },
.sample_patrn_length = 54,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 23) = 0.581818 */
.hor_phase_arr = {
.even = { { -7, 36, 73, 7, 36, -7, -3 },
{ -6, 0, 52, 7, 71, 20, -9 },
{ -8, 12, 66, 7, 60, 6, -8 },
{ -8, 27, 73, 7, 45, -4, -5 },
{ -4, -4, 43, 7, 72, 29, -8 },
{ -7, 5, 59, 7, 66, 14, -9 },
{ -9, 19, 69, 7, 54, 1, -6 },
{ -7, 34, 72, 7, 38, -6, -3 },
{ -6, -1, 50, 7, 72, 22, -9 },
{ -8, 11, 63, 7, 62, 8, -8 },
{ -9, 26, 72, 7, 47, -3, -5 },
{ -4, -5, 41, 7, 73, 31, -8 },
{ -7, 4, 57, 7, 68, 15, -9 },
{ -9, 17, 69, 7, 56, 2, -7 },
{ -7, 32, 74, 7, 39, -6, -4 },
{ -5, -2, 49, 7, 71, 24, -9 },
{ -8, 9, 63, 7, 63, 9, -8 },
{ -9, 24, 71, 7, 49, -2, -5 },
{ -4, -6, 39, 7, 74, 32, -7 },
{ -7, 2, 56, 7, 69, 17, -9 },
{ -9, 15, 68, 7, 57, 4, -7 },
{ -8, 31, 73, 7, 41, -5, -4 },
{ -5, -3, 47, 7, 72, 26, -9 },
{ -8, 8, 62, 7, 63, 11, -8 },
{ -9, 22, 72, 7, 50, -1, -6 },
{ -3, -6, 38, 7, 72, 34, -7 },
{ -6, 1, 54, 7, 69, 19, -9 },
{ -9, 14, 66, 7, 59, 5, -7 },
{ -8, 29, 72, 7, 43, -4, -4 },
{ -5, -4, 45, 7, 73, 27, -8 },
{ -8, 6, 60, 7, 66, 12, -8 },
{ -9, 20, 71, 7, 52, 0, -6 } },
.odd = { { -9, 16, 69, 7, 56, 3, -7 },
{ -8, 31, 74, 7, 40, -5, -4 },
{ -5, -2, 48, 7, 71, 25, -9 },
{ -8, 8, 62, 7, 64, 10, -8 },
{ -9, 23, 72, 7, 50, -2, -6 },
{ -3, -6, 39, 7, 72, 33, -7 },
{ -7, 2, 55, 7, 69, 18, -9 },
{ -9, 15, 67, 7, 58, 4, -7 },
{ -8, 30, 73, 7, 42, -5, -4 },
{ -5, -3, 46, 7, 72, 26, -8 },
{ -8, 7, 61, 7, 65, 11, -8 },
{ -9, 21, 72, 7, 51, -1, -6 },
{ -3, -6, 37, 7, 72, 35, -7 },
{ -6, 1, 53, 7, 69, 20, -9 },
{ -9, 13, 66, 7, 59, 6, -7 },
{ -8, 28, 72, 7, 44, -4, -4 },
{ -4, -4, 44, 7, 72, 28, -8 },
{ -7, 6, 59, 7, 66, 13, -9 },
{ -9, 20, 69, 7, 53, 1, -6 },
{ -7, 35, 72, 7, 37, -6, -3 },
{ -6, -1, 51, 7, 72, 21, -9 },
{ -8, 11, 65, 7, 61, 7, -8 },
{ -8, 26, 72, 7, 46, -3, -5 },
{ -4, -5, 42, 7, 73, 30, -8 },
{ -7, 4, 58, 7, 67, 15, -9 },
{ -9, 18, 69, 7, 55, 2, -7 },
{ -7, 33, 72, 7, 39, -6, -3 },
{ -6, -2, 50, 7, 72, 23, -9 },
{ -8, 10, 64, 7, 62, 8, -8 },
{ -9, 25, 71, 7, 48, -2, -5 },
{ -4, -5, 40, 7, 74, 31, -8 },
{ -7, 3, 56, 7, 69, 16, -9 } } },
.ver_phase_arr = {
.even = { { -7, 36, 73, 7, 36, -7, -3 },
{ -6, 0, 52, 7, 71, 20, -9 },
{ -8, 12, 66, 7, 60, 6, -8 },
{ -8, 27, 73, 7, 45, -4, -5 },
{ -4, -4, 43, 7, 72, 29, -8 },
{ -7, 5, 59, 7, 66, 14, -9 },
{ -9, 19, 69, 7, 54, 1, -6 },
{ -7, 34, 72, 7, 38, -6, -3 },
{ -6, -1, 50, 7, 72, 22, -9 },
{ -8, 11, 63, 7, 62, 8, -8 },
{ -9, 26, 72, 7, 47, -3, -5 },
{ -4, -5, 41, 7, 73, 31, -8 },
{ -7, 4, 57, 7, 68, 15, -9 },
{ -9, 17, 69, 7, 56, 2, -7 },
{ -7, 32, 74, 7, 39, -6, -4 },
{ -5, -2, 49, 7, 71, 24, -9 },
{ -8, 9, 63, 7, 63, 9, -8 },
{ -9, 24, 71, 7, 49, -2, -5 },
{ -4, -6, 39, 7, 74, 32, -7 },
{ -7, 2, 56, 7, 69, 17, -9 },
{ -9, 15, 68, 7, 57, 4, -7 },
{ -8, 31, 73, 7, 41, -5, -4 },
{ -5, -3, 47, 7, 72, 26, -9 },
{ -8, 8, 62, 7, 63, 11, -8 },
{ -9, 22, 72, 7, 50, -1, -6 },
{ -3, -6, 38, 7, 72, 34, -7 },
{ -6, 1, 54, 7, 69, 19, -9 },
{ -9, 14, 66, 7, 59, 5, -7 },
{ -8, 29, 72, 7, 43, -4, -4 },
{ -5, -4, 45, 7, 73, 27, -8 },
{ -8, 6, 60, 7, 66, 12, -8 },
{ -9, 20, 71, 7, 52, 0, -6 } },
.odd = { { -9, 16, 69, 7, 56, 3, -7 },
{ -8, 31, 74, 7, 40, -5, -4 },
{ -5, -2, 48, 7, 71, 25, -9 },
{ -8, 8, 62, 7, 64, 10, -8 },
{ -9, 23, 72, 7, 50, -2, -6 },
{ -3, -6, 39, 7, 72, 33, -7 },
{ -7, 2, 55, 7, 69, 18, -9 },
{ -9, 15, 67, 7, 58, 4, -7 },
{ -8, 30, 73, 7, 42, -5, -4 },
{ -5, -3, 46, 7, 72, 26, -8 },
{ -8, 7, 61, 7, 65, 11, -8 },
{ -9, 21, 72, 7, 51, -1, -6 },
{ -3, -6, 37, 7, 72, 35, -7 },
{ -6, 1, 53, 7, 69, 20, -9 },
{ -9, 13, 66, 7, 59, 6, -7 },
{ -8, 28, 72, 7, 44, -4, -4 },
{ -4, -4, 44, 7, 72, 28, -8 },
{ -7, 6, 59, 7, 66, 13, -9 },
{ -9, 20, 69, 7, 53, 1, -6 },
{ -7, 35, 72, 7, 37, -6, -3 },
{ -6, -1, 51, 7, 72, 21, -9 },
{ -8, 11, 65, 7, 61, 7, -8 },
{ -8, 26, 72, 7, 46, -3, -5 },
{ -4, -5, 42, 7, 73, 30, -8 },
{ -7, 4, 58, 7, 67, 15, -9 },
{ -9, 18, 69, 7, 55, 2, -7 },
{ -7, 33, 72, 7, 39, -6, -3 },
{ -6, -2, 50, 7, 72, 23, -9 },
{ -8, 10, 64, 7, 62, 8, -8 },
{ -9, 25, 71, 7, 48, -2, -5 },
{ -4, -5, 40, 7, 74, 31, -8 },
{ -7, 3, 56, 7, 69, 16, -9 } } },
.ptrn_arr = { { 0xe7339ce7, 0x9ce7339c, 0x399ce739, 0xce7 } },
.sample_patrn_length = 110,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 24) = 0.571429 */
.hor_phase_arr = {
.even = { { -6, 36, 71, 7, 36, -6, -3 },
{ -6, 0, 50, 7, 69, 23, -8 },
{ -8, 10, 62, 7, 62, 10, -8 },
{ -8, 23, 69, 7, 50, 0, -6 } },
.odd = { { -9, 16, 67, 7, 56, 5, -7 },
{ -8, 29, 73, 7, 43, -4, -5 },
{ -5, -4, 43, 7, 73, 29, -8 },
{ -7, 5, 56, 7, 67, 16, -9 } } },
.ver_phase_arr = {
.even = { { -6, 36, 71, 7, 36, -6, -3 },
{ -6, 0, 50, 7, 69, 23, -8 },
{ -8, 10, 62, 7, 62, 10, -8 },
{ -8, 23, 69, 7, 50, 0, -6 } },
.odd = { { -9, 16, 67, 7, 56, 5, -7 },
{ -8, 29, 73, 7, 43, -4, -5 },
{ -5, -4, 43, 7, 73, 29, -8 },
{ -7, 5, 56, 7, 67, 16, -9 } } },
.ptrn_arr = { { 0xce7 } },
.sample_patrn_length = 14,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 25) = 0.561404 */
.hor_phase_arr = {
.even = { { -5, 36, 70, 7, 36, -5, -4 },
{ -6, 0, 48, 7, 69, 25, -8 },
{ -8, 8, 59, 7, 63, 14, -8 },
{ -8, 19, 66, 7, 54, 4, -7 },
{ -7, 30, 70, 7, 43, -3, -5 },
{ -5, -3, 41, 7, 70, 32, -7 },
{ -7, 3, 53, 7, 67, 20, -8 },
{ -8, 13, 61, 7, 60, 10, -8 },
{ -8, 24, 67, 7, 50, 1, -6 },
{ -6, 35, 70, 7, 38, -5, -4 },
{ -6, -1, 46, 7, 70, 27, -8 },
{ -8, 7, 57, 7, 64, 16, -8 },
{ -8, 17, 64, 7, 56, 6, -7 },
{ -7, 28, 69, 7, 45, -2, -5 },
{ -4, -4, 40, 7, 69, 33, -6 },
{ -7, 2, 51, 7, 68, 22, -8 },
{ -8, 11, 61, 7, 61, 11, -8 },
{ -8, 22, 68, 7, 51, 2, -7 },
{ -6, 33, 69, 7, 40, -4, -4 },
{ -5, -2, 45, 7, 69, 28, -7 },
{ -7, 6, 56, 7, 64, 17, -8 },
{ -8, 16, 64, 7, 57, 7, -8 },
{ -8, 27, 70, 7, 46, -1, -6 },
{ -4, -5, 38, 7, 70, 35, -6 },
{ -6, 1, 50, 7, 67, 24, -8 },
{ -8, 10, 60, 7, 61, 13, -8 },
{ -8, 20, 67, 7, 53, 3, -7 },
{ -7, 32, 70, 7, 41, -3, -5 },
{ -5, -3, 43, 7, 70, 30, -7 },
{ -7, 4, 54, 7, 66, 19, -8 },
{ -8, 14, 63, 7, 59, 8, -8 },
{ -8, 25, 69, 7, 48, 0, -6 } },
.odd = { { -8, 16, 66, 7, 56, 6, -8 },
{ -8, 28, 69, 7, 46, -1, -6 },
{ -4, -4, 39, 7, 69, 34, -6 },
{ -7, 2, 51, 7, 67, 23, -8 },
{ -8, 10, 60, 7, 62, 12, -8 },
{ -8, 21, 67, 7, 52, 3, -7 },
{ -7, 32, 71, 7, 41, -4, -5 },
{ -5, -2, 44, 7, 69, 29, -7 },
{ -7, 5, 55, 7, 65, 18, -8 },
{ -8, 15, 63, 7, 58, 8, -8 },
{ -8, 26, 69, 7, 47, 0, -6 },
{ -4, -5, 37, 7, 71, 35, -6 },
{ -6, 1, 49, 7, 68, 24, -8 },
{ -8, 9, 59, 7, 63, 13, -8 },
{ -8, 20, 65, 7, 54, 4, -7 },
{ -7, 31, 70, 7, 42, -3, -5 },
{ -5, -3, 42, 7, 70, 31, -7 },
{ -7, 4, 54, 7, 65, 20, -8 },
{ -8, 13, 63, 7, 59, 9, -8 },
{ -8, 24, 68, 7, 49, 1, -6 },
{ -6, 35, 71, 7, 37, -5, -4 },
{ -6, 0, 47, 7, 69, 26, -8 },
{ -8, 8, 58, 7, 63, 15, -8 },
{ -8, 18, 65, 7, 55, 5, -7 },
{ -7, 29, 69, 7, 44, -2, -5 },
{ -5, -4, 41, 7, 71, 32, -7 },
{ -7, 3, 52, 7, 67, 21, -8 },
{ -8, 12, 62, 7, 60, 10, -8 },
{ -8, 23, 67, 7, 51, 2, -7 },
{ -6, 34, 69, 7, 39, -4, -4 },
{ -6, -1, 46, 7, 69, 28, -8 },
{ -8, 6, 56, 7, 66, 16, -8 } } },
.ver_phase_arr = {
.even = { { -5, 36, 70, 7, 36, -5, -4 },
{ -6, 0, 48, 7, 69, 25, -8 },
{ -8, 8, 59, 7, 63, 14, -8 },
{ -8, 19, 66, 7, 54, 4, -7 },
{ -7, 30, 70, 7, 43, -3, -5 },
{ -5, -3, 41, 7, 70, 32, -7 },
{ -7, 3, 53, 7, 67, 20, -8 },
{ -8, 13, 61, 7, 60, 10, -8 },
{ -8, 24, 67, 7, 50, 1, -6 },
{ -6, 35, 70, 7, 38, -5, -4 },
{ -6, -1, 46, 7, 70, 27, -8 },
{ -8, 7, 57, 7, 64, 16, -8 },
{ -8, 17, 64, 7, 56, 6, -7 },
{ -7, 28, 69, 7, 45, -2, -5 },
{ -4, -4, 40, 7, 69, 33, -6 },
{ -7, 2, 51, 7, 68, 22, -8 },
{ -8, 11, 61, 7, 61, 11, -8 },
{ -8, 22, 68, 7, 51, 2, -7 },
{ -6, 33, 69, 7, 40, -4, -4 },
{ -5, -2, 45, 7, 69, 28, -7 },
{ -7, 6, 56, 7, 64, 17, -8 },
{ -8, 16, 64, 7, 57, 7, -8 },
{ -8, 27, 70, 7, 46, -1, -6 },
{ -4, -5, 38, 7, 70, 35, -6 },
{ -6, 1, 50, 7, 67, 24, -8 },
{ -8, 10, 60, 7, 61, 13, -8 },
{ -8, 20, 67, 7, 53, 3, -7 },
{ -7, 32, 70, 7, 41, -3, -5 },
{ -5, -3, 43, 7, 70, 30, -7 },
{ -7, 4, 54, 7, 66, 19, -8 },
{ -8, 14, 63, 7, 59, 8, -8 },
{ -8, 25, 69, 7, 48, 0, -6 } },
.odd = { { -8, 16, 66, 7, 56, 6, -8 },
{ -8, 28, 69, 7, 46, -1, -6 },
{ -4, -4, 39, 7, 69, 34, -6 },
{ -7, 2, 51, 7, 67, 23, -8 },
{ -8, 10, 60, 7, 62, 12, -8 },
{ -8, 21, 67, 7, 52, 3, -7 },
{ -7, 32, 71, 7, 41, -4, -5 },
{ -5, -2, 44, 7, 69, 29, -7 },
{ -7, 5, 55, 7, 65, 18, -8 },
{ -8, 15, 63, 7, 58, 8, -8 },
{ -8, 26, 69, 7, 47, 0, -6 },
{ -4, -5, 37, 7, 71, 35, -6 },
{ -6, 1, 49, 7, 68, 24, -8 },
{ -8, 9, 59, 7, 63, 13, -8 },
{ -8, 20, 65, 7, 54, 4, -7 },
{ -7, 31, 70, 7, 42, -3, -5 },
{ -5, -3, 42, 7, 70, 31, -7 },
{ -7, 4, 54, 7, 65, 20, -8 },
{ -8, 13, 63, 7, 59, 9, -8 },
{ -8, 24, 68, 7, 49, 1, -6 },
{ -6, 35, 71, 7, 37, -5, -4 },
{ -6, 0, 47, 7, 69, 26, -8 },
{ -8, 8, 58, 7, 63, 15, -8 },
{ -8, 18, 65, 7, 55, 5, -7 },
{ -7, 29, 69, 7, 44, -2, -5 },
{ -5, -4, 41, 7, 71, 32, -7 },
{ -7, 3, 52, 7, 67, 21, -8 },
{ -8, 12, 62, 7, 60, 10, -8 },
{ -8, 23, 67, 7, 51, 2, -7 },
{ -6, 34, 69, 7, 39, -4, -4 },
{ -6, -1, 46, 7, 69, 28, -8 },
{ -8, 6, 56, 7, 66, 16, -8 } } },
.ptrn_arr = { { 0x3399cce7, 0x3399cce7, 0x3399ce67, 0xce67 } },
.sample_patrn_length = 114,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 26) = 0.551724 */
.hor_phase_arr = {
.even = { { -5, 36, 70, 7, 36, -5, -4 },
{ -6, 0, 46, 7, 68, 27, -7 },
{ -8, 7, 55, 7, 64, 18, -8 },
{ -8, 15, 62, 7, 58, 9, -8 },
{ -8, 24, 68, 7, 49, 2, -7 },
{ -6, 33, 69, 7, 40, -3, -5 },
{ -6, -2, 43, 7, 70, 30, -7 },
{ -7, 4, 52, 7, 66, 21, -8 },
{ -8, 12, 60, 7, 60, 12, -8 },
{ -8, 21, 66, 7, 52, 4, -7 },
{ -7, 30, 70, 7, 43, -2, -6 },
{ -5, -3, 40, 7, 69, 33, -6 },
{ -7, 2, 49, 7, 68, 24, -8 },
{ -8, 9, 58, 7, 62, 15, -8 },
{ -8, 18, 64, 7, 55, 7, -8 },
{ -7, 27, 68, 7, 46, 0, -6 } },
.odd = { { -8, 17, 63, 7, 56, 8, -8 },
{ -8, 26, 67, 7, 48, 1, -6 },
{ -5, 35, 69, 7, 38, -4, -5 },
{ -6, -1, 45, 7, 68, 29, -7 },
{ -7, 5, 54, 7, 64, 20, -8 },
{ -8, 14, 60, 7, 59, 11, -8 },
{ -8, 23, 66, 7, 51, 3, -7 },
{ -6, 32, 69, 7, 41, -3, -5 },
{ -5, -3, 41, 7, 69, 32, -6 },
{ -7, 3, 51, 7, 66, 23, -8 },
{ -8, 11, 59, 7, 60, 14, -8 },
{ -8, 20, 64, 7, 54, 5, -7 },
{ -7, 29, 68, 7, 45, -1, -6 },
{ -5, -4, 38, 7, 69, 35, -5 },
{ -6, 1, 48, 7, 67, 26, -8 },
{ -8, 8, 56, 7, 63, 17, -8 } } },
.ver_phase_arr = {
.even = { { -5, 36, 70, 7, 36, -5, -4 },
{ -6, 0, 46, 7, 68, 27, -7 },
{ -8, 7, 55, 7, 64, 18, -8 },
{ -8, 15, 62, 7, 58, 9, -8 },
{ -8, 24, 68, 7, 49, 2, -7 },
{ -6, 33, 69, 7, 40, -3, -5 },
{ -6, -2, 43, 7, 70, 30, -7 },
{ -7, 4, 52, 7, 66, 21, -8 },
{ -8, 12, 60, 7, 60, 12, -8 },
{ -8, 21, 66, 7, 52, 4, -7 },
{ -7, 30, 70, 7, 43, -2, -6 },
{ -5, -3, 40, 7, 69, 33, -6 },
{ -7, 2, 49, 7, 68, 24, -8 },
{ -8, 9, 58, 7, 62, 15, -8 },
{ -8, 18, 64, 7, 55, 7, -8 },
{ -7, 27, 68, 7, 46, 0, -6 } },
.odd = { { -8, 17, 63, 7, 56, 8, -8 },
{ -8, 26, 67, 7, 48, 1, -6 },
{ -5, 35, 69, 7, 38, -4, -5 },
{ -6, -1, 45, 7, 68, 29, -7 },
{ -7, 5, 54, 7, 64, 20, -8 },
{ -8, 14, 60, 7, 59, 11, -8 },
{ -8, 23, 66, 7, 51, 3, -7 },
{ -6, 32, 69, 7, 41, -3, -5 },
{ -5, -3, 41, 7, 69, 32, -6 },
{ -7, 3, 51, 7, 66, 23, -8 },
{ -8, 11, 59, 7, 60, 14, -8 },
{ -8, 20, 64, 7, 54, 5, -7 },
{ -7, 29, 68, 7, 45, -1, -6 },
{ -5, -4, 38, 7, 69, 35, -5 },
{ -6, 1, 48, 7, 67, 26, -8 },
{ -8, 8, 56, 7, 63, 17, -8 } } },
.ptrn_arr = { { 0x399cce67, 0xcce673 } },
.sample_patrn_length = 58,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 27) = 0.542373 */
.hor_phase_arr = {
.even = { { -4, 37, 67, 7, 37, -4, -5 },
{ -6, 0, 44, 7, 67, 29, -6 },
{ -7, 5, 52, 7, 64, 22, -8 },
{ -8, 12, 58, 7, 60, 14, -8 },
{ -8, 19, 63, 7, 54, 8, -8 },
{ -7, 26, 67, 7, 47, 2, -7 },
{ -5, 34, 66, 7, 40, -2, -5 },
{ -6, -2, 41, 7, 68, 32, -5 },
{ -7, 3, 49, 7, 65, 25, -7 },
{ -8, 9, 56, 7, 62, 17, -8 },
{ -8, 16, 61, 7, 57, 10, -8 },
{ -8, 23, 66, 7, 50, 4, -7 },
{ -6, 31, 67, 7, 43, -1, -6 },
{ -5, -3, 38, 7, 67, 35, -4 },
{ -6, 1, 46, 7, 66, 28, -7 },
{ -8, 6, 53, 7, 65, 20, -8 },
{ -8, 13, 59, 7, 59, 13, -8 },
{ -8, 20, 65, 7, 53, 6, -8 },
{ -7, 28, 66, 7, 46, 1, -6 },
{ -4, 35, 67, 7, 38, -3, -5 },
{ -6, -1, 43, 7, 67, 31, -6 },
{ -7, 4, 50, 7, 66, 23, -8 },
{ -8, 10, 57, 7, 61, 16, -8 },
{ -8, 17, 62, 7, 56, 9, -8 },
{ -7, 25, 65, 7, 49, 3, -7 },
{ -5, 32, 68, 7, 41, -2, -6 },
{ -5, -2, 40, 7, 66, 34, -5 },
{ -7, 2, 47, 7, 67, 26, -7 },
{ -8, 8, 54, 7, 63, 19, -8 },
{ -8, 14, 60, 7, 58, 12, -8 },
{ -8, 22, 64, 7, 52, 5, -7 },
{ -6, 29, 67, 7, 44, 0, -6 } },
.odd = { { -8, 17, 61, 7, 56, 10, -8 },
{ -7, 24, 64, 7, 50, 4, -7 },
{ -6, 31, 68, 7, 42, -1, -6 },
{ -5, -3, 39, 7, 68, 34, -5 },
{ -7, 1, 47, 7, 67, 27, -7 },
{ -8, 7, 54, 7, 64, 19, -8 },
{ -8, 14, 59, 7, 59, 12, -8 },
{ -8, 21, 64, 7, 52, 6, -7 },
{ -7, 28, 68, 7, 45, 0, -6 },
{ -4, 36, 68, 7, 37, -4, -5 },
{ -6, 0, 44, 7, 66, 30, -6 },
{ -7, 5, 51, 7, 65, 22, -8 },
{ -8, 11, 57, 7, 61, 15, -8 },
{ -8, 18, 63, 7, 55, 8, -8 },
{ -7, 25, 67, 7, 48, 2, -7 },
{ -5, 33, 66, 7, 41, -2, -5 },
{ -5, -2, 41, 7, 66, 33, -5 },
{ -7, 2, 48, 7, 67, 25, -7 },
{ -8, 8, 55, 7, 63, 18, -8 },
{ -8, 15, 61, 7, 57, 11, -8 },
{ -8, 22, 65, 7, 51, 5, -7 },
{ -6, 30, 66, 7, 44, 0, -6 },
{ -5, -4, 37, 7, 68, 36, -4 },
{ -6, 0, 45, 7, 68, 28, -7 },
{ -7, 6, 52, 7, 64, 21, -8 },
{ -8, 12, 59, 7, 59, 14, -8 },
{ -8, 19, 64, 7, 54, 7, -8 },
{ -7, 27, 67, 7, 47, 1, -7 },
{ -5, 34, 68, 7, 39, -3, -5 },
{ -6, -1, 42, 7, 68, 31, -6 },
{ -7, 4, 50, 7, 64, 24, -7 },
{ -8, 10, 56, 7, 61, 17, -8 } } },
.ver_phase_arr = {
.even = { { -4, 37, 67, 7, 37, -4, -5 },
{ -6, 0, 44, 7, 67, 29, -6 },
{ -7, 5, 52, 7, 64, 22, -8 },
{ -8, 12, 58, 7, 60, 14, -8 },
{ -8, 19, 63, 7, 54, 8, -8 },
{ -7, 26, 67, 7, 47, 2, -7 },
{ -5, 34, 66, 7, 40, -2, -5 },
{ -6, -2, 41, 7, 68, 32, -5 },
{ -7, 3, 49, 7, 65, 25, -7 },
{ -8, 9, 56, 7, 62, 17, -8 },
{ -8, 16, 61, 7, 57, 10, -8 },
{ -8, 23, 66, 7, 50, 4, -7 },
{ -6, 31, 67, 7, 43, -1, -6 },
{ -5, -3, 38, 7, 67, 35, -4 },
{ -6, 1, 46, 7, 66, 28, -7 },
{ -8, 6, 53, 7, 65, 20, -8 },
{ -8, 13, 59, 7, 59, 13, -8 },
{ -8, 20, 65, 7, 53, 6, -8 },
{ -7, 28, 66, 7, 46, 1, -6 },
{ -4, 35, 67, 7, 38, -3, -5 },
{ -6, -1, 43, 7, 67, 31, -6 },
{ -7, 4, 50, 7, 66, 23, -8 },
{ -8, 10, 57, 7, 61, 16, -8 },
{ -8, 17, 62, 7, 56, 9, -8 },
{ -7, 25, 65, 7, 49, 3, -7 },
{ -5, 32, 68, 7, 41, -2, -6 },
{ -5, -2, 40, 7, 66, 34, -5 },
{ -7, 2, 47, 7, 67, 26, -7 },
{ -8, 8, 54, 7, 63, 19, -8 },
{ -8, 14, 60, 7, 58, 12, -8 },
{ -8, 22, 64, 7, 52, 5, -7 },
{ -6, 29, 67, 7, 44, 0, -6 } },
.odd = { { -8, 17, 61, 7, 56, 10, -8 },
{ -7, 24, 64, 7, 50, 4, -7 },
{ -6, 31, 68, 7, 42, -1, -6 },
{ -5, -3, 39, 7, 68, 34, -5 },
{ -7, 1, 47, 7, 67, 27, -7 },
{ -8, 7, 54, 7, 64, 19, -8 },
{ -8, 14, 59, 7, 59, 12, -8 },
{ -8, 21, 64, 7, 52, 6, -7 },
{ -7, 28, 68, 7, 45, 0, -6 },
{ -4, 36, 68, 7, 37, -4, -5 },
{ -6, 0, 44, 7, 66, 30, -6 },
{ -7, 5, 51, 7, 65, 22, -8 },
{ -8, 11, 57, 7, 61, 15, -8 },
{ -8, 18, 63, 7, 55, 8, -8 },
{ -7, 25, 67, 7, 48, 2, -7 },
{ -5, 33, 66, 7, 41, -2, -5 },
{ -5, -2, 41, 7, 66, 33, -5 },
{ -7, 2, 48, 7, 67, 25, -7 },
{ -8, 8, 55, 7, 63, 18, -8 },
{ -8, 15, 61, 7, 57, 11, -8 },
{ -8, 22, 65, 7, 51, 5, -7 },
{ -6, 30, 66, 7, 44, 0, -6 },
{ -5, -4, 37, 7, 68, 36, -4 },
{ -6, 0, 45, 7, 68, 28, -7 },
{ -7, 6, 52, 7, 64, 21, -8 },
{ -8, 12, 59, 7, 59, 14, -8 },
{ -8, 19, 64, 7, 54, 7, -8 },
{ -7, 27, 67, 7, 47, 1, -7 },
{ -5, 34, 68, 7, 39, -3, -5 },
{ -6, -1, 42, 7, 68, 31, -6 },
{ -7, 4, 50, 7, 64, 24, -7 },
{ -8, 10, 56, 7, 61, 17, -8 } } },
.ptrn_arr = { { 0x99ccce67, 0xce667339, 0x733399cc, 0xcce66 } },
.sample_patrn_length = 118,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 28) = 0.533333 */
.hor_phase_arr = {
.even = { { -3, 37, 65, 7, 37, -3, -5 },
{ -6, 0, 43, 7, 65, 31, -5 },
{ -7, 4, 48, 7, 65, 25, -7 },
{ -8, 9, 54, 7, 62, 19, -8 },
{ -8, 14, 58, 7, 58, 14, -8 },
{ -8, 19, 62, 7, 54, 9, -8 },
{ -7, 25, 65, 7, 48, 4, -7 },
{ -5, 31, 65, 7, 43, 0, -6 } },
.odd = { { -8, 17, 60, 7, 56, 11, -8 },
{ -7, 22, 63, 7, 51, 6, -7 },
{ -6, 28, 65, 7, 46, 2, -7 },
{ -4, 34, 66, 7, 40, -2, -6 },
{ -6, -2, 40, 7, 66, 34, -4 },
{ -7, 2, 46, 7, 65, 28, -6 },
{ -7, 6, 51, 7, 63, 22, -7 },
{ -8, 11, 56, 7, 60, 17, -8 } } },
.ver_phase_arr = {
.even = { { -3, 37, 65, 7, 37, -3, -5 },
{ -6, 0, 43, 7, 65, 31, -5 },
{ -7, 4, 48, 7, 65, 25, -7 },
{ -8, 9, 54, 7, 62, 19, -8 },
{ -8, 14, 58, 7, 58, 14, -8 },
{ -8, 19, 62, 7, 54, 9, -8 },
{ -7, 25, 65, 7, 48, 4, -7 },
{ -5, 31, 65, 7, 43, 0, -6 } },
.odd = { { -8, 17, 60, 7, 56, 11, -8 },
{ -7, 22, 63, 7, 51, 6, -7 },
{ -6, 28, 65, 7, 46, 2, -7 },
{ -4, 34, 66, 7, 40, -2, -6 },
{ -6, -2, 40, 7, 66, 34, -4 },
{ -7, 2, 46, 7, 65, 28, -6 },
{ -7, 6, 51, 7, 63, 22, -7 },
{ -8, 11, 56, 7, 60, 17, -8 } } },
.ptrn_arr = { { 0xccce667 } },
.sample_patrn_length = 30,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 29) = 0.52459 */
.hor_phase_arr = {
.even = { { -2, 37, 63, 7, 37, -2, -5 },
{ -6, 0, 41, 7, 64, 33, -4 },
{ -7, 3, 45, 7, 65, 28, -6 },
{ -7, 6, 49, 7, 63, 24, -7 },
{ -8, 9, 53, 7, 61, 20, -7 },
{ -8, 13, 56, 7, 59, 16, -8 },
{ -8, 17, 60, 7, 55, 12, -8 },
{ -7, 21, 62, 7, 52, 8, -8 },
{ -6, 26, 62, 7, 48, 5, -7 },
{ -5, 30, 64, 7, 44, 2, -7 },
{ -4, 34, 65, 7, 40, -1, -6 },
{ -6, -2, 38, 7, 66, 35, -3 },
{ -6, 1, 42, 7, 65, 31, -5 },
{ -7, 4, 47, 7, 63, 27, -6 },
{ -7, 7, 50, 7, 62, 23, -7 },
{ -8, 11, 54, 7, 59, 19, -7 },
{ -8, 15, 57, 7, 57, 15, -8 },
{ -7, 19, 59, 7, 54, 11, -8 },
{ -7, 23, 62, 7, 50, 7, -7 },
{ -6, 27, 63, 7, 47, 4, -7 },
{ -5, 31, 65, 7, 42, 1, -6 },
{ -3, 35, 66, 7, 38, -2, -6 },
{ -6, -1, 40, 7, 65, 34, -4 },
{ -7, 2, 44, 7, 64, 30, -5 },
{ -7, 5, 48, 7, 62, 26, -6 },
{ -8, 8, 52, 7, 62, 21, -7 },
{ -8, 12, 55, 7, 60, 17, -8 },
{ -8, 16, 59, 7, 56, 13, -8 },
{ -7, 20, 61, 7, 53, 9, -8 },
{ -7, 24, 63, 7, 49, 6, -7 },
{ -6, 28, 65, 7, 45, 3, -7 },
{ -4, 33, 64, 7, 41, 0, -6 } },
.odd = { { -8, 17, 58, 7, 56, 13, -8 },
{ -7, 21, 61, 7, 52, 9, -8 },
{ -6, 25, 62, 7, 49, 5, -7 },
{ -5, 29, 64, 7, 45, 2, -7 },
{ -4, 33, 65, 7, 40, 0, -6 },
{ -6, -2, 37, 7, 66, 36, -3 },
{ -6, 0, 42, 7, 64, 32, -4 },
{ -7, 3, 46, 7, 64, 28, -6 },
{ -7, 7, 50, 7, 61, 24, -7 },
{ -8, 10, 53, 7, 61, 19, -7 },
{ -8, 14, 57, 7, 58, 15, -8 },
{ -8, 18, 60, 7, 55, 11, -8 },
{ -7, 22, 62, 7, 51, 8, -8 },
{ -6, 26, 64, 7, 47, 4, -7 },
{ -5, 31, 65, 7, 43, 1, -7 },
{ -3, 35, 64, 7, 39, -1, -6 },
{ -6, -1, 39, 7, 64, 35, -3 },
{ -7, 1, 43, 7, 65, 31, -5 },
{ -7, 4, 47, 7, 64, 26, -6 },
{ -8, 8, 51, 7, 62, 22, -7 },
{ -8, 11, 55, 7, 60, 18, -8 },
{ -8, 15, 58, 7, 57, 14, -8 },
{ -7, 19, 61, 7, 53, 10, -8 },
{ -7, 24, 61, 7, 50, 7, -7 },
{ -6, 28, 64, 7, 46, 3, -7 },
{ -4, 32, 64, 7, 42, 0, -6 },
{ -3, 36, 66, 7, 37, -2, -6 },
{ -6, 0, 40, 7, 65, 33, -4 },
{ -7, 2, 45, 7, 64, 29, -5 },
{ -7, 5, 49, 7, 62, 25, -6 },
{ -8, 9, 52, 7, 61, 21, -7 },
{ -8, 13, 56, 7, 58, 17, -8 } } },
.ver_phase_arr = {
.even = { { -2, 37, 63, 7, 37, -2, -5 },
{ -6, 0, 41, 7, 64, 33, -4 },
{ -7, 3, 45, 7, 65, 28, -6 },
{ -7, 6, 49, 7, 63, 24, -7 },
{ -8, 9, 53, 7, 61, 20, -7 },
{ -8, 13, 56, 7, 59, 16, -8 },
{ -8, 17, 60, 7, 55, 12, -8 },
{ -7, 21, 62, 7, 52, 8, -8 },
{ -6, 26, 62, 7, 48, 5, -7 },
{ -5, 30, 64, 7, 44, 2, -7 },
{ -4, 34, 65, 7, 40, -1, -6 },
{ -6, -2, 38, 7, 66, 35, -3 },
{ -6, 1, 42, 7, 65, 31, -5 },
{ -7, 4, 47, 7, 63, 27, -6 },
{ -7, 7, 50, 7, 62, 23, -7 },
{ -8, 11, 54, 7, 59, 19, -7 },
{ -8, 15, 57, 7, 57, 15, -8 },
{ -7, 19, 59, 7, 54, 11, -8 },
{ -7, 23, 62, 7, 50, 7, -7 },
{ -6, 27, 63, 7, 47, 4, -7 },
{ -5, 31, 65, 7, 42, 1, -6 },
{ -3, 35, 66, 7, 38, -2, -6 },
{ -6, -1, 40, 7, 65, 34, -4 },
{ -7, 2, 44, 7, 64, 30, -5 },
{ -7, 5, 48, 7, 62, 26, -6 },
{ -8, 8, 52, 7, 62, 21, -7 },
{ -8, 12, 55, 7, 60, 17, -8 },
{ -8, 16, 59, 7, 56, 13, -8 },
{ -7, 20, 61, 7, 53, 9, -8 },
{ -7, 24, 63, 7, 49, 6, -7 },
{ -6, 28, 65, 7, 45, 3, -7 },
{ -4, 33, 64, 7, 41, 0, -6 } },
.odd = { { -8, 17, 58, 7, 56, 13, -8 },
{ -7, 21, 61, 7, 52, 9, -8 },
{ -6, 25, 62, 7, 49, 5, -7 },
{ -5, 29, 64, 7, 45, 2, -7 },
{ -4, 33, 65, 7, 40, 0, -6 },
{ -6, -2, 37, 7, 66, 36, -3 },
{ -6, 0, 42, 7, 64, 32, -4 },
{ -7, 3, 46, 7, 64, 28, -6 },
{ -7, 7, 50, 7, 61, 24, -7 },
{ -8, 10, 53, 7, 61, 19, -7 },
{ -8, 14, 57, 7, 58, 15, -8 },
{ -8, 18, 60, 7, 55, 11, -8 },
{ -7, 22, 62, 7, 51, 8, -8 },
{ -6, 26, 64, 7, 47, 4, -7 },
{ -5, 31, 65, 7, 43, 1, -7 },
{ -3, 35, 64, 7, 39, -1, -6 },
{ -6, -1, 39, 7, 64, 35, -3 },
{ -7, 1, 43, 7, 65, 31, -5 },
{ -7, 4, 47, 7, 64, 26, -6 },
{ -8, 8, 51, 7, 62, 22, -7 },
{ -8, 11, 55, 7, 60, 18, -8 },
{ -8, 15, 58, 7, 57, 14, -8 },
{ -7, 19, 61, 7, 53, 10, -8 },
{ -7, 24, 61, 7, 50, 7, -7 },
{ -6, 28, 64, 7, 46, 3, -7 },
{ -4, 32, 64, 7, 42, 0, -6 },
{ -3, 36, 66, 7, 37, -2, -6 },
{ -6, 0, 40, 7, 65, 33, -4 },
{ -7, 2, 45, 7, 64, 29, -5 },
{ -7, 5, 49, 7, 62, 25, -6 },
{ -8, 9, 52, 7, 61, 21, -7 },
{ -8, 13, 56, 7, 58, 17, -8 } } },
.ptrn_arr = { { 0xccce6667, 0x399999cc, 0x66673333, 0xcccce6 } },
.sample_patrn_length = 122,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 30) = 0.516129 */
.hor_phase_arr = {
.even = { { -2, 37, 64, 7, 37, -2, -6 },
{ -6, 0, 39, 7, 64, 34, -3 },
{ -7, 2, 42, 7, 64, 31, -4 },
{ -7, 4, 45, 7, 62, 29, -5 },
{ -7, 6, 47, 7, 62, 26, -6 },
{ -7, 8, 50, 7, 60, 23, -6 },
{ -8, 10, 52, 7, 60, 21, -7 },
{ -8, 13, 54, 7, 58, 18, -7 },
{ -8, 15, 58, 7, 56, 15, -8 },
{ -7, 18, 58, 7, 54, 13, -8 },
{ -7, 21, 60, 7, 52, 10, -8 },
{ -6, 23, 60, 7, 50, 8, -7 },
{ -6, 26, 62, 7, 47, 6, -7 },
{ -5, 29, 62, 7, 45, 4, -7 },
{ -4, 31, 64, 7, 42, 2, -7 },
{ -3, 34, 64, 7, 39, 0, -6 } },
.odd = { { -7, 17, 57, 7, 55, 14, -8 },
{ -7, 19, 59, 7, 53, 12, -8 },
{ -7, 22, 61, 7, 51, 9, -8 },
{ -6, 25, 60, 7, 49, 7, -7 },
{ -5, 27, 62, 7, 46, 5, -7 },
{ -5, 30, 63, 7, 44, 3, -7 },
{ -3, 33, 62, 7, 41, 1, -6 },
{ -2, 35, 64, 7, 38, -1, -6 },
{ -6, -1, 38, 7, 64, 35, -2 },
{ -6, 1, 41, 7, 62, 33, -3 },
{ -7, 3, 44, 7, 63, 30, -5 },
{ -7, 5, 46, 7, 62, 27, -5 },
{ -7, 7, 49, 7, 60, 25, -6 },
{ -8, 9, 51, 7, 61, 22, -7 },
{ -8, 12, 53, 7, 59, 19, -7 },
{ -8, 14, 55, 7, 57, 17, -7 } } },
.ver_phase_arr = {
.even = { { -2, 37, 64, 7, 37, -2, -6 },
{ -6, 0, 39, 7, 64, 34, -3 },
{ -7, 2, 42, 7, 64, 31, -4 },
{ -7, 4, 45, 7, 62, 29, -5 },
{ -7, 6, 47, 7, 62, 26, -6 },
{ -7, 8, 50, 7, 60, 23, -6 },
{ -8, 10, 52, 7, 60, 21, -7 },
{ -8, 13, 54, 7, 58, 18, -7 },
{ -8, 15, 58, 7, 56, 15, -8 },
{ -7, 18, 58, 7, 54, 13, -8 },
{ -7, 21, 60, 7, 52, 10, -8 },
{ -6, 23, 60, 7, 50, 8, -7 },
{ -6, 26, 62, 7, 47, 6, -7 },
{ -5, 29, 62, 7, 45, 4, -7 },
{ -4, 31, 64, 7, 42, 2, -7 },
{ -3, 34, 64, 7, 39, 0, -6 } },
.odd = { { -7, 17, 57, 7, 55, 14, -8 },
{ -7, 19, 59, 7, 53, 12, -8 },
{ -7, 22, 61, 7, 51, 9, -8 },
{ -6, 25, 60, 7, 49, 7, -7 },
{ -5, 27, 62, 7, 46, 5, -7 },
{ -5, 30, 63, 7, 44, 3, -7 },
{ -3, 33, 62, 7, 41, 1, -6 },
{ -2, 35, 64, 7, 38, -1, -6 },
{ -6, -1, 38, 7, 64, 35, -2 },
{ -6, 1, 41, 7, 62, 33, -3 },
{ -7, 3, 44, 7, 63, 30, -5 },
{ -7, 5, 46, 7, 62, 27, -5 },
{ -7, 7, 49, 7, 60, 25, -6 },
{ -8, 9, 51, 7, 61, 22, -7 },
{ -8, 12, 53, 7, 59, 19, -7 },
{ -8, 14, 55, 7, 57, 17, -7 } } },
.ptrn_arr = { { 0xe6666667, 0xccccccc } },
.sample_patrn_length = 62,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 31) = 0.507937 */
.hor_phase_arr = {
.even = { { -1, 37, 62, 7, 37, -1, -6 },
{ -6, 0, 38, 7, 62, 35, -1 },
{ -6, 1, 39, 7, 62, 34, -2 },
{ -7, 2, 41, 7, 62, 33, -3 },
{ -7, 3, 42, 7, 61, 32, -3 },
{ -7, 4, 43, 7, 62, 30, -4 },
{ -7, 4, 44, 7, 62, 29, -4 },
{ -7, 6, 46, 7, 60, 28, -5 },
{ -7, 7, 47, 7, 60, 26, -5 },
{ -7, 8, 48, 7, 60, 25, -6 },
{ -7, 9, 49, 7, 59, 24, -6 },
{ -7, 10, 50, 7, 59, 22, -6 },
{ -7, 11, 51, 7, 59, 21, -7 },
{ -7, 12, 52, 7, 58, 20, -7 },
{ -7, 13, 53, 7, 57, 19, -7 },
{ -7, 15, 54, 7, 56, 17, -7 },
{ -7, 16, 55, 7, 55, 16, -7 },
{ -7, 17, 56, 7, 54, 15, -7 },
{ -7, 19, 57, 7, 53, 13, -7 },
{ -7, 20, 58, 7, 52, 12, -7 },
{ -7, 21, 59, 7, 51, 11, -7 },
{ -6, 22, 59, 7, 50, 10, -7 },
{ -6, 24, 59, 7, 49, 9, -7 },
{ -6, 25, 60, 7, 48, 8, -7 },
{ -5, 26, 60, 7, 47, 7, -7 },
{ -5, 28, 60, 7, 46, 6, -7 },
{ -4, 29, 62, 7, 44, 4, -7 },
{ -4, 30, 62, 7, 43, 4, -7 },
{ -3, 32, 61, 7, 42, 3, -7 },
{ -3, 33, 62, 7, 41, 2, -7 },
{ -2, 34, 62, 7, 39, 1, -6 },
{ -1, 35, 62, 7, 38, 0, -6 } },
.odd = { { -7, 17, 55, 7, 55, 15, -7 },
{ -7, 18, 56, 7, 54, 14, -7 },
{ -7, 19, 57, 7, 53, 13, -7 },
{ -7, 20, 58, 7, 52, 12, -7 },
{ -6, 22, 58, 7, 51, 10, -7 },
{ -6, 23, 59, 7, 50, 9, -7 },
{ -6, 24, 60, 7, 49, 8, -7 },
{ -5, 26, 60, 7, 47, 7, -7 },
{ -5, 27, 61, 7, 46, 6, -7 },
{ -5, 28, 62, 7, 45, 5, -7 },
{ -4, 30, 61, 7, 44, 4, -7 },
{ -4, 31, 62, 7, 43, 3, -7 },
{ -3, 32, 63, 7, 41, 2, -7 },
{ -2, 34, 61, 7, 40, 1, -6 },
{ -2, 35, 62, 7, 39, 0, -6 },
{ -1, 36, 62, 7, 37, 0, -6 },
{ -6, 0, 37, 7, 62, 36, -1 },
{ -6, 0, 39, 7, 62, 35, -2 },
{ -6, 1, 40, 7, 61, 34, -2 },
{ -7, 2, 41, 7, 63, 32, -3 },
{ -7, 3, 43, 7, 62, 31, -4 },
{ -7, 4, 44, 7, 61, 30, -4 },
{ -7, 5, 45, 7, 62, 28, -5 },
{ -7, 6, 46, 7, 61, 27, -5 },
{ -7, 7, 47, 7, 60, 26, -5 },
{ -7, 8, 49, 7, 60, 24, -6 },
{ -7, 9, 50, 7, 59, 23, -6 },
{ -7, 10, 51, 7, 58, 22, -6 },
{ -7, 12, 52, 7, 58, 20, -7 },
{ -7, 13, 53, 7, 57, 19, -7 },
{ -7, 14, 54, 7, 56, 18, -7 },
{ -7, 15, 55, 7, 55, 17, -7 } } },
.ver_phase_arr = {
.even = { { -1, 37, 62, 7, 37, -1, -6 },
{ -6, 0, 38, 7, 62, 35, -1 },
{ -6, 1, 39, 7, 62, 34, -2 },
{ -7, 2, 41, 7, 62, 33, -3 },
{ -7, 3, 42, 7, 61, 32, -3 },
{ -7, 4, 43, 7, 62, 30, -4 },
{ -7, 4, 44, 7, 62, 29, -4 },
{ -7, 6, 46, 7, 60, 28, -5 },
{ -7, 7, 47, 7, 60, 26, -5 },
{ -7, 8, 48, 7, 60, 25, -6 },
{ -7, 9, 49, 7, 59, 24, -6 },
{ -7, 10, 50, 7, 59, 22, -6 },
{ -7, 11, 51, 7, 59, 21, -7 },
{ -7, 12, 52, 7, 58, 20, -7 },
{ -7, 13, 53, 7, 57, 19, -7 },
{ -7, 15, 54, 7, 56, 17, -7 },
{ -7, 16, 55, 7, 55, 16, -7 },
{ -7, 17, 56, 7, 54, 15, -7 },
{ -7, 19, 57, 7, 53, 13, -7 },
{ -7, 20, 58, 7, 52, 12, -7 },
{ -7, 21, 59, 7, 51, 11, -7 },
{ -6, 22, 59, 7, 50, 10, -7 },
{ -6, 24, 59, 7, 49, 9, -7 },
{ -6, 25, 60, 7, 48, 8, -7 },
{ -5, 26, 60, 7, 47, 7, -7 },
{ -5, 28, 60, 7, 46, 6, -7 },
{ -4, 29, 62, 7, 44, 4, -7 },
{ -4, 30, 62, 7, 43, 4, -7 },
{ -3, 32, 61, 7, 42, 3, -7 },
{ -3, 33, 62, 7, 41, 2, -7 },
{ -2, 34, 62, 7, 39, 1, -6 },
{ -1, 35, 62, 7, 38, 0, -6 } },
.odd = { { -7, 17, 55, 7, 55, 15, -7 },
{ -7, 18, 56, 7, 54, 14, -7 },
{ -7, 19, 57, 7, 53, 13, -7 },
{ -7, 20, 58, 7, 52, 12, -7 },
{ -6, 22, 58, 7, 51, 10, -7 },
{ -6, 23, 59, 7, 50, 9, -7 },
{ -6, 24, 60, 7, 49, 8, -7 },
{ -5, 26, 60, 7, 47, 7, -7 },
{ -5, 27, 61, 7, 46, 6, -7 },
{ -5, 28, 62, 7, 45, 5, -7 },
{ -4, 30, 61, 7, 44, 4, -7 },
{ -4, 31, 62, 7, 43, 3, -7 },
{ -3, 32, 63, 7, 41, 2, -7 },
{ -2, 34, 61, 7, 40, 1, -6 },
{ -2, 35, 62, 7, 39, 0, -6 },
{ -1, 36, 62, 7, 37, 0, -6 },
{ -6, 0, 37, 7, 62, 36, -1 },
{ -6, 0, 39, 7, 62, 35, -2 },
{ -6, 1, 40, 7, 61, 34, -2 },
{ -7, 2, 41, 7, 63, 32, -3 },
{ -7, 3, 43, 7, 62, 31, -4 },
{ -7, 4, 44, 7, 61, 30, -4 },
{ -7, 5, 45, 7, 62, 28, -5 },
{ -7, 6, 46, 7, 61, 27, -5 },
{ -7, 7, 47, 7, 60, 26, -5 },
{ -7, 8, 49, 7, 60, 24, -6 },
{ -7, 9, 50, 7, 59, 23, -6 },
{ -7, 10, 51, 7, 58, 22, -6 },
{ -7, 12, 52, 7, 58, 20, -7 },
{ -7, 13, 53, 7, 57, 19, -7 },
{ -7, 14, 54, 7, 56, 18, -7 },
{ -7, 15, 55, 7, 55, 17, -7 } } },
.ptrn_arr = { { 0x66666667, 0xe6666666, 0xcccccccc, 0xccccccc } },
.sample_patrn_length = 126,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 32) = 0.5 */
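	/*
	 * The 1:2 case degenerates to a trivial kernel: the even phase
	 * is a narrow (8, 112, 8)/128 weighting and the odd phase a
	 * plain two-tap average, (64 + 64)/128, i.e. the mean of two
	 * adjacent samples.
	 */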
.hor_phase_arr = {
.even = { { 0, 8, 112, 7, 8, 0, 0 } },
.odd = { { 0, 0, 64, 7, 64, 0, 0 } } },
.ver_phase_arr = {
.even = { { 0, 8, 112, 7, 8, 0, 0 } },
.odd = { { 0, 0, 64, 7, 64, 0, 0 } } },
.ptrn_arr = { { 0x3 } },
.sample_patrn_length = 4,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 33) = 0.492308 */
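	/*
	 * As with the 1:2 entry above, the entries in this range use
	 * only non-negative taps with zero outer taps, i.e. short
	 * smoothing/interpolation kernels rather than the negative-lobed
	 * kernels used at milder ratios.
	 */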
.hor_phase_arr = {
.even = { { 0, 9, 110, 7, 9, 0, 0 },
{ 0, 8, 109, 7, 11, 0, 0 },
{ 0, 7, 109, 7, 12, 0, 0 },
{ 0, 6, 108, 7, 14, 0, 0 },
{ 0, 5, 107, 7, 16, 0, 0 },
{ 0, 4, 105, 7, 19, 0, 0 },
{ 0, 3, 103, 7, 22, 0, 0 },
{ 0, 3, 100, 7, 25, 0, 0 },
{ 0, 2, 98, 7, 28, 0, 0 },
{ 0, 2, 94, 7, 32, 0, 0 },
{ 0, 2, 90, 7, 36, 0, 0 },
{ 0, 1, 87, 7, 40, 0, 0 },
{ 0, 1, 83, 7, 44, 0, 0 },
{ 0, 1, 78, 7, 49, 0, 0 },
{ 0, 1, 73, 7, 54, 0, 0 },
{ 0, 1, 68, 7, 59, 0, 0 },
{ 0, 0, 64, 7, 64, 0, 0 },
{ 0, 0, 59, 7, 68, 1, 0 },
{ 0, 0, 54, 7, 73, 1, 0 },
{ 0, 0, 49, 7, 78, 1, 0 },
{ 0, 0, 44, 7, 83, 1, 0 },
{ 0, 0, 40, 7, 87, 1, 0 },
{ 0, 0, 36, 7, 90, 2, 0 },
{ 0, 0, 32, 7, 94, 2, 0 },
{ 0, 0, 28, 7, 98, 2, 0 },
{ 0, 0, 25, 7, 100, 3, 0 },
{ 0, 0, 22, 7, 103, 3, 0 },
{ 0, 0, 19, 7, 105, 4, 0 },
{ 0, 0, 16, 7, 107, 5, 0 },
{ 0, 0, 14, 7, 108, 6, 0 },
{ 0, 0, 12, 7, 109, 7, 0 },
{ 0, 0, 11, 7, 109, 8, 0 } },
.odd = { { 0, 0, 61, 7, 67, 0, 0 },
{ 0, 0, 56, 7, 71, 1, 0 },
{ 0, 0, 51, 7, 76, 1, 0 },
{ 0, 0, 46, 7, 81, 1, 0 },
{ 0, 0, 42, 7, 85, 1, 0 },
{ 0, 0, 38, 7, 89, 1, 0 },
{ 0, 0, 34, 7, 92, 2, 0 },
{ 0, 0, 30, 7, 96, 2, 0 },
{ 0, 0, 26, 7, 99, 3, 0 },
{ 0, 0, 23, 7, 102, 3, 0 },
{ 0, 0, 20, 7, 104, 4, 0 },
{ 0, 0, 18, 7, 106, 4, 0 },
{ 0, 0, 15, 7, 108, 5, 0 },
{ 0, 0, 13, 7, 109, 6, 0 },
{ 0, 0, 11, 7, 110, 7, 0 },
{ 0, 0, 10, 7, 110, 8, 0 },
{ 0, 8, 110, 7, 10, 0, 0 },
{ 0, 7, 110, 7, 11, 0, 0 },
{ 0, 6, 109, 7, 13, 0, 0 },
{ 0, 5, 108, 7, 15, 0, 0 },
{ 0, 4, 106, 7, 18, 0, 0 },
{ 0, 4, 104, 7, 20, 0, 0 },
{ 0, 3, 102, 7, 23, 0, 0 },
{ 0, 3, 99, 7, 26, 0, 0 },
{ 0, 2, 96, 7, 30, 0, 0 },
{ 0, 2, 92, 7, 34, 0, 0 },
{ 0, 1, 89, 7, 38, 0, 0 },
{ 0, 1, 85, 7, 42, 0, 0 },
{ 0, 1, 81, 7, 46, 0, 0 },
{ 0, 1, 76, 7, 51, 0, 0 },
{ 0, 1, 71, 7, 56, 0, 0 },
{ 0, 0, 67, 7, 61, 0, 0 } } },
.ver_phase_arr = {
.even = { { 0, 9, 110, 7, 9, 0, 0 },
{ 0, 8, 109, 7, 11, 0, 0 },
{ 0, 7, 109, 7, 12, 0, 0 },
{ 0, 6, 108, 7, 14, 0, 0 },
{ 0, 5, 107, 7, 16, 0, 0 },
{ 0, 4, 105, 7, 19, 0, 0 },
{ 0, 3, 103, 7, 22, 0, 0 },
{ 0, 3, 100, 7, 25, 0, 0 },
{ 0, 2, 98, 7, 28, 0, 0 },
{ 0, 2, 94, 7, 32, 0, 0 },
{ 0, 2, 90, 7, 36, 0, 0 },
{ 0, 1, 87, 7, 40, 0, 0 },
{ 0, 1, 83, 7, 44, 0, 0 },
{ 0, 1, 78, 7, 49, 0, 0 },
{ 0, 1, 73, 7, 54, 0, 0 },
{ 0, 1, 68, 7, 59, 0, 0 },
{ 0, 0, 64, 7, 64, 0, 0 },
{ 0, 0, 59, 7, 68, 1, 0 },
{ 0, 0, 54, 7, 73, 1, 0 },
{ 0, 0, 49, 7, 78, 1, 0 },
{ 0, 0, 44, 7, 83, 1, 0 },
{ 0, 0, 40, 7, 87, 1, 0 },
{ 0, 0, 36, 7, 90, 2, 0 },
{ 0, 0, 32, 7, 94, 2, 0 },
{ 0, 0, 28, 7, 98, 2, 0 },
{ 0, 0, 25, 7, 100, 3, 0 },
{ 0, 0, 22, 7, 103, 3, 0 },
{ 0, 0, 19, 7, 105, 4, 0 },
{ 0, 0, 16, 7, 107, 5, 0 },
{ 0, 0, 14, 7, 108, 6, 0 },
{ 0, 0, 12, 7, 109, 7, 0 },
{ 0, 0, 11, 7, 109, 8, 0 } },
.odd = { { 0, 0, 61, 7, 67, 0, 0 },
{ 0, 0, 56, 7, 71, 1, 0 },
{ 0, 0, 51, 7, 76, 1, 0 },
{ 0, 0, 46, 7, 81, 1, 0 },
{ 0, 0, 42, 7, 85, 1, 0 },
{ 0, 0, 38, 7, 89, 1, 0 },
{ 0, 0, 34, 7, 92, 2, 0 },
{ 0, 0, 30, 7, 96, 2, 0 },
{ 0, 0, 26, 7, 99, 3, 0 },
{ 0, 0, 23, 7, 102, 3, 0 },
{ 0, 0, 20, 7, 104, 4, 0 },
{ 0, 0, 18, 7, 106, 4, 0 },
{ 0, 0, 15, 7, 108, 5, 0 },
{ 0, 0, 13, 7, 109, 6, 0 },
{ 0, 0, 11, 7, 110, 7, 0 },
{ 0, 0, 10, 7, 110, 8, 0 },
{ 0, 8, 110, 7, 10, 0, 0 },
{ 0, 7, 110, 7, 11, 0, 0 },
{ 0, 6, 109, 7, 13, 0, 0 },
{ 0, 5, 108, 7, 15, 0, 0 },
{ 0, 4, 106, 7, 18, 0, 0 },
{ 0, 4, 104, 7, 20, 0, 0 },
{ 0, 3, 102, 7, 23, 0, 0 },
{ 0, 3, 99, 7, 26, 0, 0 },
{ 0, 2, 96, 7, 30, 0, 0 },
{ 0, 2, 92, 7, 34, 0, 0 },
{ 0, 1, 89, 7, 38, 0, 0 },
{ 0, 1, 85, 7, 42, 0, 0 },
{ 0, 1, 81, 7, 46, 0, 0 },
{ 0, 1, 76, 7, 51, 0, 0 },
{ 0, 1, 71, 7, 56, 0, 0 },
{ 0, 0, 67, 7, 61, 0, 0 } } },
.ptrn_arr = { { 0x33333333, 0x33333333, 0x99999999, 0x99999999 } },
.sample_patrn_length = 130,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 34) = 0.484848 */
.hor_phase_arr = {
.even = { { 0, 10, 108, 7, 10, 0, 0 },
{ 0, 7, 108, 7, 13, 0, 0 },
{ 0, 5, 106, 7, 17, 0, 0 },
{ 0, 4, 102, 7, 22, 0, 0 },
{ 0, 3, 96, 7, 29, 0, 0 },
{ 0, 2, 90, 7, 36, 0, 0 },
{ 0, 1, 82, 7, 45, 0, 0 },
{ 0, 1, 73, 7, 54, 0, 0 },
{ 0, 0, 64, 7, 64, 0, 0 },
{ 0, 0, 54, 7, 73, 1, 0 },
{ 0, 0, 45, 7, 82, 1, 0 },
{ 0, 0, 36, 7, 90, 2, 0 },
{ 0, 0, 29, 7, 96, 3, 0 },
{ 0, 0, 22, 7, 102, 4, 0 },
{ 0, 0, 17, 7, 106, 5, 0 },
{ 0, 0, 13, 7, 108, 7, 0 } },
.odd = { { 0, 0, 59, 7, 68, 1, 0 },
{ 0, 0, 49, 7, 78, 1, 0 },
{ 0, 0, 40, 7, 87, 1, 0 },
{ 0, 0, 32, 7, 94, 2, 0 },
{ 0, 0, 25, 7, 100, 3, 0 },
{ 0, 0, 20, 7, 104, 4, 0 },
{ 0, 0, 15, 7, 107, 6, 0 },
{ 0, 0, 11, 7, 109, 8, 0 },
{ 0, 8, 109, 7, 11, 0, 0 },
{ 0, 6, 107, 7, 15, 0, 0 },
{ 0, 4, 104, 7, 20, 0, 0 },
{ 0, 3, 100, 7, 25, 0, 0 },
{ 0, 2, 94, 7, 32, 0, 0 },
{ 0, 1, 87, 7, 40, 0, 0 },
{ 0, 1, 78, 7, 49, 0, 0 },
{ 0, 1, 68, 7, 59, 0, 0 } } },
.ver_phase_arr = {
.even = { { 0, 10, 108, 7, 10, 0, 0 },
{ 0, 7, 108, 7, 13, 0, 0 },
{ 0, 5, 106, 7, 17, 0, 0 },
{ 0, 4, 102, 7, 22, 0, 0 },
{ 0, 3, 96, 7, 29, 0, 0 },
{ 0, 2, 90, 7, 36, 0, 0 },
{ 0, 1, 82, 7, 45, 0, 0 },
{ 0, 1, 73, 7, 54, 0, 0 },
{ 0, 0, 64, 7, 64, 0, 0 },
{ 0, 0, 54, 7, 73, 1, 0 },
{ 0, 0, 45, 7, 82, 1, 0 },
{ 0, 0, 36, 7, 90, 2, 0 },
{ 0, 0, 29, 7, 96, 3, 0 },
{ 0, 0, 22, 7, 102, 4, 0 },
{ 0, 0, 17, 7, 106, 5, 0 },
{ 0, 0, 13, 7, 108, 7, 0 } },
.odd = { { 0, 0, 59, 7, 68, 1, 0 },
{ 0, 0, 49, 7, 78, 1, 0 },
{ 0, 0, 40, 7, 87, 1, 0 },
{ 0, 0, 32, 7, 94, 2, 0 },
{ 0, 0, 25, 7, 100, 3, 0 },
{ 0, 0, 20, 7, 104, 4, 0 },
{ 0, 0, 15, 7, 107, 6, 0 },
{ 0, 0, 11, 7, 109, 8, 0 },
{ 0, 8, 109, 7, 11, 0, 0 },
{ 0, 6, 107, 7, 15, 0, 0 },
{ 0, 4, 104, 7, 20, 0, 0 },
{ 0, 3, 100, 7, 25, 0, 0 },
{ 0, 2, 94, 7, 32, 0, 0 },
{ 0, 1, 87, 7, 40, 0, 0 },
{ 0, 1, 78, 7, 49, 0, 0 },
{ 0, 1, 68, 7, 59, 0, 0 } } },
.ptrn_arr = { { 0x33333333, 0x99999999 } },
.sample_patrn_length = 66,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 35) = 0.477612 */
.hor_phase_arr = {
.even = { { 0, 10, 108, 7, 10, 0, 0 },
{ 0, 6, 106, 7, 16, 0, 0 },
{ 0, 4, 101, 7, 23, 0, 0 },
{ 0, 2, 93, 7, 33, 0, 0 },
{ 0, 1, 82, 7, 45, 0, 0 },
{ 0, 1, 68, 7, 59, 0, 0 },
{ 0, 0, 54, 7, 73, 1, 0 },
{ 0, 0, 41, 7, 85, 2, 0 },
{ 0, 0, 29, 7, 96, 3, 0 },
{ 0, 0, 20, 7, 103, 5, 0 },
{ 0, 0, 14, 7, 106, 8, 0 },
{ 0, 9, 107, 7, 12, 0, 0 },
{ 0, 5, 105, 7, 18, 0, 0 },
{ 0, 3, 99, 7, 26, 0, 0 },
{ 0, 2, 89, 7, 37, 0, 0 },
{ 0, 1, 77, 7, 50, 0, 0 },
{ 0, 1, 63, 7, 63, 1, 0 },
{ 0, 0, 50, 7, 77, 1, 0 },
{ 0, 0, 37, 7, 89, 2, 0 },
{ 0, 0, 26, 7, 99, 3, 0 },
{ 0, 0, 18, 7, 105, 5, 0 },
{ 0, 0, 12, 7, 107, 9, 0 },
{ 0, 8, 106, 7, 14, 0, 0 },
{ 0, 5, 103, 7, 20, 0, 0 },
{ 0, 3, 96, 7, 29, 0, 0 },
{ 0, 2, 85, 7, 41, 0, 0 },
{ 0, 1, 73, 7, 54, 0, 0 },
{ 0, 0, 59, 7, 68, 1, 0 },
{ 0, 0, 45, 7, 82, 1, 0 },
{ 0, 0, 33, 7, 93, 2, 0 },
{ 0, 0, 23, 7, 101, 4, 0 },
{ 0, 0, 16, 7, 106, 6, 0 } },
.odd = { { 0, 0, 56, 7, 71, 1, 0 },
{ 0, 0, 43, 7, 84, 1, 0 },
{ 0, 0, 31, 7, 94, 3, 0 },
{ 0, 0, 22, 7, 102, 4, 0 },
{ 0, 0, 15, 7, 106, 7, 0 },
{ 0, 9, 108, 7, 11, 0, 0 },
{ 0, 6, 105, 7, 17, 0, 0 },
{ 0, 4, 99, 7, 25, 0, 0 },
{ 0, 2, 91, 7, 35, 0, 0 },
{ 0, 1, 80, 7, 47, 0, 0 },
{ 0, 1, 65, 7, 61, 1, 0 },
{ 0, 0, 52, 7, 75, 1, 0 },
{ 0, 0, 39, 7, 87, 2, 0 },
{ 0, 0, 28, 7, 97, 3, 0 },
{ 0, 0, 19, 7, 104, 5, 0 },
{ 0, 0, 13, 7, 107, 8, 0 },
{ 0, 8, 107, 7, 13, 0, 0 },
{ 0, 5, 104, 7, 19, 0, 0 },
{ 0, 3, 97, 7, 28, 0, 0 },
{ 0, 2, 87, 7, 39, 0, 0 },
{ 0, 1, 75, 7, 52, 0, 0 },
{ 0, 1, 61, 7, 65, 1, 0 },
{ 0, 0, 47, 7, 80, 1, 0 },
{ 0, 0, 35, 7, 91, 2, 0 },
{ 0, 0, 25, 7, 99, 4, 0 },
{ 0, 0, 17, 7, 105, 6, 0 },
{ 0, 0, 11, 7, 108, 9, 0 },
{ 0, 7, 106, 7, 15, 0, 0 },
{ 0, 4, 102, 7, 22, 0, 0 },
{ 0, 3, 94, 7, 31, 0, 0 },
{ 0, 1, 84, 7, 43, 0, 0 },
{ 0, 1, 71, 7, 56, 0, 0 } } },
.ver_phase_arr = {
.even = { { 0, 10, 108, 7, 10, 0, 0 },
{ 0, 6, 106, 7, 16, 0, 0 },
{ 0, 4, 101, 7, 23, 0, 0 },
{ 0, 2, 93, 7, 33, 0, 0 },
{ 0, 1, 82, 7, 45, 0, 0 },
{ 0, 1, 68, 7, 59, 0, 0 },
{ 0, 0, 54, 7, 73, 1, 0 },
{ 0, 0, 41, 7, 85, 2, 0 },
{ 0, 0, 29, 7, 96, 3, 0 },
{ 0, 0, 20, 7, 103, 5, 0 },
{ 0, 0, 14, 7, 106, 8, 0 },
{ 0, 9, 107, 7, 12, 0, 0 },
{ 0, 5, 105, 7, 18, 0, 0 },
{ 0, 3, 99, 7, 26, 0, 0 },
{ 0, 2, 89, 7, 37, 0, 0 },
{ 0, 1, 77, 7, 50, 0, 0 },
{ 0, 1, 63, 7, 63, 1, 0 },
{ 0, 0, 50, 7, 77, 1, 0 },
{ 0, 0, 37, 7, 89, 2, 0 },
{ 0, 0, 26, 7, 99, 3, 0 },
{ 0, 0, 18, 7, 105, 5, 0 },
{ 0, 0, 12, 7, 107, 9, 0 },
{ 0, 8, 106, 7, 14, 0, 0 },
{ 0, 5, 103, 7, 20, 0, 0 },
{ 0, 3, 96, 7, 29, 0, 0 },
{ 0, 2, 85, 7, 41, 0, 0 },
{ 0, 1, 73, 7, 54, 0, 0 },
{ 0, 0, 59, 7, 68, 1, 0 },
{ 0, 0, 45, 7, 82, 1, 0 },
{ 0, 0, 33, 7, 93, 2, 0 },
{ 0, 0, 23, 7, 101, 4, 0 },
{ 0, 0, 16, 7, 106, 6, 0 } },
.odd = { { 0, 0, 56, 7, 71, 1, 0 },
{ 0, 0, 43, 7, 84, 1, 0 },
{ 0, 0, 31, 7, 94, 3, 0 },
{ 0, 0, 22, 7, 102, 4, 0 },
{ 0, 0, 15, 7, 106, 7, 0 },
{ 0, 9, 108, 7, 11, 0, 0 },
{ 0, 6, 105, 7, 17, 0, 0 },
{ 0, 4, 99, 7, 25, 0, 0 },
{ 0, 2, 91, 7, 35, 0, 0 },
{ 0, 1, 80, 7, 47, 0, 0 },
{ 0, 1, 65, 7, 61, 1, 0 },
{ 0, 0, 52, 7, 75, 1, 0 },
{ 0, 0, 39, 7, 87, 2, 0 },
{ 0, 0, 28, 7, 97, 3, 0 },
{ 0, 0, 19, 7, 104, 5, 0 },
{ 0, 0, 13, 7, 107, 8, 0 },
{ 0, 8, 107, 7, 13, 0, 0 },
{ 0, 5, 104, 7, 19, 0, 0 },
{ 0, 3, 97, 7, 28, 0, 0 },
{ 0, 2, 87, 7, 39, 0, 0 },
{ 0, 1, 75, 7, 52, 0, 0 },
{ 0, 1, 61, 7, 65, 1, 0 },
{ 0, 0, 47, 7, 80, 1, 0 },
{ 0, 0, 35, 7, 91, 2, 0 },
{ 0, 0, 25, 7, 99, 4, 0 },
{ 0, 0, 17, 7, 105, 6, 0 },
{ 0, 0, 11, 7, 108, 9, 0 },
{ 0, 7, 106, 7, 15, 0, 0 },
{ 0, 4, 102, 7, 22, 0, 0 },
{ 0, 3, 94, 7, 31, 0, 0 },
{ 0, 1, 84, 7, 43, 0, 0 },
{ 0, 1, 71, 7, 56, 0, 0 } } },
.ptrn_arr = { { 0x99933333, 0xccccc999, 0x32666664, 0x99993333,
0x9 } },
.sample_patrn_length = 134,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 36) = 0.470588 */
.hor_phase_arr = {
.even = { { 0, 11, 106, 7, 11, 0, 0 },
{ 0, 6, 103, 7, 19, 0, 0 },
{ 0, 3, 95, 7, 30, 0, 0 },
{ 0, 1, 81, 7, 46, 0, 0 },
{ 0, 1, 63, 7, 63, 1, 0 },
{ 0, 0, 46, 7, 81, 1, 0 },
{ 0, 0, 30, 7, 95, 3, 0 },
{ 0, 0, 19, 7, 103, 6, 0 } },
.odd = { { 0, 0, 54, 7, 73, 1, 0 },
{ 0, 0, 37, 7, 89, 2, 0 },
{ 0, 0, 24, 7, 100, 4, 0 },
{ 0, 0, 14, 7, 106, 8, 0 },
{ 0, 8, 106, 7, 14, 0, 0 },
{ 0, 4, 100, 7, 24, 0, 0 },
{ 0, 2, 89, 7, 37, 0, 0 },
{ 0, 1, 73, 7, 54, 0, 0 } } },
.ver_phase_arr = {
.even = { { 0, 11, 106, 7, 11, 0, 0 },
{ 0, 6, 103, 7, 19, 0, 0 },
{ 0, 3, 95, 7, 30, 0, 0 },
{ 0, 1, 81, 7, 46, 0, 0 },
{ 0, 1, 63, 7, 63, 1, 0 },
{ 0, 0, 46, 7, 81, 1, 0 },
{ 0, 0, 30, 7, 95, 3, 0 },
{ 0, 0, 19, 7, 103, 6, 0 } },
.odd = { { 0, 0, 54, 7, 73, 1, 0 },
{ 0, 0, 37, 7, 89, 2, 0 },
{ 0, 0, 24, 7, 100, 4, 0 },
{ 0, 0, 14, 7, 106, 8, 0 },
{ 0, 8, 106, 7, 14, 0, 0 },
{ 0, 4, 100, 7, 24, 0, 0 },
{ 0, 2, 89, 7, 37, 0, 0 },
{ 0, 1, 73, 7, 54, 0, 0 } } },
.ptrn_arr = { { 0x99993333 } },
.sample_patrn_length = 34,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 37) = 0.463768 */
.hor_phase_arr = {
.even = { { 0, 11, 106, 7, 11, 0, 0 },
{ 0, 5, 101, 7, 22, 0, 0 },
{ 0, 2, 88, 7, 38, 0, 0 },
{ 0, 1, 67, 7, 59, 1, 0 },
{ 0, 0, 46, 7, 80, 2, 0 },
{ 0, 0, 28, 7, 96, 4, 0 },
{ 0, 0, 15, 7, 104, 9, 0 },
{ 0, 7, 104, 7, 17, 0, 0 },
{ 0, 3, 94, 7, 31, 0, 0 },
{ 0, 1, 77, 7, 50, 0, 0 },
{ 0, 0, 54, 7, 73, 1, 0 },
{ 0, 0, 34, 7, 91, 3, 0 },
{ 0, 0, 19, 7, 103, 6, 0 },
{ 0, 10, 105, 7, 13, 0, 0 },
{ 0, 5, 98, 7, 25, 0, 0 },
{ 0, 2, 84, 7, 42, 0, 0 },
{ 0, 1, 63, 7, 63, 1, 0 },
{ 0, 0, 42, 7, 84, 2, 0 },
{ 0, 0, 25, 7, 98, 5, 0 },
{ 0, 0, 13, 7, 105, 10, 0 },
{ 0, 6, 103, 7, 19, 0, 0 },
{ 0, 3, 91, 7, 34, 0, 0 },
{ 0, 1, 73, 7, 54, 0, 0 },
{ 0, 0, 50, 7, 77, 1, 0 },
{ 0, 0, 31, 7, 94, 3, 0 },
{ 0, 0, 17, 7, 104, 7, 0 },
{ 0, 9, 104, 7, 15, 0, 0 },
{ 0, 4, 96, 7, 28, 0, 0 },
{ 0, 2, 80, 7, 46, 0, 0 },
{ 0, 1, 59, 7, 67, 1, 0 },
{ 0, 0, 38, 7, 88, 2, 0 },
{ 0, 0, 22, 7, 101, 5, 0 } },
.odd = { { 0, 0, 52, 7, 75, 1, 0 },
{ 0, 0, 33, 7, 92, 3, 0 },
{ 0, 0, 18, 7, 103, 7, 0 },
{ 0, 9, 105, 7, 14, 0, 0 },
{ 0, 4, 98, 7, 26, 0, 0 },
{ 0, 2, 82, 7, 44, 0, 0 },
{ 0, 1, 61, 7, 65, 1, 0 },
{ 0, 0, 40, 7, 86, 2, 0 },
{ 0, 0, 23, 7, 100, 5, 0 },
{ 0, 0, 12, 7, 105, 11, 0 },
{ 0, 6, 101, 7, 21, 0, 0 },
{ 0, 3, 89, 7, 36, 0, 0 },
{ 0, 1, 69, 7, 57, 1, 0 },
{ 0, 0, 48, 7, 79, 1, 0 },
{ 0, 0, 29, 7, 95, 4, 0 },
{ 0, 0, 16, 7, 104, 8, 0 },
{ 0, 8, 104, 7, 16, 0, 0 },
{ 0, 4, 95, 7, 29, 0, 0 },
{ 0, 1, 79, 7, 48, 0, 0 },
{ 0, 1, 57, 7, 69, 1, 0 },
{ 0, 0, 36, 7, 89, 3, 0 },
{ 0, 0, 21, 7, 101, 6, 0 },
{ 0, 11, 105, 7, 12, 0, 0 },
{ 0, 5, 100, 7, 23, 0, 0 },
{ 0, 2, 86, 7, 40, 0, 0 },
{ 0, 1, 65, 7, 61, 1, 0 },
{ 0, 0, 44, 7, 82, 2, 0 },
{ 0, 0, 26, 7, 98, 4, 0 },
{ 0, 0, 14, 7, 105, 9, 0 },
{ 0, 7, 103, 7, 18, 0, 0 },
{ 0, 3, 92, 7, 33, 0, 0 },
{ 0, 1, 75, 7, 52, 0, 0 } } },
.ver_phase_arr = {
.even = { { 0, 11, 106, 7, 11, 0, 0 },
{ 0, 5, 101, 7, 22, 0, 0 },
{ 0, 2, 88, 7, 38, 0, 0 },
{ 0, 1, 67, 7, 59, 1, 0 },
{ 0, 0, 46, 7, 80, 2, 0 },
{ 0, 0, 28, 7, 96, 4, 0 },
{ 0, 0, 15, 7, 104, 9, 0 },
{ 0, 7, 104, 7, 17, 0, 0 },
{ 0, 3, 94, 7, 31, 0, 0 },
{ 0, 1, 77, 7, 50, 0, 0 },
{ 0, 0, 54, 7, 73, 1, 0 },
{ 0, 0, 34, 7, 91, 3, 0 },
{ 0, 0, 19, 7, 103, 6, 0 },
{ 0, 10, 105, 7, 13, 0, 0 },
{ 0, 5, 98, 7, 25, 0, 0 },
{ 0, 2, 84, 7, 42, 0, 0 },
{ 0, 1, 63, 7, 63, 1, 0 },
{ 0, 0, 42, 7, 84, 2, 0 },
{ 0, 0, 25, 7, 98, 5, 0 },
{ 0, 0, 13, 7, 105, 10, 0 },
{ 0, 6, 103, 7, 19, 0, 0 },
{ 0, 3, 91, 7, 34, 0, 0 },
{ 0, 1, 73, 7, 54, 0, 0 },
{ 0, 0, 50, 7, 77, 1, 0 },
{ 0, 0, 31, 7, 94, 3, 0 },
{ 0, 0, 17, 7, 104, 7, 0 },
{ 0, 9, 104, 7, 15, 0, 0 },
{ 0, 4, 96, 7, 28, 0, 0 },
{ 0, 2, 80, 7, 46, 0, 0 },
{ 0, 1, 59, 7, 67, 1, 0 },
{ 0, 0, 38, 7, 88, 2, 0 },
{ 0, 0, 22, 7, 101, 5, 0 } },
.odd = { { 0, 0, 52, 7, 75, 1, 0 },
{ 0, 0, 33, 7, 92, 3, 0 },
{ 0, 0, 18, 7, 103, 7, 0 },
{ 0, 9, 105, 7, 14, 0, 0 },
{ 0, 4, 98, 7, 26, 0, 0 },
{ 0, 2, 82, 7, 44, 0, 0 },
{ 0, 1, 61, 7, 65, 1, 0 },
{ 0, 0, 40, 7, 86, 2, 0 },
{ 0, 0, 23, 7, 100, 5, 0 },
{ 0, 0, 12, 7, 105, 11, 0 },
{ 0, 6, 101, 7, 21, 0, 0 },
{ 0, 3, 89, 7, 36, 0, 0 },
{ 0, 1, 69, 7, 57, 1, 0 },
{ 0, 0, 48, 7, 79, 1, 0 },
{ 0, 0, 29, 7, 95, 4, 0 },
{ 0, 0, 16, 7, 104, 8, 0 },
{ 0, 8, 104, 7, 16, 0, 0 },
{ 0, 4, 95, 7, 29, 0, 0 },
{ 0, 1, 79, 7, 48, 0, 0 },
{ 0, 1, 57, 7, 69, 1, 0 },
{ 0, 0, 36, 7, 89, 3, 0 },
{ 0, 0, 21, 7, 101, 6, 0 },
{ 0, 11, 105, 7, 12, 0, 0 },
{ 0, 5, 100, 7, 23, 0, 0 },
{ 0, 2, 86, 7, 40, 0, 0 },
{ 0, 1, 65, 7, 61, 1, 0 },
{ 0, 0, 44, 7, 82, 2, 0 },
{ 0, 0, 26, 7, 98, 4, 0 },
{ 0, 0, 14, 7, 105, 9, 0 },
{ 0, 7, 103, 7, 18, 0, 0 },
{ 0, 3, 92, 7, 33, 0, 0 },
{ 0, 1, 75, 7, 52, 0, 0 } } },
.ptrn_arr = { { 0xc9999333, 0x332664cc, 0x4cc99993, 0x93332666,
0x99 } },
.sample_patrn_length = 138,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 38) = 0.457143 */
.hor_phase_arr = {
.even = { { 0, 12, 104, 7, 12, 0, 0 },
{ 0, 5, 98, 7, 25, 0, 0 },
{ 0, 2, 80, 7, 46, 0, 0 },
{ 0, 1, 55, 7, 71, 1, 0 },
{ 0, 0, 32, 7, 92, 4, 0 },
{ 0, 0, 16, 7, 103, 9, 0 },
{ 0, 7, 101, 7, 20, 0, 0 },
{ 0, 3, 86, 7, 39, 0, 0 },
{ 0, 1, 63, 7, 63, 1, 0 },
{ 0, 0, 39, 7, 86, 3, 0 },
{ 0, 0, 20, 7, 101, 7, 0 },
{ 0, 9, 103, 7, 16, 0, 0 },
{ 0, 4, 92, 7, 32, 0, 0 },
{ 0, 1, 71, 7, 55, 1, 0 },
{ 0, 0, 46, 7, 80, 2, 0 },
{ 0, 0, 25, 7, 98, 5, 0 } },
.odd = { { 0, 0, 50, 7, 76, 2, 0 },
{ 0, 0, 28, 7, 96, 4, 0 },
{ 0, 0, 14, 7, 104, 10, 0 },
{ 0, 6, 99, 7, 23, 0, 0 },
{ 0, 2, 84, 7, 42, 0, 0 },
{ 0, 1, 59, 7, 67, 1, 0 },
{ 0, 0, 35, 7, 90, 3, 0 },
{ 0, 0, 18, 7, 102, 8, 0 },
{ 0, 8, 102, 7, 18, 0, 0 },
{ 0, 3, 90, 7, 35, 0, 0 },
{ 0, 1, 67, 7, 59, 1, 0 },
{ 0, 0, 42, 7, 84, 2, 0 },
{ 0, 0, 23, 7, 99, 6, 0 },
{ 0, 10, 104, 7, 14, 0, 0 },
{ 0, 4, 96, 7, 28, 0, 0 },
{ 0, 2, 76, 7, 50, 0, 0 } } },
.ver_phase_arr = {
.even = { { 0, 12, 104, 7, 12, 0, 0 },
{ 0, 5, 98, 7, 25, 0, 0 },
{ 0, 2, 80, 7, 46, 0, 0 },
{ 0, 1, 55, 7, 71, 1, 0 },
{ 0, 0, 32, 7, 92, 4, 0 },
{ 0, 0, 16, 7, 103, 9, 0 },
{ 0, 7, 101, 7, 20, 0, 0 },
{ 0, 3, 86, 7, 39, 0, 0 },
{ 0, 1, 63, 7, 63, 1, 0 },
{ 0, 0, 39, 7, 86, 3, 0 },
{ 0, 0, 20, 7, 101, 7, 0 },
{ 0, 9, 103, 7, 16, 0, 0 },
{ 0, 4, 92, 7, 32, 0, 0 },
{ 0, 1, 71, 7, 55, 1, 0 },
{ 0, 0, 46, 7, 80, 2, 0 },
{ 0, 0, 25, 7, 98, 5, 0 } },
.odd = { { 0, 0, 50, 7, 76, 2, 0 },
{ 0, 0, 28, 7, 96, 4, 0 },
{ 0, 0, 14, 7, 104, 10, 0 },
{ 0, 6, 99, 7, 23, 0, 0 },
{ 0, 2, 84, 7, 42, 0, 0 },
{ 0, 1, 59, 7, 67, 1, 0 },
{ 0, 0, 35, 7, 90, 3, 0 },
{ 0, 0, 18, 7, 102, 8, 0 },
{ 0, 8, 102, 7, 18, 0, 0 },
{ 0, 3, 90, 7, 35, 0, 0 },
{ 0, 1, 67, 7, 59, 1, 0 },
{ 0, 0, 42, 7, 84, 2, 0 },
{ 0, 0, 23, 7, 99, 6, 0 },
{ 0, 10, 104, 7, 14, 0, 0 },
{ 0, 4, 96, 7, 28, 0, 0 },
{ 0, 2, 76, 7, 50, 0, 0 } } },
.ptrn_arr = { { 0xcc999333, 0x99332664, 0x9 } },
.sample_patrn_length = 70,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 39) = 0.450704 */
.hor_phase_arr = {
.even = { { 0, 13, 102, 7, 13, 0, 0 },
{ 0, 5, 94, 7, 29, 0, 0 },
{ 0, 1, 71, 7, 55, 1, 0 },
{ 0, 0, 43, 7, 83, 2, 0 },
{ 0, 0, 21, 7, 100, 7, 0 },
{ 0, 8, 102, 7, 18, 0, 0 },
{ 0, 3, 86, 7, 39, 0, 0 },
{ 0, 1, 59, 7, 67, 1, 0 },
{ 0, 0, 32, 7, 92, 4, 0 },
{ 0, 0, 14, 7, 103, 11, 0 },
{ 0, 5, 97, 7, 26, 0, 0 },
{ 0, 2, 74, 7, 51, 1, 0 },
{ 0, 0, 47, 7, 79, 2, 0 },
{ 0, 0, 23, 7, 99, 6, 0 },
{ 0, 10, 102, 7, 16, 0, 0 },
{ 0, 3, 89, 7, 36, 0, 0 },
{ 0, 1, 63, 7, 63, 1, 0 },
{ 0, 0, 36, 7, 89, 3, 0 },
{ 0, 0, 16, 7, 102, 10, 0 },
{ 0, 6, 99, 7, 23, 0, 0 },
{ 0, 2, 79, 7, 47, 0, 0 },
{ 0, 1, 51, 7, 74, 2, 0 },
{ 0, 0, 26, 7, 97, 5, 0 },
{ 0, 11, 103, 7, 14, 0, 0 },
{ 0, 4, 92, 7, 32, 0, 0 },
{ 0, 1, 67, 7, 59, 1, 0 },
{ 0, 0, 39, 7, 86, 3, 0 },
{ 0, 0, 18, 7, 102, 8, 0 },
{ 0, 7, 100, 7, 21, 0, 0 },
{ 0, 2, 83, 7, 43, 0, 0 },
{ 0, 1, 55, 7, 71, 1, 0 },
{ 0, 0, 29, 7, 94, 5, 0 } },
.odd = { { 0, 0, 49, 7, 77, 2, 0 },
{ 0, 0, 25, 7, 97, 6, 0 },
{ 0, 10, 103, 7, 15, 0, 0 },
{ 0, 4, 90, 7, 34, 0, 0 },
{ 0, 1, 65, 7, 61, 1, 0 },
{ 0, 0, 37, 7, 88, 3, 0 },
{ 0, 0, 17, 7, 102, 9, 0 },
{ 0, 7, 99, 7, 22, 0, 0 },
{ 0, 2, 81, 7, 45, 0, 0 },
{ 0, 1, 53, 7, 72, 2, 0 },
{ 0, 0, 27, 7, 96, 5, 0 },
{ 0, 12, 103, 7, 13, 0, 0 },
{ 0, 4, 93, 7, 31, 0, 0 },
{ 0, 1, 69, 7, 57, 1, 0 },
{ 0, 0, 41, 7, 84, 3, 0 },
{ 0, 0, 20, 7, 100, 8, 0 },
{ 0, 8, 100, 7, 20, 0, 0 },
{ 0, 3, 84, 7, 41, 0, 0 },
{ 0, 1, 57, 7, 69, 1, 0 },
{ 0, 0, 31, 7, 93, 4, 0 },
{ 0, 0, 13, 7, 103, 12, 0 },
{ 0, 5, 96, 7, 27, 0, 0 },
{ 0, 2, 72, 7, 53, 1, 0 },
{ 0, 0, 45, 7, 81, 2, 0 },
{ 0, 0, 22, 7, 99, 7, 0 },
{ 0, 9, 102, 7, 17, 0, 0 },
{ 0, 3, 88, 7, 37, 0, 0 },
{ 0, 1, 61, 7, 65, 1, 0 },
{ 0, 0, 34, 7, 90, 4, 0 },
{ 0, 0, 15, 7, 103, 10, 0 },
{ 0, 6, 97, 7, 25, 0, 0 },
{ 0, 2, 77, 7, 49, 0, 0 } } },
.ver_phase_arr = {
.even = { { 0, 13, 102, 7, 13, 0, 0 },
{ 0, 5, 94, 7, 29, 0, 0 },
{ 0, 1, 71, 7, 55, 1, 0 },
{ 0, 0, 43, 7, 83, 2, 0 },
{ 0, 0, 21, 7, 100, 7, 0 },
{ 0, 8, 102, 7, 18, 0, 0 },
{ 0, 3, 86, 7, 39, 0, 0 },
{ 0, 1, 59, 7, 67, 1, 0 },
{ 0, 0, 32, 7, 92, 4, 0 },
{ 0, 0, 14, 7, 103, 11, 0 },
{ 0, 5, 97, 7, 26, 0, 0 },
{ 0, 2, 74, 7, 51, 1, 0 },
{ 0, 0, 47, 7, 79, 2, 0 },
{ 0, 0, 23, 7, 99, 6, 0 },
{ 0, 10, 102, 7, 16, 0, 0 },
{ 0, 3, 89, 7, 36, 0, 0 },
{ 0, 1, 63, 7, 63, 1, 0 },
{ 0, 0, 36, 7, 89, 3, 0 },
{ 0, 0, 16, 7, 102, 10, 0 },
{ 0, 6, 99, 7, 23, 0, 0 },
{ 0, 2, 79, 7, 47, 0, 0 },
{ 0, 1, 51, 7, 74, 2, 0 },
{ 0, 0, 26, 7, 97, 5, 0 },
{ 0, 11, 103, 7, 14, 0, 0 },
{ 0, 4, 92, 7, 32, 0, 0 },
{ 0, 1, 67, 7, 59, 1, 0 },
{ 0, 0, 39, 7, 86, 3, 0 },
{ 0, 0, 18, 7, 102, 8, 0 },
{ 0, 7, 100, 7, 21, 0, 0 },
{ 0, 2, 83, 7, 43, 0, 0 },
{ 0, 1, 55, 7, 71, 1, 0 },
{ 0, 0, 29, 7, 94, 5, 0 } },
.odd = { { 0, 0, 49, 7, 77, 2, 0 },
{ 0, 0, 25, 7, 97, 6, 0 },
{ 0, 10, 103, 7, 15, 0, 0 },
{ 0, 4, 90, 7, 34, 0, 0 },
{ 0, 1, 65, 7, 61, 1, 0 },
{ 0, 0, 37, 7, 88, 3, 0 },
{ 0, 0, 17, 7, 102, 9, 0 },
{ 0, 7, 99, 7, 22, 0, 0 },
{ 0, 2, 81, 7, 45, 0, 0 },
{ 0, 1, 53, 7, 72, 2, 0 },
{ 0, 0, 27, 7, 96, 5, 0 },
{ 0, 12, 103, 7, 13, 0, 0 },
{ 0, 4, 93, 7, 31, 0, 0 },
{ 0, 1, 69, 7, 57, 1, 0 },
{ 0, 0, 41, 7, 84, 3, 0 },
{ 0, 0, 20, 7, 100, 8, 0 },
{ 0, 8, 100, 7, 20, 0, 0 },
{ 0, 3, 84, 7, 41, 0, 0 },
{ 0, 1, 57, 7, 69, 1, 0 },
{ 0, 0, 31, 7, 93, 4, 0 },
{ 0, 0, 13, 7, 103, 12, 0 },
{ 0, 5, 96, 7, 27, 0, 0 },
{ 0, 2, 72, 7, 53, 1, 0 },
{ 0, 0, 45, 7, 81, 2, 0 },
{ 0, 0, 22, 7, 99, 7, 0 },
{ 0, 9, 102, 7, 17, 0, 0 },
{ 0, 3, 88, 7, 37, 0, 0 },
{ 0, 1, 61, 7, 65, 1, 0 },
{ 0, 0, 34, 7, 90, 4, 0 },
{ 0, 0, 15, 7, 103, 10, 0 },
{ 0, 6, 97, 7, 25, 0, 0 },
{ 0, 2, 77, 7, 49, 0, 0 } } },
.ptrn_arr = { { 0x4cc99933, 0xc9993266, 0x9332664c, 0x32664cc9,
0x993 } },
.sample_patrn_length = 142,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 40) = 0.444444 */
.hor_phase_arr = {
.even = { { 0, 13, 102, 7, 13, 0, 0 },
{ 0, 4, 91, 7, 33, 0, 0 },
{ 0, 1, 63, 7, 63, 1, 0 },
{ 0, 0, 33, 7, 91, 4, 0 } },
.odd = { { 0, 0, 47, 7, 79, 2, 0 },
{ 0, 0, 21, 7, 99, 8, 0 },
{ 0, 8, 99, 7, 21, 0, 0 },
{ 0, 2, 79, 7, 47, 0, 0 } } },
.ver_phase_arr = {
.even = { { 0, 13, 102, 7, 13, 0, 0 },
{ 0, 4, 91, 7, 33, 0, 0 },
{ 0, 1, 63, 7, 63, 1, 0 },
{ 0, 0, 33, 7, 91, 4, 0 } },
.odd = { { 0, 0, 47, 7, 79, 2, 0 },
{ 0, 0, 21, 7, 99, 8, 0 },
{ 0, 8, 99, 7, 21, 0, 0 },
{ 0, 2, 79, 7, 47, 0, 0 } } },
.ptrn_arr = { { 0x9933 } },
.sample_patrn_length = 18,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 41) = 0.438356 */
.hor_phase_arr = {
.even = { { 0, 14, 100, 7, 14, 0, 0 },
{ 0, 4, 87, 7, 37, 0, 0 },
{ 0, 1, 55, 7, 70, 2, 0 },
{ 0, 0, 25, 7, 96, 7, 0 },
{ 0, 8, 98, 7, 22, 0, 0 },
{ 0, 2, 74, 7, 51, 1, 0 },
{ 0, 0, 40, 7, 85, 3, 0 },
{ 0, 0, 16, 7, 100, 12, 0 },
{ 0, 5, 90, 7, 33, 0, 0 },
{ 0, 1, 59, 7, 67, 1, 0 },
{ 0, 0, 27, 7, 95, 6, 0 },
{ 0, 9, 99, 7, 20, 0, 0 },
{ 0, 2, 78, 7, 47, 1, 0 },
{ 0, 0, 44, 7, 81, 3, 0 },
{ 0, 0, 18, 7, 99, 11, 0 },
{ 0, 5, 93, 7, 30, 0, 0 },
{ 0, 1, 63, 7, 63, 1, 0 },
{ 0, 0, 30, 7, 93, 5, 0 },
{ 0, 11, 99, 7, 18, 0, 0 },
{ 0, 3, 81, 7, 44, 0, 0 },
{ 0, 1, 47, 7, 78, 2, 0 },
{ 0, 0, 20, 7, 99, 9, 0 },
{ 0, 6, 95, 7, 27, 0, 0 },
{ 0, 1, 67, 7, 59, 1, 0 },
{ 0, 0, 33, 7, 90, 5, 0 },
{ 0, 12, 100, 7, 16, 0, 0 },
{ 0, 3, 85, 7, 40, 0, 0 },
{ 0, 1, 51, 7, 74, 2, 0 },
{ 0, 0, 22, 7, 98, 8, 0 },
{ 0, 7, 96, 7, 25, 0, 0 },
{ 0, 2, 70, 7, 55, 1, 0 },
{ 0, 0, 37, 7, 87, 4, 0 } },
.odd = { { 0, 0, 45, 7, 80, 3, 0 },
{ 0, 0, 19, 7, 99, 10, 0 },
{ 0, 6, 93, 7, 29, 0, 0 },
{ 0, 1, 65, 7, 61, 1, 0 },
{ 0, 0, 32, 7, 91, 5, 0 },
{ 0, 11, 100, 7, 17, 0, 0 },
{ 0, 3, 83, 7, 42, 0, 0 },
{ 0, 1, 49, 7, 76, 2, 0 },
{ 0, 0, 21, 7, 98, 9, 0 },
{ 0, 7, 95, 7, 26, 0, 0 },
{ 0, 2, 68, 7, 57, 1, 0 },
{ 0, 0, 35, 7, 89, 4, 0 },
{ 0, 13, 100, 7, 15, 0, 0 },
{ 0, 4, 86, 7, 38, 0, 0 },
{ 0, 1, 53, 7, 72, 2, 0 },
{ 0, 0, 23, 7, 97, 8, 0 },
{ 0, 8, 97, 7, 23, 0, 0 },
{ 0, 2, 72, 7, 53, 1, 0 },
{ 0, 0, 38, 7, 86, 4, 0 },
{ 0, 0, 15, 7, 100, 13, 0 },
{ 0, 4, 89, 7, 35, 0, 0 },
{ 0, 1, 57, 7, 68, 2, 0 },
{ 0, 0, 26, 7, 95, 7, 0 },
{ 0, 9, 98, 7, 21, 0, 0 },
{ 0, 2, 76, 7, 49, 1, 0 },
{ 0, 0, 42, 7, 83, 3, 0 },
{ 0, 0, 17, 7, 100, 11, 0 },
{ 0, 5, 91, 7, 32, 0, 0 },
{ 0, 1, 61, 7, 65, 1, 0 },
{ 0, 0, 29, 7, 93, 6, 0 },
{ 0, 10, 99, 7, 19, 0, 0 },
{ 0, 3, 80, 7, 45, 0, 0 } } },
.ver_phase_arr = {
.even = { { 0, 14, 100, 7, 14, 0, 0 },
{ 0, 4, 87, 7, 37, 0, 0 },
{ 0, 1, 55, 7, 70, 2, 0 },
{ 0, 0, 25, 7, 96, 7, 0 },
{ 0, 8, 98, 7, 22, 0, 0 },
{ 0, 2, 74, 7, 51, 1, 0 },
{ 0, 0, 40, 7, 85, 3, 0 },
{ 0, 0, 16, 7, 100, 12, 0 },
{ 0, 5, 90, 7, 33, 0, 0 },
{ 0, 1, 59, 7, 67, 1, 0 },
{ 0, 0, 27, 7, 95, 6, 0 },
{ 0, 9, 99, 7, 20, 0, 0 },
{ 0, 2, 78, 7, 47, 1, 0 },
{ 0, 0, 44, 7, 81, 3, 0 },
{ 0, 0, 18, 7, 99, 11, 0 },
{ 0, 5, 93, 7, 30, 0, 0 },
{ 0, 1, 63, 7, 63, 1, 0 },
{ 0, 0, 30, 7, 93, 5, 0 },
{ 0, 11, 99, 7, 18, 0, 0 },
{ 0, 3, 81, 7, 44, 0, 0 },
{ 0, 1, 47, 7, 78, 2, 0 },
{ 0, 0, 20, 7, 99, 9, 0 },
{ 0, 6, 95, 7, 27, 0, 0 },
{ 0, 1, 67, 7, 59, 1, 0 },
{ 0, 0, 33, 7, 90, 5, 0 },
{ 0, 12, 100, 7, 16, 0, 0 },
{ 0, 3, 85, 7, 40, 0, 0 },
{ 0, 1, 51, 7, 74, 2, 0 },
{ 0, 0, 22, 7, 98, 8, 0 },
{ 0, 7, 96, 7, 25, 0, 0 },
{ 0, 2, 70, 7, 55, 1, 0 },
{ 0, 0, 37, 7, 87, 4, 0 } },
.odd = { { 0, 0, 45, 7, 80, 3, 0 },
{ 0, 0, 19, 7, 99, 10, 0 },
{ 0, 6, 93, 7, 29, 0, 0 },
{ 0, 1, 65, 7, 61, 1, 0 },
{ 0, 0, 32, 7, 91, 5, 0 },
{ 0, 11, 100, 7, 17, 0, 0 },
{ 0, 3, 83, 7, 42, 0, 0 },
{ 0, 1, 49, 7, 76, 2, 0 },
{ 0, 0, 21, 7, 98, 9, 0 },
{ 0, 7, 95, 7, 26, 0, 0 },
{ 0, 2, 68, 7, 57, 1, 0 },
{ 0, 0, 35, 7, 89, 4, 0 },
{ 0, 13, 100, 7, 15, 0, 0 },
{ 0, 4, 86, 7, 38, 0, 0 },
{ 0, 1, 53, 7, 72, 2, 0 },
{ 0, 0, 23, 7, 97, 8, 0 },
{ 0, 8, 97, 7, 23, 0, 0 },
{ 0, 2, 72, 7, 53, 1, 0 },
{ 0, 0, 38, 7, 86, 4, 0 },
{ 0, 0, 15, 7, 100, 13, 0 },
{ 0, 4, 89, 7, 35, 0, 0 },
{ 0, 1, 57, 7, 68, 2, 0 },
{ 0, 0, 26, 7, 95, 7, 0 },
{ 0, 9, 98, 7, 21, 0, 0 },
{ 0, 2, 76, 7, 49, 1, 0 },
{ 0, 0, 42, 7, 83, 3, 0 },
{ 0, 0, 17, 7, 100, 11, 0 },
{ 0, 5, 91, 7, 32, 0, 0 },
{ 0, 1, 61, 7, 65, 1, 0 },
{ 0, 0, 29, 7, 93, 6, 0 },
{ 0, 10, 99, 7, 19, 0, 0 },
{ 0, 3, 80, 7, 45, 0, 0 } } },
.ptrn_arr = { { 0x664c9933, 0x664c9932, 0x64cc9932, 0x64cc9932,
0x9932 } },
.sample_patrn_length = 146,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
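	/*
	 * For a scale factor of 32 / (32 + n) the entries in this table
	 * appear to follow a simple period rule: with d = gcd(32, 32 + n),
	 * each .even/.odd array holds 32 / d phase entries and
	 * .sample_patrn_length is 2 * (32 + n) / d. For the n = 42 entry
	 * above: d = 2, giving 16 entries per array and a pattern length
	 * of 2 * 74 / 2 = 74.
	 */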
/* Scale factor 32 / (32 + 42) = 0.432432 */
.hor_phase_arr = {
.even = { { 0, 14, 100, 7, 14, 0, 0 },
{ 0, 4, 84, 7, 40, 0, 0 },
{ 0, 1, 48, 7, 76, 3, 0 },
{ 0, 0, 18, 7, 99, 11, 0 },
{ 0, 5, 89, 7, 34, 0, 0 },
{ 0, 1, 55, 7, 70, 2, 0 },
{ 0, 0, 23, 7, 96, 9, 0 },
{ 0, 7, 93, 7, 28, 0, 0 },
{ 0, 1, 63, 7, 63, 1, 0 },
{ 0, 0, 28, 7, 93, 7, 0 },
{ 0, 9, 96, 7, 23, 0, 0 },
{ 0, 2, 70, 7, 55, 1, 0 },
{ 0, 0, 34, 7, 89, 5, 0 },
{ 0, 11, 99, 7, 18, 0, 0 },
{ 0, 3, 76, 7, 48, 1, 0 },
{ 0, 0, 40, 7, 84, 4, 0 } },
.odd = { { 0, 1, 44, 7, 80, 3, 0 },
{ 0, 0, 16, 7, 99, 13, 0 },
{ 0, 4, 87, 7, 37, 0, 0 },
{ 0, 1, 51, 7, 74, 2, 0 },
{ 0, 0, 20, 7, 98, 10, 0 },
{ 0, 6, 91, 7, 31, 0, 0 },
{ 0, 1, 59, 7, 66, 2, 0 },
{ 0, 0, 25, 7, 95, 8, 0 },
{ 0, 8, 95, 7, 25, 0, 0 },
{ 0, 2, 66, 7, 59, 1, 0 },
{ 0, 0, 31, 7, 91, 6, 0 },
{ 0, 10, 98, 7, 20, 0, 0 },
{ 0, 2, 74, 7, 51, 1, 0 },
{ 0, 0, 37, 7, 87, 4, 0 },
{ 0, 13, 99, 7, 16, 0, 0 },
{ 0, 3, 80, 7, 44, 1, 0 } } },
.ver_phase_arr = {
.even = { { 0, 14, 100, 7, 14, 0, 0 },
{ 0, 4, 84, 7, 40, 0, 0 },
{ 0, 1, 48, 7, 76, 3, 0 },
{ 0, 0, 18, 7, 99, 11, 0 },
{ 0, 5, 89, 7, 34, 0, 0 },
{ 0, 1, 55, 7, 70, 2, 0 },
{ 0, 0, 23, 7, 96, 9, 0 },
{ 0, 7, 93, 7, 28, 0, 0 },
{ 0, 1, 63, 7, 63, 1, 0 },
{ 0, 0, 28, 7, 93, 7, 0 },
{ 0, 9, 96, 7, 23, 0, 0 },
{ 0, 2, 70, 7, 55, 1, 0 },
{ 0, 0, 34, 7, 89, 5, 0 },
{ 0, 11, 99, 7, 18, 0, 0 },
{ 0, 3, 76, 7, 48, 1, 0 },
{ 0, 0, 40, 7, 84, 4, 0 } },
.odd = { { 0, 1, 44, 7, 80, 3, 0 },
{ 0, 0, 16, 7, 99, 13, 0 },
{ 0, 4, 87, 7, 37, 0, 0 },
{ 0, 1, 51, 7, 74, 2, 0 },
{ 0, 0, 20, 7, 98, 10, 0 },
{ 0, 6, 91, 7, 31, 0, 0 },
{ 0, 1, 59, 7, 66, 2, 0 },
{ 0, 0, 25, 7, 95, 8, 0 },
{ 0, 8, 95, 7, 25, 0, 0 },
{ 0, 2, 66, 7, 59, 1, 0 },
{ 0, 0, 31, 7, 91, 6, 0 },
{ 0, 10, 98, 7, 20, 0, 0 },
{ 0, 2, 74, 7, 51, 1, 0 },
{ 0, 0, 37, 7, 87, 4, 0 },
{ 0, 13, 99, 7, 16, 0, 0 },
{ 0, 3, 80, 7, 44, 1, 0 } } },
.ptrn_arr = { { 0x264c9933, 0x3264c993, 0x99 } },
.sample_patrn_length = 74,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 43) = 0.426667 */
.hor_phase_arr = {
.even = { { 0, 15, 98, 7, 15, 0, 0 },
{ 0, 3, 80, 7, 44, 1, 0 },
{ 0, 0, 41, 7, 83, 4, 0 },
{ 0, 13, 98, 7, 17, 0, 0 },
{ 0, 3, 76, 7, 48, 1, 0 },
{ 0, 0, 38, 7, 85, 5, 0 },
{ 0, 12, 97, 7, 19, 0, 0 },
{ 0, 2, 74, 7, 51, 1, 0 },
{ 0, 0, 34, 7, 89, 5, 0 },
{ 0, 10, 97, 7, 21, 0, 0 },
{ 0, 2, 70, 7, 55, 1, 0 },
{ 0, 0, 31, 7, 91, 6, 0 },
{ 0, 9, 96, 7, 23, 0, 0 },
{ 0, 2, 66, 7, 59, 1, 0 },
{ 0, 0, 29, 7, 92, 7, 0 },
{ 0, 8, 94, 7, 26, 0, 0 },
{ 0, 1, 63, 7, 63, 1, 0 },
{ 0, 0, 26, 7, 94, 8, 0 },
{ 0, 7, 92, 7, 29, 0, 0 },
{ 0, 1, 59, 7, 66, 2, 0 },
{ 0, 0, 23, 7, 96, 9, 0 },
{ 0, 6, 91, 7, 31, 0, 0 },
{ 0, 1, 55, 7, 70, 2, 0 },
{ 0, 0, 21, 7, 97, 10, 0 },
{ 0, 5, 89, 7, 34, 0, 0 },
{ 0, 1, 51, 7, 74, 2, 0 },
{ 0, 0, 19, 7, 97, 12, 0 },
{ 0, 5, 85, 7, 38, 0, 0 },
{ 0, 1, 48, 7, 76, 3, 0 },
{ 0, 0, 17, 7, 98, 13, 0 },
{ 0, 4, 83, 7, 41, 0, 0 },
{ 0, 1, 44, 7, 80, 3, 0 } },
.odd = { { 0, 1, 43, 7, 80, 4, 0 },
{ 0, 14, 98, 7, 16, 0, 0 },
{ 0, 3, 78, 7, 46, 1, 0 },
{ 0, 0, 39, 7, 85, 4, 0 },
{ 0, 12, 98, 7, 18, 0, 0 },
{ 0, 3, 74, 7, 50, 1, 0 },
{ 0, 0, 36, 7, 87, 5, 0 },
{ 0, 11, 97, 7, 20, 0, 0 },
{ 0, 2, 72, 7, 53, 1, 0 },
{ 0, 0, 33, 7, 89, 6, 0 },
{ 0, 10, 96, 7, 22, 0, 0 },
{ 0, 2, 68, 7, 57, 1, 0 },
{ 0, 0, 30, 7, 92, 6, 0 },
{ 0, 9, 94, 7, 25, 0, 0 },
{ 0, 2, 64, 7, 61, 1, 0 },
{ 0, 0, 27, 7, 94, 7, 0 },
{ 0, 7, 94, 7, 27, 0, 0 },
{ 0, 1, 61, 7, 64, 2, 0 },
{ 0, 0, 25, 7, 94, 9, 0 },
{ 0, 6, 92, 7, 30, 0, 0 },
{ 0, 1, 57, 7, 68, 2, 0 },
{ 0, 0, 22, 7, 96, 10, 0 },
{ 0, 6, 89, 7, 33, 0, 0 },
{ 0, 1, 53, 7, 72, 2, 0 },
{ 0, 0, 20, 7, 97, 11, 0 },
{ 0, 5, 87, 7, 36, 0, 0 },
{ 0, 1, 50, 7, 74, 3, 0 },
{ 0, 0, 18, 7, 98, 12, 0 },
{ 0, 4, 85, 7, 39, 0, 0 },
{ 0, 1, 46, 7, 78, 3, 0 },
{ 0, 0, 16, 7, 98, 14, 0 },
{ 0, 4, 80, 7, 43, 1, 0 } } },
.ver_phase_arr = {
.even = { { 0, 15, 98, 7, 15, 0, 0 },
{ 0, 3, 80, 7, 44, 1, 0 },
{ 0, 0, 41, 7, 83, 4, 0 },
{ 0, 13, 98, 7, 17, 0, 0 },
{ 0, 3, 76, 7, 48, 1, 0 },
{ 0, 0, 38, 7, 85, 5, 0 },
{ 0, 12, 97, 7, 19, 0, 0 },
{ 0, 2, 74, 7, 51, 1, 0 },
{ 0, 0, 34, 7, 89, 5, 0 },
{ 0, 10, 97, 7, 21, 0, 0 },
{ 0, 2, 70, 7, 55, 1, 0 },
{ 0, 0, 31, 7, 91, 6, 0 },
{ 0, 9, 96, 7, 23, 0, 0 },
{ 0, 2, 66, 7, 59, 1, 0 },
{ 0, 0, 29, 7, 92, 7, 0 },
{ 0, 8, 94, 7, 26, 0, 0 },
{ 0, 1, 63, 7, 63, 1, 0 },
{ 0, 0, 26, 7, 94, 8, 0 },
{ 0, 7, 92, 7, 29, 0, 0 },
{ 0, 1, 59, 7, 66, 2, 0 },
{ 0, 0, 23, 7, 96, 9, 0 },
{ 0, 6, 91, 7, 31, 0, 0 },
{ 0, 1, 55, 7, 70, 2, 0 },
{ 0, 0, 21, 7, 97, 10, 0 },
{ 0, 5, 89, 7, 34, 0, 0 },
{ 0, 1, 51, 7, 74, 2, 0 },
{ 0, 0, 19, 7, 97, 12, 0 },
{ 0, 5, 85, 7, 38, 0, 0 },
{ 0, 1, 48, 7, 76, 3, 0 },
{ 0, 0, 17, 7, 98, 13, 0 },
{ 0, 4, 83, 7, 41, 0, 0 },
{ 0, 1, 44, 7, 80, 3, 0 } },
.odd = { { 0, 1, 43, 7, 80, 4, 0 },
{ 0, 14, 98, 7, 16, 0, 0 },
{ 0, 3, 78, 7, 46, 1, 0 },
{ 0, 0, 39, 7, 85, 4, 0 },
{ 0, 12, 98, 7, 18, 0, 0 },
{ 0, 3, 74, 7, 50, 1, 0 },
{ 0, 0, 36, 7, 87, 5, 0 },
{ 0, 11, 97, 7, 20, 0, 0 },
{ 0, 2, 72, 7, 53, 1, 0 },
{ 0, 0, 33, 7, 89, 6, 0 },
{ 0, 10, 96, 7, 22, 0, 0 },
{ 0, 2, 68, 7, 57, 1, 0 },
{ 0, 0, 30, 7, 92, 6, 0 },
{ 0, 9, 94, 7, 25, 0, 0 },
{ 0, 2, 64, 7, 61, 1, 0 },
{ 0, 0, 27, 7, 94, 7, 0 },
{ 0, 7, 94, 7, 27, 0, 0 },
{ 0, 1, 61, 7, 64, 2, 0 },
{ 0, 0, 25, 7, 94, 9, 0 },
{ 0, 6, 92, 7, 30, 0, 0 },
{ 0, 1, 57, 7, 68, 2, 0 },
{ 0, 0, 22, 7, 96, 10, 0 },
{ 0, 6, 89, 7, 33, 0, 0 },
{ 0, 1, 53, 7, 72, 2, 0 },
{ 0, 0, 20, 7, 97, 11, 0 },
{ 0, 5, 87, 7, 36, 0, 0 },
{ 0, 1, 50, 7, 74, 3, 0 },
{ 0, 0, 18, 7, 98, 12, 0 },
{ 0, 4, 85, 7, 39, 0, 0 },
{ 0, 1, 46, 7, 78, 3, 0 },
{ 0, 0, 16, 7, 98, 14, 0 },
{ 0, 4, 80, 7, 43, 1, 0 } } },
.ptrn_arr = { { 0x3264c993, 0x93264c99, 0x993264c9, 0xc993264c,
0x93264 } },
.sample_patrn_length = 150,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 44) = 0.421053 */
.hor_phase_arr = {
.even = { { 0, 16, 96, 7, 16, 0, 0 },
{ 0, 3, 76, 7, 48, 1, 0 },
{ 0, 0, 35, 7, 87, 6, 0 },
{ 0, 10, 94, 7, 24, 0, 0 },
{ 0, 2, 62, 7, 62, 2, 0 },
{ 0, 0, 24, 7, 94, 10, 0 },
{ 0, 6, 87, 7, 35, 0, 0 },
{ 0, 1, 48, 7, 76, 3, 0 } },
.odd = { { 0, 1, 41, 7, 82, 4, 0 },
{ 0, 12, 97, 7, 19, 0, 0 },
{ 0, 2, 70, 7, 55, 1, 0 },
{ 0, 0, 29, 7, 92, 7, 0 },
{ 0, 7, 92, 7, 29, 0, 0 },
{ 0, 1, 55, 7, 70, 2, 0 },
{ 0, 0, 19, 7, 97, 12, 0 },
{ 0, 4, 82, 7, 41, 1, 0 } } },
.ver_phase_arr = {
.even = { { 0, 16, 96, 7, 16, 0, 0 },
{ 0, 3, 76, 7, 48, 1, 0 },
{ 0, 0, 35, 7, 87, 6, 0 },
{ 0, 10, 94, 7, 24, 0, 0 },
{ 0, 2, 62, 7, 62, 2, 0 },
{ 0, 0, 24, 7, 94, 10, 0 },
{ 0, 6, 87, 7, 35, 0, 0 },
{ 0, 1, 48, 7, 76, 3, 0 } },
.odd = { { 0, 1, 41, 7, 82, 4, 0 },
{ 0, 12, 97, 7, 19, 0, 0 },
{ 0, 2, 70, 7, 55, 1, 0 },
{ 0, 0, 29, 7, 92, 7, 0 },
{ 0, 7, 92, 7, 29, 0, 0 },
{ 0, 1, 55, 7, 70, 2, 0 },
{ 0, 0, 19, 7, 97, 12, 0 },
{ 0, 4, 82, 7, 41, 1, 0 } } },
.ptrn_arr = { { 0x3264c993, 0x9 } },
.sample_patrn_length = 38,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 45) = 0.415584 */
.hor_phase_arr = {
.even = { { 0, 16, 96, 7, 16, 0, 0 },
{ 0, 3, 72, 7, 52, 1, 0 },
{ 0, 0, 30, 7, 90, 8, 0 },
{ 0, 7, 89, 7, 32, 0, 0 },
{ 0, 1, 48, 7, 76, 3, 0 },
{ 0, 14, 96, 7, 18, 0, 0 },
{ 0, 2, 70, 7, 55, 1, 0 },
{ 0, 0, 27, 7, 92, 9, 0 },
{ 0, 6, 87, 7, 35, 0, 0 },
{ 0, 1, 45, 7, 78, 4, 0 },
{ 0, 13, 95, 7, 20, 0, 0 },
{ 0, 2, 66, 7, 59, 1, 0 },
{ 0, 0, 24, 7, 94, 10, 0 },
{ 0, 5, 85, 7, 38, 0, 0 },
{ 0, 1, 42, 7, 81, 4, 0 },
{ 0, 11, 95, 7, 22, 0, 0 },
{ 0, 2, 62, 7, 62, 2, 0 },
{ 0, 0, 22, 7, 95, 11, 0 },
{ 0, 4, 81, 7, 42, 1, 0 },
{ 0, 0, 38, 7, 85, 5, 0 },
{ 0, 10, 94, 7, 24, 0, 0 },
{ 0, 1, 59, 7, 66, 2, 0 },
{ 0, 0, 20, 7, 95, 13, 0 },
{ 0, 4, 78, 7, 45, 1, 0 },
{ 0, 0, 35, 7, 87, 6, 0 },
{ 0, 9, 92, 7, 27, 0, 0 },
{ 0, 1, 55, 7, 70, 2, 0 },
{ 0, 0, 18, 7, 96, 14, 0 },
{ 0, 3, 76, 7, 48, 1, 0 },
{ 0, 0, 32, 7, 89, 7, 0 },
{ 0, 8, 90, 7, 30, 0, 0 },
{ 0, 1, 52, 7, 72, 3, 0 } },
.odd = { { 0, 1, 40, 7, 82, 5, 0 },
{ 0, 11, 94, 7, 23, 0, 0 },
{ 0, 2, 61, 7, 63, 2, 0 },
{ 0, 0, 21, 7, 95, 12, 0 },
{ 0, 4, 80, 7, 43, 1, 0 },
{ 0, 0, 37, 7, 85, 6, 0 },
{ 0, 9, 93, 7, 26, 0, 0 },
{ 0, 1, 57, 7, 68, 2, 0 },
{ 0, 0, 19, 7, 95, 14, 0 },
{ 0, 4, 76, 7, 47, 1, 0 },
{ 0, 0, 34, 7, 88, 6, 0 },
{ 0, 8, 92, 7, 28, 0, 0 },
{ 0, 1, 54, 7, 70, 3, 0 },
{ 0, 0, 17, 7, 96, 15, 0 },
{ 0, 3, 74, 7, 50, 1, 0 },
{ 0, 0, 31, 7, 90, 7, 0 },
{ 0, 7, 90, 7, 31, 0, 0 },
{ 0, 1, 50, 7, 74, 3, 0 },
{ 0, 15, 96, 7, 17, 0, 0 },
{ 0, 3, 70, 7, 54, 1, 0 },
{ 0, 0, 28, 7, 92, 8, 0 },
{ 0, 6, 88, 7, 34, 0, 0 },
{ 0, 1, 47, 7, 76, 4, 0 },
{ 0, 14, 95, 7, 19, 0, 0 },
{ 0, 2, 68, 7, 57, 1, 0 },
{ 0, 0, 26, 7, 93, 9, 0 },
{ 0, 6, 85, 7, 37, 0, 0 },
{ 0, 1, 43, 7, 80, 4, 0 },
{ 0, 12, 95, 7, 21, 0, 0 },
{ 0, 2, 63, 7, 61, 2, 0 },
{ 0, 0, 23, 7, 94, 11, 0 },
{ 0, 5, 82, 7, 40, 1, 0 } } },
.ver_phase_arr = {
.even = { { 0, 16, 96, 7, 16, 0, 0 },
{ 0, 3, 72, 7, 52, 1, 0 },
{ 0, 0, 30, 7, 90, 8, 0 },
{ 0, 7, 89, 7, 32, 0, 0 },
{ 0, 1, 48, 7, 76, 3, 0 },
{ 0, 14, 96, 7, 18, 0, 0 },
{ 0, 2, 70, 7, 55, 1, 0 },
{ 0, 0, 27, 7, 92, 9, 0 },
{ 0, 6, 87, 7, 35, 0, 0 },
{ 0, 1, 45, 7, 78, 4, 0 },
{ 0, 13, 95, 7, 20, 0, 0 },
{ 0, 2, 66, 7, 59, 1, 0 },
{ 0, 0, 24, 7, 94, 10, 0 },
{ 0, 5, 85, 7, 38, 0, 0 },
{ 0, 1, 42, 7, 81, 4, 0 },
{ 0, 11, 95, 7, 22, 0, 0 },
{ 0, 2, 62, 7, 62, 2, 0 },
{ 0, 0, 22, 7, 95, 11, 0 },
{ 0, 4, 81, 7, 42, 1, 0 },
{ 0, 0, 38, 7, 85, 5, 0 },
{ 0, 10, 94, 7, 24, 0, 0 },
{ 0, 1, 59, 7, 66, 2, 0 },
{ 0, 0, 20, 7, 95, 13, 0 },
{ 0, 4, 78, 7, 45, 1, 0 },
{ 0, 0, 35, 7, 87, 6, 0 },
{ 0, 9, 92, 7, 27, 0, 0 },
{ 0, 1, 55, 7, 70, 2, 0 },
{ 0, 0, 18, 7, 96, 14, 0 },
{ 0, 3, 76, 7, 48, 1, 0 },
{ 0, 0, 32, 7, 89, 7, 0 },
{ 0, 8, 90, 7, 30, 0, 0 },
{ 0, 1, 52, 7, 72, 3, 0 } },
.odd = { { 0, 1, 40, 7, 82, 5, 0 },
{ 0, 11, 94, 7, 23, 0, 0 },
{ 0, 2, 61, 7, 63, 2, 0 },
{ 0, 0, 21, 7, 95, 12, 0 },
{ 0, 4, 80, 7, 43, 1, 0 },
{ 0, 0, 37, 7, 85, 6, 0 },
{ 0, 9, 93, 7, 26, 0, 0 },
{ 0, 1, 57, 7, 68, 2, 0 },
{ 0, 0, 19, 7, 95, 14, 0 },
{ 0, 4, 76, 7, 47, 1, 0 },
{ 0, 0, 34, 7, 88, 6, 0 },
{ 0, 8, 92, 7, 28, 0, 0 },
{ 0, 1, 54, 7, 70, 3, 0 },
{ 0, 0, 17, 7, 96, 15, 0 },
{ 0, 3, 74, 7, 50, 1, 0 },
{ 0, 0, 31, 7, 90, 7, 0 },
{ 0, 7, 90, 7, 31, 0, 0 },
{ 0, 1, 50, 7, 74, 3, 0 },
{ 0, 15, 96, 7, 17, 0, 0 },
{ 0, 3, 70, 7, 54, 1, 0 },
{ 0, 0, 28, 7, 92, 8, 0 },
{ 0, 6, 88, 7, 34, 0, 0 },
{ 0, 1, 47, 7, 76, 4, 0 },
{ 0, 14, 95, 7, 19, 0, 0 },
{ 0, 2, 68, 7, 57, 1, 0 },
{ 0, 0, 26, 7, 93, 9, 0 },
{ 0, 6, 85, 7, 37, 0, 0 },
{ 0, 1, 43, 7, 80, 4, 0 },
{ 0, 12, 95, 7, 21, 0, 0 },
{ 0, 2, 63, 7, 61, 2, 0 },
{ 0, 0, 23, 7, 94, 11, 0 },
{ 0, 5, 82, 7, 40, 1, 0 } } },
.ptrn_arr = { { 0x9324c993, 0xc99324c9, 0x26499324, 0x93264993,
0x932649 } },
.sample_patrn_length = 154,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
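	/*
	 * For the downscale entries in this part of the table the
	 * .ver_phase_arr contents simply duplicate .hor_phase_arr, so a
	 * single set of coefficients describes both directions.
	 */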
/* Scale factor 32 / (32 + 46) = 0.410256 */
.hor_phase_arr = {
.even = { { 0, 17, 94, 7, 17, 0, 0 },
{ 0, 3, 69, 7, 55, 1, 0 },
{ 0, 0, 25, 7, 93, 10, 0 },
{ 0, 5, 80, 7, 42, 1, 0 },
{ 0, 0, 36, 7, 86, 6, 0 },
{ 0, 8, 90, 7, 30, 0, 0 },
{ 0, 1, 49, 7, 74, 4, 0 },
{ 0, 13, 94, 7, 21, 0, 0 },
{ 0, 2, 62, 7, 62, 2, 0 },
{ 0, 0, 21, 7, 94, 13, 0 },
{ 0, 4, 74, 7, 49, 1, 0 },
{ 0, 0, 30, 7, 90, 8, 0 },
{ 0, 6, 86, 7, 36, 0, 0 },
{ 0, 1, 42, 7, 80, 5, 0 },
{ 0, 10, 93, 7, 25, 0, 0 },
{ 0, 1, 55, 7, 69, 3, 0 } },
.odd = { { 0, 1, 39, 7, 83, 5, 0 },
{ 0, 9, 91, 7, 28, 0, 0 },
{ 0, 1, 52, 7, 72, 3, 0 },
{ 0, 15, 94, 7, 19, 0, 0 },
{ 0, 2, 65, 7, 59, 2, 0 },
{ 0, 0, 23, 7, 93, 12, 0 },
{ 0, 4, 78, 7, 45, 1, 0 },
{ 0, 0, 33, 7, 88, 7, 0 },
{ 0, 7, 88, 7, 33, 0, 0 },
{ 0, 1, 45, 7, 78, 4, 0 },
{ 0, 12, 93, 7, 23, 0, 0 },
{ 0, 2, 59, 7, 65, 2, 0 },
{ 0, 0, 19, 7, 94, 15, 0 },
{ 0, 3, 72, 7, 52, 1, 0 },
{ 0, 0, 28, 7, 91, 9, 0 },
{ 0, 5, 83, 7, 39, 1, 0 } } },
.ver_phase_arr = {
.even = { { 0, 17, 94, 7, 17, 0, 0 },
{ 0, 3, 69, 7, 55, 1, 0 },
{ 0, 0, 25, 7, 93, 10, 0 },
{ 0, 5, 80, 7, 42, 1, 0 },
{ 0, 0, 36, 7, 86, 6, 0 },
{ 0, 8, 90, 7, 30, 0, 0 },
{ 0, 1, 49, 7, 74, 4, 0 },
{ 0, 13, 94, 7, 21, 0, 0 },
{ 0, 2, 62, 7, 62, 2, 0 },
{ 0, 0, 21, 7, 94, 13, 0 },
{ 0, 4, 74, 7, 49, 1, 0 },
{ 0, 0, 30, 7, 90, 8, 0 },
{ 0, 6, 86, 7, 36, 0, 0 },
{ 0, 1, 42, 7, 80, 5, 0 },
{ 0, 10, 93, 7, 25, 0, 0 },
{ 0, 1, 55, 7, 69, 3, 0 } },
.odd = { { 0, 1, 39, 7, 83, 5, 0 },
{ 0, 9, 91, 7, 28, 0, 0 },
{ 0, 1, 52, 7, 72, 3, 0 },
{ 0, 15, 94, 7, 19, 0, 0 },
{ 0, 2, 65, 7, 59, 2, 0 },
{ 0, 0, 23, 7, 93, 12, 0 },
{ 0, 4, 78, 7, 45, 1, 0 },
{ 0, 0, 33, 7, 88, 7, 0 },
{ 0, 7, 88, 7, 33, 0, 0 },
{ 0, 1, 45, 7, 78, 4, 0 },
{ 0, 12, 93, 7, 23, 0, 0 },
{ 0, 2, 59, 7, 65, 2, 0 },
{ 0, 0, 19, 7, 94, 15, 0 },
{ 0, 3, 72, 7, 52, 1, 0 },
{ 0, 0, 28, 7, 91, 9, 0 },
{ 0, 5, 83, 7, 39, 1, 0 } } },
.ptrn_arr = { { 0x93264993, 0x4c99264c, 0x932 } },
.sample_patrn_length = 78,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 47) = 0.405063 */
.hor_phase_arr = {
.even = { { 0, 17, 94, 7, 17, 0, 0 },
{ 0, 2, 65, 7, 59, 2, 0 },
{ 0, 0, 21, 7, 93, 14, 0 },
{ 0, 3, 72, 7, 52, 1, 0 },
{ 0, 0, 26, 7, 91, 11, 0 },
{ 0, 4, 78, 7, 45, 1, 0 },
{ 0, 0, 31, 7, 88, 9, 0 },
{ 0, 6, 82, 7, 39, 1, 0 },
{ 0, 1, 36, 7, 84, 7, 0 },
{ 0, 8, 87, 7, 33, 0, 0 },
{ 0, 1, 42, 7, 80, 5, 0 },
{ 0, 10, 90, 7, 28, 0, 0 },
{ 0, 1, 49, 7, 74, 4, 0 },
{ 0, 12, 93, 7, 23, 0, 0 },
{ 0, 2, 55, 7, 68, 3, 0 },
{ 0, 15, 94, 7, 19, 0, 0 },
{ 0, 2, 62, 7, 62, 2, 0 },
{ 0, 0, 19, 7, 94, 15, 0 },
{ 0, 3, 68, 7, 55, 2, 0 },
{ 0, 0, 23, 7, 93, 12, 0 },
{ 0, 4, 74, 7, 49, 1, 0 },
{ 0, 0, 28, 7, 90, 10, 0 },
{ 0, 5, 80, 7, 42, 1, 0 },
{ 0, 0, 33, 7, 87, 8, 0 },
{ 0, 7, 84, 7, 36, 1, 0 },
{ 0, 1, 39, 7, 82, 6, 0 },
{ 0, 9, 88, 7, 31, 0, 0 },
{ 0, 1, 45, 7, 78, 4, 0 },
{ 0, 11, 91, 7, 26, 0, 0 },
{ 0, 1, 52, 7, 72, 3, 0 },
{ 0, 14, 93, 7, 21, 0, 0 },
{ 0, 2, 59, 7, 65, 2, 0 } },
.odd = { { 0, 1, 38, 7, 83, 6, 0 },
{ 0, 8, 88, 7, 32, 0, 0 },
{ 0, 1, 44, 7, 78, 5, 0 },
{ 0, 10, 91, 7, 27, 0, 0 },
{ 0, 1, 50, 7, 73, 4, 0 },
{ 0, 13, 93, 7, 22, 0, 0 },
{ 0, 2, 57, 7, 66, 3, 0 },
{ 0, 16, 94, 7, 18, 0, 0 },
{ 0, 2, 64, 7, 60, 2, 0 },
{ 0, 0, 20, 7, 93, 15, 0 },
{ 0, 3, 70, 7, 54, 1, 0 },
{ 0, 0, 24, 7, 92, 12, 0 },
{ 0, 4, 76, 7, 47, 1, 0 },
{ 0, 0, 29, 7, 90, 9, 0 },
{ 0, 5, 81, 7, 41, 1, 0 },
{ 0, 0, 35, 7, 86, 7, 0 },
{ 0, 7, 86, 7, 35, 0, 0 },
{ 0, 1, 41, 7, 81, 5, 0 },
{ 0, 9, 90, 7, 29, 0, 0 },
{ 0, 1, 47, 7, 76, 4, 0 },
{ 0, 12, 92, 7, 24, 0, 0 },
{ 0, 1, 54, 7, 70, 3, 0 },
{ 0, 15, 93, 7, 20, 0, 0 },
{ 0, 2, 60, 7, 64, 2, 0 },
{ 0, 0, 18, 7, 94, 16, 0 },
{ 0, 3, 66, 7, 57, 2, 0 },
{ 0, 0, 22, 7, 93, 13, 0 },
{ 0, 4, 73, 7, 50, 1, 0 },
{ 0, 0, 27, 7, 91, 10, 0 },
{ 0, 5, 78, 7, 44, 1, 0 },
{ 0, 0, 32, 7, 88, 8, 0 },
{ 0, 6, 83, 7, 38, 1, 0 } } },
.ver_phase_arr = {
.even = { { 0, 17, 94, 7, 17, 0, 0 },
{ 0, 2, 65, 7, 59, 2, 0 },
{ 0, 0, 21, 7, 93, 14, 0 },
{ 0, 3, 72, 7, 52, 1, 0 },
{ 0, 0, 26, 7, 91, 11, 0 },
{ 0, 4, 78, 7, 45, 1, 0 },
{ 0, 0, 31, 7, 88, 9, 0 },
{ 0, 6, 82, 7, 39, 1, 0 },
{ 0, 1, 36, 7, 84, 7, 0 },
{ 0, 8, 87, 7, 33, 0, 0 },
{ 0, 1, 42, 7, 80, 5, 0 },
{ 0, 10, 90, 7, 28, 0, 0 },
{ 0, 1, 49, 7, 74, 4, 0 },
{ 0, 12, 93, 7, 23, 0, 0 },
{ 0, 2, 55, 7, 68, 3, 0 },
{ 0, 15, 94, 7, 19, 0, 0 },
{ 0, 2, 62, 7, 62, 2, 0 },
{ 0, 0, 19, 7, 94, 15, 0 },
{ 0, 3, 68, 7, 55, 2, 0 },
{ 0, 0, 23, 7, 93, 12, 0 },
{ 0, 4, 74, 7, 49, 1, 0 },
{ 0, 0, 28, 7, 90, 10, 0 },
{ 0, 5, 80, 7, 42, 1, 0 },
{ 0, 0, 33, 7, 87, 8, 0 },
{ 0, 7, 84, 7, 36, 1, 0 },
{ 0, 1, 39, 7, 82, 6, 0 },
{ 0, 9, 88, 7, 31, 0, 0 },
{ 0, 1, 45, 7, 78, 4, 0 },
{ 0, 11, 91, 7, 26, 0, 0 },
{ 0, 1, 52, 7, 72, 3, 0 },
{ 0, 14, 93, 7, 21, 0, 0 },
{ 0, 2, 59, 7, 65, 2, 0 } },
.odd = { { 0, 1, 38, 7, 83, 6, 0 },
{ 0, 8, 88, 7, 32, 0, 0 },
{ 0, 1, 44, 7, 78, 5, 0 },
{ 0, 10, 91, 7, 27, 0, 0 },
{ 0, 1, 50, 7, 73, 4, 0 },
{ 0, 13, 93, 7, 22, 0, 0 },
{ 0, 2, 57, 7, 66, 3, 0 },
{ 0, 16, 94, 7, 18, 0, 0 },
{ 0, 2, 64, 7, 60, 2, 0 },
{ 0, 0, 20, 7, 93, 15, 0 },
{ 0, 3, 70, 7, 54, 1, 0 },
{ 0, 0, 24, 7, 92, 12, 0 },
{ 0, 4, 76, 7, 47, 1, 0 },
{ 0, 0, 29, 7, 90, 9, 0 },
{ 0, 5, 81, 7, 41, 1, 0 },
{ 0, 0, 35, 7, 86, 7, 0 },
{ 0, 7, 86, 7, 35, 0, 0 },
{ 0, 1, 41, 7, 81, 5, 0 },
{ 0, 9, 90, 7, 29, 0, 0 },
{ 0, 1, 47, 7, 76, 4, 0 },
{ 0, 12, 92, 7, 24, 0, 0 },
{ 0, 1, 54, 7, 70, 3, 0 },
{ 0, 15, 93, 7, 20, 0, 0 },
{ 0, 2, 60, 7, 64, 2, 0 },
{ 0, 0, 18, 7, 94, 16, 0 },
{ 0, 3, 66, 7, 57, 2, 0 },
{ 0, 0, 22, 7, 93, 13, 0 },
{ 0, 4, 73, 7, 50, 1, 0 },
{ 0, 0, 27, 7, 91, 10, 0 },
{ 0, 5, 78, 7, 44, 1, 0 },
{ 0, 0, 32, 7, 88, 8, 0 },
{ 0, 6, 83, 7, 38, 1, 0 } } },
.ptrn_arr = { { 0x99264993, 0x24c93264, 0x99264c93, 0x24c99264,
0x9324c93 } },
.sample_patrn_length = 158,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
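	/*
	 * The ptrn_arr bitmask appears to flag which input samples in one
	 * period produce an output: the number of set bits equals the
	 * total number of phase entries (even + odd) and the set-bit
	 * density matches the scale factor. For the 32 / 80 = 2/5 entry
	 * below, 0x93 has 4 bits set in a 10-bit pattern: 4 / 10 = 2/5,
	 * matching its 2 + 2 phase entries.
	 */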
/* Scale factor 32 / (32 + 48) = 0.4 */
.hor_phase_arr = {
.even = { { 0, 18, 92, 7, 18, 0, 0 },
{ 0, 2, 62, 7, 62, 2, 0 } },
.odd = { { 0, 1, 37, 7, 83, 7, 0 },
{ 0, 7, 83, 7, 37, 1, 0 } } },
.ver_phase_arr = {
.even = { { 0, 18, 92, 7, 18, 0, 0 },
{ 0, 2, 62, 7, 62, 2, 0 } },
.odd = { { 0, 1, 37, 7, 83, 7, 0 },
{ 0, 7, 83, 7, 37, 1, 0 } } },
.ptrn_arr = { { 0x93 } },
.sample_patrn_length = 10,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 49) = 0.395062 */
.hor_phase_arr = {
.even = { { 0, 18, 92, 7, 18, 0, 0 },
{ 0, 2, 58, 7, 65, 3, 0 },
{ 0, 15, 91, 7, 22, 0, 0 },
{ 0, 2, 52, 7, 70, 4, 0 },
{ 0, 12, 89, 7, 27, 0, 0 },
{ 0, 1, 46, 7, 76, 5, 0 },
{ 0, 9, 87, 7, 32, 0, 0 },
{ 0, 1, 40, 7, 80, 7, 0 },
{ 0, 7, 83, 7, 37, 1, 0 },
{ 0, 1, 34, 7, 85, 8, 0 },
{ 0, 6, 78, 7, 43, 1, 0 },
{ 0, 0, 29, 7, 88, 11, 0 },
{ 0, 4, 74, 7, 49, 1, 0 },
{ 0, 0, 24, 7, 91, 13, 0 },
{ 0, 3, 68, 7, 55, 2, 0 },
{ 0, 0, 20, 7, 92, 16, 0 },
{ 0, 2, 62, 7, 62, 2, 0 },
{ 0, 16, 92, 7, 20, 0, 0 },
{ 0, 2, 55, 7, 68, 3, 0 },
{ 0, 13, 91, 7, 24, 0, 0 },
{ 0, 1, 49, 7, 74, 4, 0 },
{ 0, 11, 88, 7, 29, 0, 0 },
{ 0, 1, 43, 7, 78, 6, 0 },
{ 0, 8, 85, 7, 34, 1, 0 },
{ 0, 1, 37, 7, 83, 7, 0 },
{ 0, 7, 80, 7, 40, 1, 0 },
{ 0, 0, 32, 7, 87, 9, 0 },
{ 0, 5, 76, 7, 46, 1, 0 },
{ 0, 0, 27, 7, 89, 12, 0 },
{ 0, 4, 70, 7, 52, 2, 0 },
{ 0, 0, 22, 7, 91, 15, 0 },
{ 0, 3, 65, 7, 58, 2, 0 } },
.odd = { { 0, 1, 36, 7, 83, 8, 0 },
{ 0, 6, 80, 7, 41, 1, 0 },
{ 0, 0, 30, 7, 88, 10, 0 },
{ 0, 5, 75, 7, 47, 1, 0 },
{ 0, 0, 25, 7, 90, 13, 0 },
{ 0, 4, 68, 7, 54, 2, 0 },
{ 0, 0, 21, 7, 91, 16, 0 },
{ 0, 3, 63, 7, 60, 2, 0 },
{ 0, 17, 92, 7, 19, 0, 0 },
{ 0, 2, 57, 7, 66, 3, 0 },
{ 0, 14, 91, 7, 23, 0, 0 },
{ 0, 1, 51, 7, 72, 4, 0 },
{ 0, 11, 89, 7, 28, 0, 0 },
{ 0, 1, 44, 7, 78, 5, 0 },
{ 0, 9, 85, 7, 33, 1, 0 },
{ 0, 1, 38, 7, 82, 7, 0 },
{ 0, 7, 82, 7, 38, 1, 0 },
{ 0, 1, 33, 7, 85, 9, 0 },
{ 0, 5, 78, 7, 44, 1, 0 },
{ 0, 0, 28, 7, 89, 11, 0 },
{ 0, 4, 72, 7, 51, 1, 0 },
{ 0, 0, 23, 7, 91, 14, 0 },
{ 0, 3, 66, 7, 57, 2, 0 },
{ 0, 0, 19, 7, 92, 17, 0 },
{ 0, 2, 60, 7, 63, 3, 0 },
{ 0, 16, 91, 7, 21, 0, 0 },
{ 0, 2, 54, 7, 68, 4, 0 },
{ 0, 13, 90, 7, 25, 0, 0 },
{ 0, 1, 47, 7, 75, 5, 0 },
{ 0, 10, 88, 7, 30, 0, 0 },
{ 0, 1, 41, 7, 80, 6, 0 },
{ 0, 8, 83, 7, 36, 1, 0 } } },
.ver_phase_arr = {
.even = { { 0, 18, 92, 7, 18, 0, 0 },
{ 0, 2, 58, 7, 65, 3, 0 },
{ 0, 15, 91, 7, 22, 0, 0 },
{ 0, 2, 52, 7, 70, 4, 0 },
{ 0, 12, 89, 7, 27, 0, 0 },
{ 0, 1, 46, 7, 76, 5, 0 },
{ 0, 9, 87, 7, 32, 0, 0 },
{ 0, 1, 40, 7, 80, 7, 0 },
{ 0, 7, 83, 7, 37, 1, 0 },
{ 0, 1, 34, 7, 85, 8, 0 },
{ 0, 6, 78, 7, 43, 1, 0 },
{ 0, 0, 29, 7, 88, 11, 0 },
{ 0, 4, 74, 7, 49, 1, 0 },
{ 0, 0, 24, 7, 91, 13, 0 },
{ 0, 3, 68, 7, 55, 2, 0 },
{ 0, 0, 20, 7, 92, 16, 0 },
{ 0, 2, 62, 7, 62, 2, 0 },
{ 0, 16, 92, 7, 20, 0, 0 },
{ 0, 2, 55, 7, 68, 3, 0 },
{ 0, 13, 91, 7, 24, 0, 0 },
{ 0, 1, 49, 7, 74, 4, 0 },
{ 0, 11, 88, 7, 29, 0, 0 },
{ 0, 1, 43, 7, 78, 6, 0 },
{ 0, 8, 85, 7, 34, 1, 0 },
{ 0, 1, 37, 7, 83, 7, 0 },
{ 0, 7, 80, 7, 40, 1, 0 },
{ 0, 0, 32, 7, 87, 9, 0 },
{ 0, 5, 76, 7, 46, 1, 0 },
{ 0, 0, 27, 7, 89, 12, 0 },
{ 0, 4, 70, 7, 52, 2, 0 },
{ 0, 0, 22, 7, 91, 15, 0 },
{ 0, 3, 65, 7, 58, 2, 0 } },
.odd = { { 0, 1, 36, 7, 83, 8, 0 },
{ 0, 6, 80, 7, 41, 1, 0 },
{ 0, 0, 30, 7, 88, 10, 0 },
{ 0, 5, 75, 7, 47, 1, 0 },
{ 0, 0, 25, 7, 90, 13, 0 },
{ 0, 4, 68, 7, 54, 2, 0 },
{ 0, 0, 21, 7, 91, 16, 0 },
{ 0, 3, 63, 7, 60, 2, 0 },
{ 0, 17, 92, 7, 19, 0, 0 },
{ 0, 2, 57, 7, 66, 3, 0 },
{ 0, 14, 91, 7, 23, 0, 0 },
{ 0, 1, 51, 7, 72, 4, 0 },
{ 0, 11, 89, 7, 28, 0, 0 },
{ 0, 1, 44, 7, 78, 5, 0 },
{ 0, 9, 85, 7, 33, 1, 0 },
{ 0, 1, 38, 7, 82, 7, 0 },
{ 0, 7, 82, 7, 38, 1, 0 },
{ 0, 1, 33, 7, 85, 9, 0 },
{ 0, 5, 78, 7, 44, 1, 0 },
{ 0, 0, 28, 7, 89, 11, 0 },
{ 0, 4, 72, 7, 51, 1, 0 },
{ 0, 0, 23, 7, 91, 14, 0 },
{ 0, 3, 66, 7, 57, 2, 0 },
{ 0, 0, 19, 7, 92, 17, 0 },
{ 0, 2, 60, 7, 63, 3, 0 },
{ 0, 16, 91, 7, 21, 0, 0 },
{ 0, 2, 54, 7, 68, 4, 0 },
{ 0, 13, 90, 7, 25, 0, 0 },
{ 0, 1, 47, 7, 75, 5, 0 },
{ 0, 10, 88, 7, 30, 0, 0 },
{ 0, 1, 41, 7, 80, 6, 0 },
{ 0, 8, 83, 7, 36, 1, 0 } } },
.ptrn_arr = { { 0xc9324c93, 0x92649924, 0x24c92649, 0x49324c93,
0x92649926 } },
.sample_patrn_length = 162,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 50) = 0.390244 */
.hor_phase_arr = {
.even = { { 0, 19, 90, 7, 19, 0, 0 },
{ 0, 2, 55, 7, 67, 4, 0 },
{ 0, 12, 89, 7, 27, 0, 0 },
{ 0, 1, 43, 7, 78, 6, 0 },
{ 0, 8, 82, 7, 37, 1, 0 },
{ 0, 1, 32, 7, 85, 10, 0 },
{ 0, 5, 73, 7, 49, 1, 0 },
{ 0, 0, 23, 7, 90, 15, 0 },
{ 0, 3, 61, 7, 61, 3, 0 },
{ 0, 15, 90, 7, 23, 0, 0 },
{ 0, 1, 49, 7, 73, 5, 0 },
{ 0, 10, 85, 7, 32, 1, 0 },
{ 0, 1, 37, 7, 82, 8, 0 },
{ 0, 6, 78, 7, 43, 1, 0 },
{ 0, 0, 27, 7, 89, 12, 0 },
{ 0, 4, 67, 7, 55, 2, 0 } },
.odd = { { 0, 1, 35, 7, 83, 9, 0 },
{ 0, 5, 76, 7, 46, 1, 0 },
{ 0, 0, 25, 7, 89, 14, 0 },
{ 0, 3, 65, 7, 58, 2, 0 },
{ 0, 17, 90, 7, 21, 0, 0 },
{ 0, 2, 52, 7, 70, 4, 0 },
{ 0, 11, 88, 7, 29, 0, 0 },
{ 0, 1, 40, 7, 80, 7, 0 },
{ 0, 7, 80, 7, 40, 1, 0 },
{ 0, 0, 29, 7, 88, 11, 0 },
{ 0, 4, 70, 7, 52, 2, 0 },
{ 0, 0, 21, 7, 90, 17, 0 },
{ 0, 2, 58, 7, 65, 3, 0 },
{ 0, 14, 89, 7, 25, 0, 0 },
{ 0, 1, 46, 7, 76, 5, 0 },
{ 0, 9, 83, 7, 35, 1, 0 } } },
.ver_phase_arr = {
.even = { { 0, 19, 90, 7, 19, 0, 0 },
{ 0, 2, 55, 7, 67, 4, 0 },
{ 0, 12, 89, 7, 27, 0, 0 },
{ 0, 1, 43, 7, 78, 6, 0 },
{ 0, 8, 82, 7, 37, 1, 0 },
{ 0, 1, 32, 7, 85, 10, 0 },
{ 0, 5, 73, 7, 49, 1, 0 },
{ 0, 0, 23, 7, 90, 15, 0 },
{ 0, 3, 61, 7, 61, 3, 0 },
{ 0, 15, 90, 7, 23, 0, 0 },
{ 0, 1, 49, 7, 73, 5, 0 },
{ 0, 10, 85, 7, 32, 1, 0 },
{ 0, 1, 37, 7, 82, 8, 0 },
{ 0, 6, 78, 7, 43, 1, 0 },
{ 0, 0, 27, 7, 89, 12, 0 },
{ 0, 4, 67, 7, 55, 2, 0 } },
.odd = { { 0, 1, 35, 7, 83, 9, 0 },
{ 0, 5, 76, 7, 46, 1, 0 },
{ 0, 0, 25, 7, 89, 14, 0 },
{ 0, 3, 65, 7, 58, 2, 0 },
{ 0, 17, 90, 7, 21, 0, 0 },
{ 0, 2, 52, 7, 70, 4, 0 },
{ 0, 11, 88, 7, 29, 0, 0 },
{ 0, 1, 40, 7, 80, 7, 0 },
{ 0, 7, 80, 7, 40, 1, 0 },
{ 0, 0, 29, 7, 88, 11, 0 },
{ 0, 4, 70, 7, 52, 2, 0 },
{ 0, 0, 21, 7, 90, 17, 0 },
{ 0, 2, 58, 7, 65, 3, 0 },
{ 0, 14, 89, 7, 25, 0, 0 },
{ 0, 1, 46, 7, 76, 5, 0 },
{ 0, 9, 83, 7, 35, 1, 0 } } },
.ptrn_arr = { { 0x49924c93, 0x9324c926, 0x9264 } },
.sample_patrn_length = 82,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 51) = 0.385542 */
.hor_phase_arr = {
.even = { { 0, 19, 90, 7, 19, 0, 0 },
{ 0, 2, 52, 7, 70, 4, 0 },
{ 0, 10, 85, 7, 32, 1, 0 },
{ 0, 1, 35, 7, 83, 9, 0 },
{ 0, 5, 72, 7, 49, 2, 0 },
{ 0, 0, 21, 7, 90, 17, 0 },
{ 0, 2, 55, 7, 67, 4, 0 },
{ 0, 11, 87, 7, 30, 0, 0 },
{ 0, 1, 38, 7, 81, 8, 0 },
{ 0, 6, 75, 7, 46, 1, 0 },
{ 0, 0, 23, 7, 89, 16, 0 },
{ 0, 2, 58, 7, 65, 3, 0 },
{ 0, 13, 87, 7, 28, 0, 0 },
{ 0, 1, 41, 7, 79, 7, 0 },
{ 0, 6, 78, 7, 43, 1, 0 },
{ 0, 0, 25, 7, 89, 14, 0 },
{ 0, 3, 61, 7, 61, 3, 0 },
{ 0, 14, 89, 7, 25, 0, 0 },
{ 0, 1, 43, 7, 78, 6, 0 },
{ 0, 7, 79, 7, 41, 1, 0 },
{ 0, 0, 28, 7, 87, 13, 0 },
{ 0, 3, 65, 7, 58, 2, 0 },
{ 0, 16, 89, 7, 23, 0, 0 },
{ 0, 1, 46, 7, 75, 6, 0 },
{ 0, 8, 81, 7, 38, 1, 0 },
{ 0, 0, 30, 7, 87, 11, 0 },
{ 0, 4, 67, 7, 55, 2, 0 },
{ 0, 17, 90, 7, 21, 0, 0 },
{ 0, 2, 49, 7, 72, 5, 0 },
{ 0, 9, 83, 7, 35, 1, 0 },
{ 0, 1, 32, 7, 85, 10, 0 },
{ 0, 4, 70, 7, 52, 2, 0 } },
.odd = { { 0, 1, 34, 7, 83, 10, 0 },
{ 0, 5, 70, 7, 51, 2, 0 },
{ 0, 0, 20, 7, 90, 18, 0 },
{ 0, 2, 54, 7, 68, 4, 0 },
{ 0, 11, 85, 7, 31, 1, 0 },
{ 0, 1, 36, 7, 82, 9, 0 },
{ 0, 5, 74, 7, 48, 1, 0 },
{ 0, 0, 22, 7, 89, 17, 0 },
{ 0, 2, 57, 7, 65, 4, 0 },
{ 0, 12, 87, 7, 29, 0, 0 },
{ 0, 1, 39, 7, 80, 8, 0 },
{ 0, 6, 76, 7, 45, 1, 0 },
{ 0, 0, 24, 7, 89, 15, 0 },
{ 0, 3, 60, 7, 62, 3, 0 },
{ 0, 13, 89, 7, 26, 0, 0 },
{ 0, 1, 42, 7, 78, 7, 0 },
{ 0, 7, 78, 7, 42, 1, 0 },
{ 0, 0, 26, 7, 89, 13, 0 },
{ 0, 3, 62, 7, 60, 3, 0 },
{ 0, 15, 89, 7, 24, 0, 0 },
{ 0, 1, 45, 7, 76, 6, 0 },
{ 0, 8, 80, 7, 39, 1, 0 },
{ 0, 0, 29, 7, 87, 12, 0 },
{ 0, 4, 65, 7, 57, 2, 0 },
{ 0, 17, 89, 7, 22, 0, 0 },
{ 0, 1, 48, 7, 74, 5, 0 },
{ 0, 9, 82, 7, 36, 1, 0 },
{ 0, 1, 31, 7, 85, 11, 0 },
{ 0, 4, 68, 7, 54, 2, 0 },
{ 0, 18, 90, 7, 20, 0, 0 },
{ 0, 2, 51, 7, 70, 5, 0 },
{ 0, 10, 83, 7, 34, 1, 0 } } },
.ver_phase_arr = {
.even = { { 0, 19, 90, 7, 19, 0, 0 },
{ 0, 2, 52, 7, 70, 4, 0 },
{ 0, 10, 85, 7, 32, 1, 0 },
{ 0, 1, 35, 7, 83, 9, 0 },
{ 0, 5, 72, 7, 49, 2, 0 },
{ 0, 0, 21, 7, 90, 17, 0 },
{ 0, 2, 55, 7, 67, 4, 0 },
{ 0, 11, 87, 7, 30, 0, 0 },
{ 0, 1, 38, 7, 81, 8, 0 },
{ 0, 6, 75, 7, 46, 1, 0 },
{ 0, 0, 23, 7, 89, 16, 0 },
{ 0, 2, 58, 7, 65, 3, 0 },
{ 0, 13, 87, 7, 28, 0, 0 },
{ 0, 1, 41, 7, 79, 7, 0 },
{ 0, 6, 78, 7, 43, 1, 0 },
{ 0, 0, 25, 7, 89, 14, 0 },
{ 0, 3, 61, 7, 61, 3, 0 },
{ 0, 14, 89, 7, 25, 0, 0 },
{ 0, 1, 43, 7, 78, 6, 0 },
{ 0, 7, 79, 7, 41, 1, 0 },
{ 0, 0, 28, 7, 87, 13, 0 },
{ 0, 3, 65, 7, 58, 2, 0 },
{ 0, 16, 89, 7, 23, 0, 0 },
{ 0, 1, 46, 7, 75, 6, 0 },
{ 0, 8, 81, 7, 38, 1, 0 },
{ 0, 0, 30, 7, 87, 11, 0 },
{ 0, 4, 67, 7, 55, 2, 0 },
{ 0, 17, 90, 7, 21, 0, 0 },
{ 0, 2, 49, 7, 72, 5, 0 },
{ 0, 9, 83, 7, 35, 1, 0 },
{ 0, 1, 32, 7, 85, 10, 0 },
{ 0, 4, 70, 7, 52, 2, 0 } },
.odd = { { 0, 1, 34, 7, 83, 10, 0 },
{ 0, 5, 70, 7, 51, 2, 0 },
{ 0, 0, 20, 7, 90, 18, 0 },
{ 0, 2, 54, 7, 68, 4, 0 },
{ 0, 11, 85, 7, 31, 1, 0 },
{ 0, 1, 36, 7, 82, 9, 0 },
{ 0, 5, 74, 7, 48, 1, 0 },
{ 0, 0, 22, 7, 89, 17, 0 },
{ 0, 2, 57, 7, 65, 4, 0 },
{ 0, 12, 87, 7, 29, 0, 0 },
{ 0, 1, 39, 7, 80, 8, 0 },
{ 0, 6, 76, 7, 45, 1, 0 },
{ 0, 0, 24, 7, 89, 15, 0 },
{ 0, 3, 60, 7, 62, 3, 0 },
{ 0, 13, 89, 7, 26, 0, 0 },
{ 0, 1, 42, 7, 78, 7, 0 },
{ 0, 7, 78, 7, 42, 1, 0 },
{ 0, 0, 26, 7, 89, 13, 0 },
{ 0, 3, 62, 7, 60, 3, 0 },
{ 0, 15, 89, 7, 24, 0, 0 },
{ 0, 1, 45, 7, 76, 6, 0 },
{ 0, 8, 80, 7, 39, 1, 0 },
{ 0, 0, 29, 7, 87, 12, 0 },
{ 0, 4, 65, 7, 57, 2, 0 },
{ 0, 17, 89, 7, 22, 0, 0 },
{ 0, 1, 48, 7, 74, 5, 0 },
{ 0, 9, 82, 7, 36, 1, 0 },
{ 0, 1, 31, 7, 85, 11, 0 },
{ 0, 4, 68, 7, 54, 2, 0 },
{ 0, 18, 90, 7, 20, 0, 0 },
{ 0, 2, 51, 7, 70, 5, 0 },
{ 0, 10, 83, 7, 34, 1, 0 } } },
.ptrn_arr = { { 0x49924c93, 0xc9264932, 0x93249924, 0x924c9264,
0x26493249, 0x9 } },
.sample_patrn_length = 166,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 52) = 0.380952 */
.hor_phase_arr = {
.even = { { 0, 20, 88, 7, 20, 0, 0 },
{ 0, 2, 49, 7, 72, 5, 0 },
{ 0, 8, 81, 7, 38, 1, 0 },
{ 0, 0, 28, 7, 87, 13, 0 },
{ 0, 3, 61, 7, 61, 3, 0 },
{ 0, 13, 87, 7, 28, 0, 0 },
{ 0, 1, 38, 7, 81, 8, 0 },
{ 0, 5, 72, 7, 49, 2, 0 } },
.odd = { { 0, 1, 33, 7, 83, 11, 0 },
{ 0, 4, 67, 7, 55, 2, 0 },
{ 0, 16, 88, 7, 24, 0, 0 },
{ 0, 1, 44, 7, 76, 7, 0 },
{ 0, 7, 76, 7, 44, 1, 0 },
{ 0, 0, 24, 7, 88, 16, 0 },
{ 0, 2, 55, 7, 67, 4, 0 },
{ 0, 11, 83, 7, 33, 1, 0 } } },
.ver_phase_arr = {
.even = { { 0, 20, 88, 7, 20, 0, 0 },
{ 0, 2, 49, 7, 72, 5, 0 },
{ 0, 8, 81, 7, 38, 1, 0 },
{ 0, 0, 28, 7, 87, 13, 0 },
{ 0, 3, 61, 7, 61, 3, 0 },
{ 0, 13, 87, 7, 28, 0, 0 },
{ 0, 1, 38, 7, 81, 8, 0 },
{ 0, 5, 72, 7, 49, 2, 0 } },
.odd = { { 0, 1, 33, 7, 83, 11, 0 },
{ 0, 4, 67, 7, 55, 2, 0 },
{ 0, 16, 88, 7, 24, 0, 0 },
{ 0, 1, 44, 7, 76, 7, 0 },
{ 0, 7, 76, 7, 44, 1, 0 },
{ 0, 0, 24, 7, 88, 16, 0 },
{ 0, 2, 55, 7, 67, 4, 0 },
{ 0, 11, 83, 7, 33, 1, 0 } } },
.ptrn_arr = { { 0x4c926493, 0x92 } },
.sample_patrn_length = 42,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 53) = 0.376471 */
.hor_phase_arr = {
.even = { { 0, 20, 88, 7, 20, 0, 0 },
{ 0, 2, 47, 7, 73, 6, 0 },
{ 0, 7, 76, 7, 44, 1, 0 },
{ 0, 0, 22, 7, 88, 18, 0 },
{ 0, 2, 49, 7, 72, 5, 0 },
{ 0, 8, 78, 7, 41, 1, 0 },
{ 0, 0, 24, 7, 87, 17, 0 },
{ 0, 2, 52, 7, 69, 5, 0 },
{ 0, 9, 80, 7, 38, 1, 0 },
{ 0, 0, 26, 7, 87, 15, 0 },
{ 0, 2, 55, 7, 67, 4, 0 },
{ 0, 10, 81, 7, 36, 1, 0 },
{ 0, 1, 28, 7, 85, 14, 0 },
{ 0, 3, 58, 7, 63, 4, 0 },
{ 0, 11, 83, 7, 33, 1, 0 },
{ 0, 1, 31, 7, 84, 12, 0 },
{ 0, 3, 61, 7, 61, 3, 0 },
{ 0, 12, 84, 7, 31, 1, 0 },
{ 0, 1, 33, 7, 83, 11, 0 },
{ 0, 4, 63, 7, 58, 3, 0 },
{ 0, 14, 85, 7, 28, 1, 0 },
{ 0, 1, 36, 7, 81, 10, 0 },
{ 0, 4, 67, 7, 55, 2, 0 },
{ 0, 15, 87, 7, 26, 0, 0 },
{ 0, 1, 38, 7, 80, 9, 0 },
{ 0, 5, 69, 7, 52, 2, 0 },
{ 0, 17, 87, 7, 24, 0, 0 },
{ 0, 1, 41, 7, 78, 8, 0 },
{ 0, 5, 72, 7, 49, 2, 0 },
{ 0, 18, 88, 7, 22, 0, 0 },
{ 0, 1, 44, 7, 76, 7, 0 },
{ 0, 6, 73, 7, 47, 2, 0 } },
.odd = { { 0, 1, 32, 7, 83, 12, 0 },
{ 0, 3, 63, 7, 59, 3, 0 },
{ 0, 13, 84, 7, 30, 1, 0 },
{ 0, 1, 34, 7, 83, 10, 0 },
{ 0, 4, 64, 7, 57, 3, 0 },
{ 0, 14, 87, 7, 27, 0, 0 },
{ 0, 1, 37, 7, 81, 9, 0 },
{ 0, 5, 67, 7, 54, 2, 0 },
{ 0, 16, 87, 7, 25, 0, 0 },
{ 0, 1, 40, 7, 79, 8, 0 },
{ 0, 5, 70, 7, 51, 2, 0 },
{ 0, 18, 87, 7, 23, 0, 0 },
{ 0, 1, 42, 7, 78, 7, 0 },
{ 0, 6, 72, 7, 48, 2, 0 },
{ 0, 19, 88, 7, 21, 0, 0 },
{ 0, 1, 45, 7, 75, 7, 0 },
{ 0, 7, 75, 7, 45, 1, 0 },
{ 0, 0, 21, 7, 88, 19, 0 },
{ 0, 2, 48, 7, 72, 6, 0 },
{ 0, 7, 78, 7, 42, 1, 0 },
{ 0, 0, 23, 7, 87, 18, 0 },
{ 0, 2, 51, 7, 70, 5, 0 },
{ 0, 8, 79, 7, 40, 1, 0 },
{ 0, 0, 25, 7, 87, 16, 0 },
{ 0, 2, 54, 7, 67, 5, 0 },
{ 0, 9, 81, 7, 37, 1, 0 },
{ 0, 0, 27, 7, 87, 14, 0 },
{ 0, 3, 57, 7, 64, 4, 0 },
{ 0, 10, 83, 7, 34, 1, 0 },
{ 0, 1, 30, 7, 84, 13, 0 },
{ 0, 3, 59, 7, 63, 3, 0 },
{ 0, 12, 83, 7, 32, 1, 0 } } },
.ver_phase_arr = {
.even = { { 0, 20, 88, 7, 20, 0, 0 },
{ 0, 2, 47, 7, 73, 6, 0 },
{ 0, 7, 76, 7, 44, 1, 0 },
{ 0, 0, 22, 7, 88, 18, 0 },
{ 0, 2, 49, 7, 72, 5, 0 },
{ 0, 8, 78, 7, 41, 1, 0 },
{ 0, 0, 24, 7, 87, 17, 0 },
{ 0, 2, 52, 7, 69, 5, 0 },
{ 0, 9, 80, 7, 38, 1, 0 },
{ 0, 0, 26, 7, 87, 15, 0 },
{ 0, 2, 55, 7, 67, 4, 0 },
{ 0, 10, 81, 7, 36, 1, 0 },
{ 0, 1, 28, 7, 85, 14, 0 },
{ 0, 3, 58, 7, 63, 4, 0 },
{ 0, 11, 83, 7, 33, 1, 0 },
{ 0, 1, 31, 7, 84, 12, 0 },
{ 0, 3, 61, 7, 61, 3, 0 },
{ 0, 12, 84, 7, 31, 1, 0 },
{ 0, 1, 33, 7, 83, 11, 0 },
{ 0, 4, 63, 7, 58, 3, 0 },
{ 0, 14, 85, 7, 28, 1, 0 },
{ 0, 1, 36, 7, 81, 10, 0 },
{ 0, 4, 67, 7, 55, 2, 0 },
{ 0, 15, 87, 7, 26, 0, 0 },
{ 0, 1, 38, 7, 80, 9, 0 },
{ 0, 5, 69, 7, 52, 2, 0 },
{ 0, 17, 87, 7, 24, 0, 0 },
{ 0, 1, 41, 7, 78, 8, 0 },
{ 0, 5, 72, 7, 49, 2, 0 },
{ 0, 18, 88, 7, 22, 0, 0 },
{ 0, 1, 44, 7, 76, 7, 0 },
{ 0, 6, 73, 7, 47, 2, 0 } },
.odd = { { 0, 1, 32, 7, 83, 12, 0 },
{ 0, 3, 63, 7, 59, 3, 0 },
{ 0, 13, 84, 7, 30, 1, 0 },
{ 0, 1, 34, 7, 83, 10, 0 },
{ 0, 4, 64, 7, 57, 3, 0 },
{ 0, 14, 87, 7, 27, 0, 0 },
{ 0, 1, 37, 7, 81, 9, 0 },
{ 0, 5, 67, 7, 54, 2, 0 },
{ 0, 16, 87, 7, 25, 0, 0 },
{ 0, 1, 40, 7, 79, 8, 0 },
{ 0, 5, 70, 7, 51, 2, 0 },
{ 0, 18, 87, 7, 23, 0, 0 },
{ 0, 1, 42, 7, 78, 7, 0 },
{ 0, 6, 72, 7, 48, 2, 0 },
{ 0, 19, 88, 7, 21, 0, 0 },
{ 0, 1, 45, 7, 75, 7, 0 },
{ 0, 7, 75, 7, 45, 1, 0 },
{ 0, 0, 21, 7, 88, 19, 0 },
{ 0, 2, 48, 7, 72, 6, 0 },
{ 0, 7, 78, 7, 42, 1, 0 },
{ 0, 0, 23, 7, 87, 18, 0 },
{ 0, 2, 51, 7, 70, 5, 0 },
{ 0, 8, 79, 7, 40, 1, 0 },
{ 0, 0, 25, 7, 87, 16, 0 },
{ 0, 2, 54, 7, 67, 5, 0 },
{ 0, 9, 81, 7, 37, 1, 0 },
{ 0, 0, 27, 7, 87, 14, 0 },
{ 0, 3, 57, 7, 64, 4, 0 },
{ 0, 10, 83, 7, 34, 1, 0 },
{ 0, 1, 30, 7, 84, 13, 0 },
{ 0, 3, 59, 7, 63, 3, 0 },
{ 0, 12, 83, 7, 32, 1, 0 } } },
.ptrn_arr = { { 0x64926493, 0x64926492, 0x4c926492, 0x4c924c92,
0x4c924c92, 0x92 } },
.sample_patrn_length = 170,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 54) = 0.372093 */
.hor_phase_arr = {
.even = { { 0, 21, 86, 7, 21, 0, 0 },
{ 0, 1, 44, 7, 76, 7, 0 },
{ 0, 6, 71, 7, 49, 2, 0 },
{ 0, 17, 86, 7, 25, 0, 0 },
{ 0, 1, 39, 7, 79, 9, 0 },
{ 0, 5, 65, 7, 55, 3, 0 },
{ 0, 14, 84, 7, 29, 1, 0 },
{ 0, 1, 34, 7, 82, 11, 0 },
{ 0, 3, 61, 7, 61, 3, 0 },
{ 0, 11, 82, 7, 34, 1, 0 },
{ 0, 1, 29, 7, 84, 14, 0 },
{ 0, 3, 55, 7, 65, 5, 0 },
{ 0, 9, 79, 7, 39, 1, 0 },
{ 0, 0, 25, 7, 86, 17, 0 },
{ 0, 2, 49, 7, 71, 6, 0 },
{ 0, 7, 76, 7, 44, 1, 0 } },
.odd = { { 0, 1, 31, 7, 83, 13, 0 },
{ 0, 3, 58, 7, 63, 4, 0 },
{ 0, 10, 81, 7, 36, 1, 0 },
{ 0, 0, 27, 7, 85, 16, 0 },
{ 0, 2, 52, 7, 69, 5, 0 },
{ 0, 8, 78, 7, 41, 1, 0 },
{ 0, 0, 23, 7, 86, 19, 0 },
{ 0, 2, 47, 7, 72, 7, 0 },
{ 0, 7, 72, 7, 47, 2, 0 },
{ 0, 19, 86, 7, 23, 0, 0 },
{ 0, 1, 41, 7, 78, 8, 0 },
{ 0, 5, 69, 7, 52, 2, 0 },
{ 0, 16, 85, 7, 27, 0, 0 },
{ 0, 1, 36, 7, 81, 10, 0 },
{ 0, 4, 63, 7, 58, 3, 0 },
{ 0, 13, 83, 7, 31, 1, 0 } } },
.ver_phase_arr = {
.even = { { 0, 21, 86, 7, 21, 0, 0 },
{ 0, 1, 44, 7, 76, 7, 0 },
{ 0, 6, 71, 7, 49, 2, 0 },
{ 0, 17, 86, 7, 25, 0, 0 },
{ 0, 1, 39, 7, 79, 9, 0 },
{ 0, 5, 65, 7, 55, 3, 0 },
{ 0, 14, 84, 7, 29, 1, 0 },
{ 0, 1, 34, 7, 82, 11, 0 },
{ 0, 3, 61, 7, 61, 3, 0 },
{ 0, 11, 82, 7, 34, 1, 0 },
{ 0, 1, 29, 7, 84, 14, 0 },
{ 0, 3, 55, 7, 65, 5, 0 },
{ 0, 9, 79, 7, 39, 1, 0 },
{ 0, 0, 25, 7, 86, 17, 0 },
{ 0, 2, 49, 7, 71, 6, 0 },
{ 0, 7, 76, 7, 44, 1, 0 } },
.odd = { { 0, 1, 31, 7, 83, 13, 0 },
{ 0, 3, 58, 7, 63, 4, 0 },
{ 0, 10, 81, 7, 36, 1, 0 },
{ 0, 0, 27, 7, 85, 16, 0 },
{ 0, 2, 52, 7, 69, 5, 0 },
{ 0, 8, 78, 7, 41, 1, 0 },
{ 0, 0, 23, 7, 86, 19, 0 },
{ 0, 2, 47, 7, 72, 7, 0 },
{ 0, 7, 72, 7, 47, 2, 0 },
{ 0, 19, 86, 7, 23, 0, 0 },
{ 0, 1, 41, 7, 78, 8, 0 },
{ 0, 5, 69, 7, 52, 2, 0 },
{ 0, 16, 85, 7, 27, 0, 0 },
{ 0, 1, 36, 7, 81, 10, 0 },
{ 0, 4, 63, 7, 58, 3, 0 },
{ 0, 13, 83, 7, 31, 1, 0 } } },
.ptrn_arr = { { 0x24932493, 0x24992493, 0x92499 } },
.sample_patrn_length = 86,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 55) = 0.367816 */
.hor_phase_arr = {
.even = { { 0, 21, 86, 7, 21, 0, 0 },
{ 0, 1, 41, 7, 77, 9, 0 },
{ 0, 5, 65, 7, 55, 3, 0 },
{ 0, 13, 82, 7, 32, 1, 0 },
{ 0, 1, 29, 7, 83, 15, 0 },
{ 0, 2, 52, 7, 69, 5, 0 },
{ 0, 8, 74, 7, 44, 2, 0 },
{ 0, 19, 86, 7, 23, 0, 0 },
{ 0, 1, 39, 7, 78, 10, 0 },
{ 0, 4, 63, 7, 58, 3, 0 },
{ 0, 12, 81, 7, 34, 1, 0 },
{ 0, 1, 27, 7, 84, 16, 0 },
{ 0, 2, 50, 7, 70, 6, 0 },
{ 0, 7, 72, 7, 47, 2, 0 },
{ 0, 18, 85, 7, 25, 0, 0 },
{ 0, 1, 36, 7, 80, 11, 0 },
{ 0, 4, 60, 7, 60, 4, 0 },
{ 0, 11, 80, 7, 36, 1, 0 },
{ 0, 0, 25, 7, 85, 18, 0 },
{ 0, 2, 47, 7, 72, 7, 0 },
{ 0, 6, 70, 7, 50, 2, 0 },
{ 0, 16, 84, 7, 27, 1, 0 },
{ 0, 1, 34, 7, 81, 12, 0 },
{ 0, 3, 58, 7, 63, 4, 0 },
{ 0, 10, 78, 7, 39, 1, 0 },
{ 0, 0, 23, 7, 86, 19, 0 },
{ 0, 2, 44, 7, 74, 8, 0 },
{ 0, 5, 69, 7, 52, 2, 0 },
{ 0, 15, 83, 7, 29, 1, 0 },
{ 0, 1, 32, 7, 82, 13, 0 },
{ 0, 3, 55, 7, 65, 5, 0 },
{ 0, 9, 77, 7, 41, 1, 0 } },
.odd = { { 0, 1, 30, 7, 83, 14, 0 },
{ 0, 3, 54, 7, 66, 5, 0 },
{ 0, 8, 76, 7, 43, 1, 0 },
{ 0, 20, 86, 7, 22, 0, 0 },
{ 0, 1, 40, 7, 78, 9, 0 },
{ 0, 4, 65, 7, 56, 3, 0 },
{ 0, 13, 81, 7, 33, 1, 0 },
{ 0, 1, 28, 7, 84, 15, 0 },
{ 0, 2, 51, 7, 69, 6, 0 },
{ 0, 7, 74, 7, 45, 2, 0 },
{ 0, 18, 86, 7, 24, 0, 0 },
{ 0, 1, 38, 7, 79, 10, 0 },
{ 0, 4, 62, 7, 59, 3, 0 },
{ 0, 11, 81, 7, 35, 1, 0 },
{ 0, 0, 26, 7, 85, 17, 0 },
{ 0, 2, 48, 7, 72, 6, 0 },
{ 0, 6, 72, 7, 48, 2, 0 },
{ 0, 17, 85, 7, 26, 0, 0 },
{ 0, 1, 35, 7, 81, 11, 0 },
{ 0, 3, 59, 7, 62, 4, 0 },
{ 0, 10, 79, 7, 38, 1, 0 },
{ 0, 0, 24, 7, 86, 18, 0 },
{ 0, 2, 45, 7, 74, 7, 0 },
{ 0, 6, 69, 7, 51, 2, 0 },
{ 0, 15, 84, 7, 28, 1, 0 },
{ 0, 1, 33, 7, 81, 13, 0 },
{ 0, 3, 56, 7, 65, 4, 0 },
{ 0, 9, 78, 7, 40, 1, 0 },
{ 0, 0, 22, 7, 86, 20, 0 },
{ 0, 1, 43, 7, 76, 8, 0 },
{ 0, 5, 66, 7, 54, 3, 0 },
{ 0, 14, 83, 7, 30, 1, 0 } } },
.ver_phase_arr = {
.even = { { 0, 21, 86, 7, 21, 0, 0 },
{ 0, 1, 41, 7, 77, 9, 0 },
{ 0, 5, 65, 7, 55, 3, 0 },
{ 0, 13, 82, 7, 32, 1, 0 },
{ 0, 1, 29, 7, 83, 15, 0 },
{ 0, 2, 52, 7, 69, 5, 0 },
{ 0, 8, 74, 7, 44, 2, 0 },
{ 0, 19, 86, 7, 23, 0, 0 },
{ 0, 1, 39, 7, 78, 10, 0 },
{ 0, 4, 63, 7, 58, 3, 0 },
{ 0, 12, 81, 7, 34, 1, 0 },
{ 0, 1, 27, 7, 84, 16, 0 },
{ 0, 2, 50, 7, 70, 6, 0 },
{ 0, 7, 72, 7, 47, 2, 0 },
{ 0, 18, 85, 7, 25, 0, 0 },
{ 0, 1, 36, 7, 80, 11, 0 },
{ 0, 4, 60, 7, 60, 4, 0 },
{ 0, 11, 80, 7, 36, 1, 0 },
{ 0, 0, 25, 7, 85, 18, 0 },
{ 0, 2, 47, 7, 72, 7, 0 },
{ 0, 6, 70, 7, 50, 2, 0 },
{ 0, 16, 84, 7, 27, 1, 0 },
{ 0, 1, 34, 7, 81, 12, 0 },
{ 0, 3, 58, 7, 63, 4, 0 },
{ 0, 10, 78, 7, 39, 1, 0 },
{ 0, 0, 23, 7, 86, 19, 0 },
{ 0, 2, 44, 7, 74, 8, 0 },
{ 0, 5, 69, 7, 52, 2, 0 },
{ 0, 15, 83, 7, 29, 1, 0 },
{ 0, 1, 32, 7, 82, 13, 0 },
{ 0, 3, 55, 7, 65, 5, 0 },
{ 0, 9, 77, 7, 41, 1, 0 } },
.odd = { { 0, 1, 30, 7, 83, 14, 0 },
{ 0, 3, 54, 7, 66, 5, 0 },
{ 0, 8, 76, 7, 43, 1, 0 },
{ 0, 20, 86, 7, 22, 0, 0 },
{ 0, 1, 40, 7, 78, 9, 0 },
{ 0, 4, 65, 7, 56, 3, 0 },
{ 0, 13, 81, 7, 33, 1, 0 },
{ 0, 1, 28, 7, 84, 15, 0 },
{ 0, 2, 51, 7, 69, 6, 0 },
{ 0, 7, 74, 7, 45, 2, 0 },
{ 0, 18, 86, 7, 24, 0, 0 },
{ 0, 1, 38, 7, 79, 10, 0 },
{ 0, 4, 62, 7, 59, 3, 0 },
{ 0, 11, 81, 7, 35, 1, 0 },
{ 0, 0, 26, 7, 85, 17, 0 },
{ 0, 2, 48, 7, 72, 6, 0 },
{ 0, 6, 72, 7, 48, 2, 0 },
{ 0, 17, 85, 7, 26, 0, 0 },
{ 0, 1, 35, 7, 81, 11, 0 },
{ 0, 3, 59, 7, 62, 4, 0 },
{ 0, 10, 79, 7, 38, 1, 0 },
{ 0, 0, 24, 7, 86, 18, 0 },
{ 0, 2, 45, 7, 74, 7, 0 },
{ 0, 6, 69, 7, 51, 2, 0 },
{ 0, 15, 84, 7, 28, 1, 0 },
{ 0, 1, 33, 7, 81, 13, 0 },
{ 0, 3, 56, 7, 65, 4, 0 },
{ 0, 9, 78, 7, 40, 1, 0 },
{ 0, 0, 22, 7, 86, 20, 0 },
{ 0, 1, 43, 7, 76, 8, 0 },
{ 0, 5, 66, 7, 54, 3, 0 },
{ 0, 14, 83, 7, 30, 1, 0 } } },
.ptrn_arr = { { 0x24992493, 0x264924c9, 0x92493249, 0x924c9249,
0x93249264, 0x924 } },
.sample_patrn_length = 174,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 56) = 0.363636 */
.hor_phase_arr = {
.even = { { 0, 22, 84, 7, 22, 0, 0 },
{ 0, 1, 39, 7, 78, 10, 0 },
{ 0, 4, 60, 7, 60, 4, 0 },
{ 0, 10, 78, 7, 39, 1, 0 } },
.odd = { { 0, 1, 30, 7, 82, 15, 0 },
{ 0, 2, 50, 7, 70, 6, 0 },
{ 0, 6, 70, 7, 50, 2, 0 },
{ 0, 15, 82, 7, 30, 1, 0 } } },
.ver_phase_arr = {
.even = { { 0, 22, 84, 7, 22, 0, 0 },
{ 0, 1, 39, 7, 78, 10, 0 },
{ 0, 4, 60, 7, 60, 4, 0 },
{ 0, 10, 78, 7, 39, 1, 0 } },
.odd = { { 0, 1, 30, 7, 82, 15, 0 },
{ 0, 2, 50, 7, 70, 6, 0 },
{ 0, 6, 70, 7, 50, 2, 0 },
{ 0, 15, 82, 7, 30, 1, 0 } } },
.ptrn_arr = { { 0x92493 } },
.sample_patrn_length = 22,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 57) = 0.359551 */
.hor_phase_arr = {
.even = { { 0, 22, 84, 7, 22, 0, 0 },
{ 0, 1, 37, 7, 79, 11, 0 },
{ 0, 3, 55, 7, 65, 5, 0 },
{ 0, 7, 72, 7, 47, 2, 0 },
{ 0, 15, 82, 7, 30, 1, 0 },
{ 0, 1, 28, 7, 82, 17, 0 },
{ 0, 2, 44, 7, 74, 8, 0 },
{ 0, 5, 62, 7, 57, 4, 0 },
{ 0, 10, 78, 7, 39, 1, 0 },
{ 0, 20, 84, 7, 24, 0, 0 },
{ 0, 1, 35, 7, 79, 13, 0 },
{ 0, 3, 52, 7, 67, 6, 0 },
{ 0, 7, 69, 7, 50, 2, 0 },
{ 0, 14, 81, 7, 32, 1, 0 },
{ 0, 1, 26, 7, 83, 18, 0 },
{ 0, 2, 42, 7, 75, 9, 0 },
{ 0, 4, 60, 7, 60, 4, 0 },
{ 0, 9, 75, 7, 42, 2, 0 },
{ 0, 18, 83, 7, 26, 1, 0 },
{ 0, 1, 32, 7, 81, 14, 0 },
{ 0, 2, 50, 7, 69, 7, 0 },
{ 0, 6, 67, 7, 52, 3, 0 },
{ 0, 13, 79, 7, 35, 1, 0 },
{ 0, 0, 24, 7, 84, 20, 0 },
{ 0, 1, 39, 7, 78, 10, 0 },
{ 0, 4, 57, 7, 62, 5, 0 },
{ 0, 8, 74, 7, 44, 2, 0 },
{ 0, 17, 82, 7, 28, 1, 0 },
{ 0, 1, 30, 7, 82, 15, 0 },
{ 0, 2, 47, 7, 72, 7, 0 },
{ 0, 5, 65, 7, 55, 3, 0 },
{ 0, 11, 79, 7, 37, 1, 0 } },
.odd = { { 0, 1, 29, 7, 82, 16, 0 },
{ 0, 2, 46, 7, 72, 8, 0 },
{ 0, 5, 64, 7, 56, 3, 0 },
{ 0, 11, 78, 7, 38, 1, 0 },
{ 0, 21, 84, 7, 23, 0, 0 },
{ 0, 1, 36, 7, 79, 12, 0 },
{ 0, 3, 53, 7, 66, 6, 0 },
{ 0, 7, 71, 7, 48, 2, 0 },
{ 0, 15, 81, 7, 31, 1, 0 },
{ 0, 1, 27, 7, 82, 18, 0 },
{ 0, 2, 43, 7, 74, 9, 0 },
{ 0, 4, 61, 7, 59, 4, 0 },
{ 0, 10, 75, 7, 41, 2, 0 },
{ 0, 19, 83, 7, 25, 1, 0 },
{ 0, 1, 33, 7, 81, 13, 0 },
{ 0, 3, 51, 7, 68, 6, 0 },
{ 0, 6, 68, 7, 51, 3, 0 },
{ 0, 13, 81, 7, 33, 1, 0 },
{ 0, 1, 25, 7, 83, 19, 0 },
{ 0, 2, 41, 7, 75, 10, 0 },
{ 0, 4, 59, 7, 61, 4, 0 },
{ 0, 9, 74, 7, 43, 2, 0 },
{ 0, 18, 82, 7, 27, 1, 0 },
{ 0, 1, 31, 7, 81, 15, 0 },
{ 0, 2, 48, 7, 71, 7, 0 },
{ 0, 6, 66, 7, 53, 3, 0 },
{ 0, 12, 79, 7, 36, 1, 0 },
{ 0, 0, 23, 7, 84, 21, 0 },
{ 0, 1, 38, 7, 78, 11, 0 },
{ 0, 3, 56, 7, 64, 5, 0 },
{ 0, 8, 72, 7, 46, 2, 0 },
{ 0, 16, 82, 7, 29, 1, 0 } } },
.ver_phase_arr = {
.even = { { 0, 22, 84, 7, 22, 0, 0 },
{ 0, 1, 37, 7, 79, 11, 0 },
{ 0, 3, 55, 7, 65, 5, 0 },
{ 0, 7, 72, 7, 47, 2, 0 },
{ 0, 15, 82, 7, 30, 1, 0 },
{ 0, 1, 28, 7, 82, 17, 0 },
{ 0, 2, 44, 7, 74, 8, 0 },
{ 0, 5, 62, 7, 57, 4, 0 },
{ 0, 10, 78, 7, 39, 1, 0 },
{ 0, 20, 84, 7, 24, 0, 0 },
{ 0, 1, 35, 7, 79, 13, 0 },
{ 0, 3, 52, 7, 67, 6, 0 },
{ 0, 7, 69, 7, 50, 2, 0 },
{ 0, 14, 81, 7, 32, 1, 0 },
{ 0, 1, 26, 7, 83, 18, 0 },
{ 0, 2, 42, 7, 75, 9, 0 },
{ 0, 4, 60, 7, 60, 4, 0 },
{ 0, 9, 75, 7, 42, 2, 0 },
{ 0, 18, 83, 7, 26, 1, 0 },
{ 0, 1, 32, 7, 81, 14, 0 },
{ 0, 2, 50, 7, 69, 7, 0 },
{ 0, 6, 67, 7, 52, 3, 0 },
{ 0, 13, 79, 7, 35, 1, 0 },
{ 0, 0, 24, 7, 84, 20, 0 },
{ 0, 1, 39, 7, 78, 10, 0 },
{ 0, 4, 57, 7, 62, 5, 0 },
{ 0, 8, 74, 7, 44, 2, 0 },
{ 0, 17, 82, 7, 28, 1, 0 },
{ 0, 1, 30, 7, 82, 15, 0 },
{ 0, 2, 47, 7, 72, 7, 0 },
{ 0, 5, 65, 7, 55, 3, 0 },
{ 0, 11, 79, 7, 37, 1, 0 } },
.odd = { { 0, 1, 29, 7, 82, 16, 0 },
{ 0, 2, 46, 7, 72, 8, 0 },
{ 0, 5, 64, 7, 56, 3, 0 },
{ 0, 11, 78, 7, 38, 1, 0 },
{ 0, 21, 84, 7, 23, 0, 0 },
{ 0, 1, 36, 7, 79, 12, 0 },
{ 0, 3, 53, 7, 66, 6, 0 },
{ 0, 7, 71, 7, 48, 2, 0 },
{ 0, 15, 81, 7, 31, 1, 0 },
{ 0, 1, 27, 7, 82, 18, 0 },
{ 0, 2, 43, 7, 74, 9, 0 },
{ 0, 4, 61, 7, 59, 4, 0 },
{ 0, 10, 75, 7, 41, 2, 0 },
{ 0, 19, 83, 7, 25, 1, 0 },
{ 0, 1, 33, 7, 81, 13, 0 },
{ 0, 3, 51, 7, 68, 6, 0 },
{ 0, 6, 68, 7, 51, 3, 0 },
{ 0, 13, 81, 7, 33, 1, 0 },
{ 0, 1, 25, 7, 83, 19, 0 },
{ 0, 2, 41, 7, 75, 10, 0 },
{ 0, 4, 59, 7, 61, 4, 0 },
{ 0, 9, 74, 7, 43, 2, 0 },
{ 0, 18, 82, 7, 27, 1, 0 },
{ 0, 1, 31, 7, 81, 15, 0 },
{ 0, 2, 48, 7, 71, 7, 0 },
{ 0, 6, 66, 7, 53, 3, 0 },
{ 0, 12, 79, 7, 36, 1, 0 },
{ 0, 0, 23, 7, 84, 21, 0 },
{ 0, 1, 38, 7, 78, 11, 0 },
{ 0, 3, 56, 7, 64, 5, 0 },
{ 0, 8, 72, 7, 46, 2, 0 },
{ 0, 16, 82, 7, 29, 1, 0 } } },
.ptrn_arr = { { 0x26492493, 0x924c9249, 0x49249924, 0x64924932,
0x24c92492, 0x9249 } },
.sample_patrn_length = 178,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 58) = 0.355556 */
.hor_phase_arr = {
.even = { { 0, 22, 84, 7, 22, 0, 0 },
{ 0, 1, 35, 7, 79, 13, 0 },
{ 0, 3, 50, 7, 68, 7, 0 },
{ 0, 6, 64, 7, 55, 3, 0 },
{ 0, 11, 75, 7, 40, 2, 0 },
{ 0, 19, 82, 7, 26, 1, 0 },
{ 0, 1, 30, 7, 81, 16, 0 },
{ 0, 2, 45, 7, 72, 9, 0 },
{ 0, 4, 60, 7, 60, 4, 0 },
{ 0, 9, 72, 7, 45, 2, 0 },
{ 0, 16, 81, 7, 30, 1, 0 },
{ 0, 1, 26, 7, 82, 19, 0 },
{ 0, 2, 40, 7, 75, 11, 0 },
{ 0, 3, 55, 7, 64, 6, 0 },
{ 0, 7, 68, 7, 50, 3, 0 },
{ 0, 13, 79, 7, 35, 1, 0 } },
.odd = { { 0, 1, 28, 7, 82, 17, 0 },
{ 0, 2, 42, 7, 74, 10, 0 },
{ 0, 4, 57, 7, 62, 5, 0 },
{ 0, 8, 71, 7, 47, 2, 0 },
{ 0, 14, 80, 7, 33, 1, 0 },
{ 0, 1, 24, 7, 82, 21, 0 },
{ 0, 1, 37, 7, 78, 12, 0 },
{ 0, 3, 52, 7, 67, 6, 0 },
{ 0, 6, 67, 7, 52, 3, 0 },
{ 0, 12, 78, 7, 37, 1, 0 },
{ 0, 21, 82, 7, 24, 1, 0 },
{ 0, 1, 33, 7, 80, 14, 0 },
{ 0, 2, 47, 7, 71, 8, 0 },
{ 0, 5, 62, 7, 57, 4, 0 },
{ 0, 10, 74, 7, 42, 2, 0 },
{ 0, 17, 82, 7, 28, 1, 0 } } },
.ver_phase_arr = {
.even = { { 0, 22, 84, 7, 22, 0, 0 },
{ 0, 1, 35, 7, 79, 13, 0 },
{ 0, 3, 50, 7, 68, 7, 0 },
{ 0, 6, 64, 7, 55, 3, 0 },
{ 0, 11, 75, 7, 40, 2, 0 },
{ 0, 19, 82, 7, 26, 1, 0 },
{ 0, 1, 30, 7, 81, 16, 0 },
{ 0, 2, 45, 7, 72, 9, 0 },
{ 0, 4, 60, 7, 60, 4, 0 },
{ 0, 9, 72, 7, 45, 2, 0 },
{ 0, 16, 81, 7, 30, 1, 0 },
{ 0, 1, 26, 7, 82, 19, 0 },
{ 0, 2, 40, 7, 75, 11, 0 },
{ 0, 3, 55, 7, 64, 6, 0 },
{ 0, 7, 68, 7, 50, 3, 0 },
{ 0, 13, 79, 7, 35, 1, 0 } },
.odd = { { 0, 1, 28, 7, 82, 17, 0 },
{ 0, 2, 42, 7, 74, 10, 0 },
{ 0, 4, 57, 7, 62, 5, 0 },
{ 0, 8, 71, 7, 47, 2, 0 },
{ 0, 14, 80, 7, 33, 1, 0 },
{ 0, 1, 24, 7, 82, 21, 0 },
{ 0, 1, 37, 7, 78, 12, 0 },
{ 0, 3, 52, 7, 67, 6, 0 },
{ 0, 6, 67, 7, 52, 3, 0 },
{ 0, 12, 78, 7, 37, 1, 0 },
{ 0, 21, 82, 7, 24, 1, 0 },
{ 0, 1, 33, 7, 80, 14, 0 },
{ 0, 2, 47, 7, 71, 8, 0 },
{ 0, 5, 62, 7, 57, 4, 0 },
{ 0, 10, 74, 7, 42, 2, 0 },
{ 0, 17, 82, 7, 28, 1, 0 } } },
.ptrn_arr = { { 0x32492493, 0x99249249, 0x924924 } },
.sample_patrn_length = 90,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 59) = 0.351648 */
.hor_phase_arr = {
.even = { { 0, 23, 82, 7, 23, 0, 0 },
{ 0, 1, 33, 7, 79, 15, 0 },
{ 0, 2, 45, 7, 72, 9, 0 },
{ 0, 4, 57, 7, 62, 5, 0 },
{ 0, 7, 68, 7, 50, 3, 0 },
{ 0, 12, 78, 7, 37, 1, 0 },
{ 0, 19, 81, 7, 27, 1, 0 },
{ 0, 1, 29, 7, 80, 18, 0 },
{ 0, 2, 40, 7, 75, 11, 0 },
{ 0, 3, 52, 7, 66, 7, 0 },
{ 0, 6, 63, 7, 55, 4, 0 },
{ 0, 10, 74, 7, 42, 2, 0 },
{ 0, 16, 80, 7, 31, 1, 0 },
{ 0, 1, 25, 7, 81, 21, 0 },
{ 0, 1, 35, 7, 79, 13, 0 },
{ 0, 2, 47, 7, 71, 8, 0 },
{ 0, 5, 59, 7, 59, 5, 0 },
{ 0, 8, 71, 7, 47, 2, 0 },
{ 0, 13, 79, 7, 35, 1, 0 },
{ 0, 21, 81, 7, 25, 1, 0 },
{ 0, 1, 31, 7, 80, 16, 0 },
{ 0, 2, 42, 7, 74, 10, 0 },
{ 0, 4, 55, 7, 63, 6, 0 },
{ 0, 7, 66, 7, 52, 3, 0 },
{ 0, 11, 75, 7, 40, 2, 0 },
{ 0, 18, 80, 7, 29, 1, 0 },
{ 0, 1, 27, 7, 81, 19, 0 },
{ 0, 1, 37, 7, 78, 12, 0 },
{ 0, 3, 50, 7, 68, 7, 0 },
{ 0, 5, 62, 7, 57, 4, 0 },
{ 0, 9, 72, 7, 45, 2, 0 },
{ 0, 15, 79, 7, 33, 1, 0 } },
.odd = { { 0, 1, 28, 7, 81, 18, 0 },
{ 0, 2, 39, 7, 75, 12, 0 },
{ 0, 3, 51, 7, 67, 7, 0 },
{ 0, 6, 62, 7, 56, 4, 0 },
{ 0, 10, 73, 7, 43, 2, 0 },
{ 0, 15, 80, 7, 32, 1, 0 },
{ 0, 1, 24, 7, 81, 22, 0 },
{ 0, 1, 34, 7, 79, 14, 0 },
{ 0, 2, 46, 7, 71, 9, 0 },
{ 0, 4, 58, 7, 61, 5, 0 },
{ 0, 8, 69, 7, 48, 3, 0 },
{ 0, 13, 78, 7, 36, 1, 0 },
{ 0, 20, 81, 7, 26, 1, 0 },
{ 0, 1, 30, 7, 80, 17, 0 },
{ 0, 2, 41, 7, 74, 11, 0 },
{ 0, 3, 53, 7, 66, 6, 0 },
{ 0, 6, 66, 7, 53, 3, 0 },
{ 0, 11, 74, 7, 41, 2, 0 },
{ 0, 17, 80, 7, 30, 1, 0 },
{ 0, 1, 26, 7, 81, 20, 0 },
{ 0, 1, 36, 7, 78, 13, 0 },
{ 0, 3, 48, 7, 69, 8, 0 },
{ 0, 5, 61, 7, 58, 4, 0 },
{ 0, 9, 71, 7, 46, 2, 0 },
{ 0, 14, 79, 7, 34, 1, 0 },
{ 0, 22, 81, 7, 24, 1, 0 },
{ 0, 1, 32, 7, 80, 15, 0 },
{ 0, 2, 43, 7, 73, 10, 0 },
{ 0, 4, 56, 7, 62, 6, 0 },
{ 0, 7, 67, 7, 51, 3, 0 },
{ 0, 12, 75, 7, 39, 2, 0 },
{ 0, 18, 81, 7, 28, 1, 0 } } },
.ver_phase_arr = {
.even = { { 0, 23, 82, 7, 23, 0, 0 },
{ 0, 1, 33, 7, 79, 15, 0 },
{ 0, 2, 45, 7, 72, 9, 0 },
{ 0, 4, 57, 7, 62, 5, 0 },
{ 0, 7, 68, 7, 50, 3, 0 },
{ 0, 12, 78, 7, 37, 1, 0 },
{ 0, 19, 81, 7, 27, 1, 0 },
{ 0, 1, 29, 7, 80, 18, 0 },
{ 0, 2, 40, 7, 75, 11, 0 },
{ 0, 3, 52, 7, 66, 7, 0 },
{ 0, 6, 63, 7, 55, 4, 0 },
{ 0, 10, 74, 7, 42, 2, 0 },
{ 0, 16, 80, 7, 31, 1, 0 },
{ 0, 1, 25, 7, 81, 21, 0 },
{ 0, 1, 35, 7, 79, 13, 0 },
{ 0, 2, 47, 7, 71, 8, 0 },
{ 0, 5, 59, 7, 59, 5, 0 },
{ 0, 8, 71, 7, 47, 2, 0 },
{ 0, 13, 79, 7, 35, 1, 0 },
{ 0, 21, 81, 7, 25, 1, 0 },
{ 0, 1, 31, 7, 80, 16, 0 },
{ 0, 2, 42, 7, 74, 10, 0 },
{ 0, 4, 55, 7, 63, 6, 0 },
{ 0, 7, 66, 7, 52, 3, 0 },
{ 0, 11, 75, 7, 40, 2, 0 },
{ 0, 18, 80, 7, 29, 1, 0 },
{ 0, 1, 27, 7, 81, 19, 0 },
{ 0, 1, 37, 7, 78, 12, 0 },
{ 0, 3, 50, 7, 68, 7, 0 },
{ 0, 5, 62, 7, 57, 4, 0 },
{ 0, 9, 72, 7, 45, 2, 0 },
{ 0, 15, 79, 7, 33, 1, 0 } },
.odd = { { 0, 1, 28, 7, 81, 18, 0 },
{ 0, 2, 39, 7, 75, 12, 0 },
{ 0, 3, 51, 7, 67, 7, 0 },
{ 0, 6, 62, 7, 56, 4, 0 },
{ 0, 10, 73, 7, 43, 2, 0 },
{ 0, 15, 80, 7, 32, 1, 0 },
{ 0, 1, 24, 7, 81, 22, 0 },
{ 0, 1, 34, 7, 79, 14, 0 },
{ 0, 2, 46, 7, 71, 9, 0 },
{ 0, 4, 58, 7, 61, 5, 0 },
{ 0, 8, 69, 7, 48, 3, 0 },
{ 0, 13, 78, 7, 36, 1, 0 },
{ 0, 20, 81, 7, 26, 1, 0 },
{ 0, 1, 30, 7, 80, 17, 0 },
{ 0, 2, 41, 7, 74, 11, 0 },
{ 0, 3, 53, 7, 66, 6, 0 },
{ 0, 6, 66, 7, 53, 3, 0 },
{ 0, 11, 74, 7, 41, 2, 0 },
{ 0, 17, 80, 7, 30, 1, 0 },
{ 0, 1, 26, 7, 81, 20, 0 },
{ 0, 1, 36, 7, 78, 13, 0 },
{ 0, 3, 48, 7, 69, 8, 0 },
{ 0, 5, 61, 7, 58, 4, 0 },
{ 0, 9, 71, 7, 46, 2, 0 },
{ 0, 14, 79, 7, 34, 1, 0 },
{ 0, 22, 81, 7, 24, 1, 0 },
{ 0, 1, 32, 7, 80, 15, 0 },
{ 0, 2, 43, 7, 73, 10, 0 },
{ 0, 4, 56, 7, 62, 6, 0 },
{ 0, 7, 67, 7, 51, 3, 0 },
{ 0, 12, 75, 7, 39, 2, 0 },
{ 0, 18, 81, 7, 28, 1, 0 } } },
.ptrn_arr = { { 0x92492493, 0x4924924c, 0x24924992, 0x92493249,
0x49264924, 0x92492 } },
.sample_patrn_length = 182,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 60) = 0.347826 */
.hor_phase_arr = {
.even = { { 1, 23, 80, 7, 23, 1, 0 },
{ 0, 1, 31, 7, 79, 17, 0 },
{ 0, 2, 40, 7, 75, 11, 0 },
{ 0, 3, 50, 7, 67, 8, 0 },
{ 0, 5, 59, 7, 59, 5, 0 },
{ 0, 8, 67, 7, 50, 3, 0 },
{ 0, 11, 75, 7, 40, 2, 0 },
{ 0, 17, 79, 7, 31, 1, 0 } },
.odd = { { 0, 1, 27, 7, 80, 20, 0 },
{ 0, 1, 35, 7, 78, 14, 0 },
{ 0, 2, 45, 7, 72, 9, 0 },
{ 0, 4, 54, 7, 64, 6, 0 },
{ 0, 6, 64, 7, 54, 4, 0 },
{ 0, 9, 72, 7, 45, 2, 0 },
{ 0, 14, 78, 7, 35, 1, 0 },
{ 0, 20, 80, 7, 27, 1, 0 } } },
.ver_phase_arr = {
.even = { { 1, 23, 80, 7, 23, 1, 0 },
{ 0, 1, 31, 7, 79, 17, 0 },
{ 0, 2, 40, 7, 75, 11, 0 },
{ 0, 3, 50, 7, 67, 8, 0 },
{ 0, 5, 59, 7, 59, 5, 0 },
{ 0, 8, 67, 7, 50, 3, 0 },
{ 0, 11, 75, 7, 40, 2, 0 },
{ 0, 17, 79, 7, 31, 1, 0 } },
.odd = { { 0, 1, 27, 7, 80, 20, 0 },
{ 0, 1, 35, 7, 78, 14, 0 },
{ 0, 2, 45, 7, 72, 9, 0 },
{ 0, 4, 54, 7, 64, 6, 0 },
{ 0, 6, 64, 7, 54, 4, 0 },
{ 0, 9, 72, 7, 45, 2, 0 },
{ 0, 14, 78, 7, 35, 1, 0 },
{ 0, 20, 80, 7, 27, 1, 0 } } },
.ptrn_arr = { { 0x92492493, 0x924 } },
.sample_patrn_length = 46,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 61) = 0.344086 */
.hor_phase_arr = {
.even = { { 1, 23, 80, 7, 23, 1, 0 },
{ 0, 1, 29, 7, 80, 18, 0 },
{ 0, 1, 36, 7, 77, 14, 0 },
{ 0, 2, 42, 7, 73, 11, 0 },
{ 0, 3, 50, 7, 67, 8, 0 },
{ 0, 5, 57, 7, 60, 6, 0 },
{ 0, 6, 64, 7, 54, 4, 0 },
{ 0, 9, 69, 7, 47, 3, 0 },
{ 0, 12, 74, 7, 40, 2, 0 },
{ 0, 16, 78, 7, 33, 1, 0 },
{ 0, 20, 80, 7, 27, 1, 0 },
{ 0, 1, 25, 7, 79, 22, 1 },
{ 0, 1, 31, 7, 79, 17, 0 },
{ 0, 2, 38, 7, 75, 13, 0 },
{ 0, 2, 45, 7, 71, 10, 0 },
{ 0, 4, 52, 7, 65, 7, 0 },
{ 0, 5, 59, 7, 59, 5, 0 },
{ 0, 7, 65, 7, 52, 4, 0 },
{ 0, 10, 71, 7, 45, 2, 0 },
{ 0, 13, 75, 7, 38, 2, 0 },
{ 0, 17, 79, 7, 31, 1, 0 },
{ 1, 22, 79, 7, 25, 1, 0 },
{ 0, 1, 27, 7, 80, 20, 0 },
{ 0, 1, 33, 7, 78, 16, 0 },
{ 0, 2, 40, 7, 74, 12, 0 },
{ 0, 3, 47, 7, 69, 9, 0 },
{ 0, 4, 54, 7, 64, 6, 0 },
{ 0, 6, 60, 7, 57, 5, 0 },
{ 0, 8, 67, 7, 50, 3, 0 },
{ 0, 11, 73, 7, 42, 2, 0 },
{ 0, 14, 77, 7, 36, 1, 0 },
{ 0, 18, 80, 7, 29, 1, 0 } },
.odd = { { 0, 1, 26, 7, 80, 21, 0 },
{ 0, 1, 32, 7, 79, 16, 0 },
{ 0, 2, 39, 7, 75, 12, 0 },
{ 0, 3, 46, 7, 70, 9, 0 },
{ 0, 4, 53, 7, 64, 7, 0 },
{ 0, 5, 60, 7, 58, 5, 0 },
{ 0, 8, 66, 7, 51, 3, 0 },
{ 0, 10, 72, 7, 44, 2, 0 },
{ 0, 14, 75, 7, 37, 2, 0 },
{ 0, 18, 79, 7, 30, 1, 0 },
{ 1, 23, 79, 7, 24, 1, 0 },
{ 0, 1, 28, 7, 80, 19, 0 },
{ 0, 1, 35, 7, 77, 15, 0 },
{ 0, 2, 41, 7, 74, 11, 0 },
{ 0, 3, 48, 7, 69, 8, 0 },
{ 0, 4, 55, 7, 63, 6, 0 },
{ 0, 6, 63, 7, 55, 4, 0 },
{ 0, 8, 69, 7, 48, 3, 0 },
{ 0, 11, 74, 7, 41, 2, 0 },
{ 0, 15, 77, 7, 35, 1, 0 },
{ 0, 19, 80, 7, 28, 1, 0 },
{ 0, 1, 24, 7, 79, 23, 1 },
{ 0, 1, 30, 7, 79, 18, 0 },
{ 0, 2, 37, 7, 75, 14, 0 },
{ 0, 2, 44, 7, 72, 10, 0 },
{ 0, 3, 51, 7, 66, 8, 0 },
{ 0, 5, 58, 7, 60, 5, 0 },
{ 0, 7, 64, 7, 53, 4, 0 },
{ 0, 9, 70, 7, 46, 3, 0 },
{ 0, 12, 75, 7, 39, 2, 0 },
{ 0, 16, 79, 7, 32, 1, 0 },
{ 0, 21, 80, 7, 26, 1, 0 } } },
.ver_phase_arr = {
.even = { { 1, 23, 80, 7, 23, 1, 0 },
{ 0, 1, 29, 7, 80, 18, 0 },
{ 0, 1, 36, 7, 77, 14, 0 },
{ 0, 2, 42, 7, 73, 11, 0 },
{ 0, 3, 50, 7, 67, 8, 0 },
{ 0, 5, 57, 7, 60, 6, 0 },
{ 0, 6, 64, 7, 54, 4, 0 },
{ 0, 9, 69, 7, 47, 3, 0 },
{ 0, 12, 74, 7, 40, 2, 0 },
{ 0, 16, 78, 7, 33, 1, 0 },
{ 0, 20, 80, 7, 27, 1, 0 },
{ 0, 1, 25, 7, 79, 22, 1 },
{ 0, 1, 31, 7, 79, 17, 0 },
{ 0, 2, 38, 7, 75, 13, 0 },
{ 0, 2, 45, 7, 71, 10, 0 },
{ 0, 4, 52, 7, 65, 7, 0 },
{ 0, 5, 59, 7, 59, 5, 0 },
{ 0, 7, 65, 7, 52, 4, 0 },
{ 0, 10, 71, 7, 45, 2, 0 },
{ 0, 13, 75, 7, 38, 2, 0 },
{ 0, 17, 79, 7, 31, 1, 0 },
{ 1, 22, 79, 7, 25, 1, 0 },
{ 0, 1, 27, 7, 80, 20, 0 },
{ 0, 1, 33, 7, 78, 16, 0 },
{ 0, 2, 40, 7, 74, 12, 0 },
{ 0, 3, 47, 7, 69, 9, 0 },
{ 0, 4, 54, 7, 64, 6, 0 },
{ 0, 6, 60, 7, 57, 5, 0 },
{ 0, 8, 67, 7, 50, 3, 0 },
{ 0, 11, 73, 7, 42, 2, 0 },
{ 0, 14, 77, 7, 36, 1, 0 },
{ 0, 18, 80, 7, 29, 1, 0 } },
.odd = { { 0, 1, 26, 7, 80, 21, 0 },
{ 0, 1, 32, 7, 79, 16, 0 },
{ 0, 2, 39, 7, 75, 12, 0 },
{ 0, 3, 46, 7, 70, 9, 0 },
{ 0, 4, 53, 7, 64, 7, 0 },
{ 0, 5, 60, 7, 58, 5, 0 },
{ 0, 8, 66, 7, 51, 3, 0 },
{ 0, 10, 72, 7, 44, 2, 0 },
{ 0, 14, 75, 7, 37, 2, 0 },
{ 0, 18, 79, 7, 30, 1, 0 },
{ 1, 23, 79, 7, 24, 1, 0 },
{ 0, 1, 28, 7, 80, 19, 0 },
{ 0, 1, 35, 7, 77, 15, 0 },
{ 0, 2, 41, 7, 74, 11, 0 },
{ 0, 3, 48, 7, 69, 8, 0 },
{ 0, 4, 55, 7, 63, 6, 0 },
{ 0, 6, 63, 7, 55, 4, 0 },
{ 0, 8, 69, 7, 48, 3, 0 },
{ 0, 11, 74, 7, 41, 2, 0 },
{ 0, 15, 77, 7, 35, 1, 0 },
{ 0, 19, 80, 7, 28, 1, 0 },
{ 0, 1, 24, 7, 79, 23, 1 },
{ 0, 1, 30, 7, 79, 18, 0 },
{ 0, 2, 37, 7, 75, 14, 0 },
{ 0, 2, 44, 7, 72, 10, 0 },
{ 0, 3, 51, 7, 66, 8, 0 },
{ 0, 5, 58, 7, 60, 5, 0 },
{ 0, 7, 64, 7, 53, 4, 0 },
{ 0, 9, 70, 7, 46, 3, 0 },
{ 0, 12, 75, 7, 39, 2, 0 },
{ 0, 16, 79, 7, 32, 1, 0 },
{ 0, 21, 80, 7, 26, 1, 0 } } },
.ptrn_arr = { { 0x92492493, 0x64924924, 0x92492492, 0x4c924924,
0x92492492, 0x924924 } },
.sample_patrn_length = 186,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 62) = 0.340426 */
.hor_phase_arr = {
.even = { { 1, 24, 78, 7, 24, 1, 0 },
{ 0, 1, 28, 7, 79, 20, 0 },
{ 0, 1, 32, 7, 78, 17, 0 },
{ 0, 2, 36, 7, 75, 15, 0 },
{ 0, 2, 40, 7, 74, 12, 0 },
{ 0, 3, 45, 7, 70, 10, 0 },
{ 0, 3, 50, 7, 67, 8, 0 },
{ 0, 4, 54, 7, 63, 7, 0 },
{ 0, 5, 59, 7, 59, 5, 0 },
{ 0, 7, 63, 7, 54, 4, 0 },
{ 0, 8, 67, 7, 50, 3, 0 },
{ 0, 10, 70, 7, 45, 3, 0 },
{ 0, 12, 74, 7, 40, 2, 0 },
{ 0, 15, 75, 7, 36, 2, 0 },
{ 0, 17, 78, 7, 32, 1, 0 },
{ 0, 20, 79, 7, 28, 1, 0 } },
.odd = { { 0, 1, 26, 7, 78, 22, 1 },
{ 0, 1, 30, 7, 78, 19, 0 },
{ 0, 1, 34, 7, 77, 16, 0 },
{ 0, 2, 38, 7, 75, 13, 0 },
{ 0, 2, 43, 7, 72, 11, 0 },
{ 0, 3, 47, 7, 69, 9, 0 },
{ 0, 4, 52, 7, 65, 7, 0 },
{ 0, 5, 56, 7, 61, 6, 0 },
{ 0, 6, 61, 7, 56, 5, 0 },
{ 0, 7, 65, 7, 52, 4, 0 },
{ 0, 9, 69, 7, 47, 3, 0 },
{ 0, 11, 72, 7, 43, 2, 0 },
{ 0, 13, 75, 7, 38, 2, 0 },
{ 0, 16, 77, 7, 34, 1, 0 },
{ 0, 19, 78, 7, 30, 1, 0 },
{ 1, 22, 78, 7, 26, 1, 0 } } },
.ver_phase_arr = {
.even = { { 1, 24, 78, 7, 24, 1, 0 },
{ 0, 1, 28, 7, 79, 20, 0 },
{ 0, 1, 32, 7, 78, 17, 0 },
{ 0, 2, 36, 7, 75, 15, 0 },
{ 0, 2, 40, 7, 74, 12, 0 },
{ 0, 3, 45, 7, 70, 10, 0 },
{ 0, 3, 50, 7, 67, 8, 0 },
{ 0, 4, 54, 7, 63, 7, 0 },
{ 0, 5, 59, 7, 59, 5, 0 },
{ 0, 7, 63, 7, 54, 4, 0 },
{ 0, 8, 67, 7, 50, 3, 0 },
{ 0, 10, 70, 7, 45, 3, 0 },
{ 0, 12, 74, 7, 40, 2, 0 },
{ 0, 15, 75, 7, 36, 2, 0 },
{ 0, 17, 78, 7, 32, 1, 0 },
{ 0, 20, 79, 7, 28, 1, 0 } },
.odd = { { 0, 1, 26, 7, 78, 22, 1 },
{ 0, 1, 30, 7, 78, 19, 0 },
{ 0, 1, 34, 7, 77, 16, 0 },
{ 0, 2, 38, 7, 75, 13, 0 },
{ 0, 2, 43, 7, 72, 11, 0 },
{ 0, 3, 47, 7, 69, 9, 0 },
{ 0, 4, 52, 7, 65, 7, 0 },
{ 0, 5, 56, 7, 61, 6, 0 },
{ 0, 6, 61, 7, 56, 5, 0 },
{ 0, 7, 65, 7, 52, 4, 0 },
{ 0, 9, 69, 7, 47, 3, 0 },
{ 0, 11, 72, 7, 43, 2, 0 },
{ 0, 13, 75, 7, 38, 2, 0 },
{ 0, 16, 77, 7, 34, 1, 0 },
{ 0, 19, 78, 7, 30, 1, 0 },
{ 1, 22, 78, 7, 26, 1, 0 } } },
.ptrn_arr = { { 0x92492493, 0x24924924, 0x9249249 } },
.sample_patrn_length = 94,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 63) = 0.336842 */
.hor_phase_arr = {
.even = { { 1, 24, 78, 7, 24, 1, 0 },
{ 0, 1, 26, 7, 78, 22, 1 },
{ 0, 1, 28, 7, 77, 21, 1 },
{ 0, 1, 30, 7, 78, 19, 0 },
{ 0, 1, 32, 7, 77, 18, 0 },
{ 0, 1, 34, 7, 77, 16, 0 },
{ 0, 2, 36, 7, 75, 15, 0 },
{ 0, 2, 38, 7, 74, 14, 0 },
{ 0, 2, 40, 7, 73, 13, 0 },
{ 0, 2, 43, 7, 72, 11, 0 },
{ 0, 3, 45, 7, 70, 10, 0 },
{ 0, 3, 47, 7, 69, 9, 0 },
{ 0, 4, 49, 7, 66, 9, 0 },
{ 0, 4, 52, 7, 64, 8, 0 },
{ 0, 4, 54, 7, 63, 7, 0 },
{ 0, 5, 56, 7, 61, 6, 0 },
{ 0, 6, 58, 7, 58, 6, 0 },
{ 0, 6, 61, 7, 56, 5, 0 },
{ 0, 7, 63, 7, 54, 4, 0 },
{ 0, 8, 64, 7, 52, 4, 0 },
{ 0, 9, 66, 7, 49, 4, 0 },
{ 0, 9, 69, 7, 47, 3, 0 },
{ 0, 10, 70, 7, 45, 3, 0 },
{ 0, 11, 72, 7, 43, 2, 0 },
{ 0, 13, 73, 7, 40, 2, 0 },
{ 0, 14, 74, 7, 38, 2, 0 },
{ 0, 15, 75, 7, 36, 2, 0 },
{ 0, 16, 77, 7, 34, 1, 0 },
{ 0, 18, 77, 7, 32, 1, 0 },
{ 0, 19, 78, 7, 30, 1, 0 },
{ 1, 21, 77, 7, 28, 1, 0 },
{ 1, 22, 78, 7, 26, 1, 0 } },
.odd = { { 0, 1, 25, 7, 78, 23, 1 },
{ 0, 1, 27, 7, 77, 22, 1 },
{ 0, 1, 29, 7, 78, 20, 0 },
{ 0, 1, 31, 7, 78, 18, 0 },
{ 0, 1, 33, 7, 77, 17, 0 },
{ 0, 2, 35, 7, 75, 16, 0 },
{ 0, 2, 37, 7, 75, 14, 0 },
{ 0, 2, 39, 7, 74, 13, 0 },
{ 0, 2, 42, 7, 72, 12, 0 },
{ 0, 3, 44, 7, 70, 11, 0 },
{ 0, 3, 46, 7, 69, 10, 0 },
{ 0, 3, 48, 7, 68, 9, 0 },
{ 0, 4, 51, 7, 65, 8, 0 },
{ 0, 4, 53, 7, 64, 7, 0 },
{ 0, 5, 55, 7, 61, 7, 0 },
{ 0, 5, 57, 7, 60, 6, 0 },
{ 0, 6, 60, 7, 57, 5, 0 },
{ 0, 7, 61, 7, 55, 5, 0 },
{ 0, 7, 64, 7, 53, 4, 0 },
{ 0, 8, 65, 7, 51, 4, 0 },
{ 0, 9, 68, 7, 48, 3, 0 },
{ 0, 10, 69, 7, 46, 3, 0 },
{ 0, 11, 70, 7, 44, 3, 0 },
{ 0, 12, 72, 7, 42, 2, 0 },
{ 0, 13, 74, 7, 39, 2, 0 },
{ 0, 14, 75, 7, 37, 2, 0 },
{ 0, 16, 75, 7, 35, 2, 0 },
{ 0, 17, 77, 7, 33, 1, 0 },
{ 0, 18, 78, 7, 31, 1, 0 },
{ 0, 20, 78, 7, 29, 1, 0 },
{ 1, 22, 77, 7, 27, 1, 0 },
{ 1, 23, 78, 7, 25, 1, 0 } } },
.ver_phase_arr = {
.even = { { 1, 24, 78, 7, 24, 1, 0 },
{ 0, 1, 26, 7, 78, 22, 1 },
{ 0, 1, 28, 7, 77, 21, 1 },
{ 0, 1, 30, 7, 78, 19, 0 },
{ 0, 1, 32, 7, 77, 18, 0 },
{ 0, 1, 34, 7, 77, 16, 0 },
{ 0, 2, 36, 7, 75, 15, 0 },
{ 0, 2, 38, 7, 74, 14, 0 },
{ 0, 2, 40, 7, 73, 13, 0 },
{ 0, 2, 43, 7, 72, 11, 0 },
{ 0, 3, 45, 7, 70, 10, 0 },
{ 0, 3, 47, 7, 69, 9, 0 },
{ 0, 4, 49, 7, 66, 9, 0 },
{ 0, 4, 52, 7, 64, 8, 0 },
{ 0, 4, 54, 7, 63, 7, 0 },
{ 0, 5, 56, 7, 61, 6, 0 },
{ 0, 6, 58, 7, 58, 6, 0 },
{ 0, 6, 61, 7, 56, 5, 0 },
{ 0, 7, 63, 7, 54, 4, 0 },
{ 0, 8, 64, 7, 52, 4, 0 },
{ 0, 9, 66, 7, 49, 4, 0 },
{ 0, 9, 69, 7, 47, 3, 0 },
{ 0, 10, 70, 7, 45, 3, 0 },
{ 0, 11, 72, 7, 43, 2, 0 },
{ 0, 13, 73, 7, 40, 2, 0 },
{ 0, 14, 74, 7, 38, 2, 0 },
{ 0, 15, 75, 7, 36, 2, 0 },
{ 0, 16, 77, 7, 34, 1, 0 },
{ 0, 18, 77, 7, 32, 1, 0 },
{ 0, 19, 78, 7, 30, 1, 0 },
{ 1, 21, 77, 7, 28, 1, 0 },
{ 1, 22, 78, 7, 26, 1, 0 } },
.odd = { { 0, 1, 25, 7, 78, 23, 1 },
{ 0, 1, 27, 7, 77, 22, 1 },
{ 0, 1, 29, 7, 78, 20, 0 },
{ 0, 1, 31, 7, 78, 18, 0 },
{ 0, 1, 33, 7, 77, 17, 0 },
{ 0, 2, 35, 7, 75, 16, 0 },
{ 0, 2, 37, 7, 75, 14, 0 },
{ 0, 2, 39, 7, 74, 13, 0 },
{ 0, 2, 42, 7, 72, 12, 0 },
{ 0, 3, 44, 7, 70, 11, 0 },
{ 0, 3, 46, 7, 69, 10, 0 },
{ 0, 3, 48, 7, 68, 9, 0 },
{ 0, 4, 51, 7, 65, 8, 0 },
{ 0, 4, 53, 7, 64, 7, 0 },
{ 0, 5, 55, 7, 61, 7, 0 },
{ 0, 5, 57, 7, 60, 6, 0 },
{ 0, 6, 60, 7, 57, 5, 0 },
{ 0, 7, 61, 7, 55, 5, 0 },
{ 0, 7, 64, 7, 53, 4, 0 },
{ 0, 8, 65, 7, 51, 4, 0 },
{ 0, 9, 68, 7, 48, 3, 0 },
{ 0, 10, 69, 7, 46, 3, 0 },
{ 0, 11, 70, 7, 44, 3, 0 },
{ 0, 12, 72, 7, 42, 2, 0 },
{ 0, 13, 74, 7, 39, 2, 0 },
{ 0, 14, 75, 7, 37, 2, 0 },
{ 0, 16, 75, 7, 35, 2, 0 },
{ 0, 17, 77, 7, 33, 1, 0 },
{ 0, 18, 78, 7, 31, 1, 0 },
{ 0, 20, 78, 7, 29, 1, 0 },
{ 1, 22, 77, 7, 27, 1, 0 },
{ 1, 23, 78, 7, 25, 1, 0 } } },
.ptrn_arr = { { 0x92492493, 0x24924924, 0x49249249, 0x92492492,
0x24924924, 0x9249249 } },
.sample_patrn_length = 190,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
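	/*
	 * A regularity visible in these tables, stated as an observation and
	 * not as a documented rule: for a scale factor of 32 / (32 + n), the
	 * number of even/odd phases is 32 / gcd(32, 32 + n) and
	 * sample_patrn_length is 2 * (32 + n) / gcd(32, 32 + n). The 1/3 case
	 * below (n = 64, gcd = 32) therefore collapses to a single phase pair
	 * and a 6-sample pattern, while n = 63 (gcd = 1) above needs 32 phase
	 * pairs and a 190-sample pattern.
	 */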
/* Scale factor 32 / (32 + 64) = 0.333333 */
.hor_phase_arr = {
.even = { { 0, 21, 86, 7, 21, 0, 0 } },
.odd = { { 0, 4, 60, 7, 60, 4, 0 } } },
.ver_phase_arr = {
.even = { { 0, 21, 86, 7, 21, 0, 0 } },
.odd = { { 0, 4, 60, 7, 60, 4, 0 } } },
.ptrn_arr = { { 0x9 } },
.sample_patrn_length = 6,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 65) = 0.329897 */
.hor_phase_arr = {
.even = { { 0, 22, 84, 7, 22, 0, 0 },
{ 0, 20, 85, 7, 23, 0, 0 },
{ 0, 18, 84, 7, 25, 1, 0 },
{ 0, 17, 82, 7, 28, 1, 0 },
{ 0, 15, 82, 7, 30, 1, 0 },
{ 0, 14, 81, 7, 32, 1, 0 },
{ 0, 12, 81, 7, 34, 1, 0 },
{ 0, 11, 79, 7, 37, 1, 0 },
{ 0, 10, 78, 7, 39, 1, 0 },
{ 0, 9, 75, 7, 42, 2, 0 },
{ 0, 8, 74, 7, 44, 2, 0 },
{ 0, 7, 72, 7, 47, 2, 0 },
{ 0, 6, 70, 7, 50, 2, 0 },
{ 0, 6, 67, 7, 52, 3, 0 },
{ 0, 5, 65, 7, 55, 3, 0 },
{ 0, 4, 64, 7, 57, 3, 0 },
{ 0, 4, 60, 7, 60, 4, 0 },
{ 0, 3, 57, 7, 64, 4, 0 },
{ 0, 3, 55, 7, 65, 5, 0 },
{ 0, 3, 52, 7, 67, 6, 0 },
{ 0, 2, 50, 7, 70, 6, 0 },
{ 0, 2, 47, 7, 72, 7, 0 },
{ 0, 2, 44, 7, 74, 8, 0 },
{ 0, 2, 42, 7, 75, 9, 0 },
{ 0, 1, 39, 7, 78, 10, 0 },
{ 0, 1, 37, 7, 79, 11, 0 },
{ 0, 1, 34, 7, 81, 12, 0 },
{ 0, 1, 32, 7, 81, 14, 0 },
{ 0, 1, 30, 7, 82, 15, 0 },
{ 0, 1, 28, 7, 82, 17, 0 },
{ 0, 1, 25, 7, 84, 18, 0 },
{ 0, 0, 23, 7, 85, 20, 0 } },
.odd = { { 0, 21, 84, 7, 23, 0, 0 },
{ 0, 19, 85, 7, 24, 0, 0 },
{ 0, 17, 84, 7, 26, 1, 0 },
{ 0, 16, 82, 7, 29, 1, 0 },
{ 0, 14, 82, 7, 31, 1, 0 },
{ 0, 13, 81, 7, 33, 1, 0 },
{ 0, 12, 80, 7, 35, 1, 0 },
{ 0, 11, 78, 7, 38, 1, 0 },
{ 0, 10, 77, 7, 40, 1, 0 },
{ 0, 9, 74, 7, 43, 2, 0 },
{ 0, 8, 72, 7, 46, 2, 0 },
{ 0, 7, 71, 7, 48, 2, 0 },
{ 0, 6, 69, 7, 51, 2, 0 },
{ 0, 5, 66, 7, 54, 3, 0 },
{ 0, 5, 64, 7, 56, 3, 0 },
{ 0, 4, 61, 7, 59, 4, 0 },
{ 0, 4, 59, 7, 61, 4, 0 },
{ 0, 3, 56, 7, 64, 5, 0 },
{ 0, 3, 54, 7, 66, 5, 0 },
{ 0, 2, 51, 7, 69, 6, 0 },
{ 0, 2, 48, 7, 71, 7, 0 },
{ 0, 2, 46, 7, 72, 8, 0 },
{ 0, 2, 43, 7, 74, 9, 0 },
{ 0, 1, 40, 7, 77, 10, 0 },
{ 0, 1, 38, 7, 78, 11, 0 },
{ 0, 1, 35, 7, 80, 12, 0 },
{ 0, 1, 33, 7, 81, 13, 0 },
{ 0, 1, 31, 7, 82, 14, 0 },
{ 0, 1, 29, 7, 82, 16, 0 },
{ 0, 1, 26, 7, 84, 17, 0 },
{ 0, 0, 24, 7, 85, 19, 0 },
{ 0, 0, 23, 7, 84, 21, 0 } } },
.ver_phase_arr = {
.even = { { 0, 22, 84, 7, 22, 0, 0 },
{ 0, 20, 85, 7, 23, 0, 0 },
{ 0, 18, 84, 7, 25, 1, 0 },
{ 0, 17, 82, 7, 28, 1, 0 },
{ 0, 15, 82, 7, 30, 1, 0 },
{ 0, 14, 81, 7, 32, 1, 0 },
{ 0, 12, 81, 7, 34, 1, 0 },
{ 0, 11, 79, 7, 37, 1, 0 },
{ 0, 10, 78, 7, 39, 1, 0 },
{ 0, 9, 75, 7, 42, 2, 0 },
{ 0, 8, 74, 7, 44, 2, 0 },
{ 0, 7, 72, 7, 47, 2, 0 },
{ 0, 6, 70, 7, 50, 2, 0 },
{ 0, 6, 67, 7, 52, 3, 0 },
{ 0, 5, 65, 7, 55, 3, 0 },
{ 0, 4, 64, 7, 57, 3, 0 },
{ 0, 4, 60, 7, 60, 4, 0 },
{ 0, 3, 57, 7, 64, 4, 0 },
{ 0, 3, 55, 7, 65, 5, 0 },
{ 0, 3, 52, 7, 67, 6, 0 },
{ 0, 2, 50, 7, 70, 6, 0 },
{ 0, 2, 47, 7, 72, 7, 0 },
{ 0, 2, 44, 7, 74, 8, 0 },
{ 0, 2, 42, 7, 75, 9, 0 },
{ 0, 1, 39, 7, 78, 10, 0 },
{ 0, 1, 37, 7, 79, 11, 0 },
{ 0, 1, 34, 7, 81, 12, 0 },
{ 0, 1, 32, 7, 81, 14, 0 },
{ 0, 1, 30, 7, 82, 15, 0 },
{ 0, 1, 28, 7, 82, 17, 0 },
{ 0, 1, 25, 7, 84, 18, 0 },
{ 0, 0, 23, 7, 85, 20, 0 } },
.odd = { { 0, 21, 84, 7, 23, 0, 0 },
{ 0, 19, 85, 7, 24, 0, 0 },
{ 0, 17, 84, 7, 26, 1, 0 },
{ 0, 16, 82, 7, 29, 1, 0 },
{ 0, 14, 82, 7, 31, 1, 0 },
{ 0, 13, 81, 7, 33, 1, 0 },
{ 0, 12, 80, 7, 35, 1, 0 },
{ 0, 11, 78, 7, 38, 1, 0 },
{ 0, 10, 77, 7, 40, 1, 0 },
{ 0, 9, 74, 7, 43, 2, 0 },
{ 0, 8, 72, 7, 46, 2, 0 },
{ 0, 7, 71, 7, 48, 2, 0 },
{ 0, 6, 69, 7, 51, 2, 0 },
{ 0, 5, 66, 7, 54, 3, 0 },
{ 0, 5, 64, 7, 56, 3, 0 },
{ 0, 4, 61, 7, 59, 4, 0 },
{ 0, 4, 59, 7, 61, 4, 0 },
{ 0, 3, 56, 7, 64, 5, 0 },
{ 0, 3, 54, 7, 66, 5, 0 },
{ 0, 2, 51, 7, 69, 6, 0 },
{ 0, 2, 48, 7, 71, 7, 0 },
{ 0, 2, 46, 7, 72, 8, 0 },
{ 0, 2, 43, 7, 74, 9, 0 },
{ 0, 1, 40, 7, 77, 10, 0 },
{ 0, 1, 38, 7, 78, 11, 0 },
{ 0, 1, 35, 7, 80, 12, 0 },
{ 0, 1, 33, 7, 81, 13, 0 },
{ 0, 1, 31, 7, 82, 14, 0 },
{ 0, 1, 29, 7, 82, 16, 0 },
{ 0, 1, 26, 7, 84, 17, 0 },
{ 0, 0, 24, 7, 85, 19, 0 },
{ 0, 0, 23, 7, 84, 21, 0 } } },
.ptrn_arr = { { 0x49249249, 0x92492492, 0x24924924, 0x49249249,
0x92492492, 0x24924924 } },
.sample_patrn_length = 194,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 66) = 0.326531 */
.hor_phase_arr = {
.even = { { 0, 22, 84, 7, 22, 0, 0 },
{ 0, 18, 83, 7, 26, 1, 0 },
{ 0, 15, 82, 7, 30, 1, 0 },
{ 0, 13, 79, 7, 35, 1, 0 },
{ 0, 10, 78, 7, 39, 1, 0 },
{ 0, 8, 74, 7, 44, 2, 0 },
{ 0, 7, 69, 7, 50, 2, 0 },
{ 0, 5, 65, 7, 55, 3, 0 },
{ 0, 4, 60, 7, 60, 4, 0 },
{ 0, 3, 55, 7, 65, 5, 0 },
{ 0, 2, 50, 7, 69, 7, 0 },
{ 0, 2, 44, 7, 74, 8, 0 },
{ 0, 1, 39, 7, 78, 10, 0 },
{ 0, 1, 35, 7, 79, 13, 0 },
{ 0, 1, 30, 7, 82, 15, 0 },
{ 0, 1, 26, 7, 83, 18, 0 } },
.odd = { { 0, 20, 84, 7, 24, 0, 0 },
{ 0, 17, 82, 7, 28, 1, 0 },
{ 0, 14, 81, 7, 32, 1, 0 },
{ 0, 12, 78, 7, 37, 1, 0 },
{ 0, 9, 75, 7, 42, 2, 0 },
{ 0, 8, 71, 7, 47, 2, 0 },
{ 0, 6, 67, 7, 52, 3, 0 },
{ 0, 5, 62, 7, 57, 4, 0 },
{ 0, 4, 57, 7, 62, 5, 0 },
{ 0, 3, 52, 7, 67, 6, 0 },
{ 0, 2, 47, 7, 71, 8, 0 },
{ 0, 2, 42, 7, 75, 9, 0 },
{ 0, 1, 37, 7, 78, 12, 0 },
{ 0, 1, 32, 7, 81, 14, 0 },
{ 0, 1, 28, 7, 82, 17, 0 },
{ 0, 0, 24, 7, 84, 20, 0 } } },
.ver_phase_arr = {
.even = { { 0, 22, 84, 7, 22, 0, 0 },
{ 0, 18, 83, 7, 26, 1, 0 },
{ 0, 15, 82, 7, 30, 1, 0 },
{ 0, 13, 79, 7, 35, 1, 0 },
{ 0, 10, 78, 7, 39, 1, 0 },
{ 0, 8, 74, 7, 44, 2, 0 },
{ 0, 7, 69, 7, 50, 2, 0 },
{ 0, 5, 65, 7, 55, 3, 0 },
{ 0, 4, 60, 7, 60, 4, 0 },
{ 0, 3, 55, 7, 65, 5, 0 },
{ 0, 2, 50, 7, 69, 7, 0 },
{ 0, 2, 44, 7, 74, 8, 0 },
{ 0, 1, 39, 7, 78, 10, 0 },
{ 0, 1, 35, 7, 79, 13, 0 },
{ 0, 1, 30, 7, 82, 15, 0 },
{ 0, 1, 26, 7, 83, 18, 0 } },
.odd = { { 0, 20, 84, 7, 24, 0, 0 },
{ 0, 17, 82, 7, 28, 1, 0 },
{ 0, 14, 81, 7, 32, 1, 0 },
{ 0, 12, 78, 7, 37, 1, 0 },
{ 0, 9, 75, 7, 42, 2, 0 },
{ 0, 8, 71, 7, 47, 2, 0 },
{ 0, 6, 67, 7, 52, 3, 0 },
{ 0, 5, 62, 7, 57, 4, 0 },
{ 0, 4, 57, 7, 62, 5, 0 },
{ 0, 3, 52, 7, 67, 6, 0 },
{ 0, 2, 47, 7, 71, 8, 0 },
{ 0, 2, 42, 7, 75, 9, 0 },
{ 0, 1, 37, 7, 78, 12, 0 },
{ 0, 1, 32, 7, 81, 14, 0 },
{ 0, 1, 28, 7, 82, 17, 0 },
{ 0, 0, 24, 7, 84, 20, 0 } } },
.ptrn_arr = { { 0x49249249, 0x92492492, 0x24924924 } },
.sample_patrn_length = 98,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 67) = 0.323232 */
.hor_phase_arr = {
.even = { { 0, 22, 84, 7, 22, 0, 0 },
{ 0, 17, 82, 7, 28, 1, 0 },
{ 0, 13, 79, 7, 35, 1, 0 },
{ 0, 10, 74, 7, 42, 2, 0 },
{ 0, 7, 68, 7, 50, 3, 0 },
{ 0, 5, 62, 7, 57, 4, 0 },
{ 0, 3, 55, 7, 64, 6, 0 },
{ 0, 2, 47, 7, 71, 8, 0 },
{ 0, 2, 40, 7, 75, 11, 0 },
{ 0, 1, 33, 7, 80, 14, 0 },
{ 0, 1, 26, 7, 82, 19, 0 },
{ 0, 21, 82, 7, 24, 1, 0 },
{ 0, 16, 81, 7, 30, 1, 0 },
{ 0, 12, 78, 7, 37, 1, 0 },
{ 0, 9, 72, 7, 45, 2, 0 },
{ 0, 6, 67, 7, 52, 3, 0 },
{ 0, 4, 60, 7, 60, 4, 0 },
{ 0, 3, 52, 7, 67, 6, 0 },
{ 0, 2, 45, 7, 72, 9, 0 },
{ 0, 1, 37, 7, 78, 12, 0 },
{ 0, 1, 30, 7, 81, 16, 0 },
{ 0, 1, 24, 7, 82, 21, 0 },
{ 0, 19, 82, 7, 26, 1, 0 },
{ 0, 14, 80, 7, 33, 1, 0 },
{ 0, 11, 75, 7, 40, 2, 0 },
{ 0, 8, 71, 7, 47, 2, 0 },
{ 0, 6, 64, 7, 55, 3, 0 },
{ 0, 4, 57, 7, 62, 5, 0 },
{ 0, 3, 50, 7, 68, 7, 0 },
{ 0, 2, 42, 7, 74, 10, 0 },
{ 0, 1, 35, 7, 79, 13, 0 },
{ 0, 1, 28, 7, 82, 17, 0 } },
.odd = { { 0, 20, 82, 7, 25, 1, 0 },
{ 0, 15, 81, 7, 31, 1, 0 },
{ 0, 11, 78, 7, 38, 1, 0 },
{ 0, 8, 72, 7, 46, 2, 0 },
{ 0, 6, 66, 7, 53, 3, 0 },
{ 0, 4, 58, 7, 61, 5, 0 },
{ 0, 3, 51, 7, 67, 7, 0 },
{ 0, 2, 43, 7, 74, 9, 0 },
{ 0, 1, 36, 7, 79, 12, 0 },
{ 0, 1, 29, 7, 81, 17, 0 },
{ 0, 0, 23, 7, 84, 21, 0 },
{ 0, 18, 82, 7, 27, 1, 0 },
{ 0, 14, 79, 7, 34, 1, 0 },
{ 0, 10, 75, 7, 41, 2, 0 },
{ 0, 7, 71, 7, 48, 2, 0 },
{ 0, 5, 63, 7, 56, 4, 0 },
{ 0, 4, 56, 7, 63, 5, 0 },
{ 0, 2, 48, 7, 71, 7, 0 },
{ 0, 2, 41, 7, 75, 10, 0 },
{ 0, 1, 34, 7, 79, 14, 0 },
{ 0, 1, 27, 7, 82, 18, 0 },
{ 0, 21, 84, 7, 23, 0, 0 },
{ 0, 17, 81, 7, 29, 1, 0 },
{ 0, 12, 79, 7, 36, 1, 0 },
{ 0, 9, 74, 7, 43, 2, 0 },
{ 0, 7, 67, 7, 51, 3, 0 },
{ 0, 5, 61, 7, 58, 4, 0 },
{ 0, 3, 53, 7, 66, 6, 0 },
{ 0, 2, 46, 7, 72, 8, 0 },
{ 0, 1, 38, 7, 78, 11, 0 },
{ 0, 1, 31, 7, 81, 15, 0 },
{ 0, 1, 25, 7, 82, 20, 0 } } },
.ver_phase_arr = {
.even = { { 0, 22, 84, 7, 22, 0, 0 },
{ 0, 17, 82, 7, 28, 1, 0 },
{ 0, 13, 79, 7, 35, 1, 0 },
{ 0, 10, 74, 7, 42, 2, 0 },
{ 0, 7, 68, 7, 50, 3, 0 },
{ 0, 5, 62, 7, 57, 4, 0 },
{ 0, 3, 55, 7, 64, 6, 0 },
{ 0, 2, 47, 7, 71, 8, 0 },
{ 0, 2, 40, 7, 75, 11, 0 },
{ 0, 1, 33, 7, 80, 14, 0 },
{ 0, 1, 26, 7, 82, 19, 0 },
{ 0, 21, 82, 7, 24, 1, 0 },
{ 0, 16, 81, 7, 30, 1, 0 },
{ 0, 12, 78, 7, 37, 1, 0 },
{ 0, 9, 72, 7, 45, 2, 0 },
{ 0, 6, 67, 7, 52, 3, 0 },
{ 0, 4, 60, 7, 60, 4, 0 },
{ 0, 3, 52, 7, 67, 6, 0 },
{ 0, 2, 45, 7, 72, 9, 0 },
{ 0, 1, 37, 7, 78, 12, 0 },
{ 0, 1, 30, 7, 81, 16, 0 },
{ 0, 1, 24, 7, 82, 21, 0 },
{ 0, 19, 82, 7, 26, 1, 0 },
{ 0, 14, 80, 7, 33, 1, 0 },
{ 0, 11, 75, 7, 40, 2, 0 },
{ 0, 8, 71, 7, 47, 2, 0 },
{ 0, 6, 64, 7, 55, 3, 0 },
{ 0, 4, 57, 7, 62, 5, 0 },
{ 0, 3, 50, 7, 68, 7, 0 },
{ 0, 2, 42, 7, 74, 10, 0 },
{ 0, 1, 35, 7, 79, 13, 0 },
{ 0, 1, 28, 7, 82, 17, 0 } },
.odd = { { 0, 20, 82, 7, 25, 1, 0 },
{ 0, 15, 81, 7, 31, 1, 0 },
{ 0, 11, 78, 7, 38, 1, 0 },
{ 0, 8, 72, 7, 46, 2, 0 },
{ 0, 6, 66, 7, 53, 3, 0 },
{ 0, 4, 58, 7, 61, 5, 0 },
{ 0, 3, 51, 7, 67, 7, 0 },
{ 0, 2, 43, 7, 74, 9, 0 },
{ 0, 1, 36, 7, 79, 12, 0 },
{ 0, 1, 29, 7, 81, 17, 0 },
{ 0, 0, 23, 7, 84, 21, 0 },
{ 0, 18, 82, 7, 27, 1, 0 },
{ 0, 14, 79, 7, 34, 1, 0 },
{ 0, 10, 75, 7, 41, 2, 0 },
{ 0, 7, 71, 7, 48, 2, 0 },
{ 0, 5, 63, 7, 56, 4, 0 },
{ 0, 4, 56, 7, 63, 5, 0 },
{ 0, 2, 48, 7, 71, 7, 0 },
{ 0, 2, 41, 7, 75, 10, 0 },
{ 0, 1, 34, 7, 79, 14, 0 },
{ 0, 1, 27, 7, 82, 18, 0 },
{ 0, 21, 84, 7, 23, 0, 0 },
{ 0, 17, 81, 7, 29, 1, 0 },
{ 0, 12, 79, 7, 36, 1, 0 },
{ 0, 9, 74, 7, 43, 2, 0 },
{ 0, 7, 67, 7, 51, 3, 0 },
{ 0, 5, 61, 7, 58, 4, 0 },
{ 0, 3, 53, 7, 66, 6, 0 },
{ 0, 2, 46, 7, 72, 8, 0 },
{ 0, 1, 38, 7, 78, 11, 0 },
{ 0, 1, 31, 7, 81, 15, 0 },
{ 0, 1, 25, 7, 82, 20, 0 } } },
.ptrn_arr = { { 0x49249249, 0x92492492, 0x92492490, 0x24924924,
0x24924921, 0x49249249, 0x2 } },
.sample_patrn_length = 198,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 68) = 0.32 */
.hor_phase_arr = {
.even = { { 0, 23, 82, 7, 23, 0, 0 },
{ 0, 16, 80, 7, 31, 1, 0 },
{ 0, 11, 75, 7, 40, 2, 0 },
{ 0, 7, 68, 7, 50, 3, 0 },
{ 0, 5, 59, 7, 59, 5, 0 },
{ 0, 3, 50, 7, 68, 7, 0 },
{ 0, 2, 40, 7, 75, 11, 0 },
{ 0, 1, 31, 7, 80, 16, 0 } },
.odd = { { 0, 19, 81, 7, 27, 1, 0 },
{ 0, 13, 79, 7, 35, 1, 0 },
{ 0, 9, 72, 7, 45, 2, 0 },
{ 0, 6, 63, 7, 55, 4, 0 },
{ 0, 4, 55, 7, 63, 6, 0 },
{ 0, 2, 45, 7, 72, 9, 0 },
{ 0, 1, 35, 7, 79, 13, 0 },
{ 0, 1, 27, 7, 81, 19, 0 } } },
.ver_phase_arr = {
.even = { { 0, 23, 82, 7, 23, 0, 0 },
{ 0, 16, 80, 7, 31, 1, 0 },
{ 0, 11, 75, 7, 40, 2, 0 },
{ 0, 7, 68, 7, 50, 3, 0 },
{ 0, 5, 59, 7, 59, 5, 0 },
{ 0, 3, 50, 7, 68, 7, 0 },
{ 0, 2, 40, 7, 75, 11, 0 },
{ 0, 1, 31, 7, 80, 16, 0 } },
.odd = { { 0, 19, 81, 7, 27, 1, 0 },
{ 0, 13, 79, 7, 35, 1, 0 },
{ 0, 9, 72, 7, 45, 2, 0 },
{ 0, 6, 63, 7, 55, 4, 0 },
{ 0, 4, 55, 7, 63, 6, 0 },
{ 0, 2, 45, 7, 72, 9, 0 },
{ 0, 1, 35, 7, 79, 13, 0 },
{ 0, 1, 27, 7, 81, 19, 0 } } },
.ptrn_arr = { { 0x49249249, 0x2492 } },
.sample_patrn_length = 50,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 69) = 0.316832 */
.hor_phase_arr = {
.even = { { 1, 23, 80, 7, 23, 1, 0 },
{ 0, 15, 79, 7, 33, 1, 0 },
{ 0, 9, 72, 7, 45, 2, 0 },
{ 0, 5, 62, 7, 57, 4, 0 },
{ 0, 3, 50, 7, 67, 8, 0 },
{ 0, 2, 38, 7, 75, 13, 0 },
{ 0, 1, 27, 7, 80, 20, 0 },
{ 0, 18, 80, 7, 29, 1, 0 },
{ 0, 11, 75, 7, 40, 2, 0 },
{ 0, 7, 66, 7, 52, 3, 0 },
{ 0, 4, 54, 7, 64, 6, 0 },
{ 0, 2, 42, 7, 74, 10, 0 },
{ 0, 1, 31, 7, 79, 17, 0 },
{ 0, 21, 81, 7, 25, 1, 0 },
{ 0, 14, 78, 7, 35, 1, 0 },
{ 0, 8, 70, 7, 47, 3, 0 },
{ 0, 5, 59, 7, 59, 5, 0 },
{ 0, 3, 47, 7, 70, 8, 0 },
{ 0, 1, 35, 7, 78, 14, 0 },
{ 0, 1, 25, 7, 81, 21, 0 },
{ 0, 17, 79, 7, 31, 1, 0 },
{ 0, 10, 74, 7, 42, 2, 0 },
{ 0, 6, 64, 7, 54, 4, 0 },
{ 0, 3, 52, 7, 66, 7, 0 },
{ 0, 2, 40, 7, 75, 11, 0 },
{ 0, 1, 29, 7, 80, 18, 0 },
{ 0, 20, 80, 7, 27, 1, 0 },
{ 0, 13, 75, 7, 38, 2, 0 },
{ 0, 8, 67, 7, 50, 3, 0 },
{ 0, 4, 57, 7, 62, 5, 0 },
{ 0, 2, 45, 7, 72, 9, 0 },
{ 0, 1, 33, 7, 79, 15, 0 } },
.odd = { { 0, 19, 80, 7, 28, 1, 0 },
{ 0, 12, 75, 7, 39, 2, 0 },
{ 0, 7, 67, 7, 51, 3, 0 },
{ 0, 4, 56, 7, 62, 6, 0 },
{ 0, 2, 44, 7, 72, 10, 0 },
{ 0, 1, 32, 7, 79, 16, 0 },
{ 0, 22, 81, 7, 24, 1, 0 },
{ 0, 14, 79, 7, 34, 1, 0 },
{ 0, 9, 71, 7, 46, 2, 0 },
{ 0, 5, 60, 7, 58, 5, 0 },
{ 0, 3, 48, 7, 69, 8, 0 },
{ 0, 1, 36, 7, 78, 13, 0 },
{ 0, 1, 26, 7, 81, 20, 0 },
{ 0, 17, 80, 7, 30, 1, 0 },
{ 0, 11, 74, 7, 41, 2, 0 },
{ 0, 6, 65, 7, 53, 4, 0 },
{ 0, 4, 53, 7, 65, 6, 0 },
{ 0, 2, 41, 7, 74, 11, 0 },
{ 0, 1, 30, 7, 80, 17, 0 },
{ 0, 20, 81, 7, 26, 1, 0 },
{ 0, 13, 78, 7, 36, 1, 0 },
{ 0, 8, 69, 7, 48, 3, 0 },
{ 0, 5, 58, 7, 60, 5, 0 },
{ 0, 2, 46, 7, 71, 9, 0 },
{ 0, 1, 34, 7, 79, 14, 0 },
{ 0, 1, 24, 7, 81, 22, 0 },
{ 0, 16, 79, 7, 32, 1, 0 },
{ 0, 10, 72, 7, 44, 2, 0 },
{ 0, 6, 62, 7, 56, 4, 0 },
{ 0, 3, 51, 7, 67, 7, 0 },
{ 0, 2, 39, 7, 75, 12, 0 },
{ 0, 1, 28, 7, 80, 19, 0 } } },
.ver_phase_arr = {
.even = { { 1, 23, 80, 7, 23, 1, 0 },
{ 0, 15, 79, 7, 33, 1, 0 },
{ 0, 9, 72, 7, 45, 2, 0 },
{ 0, 5, 62, 7, 57, 4, 0 },
{ 0, 3, 50, 7, 67, 8, 0 },
{ 0, 2, 38, 7, 75, 13, 0 },
{ 0, 1, 27, 7, 80, 20, 0 },
{ 0, 18, 80, 7, 29, 1, 0 },
{ 0, 11, 75, 7, 40, 2, 0 },
{ 0, 7, 66, 7, 52, 3, 0 },
{ 0, 4, 54, 7, 64, 6, 0 },
{ 0, 2, 42, 7, 74, 10, 0 },
{ 0, 1, 31, 7, 79, 17, 0 },
{ 0, 21, 81, 7, 25, 1, 0 },
{ 0, 14, 78, 7, 35, 1, 0 },
{ 0, 8, 70, 7, 47, 3, 0 },
{ 0, 5, 59, 7, 59, 5, 0 },
{ 0, 3, 47, 7, 70, 8, 0 },
{ 0, 1, 35, 7, 78, 14, 0 },
{ 0, 1, 25, 7, 81, 21, 0 },
{ 0, 17, 79, 7, 31, 1, 0 },
{ 0, 10, 74, 7, 42, 2, 0 },
{ 0, 6, 64, 7, 54, 4, 0 },
{ 0, 3, 52, 7, 66, 7, 0 },
{ 0, 2, 40, 7, 75, 11, 0 },
{ 0, 1, 29, 7, 80, 18, 0 },
{ 0, 20, 80, 7, 27, 1, 0 },
{ 0, 13, 75, 7, 38, 2, 0 },
{ 0, 8, 67, 7, 50, 3, 0 },
{ 0, 4, 57, 7, 62, 5, 0 },
{ 0, 2, 45, 7, 72, 9, 0 },
{ 0, 1, 33, 7, 79, 15, 0 } },
.odd = { { 0, 19, 80, 7, 28, 1, 0 },
{ 0, 12, 75, 7, 39, 2, 0 },
{ 0, 7, 67, 7, 51, 3, 0 },
{ 0, 4, 56, 7, 62, 6, 0 },
{ 0, 2, 44, 7, 72, 10, 0 },
{ 0, 1, 32, 7, 79, 16, 0 },
{ 0, 22, 81, 7, 24, 1, 0 },
{ 0, 14, 79, 7, 34, 1, 0 },
{ 0, 9, 71, 7, 46, 2, 0 },
{ 0, 5, 60, 7, 58, 5, 0 },
{ 0, 3, 48, 7, 69, 8, 0 },
{ 0, 1, 36, 7, 78, 13, 0 },
{ 0, 1, 26, 7, 81, 20, 0 },
{ 0, 17, 80, 7, 30, 1, 0 },
{ 0, 11, 74, 7, 41, 2, 0 },
{ 0, 6, 65, 7, 53, 4, 0 },
{ 0, 4, 53, 7, 65, 6, 0 },
{ 0, 2, 41, 7, 74, 11, 0 },
{ 0, 1, 30, 7, 80, 17, 0 },
{ 0, 20, 81, 7, 26, 1, 0 },
{ 0, 13, 78, 7, 36, 1, 0 },
{ 0, 8, 69, 7, 48, 3, 0 },
{ 0, 5, 58, 7, 60, 5, 0 },
{ 0, 2, 46, 7, 71, 9, 0 },
{ 0, 1, 34, 7, 79, 14, 0 },
{ 0, 1, 24, 7, 81, 22, 0 },
{ 0, 16, 79, 7, 32, 1, 0 },
{ 0, 10, 72, 7, 44, 2, 0 },
{ 0, 6, 62, 7, 56, 4, 0 },
{ 0, 3, 51, 7, 67, 7, 0 },
{ 0, 2, 39, 7, 75, 12, 0 },
{ 0, 1, 28, 7, 80, 19, 0 } } },
.ptrn_arr = { { 0x49249249, 0x49249212, 0x49242492, 0x48492492,
0x92492492, 0x92492490, 0x24 } },
.sample_patrn_length = 202,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 70) = 0.313725 */
.hor_phase_arr = {
.even = { { 1, 23, 80, 7, 23, 1, 0 },
{ 0, 14, 77, 7, 36, 1, 0 },
{ 0, 8, 67, 7, 50, 3, 0 },
{ 0, 4, 54, 7, 64, 6, 0 },
{ 0, 2, 40, 7, 74, 12, 0 },
{ 0, 1, 27, 7, 80, 20, 0 },
{ 0, 17, 79, 7, 31, 1, 0 },
{ 0, 10, 71, 7, 45, 2, 0 },
{ 0, 5, 59, 7, 59, 5, 0 },
{ 0, 2, 45, 7, 71, 10, 0 },
{ 0, 1, 31, 7, 79, 17, 0 },
{ 0, 20, 80, 7, 27, 1, 0 },
{ 0, 12, 74, 7, 40, 2, 0 },
{ 0, 6, 64, 7, 54, 4, 0 },
{ 0, 3, 50, 7, 67, 8, 0 },
{ 0, 1, 36, 7, 77, 14, 0 } },
.odd = { { 0, 18, 80, 7, 29, 1, 0 },
{ 0, 11, 73, 7, 42, 2, 0 },
{ 0, 6, 61, 7, 57, 4, 0 },
{ 0, 3, 47, 7, 69, 9, 0 },
{ 0, 1, 33, 7, 79, 15, 0 },
{ 0, 22, 80, 7, 25, 1, 0 },
{ 0, 13, 75, 7, 38, 2, 0 },
{ 0, 7, 65, 7, 52, 4, 0 },
{ 0, 4, 52, 7, 65, 7, 0 },
{ 0, 2, 38, 7, 75, 13, 0 },
{ 0, 1, 25, 7, 80, 22, 0 },
{ 0, 15, 79, 7, 33, 1, 0 },
{ 0, 9, 69, 7, 47, 3, 0 },
{ 0, 4, 57, 7, 61, 6, 0 },
{ 0, 2, 42, 7, 73, 11, 0 },
{ 0, 1, 29, 7, 80, 18, 0 } } },
.ver_phase_arr = {
.even = { { 1, 23, 80, 7, 23, 1, 0 },
{ 0, 14, 77, 7, 36, 1, 0 },
{ 0, 8, 67, 7, 50, 3, 0 },
{ 0, 4, 54, 7, 64, 6, 0 },
{ 0, 2, 40, 7, 74, 12, 0 },
{ 0, 1, 27, 7, 80, 20, 0 },
{ 0, 17, 79, 7, 31, 1, 0 },
{ 0, 10, 71, 7, 45, 2, 0 },
{ 0, 5, 59, 7, 59, 5, 0 },
{ 0, 2, 45, 7, 71, 10, 0 },
{ 0, 1, 31, 7, 79, 17, 0 },
{ 0, 20, 80, 7, 27, 1, 0 },
{ 0, 12, 74, 7, 40, 2, 0 },
{ 0, 6, 64, 7, 54, 4, 0 },
{ 0, 3, 50, 7, 67, 8, 0 },
{ 0, 1, 36, 7, 77, 14, 0 } },
.odd = { { 0, 18, 80, 7, 29, 1, 0 },
{ 0, 11, 73, 7, 42, 2, 0 },
{ 0, 6, 61, 7, 57, 4, 0 },
{ 0, 3, 47, 7, 69, 9, 0 },
{ 0, 1, 33, 7, 79, 15, 0 },
{ 0, 22, 80, 7, 25, 1, 0 },
{ 0, 13, 75, 7, 38, 2, 0 },
{ 0, 7, 65, 7, 52, 4, 0 },
{ 0, 4, 52, 7, 65, 7, 0 },
{ 0, 2, 38, 7, 75, 13, 0 },
{ 0, 1, 25, 7, 80, 22, 0 },
{ 0, 15, 79, 7, 33, 1, 0 },
{ 0, 9, 69, 7, 47, 3, 0 },
{ 0, 4, 57, 7, 61, 6, 0 },
{ 0, 2, 42, 7, 73, 11, 0 },
{ 0, 1, 29, 7, 80, 18, 0 } } },
.ptrn_arr = { { 0x49249249, 0x49249248, 0x49249242, 0x2 } },
.sample_patrn_length = 102,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 71) = 0.31068 */
.hor_phase_arr = {
.even = { { 1, 24, 78, 7, 24, 1, 0 },
{ 0, 13, 75, 7, 38, 2, 0 },
{ 0, 7, 63, 7, 54, 4, 0 },
{ 0, 3, 47, 7, 69, 9, 0 },
{ 0, 1, 31, 7, 79, 17, 0 },
{ 0, 19, 79, 7, 29, 1, 0 },
{ 0, 10, 70, 7, 45, 3, 0 },
{ 0, 5, 56, 7, 61, 6, 0 },
{ 0, 2, 40, 7, 74, 12, 0 },
{ 0, 1, 26, 7, 78, 22, 1 },
{ 0, 14, 76, 7, 36, 2, 0 },
{ 0, 7, 65, 7, 52, 4, 0 },
{ 0, 3, 50, 7, 67, 8, 0 },
{ 0, 1, 34, 7, 77, 16, 0 },
{ 0, 20, 80, 7, 27, 1, 0 },
{ 0, 11, 72, 7, 43, 2, 0 },
{ 0, 5, 59, 7, 59, 5, 0 },
{ 0, 2, 43, 7, 72, 11, 0 },
{ 0, 1, 27, 7, 80, 20, 0 },
{ 0, 16, 77, 7, 34, 1, 0 },
{ 0, 8, 67, 7, 50, 3, 0 },
{ 0, 4, 52, 7, 65, 7, 0 },
{ 0, 2, 36, 7, 76, 14, 0 },
{ 1, 22, 78, 7, 26, 1, 0 },
{ 0, 12, 74, 7, 40, 2, 0 },
{ 0, 6, 61, 7, 56, 5, 0 },
{ 0, 3, 45, 7, 70, 10, 0 },
{ 0, 1, 29, 7, 79, 19, 0 },
{ 0, 17, 79, 7, 31, 1, 0 },
{ 0, 9, 69, 7, 47, 3, 0 },
{ 0, 4, 54, 7, 63, 7, 0 },
{ 0, 2, 38, 7, 75, 13, 0 } },
.odd = { { 0, 18, 79, 7, 30, 1, 0 },
{ 0, 9, 70, 7, 46, 3, 0 },
{ 0, 4, 55, 7, 63, 6, 0 },
{ 0, 2, 39, 7, 74, 13, 0 },
{ 0, 1, 25, 7, 78, 23, 1 },
{ 0, 14, 75, 7, 37, 2, 0 },
{ 0, 7, 64, 7, 53, 4, 0 },
{ 0, 3, 48, 7, 68, 9, 0 },
{ 0, 1, 33, 7, 77, 17, 0 },
{ 0, 20, 79, 7, 28, 1, 0 },
{ 0, 10, 72, 7, 44, 2, 0 },
{ 0, 5, 58, 7, 59, 6, 0 },
{ 0, 2, 41, 7, 74, 11, 0 },
{ 0, 1, 26, 7, 79, 21, 1 },
{ 0, 15, 77, 7, 35, 1, 0 },
{ 0, 8, 66, 7, 51, 3, 0 },
{ 0, 3, 51, 7, 66, 8, 0 },
{ 0, 1, 35, 7, 77, 15, 0 },
{ 1, 21, 79, 7, 26, 1, 0 },
{ 0, 11, 74, 7, 41, 2, 0 },
{ 0, 6, 59, 7, 58, 5, 0 },
{ 0, 2, 44, 7, 72, 10, 0 },
{ 0, 1, 28, 7, 79, 20, 0 },
{ 0, 17, 77, 7, 33, 1, 0 },
{ 0, 9, 68, 7, 48, 3, 0 },
{ 0, 4, 53, 7, 64, 7, 0 },
{ 0, 2, 37, 7, 75, 14, 0 },
{ 1, 23, 78, 7, 25, 1, 0 },
{ 0, 13, 74, 7, 39, 2, 0 },
{ 0, 6, 63, 7, 55, 4, 0 },
{ 0, 3, 46, 7, 70, 9, 0 },
{ 0, 1, 30, 7, 79, 18, 0 } } },
.ver_phase_arr = {
.even = { { 1, 24, 78, 7, 24, 1, 0 },
{ 0, 13, 75, 7, 38, 2, 0 },
{ 0, 7, 63, 7, 54, 4, 0 },
{ 0, 3, 47, 7, 69, 9, 0 },
{ 0, 1, 31, 7, 79, 17, 0 },
{ 0, 19, 79, 7, 29, 1, 0 },
{ 0, 10, 70, 7, 45, 3, 0 },
{ 0, 5, 56, 7, 61, 6, 0 },
{ 0, 2, 40, 7, 74, 12, 0 },
{ 0, 1, 26, 7, 78, 22, 1 },
{ 0, 14, 76, 7, 36, 2, 0 },
{ 0, 7, 65, 7, 52, 4, 0 },
{ 0, 3, 50, 7, 67, 8, 0 },
{ 0, 1, 34, 7, 77, 16, 0 },
{ 0, 20, 80, 7, 27, 1, 0 },
{ 0, 11, 72, 7, 43, 2, 0 },
{ 0, 5, 59, 7, 59, 5, 0 },
{ 0, 2, 43, 7, 72, 11, 0 },
{ 0, 1, 27, 7, 80, 20, 0 },
{ 0, 16, 77, 7, 34, 1, 0 },
{ 0, 8, 67, 7, 50, 3, 0 },
{ 0, 4, 52, 7, 65, 7, 0 },
{ 0, 2, 36, 7, 76, 14, 0 },
{ 1, 22, 78, 7, 26, 1, 0 },
{ 0, 12, 74, 7, 40, 2, 0 },
{ 0, 6, 61, 7, 56, 5, 0 },
{ 0, 3, 45, 7, 70, 10, 0 },
{ 0, 1, 29, 7, 79, 19, 0 },
{ 0, 17, 79, 7, 31, 1, 0 },
{ 0, 9, 69, 7, 47, 3, 0 },
{ 0, 4, 54, 7, 63, 7, 0 },
{ 0, 2, 38, 7, 75, 13, 0 } },
.odd = { { 0, 18, 79, 7, 30, 1, 0 },
{ 0, 9, 70, 7, 46, 3, 0 },
{ 0, 4, 55, 7, 63, 6, 0 },
{ 0, 2, 39, 7, 74, 13, 0 },
{ 0, 1, 25, 7, 78, 23, 1 },
{ 0, 14, 75, 7, 37, 2, 0 },
{ 0, 7, 64, 7, 53, 4, 0 },
{ 0, 3, 48, 7, 68, 9, 0 },
{ 0, 1, 33, 7, 77, 17, 0 },
{ 0, 20, 79, 7, 28, 1, 0 },
{ 0, 10, 72, 7, 44, 2, 0 },
{ 0, 5, 58, 7, 59, 6, 0 },
{ 0, 2, 41, 7, 74, 11, 0 },
{ 0, 1, 26, 7, 79, 21, 1 },
{ 0, 15, 77, 7, 35, 1, 0 },
{ 0, 8, 66, 7, 51, 3, 0 },
{ 0, 3, 51, 7, 66, 8, 0 },
{ 0, 1, 35, 7, 77, 15, 0 },
{ 1, 21, 79, 7, 26, 1, 0 },
{ 0, 11, 74, 7, 41, 2, 0 },
{ 0, 6, 59, 7, 58, 5, 0 },
{ 0, 2, 44, 7, 72, 10, 0 },
{ 0, 1, 28, 7, 79, 20, 0 },
{ 0, 17, 77, 7, 33, 1, 0 },
{ 0, 9, 68, 7, 48, 3, 0 },
{ 0, 4, 53, 7, 64, 7, 0 },
{ 0, 2, 37, 7, 75, 14, 0 },
{ 1, 23, 78, 7, 25, 1, 0 },
{ 0, 13, 74, 7, 39, 2, 0 },
{ 0, 6, 63, 7, 55, 4, 0 },
{ 0, 3, 46, 7, 70, 9, 0 },
{ 0, 1, 30, 7, 79, 18, 0 } } },
.ptrn_arr = { { 0x9249249, 0x21249249, 0x24249249, 0x24849249,
0x24909249, 0x24921249, 0x249 } },
.sample_patrn_length = 206,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 72) = 0.307692 */
.hor_phase_arr = {
.even = { { 1, 24, 78, 7, 24, 1, 0 },
{ 0, 12, 74, 7, 40, 2, 0 },
{ 0, 5, 60, 7, 58, 5, 0 },
{ 0, 2, 40, 7, 74, 12, 0 } },
.odd = { { 0, 18, 77, 7, 32, 1, 0 },
{ 0, 8, 68, 7, 49, 3, 0 },
{ 0, 3, 49, 7, 68, 8, 0 },
{ 0, 1, 32, 7, 77, 18, 0 } } },
.ver_phase_arr = {
.even = { { 1, 24, 78, 7, 24, 1, 0 },
{ 0, 12, 74, 7, 40, 2, 0 },
{ 0, 5, 60, 7, 58, 5, 0 },
{ 0, 2, 40, 7, 74, 12, 0 } },
.odd = { { 0, 18, 77, 7, 32, 1, 0 },
{ 0, 8, 68, 7, 49, 3, 0 },
{ 0, 3, 49, 7, 68, 8, 0 },
{ 0, 1, 32, 7, 77, 18, 0 } } },
.ptrn_arr = { { 0x249249 } },
.sample_patrn_length = 26,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 73) = 0.304762 */
.hor_phase_arr = {
.even = { { 1, 24, 78, 7, 24, 1, 0 },
{ 0, 12, 70, 7, 43, 3, 0 },
{ 0, 5, 54, 7, 62, 7, 0 },
{ 0, 2, 34, 7, 76, 16, 0 },
{ 0, 18, 77, 7, 32, 1, 0 },
{ 0, 8, 64, 7, 52, 4, 0 },
{ 0, 3, 45, 7, 69, 11, 0 },
{ 0, 1, 26, 7, 77, 23, 1 },
{ 0, 13, 73, 7, 40, 2, 0 },
{ 0, 5, 56, 7, 61, 6, 0 },
{ 0, 2, 36, 7, 75, 15, 0 },
{ 0, 19, 78, 7, 30, 1, 0 },
{ 0, 9, 66, 7, 49, 4, 0 },
{ 0, 3, 47, 7, 68, 10, 0 },
{ 0, 1, 28, 7, 77, 21, 1 },
{ 0, 14, 74, 7, 38, 2, 0 },
{ 0, 6, 58, 7, 58, 6, 0 },
{ 0, 2, 38, 7, 74, 14, 0 },
{ 1, 21, 77, 7, 28, 1, 0 },
{ 0, 10, 68, 7, 47, 3, 0 },
{ 0, 4, 49, 7, 66, 9, 0 },
{ 0, 1, 30, 7, 78, 19, 0 },
{ 0, 15, 75, 7, 36, 2, 0 },
{ 0, 6, 61, 7, 56, 5, 0 },
{ 0, 2, 40, 7, 73, 13, 0 },
{ 1, 23, 77, 7, 26, 1, 0 },
{ 0, 11, 69, 7, 45, 3, 0 },
{ 0, 4, 52, 7, 64, 8, 0 },
{ 0, 1, 32, 7, 77, 18, 0 },
{ 0, 16, 76, 7, 34, 2, 0 },
{ 0, 7, 62, 7, 54, 5, 0 },
{ 0, 3, 43, 7, 70, 12, 0 } },
.odd = { { 0, 17, 77, 7, 33, 1, 0 },
{ 0, 7, 64, 7, 53, 4, 0 },
{ 0, 3, 44, 7, 70, 11, 0 },
{ 0, 1, 25, 7, 78, 23, 1 },
{ 0, 12, 72, 7, 42, 2, 0 },
{ 0, 5, 55, 7, 61, 7, 0 },
{ 0, 2, 35, 7, 75, 16, 0 },
{ 0, 19, 77, 7, 31, 1, 0 },
{ 0, 8, 65, 7, 51, 4, 0 },
{ 0, 3, 46, 7, 69, 10, 0 },
{ 0, 1, 27, 7, 77, 22, 1 },
{ 0, 13, 74, 7, 39, 2, 0 },
{ 0, 5, 57, 7, 60, 6, 0 },
{ 0, 2, 37, 7, 75, 14, 0 },
{ 1, 20, 77, 7, 29, 1, 0 },
{ 0, 9, 68, 7, 48, 3, 0 },
{ 0, 3, 48, 7, 68, 9, 0 },
{ 0, 1, 29, 7, 77, 20, 1 },
{ 0, 14, 75, 7, 37, 2, 0 },
{ 0, 6, 60, 7, 57, 5, 0 },
{ 0, 2, 39, 7, 74, 13, 0 },
{ 1, 22, 77, 7, 27, 1, 0 },
{ 0, 10, 69, 7, 46, 3, 0 },
{ 0, 4, 51, 7, 65, 8, 0 },
{ 0, 1, 31, 7, 77, 19, 0 },
{ 0, 16, 75, 7, 35, 2, 0 },
{ 0, 7, 61, 7, 55, 5, 0 },
{ 0, 2, 42, 7, 72, 12, 0 },
{ 1, 23, 78, 7, 25, 1, 0 },
{ 0, 11, 70, 7, 44, 3, 0 },
{ 0, 4, 53, 7, 64, 7, 0 },
{ 0, 1, 33, 7, 77, 17, 0 } } },
.ver_phase_arr = {
.even = { { 1, 24, 78, 7, 24, 1, 0 },
{ 0, 12, 70, 7, 43, 3, 0 },
{ 0, 5, 54, 7, 62, 7, 0 },
{ 0, 2, 34, 7, 76, 16, 0 },
{ 0, 18, 77, 7, 32, 1, 0 },
{ 0, 8, 64, 7, 52, 4, 0 },
{ 0, 3, 45, 7, 69, 11, 0 },
{ 0, 1, 26, 7, 77, 23, 1 },
{ 0, 13, 73, 7, 40, 2, 0 },
{ 0, 5, 56, 7, 61, 6, 0 },
{ 0, 2, 36, 7, 75, 15, 0 },
{ 0, 19, 78, 7, 30, 1, 0 },
{ 0, 9, 66, 7, 49, 4, 0 },
{ 0, 3, 47, 7, 68, 10, 0 },
{ 0, 1, 28, 7, 77, 21, 1 },
{ 0, 14, 74, 7, 38, 2, 0 },
{ 0, 6, 58, 7, 58, 6, 0 },
{ 0, 2, 38, 7, 74, 14, 0 },
{ 1, 21, 77, 7, 28, 1, 0 },
{ 0, 10, 68, 7, 47, 3, 0 },
{ 0, 4, 49, 7, 66, 9, 0 },
{ 0, 1, 30, 7, 78, 19, 0 },
{ 0, 15, 75, 7, 36, 2, 0 },
{ 0, 6, 61, 7, 56, 5, 0 },
{ 0, 2, 40, 7, 73, 13, 0 },
{ 1, 23, 77, 7, 26, 1, 0 },
{ 0, 11, 69, 7, 45, 3, 0 },
{ 0, 4, 52, 7, 64, 8, 0 },
{ 0, 1, 32, 7, 77, 18, 0 },
{ 0, 16, 76, 7, 34, 2, 0 },
{ 0, 7, 62, 7, 54, 5, 0 },
{ 0, 3, 43, 7, 70, 12, 0 } },
.odd = { { 0, 17, 77, 7, 33, 1, 0 },
{ 0, 7, 64, 7, 53, 4, 0 },
{ 0, 3, 44, 7, 70, 11, 0 },
{ 0, 1, 25, 7, 78, 23, 1 },
{ 0, 12, 72, 7, 42, 2, 0 },
{ 0, 5, 55, 7, 61, 7, 0 },
{ 0, 2, 35, 7, 75, 16, 0 },
{ 0, 19, 77, 7, 31, 1, 0 },
{ 0, 8, 65, 7, 51, 4, 0 },
{ 0, 3, 46, 7, 69, 10, 0 },
{ 0, 1, 27, 7, 77, 22, 1 },
{ 0, 13, 74, 7, 39, 2, 0 },
{ 0, 5, 57, 7, 60, 6, 0 },
{ 0, 2, 37, 7, 75, 14, 0 },
{ 1, 20, 77, 7, 29, 1, 0 },
{ 0, 9, 68, 7, 48, 3, 0 },
{ 0, 3, 48, 7, 68, 9, 0 },
{ 0, 1, 29, 7, 77, 20, 1 },
{ 0, 14, 75, 7, 37, 2, 0 },
{ 0, 6, 60, 7, 57, 5, 0 },
{ 0, 2, 39, 7, 74, 13, 0 },
{ 1, 22, 77, 7, 27, 1, 0 },
{ 0, 10, 69, 7, 46, 3, 0 },
{ 0, 4, 51, 7, 65, 8, 0 },
{ 0, 1, 31, 7, 77, 19, 0 },
{ 0, 16, 75, 7, 35, 2, 0 },
{ 0, 7, 61, 7, 55, 5, 0 },
{ 0, 2, 42, 7, 72, 12, 0 },
{ 1, 23, 78, 7, 25, 1, 0 },
{ 0, 11, 70, 7, 44, 3, 0 },
{ 0, 4, 53, 7, 64, 7, 0 },
{ 0, 1, 33, 7, 77, 17, 0 } } },
.ptrn_arr = { { 0x24249249, 0x24921249, 0x84924909, 0x92424924,
0x92492124, 0x48492490, 0x2492 } },
.sample_patrn_length = 210,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 74) = 0.301887 */
.hor_phase_arr = {
.even = { { 1, 25, 76, 7, 25, 1, 0 },
{ 0, 11, 69, 7, 45, 3, 0 },
{ 0, 4, 49, 7, 66, 9, 0 },
{ 0, 1, 28, 7, 77, 21, 1 },
{ 0, 13, 72, 7, 41, 2, 0 },
{ 0, 5, 54, 7, 62, 7, 0 },
{ 0, 1, 32, 7, 77, 18, 0 },
{ 0, 15, 75, 7, 36, 2, 0 },
{ 0, 6, 58, 7, 58, 6, 0 },
{ 0, 2, 36, 7, 75, 15, 0 },
{ 0, 18, 77, 7, 32, 1, 0 },
{ 0, 7, 62, 7, 54, 5, 0 },
{ 0, 2, 41, 7, 72, 13, 0 },
{ 1, 21, 77, 7, 28, 1, 0 },
{ 0, 9, 66, 7, 49, 4, 0 },
{ 0, 3, 45, 7, 69, 11, 0 } },
.odd = { { 0, 17, 75, 7, 34, 2, 0 },
{ 0, 7, 60, 7, 56, 5, 0 },
{ 0, 2, 38, 7, 74, 14, 0 },
{ 1, 20, 76, 7, 30, 1, 0 },
{ 0, 8, 64, 7, 52, 4, 0 },
{ 0, 3, 43, 7, 70, 12, 0 },
{ 1, 23, 77, 7, 26, 1, 0 },
{ 0, 10, 68, 7, 47, 3, 0 },
{ 0, 3, 47, 7, 68, 10, 0 },
{ 0, 1, 26, 7, 77, 23, 1 },
{ 0, 12, 70, 7, 43, 3, 0 },
{ 0, 4, 52, 7, 64, 8, 0 },
{ 0, 1, 30, 7, 76, 20, 1 },
{ 0, 14, 74, 7, 38, 2, 0 },
{ 0, 5, 56, 7, 60, 7, 0 },
{ 0, 2, 34, 7, 75, 17, 0 } } },
.ver_phase_arr = {
.even = { { 1, 25, 76, 7, 25, 1, 0 },
{ 0, 11, 69, 7, 45, 3, 0 },
{ 0, 4, 49, 7, 66, 9, 0 },
{ 0, 1, 28, 7, 77, 21, 1 },
{ 0, 13, 72, 7, 41, 2, 0 },
{ 0, 5, 54, 7, 62, 7, 0 },
{ 0, 1, 32, 7, 77, 18, 0 },
{ 0, 15, 75, 7, 36, 2, 0 },
{ 0, 6, 58, 7, 58, 6, 0 },
{ 0, 2, 36, 7, 75, 15, 0 },
{ 0, 18, 77, 7, 32, 1, 0 },
{ 0, 7, 62, 7, 54, 5, 0 },
{ 0, 2, 41, 7, 72, 13, 0 },
{ 1, 21, 77, 7, 28, 1, 0 },
{ 0, 9, 66, 7, 49, 4, 0 },
{ 0, 3, 45, 7, 69, 11, 0 } },
.odd = { { 0, 17, 75, 7, 34, 2, 0 },
{ 0, 7, 60, 7, 56, 5, 0 },
{ 0, 2, 38, 7, 74, 14, 0 },
{ 1, 20, 76, 7, 30, 1, 0 },
{ 0, 8, 64, 7, 52, 4, 0 },
{ 0, 3, 43, 7, 70, 12, 0 },
{ 1, 23, 77, 7, 26, 1, 0 },
{ 0, 10, 68, 7, 47, 3, 0 },
{ 0, 3, 47, 7, 68, 10, 0 },
{ 0, 1, 26, 7, 77, 23, 1 },
{ 0, 12, 70, 7, 43, 3, 0 },
{ 0, 4, 52, 7, 64, 8, 0 },
{ 0, 1, 30, 7, 76, 20, 1 },
{ 0, 14, 74, 7, 38, 2, 0 },
{ 0, 5, 56, 7, 60, 7, 0 },
{ 0, 2, 34, 7, 75, 17, 0 } } },
.ptrn_arr = { { 0x24849249, 0x24924849, 0x92424924, 0x24 } },
.sample_patrn_length = 106,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 75) = 0.299065 */
.hor_phase_arr = {
.even = { { 1, 25, 76, 7, 25, 1, 0 },
{ 0, 10, 67, 7, 47, 4, 0 },
{ 0, 3, 45, 7, 69, 11, 0 },
{ 1, 23, 76, 7, 27, 1, 0 },
{ 0, 9, 66, 7, 49, 4, 0 },
{ 0, 3, 43, 7, 70, 12, 0 },
{ 1, 22, 75, 7, 29, 1, 0 },
{ 0, 8, 65, 7, 51, 4, 0 },
{ 0, 2, 41, 7, 72, 13, 0 },
{ 1, 20, 76, 7, 30, 1, 0 },
{ 0, 8, 61, 7, 54, 5, 0 },
{ 0, 2, 39, 7, 72, 15, 0 },
{ 0, 19, 76, 7, 32, 1, 0 },
{ 0, 7, 59, 7, 56, 6, 0 },
{ 0, 2, 36, 7, 74, 16, 0 },
{ 0, 17, 75, 7, 34, 2, 0 },
{ 0, 6, 58, 7, 58, 6, 0 },
{ 0, 2, 34, 7, 75, 17, 0 },
{ 0, 16, 74, 7, 36, 2, 0 },
{ 0, 6, 56, 7, 59, 7, 0 },
{ 0, 1, 32, 7, 76, 19, 0 },
{ 0, 15, 72, 7, 39, 2, 0 },
{ 0, 5, 54, 7, 61, 8, 0 },
{ 0, 1, 30, 7, 76, 20, 1 },
{ 0, 13, 72, 7, 41, 2, 0 },
{ 0, 4, 51, 7, 65, 8, 0 },
{ 0, 1, 29, 7, 75, 22, 1 },
{ 0, 12, 70, 7, 43, 3, 0 },
{ 0, 4, 49, 7, 66, 9, 0 },
{ 0, 1, 27, 7, 76, 23, 1 },
{ 0, 11, 69, 7, 45, 3, 0 },
{ 0, 4, 47, 7, 67, 10, 0 } },
.odd = { { 0, 16, 75, 7, 35, 2, 0 },
{ 0, 6, 57, 7, 58, 7, 0 },
{ 0, 2, 33, 7, 75, 18, 0 },
{ 0, 15, 73, 7, 38, 2, 0 },
{ 0, 5, 55, 7, 61, 7, 0 },
{ 0, 1, 31, 7, 76, 19, 1 },
{ 0, 14, 72, 7, 40, 2, 0 },
{ 0, 5, 53, 7, 62, 8, 0 },
{ 0, 1, 30, 7, 75, 21, 1 },
{ 0, 13, 70, 7, 42, 3, 0 },
{ 0, 4, 50, 7, 65, 9, 0 },
{ 0, 1, 28, 7, 76, 22, 1 },
{ 0, 12, 69, 7, 44, 3, 0 },
{ 0, 4, 48, 7, 66, 10, 0 },
{ 0, 1, 26, 7, 76, 24, 1 },
{ 0, 11, 68, 7, 46, 3, 0 },
{ 0, 3, 46, 7, 68, 11, 0 },
{ 1, 24, 76, 7, 26, 1, 0 },
{ 0, 10, 66, 7, 48, 4, 0 },
{ 0, 3, 44, 7, 69, 12, 0 },
{ 1, 22, 76, 7, 28, 1, 0 },
{ 0, 9, 65, 7, 50, 4, 0 },
{ 0, 3, 42, 7, 70, 13, 0 },
{ 1, 21, 75, 7, 30, 1, 0 },
{ 0, 8, 62, 7, 53, 5, 0 },
{ 0, 2, 40, 7, 72, 14, 0 },
{ 1, 19, 76, 7, 31, 1, 0 },
{ 0, 7, 61, 7, 55, 5, 0 },
{ 0, 2, 38, 7, 73, 15, 0 },
{ 0, 18, 75, 7, 33, 2, 0 },
{ 0, 7, 58, 7, 57, 6, 0 },
{ 0, 2, 35, 7, 75, 16, 0 } } },
.ver_phase_arr = {
.even = { { 1, 25, 76, 7, 25, 1, 0 },
{ 0, 10, 67, 7, 47, 4, 0 },
{ 0, 3, 45, 7, 69, 11, 0 },
{ 1, 23, 76, 7, 27, 1, 0 },
{ 0, 9, 66, 7, 49, 4, 0 },
{ 0, 3, 43, 7, 70, 12, 0 },
{ 1, 22, 75, 7, 29, 1, 0 },
{ 0, 8, 65, 7, 51, 4, 0 },
{ 0, 2, 41, 7, 72, 13, 0 },
{ 1, 20, 76, 7, 30, 1, 0 },
{ 0, 8, 61, 7, 54, 5, 0 },
{ 0, 2, 39, 7, 72, 15, 0 },
{ 0, 19, 76, 7, 32, 1, 0 },
{ 0, 7, 59, 7, 56, 6, 0 },
{ 0, 2, 36, 7, 74, 16, 0 },
{ 0, 17, 75, 7, 34, 2, 0 },
{ 0, 6, 58, 7, 58, 6, 0 },
{ 0, 2, 34, 7, 75, 17, 0 },
{ 0, 16, 74, 7, 36, 2, 0 },
{ 0, 6, 56, 7, 59, 7, 0 },
{ 0, 1, 32, 7, 76, 19, 0 },
{ 0, 15, 72, 7, 39, 2, 0 },
{ 0, 5, 54, 7, 61, 8, 0 },
{ 0, 1, 30, 7, 76, 20, 1 },
{ 0, 13, 72, 7, 41, 2, 0 },
{ 0, 4, 51, 7, 65, 8, 0 },
{ 0, 1, 29, 7, 75, 22, 1 },
{ 0, 12, 70, 7, 43, 3, 0 },
{ 0, 4, 49, 7, 66, 9, 0 },
{ 0, 1, 27, 7, 76, 23, 1 },
{ 0, 11, 69, 7, 45, 3, 0 },
{ 0, 4, 47, 7, 67, 10, 0 } },
.odd = { { 0, 16, 75, 7, 35, 2, 0 },
{ 0, 6, 57, 7, 58, 7, 0 },
{ 0, 2, 33, 7, 75, 18, 0 },
{ 0, 15, 73, 7, 38, 2, 0 },
{ 0, 5, 55, 7, 61, 7, 0 },
{ 0, 1, 31, 7, 76, 19, 1 },
{ 0, 14, 72, 7, 40, 2, 0 },
{ 0, 5, 53, 7, 62, 8, 0 },
{ 0, 1, 30, 7, 75, 21, 1 },
{ 0, 13, 70, 7, 42, 3, 0 },
{ 0, 4, 50, 7, 65, 9, 0 },
{ 0, 1, 28, 7, 76, 22, 1 },
{ 0, 12, 69, 7, 44, 3, 0 },
{ 0, 4, 48, 7, 66, 10, 0 },
{ 0, 1, 26, 7, 76, 24, 1 },
{ 0, 11, 68, 7, 46, 3, 0 },
{ 0, 3, 46, 7, 68, 11, 0 },
{ 1, 24, 76, 7, 26, 1, 0 },
{ 0, 10, 66, 7, 48, 4, 0 },
{ 0, 3, 44, 7, 69, 12, 0 },
{ 1, 22, 76, 7, 28, 1, 0 },
{ 0, 9, 65, 7, 50, 4, 0 },
{ 0, 3, 42, 7, 70, 13, 0 },
{ 1, 21, 75, 7, 30, 1, 0 },
{ 0, 8, 62, 7, 53, 5, 0 },
{ 0, 2, 40, 7, 72, 14, 0 },
{ 1, 19, 76, 7, 31, 1, 0 },
{ 0, 7, 61, 7, 55, 5, 0 },
{ 0, 2, 38, 7, 73, 15, 0 },
{ 0, 18, 75, 7, 33, 2, 0 },
{ 0, 7, 58, 7, 57, 6, 0 },
{ 0, 2, 35, 7, 75, 16, 0 } } },
.ptrn_arr = { { 0x24909249, 0x90924909, 0x92490924, 0x49212490,
0x21249212, 0x24921249, 0x24921 } },
.sample_patrn_length = 214,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 76) = 0.296296 */
.hor_phase_arr = {
.even = { { 1, 25, 76, 7, 25, 1, 0 },
{ 0, 10, 65, 7, 49, 4, 0 },
{ 0, 3, 41, 7, 70, 14, 0 },
{ 1, 19, 73, 7, 33, 2, 0 },
{ 0, 6, 58, 7, 58, 6, 0 },
{ 0, 2, 33, 7, 73, 19, 1 },
{ 0, 14, 70, 7, 41, 3, 0 },
{ 0, 4, 49, 7, 65, 10, 0 } },
.odd = { { 0, 16, 73, 7, 37, 2, 0 },
{ 0, 5, 53, 7, 62, 8, 0 },
{ 0, 1, 29, 7, 75, 22, 1 },
{ 0, 11, 69, 7, 45, 3, 0 },
{ 0, 3, 45, 7, 69, 11, 0 },
{ 1, 22, 75, 7, 29, 1, 0 },
{ 0, 8, 62, 7, 53, 5, 0 },
{ 0, 2, 37, 7, 73, 16, 0 } } },
.ver_phase_arr = {
.even = { { 1, 25, 76, 7, 25, 1, 0 },
{ 0, 10, 65, 7, 49, 4, 0 },
{ 0, 3, 41, 7, 70, 14, 0 },
{ 1, 19, 73, 7, 33, 2, 0 },
{ 0, 6, 58, 7, 58, 6, 0 },
{ 0, 2, 33, 7, 73, 19, 1 },
{ 0, 14, 70, 7, 41, 3, 0 },
{ 0, 4, 49, 7, 65, 10, 0 } },
.odd = { { 0, 16, 73, 7, 37, 2, 0 },
{ 0, 5, 53, 7, 62, 8, 0 },
{ 0, 1, 29, 7, 75, 22, 1 },
{ 0, 11, 69, 7, 45, 3, 0 },
{ 0, 3, 45, 7, 69, 11, 0 },
{ 1, 22, 75, 7, 29, 1, 0 },
{ 0, 8, 62, 7, 53, 5, 0 },
{ 0, 2, 37, 7, 73, 16, 0 } } },
.ptrn_arr = { { 0x24909249, 0x24921 } },
.sample_patrn_length = 54,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 77) = 0.293578 */
.hor_phase_arr = {
.even = { { 1, 26, 74, 7, 26, 1, 0 },
{ 0, 9, 63, 7, 51, 5, 0 },
{ 0, 2, 37, 7, 73, 16, 0 },
{ 0, 15, 72, 7, 39, 2, 0 },
{ 0, 4, 49, 7, 65, 10, 0 },
{ 1, 24, 75, 7, 27, 1, 0 },
{ 0, 8, 62, 7, 53, 5, 0 },
{ 0, 2, 35, 7, 72, 18, 1 },
{ 0, 14, 70, 7, 41, 3, 0 },
{ 0, 4, 47, 7, 66, 11, 0 },
{ 1, 22, 75, 7, 29, 1, 0 },
{ 0, 7, 60, 7, 55, 6, 0 },
{ 0, 2, 33, 7, 73, 19, 1 },
{ 0, 13, 69, 7, 43, 3, 0 },
{ 0, 3, 45, 7, 68, 12, 0 },
{ 1, 21, 74, 7, 31, 1, 0 },
{ 0, 7, 57, 7, 57, 7, 0 },
{ 0, 1, 31, 7, 74, 21, 1 },
{ 0, 12, 68, 7, 45, 3, 0 },
{ 0, 3, 43, 7, 69, 13, 0 },
{ 1, 19, 73, 7, 33, 2, 0 },
{ 0, 6, 55, 7, 60, 7, 0 },
{ 0, 1, 29, 7, 75, 22, 1 },
{ 0, 11, 66, 7, 47, 4, 0 },
{ 0, 3, 41, 7, 70, 14, 0 },
{ 1, 18, 72, 7, 35, 2, 0 },
{ 0, 5, 53, 7, 62, 8, 0 },
{ 0, 1, 27, 7, 75, 24, 1 },
{ 0, 10, 65, 7, 49, 4, 0 },
{ 0, 2, 39, 7, 72, 15, 0 },
{ 0, 16, 73, 7, 37, 2, 0 },
{ 0, 5, 51, 7, 63, 9, 0 } },
.odd = { { 0, 16, 72, 7, 38, 2, 0 },
{ 0, 5, 50, 7, 64, 9, 0 },
{ 1, 25, 75, 7, 26, 1, 0 },
{ 0, 8, 63, 7, 52, 5, 0 },
{ 0, 2, 36, 7, 73, 17, 0 },
{ 0, 15, 70, 7, 40, 3, 0 },
{ 0, 4, 48, 7, 66, 10, 0 },
{ 1, 23, 75, 7, 28, 1, 0 },
{ 0, 8, 60, 7, 54, 6, 0 },
{ 0, 2, 34, 7, 73, 18, 1 },
{ 0, 13, 70, 7, 42, 3, 0 },
{ 0, 4, 46, 7, 67, 11, 0 },
{ 1, 21, 75, 7, 30, 1, 0 },
{ 0, 7, 59, 7, 56, 6, 0 },
{ 0, 2, 32, 7, 73, 20, 1 },
{ 0, 12, 69, 7, 44, 3, 0 },
{ 0, 3, 44, 7, 69, 12, 0 },
{ 1, 20, 73, 7, 32, 2, 0 },
{ 0, 6, 56, 7, 59, 7, 0 },
{ 0, 1, 30, 7, 75, 21, 1 },
{ 0, 11, 67, 7, 46, 4, 0 },
{ 0, 3, 42, 7, 70, 13, 0 },
{ 1, 18, 73, 7, 34, 2, 0 },
{ 0, 6, 54, 7, 60, 8, 0 },
{ 0, 1, 28, 7, 75, 23, 1 },
{ 0, 10, 66, 7, 48, 4, 0 },
{ 0, 3, 40, 7, 70, 15, 0 },
{ 0, 17, 73, 7, 36, 2, 0 },
{ 0, 5, 52, 7, 63, 8, 0 },
{ 0, 1, 26, 7, 75, 25, 1 },
{ 0, 9, 64, 7, 50, 5, 0 },
{ 0, 2, 38, 7, 72, 16, 0 } } },
.ver_phase_arr = {
.even = { { 1, 26, 74, 7, 26, 1, 0 },
{ 0, 9, 63, 7, 51, 5, 0 },
{ 0, 2, 37, 7, 73, 16, 0 },
{ 0, 15, 72, 7, 39, 2, 0 },
{ 0, 4, 49, 7, 65, 10, 0 },
{ 1, 24, 75, 7, 27, 1, 0 },
{ 0, 8, 62, 7, 53, 5, 0 },
{ 0, 2, 35, 7, 72, 18, 1 },
{ 0, 14, 70, 7, 41, 3, 0 },
{ 0, 4, 47, 7, 66, 11, 0 },
{ 1, 22, 75, 7, 29, 1, 0 },
{ 0, 7, 60, 7, 55, 6, 0 },
{ 0, 2, 33, 7, 73, 19, 1 },
{ 0, 13, 69, 7, 43, 3, 0 },
{ 0, 3, 45, 7, 68, 12, 0 },
{ 1, 21, 74, 7, 31, 1, 0 },
{ 0, 7, 57, 7, 57, 7, 0 },
{ 0, 1, 31, 7, 74, 21, 1 },
{ 0, 12, 68, 7, 45, 3, 0 },
{ 0, 3, 43, 7, 69, 13, 0 },
{ 1, 19, 73, 7, 33, 2, 0 },
{ 0, 6, 55, 7, 60, 7, 0 },
{ 0, 1, 29, 7, 75, 22, 1 },
{ 0, 11, 66, 7, 47, 4, 0 },
{ 0, 3, 41, 7, 70, 14, 0 },
{ 1, 18, 72, 7, 35, 2, 0 },
{ 0, 5, 53, 7, 62, 8, 0 },
{ 0, 1, 27, 7, 75, 24, 1 },
{ 0, 10, 65, 7, 49, 4, 0 },
{ 0, 2, 39, 7, 72, 15, 0 },
{ 0, 16, 73, 7, 37, 2, 0 },
{ 0, 5, 51, 7, 63, 9, 0 } },
.odd = { { 0, 16, 72, 7, 38, 2, 0 },
{ 0, 5, 50, 7, 64, 9, 0 },
{ 1, 25, 75, 7, 26, 1, 0 },
{ 0, 8, 63, 7, 52, 5, 0 },
{ 0, 2, 36, 7, 73, 17, 0 },
{ 0, 15, 70, 7, 40, 3, 0 },
{ 0, 4, 48, 7, 66, 10, 0 },
{ 1, 23, 75, 7, 28, 1, 0 },
{ 0, 8, 60, 7, 54, 6, 0 },
{ 0, 2, 34, 7, 73, 18, 1 },
{ 0, 13, 70, 7, 42, 3, 0 },
{ 0, 4, 46, 7, 67, 11, 0 },
{ 1, 21, 75, 7, 30, 1, 0 },
{ 0, 7, 59, 7, 56, 6, 0 },
{ 0, 2, 32, 7, 73, 20, 1 },
{ 0, 12, 69, 7, 44, 3, 0 },
{ 0, 3, 44, 7, 69, 12, 0 },
{ 1, 20, 73, 7, 32, 2, 0 },
{ 0, 6, 56, 7, 59, 7, 0 },
{ 0, 1, 30, 7, 75, 21, 1 },
{ 0, 11, 67, 7, 46, 4, 0 },
{ 0, 3, 42, 7, 70, 13, 0 },
{ 1, 18, 73, 7, 34, 2, 0 },
{ 0, 6, 54, 7, 60, 8, 0 },
{ 0, 1, 28, 7, 75, 23, 1 },
{ 0, 10, 66, 7, 48, 4, 0 },
{ 0, 3, 40, 7, 70, 15, 0 },
{ 0, 17, 73, 7, 36, 2, 0 },
{ 0, 5, 52, 7, 63, 8, 0 },
{ 0, 1, 26, 7, 75, 25, 1 },
{ 0, 9, 64, 7, 50, 5, 0 },
{ 0, 2, 38, 7, 72, 16, 0 } } },
.ptrn_arr = { { 0x24921249, 0x92484924, 0x49212490, 0x24849242,
0x92124909, 0x48492424, 0x249092 } },
.sample_patrn_length = 218,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 78) = 0.290909 */
.hor_phase_arr = {
.even = { { 1, 26, 74, 7, 26, 1, 0 },
{ 0, 8, 61, 7, 53, 6, 0 },
{ 0, 2, 33, 7, 73, 19, 1 },
{ 0, 12, 67, 7, 45, 4, 0 },
{ 0, 3, 41, 7, 70, 14, 0 },
{ 0, 17, 72, 7, 37, 2, 0 },
{ 0, 5, 49, 7, 64, 10, 0 },
{ 1, 22, 75, 7, 29, 1, 0 },
{ 0, 7, 57, 7, 57, 7, 0 },
{ 0, 1, 29, 7, 75, 22, 1 },
{ 0, 10, 64, 7, 49, 5, 0 },
{ 0, 2, 37, 7, 72, 17, 0 },
{ 0, 14, 70, 7, 41, 3, 0 },
{ 0, 4, 45, 7, 67, 12, 0 },
{ 1, 19, 73, 7, 33, 2, 0 },
{ 0, 6, 53, 7, 61, 8, 0 } },
.odd = { { 0, 15, 71, 7, 39, 3, 0 },
{ 0, 4, 47, 7, 66, 11, 0 },
{ 1, 21, 73, 7, 31, 2, 0 },
{ 0, 6, 55, 7, 59, 8, 0 },
{ 0, 1, 28, 7, 74, 24, 1 },
{ 0, 9, 63, 7, 51, 5, 0 },
{ 0, 2, 35, 7, 72, 18, 1 },
{ 0, 13, 69, 7, 43, 3, 0 },
{ 0, 3, 43, 7, 69, 13, 0 },
{ 1, 18, 72, 7, 35, 2, 0 },
{ 0, 5, 51, 7, 63, 9, 0 },
{ 1, 24, 74, 7, 28, 1, 0 },
{ 0, 8, 59, 7, 55, 6, 0 },
{ 0, 2, 31, 7, 73, 21, 1 },
{ 0, 11, 66, 7, 47, 4, 0 },
{ 0, 3, 39, 7, 71, 15, 0 } } },
.ver_phase_arr = {
.even = { { 1, 26, 74, 7, 26, 1, 0 },
{ 0, 8, 61, 7, 53, 6, 0 },
{ 0, 2, 33, 7, 73, 19, 1 },
{ 0, 12, 67, 7, 45, 4, 0 },
{ 0, 3, 41, 7, 70, 14, 0 },
{ 0, 17, 72, 7, 37, 2, 0 },
{ 0, 5, 49, 7, 64, 10, 0 },
{ 1, 22, 75, 7, 29, 1, 0 },
{ 0, 7, 57, 7, 57, 7, 0 },
{ 0, 1, 29, 7, 75, 22, 1 },
{ 0, 10, 64, 7, 49, 5, 0 },
{ 0, 2, 37, 7, 72, 17, 0 },
{ 0, 14, 70, 7, 41, 3, 0 },
{ 0, 4, 45, 7, 67, 12, 0 },
{ 1, 19, 73, 7, 33, 2, 0 },
{ 0, 6, 53, 7, 61, 8, 0 } },
.odd = { { 0, 15, 71, 7, 39, 3, 0 },
{ 0, 4, 47, 7, 66, 11, 0 },
{ 1, 21, 73, 7, 31, 2, 0 },
{ 0, 6, 55, 7, 59, 8, 0 },
{ 0, 1, 28, 7, 74, 24, 1 },
{ 0, 9, 63, 7, 51, 5, 0 },
{ 0, 2, 35, 7, 72, 18, 1 },
{ 0, 13, 69, 7, 43, 3, 0 },
{ 0, 3, 43, 7, 69, 13, 0 },
{ 1, 18, 72, 7, 35, 2, 0 },
{ 0, 5, 51, 7, 63, 9, 0 },
{ 1, 24, 74, 7, 28, 1, 0 },
{ 0, 8, 59, 7, 55, 6, 0 },
{ 0, 2, 31, 7, 73, 21, 1 },
{ 0, 11, 66, 7, 47, 4, 0 },
{ 0, 3, 39, 7, 71, 15, 0 } } },
.ptrn_arr = { { 0x24921249, 0x12490924, 0x9248492, 0x249 } },
.sample_patrn_length = 110,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 79) = 0.288288 */
.hor_phase_arr = {
.even = { { 1, 26, 74, 7, 26, 1, 0 },
{ 0, 8, 59, 7, 55, 6, 0 },
{ 0, 1, 30, 7, 73, 23, 1 },
{ 0, 9, 63, 7, 51, 5, 0 },
{ 0, 2, 33, 7, 72, 20, 1 },
{ 0, 11, 66, 7, 47, 4, 0 },
{ 0, 2, 37, 7, 71, 17, 1 },
{ 0, 13, 69, 7, 43, 3, 0 },
{ 0, 3, 41, 7, 69, 15, 0 },
{ 0, 16, 70, 7, 39, 3, 0 },
{ 0, 4, 45, 7, 67, 12, 0 },
{ 1, 18, 72, 7, 35, 2, 0 },
{ 0, 5, 49, 7, 64, 10, 0 },
{ 1, 21, 73, 7, 31, 2, 0 },
{ 0, 6, 53, 7, 60, 9, 0 },
{ 1, 24, 74, 7, 28, 1, 0 },
{ 0, 7, 57, 7, 57, 7, 0 },
{ 0, 1, 28, 7, 74, 24, 1 },
{ 0, 9, 60, 7, 53, 6, 0 },
{ 0, 2, 31, 7, 73, 21, 1 },
{ 0, 10, 64, 7, 49, 5, 0 },
{ 0, 2, 35, 7, 72, 18, 1 },
{ 0, 12, 67, 7, 45, 4, 0 },
{ 0, 3, 39, 7, 70, 16, 0 },
{ 0, 15, 69, 7, 41, 3, 0 },
{ 0, 3, 43, 7, 69, 13, 0 },
{ 1, 17, 71, 7, 37, 2, 0 },
{ 0, 4, 47, 7, 66, 11, 0 },
{ 1, 20, 72, 7, 33, 2, 0 },
{ 0, 5, 51, 7, 63, 9, 0 },
{ 1, 23, 73, 7, 30, 1, 0 },
{ 0, 6, 55, 7, 59, 8, 0 } },
.odd = { { 0, 15, 70, 7, 40, 3, 0 },
{ 0, 4, 44, 7, 67, 13, 0 },
{ 1, 18, 71, 7, 36, 2, 0 },
{ 0, 4, 48, 7, 65, 11, 0 },
{ 1, 20, 73, 7, 32, 2, 0 },
{ 0, 6, 52, 7, 61, 9, 0 },
{ 1, 24, 73, 7, 29, 1, 0 },
{ 0, 7, 56, 7, 58, 7, 0 },
{ 0, 1, 27, 7, 74, 25, 1 },
{ 0, 8, 60, 7, 54, 6, 0 },
{ 0, 2, 30, 7, 73, 22, 1 },
{ 0, 10, 63, 7, 50, 5, 0 },
{ 0, 2, 34, 7, 72, 19, 1 },
{ 0, 12, 66, 7, 46, 4, 0 },
{ 0, 3, 38, 7, 71, 16, 0 },
{ 0, 14, 69, 7, 42, 3, 0 },
{ 0, 3, 42, 7, 69, 14, 0 },
{ 0, 16, 71, 7, 38, 3, 0 },
{ 0, 4, 46, 7, 66, 12, 0 },
{ 1, 19, 72, 7, 34, 2, 0 },
{ 0, 5, 50, 7, 63, 10, 0 },
{ 1, 22, 73, 7, 30, 2, 0 },
{ 0, 6, 54, 7, 60, 8, 0 },
{ 1, 25, 74, 7, 27, 1, 0 },
{ 0, 7, 58, 7, 56, 7, 0 },
{ 0, 1, 29, 7, 73, 24, 1 },
{ 0, 9, 61, 7, 52, 6, 0 },
{ 0, 2, 32, 7, 73, 20, 1 },
{ 0, 11, 65, 7, 48, 4, 0 },
{ 0, 2, 36, 7, 71, 18, 1 },
{ 0, 13, 67, 7, 44, 4, 0 },
{ 0, 3, 40, 7, 70, 15, 0 } } },
.ver_phase_arr = {
.even = { { 1, 26, 74, 7, 26, 1, 0 },
{ 0, 8, 59, 7, 55, 6, 0 },
{ 0, 1, 30, 7, 73, 23, 1 },
{ 0, 9, 63, 7, 51, 5, 0 },
{ 0, 2, 33, 7, 72, 20, 1 },
{ 0, 11, 66, 7, 47, 4, 0 },
{ 0, 2, 37, 7, 71, 17, 1 },
{ 0, 13, 69, 7, 43, 3, 0 },
{ 0, 3, 41, 7, 69, 15, 0 },
{ 0, 16, 70, 7, 39, 3, 0 },
{ 0, 4, 45, 7, 67, 12, 0 },
{ 1, 18, 72, 7, 35, 2, 0 },
{ 0, 5, 49, 7, 64, 10, 0 },
{ 1, 21, 73, 7, 31, 2, 0 },
{ 0, 6, 53, 7, 60, 9, 0 },
{ 1, 24, 74, 7, 28, 1, 0 },
{ 0, 7, 57, 7, 57, 7, 0 },
{ 0, 1, 28, 7, 74, 24, 1 },
{ 0, 9, 60, 7, 53, 6, 0 },
{ 0, 2, 31, 7, 73, 21, 1 },
{ 0, 10, 64, 7, 49, 5, 0 },
{ 0, 2, 35, 7, 72, 18, 1 },
{ 0, 12, 67, 7, 45, 4, 0 },
{ 0, 3, 39, 7, 70, 16, 0 },
{ 0, 15, 69, 7, 41, 3, 0 },
{ 0, 3, 43, 7, 69, 13, 0 },
{ 1, 17, 71, 7, 37, 2, 0 },
{ 0, 4, 47, 7, 66, 11, 0 },
{ 1, 20, 72, 7, 33, 2, 0 },
{ 0, 5, 51, 7, 63, 9, 0 },
{ 1, 23, 73, 7, 30, 1, 0 },
{ 0, 6, 55, 7, 59, 8, 0 } },
.odd = { { 0, 15, 70, 7, 40, 3, 0 },
{ 0, 4, 44, 7, 67, 13, 0 },
{ 1, 18, 71, 7, 36, 2, 0 },
{ 0, 4, 48, 7, 65, 11, 0 },
{ 1, 20, 73, 7, 32, 2, 0 },
{ 0, 6, 52, 7, 61, 9, 0 },
{ 1, 24, 73, 7, 29, 1, 0 },
{ 0, 7, 56, 7, 58, 7, 0 },
{ 0, 1, 27, 7, 74, 25, 1 },
{ 0, 8, 60, 7, 54, 6, 0 },
{ 0, 2, 30, 7, 73, 22, 1 },
{ 0, 10, 63, 7, 50, 5, 0 },
{ 0, 2, 34, 7, 72, 19, 1 },
{ 0, 12, 66, 7, 46, 4, 0 },
{ 0, 3, 38, 7, 71, 16, 0 },
{ 0, 14, 69, 7, 42, 3, 0 },
{ 0, 3, 42, 7, 69, 14, 0 },
{ 0, 16, 71, 7, 38, 3, 0 },
{ 0, 4, 46, 7, 66, 12, 0 },
{ 1, 19, 72, 7, 34, 2, 0 },
{ 0, 5, 50, 7, 63, 10, 0 },
{ 1, 22, 73, 7, 30, 2, 0 },
{ 0, 6, 54, 7, 60, 8, 0 },
{ 1, 25, 74, 7, 27, 1, 0 },
{ 0, 7, 58, 7, 56, 7, 0 },
{ 0, 1, 29, 7, 73, 24, 1 },
{ 0, 9, 61, 7, 52, 6, 0 },
{ 0, 2, 32, 7, 73, 20, 1 },
{ 0, 11, 65, 7, 48, 4, 0 },
{ 0, 2, 36, 7, 71, 18, 1 },
{ 0, 13, 67, 7, 44, 4, 0 },
{ 0, 3, 40, 7, 70, 15, 0 } } },
.ptrn_arr = { { 0x84921249, 0x42492124, 0x24249092, 0x92124909,
0x49212484, 0x24909248, 0x2490924 } },
.sample_patrn_length = 222,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
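	/*
	 * Worked example for the sampling bitmap, again an observation from
	 * the data rather than from documentation: the 32 / (32 + 80) = 2/7
	 * entry below uses ptrn_arr 0x249 (bits 0, 3, 6 and 9 set) over a
	 * 14-sample period, so 4 of every 14 positions are flagged, matching
	 * the 2/7 scale factor. The density of set bits in ptrn_arr tracks
	 * the scale factor in the other entries as well.
	 */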
/* Scale factor 32 / (32 + 80) = 0.285714 */
.hor_phase_arr = {
.even = { { 1, 26, 74, 7, 26, 1, 0 },
{ 0, 7, 57, 7, 57, 7, 0 } },
.odd = { { 0, 15, 69, 7, 41, 3, 0 },
{ 0, 3, 41, 7, 69, 15, 0 } } },
.ver_phase_arr = {
.even = { { 1, 26, 74, 7, 26, 1, 0 },
{ 0, 7, 57, 7, 57, 7, 0 } },
.odd = { { 0, 15, 69, 7, 41, 3, 0 },
{ 0, 3, 41, 7, 69, 15, 0 } } },
.ptrn_arr = { { 0x249 } },
.sample_patrn_length = 14,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 81) = 0.283186 */
.hor_phase_arr = {
.even = { { 1, 27, 72, 7, 27, 1, 0 },
{ 0, 7, 54, 7, 59, 8, 0 },
{ 1, 23, 72, 7, 30, 2, 0 },
{ 0, 6, 51, 7, 61, 10, 0 },
{ 1, 20, 71, 7, 34, 2, 0 },
{ 0, 5, 47, 7, 64, 12, 0 },
{ 1, 18, 69, 7, 37, 3, 0 },
{ 0, 4, 43, 7, 67, 14, 0 },
{ 0, 15, 69, 7, 41, 3, 0 },
{ 0, 3, 39, 7, 69, 16, 1 },
{ 0, 13, 66, 7, 45, 4, 0 },
{ 0, 2, 35, 7, 71, 19, 1 },
{ 0, 11, 63, 7, 49, 5, 0 },
{ 0, 2, 32, 7, 71, 22, 1 },
{ 0, 9, 60, 7, 53, 6, 0 },
{ 0, 1, 28, 7, 73, 25, 1 },
{ 0, 8, 56, 7, 56, 8, 0 },
{ 1, 25, 73, 7, 28, 1, 0 },
{ 0, 6, 53, 7, 60, 9, 0 },
{ 1, 22, 71, 7, 32, 2, 0 },
{ 0, 5, 49, 7, 63, 11, 0 },
{ 1, 19, 71, 7, 35, 2, 0 },
{ 0, 4, 45, 7, 66, 13, 0 },
{ 1, 16, 69, 7, 39, 3, 0 },
{ 0, 3, 41, 7, 69, 15, 0 },
{ 0, 14, 67, 7, 43, 4, 0 },
{ 0, 3, 37, 7, 69, 18, 1 },
{ 0, 12, 64, 7, 47, 5, 0 },
{ 0, 2, 34, 7, 71, 20, 1 },
{ 0, 10, 61, 7, 51, 6, 0 },
{ 0, 2, 30, 7, 72, 23, 1 },
{ 0, 8, 59, 7, 54, 7, 0 } },
.odd = { { 0, 15, 67, 7, 42, 4, 0 },
{ 0, 3, 38, 7, 69, 17, 1 },
{ 0, 12, 66, 7, 46, 4, 0 },
{ 0, 2, 34, 7, 71, 20, 1 },
{ 0, 10, 63, 7, 50, 5, 0 },
{ 0, 2, 31, 7, 71, 23, 1 },
{ 0, 9, 58, 7, 54, 7, 0 },
{ 0, 1, 27, 7, 73, 26, 1 },
{ 0, 7, 55, 7, 58, 8, 0 },
{ 1, 24, 72, 7, 29, 2, 0 },
{ 0, 6, 52, 7, 60, 10, 0 },
{ 1, 21, 71, 7, 33, 2, 0 },
{ 0, 5, 48, 7, 64, 11, 0 },
{ 1, 18, 70, 7, 36, 3, 0 },
{ 0, 4, 44, 7, 67, 13, 0 },
{ 0, 16, 69, 7, 40, 3, 0 },
{ 0, 3, 40, 7, 69, 16, 0 },
{ 0, 13, 67, 7, 44, 4, 0 },
{ 0, 3, 36, 7, 70, 18, 1 },
{ 0, 11, 64, 7, 48, 5, 0 },
{ 0, 2, 33, 7, 71, 21, 1 },
{ 0, 10, 60, 7, 52, 6, 0 },
{ 0, 2, 29, 7, 72, 24, 1 },
{ 0, 8, 58, 7, 55, 7, 0 },
{ 1, 26, 73, 7, 27, 1, 0 },
{ 0, 7, 54, 7, 58, 9, 0 },
{ 1, 23, 71, 7, 31, 2, 0 },
{ 0, 5, 50, 7, 63, 10, 0 },
{ 1, 20, 71, 7, 34, 2, 0 },
{ 0, 4, 46, 7, 66, 12, 0 },
{ 1, 17, 69, 7, 38, 3, 0 },
{ 0, 4, 42, 7, 67, 15, 0 } } },
.ver_phase_arr = {
.even = { { 1, 27, 72, 7, 27, 1, 0 },
{ 0, 7, 54, 7, 59, 8, 0 },
{ 1, 23, 72, 7, 30, 2, 0 },
{ 0, 6, 51, 7, 61, 10, 0 },
{ 1, 20, 71, 7, 34, 2, 0 },
{ 0, 5, 47, 7, 64, 12, 0 },
{ 1, 18, 69, 7, 37, 3, 0 },
{ 0, 4, 43, 7, 67, 14, 0 },
{ 0, 15, 69, 7, 41, 3, 0 },
{ 0, 3, 39, 7, 69, 16, 1 },
{ 0, 13, 66, 7, 45, 4, 0 },
{ 0, 2, 35, 7, 71, 19, 1 },
{ 0, 11, 63, 7, 49, 5, 0 },
{ 0, 2, 32, 7, 71, 22, 1 },
{ 0, 9, 60, 7, 53, 6, 0 },
{ 0, 1, 28, 7, 73, 25, 1 },
{ 0, 8, 56, 7, 56, 8, 0 },
{ 1, 25, 73, 7, 28, 1, 0 },
{ 0, 6, 53, 7, 60, 9, 0 },
{ 1, 22, 71, 7, 32, 2, 0 },
{ 0, 5, 49, 7, 63, 11, 0 },
{ 1, 19, 71, 7, 35, 2, 0 },
{ 0, 4, 45, 7, 66, 13, 0 },
{ 1, 16, 69, 7, 39, 3, 0 },
{ 0, 3, 41, 7, 69, 15, 0 },
{ 0, 14, 67, 7, 43, 4, 0 },
{ 0, 3, 37, 7, 69, 18, 1 },
{ 0, 12, 64, 7, 47, 5, 0 },
{ 0, 2, 34, 7, 71, 20, 1 },
{ 0, 10, 61, 7, 51, 6, 0 },
{ 0, 2, 30, 7, 72, 23, 1 },
{ 0, 8, 59, 7, 54, 7, 0 } },
.odd = { { 0, 15, 67, 7, 42, 4, 0 },
{ 0, 3, 38, 7, 69, 17, 1 },
{ 0, 12, 66, 7, 46, 4, 0 },
{ 0, 2, 34, 7, 71, 20, 1 },
{ 0, 10, 63, 7, 50, 5, 0 },
{ 0, 2, 31, 7, 71, 23, 1 },
{ 0, 9, 58, 7, 54, 7, 0 },
{ 0, 1, 27, 7, 73, 26, 1 },
{ 0, 7, 55, 7, 58, 8, 0 },
{ 1, 24, 72, 7, 29, 2, 0 },
{ 0, 6, 52, 7, 60, 10, 0 },
{ 1, 21, 71, 7, 33, 2, 0 },
{ 0, 5, 48, 7, 64, 11, 0 },
{ 1, 18, 70, 7, 36, 3, 0 },
{ 0, 4, 44, 7, 67, 13, 0 },
{ 0, 16, 69, 7, 40, 3, 0 },
{ 0, 3, 40, 7, 69, 16, 0 },
{ 0, 13, 67, 7, 44, 4, 0 },
{ 0, 3, 36, 7, 70, 18, 1 },
{ 0, 11, 64, 7, 48, 5, 0 },
{ 0, 2, 33, 7, 71, 21, 1 },
{ 0, 10, 60, 7, 52, 6, 0 },
{ 0, 2, 29, 7, 72, 24, 1 },
{ 0, 8, 58, 7, 55, 7, 0 },
{ 1, 26, 73, 7, 27, 1, 0 },
{ 0, 7, 54, 7, 58, 9, 0 },
{ 1, 23, 71, 7, 31, 2, 0 },
{ 0, 5, 50, 7, 63, 10, 0 },
{ 1, 20, 71, 7, 34, 2, 0 },
{ 0, 4, 46, 7, 66, 12, 0 },
{ 1, 17, 69, 7, 38, 3, 0 },
{ 0, 4, 42, 7, 67, 15, 0 } } },
.ptrn_arr = { { 0x90924249, 0x49092424, 0x84921248, 0x49092124,
0x24909242, 0x48492124, 0x24849212 } },
.sample_patrn_length = 226,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 82) = 0.280702 */
.hor_phase_arr = {
.even = { { 1, 27, 72, 7, 27, 1, 0 },
{ 0, 6, 52, 7, 61, 9, 0 },
{ 1, 21, 70, 7, 34, 2, 0 },
{ 0, 4, 45, 7, 66, 13, 0 },
{ 0, 15, 68, 7, 41, 4, 0 },
{ 0, 3, 37, 7, 69, 18, 1 },
{ 0, 11, 63, 7, 49, 5, 0 },
{ 0, 2, 30, 7, 71, 24, 1 },
{ 0, 8, 56, 7, 56, 8, 0 },
{ 1, 24, 71, 7, 30, 2, 0 },
{ 0, 5, 49, 7, 63, 11, 0 },
{ 1, 18, 69, 7, 37, 3, 0 },
{ 0, 4, 41, 7, 68, 15, 0 },
{ 0, 13, 66, 7, 45, 4, 0 },
{ 0, 2, 34, 7, 70, 21, 1 },
{ 0, 9, 61, 7, 52, 6, 0 } },
.odd = { { 0, 14, 67, 7, 43, 4, 0 },
{ 0, 3, 36, 7, 69, 19, 1 },
{ 0, 10, 61, 7, 51, 6, 0 },
{ 0, 2, 28, 7, 72, 25, 1 },
{ 0, 7, 54, 7, 58, 9, 0 },
{ 1, 22, 71, 7, 32, 2, 0 },
{ 0, 5, 47, 7, 64, 12, 0 },
{ 1, 17, 68, 7, 39, 3, 0 },
{ 0, 3, 39, 7, 68, 17, 1 },
{ 0, 12, 64, 7, 47, 5, 0 },
{ 0, 2, 32, 7, 71, 22, 1 },
{ 0, 9, 58, 7, 54, 7, 0 },
{ 1, 25, 72, 7, 28, 2, 0 },
{ 0, 6, 51, 7, 61, 10, 0 },
{ 1, 19, 69, 7, 36, 3, 0 },
{ 0, 4, 43, 7, 67, 14, 0 } } },
.ver_phase_arr = {
.even = { { 1, 27, 72, 7, 27, 1, 0 },
{ 0, 6, 52, 7, 61, 9, 0 },
{ 1, 21, 70, 7, 34, 2, 0 },
{ 0, 4, 45, 7, 66, 13, 0 },
{ 0, 15, 68, 7, 41, 4, 0 },
{ 0, 3, 37, 7, 69, 18, 1 },
{ 0, 11, 63, 7, 49, 5, 0 },
{ 0, 2, 30, 7, 71, 24, 1 },
{ 0, 8, 56, 7, 56, 8, 0 },
{ 1, 24, 71, 7, 30, 2, 0 },
{ 0, 5, 49, 7, 63, 11, 0 },
{ 1, 18, 69, 7, 37, 3, 0 },
{ 0, 4, 41, 7, 68, 15, 0 },
{ 0, 13, 66, 7, 45, 4, 0 },
{ 0, 2, 34, 7, 70, 21, 1 },
{ 0, 9, 61, 7, 52, 6, 0 } },
.odd = { { 0, 14, 67, 7, 43, 4, 0 },
{ 0, 3, 36, 7, 69, 19, 1 },
{ 0, 10, 61, 7, 51, 6, 0 },
{ 0, 2, 28, 7, 72, 25, 1 },
{ 0, 7, 54, 7, 58, 9, 0 },
{ 1, 22, 71, 7, 32, 2, 0 },
{ 0, 5, 47, 7, 64, 12, 0 },
{ 1, 17, 68, 7, 39, 3, 0 },
{ 0, 3, 39, 7, 68, 17, 1 },
{ 0, 12, 64, 7, 47, 5, 0 },
{ 0, 2, 32, 7, 71, 22, 1 },
{ 0, 9, 58, 7, 54, 7, 0 },
{ 1, 25, 72, 7, 28, 2, 0 },
{ 0, 6, 51, 7, 61, 10, 0 },
{ 1, 19, 69, 7, 36, 3, 0 },
{ 0, 4, 43, 7, 67, 14, 0 } } },
.ptrn_arr = { { 0x90924249, 0x9212484, 0x92124249, 0x2484 } },
.sample_patrn_length = 114,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 83) = 0.278261 */
.hor_phase_arr = {
.even = { { 1, 27, 72, 7, 27, 1, 0 },
{ 0, 6, 51, 7, 61, 10, 0 },
{ 1, 18, 68, 7, 38, 3, 0 },
{ 0, 3, 39, 7, 68, 17, 1 },
{ 0, 11, 62, 7, 49, 6, 0 },
{ 0, 2, 29, 7, 71, 25, 1 },
{ 0, 7, 52, 7, 59, 10, 0 },
{ 1, 19, 69, 7, 36, 3, 0 },
{ 0, 4, 41, 7, 66, 16, 1 },
{ 0, 12, 64, 7, 47, 5, 0 },
{ 0, 2, 30, 7, 71, 24, 1 },
{ 0, 7, 54, 7, 58, 9, 0 },
{ 1, 21, 70, 7, 34, 2, 0 },
{ 0, 4, 43, 7, 66, 15, 0 },
{ 0, 13, 65, 7, 45, 5, 0 },
{ 0, 2, 32, 7, 71, 22, 1 },
{ 0, 8, 56, 7, 56, 8, 0 },
{ 1, 22, 71, 7, 32, 2, 0 },
{ 0, 5, 45, 7, 65, 13, 0 },
{ 0, 15, 66, 7, 43, 4, 0 },
{ 0, 2, 34, 7, 70, 21, 1 },
{ 0, 9, 58, 7, 54, 7, 0 },
{ 1, 24, 71, 7, 30, 2, 0 },
{ 0, 5, 47, 7, 64, 12, 0 },
{ 1, 16, 66, 7, 41, 4, 0 },
{ 0, 3, 36, 7, 69, 19, 1 },
{ 0, 10, 59, 7, 52, 7, 0 },
{ 1, 25, 71, 7, 29, 2, 0 },
{ 0, 6, 49, 7, 62, 11, 0 },
{ 1, 17, 68, 7, 39, 3, 0 },
{ 0, 3, 38, 7, 68, 18, 1 },
{ 0, 10, 61, 7, 51, 6, 0 } },
.odd = { { 0, 14, 66, 7, 44, 4, 0 },
{ 0, 2, 33, 7, 70, 22, 1 },
{ 0, 8, 57, 7, 55, 8, 0 },
{ 1, 23, 71, 7, 31, 2, 0 },
{ 0, 5, 46, 7, 64, 13, 0 },
{ 0, 15, 67, 7, 42, 4, 0 },
{ 0, 3, 35, 7, 69, 20, 1 },
{ 0, 9, 59, 7, 53, 7, 0 },
{ 1, 25, 71, 7, 29, 2, 0 },
{ 0, 5, 48, 7, 63, 12, 0 },
{ 1, 16, 68, 7, 40, 3, 0 },
{ 0, 3, 37, 7, 68, 19, 1 },
{ 0, 10, 61, 7, 51, 6, 0 },
{ 1, 26, 71, 7, 28, 2, 0 },
{ 0, 6, 50, 7, 61, 11, 0 },
{ 1, 18, 68, 7, 38, 3, 0 },
{ 0, 3, 38, 7, 68, 18, 1 },
{ 0, 11, 61, 7, 50, 6, 0 },
{ 0, 2, 28, 7, 71, 26, 1 },
{ 0, 6, 51, 7, 61, 10, 0 },
{ 1, 19, 68, 7, 37, 3, 0 },
{ 0, 3, 40, 7, 68, 16, 1 },
{ 0, 12, 63, 7, 48, 5, 0 },
{ 0, 2, 29, 7, 71, 25, 1 },
{ 0, 7, 53, 7, 59, 9, 0 },
{ 1, 20, 69, 7, 35, 3, 0 },
{ 0, 4, 42, 7, 67, 15, 0 },
{ 0, 13, 64, 7, 46, 5, 0 },
{ 0, 2, 31, 7, 71, 23, 1 },
{ 0, 8, 55, 7, 57, 8, 0 },
{ 1, 22, 70, 7, 33, 2, 0 },
{ 0, 4, 44, 7, 66, 14, 0 } } },
.ver_phase_arr = {
.even = { { 1, 27, 72, 7, 27, 1, 0 },
{ 0, 6, 51, 7, 61, 10, 0 },
{ 1, 18, 68, 7, 38, 3, 0 },
{ 0, 3, 39, 7, 68, 17, 1 },
{ 0, 11, 62, 7, 49, 6, 0 },
{ 0, 2, 29, 7, 71, 25, 1 },
{ 0, 7, 52, 7, 59, 10, 0 },
{ 1, 19, 69, 7, 36, 3, 0 },
{ 0, 4, 41, 7, 66, 16, 1 },
{ 0, 12, 64, 7, 47, 5, 0 },
{ 0, 2, 30, 7, 71, 24, 1 },
{ 0, 7, 54, 7, 58, 9, 0 },
{ 1, 21, 70, 7, 34, 2, 0 },
{ 0, 4, 43, 7, 66, 15, 0 },
{ 0, 13, 65, 7, 45, 5, 0 },
{ 0, 2, 32, 7, 71, 22, 1 },
{ 0, 8, 56, 7, 56, 8, 0 },
{ 1, 22, 71, 7, 32, 2, 0 },
{ 0, 5, 45, 7, 65, 13, 0 },
{ 0, 15, 66, 7, 43, 4, 0 },
{ 0, 2, 34, 7, 70, 21, 1 },
{ 0, 9, 58, 7, 54, 7, 0 },
{ 1, 24, 71, 7, 30, 2, 0 },
{ 0, 5, 47, 7, 64, 12, 0 },
{ 1, 16, 66, 7, 41, 4, 0 },
{ 0, 3, 36, 7, 69, 19, 1 },
{ 0, 10, 59, 7, 52, 7, 0 },
{ 1, 25, 71, 7, 29, 2, 0 },
{ 0, 6, 49, 7, 62, 11, 0 },
{ 1, 17, 68, 7, 39, 3, 0 },
{ 0, 3, 38, 7, 68, 18, 1 },
{ 0, 10, 61, 7, 51, 6, 0 } },
.odd = { { 0, 14, 66, 7, 44, 4, 0 },
{ 0, 2, 33, 7, 70, 22, 1 },
{ 0, 8, 57, 7, 55, 8, 0 },
{ 1, 23, 71, 7, 31, 2, 0 },
{ 0, 5, 46, 7, 64, 13, 0 },
{ 0, 15, 67, 7, 42, 4, 0 },
{ 0, 3, 35, 7, 69, 20, 1 },
{ 0, 9, 59, 7, 53, 7, 0 },
{ 1, 25, 71, 7, 29, 2, 0 },
{ 0, 5, 48, 7, 63, 12, 0 },
{ 1, 16, 68, 7, 40, 3, 0 },
{ 0, 3, 37, 7, 68, 19, 1 },
{ 0, 10, 61, 7, 51, 6, 0 },
{ 1, 26, 71, 7, 28, 2, 0 },
{ 0, 6, 50, 7, 61, 11, 0 },
{ 1, 18, 68, 7, 38, 3, 0 },
{ 0, 3, 38, 7, 68, 18, 1 },
{ 0, 11, 61, 7, 50, 6, 0 },
{ 0, 2, 28, 7, 71, 26, 1 },
{ 0, 6, 51, 7, 61, 10, 0 },
{ 1, 19, 68, 7, 37, 3, 0 },
{ 0, 3, 40, 7, 68, 16, 1 },
{ 0, 12, 63, 7, 48, 5, 0 },
{ 0, 2, 29, 7, 71, 25, 1 },
{ 0, 7, 53, 7, 59, 9, 0 },
{ 1, 20, 69, 7, 35, 3, 0 },
{ 0, 4, 42, 7, 67, 15, 0 },
{ 0, 13, 64, 7, 46, 5, 0 },
{ 0, 2, 31, 7, 71, 23, 1 },
{ 0, 8, 55, 7, 57, 8, 0 },
{ 1, 22, 70, 7, 33, 2, 0 },
{ 0, 4, 44, 7, 66, 14, 0 } } },
.ptrn_arr = { { 0x92124249, 0x21242484, 0x12424849, 0x24248492,
0x42484909, 0x24849092, 0x48490924, 0x2 } },
.sample_patrn_length = 230,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 84) = 0.275862 */
.hor_phase_arr = {
.even = { { 2, 27, 70, 7, 27, 2, 0 },
{ 0, 6, 49, 7, 61, 12, 0 },
{ 1, 16, 66, 7, 41, 4, 0 },
{ 0, 2, 34, 7, 70, 21, 1 },
{ 0, 8, 56, 7, 56, 8, 0 },
{ 1, 21, 70, 7, 34, 2, 0 },
{ 0, 4, 41, 7, 66, 16, 1 },
{ 0, 12, 61, 7, 49, 6, 0 } },
.odd = { { 0, 14, 64, 7, 45, 5, 0 },
{ 0, 2, 31, 7, 70, 24, 1 },
{ 0, 7, 52, 7, 59, 10, 0 },
{ 1, 18, 68, 7, 38, 3, 0 },
{ 0, 3, 38, 7, 68, 18, 1 },
{ 0, 10, 59, 7, 52, 7, 0 },
{ 1, 24, 70, 7, 31, 2, 0 },
{ 0, 5, 45, 7, 64, 14, 0 } } },
.ver_phase_arr = {
.even = { { 2, 27, 70, 7, 27, 2, 0 },
{ 0, 6, 49, 7, 61, 12, 0 },
{ 1, 16, 66, 7, 41, 4, 0 },
{ 0, 2, 34, 7, 70, 21, 1 },
{ 0, 8, 56, 7, 56, 8, 0 },
{ 1, 21, 70, 7, 34, 2, 0 },
{ 0, 4, 41, 7, 66, 16, 1 },
{ 0, 12, 61, 7, 49, 6, 0 } },
.odd = { { 0, 14, 64, 7, 45, 5, 0 },
{ 0, 2, 31, 7, 70, 24, 1 },
{ 0, 7, 52, 7, 59, 10, 0 },
{ 1, 18, 68, 7, 38, 3, 0 },
{ 0, 3, 38, 7, 68, 18, 1 },
{ 0, 10, 59, 7, 52, 7, 0 },
{ 1, 24, 70, 7, 31, 2, 0 },
{ 0, 5, 45, 7, 64, 14, 0 } } },
.ptrn_arr = { { 0x92124249, 0x248490 } },
.sample_patrn_length = 58,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 85) = 0.273504 */
.hor_phase_arr = {
.even = { { 2, 27, 70, 7, 27, 2, 0 },
{ 0, 5, 47, 7, 63, 13, 0 },
{ 0, 14, 64, 7, 45, 5, 0 },
{ 0, 2, 29, 7, 70, 26, 1 },
{ 0, 6, 48, 7, 62, 12, 0 },
{ 1, 15, 65, 7, 43, 4, 0 },
{ 0, 2, 31, 7, 70, 24, 1 },
{ 0, 6, 50, 7, 61, 11, 0 },
{ 1, 16, 66, 7, 41, 4, 0 },
{ 0, 2, 32, 7, 70, 23, 1 },
{ 0, 7, 52, 7, 59, 10, 0 },
{ 1, 17, 67, 7, 39, 4, 0 },
{ 0, 3, 34, 7, 69, 21, 1 },
{ 0, 8, 54, 7, 57, 9, 0 },
{ 1, 19, 67, 7, 38, 3, 0 },
{ 0, 3, 36, 7, 68, 20, 1 },
{ 0, 9, 55, 7, 55, 9, 0 },
{ 1, 20, 68, 7, 36, 3, 0 },
{ 0, 3, 38, 7, 67, 19, 1 },
{ 0, 9, 57, 7, 54, 8, 0 },
{ 1, 21, 69, 7, 34, 3, 0 },
{ 0, 4, 39, 7, 67, 17, 1 },
{ 0, 10, 59, 7, 52, 7, 0 },
{ 1, 23, 70, 7, 32, 2, 0 },
{ 0, 4, 41, 7, 66, 16, 1 },
{ 0, 11, 61, 7, 50, 6, 0 },
{ 1, 24, 70, 7, 31, 2, 0 },
{ 0, 4, 43, 7, 65, 15, 1 },
{ 0, 12, 62, 7, 48, 6, 0 },
{ 1, 26, 70, 7, 29, 2, 0 },
{ 0, 5, 45, 7, 64, 14, 0 },
{ 0, 13, 63, 7, 47, 5, 0 } },
.odd = { { 0, 13, 64, 7, 46, 5, 0 },
{ 0, 2, 28, 7, 69, 27, 2 },
{ 0, 6, 48, 7, 62, 12, 0 },
{ 1, 14, 64, 7, 44, 5, 0 },
{ 0, 2, 30, 7, 70, 25, 1 },
{ 0, 6, 49, 7, 62, 11, 0 },
{ 1, 16, 65, 7, 42, 4, 0 },
{ 0, 2, 32, 7, 69, 24, 1 },
{ 0, 7, 51, 7, 59, 11, 0 },
{ 1, 17, 66, 7, 40, 4, 0 },
{ 0, 2, 33, 7, 70, 22, 1 },
{ 0, 7, 53, 7, 58, 10, 0 },
{ 1, 18, 67, 7, 39, 3, 0 },
{ 0, 3, 35, 7, 68, 21, 1 },
{ 0, 8, 54, 7, 57, 9, 0 },
{ 1, 19, 68, 7, 37, 3, 0 },
{ 0, 3, 37, 7, 68, 19, 1 },
{ 0, 9, 57, 7, 54, 8, 0 },
{ 1, 21, 68, 7, 35, 3, 0 },
{ 0, 3, 39, 7, 67, 18, 1 },
{ 0, 10, 58, 7, 53, 7, 0 },
{ 1, 22, 70, 7, 33, 2, 0 },
{ 0, 4, 40, 7, 66, 17, 1 },
{ 0, 11, 59, 7, 51, 7, 0 },
{ 1, 24, 69, 7, 32, 2, 0 },
{ 0, 4, 42, 7, 65, 16, 1 },
{ 0, 11, 62, 7, 49, 6, 0 },
{ 1, 25, 70, 7, 30, 2, 0 },
{ 0, 5, 44, 7, 64, 14, 1 },
{ 0, 12, 62, 7, 48, 6, 0 },
{ 2, 27, 69, 7, 28, 2, 0 },
{ 0, 5, 46, 7, 64, 13, 0 } } },
.ver_phase_arr = {
.even = { { 2, 27, 70, 7, 27, 2, 0 },
{ 0, 5, 47, 7, 63, 13, 0 },
{ 0, 14, 64, 7, 45, 5, 0 },
{ 0, 2, 29, 7, 70, 26, 1 },
{ 0, 6, 48, 7, 62, 12, 0 },
{ 1, 15, 65, 7, 43, 4, 0 },
{ 0, 2, 31, 7, 70, 24, 1 },
{ 0, 6, 50, 7, 61, 11, 0 },
{ 1, 16, 66, 7, 41, 4, 0 },
{ 0, 2, 32, 7, 70, 23, 1 },
{ 0, 7, 52, 7, 59, 10, 0 },
{ 1, 17, 67, 7, 39, 4, 0 },
{ 0, 3, 34, 7, 69, 21, 1 },
{ 0, 8, 54, 7, 57, 9, 0 },
{ 1, 19, 67, 7, 38, 3, 0 },
{ 0, 3, 36, 7, 68, 20, 1 },
{ 0, 9, 55, 7, 55, 9, 0 },
{ 1, 20, 68, 7, 36, 3, 0 },
{ 0, 3, 38, 7, 67, 19, 1 },
{ 0, 9, 57, 7, 54, 8, 0 },
{ 1, 21, 69, 7, 34, 3, 0 },
{ 0, 4, 39, 7, 67, 17, 1 },
{ 0, 10, 59, 7, 52, 7, 0 },
{ 1, 23, 70, 7, 32, 2, 0 },
{ 0, 4, 41, 7, 66, 16, 1 },
{ 0, 11, 61, 7, 50, 6, 0 },
{ 1, 24, 70, 7, 31, 2, 0 },
{ 0, 4, 43, 7, 65, 15, 1 },
{ 0, 12, 62, 7, 48, 6, 0 },
{ 1, 26, 70, 7, 29, 2, 0 },
{ 0, 5, 45, 7, 64, 14, 0 },
{ 0, 13, 63, 7, 47, 5, 0 } },
.odd = { { 0, 13, 64, 7, 46, 5, 0 },
{ 0, 2, 28, 7, 69, 27, 2 },
{ 0, 6, 48, 7, 62, 12, 0 },
{ 1, 14, 64, 7, 44, 5, 0 },
{ 0, 2, 30, 7, 70, 25, 1 },
{ 0, 6, 49, 7, 62, 11, 0 },
{ 1, 16, 65, 7, 42, 4, 0 },
{ 0, 2, 32, 7, 69, 24, 1 },
{ 0, 7, 51, 7, 59, 11, 0 },
{ 1, 17, 66, 7, 40, 4, 0 },
{ 0, 2, 33, 7, 70, 22, 1 },
{ 0, 7, 53, 7, 58, 10, 0 },
{ 1, 18, 67, 7, 39, 3, 0 },
{ 0, 3, 35, 7, 68, 21, 1 },
{ 0, 8, 54, 7, 57, 9, 0 },
{ 1, 19, 68, 7, 37, 3, 0 },
{ 0, 3, 37, 7, 68, 19, 1 },
{ 0, 9, 57, 7, 54, 8, 0 },
{ 1, 21, 68, 7, 35, 3, 0 },
{ 0, 3, 39, 7, 67, 18, 1 },
{ 0, 10, 58, 7, 53, 7, 0 },
{ 1, 22, 70, 7, 33, 2, 0 },
{ 0, 4, 40, 7, 66, 17, 1 },
{ 0, 11, 59, 7, 51, 7, 0 },
{ 1, 24, 69, 7, 32, 2, 0 },
{ 0, 4, 42, 7, 65, 16, 1 },
{ 0, 11, 62, 7, 49, 6, 0 },
{ 1, 25, 70, 7, 30, 2, 0 },
{ 0, 5, 44, 7, 64, 14, 1 },
{ 0, 12, 62, 7, 48, 6, 0 },
{ 2, 27, 69, 7, 28, 2, 0 },
{ 0, 5, 46, 7, 64, 13, 0 } } },
.ptrn_arr = { { 0x92124249, 0x24248490, 0x48490921, 0x90921242,
0x21242484, 0x42484909, 0x84909212, 0x24 } },
.sample_patrn_length = 234,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 86) = 0.271186 */
.hor_phase_arr = {
.even = { { 2, 28, 68, 7, 28, 2, 0 },
{ 0, 5, 45, 7, 63, 14, 1 },
{ 0, 12, 62, 7, 48, 6, 0 },
{ 1, 25, 69, 7, 31, 2, 0 },
{ 0, 4, 41, 7, 66, 16, 1 },
{ 0, 10, 59, 7, 52, 7, 0 },
{ 1, 22, 68, 7, 34, 3, 0 },
{ 0, 3, 38, 7, 67, 19, 1 },
{ 0, 9, 55, 7, 55, 9, 0 },
{ 1, 19, 67, 7, 38, 3, 0 },
{ 0, 3, 34, 7, 68, 22, 1 },
{ 0, 7, 52, 7, 59, 10, 0 },
{ 1, 16, 66, 7, 41, 4, 0 },
{ 0, 2, 31, 7, 69, 25, 1 },
{ 0, 6, 48, 7, 62, 12, 0 },
{ 1, 14, 63, 7, 45, 5, 0 } },
.odd = { { 0, 13, 62, 7, 47, 6, 0 },
{ 2, 26, 69, 7, 29, 2, 0 },
{ 0, 5, 43, 7, 64, 15, 1 },
{ 0, 11, 60, 7, 50, 7, 0 },
{ 1, 23, 69, 7, 33, 2, 0 },
{ 0, 4, 40, 7, 65, 18, 1 },
{ 0, 10, 57, 7, 53, 8, 0 },
{ 1, 20, 68, 7, 36, 3, 0 },
{ 0, 3, 36, 7, 68, 20, 1 },
{ 0, 8, 53, 7, 57, 10, 0 },
{ 1, 18, 65, 7, 40, 4, 0 },
{ 0, 2, 33, 7, 69, 23, 1 },
{ 0, 7, 50, 7, 60, 11, 0 },
{ 1, 15, 64, 7, 43, 5, 0 },
{ 0, 2, 29, 7, 69, 26, 2 },
{ 0, 6, 47, 7, 62, 13, 0 } } },
.ver_phase_arr = {
.even = { { 2, 28, 68, 7, 28, 2, 0 },
{ 0, 5, 45, 7, 63, 14, 1 },
{ 0, 12, 62, 7, 48, 6, 0 },
{ 1, 25, 69, 7, 31, 2, 0 },
{ 0, 4, 41, 7, 66, 16, 1 },
{ 0, 10, 59, 7, 52, 7, 0 },
{ 1, 22, 68, 7, 34, 3, 0 },
{ 0, 3, 38, 7, 67, 19, 1 },
{ 0, 9, 55, 7, 55, 9, 0 },
{ 1, 19, 67, 7, 38, 3, 0 },
{ 0, 3, 34, 7, 68, 22, 1 },
{ 0, 7, 52, 7, 59, 10, 0 },
{ 1, 16, 66, 7, 41, 4, 0 },
{ 0, 2, 31, 7, 69, 25, 1 },
{ 0, 6, 48, 7, 62, 12, 0 },
{ 1, 14, 63, 7, 45, 5, 0 } },
.odd = { { 0, 13, 62, 7, 47, 6, 0 },
{ 2, 26, 69, 7, 29, 2, 0 },
{ 0, 5, 43, 7, 64, 15, 1 },
{ 0, 11, 60, 7, 50, 7, 0 },
{ 1, 23, 69, 7, 33, 2, 0 },
{ 0, 4, 40, 7, 65, 18, 1 },
{ 0, 10, 57, 7, 53, 8, 0 },
{ 1, 20, 68, 7, 36, 3, 0 },
{ 0, 3, 36, 7, 68, 20, 1 },
{ 0, 8, 53, 7, 57, 10, 0 },
{ 1, 18, 65, 7, 40, 4, 0 },
{ 0, 2, 33, 7, 69, 23, 1 },
{ 0, 7, 50, 7, 60, 11, 0 },
{ 1, 15, 64, 7, 43, 5, 0 },
{ 0, 2, 29, 7, 69, 26, 2 },
{ 0, 6, 47, 7, 62, 13, 0 } } },
.ptrn_arr = { { 0x12424849, 0x24849092, 0x49092124, 0x24248 } },
.sample_patrn_length = 118,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 87) = 0.268908 */
.hor_phase_arr = {
.even = { { 2, 28, 68, 7, 28, 2, 0 },
{ 0, 5, 43, 7, 63, 16, 1 },
{ 0, 11, 57, 7, 52, 8, 0 },
{ 1, 21, 67, 7, 36, 3, 0 },
{ 0, 3, 34, 7, 68, 22, 1 },
{ 0, 7, 50, 7, 60, 11, 0 },
{ 1, 14, 63, 7, 45, 5, 0 },
{ 2, 26, 69, 7, 29, 2, 0 },
{ 0, 4, 41, 7, 65, 17, 1 },
{ 0, 10, 57, 7, 53, 8, 0 },
{ 1, 19, 66, 7, 38, 4, 0 },
{ 0, 3, 33, 7, 68, 23, 1 },
{ 0, 6, 48, 7, 62, 12, 0 },
{ 0, 13, 62, 7, 47, 6, 0 },
{ 1, 25, 69, 7, 31, 2, 0 },
{ 0, 4, 40, 7, 65, 18, 1 },
{ 0, 9, 55, 7, 55, 9, 0 },
{ 1, 18, 65, 7, 40, 4, 0 },
{ 0, 2, 31, 7, 69, 25, 1 },
{ 0, 6, 47, 7, 62, 13, 0 },
{ 0, 12, 62, 7, 48, 6, 0 },
{ 1, 23, 68, 7, 33, 3, 0 },
{ 0, 4, 38, 7, 66, 19, 1 },
{ 0, 8, 53, 7, 57, 10, 0 },
{ 1, 17, 65, 7, 41, 4, 0 },
{ 0, 2, 29, 7, 69, 26, 2 },
{ 0, 5, 45, 7, 63, 14, 1 },
{ 0, 11, 60, 7, 50, 7, 0 },
{ 1, 22, 68, 7, 34, 3, 0 },
{ 0, 3, 36, 7, 67, 21, 1 },
{ 0, 8, 52, 7, 57, 11, 0 },
{ 1, 16, 63, 7, 43, 5, 0 } },
.odd = { { 0, 13, 62, 7, 47, 6, 0 },
{ 1, 24, 69, 7, 32, 2, 0 },
{ 0, 4, 39, 7, 65, 19, 1 },
{ 0, 9, 54, 7, 56, 9, 0 },
{ 1, 17, 66, 7, 40, 4, 0 },
{ 0, 2, 30, 7, 69, 25, 2 },
{ 0, 5, 46, 7, 62, 14, 1 },
{ 0, 12, 60, 7, 49, 7, 0 },
{ 1, 23, 67, 7, 34, 3, 0 },
{ 0, 3, 37, 7, 67, 20, 1 },
{ 0, 8, 52, 7, 58, 10, 0 },
{ 1, 16, 64, 7, 42, 5, 0 },
{ 0, 2, 29, 7, 68, 27, 2 },
{ 0, 5, 44, 7, 63, 15, 1 },
{ 0, 11, 59, 7, 51, 7, 0 },
{ 1, 21, 68, 7, 35, 3, 0 },
{ 0, 3, 35, 7, 68, 21, 1 },
{ 0, 7, 51, 7, 59, 11, 0 },
{ 1, 15, 63, 7, 44, 5, 0 },
{ 2, 27, 68, 7, 29, 2, 0 },
{ 0, 5, 42, 7, 64, 16, 1 },
{ 0, 10, 58, 7, 52, 8, 0 },
{ 1, 20, 67, 7, 37, 3, 0 },
{ 0, 3, 34, 7, 67, 23, 1 },
{ 0, 7, 49, 7, 60, 12, 0 },
{ 1, 14, 62, 7, 46, 5, 0 },
{ 2, 25, 69, 7, 30, 2, 0 },
{ 0, 4, 40, 7, 66, 17, 1 },
{ 0, 9, 56, 7, 54, 9, 0 },
{ 1, 19, 65, 7, 39, 4, 0 },
{ 0, 2, 32, 7, 69, 24, 1 },
{ 0, 6, 47, 7, 62, 13, 0 } } },
.ver_phase_arr = {
.even = { { 2, 28, 68, 7, 28, 2, 0 },
{ 0, 5, 43, 7, 63, 16, 1 },
{ 0, 11, 57, 7, 52, 8, 0 },
{ 1, 21, 67, 7, 36, 3, 0 },
{ 0, 3, 34, 7, 68, 22, 1 },
{ 0, 7, 50, 7, 60, 11, 0 },
{ 1, 14, 63, 7, 45, 5, 0 },
{ 2, 26, 69, 7, 29, 2, 0 },
{ 0, 4, 41, 7, 65, 17, 1 },
{ 0, 10, 57, 7, 53, 8, 0 },
{ 1, 19, 66, 7, 38, 4, 0 },
{ 0, 3, 33, 7, 68, 23, 1 },
{ 0, 6, 48, 7, 62, 12, 0 },
{ 0, 13, 62, 7, 47, 6, 0 },
{ 1, 25, 69, 7, 31, 2, 0 },
{ 0, 4, 40, 7, 65, 18, 1 },
{ 0, 9, 55, 7, 55, 9, 0 },
{ 1, 18, 65, 7, 40, 4, 0 },
{ 0, 2, 31, 7, 69, 25, 1 },
{ 0, 6, 47, 7, 62, 13, 0 },
{ 0, 12, 62, 7, 48, 6, 0 },
{ 1, 23, 68, 7, 33, 3, 0 },
{ 0, 4, 38, 7, 66, 19, 1 },
{ 0, 8, 53, 7, 57, 10, 0 },
{ 1, 17, 65, 7, 41, 4, 0 },
{ 0, 2, 29, 7, 69, 26, 2 },
{ 0, 5, 45, 7, 63, 14, 1 },
{ 0, 11, 60, 7, 50, 7, 0 },
{ 1, 22, 68, 7, 34, 3, 0 },
{ 0, 3, 36, 7, 67, 21, 1 },
{ 0, 8, 52, 7, 57, 11, 0 },
{ 1, 16, 63, 7, 43, 5, 0 } },
.odd = { { 0, 13, 62, 7, 47, 6, 0 },
{ 1, 24, 69, 7, 32, 2, 0 },
{ 0, 4, 39, 7, 65, 19, 1 },
{ 0, 9, 54, 7, 56, 9, 0 },
{ 1, 17, 66, 7, 40, 4, 0 },
{ 0, 2, 30, 7, 69, 25, 2 },
{ 0, 5, 46, 7, 62, 14, 1 },
{ 0, 12, 60, 7, 49, 7, 0 },
{ 1, 23, 67, 7, 34, 3, 0 },
{ 0, 3, 37, 7, 67, 20, 1 },
{ 0, 8, 52, 7, 58, 10, 0 },
{ 1, 16, 64, 7, 42, 5, 0 },
{ 0, 2, 29, 7, 68, 27, 2 },
{ 0, 5, 44, 7, 63, 15, 1 },
{ 0, 11, 59, 7, 51, 7, 0 },
{ 1, 21, 68, 7, 35, 3, 0 },
{ 0, 3, 35, 7, 68, 21, 1 },
{ 0, 7, 51, 7, 59, 11, 0 },
{ 1, 15, 63, 7, 44, 5, 0 },
{ 2, 27, 68, 7, 29, 2, 0 },
{ 0, 5, 42, 7, 64, 16, 1 },
{ 0, 10, 58, 7, 52, 8, 0 },
{ 1, 20, 67, 7, 37, 3, 0 },
{ 0, 3, 34, 7, 67, 23, 1 },
{ 0, 7, 49, 7, 60, 12, 0 },
{ 1, 14, 62, 7, 46, 5, 0 },
{ 2, 25, 69, 7, 30, 2, 0 },
{ 0, 4, 40, 7, 66, 17, 1 },
{ 0, 9, 56, 7, 54, 9, 0 },
{ 1, 19, 65, 7, 39, 4, 0 },
{ 0, 2, 32, 7, 69, 24, 1 },
{ 0, 6, 47, 7, 62, 13, 0 } } },
.ptrn_arr = { { 0x12424849, 0x84909092, 0x9212424, 0x42484909,
0x90921212, 0x21242484, 0x48490921, 0x242 } },
.sample_patrn_length = 238,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 88) = 0.266667 */
.hor_phase_arr = {
.even = { { 2, 28, 68, 7, 28, 2, 0 },
{ 0, 4, 41, 7, 65, 17, 1 },
{ 0, 9, 55, 7, 55, 9, 0 },
{ 1, 17, 65, 7, 41, 4, 0 } },
.odd = { { 0, 13, 60, 7, 48, 7, 0 },
{ 1, 22, 68, 7, 34, 3, 0 },
{ 0, 3, 34, 7, 68, 22, 1 },
{ 0, 7, 48, 7, 60, 13, 0 } } },
.ver_phase_arr = {
.even = { { 2, 28, 68, 7, 28, 2, 0 },
{ 0, 4, 41, 7, 65, 17, 1 },
{ 0, 9, 55, 7, 55, 9, 0 },
{ 1, 17, 65, 7, 41, 4, 0 } },
.odd = { { 0, 13, 60, 7, 48, 7, 0 },
{ 1, 22, 68, 7, 34, 3, 0 },
{ 0, 3, 34, 7, 68, 22, 1 },
{ 0, 7, 48, 7, 60, 13, 0 } } },
.ptrn_arr = { { 0x2424849 } },
.sample_patrn_length = 30,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 89) = 0.264463 */
.hor_phase_arr = {
.even = { { 2, 28, 68, 7, 28, 2, 0 },
{ 0, 4, 40, 7, 65, 18, 1 },
{ 0, 8, 51, 7, 58, 11, 0 },
{ 1, 14, 61, 7, 46, 6, 0 },
{ 1, 22, 67, 7, 35, 3, 0 },
{ 0, 3, 33, 7, 67, 24, 1 },
{ 0, 6, 45, 7, 61, 15, 1 },
{ 0, 10, 56, 7, 53, 9, 0 },
{ 1, 17, 64, 7, 41, 5, 0 },
{ 2, 27, 67, 7, 30, 2, 0 },
{ 0, 4, 38, 7, 65, 20, 1 },
{ 0, 7, 50, 7, 59, 12, 0 },
{ 0, 13, 60, 7, 48, 7, 0 },
{ 1, 21, 67, 7, 36, 3, 0 },
{ 0, 3, 31, 7, 67, 25, 2 },
{ 0, 5, 43, 7, 63, 16, 1 },
{ 0, 9, 56, 7, 54, 9, 0 },
{ 1, 16, 63, 7, 43, 5, 0 },
{ 2, 25, 67, 7, 31, 3, 0 },
{ 0, 3, 36, 7, 67, 21, 1 },
{ 0, 7, 48, 7, 60, 13, 0 },
{ 0, 12, 59, 7, 50, 7, 0 },
{ 1, 20, 65, 7, 38, 4, 0 },
{ 0, 2, 30, 7, 67, 27, 2 },
{ 0, 5, 41, 7, 64, 17, 1 },
{ 0, 9, 53, 7, 56, 10, 0 },
{ 1, 15, 61, 7, 45, 6, 0 },
{ 1, 24, 67, 7, 33, 3, 0 },
{ 0, 3, 35, 7, 67, 22, 1 },
{ 0, 6, 46, 7, 61, 14, 1 },
{ 0, 11, 58, 7, 51, 8, 0 },
{ 1, 18, 65, 7, 40, 4, 0 } },
.odd = { { 0, 12, 60, 7, 49, 7, 0 },
{ 1, 20, 66, 7, 37, 4, 0 },
{ 0, 2, 31, 7, 67, 26, 2 },
{ 0, 5, 42, 7, 63, 17, 1 },
{ 0, 9, 54, 7, 55, 10, 0 },
{ 1, 16, 62, 7, 44, 5, 0 },
{ 2, 24, 67, 7, 32, 3, 0 },
{ 0, 3, 35, 7, 67, 22, 1 },
{ 0, 6, 47, 7, 61, 13, 1 },
{ 0, 12, 58, 7, 50, 8, 0 },
{ 1, 19, 65, 7, 39, 4, 0 },
{ 0, 2, 29, 7, 68, 27, 2 },
{ 0, 4, 40, 7, 65, 18, 1 },
{ 0, 8, 52, 7, 57, 11, 0 },
{ 1, 14, 61, 7, 46, 6, 0 },
{ 1, 23, 67, 7, 34, 3, 0 },
{ 0, 3, 34, 7, 67, 23, 1 },
{ 0, 6, 46, 7, 61, 14, 1 },
{ 0, 11, 57, 7, 52, 8, 0 },
{ 1, 18, 65, 7, 40, 4, 0 },
{ 2, 27, 68, 7, 29, 2, 0 },
{ 0, 4, 39, 7, 65, 19, 1 },
{ 0, 8, 50, 7, 58, 12, 0 },
{ 1, 13, 61, 7, 47, 6, 0 },
{ 1, 22, 67, 7, 35, 3, 0 },
{ 0, 3, 32, 7, 67, 24, 2 },
{ 0, 5, 44, 7, 62, 16, 1 },
{ 0, 10, 55, 7, 54, 9, 0 },
{ 1, 17, 63, 7, 42, 5, 0 },
{ 2, 26, 67, 7, 31, 2, 0 },
{ 0, 4, 37, 7, 66, 20, 1 },
{ 0, 7, 49, 7, 60, 12, 0 } } },
.ver_phase_arr = {
.even = { { 2, 28, 68, 7, 28, 2, 0 },
{ 0, 4, 40, 7, 65, 18, 1 },
{ 0, 8, 51, 7, 58, 11, 0 },
{ 1, 14, 61, 7, 46, 6, 0 },
{ 1, 22, 67, 7, 35, 3, 0 },
{ 0, 3, 33, 7, 67, 24, 1 },
{ 0, 6, 45, 7, 61, 15, 1 },
{ 0, 10, 56, 7, 53, 9, 0 },
{ 1, 17, 64, 7, 41, 5, 0 },
{ 2, 27, 67, 7, 30, 2, 0 },
{ 0, 4, 38, 7, 65, 20, 1 },
{ 0, 7, 50, 7, 59, 12, 0 },
{ 0, 13, 60, 7, 48, 7, 0 },
{ 1, 21, 67, 7, 36, 3, 0 },
{ 0, 3, 31, 7, 67, 25, 2 },
{ 0, 5, 43, 7, 63, 16, 1 },
{ 0, 9, 56, 7, 54, 9, 0 },
{ 1, 16, 63, 7, 43, 5, 0 },
{ 2, 25, 67, 7, 31, 3, 0 },
{ 0, 3, 36, 7, 67, 21, 1 },
{ 0, 7, 48, 7, 60, 13, 0 },
{ 0, 12, 59, 7, 50, 7, 0 },
{ 1, 20, 65, 7, 38, 4, 0 },
{ 0, 2, 30, 7, 67, 27, 2 },
{ 0, 5, 41, 7, 64, 17, 1 },
{ 0, 9, 53, 7, 56, 10, 0 },
{ 1, 15, 61, 7, 45, 6, 0 },
{ 1, 24, 67, 7, 33, 3, 0 },
{ 0, 3, 35, 7, 67, 22, 1 },
{ 0, 6, 46, 7, 61, 14, 1 },
{ 0, 11, 58, 7, 51, 8, 0 },
{ 1, 18, 65, 7, 40, 4, 0 } },
.odd = { { 0, 12, 60, 7, 49, 7, 0 },
{ 1, 20, 66, 7, 37, 4, 0 },
{ 0, 2, 31, 7, 67, 26, 2 },
{ 0, 5, 42, 7, 63, 17, 1 },
{ 0, 9, 54, 7, 55, 10, 0 },
{ 1, 16, 62, 7, 44, 5, 0 },
{ 2, 24, 67, 7, 32, 3, 0 },
{ 0, 3, 35, 7, 67, 22, 1 },
{ 0, 6, 47, 7, 61, 13, 1 },
{ 0, 12, 58, 7, 50, 8, 0 },
{ 1, 19, 65, 7, 39, 4, 0 },
{ 0, 2, 29, 7, 68, 27, 2 },
{ 0, 4, 40, 7, 65, 18, 1 },
{ 0, 8, 52, 7, 57, 11, 0 },
{ 1, 14, 61, 7, 46, 6, 0 },
{ 1, 23, 67, 7, 34, 3, 0 },
{ 0, 3, 34, 7, 67, 23, 1 },
{ 0, 6, 46, 7, 61, 14, 1 },
{ 0, 11, 57, 7, 52, 8, 0 },
{ 1, 18, 65, 7, 40, 4, 0 },
{ 2, 27, 68, 7, 29, 2, 0 },
{ 0, 4, 39, 7, 65, 19, 1 },
{ 0, 8, 50, 7, 58, 12, 0 },
{ 1, 13, 61, 7, 47, 6, 0 },
{ 1, 22, 67, 7, 35, 3, 0 },
{ 0, 3, 32, 7, 67, 24, 2 },
{ 0, 5, 44, 7, 62, 16, 1 },
{ 0, 10, 55, 7, 54, 9, 0 },
{ 1, 17, 63, 7, 42, 5, 0 },
{ 2, 26, 67, 7, 31, 2, 0 },
{ 0, 4, 37, 7, 66, 20, 1 },
{ 0, 7, 49, 7, 60, 12, 0 } } },
.ptrn_arr = { { 0x42424849, 0x90921212, 0x24248490, 0x9212124,
0x48484909, 0x92121242, 0x84849090, 0x2424 } },
.sample_patrn_length = 242,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 90) = 0.262295 */
.hor_phase_arr = {
.even = { { 2, 28, 68, 7, 28, 2, 0 },
{ 0, 4, 38, 7, 65, 20, 1 },
{ 0, 7, 48, 7, 59, 13, 1 },
{ 0, 11, 58, 7, 51, 8, 0 },
{ 1, 17, 64, 7, 41, 5, 0 },
{ 2, 25, 67, 7, 31, 3, 0 },
{ 0, 3, 35, 7, 66, 23, 1 },
{ 0, 6, 45, 7, 61, 15, 1 },
{ 0, 10, 54, 7, 54, 10, 0 },
{ 1, 15, 61, 7, 45, 6, 0 },
{ 1, 23, 66, 7, 35, 3, 0 },
{ 0, 3, 31, 7, 67, 25, 2 },
{ 0, 5, 41, 7, 64, 17, 1 },
{ 0, 8, 51, 7, 58, 11, 0 },
{ 1, 13, 59, 7, 48, 7, 0 },
{ 1, 20, 65, 7, 38, 4, 0 } },
.odd = { { 0, 12, 59, 7, 49, 8, 0 },
{ 1, 19, 64, 7, 40, 4, 0 },
{ 2, 27, 67, 7, 30, 2, 0 },
{ 0, 4, 36, 7, 66, 21, 1 },
{ 0, 6, 46, 7, 61, 14, 1 },
{ 0, 10, 56, 7, 53, 9, 0 },
{ 1, 16, 63, 7, 43, 5, 0 },
{ 2, 24, 66, 7, 33, 3, 0 },
{ 0, 3, 33, 7, 66, 24, 2 },
{ 0, 5, 43, 7, 63, 16, 1 },
{ 0, 9, 53, 7, 56, 10, 0 },
{ 1, 14, 61, 7, 46, 6, 0 },
{ 1, 21, 66, 7, 36, 4, 0 },
{ 0, 2, 30, 7, 67, 27, 2 },
{ 0, 4, 40, 7, 64, 19, 1 },
{ 0, 8, 49, 7, 59, 12, 0 } } },
.ver_phase_arr = {
.even = { { 2, 28, 68, 7, 28, 2, 0 },
{ 0, 4, 38, 7, 65, 20, 1 },
{ 0, 7, 48, 7, 59, 13, 1 },
{ 0, 11, 58, 7, 51, 8, 0 },
{ 1, 17, 64, 7, 41, 5, 0 },
{ 2, 25, 67, 7, 31, 3, 0 },
{ 0, 3, 35, 7, 66, 23, 1 },
{ 0, 6, 45, 7, 61, 15, 1 },
{ 0, 10, 54, 7, 54, 10, 0 },
{ 1, 15, 61, 7, 45, 6, 0 },
{ 1, 23, 66, 7, 35, 3, 0 },
{ 0, 3, 31, 7, 67, 25, 2 },
{ 0, 5, 41, 7, 64, 17, 1 },
{ 0, 8, 51, 7, 58, 11, 0 },
{ 1, 13, 59, 7, 48, 7, 0 },
{ 1, 20, 65, 7, 38, 4, 0 } },
.odd = { { 0, 12, 59, 7, 49, 8, 0 },
{ 1, 19, 64, 7, 40, 4, 0 },
{ 2, 27, 67, 7, 30, 2, 0 },
{ 0, 4, 36, 7, 66, 21, 1 },
{ 0, 6, 46, 7, 61, 14, 1 },
{ 0, 10, 56, 7, 53, 9, 0 },
{ 1, 16, 63, 7, 43, 5, 0 },
{ 2, 24, 66, 7, 33, 3, 0 },
{ 0, 3, 33, 7, 66, 24, 2 },
{ 0, 5, 43, 7, 63, 16, 1 },
{ 0, 9, 53, 7, 56, 10, 0 },
{ 1, 14, 61, 7, 46, 6, 0 },
{ 1, 21, 66, 7, 36, 4, 0 },
{ 0, 2, 30, 7, 67, 27, 2 },
{ 0, 4, 40, 7, 64, 19, 1 },
{ 0, 8, 49, 7, 59, 12, 0 } } },
.ptrn_arr = { { 0x42484849, 0x92121242, 0x84849090, 0x242424 } },
.sample_patrn_length = 122,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 91) = 0.260163 */
.hor_phase_arr = {
.even = { { 2, 29, 66, 7, 29, 2, 0 },
{ 0, 4, 36, 7, 66, 21, 1 },
{ 0, 6, 45, 7, 61, 15, 1 },
{ 0, 9, 52, 7, 56, 11, 0 },
{ 1, 13, 59, 7, 48, 7, 0 },
{ 1, 19, 63, 7, 40, 5, 0 },
{ 2, 26, 65, 7, 32, 3, 0 },
{ 0, 3, 33, 7, 66, 24, 2 },
{ 0, 5, 41, 7, 63, 18, 1 },
{ 0, 8, 49, 7, 59, 12, 0 },
{ 0, 12, 57, 7, 51, 8, 0 },
{ 1, 17, 62, 7, 43, 5, 0 },
{ 1, 23, 66, 7, 35, 3, 0 },
{ 0, 3, 30, 7, 66, 27, 2 },
{ 0, 4, 38, 7, 65, 20, 1 },
{ 0, 7, 46, 7, 60, 14, 1 },
{ 0, 10, 54, 7, 54, 10, 0 },
{ 1, 14, 60, 7, 46, 7, 0 },
{ 1, 20, 65, 7, 38, 4, 0 },
{ 2, 27, 66, 7, 30, 3, 0 },
{ 0, 3, 35, 7, 66, 23, 1 },
{ 0, 5, 43, 7, 62, 17, 1 },
{ 0, 8, 51, 7, 57, 12, 0 },
{ 0, 12, 59, 7, 49, 8, 0 },
{ 1, 18, 63, 7, 41, 5, 0 },
{ 2, 24, 66, 7, 33, 3, 0 },
{ 0, 3, 32, 7, 65, 26, 2 },
{ 0, 5, 40, 7, 63, 19, 1 },
{ 0, 7, 48, 7, 59, 13, 1 },
{ 0, 11, 56, 7, 52, 9, 0 },
{ 1, 15, 61, 7, 45, 6, 0 },
{ 1, 21, 66, 7, 36, 4, 0 } },
.odd = { { 0, 12, 58, 7, 50, 8, 0 },
{ 1, 17, 63, 7, 42, 5, 0 },
{ 2, 23, 66, 7, 34, 3, 0 },
{ 0, 3, 31, 7, 66, 26, 2 },
{ 0, 4, 39, 7, 64, 20, 1 },
{ 0, 7, 47, 7, 59, 14, 1 },
{ 0, 10, 55, 7, 53, 10, 0 },
{ 1, 15, 61, 7, 45, 6, 0 },
{ 1, 21, 65, 7, 37, 4, 0 },
{ 2, 28, 67, 7, 29, 2, 0 },
{ 0, 4, 36, 7, 65, 22, 1 },
{ 0, 6, 44, 7, 61, 16, 1 },
{ 0, 9, 52, 7, 56, 11, 0 },
{ 1, 13, 58, 7, 49, 7, 0 },
{ 1, 18, 64, 7, 40, 5, 0 },
{ 2, 25, 66, 7, 32, 3, 0 },
{ 0, 3, 32, 7, 66, 25, 2 },
{ 0, 5, 40, 7, 64, 18, 1 },
{ 0, 7, 49, 7, 58, 13, 1 },
{ 0, 11, 56, 7, 52, 9, 0 },
{ 1, 16, 61, 7, 44, 6, 0 },
{ 1, 22, 65, 7, 36, 4, 0 },
{ 0, 2, 29, 7, 67, 28, 2 },
{ 0, 4, 37, 7, 65, 21, 1 },
{ 0, 6, 45, 7, 61, 15, 1 },
{ 0, 10, 53, 7, 55, 10, 0 },
{ 1, 14, 59, 7, 47, 7, 0 },
{ 1, 20, 64, 7, 39, 4, 0 },
{ 2, 26, 66, 7, 31, 3, 0 },
{ 0, 3, 34, 7, 66, 23, 2 },
{ 0, 5, 42, 7, 63, 17, 1 },
{ 0, 8, 50, 7, 58, 12, 0 } } },
.ver_phase_arr = {
.even = { { 2, 29, 66, 7, 29, 2, 0 },
{ 0, 4, 36, 7, 66, 21, 1 },
{ 0, 6, 45, 7, 61, 15, 1 },
{ 0, 9, 52, 7, 56, 11, 0 },
{ 1, 13, 59, 7, 48, 7, 0 },
{ 1, 19, 63, 7, 40, 5, 0 },
{ 2, 26, 65, 7, 32, 3, 0 },
{ 0, 3, 33, 7, 66, 24, 2 },
{ 0, 5, 41, 7, 63, 18, 1 },
{ 0, 8, 49, 7, 59, 12, 0 },
{ 0, 12, 57, 7, 51, 8, 0 },
{ 1, 17, 62, 7, 43, 5, 0 },
{ 1, 23, 66, 7, 35, 3, 0 },
{ 0, 3, 30, 7, 66, 27, 2 },
{ 0, 4, 38, 7, 65, 20, 1 },
{ 0, 7, 46, 7, 60, 14, 1 },
{ 0, 10, 54, 7, 54, 10, 0 },
{ 1, 14, 60, 7, 46, 7, 0 },
{ 1, 20, 65, 7, 38, 4, 0 },
{ 2, 27, 66, 7, 30, 3, 0 },
{ 0, 3, 35, 7, 66, 23, 1 },
{ 0, 5, 43, 7, 62, 17, 1 },
{ 0, 8, 51, 7, 57, 12, 0 },
{ 0, 12, 59, 7, 49, 8, 0 },
{ 1, 18, 63, 7, 41, 5, 0 },
{ 2, 24, 66, 7, 33, 3, 0 },
{ 0, 3, 32, 7, 65, 26, 2 },
{ 0, 5, 40, 7, 63, 19, 1 },
{ 0, 7, 48, 7, 59, 13, 1 },
{ 0, 11, 56, 7, 52, 9, 0 },
{ 1, 15, 61, 7, 45, 6, 0 },
{ 1, 21, 66, 7, 36, 4, 0 } },
.odd = { { 0, 12, 58, 7, 50, 8, 0 },
{ 1, 17, 63, 7, 42, 5, 0 },
{ 2, 23, 66, 7, 34, 3, 0 },
{ 0, 3, 31, 7, 66, 26, 2 },
{ 0, 4, 39, 7, 64, 20, 1 },
{ 0, 7, 47, 7, 59, 14, 1 },
{ 0, 10, 55, 7, 53, 10, 0 },
{ 1, 15, 61, 7, 45, 6, 0 },
{ 1, 21, 65, 7, 37, 4, 0 },
{ 2, 28, 67, 7, 29, 2, 0 },
{ 0, 4, 36, 7, 65, 22, 1 },
{ 0, 6, 44, 7, 61, 16, 1 },
{ 0, 9, 52, 7, 56, 11, 0 },
{ 1, 13, 58, 7, 49, 7, 0 },
{ 1, 18, 64, 7, 40, 5, 0 },
{ 2, 25, 66, 7, 32, 3, 0 },
{ 0, 3, 32, 7, 66, 25, 2 },
{ 0, 5, 40, 7, 64, 18, 1 },
{ 0, 7, 49, 7, 58, 13, 1 },
{ 0, 11, 56, 7, 52, 9, 0 },
{ 1, 16, 61, 7, 44, 6, 0 },
{ 1, 22, 65, 7, 36, 4, 0 },
{ 0, 2, 29, 7, 67, 28, 2 },
{ 0, 4, 37, 7, 65, 21, 1 },
{ 0, 6, 45, 7, 61, 15, 1 },
{ 0, 10, 53, 7, 55, 10, 0 },
{ 1, 14, 59, 7, 47, 7, 0 },
{ 1, 20, 64, 7, 39, 4, 0 },
{ 2, 26, 66, 7, 31, 3, 0 },
{ 0, 3, 34, 7, 66, 23, 2 },
{ 0, 5, 42, 7, 63, 17, 1 },
{ 0, 8, 50, 7, 58, 12, 0 } } },
.ptrn_arr = { { 0x42484849, 0x12124242, 0x90909212, 0x24848484,
0x21242424, 0x9090921, 0x48484849, 0x24242 } },
.sample_patrn_length = 246,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 92) = 0.258065 */
.hor_phase_arr = {
.even = { { 2, 29, 66, 7, 29, 2, 0 },
{ 0, 4, 35, 7, 64, 23, 2 },
{ 0, 5, 41, 7, 63, 18, 1 },
{ 0, 7, 48, 7, 58, 14, 1 },
{ 0, 10, 54, 7, 54, 10, 0 },
{ 1, 14, 58, 7, 48, 7, 0 },
{ 1, 18, 63, 7, 41, 5, 0 },
{ 2, 23, 64, 7, 35, 4, 0 } },
.odd = { { 0, 12, 56, 7, 51, 9, 0 },
{ 1, 16, 61, 7, 44, 6, 0 },
{ 1, 20, 65, 7, 38, 4, 0 },
{ 2, 26, 65, 7, 32, 3, 0 },
{ 0, 3, 32, 7, 65, 26, 2 },
{ 0, 4, 38, 7, 65, 20, 1 },
{ 0, 6, 44, 7, 61, 16, 1 },
{ 0, 9, 51, 7, 56, 12, 0 } } },
.ver_phase_arr = {
.even = { { 2, 29, 66, 7, 29, 2, 0 },
{ 0, 4, 35, 7, 64, 23, 2 },
{ 0, 5, 41, 7, 63, 18, 1 },
{ 0, 7, 48, 7, 58, 14, 1 },
{ 0, 10, 54, 7, 54, 10, 0 },
{ 1, 14, 58, 7, 48, 7, 0 },
{ 1, 18, 63, 7, 41, 5, 0 },
{ 2, 23, 64, 7, 35, 4, 0 } },
.odd = { { 0, 12, 56, 7, 51, 9, 0 },
{ 1, 16, 61, 7, 44, 6, 0 },
{ 1, 20, 65, 7, 38, 4, 0 },
{ 2, 26, 65, 7, 32, 3, 0 },
{ 0, 3, 32, 7, 65, 26, 2 },
{ 0, 4, 38, 7, 65, 20, 1 },
{ 0, 6, 44, 7, 61, 16, 1 },
{ 0, 9, 51, 7, 56, 12, 0 } } },
.ptrn_arr = { { 0x48484849, 0x2424242 } },
.sample_patrn_length = 62,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 93) = 0.256 */
.hor_phase_arr = {
.even = { { 2, 29, 66, 7, 29, 2, 0 },
{ 0, 3, 33, 7, 65, 25, 2 },
{ 0, 4, 38, 7, 64, 21, 1 },
{ 0, 6, 43, 7, 61, 17, 1 },
{ 0, 8, 47, 7, 58, 14, 1 },
{ 0, 10, 52, 7, 55, 11, 0 },
{ 1, 12, 56, 7, 50, 9, 0 },
{ 1, 15, 59, 7, 46, 7, 0 },
{ 1, 18, 63, 7, 41, 5, 0 },
{ 1, 22, 65, 7, 36, 4, 0 },
{ 2, 26, 65, 7, 32, 3, 0 },
{ 0, 3, 30, 7, 66, 27, 2 },
{ 0, 4, 35, 7, 64, 23, 2 },
{ 0, 5, 40, 7, 63, 19, 1 },
{ 0, 6, 44, 7, 61, 16, 1 },
{ 0, 8, 49, 7, 57, 13, 1 },
{ 0, 10, 55, 7, 53, 10, 0 },
{ 1, 13, 57, 7, 49, 8, 0 },
{ 1, 16, 61, 7, 44, 6, 0 },
{ 1, 19, 63, 7, 40, 5, 0 },
{ 2, 23, 64, 7, 35, 4, 0 },
{ 2, 27, 66, 7, 30, 3, 0 },
{ 0, 3, 32, 7, 65, 26, 2 },
{ 0, 4, 36, 7, 65, 22, 1 },
{ 0, 5, 41, 7, 63, 18, 1 },
{ 0, 7, 46, 7, 59, 15, 1 },
{ 0, 9, 50, 7, 56, 12, 1 },
{ 0, 11, 55, 7, 52, 10, 0 },
{ 1, 14, 58, 7, 47, 8, 0 },
{ 1, 17, 61, 7, 43, 6, 0 },
{ 1, 21, 64, 7, 38, 4, 0 },
{ 2, 25, 65, 7, 33, 3, 0 } },
.odd = { { 0, 12, 56, 7, 51, 9, 0 },
{ 1, 14, 59, 7, 47, 7, 0 },
{ 1, 18, 61, 7, 42, 6, 0 },
{ 1, 21, 65, 7, 37, 4, 0 },
{ 2, 25, 65, 7, 33, 3, 0 },
{ 0, 3, 30, 7, 65, 28, 2 },
{ 0, 3, 34, 7, 65, 24, 2 },
{ 0, 5, 39, 7, 63, 20, 1 },
{ 0, 6, 44, 7, 61, 16, 1 },
{ 0, 8, 48, 7, 58, 13, 1 },
{ 0, 10, 53, 7, 54, 11, 0 },
{ 1, 12, 57, 7, 50, 8, 0 },
{ 1, 15, 60, 7, 45, 7, 0 },
{ 1, 19, 63, 7, 40, 5, 0 },
{ 2, 23, 63, 7, 36, 4, 0 },
{ 2, 27, 65, 7, 31, 3, 0 },
{ 0, 3, 31, 7, 65, 27, 2 },
{ 0, 4, 36, 7, 63, 23, 2 },
{ 0, 5, 40, 7, 63, 19, 1 },
{ 0, 7, 45, 7, 60, 15, 1 },
{ 0, 8, 50, 7, 57, 12, 1 },
{ 0, 11, 54, 7, 53, 10, 0 },
{ 1, 13, 58, 7, 48, 8, 0 },
{ 1, 16, 61, 7, 44, 6, 0 },
{ 1, 20, 63, 7, 39, 5, 0 },
{ 2, 24, 65, 7, 34, 3, 0 },
{ 2, 28, 65, 7, 30, 3, 0 },
{ 0, 3, 33, 7, 65, 25, 2 },
{ 0, 4, 37, 7, 65, 21, 1 },
{ 0, 6, 42, 7, 61, 18, 1 },
{ 0, 7, 47, 7, 59, 14, 1 },
{ 0, 9, 51, 7, 56, 12, 0 } } },
.ver_phase_arr = {
.even = { { 2, 29, 66, 7, 29, 2, 0 },
{ 0, 3, 33, 7, 65, 25, 2 },
{ 0, 4, 38, 7, 64, 21, 1 },
{ 0, 6, 43, 7, 61, 17, 1 },
{ 0, 8, 47, 7, 58, 14, 1 },
{ 0, 10, 52, 7, 55, 11, 0 },
{ 1, 12, 56, 7, 50, 9, 0 },
{ 1, 15, 59, 7, 46, 7, 0 },
{ 1, 18, 63, 7, 41, 5, 0 },
{ 1, 22, 65, 7, 36, 4, 0 },
{ 2, 26, 65, 7, 32, 3, 0 },
{ 0, 3, 30, 7, 66, 27, 2 },
{ 0, 4, 35, 7, 64, 23, 2 },
{ 0, 5, 40, 7, 63, 19, 1 },
{ 0, 6, 44, 7, 61, 16, 1 },
{ 0, 8, 49, 7, 57, 13, 1 },
{ 0, 10, 55, 7, 53, 10, 0 },
{ 1, 13, 57, 7, 49, 8, 0 },
{ 1, 16, 61, 7, 44, 6, 0 },
{ 1, 19, 63, 7, 40, 5, 0 },
{ 2, 23, 64, 7, 35, 4, 0 },
{ 2, 27, 66, 7, 30, 3, 0 },
{ 0, 3, 32, 7, 65, 26, 2 },
{ 0, 4, 36, 7, 65, 22, 1 },
{ 0, 5, 41, 7, 63, 18, 1 },
{ 0, 7, 46, 7, 59, 15, 1 },
{ 0, 9, 50, 7, 56, 12, 1 },
{ 0, 11, 55, 7, 52, 10, 0 },
{ 1, 14, 58, 7, 47, 8, 0 },
{ 1, 17, 61, 7, 43, 6, 0 },
{ 1, 21, 64, 7, 38, 4, 0 },
{ 2, 25, 65, 7, 33, 3, 0 } },
.odd = { { 0, 12, 56, 7, 51, 9, 0 },
{ 1, 14, 59, 7, 47, 7, 0 },
{ 1, 18, 61, 7, 42, 6, 0 },
{ 1, 21, 65, 7, 37, 4, 0 },
{ 2, 25, 65, 7, 33, 3, 0 },
{ 0, 3, 30, 7, 65, 28, 2 },
{ 0, 3, 34, 7, 65, 24, 2 },
{ 0, 5, 39, 7, 63, 20, 1 },
{ 0, 6, 44, 7, 61, 16, 1 },
{ 0, 8, 48, 7, 58, 13, 1 },
{ 0, 10, 53, 7, 54, 11, 0 },
{ 1, 12, 57, 7, 50, 8, 0 },
{ 1, 15, 60, 7, 45, 7, 0 },
{ 1, 19, 63, 7, 40, 5, 0 },
{ 2, 23, 63, 7, 36, 4, 0 },
{ 2, 27, 65, 7, 31, 3, 0 },
{ 0, 3, 31, 7, 65, 27, 2 },
{ 0, 4, 36, 7, 63, 23, 2 },
{ 0, 5, 40, 7, 63, 19, 1 },
{ 0, 7, 45, 7, 60, 15, 1 },
{ 0, 8, 50, 7, 57, 12, 1 },
{ 0, 11, 54, 7, 53, 10, 0 },
{ 1, 13, 58, 7, 48, 8, 0 },
{ 1, 16, 61, 7, 44, 6, 0 },
{ 1, 20, 63, 7, 39, 5, 0 },
{ 2, 24, 65, 7, 34, 3, 0 },
{ 2, 28, 65, 7, 30, 3, 0 },
{ 0, 3, 33, 7, 65, 25, 2 },
{ 0, 4, 37, 7, 65, 21, 1 },
{ 0, 6, 42, 7, 61, 18, 1 },
{ 0, 7, 47, 7, 59, 14, 1 },
{ 0, 9, 51, 7, 56, 12, 0 } } },
.ptrn_arr = { { 0x48484849, 0x42424248, 0x12124242, 0x92121212,
0x90909090, 0x84848490, 0x24248484, 0x242424 } },
.sample_patrn_length = 250,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 94) = 0.253968 */
.hor_phase_arr = {
.even = { { 3, 29, 64, 7, 29, 3, 0 },
{ 0, 3, 32, 7, 65, 26, 2 },
{ 0, 4, 35, 7, 64, 23, 2 },
{ 0, 5, 38, 7, 63, 21, 1 },
{ 0, 5, 41, 7, 63, 18, 1 },
{ 0, 7, 44, 7, 60, 16, 1 },
{ 0, 8, 47, 7, 58, 14, 1 },
{ 0, 9, 50, 7, 56, 12, 1 },
{ 0, 11, 53, 7, 53, 11, 0 },
{ 1, 12, 56, 7, 50, 9, 0 },
{ 1, 14, 58, 7, 47, 8, 0 },
{ 1, 16, 60, 7, 44, 7, 0 },
{ 1, 18, 63, 7, 41, 5, 0 },
{ 1, 21, 63, 7, 38, 5, 0 },
{ 2, 23, 64, 7, 35, 4, 0 },
{ 2, 26, 65, 7, 32, 3, 0 } },
.odd = { { 0, 11, 55, 7, 52, 10, 0 },
{ 1, 13, 57, 7, 49, 8, 0 },
{ 1, 15, 59, 7, 46, 7, 0 },
{ 1, 17, 61, 7, 43, 6, 0 },
{ 1, 20, 62, 7, 40, 5, 0 },
{ 2, 22, 63, 7, 37, 4, 0 },
{ 2, 25, 65, 7, 33, 3, 0 },
{ 2, 28, 65, 7, 30, 3, 0 },
{ 0, 3, 30, 7, 65, 28, 2 },
{ 0, 3, 33, 7, 65, 25, 2 },
{ 0, 4, 37, 7, 63, 22, 2 },
{ 0, 5, 40, 7, 62, 20, 1 },
{ 0, 6, 43, 7, 61, 17, 1 },
{ 0, 7, 46, 7, 59, 15, 1 },
{ 0, 8, 49, 7, 57, 13, 1 },
{ 0, 10, 52, 7, 55, 11, 0 } } },
.ver_phase_arr = {
.even = { { 3, 29, 64, 7, 29, 3, 0 },
{ 0, 3, 32, 7, 65, 26, 2 },
{ 0, 4, 35, 7, 64, 23, 2 },
{ 0, 5, 38, 7, 63, 21, 1 },
{ 0, 5, 41, 7, 63, 18, 1 },
{ 0, 7, 44, 7, 60, 16, 1 },
{ 0, 8, 47, 7, 58, 14, 1 },
{ 0, 9, 50, 7, 56, 12, 1 },
{ 0, 11, 53, 7, 53, 11, 0 },
{ 1, 12, 56, 7, 50, 9, 0 },
{ 1, 14, 58, 7, 47, 8, 0 },
{ 1, 16, 60, 7, 44, 7, 0 },
{ 1, 18, 63, 7, 41, 5, 0 },
{ 1, 21, 63, 7, 38, 5, 0 },
{ 2, 23, 64, 7, 35, 4, 0 },
{ 2, 26, 65, 7, 32, 3, 0 } },
.odd = { { 0, 11, 55, 7, 52, 10, 0 },
{ 1, 13, 57, 7, 49, 8, 0 },
{ 1, 15, 59, 7, 46, 7, 0 },
{ 1, 17, 61, 7, 43, 6, 0 },
{ 1, 20, 62, 7, 40, 5, 0 },
{ 2, 22, 63, 7, 37, 4, 0 },
{ 2, 25, 65, 7, 33, 3, 0 },
{ 2, 28, 65, 7, 30, 3, 0 },
{ 0, 3, 30, 7, 65, 28, 2 },
{ 0, 3, 33, 7, 65, 25, 2 },
{ 0, 4, 37, 7, 63, 22, 2 },
{ 0, 5, 40, 7, 62, 20, 1 },
{ 0, 6, 43, 7, 61, 17, 1 },
{ 0, 7, 46, 7, 59, 15, 1 },
{ 0, 8, 49, 7, 57, 13, 1 },
{ 0, 10, 52, 7, 55, 11, 0 } } },
.ptrn_arr = { { 0x48484849, 0x48484848, 0x42424242, 0x2424242 } },
.sample_patrn_length = 126,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 95) = 0.251969 */
.hor_phase_arr = {
.even = { { 3, 29, 64, 7, 29, 3, 0 },
{ 0, 3, 31, 7, 64, 28, 2 },
{ 0, 3, 32, 7, 65, 26, 2 },
{ 0, 4, 34, 7, 63, 25, 2 },
{ 0, 4, 35, 7, 63, 24, 2 },
{ 0, 4, 37, 7, 63, 22, 2 },
{ 0, 5, 38, 7, 63, 21, 1 },
{ 0, 5, 40, 7, 62, 20, 1 },
{ 0, 6, 41, 7, 61, 19, 1 },
{ 0, 6, 43, 7, 61, 17, 1 },
{ 0, 7, 44, 7, 60, 16, 1 },
{ 0, 7, 46, 7, 59, 15, 1 },
{ 0, 8, 47, 7, 58, 14, 1 },
{ 0, 9, 49, 7, 56, 13, 1 },
{ 0, 9, 50, 7, 56, 12, 1 },
{ 0, 10, 51, 7, 54, 12, 1 },
{ 0, 11, 53, 7, 53, 11, 0 },
{ 1, 12, 54, 7, 51, 10, 0 },
{ 1, 12, 56, 7, 50, 9, 0 },
{ 1, 13, 56, 7, 49, 9, 0 },
{ 1, 14, 58, 7, 47, 8, 0 },
{ 1, 15, 59, 7, 46, 7, 0 },
{ 1, 16, 60, 7, 44, 7, 0 },
{ 1, 17, 61, 7, 43, 6, 0 },
{ 1, 19, 61, 7, 41, 6, 0 },
{ 1, 20, 62, 7, 40, 5, 0 },
{ 1, 21, 63, 7, 38, 5, 0 },
{ 2, 22, 63, 7, 37, 4, 0 },
{ 2, 24, 63, 7, 35, 4, 0 },
{ 2, 25, 63, 7, 34, 4, 0 },
{ 2, 26, 65, 7, 32, 3, 0 },
{ 2, 28, 64, 7, 31, 3, 0 } },
.odd = { { 0, 11, 55, 7, 52, 10, 0 },
{ 1, 12, 54, 7, 51, 10, 0 },
{ 1, 13, 56, 7, 49, 9, 0 },
{ 1, 14, 57, 7, 48, 8, 0 },
{ 1, 15, 58, 7, 46, 8, 0 },
{ 1, 16, 59, 7, 45, 7, 0 },
{ 1, 17, 61, 7, 43, 6, 0 },
{ 1, 18, 61, 7, 42, 6, 0 },
{ 1, 19, 63, 7, 40, 5, 0 },
{ 1, 20, 63, 7, 39, 5, 0 },
{ 2, 22, 62, 7, 37, 5, 0 },
{ 2, 23, 63, 7, 36, 4, 0 },
{ 2, 24, 64, 7, 34, 4, 0 },
{ 2, 26, 64, 7, 33, 3, 0 },
{ 2, 27, 65, 7, 31, 3, 0 },
{ 3, 28, 64, 7, 30, 3, 0 },
{ 0, 3, 30, 7, 64, 28, 3 },
{ 0, 3, 31, 7, 65, 27, 2 },
{ 0, 3, 33, 7, 64, 26, 2 },
{ 0, 4, 34, 7, 64, 24, 2 },
{ 0, 4, 36, 7, 63, 23, 2 },
{ 0, 5, 37, 7, 62, 22, 2 },
{ 0, 5, 39, 7, 63, 20, 1 },
{ 0, 5, 40, 7, 63, 19, 1 },
{ 0, 6, 42, 7, 61, 18, 1 },
{ 0, 6, 43, 7, 61, 17, 1 },
{ 0, 7, 45, 7, 59, 16, 1 },
{ 0, 8, 46, 7, 58, 15, 1 },
{ 0, 8, 48, 7, 57, 14, 1 },
{ 0, 9, 49, 7, 56, 13, 1 },
{ 0, 10, 51, 7, 54, 12, 1 },
{ 0, 10, 52, 7, 55, 11, 0 } } },
.ver_phase_arr = {
.even = { { 3, 29, 64, 7, 29, 3, 0 },
{ 0, 3, 31, 7, 64, 28, 2 },
{ 0, 3, 32, 7, 65, 26, 2 },
{ 0, 4, 34, 7, 63, 25, 2 },
{ 0, 4, 35, 7, 63, 24, 2 },
{ 0, 4, 37, 7, 63, 22, 2 },
{ 0, 5, 38, 7, 63, 21, 1 },
{ 0, 5, 40, 7, 62, 20, 1 },
{ 0, 6, 41, 7, 61, 19, 1 },
{ 0, 6, 43, 7, 61, 17, 1 },
{ 0, 7, 44, 7, 60, 16, 1 },
{ 0, 7, 46, 7, 59, 15, 1 },
{ 0, 8, 47, 7, 58, 14, 1 },
{ 0, 9, 49, 7, 56, 13, 1 },
{ 0, 9, 50, 7, 56, 12, 1 },
{ 0, 10, 51, 7, 54, 12, 1 },
{ 0, 11, 53, 7, 53, 11, 0 },
{ 1, 12, 54, 7, 51, 10, 0 },
{ 1, 12, 56, 7, 50, 9, 0 },
{ 1, 13, 56, 7, 49, 9, 0 },
{ 1, 14, 58, 7, 47, 8, 0 },
{ 1, 15, 59, 7, 46, 7, 0 },
{ 1, 16, 60, 7, 44, 7, 0 },
{ 1, 17, 61, 7, 43, 6, 0 },
{ 1, 19, 61, 7, 41, 6, 0 },
{ 1, 20, 62, 7, 40, 5, 0 },
{ 1, 21, 63, 7, 38, 5, 0 },
{ 2, 22, 63, 7, 37, 4, 0 },
{ 2, 24, 63, 7, 35, 4, 0 },
{ 2, 25, 63, 7, 34, 4, 0 },
{ 2, 26, 65, 7, 32, 3, 0 },
{ 2, 28, 64, 7, 31, 3, 0 } },
.odd = { { 0, 11, 55, 7, 52, 10, 0 },
{ 1, 12, 54, 7, 51, 10, 0 },
{ 1, 13, 56, 7, 49, 9, 0 },
{ 1, 14, 57, 7, 48, 8, 0 },
{ 1, 15, 58, 7, 46, 8, 0 },
{ 1, 16, 59, 7, 45, 7, 0 },
{ 1, 17, 61, 7, 43, 6, 0 },
{ 1, 18, 61, 7, 42, 6, 0 },
{ 1, 19, 63, 7, 40, 5, 0 },
{ 1, 20, 63, 7, 39, 5, 0 },
{ 2, 22, 62, 7, 37, 5, 0 },
{ 2, 23, 63, 7, 36, 4, 0 },
{ 2, 24, 64, 7, 34, 4, 0 },
{ 2, 26, 64, 7, 33, 3, 0 },
{ 2, 27, 65, 7, 31, 3, 0 },
{ 3, 28, 64, 7, 30, 3, 0 },
{ 0, 3, 30, 7, 64, 28, 3 },
{ 0, 3, 31, 7, 65, 27, 2 },
{ 0, 3, 33, 7, 64, 26, 2 },
{ 0, 4, 34, 7, 64, 24, 2 },
{ 0, 4, 36, 7, 63, 23, 2 },
{ 0, 5, 37, 7, 62, 22, 2 },
{ 0, 5, 39, 7, 63, 20, 1 },
{ 0, 5, 40, 7, 63, 19, 1 },
{ 0, 6, 42, 7, 61, 18, 1 },
{ 0, 6, 43, 7, 61, 17, 1 },
{ 0, 7, 45, 7, 59, 16, 1 },
{ 0, 8, 46, 7, 58, 15, 1 },
{ 0, 8, 48, 7, 57, 14, 1 },
{ 0, 9, 49, 7, 56, 13, 1 },
{ 0, 10, 51, 7, 54, 12, 1 },
{ 0, 10, 52, 7, 55, 11, 0 } } },
.ptrn_arr = { { 0x48484849, 0x48484848, 0x48484848, 0x48484848,
0x42424242, 0x42424242, 0x42424242, 0x2424242 } },
.sample_patrn_length = 254,
.hor_ds_en = 1,
.ver_ds_en = 1
}, {
/* Scale factor 32 / (32 + 96) = 0.25 */
.hor_phase_arr = {
.even = { { 3, 29, 64, 7, 29, 3, 0 } },
.odd = { { 0, 11, 53, 7, 53, 11, 0 } } },
.ver_phase_arr = {
.even = { { 3, 29, 64, 7, 29, 3, 0 } },
.odd = { { 0, 11, 53, 7, 53, 11, 0 } } },
.ptrn_arr = { { 0x9 } },
.sample_patrn_length = 8,
.hor_ds_en = 1,
.ver_ds_en = 1
} };
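/* 4-tap downscaling filter coefficients (used for luma), in IMGU_SCALER_FP fixed point */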
const s32 imgu_css_downscale_4taps[IMGU_SCALER_DOWNSCALE_4TAPS_LEN] = {
IMGU_SCALER_FP * -0.000000000000000,
IMGU_SCALER_FP * -0.000249009327023,
IMGU_SCALER_FP * -0.001022241683322,
IMGU_SCALER_FP * -0.002352252699175,
IMGU_SCALER_FP * -0.004261594242362,
IMGU_SCALER_FP * -0.006761648795689,
IMGU_SCALER_FP * -0.009851589454154,
IMGU_SCALER_FP * -0.013517488475013,
IMGU_SCALER_FP * -0.017731595701026,
IMGU_SCALER_FP * -0.022451806160682,
IMGU_SCALER_FP * -0.027621333752351,
IMGU_SCALER_FP * -0.033168605172067,
IMGU_SCALER_FP * -0.039007385183627,
IMGU_SCALER_FP * -0.045037140997445,
IMGU_SCALER_FP * -0.051143649969349,
IMGU_SCALER_FP * -0.057199851105019,
IMGU_SCALER_FP * -0.063066937016941,
IMGU_SCALER_FP * -0.068595679088417,
IMGU_SCALER_FP * -0.073627974715370,
IMGU_SCALER_FP * -0.077998601684588,
IMGU_SCALER_FP * -0.081537161069780,
IMGU_SCALER_FP * -0.084070186546763,
IMGU_SCALER_FP * -0.085423394806327,
IMGU_SCALER_FP * -0.085424048835192,
IMGU_SCALER_FP * -0.083903403294908,
IMGU_SCALER_FP * -0.080699199103829,
IMGU_SCALER_FP * -0.075658172660608,
IMGU_SCALER_FP * -0.068638543974523,
IMGU_SCALER_FP * -0.059512447316781,
IMGU_SCALER_FP * -0.048168267897836,
IMGU_SCALER_FP * -0.034512848520921,
IMGU_SCALER_FP * -0.018473531164409,
IMGU_SCALER_FP * 0.000000000000000,
IMGU_SCALER_FP * 0.020934105554674,
IMGU_SCALER_FP * 0.044329836544650,
IMGU_SCALER_FP * 0.070161864654994,
IMGU_SCALER_FP * 0.098377719033862,
IMGU_SCALER_FP * 0.128897348012514,
IMGU_SCALER_FP * 0.161613019706978,
IMGU_SCALER_FP * 0.196389570939079,
IMGU_SCALER_FP * 0.233065009152522,
IMGU_SCALER_FP * 0.271451467092549,
IMGU_SCALER_FP * 0.311336505037934,
IMGU_SCALER_FP * 0.352484750396743,
IMGU_SCALER_FP * 0.394639859577736,
IMGU_SCALER_FP * 0.437526782302744,
IMGU_SCALER_FP * 0.480854304005320,
IMGU_SCALER_FP * 0.524317837738108,
IMGU_SCALER_FP * 0.567602433152471,
IMGU_SCALER_FP * 0.610385966680669,
IMGU_SCALER_FP * 0.652342474098843,
IMGU_SCALER_FP * 0.693145584226952,
IMGU_SCALER_FP * 0.732472010670320,
IMGU_SCALER_FP * 0.770005057258970,
IMGU_SCALER_FP * 0.805438092218553,
IMGU_SCALER_FP * 0.838477946124244,
IMGU_SCALER_FP * 0.868848189350256,
IMGU_SCALER_FP * 0.896292246026874,
IMGU_SCALER_FP * 0.920576303438191,
IMGU_SCALER_FP * 0.941491978311745,
IMGU_SCALER_FP * 0.958858704531378,
IMGU_SCALER_FP * 0.972525810403401,
IMGU_SCALER_FP * 0.982374257672165,
IMGU_SCALER_FP * 0.988318018955586,
IMGU_SCALER_FP * 0.990305075088925,
IMGU_SCALER_FP * 0.988318018955586,
IMGU_SCALER_FP * 0.982374257672165,
IMGU_SCALER_FP * 0.972525810403401,
IMGU_SCALER_FP * 0.958858704531378,
IMGU_SCALER_FP * 0.941491978311745,
IMGU_SCALER_FP * 0.920576303438191,
IMGU_SCALER_FP * 0.896292246026874,
IMGU_SCALER_FP * 0.868848189350256,
IMGU_SCALER_FP * 0.838477946124244,
IMGU_SCALER_FP * 0.805438092218553,
IMGU_SCALER_FP * 0.770005057258970,
IMGU_SCALER_FP * 0.732472010670320,
IMGU_SCALER_FP * 0.693145584226952,
IMGU_SCALER_FP * 0.652342474098843,
IMGU_SCALER_FP * 0.610385966680669,
IMGU_SCALER_FP * 0.567602433152471,
IMGU_SCALER_FP * 0.524317837738108,
IMGU_SCALER_FP * 0.480854304005320,
IMGU_SCALER_FP * 0.437526782302744,
IMGU_SCALER_FP * 0.394639859577736,
IMGU_SCALER_FP * 0.352484750396743,
IMGU_SCALER_FP * 0.311336505037934,
IMGU_SCALER_FP * 0.271451467092549,
IMGU_SCALER_FP * 0.233065009152522,
IMGU_SCALER_FP * 0.196389570939079,
IMGU_SCALER_FP * 0.161613019706978,
IMGU_SCALER_FP * 0.128897348012514,
IMGU_SCALER_FP * 0.098377719033862,
IMGU_SCALER_FP * 0.070161864654994,
IMGU_SCALER_FP * 0.044329836544650,
IMGU_SCALER_FP * 0.020934105554674,
IMGU_SCALER_FP * 0.000000000000000,
IMGU_SCALER_FP * -0.018473531164409,
IMGU_SCALER_FP * -0.034512848520921,
IMGU_SCALER_FP * -0.048168267897836,
IMGU_SCALER_FP * -0.059512447316781,
IMGU_SCALER_FP * -0.068638543974523,
IMGU_SCALER_FP * -0.075658172660608,
IMGU_SCALER_FP * -0.080699199103829,
IMGU_SCALER_FP * -0.083903403294908,
IMGU_SCALER_FP * -0.085424048835192,
IMGU_SCALER_FP * -0.085423394806327,
IMGU_SCALER_FP * -0.084070186546763,
IMGU_SCALER_FP * -0.081537161069780,
IMGU_SCALER_FP * -0.077998601684588,
IMGU_SCALER_FP * -0.073627974715370,
IMGU_SCALER_FP * -0.068595679088417,
IMGU_SCALER_FP * -0.063066937016941,
IMGU_SCALER_FP * -0.057199851105019,
IMGU_SCALER_FP * -0.051143649969349,
IMGU_SCALER_FP * -0.045037140997445,
IMGU_SCALER_FP * -0.039007385183627,
IMGU_SCALER_FP * -0.033168605172067,
IMGU_SCALER_FP * -0.027621333752351,
IMGU_SCALER_FP * -0.022451806160682,
IMGU_SCALER_FP * -0.017731595701026,
IMGU_SCALER_FP * -0.013517488475013,
IMGU_SCALER_FP * -0.009851589454154,
IMGU_SCALER_FP * -0.006761648795689,
IMGU_SCALER_FP * -0.004261594242362,
IMGU_SCALER_FP * -0.002352252699175,
IMGU_SCALER_FP * -0.001022241683322,
IMGU_SCALER_FP * -0.000249009327023
};
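/* 2-tap downscaling filter coefficients (used for chroma), in IMGU_SCALER_FP fixed point */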
const s32 imgu_css_downscale_2taps[IMGU_SCALER_DOWNSCALE_2TAPS_LEN] = {
IMGU_SCALER_FP * 0.074300676367033,
IMGU_SCALER_FP * 0.094030234498392,
IMGU_SCALER_FP * 0.115522859526596,
IMGU_SCALER_FP * 0.138778551451644,
IMGU_SCALER_FP * 0.163629399140505,
IMGU_SCALER_FP * 0.190075402593178,
IMGU_SCALER_FP * 0.217864695110113,
IMGU_SCALER_FP * 0.247081232257828,
IMGU_SCALER_FP * 0.277389191770256,
IMGU_SCALER_FP * 0.308704618080881,
IMGU_SCALER_FP * 0.340859600056670,
IMGU_SCALER_FP * 0.373602270998074,
IMGU_SCALER_FP * 0.406848675338577,
IMGU_SCALER_FP * 0.440346946378629,
IMGU_SCALER_FP * 0.473845217418681,
IMGU_SCALER_FP * 0.507091621759184,
IMGU_SCALER_FP * 0.540002203833621,
IMGU_SCALER_FP * 0.572157185809410,
IMGU_SCALER_FP * 0.603472612120036,
IMGU_SCALER_FP * 0.633612660499431,
IMGU_SCALER_FP * 0.662493375381080,
IMGU_SCALER_FP * 0.689778934498917,
IMGU_SCALER_FP * 0.715301426719909,
IMGU_SCALER_FP * 0.738892940911023,
IMGU_SCALER_FP * 0.760385565939227,
IMGU_SCALER_FP * 0.779527435104971,
IMGU_SCALER_FP * 0.796234592841739,
IMGU_SCALER_FP * 0.810339128016497,
IMGU_SCALER_FP * 0.821841040629247,
IMGU_SCALER_FP * 0.830488463980438,
IMGU_SCALER_FP * 0.836281398070072,
IMGU_SCALER_FP * 0.839219842898146,
IMGU_SCALER_FP * 0.839219842898146,
IMGU_SCALER_FP * 0.836281398070072,
IMGU_SCALER_FP * 0.830488463980438,
IMGU_SCALER_FP * 0.821841040629247,
IMGU_SCALER_FP * 0.810339128016497,
IMGU_SCALER_FP * 0.796234592841739,
IMGU_SCALER_FP * 0.779527435104971,
IMGU_SCALER_FP * 0.760385565939227,
IMGU_SCALER_FP * 0.738892940911023,
IMGU_SCALER_FP * 0.715301426719909,
IMGU_SCALER_FP * 0.689778934498917,
IMGU_SCALER_FP * 0.662493375381080,
IMGU_SCALER_FP * 0.633612660499431,
IMGU_SCALER_FP * 0.603472612120036,
IMGU_SCALER_FP * 0.572157185809410,
IMGU_SCALER_FP * 0.540002203833621,
IMGU_SCALER_FP * 0.507091621759184,
IMGU_SCALER_FP * 0.473845217418681,
IMGU_SCALER_FP * 0.440346946378629,
IMGU_SCALER_FP * 0.406848675338577,
IMGU_SCALER_FP * 0.373602270998074,
IMGU_SCALER_FP * 0.340859600056670,
IMGU_SCALER_FP * 0.308704618080881,
IMGU_SCALER_FP * 0.277389191770256,
IMGU_SCALER_FP * 0.247081232257828,
IMGU_SCALER_FP * 0.217864695110113,
IMGU_SCALER_FP * 0.190075402593178,
IMGU_SCALER_FP * 0.163629399140505,
IMGU_SCALER_FP * 0.138778551451644,
IMGU_SCALER_FP * 0.115522859526596,
IMGU_SCALER_FP * 0.094030234498392,
IMGU_SCALER_FP * 0.074300676367033
};
/* settings for Geometric Distortion Correction */
const s16 imgu_css_gdc_lut[4][256] = { {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -2, -2, -2,
-2, -3, -3, -3, -4, -4, -4, -5, -5, -5, -6, -6, -7, -7, -7, -8, -8,
-9, -9, -10, -10, -11, -11, -12, -12, -13, -13, -14, -14, -15, -15,
-16, -16, -17, -17, -18, -19, -19, -20, -20, -21, -22, -22, -23, -24,
-24, -25, -25, -26, -27, -27, -28, -29, -29, -30, -31, -31, -32, -33,
-33, -34, -35, -35, -36, -37, -37, -38, -39, -39, -40, -41, -41, -42,
-43, -43, -44, -45, -45, -46, -46, -47, -48, -48, -49, -50, -50, -51,
-52, -52, -53, -53, -54, -55, -55, -56, -56, -57, -58, -58, -59, -59,
-60, -60, -61, -61, -62, -62, -63, -64, -64, -64, -65, -65, -66, -66,
-67, -67, -68, -68, -68, -69, -69, -70, -70, -70, -71, -71, -71, -72,
-72, -72, -73, -73, -73, -73, -74, -74, -74, -74, -74, -75, -75, -75,
-75, -75, -75, -75, -75, -75, -75, -75, -75, -75, -75, -75, -75, -75,
-75, -75, -75, -75, -74, -74, -74, -74, -74, -73, -73, -73, -73, -72,
-72, -72, -71, -71, -70, -70, -69, -69, -68, -68, -67, -67, -66, -66,
-65, -64, -64, -63, -62, -61, -61, -60, -59, -58, -57, -56, -56, -55,
-54, -53, -52, -51, -50, -49, -47, -46, -45, -44, -43, -41, -40, -39,
-38, -36, -35, -33, -32, -31, -29, -28, -26, -25, -23, -21, -20, -18,
-16, -15, -13, -11, -9, -7, -5, -3, -1
}, {
0, 2, 4, 6, 8, 10, 13, 15, 17, 20, 23, 25, 28, 31, 33, 36, 39, 42, 45,
48, 51, 54, 58, 61, 64, 68, 71, 74, 78, 82, 85, 89, 93, 96, 100, 104,
108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 149, 153, 157, 162,
166, 171, 175, 180, 184, 189, 193, 198, 203, 207, 212, 217, 222, 227,
232, 236, 241, 246, 251, 256, 261, 266, 271, 276, 282, 287, 292, 297,
302, 307, 313, 318, 323, 328, 334, 339, 344, 350, 355, 360, 366, 371,
377, 382, 388, 393, 399, 404, 409, 415, 420, 426, 431, 437, 443, 448,
454, 459, 465, 470, 476, 481, 487, 492, 498, 504, 509, 515, 520, 526,
531, 537, 542, 548, 553, 559, 564, 570, 576, 581, 586, 592, 597, 603,
608, 614, 619, 625, 630, 635, 641, 646, 651, 657, 662, 667, 673, 678,
683, 688, 694, 699, 704, 709, 714, 719, 724, 729, 735, 740, 745, 749,
754, 759, 764, 769, 774, 779, 783, 788, 793, 797, 802, 807, 811, 816,
820, 825, 829, 834, 838, 842, 847, 851, 855, 859, 863, 868, 872, 876,
880, 884, 888, 891, 895, 899, 903, 906, 910, 914, 917, 921, 924, 927,
931, 934, 937, 940, 944, 947, 950, 953, 956, 959, 961, 964, 967, 970,
972, 975, 977, 980, 982, 984, 987, 989, 991, 993, 995, 997, 999, 1001,
1002, 1004, 1006, 1007, 1009, 1010, 1011, 1013, 1014, 1015, 1016, 1017,
1018, 1019, 1020, 1020, 1021, 1022, 1022, 1023, 1023, 1023, 1023, 1023
}, {
1024, 1023, 1023, 1023, 1023, 1023, 1022, 1022, 1021, 1020, 1020, 1019,
1018, 1017, 1016, 1015, 1014, 1013, 1011, 1010, 1009, 1007, 1006, 1004,
1002, 1001, 999, 997, 995, 993, 991, 989, 987, 984, 982, 980, 977, 975,
972, 970, 967, 964, 961, 959, 956, 953, 950, 947, 944, 940, 937, 934,
931, 927, 924, 921, 917, 914, 910, 906, 903, 899, 895, 891, 888, 884,
880, 876, 872, 868, 863, 859, 855, 851, 847, 842, 838, 834, 829, 825,
820, 816, 811, 807, 802, 797, 793, 788, 783, 779, 774, 769, 764, 759,
754, 749, 745, 740, 735, 729, 724, 719, 714, 709, 704, 699, 694, 688,
683, 678, 673, 667, 662, 657, 651, 646, 641, 635, 630, 625, 619, 614,
608, 603, 597, 592, 586, 581, 576, 570, 564, 559, 553, 548, 542, 537,
531, 526, 520, 515, 509, 504, 498, 492, 487, 481, 476, 470, 465, 459,
454, 448, 443, 437, 431, 426, 420, 415, 409, 404, 399, 393, 388, 382,
377, 371, 366, 360, 355, 350, 344, 339, 334, 328, 323, 318, 313, 307,
302, 297, 292, 287, 282, 276, 271, 266, 261, 256, 251, 246, 241, 236,
232, 227, 222, 217, 212, 207, 203, 198, 193, 189, 184, 180, 175, 171,
166, 162, 157, 153, 149, 144, 140, 136, 132, 128, 124, 120, 116, 112,
108, 104, 100, 96, 93, 89, 85, 82, 78, 74, 71, 68, 64, 61, 58, 54, 51,
48, 45, 42, 39, 36, 33, 31, 28, 25, 23, 20, 17, 15, 13, 10, 8, 6, 4, 2
}, {
0, -1, -3, -5, -7, -9, -11, -13, -14, -16, -19, -20, -21, -23, -24, -26,
-28, -29, -30, -32, -34, -34, -37, -38, -38, -41, -42, -42, -44, -46,
-46, -48, -49, -49, -51, -52, -53, -54, -55, -56, -57, -57, -58, -59,
-60, -60, -62, -62, -63, -63, -64, -65, -66, -66, -67, -68, -67, -69,
-69, -69, -70, -70, -71, -71, -72, -72, -72, -73, -73, -73, -73, -73,
-73, -74, -75, -74, -75, -75, -74, -75, -75, -75, -75, -75, -75, -75,
-75, -75, -75, -75, -75, -75, -75, -74, -75, -74, -75, -75, -74, -74,
-73, -73, -73, -73, -73, -73, -73, -71, -72, -71, -72, -70, -70, -70,
-69, -70, -69, -68, -68, -68, -67, -67, -66, -66, -65, -65, -64, -64,
-64, -63, -62, -62, -61, -61, -60, -60, -59, -59, -58, -58, -57, -57,
-55, -55, -55, -53, -54, -53, -52, -51, -52, -50, -50, -49, -48, -47,
-46, -46, -46, -46, -45, -43, -43, -42, -42, -41, -41, -40, -39, -39,
-38, -37, -37, -36, -35, -35, -34, -33, -32, -32, -31, -31, -31, -29,
-28, -27, -27, -27, -26, -25, -25, -24, -24, -23, -22, -22, -21, -20,
-20, -20, -18, -19, -17, -17, -16, -16, -15, -14, -14, -14, -14, -12,
-12, -12, -11, -11, -11, -10, -9, -9, -8, -8, -7, -6, -7, -7, -6, -6,
-5, -4, -5, -5, -3, -3, -4, -2, -3, -2, -1, -2, -1, -1, 0, -1, -1, 0,
-1, 0, 1, 0, 0, 0, 0, 0, 0, 0
} };
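/* default VMEM parameters for the XNR3 noise reduction block */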
const struct imgu_css_xnr3_vmem_defaults imgu_css_xnr3_vmem_defaults = {
.x = {
1024, 1164, 1320, 1492, 1680, 1884, 2108, 2352,
2616, 2900, 3208, 3540, 3896, 4276, 4684, 5120
},
.a = {
-7213, -5580, -4371, -3421, -2722, -2159, -6950, -5585,
-4529, -3697, -3010, -2485, -2070, -1727, -1428, 0
},
.b = {
4096, 3603, 3178, 2811, 2497, 2226, 1990, 1783,
1603, 1446, 1307, 1185, 1077, 981, 895, 819
},
.c = {
1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
},
};
/* settings for Bayer Noise Reduction */
const struct ipu3_uapi_bnr_static_config imgu_css_bnr_defaults = {
{ 16, 16, 16, 16 }, /* wb_gains */
{ 16, 16, 16, 16 }, /* wb_gains_thr */
{ 0, X, 8, 6, X, 14 }, /* thr_coeffs */
{ 0, 0, 0, 0 }, /* thr_ctrl_shd */
{ -128, X, -128, X }, /* opt_center */
{ /* lut */
{ 17, 23, 28, 32, 36, 39, 42, 45,
48, 51, 53, 55, 58, 60, 62, 64,
66, 68, 70, 72, 73, 75, 77, 78,
80, 82, 83, 85, 86, 88, 89, 90 }
},
{ 4, X, 1, 8, X, 8, X, 8, X }, /* bp_ctrl */
{ 8, 4, 4, X, 8, X, 1, 1, 1, 1 }, /* dn_detect_ctrl */
};
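/* settings for Demosaicing */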
const struct ipu3_uapi_dm_config imgu_css_dm_defaults = {
1, 1, 1, X, X, 8, X, 7, X, 8, X, 8, X, 4, X
};
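/* settings for the Color Correction Matrix */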
const struct ipu3_uapi_ccm_mat_config imgu_css_ccm_defaults = {
9775, -2671, 1087, 0,
-1071, 8303, 815, 0,
-23, -7887, 16103, 0
};
/* settings for Gamma correction */
const struct ipu3_uapi_gamma_corr_lut imgu_css_gamma_lut = { {
63, 79, 95, 111, 127, 143, 159, 175, 191, 207, 223, 239, 255, 271, 287,
303, 319, 335, 351, 367, 383, 399, 415, 431, 447, 463, 479, 495, 511,
527, 543, 559, 575, 591, 607, 623, 639, 655, 671, 687, 703, 719, 735,
751, 767, 783, 799, 815, 831, 847, 863, 879, 895, 911, 927, 943, 959,
975, 991, 1007, 1023, 1039, 1055, 1071, 1087, 1103, 1119, 1135, 1151,
1167, 1183, 1199, 1215, 1231, 1247, 1263, 1279, 1295, 1311, 1327, 1343,
1359, 1375, 1391, 1407, 1423, 1439, 1455, 1471, 1487, 1503, 1519, 1535,
1551, 1567, 1583, 1599, 1615, 1631, 1647, 1663, 1679, 1695, 1711, 1727,
1743, 1759, 1775, 1791, 1807, 1823, 1839, 1855, 1871, 1887, 1903, 1919,
1935, 1951, 1967, 1983, 1999, 2015, 2031, 2047, 2063, 2079, 2095, 2111,
2143, 2175, 2207, 2239, 2271, 2303, 2335, 2367, 2399, 2431, 2463, 2495,
2527, 2559, 2591, 2623, 2655, 2687, 2719, 2751, 2783, 2815, 2847, 2879,
2911, 2943, 2975, 3007, 3039, 3071, 3103, 3135, 3167, 3199, 3231, 3263,
3295, 3327, 3359, 3391, 3423, 3455, 3487, 3519, 3551, 3583, 3615, 3647,
3679, 3711, 3743, 3775, 3807, 3839, 3871, 3903, 3935, 3967, 3999, 4031,
4063, 4095, 4127, 4159, 4223, 4287, 4351, 4415, 4479, 4543, 4607, 4671,
4735, 4799, 4863, 4927, 4991, 5055, 5119, 5183, 5247, 5311, 5375, 5439,
5503, 5567, 5631, 5695, 5759, 5823, 5887, 5951, 6015, 6079, 6143, 6207,
6271, 6335, 6399, 6463, 6527, 6591, 6655, 6719, 6783, 6847, 6911, 6975,
7039, 7103, 7167, 7231, 7295, 7359, 7423, 7487, 7551, 7615, 7679, 7743,
7807, 7871, 7935, 7999, 8063, 8127, 8191
} };
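/* settings for Color Space Conversion (RGB to YUV) */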
const struct ipu3_uapi_csc_mat_config imgu_css_csc_defaults = {
4898, 9617, 1867, 0,
-2410, -4732, 7143, 0,
10076, -8437, -1638, 0
};
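/* settings for Chroma Down Scaling */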
const struct ipu3_uapi_cds_params imgu_css_cds_defaults = {
1, 3, 3, 1,
1, 3, 3, 1,
4, X, /* ds_nf */
1, /* csc_en */
0, X /* uv_bin_output */
};
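/* settings for lens Shading Correction */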
const struct ipu3_uapi_shd_config_static imgu_css_shd_defaults = {
.grid = {
.width = 73,
.height = 55,
.block_width_log2 = 7,
.block_height_log2 = 7,
.x_start = 0,
.y_start = 0,
},
.general = {
.shd_enable = 1,
.gain_factor = 0,
},
.black_level = {
.bl_r = 0,
.bl_gr = 0 | (0 << IPU3_UAPI_SHD_BLGR_NF_SHIFT),
.bl_gb = 0,
.bl_b = 0,
},
};
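/* settings for the IEFd image enhancement filter */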
const struct ipu3_uapi_yuvp1_iefd_config imgu_css_iefd_defaults = {
.units = {
.cu_1 = { 0, 150, 7, 0 },
.cu_ed = { 7, 110, 244, X, 307, 409, 511, X,
184, 255, 255, X, 0, 0, X,
7, 81, 255, X, 255, 255, X },
.cu_3 = { 148, 251, 10, 0 },
.cu_5 = { 25, 70, 501, X, 32, X },
.cu_6 = { 32, 63, 183, X, 397,
33, 0, X, 0,
0, 64, X, 64, X },
.cu_7 = { 200, 303,
10, 0 },
.cu_unsharp = { 10, 64, 110, X, 511,
66, 12, X, 0,
0, 56, X, 64, X },
.cu_radial = { 6, 203, 255, 255, 255, 255, X,
84, 444, 397, 288, 300, X,
4, 69, 207, X, 369, 448, X },
.cu_vssnlm = { 61, 100, 25, 0}
},
.config = { 45, X, 0, X, 16, X, 45, X },
.control = { 1, 1, 1, 1, 1, X },
.sharp = { { 50, X, 511, X, 50, X, 50, X },
{ 64, X, 0, X, 0, X},
{ 56, X, 56, X } },
.unsharp = { { 36, 17, 8, X },
{ 13, 7, 3, X } },
.rad = { { -2104, X, -1559, X },
{ 4426816, X },
{ 2430481, X },
{ 6, X, 79, X },
{ 64, 0, 0, X },
{ 1, X, 2, X, 0, X, 0, X },
{ 40, X, 62, X } },
.vsslnm = { { 16, 32, 64, X },
{ 1, X, 2, X, 8, X } },
};
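/* settings for Y Down Scaling */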
const struct ipu3_uapi_yuvp1_yds_config imgu_css_yds_defaults = {
0, 1, 1, 0, 0, 1, 1, 0, 2, X, 0, X
};
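/* settings for Chroma Noise Reduction */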
const struct ipu3_uapi_yuvp1_chnr_config imgu_css_chnr_defaults = {
.coring = { 0, X, 0, X },
.sense_gain = { 6, 6, 6, X, 4, 4, 4, X },
.iir_fir = { 8, X, 12, X, 0, 256 - 127, X },
};
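/* settings for Y Edge Enhancement and Noise Reduction */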
const struct ipu3_uapi_yuvp1_y_ee_nr_config imgu_css_y_ee_nr_defaults = {
.lpf = { 4, X, 8, X, 16, X, 0 },
.sense = { 8191, X, 0, X, 8191, X, 0, X },
.gain = { 8, X, 0, X, 8, X, 0, X },
.clip = { 8, X, 0, X, 8, X, 0, X },
.frng = { 2, X, 200, X, 2, X, 1, 1, X },
.diag = { 1, X, 4, 1, 1, 4, X },
.fc_coring = { 0, X, 0, X, 0, X, 0, X }
};
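/* gain piecewise-linear LUT for Total Color Correction */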
const struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config
imgu_css_tcc_gain_pcwl_lut = { {
1024, 1032, 1040, 1048, 1057, 1065, 1073, 1081, 1089, 1097, 1105, 1113,
1122, 1130, 1138, 1146, 1154, 1162, 1170, 1178, 1187, 1195, 1203, 1211,
1219, 1227, 1235, 1243, 1252, 1260, 1268, 1276, 1284, 1292, 1300, 1308,
1317, 1325, 1333, 1341, 1349, 1357, 1365, 1373, 1382, 1390, 1398, 1406,
1414, 1422, 1430, 1438, 1447, 1455, 1463, 1471, 1479, 1487, 1495, 1503,
1512, 1520, 1528, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
1536, 1536, 1528, 1520, 1512, 1503, 1495, 1487, 1479, 1471, 1463, 1455,
1447, 1438, 1430, 1422, 1414, 1406, 1398, 1390, 1382, 1373, 1365, 1357,
1349, 1341, 1333, 1325, 1317, 1308, 1300, 1292, 1284, 1276, 1268, 1260,
1252, 1243, 1235, 1227, 1219, 1211, 1203, 1195, 1187, 1178, 1170, 1162,
1154, 1146, 1138, 1130, 1122, 1113, 1105, 1097, 1089, 1081, 1073, 1065,
1057, 1048, 1040, 1032, 1024
} };
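/* r-squared LUT for Total Color Correction */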
const struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config
imgu_css_tcc_r_sqr_lut = { {
32, 44, 64, 92, 128, 180, 256, 364, 512, 628, 724, 808, 888,
956, 1024, 1088, 1144, 1200, 1256, 1304, 1356, 1404, 1448
} };
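/* settings for Advanced Noise Reduction */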
const struct imgu_abi_anr_config imgu_css_anr_defaults = {
.transform = {
.adaptive_treshhold_en = 1,
.alpha = { { 13, 13, 13, 13, 0, 0, 0, 0},
{ 11, 11, 11, 11, 0, 0, 0, 0},
{ 14, 14, 14, 14, 0, 0, 0, 0} },
.beta = { { 24, 24, 24, 24},
{ 21, 20, 20, 21},
{ 25, 25, 25, 25} },
.color = { { { 166, 173, 149, 166, 161, 146, 145, 173,
145, 150, 141, 149, 145, 141, 142 },
{ 166, 173, 149, 165, 161, 145, 145, 173,
145, 150, 141, 149, 145, 141, 142 },
{ 166, 174, 149, 166, 162, 146, 146, 173,
145, 150, 141, 149, 145, 141, 142 },
{ 166, 173, 149, 165, 161, 145, 145, 173,
146, 150, 141, 149, 145, 141, 142 } },
{ { 141, 131, 140, 141, 144, 143, 144, 131,
143, 137, 140, 140, 144, 140, 141 },
{ 141, 131, 140, 141, 143, 143, 144, 131,
143, 137, 140, 140, 144, 140, 141 },
{ 141, 131, 141, 141, 144, 144, 144, 131,
143, 137, 140, 140, 144, 140, 141 },
{ 140, 131, 140, 141, 143, 143, 144, 131,
143, 137, 140, 140, 144, 140, 141 } },
{ { 184, 173, 188, 184, 182, 182, 181, 173,
182, 179, 182, 188, 181, 182, 180 },
{ 184, 173, 188, 184, 183, 182, 181, 173,
182, 178, 182, 188, 181, 182, 180 },
{ 184, 173, 188, 184, 182, 182, 181, 173,
182, 178, 182, 188, 181, 182, 181 },
{ 184, 172, 188, 184, 182, 182, 181, 173,
182, 178, 182, 188, 182, 182, 180 } } },
.sqrt_lut = { 724, 768, 810, 849, 887, 923, 958, 991, 1024,
1056, 1086, 1116, 1145, 1173, 1201, 1228, 1254,
1280, 1305, 1330, 1355, 1379, 1402, 1425, 1448 },
.xreset = -1632,
.yreset = -1224,
.x_sqr_reset = 2663424,
.r_normfactor = 14,
.y_sqr_reset = 1498176,
.gain_scale = 115
},
.stitch = {
.anr_stitch_en = 1,
.pyramid = { { 1, 3, 5 }, { 7, 7, 5 }, { 3, 1, 3 },
{ 9, 15, 21 }, { 21, 15, 9 }, { 3, 5, 15 },
{ 25, 35, 35 }, { 25, 15, 5 }, { 7, 21, 35 },
{ 49, 49, 35 }, { 21, 7, 7 }, { 21, 35, 49 },
{ 49, 35, 21 }, { 7, 5, 15 }, { 25, 35, 35 },
{ 25, 15, 5 }, { 3, 9, 15 }, { 21, 21, 15 },
{ 9, 3, 1 }, { 3, 5, 7 }, { 7, 5, 3}, { 1 }
}
}
};
/* frame settings for Auto White Balance */
const struct ipu3_uapi_awb_fr_config_s imgu_css_awb_fr_defaults = {
.grid_cfg = {
.width = 16,
.height = 16,
.block_width_log2 = 3,
.block_height_log2 = 3,
.x_start = 10,
.y_start = 2 | IPU3_UAPI_GRID_Y_START_EN,
},
.bayer_coeff = { 0, 0, 0, 0, 0, 128 },
.bayer_sign = 0,
.bayer_nf = 7
};
/* settings for Auto Exposure */
const struct ipu3_uapi_ae_grid_config imgu_css_ae_grid_defaults = {
.width = 16,
.height = 16,
.block_width_log2 = 3,
.block_height_log2 = 3,
.ae_en = 1,
.x_start = 0,
.y_start = 0,
};
/* settings for Auto Exposure color correction matrix */
const struct ipu3_uapi_ae_ccm imgu_css_ae_ccm_defaults = {
256, 256, 256, 256, /* gain_gr/r/b/gb */
.mat = { 128, 0, 0, 0, 0, 128, 0, 0, 0, 0, 128, 0, 0, 0, 0, 128 },
};
/* settings for Auto Focus */
const struct ipu3_uapi_af_config_s imgu_css_af_defaults = {
.filter_config = {
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 128 }, 0,
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 128 }, 0,
.y_calc = { 8, 8, 8, 8 },
.nf = { X, 7, X, 7 },
},
.grid_cfg = {
.width = 16,
.height = 16,
.block_width_log2 = 3,
.block_height_log2 = 3,
.x_start = 10,
.y_start = 2 | IPU3_UAPI_GRID_Y_START_EN,
},
};
/* settings for Auto White Balance */
const struct ipu3_uapi_awb_config_s imgu_css_awb_defaults = {
8191, 8191, 8191, 8191 | /* rgbs_thr_gr/r/gb/b */
IPU3_UAPI_AWB_RGBS_THR_B_EN | IPU3_UAPI_AWB_RGBS_THR_B_INCL_SAT,
.grid = {
.width = 16,
.height = 16,
.block_width_log2 = 3,
.block_height_log2 = 3,
.x_start = 0,
.y_start = 0,
},
};
| linux-master | drivers/staging/media/ipu3/ipu3-tables.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Intel Corporation
#include <linux/device.h>
#include "ipu3-css.h"
#include "ipu3-css-fw.h"
#include "ipu3-tables.h"
#include "ipu3-css-params.h"
#define DIV_ROUND_CLOSEST_DOWN(a, b) (((a) + ((b) / 2) - 1) / (b))
#define roundclosest_down(a, b) (DIV_ROUND_CLOSEST_DOWN(a, b) * (b))
#define IPU3_UAPI_ANR_MAX_RESET ((1 << 12) - 1)
#define IPU3_UAPI_ANR_MIN_RESET (((-1) << 12) + 1)
struct imgu_css_scaler_info {
unsigned int phase_step; /* Same for luma/chroma */
int exp_shift;
unsigned int phase_init; /* luma/chroma dependent */
int pad_left;
int pad_right;
int crop_left;
int crop_top;
};
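/*
 * Scaling exponent helper: the largest shift i for which
 * (divider >> i) >= counter still holds, or 0 if there is none.
 */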
static unsigned int imgu_css_scaler_get_exp(unsigned int counter,
unsigned int divider)
{
int i = fls(divider) - fls(counter);
if (i <= 0)
return 0;
if (divider >> i < counter)
i = i - 1;
return i;
}
/* Set up the CSS scaler look-up table */
static void
imgu_css_scaler_setup_lut(unsigned int taps, unsigned int input_width,
unsigned int output_width, int phase_step_correction,
const int *coeffs, unsigned int coeffs_size,
s8 coeff_lut[], struct imgu_css_scaler_info *info)
{
int tap, phase, phase_sum_left, phase_sum_right;
int exponent = imgu_css_scaler_get_exp(output_width, input_width);
int mantissa = (1 << exponent) * output_width;
unsigned int phase_step, phase_taps;
if (input_width == output_width) {
for (phase = 0; phase < IMGU_SCALER_PHASES; phase++) {
phase_taps = phase * IMGU_SCALER_FILTER_TAPS;
for (tap = 0; tap < taps; tap++)
coeff_lut[phase_taps + tap] = 0;
}
info->phase_step = IMGU_SCALER_PHASES *
(1 << IMGU_SCALER_PHASE_COUNTER_PREC_REF);
info->exp_shift = 0;
info->pad_left = 0;
info->pad_right = 0;
info->phase_init = 0;
info->crop_left = 0;
info->crop_top = 0;
return;
}
for (phase = 0; phase < IMGU_SCALER_PHASES; phase++) {
phase_taps = phase * IMGU_SCALER_FILTER_TAPS;
for (tap = 0; tap < taps; tap++) {
/* flip table for convolution reverse indexing */
s64 coeff = coeffs[coeffs_size -
((tap * (coeffs_size / taps)) + phase) - 1];
coeff *= mantissa;
coeff = div64_long(coeff, input_width);
/* Add +"0.5" */
coeff += 1 << (IMGU_SCALER_COEFF_BITS - 1);
coeff >>= IMGU_SCALER_COEFF_BITS;
coeff_lut[phase_taps + tap] = coeff;
}
}
phase_step = IMGU_SCALER_PHASES *
(1 << IMGU_SCALER_PHASE_COUNTER_PREC_REF) *
output_width / input_width;
phase_step += phase_step_correction;
phase_sum_left = (taps / 2 * IMGU_SCALER_PHASES *
(1 << IMGU_SCALER_PHASE_COUNTER_PREC_REF)) -
(1 << (IMGU_SCALER_PHASE_COUNTER_PREC_REF - 1));
phase_sum_right = (taps / 2 * IMGU_SCALER_PHASES *
(1 << IMGU_SCALER_PHASE_COUNTER_PREC_REF)) +
(1 << (IMGU_SCALER_PHASE_COUNTER_PREC_REF - 1));
info->exp_shift = IMGU_SCALER_MAX_EXPONENT_SHIFT - exponent;
info->pad_left = (phase_sum_left % phase_step == 0) ?
phase_sum_left / phase_step - 1 : phase_sum_left / phase_step;
info->pad_right = (phase_sum_right % phase_step == 0) ?
phase_sum_right / phase_step - 1 : phase_sum_right / phase_step;
info->phase_init = phase_sum_left - phase_step * info->pad_left;
info->phase_step = phase_step;
info->crop_left = taps - 1;
info->crop_top = taps - 1;
}
/*
* Calculates the exact output image width/height, based on phase_step setting
* (must be perfectly aligned with hardware).
*/
static unsigned int
imgu_css_scaler_calc_scaled_output(unsigned int input,
struct imgu_css_scaler_info *info)
{
unsigned int arg1 = input * info->phase_step +
(1 - IMGU_SCALER_TAPS_Y / 2) * IMGU_SCALER_FIR_PHASES -
IMGU_SCALER_FIR_PHASES / (2 * IMGU_SCALER_PHASES);
unsigned int arg2 = ((IMGU_SCALER_TAPS_Y / 2) * IMGU_SCALER_FIR_PHASES +
IMGU_SCALER_FIR_PHASES / (2 * IMGU_SCALER_PHASES)) *
IMGU_SCALER_FIR_PHASES + info->phase_step / 2;
return ((arg1 + (arg2 - IMGU_SCALER_FIR_PHASES * info->phase_step) /
IMGU_SCALER_FIR_PHASES) / (2 * IMGU_SCALER_FIR_PHASES)) * 2;
}
/*
* Calculate the output width and height, given the luma
* and chroma details of a scaler
*/
static void
imgu_css_scaler_calc(u32 input_width, u32 input_height, u32 target_width,
u32 target_height, struct imgu_abi_osys_config *cfg,
struct imgu_css_scaler_info *info_luma,
struct imgu_css_scaler_info *info_chroma,
unsigned int *output_width, unsigned int *output_height,
unsigned int *procmode)
{
u32 out_width = target_width;
u32 out_height = target_height;
const unsigned int height_alignment = 2;
int phase_step_correction = -1;
/*
* Calculate scaled output width. If the horizontal and vertical
* scaling factors differ, choose the larger one and crop off the
* excess lines or columns after formatting.
*/
if (target_height * input_width > target_width * input_height)
target_width = DIV_ROUND_UP(target_height * input_width,
input_height);
if (input_width == target_width)
*procmode = IMGU_ABI_OSYS_PROCMODE_BYPASS;
else
*procmode = IMGU_ABI_OSYS_PROCMODE_DOWNSCALE;
memset(&cfg->scaler_coeffs_chroma, 0,
sizeof(cfg->scaler_coeffs_chroma));
memset(&cfg->scaler_coeffs_luma, 0, sizeof(cfg->scaler_coeffs_luma));
do {
phase_step_correction++;
imgu_css_scaler_setup_lut(IMGU_SCALER_TAPS_Y,
input_width, target_width,
phase_step_correction,
imgu_css_downscale_4taps,
IMGU_SCALER_DOWNSCALE_4TAPS_LEN,
cfg->scaler_coeffs_luma, info_luma);
imgu_css_scaler_setup_lut(IMGU_SCALER_TAPS_UV,
input_width, target_width,
phase_step_correction,
imgu_css_downscale_2taps,
IMGU_SCALER_DOWNSCALE_2TAPS_LEN,
cfg->scaler_coeffs_chroma,
info_chroma);
out_width = imgu_css_scaler_calc_scaled_output(input_width,
info_luma);
out_height = imgu_css_scaler_calc_scaled_output(input_height,
info_luma);
} while ((out_width < target_width || out_height < target_height ||
!IS_ALIGNED(out_height, height_alignment)) &&
phase_step_correction <= 5);
*output_width = out_width;
*output_height = out_height;
}
/********************** Osys routines for scaler ***************************/
static void imgu_css_osys_set_format(enum imgu_abi_frame_format host_format,
unsigned int *osys_format,
unsigned int *osys_tiling)
{
*osys_format = IMGU_ABI_OSYS_FORMAT_YUV420;
*osys_tiling = IMGU_ABI_OSYS_TILING_NONE;
switch (host_format) {
case IMGU_ABI_FRAME_FORMAT_YUV420:
*osys_format = IMGU_ABI_OSYS_FORMAT_YUV420;
break;
case IMGU_ABI_FRAME_FORMAT_YV12:
*osys_format = IMGU_ABI_OSYS_FORMAT_YV12;
break;
case IMGU_ABI_FRAME_FORMAT_NV12:
*osys_format = IMGU_ABI_OSYS_FORMAT_NV12;
break;
case IMGU_ABI_FRAME_FORMAT_NV16:
*osys_format = IMGU_ABI_OSYS_FORMAT_NV16;
break;
case IMGU_ABI_FRAME_FORMAT_NV21:
*osys_format = IMGU_ABI_OSYS_FORMAT_NV21;
break;
case IMGU_ABI_FRAME_FORMAT_NV12_TILEY:
*osys_format = IMGU_ABI_OSYS_FORMAT_NV12;
*osys_tiling = IMGU_ABI_OSYS_TILING_Y;
break;
default:
/* For now, use the default values */
break;
}
}
/*
* Function calculates input frame stripe offset, based
* on output frame stripe offset and filter parameters.
*/
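/*
 * Informal illustration with hypothetical numbers: with fir_phases = 64,
 * phase_step = 32, pad_left = 2 and phase_init = 0, an output stripe
 * offset of 128 maps to DIV_ROUND_UP(128 * 64 - 2 * 32, 32) = 254 input
 * pixels.
 */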
static int imgu_css_osys_calc_stripe_offset(int stripe_offset_out,
int fir_phases, int phase_init,
int phase_step, int pad_left)
{
int stripe_offset_inp = stripe_offset_out * fir_phases -
pad_left * phase_step;
return DIV_ROUND_UP(stripe_offset_inp - phase_init, phase_step);
}
/*
* Calculate input frame phase, given the output frame
* stripe offset and filter parameters
*/
static int imgu_css_osys_calc_stripe_phase_init(int stripe_offset_out,
int fir_phases, int phase_init,
int phase_step, int pad_left)
{
int stripe_offset_inp =
imgu_css_osys_calc_stripe_offset(stripe_offset_out,
fir_phases, phase_init,
phase_step, pad_left);
return phase_init + ((pad_left + stripe_offset_inp) * phase_step) -
stripe_offset_out * fir_phases;
}
/*
* This function calculates input frame stripe width,
* based on the output frame stripe width and filter parameters
*/
static int imgu_css_osys_calc_inp_stripe_width(int stripe_width_out,
int fir_phases, int phase_init,
int phase_step, int fir_taps,
int pad_left, int pad_right)
{
int stripe_width_inp = (stripe_width_out + fir_taps - 1) * fir_phases;
stripe_width_inp = DIV_ROUND_UP(stripe_width_inp - phase_init,
phase_step);
return stripe_width_inp - pad_left - pad_right;
}
/*
* This function calculates output frame stripe width, based
* on the input frame stripe width and filter parameters
*/
static int imgu_css_osys_out_stripe_width(int stripe_width_inp, int fir_phases,
int phase_init, int phase_step,
int fir_taps, int pad_left,
int pad_right, int column_offset)
{
int stripe_width_out = (pad_left + stripe_width_inp +
pad_right - column_offset) * phase_step;
stripe_width_out = (stripe_width_out + phase_init) / fir_phases;
return stripe_width_out - (fir_taps - 1);
}
struct imgu_css_reso {
unsigned int input_width;
unsigned int input_height;
enum imgu_abi_frame_format input_format;
unsigned int pin_width[IMGU_ABI_OSYS_PINS];
unsigned int pin_height[IMGU_ABI_OSYS_PINS];
unsigned int pin_stride[IMGU_ABI_OSYS_PINS];
enum imgu_abi_frame_format pin_format[IMGU_ABI_OSYS_PINS];
int chunk_width;
int chunk_height;
int block_height;
int block_width;
};
struct imgu_css_frame_params {
/* Output pins */
unsigned int enable;
unsigned int format;
unsigned int flip;
unsigned int mirror;
unsigned int tiling;
unsigned int reduce_range;
unsigned int width;
unsigned int height;
unsigned int stride;
unsigned int scaled;
unsigned int crop_left;
unsigned int crop_top;
};
struct imgu_css_stripe_params {
unsigned int processing_mode;
unsigned int phase_step;
unsigned int exp_shift;
unsigned int phase_init_left_y;
unsigned int phase_init_left_uv;
unsigned int phase_init_top_y;
unsigned int phase_init_top_uv;
unsigned int pad_left_y;
unsigned int pad_left_uv;
unsigned int pad_right_y;
unsigned int pad_right_uv;
unsigned int pad_top_y;
unsigned int pad_top_uv;
unsigned int pad_bottom_y;
unsigned int pad_bottom_uv;
unsigned int crop_left_y;
unsigned int crop_top_y;
unsigned int crop_left_uv;
unsigned int crop_top_uv;
unsigned int start_column_y;
unsigned int start_column_uv;
unsigned int chunk_width;
unsigned int chunk_height;
unsigned int block_width;
unsigned int block_height;
unsigned int input_width;
unsigned int input_height;
int output_width[IMGU_ABI_OSYS_PINS];
int output_height[IMGU_ABI_OSYS_PINS];
int output_offset[IMGU_ABI_OSYS_PINS];
};
/*
* frame_params - size IMGU_ABI_OSYS_PINS
* stripe_params - size IPU3_UAPI_MAX_STRIPES
*/
static int imgu_css_osys_calc_frame_and_stripe_params(
struct imgu_css *css, unsigned int stripes,
struct imgu_abi_osys_config *osys,
struct imgu_css_scaler_info *scaler_luma,
struct imgu_css_scaler_info *scaler_chroma,
struct imgu_css_frame_params frame_params[],
struct imgu_css_stripe_params stripe_params[],
unsigned int pipe)
{
struct imgu_css_reso reso;
unsigned int output_width, pin, s;
u32 input_width, input_height, target_width, target_height;
unsigned int procmode = 0;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
input_width = css_pipe->rect[IPU3_CSS_RECT_GDC].width;
input_height = css_pipe->rect[IPU3_CSS_RECT_GDC].height;
target_width = css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
target_height = css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
/* Frame parameters */
/* Input width for Output System is output width of DVS (with GDC) */
reso.input_width = css_pipe->rect[IPU3_CSS_RECT_GDC].width;
/* Input height for Output System is output height of DVS (with GDC) */
reso.input_height = css_pipe->rect[IPU3_CSS_RECT_GDC].height;
reso.input_format =
css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
reso.pin_width[IMGU_ABI_OSYS_PIN_OUT] =
css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
reso.pin_height[IMGU_ABI_OSYS_PIN_OUT] =
css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
reso.pin_stride[IMGU_ABI_OSYS_PIN_OUT] =
css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad;
reso.pin_format[IMGU_ABI_OSYS_PIN_OUT] =
css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
reso.pin_width[IMGU_ABI_OSYS_PIN_VF] =
css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
reso.pin_height[IMGU_ABI_OSYS_PIN_VF] =
css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
reso.pin_stride[IMGU_ABI_OSYS_PIN_VF] =
css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad;
reso.pin_format[IMGU_ABI_OSYS_PIN_VF] =
css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
/* Configure the frame parameters for all output pins */
frame_params[IMGU_ABI_OSYS_PIN_OUT].width =
css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
frame_params[IMGU_ABI_OSYS_PIN_OUT].height =
css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
frame_params[IMGU_ABI_OSYS_PIN_VF].width =
css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
frame_params[IMGU_ABI_OSYS_PIN_VF].height =
css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
frame_params[IMGU_ABI_OSYS_PIN_VF].crop_top = 0;
frame_params[IMGU_ABI_OSYS_PIN_VF].crop_left = 0;
for (pin = 0; pin < IMGU_ABI_OSYS_PINS; pin++) {
int enable = 0;
int scaled = 0;
unsigned int format = 0;
unsigned int tiling = 0;
frame_params[pin].flip = 0;
frame_params[pin].mirror = 0;
frame_params[pin].reduce_range = 0;
if (reso.pin_width[pin] != 0 && reso.pin_height[pin] != 0) {
enable = 1;
if (pin == IMGU_ABI_OSYS_PIN_OUT) {
if (reso.input_width < reso.pin_width[pin] ||
reso.input_height < reso.pin_height[pin])
return -EINVAL;
/*
* When the input and output resolutions differ,
* cropping is done instead of scaling. Determine
* the crop amounts needed for symmetric cropping.
*/
frame_params[pin].crop_left = roundclosest_down(
(reso.input_width -
reso.pin_width[pin]) / 2,
IMGU_OSYS_DMA_CROP_W_LIMIT);
frame_params[pin].crop_top = roundclosest_down(
(reso.input_height -
reso.pin_height[pin]) / 2,
IMGU_OSYS_DMA_CROP_H_LIMIT);
} else {
if (reso.pin_width[pin] != reso.input_width ||
reso.pin_height[pin] != reso.input_height) {
/*
* If the resolution differs between the
* input and output of OSYS, scaling is
* assumed, except for the MAIN pin.
* Whether the scaling factor is 1 and
* whether cropping is needed is decided
* later.
*/
scaled = 1;
}
}
imgu_css_osys_set_format(reso.pin_format[pin], &format,
&tiling);
} else {
enable = 0;
}
frame_params[pin].enable = enable;
frame_params[pin].format = format;
frame_params[pin].tiling = tiling;
frame_params[pin].stride = reso.pin_stride[pin];
frame_params[pin].scaled = scaled;
}
imgu_css_scaler_calc(input_width, input_height, target_width,
target_height, osys, scaler_luma, scaler_chroma,
&reso.pin_width[IMGU_ABI_OSYS_PIN_VF],
&reso.pin_height[IMGU_ABI_OSYS_PIN_VF], &procmode);
dev_dbg(css->dev, "osys scaler procmode is %u", procmode);
output_width = reso.pin_width[IMGU_ABI_OSYS_PIN_VF];
if (output_width < reso.input_width / 2) {
/* Scaling factor <= 0.5 */
reso.chunk_width = IMGU_OSYS_BLOCK_WIDTH;
reso.block_width = IMGU_OSYS_BLOCK_WIDTH;
} else { /* 0.5 <= Scaling factor <= 1.0 */
reso.chunk_width = IMGU_OSYS_BLOCK_WIDTH / 2;
reso.block_width = IMGU_OSYS_BLOCK_WIDTH;
}
if (output_width <= reso.input_width * 7 / 8) {
/* Scaling factor <= 0.875 */
reso.chunk_height = IMGU_OSYS_BLOCK_HEIGHT;
reso.block_height = IMGU_OSYS_BLOCK_HEIGHT;
} else { /* 0.875 < Scaling factor <= 1.0 */
reso.chunk_height = IMGU_OSYS_BLOCK_HEIGHT / 2;
reso.block_height = IMGU_OSYS_BLOCK_HEIGHT;
}
/*
* Calculate scaler configuration parameters based on input and output
* resolution.
*/
if (frame_params[IMGU_ABI_OSYS_PIN_VF].enable) {
/*
* When aspect ratio is different between target resolution and
* required resolution, determine the crop factor to do
* symmetric cropping
*/
u32 w = reso.pin_width[IMGU_ABI_OSYS_PIN_VF] -
frame_params[IMGU_ABI_OSYS_PIN_VF].width;
u32 h = reso.pin_height[IMGU_ABI_OSYS_PIN_VF] -
frame_params[IMGU_ABI_OSYS_PIN_VF].height;
frame_params[IMGU_ABI_OSYS_PIN_VF].crop_left =
roundclosest_down(w / 2, IMGU_OSYS_DMA_CROP_W_LIMIT);
frame_params[IMGU_ABI_OSYS_PIN_VF].crop_top =
roundclosest_down(h / 2, IMGU_OSYS_DMA_CROP_H_LIMIT);
if (reso.input_height % 4 || reso.input_width % 8) {
dev_err(css->dev, "OSYS input width is not multiple of 8 or\n");
dev_err(css->dev, "height is not multiple of 4\n");
return -EINVAL;
}
}
/* Stripe parameters */
if (frame_params[IMGU_ABI_OSYS_PIN_VF].enable) {
output_width = reso.pin_width[IMGU_ABI_OSYS_PIN_VF];
} else {
/*
* If the scaler output is not enabled, take the
* input width as the output width, since there is
* no scaling at the main pin. The main pin
* resolution is not used because it may differ
* from the OSYS input resolution when cropping
* is applied.
*/
output_width = reso.input_width;
}
for (s = 0; s < stripes; s++) {
int stripe_offset_inp_y = 0;
int stripe_offset_inp_uv = 0;
int stripe_offset_out_y = 0;
int stripe_offset_out_uv = 0;
int stripe_phase_init_y = scaler_luma->phase_init;
int stripe_phase_init_uv = scaler_chroma->phase_init;
int stripe_offset_blk_y = 0;
int stripe_offset_blk_uv = 0;
int stripe_offset_col_y = 0;
int stripe_offset_col_uv = 0;
int stripe_pad_left_y = scaler_luma->pad_left;
int stripe_pad_left_uv = scaler_chroma->pad_left;
int stripe_pad_right_y = scaler_luma->pad_right;
int stripe_pad_right_uv = scaler_chroma->pad_right;
int stripe_crop_left_y = scaler_luma->crop_left;
int stripe_crop_left_uv = scaler_chroma->crop_left;
int stripe_input_width_y = reso.input_width;
int stripe_input_width_uv = 0;
int stripe_output_width_y = output_width;
int stripe_output_width_uv = 0;
int chunk_floor_y = 0;
int chunk_floor_uv = 0;
int chunk_ceil_uv = 0;
if (stripes > 1) {
if (s > 0) {
/* Calculate stripe offsets */
stripe_offset_out_y =
output_width * s / stripes;
stripe_offset_out_y =
rounddown(stripe_offset_out_y,
IPU3_UAPI_ISP_VEC_ELEMS);
stripe_offset_out_uv = stripe_offset_out_y /
IMGU_LUMA_TO_CHROMA_RATIO;
stripe_offset_inp_y =
imgu_css_osys_calc_stripe_offset(
stripe_offset_out_y,
IMGU_OSYS_FIR_PHASES,
scaler_luma->phase_init,
scaler_luma->phase_step,
scaler_luma->pad_left);
stripe_offset_inp_uv =
imgu_css_osys_calc_stripe_offset(
stripe_offset_out_uv,
IMGU_OSYS_FIR_PHASES,
scaler_chroma->phase_init,
scaler_chroma->phase_step,
scaler_chroma->pad_left);
/* Calculate stripe phase init */
stripe_phase_init_y =
imgu_css_osys_calc_stripe_phase_init(
stripe_offset_out_y,
IMGU_OSYS_FIR_PHASES,
scaler_luma->phase_init,
scaler_luma->phase_step,
scaler_luma->pad_left);
stripe_phase_init_uv =
imgu_css_osys_calc_stripe_phase_init(
stripe_offset_out_uv,
IMGU_OSYS_FIR_PHASES,
scaler_chroma->phase_init,
scaler_chroma->phase_step,
scaler_chroma->pad_left);
/*
* Chunk boundary corner case - luma and chroma
* start from different input chunks.
*/
chunk_floor_y = rounddown(stripe_offset_inp_y,
reso.chunk_width);
chunk_floor_uv =
rounddown(stripe_offset_inp_uv,
reso.chunk_width /
IMGU_LUMA_TO_CHROMA_RATIO);
if (chunk_floor_y != chunk_floor_uv *
IMGU_LUMA_TO_CHROMA_RATIO) {
/*
* Match starting luma/chroma chunks.
* Decrease offset for UV and add output
* cropping.
*/
stripe_offset_inp_uv -= 1;
stripe_crop_left_uv += 1;
stripe_phase_init_uv -=
scaler_luma->phase_step;
if (stripe_phase_init_uv < 0)
stripe_phase_init_uv =
stripe_phase_init_uv +
IMGU_OSYS_FIR_PHASES;
}
/*
* FW workaround for a HW bug: if the first
* chroma pixel is generated exactly at the end
* of a chunk, the scaler HW may not output the pixel
* for downscale factors smaller than 1.5
* (timing issue).
*/
chunk_ceil_uv =
roundup(stripe_offset_inp_uv,
reso.chunk_width /
IMGU_LUMA_TO_CHROMA_RATIO);
if (stripe_offset_inp_uv ==
chunk_ceil_uv - IMGU_OSYS_TAPS_UV) {
/*
* Decrease input offset and add
* output cropping
*/
stripe_offset_inp_uv -= 1;
stripe_phase_init_uv -=
scaler_luma->phase_step;
if (stripe_phase_init_uv < 0) {
stripe_phase_init_uv +=
IMGU_OSYS_FIR_PHASES;
stripe_crop_left_uv += 1;
}
}
/*
* Calculate block and column offsets for the
* input stripe
*/
stripe_offset_blk_y =
rounddown(stripe_offset_inp_y,
IMGU_INPUT_BLOCK_WIDTH);
stripe_offset_blk_uv =
rounddown(stripe_offset_inp_uv,
IMGU_INPUT_BLOCK_WIDTH /
IMGU_LUMA_TO_CHROMA_RATIO);
stripe_offset_col_y = stripe_offset_inp_y -
stripe_offset_blk_y;
stripe_offset_col_uv = stripe_offset_inp_uv -
stripe_offset_blk_uv;
/* Left padding is only for the first stripe */
stripe_pad_left_y = 0;
stripe_pad_left_uv = 0;
}
/* Right padding is only for the last stripe */
if (s < stripes - 1) {
int next_offset;
stripe_pad_right_y = 0;
stripe_pad_right_uv = 0;
next_offset = output_width * (s + 1) / stripes;
next_offset = rounddown(next_offset, 64);
stripe_output_width_y = next_offset -
stripe_offset_out_y;
} else {
stripe_output_width_y = output_width -
stripe_offset_out_y;
}
/* Calculate target output stripe width */
stripe_output_width_uv = stripe_output_width_y /
IMGU_LUMA_TO_CHROMA_RATIO;
/* Calculate input stripe width */
stripe_input_width_y = stripe_offset_col_y +
imgu_css_osys_calc_inp_stripe_width(
stripe_output_width_y,
IMGU_OSYS_FIR_PHASES,
stripe_phase_init_y,
scaler_luma->phase_step,
IMGU_OSYS_TAPS_Y,
stripe_pad_left_y,
stripe_pad_right_y);
stripe_input_width_uv = stripe_offset_col_uv +
imgu_css_osys_calc_inp_stripe_width(
stripe_output_width_uv,
IMGU_OSYS_FIR_PHASES,
stripe_phase_init_uv,
scaler_chroma->phase_step,
IMGU_OSYS_TAPS_UV,
stripe_pad_left_uv,
stripe_pad_right_uv);
stripe_input_width_uv = max(DIV_ROUND_UP(
stripe_input_width_y,
IMGU_LUMA_TO_CHROMA_RATIO),
stripe_input_width_uv);
stripe_input_width_y = stripe_input_width_uv *
IMGU_LUMA_TO_CHROMA_RATIO;
if (s >= stripes - 1) {
stripe_input_width_y = reso.input_width -
stripe_offset_blk_y;
/*
* The scaler requires that the last stripe
* spans at least two input blocks.
*/
}
/*
* Spec: input stripe width must be a multiple of 8.
* Increase the input width and recalculate the output
* width. This may produce an extra column of junk
* blocks which will be overwritten by the
* next stripe.
*/
stripe_input_width_y = ALIGN(stripe_input_width_y, 8);
stripe_output_width_y =
imgu_css_osys_out_stripe_width(
stripe_input_width_y,
IMGU_OSYS_FIR_PHASES,
stripe_phase_init_y,
scaler_luma->phase_step,
IMGU_OSYS_TAPS_Y,
stripe_pad_left_y,
stripe_pad_right_y,
stripe_offset_col_y);
stripe_output_width_y =
rounddown(stripe_output_width_y,
IMGU_LUMA_TO_CHROMA_RATIO);
}
/*
* The following section computes and stores the parameters
* for both cases - striping and no striping.
*/
{
unsigned int i;
/*Input resolution */
stripe_params[s].input_width = stripe_input_width_y;
stripe_params[s].input_height = reso.input_height;
for (i = 0; i < IMGU_ABI_OSYS_PINS; i++) {
if (frame_params[i].scaled) {
/*
* Output stripe resolution and offset
* as produced by the scaler; actual
* output resolution may be slightly
* smaller.
*/
stripe_params[s].output_width[i] =
stripe_output_width_y;
stripe_params[s].output_height[i] =
reso.pin_height[i];
stripe_params[s].output_offset[i] =
stripe_offset_out_y;
} else {
/* Unscaled pin */
stripe_params[s].output_width[i] =
stripe_params[s].input_width;
stripe_params[s].output_height[i] =
stripe_params[s].input_height;
stripe_params[s].output_offset[i] =
stripe_offset_blk_y;
}
}
/* If no pin uses the scaler, we use BYPASS mode */
stripe_params[s].processing_mode = procmode;
stripe_params[s].phase_step = scaler_luma->phase_step;
stripe_params[s].exp_shift = scaler_luma->exp_shift;
stripe_params[s].phase_init_left_y =
stripe_phase_init_y;
stripe_params[s].phase_init_left_uv =
stripe_phase_init_uv;
stripe_params[s].phase_init_top_y =
scaler_luma->phase_init;
stripe_params[s].phase_init_top_uv =
scaler_chroma->phase_init;
stripe_params[s].pad_left_y = stripe_pad_left_y;
stripe_params[s].pad_left_uv = stripe_pad_left_uv;
stripe_params[s].pad_right_y = stripe_pad_right_y;
stripe_params[s].pad_right_uv = stripe_pad_right_uv;
stripe_params[s].pad_top_y = scaler_luma->pad_left;
stripe_params[s].pad_top_uv = scaler_chroma->pad_left;
stripe_params[s].pad_bottom_y = scaler_luma->pad_right;
stripe_params[s].pad_bottom_uv =
scaler_chroma->pad_right;
stripe_params[s].crop_left_y = stripe_crop_left_y;
stripe_params[s].crop_top_y = scaler_luma->crop_top;
stripe_params[s].crop_left_uv = stripe_crop_left_uv;
stripe_params[s].crop_top_uv = scaler_chroma->crop_top;
stripe_params[s].start_column_y = stripe_offset_col_y;
stripe_params[s].start_column_uv = stripe_offset_col_uv;
stripe_params[s].chunk_width = reso.chunk_width;
stripe_params[s].chunk_height = reso.chunk_height;
stripe_params[s].block_width = reso.block_width;
stripe_params[s].block_height = reso.block_height;
}
}
return 0;
}
/*
* This function configures the Output Formatter System, given the number of
* stripes and the scaler luma and chroma parameters
*/
static int imgu_css_osys_calc(struct imgu_css *css, unsigned int pipe,
unsigned int stripes,
struct imgu_abi_osys_config *osys,
struct imgu_css_scaler_info *scaler_luma,
struct imgu_css_scaler_info *scaler_chroma,
struct imgu_abi_stripes block_stripes[])
{
struct imgu_css_frame_params frame_params[IMGU_ABI_OSYS_PINS];
struct imgu_css_stripe_params stripe_params[IPU3_UAPI_MAX_STRIPES];
struct imgu_abi_osys_formatter_params *param;
unsigned int pin, s;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
memset(osys, 0, sizeof(*osys));
/* Compute the frame and stripe params */
if (imgu_css_osys_calc_frame_and_stripe_params(css, stripes, osys,
scaler_luma,
scaler_chroma,
frame_params,
stripe_params, pipe))
return -EINVAL;
/* Output formatter system parameters */
for (s = 0; s < stripes; s++) {
struct imgu_abi_osys_scaler_params *scaler =
&osys->scaler[s].param;
int fifo_addr_fmt = IMGU_FIFO_ADDR_SCALER_TO_FMT;
int fifo_addr_ack = IMGU_FIFO_ADDR_SCALER_TO_SP;
/* OUTPUT 0 / PIN 0 is only Scaler output */
scaler->inp_buf_y_st_addr = IMGU_VMEM1_INP_BUF_ADDR;
/*
* = (IMGU_OSYS_BLOCK_WIDTH / IMGU_VMEM1_ELEMS_PER_VEC)
* = (2 * IPU3_UAPI_ISP_VEC_ELEMS) /
* (IMGU_HIVE_OF_SYS_OF_SYSTEM_NWAYS)
* = 2 * 64 / 32 = 4
*/
scaler->inp_buf_y_line_stride = IMGU_VMEM1_Y_STRIDE;
/*
* = (IMGU_VMEM1_V_OFFSET + VMEM1_uv_size)
* = (IMGU_VMEM1_U_OFFSET + VMEM1_uv_size) +
* (VMEM1_y_size / 4)
* = (VMEM1_y_size) + (VMEM1_y_size / 4) +
* (IMGU_OSYS_BLOCK_HEIGHT * IMGU_VMEM1_Y_STRIDE)/4
* = (IMGU_OSYS_BLOCK_HEIGHT * IMGU_VMEM1_Y_STRIDE)
*/
scaler->inp_buf_y_buffer_stride = IMGU_VMEM1_BUF_SIZE;
scaler->inp_buf_u_st_addr = IMGU_VMEM1_INP_BUF_ADDR +
IMGU_VMEM1_U_OFFSET;
scaler->inp_buf_v_st_addr = IMGU_VMEM1_INP_BUF_ADDR +
IMGU_VMEM1_V_OFFSET;
scaler->inp_buf_uv_line_stride = IMGU_VMEM1_UV_STRIDE;
scaler->inp_buf_uv_buffer_stride = IMGU_VMEM1_BUF_SIZE;
scaler->inp_buf_chunk_width = stripe_params[s].chunk_width;
scaler->inp_buf_nr_buffers = IMGU_OSYS_NUM_INPUT_BUFFERS;
/* Output buffers */
scaler->out_buf_y_st_addr = IMGU_VMEM1_INT_BUF_ADDR;
scaler->out_buf_y_line_stride = stripe_params[s].block_width /
IMGU_VMEM1_ELEMS_PER_VEC;
scaler->out_buf_y_buffer_stride = IMGU_VMEM1_BUF_SIZE;
scaler->out_buf_u_st_addr = IMGU_VMEM1_INT_BUF_ADDR +
IMGU_VMEM1_U_OFFSET;
scaler->out_buf_v_st_addr = IMGU_VMEM1_INT_BUF_ADDR +
IMGU_VMEM1_V_OFFSET;
scaler->out_buf_uv_line_stride = stripe_params[s].block_width /
IMGU_VMEM1_ELEMS_PER_VEC / 2;
scaler->out_buf_uv_buffer_stride = IMGU_VMEM1_BUF_SIZE;
scaler->out_buf_nr_buffers = IMGU_OSYS_NUM_INTERM_BUFFERS;
/* Intermediate buffers */
scaler->int_buf_y_st_addr = IMGU_VMEM2_BUF_Y_ADDR;
scaler->int_buf_y_line_stride = IMGU_VMEM2_BUF_Y_STRIDE;
scaler->int_buf_u_st_addr = IMGU_VMEM2_BUF_U_ADDR;
scaler->int_buf_v_st_addr = IMGU_VMEM2_BUF_V_ADDR;
scaler->int_buf_uv_line_stride = IMGU_VMEM2_BUF_UV_STRIDE;
scaler->int_buf_height = IMGU_VMEM2_LINES_PER_BLOCK;
scaler->int_buf_chunk_width = stripe_params[s].chunk_height;
scaler->int_buf_chunk_height = stripe_params[s].block_width;
/* Context buffers */
scaler->ctx_buf_hor_y_st_addr = IMGU_VMEM3_HOR_Y_ADDR;
scaler->ctx_buf_hor_u_st_addr = IMGU_VMEM3_HOR_U_ADDR;
scaler->ctx_buf_hor_v_st_addr = IMGU_VMEM3_HOR_V_ADDR;
scaler->ctx_buf_ver_y_st_addr = IMGU_VMEM3_VER_Y_ADDR;
scaler->ctx_buf_ver_u_st_addr = IMGU_VMEM3_VER_U_ADDR;
scaler->ctx_buf_ver_v_st_addr = IMGU_VMEM3_VER_V_ADDR;
/* Addresses for release-input and process-output tokens */
scaler->release_inp_buf_addr = fifo_addr_ack;
scaler->release_inp_buf_en = 1;
scaler->release_out_buf_en = 1;
scaler->process_out_buf_addr = fifo_addr_fmt;
/* Settings dimensions, padding, cropping */
scaler->input_image_y_width = stripe_params[s].input_width;
scaler->input_image_y_height = stripe_params[s].input_height;
scaler->input_image_y_start_column =
stripe_params[s].start_column_y;
scaler->input_image_uv_start_column =
stripe_params[s].start_column_uv;
scaler->input_image_y_left_pad = stripe_params[s].pad_left_y;
scaler->input_image_uv_left_pad = stripe_params[s].pad_left_uv;
scaler->input_image_y_right_pad = stripe_params[s].pad_right_y;
scaler->input_image_uv_right_pad =
stripe_params[s].pad_right_uv;
scaler->input_image_y_top_pad = stripe_params[s].pad_top_y;
scaler->input_image_uv_top_pad = stripe_params[s].pad_top_uv;
scaler->input_image_y_bottom_pad =
stripe_params[s].pad_bottom_y;
scaler->input_image_uv_bottom_pad =
stripe_params[s].pad_bottom_uv;
scaler->processing_mode = stripe_params[s].processing_mode;
scaler->scaling_ratio = stripe_params[s].phase_step;
scaler->y_left_phase_init = stripe_params[s].phase_init_left_y;
scaler->uv_left_phase_init =
stripe_params[s].phase_init_left_uv;
scaler->y_top_phase_init = stripe_params[s].phase_init_top_y;
scaler->uv_top_phase_init = stripe_params[s].phase_init_top_uv;
scaler->coeffs_exp_shift = stripe_params[s].exp_shift;
scaler->out_y_left_crop = stripe_params[s].crop_left_y;
scaler->out_uv_left_crop = stripe_params[s].crop_left_uv;
scaler->out_y_top_crop = stripe_params[s].crop_top_y;
scaler->out_uv_top_crop = stripe_params[s].crop_top_uv;
for (pin = 0; pin < IMGU_ABI_OSYS_PINS; pin++) {
int in_fifo_addr;
int out_fifo_addr;
int block_width_vecs;
int input_width_s;
int input_width_vecs;
int input_buf_y_st_addr;
int input_buf_u_st_addr;
int input_buf_v_st_addr;
int input_buf_y_line_stride;
int input_buf_uv_line_stride;
int output_buf_y_line_stride;
int output_buf_uv_line_stride;
int output_buf_nr_y_lines;
int block_height;
int block_width;
struct imgu_abi_osys_frame_params *fr_pr;
fr_pr = &osys->frame[pin].param;
/* Frame parameters */
fr_pr->enable = frame_params[pin].enable;
fr_pr->format = frame_params[pin].format;
fr_pr->mirror = frame_params[pin].mirror;
fr_pr->flip = frame_params[pin].flip;
fr_pr->tiling = frame_params[pin].tiling;
fr_pr->width = frame_params[pin].width;
fr_pr->height = frame_params[pin].height;
fr_pr->stride = frame_params[pin].stride;
fr_pr->scaled = frame_params[pin].scaled;
/* Stripe parameters */
osys->stripe[s].crop_top[pin] =
frame_params[pin].crop_top;
osys->stripe[s].input_width =
stripe_params[s].input_width;
osys->stripe[s].input_height =
stripe_params[s].input_height;
osys->stripe[s].block_height =
stripe_params[s].block_height;
osys->stripe[s].block_width =
stripe_params[s].block_width;
osys->stripe[s].output_width[pin] =
stripe_params[s].output_width[pin];
osys->stripe[s].output_height[pin] =
stripe_params[s].output_height[pin];
if (s == 0) {
/* Only first stripe should do left cropping */
osys->stripe[s].crop_left[pin] =
frame_params[pin].crop_left;
osys->stripe[s].output_offset[pin] =
stripe_params[s].output_offset[pin];
} else {
/*
* The stripe offset for the other stripes must be
* adjusted according to the cropping done
* on the first stripe
*/
osys->stripe[s].crop_left[pin] = 0;
osys->stripe[s].output_offset[pin] =
(stripe_params[s].output_offset[pin] -
osys->stripe[0].crop_left[pin]);
}
if (!frame_params[pin].enable)
continue;
/* Formatter: configurations */
/*
* Get the dimensions of the input blocks of the
* formatter, which is the same as the output
* blocks of the scaler.
*/
if (frame_params[pin].scaled) {
block_height = stripe_params[s].block_height;
block_width = stripe_params[s].block_width;
} else {
block_height = IMGU_OSYS_BLOCK_HEIGHT;
block_width = IMGU_OSYS_BLOCK_WIDTH;
}
block_width_vecs =
block_width / IMGU_VMEM1_ELEMS_PER_VEC;
/*
* The input/output line stride depends on the
* block size.
*/
input_buf_y_line_stride = block_width_vecs;
input_buf_uv_line_stride = block_width_vecs / 2;
output_buf_y_line_stride = block_width_vecs;
output_buf_uv_line_stride = block_width_vecs / 2;
output_buf_nr_y_lines = block_height;
if (frame_params[pin].format ==
IMGU_ABI_OSYS_FORMAT_NV12 ||
frame_params[pin].format ==
IMGU_ABI_OSYS_FORMAT_NV21)
output_buf_uv_line_stride =
output_buf_y_line_stride;
/*
* Tiled outputs use a different output buffer
* configuration. The input (= scaler output) block
* width translates to a tile height, and the block
* height to the tile width. The default block size of
* 128x32 maps exactly onto a 4kB tile (512x8) for Y.
* For UV, the tile width is always half.
*/
if (frame_params[pin].tiling) {
output_buf_nr_y_lines = 8;
output_buf_y_line_stride = 512 /
IMGU_VMEM1_ELEMS_PER_VEC;
output_buf_uv_line_stride = 256 /
IMGU_VMEM1_ELEMS_PER_VEC;
}
/*
* Store the output buffer line stride. Will be
* used to compute buffer offsets in boundary
* conditions when output blocks are partially
* outside the image.
*/
osys->stripe[s].buf_stride[pin] =
output_buf_y_line_stride *
IMGU_HIVE_OF_SYS_OF_SYSTEM_NWAYS;
if (frame_params[pin].scaled) {
/*
* The input buffs are the intermediate
* buffers (scalers' output)
*/
input_buf_y_st_addr = IMGU_VMEM1_INT_BUF_ADDR;
input_buf_u_st_addr = IMGU_VMEM1_INT_BUF_ADDR +
IMGU_VMEM1_U_OFFSET;
input_buf_v_st_addr = IMGU_VMEM1_INT_BUF_ADDR +
IMGU_VMEM1_V_OFFSET;
} else {
/*
* The input buffers are the buffers
* filled by the SP
*/
input_buf_y_st_addr = IMGU_VMEM1_INP_BUF_ADDR;
input_buf_u_st_addr = IMGU_VMEM1_INP_BUF_ADDR +
IMGU_VMEM1_U_OFFSET;
input_buf_v_st_addr = IMGU_VMEM1_INP_BUF_ADDR +
IMGU_VMEM1_V_OFFSET;
}
/*
* The formatter input width must be rounded to
* the block width. Otherwise the formatter will
* not recognize the end of the line, resulting
* in incorrect tiling (system may hang!) and
* possibly other problems.
*/
input_width_s =
roundup(stripe_params[s].output_width[pin],
block_width);
input_width_vecs = input_width_s /
IMGU_VMEM1_ELEMS_PER_VEC;
out_fifo_addr = IMGU_FIFO_ADDR_FMT_TO_SP;
/*
* Process-output tokens must be sent to the SP.
* When scaling, the release-input tokens can be
* sent directly to the scaler, otherwise the
* formatter should send them to the SP.
*/
if (frame_params[pin].scaled)
in_fifo_addr = IMGU_FIFO_ADDR_FMT_TO_SCALER;
else
in_fifo_addr = IMGU_FIFO_ADDR_FMT_TO_SP;
/* Formatter */
param = &osys->formatter[s][pin].param;
param->format = frame_params[pin].format;
param->flip = frame_params[pin].flip;
param->mirror = frame_params[pin].mirror;
param->tiling = frame_params[pin].tiling;
param->reduce_range = frame_params[pin].reduce_range;
param->alpha_blending = 0;
param->release_inp_addr = in_fifo_addr;
param->release_inp_en = 1;
param->process_out_buf_addr = out_fifo_addr;
param->image_width_vecs = input_width_vecs;
param->image_height_lines =
stripe_params[s].output_height[pin];
param->inp_buff_y_st_addr = input_buf_y_st_addr;
param->inp_buff_y_line_stride = input_buf_y_line_stride;
param->inp_buff_y_buffer_stride = IMGU_VMEM1_BUF_SIZE;
param->int_buff_u_st_addr = input_buf_u_st_addr;
param->int_buff_v_st_addr = input_buf_v_st_addr;
param->inp_buff_uv_line_stride =
input_buf_uv_line_stride;
param->inp_buff_uv_buffer_stride = IMGU_VMEM1_BUF_SIZE;
param->out_buff_level = 0;
param->out_buff_nr_y_lines = output_buf_nr_y_lines;
param->out_buff_u_st_offset = IMGU_VMEM1_U_OFFSET;
param->out_buff_v_st_offset = IMGU_VMEM1_V_OFFSET;
param->out_buff_y_line_stride =
output_buf_y_line_stride;
param->out_buff_uv_line_stride =
output_buf_uv_line_stride;
param->hist_buff_st_addr = IMGU_VMEM1_HST_BUF_ADDR;
param->hist_buff_line_stride =
IMGU_VMEM1_HST_BUF_STRIDE;
param->hist_buff_nr_lines = IMGU_VMEM1_HST_BUF_NLINES;
}
}
block_stripes[0].offset = 0;
if (stripes <= 1) {
block_stripes[0].width = stripe_params[0].input_width;
block_stripes[0].height = stripe_params[0].input_height;
} else {
struct imgu_fw_info *bi =
&css->fwp->binary_header[css_pipe->bindex];
unsigned int sp_block_width =
bi->info.isp.sp.block.block_width *
IPU3_UAPI_ISP_VEC_ELEMS;
block_stripes[0].width = roundup(stripe_params[0].input_width,
sp_block_width);
block_stripes[1].offset =
rounddown(css_pipe->rect[IPU3_CSS_RECT_GDC].width -
stripe_params[1].input_width, sp_block_width);
block_stripes[1].width =
roundup(css_pipe->rect[IPU3_CSS_RECT_GDC].width -
block_stripes[1].offset, sp_block_width);
block_stripes[0].height = css_pipe->rect[IPU3_CSS_RECT_GDC].height;
block_stripes[1].height = block_stripes[0].height;
}
return 0;
}
/*********************** Mostly 3A operations ******************************/
/*
* This function creates a "TO-DO list" (operations) for the sp code.
*
* There are 2 types of operations:
* 1. Transfer: Issue DMA transfer request for copying grid cells from DDR to
* accelerator space (NOTE that this space is limited); associated data:
* DDR address + accelerator's config set index(acc's address).
*
* 2. Issue "Process Lines Command" to shd accelerator
* associated data: #lines + which config set to use (actually, accelerator
* will use x AND (x+1)%num_of_sets - NOTE that this implies the restriction
* of not touching config sets x & (x+1)%num_of_sets when process_lines(x)
* is active).
*
* Basically there are 2 types of operations "chunks":
* 1. "initial chunk": Initially, we do as much transfers as we can (and need)
* [0 - max sets(3) ] followed by 1 or 2 "process lines" operations.
*
* 2. "regular chunk" - 1 transfer followed by 1 process line operation.
* (in some cases we might need an additional transfer at the last chunk).
*
* for some case:
* --> init
* tr (0)
* tr (1)
* tr (2)
* pl (0)
* pl (1)
* --> ack (0)
* tr (3)
* pl (2)
* --> ack (1)
* pl (3)
* --> ack (2)
* do nothing
* --> ack (3)
* do nothing
*/
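/*
 * Worked example with hypothetical numbers: block_height = 8,
 * grid_height_per_slice = 2 (set_height = 16), y_start = -4 and
 * image_height = 1080. Then first_process_lines = 16 - 4 = 12,
 * last_set_height = (1080 - 12) % 16 = 12 and num_of_sets = 68, so the
 * process-lines commands cover 12 + 66 * 16 + 12 = 1080 lines.
 */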
static int
imgu_css_shd_ops_calc(struct imgu_abi_shd_intra_frame_operations_data *ops,
const struct ipu3_uapi_shd_grid_config *grid,
unsigned int image_height)
{
unsigned int block_height = 1 << grid->block_height_log2;
unsigned int grid_height_per_slice = grid->grid_height_per_slice;
unsigned int set_height = grid_height_per_slice * block_height;
/* We currently support only abs(y_start) > grid_height_per_slice */
unsigned int positive_y_start = (unsigned int)-grid->y_start;
unsigned int first_process_lines =
set_height - (positive_y_start % set_height);
unsigned int last_set_height;
unsigned int num_of_sets;
struct imgu_abi_acc_operation *p_op;
struct imgu_abi_acc_process_lines_cmd_data *p_pl;
struct imgu_abi_shd_transfer_luts_set_data *p_tr;
unsigned int op_idx, pl_idx, tr_idx;
unsigned char tr_set_num, pl_cfg_set;
/*
* When the number of lines for the last process lines command
* is equal to a set height, we need another line of grid cell -
* additional transfer is required.
*/
unsigned char last_tr = 0;
/* Add "process lines" command to the list of operations */
bool add_pl;
/* Add DMA xfer (config set) command to the list of ops */
bool add_tr;
/*
* Available partial grid (the part that fits into #IMGU_SHD_SETS sets)
* doesn't cover whole frame - need to process in chunks
*/
if (image_height > first_process_lines) {
last_set_height =
(image_height - first_process_lines) % set_height;
num_of_sets = last_set_height > 0 ?
(image_height - first_process_lines) / set_height + 2 :
(image_height - first_process_lines) / set_height + 1;
last_tr = (set_height - last_set_height <= block_height ||
last_set_height == 0) ? 1 : 0;
} else { /* partial grid covers whole frame */
last_set_height = 0;
num_of_sets = 1;
first_process_lines = image_height;
last_tr = set_height - image_height <= block_height ? 1 : 0;
}
/* Init operations lists and counters */
p_op = ops->operation_list;
op_idx = 0;
p_pl = ops->process_lines_data;
pl_idx = 0;
p_tr = ops->transfer_data;
tr_idx = 0;
memset(ops, 0, sizeof(*ops));
/* Cyclic counters that hold config set number [0,IMGU_SHD_SETS) */
tr_set_num = 0;
pl_cfg_set = 0;
/*
* Always start with a transfer - process lines command must be
* initiated only after appropriate config sets are in place
* (2 configuration sets per process line command, except for last one).
*/
add_pl = false;
add_tr = true;
while (add_pl || add_tr) {
/* Transfer ops */
if (add_tr) {
if (op_idx >= IMGU_ABI_SHD_MAX_OPERATIONS ||
tr_idx >= IMGU_ABI_SHD_MAX_TRANSFERS)
return -EINVAL;
p_op[op_idx].op_type =
IMGU_ABI_ACC_OPTYPE_TRANSFER_DATA;
p_op[op_idx].op_indicator = IMGU_ABI_ACC_OP_IDLE;
op_idx++;
p_tr[tr_idx].set_number = tr_set_num;
tr_idx++;
tr_set_num = (tr_set_num + 1) % IMGU_SHD_SETS;
}
/* Process-lines ops */
if (add_pl) {
if (op_idx >= IMGU_ABI_SHD_MAX_OPERATIONS ||
pl_idx >= IMGU_ABI_SHD_MAX_PROCESS_LINES)
return -EINVAL;
p_op[op_idx].op_type =
IMGU_ABI_ACC_OPTYPE_PROCESS_LINES;
/*
* In case we have 2 process lines commands -
* don't stop after the first one
*/
if (pl_idx == 0 && num_of_sets != 1)
p_op[op_idx].op_indicator =
IMGU_ABI_ACC_OP_IDLE;
/*
* Initiate last process lines command -
* end of operation list.
*/
else if (pl_idx == num_of_sets - 1)
p_op[op_idx].op_indicator =
IMGU_ABI_ACC_OP_END_OF_OPS;
/*
* Intermediate process line command - end of operation
* "chunk" (meaning few "transfers" followed by few
* "process lines" commands).
*/
else
p_op[op_idx].op_indicator =
IMGU_ABI_ACC_OP_END_OF_ACK;
op_idx++;
/* first process line operation */
if (pl_idx == 0)
p_pl[pl_idx].lines = first_process_lines;
/* Last process line operation */
else if (pl_idx == num_of_sets - 1 &&
last_set_height > 0)
p_pl[pl_idx].lines = last_set_height;
else /* "regular" process lines operation */
p_pl[pl_idx].lines = set_height;
p_pl[pl_idx].cfg_set = pl_cfg_set;
pl_idx++;
pl_cfg_set = (pl_cfg_set + 1) % IMGU_SHD_SETS;
}
/*
* Initially, we always transfer
* min(IMGU_SHD_SETS, num_of_sets) - after that we fill in the
* corresponding process lines commands.
*/
if (tr_idx == IMGU_SHD_SETS ||
tr_idx == num_of_sets + last_tr) {
add_tr = false;
add_pl = true;
}
/*
* We have finished the "initial" operations chunk -
* be ready to get more chunks.
*/
if (pl_idx == 2) {
add_tr = true;
add_pl = true;
}
/* Stop conditions for each operation type */
if (tr_idx == num_of_sets + last_tr)
add_tr = false;
if (pl_idx == num_of_sets)
add_pl = false;
}
return 0;
}
/*
* The following handshake protocol is the same for AF, AWB and AWB FR.
*
* for n sets of meta-data, the flow is:
* --> init
* process-lines (0)
* process-lines (1) eoc
* --> ack (0)
* read-meta-data (0)
* process-lines (2) eoc
* --> ack (1)
* read-meta-data (1)
* process-lines (3) eoc
* ...
*
* --> ack (n-3)
* read-meta-data (n-3)
* process-lines (n-1) eoc
* --> ack (n-2)
* read-meta-data (n-2) eoc
* --> ack (n-1)
* read-meta-data (n-1) eof
*
* for 2 sets we get:
* --> init
* pl (0)
* pl (1) eoc
* --> ack (0)
* pl (2) - rest of image, if applicable
* rmd (0) eoc
* --> ack (1)
* rmd (1) eof
* --> (ack (2))
* do nothing
*
* for only one set:
*
* --> init
* pl(0) eoc
* --> ack (0)
* rmd (0) eof
*
* grid smaller than image case
* for example 128x128 grid (block size 8x8, 16x16 num of blocks)
* start at (0,0)
* 1st set holds 160 cells - 10 blocks vertical, 16 horizontal
* => 1st process lines = 80
* we're left with 128-80=48 lines (6 blocks vertical)
* => 2nd process lines = 48
* last process lines to cover the image - image_height - 128
*
* --> init
* pl (0) first
* pl (1) last-in-grid
* --> ack (0)
* rmd (0)
* pl (2) after-grid
* --> ack (1)
* rmd (1) eof
* --> ack (2)
* do nothing
*/
struct process_lines {
unsigned int image_height;
unsigned short grid_height;
unsigned short block_height;
unsigned short y_start;
unsigned char grid_height_per_slice;
unsigned short max_op; /* max operation */
unsigned short max_tr; /* max transaction */
unsigned char acc_enable;
};
/* Helper to config intra_frame_operations_data. */
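/*
 * Worked example with hypothetical numbers (no FF delay correction):
 * grid_height = 16, grid_height_per_slice = 2 (num_of_sets = 8),
 * block_height = 8, y_start = 2, image_height = 1080. Then
 * process_lines = 16, grid_last_line = 2 + 16 * 8 = 130,
 * first_process_lines = 18, last_process_lines_in_grid =
 * (130 - 18) - 6 * 16 = 16 and process_lines_after_grid = 950,
 * i.e. 9 process-lines commands in total.
 */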
static int
imgu_css_acc_process_lines(const struct process_lines *pl,
struct imgu_abi_acc_operation *p_op,
struct imgu_abi_acc_process_lines_cmd_data *p_pl,
struct imgu_abi_acc_transfer_op_data *p_tr)
{
unsigned short op_idx = 0, pl_idx = 0, tr_idx = 0;
unsigned char tr_set_num = 0, pl_cfg_set = 0;
const unsigned short grid_last_line =
pl->y_start + pl->grid_height * pl->block_height;
const unsigned short process_lines =
pl->grid_height_per_slice * pl->block_height;
unsigned int process_lines_after_grid;
unsigned short first_process_lines;
unsigned short last_process_lines_in_grid;
unsigned short num_of_process_lines;
unsigned short num_of_sets;
if (pl->grid_height_per_slice == 0)
return -EINVAL;
if (pl->acc_enable && grid_last_line > pl->image_height)
return -EINVAL;
num_of_sets = pl->grid_height / pl->grid_height_per_slice;
if (num_of_sets * pl->grid_height_per_slice < pl->grid_height)
num_of_sets++;
/* Account for two line delay inside the FF */
if (pl->max_op == IMGU_ABI_AF_MAX_OPERATIONS) {
first_process_lines = process_lines + pl->y_start + 2;
last_process_lines_in_grid =
(grid_last_line - first_process_lines) -
((num_of_sets - 2) * process_lines) + 4;
process_lines_after_grid =
pl->image_height - grid_last_line - 4;
} else {
first_process_lines = process_lines + pl->y_start;
last_process_lines_in_grid =
(grid_last_line - first_process_lines) -
((num_of_sets - 2) * process_lines);
process_lines_after_grid = pl->image_height - grid_last_line;
}
num_of_process_lines = num_of_sets;
if (process_lines_after_grid > 0)
num_of_process_lines++;
while (tr_idx < num_of_sets || pl_idx < num_of_process_lines) {
/* Read meta-data */
if (pl_idx >= 2 || (pl_idx == 1 && num_of_sets == 1)) {
if (op_idx >= pl->max_op || tr_idx >= pl->max_tr)
return -EINVAL;
p_op[op_idx].op_type =
IMGU_ABI_ACC_OPTYPE_TRANSFER_DATA;
if (tr_idx == num_of_sets - 1)
/* The last operation is always a tr */
p_op[op_idx].op_indicator =
IMGU_ABI_ACC_OP_END_OF_OPS;
else if (tr_idx == num_of_sets - 2)
if (process_lines_after_grid == 0)
/*
* No additional pl op left -
* this op is the last of the cycle
*/
p_op[op_idx].op_indicator =
IMGU_ABI_ACC_OP_END_OF_ACK;
else
/*
* We still have lines to process after
* the grid, so there is one more pl op
*/
p_op[op_idx].op_indicator =
IMGU_ABI_ACC_OP_IDLE;
else
/* Default - usually there's a pl after a tr */
p_op[op_idx].op_indicator =
IMGU_ABI_ACC_OP_IDLE;
op_idx++;
if (p_tr) {
p_tr[tr_idx].set_number = tr_set_num;
tr_set_num = 1 - tr_set_num;
}
tr_idx++;
}
/* process_lines */
if (pl_idx < num_of_process_lines) {
if (op_idx >= pl->max_op || pl_idx >= pl->max_tr)
return -EINVAL;
p_op[op_idx].op_type =
IMGU_ABI_ACC_OPTYPE_PROCESS_LINES;
if (pl_idx == 0)
if (num_of_process_lines == 1)
/* Only one pl op */
p_op[op_idx].op_indicator =
IMGU_ABI_ACC_OP_END_OF_ACK;
else
/* On init - do two pl ops */
p_op[op_idx].op_indicator =
IMGU_ABI_ACC_OP_IDLE;
else
/* Usually pl is the end of the ack cycle */
p_op[op_idx].op_indicator =
IMGU_ABI_ACC_OP_END_OF_ACK;
op_idx++;
if (pl_idx == 0)
/* First process line */
p_pl[pl_idx].lines = first_process_lines;
else if (pl_idx == num_of_sets - 1)
/* Last in grid */
p_pl[pl_idx].lines = last_process_lines_in_grid;
else if (pl_idx == num_of_process_lines - 1)
/* After the grid */
p_pl[pl_idx].lines = process_lines_after_grid;
else
/* Inside the grid */
p_pl[pl_idx].lines = process_lines;
if (p_tr) {
p_pl[pl_idx].cfg_set = pl_cfg_set;
pl_cfg_set = 1 - pl_cfg_set;
}
pl_idx++;
}
}
return 0;
}
static int imgu_css_af_ops_calc(struct imgu_css *css, unsigned int pipe,
struct imgu_abi_af_config *af_config)
{
struct imgu_abi_af_intra_frame_operations_data *to =
&af_config->operations_data;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
struct imgu_fw_info *bi =
&css->fwp->binary_header[css_pipe->bindex];
struct process_lines pl = {
.image_height = css_pipe->rect[IPU3_CSS_RECT_BDS].height,
.grid_height = af_config->config.grid_cfg.height,
.block_height =
1 << af_config->config.grid_cfg.block_height_log2,
.y_start = af_config->config.grid_cfg.y_start &
IPU3_UAPI_GRID_START_MASK,
.grid_height_per_slice =
af_config->stripes[0].grid_cfg.height_per_slice,
.max_op = IMGU_ABI_AF_MAX_OPERATIONS,
.max_tr = IMGU_ABI_AF_MAX_TRANSFERS,
.acc_enable = bi->info.isp.sp.enable.af,
};
return imgu_css_acc_process_lines(&pl, to->ops, to->process_lines_data,
NULL);
}
static int
imgu_css_awb_fr_ops_calc(struct imgu_css *css, unsigned int pipe,
struct imgu_abi_awb_fr_config *awb_fr_config)
{
struct imgu_abi_awb_fr_intra_frame_operations_data *to =
&awb_fr_config->operations_data;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
struct imgu_fw_info *bi =
&css->fwp->binary_header[css_pipe->bindex];
struct process_lines pl = {
.image_height = css_pipe->rect[IPU3_CSS_RECT_BDS].height,
.grid_height = awb_fr_config->config.grid_cfg.height,
.block_height =
1 << awb_fr_config->config.grid_cfg.block_height_log2,
.y_start = awb_fr_config->config.grid_cfg.y_start &
IPU3_UAPI_GRID_START_MASK,
.grid_height_per_slice =
awb_fr_config->stripes[0].grid_cfg.height_per_slice,
.max_op = IMGU_ABI_AWB_FR_MAX_OPERATIONS,
.max_tr = IMGU_ABI_AWB_FR_MAX_PROCESS_LINES,
.acc_enable = bi->info.isp.sp.enable.awb_fr_acc,
};
return imgu_css_acc_process_lines(&pl, to->ops, to->process_lines_data,
NULL);
}
static int imgu_css_awb_ops_calc(struct imgu_css *css, unsigned int pipe,
struct imgu_abi_awb_config *awb_config)
{
struct imgu_abi_awb_intra_frame_operations_data *to =
&awb_config->operations_data;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
struct imgu_fw_info *bi =
&css->fwp->binary_header[css_pipe->bindex];
struct process_lines pl = {
.image_height = css_pipe->rect[IPU3_CSS_RECT_BDS].height,
.grid_height = awb_config->config.grid.height,
.block_height =
1 << awb_config->config.grid.block_height_log2,
.y_start = awb_config->config.grid.y_start,
.grid_height_per_slice =
awb_config->stripes[0].grid.height_per_slice,
.max_op = IMGU_ABI_AWB_MAX_OPERATIONS,
.max_tr = IMGU_ABI_AWB_MAX_TRANSFERS,
.acc_enable = bi->info.isp.sp.enable.awb_acc,
};
return imgu_css_acc_process_lines(&pl, to->ops, to->process_lines_data,
to->transfer_data);
}
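/*
 * E.g. with x_start = 0, width = 16 and block_width_log2 = 3 the grid
 * ends at x_end = 16 * 8 - 1 = 127.
 */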
static u16 imgu_css_grid_end(u16 start, u8 width, u8 block_width_log2)
{
return (start & IPU3_UAPI_GRID_START_MASK) +
(width << block_width_log2) - 1;
}
static void imgu_css_grid_end_calc(struct ipu3_uapi_grid_config *grid_cfg)
{
grid_cfg->x_end = imgu_css_grid_end(grid_cfg->x_start, grid_cfg->width,
grid_cfg->block_width_log2);
grid_cfg->y_end = imgu_css_grid_end(grid_cfg->y_start, grid_cfg->height,
grid_cfg->block_height_log2);
}
/****************** config computation *****************************/
static int imgu_css_cfg_acc_stripe(struct imgu_css *css, unsigned int pipe,
struct imgu_abi_acc_param *acc)
{
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
const struct imgu_fw_info *bi =
&css->fwp->binary_header[css_pipe->bindex];
struct imgu_css_scaler_info scaler_luma, scaler_chroma;
const unsigned int stripes = bi->info.isp.sp.iterator.num_stripes;
const unsigned int f = IPU3_UAPI_ISP_VEC_ELEMS * 2;
unsigned int bds_ds, i;
memset(acc, 0, sizeof(*acc));
/* acc_param: osys_config */
if (imgu_css_osys_calc(css, pipe, stripes, &acc->osys, &scaler_luma,
&scaler_chroma, acc->stripe.block_stripes))
return -EINVAL;
/* acc_param: stripe data */
/*
* For the striped case the approach is as follows:
* 1. down-scaled stripes are calculated - with 128 overlap
* (this is the main limiter therefore it's first)
* 2. input stripes are derived by up-scaling the down-scaled stripes
* (there are no alignment requirements on input stripes)
* 3. output stripes are derived from down-scaled stripes too
*/
acc->stripe.num_of_stripes = stripes;
acc->stripe.input_frame.width =
css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.width;
acc->stripe.input_frame.height =
css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.height;
acc->stripe.input_frame.bayer_order =
css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order;
for (i = 0; i < stripes; i++)
acc->stripe.bds_out_stripes[i].height =
css_pipe->rect[IPU3_CSS_RECT_BDS].height;
acc->stripe.bds_out_stripes[0].offset = 0;
if (stripes <= 1) {
acc->stripe.bds_out_stripes[0].width =
ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, f);
} else {
/* Image processing is divided into two stripes */
acc->stripe.bds_out_stripes[0].width =
acc->stripe.bds_out_stripes[1].width =
(css_pipe->rect[IPU3_CSS_RECT_BDS].width / 2 & ~(f - 1)) + f;
/*
* The sum of the two stripe widths must not be smaller
* than the output width and must be an even multiple of
* the overlap unit f.
*/
if ((css_pipe->rect[IPU3_CSS_RECT_BDS].width / f & 1) !=
!!(css_pipe->rect[IPU3_CSS_RECT_BDS].width & (f - 1)))
acc->stripe.bds_out_stripes[0].width += f;
if ((css_pipe->rect[IPU3_CSS_RECT_BDS].width / f & 1) &&
(css_pipe->rect[IPU3_CSS_RECT_BDS].width & (f - 1))) {
acc->stripe.bds_out_stripes[0].width += f;
acc->stripe.bds_out_stripes[1].width += f;
}
/* Overlap between stripes is IPU3_UAPI_ISP_VEC_ELEMS * 4 */
acc->stripe.bds_out_stripes[1].offset =
acc->stripe.bds_out_stripes[0].width - 2 * f;
}
acc->stripe.effective_stripes[0].height =
css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height;
acc->stripe.effective_stripes[0].offset = 0;
acc->stripe.bds_out_stripes_no_overlap[0].height =
css_pipe->rect[IPU3_CSS_RECT_BDS].height;
acc->stripe.bds_out_stripes_no_overlap[0].offset = 0;
acc->stripe.output_stripes[0].height =
css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
acc->stripe.output_stripes[0].offset = 0;
if (stripes <= 1) {
acc->stripe.down_scaled_stripes[0].width =
css_pipe->rect[IPU3_CSS_RECT_BDS].width;
acc->stripe.down_scaled_stripes[0].height =
css_pipe->rect[IPU3_CSS_RECT_BDS].height;
acc->stripe.down_scaled_stripes[0].offset = 0;
acc->stripe.effective_stripes[0].width =
css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width;
acc->stripe.bds_out_stripes_no_overlap[0].width =
ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, f);
acc->stripe.output_stripes[0].width =
css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
} else { /* Two stripes */
bds_ds = css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width *
IMGU_BDS_GRANULARITY /
css_pipe->rect[IPU3_CSS_RECT_BDS].width;
acc->stripe.down_scaled_stripes[0] =
acc->stripe.bds_out_stripes[0];
acc->stripe.down_scaled_stripes[1] =
acc->stripe.bds_out_stripes[1];
if (!IS_ALIGNED(css_pipe->rect[IPU3_CSS_RECT_BDS].width, f))
acc->stripe.down_scaled_stripes[1].width +=
(css_pipe->rect[IPU3_CSS_RECT_BDS].width
& (f - 1)) - f;
acc->stripe.effective_stripes[0].width = bds_ds *
acc->stripe.down_scaled_stripes[0].width /
IMGU_BDS_GRANULARITY;
acc->stripe.effective_stripes[1].width = bds_ds *
acc->stripe.down_scaled_stripes[1].width /
IMGU_BDS_GRANULARITY;
acc->stripe.effective_stripes[1].height =
css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height;
acc->stripe.effective_stripes[1].offset = bds_ds *
acc->stripe.down_scaled_stripes[1].offset /
IMGU_BDS_GRANULARITY;
acc->stripe.bds_out_stripes_no_overlap[0].width =
acc->stripe.bds_out_stripes_no_overlap[1].offset =
ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, 2 * f) / 2;
acc->stripe.bds_out_stripes_no_overlap[1].width =
DIV_ROUND_UP(css_pipe->rect[IPU3_CSS_RECT_BDS].width, f)
/ 2 * f;
acc->stripe.bds_out_stripes_no_overlap[1].height =
css_pipe->rect[IPU3_CSS_RECT_BDS].height;
acc->stripe.output_stripes[0].width =
acc->stripe.down_scaled_stripes[0].width - f;
acc->stripe.output_stripes[1].width =
acc->stripe.down_scaled_stripes[1].width - f;
acc->stripe.output_stripes[1].height =
css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
acc->stripe.output_stripes[1].offset =
acc->stripe.output_stripes[0].width;
}
acc->stripe.output_system_in_frame_width =
css_pipe->rect[IPU3_CSS_RECT_GDC].width;
acc->stripe.output_system_in_frame_height =
css_pipe->rect[IPU3_CSS_RECT_GDC].height;
acc->stripe.effective_frame_width =
css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width;
acc->stripe.bds_frame_width = css_pipe->rect[IPU3_CSS_RECT_BDS].width;
acc->stripe.out_frame_width =
css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
acc->stripe.out_frame_height =
css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
acc->stripe.gdc_in_buffer_width =
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline /
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperpixel;
acc->stripe.gdc_in_buffer_height =
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height;
acc->stripe.gdc_in_buffer_offset_x = IMGU_GDC_BUF_X;
acc->stripe.gdc_in_buffer_offset_y = IMGU_GDC_BUF_Y;
acc->stripe.display_frame_width =
css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
acc->stripe.display_frame_height =
css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
acc->stripe.bds_aligned_frame_width =
roundup(css_pipe->rect[IPU3_CSS_RECT_BDS].width,
2 * IPU3_UAPI_ISP_VEC_ELEMS);
if (stripes > 1)
acc->stripe.half_overlap_vectors =
IMGU_STRIPE_FIXED_HALF_OVERLAP;
else
acc->stripe.half_overlap_vectors = 0;
return 0;
}
static void imgu_css_cfg_acc_dvs(struct imgu_css *css,
struct imgu_abi_acc_param *acc,
unsigned int pipe)
{
unsigned int i;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
/* Disable DVS statistics */
acc->dvs_stat.operations_data.process_lines_data[0].lines =
css_pipe->rect[IPU3_CSS_RECT_BDS].height;
acc->dvs_stat.operations_data.process_lines_data[0].cfg_set = 0;
acc->dvs_stat.operations_data.ops[0].op_type =
IMGU_ABI_ACC_OPTYPE_PROCESS_LINES;
acc->dvs_stat.operations_data.ops[0].op_indicator =
IMGU_ABI_ACC_OP_NO_OPS;
for (i = 0; i < IMGU_ABI_DVS_STAT_LEVELS; i++)
acc->dvs_stat.cfg.grd_config[i].enable = 0;
}
static void acc_bds_per_stripe_data(struct imgu_css *css,
struct imgu_abi_acc_param *acc,
const int i, unsigned int pipe)
{
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
acc->bds.per_stripe.aligned_data[i].data.crop.hor_crop_en = 0;
acc->bds.per_stripe.aligned_data[i].data.crop.hor_crop_start = 0;
acc->bds.per_stripe.aligned_data[i].data.crop.hor_crop_end = 0;
acc->bds.per_stripe.aligned_data[i].data.hor_ctrl0 =
acc->bds.hor.hor_ctrl0;
acc->bds.per_stripe.aligned_data[i].data.hor_ctrl0.out_frame_width =
acc->stripe.down_scaled_stripes[i].width;
acc->bds.per_stripe.aligned_data[i].data.ver_ctrl1.out_frame_width =
acc->stripe.down_scaled_stripes[i].width;
acc->bds.per_stripe.aligned_data[i].data.ver_ctrl1.out_frame_height =
css_pipe->rect[IPU3_CSS_RECT_BDS].height;
}
/*
* Configure `acc' parameters. `acc_old' contains the old values (or is NULL)
* and `acc_user' contains new prospective values. `use' contains flags
* telling which fields to take from the old values (or generate if it is NULL)
* and which to take from the new user values.
*/
int imgu_css_cfg_acc(struct imgu_css *css, unsigned int pipe,
struct ipu3_uapi_flags *use,
struct imgu_abi_acc_param *acc,
struct imgu_abi_acc_param *acc_old,
struct ipu3_uapi_acc_param *acc_user)
{
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
const struct imgu_fw_info *bi =
&css->fwp->binary_header[css_pipe->bindex];
const unsigned int stripes = bi->info.isp.sp.iterator.num_stripes;
const unsigned int tnr_frame_width =
acc->stripe.bds_aligned_frame_width;
const unsigned int min_overlap = 10;
const struct v4l2_pix_format_mplane *pixm =
&css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix;
const struct imgu_css_bds_config *cfg_bds;
struct imgu_abi_input_feeder_data *feeder_data;
unsigned int bds_ds, ofs_x, ofs_y, i, width, height;
u8 b_w_log2; /* Block width log2 */
/* Update stripe using chroma and luma */
if (imgu_css_cfg_acc_stripe(css, pipe, acc))
return -EINVAL;
/* acc_param: input_feeder_config */
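/*
 * Center the effective rectangle inside the input frame: the offsets
 * are kept even (& ~1) and, depending on bayer_order, presumably
 * shifted by one pixel so that the crop starts on the Bayer phase the
 * rest of the pipeline expects.
 */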
ofs_x = ((pixm->width -
css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width) >> 1) & ~1;
ofs_x += css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
IMGU_ABI_BAYER_ORDER_RGGB ||
css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
IMGU_ABI_BAYER_ORDER_GBRG ? 1 : 0;
ofs_y = ((pixm->height -
css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height) >> 1) & ~1;
ofs_y += css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
IMGU_ABI_BAYER_ORDER_BGGR ||
css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
IMGU_ABI_BAYER_ORDER_GBRG ? 1 : 0;
acc->input_feeder.data.row_stride = pixm->plane_fmt[0].bytesperline;
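	/*
	 * The feeder addresses the packed raw buffer word by word:
	 * start_row_address is the byte offset of the word holding the first
	 * cropped pixel (IMGU_PIXELS_PER_WORD pixels are packed into every
	 * IMGU_BYTES_PER_WORD-byte word) and start_pixel is the pixel's index
	 * within that word.
	 */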
acc->input_feeder.data.start_row_address =
ofs_x / IMGU_PIXELS_PER_WORD * IMGU_BYTES_PER_WORD +
ofs_y * acc->input_feeder.data.row_stride;
acc->input_feeder.data.start_pixel = ofs_x % IMGU_PIXELS_PER_WORD;
acc->input_feeder.data_per_stripe.input_feeder_data[0].data =
acc->input_feeder.data;
ofs_x += acc->stripe.effective_stripes[1].offset;
feeder_data =
&acc->input_feeder.data_per_stripe.input_feeder_data[1].data;
feeder_data->row_stride = acc->input_feeder.data.row_stride;
feeder_data->start_row_address =
ofs_x / IMGU_PIXELS_PER_WORD * IMGU_BYTES_PER_WORD +
ofs_y * acc->input_feeder.data.row_stride;
feeder_data->start_pixel = ofs_x % IMGU_PIXELS_PER_WORD;
/* acc_param: bnr_static_config */
/*
	 * Parameters either come from the user or fall back to the original
	 * default values if the user has never set them. When the user gives
	 * a new set of parameters, each chunk in the parameter structure has
	 * a use->xxx flag telling whether to take the user-provided value or
	 * not. If not, the parameter remains unchanged in the driver: its
	 * value is taken from acc_old.
*/
if (use && use->acc_bnr) {
/* Take values from user */
acc->bnr = acc_user->bnr;
} else if (acc_old) {
/* Use old value */
acc->bnr = acc_old->bnr;
} else {
/* Calculate from scratch */
acc->bnr = imgu_css_bnr_defaults;
}
acc->bnr.column_size = tnr_frame_width;
/* acc_param: bnr_static_config_green_disparity */
if (use && use->acc_green_disparity) {
/* Take values from user */
acc->green_disparity = acc_user->green_disparity;
} else if (acc_old) {
/* Use old value */
acc->green_disparity = acc_old->green_disparity;
} else {
/* Calculate from scratch */
memset(&acc->green_disparity, 0, sizeof(acc->green_disparity));
}
/* acc_param: dm_config */
if (use && use->acc_dm) {
/* Take values from user */
acc->dm = acc_user->dm;
} else if (acc_old) {
/* Use old value */
acc->dm = acc_old->dm;
} else {
/* Calculate from scratch */
acc->dm = imgu_css_dm_defaults;
}
acc->dm.frame_width = tnr_frame_width;
/* acc_param: ccm_mat_config */
if (use && use->acc_ccm) {
/* Take values from user */
acc->ccm = acc_user->ccm;
} else if (acc_old) {
/* Use old value */
acc->ccm = acc_old->ccm;
} else {
/* Calculate from scratch */
acc->ccm = imgu_css_ccm_defaults;
}
/* acc_param: gamma_config */
if (use && use->acc_gamma) {
/* Take values from user */
acc->gamma = acc_user->gamma;
} else if (acc_old) {
/* Use old value */
acc->gamma = acc_old->gamma;
} else {
/* Calculate from scratch */
acc->gamma.gc_ctrl.enable = 1;
acc->gamma.gc_lut = imgu_css_gamma_lut;
}
/* acc_param: csc_mat_config */
if (use && use->acc_csc) {
/* Take values from user */
acc->csc = acc_user->csc;
} else if (acc_old) {
/* Use old value */
acc->csc = acc_old->csc;
} else {
/* Calculate from scratch */
acc->csc = imgu_css_csc_defaults;
}
/* acc_param: cds_params */
if (use && use->acc_cds) {
/* Take values from user */
acc->cds = acc_user->cds;
} else if (acc_old) {
/* Use old value */
acc->cds = acc_old->cds;
} else {
/* Calculate from scratch */
acc->cds = imgu_css_cds_defaults;
}
/* acc_param: shd_config */
if (use && use->acc_shd) {
/* Take values from user */
acc->shd.shd = acc_user->shd.shd;
acc->shd.shd_lut = acc_user->shd.shd_lut;
} else if (acc_old) {
/* Use old value */
acc->shd.shd = acc_old->shd.shd;
acc->shd.shd_lut = acc_old->shd.shd_lut;
} else {
/* Calculate from scratch */
acc->shd.shd = imgu_css_shd_defaults;
memset(&acc->shd.shd_lut, 0, sizeof(acc->shd.shd_lut));
}
if (acc->shd.shd.grid.width <= 0)
return -EINVAL;
acc->shd.shd.grid.grid_height_per_slice =
IMGU_ABI_SHD_MAX_CELLS_PER_SET / acc->shd.shd.grid.width;
if (acc->shd.shd.grid.grid_height_per_slice <= 0)
return -EINVAL;
acc->shd.shd.general.init_set_vrt_offst_ul =
(-acc->shd.shd.grid.y_start >>
acc->shd.shd.grid.block_height_log2) %
acc->shd.shd.grid.grid_height_per_slice;
if (imgu_css_shd_ops_calc(&acc->shd.shd_ops, &acc->shd.shd.grid,
css_pipe->rect[IPU3_CSS_RECT_BDS].height))
return -EINVAL;
/* acc_param: dvs_stat_config */
imgu_css_cfg_acc_dvs(css, acc, pipe);
/* acc_param: yuvp1_iefd_config */
if (use && use->acc_iefd) {
/* Take values from user */
acc->iefd = acc_user->iefd;
} else if (acc_old) {
/* Use old value */
acc->iefd = acc_old->iefd;
} else {
/* Calculate from scratch */
acc->iefd = imgu_css_iefd_defaults;
}
/* acc_param: yuvp1_yds_config yds_c0 */
if (use && use->acc_yds_c0) {
/* Take values from user */
acc->yds_c0 = acc_user->yds_c0;
} else if (acc_old) {
/* Use old value */
acc->yds_c0 = acc_old->yds_c0;
} else {
/* Calculate from scratch */
acc->yds_c0 = imgu_css_yds_defaults;
}
/* acc_param: yuvp1_chnr_config chnr_c0 */
if (use && use->acc_chnr_c0) {
/* Take values from user */
acc->chnr_c0 = acc_user->chnr_c0;
} else if (acc_old) {
/* Use old value */
acc->chnr_c0 = acc_old->chnr_c0;
} else {
/* Calculate from scratch */
acc->chnr_c0 = imgu_css_chnr_defaults;
}
/* acc_param: yuvp1_y_ee_nr_config */
if (use && use->acc_y_ee_nr) {
/* Take values from user */
acc->y_ee_nr = acc_user->y_ee_nr;
} else if (acc_old) {
/* Use old value */
acc->y_ee_nr = acc_old->y_ee_nr;
} else {
/* Calculate from scratch */
acc->y_ee_nr = imgu_css_y_ee_nr_defaults;
}
/* acc_param: yuvp1_yds_config yds */
if (use && use->acc_yds) {
/* Take values from user */
acc->yds = acc_user->yds;
} else if (acc_old) {
/* Use old value */
acc->yds = acc_old->yds;
} else {
/* Calculate from scratch */
acc->yds = imgu_css_yds_defaults;
}
/* acc_param: yuvp1_chnr_config chnr */
if (use && use->acc_chnr) {
/* Take values from user */
acc->chnr = acc_user->chnr;
} else if (acc_old) {
/* Use old value */
acc->chnr = acc_old->chnr;
} else {
/* Calculate from scratch */
acc->chnr = imgu_css_chnr_defaults;
}
/* acc_param: yuvp2_y_tm_lut_static_config */
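	/* Program a linear ramp; the block itself stays disabled below */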
for (i = 0; i < IMGU_ABI_YUVP2_YTM_LUT_ENTRIES; i++)
acc->ytm.entries[i] = i * 32;
acc->ytm.enable = 0; /* Always disabled on IPU3 */
/* acc_param: yuvp1_yds_config yds2 */
if (use && use->acc_yds2) {
/* Take values from user */
acc->yds2 = acc_user->yds2;
} else if (acc_old) {
/* Use old value */
acc->yds2 = acc_old->yds2;
} else {
/* Calculate from scratch */
acc->yds2 = imgu_css_yds_defaults;
}
/* acc_param: yuvp2_tcc_static_config */
if (use && use->acc_tcc) {
/* Take values from user */
acc->tcc = acc_user->tcc;
} else if (acc_old) {
/* Use old value */
acc->tcc = acc_old->tcc;
} else {
/* Calculate from scratch */
memset(&acc->tcc, 0, sizeof(acc->tcc));
acc->tcc.gen_control.en = 1;
acc->tcc.gen_control.blend_shift = 3;
acc->tcc.gen_control.gain_according_to_y_only = 1;
acc->tcc.gen_control.gamma = 8;
acc->tcc.gen_control.delta = 0;
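		/*
		 * Default MACC matrices are diagonal (a = d = 1024, b = c = 0),
		 * i.e. pass-through if 1024 represents unity gain.
		 */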
for (i = 0; i < IPU3_UAPI_YUVP2_TCC_MACC_TABLE_ELEMENTS; i++) {
acc->tcc.macc_table.entries[i].a = 1024;
acc->tcc.macc_table.entries[i].b = 0;
acc->tcc.macc_table.entries[i].c = 0;
acc->tcc.macc_table.entries[i].d = 1024;
}
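		/*
		 * inv_y_lut: entry 6 is 1023 and each following entry halves
		 * the previous one (1024 >> (i - 6)), i.e. a 1/Y-shaped curve
		 * (reading based on the field name).
		 */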
acc->tcc.inv_y_lut.entries[6] = 1023;
for (i = 7; i < IPU3_UAPI_YUVP2_TCC_INV_Y_LUT_ELEMENTS; i++)
acc->tcc.inv_y_lut.entries[i] = 1024 >> (i - 6);
acc->tcc.gain_pcwl = imgu_css_tcc_gain_pcwl_lut;
acc->tcc.r_sqr_lut = imgu_css_tcc_r_sqr_lut;
}
/* acc_param: dpc_config */
if (use && use->acc_dpc)
return -EINVAL; /* Not supported yet */
/* Just disable by default */
memset(&acc->dpc, 0, sizeof(acc->dpc));
/* acc_param: bds_config */
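	/*
	 * bds_ds is the effective-to-BDS height ratio expressed in units of
	 * 1/IMGU_BDS_GRANULARITY and selects one of the pre-computed BDS
	 * configurations; e.g. an effective height twice the BDS height gives
	 * bds_ds == 2 * IMGU_BDS_GRANULARITY (downscale by two).
	 */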
bds_ds = (css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height *
IMGU_BDS_GRANULARITY) / css_pipe->rect[IPU3_CSS_RECT_BDS].height;
if (bds_ds < IMGU_BDS_MIN_SF_INV ||
bds_ds - IMGU_BDS_MIN_SF_INV >= ARRAY_SIZE(imgu_css_bds_configs))
return -EINVAL;
cfg_bds = &imgu_css_bds_configs[bds_ds - IMGU_BDS_MIN_SF_INV];
acc->bds.hor.hor_ctrl1.hor_crop_en = 0;
acc->bds.hor.hor_ctrl1.hor_crop_start = 0;
acc->bds.hor.hor_ctrl1.hor_crop_end = 0;
acc->bds.hor.hor_ctrl0.sample_patrn_length =
cfg_bds->sample_patrn_length;
acc->bds.hor.hor_ctrl0.hor_ds_en = cfg_bds->hor_ds_en;
acc->bds.hor.hor_ctrl0.min_clip_val = IMGU_BDS_MIN_CLIP_VAL;
acc->bds.hor.hor_ctrl0.max_clip_val = IMGU_BDS_MAX_CLIP_VAL;
acc->bds.hor.hor_ctrl0.out_frame_width =
css_pipe->rect[IPU3_CSS_RECT_BDS].width;
acc->bds.hor.hor_ptrn_arr = cfg_bds->ptrn_arr;
acc->bds.hor.hor_phase_arr = cfg_bds->hor_phase_arr;
acc->bds.hor.hor_ctrl2.input_frame_height =
css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height;
acc->bds.ver.ver_ctrl0.min_clip_val = IMGU_BDS_MIN_CLIP_VAL;
acc->bds.ver.ver_ctrl0.max_clip_val = IMGU_BDS_MAX_CLIP_VAL;
acc->bds.ver.ver_ctrl0.sample_patrn_length =
cfg_bds->sample_patrn_length;
acc->bds.ver.ver_ctrl0.ver_ds_en = cfg_bds->ver_ds_en;
acc->bds.ver.ver_ptrn_arr = cfg_bds->ptrn_arr;
acc->bds.ver.ver_phase_arr = cfg_bds->ver_phase_arr;
acc->bds.ver.ver_ctrl1.out_frame_width =
css_pipe->rect[IPU3_CSS_RECT_BDS].width;
acc->bds.ver.ver_ctrl1.out_frame_height =
css_pipe->rect[IPU3_CSS_RECT_BDS].height;
for (i = 0; i < stripes; i++)
acc_bds_per_stripe_data(css, acc, i, pipe);
acc->bds.enabled = cfg_bds->hor_ds_en || cfg_bds->ver_ds_en;
/* acc_param: anr_config */
if (use && use->acc_anr) {
/* Take values from user */
acc->anr.transform = acc_user->anr.transform;
acc->anr.stitch.anr_stitch_en =
acc_user->anr.stitch.anr_stitch_en;
memcpy(acc->anr.stitch.pyramid, acc_user->anr.stitch.pyramid,
sizeof(acc->anr.stitch.pyramid));
} else if (acc_old) {
/* Use old value */
acc->anr.transform = acc_old->anr.transform;
acc->anr.stitch.anr_stitch_en =
acc_old->anr.stitch.anr_stitch_en;
memcpy(acc->anr.stitch.pyramid, acc_old->anr.stitch.pyramid,
sizeof(acc->anr.stitch.pyramid));
} else {
/* Calculate from scratch */
acc->anr = imgu_css_anr_defaults;
}
/* Always enabled */
acc->anr.search.enable = 1;
acc->anr.transform.enable = 1;
acc->anr.tile2strm.enable = 1;
acc->anr.tile2strm.frame_width =
ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, IMGU_ISP_VMEM_ALIGN);
acc->anr.search.frame_width = acc->anr.tile2strm.frame_width;
acc->anr.stitch.frame_width = acc->anr.tile2strm.frame_width;
acc->anr.tile2strm.frame_height = css_pipe->rect[IPU3_CSS_RECT_BDS].height;
acc->anr.search.frame_height = acc->anr.tile2strm.frame_height;
acc->anr.stitch.frame_height = acc->anr.tile2strm.frame_height;
width = ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, IMGU_ISP_VMEM_ALIGN);
height = css_pipe->rect[IPU3_CSS_RECT_BDS].height;
if (acc->anr.transform.xreset + width > IPU3_UAPI_ANR_MAX_RESET)
acc->anr.transform.xreset = IPU3_UAPI_ANR_MAX_RESET - width;
if (acc->anr.transform.xreset < IPU3_UAPI_ANR_MIN_RESET)
acc->anr.transform.xreset = IPU3_UAPI_ANR_MIN_RESET;
if (acc->anr.transform.yreset + height > IPU3_UAPI_ANR_MAX_RESET)
acc->anr.transform.yreset = IPU3_UAPI_ANR_MAX_RESET - height;
if (acc->anr.transform.yreset < IPU3_UAPI_ANR_MIN_RESET)
acc->anr.transform.yreset = IPU3_UAPI_ANR_MIN_RESET;
/* acc_param: awb_fr_config */
if (use && use->acc_awb_fr) {
/* Take values from user */
acc->awb_fr.config = acc_user->awb_fr;
} else if (acc_old) {
/* Use old value */
acc->awb_fr.config = acc_old->awb_fr.config;
} else {
/* Set from scratch */
acc->awb_fr.config = imgu_css_awb_fr_defaults;
}
imgu_css_grid_end_calc(&acc->awb_fr.config.grid_cfg);
if (acc->awb_fr.config.grid_cfg.width <= 0)
return -EINVAL;
acc->awb_fr.config.grid_cfg.height_per_slice =
IMGU_ABI_AWB_FR_MAX_CELLS_PER_SET /
acc->awb_fr.config.grid_cfg.width;
for (i = 0; i < stripes; i++)
acc->awb_fr.stripes[i] = acc->awb_fr.config;
if (acc->awb_fr.config.grid_cfg.x_start >=
acc->stripe.down_scaled_stripes[1].offset + min_overlap) {
/* Enable only for rightmost stripe, disable left */
acc->awb_fr.stripes[0].grid_cfg.y_start &=
~IPU3_UAPI_GRID_Y_START_EN;
} else if (acc->awb_fr.config.grid_cfg.x_end <=
acc->stripe.bds_out_stripes[0].width - min_overlap) {
/* Enable only for leftmost stripe, disable right */
acc->awb_fr.stripes[1].grid_cfg.y_start &=
~IPU3_UAPI_GRID_Y_START_EN;
} else {
/* Enable for both stripes */
u16 end; /* width for grid end */
acc->awb_fr.stripes[0].grid_cfg.width =
(acc->stripe.bds_out_stripes[0].width - min_overlap -
acc->awb_fr.config.grid_cfg.x_start + 1) >>
acc->awb_fr.config.grid_cfg.block_width_log2;
acc->awb_fr.stripes[1].grid_cfg.width =
acc->awb_fr.config.grid_cfg.width -
acc->awb_fr.stripes[0].grid_cfg.width;
b_w_log2 = acc->awb_fr.stripes[0].grid_cfg.block_width_log2;
end = imgu_css_grid_end(acc->awb_fr.stripes[0].grid_cfg.x_start,
acc->awb_fr.stripes[0].grid_cfg.width,
b_w_log2);
acc->awb_fr.stripes[0].grid_cfg.x_end = end;
acc->awb_fr.stripes[1].grid_cfg.x_start =
(acc->awb_fr.stripes[0].grid_cfg.x_end + 1 -
acc->stripe.down_scaled_stripes[1].offset) &
IPU3_UAPI_GRID_START_MASK;
b_w_log2 = acc->awb_fr.stripes[1].grid_cfg.block_width_log2;
end = imgu_css_grid_end(acc->awb_fr.stripes[1].grid_cfg.x_start,
acc->awb_fr.stripes[1].grid_cfg.width,
b_w_log2);
acc->awb_fr.stripes[1].grid_cfg.x_end = end;
/*
		 * To reduce the complexity of debubbling and of loading the
		 * statistics, fix grid_height_per_slice to 1 for both
		 * stripes.
*/
for (i = 0; i < stripes; i++)
acc->awb_fr.stripes[i].grid_cfg.height_per_slice = 1;
}
if (imgu_css_awb_fr_ops_calc(css, pipe, &acc->awb_fr))
return -EINVAL;
/* acc_param: ae_config */
if (use && use->acc_ae) {
/* Take values from user */
acc->ae.grid_cfg = acc_user->ae.grid_cfg;
acc->ae.ae_ccm = acc_user->ae.ae_ccm;
for (i = 0; i < IPU3_UAPI_AE_WEIGHTS; i++)
acc->ae.weights[i] = acc_user->ae.weights[i];
} else if (acc_old) {
/* Use old value */
acc->ae.grid_cfg = acc_old->ae.grid_cfg;
acc->ae.ae_ccm = acc_old->ae.ae_ccm;
for (i = 0; i < IPU3_UAPI_AE_WEIGHTS; i++)
acc->ae.weights[i] = acc_old->ae.weights[i];
} else {
/* Set from scratch */
static const struct ipu3_uapi_ae_weight_elem
weight_def = { 1, 1, 1, 1, 1, 1, 1, 1 };
acc->ae.grid_cfg = imgu_css_ae_grid_defaults;
acc->ae.ae_ccm = imgu_css_ae_ccm_defaults;
for (i = 0; i < IPU3_UAPI_AE_WEIGHTS; i++)
acc->ae.weights[i] = weight_def;
}
b_w_log2 = acc->ae.grid_cfg.block_width_log2;
acc->ae.grid_cfg.x_end = imgu_css_grid_end(acc->ae.grid_cfg.x_start,
acc->ae.grid_cfg.width,
b_w_log2);
b_w_log2 = acc->ae.grid_cfg.block_height_log2;
acc->ae.grid_cfg.y_end = imgu_css_grid_end(acc->ae.grid_cfg.y_start,
acc->ae.grid_cfg.height,
b_w_log2);
for (i = 0; i < stripes; i++)
acc->ae.stripes[i].grid = acc->ae.grid_cfg;
if (acc->ae.grid_cfg.x_start >=
acc->stripe.down_scaled_stripes[1].offset) {
/* Enable only for rightmost stripe, disable left */
acc->ae.stripes[0].grid.ae_en = 0;
} else if (acc->ae.grid_cfg.x_end <=
acc->stripe.bds_out_stripes[0].width) {
/* Enable only for leftmost stripe, disable right */
acc->ae.stripes[1].grid.ae_en = 0;
} else {
/* Enable for both stripes */
u8 b_w_log2;
acc->ae.stripes[0].grid.width =
(acc->stripe.bds_out_stripes[0].width -
acc->ae.grid_cfg.x_start + 1) >>
acc->ae.grid_cfg.block_width_log2;
acc->ae.stripes[1].grid.width =
acc->ae.grid_cfg.width - acc->ae.stripes[0].grid.width;
b_w_log2 = acc->ae.stripes[0].grid.block_width_log2;
acc->ae.stripes[0].grid.x_end =
imgu_css_grid_end(acc->ae.stripes[0].grid.x_start,
acc->ae.stripes[0].grid.width,
b_w_log2);
acc->ae.stripes[1].grid.x_start =
(acc->ae.stripes[0].grid.x_end + 1 -
acc->stripe.down_scaled_stripes[1].offset) &
IPU3_UAPI_GRID_START_MASK;
b_w_log2 = acc->ae.stripes[1].grid.block_width_log2;
acc->ae.stripes[1].grid.x_end =
imgu_css_grid_end(acc->ae.stripes[1].grid.x_start,
acc->ae.stripes[1].grid.width,
b_w_log2);
}
/* acc_param: af_config */
if (use && use->acc_af) {
/* Take values from user */
acc->af.config.filter_config = acc_user->af.filter_config;
acc->af.config.grid_cfg = acc_user->af.grid_cfg;
} else if (acc_old) {
/* Use old value */
acc->af.config = acc_old->af.config;
} else {
/* Set from scratch */
acc->af.config.filter_config =
imgu_css_af_defaults.filter_config;
acc->af.config.grid_cfg = imgu_css_af_defaults.grid_cfg;
}
imgu_css_grid_end_calc(&acc->af.config.grid_cfg);
if (acc->af.config.grid_cfg.width <= 0)
return -EINVAL;
acc->af.config.grid_cfg.height_per_slice =
IMGU_ABI_AF_MAX_CELLS_PER_SET / acc->af.config.grid_cfg.width;
acc->af.config.frame_size.width =
ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, IMGU_ISP_VMEM_ALIGN);
acc->af.config.frame_size.height =
css_pipe->rect[IPU3_CSS_RECT_BDS].height;
if (acc->stripe.bds_out_stripes[0].width <= min_overlap)
return -EINVAL;
for (i = 0; i < stripes; i++) {
acc->af.stripes[i].grid_cfg = acc->af.config.grid_cfg;
acc->af.stripes[i].frame_size.height =
css_pipe->rect[IPU3_CSS_RECT_BDS].height;
acc->af.stripes[i].frame_size.width =
acc->stripe.bds_out_stripes[i].width;
}
if (acc->af.config.grid_cfg.x_start >=
acc->stripe.down_scaled_stripes[1].offset + min_overlap) {
/* Enable only for rightmost stripe, disable left */
acc->af.stripes[0].grid_cfg.y_start &=
~IPU3_UAPI_GRID_Y_START_EN;
acc->af.stripes[1].grid_cfg.x_start =
(acc->af.stripes[1].grid_cfg.x_start -
acc->stripe.down_scaled_stripes[1].offset) &
IPU3_UAPI_GRID_START_MASK;
b_w_log2 = acc->af.stripes[1].grid_cfg.block_width_log2;
acc->af.stripes[1].grid_cfg.x_end =
imgu_css_grid_end(acc->af.stripes[1].grid_cfg.x_start,
acc->af.stripes[1].grid_cfg.width,
b_w_log2);
} else if (acc->af.config.grid_cfg.x_end <=
acc->stripe.bds_out_stripes[0].width - min_overlap) {
/* Enable only for leftmost stripe, disable right */
acc->af.stripes[1].grid_cfg.y_start &=
~IPU3_UAPI_GRID_Y_START_EN;
} else {
/* Enable for both stripes */
acc->af.stripes[0].grid_cfg.width =
(acc->stripe.bds_out_stripes[0].width - min_overlap -
acc->af.config.grid_cfg.x_start + 1) >>
acc->af.config.grid_cfg.block_width_log2;
acc->af.stripes[1].grid_cfg.width =
acc->af.config.grid_cfg.width -
acc->af.stripes[0].grid_cfg.width;
b_w_log2 = acc->af.stripes[0].grid_cfg.block_width_log2;
acc->af.stripes[0].grid_cfg.x_end =
imgu_css_grid_end(acc->af.stripes[0].grid_cfg.x_start,
acc->af.stripes[0].grid_cfg.width,
b_w_log2);
acc->af.stripes[1].grid_cfg.x_start =
(acc->af.stripes[0].grid_cfg.x_end + 1 -
acc->stripe.down_scaled_stripes[1].offset) &
IPU3_UAPI_GRID_START_MASK;
b_w_log2 = acc->af.stripes[1].grid_cfg.block_width_log2;
acc->af.stripes[1].grid_cfg.x_end =
imgu_css_grid_end(acc->af.stripes[1].grid_cfg.x_start,
acc->af.stripes[1].grid_cfg.width,
b_w_log2);
/*
		 * To reduce the complexity of debubbling and of loading the
		 * statistics, fix grid_height_per_slice to 1 for both stripes
*/
for (i = 0; i < stripes; i++)
acc->af.stripes[i].grid_cfg.height_per_slice = 1;
}
if (imgu_css_af_ops_calc(css, pipe, &acc->af))
return -EINVAL;
/* acc_param: awb_config */
if (use && use->acc_awb) {
/* Take values from user */
acc->awb.config = acc_user->awb.config;
} else if (acc_old) {
/* Use old value */
acc->awb.config = acc_old->awb.config;
} else {
/* Set from scratch */
acc->awb.config = imgu_css_awb_defaults;
}
if (acc->awb.config.grid.width <= 0)
return -EINVAL;
acc->awb.config.grid.height_per_slice =
		IMGU_ABI_AWB_MAX_CELLS_PER_SET / acc->awb.config.grid.width;
imgu_css_grid_end_calc(&acc->awb.config.grid);
for (i = 0; i < stripes; i++)
acc->awb.stripes[i] = acc->awb.config;
if (acc->awb.config.grid.x_start >=
acc->stripe.down_scaled_stripes[1].offset + min_overlap) {
/* Enable only for rightmost stripe, disable left */
acc->awb.stripes[0].rgbs_thr_b &= ~IPU3_UAPI_AWB_RGBS_THR_B_EN;
acc->awb.stripes[1].grid.x_start =
(acc->awb.stripes[1].grid.x_start -
acc->stripe.down_scaled_stripes[1].offset) &
IPU3_UAPI_GRID_START_MASK;
b_w_log2 = acc->awb.stripes[1].grid.block_width_log2;
acc->awb.stripes[1].grid.x_end =
imgu_css_grid_end(acc->awb.stripes[1].grid.x_start,
acc->awb.stripes[1].grid.width,
b_w_log2);
} else if (acc->awb.config.grid.x_end <=
acc->stripe.bds_out_stripes[0].width - min_overlap) {
/* Enable only for leftmost stripe, disable right */
acc->awb.stripes[1].rgbs_thr_b &= ~IPU3_UAPI_AWB_RGBS_THR_B_EN;
} else {
/* Enable for both stripes */
acc->awb.stripes[0].grid.width =
(acc->stripe.bds_out_stripes[0].width -
acc->awb.config.grid.x_start + 1) >>
acc->awb.config.grid.block_width_log2;
acc->awb.stripes[1].grid.width = acc->awb.config.grid.width -
acc->awb.stripes[0].grid.width;
b_w_log2 = acc->awb.stripes[0].grid.block_width_log2;
acc->awb.stripes[0].grid.x_end =
imgu_css_grid_end(acc->awb.stripes[0].grid.x_start,
acc->awb.stripes[0].grid.width,
b_w_log2);
acc->awb.stripes[1].grid.x_start =
(acc->awb.stripes[0].grid.x_end + 1 -
acc->stripe.down_scaled_stripes[1].offset) &
IPU3_UAPI_GRID_START_MASK;
b_w_log2 = acc->awb.stripes[1].grid.block_width_log2;
acc->awb.stripes[1].grid.x_end =
imgu_css_grid_end(acc->awb.stripes[1].grid.x_start,
acc->awb.stripes[1].grid.width,
b_w_log2);
/*
		 * To reduce the complexity of debubbling and of loading the
		 * statistics, fix grid_height_per_slice to 1 for both stripes
*/
for (i = 0; i < stripes; i++)
acc->awb.stripes[i].grid.height_per_slice = 1;
}
if (imgu_css_awb_ops_calc(css, pipe, &acc->awb))
return -EINVAL;
return 0;
}
/*
 * Fill the indicated structure in `new_binary_params' from one of the
 * possible sources, selected by the `use_user' flag: if the flag is true,
 * copy from `user_setting'; if it is false, copy from `old_binary_params'.
 * In both cases return NULL on success (or an error pointer on error).
 * If the flag is false and `old_binary_params' is NULL, return a pointer
 * to the structure inside `new_binary_params'. In that case the caller
 * should calculate and fill the structure from scratch.
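 *
 * Typical caller pattern (cf. imgu_css_cfg_vmem0() and imgu_css_cfg_dmem0()
 * below; arguments are illustrative only):
 *
 *	p = imgu_css_cfg_copy(css, pipe, use_flag, &user->xxx_params,
 *			      old, new, m, &pofs->vmem.xxx, sizeof(*p));
 *	if (!IS_ERR_OR_NULL(p))
 *		... fill *p with default values ...
 *	return IS_ERR(p) ? -EPROTO : 0;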
*/
static void *imgu_css_cfg_copy(struct imgu_css *css,
unsigned int pipe, bool use_user,
void *user_setting, void *old_binary_params,
void *new_binary_params,
enum imgu_abi_memories m,
struct imgu_fw_isp_parameter *par,
size_t par_size)
{
const enum imgu_abi_param_class c = IMGU_ABI_PARAM_CLASS_PARAM;
void *new_setting, *old_setting;
new_setting = imgu_css_fw_pipeline_params(css, pipe, c, m, par,
par_size, new_binary_params);
if (!new_setting)
return ERR_PTR(-EPROTO); /* Corrupted firmware */
if (use_user) {
/* Take new user parameters */
memcpy(new_setting, user_setting, par_size);
} else if (old_binary_params) {
/* Take previous value */
old_setting = imgu_css_fw_pipeline_params(css, pipe, c, m, par,
par_size,
old_binary_params);
if (!old_setting)
return ERR_PTR(-EPROTO);
memcpy(new_setting, old_setting, par_size);
} else {
return new_setting; /* Need to calculate */
}
return NULL; /* Copied from other value */
}
/*
* Configure VMEM0 parameters (late binding parameters).
*/
int imgu_css_cfg_vmem0(struct imgu_css *css, unsigned int pipe,
struct ipu3_uapi_flags *use,
void *vmem0, void *vmem0_old,
struct ipu3_uapi_params *user)
{
const struct imgu_fw_info *bi =
&css->fwp->binary_header[css->pipes[pipe].bindex];
struct imgu_fw_param_memory_offsets *pofs = (void *)css->fwp +
bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_PARAM];
struct ipu3_uapi_isp_lin_vmem_params *lin_vmem = NULL;
struct ipu3_uapi_isp_tnr3_vmem_params *tnr_vmem = NULL;
struct ipu3_uapi_isp_xnr3_vmem_params *xnr_vmem = NULL;
const enum imgu_abi_param_class c = IMGU_ABI_PARAM_CLASS_PARAM;
const enum imgu_abi_memories m = IMGU_ABI_MEM_ISP_VMEM0;
unsigned int i;
/* Configure VMEM0 */
memset(vmem0, 0, bi->info.isp.sp.mem_initializers.params[c][m].size);
/* Configure Linearization VMEM0 parameters */
lin_vmem = imgu_css_cfg_copy(css, pipe, use && use->lin_vmem_params,
&user->lin_vmem_params, vmem0_old, vmem0,
m, &pofs->vmem.lin, sizeof(*lin_vmem));
if (!IS_ERR_OR_NULL(lin_vmem)) {
/* Generate parameter from scratch */
for (i = 0; i < IPU3_UAPI_LIN_LUT_SIZE; i++) {
lin_vmem->lin_lutlow_gr[i] = 32 * i;
lin_vmem->lin_lutlow_r[i] = 32 * i;
lin_vmem->lin_lutlow_b[i] = 32 * i;
lin_vmem->lin_lutlow_gb[i] = 32 * i;
lin_vmem->lin_lutdif_gr[i] = 32;
lin_vmem->lin_lutdif_r[i] = 32;
lin_vmem->lin_lutdif_b[i] = 32;
lin_vmem->lin_lutdif_gb[i] = 32;
}
}
/* Configure TNR3 VMEM parameters */
if (css->pipes[pipe].pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
tnr_vmem = imgu_css_cfg_copy(css, pipe,
use && use->tnr3_vmem_params,
&user->tnr3_vmem_params,
vmem0_old, vmem0, m,
&pofs->vmem.tnr3,
sizeof(*tnr_vmem));
if (!IS_ERR_OR_NULL(tnr_vmem)) {
/* Generate parameter from scratch */
for (i = 0; i < IPU3_UAPI_ISP_TNR3_VMEM_LEN; i++)
tnr_vmem->sigma[i] = 256;
}
}
/* Configure XNR3 VMEM parameters */
xnr_vmem = imgu_css_cfg_copy(css, pipe, use && use->xnr3_vmem_params,
&user->xnr3_vmem_params, vmem0_old, vmem0,
m, &pofs->vmem.xnr3, sizeof(*xnr_vmem));
	if (!IS_ERR_OR_NULL(xnr_vmem)) {
		/*
		 * Generate parameters from scratch: replicate the default
		 * LUT across the ISP vector (IPU3_UAPI_ISP_VEC_ELEMS entries
		 * is assumed here; the modulo wraps the shorter default LUT).
		 */
		for (i = 0; i < IPU3_UAPI_ISP_VEC_ELEMS; i++) {
			xnr_vmem->x[i] = imgu_css_xnr3_vmem_defaults.x
				[i % IMGU_XNR3_VMEM_LUT_LEN];
			xnr_vmem->a[i] = imgu_css_xnr3_vmem_defaults.a
				[i % IMGU_XNR3_VMEM_LUT_LEN];
			xnr_vmem->b[i] = imgu_css_xnr3_vmem_defaults.b
				[i % IMGU_XNR3_VMEM_LUT_LEN];
			xnr_vmem->c[i] = imgu_css_xnr3_vmem_defaults.c
				[i % IMGU_XNR3_VMEM_LUT_LEN];
		}
	}
return IS_ERR(lin_vmem) || IS_ERR(tnr_vmem) || IS_ERR(xnr_vmem) ?
-EPROTO : 0;
}
/*
* Configure DMEM0 parameters (late binding parameters).
*/
int imgu_css_cfg_dmem0(struct imgu_css *css, unsigned int pipe,
struct ipu3_uapi_flags *use,
void *dmem0, void *dmem0_old,
struct ipu3_uapi_params *user)
{
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
const struct imgu_fw_info *bi =
&css->fwp->binary_header[css_pipe->bindex];
struct imgu_fw_param_memory_offsets *pofs = (void *)css->fwp +
bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_PARAM];
struct ipu3_uapi_isp_tnr3_params *tnr_dmem = NULL;
struct ipu3_uapi_isp_xnr3_params *xnr_dmem;
const enum imgu_abi_param_class c = IMGU_ABI_PARAM_CLASS_PARAM;
const enum imgu_abi_memories m = IMGU_ABI_MEM_ISP_DMEM0;
/* Configure DMEM0 */
memset(dmem0, 0, bi->info.isp.sp.mem_initializers.params[c][m].size);
/* Configure TNR3 DMEM0 parameters */
if (css_pipe->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
tnr_dmem = imgu_css_cfg_copy(css, pipe,
use && use->tnr3_dmem_params,
&user->tnr3_dmem_params,
dmem0_old, dmem0, m,
&pofs->dmem.tnr3,
sizeof(*tnr_dmem));
if (!IS_ERR_OR_NULL(tnr_dmem)) {
/* Generate parameter from scratch */
tnr_dmem->knee_y1 = 768;
tnr_dmem->knee_y2 = 1280;
}
}
/* Configure XNR3 DMEM0 parameters */
xnr_dmem = imgu_css_cfg_copy(css, pipe, use && use->xnr3_dmem_params,
&user->xnr3_dmem_params, dmem0_old, dmem0,
m, &pofs->dmem.xnr3, sizeof(*xnr_dmem));
if (!IS_ERR_OR_NULL(xnr_dmem)) {
/* Generate parameter from scratch */
xnr_dmem->alpha.y0 = 2047;
xnr_dmem->alpha.u0 = 2047;
xnr_dmem->alpha.v0 = 2047;
}
return IS_ERR(tnr_dmem) || IS_ERR(xnr_dmem) ? -EPROTO : 0;
}
/* Generate unity morphing table without morphing effect */
void imgu_css_cfg_gdc_table(struct imgu_abi_gdc_warp_param *gdc,
int frame_in_x, int frame_in_y,
int frame_out_x, int frame_out_y,
int env_w, int env_h)
{
static const unsigned int FRAC_BITS = IMGU_ABI_GDC_FRAC_BITS;
static const unsigned int XMEM_ALIGN = 1 << 4;
const unsigned int XMEM_ALIGN_MASK = ~(XMEM_ALIGN - 1);
static const unsigned int BCI_ENV = 4;
static const unsigned int BYP = 2; /* Bytes per pixel */
const unsigned int OFFSET_X = 2 * IMGU_DVS_BLOCK_W + env_w + 1;
const unsigned int OFFSET_Y = IMGU_DVS_BLOCK_H + env_h + 1;
struct imgu_abi_gdc_warp_param gdc_luma, gdc_chroma;
unsigned int blocks_x = ALIGN(DIV_ROUND_UP(frame_out_x,
IMGU_DVS_BLOCK_W), 2);
unsigned int blocks_y = DIV_ROUND_UP(frame_out_y, IMGU_DVS_BLOCK_H);
unsigned int y0, x0, x1, x, y;
/* Global luma settings */
gdc_luma.origin_x = 0;
gdc_luma.origin_y = 0;
gdc_luma.p0_x = (OFFSET_X - (OFFSET_X & XMEM_ALIGN_MASK)) << FRAC_BITS;
gdc_luma.p0_y = 0;
gdc_luma.p1_x = gdc_luma.p0_x + (IMGU_DVS_BLOCK_W << FRAC_BITS);
gdc_luma.p1_y = gdc_luma.p0_y;
gdc_luma.p2_x = gdc_luma.p0_x;
gdc_luma.p2_y = gdc_luma.p0_y + (IMGU_DVS_BLOCK_H << FRAC_BITS);
gdc_luma.p3_x = gdc_luma.p1_x;
gdc_luma.p3_y = gdc_luma.p2_y;
gdc_luma.in_block_width = IMGU_DVS_BLOCK_W + BCI_ENV +
OFFSET_X - (OFFSET_X & XMEM_ALIGN_MASK);
gdc_luma.in_block_width_a = DIV_ROUND_UP(gdc_luma.in_block_width,
IPU3_UAPI_ISP_VEC_ELEMS);
gdc_luma.in_block_width_b = DIV_ROUND_UP(gdc_luma.in_block_width,
IMGU_ABI_ISP_DDR_WORD_BYTES /
BYP);
gdc_luma.in_block_height = IMGU_DVS_BLOCK_H + BCI_ENV;
gdc_luma.padding = 0;
/* Global chroma settings */
gdc_chroma.origin_x = 0;
gdc_chroma.origin_y = 0;
gdc_chroma.p0_x = (OFFSET_X / 2 - (OFFSET_X / 2 & XMEM_ALIGN_MASK)) <<
FRAC_BITS;
gdc_chroma.p0_y = 0;
gdc_chroma.p1_x = gdc_chroma.p0_x + (IMGU_DVS_BLOCK_W << FRAC_BITS);
gdc_chroma.p1_y = gdc_chroma.p0_y;
gdc_chroma.p2_x = gdc_chroma.p0_x;
gdc_chroma.p2_y = gdc_chroma.p0_y + (IMGU_DVS_BLOCK_H / 2 << FRAC_BITS);
gdc_chroma.p3_x = gdc_chroma.p1_x;
gdc_chroma.p3_y = gdc_chroma.p2_y;
gdc_chroma.in_block_width = IMGU_DVS_BLOCK_W + BCI_ENV;
gdc_chroma.in_block_width_a = DIV_ROUND_UP(gdc_chroma.in_block_width,
IPU3_UAPI_ISP_VEC_ELEMS);
gdc_chroma.in_block_width_b = DIV_ROUND_UP(gdc_chroma.in_block_width,
IMGU_ABI_ISP_DDR_WORD_BYTES /
BYP);
gdc_chroma.in_block_height = IMGU_DVS_BLOCK_H / 2 + BCI_ENV;
gdc_chroma.padding = 0;
/* Calculate block offsets for luma and chroma */
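	/*
	 * Each iteration emits two luma block descriptors followed by one
	 * chroma descriptor spanning the same horizontal range, presumably
	 * because chroma is stored at half resolution; this is also why
	 * blocks_x is rounded up to an even count above.
	 */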
for (y0 = 0; y0 < blocks_y; y0++) {
for (x0 = 0; x0 < blocks_x / 2; x0++) {
for (x1 = 0; x1 < 2; x1++) {
/* Luma blocks */
x = (x0 * 2 + x1) * IMGU_DVS_BLOCK_W + OFFSET_X;
x &= XMEM_ALIGN_MASK;
y = y0 * IMGU_DVS_BLOCK_H + OFFSET_Y;
*gdc = gdc_luma;
gdc->in_addr_offset =
(y * frame_in_x + x) * BYP;
gdc++;
}
/* Chroma block */
x = x0 * IMGU_DVS_BLOCK_W + OFFSET_X / 2;
x &= XMEM_ALIGN_MASK;
y = y0 * (IMGU_DVS_BLOCK_H / 2) + OFFSET_Y / 2;
*gdc = gdc_chroma;
gdc->in_addr_offset = (y * frame_in_x + x) * BYP;
gdc++;
}
}
}
| linux-master | drivers/staging/media/ipu3/ipu3-css-params.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Intel Corporation
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include "ipu3.h"
#include "ipu3-css.h"
#include "ipu3-css-fw.h"
#include "ipu3-css-params.h"
#include "ipu3-dmamap.h"
#include "ipu3-tables.h"
/* IRQ configuration */
#define IMGU_IRQCTRL_IRQ_MASK (IMGU_IRQCTRL_IRQ_SP1 | \
IMGU_IRQCTRL_IRQ_SP2 | \
IMGU_IRQCTRL_IRQ_SW_PIN(0) | \
IMGU_IRQCTRL_IRQ_SW_PIN(1))
#define IPU3_CSS_FORMAT_BPP_DEN 50 /* Denominator */
/* Some sane limits for resolutions */
#define IPU3_CSS_MIN_RES 32
#define IPU3_CSS_MAX_H 3136
#define IPU3_CSS_MAX_W 4224
/* minimal envelope size (GDC in - out) should be 4 */
#define MIN_ENVELOPE 4
/*
 * Pre-allocated buffer sizes for the CSS ABI and for the auxiliary frames
 * after BDS and before GDC. These values should be tuned to be big enough
 * to avoid buffer re-allocation while streaming, in order to lower
 * streaming latency.
*/
#define CSS_ABI_SIZE 136
#define CSS_BDS_SIZE (4480 * 3200 * 3)
#define CSS_GDC_SIZE (4224 * 3200 * 12 / 8)
#define IPU3_CSS_QUEUE_TO_FLAGS(q) (1 << (q))
#define IPU3_CSS_FORMAT_FL_IN \
IPU3_CSS_QUEUE_TO_FLAGS(IPU3_CSS_QUEUE_IN)
#define IPU3_CSS_FORMAT_FL_OUT \
IPU3_CSS_QUEUE_TO_FLAGS(IPU3_CSS_QUEUE_OUT)
#define IPU3_CSS_FORMAT_FL_VF \
IPU3_CSS_QUEUE_TO_FLAGS(IPU3_CSS_QUEUE_VF)
/* Formats supported by IPU3 Camera Sub System */
static const struct imgu_css_format imgu_css_formats[] = {
{
.pixelformat = V4L2_PIX_FMT_NV12,
.colorspace = V4L2_COLORSPACE_SRGB,
.frame_format = IMGU_ABI_FRAME_FORMAT_NV12,
.osys_format = IMGU_ABI_OSYS_FORMAT_NV12,
.osys_tiling = IMGU_ABI_OSYS_TILING_NONE,
.chroma_decim = 4,
.width_align = IPU3_UAPI_ISP_VEC_ELEMS,
.flags = IPU3_CSS_FORMAT_FL_OUT | IPU3_CSS_FORMAT_FL_VF,
}, {
/* Each 32 bytes contains 25 10-bit pixels */
.pixelformat = V4L2_PIX_FMT_IPU3_SBGGR10,
.colorspace = V4L2_COLORSPACE_RAW,
.frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
.bayer_order = IMGU_ABI_BAYER_ORDER_BGGR,
.bit_depth = 10,
.width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
.flags = IPU3_CSS_FORMAT_FL_IN,
}, {
.pixelformat = V4L2_PIX_FMT_IPU3_SGBRG10,
.colorspace = V4L2_COLORSPACE_RAW,
.frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
.bayer_order = IMGU_ABI_BAYER_ORDER_GBRG,
.bit_depth = 10,
.width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
.flags = IPU3_CSS_FORMAT_FL_IN,
}, {
.pixelformat = V4L2_PIX_FMT_IPU3_SGRBG10,
.colorspace = V4L2_COLORSPACE_RAW,
.frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
.bayer_order = IMGU_ABI_BAYER_ORDER_GRBG,
.bit_depth = 10,
.width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
.flags = IPU3_CSS_FORMAT_FL_IN,
}, {
.pixelformat = V4L2_PIX_FMT_IPU3_SRGGB10,
.colorspace = V4L2_COLORSPACE_RAW,
.frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
.bayer_order = IMGU_ABI_BAYER_ORDER_RGGB,
.bit_depth = 10,
.width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
.flags = IPU3_CSS_FORMAT_FL_IN,
},
};
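/*
 * For each CSS queue: the firmware ABI queue that backs it and the offset,
 * within struct imgu_abi_buffer, of the address field for that queue's
 * payload type.
 */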
static const struct {
enum imgu_abi_queue_id qid;
size_t ptr_ofs;
} imgu_css_queues[IPU3_CSS_QUEUES] = {
[IPU3_CSS_QUEUE_IN] = {
IMGU_ABI_QUEUE_C_ID,
offsetof(struct imgu_abi_buffer, payload.frame.frame_data)
},
[IPU3_CSS_QUEUE_OUT] = {
IMGU_ABI_QUEUE_D_ID,
offsetof(struct imgu_abi_buffer, payload.frame.frame_data)
},
[IPU3_CSS_QUEUE_VF] = {
IMGU_ABI_QUEUE_E_ID,
offsetof(struct imgu_abi_buffer, payload.frame.frame_data)
},
[IPU3_CSS_QUEUE_STAT_3A] = {
IMGU_ABI_QUEUE_F_ID,
offsetof(struct imgu_abi_buffer, payload.s3a.data_ptr)
},
};
/* Initialize queue based on given format, adjust format as needed */
static int imgu_css_queue_init(struct imgu_css_queue *queue,
struct v4l2_pix_format_mplane *fmt, u32 flags)
{
struct v4l2_pix_format_mplane *const f = &queue->fmt.mpix;
unsigned int i;
u32 sizeimage;
INIT_LIST_HEAD(&queue->bufs);
queue->css_fmt = NULL; /* Disable */
if (!fmt)
return 0;
for (i = 0; i < ARRAY_SIZE(imgu_css_formats); i++) {
if (!(imgu_css_formats[i].flags & flags))
continue;
queue->css_fmt = &imgu_css_formats[i];
if (imgu_css_formats[i].pixelformat == fmt->pixelformat)
break;
}
if (!queue->css_fmt)
return -EINVAL; /* Could not find any suitable format */
queue->fmt.mpix = *fmt;
f->width = ALIGN(clamp_t(u32, f->width,
IPU3_CSS_MIN_RES, IPU3_CSS_MAX_W), 2);
f->height = ALIGN(clamp_t(u32, f->height,
IPU3_CSS_MIN_RES, IPU3_CSS_MAX_H), 2);
queue->width_pad = ALIGN(f->width, queue->css_fmt->width_align);
f->plane_fmt[0].bytesperline =
imgu_bytesperline(f->width, queue->css_fmt->frame_format);
sizeimage = f->height * f->plane_fmt[0].bytesperline;
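	/*
	 * Add the chroma planes: e.g. for NV12 above (chroma_decim == 4) this
	 * is sizeimage += 2 * sizeimage / 4, so the total becomes 1.5 times
	 * the luma plane size.
	 */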
if (queue->css_fmt->chroma_decim)
sizeimage += 2 * sizeimage / queue->css_fmt->chroma_decim;
f->plane_fmt[0].sizeimage = sizeimage;
f->field = V4L2_FIELD_NONE;
f->num_planes = 1;
f->colorspace = queue->css_fmt->colorspace;
f->flags = 0;
f->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
f->quantization = V4L2_QUANTIZATION_DEFAULT;
f->xfer_func = V4L2_XFER_FUNC_DEFAULT;
memset(f->reserved, 0, sizeof(f->reserved));
return 0;
}
static bool imgu_css_queue_enabled(struct imgu_css_queue *q)
{
return q->css_fmt;
}
/******************* css hw *******************/
/* In the style of writesl() defined in include/asm-generic/io.h */
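/* Whole 32-bit words only: any trailing partial word (count % 4) is dropped */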
static inline void writes(const void *mem, ssize_t count, void __iomem *addr)
{
if (count >= 4) {
const u32 *buf = mem;
count /= 4;
do {
writel(*buf++, addr);
addr += 4;
} while (--count);
}
}
/* Wait until register `reg', masked with `mask', becomes `cmp' */
static int imgu_hw_wait(void __iomem *base, int reg, u32 mask, u32 cmp)
{
u32 val;
return readl_poll_timeout(base + reg, val, (val & mask) == cmp,
1000, 100 * 1000);
}
/* Initialize the IPU3 CSS hardware and associated h/w blocks */
int imgu_css_set_powerup(struct device *dev, void __iomem *base,
unsigned int freq)
{
u32 pm_ctrl, state, val;
dev_dbg(dev, "%s with freq %u\n", __func__, freq);
/* Clear the CSS busy signal */
readl(base + IMGU_REG_GP_BUSY);
writel(0, base + IMGU_REG_GP_BUSY);
/* Wait for idle signal */
if (imgu_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_IDLE_STS,
IMGU_STATE_IDLE_STS)) {
dev_err(dev, "failed to set CSS idle\n");
goto fail;
}
/* Reset the css */
writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_FORCE_RESET,
base + IMGU_REG_PM_CTRL);
usleep_range(200, 300);
/* Prepare CSS */
pm_ctrl = readl(base + IMGU_REG_PM_CTRL);
state = readl(base + IMGU_REG_STATE);
dev_dbg(dev, "CSS pm_ctrl 0x%x state 0x%x (power %s)\n",
pm_ctrl, state, state & IMGU_STATE_POWER_DOWN ? "down" : "up");
/* Power up CSS using wrapper */
if (state & IMGU_STATE_POWER_DOWN) {
writel(IMGU_PM_CTRL_RACE_TO_HALT | IMGU_PM_CTRL_START,
base + IMGU_REG_PM_CTRL);
if (imgu_hw_wait(base, IMGU_REG_PM_CTRL,
IMGU_PM_CTRL_START, 0)) {
dev_err(dev, "failed to power up CSS\n");
goto fail;
}
usleep_range(2000, 3000);
} else {
writel(IMGU_PM_CTRL_RACE_TO_HALT, base + IMGU_REG_PM_CTRL);
}
/* Set the busy bit */
writel(readl(base + IMGU_REG_GP_BUSY) | 1, base + IMGU_REG_GP_BUSY);
/* Set CSS clock frequency */
pm_ctrl = readl(base + IMGU_REG_PM_CTRL);
val = pm_ctrl & ~(IMGU_PM_CTRL_CSS_PWRDN | IMGU_PM_CTRL_RST_AT_EOF);
writel(val, base + IMGU_REG_PM_CTRL);
writel(0, base + IMGU_REG_GP_BUSY);
if (imgu_hw_wait(base, IMGU_REG_STATE,
IMGU_STATE_PWRDNM_FSM_MASK, 0)) {
dev_err(dev, "failed to pwrdn CSS\n");
goto fail;
}
val = (freq / IMGU_SYSTEM_REQ_FREQ_DIVIDER) & IMGU_SYSTEM_REQ_FREQ_MASK;
writel(val, base + IMGU_REG_SYSTEM_REQ);
writel(1, base + IMGU_REG_GP_BUSY);
writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_FORCE_HALT,
base + IMGU_REG_PM_CTRL);
if (imgu_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_HALT_STS,
IMGU_STATE_HALT_STS)) {
dev_err(dev, "failed to halt CSS\n");
goto fail;
}
writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_START,
base + IMGU_REG_PM_CTRL);
if (imgu_hw_wait(base, IMGU_REG_PM_CTRL, IMGU_PM_CTRL_START, 0)) {
dev_err(dev, "failed to start CSS\n");
goto fail;
}
writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_FORCE_UNHALT,
base + IMGU_REG_PM_CTRL);
val = readl(base + IMGU_REG_PM_CTRL); /* get pm_ctrl */
val &= ~(IMGU_PM_CTRL_CSS_PWRDN | IMGU_PM_CTRL_RST_AT_EOF);
val |= pm_ctrl & (IMGU_PM_CTRL_CSS_PWRDN | IMGU_PM_CTRL_RST_AT_EOF);
writel(val, base + IMGU_REG_PM_CTRL);
return 0;
fail:
imgu_css_set_powerdown(dev, base);
return -EIO;
}
void imgu_css_set_powerdown(struct device *dev, void __iomem *base)
{
dev_dbg(dev, "%s\n", __func__);
/* wait for cio idle signal */
if (imgu_hw_wait(base, IMGU_REG_CIO_GATE_BURST_STATE,
IMGU_CIO_GATE_BURST_MASK, 0))
dev_warn(dev, "wait cio gate idle timeout");
/* wait for css idle signal */
if (imgu_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_IDLE_STS,
IMGU_STATE_IDLE_STS))
dev_warn(dev, "wait css idle timeout\n");
/* do halt-halted handshake with css */
writel(1, base + IMGU_REG_GP_HALT);
if (imgu_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_HALT_STS,
IMGU_STATE_HALT_STS))
dev_warn(dev, "failed to halt css");
/* de-assert the busy bit */
writel(0, base + IMGU_REG_GP_BUSY);
}
static void imgu_css_hw_enable_irq(struct imgu_css *css)
{
void __iomem *const base = css->base;
u32 val, i;
/* Set up interrupts */
/*
* Enable IRQ on the SP which signals that SP goes to idle
* (aka ready state) and set trigger to pulse
*/
val = readl(base + IMGU_REG_SP_CTRL(0)) | IMGU_CTRL_IRQ_READY;
writel(val, base + IMGU_REG_SP_CTRL(0));
writel(val | IMGU_CTRL_IRQ_CLEAR, base + IMGU_REG_SP_CTRL(0));
/* Enable IRQs from the IMGU wrapper */
writel(IMGU_REG_INT_CSS_IRQ, base + IMGU_REG_INT_ENABLE);
/* Clear */
writel(IMGU_REG_INT_CSS_IRQ, base + IMGU_REG_INT_STATUS);
/* Enable IRQs from main IRQ controller */
writel(~0, base + IMGU_REG_IRQCTRL_EDGE_NOT_PULSE(IMGU_IRQCTRL_MAIN));
writel(0, base + IMGU_REG_IRQCTRL_MASK(IMGU_IRQCTRL_MAIN));
writel(IMGU_IRQCTRL_IRQ_MASK,
base + IMGU_REG_IRQCTRL_EDGE(IMGU_IRQCTRL_MAIN));
writel(IMGU_IRQCTRL_IRQ_MASK,
base + IMGU_REG_IRQCTRL_ENABLE(IMGU_IRQCTRL_MAIN));
writel(IMGU_IRQCTRL_IRQ_MASK,
base + IMGU_REG_IRQCTRL_CLEAR(IMGU_IRQCTRL_MAIN));
writel(IMGU_IRQCTRL_IRQ_MASK,
base + IMGU_REG_IRQCTRL_MASK(IMGU_IRQCTRL_MAIN));
/* Wait for write complete */
readl(base + IMGU_REG_IRQCTRL_ENABLE(IMGU_IRQCTRL_MAIN));
/* Enable IRQs from SP0 and SP1 controllers */
for (i = IMGU_IRQCTRL_SP0; i <= IMGU_IRQCTRL_SP1; i++) {
writel(~0, base + IMGU_REG_IRQCTRL_EDGE_NOT_PULSE(i));
writel(0, base + IMGU_REG_IRQCTRL_MASK(i));
writel(IMGU_IRQCTRL_IRQ_MASK, base + IMGU_REG_IRQCTRL_EDGE(i));
writel(IMGU_IRQCTRL_IRQ_MASK,
base + IMGU_REG_IRQCTRL_ENABLE(i));
writel(IMGU_IRQCTRL_IRQ_MASK, base + IMGU_REG_IRQCTRL_CLEAR(i));
writel(IMGU_IRQCTRL_IRQ_MASK, base + IMGU_REG_IRQCTRL_MASK(i));
/* Wait for write complete */
readl(base + IMGU_REG_IRQCTRL_ENABLE(i));
}
}
static int imgu_css_hw_init(struct imgu_css *css)
{
/* For checking that streaming monitor statuses are valid */
static const struct {
u32 reg;
u32 mask;
const char *name;
} stream_monitors[] = {
{
IMGU_REG_GP_SP1_STRMON_STAT,
IMGU_GP_STRMON_STAT_ISP_PORT_SP12ISP,
"ISP0 to SP0"
}, {
IMGU_REG_GP_ISP_STRMON_STAT,
IMGU_GP_STRMON_STAT_SP1_PORT_ISP2SP1,
"SP0 to ISP0"
}, {
IMGU_REG_GP_MOD_STRMON_STAT,
IMGU_GP_STRMON_STAT_MOD_PORT_ISP2DMA,
"ISP0 to DMA0"
}, {
IMGU_REG_GP_ISP_STRMON_STAT,
IMGU_GP_STRMON_STAT_ISP_PORT_DMA2ISP,
"DMA0 to ISP0"
}, {
IMGU_REG_GP_MOD_STRMON_STAT,
IMGU_GP_STRMON_STAT_MOD_PORT_CELLS2GDC,
"ISP0 to GDC0"
}, {
IMGU_REG_GP_MOD_STRMON_STAT,
IMGU_GP_STRMON_STAT_MOD_PORT_GDC2CELLS,
"GDC0 to ISP0"
}, {
IMGU_REG_GP_MOD_STRMON_STAT,
IMGU_GP_STRMON_STAT_MOD_PORT_SP12DMA,
"SP0 to DMA0"
}, {
IMGU_REG_GP_SP1_STRMON_STAT,
IMGU_GP_STRMON_STAT_SP1_PORT_DMA2SP1,
"DMA0 to SP0"
}, {
IMGU_REG_GP_MOD_STRMON_STAT,
IMGU_GP_STRMON_STAT_MOD_PORT_CELLS2GDC,
"SP0 to GDC0"
}, {
IMGU_REG_GP_MOD_STRMON_STAT,
IMGU_GP_STRMON_STAT_MOD_PORT_GDC2CELLS,
"GDC0 to SP0"
},
};
struct device *dev = css->dev;
void __iomem *const base = css->base;
u32 val, i;
/* Set instruction cache address and inv bit for ISP, SP, and SP1 */
for (i = 0; i < IMGU_NUM_SP; i++) {
struct imgu_fw_info *bi =
&css->fwp->binary_header[css->fw_sp[i]];
writel(css->binary[css->fw_sp[i]].daddr,
base + IMGU_REG_SP_ICACHE_ADDR(bi->type));
writel(readl(base + IMGU_REG_SP_CTRL(bi->type)) |
IMGU_CTRL_ICACHE_INV,
base + IMGU_REG_SP_CTRL(bi->type));
}
writel(css->binary[css->fw_bl].daddr, base + IMGU_REG_ISP_ICACHE_ADDR);
writel(readl(base + IMGU_REG_ISP_CTRL) | IMGU_CTRL_ICACHE_INV,
base + IMGU_REG_ISP_CTRL);
/* Check that IMGU hardware is ready */
if (!(readl(base + IMGU_REG_SP_CTRL(0)) & IMGU_CTRL_IDLE)) {
dev_err(dev, "SP is not idle\n");
return -EIO;
}
if (!(readl(base + IMGU_REG_ISP_CTRL) & IMGU_CTRL_IDLE)) {
dev_err(dev, "ISP is not idle\n");
return -EIO;
}
for (i = 0; i < ARRAY_SIZE(stream_monitors); i++) {
val = readl(base + stream_monitors[i].reg);
if (val & stream_monitors[i].mask) {
dev_err(dev, "error: Stream monitor %s is valid\n",
stream_monitors[i].name);
return -EIO;
}
}
/* Initialize GDC with default values */
for (i = 0; i < ARRAY_SIZE(imgu_css_gdc_lut[0]); i++) {
u32 val0 = imgu_css_gdc_lut[0][i] & IMGU_GDC_LUT_MASK;
u32 val1 = imgu_css_gdc_lut[1][i] & IMGU_GDC_LUT_MASK;
u32 val2 = imgu_css_gdc_lut[2][i] & IMGU_GDC_LUT_MASK;
u32 val3 = imgu_css_gdc_lut[3][i] & IMGU_GDC_LUT_MASK;
writel(val0 | (val1 << 16),
base + IMGU_REG_GDC_LUT_BASE + i * 8);
writel(val2 | (val3 << 16),
base + IMGU_REG_GDC_LUT_BASE + i * 8 + 4);
}
return 0;
}
/* Boot the given IPU3 CSS SP */
static int imgu_css_hw_start_sp(struct imgu_css *css, int sp)
{
void __iomem *const base = css->base;
struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[sp]];
struct imgu_abi_sp_init_dmem_cfg dmem_cfg = {
.ddr_data_addr = css->binary[css->fw_sp[sp]].daddr
+ bi->blob.data_source,
.dmem_data_addr = bi->blob.data_target,
.dmem_bss_addr = bi->blob.bss_target,
.data_size = bi->blob.data_size,
.bss_size = bi->blob.bss_size,
.sp_id = sp,
};
writes(&dmem_cfg, sizeof(dmem_cfg), base +
IMGU_REG_SP_DMEM_BASE(sp) + bi->info.sp.init_dmem_data);
writel(bi->info.sp.sp_entry, base + IMGU_REG_SP_START_ADDR(sp));
writel(readl(base + IMGU_REG_SP_CTRL(sp))
| IMGU_CTRL_START | IMGU_CTRL_RUN, base + IMGU_REG_SP_CTRL(sp));
if (imgu_hw_wait(css->base, IMGU_REG_SP_DMEM_BASE(sp)
+ bi->info.sp.sw_state,
~0, IMGU_ABI_SP_SWSTATE_INITIALIZED))
return -EIO;
return 0;
}
/* Start the IPU3 CSS ImgU (Imaging Unit) and all the SPs */
static int imgu_css_hw_start(struct imgu_css *css)
{
static const u32 event_mask =
((1 << IMGU_ABI_EVTTYPE_OUT_FRAME_DONE) |
(1 << IMGU_ABI_EVTTYPE_2ND_OUT_FRAME_DONE) |
(1 << IMGU_ABI_EVTTYPE_VF_OUT_FRAME_DONE) |
(1 << IMGU_ABI_EVTTYPE_2ND_VF_OUT_FRAME_DONE) |
(1 << IMGU_ABI_EVTTYPE_3A_STATS_DONE) |
(1 << IMGU_ABI_EVTTYPE_DIS_STATS_DONE) |
(1 << IMGU_ABI_EVTTYPE_PIPELINE_DONE) |
(1 << IMGU_ABI_EVTTYPE_FRAME_TAGGED) |
(1 << IMGU_ABI_EVTTYPE_INPUT_FRAME_DONE) |
(1 << IMGU_ABI_EVTTYPE_METADATA_DONE) |
(1 << IMGU_ABI_EVTTYPE_ACC_STAGE_COMPLETE))
<< IMGU_ABI_SP_COMM_EVENT_IRQ_MASK_OR_SHIFT;
void __iomem *const base = css->base;
struct imgu_fw_info *bi, *bl = &css->fwp->binary_header[css->fw_bl];
unsigned int i;
writel(IMGU_TLB_INVALIDATE, base + IMGU_REG_TLB_INVALIDATE);
/* Start bootloader */
writel(IMGU_ABI_BL_SWSTATE_BUSY,
base + IMGU_REG_ISP_DMEM_BASE + bl->info.bl.sw_state);
writel(IMGU_NUM_SP,
base + IMGU_REG_ISP_DMEM_BASE + bl->info.bl.num_dma_cmds);
for (i = 0; i < IMGU_NUM_SP; i++) {
int j = IMGU_NUM_SP - i - 1; /* load sp1 first, then sp0 */
struct imgu_fw_info *sp =
&css->fwp->binary_header[css->fw_sp[j]];
struct imgu_abi_bl_dma_cmd_entry dma_cmd = {
.src_addr = css->binary[css->fw_sp[j]].daddr
+ sp->blob.text_source,
.size = sp->blob.text_size,
.dst_type = IMGU_ABI_BL_DMACMD_TYPE_SP_PMEM,
.dst_addr = IMGU_SP_PMEM_BASE(j),
};
writes(&dma_cmd, sizeof(dma_cmd),
base + IMGU_REG_ISP_DMEM_BASE + i * sizeof(dma_cmd) +
bl->info.bl.dma_cmd_list);
}
writel(bl->info.bl.bl_entry, base + IMGU_REG_ISP_START_ADDR);
writel(readl(base + IMGU_REG_ISP_CTRL)
| IMGU_CTRL_START | IMGU_CTRL_RUN, base + IMGU_REG_ISP_CTRL);
if (imgu_hw_wait(css->base, IMGU_REG_ISP_DMEM_BASE
+ bl->info.bl.sw_state, ~0,
IMGU_ABI_BL_SWSTATE_OK)) {
dev_err(css->dev, "failed to start bootloader\n");
return -EIO;
}
/* Start ISP */
memset(css->xmem_sp_group_ptrs.vaddr, 0,
sizeof(struct imgu_abi_sp_group));
bi = &css->fwp->binary_header[css->fw_sp[0]];
writel(css->xmem_sp_group_ptrs.daddr,
base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.per_frame_data);
writel(IMGU_ABI_SP_SWSTATE_TERMINATED,
base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.sw_state);
writel(1, base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.invalidate_tlb);
if (imgu_css_hw_start_sp(css, 0))
return -EIO;
writel(0, base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.isp_started);
writel(0, base + IMGU_REG_SP_DMEM_BASE(0) +
bi->info.sp.host_sp_queues_initialized);
writel(0, base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.sleep_mode);
writel(0, base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.invalidate_tlb);
writel(IMGU_ABI_SP_COMM_COMMAND_READY, base + IMGU_REG_SP_DMEM_BASE(0)
+ bi->info.sp.host_sp_com + IMGU_ABI_SP_COMM_COMMAND);
/* Enable all events for all queues */
for (i = 0; i < IPU3_CSS_PIPE_ID_NUM; i++)
writel(event_mask, base + IMGU_REG_SP_DMEM_BASE(0)
+ bi->info.sp.host_sp_com
+ IMGU_ABI_SP_COMM_EVENT_IRQ_MASK(i));
writel(1, base + IMGU_REG_SP_DMEM_BASE(0) +
bi->info.sp.host_sp_queues_initialized);
/* Start SP1 */
bi = &css->fwp->binary_header[css->fw_sp[1]];
writel(IMGU_ABI_SP_SWSTATE_TERMINATED,
base + IMGU_REG_SP_DMEM_BASE(1) + bi->info.sp.sw_state);
if (imgu_css_hw_start_sp(css, 1))
return -EIO;
writel(IMGU_ABI_SP_COMM_COMMAND_READY, base + IMGU_REG_SP_DMEM_BASE(1)
+ bi->info.sp.host_sp_com + IMGU_ABI_SP_COMM_COMMAND);
return 0;
}
static void imgu_css_hw_stop(struct imgu_css *css)
{
void __iomem *const base = css->base;
struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[0]];
/* Stop fw */
writel(IMGU_ABI_SP_COMM_COMMAND_TERMINATE,
base + IMGU_REG_SP_DMEM_BASE(0) +
bi->info.sp.host_sp_com + IMGU_ABI_SP_COMM_COMMAND);
if (imgu_hw_wait(css->base, IMGU_REG_SP_CTRL(0),
IMGU_CTRL_IDLE, IMGU_CTRL_IDLE))
dev_err(css->dev, "wait sp0 idle timeout.\n");
if (readl(base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.sw_state) !=
IMGU_ABI_SP_SWSTATE_TERMINATED)
dev_err(css->dev, "sp0 is not terminated.\n");
if (imgu_hw_wait(css->base, IMGU_REG_ISP_CTRL,
IMGU_CTRL_IDLE, IMGU_CTRL_IDLE))
dev_err(css->dev, "wait isp idle timeout\n");
}
static void imgu_css_hw_cleanup(struct imgu_css *css)
{
void __iomem *const base = css->base;
/* Reset CSS */
/* Clear the CSS busy signal */
readl(base + IMGU_REG_GP_BUSY);
writel(0, base + IMGU_REG_GP_BUSY);
/* Wait for idle signal */
if (imgu_hw_wait(css->base, IMGU_REG_STATE, IMGU_STATE_IDLE_STS,
IMGU_STATE_IDLE_STS))
dev_err(css->dev, "failed to shut down hw cleanly\n");
/* Reset the css */
writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_FORCE_RESET,
base + IMGU_REG_PM_CTRL);
usleep_range(200, 300);
}
static void imgu_css_pipeline_cleanup(struct imgu_css *css, unsigned int pipe)
{
struct imgu_device *imgu = dev_get_drvdata(css->dev);
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
unsigned int i;
imgu_css_pool_cleanup(imgu, &css_pipe->pool.parameter_set_info);
imgu_css_pool_cleanup(imgu, &css_pipe->pool.acc);
imgu_css_pool_cleanup(imgu, &css_pipe->pool.gdc);
imgu_css_pool_cleanup(imgu, &css_pipe->pool.obgrid);
for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
imgu_css_pool_cleanup(imgu, &css_pipe->pool.binary_params_p[i]);
}
/*
* This function initializes various stages of the
* IPU3 CSS ISP pipeline
*/
static int imgu_css_pipeline_init(struct imgu_css *css, unsigned int pipe)
{
static const int BYPC = 2; /* Bytes per component */
static const struct imgu_abi_buffer_sp buffer_sp_init = {
.buf_src = {.queue_id = IMGU_ABI_QUEUE_EVENT_ID},
.buf_type = IMGU_ABI_BUFFER_TYPE_INVALID,
};
struct imgu_abi_isp_iterator_config *cfg_iter;
struct imgu_abi_isp_ref_config *cfg_ref;
struct imgu_abi_isp_dvs_config *cfg_dvs;
struct imgu_abi_isp_tnr3_config *cfg_tnr;
struct imgu_abi_isp_ref_dmem_state *cfg_ref_state;
struct imgu_abi_isp_tnr3_dmem_state *cfg_tnr_state;
const int stage = 0;
unsigned int i, j;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
struct imgu_css_queue *css_queue_in =
&css_pipe->queue[IPU3_CSS_QUEUE_IN];
struct imgu_css_queue *css_queue_out =
&css_pipe->queue[IPU3_CSS_QUEUE_OUT];
struct imgu_css_queue *css_queue_vf =
&css_pipe->queue[IPU3_CSS_QUEUE_VF];
const struct imgu_fw_info *bi =
&css->fwp->binary_header[css_pipe->bindex];
const unsigned int stripes = bi->info.isp.sp.iterator.num_stripes;
struct imgu_fw_config_memory_offsets *cofs = (void *)css->fwp +
bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_CONFIG];
struct imgu_fw_state_memory_offsets *sofs = (void *)css->fwp +
bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_STATE];
struct imgu_abi_isp_stage *isp_stage;
struct imgu_abi_sp_stage *sp_stage;
struct imgu_abi_sp_group *sp_group;
struct imgu_abi_frames_sp *frames_sp;
struct imgu_abi_frame_sp *frame_sp;
struct imgu_abi_frame_sp_info *frame_sp_info;
const unsigned int bds_width_pad =
ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width,
2 * IPU3_UAPI_ISP_VEC_ELEMS);
const enum imgu_abi_memories m0 = IMGU_ABI_MEM_ISP_DMEM0;
enum imgu_abi_param_class cfg = IMGU_ABI_PARAM_CLASS_CONFIG;
void *vaddr = css_pipe->binary_params_cs[cfg - 1][m0].vaddr;
struct imgu_device *imgu = dev_get_drvdata(css->dev);
dev_dbg(css->dev, "%s for pipe %d", __func__, pipe);
/* Configure iterator */
cfg_iter = imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
&cofs->dmem.iterator,
sizeof(*cfg_iter), vaddr);
if (!cfg_iter)
goto bad_firmware;
frame_sp_info = &cfg_iter->input_info;
frame_sp_info->res.width = css_queue_in->fmt.mpix.width;
frame_sp_info->res.height = css_queue_in->fmt.mpix.height;
frame_sp_info->padded_width = css_queue_in->width_pad;
frame_sp_info->format = css_queue_in->css_fmt->frame_format;
frame_sp_info->raw_bit_depth = css_queue_in->css_fmt->bit_depth;
frame_sp_info->raw_bayer_order = css_queue_in->css_fmt->bayer_order;
frame_sp_info->raw_type = IMGU_ABI_RAW_TYPE_BAYER;
frame_sp_info = &cfg_iter->internal_info;
frame_sp_info->res.width = css_pipe->rect[IPU3_CSS_RECT_BDS].width;
frame_sp_info->res.height = css_pipe->rect[IPU3_CSS_RECT_BDS].height;
frame_sp_info->padded_width = bds_width_pad;
frame_sp_info->format = css_queue_out->css_fmt->frame_format;
frame_sp_info->raw_bit_depth = css_queue_out->css_fmt->bit_depth;
frame_sp_info->raw_bayer_order = css_queue_out->css_fmt->bayer_order;
frame_sp_info->raw_type = IMGU_ABI_RAW_TYPE_BAYER;
frame_sp_info = &cfg_iter->output_info;
frame_sp_info->res.width = css_queue_out->fmt.mpix.width;
frame_sp_info->res.height = css_queue_out->fmt.mpix.height;
frame_sp_info->padded_width = css_queue_out->width_pad;
frame_sp_info->format = css_queue_out->css_fmt->frame_format;
frame_sp_info->raw_bit_depth = css_queue_out->css_fmt->bit_depth;
frame_sp_info->raw_bayer_order = css_queue_out->css_fmt->bayer_order;
frame_sp_info->raw_type = IMGU_ABI_RAW_TYPE_BAYER;
frame_sp_info = &cfg_iter->vf_info;
frame_sp_info->res.width = css_queue_vf->fmt.mpix.width;
frame_sp_info->res.height = css_queue_vf->fmt.mpix.height;
frame_sp_info->padded_width = css_queue_vf->width_pad;
frame_sp_info->format = css_queue_vf->css_fmt->frame_format;
frame_sp_info->raw_bit_depth = css_queue_vf->css_fmt->bit_depth;
frame_sp_info->raw_bayer_order = css_queue_vf->css_fmt->bayer_order;
frame_sp_info->raw_type = IMGU_ABI_RAW_TYPE_BAYER;
cfg_iter->dvs_envelope.width =
css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].width;
cfg_iter->dvs_envelope.height =
css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].height;
/* Configure reference (delay) frames */
cfg_ref = imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
&cofs->dmem.ref,
sizeof(*cfg_ref), vaddr);
if (!cfg_ref)
goto bad_firmware;
cfg_ref->port_b.crop = 0;
cfg_ref->port_b.elems = IMGU_ABI_ISP_DDR_WORD_BYTES / BYPC;
cfg_ref->port_b.width =
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].width;
cfg_ref->port_b.stride =
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline;
cfg_ref->width_a_over_b =
IPU3_UAPI_ISP_VEC_ELEMS / cfg_ref->port_b.elems;
cfg_ref->dvs_frame_delay = IPU3_CSS_AUX_FRAMES - 1;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++) {
cfg_ref->ref_frame_addr_y[i] =
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i].daddr;
cfg_ref->ref_frame_addr_c[i] =
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i].daddr +
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline *
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height;
}
for (; i < IMGU_ABI_FRAMES_REF; i++) {
cfg_ref->ref_frame_addr_y[i] = 0;
cfg_ref->ref_frame_addr_c[i] = 0;
}
/* Configure DVS (digital video stabilization) */
cfg_dvs = imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
&cofs->dmem.dvs, sizeof(*cfg_dvs),
vaddr);
if (!cfg_dvs)
goto bad_firmware;
cfg_dvs->num_horizontal_blocks =
ALIGN(DIV_ROUND_UP(css_pipe->rect[IPU3_CSS_RECT_GDC].width,
IMGU_DVS_BLOCK_W), 2);
cfg_dvs->num_vertical_blocks =
DIV_ROUND_UP(css_pipe->rect[IPU3_CSS_RECT_GDC].height,
IMGU_DVS_BLOCK_H);
/* Configure TNR (temporal noise reduction) */
if (css_pipe->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
cfg_tnr = imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
&cofs->dmem.tnr3,
sizeof(*cfg_tnr),
vaddr);
if (!cfg_tnr)
goto bad_firmware;
cfg_tnr->port_b.crop = 0;
cfg_tnr->port_b.elems = IMGU_ABI_ISP_DDR_WORD_BYTES;
cfg_tnr->port_b.width =
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].width;
cfg_tnr->port_b.stride =
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].bytesperline;
cfg_tnr->width_a_over_b =
IPU3_UAPI_ISP_VEC_ELEMS / cfg_tnr->port_b.elems;
cfg_tnr->frame_height =
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height;
cfg_tnr->delay_frame = IPU3_CSS_AUX_FRAMES - 1;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
cfg_tnr->frame_addr[i] =
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR]
.mem[i].daddr;
for (; i < IMGU_ABI_FRAMES_TNR; i++)
cfg_tnr->frame_addr[i] = 0;
}
/* Configure ref dmem state parameters */
cfg = IMGU_ABI_PARAM_CLASS_STATE;
vaddr = css_pipe->binary_params_cs[cfg - 1][m0].vaddr;
cfg_ref_state = imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
&sofs->dmem.ref,
sizeof(*cfg_ref_state),
vaddr);
if (!cfg_ref_state)
goto bad_firmware;
cfg_ref_state->ref_in_buf_idx = 0;
cfg_ref_state->ref_out_buf_idx = 1;
/* Configure tnr dmem state parameters */
if (css_pipe->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
cfg_tnr_state =
imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
&sofs->dmem.tnr3,
sizeof(*cfg_tnr_state),
vaddr);
if (!cfg_tnr_state)
goto bad_firmware;
cfg_tnr_state->in_bufidx = 0;
cfg_tnr_state->out_bufidx = 1;
cfg_tnr_state->bypass_filter = 0;
cfg_tnr_state->total_frame_counter = 0;
for (i = 0; i < IMGU_ABI_BUF_SETS_TNR; i++)
cfg_tnr_state->buffer_frame_counter[i] = 0;
}
/* Configure ISP stage */
isp_stage = css_pipe->xmem_isp_stage_ptrs[pipe][stage].vaddr;
memset(isp_stage, 0, sizeof(*isp_stage));
isp_stage->blob_info = bi->blob;
isp_stage->binary_info = bi->info.isp.sp;
strscpy(isp_stage->binary_name,
(char *)css->fwp + bi->blob.prog_name_offset,
sizeof(isp_stage->binary_name));
isp_stage->mem_initializers = bi->info.isp.sp.mem_initializers;
for (i = IMGU_ABI_PARAM_CLASS_CONFIG; i < IMGU_ABI_PARAM_CLASS_NUM; i++)
for (j = 0; j < IMGU_ABI_NUM_MEMORIES; j++)
isp_stage->mem_initializers.params[i][j].address =
css_pipe->binary_params_cs[i - 1][j].daddr;
/* Configure SP stage */
sp_stage = css_pipe->xmem_sp_stage_ptrs[pipe][stage].vaddr;
memset(sp_stage, 0, sizeof(*sp_stage));
frames_sp = &sp_stage->frames;
frames_sp->in.buf_attr = buffer_sp_init;
for (i = 0; i < IMGU_ABI_BINARY_MAX_OUTPUT_PORTS; i++)
frames_sp->out[i].buf_attr = buffer_sp_init;
frames_sp->out_vf.buf_attr = buffer_sp_init;
frames_sp->s3a_buf = buffer_sp_init;
frames_sp->dvs_buf = buffer_sp_init;
sp_stage->stage_type = IMGU_ABI_STAGE_TYPE_ISP;
sp_stage->num = stage;
sp_stage->isp_online = 0;
sp_stage->isp_copy_vf = 0;
sp_stage->isp_copy_output = 0;
sp_stage->enable.vf_output = css_pipe->vf_output_en;
frames_sp->effective_in_res.width =
css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width;
frames_sp->effective_in_res.height =
css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height;
frame_sp = &frames_sp->in;
frame_sp->info.res.width = css_queue_in->fmt.mpix.width;
frame_sp->info.res.height = css_queue_in->fmt.mpix.height;
frame_sp->info.padded_width = css_queue_in->width_pad;
frame_sp->info.format = css_queue_in->css_fmt->frame_format;
frame_sp->info.raw_bit_depth = css_queue_in->css_fmt->bit_depth;
frame_sp->info.raw_bayer_order = css_queue_in->css_fmt->bayer_order;
frame_sp->info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
frame_sp->buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_C_ID;
frame_sp->buf_attr.buf_type = IMGU_ABI_BUFFER_TYPE_INPUT_FRAME;
frame_sp = &frames_sp->out[0];
frame_sp->info.res.width = css_queue_out->fmt.mpix.width;
frame_sp->info.res.height = css_queue_out->fmt.mpix.height;
frame_sp->info.padded_width = css_queue_out->width_pad;
frame_sp->info.format = css_queue_out->css_fmt->frame_format;
frame_sp->info.raw_bit_depth = css_queue_out->css_fmt->bit_depth;
frame_sp->info.raw_bayer_order = css_queue_out->css_fmt->bayer_order;
frame_sp->info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
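/* NV-style output: the interleaved UV plane starts right after the padded Y plane */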
frame_sp->planes.nv.uv.offset = css_queue_out->width_pad *
css_queue_out->fmt.mpix.height;
frame_sp->buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_D_ID;
frame_sp->buf_attr.buf_type = IMGU_ABI_BUFFER_TYPE_OUTPUT_FRAME;
frame_sp = &frames_sp->out[1];
frame_sp->buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_EVENT_ID;
frame_sp_info = &frames_sp->internal_frame_info;
frame_sp_info->res.width = css_pipe->rect[IPU3_CSS_RECT_BDS].width;
frame_sp_info->res.height = css_pipe->rect[IPU3_CSS_RECT_BDS].height;
frame_sp_info->padded_width = bds_width_pad;
frame_sp_info->format = css_queue_out->css_fmt->frame_format;
frame_sp_info->raw_bit_depth = css_queue_out->css_fmt->bit_depth;
frame_sp_info->raw_bayer_order = css_queue_out->css_fmt->bayer_order;
frame_sp_info->raw_type = IMGU_ABI_RAW_TYPE_BAYER;
frame_sp = &frames_sp->out_vf;
frame_sp->info.res.width = css_queue_vf->fmt.mpix.width;
frame_sp->info.res.height = css_queue_vf->fmt.mpix.height;
frame_sp->info.padded_width = css_queue_vf->width_pad;
frame_sp->info.format = css_queue_vf->css_fmt->frame_format;
frame_sp->info.raw_bit_depth = css_queue_vf->css_fmt->bit_depth;
frame_sp->info.raw_bayer_order = css_queue_vf->css_fmt->bayer_order;
frame_sp->info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
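/* Planar YUV420 viewfinder: the U plane follows the padded Y plane, the V plane follows U (hence the 5/4 factor) */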
frame_sp->planes.yuv.u.offset = css_queue_vf->width_pad *
css_queue_vf->fmt.mpix.height;
frame_sp->planes.yuv.v.offset = css_queue_vf->width_pad *
css_queue_vf->fmt.mpix.height * 5 / 4;
frame_sp->buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_E_ID;
frame_sp->buf_attr.buf_type = IMGU_ABI_BUFFER_TYPE_VF_OUTPUT_FRAME;
frames_sp->s3a_buf.buf_src.queue_id = IMGU_ABI_QUEUE_F_ID;
frames_sp->s3a_buf.buf_type = IMGU_ABI_BUFFER_TYPE_3A_STATISTICS;
frames_sp->dvs_buf.buf_src.queue_id = IMGU_ABI_QUEUE_G_ID;
frames_sp->dvs_buf.buf_type = IMGU_ABI_BUFFER_TYPE_DIS_STATISTICS;
sp_stage->dvs_envelope.width =
css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].width;
sp_stage->dvs_envelope.height =
css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].height;
sp_stage->isp_pipe_version =
bi->info.isp.sp.pipeline.isp_pipe_version;
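/* Choose a decimation factor (roughly log2 of how much the BDS resolution exceeds the maximum BQ grid), clamped to the supported range [3, 5] */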
sp_stage->isp_deci_log_factor =
clamp(max(fls(css_pipe->rect[IPU3_CSS_RECT_BDS].width /
IMGU_MAX_BQ_GRID_WIDTH),
fls(css_pipe->rect[IPU3_CSS_RECT_BDS].height /
IMGU_MAX_BQ_GRID_HEIGHT)) - 1, 3, 5);
sp_stage->isp_vf_downscale_bits = 0;
sp_stage->if_config_index = 255;
sp_stage->sp_enable_xnr = 0;
sp_stage->num_stripes = stripes;
sp_stage->enable.s3a = 1;
sp_stage->enable.dvs_stats = 0;
sp_stage->xmem_bin_addr = css->binary[css_pipe->bindex].daddr;
sp_stage->xmem_map_addr = css_pipe->sp_ddr_ptrs.daddr;
sp_stage->isp_stage_addr =
css_pipe->xmem_isp_stage_ptrs[pipe][stage].daddr;
/* Configure SP group */
sp_group = css->xmem_sp_group_ptrs.vaddr;
memset(&sp_group->pipe[pipe], 0, sizeof(struct imgu_abi_sp_pipeline));
sp_group->pipe[pipe].num_stages = 1;
sp_group->pipe[pipe].pipe_id = css_pipe->pipe_id;
sp_group->pipe[pipe].thread_id = pipe;
sp_group->pipe[pipe].pipe_num = pipe;
sp_group->pipe[pipe].num_execs = -1;
sp_group->pipe[pipe].pipe_qos_config = -1;
sp_group->pipe[pipe].required_bds_factor = 0;
sp_group->pipe[pipe].dvs_frame_delay = IPU3_CSS_AUX_FRAMES - 1;
sp_group->pipe[pipe].inout_port_config =
IMGU_ABI_PORT_CONFIG_TYPE_INPUT_HOST |
IMGU_ABI_PORT_CONFIG_TYPE_OUTPUT_HOST;
sp_group->pipe[pipe].scaler_pp_lut = 0;
sp_group->pipe[pipe].shading.internal_frame_origin_x_bqs_on_sctbl = 0;
sp_group->pipe[pipe].shading.internal_frame_origin_y_bqs_on_sctbl = 0;
sp_group->pipe[pipe].sp_stage_addr[stage] =
css_pipe->xmem_sp_stage_ptrs[pipe][stage].daddr;
sp_group->pipe[pipe].pipe_config =
bi->info.isp.sp.enable.params ? (1 << pipe) : 0;
sp_group->pipe[pipe].pipe_config |= IMGU_ABI_PIPE_CONFIG_ACQUIRE_ISP;
/* Initialize parameter pools */
if (imgu_css_pool_init(imgu, &css_pipe->pool.parameter_set_info,
sizeof(struct imgu_abi_parameter_set_info)) ||
imgu_css_pool_init(imgu, &css_pipe->pool.acc,
sizeof(struct imgu_abi_acc_param)) ||
imgu_css_pool_init(imgu, &css_pipe->pool.gdc,
sizeof(struct imgu_abi_gdc_warp_param) *
3 * cfg_dvs->num_horizontal_blocks / 2 *
cfg_dvs->num_vertical_blocks) ||
imgu_css_pool_init(imgu, &css_pipe->pool.obgrid,
imgu_css_fw_obgrid_size(
&css->fwp->binary_header[css_pipe->bindex])))
goto out_of_memory;
for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
if (imgu_css_pool_init(imgu,
&css_pipe->pool.binary_params_p[i],
bi->info.isp.sp.mem_initializers.params
[IMGU_ABI_PARAM_CLASS_PARAM][i].size))
goto out_of_memory;
return 0;
bad_firmware:
imgu_css_pipeline_cleanup(css, pipe);
return -EPROTO;
out_of_memory:
imgu_css_pipeline_cleanup(css, pipe);
return -ENOMEM;
}
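/* Return the current write position (end index) of the given host-to-SP buffer queue, or of the event queue if queue < 0 */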
static u8 imgu_css_queue_pos(struct imgu_css *css, int queue, int thread)
{
static const unsigned int sp;
void __iomem *const base = css->base;
struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[sp]];
struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
bi->info.sp.host_sp_queue;
return queue >= 0 ? readb(&q->host2sp_bufq_info[thread][queue].end) :
readb(&q->host2sp_evtq_info.end);
}
/* Send data to the SP using the given buffer queue, or the event queue if queue < 0. */
static int imgu_css_queue_data(struct imgu_css *css,
int queue, int thread, u32 data)
{
static const unsigned int sp;
void __iomem *const base = css->base;
struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[sp]];
struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
bi->info.sp.host_sp_queue;
u8 size, start, end, end2;
if (queue >= 0) {
size = readb(&q->host2sp_bufq_info[thread][queue].size);
start = readb(&q->host2sp_bufq_info[thread][queue].start);
end = readb(&q->host2sp_bufq_info[thread][queue].end);
} else {
size = readb(&q->host2sp_evtq_info.size);
start = readb(&q->host2sp_evtq_info.start);
end = readb(&q->host2sp_evtq_info.end);
}
if (size == 0)
return -EIO;
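/* The queue is a circular buffer: advancing the end index onto the start index would mean the queue is full */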
end2 = (end + 1) % size;
if (end2 == start)
return -EBUSY; /* Queue full */
if (queue >= 0) {
writel(data, &q->host2sp_bufq[thread][queue][end]);
writeb(end2, &q->host2sp_bufq_info[thread][queue].end);
} else {
writel(data, &q->host2sp_evtq[end]);
writeb(end2, &q->host2sp_evtq_info.end);
}
return 0;
}
/* Receive data from the given buffer queue, or from the event queue if queue < 0. */
static int imgu_css_dequeue_data(struct imgu_css *css, int queue, u32 *data)
{
static const unsigned int sp;
void __iomem *const base = css->base;
struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[sp]];
struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
bi->info.sp.host_sp_queue;
u8 size, start, end, start2;
if (queue >= 0) {
size = readb(&q->sp2host_bufq_info[queue].size);
start = readb(&q->sp2host_bufq_info[queue].start);
end = readb(&q->sp2host_bufq_info[queue].end);
} else {
size = readb(&q->sp2host_evtq_info.size);
start = readb(&q->sp2host_evtq_info.start);
end = readb(&q->sp2host_evtq_info.end);
}
if (size == 0)
return -EIO;
if (end == start)
return -EBUSY; /* Queue empty */
start2 = (start + 1) % size;
if (queue >= 0) {
*data = readl(&q->sp2host_bufq[queue][start]);
writeb(start2, &q->sp2host_bufq_info[queue].start);
} else {
int r;
*data = readl(&q->sp2host_evtq[start]);
writeb(start2, &q->sp2host_evtq_info.start);
/* Acknowledge events dequeued from event queue */
r = imgu_css_queue_data(css, queue, 0,
IMGU_ABI_EVENT_EVENT_DEQUEUED);
if (r < 0)
return r;
}
return 0;
}
/* Free binary-specific resources */
static void imgu_css_binary_cleanup(struct imgu_css *css, unsigned int pipe)
{
struct imgu_device *imgu = dev_get_drvdata(css->dev);
unsigned int i, j;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
for (j = 0; j < IMGU_ABI_PARAM_CLASS_NUM - 1; j++)
for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
imgu_dmamap_free(imgu,
&css_pipe->binary_params_cs[j][i]);
j = IPU3_CSS_AUX_FRAME_REF;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
imgu_dmamap_free(imgu,
&css_pipe->aux_frames[j].mem[i]);
j = IPU3_CSS_AUX_FRAME_TNR;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
imgu_dmamap_free(imgu,
&css_pipe->aux_frames[j].mem[i]);
}
static int imgu_css_binary_preallocate(struct imgu_css *css, unsigned int pipe)
{
struct imgu_device *imgu = dev_get_drvdata(css->dev);
unsigned int i, j;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
for (j = IMGU_ABI_PARAM_CLASS_CONFIG;
j < IMGU_ABI_PARAM_CLASS_NUM; j++)
for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
if (!imgu_dmamap_alloc(imgu,
&css_pipe->binary_params_cs[j - 1][i],
CSS_ABI_SIZE))
goto out_of_memory;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
if (!imgu_dmamap_alloc(imgu,
&css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i],
CSS_BDS_SIZE))
goto out_of_memory;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
if (!imgu_dmamap_alloc(imgu,
&css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].mem[i],
CSS_GDC_SIZE))
goto out_of_memory;
return 0;
out_of_memory:
imgu_css_binary_cleanup(css, pipe);
return -ENOMEM;
}
/* Allocate binary-specific resources */
static int imgu_css_binary_setup(struct imgu_css *css, unsigned int pipe)
{
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
struct imgu_fw_info *bi = &css->fwp->binary_header[css_pipe->bindex];
struct imgu_device *imgu = dev_get_drvdata(css->dev);
int i, j, size;
static const int BYPC = 2; /* Bytes per component */
unsigned int w, h;
/* Allocate parameter memory blocks for this binary */
for (j = IMGU_ABI_PARAM_CLASS_CONFIG; j < IMGU_ABI_PARAM_CLASS_NUM; j++)
for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++) {
if (imgu_css_dma_buffer_resize(
imgu,
&css_pipe->binary_params_cs[j - 1][i],
bi->info.isp.sp.mem_initializers.params[j][i].size))
goto out_of_memory;
}
/* Allocate internal frame buffers */
/* Reference frames for DVS, FRAME_FORMAT_YUV420_16 */
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperpixel = BYPC;
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].width =
css_pipe->rect[IPU3_CSS_RECT_BDS].width;
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height =
ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].height,
IMGU_DVS_BLOCK_H) + 2 * IMGU_GDC_BUF_Y;
h = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height;
w = ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width,
2 * IPU3_UAPI_ISP_VEC_ELEMS) + 2 * IMGU_GDC_BUF_X;
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline =
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperpixel * w;
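/* Y plane plus two half-resolution chroma planes, two bytes per component (YUV420_16) */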
size = w * h * BYPC + (w / 2) * (h / 2) * BYPC * 2;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
if (imgu_css_dma_buffer_resize(
imgu,
&css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i],
size))
goto out_of_memory;
/* TNR frames for temporal noise reduction, FRAME_FORMAT_YUV_LINE */
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].bytesperpixel = 1;
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].width =
roundup(css_pipe->rect[IPU3_CSS_RECT_GDC].width,
bi->info.isp.sp.block.block_width *
IPU3_UAPI_ISP_VEC_ELEMS);
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height =
roundup(css_pipe->rect[IPU3_CSS_RECT_GDC].height,
bi->info.isp.sp.block.output_block_height);
w = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].width;
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].bytesperline = w;
h = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height;
size = w * ALIGN(h * 3 / 2 + 3, 2); /* +3 for vf_pp prefetch */
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
if (imgu_css_dma_buffer_resize(
imgu,
&css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].mem[i],
size))
goto out_of_memory;
return 0;
out_of_memory:
imgu_css_binary_cleanup(css, pipe);
return -ENOMEM;
}
int imgu_css_start_streaming(struct imgu_css *css)
{
u32 data;
int r, pipe;
if (css->streaming)
return -EPROTO;
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
r = imgu_css_binary_setup(css, pipe);
if (r < 0)
return r;
}
r = imgu_css_hw_init(css);
if (r < 0)
return r;
r = imgu_css_hw_start(css);
if (r < 0)
goto fail;
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
r = imgu_css_pipeline_init(css, pipe);
if (r < 0)
goto fail;
}
css->streaming = true;
imgu_css_hw_enable_irq(css);
/* Initialize parameters to default */
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
r = imgu_css_set_parameters(css, pipe, NULL);
if (r < 0)
goto fail;
}
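/* Drain any stale data left in queues A and B from a previous session; -EBUSY means the queue is now empty */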
while (!(r = imgu_css_dequeue_data(css, IMGU_ABI_QUEUE_A_ID, &data)))
;
if (r != -EBUSY)
goto fail;
while (!(r = imgu_css_dequeue_data(css, IMGU_ABI_QUEUE_B_ID, &data)))
;
if (r != -EBUSY)
goto fail;
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
IMGU_ABI_EVENT_START_STREAM |
pipe << 16);
if (r < 0)
goto fail;
}
return 0;
fail:
css->streaming = false;
imgu_css_hw_cleanup(css);
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
imgu_css_pipeline_cleanup(css, pipe);
imgu_css_binary_cleanup(css, pipe);
}
return r;
}
void imgu_css_stop_streaming(struct imgu_css *css)
{
struct imgu_css_buffer *b, *b0;
int q, r, pipe;
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
IMGU_ABI_EVENT_STOP_STREAM);
if (r < 0)
dev_warn(css->dev, "failed on stop stream event\n");
}
if (!css->streaming)
return;
imgu_css_hw_stop(css);
imgu_css_hw_cleanup(css);
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
imgu_css_pipeline_cleanup(css, pipe);
spin_lock(&css_pipe->qlock);
for (q = 0; q < IPU3_CSS_QUEUES; q++)
list_for_each_entry_safe(b, b0,
&css_pipe->queue[q].bufs,
list) {
b->state = IPU3_CSS_BUFFER_FAILED;
list_del(&b->list);
}
spin_unlock(&css_pipe->qlock);
}
css->streaming = false;
}
bool imgu_css_pipe_queue_empty(struct imgu_css *css, unsigned int pipe)
{
int q;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
spin_lock(&css_pipe->qlock);
for (q = 0; q < IPU3_CSS_QUEUES; q++)
if (!list_empty(&css_pipe->queue[q].bufs))
break;
spin_unlock(&css_pipe->qlock);
return (q == IPU3_CSS_QUEUES);
}
bool imgu_css_queue_empty(struct imgu_css *css)
{
unsigned int pipe;
bool ret = true;
for (pipe = 0; pipe < IMGU_MAX_PIPE_NUM; pipe++)
ret &= imgu_css_pipe_queue_empty(css, pipe);
return ret;
}
bool imgu_css_is_streaming(struct imgu_css *css)
{
return css->streaming;
}
static int imgu_css_map_init(struct imgu_css *css, unsigned int pipe)
{
struct imgu_device *imgu = dev_get_drvdata(css->dev);
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
unsigned int p, q, i;
/* Allocate and map common structures shared with the imgu hardware */
for (p = 0; p < IPU3_CSS_PIPE_ID_NUM; p++)
for (i = 0; i < IMGU_ABI_MAX_STAGES; i++) {
if (!imgu_dmamap_alloc(imgu,
&css_pipe->xmem_sp_stage_ptrs[p][i],
sizeof(struct imgu_abi_sp_stage)))
return -ENOMEM;
if (!imgu_dmamap_alloc(imgu,
&css_pipe->xmem_isp_stage_ptrs[p][i],
sizeof(struct imgu_abi_isp_stage)))
return -ENOMEM;
}
if (!imgu_dmamap_alloc(imgu, &css_pipe->sp_ddr_ptrs,
ALIGN(sizeof(struct imgu_abi_ddr_address_map),
IMGU_ABI_ISP_DDR_WORD_BYTES)))
return -ENOMEM;
for (q = 0; q < IPU3_CSS_QUEUES; q++) {
unsigned int abi_buf_num = ARRAY_SIZE(css_pipe->abi_buffers[q]);
for (i = 0; i < abi_buf_num; i++)
if (!imgu_dmamap_alloc(imgu,
&css_pipe->abi_buffers[q][i],
sizeof(struct imgu_abi_buffer)))
return -ENOMEM;
}
if (imgu_css_binary_preallocate(css, pipe)) {
imgu_css_binary_cleanup(css, pipe);
return -ENOMEM;
}
return 0;
}
static void imgu_css_pipe_cleanup(struct imgu_css *css, unsigned int pipe)
{
struct imgu_device *imgu = dev_get_drvdata(css->dev);
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
unsigned int p, q, i, abi_buf_num;
imgu_css_binary_cleanup(css, pipe);
for (q = 0; q < IPU3_CSS_QUEUES; q++) {
abi_buf_num = ARRAY_SIZE(css_pipe->abi_buffers[q]);
for (i = 0; i < abi_buf_num; i++)
imgu_dmamap_free(imgu, &css_pipe->abi_buffers[q][i]);
}
for (p = 0; p < IPU3_CSS_PIPE_ID_NUM; p++)
for (i = 0; i < IMGU_ABI_MAX_STAGES; i++) {
imgu_dmamap_free(imgu,
&css_pipe->xmem_sp_stage_ptrs[p][i]);
imgu_dmamap_free(imgu,
&css_pipe->xmem_isp_stage_ptrs[p][i]);
}
imgu_dmamap_free(imgu, &css_pipe->sp_ddr_ptrs);
}
void imgu_css_cleanup(struct imgu_css *css)
{
struct imgu_device *imgu = dev_get_drvdata(css->dev);
unsigned int pipe;
imgu_css_stop_streaming(css);
for (pipe = 0; pipe < IMGU_MAX_PIPE_NUM; pipe++)
imgu_css_pipe_cleanup(css, pipe);
imgu_dmamap_free(imgu, &css->xmem_sp_group_ptrs);
imgu_css_fw_cleanup(css);
}
int imgu_css_init(struct device *dev, struct imgu_css *css,
void __iomem *base, int length)
{
struct imgu_device *imgu = dev_get_drvdata(dev);
int r, q, pipe;
/* Initialize main data structure */
css->dev = dev;
css->base = base;
css->iomem_length = length;
for (pipe = 0; pipe < IMGU_MAX_PIPE_NUM; pipe++) {
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
css_pipe->vf_output_en = false;
spin_lock_init(&css_pipe->qlock);
css_pipe->bindex = IPU3_CSS_DEFAULT_BINARY;
css_pipe->pipe_id = IPU3_CSS_PIPE_ID_VIDEO;
for (q = 0; q < IPU3_CSS_QUEUES; q++) {
r = imgu_css_queue_init(&css_pipe->queue[q], NULL, 0);
if (r)
return r;
}
r = imgu_css_map_init(css, pipe);
if (r) {
imgu_css_cleanup(css);
return r;
}
}
if (!imgu_dmamap_alloc(imgu, &css->xmem_sp_group_ptrs,
sizeof(struct imgu_abi_sp_group)))
return -ENOMEM;
r = imgu_css_fw_init(css);
if (r)
return r;
return 0;
}
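/* Clamp a resolution to the CSS minimum and round it to the nearest multiple of the given alignment */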
static u32 imgu_css_adjust(u32 res, u32 align)
{
u32 val = max_t(u32, IPU3_CSS_MIN_RES, res);
return DIV_ROUND_CLOSEST(val, align) * align;
}
/* Select a binary matching the required resolutions and formats */
static int imgu_css_find_binary(struct imgu_css *css,
unsigned int pipe,
struct imgu_css_queue queue[IPU3_CSS_QUEUES],
struct v4l2_rect rects[IPU3_CSS_RECTS])
{
const int binary_nr = css->fwp->file_header.binary_nr;
unsigned int binary_mode =
(css->pipes[pipe].pipe_id == IPU3_CSS_PIPE_ID_CAPTURE) ?
IA_CSS_BINARY_MODE_PRIMARY : IA_CSS_BINARY_MODE_VIDEO;
const struct v4l2_pix_format_mplane *in =
&queue[IPU3_CSS_QUEUE_IN].fmt.mpix;
const struct v4l2_pix_format_mplane *out =
&queue[IPU3_CSS_QUEUE_OUT].fmt.mpix;
const struct v4l2_pix_format_mplane *vf =
&queue[IPU3_CSS_QUEUE_VF].fmt.mpix;
u32 stripe_w = 0, stripe_h = 0;
const char *name;
int i, j;
if (!imgu_css_queue_enabled(&queue[IPU3_CSS_QUEUE_IN]))
return -EINVAL;
/* Find the stripe size boundaries from single-stripe binaries */
for (i = 0; i < binary_nr; i++) {
struct imgu_fw_info *bi = &css->fwp->binary_header[i];
u32 max_width = bi->info.isp.sp.output.max_width;
u32 max_height = bi->info.isp.sp.output.max_height;
if (bi->info.isp.sp.iterator.num_stripes <= 1) {
stripe_w = stripe_w ?
min(stripe_w, max_width) : max_width;
stripe_h = stripe_h ?
min(stripe_h, max_height) : max_height;
}
}
for (i = 0; i < binary_nr; i++) {
struct imgu_fw_info *bi = &css->fwp->binary_header[i];
enum imgu_abi_frame_format q_fmt;
name = (void *)css->fwp + bi->blob.prog_name_offset;
/* Check that binary supports memory-to-memory processing */
if (bi->info.isp.sp.input.source !=
IMGU_ABI_BINARY_INPUT_SOURCE_MEMORY)
continue;
/* Check that binary supports raw10 input */
if (!bi->info.isp.sp.enable.input_feeder &&
!bi->info.isp.sp.enable.input_raw)
continue;
/* Check binary mode */
if (bi->info.isp.sp.pipeline.mode != binary_mode)
continue;
/* The input is raw Bayer colour data, so skip luma-only binaries */
if (bi->info.isp.sp.enable.luma_only)
continue;
if (in->width < bi->info.isp.sp.input.min_width ||
in->width > bi->info.isp.sp.input.max_width ||
in->height < bi->info.isp.sp.input.min_height ||
in->height > bi->info.isp.sp.input.max_height)
continue;
if (imgu_css_queue_enabled(&queue[IPU3_CSS_QUEUE_OUT])) {
if (bi->info.isp.num_output_pins <= 0)
continue;
q_fmt = queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
for (j = 0; j < bi->info.isp.num_output_formats; j++)
if (bi->info.isp.output_formats[j] == q_fmt)
break;
if (j >= bi->info.isp.num_output_formats)
continue;
if (out->width < bi->info.isp.sp.output.min_width ||
out->width > bi->info.isp.sp.output.max_width ||
out->height < bi->info.isp.sp.output.min_height ||
out->height > bi->info.isp.sp.output.max_height)
continue;
if (out->width > bi->info.isp.sp.internal.max_width ||
out->height > bi->info.isp.sp.internal.max_height)
continue;
}
if (imgu_css_queue_enabled(&queue[IPU3_CSS_QUEUE_VF])) {
if (bi->info.isp.num_output_pins <= 1)
continue;
q_fmt = queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
for (j = 0; j < bi->info.isp.num_output_formats; j++)
if (bi->info.isp.output_formats[j] == q_fmt)
break;
if (j >= bi->info.isp.num_output_formats)
continue;
if (vf->width < bi->info.isp.sp.output.min_width ||
vf->width > bi->info.isp.sp.output.max_width ||
vf->height < bi->info.isp.sp.output.min_height ||
vf->height > bi->info.isp.sp.output.max_height)
continue;
}
/* All checks passed, select the binary */
dev_dbg(css->dev, "using binary %s id = %u\n", name,
bi->info.isp.sp.id);
return i;
}
/* Cannot find a suitable binary for these parameters */
return -EINVAL;
}
/*
 * Check that there is a binary matching the requirements. Parameters may be
 * NULL, indicating a disabled input/output. Return a negative value if the
 * given parameters cannot be supported or on error; zero or a positive value
 * indicates the found binary number. May modify the given parameters if no
 * exact match is found.
 */
int imgu_css_fmt_try(struct imgu_css *css,
struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
struct v4l2_rect *rects[IPU3_CSS_RECTS],
unsigned int pipe)
{
static const u32 EFF_ALIGN_W = 2;
static const u32 BDS_ALIGN_W = 4;
static const u32 OUT_ALIGN_W = 8;
static const u32 OUT_ALIGN_H = 4;
static const u32 VF_ALIGN_W = 2;
static const char *qnames[IPU3_CSS_QUEUES] = {
[IPU3_CSS_QUEUE_IN] = "in",
[IPU3_CSS_QUEUE_PARAMS] = "params",
[IPU3_CSS_QUEUE_OUT] = "out",
[IPU3_CSS_QUEUE_VF] = "vf",
[IPU3_CSS_QUEUE_STAT_3A] = "3a",
};
static const char *rnames[IPU3_CSS_RECTS] = {
[IPU3_CSS_RECT_EFFECTIVE] = "effective resolution",
[IPU3_CSS_RECT_BDS] = "bayer-domain scaled resolution",
[IPU3_CSS_RECT_ENVELOPE] = "DVS envelope size",
[IPU3_CSS_RECT_GDC] = "GDC output res",
};
struct v4l2_rect r[IPU3_CSS_RECTS] = { };
struct v4l2_rect *const eff = &r[IPU3_CSS_RECT_EFFECTIVE];
struct v4l2_rect *const bds = &r[IPU3_CSS_RECT_BDS];
struct v4l2_rect *const env = &r[IPU3_CSS_RECT_ENVELOPE];
struct v4l2_rect *const gdc = &r[IPU3_CSS_RECT_GDC];
struct imgu_css_queue *q;
struct v4l2_pix_format_mplane *in, *out, *vf;
int i, s, ret;
q = kcalloc(IPU3_CSS_QUEUES, sizeof(struct imgu_css_queue), GFP_KERNEL);
if (!q)
return -ENOMEM;
in = &q[IPU3_CSS_QUEUE_IN].fmt.mpix;
out = &q[IPU3_CSS_QUEUE_OUT].fmt.mpix;
vf = &q[IPU3_CSS_QUEUE_VF].fmt.mpix;
/* Adjust all formats, get statistics buffer sizes and formats */
for (i = 0; i < IPU3_CSS_QUEUES; i++) {
if (fmts[i])
dev_dbg(css->dev, "%s %s: (%i,%i) fmt 0x%x\n", __func__,
qnames[i], fmts[i]->width, fmts[i]->height,
fmts[i]->pixelformat);
else
dev_dbg(css->dev, "%s %s: (not set)\n", __func__,
qnames[i]);
if (imgu_css_queue_init(&q[i], fmts[i],
IPU3_CSS_QUEUE_TO_FLAGS(i))) {
dev_notice(css->dev, "can not initialize queue %s\n",
qnames[i]);
ret = -EINVAL;
goto out;
}
}
for (i = 0; i < IPU3_CSS_RECTS; i++) {
if (rects[i]) {
dev_dbg(css->dev, "%s %s: (%i,%i)\n", __func__,
rnames[i], rects[i]->width, rects[i]->height);
r[i].width = rects[i]->width;
r[i].height = rects[i]->height;
} else {
dev_dbg(css->dev, "%s %s: (not set)\n", __func__,
rnames[i]);
}
/* For now, force known good resolutions */
r[i].left = 0;
r[i].top = 0;
}
/* Always require an input and an output; vf is valid only when out is also enabled */
if (!imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_IN]) ||
!imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_OUT])) {
dev_warn(css->dev, "required queues are disabled\n");
ret = -EINVAL;
goto out;
}
if (!imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_OUT])) {
out->width = in->width;
out->height = in->height;
}
if (eff->width <= 0 || eff->height <= 0) {
eff->width = in->width;
eff->height = in->height;
}
if (bds->width <= 0 || bds->height <= 0) {
bds->width = out->width;
bds->height = out->height;
}
if (gdc->width <= 0 || gdc->height <= 0) {
gdc->width = out->width;
gdc->height = out->height;
}
in->width = imgu_css_adjust(in->width, 1);
in->height = imgu_css_adjust(in->height, 1);
eff->width = imgu_css_adjust(eff->width, EFF_ALIGN_W);
eff->height = imgu_css_adjust(eff->height, 1);
bds->width = imgu_css_adjust(bds->width, BDS_ALIGN_W);
bds->height = imgu_css_adjust(bds->height, 1);
gdc->width = imgu_css_adjust(gdc->width, OUT_ALIGN_W);
gdc->height = imgu_css_adjust(gdc->height, OUT_ALIGN_H);
out->width = imgu_css_adjust(out->width, OUT_ALIGN_W);
out->height = imgu_css_adjust(out->height, OUT_ALIGN_H);
vf->width = imgu_css_adjust(vf->width, VF_ALIGN_W);
vf->height = imgu_css_adjust(vf->height, 1);
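/* The DVS envelope is the margin between the BDS and GDC resolutions, split evenly per side, but at least MIN_ENVELOPE */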
s = (bds->width - gdc->width) / 2;
env->width = s < MIN_ENVELOPE ? MIN_ENVELOPE : s;
s = (bds->height - gdc->height) / 2;
env->height = s < MIN_ENVELOPE ? MIN_ENVELOPE : s;
ret = imgu_css_find_binary(css, pipe, q, r);
if (ret < 0) {
dev_err(css->dev, "failed to find suitable binary\n");
ret = -EINVAL;
goto out;
}
css->pipes[pipe].bindex = ret;
dev_dbg(css->dev, "Binary index %d for pipe %d found.",
css->pipes[pipe].bindex, pipe);
/* Final adjustment and set back the queried formats */
for (i = 0; i < IPU3_CSS_QUEUES; i++) {
if (fmts[i]) {
if (imgu_css_queue_init(&q[i], &q[i].fmt.mpix,
IPU3_CSS_QUEUE_TO_FLAGS(i))) {
dev_err(css->dev,
"final resolution adjustment failed\n");
ret = -EINVAL;
goto out;
}
*fmts[i] = q[i].fmt.mpix;
}
}
for (i = 0; i < IPU3_CSS_RECTS; i++)
if (rects[i])
*rects[i] = r[i];
dev_dbg(css->dev,
"in(%u,%u) if(%u,%u) ds(%u,%u) gdc(%u,%u) out(%u,%u) vf(%u,%u)",
in->width, in->height, eff->width, eff->height,
bds->width, bds->height, gdc->width, gdc->height,
out->width, out->height, vf->width, vf->height);
ret = 0;
out:
kfree(q);
return ret;
}
int imgu_css_fmt_set(struct imgu_css *css,
struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
struct v4l2_rect *rects[IPU3_CSS_RECTS],
unsigned int pipe)
{
struct v4l2_rect rect_data[IPU3_CSS_RECTS];
struct v4l2_rect *all_rects[IPU3_CSS_RECTS];
int i, r;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
for (i = 0; i < IPU3_CSS_RECTS; i++) {
if (rects[i])
rect_data[i] = *rects[i];
else
memset(&rect_data[i], 0, sizeof(rect_data[i]));
all_rects[i] = &rect_data[i];
}
r = imgu_css_fmt_try(css, fmts, all_rects, pipe);
if (r < 0)
return r;
for (i = 0; i < IPU3_CSS_QUEUES; i++)
if (imgu_css_queue_init(&css_pipe->queue[i], fmts[i],
IPU3_CSS_QUEUE_TO_FLAGS(i)))
return -EINVAL;
for (i = 0; i < IPU3_CSS_RECTS; i++) {
css_pipe->rect[i] = rect_data[i];
if (rects[i])
*rects[i] = rect_data[i];
}
return 0;
}
int imgu_css_meta_fmt_set(struct v4l2_meta_format *fmt)
{
switch (fmt->dataformat) {
case V4L2_META_FMT_IPU3_PARAMS:
fmt->buffersize = sizeof(struct ipu3_uapi_params);
/*
* Sanity check for the parameter struct size. This must
* not change!
*/
BUILD_BUG_ON(sizeof(struct ipu3_uapi_params) != 39328);
break;
case V4L2_META_FMT_IPU3_STAT_3A:
fmt->buffersize = sizeof(struct ipu3_uapi_stats_3a);
break;
default:
return -EINVAL;
}
return 0;
}
/*
 * Queue the given buffer to the CSS. imgu_css_buf_prepare() must have been
 * called for the buffer first. May be called from interrupt context.
 * Returns 0 on success, -EBUSY if the buffer queue is full, or another
 * error code on error conditions.
 */
int imgu_css_buf_queue(struct imgu_css *css, unsigned int pipe,
struct imgu_css_buffer *b)
{
struct imgu_abi_buffer *abi_buf;
struct imgu_addr_t *buf_addr;
u32 data;
int r;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
if (!css->streaming)
return -EPROTO; /* CSS or buffer in wrong state */
if (b->queue >= IPU3_CSS_QUEUES || !imgu_css_queues[b->queue].qid)
return -EINVAL;
b->queue_pos = imgu_css_queue_pos(css, imgu_css_queues[b->queue].qid,
pipe);
if (b->queue_pos >= ARRAY_SIZE(css->pipes[pipe].abi_buffers[b->queue]))
return -EIO;
abi_buf = css->pipes[pipe].abi_buffers[b->queue][b->queue_pos].vaddr;
/* Fill struct abi_buffer for firmware */
memset(abi_buf, 0, sizeof(*abi_buf));
buf_addr = (void *)abi_buf + imgu_css_queues[b->queue].ptr_ofs;
*(imgu_addr_t *)buf_addr = b->daddr;
if (b->queue == IPU3_CSS_QUEUE_STAT_3A)
abi_buf->payload.s3a.data.dmem.s3a_tbl = b->daddr;
if (b->queue == IPU3_CSS_QUEUE_OUT)
abi_buf->payload.frame.padded_width =
css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad;
if (b->queue == IPU3_CSS_QUEUE_VF)
abi_buf->payload.frame.padded_width =
css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad;
spin_lock(&css_pipe->qlock);
list_add_tail(&b->list, &css_pipe->queue[b->queue].bufs);
spin_unlock(&css_pipe->qlock);
b->state = IPU3_CSS_BUFFER_QUEUED;
data = css->pipes[pipe].abi_buffers[b->queue][b->queue_pos].daddr;
r = imgu_css_queue_data(css, imgu_css_queues[b->queue].qid,
pipe, data);
if (r < 0)
goto queueing_failed;
data = IMGU_ABI_EVENT_BUFFER_ENQUEUED(pipe,
imgu_css_queues[b->queue].qid);
r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe, data);
if (r < 0)
goto queueing_failed;
dev_dbg(css->dev, "queued buffer %p to css queue %i in pipe %d\n",
b, b->queue, pipe);
return 0;
queueing_failed:
b->state = (r == -EBUSY || r == -EAGAIN) ?
IPU3_CSS_BUFFER_NEW : IPU3_CSS_BUFFER_FAILED;
list_del(&b->list);
return r;
}
/*
 * Get the next ready CSS buffer. Returns -EAGAIN, in which case the function
 * should be called again, or -EBUSY, which means that there are no more
 * buffers available. May be called from interrupt context.
 */
struct imgu_css_buffer *imgu_css_buf_dequeue(struct imgu_css *css)
{
static const unsigned char evtype_to_queue[] = {
[IMGU_ABI_EVTTYPE_INPUT_FRAME_DONE] = IPU3_CSS_QUEUE_IN,
[IMGU_ABI_EVTTYPE_OUT_FRAME_DONE] = IPU3_CSS_QUEUE_OUT,
[IMGU_ABI_EVTTYPE_VF_OUT_FRAME_DONE] = IPU3_CSS_QUEUE_VF,
[IMGU_ABI_EVTTYPE_3A_STATS_DONE] = IPU3_CSS_QUEUE_STAT_3A,
};
struct imgu_css_buffer *b = ERR_PTR(-EAGAIN);
u32 event, daddr;
int evtype, pipe, pipeid, queue, qid, r;
struct imgu_css_pipe *css_pipe;
if (!css->streaming)
return ERR_PTR(-EPROTO);
r = imgu_css_dequeue_data(css, IMGU_ABI_QUEUE_EVENT_ID, &event);
if (r < 0)
return ERR_PTR(r);
evtype = (event & IMGU_ABI_EVTTYPE_EVENT_MASK) >>
IMGU_ABI_EVTTYPE_EVENT_SHIFT;
switch (evtype) {
case IMGU_ABI_EVTTYPE_OUT_FRAME_DONE:
case IMGU_ABI_EVTTYPE_VF_OUT_FRAME_DONE:
case IMGU_ABI_EVTTYPE_3A_STATS_DONE:
case IMGU_ABI_EVTTYPE_INPUT_FRAME_DONE:
pipe = (event & IMGU_ABI_EVTTYPE_PIPE_MASK) >>
IMGU_ABI_EVTTYPE_PIPE_SHIFT;
pipeid = (event & IMGU_ABI_EVTTYPE_PIPEID_MASK) >>
IMGU_ABI_EVTTYPE_PIPEID_SHIFT;
queue = evtype_to_queue[evtype];
qid = imgu_css_queues[queue].qid;
if (pipe >= IMGU_MAX_PIPE_NUM) {
dev_err(css->dev, "Invalid pipe: %i\n", pipe);
return ERR_PTR(-EIO);
}
if (qid >= IMGU_ABI_QUEUE_NUM) {
dev_err(css->dev, "Invalid qid: %i\n", qid);
return ERR_PTR(-EIO);
}
css_pipe = &css->pipes[pipe];
dev_dbg(css->dev,
"event: buffer done 0x%x queue %i pipe %i pipeid %i\n",
event, queue, pipe, pipeid);
r = imgu_css_dequeue_data(css, qid, &daddr);
if (r < 0) {
dev_err(css->dev, "failed to dequeue buffer\n");
/* Force real error, not -EBUSY */
return ERR_PTR(-EIO);
}
r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
IMGU_ABI_EVENT_BUFFER_DEQUEUED(qid));
if (r < 0) {
dev_err(css->dev, "failed to queue event\n");
return ERR_PTR(-EIO);
}
spin_lock(&css_pipe->qlock);
if (list_empty(&css_pipe->queue[queue].bufs)) {
spin_unlock(&css_pipe->qlock);
dev_err(css->dev, "event on empty queue\n");
return ERR_PTR(-EIO);
}
b = list_first_entry(&css_pipe->queue[queue].bufs,
struct imgu_css_buffer, list);
if (queue != b->queue ||
daddr != css_pipe->abi_buffers
[b->queue][b->queue_pos].daddr) {
spin_unlock(&css_pipe->qlock);
dev_err(css->dev, "dequeued bad buffer 0x%x\n", daddr);
return ERR_PTR(-EIO);
}
dev_dbg(css->dev, "buffer 0x%8x done from pipe %d\n", daddr, pipe);
b->pipe = pipe;
b->state = IPU3_CSS_BUFFER_DONE;
list_del(&b->list);
spin_unlock(&css_pipe->qlock);
break;
case IMGU_ABI_EVTTYPE_PIPELINE_DONE:
pipe = (event & IMGU_ABI_EVTTYPE_PIPE_MASK) >>
IMGU_ABI_EVTTYPE_PIPE_SHIFT;
if (pipe >= IMGU_MAX_PIPE_NUM) {
dev_err(css->dev, "Invalid pipe: %i\n", pipe);
return ERR_PTR(-EIO);
}
css_pipe = &css->pipes[pipe];
dev_dbg(css->dev, "event: pipeline done 0x%8x for pipe %d\n",
event, pipe);
break;
case IMGU_ABI_EVTTYPE_TIMER:
r = imgu_css_dequeue_data(css, IMGU_ABI_QUEUE_EVENT_ID, &event);
if (r < 0)
return ERR_PTR(r);
if ((event & IMGU_ABI_EVTTYPE_EVENT_MASK) >>
IMGU_ABI_EVTTYPE_EVENT_SHIFT == IMGU_ABI_EVTTYPE_TIMER)
dev_dbg(css->dev, "event: timer\n");
else
dev_warn(css->dev, "half of timer event missing\n");
break;
case IMGU_ABI_EVTTYPE_FW_WARNING:
dev_warn(css->dev, "event: firmware warning 0x%x\n", event);
break;
case IMGU_ABI_EVTTYPE_FW_ASSERT:
dev_err(css->dev,
"event: firmware assert 0x%x module_id %i line_no %i\n",
event,
(event & IMGU_ABI_EVTTYPE_MODULEID_MASK) >>
IMGU_ABI_EVTTYPE_MODULEID_SHIFT,
swab16((event & IMGU_ABI_EVTTYPE_LINENO_MASK) >>
IMGU_ABI_EVTTYPE_LINENO_SHIFT));
break;
default:
dev_warn(css->dev, "received unknown event 0x%x\n", event);
}
return b;
}
/*
 * Get a new set of parameters from the pool and initialize them based on
 * the parameters params, gdc, and obgrid. Any of these may be NULL,
 * in which case the previously set parameters are used.
 * If parameters haven't been set previously, initialize from scratch.
 *
 * Return the index into css->parameter_set_info which holds the newly
 * created parameters, or a negative value on error.
 */
int imgu_css_set_parameters(struct imgu_css *css, unsigned int pipe,
struct ipu3_uapi_params *set_params)
{
static const unsigned int queue_id = IMGU_ABI_QUEUE_A_ID;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
const int stage = 0;
const struct imgu_fw_info *bi;
int obgrid_size;
unsigned int stripes, i;
struct ipu3_uapi_flags *use = set_params ? &set_params->use : NULL;
/* Destination buffers which are filled here */
struct imgu_abi_parameter_set_info *param_set;
struct imgu_abi_acc_param *acc = NULL;
struct imgu_abi_gdc_warp_param *gdc = NULL;
struct ipu3_uapi_obgrid_param *obgrid = NULL;
const struct imgu_css_map *map;
void *vmem0 = NULL;
void *dmem0 = NULL;
enum imgu_abi_memories m;
int r = -EBUSY;
if (!css->streaming)
return -EPROTO;
dev_dbg(css->dev, "%s for pipe %d", __func__, pipe);
bi = &css->fwp->binary_header[css_pipe->bindex];
obgrid_size = imgu_css_fw_obgrid_size(bi);
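/* A zero stripe count from the binary info means a single stripe */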
stripes = bi->info.isp.sp.iterator.num_stripes ? : 1;
imgu_css_pool_get(&css_pipe->pool.parameter_set_info);
param_set = imgu_css_pool_last(&css_pipe->pool.parameter_set_info,
0)->vaddr;
/* Get a new acc only if new parameters given, or none yet */
map = imgu_css_pool_last(&css_pipe->pool.acc, 0);
if (set_params || !map->vaddr) {
imgu_css_pool_get(&css_pipe->pool.acc);
map = imgu_css_pool_last(&css_pipe->pool.acc, 0);
acc = map->vaddr;
}
/* Get new VMEM0 only if needed, or none yet */
m = IMGU_ABI_MEM_ISP_VMEM0;
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
if (!map->vaddr || (set_params && (set_params->use.lin_vmem_params ||
set_params->use.tnr3_vmem_params ||
set_params->use.xnr3_vmem_params))) {
imgu_css_pool_get(&css_pipe->pool.binary_params_p[m]);
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
vmem0 = map->vaddr;
}
/* Get new DMEM0 only if needed, or none yet */
m = IMGU_ABI_MEM_ISP_DMEM0;
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
if (!map->vaddr || (set_params && (set_params->use.tnr3_dmem_params ||
set_params->use.xnr3_dmem_params))) {
imgu_css_pool_get(&css_pipe->pool.binary_params_p[m]);
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
dmem0 = map->vaddr;
}
/* Configure acc parameter cluster */
if (acc) {
/* Get the previously used acc parameters (acc_old) */
map = imgu_css_pool_last(&css_pipe->pool.acc, 1);
/* Configure with the user-supplied acc parameters, if any */
r = imgu_css_cfg_acc(css, pipe, use, acc, map->vaddr,
set_params ? &set_params->acc_param : NULL);
if (r < 0)
goto fail;
}
/* Configure late binding parameters */
if (vmem0) {
m = IMGU_ABI_MEM_ISP_VMEM0;
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 1);
r = imgu_css_cfg_vmem0(css, pipe, use, vmem0,
map->vaddr, set_params);
if (r < 0)
goto fail;
}
if (dmem0) {
m = IMGU_ABI_MEM_ISP_DMEM0;
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 1);
r = imgu_css_cfg_dmem0(css, pipe, use, dmem0,
map->vaddr, set_params);
if (r < 0)
goto fail;
}
/* Get a new gdc only if a new gdc is given, or none yet */
if (bi->info.isp.sp.enable.dvs_6axis) {
unsigned int a = IPU3_CSS_AUX_FRAME_REF;
unsigned int g = IPU3_CSS_RECT_GDC;
unsigned int e = IPU3_CSS_RECT_ENVELOPE;
map = imgu_css_pool_last(&css_pipe->pool.gdc, 0);
if (!map->vaddr) {
imgu_css_pool_get(&css_pipe->pool.gdc);
map = imgu_css_pool_last(&css_pipe->pool.gdc, 0);
gdc = map->vaddr;
imgu_css_cfg_gdc_table(map->vaddr,
css_pipe->aux_frames[a].bytesperline /
css_pipe->aux_frames[a].bytesperpixel,
css_pipe->aux_frames[a].height,
css_pipe->rect[g].width,
css_pipe->rect[g].height,
css_pipe->rect[e].width,
css_pipe->rect[e].height);
}
}
/* Get a new obgrid only if a new obgrid is given, or none yet */
map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0);
if (!map->vaddr || (set_params && set_params->use.obgrid_param)) {
imgu_css_pool_get(&css_pipe->pool.obgrid);
map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0);
obgrid = map->vaddr;
/* Configure optical black level grid (obgrid) */
if (set_params && set_params->use.obgrid_param)
for (i = 0; i < obgrid_size / sizeof(*obgrid); i++)
obgrid[i] = set_params->obgrid_param;
else
memset(obgrid, 0, obgrid_size);
}
/* Configure parameter set info, queued to `queue_id' */
memset(param_set, 0, sizeof(*param_set));
map = imgu_css_pool_last(&css_pipe->pool.acc, 0);
param_set->mem_map.acc_cluster_params_for_sp = map->daddr;
map = imgu_css_pool_last(&css_pipe->pool.gdc, 0);
param_set->mem_map.dvs_6axis_params_y = map->daddr;
for (i = 0; i < stripes; i++) {
map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0);
param_set->mem_map.obgrid_tbl[i] =
map->daddr + (obgrid_size / stripes) * i;
}
for (m = 0; m < IMGU_ABI_NUM_MEMORIES; m++) {
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
param_set->mem_map.isp_mem_param[stage][m] = map->daddr;
}
/* Then queue the new parameter buffer */
map = imgu_css_pool_last(&css_pipe->pool.parameter_set_info, 0);
r = imgu_css_queue_data(css, queue_id, pipe, map->daddr);
if (r < 0)
goto fail;
r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
IMGU_ABI_EVENT_BUFFER_ENQUEUED(pipe,
queue_id));
if (r < 0)
goto fail_no_put;
/* Finally dequeue all old parameter buffers */
do {
u32 daddr;
r = imgu_css_dequeue_data(css, queue_id, &daddr);
if (r == -EBUSY)
break;
if (r)
goto fail_no_put;
r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
IMGU_ABI_EVENT_BUFFER_DEQUEUED
(queue_id));
if (r < 0) {
dev_err(css->dev, "failed to queue parameter event\n");
goto fail_no_put;
}
} while (1);
return 0;
fail:
/*
* A failure, most likely the parameter queue was full.
* Return error but continue streaming. User can try submitting new
* parameters again later.
*/
imgu_css_pool_put(&css_pipe->pool.parameter_set_info);
if (acc)
imgu_css_pool_put(&css_pipe->pool.acc);
if (gdc)
imgu_css_pool_put(&css_pipe->pool.gdc);
if (obgrid)
imgu_css_pool_put(&css_pipe->pool.obgrid);
if (vmem0)
imgu_css_pool_put(
&css_pipe->pool.binary_params_p
[IMGU_ABI_MEM_ISP_VMEM0]);
if (dmem0)
imgu_css_pool_put(
&css_pipe->pool.binary_params_p
[IMGU_ABI_MEM_ISP_DMEM0]);
fail_no_put:
return r;
}
int imgu_css_irq_ack(struct imgu_css *css)
{
static const int NUM_SWIRQS = 3;
struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[0]];
void __iomem *const base = css->base;
u32 irq_status[IMGU_IRQCTRL_NUM];
int i;
u32 imgu_status = readl(base + IMGU_REG_INT_STATUS);
writel(imgu_status, base + IMGU_REG_INT_STATUS);
for (i = 0; i < IMGU_IRQCTRL_NUM; i++)
irq_status[i] = readl(base + IMGU_REG_IRQCTRL_STATUS(i));
for (i = 0; i < NUM_SWIRQS; i++) {
if (irq_status[IMGU_IRQCTRL_SP0] & IMGU_IRQCTRL_IRQ_SW_PIN(i)) {
/* SP SW interrupt */
u32 cnt = readl(base + IMGU_REG_SP_DMEM_BASE(0) +
bi->info.sp.output);
u32 val = readl(base + IMGU_REG_SP_DMEM_BASE(0) +
bi->info.sp.output + 4 + 4 * i);
dev_dbg(css->dev, "%s: swirq %i cnt %i val 0x%x\n",
__func__, i, cnt, val);
}
}
for (i = IMGU_IRQCTRL_NUM - 1; i >= 0; i--)
if (irq_status[i]) {
writel(irq_status[i], base + IMGU_REG_IRQCTRL_CLEAR(i));
/* Wait for write to complete */
readl(base + IMGU_REG_IRQCTRL_ENABLE(i));
}
dev_dbg(css->dev, "%s: imgu 0x%x main 0x%x sp0 0x%x sp1 0x%x\n",
__func__, imgu_status, irq_status[IMGU_IRQCTRL_MAIN],
irq_status[IMGU_IRQCTRL_SP0], irq_status[IMGU_IRQCTRL_SP1]);
if (!imgu_status && !irq_status[IMGU_IRQCTRL_MAIN])
return -ENOMSG;
return 0;
}
| linux-master | drivers/staging/media/ipu3/ipu3-css.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Intel Corporation
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include "ipu3.h"
#include "ipu3-dmamap.h"
/******************** v4l2_subdev_ops ********************/
#define IPU3_RUNNING_MODE_VIDEO 0
#define IPU3_RUNNING_MODE_STILL 1
static int imgu_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
struct imgu_v4l2_subdev,
subdev);
struct imgu_device *imgu = v4l2_get_subdevdata(sd);
struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[imgu_sd->pipe];
struct v4l2_rect try_crop = {
.top = 0,
.left = 0,
};
unsigned int i;
try_crop.width =
imgu_pipe->nodes[IMGU_NODE_IN].vdev_fmt.fmt.pix_mp.width;
try_crop.height =
imgu_pipe->nodes[IMGU_NODE_IN].vdev_fmt.fmt.pix_mp.height;
/* Initialize try_fmt */
for (i = 0; i < IMGU_NODE_NUM; i++) {
struct v4l2_mbus_framefmt *try_fmt =
v4l2_subdev_get_try_format(sd, fh->state, i);
try_fmt->width = try_crop.width;
try_fmt->height = try_crop.height;
try_fmt->code = imgu_pipe->nodes[i].pad_fmt.code;
try_fmt->field = V4L2_FIELD_NONE;
}
*v4l2_subdev_get_try_crop(sd, fh->state, IMGU_NODE_IN) = try_crop;
*v4l2_subdev_get_try_compose(sd, fh->state, IMGU_NODE_IN) = try_crop;
return 0;
}
static int imgu_subdev_s_stream(struct v4l2_subdev *sd, int enable)
{
int i;
unsigned int node;
int r = 0;
struct imgu_device *imgu = v4l2_get_subdevdata(sd);
struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
struct imgu_v4l2_subdev,
subdev);
unsigned int pipe = imgu_sd->pipe;
struct device *dev = &imgu->pci_dev->dev;
struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
struct imgu_css_pipe *css_pipe = &imgu->css.pipes[pipe];
struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
dev_dbg(dev, "%s %d for pipe %u", __func__, enable, pipe);
/* Grab the control on stream-on; on stream-off just ungrab it and return */
v4l2_ctrl_grab(imgu_sd->ctrl, enable);
if (!enable) {
imgu_sd->active = false;
return 0;
}
for (i = 0; i < IMGU_NODE_NUM; i++)
imgu_pipe->queue_enabled[i] = imgu_pipe->nodes[i].enabled;
/* The parameters queue is handled separately */
imgu_pipe->queue_enabled[IPU3_CSS_QUEUE_PARAMS] = false;
/* Initialize CSS formats */
for (i = 0; i < IPU3_CSS_QUEUES; i++) {
node = imgu_map_node(imgu, i);
/* No need to reconfigure the meta nodes */
if (node == IMGU_NODE_STAT_3A || node == IMGU_NODE_PARAMS)
continue;
fmts[i] = imgu_pipe->queue_enabled[node] ?
&imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp : NULL;
}
/* Enable VF output only when VF queue requested by user */
css_pipe->vf_output_en = false;
if (imgu_pipe->nodes[IMGU_NODE_VF].enabled)
css_pipe->vf_output_en = true;
if (atomic_read(&imgu_sd->running_mode) == IPU3_RUNNING_MODE_VIDEO)
css_pipe->pipe_id = IPU3_CSS_PIPE_ID_VIDEO;
else
css_pipe->pipe_id = IPU3_CSS_PIPE_ID_CAPTURE;
dev_dbg(dev, "IPU3 pipe %u pipe_id %u", pipe, css_pipe->pipe_id);
rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_sd->rect.eff;
rects[IPU3_CSS_RECT_BDS] = &imgu_sd->rect.bds;
rects[IPU3_CSS_RECT_GDC] = &imgu_sd->rect.gdc;
r = imgu_css_fmt_set(&imgu->css, fmts, rects, pipe);
if (r) {
dev_err(dev, "failed to set initial formats pipe %u with (%d)",
pipe, r);
return r;
}
imgu_sd->active = true;
return 0;
}
static int imgu_subdev_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imgu_device *imgu = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf;
struct imgu_media_pipe *imgu_pipe;
u32 pad = fmt->pad;
struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
struct imgu_v4l2_subdev,
subdev);
unsigned int pipe = imgu_sd->pipe;
imgu_pipe = &imgu->imgu_pipe[pipe];
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
fmt->format = imgu_pipe->nodes[pad].pad_fmt;
} else {
mf = v4l2_subdev_get_try_format(sd, sd_state, pad);
fmt->format = *mf;
}
return 0;
}
static int imgu_subdev_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imgu_media_pipe *imgu_pipe;
struct imgu_device *imgu = v4l2_get_subdevdata(sd);
struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
struct imgu_v4l2_subdev,
subdev);
struct v4l2_mbus_framefmt *mf;
u32 pad = fmt->pad;
unsigned int pipe = imgu_sd->pipe;
dev_dbg(&imgu->pci_dev->dev, "set subdev %u pad %u fmt to [%ux%u]",
pipe, pad, fmt->format.width, fmt->format.height);
imgu_pipe = &imgu->imgu_pipe[pipe];
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
mf = v4l2_subdev_get_try_format(sd, sd_state, pad);
else
mf = &imgu_pipe->nodes[pad].pad_fmt;
fmt->format.code = mf->code;
/* Clamp the width and height based on the hardware capabilities */
if (imgu_sd->subdev_pads[pad].flags & MEDIA_PAD_FL_SOURCE) {
fmt->format.width = clamp(fmt->format.width,
IPU3_OUTPUT_MIN_WIDTH,
IPU3_OUTPUT_MAX_WIDTH);
fmt->format.height = clamp(fmt->format.height,
IPU3_OUTPUT_MIN_HEIGHT,
IPU3_OUTPUT_MAX_HEIGHT);
} else {
fmt->format.width = clamp(fmt->format.width,
IPU3_INPUT_MIN_WIDTH,
IPU3_INPUT_MAX_WIDTH);
fmt->format.height = clamp(fmt->format.height,
IPU3_INPUT_MIN_HEIGHT,
IPU3_INPUT_MAX_HEIGHT);
}
*mf = fmt->format;
return 0;
}
static struct v4l2_rect *
imgu_subdev_get_crop(struct imgu_v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state, unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_crop(&sd->subdev, sd_state, pad);
else
return &sd->rect.eff;
}
static struct v4l2_rect *
imgu_subdev_get_compose(struct imgu_v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state, unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_compose(&sd->subdev, sd_state, pad);
else
return &sd->rect.bds;
}
static int imgu_subdev_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct imgu_v4l2_subdev *imgu_sd =
container_of(sd, struct imgu_v4l2_subdev, subdev);
if (sel->pad != IMGU_NODE_IN)
return -EINVAL;
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
sel->r = *imgu_subdev_get_crop(imgu_sd, sd_state, sel->pad,
sel->which);
return 0;
case V4L2_SEL_TGT_COMPOSE:
sel->r = *imgu_subdev_get_compose(imgu_sd, sd_state, sel->pad,
sel->which);
return 0;
default:
return -EINVAL;
}
}
static int imgu_subdev_set_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct imgu_device *imgu = v4l2_get_subdevdata(sd);
struct imgu_v4l2_subdev *imgu_sd =
container_of(sd, struct imgu_v4l2_subdev, subdev);
struct v4l2_rect *rect;
dev_dbg(&imgu->pci_dev->dev,
"set subdev %u sel which %u target 0x%4x rect [%ux%u]",
imgu_sd->pipe, sel->which, sel->target,
sel->r.width, sel->r.height);
if (sel->pad != IMGU_NODE_IN)
return -EINVAL;
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
rect = imgu_subdev_get_crop(imgu_sd, sd_state, sel->pad,
sel->which);
break;
case V4L2_SEL_TGT_COMPOSE:
rect = imgu_subdev_get_compose(imgu_sd, sd_state, sel->pad,
sel->which);
break;
default:
return -EINVAL;
}
*rect = sel->r;
return 0;
}
/******************** media_entity_operations ********************/
static int imgu_link_setup(struct media_entity *entity,
const struct media_pad *local,
const struct media_pad *remote, u32 flags)
{
struct imgu_media_pipe *imgu_pipe;
struct v4l2_subdev *sd = container_of(entity, struct v4l2_subdev,
entity);
struct imgu_device *imgu = v4l2_get_subdevdata(sd);
struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
struct imgu_v4l2_subdev,
subdev);
unsigned int pipe = imgu_sd->pipe;
u32 pad = local->index;
WARN_ON(pad >= IMGU_NODE_NUM);
dev_dbg(&imgu->pci_dev->dev, "pipe %u pad %u is %s", pipe, pad,
flags & MEDIA_LNK_FL_ENABLED ? "enabled" : "disabled");
imgu_pipe = &imgu->imgu_pipe[pipe];
imgu_pipe->nodes[pad].enabled = flags & MEDIA_LNK_FL_ENABLED;
/* Only the input node link determines whether the whole pipe is enabled */
if (pad != IMGU_NODE_IN)
return 0;
if (flags & MEDIA_LNK_FL_ENABLED)
__set_bit(pipe, imgu->css.enabled_pipes);
else
__clear_bit(pipe, imgu->css.enabled_pipes);
dev_dbg(&imgu->pci_dev->dev, "pipe %u is %s", pipe,
flags & MEDIA_LNK_FL_ENABLED ? "enabled" : "disabled");
return 0;
}
/******************** vb2_ops ********************/
static int imgu_vb2_buf_init(struct vb2_buffer *vb)
{
struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0);
struct imgu_device *imgu = vb2_get_drv_priv(vb->vb2_queue);
struct imgu_buffer *buf = container_of(vb,
struct imgu_buffer, vid_buf.vbb.vb2_buf);
struct imgu_video_device *node =
container_of(vb->vb2_queue, struct imgu_video_device, vbq);
unsigned int queue = imgu_node_to_queue(node->id);
if (queue == IPU3_CSS_QUEUE_PARAMS)
return 0;
return imgu_dmamap_map_sg(imgu, sg->sgl, sg->nents, &buf->map);
}
/* Called when each buffer is freed */
static void imgu_vb2_buf_cleanup(struct vb2_buffer *vb)
{
struct imgu_device *imgu = vb2_get_drv_priv(vb->vb2_queue);
struct imgu_buffer *buf = container_of(vb,
struct imgu_buffer, vid_buf.vbb.vb2_buf);
struct imgu_video_device *node =
container_of(vb->vb2_queue, struct imgu_video_device, vbq);
unsigned int queue = imgu_node_to_queue(node->id);
if (queue == IPU3_CSS_QUEUE_PARAMS)
return;
imgu_dmamap_unmap(imgu, &buf->map);
}
/* Transfer buffer ownership to the driver */
static void imgu_vb2_buf_queue(struct vb2_buffer *vb)
{
struct imgu_device *imgu = vb2_get_drv_priv(vb->vb2_queue);
struct imgu_video_device *node =
container_of(vb->vb2_queue, struct imgu_video_device, vbq);
unsigned int queue = imgu_node_to_queue(node->id);
struct imgu_buffer *buf = container_of(vb, struct imgu_buffer,
vid_buf.vbb.vb2_buf);
unsigned long need_bytes;
unsigned long payload = vb2_get_plane_payload(vb, 0);
if (vb->vb2_queue->type == V4L2_BUF_TYPE_META_CAPTURE ||
vb->vb2_queue->type == V4L2_BUF_TYPE_META_OUTPUT)
need_bytes = node->vdev_fmt.fmt.meta.buffersize;
else
need_bytes = node->vdev_fmt.fmt.pix_mp.plane_fmt[0].sizeimage;
if (queue == IPU3_CSS_QUEUE_PARAMS && payload && payload < need_bytes) {
dev_err(&imgu->pci_dev->dev, "invalid data size for params.");
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
return;
}
mutex_lock(&imgu->lock);
if (queue != IPU3_CSS_QUEUE_PARAMS)
imgu_css_buf_init(&buf->css_buf, queue, buf->map.daddr);
list_add_tail(&buf->vid_buf.list, &node->buffers);
mutex_unlock(&imgu->lock);
vb2_set_plane_payload(vb, 0, need_bytes);
mutex_lock(&imgu->streaming_lock);
if (imgu->streaming)
imgu_queue_buffers(imgu, false, node->pipe);
mutex_unlock(&imgu->streaming_lock);
dev_dbg(&imgu->pci_dev->dev, "%s for pipe %u node %u", __func__,
node->pipe, node->id);
}
static int imgu_vb2_queue_setup(struct vb2_queue *vq,
unsigned int *num_buffers,
unsigned int *num_planes,
unsigned int sizes[],
struct device *alloc_devs[])
{
struct imgu_device *imgu = vb2_get_drv_priv(vq);
struct imgu_video_device *node =
container_of(vq, struct imgu_video_device, vbq);
const struct v4l2_format *fmt = &node->vdev_fmt;
unsigned int size;
*num_buffers = clamp_val(*num_buffers, 1, VB2_MAX_FRAME);
alloc_devs[0] = &imgu->pci_dev->dev;
if (vq->type == V4L2_BUF_TYPE_META_CAPTURE ||
vq->type == V4L2_BUF_TYPE_META_OUTPUT)
size = fmt->fmt.meta.buffersize;
else
size = fmt->fmt.pix_mp.plane_fmt[0].sizeimage;
if (*num_planes) {
if (sizes[0] < size)
return -EINVAL;
size = sizes[0];
}
*num_planes = 1;
sizes[0] = size;
/* Initialize buffer queue */
INIT_LIST_HEAD(&node->buffers);
return 0;
}
/* Check whether all enabled video nodes are streaming, ignoring the given exception */
static bool imgu_all_nodes_streaming(struct imgu_device *imgu,
struct imgu_video_device *except)
{
unsigned int i, pipe, p;
struct imgu_video_device *node;
struct device *dev = &imgu->pci_dev->dev;
pipe = except->pipe;
if (!test_bit(pipe, imgu->css.enabled_pipes)) {
dev_warn(&imgu->pci_dev->dev,
"pipe %u link is not ready yet", pipe);
return false;
}
for_each_set_bit(p, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
for (i = 0; i < IMGU_NODE_NUM; i++) {
node = &imgu->imgu_pipe[p].nodes[i];
dev_dbg(dev, "%s pipe %u queue %u name %s enabled = %u",
__func__, p, i, node->name, node->enabled);
if (node == except)
continue;
if (node->enabled && !vb2_start_streaming_called(&node->vbq))
return false;
}
}
return true;
}
static void imgu_return_all_buffers(struct imgu_device *imgu,
struct imgu_video_device *node,
enum vb2_buffer_state state)
{
struct imgu_vb2_buffer *b, *b0;
/* Return all buffers */
mutex_lock(&imgu->lock);
list_for_each_entry_safe(b, b0, &node->buffers, list) {
list_del(&b->list);
vb2_buffer_done(&b->vbb.vb2_buf, state);
}
mutex_unlock(&imgu->lock);
}
static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct imgu_media_pipe *imgu_pipe;
struct imgu_device *imgu = vb2_get_drv_priv(vq);
struct device *dev = &imgu->pci_dev->dev;
struct imgu_video_device *node =
container_of(vq, struct imgu_video_device, vbq);
int r;
unsigned int pipe;
dev_dbg(dev, "%s node name %s pipe %u id %u", __func__,
node->name, node->pipe, node->id);
mutex_lock(&imgu->streaming_lock);
if (imgu->streaming) {
r = -EBUSY;
mutex_unlock(&imgu->streaming_lock);
goto fail_return_bufs;
}
mutex_unlock(&imgu->streaming_lock);
if (!node->enabled) {
dev_err(dev, "IMGU node is not enabled");
r = -EINVAL;
goto fail_return_bufs;
}
pipe = node->pipe;
imgu_pipe = &imgu->imgu_pipe[pipe];
atomic_set(&node->sequence, 0);
r = video_device_pipeline_start(&node->vdev, &imgu_pipe->pipeline);
if (r < 0)
goto fail_return_bufs;
if (!imgu_all_nodes_streaming(imgu, node))
return 0;
for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
r = v4l2_subdev_call(&imgu->imgu_pipe[pipe].imgu_sd.subdev,
video, s_stream, 1);
if (r < 0)
goto fail_stop_pipeline;
}
/* Start streaming of the whole pipeline now */
dev_dbg(dev, "IMGU streaming is ready to start");
mutex_lock(&imgu->streaming_lock);
r = imgu_s_stream(imgu, true);
if (!r)
imgu->streaming = true;
mutex_unlock(&imgu->streaming_lock);
return 0;
fail_stop_pipeline:
video_device_pipeline_stop(&node->vdev);
fail_return_bufs:
imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_QUEUED);
return r;
}
static void imgu_vb2_stop_streaming(struct vb2_queue *vq)
{
struct imgu_media_pipe *imgu_pipe;
struct imgu_device *imgu = vb2_get_drv_priv(vq);
struct device *dev = &imgu->pci_dev->dev;
struct imgu_video_device *node =
container_of(vq, struct imgu_video_device, vbq);
int r;
unsigned int pipe;
WARN_ON(!node->enabled);
pipe = node->pipe;
dev_dbg(dev, "Try to stream off node [%u][%u]", pipe, node->id);
imgu_pipe = &imgu->imgu_pipe[pipe];
r = v4l2_subdev_call(&imgu_pipe->imgu_sd.subdev, video, s_stream, 0);
if (r)
dev_err(&imgu->pci_dev->dev,
"failed to stop subdev streaming\n");
mutex_lock(&imgu->streaming_lock);
/* Was this the first node with streaming disabled? */
if (imgu->streaming && imgu_all_nodes_streaming(imgu, node)) {
/* Yes, really stop streaming now */
dev_dbg(dev, "IMGU streaming is ready to stop");
r = imgu_s_stream(imgu, false);
if (!r)
imgu->streaming = false;
}
imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR);
mutex_unlock(&imgu->streaming_lock);
video_device_pipeline_stop(&node->vdev);
}
/******************** v4l2_ioctl_ops ********************/
#define VID_CAPTURE 0
#define VID_OUTPUT 1
#define DEF_VID_CAPTURE 0
#define DEF_VID_OUTPUT 1
struct imgu_fmt {
u32 fourcc;
u16 type; /* VID_CAPTURE or VID_OUTPUT not both */
};
/* Format descriptions for capture and preview */
static const struct imgu_fmt formats[] = {
{ V4L2_PIX_FMT_NV12, VID_CAPTURE },
{ V4L2_PIX_FMT_IPU3_SGRBG10, VID_OUTPUT },
{ V4L2_PIX_FMT_IPU3_SBGGR10, VID_OUTPUT },
{ V4L2_PIX_FMT_IPU3_SGBRG10, VID_OUTPUT },
{ V4L2_PIX_FMT_IPU3_SRGGB10, VID_OUTPUT },
};
/* Find the first matching format; return the default if none is found */
static const struct imgu_fmt *find_format(struct v4l2_format *f, u32 type)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (formats[i].fourcc == f->fmt.pix_mp.pixelformat &&
formats[i].type == type)
return &formats[i];
}
return type == VID_CAPTURE ? &formats[DEF_VID_CAPTURE] :
&formats[DEF_VID_OUTPUT];
}
static int imgu_vidioc_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
struct imgu_device *imgu = video_drvdata(file);
strscpy(cap->driver, IMGU_NAME, sizeof(cap->driver));
strscpy(cap->card, IMGU_NAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
pci_name(imgu->pci_dev));
return 0;
}
static int enum_fmts(struct v4l2_fmtdesc *f, u32 type)
{
unsigned int i, j;
if (f->mbus_code != 0 && f->mbus_code != MEDIA_BUS_FMT_FIXED)
return -EINVAL;
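/* Find the f->index'th format of the requested type */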
for (i = j = 0; i < ARRAY_SIZE(formats); ++i) {
if (formats[i].type == type) {
if (j == f->index)
break;
++j;
}
}
if (i < ARRAY_SIZE(formats)) {
f->pixelformat = formats[i].fourcc;
return 0;
}
return -EINVAL;
}
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
return -EINVAL;
return enum_fmts(f, VID_CAPTURE);
}
static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return -EINVAL;
return enum_fmts(f, VID_OUTPUT);
}
/* Always propagate the format from the CIO2 subdev forward */
static int imgu_vidioc_g_fmt(struct file *file, void *fh,
struct v4l2_format *f)
{
struct imgu_video_device *node = file_to_intel_imgu_node(file);
f->fmt = node->vdev_fmt.fmt;
return 0;
}
/*
* Set input/output format. Unless it is just a try, this also resets
* selections (i.e. effective and BDS resolutions) to defaults.
*/
static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
struct v4l2_format *f, bool try)
{
struct device *dev = &imgu->pci_dev->dev;
struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
struct v4l2_mbus_framefmt pad_fmt;
unsigned int i, css_q;
int ret;
struct imgu_css_pipe *css_pipe = &imgu->css.pipes[pipe];
struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
struct imgu_v4l2_subdev *imgu_sd = &imgu_pipe->imgu_sd;
dev_dbg(dev, "set fmt node [%u][%u](try = %u)", pipe, node, try);
for (i = 0; i < IMGU_NODE_NUM; i++)
dev_dbg(dev, "IMGU pipe %u node %u enabled = %u",
pipe, i, imgu_pipe->nodes[i].enabled);
if (imgu_pipe->nodes[IMGU_NODE_VF].enabled)
css_pipe->vf_output_en = true;
if (atomic_read(&imgu_sd->running_mode) == IPU3_RUNNING_MODE_VIDEO)
css_pipe->pipe_id = IPU3_CSS_PIPE_ID_VIDEO;
else
css_pipe->pipe_id = IPU3_CSS_PIPE_ID_CAPTURE;
dev_dbg(dev, "IPU3 pipe %u pipe_id = %u", pipe, css_pipe->pipe_id);
css_q = imgu_node_to_queue(node);
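/* Collect a pixel format for every CSS queue; the queue being set gets the user-supplied format */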
for (i = 0; i < IPU3_CSS_QUEUES; i++) {
unsigned int inode = imgu_map_node(imgu, i);
/* Skip the meta nodes */
if (inode == IMGU_NODE_STAT_3A || inode == IMGU_NODE_PARAMS)
continue;
/* CSS expects some format on OUT queue */
if (i != IPU3_CSS_QUEUE_OUT &&
!imgu_pipe->nodes[inode].enabled && !try) {
fmts[i] = NULL;
continue;
}
if (i == css_q) {
fmts[i] = &f->fmt.pix_mp;
continue;
}
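/* For TRY, work on copies so the current node formats stay untouched */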
if (try) {
fmts[i] = kmemdup(&imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp,
sizeof(struct v4l2_pix_format_mplane),
GFP_KERNEL);
if (!fmts[i]) {
ret = -ENOMEM;
goto out;
}
} else {
fmts[i] = &imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp;
}
}
if (!try) {
/* Effective and BDS resolutions were set earlier via imgu_s_sel */
rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_sd->rect.eff;
rects[IPU3_CSS_RECT_BDS] = &imgu_sd->rect.bds;
rects[IPU3_CSS_RECT_GDC] = &imgu_sd->rect.gdc;
/* Assume the pad format was already set via the subdev's s_fmt */
pad_fmt = imgu_pipe->nodes[IMGU_NODE_IN].pad_fmt;
rects[IPU3_CSS_RECT_GDC]->width = pad_fmt.width;
rects[IPU3_CSS_RECT_GDC]->height = pad_fmt.height;
}
if (!fmts[css_q]) {
ret = -EINVAL;
goto out;
}
if (try)
ret = imgu_css_fmt_try(&imgu->css, fmts, rects, pipe);
else
ret = imgu_css_fmt_set(&imgu->css, fmts, rects, pipe);
/* On success, ret is the index of the binary in the firmware blob */
if (ret < 0)
goto out;
/*
* The CSS layer does not copy the user-supplied format back into the
* node before this function returns success, so store it here.
*/
if (!try)
imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp = f->fmt.pix_mp;
out:
if (try) {
for (i = 0; i < IPU3_CSS_QUEUES; i++)
if (i != css_q)
kfree(fmts[i]);
}
return ret;
}
static int imgu_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
const struct imgu_fmt *fmt;
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
fmt = find_format(f, VID_CAPTURE);
else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
fmt = find_format(f, VID_OUTPUT);
else
return -EINVAL;
pixm->pixelformat = fmt->fourcc;
return 0;
}
static int imgu_vidioc_try_fmt(struct file *file, void *fh,
struct v4l2_format *f)
{
struct imgu_device *imgu = video_drvdata(file);
struct device *dev = &imgu->pci_dev->dev;
struct imgu_video_device *node = file_to_intel_imgu_node(file);
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
int r;
dev_dbg(dev, "%s [%ux%u] for node %u\n", __func__,
pix_mp->width, pix_mp->height, node->id);
r = imgu_try_fmt(file, fh, f);
if (r)
return r;
return imgu_fmt(imgu, node->pipe, node->id, f, true);
}
static int imgu_vidioc_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct imgu_device *imgu = video_drvdata(file);
struct device *dev = &imgu->pci_dev->dev;
struct imgu_video_device *node = file_to_intel_imgu_node(file);
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
int r;
dev_dbg(dev, "%s [%ux%u] for node %u\n", __func__,
pix_mp->width, pix_mp->height, node->id);
r = imgu_try_fmt(file, fh, f);
if (r)
return r;
return imgu_fmt(imgu, node->pipe, node->id, f, false);
}
struct imgu_meta_fmt {
__u32 fourcc;
char *name;
};
/* From drivers/media/v4l2-core/v4l2-ioctl.c */
static const struct imgu_meta_fmt meta_fmts[] = {
{ V4L2_META_FMT_IPU3_PARAMS, "IPU3 processing parameters" },
{ V4L2_META_FMT_IPU3_STAT_3A, "IPU3 3A statistics" },
};
static int imgu_meta_enum_format(struct file *file, void *fh,
struct v4l2_fmtdesc *fmt)
{
struct imgu_video_device *node = file_to_intel_imgu_node(file);
unsigned int i = fmt->type == V4L2_BUF_TYPE_META_OUTPUT ? 0 : 1;
/* Each node is dedicated to only one meta format */
if (fmt->index > 0 || fmt->type != node->vbq.type)
return -EINVAL;
if (fmt->mbus_code != 0 && fmt->mbus_code != MEDIA_BUS_FMT_FIXED)
return -EINVAL;
strscpy(fmt->description, meta_fmts[i].name, sizeof(fmt->description));
fmt->pixelformat = meta_fmts[i].fourcc;
return 0;
}
static int imgu_vidioc_g_meta_fmt(struct file *file, void *fh,
struct v4l2_format *f)
{
struct imgu_video_device *node = file_to_intel_imgu_node(file);
if (f->type != node->vbq.type)
return -EINVAL;
f->fmt = node->vdev_fmt.fmt;
return 0;
}
/******************** function pointers ********************/
static const struct v4l2_subdev_internal_ops imgu_subdev_internal_ops = {
.open = imgu_subdev_open,
};
static const struct v4l2_subdev_core_ops imgu_subdev_core_ops = {
.subscribe_event = v4l2_ctrl_subdev_subscribe_event,
.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
static const struct v4l2_subdev_video_ops imgu_subdev_video_ops = {
.s_stream = imgu_subdev_s_stream,
};
static const struct v4l2_subdev_pad_ops imgu_subdev_pad_ops = {
.link_validate = v4l2_subdev_link_validate_default,
.get_fmt = imgu_subdev_get_fmt,
.set_fmt = imgu_subdev_set_fmt,
.get_selection = imgu_subdev_get_selection,
.set_selection = imgu_subdev_set_selection,
};
static const struct v4l2_subdev_ops imgu_subdev_ops = {
.core = &imgu_subdev_core_ops,
.video = &imgu_subdev_video_ops,
.pad = &imgu_subdev_pad_ops,
};
static const struct media_entity_operations imgu_media_ops = {
.link_setup = imgu_link_setup,
.link_validate = v4l2_subdev_link_validate,
};
/****************** vb2_ops of the Q ********************/
static const struct vb2_ops imgu_vb2_ops = {
.buf_init = imgu_vb2_buf_init,
.buf_cleanup = imgu_vb2_buf_cleanup,
.buf_queue = imgu_vb2_buf_queue,
.queue_setup = imgu_vb2_queue_setup,
.start_streaming = imgu_vb2_start_streaming,
.stop_streaming = imgu_vb2_stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
/****************** v4l2_file_operations *****************/
static const struct v4l2_file_operations imgu_v4l2_fops = {
.unlocked_ioctl = video_ioctl2,
.open = v4l2_fh_open,
.release = vb2_fop_release,
.poll = vb2_fop_poll,
.mmap = vb2_fop_mmap,
};
/******************** v4l2_ioctl_ops ********************/
static const struct v4l2_ioctl_ops imgu_v4l2_ioctl_ops = {
.vidioc_querycap = imgu_vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap_mplane = imgu_vidioc_g_fmt,
.vidioc_s_fmt_vid_cap_mplane = imgu_vidioc_s_fmt,
.vidioc_try_fmt_vid_cap_mplane = imgu_vidioc_try_fmt,
.vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
.vidioc_g_fmt_vid_out_mplane = imgu_vidioc_g_fmt,
.vidioc_s_fmt_vid_out_mplane = imgu_vidioc_s_fmt,
.vidioc_try_fmt_vid_out_mplane = imgu_vidioc_try_fmt,
/* buffer queue management */
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_expbuf = vb2_ioctl_expbuf,
};
static const struct v4l2_ioctl_ops imgu_v4l2_meta_ioctl_ops = {
.vidioc_querycap = imgu_vidioc_querycap,
/* meta capture */
.vidioc_enum_fmt_meta_cap = imgu_meta_enum_format,
.vidioc_g_fmt_meta_cap = imgu_vidioc_g_meta_fmt,
.vidioc_s_fmt_meta_cap = imgu_vidioc_g_meta_fmt,
.vidioc_try_fmt_meta_cap = imgu_vidioc_g_meta_fmt,
/* meta output */
.vidioc_enum_fmt_meta_out = imgu_meta_enum_format,
.vidioc_g_fmt_meta_out = imgu_vidioc_g_meta_fmt,
.vidioc_s_fmt_meta_out = imgu_vidioc_g_meta_fmt,
.vidioc_try_fmt_meta_out = imgu_vidioc_g_meta_fmt,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_expbuf = vb2_ioctl_expbuf,
};
static int imgu_sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct imgu_v4l2_subdev *imgu_sd =
container_of(ctrl->handler, struct imgu_v4l2_subdev, ctrl_handler);
struct imgu_device *imgu = v4l2_get_subdevdata(&imgu_sd->subdev);
struct device *dev = &imgu->pci_dev->dev;
dev_dbg(dev, "set val %d to ctrl 0x%8x for subdev %u",
ctrl->val, ctrl->id, imgu_sd->pipe);
switch (ctrl->id) {
case V4L2_CID_INTEL_IPU3_MODE:
atomic_set(&imgu_sd->running_mode, ctrl->val);
return 0;
default:
return -EINVAL;
}
}
static const struct v4l2_ctrl_ops imgu_subdev_ctrl_ops = {
.s_ctrl = imgu_sd_s_ctrl,
};
static const char * const imgu_ctrl_mode_strings[] = {
"Video mode",
"Still mode",
};
static const struct v4l2_ctrl_config imgu_subdev_ctrl_mode = {
.ops = &imgu_subdev_ctrl_ops,
.id = V4L2_CID_INTEL_IPU3_MODE,
.name = "IPU3 Pipe Mode",
.type = V4L2_CTRL_TYPE_MENU,
.max = ARRAY_SIZE(imgu_ctrl_mode_strings) - 1,
.def = IPU3_RUNNING_MODE_VIDEO,
.qmenu = imgu_ctrl_mode_strings,
};
/******************** Framework registration ********************/
/* Helper function to configure a node's video properties */
static void imgu_node_to_v4l2(u32 node, struct video_device *vdev,
struct v4l2_format *f)
{
u32 cap;
/* Should not happen */
WARN_ON(node >= IMGU_NODE_NUM);
switch (node) {
case IMGU_NODE_IN:
cap = V4L2_CAP_VIDEO_OUTPUT_MPLANE;
f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
vdev->ioctl_ops = &imgu_v4l2_ioctl_ops;
break;
case IMGU_NODE_PARAMS:
cap = V4L2_CAP_META_OUTPUT;
f->type = V4L2_BUF_TYPE_META_OUTPUT;
f->fmt.meta.dataformat = V4L2_META_FMT_IPU3_PARAMS;
vdev->ioctl_ops = &imgu_v4l2_meta_ioctl_ops;
imgu_css_meta_fmt_set(&f->fmt.meta);
break;
case IMGU_NODE_STAT_3A:
cap = V4L2_CAP_META_CAPTURE;
f->type = V4L2_BUF_TYPE_META_CAPTURE;
f->fmt.meta.dataformat = V4L2_META_FMT_IPU3_STAT_3A;
vdev->ioctl_ops = &imgu_v4l2_meta_ioctl_ops;
imgu_css_meta_fmt_set(&f->fmt.meta);
break;
default:
cap = V4L2_CAP_VIDEO_CAPTURE_MPLANE;
f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
vdev->ioctl_ops = &imgu_v4l2_ioctl_ops;
}
vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_IO_MC | cap;
}
static int imgu_v4l2_subdev_register(struct imgu_device *imgu,
struct imgu_v4l2_subdev *imgu_sd,
unsigned int pipe)
{
int i, r;
struct v4l2_ctrl_handler *hdl = &imgu_sd->ctrl_handler;
struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
/* Initialize subdev media entity */
r = media_entity_pads_init(&imgu_sd->subdev.entity, IMGU_NODE_NUM,
imgu_sd->subdev_pads);
if (r) {
dev_err(&imgu->pci_dev->dev,
"failed initialize subdev media entity (%d)\n", r);
return r;
}
imgu_sd->subdev.entity.ops = &imgu_media_ops;
for (i = 0; i < IMGU_NODE_NUM; i++) {
imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ?
MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
}
/* Initialize subdev */
v4l2_subdev_init(&imgu_sd->subdev, &imgu_subdev_ops);
imgu_sd->subdev.entity.function = MEDIA_ENT_F_PROC_VIDEO_STATISTICS;
imgu_sd->subdev.internal_ops = &imgu_subdev_internal_ops;
imgu_sd->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE |
V4L2_SUBDEV_FL_HAS_EVENTS;
snprintf(imgu_sd->subdev.name, sizeof(imgu_sd->subdev.name),
"%s %u", IMGU_NAME, pipe);
v4l2_set_subdevdata(&imgu_sd->subdev, imgu);
atomic_set(&imgu_sd->running_mode, IPU3_RUNNING_MODE_VIDEO);
v4l2_ctrl_handler_init(hdl, 1);
imgu_sd->subdev.ctrl_handler = hdl;
imgu_sd->ctrl = v4l2_ctrl_new_custom(hdl, &imgu_subdev_ctrl_mode, NULL);
if (hdl->error) {
r = hdl->error;
dev_err(&imgu->pci_dev->dev,
"failed to create subdev v4l2 ctrl with err %d", r);
goto fail_subdev;
}
r = v4l2_device_register_subdev(&imgu->v4l2_dev, &imgu_sd->subdev);
if (r) {
dev_err(&imgu->pci_dev->dev,
"failed initialize subdev (%d)\n", r);
goto fail_subdev;
}
imgu_sd->pipe = pipe;
return 0;
fail_subdev:
v4l2_ctrl_handler_free(imgu_sd->subdev.ctrl_handler);
media_entity_cleanup(&imgu_sd->subdev.entity);
return r;
}
static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
int node_num)
{
int r;
u32 flags;
struct v4l2_mbus_framefmt def_bus_fmt = { 0 };
struct v4l2_pix_format_mplane def_pix_fmt = { 0 };
struct device *dev = &imgu->pci_dev->dev;
struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
struct v4l2_subdev *sd = &imgu_pipe->imgu_sd.subdev;
struct imgu_video_device *node = &imgu_pipe->nodes[node_num];
struct video_device *vdev = &node->vdev;
struct vb2_queue *vbq = &node->vbq;
/* Initialize formats to default values */
def_bus_fmt.width = 1920;
def_bus_fmt.height = 1080;
def_bus_fmt.code = MEDIA_BUS_FMT_FIXED;
def_bus_fmt.field = V4L2_FIELD_NONE;
def_bus_fmt.colorspace = V4L2_COLORSPACE_RAW;
def_bus_fmt.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
def_bus_fmt.quantization = V4L2_QUANTIZATION_DEFAULT;
def_bus_fmt.xfer_func = V4L2_XFER_FUNC_DEFAULT;
def_pix_fmt.width = def_bus_fmt.width;
def_pix_fmt.height = def_bus_fmt.height;
def_pix_fmt.field = def_bus_fmt.field;
def_pix_fmt.num_planes = 1;
def_pix_fmt.plane_fmt[0].bytesperline =
imgu_bytesperline(def_pix_fmt.width,
IMGU_ABI_FRAME_FORMAT_RAW_PACKED);
def_pix_fmt.plane_fmt[0].sizeimage =
def_pix_fmt.height * def_pix_fmt.plane_fmt[0].bytesperline;
def_pix_fmt.flags = 0;
def_pix_fmt.colorspace = def_bus_fmt.colorspace;
def_pix_fmt.ycbcr_enc = def_bus_fmt.ycbcr_enc;
def_pix_fmt.quantization = def_bus_fmt.quantization;
def_pix_fmt.xfer_func = def_bus_fmt.xfer_func;
/* Initialize miscellaneous variables */
mutex_init(&node->lock);
INIT_LIST_HEAD(&node->buffers);
/* Initialize formats to default values */
node->pad_fmt = def_bus_fmt;
node->id = node_num;
node->pipe = pipe;
imgu_node_to_v4l2(node_num, vdev, &node->vdev_fmt);
if (node->vdev_fmt.type ==
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
node->vdev_fmt.type ==
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
def_pix_fmt.pixelformat = node->output ?
V4L2_PIX_FMT_IPU3_SGRBG10 :
V4L2_PIX_FMT_NV12;
node->vdev_fmt.fmt.pix_mp = def_pix_fmt;
}
/* Initialize media entities */
r = media_entity_pads_init(&vdev->entity, 1, &node->vdev_pad);
if (r) {
dev_err(dev, "failed initialize media entity (%d)\n", r);
mutex_destroy(&node->lock);
return r;
}
node->vdev_pad.flags = node->output ?
MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
vdev->entity.ops = NULL;
/* Initialize vbq */
vbq->type = node->vdev_fmt.type;
vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
vbq->ops = &imgu_vb2_ops;
vbq->mem_ops = &vb2_dma_sg_memops;
if (imgu->buf_struct_size <= 0)
imgu->buf_struct_size =
sizeof(struct imgu_vb2_buffer);
vbq->buf_struct_size = imgu->buf_struct_size;
vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
/* can streamon w/o buffers */
vbq->min_buffers_needed = 0;
vbq->drv_priv = imgu;
vbq->lock = &node->lock;
r = vb2_queue_init(vbq);
if (r) {
dev_err(dev, "failed to initialize video queue (%d)", r);
media_entity_cleanup(&vdev->entity);
return r;
}
/* Initialize vdev */
snprintf(vdev->name, sizeof(vdev->name), "%s %u %s",
IMGU_NAME, pipe, node->name);
vdev->release = video_device_release_empty;
vdev->fops = &imgu_v4l2_fops;
vdev->lock = &node->lock;
vdev->v4l2_dev = &imgu->v4l2_dev;
vdev->queue = &node->vbq;
vdev->vfl_dir = node->output ? VFL_DIR_TX : VFL_DIR_RX;
video_set_drvdata(vdev, imgu);
r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (r) {
dev_err(dev, "failed to register video device (%d)", r);
media_entity_cleanup(&vdev->entity);
return r;
}
/* Create link between video node and the subdev pad */
flags = 0;
if (node->enabled)
flags |= MEDIA_LNK_FL_ENABLED;
if (node->output) {
r = media_create_pad_link(&vdev->entity, 0, &sd->entity,
node_num, flags);
} else {
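/* The main output node is always enabled and its link is immutable */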
if (node->id == IMGU_NODE_OUT) {
flags |= MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
node->enabled = true;
}
r = media_create_pad_link(&sd->entity, node_num, &vdev->entity,
0, flags);
}
if (r) {
dev_err(dev, "failed to create pad link (%d)", r);
video_unregister_device(vdev);
return r;
}
return 0;
}
static void imgu_v4l2_nodes_cleanup_pipe(struct imgu_device *imgu,
unsigned int pipe, int node)
{
int i;
struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
for (i = 0; i < node; i++) {
video_unregister_device(&imgu_pipe->nodes[i].vdev);
media_entity_cleanup(&imgu_pipe->nodes[i].vdev.entity);
mutex_destroy(&imgu_pipe->nodes[i].lock);
}
}
static int imgu_v4l2_nodes_setup_pipe(struct imgu_device *imgu, int pipe)
{
int i;
for (i = 0; i < IMGU_NODE_NUM; i++) {
int r = imgu_v4l2_node_setup(imgu, pipe, i);
if (r) {
imgu_v4l2_nodes_cleanup_pipe(imgu, pipe, i);
return r;
}
}
return 0;
}
static void imgu_v4l2_subdev_cleanup(struct imgu_device *imgu, unsigned int i)
{
struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[i];
v4l2_device_unregister_subdev(&imgu_pipe->imgu_sd.subdev);
v4l2_ctrl_handler_free(imgu_pipe->imgu_sd.subdev.ctrl_handler);
media_entity_cleanup(&imgu_pipe->imgu_sd.subdev.entity);
}
static void imgu_v4l2_cleanup_pipes(struct imgu_device *imgu, unsigned int pipe)
{
int i;
for (i = 0; i < pipe; i++) {
imgu_v4l2_nodes_cleanup_pipe(imgu, i, IMGU_NODE_NUM);
imgu_v4l2_subdev_cleanup(imgu, i);
}
}
static int imgu_v4l2_register_pipes(struct imgu_device *imgu)
{
struct imgu_media_pipe *imgu_pipe;
int i, r;
for (i = 0; i < IMGU_MAX_PIPE_NUM; i++) {
imgu_pipe = &imgu->imgu_pipe[i];
r = imgu_v4l2_subdev_register(imgu, &imgu_pipe->imgu_sd, i);
if (r) {
dev_err(&imgu->pci_dev->dev,
"failed to register subdev%u ret (%d)\n", i, r);
goto pipes_cleanup;
}
r = imgu_v4l2_nodes_setup_pipe(imgu, i);
if (r) {
imgu_v4l2_subdev_cleanup(imgu, i);
goto pipes_cleanup;
}
}
return 0;
pipes_cleanup:
imgu_v4l2_cleanup_pipes(imgu, i);
return r;
}
int imgu_v4l2_register(struct imgu_device *imgu)
{
int r;
/* Initialize miscellaneous variables */
imgu->streaming = false;
/* Set up media device */
media_device_pci_init(&imgu->media_dev, imgu->pci_dev, IMGU_NAME);
/* Set up v4l2 device */
imgu->v4l2_dev.mdev = &imgu->media_dev;
imgu->v4l2_dev.ctrl_handler = NULL;
r = v4l2_device_register(&imgu->pci_dev->dev, &imgu->v4l2_dev);
if (r) {
dev_err(&imgu->pci_dev->dev,
"failed to register V4L2 device (%d)\n", r);
goto fail_v4l2_dev;
}
r = imgu_v4l2_register_pipes(imgu);
if (r) {
dev_err(&imgu->pci_dev->dev,
"failed to register pipes (%d)\n", r);
goto fail_v4l2_pipes;
}
r = v4l2_device_register_subdev_nodes(&imgu->v4l2_dev);
if (r) {
dev_err(&imgu->pci_dev->dev,
"failed to register subdevs (%d)\n", r);
goto fail_subdevs;
}
r = media_device_register(&imgu->media_dev);
if (r) {
dev_err(&imgu->pci_dev->dev,
"failed to register media device (%d)\n", r);
goto fail_subdevs;
}
return 0;
fail_subdevs:
imgu_v4l2_cleanup_pipes(imgu, IMGU_MAX_PIPE_NUM);
fail_v4l2_pipes:
v4l2_device_unregister(&imgu->v4l2_dev);
fail_v4l2_dev:
media_device_cleanup(&imgu->media_dev);
return r;
}
int imgu_v4l2_unregister(struct imgu_device *imgu)
{
media_device_unregister(&imgu->media_dev);
imgu_v4l2_cleanup_pipes(imgu, IMGU_MAX_PIPE_NUM);
v4l2_device_unregister(&imgu->v4l2_dev);
media_device_cleanup(&imgu->media_dev);
return 0;
}
void imgu_v4l2_buffer_done(struct vb2_buffer *vb,
enum vb2_buffer_state state)
{
struct imgu_vb2_buffer *b =
container_of(vb, struct imgu_vb2_buffer, vbb.vb2_buf);
list_del(&b->list);
vb2_buffer_done(&b->vbb.vb2_buf, state);
}
| linux-master | drivers/staging/media/ipu3/ipu3-v4l2.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Intel Corporation
#include <linux/device.h>
#include "ipu3.h"
#include "ipu3-css-pool.h"
#include "ipu3-dmamap.h"
int imgu_css_dma_buffer_resize(struct imgu_device *imgu,
struct imgu_css_map *map, size_t size)
{
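/* Reallocate only if the current buffer is too small for the new size */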
if (map->size < size && map->vaddr) {
dev_warn(&imgu->pci_dev->dev, "dma buf resized from %zu to %zu",
map->size, size);
imgu_dmamap_free(imgu, map);
if (!imgu_dmamap_alloc(imgu, map, size))
return -ENOMEM;
}
return 0;
}
void imgu_css_pool_cleanup(struct imgu_device *imgu, struct imgu_css_pool *pool)
{
unsigned int i;
for (i = 0; i < IPU3_CSS_POOL_SIZE; i++)
imgu_dmamap_free(imgu, &pool->entry[i].param);
}
int imgu_css_pool_init(struct imgu_device *imgu, struct imgu_css_pool *pool,
size_t size)
{
unsigned int i;
for (i = 0; i < IPU3_CSS_POOL_SIZE; i++) {
pool->entry[i].valid = false;
if (size == 0) {
pool->entry[i].param.vaddr = NULL;
continue;
}
if (!imgu_dmamap_alloc(imgu, &pool->entry[i].param, size))
goto fail;
}
pool->last = IPU3_CSS_POOL_SIZE;
return 0;
fail:
imgu_css_pool_cleanup(imgu, pool);
return -ENOMEM;
}
/*
* Allocate a new parameter by recycling the oldest entry in the pool.
*/
void imgu_css_pool_get(struct imgu_css_pool *pool)
{
/* Get the oldest entry */
u32 n = (pool->last + 1) % IPU3_CSS_POOL_SIZE;
pool->entry[n].valid = true;
pool->last = n;
}
/*
* Undo, for all practical purposes, the effect of pool_get().
*/
void imgu_css_pool_put(struct imgu_css_pool *pool)
{
pool->entry[pool->last].valid = false;
pool->last = (pool->last + IPU3_CSS_POOL_SIZE - 1) % IPU3_CSS_POOL_SIZE;
}
/**
* imgu_css_pool_last - Retrieve the nth pool entry from last
*
* @pool: a pointer to &struct imgu_css_pool.
* @n: the distance to the last index.
*
* Returns:
* The nth entry from last or null map to indicate no frame stored.
*/
const struct imgu_css_map *
imgu_css_pool_last(struct imgu_css_pool *pool, unsigned int n)
{
static const struct imgu_css_map null_map = { 0 };
int i = (pool->last + IPU3_CSS_POOL_SIZE - n) % IPU3_CSS_POOL_SIZE;
WARN_ON(n >= IPU3_CSS_POOL_SIZE);
if (!pool->entry[i].valid)
return &null_map;
return &pool->entry[i].param;
}
| linux-master | drivers/staging/media/ipu3/ipu3-css-pool.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/host1x.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <media/v4l2-event.h>
#include "video.h"
static void tegra_v4l2_dev_release(struct v4l2_device *v4l2_dev)
{
struct tegra_video_device *vid;
vid = container_of(v4l2_dev, struct tegra_video_device, v4l2_dev);
/* cleanup channels here as all video device nodes are released */
tegra_channels_cleanup(vid->vi);
v4l2_device_unregister(v4l2_dev);
media_device_unregister(&vid->media_dev);
media_device_cleanup(&vid->media_dev);
kfree(vid);
}
static void tegra_v4l2_dev_notify(struct v4l2_subdev *sd,
unsigned int notification, void *arg)
{
struct tegra_vi_channel *chan;
const struct v4l2_event *ev = arg;
if (notification != V4L2_DEVICE_NOTIFY_EVENT)
return;
chan = v4l2_get_subdev_hostdata(sd);
v4l2_event_queue(&chan->video, arg);
if (ev->type == V4L2_EVENT_SOURCE_CHANGE && vb2_is_streaming(&chan->queue))
vb2_queue_error(&chan->queue);
}
static int host1x_video_probe(struct host1x_device *dev)
{
struct tegra_video_device *vid;
int ret;
vid = kzalloc(sizeof(*vid), GFP_KERNEL);
if (!vid)
return -ENOMEM;
dev_set_drvdata(&dev->dev, vid);
vid->media_dev.dev = &dev->dev;
strscpy(vid->media_dev.model, "NVIDIA Tegra Video Input Device",
sizeof(vid->media_dev.model));
media_device_init(&vid->media_dev);
ret = media_device_register(&vid->media_dev);
if (ret < 0) {
dev_err(&dev->dev,
"failed to register media device: %d\n", ret);
goto cleanup;
}
vid->v4l2_dev.mdev = &vid->media_dev;
vid->v4l2_dev.release = tegra_v4l2_dev_release;
vid->v4l2_dev.notify = tegra_v4l2_dev_notify;
ret = v4l2_device_register(&dev->dev, &vid->v4l2_dev);
if (ret < 0) {
dev_err(&dev->dev,
"V4L2 device registration failed: %d\n", ret);
goto unregister_media;
}
ret = host1x_device_init(dev);
if (ret < 0)
goto unregister_v4l2;
if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)) {
/*
* Both vi and csi channels are available now.
* Register v4l2 nodes and create media links for TPG.
*/
ret = tegra_v4l2_nodes_setup_tpg(vid);
if (ret < 0) {
dev_err(&dev->dev,
"failed to setup tpg graph: %d\n", ret);
goto device_exit;
}
}
return 0;
device_exit:
host1x_device_exit(dev);
/* The vi exit op does not clean up channels, so clean them up here */
tegra_channels_cleanup(vid->vi);
unregister_v4l2:
v4l2_device_unregister(&vid->v4l2_dev);
unregister_media:
media_device_unregister(&vid->media_dev);
cleanup:
media_device_cleanup(&vid->media_dev);
kfree(vid);
return ret;
}
static int host1x_video_remove(struct host1x_device *dev)
{
struct tegra_video_device *vid = dev_get_drvdata(&dev->dev);
if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
tegra_v4l2_nodes_cleanup_tpg(vid);
host1x_device_exit(dev);
/* This calls v4l2_dev release callback on last reference */
v4l2_device_put(&vid->v4l2_dev);
return 0;
}
static const struct of_device_id host1x_video_subdevs[] = {
#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
{ .compatible = "nvidia,tegra20-vip", },
{ .compatible = "nvidia,tegra20-vi", },
#endif
#if defined(CONFIG_ARCH_TEGRA_210_SOC)
{ .compatible = "nvidia,tegra210-csi", },
{ .compatible = "nvidia,tegra210-vi", },
#endif
{ }
};
static struct host1x_driver host1x_video_driver = {
.driver = {
.name = "tegra-video",
},
.probe = host1x_video_probe,
.remove = host1x_video_remove,
.subdevs = host1x_video_subdevs,
};
static struct platform_driver * const drivers[] = {
&tegra_csi_driver,
&tegra_vip_driver,
&tegra_vi_driver,
};
static int __init host1x_video_init(void)
{
int err;
err = host1x_driver_register(&host1x_video_driver);
if (err < 0)
return err;
err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
if (err < 0)
goto unregister_host1x;
return 0;
unregister_host1x:
host1x_driver_unregister(&host1x_video_driver);
return err;
}
module_init(host1x_video_init);
static void __exit host1x_video_exit(void)
{
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
host1x_driver_unregister(&host1x_video_driver);
}
module_exit(host1x_video_exit);
MODULE_AUTHOR("Sowjanya Komatineni <[email protected]>");
MODULE_DESCRIPTION("NVIDIA Tegra Host1x Video driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/staging/media/tegra-video/video.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/bitmap.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/host1x.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-contig.h>
#include <soc/tegra/pmc.h>
#include "vi.h"
#include "video.h"
#define MAX_CID_CONTROLS 3
/**
* struct tegra_vi_graph_entity - Entity in the video graph
*
* @asd: subdev asynchronous registration information
* @entity: media entity from the corresponding V4L2 subdev
* @subdev: V4L2 subdev
*/
struct tegra_vi_graph_entity {
struct v4l2_async_connection asd;
struct media_entity *entity;
struct v4l2_subdev *subdev;
};
static inline struct tegra_vi *
host1x_client_to_vi(struct host1x_client *client)
{
return container_of(client, struct tegra_vi, client);
}
static inline struct tegra_channel_buffer *
to_tegra_channel_buffer(struct vb2_v4l2_buffer *vb)
{
return container_of(vb, struct tegra_channel_buffer, buf);
}
static inline struct tegra_vi_graph_entity *
to_tegra_vi_graph_entity(struct v4l2_async_connection *asd)
{
return container_of(asd, struct tegra_vi_graph_entity, asd);
}
static int tegra_get_format_idx_by_code(struct tegra_vi *vi,
unsigned int code,
unsigned int offset)
{
unsigned int i;
for (i = offset; i < vi->soc->nformats; ++i) {
if (vi->soc->video_formats[i].code == code)
return i;
}
return -1;
}
static u32 tegra_get_format_fourcc_by_idx(struct tegra_vi *vi,
unsigned int index)
{
if (index >= vi->soc->nformats)
return -EINVAL;
return vi->soc->video_formats[index].fourcc;
}
static const struct tegra_video_format *
tegra_get_format_by_fourcc(struct tegra_vi *vi, u32 fourcc)
{
unsigned int i;
for (i = 0; i < vi->soc->nformats; ++i) {
if (vi->soc->video_formats[i].fourcc == fourcc)
return &vi->soc->video_formats[i];
}
return NULL;
}
/*
* videobuf2 queue operations
*/
static int tegra_channel_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers,
unsigned int *nplanes,
unsigned int sizes[],
struct device *alloc_devs[])
{
struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
if (*nplanes)
return sizes[0] < chan->format.sizeimage ? -EINVAL : 0;
*nplanes = 1;
sizes[0] = chan->format.sizeimage;
alloc_devs[0] = chan->vi->dev;
if (chan->vi->ops->channel_queue_setup)
chan->vi->ops->channel_queue_setup(chan);
return 0;
}
static int tegra_channel_buffer_prepare(struct vb2_buffer *vb)
{
struct tegra_vi_channel *chan = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct tegra_channel_buffer *buf = to_tegra_channel_buffer(vbuf);
unsigned long size = chan->format.sizeimage;
if (vb2_plane_size(vb, 0) < size) {
v4l2_err(chan->video.v4l2_dev,
"buffer too small (%lu < %lu)\n",
vb2_plane_size(vb, 0), size);
return -EINVAL;
}
vb2_set_plane_payload(vb, 0, size);
buf->chan = chan;
buf->addr = vb2_dma_contig_plane_dma_addr(vb, 0);
return 0;
}
static void tegra_channel_buffer_queue(struct vb2_buffer *vb)
{
struct tegra_vi_channel *chan = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct tegra_channel_buffer *buf = to_tegra_channel_buffer(vbuf);
/* put buffer into the capture queue */
spin_lock(&chan->start_lock);
list_add_tail(&buf->queue, &chan->capture);
spin_unlock(&chan->start_lock);
/* wake up the kthread that handles capture */
wake_up_interruptible(&chan->start_wait);
}
struct v4l2_subdev *
tegra_channel_get_remote_csi_subdev(struct tegra_vi_channel *chan)
{
struct media_pad *pad;
pad = media_pad_remote_pad_first(&chan->pad);
if (!pad)
return NULL;
return media_entity_to_v4l2_subdev(pad->entity);
}
/*
* Walk up the chain until the initial source (e.g. image sensor)
*/
struct v4l2_subdev *
tegra_channel_get_remote_source_subdev(struct tegra_vi_channel *chan)
{
struct media_pad *pad;
struct v4l2_subdev *subdev;
struct media_entity *entity;
subdev = tegra_channel_get_remote_csi_subdev(chan);
if (!subdev)
return NULL;
pad = &subdev->entity.pads[0];
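/* Walk upstream across sink pads until the original source subdev is reached */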
while (!(pad->flags & MEDIA_PAD_FL_SOURCE)) {
pad = media_pad_remote_pad_first(pad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
break;
entity = pad->entity;
pad = &entity->pads[0];
subdev = media_entity_to_v4l2_subdev(entity);
}
return subdev;
}
static int tegra_channel_enable_stream(struct tegra_vi_channel *chan)
{
struct v4l2_subdev *subdev;
int ret;
subdev = tegra_channel_get_remote_csi_subdev(chan);
ret = v4l2_subdev_call(subdev, video, s_stream, true);
if (ret < 0 && ret != -ENOIOCTLCMD)
return ret;
return 0;
}
static int tegra_channel_disable_stream(struct tegra_vi_channel *chan)
{
struct v4l2_subdev *subdev;
int ret;
subdev = tegra_channel_get_remote_csi_subdev(chan);
ret = v4l2_subdev_call(subdev, video, s_stream, false);
if (ret < 0 && ret != -ENOIOCTLCMD)
return ret;
return 0;
}
int tegra_channel_set_stream(struct tegra_vi_channel *chan, bool on)
{
int ret;
if (on)
ret = tegra_channel_enable_stream(chan);
else
ret = tegra_channel_disable_stream(chan);
return ret;
}
void tegra_channel_release_buffers(struct tegra_vi_channel *chan,
enum vb2_buffer_state state)
{
struct tegra_channel_buffer *buf, *nbuf;
spin_lock(&chan->start_lock);
list_for_each_entry_safe(buf, nbuf, &chan->capture, queue) {
vb2_buffer_done(&buf->buf.vb2_buf, state);
list_del(&buf->queue);
}
spin_unlock(&chan->start_lock);
spin_lock(&chan->done_lock);
list_for_each_entry_safe(buf, nbuf, &chan->done, queue) {
vb2_buffer_done(&buf->buf.vb2_buf, state);
list_del(&buf->queue);
}
spin_unlock(&chan->done_lock);
}
static int tegra_channel_start_streaming(struct vb2_queue *vq, u32 count)
{
struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
int ret;
ret = pm_runtime_resume_and_get(chan->vi->dev);
if (ret < 0) {
dev_err(chan->vi->dev, "failed to get runtime PM: %d\n", ret);
return ret;
}
ret = chan->vi->ops->vi_start_streaming(vq, count);
if (ret < 0)
pm_runtime_put(chan->vi->dev);
return ret;
}
static void tegra_channel_stop_streaming(struct vb2_queue *vq)
{
struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
chan->vi->ops->vi_stop_streaming(vq);
pm_runtime_put(chan->vi->dev);
}
static const struct vb2_ops tegra_channel_queue_qops = {
.queue_setup = tegra_channel_queue_setup,
.buf_prepare = tegra_channel_buffer_prepare,
.buf_queue = tegra_channel_buffer_queue,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
.start_streaming = tegra_channel_start_streaming,
.stop_streaming = tegra_channel_stop_streaming,
};
/*
* V4L2 ioctl operations
*/
static int tegra_channel_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
struct tegra_vi_channel *chan = video_drvdata(file);
strscpy(cap->driver, "tegra-video", sizeof(cap->driver));
strscpy(cap->card, chan->video.name, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
dev_name(chan->vi->dev));
return 0;
}
static int tegra_channel_g_parm(struct file *file, void *fh,
struct v4l2_streamparm *a)
{
struct tegra_vi_channel *chan = video_drvdata(file);
struct v4l2_subdev *subdev;
subdev = tegra_channel_get_remote_source_subdev(chan);
return v4l2_g_parm_cap(&chan->video, subdev, a);
}
static int tegra_channel_s_parm(struct file *file, void *fh,
struct v4l2_streamparm *a)
{
struct tegra_vi_channel *chan = video_drvdata(file);
struct v4l2_subdev *subdev;
subdev = tegra_channel_get_remote_source_subdev(chan);
return v4l2_s_parm_cap(&chan->video, subdev, a);
}
static int tegra_channel_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *sizes)
{
int ret;
struct tegra_vi_channel *chan = video_drvdata(file);
struct v4l2_subdev *subdev;
const struct tegra_video_format *fmtinfo;
struct v4l2_subdev_frame_size_enum fse = {
.index = sizes->index,
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
fmtinfo = tegra_get_format_by_fourcc(chan->vi, sizes->pixel_format);
if (!fmtinfo)
return -EINVAL;
fse.code = fmtinfo->code;
subdev = tegra_channel_get_remote_source_subdev(chan);
ret = v4l2_subdev_call(subdev, pad, enum_frame_size, NULL, &fse);
if (ret)
return ret;
sizes->type = V4L2_FRMSIZE_TYPE_DISCRETE;
sizes->discrete.width = fse.max_width;
sizes->discrete.height = fse.max_height;
return 0;
}
static int tegra_channel_enum_frameintervals(struct file *file, void *fh,
struct v4l2_frmivalenum *ivals)
{
int ret;
struct tegra_vi_channel *chan = video_drvdata(file);
struct v4l2_subdev *subdev;
const struct tegra_video_format *fmtinfo;
struct v4l2_subdev_frame_interval_enum fie = {
.index = ivals->index,
.width = ivals->width,
.height = ivals->height,
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
fmtinfo = tegra_get_format_by_fourcc(chan->vi, ivals->pixel_format);
if (!fmtinfo)
return -EINVAL;
fie.code = fmtinfo->code;
subdev = tegra_channel_get_remote_source_subdev(chan);
ret = v4l2_subdev_call(subdev, pad, enum_frame_interval, NULL, &fie);
if (ret)
return ret;
ivals->type = V4L2_FRMIVAL_TYPE_DISCRETE;
ivals->discrete.numerator = fie.interval.numerator;
ivals->discrete.denominator = fie.interval.denominator;
return 0;
}
static int tegra_channel_enum_format(struct file *file, void *fh,
struct v4l2_fmtdesc *f)
{
struct tegra_vi_channel *chan = video_drvdata(file);
unsigned int index = 0, i;
unsigned long *fmts_bitmap = chan->tpg_fmts_bitmap;
if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
fmts_bitmap = chan->fmts_bitmap;
if (f->index >= bitmap_weight(fmts_bitmap, MAX_FORMAT_NUM))
return -EINVAL;
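/* Find the f->index'th set bit in the formats bitmap */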
for (i = 0; i < f->index + 1; i++, index++)
index = find_next_bit(fmts_bitmap, MAX_FORMAT_NUM, index);
f->pixelformat = tegra_get_format_fourcc_by_idx(chan->vi, index - 1);
return 0;
}
static int tegra_channel_get_format(struct file *file, void *fh,
struct v4l2_format *format)
{
struct tegra_vi_channel *chan = video_drvdata(file);
format->fmt.pix = chan->format;
return 0;
}
static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
struct v4l2_pix_format *pix)
{
const struct tegra_video_format *fmtinfo;
static struct lock_class_key key;
struct v4l2_subdev *subdev;
struct v4l2_subdev_format fmt = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
struct v4l2_subdev_state *sd_state;
struct v4l2_subdev_frame_size_enum fse = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
struct v4l2_subdev_selection sdsel = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
.target = V4L2_SEL_TGT_CROP_BOUNDS,
};
int ret;
subdev = tegra_channel_get_remote_source_subdev(chan);
if (!subdev)
return -ENODEV;
/*
* FIXME: Drop this call, drivers are not supposed to use
* __v4l2_subdev_state_alloc().
*/
sd_state = __v4l2_subdev_state_alloc(subdev, "tegra:state->lock",
&key);
if (IS_ERR(sd_state))
return PTR_ERR(sd_state);
/*
* Retrieve the format information and, if the requested format isn't
* supported, keep the current format.
*/
fmtinfo = tegra_get_format_by_fourcc(chan->vi, pix->pixelformat);
if (!fmtinfo) {
pix->pixelformat = chan->format.pixelformat;
pix->colorspace = chan->format.colorspace;
fmtinfo = tegra_get_format_by_fourcc(chan->vi,
pix->pixelformat);
}
pix->field = V4L2_FIELD_NONE;
fmt.pad = 0;
v4l2_fill_mbus_format(&fmt.format, pix, fmtinfo->code);
/*
* Attempt to obtain the format size from subdev.
* If not available, try to get crop boundary from subdev.
*/
fse.code = fmtinfo->code;
ret = v4l2_subdev_call(subdev, pad, enum_frame_size, sd_state, &fse);
if (ret) {
if (!v4l2_subdev_has_op(subdev, pad, get_selection)) {
sd_state->pads->try_crop.width = 0;
sd_state->pads->try_crop.height = 0;
} else {
ret = v4l2_subdev_call(subdev, pad, get_selection,
NULL, &sdsel);
if (ret)
return -EINVAL;
sd_state->pads->try_crop.width = sdsel.r.width;
sd_state->pads->try_crop.height = sdsel.r.height;
}
} else {
sd_state->pads->try_crop.width = fse.max_width;
sd_state->pads->try_crop.height = fse.max_height;
}
ret = v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &fmt);
if (ret < 0)
return ret;
v4l2_fill_pix_format(pix, &fmt.format);
chan->vi->ops->vi_fmt_align(pix, fmtinfo->bpp);
__v4l2_subdev_state_free(sd_state);
return 0;
}
static int tegra_channel_try_format(struct file *file, void *fh,
struct v4l2_format *format)
{
struct tegra_vi_channel *chan = video_drvdata(file);
return __tegra_channel_try_format(chan, &format->fmt.pix);
}
static void tegra_channel_update_gangports(struct tegra_vi_channel *chan)
{
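/* A single port is enough for widths up to 1920 pixels; gang all ports beyond that */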
if (chan->format.width <= 1920)
chan->numgangports = 1;
else
chan->numgangports = chan->totalports;
}
static int tegra_channel_set_format(struct file *file, void *fh,
struct v4l2_format *format)
{
struct tegra_vi_channel *chan = video_drvdata(file);
const struct tegra_video_format *fmtinfo;
struct v4l2_subdev_format fmt = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
struct v4l2_subdev *subdev;
struct v4l2_pix_format *pix = &format->fmt.pix;
int ret;
if (vb2_is_busy(&chan->queue))
return -EBUSY;
/* get supported format by try_fmt */
ret = __tegra_channel_try_format(chan, pix);
if (ret)
return ret;
fmtinfo = tegra_get_format_by_fourcc(chan->vi, pix->pixelformat);
fmt.pad = 0;
v4l2_fill_mbus_format(&fmt.format, pix, fmtinfo->code);
subdev = tegra_channel_get_remote_source_subdev(chan);
ret = v4l2_subdev_call(subdev, pad, set_fmt, NULL, &fmt);
if (ret < 0)
return ret;
v4l2_fill_pix_format(pix, &fmt.format);
chan->vi->ops->vi_fmt_align(pix, fmtinfo->bpp);
chan->format = *pix;
chan->fmtinfo = fmtinfo;
tegra_channel_update_gangports(chan);
return 0;
}
static int tegra_channel_set_subdev_active_fmt(struct tegra_vi_channel *chan)
{
int ret, index;
struct v4l2_subdev *subdev;
struct v4l2_subdev_format fmt = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
/*
* Initialize the channel format to the sub-device active format if
* there is a corresponding match in the Tegra-supported video formats.
*/
subdev = tegra_channel_get_remote_source_subdev(chan);
ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
if (ret)
return ret;
index = tegra_get_format_idx_by_code(chan->vi, fmt.format.code, 0);
if (index < 0)
return -EINVAL;
chan->fmtinfo = &chan->vi->soc->video_formats[index];
v4l2_fill_pix_format(&chan->format, &fmt.format);
chan->format.pixelformat = chan->fmtinfo->fourcc;
chan->format.bytesperline = chan->format.width * chan->fmtinfo->bpp;
chan->format.sizeimage = chan->format.bytesperline *
chan->format.height;
chan->vi->ops->vi_fmt_align(&chan->format, chan->fmtinfo->bpp);
tegra_channel_update_gangports(chan);
return 0;
}
static int
tegra_channel_subscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
switch (sub->type) {
case V4L2_EVENT_SOURCE_CHANGE:
return v4l2_event_subscribe(fh, sub, 4, NULL);
}
return v4l2_ctrl_subscribe_event(fh, sub);
}
static int tegra_channel_g_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
struct tegra_vi_channel *chan = video_drvdata(file);
struct v4l2_subdev *subdev;
struct v4l2_subdev_format fmt = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
struct v4l2_subdev_selection sdsel = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
.target = sel->target,
};
int ret;
subdev = tegra_channel_get_remote_source_subdev(chan);
if (!v4l2_subdev_has_op(subdev, pad, get_selection))
return -ENOTTY;
if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
/*
* Try the get_selection operation and fall back to get_fmt if it is
* not implemented.
*/
ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sdsel);
if (!ret)
sel->r = sdsel.r;
if (ret != -ENOIOCTLCMD)
return ret;
ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
if (ret < 0)
return ret;
sel->r.left = 0;
sel->r.top = 0;
sel->r.width = fmt.format.width;
sel->r.height = fmt.format.height;
return 0;
}
static int tegra_channel_s_selection(struct file *file, void *fh,
struct v4l2_selection *sel)
{
struct tegra_vi_channel *chan = video_drvdata(file);
struct v4l2_subdev *subdev;
int ret;
struct v4l2_subdev_selection sdsel = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
.target = sel->target,
.flags = sel->flags,
.r = sel->r,
};
subdev = tegra_channel_get_remote_source_subdev(chan);
if (!v4l2_subdev_has_op(subdev, pad, set_selection))
return -ENOTTY;
if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (vb2_is_busy(&chan->queue))
return -EBUSY;
ret = v4l2_subdev_call(subdev, pad, set_selection, NULL, &sdsel);
if (!ret) {
sel->r = sdsel.r;
/*
* Subdev active format resolution may have changed during
* set selection operation. So, update channel format to
* the sub-device active format.
*/
return tegra_channel_set_subdev_active_fmt(chan);
}
return ret;
}
static int tegra_channel_g_edid(struct file *file, void *fh,
struct v4l2_edid *edid)
{
struct tegra_vi_channel *chan = video_drvdata(file);
struct v4l2_subdev *subdev;
subdev = tegra_channel_get_remote_source_subdev(chan);
if (!v4l2_subdev_has_op(subdev, pad, get_edid))
return -ENOTTY;
return v4l2_subdev_call(subdev, pad, get_edid, edid);
}
static int tegra_channel_s_edid(struct file *file, void *fh,
struct v4l2_edid *edid)
{
struct tegra_vi_channel *chan = video_drvdata(file);
struct v4l2_subdev *subdev;
subdev = tegra_channel_get_remote_source_subdev(chan);
if (!v4l2_subdev_has_op(subdev, pad, set_edid))
return -ENOTTY;
return v4l2_subdev_call(subdev, pad, set_edid, edid);
}
static int tegra_channel_g_dv_timings(struct file *file, void *fh,
struct v4l2_dv_timings *timings)
{
struct tegra_vi_channel *chan = video_drvdata(file);
struct v4l2_subdev *subdev;
subdev = tegra_channel_get_remote_source_subdev(chan);
if (!v4l2_subdev_has_op(subdev, video, g_dv_timings))
return -ENOTTY;
return v4l2_device_call_until_err(chan->video.v4l2_dev, 0,
video, g_dv_timings, timings);
}
static int tegra_channel_s_dv_timings(struct file *file, void *fh,
struct v4l2_dv_timings *timings)
{
struct tegra_vi_channel *chan = video_drvdata(file);
struct v4l2_subdev *subdev;
struct v4l2_bt_timings *bt = &timings->bt;
struct v4l2_dv_timings curr_timings;
int ret;
subdev = tegra_channel_get_remote_source_subdev(chan);
if (!v4l2_subdev_has_op(subdev, video, s_dv_timings))
return -ENOTTY;
ret = tegra_channel_g_dv_timings(file, fh, &curr_timings);
if (ret)
return ret;
if (v4l2_match_dv_timings(timings, &curr_timings, 0, false))
return 0;
if (vb2_is_busy(&chan->queue))
return -EBUSY;
ret = v4l2_device_call_until_err(chan->video.v4l2_dev, 0,
video, s_dv_timings, timings);
if (ret)
return ret;
chan->format.width = bt->width;
chan->format.height = bt->height;
chan->format.bytesperline = bt->width * chan->fmtinfo->bpp;
chan->format.sizeimage = chan->format.bytesperline * bt->height;
chan->vi->ops->vi_fmt_align(&chan->format, chan->fmtinfo->bpp);
tegra_channel_update_gangports(chan);
return 0;
}
static int tegra_channel_query_dv_timings(struct file *file, void *fh,
struct v4l2_dv_timings *timings)
{
struct tegra_vi_channel *chan = video_drvdata(file);
struct v4l2_subdev *subdev;
subdev = tegra_channel_get_remote_source_subdev(chan);
if (!v4l2_subdev_has_op(subdev, video, query_dv_timings))
return -ENOTTY;
return v4l2_device_call_until_err(chan->video.v4l2_dev, 0,
video, query_dv_timings, timings);
}
static int tegra_channel_enum_dv_timings(struct file *file, void *fh,
struct v4l2_enum_dv_timings *timings)
{
struct tegra_vi_channel *chan = video_drvdata(file);
struct v4l2_subdev *subdev;
subdev = tegra_channel_get_remote_source_subdev(chan);
if (!v4l2_subdev_has_op(subdev, pad, enum_dv_timings))
return -ENOTTY;
return v4l2_subdev_call(subdev, pad, enum_dv_timings, timings);
}
static int tegra_channel_dv_timings_cap(struct file *file, void *fh,
struct v4l2_dv_timings_cap *cap)
{
struct tegra_vi_channel *chan = video_drvdata(file);
struct v4l2_subdev *subdev;
subdev = tegra_channel_get_remote_source_subdev(chan);
if (!v4l2_subdev_has_op(subdev, pad, dv_timings_cap))
return -ENOTTY;
return v4l2_subdev_call(subdev, pad, dv_timings_cap, cap);
}
static int tegra_channel_log_status(struct file *file, void *fh)
{
struct tegra_vi_channel *chan = video_drvdata(file);
v4l2_device_call_all(chan->video.v4l2_dev, 0, core, log_status);
return 0;
}
static int tegra_channel_enum_input(struct file *file, void *fh,
struct v4l2_input *inp)
{
struct tegra_vi_channel *chan = video_drvdata(file);
struct v4l2_subdev *subdev;
if (inp->index)
return -EINVAL;
inp->type = V4L2_INPUT_TYPE_CAMERA;
subdev = tegra_channel_get_remote_source_subdev(chan);
strscpy(inp->name, subdev->name, sizeof(inp->name));
if (v4l2_subdev_has_op(subdev, pad, dv_timings_cap))
inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
return 0;
}
static int tegra_channel_g_input(struct file *file, void *priv,
unsigned int *i)
{
*i = 0;
return 0;
}
static int tegra_channel_s_input(struct file *file, void *priv,
unsigned int input)
{
if (input > 0)
return -EINVAL;
return 0;
}
static const struct v4l2_ioctl_ops tegra_channel_ioctl_ops = {
.vidioc_querycap = tegra_channel_querycap,
.vidioc_g_parm = tegra_channel_g_parm,
.vidioc_s_parm = tegra_channel_s_parm,
.vidioc_enum_framesizes = tegra_channel_enum_framesizes,
.vidioc_enum_frameintervals = tegra_channel_enum_frameintervals,
.vidioc_enum_fmt_vid_cap = tegra_channel_enum_format,
.vidioc_g_fmt_vid_cap = tegra_channel_get_format,
.vidioc_s_fmt_vid_cap = tegra_channel_set_format,
.vidioc_try_fmt_vid_cap = tegra_channel_try_format,
.vidioc_enum_input = tegra_channel_enum_input,
.vidioc_g_input = tegra_channel_g_input,
.vidioc_s_input = tegra_channel_s_input,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_subscribe_event = tegra_channel_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
.vidioc_g_selection = tegra_channel_g_selection,
.vidioc_s_selection = tegra_channel_s_selection,
.vidioc_g_edid = tegra_channel_g_edid,
.vidioc_s_edid = tegra_channel_s_edid,
.vidioc_g_dv_timings = tegra_channel_g_dv_timings,
.vidioc_s_dv_timings = tegra_channel_s_dv_timings,
.vidioc_query_dv_timings = tegra_channel_query_dv_timings,
.vidioc_enum_dv_timings = tegra_channel_enum_dv_timings,
.vidioc_dv_timings_cap = tegra_channel_dv_timings_cap,
.vidioc_log_status = tegra_channel_log_status,
};
/*
* V4L2 file operations
*/
static const struct v4l2_file_operations tegra_channel_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = video_ioctl2,
.open = v4l2_fh_open,
.release = vb2_fop_release,
.read = vb2_fop_read,
.poll = vb2_fop_poll,
.mmap = vb2_fop_mmap,
};
/*
* V4L2 control operations
*/
static int vi_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct tegra_vi_channel *chan = container_of(ctrl->handler,
struct tegra_vi_channel,
ctrl_handler);
switch (ctrl->id) {
case V4L2_CID_TEST_PATTERN:
/* pattern change takes effect on next stream */
chan->pg_mode = ctrl->val + 1;
break;
case V4L2_CID_TEGRA_SYNCPT_TIMEOUT_RETRY:
chan->syncpt_timeout_retry = ctrl->val;
break;
case V4L2_CID_HFLIP:
chan->hflip = ctrl->val;
break;
case V4L2_CID_VFLIP:
chan->vflip = ctrl->val;
break;
default:
return -EINVAL;
}
return 0;
}
static const struct v4l2_ctrl_ops vi_ctrl_ops = {
.s_ctrl = vi_s_ctrl,
};
#if IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)
static const char *const vi_pattern_strings[] = {
"Black/White Direct Mode",
"Color Patch Mode",
};
#else
static const struct v4l2_ctrl_config syncpt_timeout_ctrl = {
.ops = &vi_ctrl_ops,
.id = V4L2_CID_TEGRA_SYNCPT_TIMEOUT_RETRY,
.name = "Syncpt timeout retry",
.type = V4L2_CTRL_TYPE_INTEGER,
.min = 1,
.max = 10000,
.step = 1,
.def = 5,
};
#endif
static int tegra_channel_setup_ctrl_handler(struct tegra_vi_channel *chan)
{
int ret;
#if IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)
/* add test pattern control handler to v4l2 device */
v4l2_ctrl_new_std_menu_items(&chan->ctrl_handler, &vi_ctrl_ops,
V4L2_CID_TEST_PATTERN,
ARRAY_SIZE(vi_pattern_strings) - 1,
0, 0, vi_pattern_strings);
if (chan->ctrl_handler.error) {
dev_err(chan->vi->dev, "failed to add TPG ctrl handler: %d\n",
chan->ctrl_handler.error);
v4l2_ctrl_handler_free(&chan->ctrl_handler);
return chan->ctrl_handler.error;
}
#else
struct v4l2_subdev *subdev;
/* custom control */
v4l2_ctrl_new_custom(&chan->ctrl_handler, &syncpt_timeout_ctrl, NULL);
if (chan->ctrl_handler.error) {
dev_err(chan->vi->dev, "failed to add %s ctrl handler: %d\n",
syncpt_timeout_ctrl.name,
chan->ctrl_handler.error);
v4l2_ctrl_handler_free(&chan->ctrl_handler);
return chan->ctrl_handler.error;
}
subdev = tegra_channel_get_remote_source_subdev(chan);
if (!subdev)
return -ENODEV;
ret = v4l2_ctrl_add_handler(&chan->ctrl_handler, subdev->ctrl_handler,
NULL, true);
if (ret < 0) {
dev_err(chan->vi->dev,
"failed to add subdev %s ctrl handler: %d\n",
subdev->name, ret);
v4l2_ctrl_handler_free(&chan->ctrl_handler);
return ret;
}
if (chan->vi->soc->has_h_v_flip) {
v4l2_ctrl_new_std(&chan->ctrl_handler, &vi_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
v4l2_ctrl_new_std(&chan->ctrl_handler, &vi_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
}
#endif
/* setup the controls */
ret = v4l2_ctrl_handler_setup(&chan->ctrl_handler);
if (ret < 0) {
dev_err(chan->vi->dev,
"failed to setup v4l2 ctrl handler: %d\n", ret);
return ret;
}
return 0;
}
/* VI only supports 2 formats in TPG mode */
static void vi_tpg_fmts_bitmap_init(struct tegra_vi_channel *chan)
{
int index;
bitmap_zero(chan->tpg_fmts_bitmap, MAX_FORMAT_NUM);
index = tegra_get_format_idx_by_code(chan->vi,
MEDIA_BUS_FMT_SRGGB10_1X10, 0);
bitmap_set(chan->tpg_fmts_bitmap, index, 1);
index = tegra_get_format_idx_by_code(chan->vi,
MEDIA_BUS_FMT_RGB888_1X32_PADHI,
0);
bitmap_set(chan->tpg_fmts_bitmap, index, 1);
}
static int vi_fmts_bitmap_init(struct tegra_vi_channel *chan)
{
int index, ret, match_code = 0;
struct v4l2_subdev *subdev;
struct v4l2_subdev_mbus_code_enum code = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
bitmap_zero(chan->fmts_bitmap, MAX_FORMAT_NUM);
/*
* Set the bitmap bits based on all the matching formats between the
* sub-device's available media bus formats and the pre-defined
* Tegra-supported video formats.
*/
subdev = tegra_channel_get_remote_source_subdev(chan);
while (1) {
ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
NULL, &code);
if (ret < 0)
break;
index = tegra_get_format_idx_by_code(chan->vi, code.code, 0);
while (index >= 0) {
bitmap_set(chan->fmts_bitmap, index, 1);
if (!match_code)
match_code = code.code;
/* look for other formats with same mbus code */
index = tegra_get_format_idx_by_code(chan->vi,
code.code,
index + 1);
}
code.index++;
}
/*
* Set the bitmap bit corresponding to the default Tegra video format
* if there are no matching formats.
*/
if (!match_code) {
match_code = chan->vi->soc->default_video_format->code;
index = tegra_get_format_idx_by_code(chan->vi, match_code, 0);
if (WARN_ON(index < 0))
return -EINVAL;
bitmap_set(chan->fmts_bitmap, index, 1);
}
/* initialize channel format to the sub-device active format */
tegra_channel_set_subdev_active_fmt(chan);
return 0;
}
static void tegra_channel_cleanup(struct tegra_vi_channel *chan)
{
v4l2_ctrl_handler_free(&chan->ctrl_handler);
media_entity_cleanup(&chan->video.entity);
chan->vi->ops->channel_host1x_syncpt_free(chan);
mutex_destroy(&chan->video_lock);
}
void tegra_channels_cleanup(struct tegra_vi *vi)
{
struct tegra_vi_channel *chan, *tmp;
if (!vi)
return;
list_for_each_entry_safe(chan, tmp, &vi->vi_chans, list) {
tegra_channel_cleanup(chan);
list_del(&chan->list);
kfree(chan);
}
}
static int tegra_channel_init(struct tegra_vi_channel *chan)
{
struct tegra_vi *vi = chan->vi;
struct tegra_video_device *vid = dev_get_drvdata(vi->client.host);
int ret;
mutex_init(&chan->video_lock);
INIT_LIST_HEAD(&chan->capture);
INIT_LIST_HEAD(&chan->done);
spin_lock_init(&chan->start_lock);
spin_lock_init(&chan->done_lock);
init_waitqueue_head(&chan->start_wait);
init_waitqueue_head(&chan->done_wait);
/* initialize the video format */
chan->fmtinfo = chan->vi->soc->default_video_format;
chan->format.pixelformat = chan->fmtinfo->fourcc;
chan->format.colorspace = V4L2_COLORSPACE_SRGB;
chan->format.field = V4L2_FIELD_NONE;
chan->format.width = TEGRA_DEF_WIDTH;
chan->format.height = TEGRA_DEF_HEIGHT;
chan->format.bytesperline = TEGRA_DEF_WIDTH * chan->fmtinfo->bpp;
chan->format.sizeimage = chan->format.bytesperline * TEGRA_DEF_HEIGHT;
vi->ops->vi_fmt_align(&chan->format, chan->fmtinfo->bpp);
ret = vi->ops->channel_host1x_syncpt_init(chan);
if (ret)
return ret;
/* initialize the media entity */
chan->pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&chan->video.entity, 1, &chan->pad);
if (ret < 0) {
dev_err(vi->dev,
"failed to initialize media entity: %d\n", ret);
goto free_syncpts;
}
ret = v4l2_ctrl_handler_init(&chan->ctrl_handler, MAX_CID_CONTROLS);
if (chan->ctrl_handler.error) {
dev_err(vi->dev,
"failed to initialize v4l2 ctrl handler: %d\n", ret);
goto cleanup_media;
}
/* initialize the video_device */
chan->video.fops = &tegra_channel_fops;
chan->video.v4l2_dev = &vid->v4l2_dev;
chan->video.release = video_device_release_empty;
chan->video.queue = &chan->queue;
snprintf(chan->video.name, sizeof(chan->video.name), "%s-%s-%u",
dev_name(vi->dev), "output", chan->portnos[0]);
chan->video.vfl_type = VFL_TYPE_VIDEO;
chan->video.vfl_dir = VFL_DIR_RX;
chan->video.ioctl_ops = &tegra_channel_ioctl_ops;
chan->video.ctrl_handler = &chan->ctrl_handler;
chan->video.lock = &chan->video_lock;
chan->video.device_caps = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING |
V4L2_CAP_READWRITE;
video_set_drvdata(&chan->video, chan);
chan->queue.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
chan->queue.io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
chan->queue.lock = &chan->video_lock;
chan->queue.drv_priv = chan;
chan->queue.buf_struct_size = sizeof(struct tegra_channel_buffer);
chan->queue.ops = &tegra_channel_queue_qops;
chan->queue.mem_ops = &vb2_dma_contig_memops;
chan->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
chan->queue.min_buffers_needed = 2;
chan->queue.dev = vi->dev;
ret = vb2_queue_init(&chan->queue);
if (ret < 0) {
dev_err(vi->dev, "failed to initialize vb2 queue: %d\n", ret);
goto free_v4l2_ctrl_hdl;
}
if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
v4l2_async_nf_init(&chan->notifier, &vid->v4l2_dev);
return 0;
free_v4l2_ctrl_hdl:
v4l2_ctrl_handler_free(&chan->ctrl_handler);
cleanup_media:
media_entity_cleanup(&chan->video.entity);
free_syncpts:
vi->ops->channel_host1x_syncpt_free(chan);
return ret;
}
static int tegra_vi_channel_alloc(struct tegra_vi *vi, unsigned int port_num,
struct device_node *node, unsigned int lanes)
{
struct tegra_vi_channel *chan;
unsigned int i;
/*
* Do not use devm_kzalloc as the memory would be freed immediately
* when the device instance is unbound, while an application might
* still be holding the device node open. Channel memory allocated
* with kzalloc is freed from the video device release callback instead.
*/
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!chan)
return -ENOMEM;
chan->vi = vi;
chan->portnos[0] = port_num;
/*
* When the number of data lanes exceeds the maximum CSI lanes per brick,
* multiple x4 ports are used simultaneously for capture.
*/
if (lanes <= CSI_LANES_PER_BRICK)
chan->totalports = 1;
else
chan->totalports = lanes / CSI_LANES_PER_BRICK;
chan->numgangports = chan->totalports;
for (i = 1; i < chan->totalports; i++)
chan->portnos[i] = chan->portnos[0] + i * CSI_PORTS_PER_BRICK;
chan->of_node = node;
list_add_tail(&chan->list, &vi->vi_chans);
return 0;
}
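/*
 * In TPG mode, allocate one channel per VI port; each channel uses the
 * built-in test pattern generator as its source with two data lanes.
 */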
static int tegra_vi_tpg_channels_alloc(struct tegra_vi *vi)
{
unsigned int port_num;
unsigned int nchannels = vi->soc->vi_max_channels;
int ret;
for (port_num = 0; port_num < nchannels; port_num++) {
ret = tegra_vi_channel_alloc(vi, port_num,
vi->dev->of_node, 2);
if (ret < 0)
return ret;
}
return 0;
}
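/*
 * Walk the VI "ports" OF node and allocate one channel for each port whose
 * endpoint is connected to a remote source, using the number of CSI data
 * lanes reported by the remote endpoint.
 */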
static int tegra_vi_channels_alloc(struct tegra_vi *vi)
{
struct device_node *node = vi->dev->of_node;
struct device_node *ep = NULL;
struct device_node *ports;
struct device_node *port = NULL;
unsigned int port_num;
struct device_node *parent;
struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
unsigned int lanes;
int ret = 0;
ports = of_get_child_by_name(node, "ports");
if (!ports)
return dev_err_probe(vi->dev, -ENODEV, "%pOF: missing 'ports' node\n", node);
for_each_child_of_node(ports, port) {
if (!of_node_name_eq(port, "port"))
continue;
ret = of_property_read_u32(port, "reg", &port_num);
if (ret < 0)
continue;
if (port_num > vi->soc->vi_max_channels) {
dev_err(vi->dev, "invalid port num %d for %pOF\n",
port_num, port);
ret = -EINVAL;
goto cleanup;
}
ep = of_get_child_by_name(port, "endpoint");
if (!ep)
continue;
parent = of_graph_get_remote_port_parent(ep);
of_node_put(ep);
if (!parent)
continue;
ep = of_graph_get_endpoint_by_regs(parent, 0, 0);
of_node_put(parent);
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep),
&v4l2_ep);
of_node_put(ep);
if (ret)
continue;
lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
ret = tegra_vi_channel_alloc(vi, port_num, port, lanes);
if (ret < 0)
goto cleanup;
}
cleanup:
of_node_put(port);
of_node_put(ports);
return ret;
}
static int tegra_vi_channels_init(struct tegra_vi *vi)
{
struct tegra_vi_channel *chan;
int ret;
list_for_each_entry(chan, &vi->vi_chans, list) {
ret = tegra_channel_init(chan);
if (ret < 0) {
dev_err(vi->dev,
"failed to initialize channel-%d: %d\n",
chan->portnos[0], ret);
goto cleanup;
}
}
return 0;
cleanup:
list_for_each_entry_continue_reverse(chan, &vi->vi_chans, list)
tegra_channel_cleanup(chan);
return ret;
}
void tegra_v4l2_nodes_cleanup_tpg(struct tegra_video_device *vid)
{
struct tegra_vi *vi = vid->vi;
struct tegra_csi *csi = vid->csi;
struct tegra_csi_channel *csi_chan;
struct tegra_vi_channel *chan;
list_for_each_entry(chan, &vi->vi_chans, list)
vb2_video_unregister_device(&chan->video);
list_for_each_entry(csi_chan, &csi->csi_chans, list)
v4l2_device_unregister_subdev(&csi_chan->subdev);
}
int tegra_v4l2_nodes_setup_tpg(struct tegra_video_device *vid)
{
struct tegra_vi *vi = vid->vi;
struct tegra_csi *csi = vid->csi;
struct tegra_vi_channel *vi_chan;
struct tegra_csi_channel *csi_chan;
u32 link_flags = MEDIA_LNK_FL_ENABLED;
int ret;
if (!vi || !csi)
return -ENODEV;
csi_chan = list_first_entry(&csi->csi_chans,
struct tegra_csi_channel, list);
list_for_each_entry(vi_chan, &vi->vi_chans, list) {
struct media_entity *source = &csi_chan->subdev.entity;
struct media_entity *sink = &vi_chan->video.entity;
struct media_pad *source_pad = csi_chan->pads;
struct media_pad *sink_pad = &vi_chan->pad;
ret = v4l2_device_register_subdev(&vid->v4l2_dev,
&csi_chan->subdev);
if (ret) {
dev_err(vi->dev,
"failed to register subdev: %d\n", ret);
goto cleanup;
}
ret = video_register_device(&vi_chan->video,
VFL_TYPE_VIDEO, -1);
if (ret < 0) {
dev_err(vi->dev,
"failed to register video device: %d\n", ret);
goto cleanup;
}
dev_dbg(vi->dev, "creating %s:%u -> %s:%u link\n",
source->name, source_pad->index,
sink->name, sink_pad->index);
ret = media_create_pad_link(source, source_pad->index,
sink, sink_pad->index,
link_flags);
if (ret < 0) {
dev_err(vi->dev,
"failed to create %s:%u -> %s:%u link: %d\n",
source->name, source_pad->index,
sink->name, sink_pad->index, ret);
goto cleanup;
}
ret = tegra_channel_setup_ctrl_handler(vi_chan);
if (ret < 0)
goto cleanup;
v4l2_set_subdev_hostdata(&csi_chan->subdev, vi_chan);
vi_tpg_fmts_bitmap_init(vi_chan);
csi_chan = list_next_entry(csi_chan, list);
}
return 0;
cleanup:
tegra_v4l2_nodes_cleanup_tpg(vid);
return ret;
}
static int __maybe_unused vi_runtime_resume(struct device *dev)
{
struct tegra_vi *vi = dev_get_drvdata(dev);
int ret;
ret = regulator_enable(vi->vdd);
if (ret) {
dev_err(dev, "failed to enable VDD supply: %d\n", ret);
return ret;
}
ret = clk_set_rate(vi->clk, vi->soc->vi_max_clk_hz);
if (ret) {
dev_err(dev, "failed to set vi clock rate: %d\n", ret);
goto disable_vdd;
}
ret = clk_prepare_enable(vi->clk);
if (ret) {
dev_err(dev, "failed to enable vi clock: %d\n", ret);
goto disable_vdd;
}
return 0;
disable_vdd:
regulator_disable(vi->vdd);
return ret;
}
static int __maybe_unused vi_runtime_suspend(struct device *dev)
{
struct tegra_vi *vi = dev_get_drvdata(dev);
clk_disable_unprepare(vi->clk);
regulator_disable(vi->vdd);
return 0;
}
/*
* Graph Management
*/
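/* Look up a graph entity in the channel notifier's done list by fwnode. */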
static struct tegra_vi_graph_entity *
tegra_vi_graph_find_entity(struct tegra_vi_channel *chan,
const struct fwnode_handle *fwnode)
{
struct tegra_vi_graph_entity *entity;
struct v4l2_async_connection *asd;
list_for_each_entry(asd, &chan->notifier.done_list, asc_entry) {
entity = to_tegra_vi_graph_entity(asd);
if (entity->asd.match.fwnode == fwnode)
return entity;
}
return NULL;
}
static int tegra_vi_graph_build(struct tegra_vi_channel *chan,
struct tegra_vi_graph_entity *entity)
{
struct tegra_vi *vi = chan->vi;
struct tegra_vi_graph_entity *ent;
struct fwnode_handle *ep = NULL;
struct v4l2_fwnode_link link;
struct media_entity *local = entity->entity;
struct media_entity *remote;
struct media_pad *local_pad;
struct media_pad *remote_pad;
u32 link_flags = MEDIA_LNK_FL_ENABLED;
int ret = 0;
dev_dbg(vi->dev, "creating links for entity %s\n", local->name);
while (1) {
ep = fwnode_graph_get_next_endpoint(entity->asd.match.fwnode,
ep);
if (!ep)
break;
ret = v4l2_fwnode_parse_link(ep, &link);
if (ret < 0) {
dev_err(vi->dev, "failed to parse link for %pOF: %d\n",
to_of_node(ep), ret);
continue;
}
if (link.local_port >= local->num_pads) {
dev_err(vi->dev, "invalid port number %u on %pOF\n",
link.local_port, to_of_node(link.local_node));
v4l2_fwnode_put_link(&link);
ret = -EINVAL;
break;
}
local_pad = &local->pads[link.local_port];
/*
* The remote node is the VI node, so use the channel video entity and
* pad as the remote/sink.
*/
if (link.remote_node == of_fwnode_handle(vi->dev->of_node)) {
remote = &chan->video.entity;
remote_pad = &chan->pad;
goto create_link;
}
/*
* Skip sink ports, they will be processed from the other end
* of the link.
*/
if (local_pad->flags & MEDIA_PAD_FL_SINK) {
dev_dbg(vi->dev, "skipping sink port %pOF:%u\n",
to_of_node(link.local_node), link.local_port);
v4l2_fwnode_put_link(&link);
continue;
}
/* find the remote entity from notifier list */
ent = tegra_vi_graph_find_entity(chan, link.remote_node);
if (!ent) {
dev_err(vi->dev, "no entity found for %pOF\n",
to_of_node(link.remote_node));
v4l2_fwnode_put_link(&link);
ret = -ENODEV;
break;
}
remote = ent->entity;
if (link.remote_port >= remote->num_pads) {
dev_err(vi->dev, "invalid port number %u on %pOF\n",
link.remote_port,
to_of_node(link.remote_node));
v4l2_fwnode_put_link(&link);
ret = -EINVAL;
break;
}
remote_pad = &remote->pads[link.remote_port];
create_link:
dev_dbg(vi->dev, "creating %s:%u -> %s:%u link\n",
local->name, local_pad->index,
remote->name, remote_pad->index);
ret = media_create_pad_link(local, local_pad->index,
remote, remote_pad->index,
link_flags);
v4l2_fwnode_put_link(&link);
if (ret < 0) {
dev_err(vi->dev,
"failed to create %s:%u -> %s:%u link: %d\n",
local->name, local_pad->index,
remote->name, remote_pad->index, ret);
break;
}
}
fwnode_handle_put(ep);
return ret;
}
static int tegra_vi_graph_notify_complete(struct v4l2_async_notifier *notifier)
{
struct tegra_vi_graph_entity *entity;
struct v4l2_async_connection *asd;
struct v4l2_subdev *subdev;
struct tegra_vi_channel *chan;
struct tegra_vi *vi;
int ret;
chan = container_of(notifier, struct tegra_vi_channel, notifier);
vi = chan->vi;
dev_dbg(vi->dev, "notify complete, all subdevs registered\n");
/*
* The video device node should be created at the end of all the device
* related initialization/setup.
* The current video_register_device() both initializes and registers the
* video device in the same API.
*
* TODO: Update the v4l2-dev driver to split initialization and
* registration into separate APIs and then update the Tegra video driver
* to initialize the video device, do all the video device related setup
* and then register the video device.
*/
ret = video_register_device(&chan->video, VFL_TYPE_VIDEO, -1);
if (ret < 0) {
dev_err(vi->dev,
"failed to register video device: %d\n", ret);
goto unregister_video;
}
/* create links between the entities */
list_for_each_entry(asd, &chan->notifier.done_list, asc_entry) {
entity = to_tegra_vi_graph_entity(asd);
ret = tegra_vi_graph_build(chan, entity);
if (ret < 0)
goto unregister_video;
}
ret = tegra_channel_setup_ctrl_handler(chan);
if (ret < 0) {
dev_err(vi->dev,
"failed to setup channel controls: %d\n", ret);
goto unregister_video;
}
ret = vi_fmts_bitmap_init(chan);
if (ret < 0) {
dev_err(vi->dev,
"failed to initialize formats bitmap: %d\n", ret);
goto unregister_video;
}
subdev = tegra_channel_get_remote_csi_subdev(chan);
if (!subdev) {
ret = -ENODEV;
dev_err(vi->dev,
"failed to get remote csi subdev: %d\n", ret);
goto unregister_video;
}
v4l2_set_subdev_hostdata(subdev, chan);
subdev = tegra_channel_get_remote_source_subdev(chan);
v4l2_set_subdev_hostdata(subdev, chan);
return 0;
unregister_video:
vb2_video_unregister_device(&chan->video);
return ret;
}
static int tegra_vi_graph_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_connection *asd)
{
struct tegra_vi_graph_entity *entity;
struct tegra_vi *vi;
struct tegra_vi_channel *chan;
chan = container_of(notifier, struct tegra_vi_channel, notifier);
vi = chan->vi;
/*
* Locate the entity corresponding to the bound subdev and store the
* subdev pointer.
*/
entity = tegra_vi_graph_find_entity(chan, subdev->fwnode);
if (!entity) {
dev_err(vi->dev, "no entity for subdev %s\n", subdev->name);
return -EINVAL;
}
if (entity->subdev) {
dev_err(vi->dev, "duplicate subdev for node %pOF\n",
to_of_node(entity->asd.match.fwnode));
return -EINVAL;
}
dev_dbg(vi->dev, "subdev %s bound\n", subdev->name);
entity->entity = &subdev->entity;
entity->subdev = subdev;
return 0;
}
static const struct v4l2_async_notifier_operations tegra_vi_async_ops = {
.bound = tegra_vi_graph_notify_bound,
.complete = tegra_vi_graph_notify_complete,
};
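/*
 * Recursively walk the graph from the given fwnode and add every remote
 * entity that is not the VI itself to the channel's async notifier.
 */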
static int tegra_vi_graph_parse_one(struct tegra_vi_channel *chan,
struct fwnode_handle *fwnode)
{
struct tegra_vi *vi = chan->vi;
struct fwnode_handle *ep = NULL;
struct fwnode_handle *remote = NULL;
struct tegra_vi_graph_entity *tvge;
struct device_node *node = NULL;
int ret;
dev_dbg(vi->dev, "parsing node %pOF\n", to_of_node(fwnode));
/* parse all the remote entities and put them into the list */
for_each_endpoint_of_node(to_of_node(fwnode), node) {
ep = of_fwnode_handle(node);
remote = fwnode_graph_get_remote_port_parent(ep);
if (!remote) {
dev_err(vi->dev,
"remote device at %pOF not found\n", node);
ret = -EINVAL;
goto cleanup;
}
/* skip entities that are already processed */
if (device_match_fwnode(vi->dev, remote) ||
tegra_vi_graph_find_entity(chan, remote)) {
fwnode_handle_put(remote);
continue;
}
tvge = v4l2_async_nf_add_fwnode(&chan->notifier, remote,
struct tegra_vi_graph_entity);
if (IS_ERR(tvge)) {
ret = PTR_ERR(tvge);
dev_err(vi->dev,
"failed to add subdev to notifier: %d\n", ret);
fwnode_handle_put(remote);
goto cleanup;
}
ret = tegra_vi_graph_parse_one(chan, remote);
if (ret < 0) {
fwnode_handle_put(remote);
goto cleanup;
}
fwnode_handle_put(remote);
}
return 0;
cleanup:
dev_err(vi->dev, "failed parsing the graph: %d\n", ret);
v4l2_async_nf_cleanup(&chan->notifier);
of_node_put(node);
return ret;
}
static int tegra_vi_graph_init(struct tegra_vi *vi)
{
struct tegra_vi_channel *chan;
struct fwnode_handle *fwnode = dev_fwnode(vi->dev);
int ret;
/*
* Walk the links to parse the full graph. Each channel will have
* one endpoint of the composite node. Start by parsing the
* composite node and then parse the remote entities in turn.
* Each channel registers its own v4l2 async notifier to keep the graphs
* independent between channels, so if something goes wrong during graph
* parsing the current channel can be skipped and parsing continues with
* the next channels.
*/
list_for_each_entry(chan, &vi->vi_chans, list) {
struct fwnode_handle *ep, *remote;
ep = fwnode_graph_get_endpoint_by_id(fwnode,
chan->portnos[0], 0, 0);
if (!ep)
continue;
remote = fwnode_graph_get_remote_port_parent(ep);
fwnode_handle_put(ep);
ret = tegra_vi_graph_parse_one(chan, remote);
fwnode_handle_put(remote);
if (ret < 0 || list_empty(&chan->notifier.waiting_list))
continue;
chan->notifier.ops = &tegra_vi_async_ops;
ret = v4l2_async_nf_register(&chan->notifier);
if (ret < 0) {
dev_err(vi->dev,
"failed to register channel %d notifier: %d\n",
chan->portnos[0], ret);
v4l2_async_nf_cleanup(&chan->notifier);
}
}
return 0;
}
static void tegra_vi_graph_cleanup(struct tegra_vi *vi)
{
struct tegra_vi_channel *chan;
list_for_each_entry(chan, &vi->vi_chans, list) {
vb2_video_unregister_device(&chan->video);
v4l2_async_nf_unregister(&chan->notifier);
v4l2_async_nf_cleanup(&chan->notifier);
}
}
static int tegra_vi_init(struct host1x_client *client)
{
struct tegra_video_device *vid = dev_get_drvdata(client->host);
struct tegra_vi *vi = host1x_client_to_vi(client);
struct tegra_vi_channel *chan, *tmp;
int ret;
vid->media_dev.hw_revision = vi->soc->hw_revision;
snprintf(vid->media_dev.bus_info, sizeof(vid->media_dev.bus_info),
"platform:%s", dev_name(vi->dev));
INIT_LIST_HEAD(&vi->vi_chans);
if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
ret = tegra_vi_tpg_channels_alloc(vi);
else
ret = tegra_vi_channels_alloc(vi);
if (ret < 0)
goto free_chans;
ret = tegra_vi_channels_init(vi);
if (ret < 0)
goto free_chans;
vid->vi = vi;
if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)) {
ret = tegra_vi_graph_init(vi);
if (ret < 0)
goto cleanup_chans;
}
return 0;
cleanup_chans:
list_for_each_entry(chan, &vi->vi_chans, list)
tegra_channel_cleanup(chan);
free_chans:
list_for_each_entry_safe(chan, tmp, &vi->vi_chans, list) {
list_del(&chan->list);
kfree(chan);
}
return ret;
}
static int tegra_vi_exit(struct host1x_client *client)
{
struct tegra_vi *vi = host1x_client_to_vi(client);
/*
* Do not clean up the channels here as an application might still be
* holding video device nodes. Channel cleanup happens from the
* v4l2_device release callback, which is called after all video
* device nodes are released.
*/
if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
tegra_vi_graph_cleanup(vi);
return 0;
}
static const struct host1x_client_ops vi_client_ops = {
.init = tegra_vi_init,
.exit = tegra_vi_exit,
};
static int tegra_vi_probe(struct platform_device *pdev)
{
struct tegra_vi *vi;
int ret;
vi = devm_kzalloc(&pdev->dev, sizeof(*vi), GFP_KERNEL);
if (!vi)
return -ENOMEM;
vi->iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vi->iomem))
return PTR_ERR(vi->iomem);
vi->soc = of_device_get_match_data(&pdev->dev);
vi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(vi->clk)) {
ret = PTR_ERR(vi->clk);
dev_err(&pdev->dev, "failed to get vi clock: %d\n", ret);
return ret;
}
vi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
if (IS_ERR(vi->vdd)) {
ret = PTR_ERR(vi->vdd);
dev_err(&pdev->dev, "failed to get VDD supply: %d\n", ret);
return ret;
}
if (!pdev->dev.pm_domain) {
ret = -ENOENT;
dev_warn(&pdev->dev, "PM domain is not attached: %d\n", ret);
return ret;
}
ret = devm_of_platform_populate(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev,
"failed to populate vi child device: %d\n", ret);
return ret;
}
vi->dev = &pdev->dev;
vi->ops = vi->soc->ops;
platform_set_drvdata(pdev, vi);
pm_runtime_enable(&pdev->dev);
/* initialize host1x interface */
INIT_LIST_HEAD(&vi->client.list);
vi->client.ops = &vi_client_ops;
vi->client.dev = &pdev->dev;
if (vi->ops->vi_enable)
vi->ops->vi_enable(vi, true);
ret = host1x_client_register(&vi->client);
if (ret < 0) {
dev_err(&pdev->dev,
"failed to register host1x client: %d\n", ret);
goto rpm_disable;
}
return 0;
rpm_disable:
if (vi->ops->vi_enable)
vi->ops->vi_enable(vi, false);
pm_runtime_disable(&pdev->dev);
return ret;
}
static int tegra_vi_remove(struct platform_device *pdev)
{
struct tegra_vi *vi = platform_get_drvdata(pdev);
host1x_client_unregister(&vi->client);
if (vi->ops->vi_enable)
vi->ops->vi_enable(vi, false);
pm_runtime_disable(&pdev->dev);
return 0;
}
static const struct of_device_id tegra_vi_of_id_table[] = {
#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
{ .compatible = "nvidia,tegra20-vi", .data = &tegra20_vi_soc },
#endif
#if defined(CONFIG_ARCH_TEGRA_210_SOC)
{ .compatible = "nvidia,tegra210-vi", .data = &tegra210_vi_soc },
#endif
{ }
};
MODULE_DEVICE_TABLE(of, tegra_vi_of_id_table);
static const struct dev_pm_ops tegra_vi_pm_ops = {
SET_RUNTIME_PM_OPS(vi_runtime_suspend, vi_runtime_resume, NULL)
};
struct platform_driver tegra_vi_driver = {
.driver = {
.name = "tegra-vi",
.of_match_table = tegra_vi_of_id_table,
.pm = &tegra_vi_pm_ops,
},
.probe = tegra_vi_probe,
.remove = tegra_vi_remove,
};
| linux-master | drivers/staging/media/tegra-video/vi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
*/
/*
* This source file contains Tegra210 supported video formats,
* VI and CSI SoC specific data, operations and registers accessors.
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/delay.h>
#include <linux/host1x.h>
#include <linux/kthread.h>
#include "csi.h"
#include "vi.h"
#define TEGRA210_MIN_WIDTH 32U
#define TEGRA210_MAX_WIDTH 32768U
#define TEGRA210_MIN_HEIGHT 32U
#define TEGRA210_MAX_HEIGHT 32768U
#define SURFACE_ALIGN_BYTES 64
#define TEGRA_VI_SYNCPT_WAIT_TIMEOUT msecs_to_jiffies(200)
/* Tegra210 VI registers */
#define TEGRA_VI_CFG_VI_INCR_SYNCPT 0x000
#define VI_CFG_VI_INCR_SYNCPT_COND(x) (((x) & 0xff) << 8)
#define VI_CSI_PP_FRAME_START(port) (5 + (port) * 4)
#define VI_CSI_MW_ACK_DONE(port) (7 + (port) * 4)
#define TEGRA_VI_CFG_VI_INCR_SYNCPT_CNTRL 0x004
#define VI_INCR_SYNCPT_NO_STALL BIT(8)
#define TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR 0x008
#define TEGRA_VI_CFG_CG_CTRL 0x0b8
#define VI_CG_2ND_LEVEL_EN 0x1
/* Tegra210 VI CSI registers */
#define TEGRA_VI_CSI_SW_RESET 0x000
#define TEGRA_VI_CSI_SINGLE_SHOT 0x004
#define SINGLE_SHOT_CAPTURE 0x1
#define TEGRA_VI_CSI_IMAGE_DEF 0x00c
#define BYPASS_PXL_TRANSFORM_OFFSET 24
#define IMAGE_DEF_FORMAT_OFFSET 16
#define IMAGE_DEF_DEST_MEM 0x1
#define TEGRA_VI_CSI_IMAGE_SIZE 0x018
#define IMAGE_SIZE_HEIGHT_OFFSET 16
#define TEGRA_VI_CSI_IMAGE_SIZE_WC 0x01c
#define TEGRA_VI_CSI_IMAGE_DT 0x020
#define TEGRA_VI_CSI_SURFACE0_OFFSET_MSB 0x024
#define TEGRA_VI_CSI_SURFACE0_OFFSET_LSB 0x028
#define TEGRA_VI_CSI_SURFACE1_OFFSET_MSB 0x02c
#define TEGRA_VI_CSI_SURFACE1_OFFSET_LSB 0x030
#define TEGRA_VI_CSI_SURFACE2_OFFSET_MSB 0x034
#define TEGRA_VI_CSI_SURFACE2_OFFSET_LSB 0x038
#define TEGRA_VI_CSI_SURFACE0_STRIDE 0x054
#define TEGRA_VI_CSI_SURFACE1_STRIDE 0x058
#define TEGRA_VI_CSI_SURFACE2_STRIDE 0x05c
#define TEGRA_VI_CSI_SURFACE_HEIGHT0 0x060
#define TEGRA_VI_CSI_ERROR_STATUS 0x084
/* Tegra210 CSI Pixel Parser registers: Starts from 0x838, offset 0x0 */
#define TEGRA_CSI_INPUT_STREAM_CONTROL 0x000
#define CSI_SKIP_PACKET_THRESHOLD_OFFSET 16
#define TEGRA_CSI_PIXEL_STREAM_CONTROL0 0x004
#define CSI_PP_PACKET_HEADER_SENT BIT(4)
#define CSI_PP_DATA_IDENTIFIER_ENABLE BIT(5)
#define CSI_PP_WORD_COUNT_SELECT_HEADER BIT(6)
#define CSI_PP_CRC_CHECK_ENABLE BIT(7)
#define CSI_PP_WC_CHECK BIT(8)
#define CSI_PP_OUTPUT_FORMAT_STORE (0x3 << 16)
#define CSI_PPA_PAD_LINE_NOPAD (0x2 << 24)
#define CSI_PP_HEADER_EC_DISABLE (0x1 << 27)
#define CSI_PPA_PAD_FRAME_NOPAD (0x2 << 28)
#define TEGRA_CSI_PIXEL_STREAM_CONTROL1 0x008
#define CSI_PP_TOP_FIELD_FRAME_OFFSET 0
#define CSI_PP_TOP_FIELD_FRAME_MASK_OFFSET 4
#define TEGRA_CSI_PIXEL_STREAM_GAP 0x00c
#define PP_FRAME_MIN_GAP_OFFSET 16
#define TEGRA_CSI_PIXEL_STREAM_PP_COMMAND 0x010
#define CSI_PP_ENABLE 0x1
#define CSI_PP_DISABLE 0x2
#define CSI_PP_RST 0x3
#define CSI_PP_SINGLE_SHOT_ENABLE (0x1 << 2)
#define CSI_PP_START_MARKER_FRAME_MAX_OFFSET 12
#define TEGRA_CSI_PIXEL_STREAM_EXPECTED_FRAME 0x014
#define TEGRA_CSI_PIXEL_PARSER_INTERRUPT_MASK 0x018
#define TEGRA_CSI_PIXEL_PARSER_STATUS 0x01c
/* Tegra210 CSI PHY registers */
/* CSI_PHY_CIL_COMMAND_0 offset 0x0d0 from TEGRA_CSI_PIXEL_PARSER_0_BASE */
#define TEGRA_CSI_PHY_CIL_COMMAND 0x0d0
#define CSI_A_PHY_CIL_NOP 0x0
#define CSI_A_PHY_CIL_ENABLE 0x1
#define CSI_A_PHY_CIL_DISABLE 0x2
#define CSI_A_PHY_CIL_ENABLE_MASK 0x3
#define CSI_B_PHY_CIL_NOP (0x0 << 8)
#define CSI_B_PHY_CIL_ENABLE (0x1 << 8)
#define CSI_B_PHY_CIL_DISABLE (0x2 << 8)
#define CSI_B_PHY_CIL_ENABLE_MASK (0x3 << 8)
#define TEGRA_CSI_CIL_PAD_CONFIG0 0x000
#define BRICK_CLOCK_A_4X (0x1 << 16)
#define BRICK_CLOCK_B_4X (0x2 << 16)
#define TEGRA_CSI_CIL_PAD_CONFIG1 0x004
#define TEGRA_CSI_CIL_PHY_CONTROL 0x008
#define CLK_SETTLE_MASK GENMASK(13, 8)
#define THS_SETTLE_MASK GENMASK(5, 0)
#define TEGRA_CSI_CIL_INTERRUPT_MASK 0x00c
#define TEGRA_CSI_CIL_STATUS 0x010
#define TEGRA_CSI_CILX_STATUS 0x014
#define TEGRA_CSI_CIL_SW_SENSOR_RESET 0x020
#define TEGRA_CSI_PATTERN_GENERATOR_CTRL 0x000
#define PG_MODE_OFFSET 2
#define PG_ENABLE 0x1
#define PG_DISABLE 0x0
#define TEGRA_CSI_PG_BLANK 0x004
#define PG_VBLANK_OFFSET 16
#define TEGRA_CSI_PG_PHASE 0x008
#define TEGRA_CSI_PG_RED_FREQ 0x00c
#define PG_RED_VERT_INIT_FREQ_OFFSET 16
#define PG_RED_HOR_INIT_FREQ_OFFSET 0
#define TEGRA_CSI_PG_RED_FREQ_RATE 0x010
#define TEGRA_CSI_PG_GREEN_FREQ 0x014
#define PG_GREEN_VERT_INIT_FREQ_OFFSET 16
#define PG_GREEN_HOR_INIT_FREQ_OFFSET 0
#define TEGRA_CSI_PG_GREEN_FREQ_RATE 0x018
#define TEGRA_CSI_PG_BLUE_FREQ 0x01c
#define PG_BLUE_VERT_INIT_FREQ_OFFSET 16
#define PG_BLUE_HOR_INIT_FREQ_OFFSET 0
#define TEGRA_CSI_PG_BLUE_FREQ_RATE 0x020
#define TEGRA_CSI_PG_AOHDR 0x024
#define TEGRA_CSI_CSI_SW_STATUS_RESET 0x214
#define TEGRA_CSI_CLKEN_OVERRIDE 0x218
#define TEGRA210_CSI_PORT_OFFSET 0x34
#define TEGRA210_CSI_CIL_OFFSET 0x0f4
#define TEGRA210_CSI_TPG_OFFSET 0x18c
#define CSI_PP_OFFSET(block) ((block) * 0x800)
#define TEGRA210_VI_CSI_BASE(x) (0x100 + (x) * 0x100)
/* Tegra210 VI registers accessors */
static void tegra_vi_write(struct tegra_vi_channel *chan, unsigned int addr,
u32 val)
{
writel_relaxed(val, chan->vi->iomem + addr);
}
static u32 tegra_vi_read(struct tegra_vi_channel *chan, unsigned int addr)
{
return readl_relaxed(chan->vi->iomem + addr);
}
/* Tegra210 VI_CSI registers accessors */
static void vi_csi_write(struct tegra_vi_channel *chan, u8 portno,
unsigned int addr, u32 val)
{
void __iomem *vi_csi_base;
vi_csi_base = chan->vi->iomem + TEGRA210_VI_CSI_BASE(portno);
writel_relaxed(val, vi_csi_base + addr);
}
static u32 vi_csi_read(struct tegra_vi_channel *chan, u8 portno,
unsigned int addr)
{
void __iomem *vi_csi_base;
vi_csi_base = chan->vi->iomem + TEGRA210_VI_CSI_BASE(portno);
return readl_relaxed(vi_csi_base + addr);
}
/*
* Tegra210 VI channel capture operations
*/
static int tegra210_channel_host1x_syncpt_init(struct tegra_vi_channel *chan)
{
struct tegra_vi *vi = chan->vi;
unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
struct host1x_syncpt *fs_sp;
struct host1x_syncpt *mw_sp;
int ret, i;
for (i = 0; i < chan->numgangports; i++) {
fs_sp = host1x_syncpt_request(&vi->client, flags);
if (!fs_sp) {
dev_err(vi->dev, "failed to request frame start syncpoint\n");
ret = -ENOMEM;
goto free_syncpts;
}
mw_sp = host1x_syncpt_request(&vi->client, flags);
if (!mw_sp) {
dev_err(vi->dev, "failed to request memory ack syncpoint\n");
host1x_syncpt_put(fs_sp);
ret = -ENOMEM;
goto free_syncpts;
}
chan->frame_start_sp[i] = fs_sp;
chan->mw_ack_sp[i] = mw_sp;
spin_lock_init(&chan->sp_incr_lock[i]);
}
return 0;
free_syncpts:
for (i = 0; i < chan->numgangports; i++) {
host1x_syncpt_put(chan->mw_ack_sp[i]);
host1x_syncpt_put(chan->frame_start_sp[i]);
}
return ret;
}
static void tegra210_channel_host1x_syncpt_free(struct tegra_vi_channel *chan)
{
int i;
for (i = 0; i < chan->numgangports; i++) {
host1x_syncpt_put(chan->mw_ack_sp[i]);
host1x_syncpt_put(chan->frame_start_sp[i]);
}
}
static void tegra210_fmt_align(struct v4l2_pix_format *pix, unsigned int bpp)
{
unsigned int min_bpl;
unsigned int max_bpl;
unsigned int bpl;
/*
* The transfer alignment requirements are expressed in bytes.
* Clamp the requested width and height to the limits.
*/
pix->width = clamp(pix->width, TEGRA210_MIN_WIDTH, TEGRA210_MAX_WIDTH);
pix->height = clamp(pix->height, TEGRA210_MIN_HEIGHT, TEGRA210_MAX_HEIGHT);
/*
* Round the requested bytes per line value up to the surface
* alignment and clamp it between the minimum line size for the
* requested width and the maximum supported line size.
*/
min_bpl = pix->width * bpp;
max_bpl = rounddown(TEGRA210_MAX_WIDTH, SURFACE_ALIGN_BYTES);
bpl = roundup(pix->bytesperline, SURFACE_ALIGN_BYTES);
pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
pix->sizeimage = pix->bytesperline * pix->height;
if (pix->pixelformat == V4L2_PIX_FMT_NV16)
pix->sizeimage *= 2;
}
static int tegra_channel_capture_setup(struct tegra_vi_channel *chan,
u8 portno)
{
u32 height = chan->format.height;
u32 width = chan->format.width;
u32 format = chan->fmtinfo->img_fmt;
u32 data_type = chan->fmtinfo->img_dt;
u32 word_count = (width * chan->fmtinfo->bit_width) / 8;
u32 bypass_pixel_transform = BIT(BYPASS_PXL_TRANSFORM_OFFSET);
/*
* The VI pixel transformation unit converts the source pixel data
* format into the selected destination pixel format and aligns it
* properly while interfacing with the memory packer.
* Pixel transformation should be enabled for YUV and RGB formats and
* bypassed for RAW formats, as RAW formats only support direct to
* memory.
*/
if (chan->pg_mode || data_type == TEGRA_IMAGE_DT_YUV422_8 ||
data_type == TEGRA_IMAGE_DT_RGB888)
bypass_pixel_transform = 0;
/*
* For x8 source streaming, the source image is split across two x4
* ports, with the left half going to the first x4 port and the right
* half to the second x4 port.
* So, use the split width and the corresponding word count for each
* x4 port.
*/
if (chan->numgangports > 1) {
width = width >> 1;
word_count = (width * chan->fmtinfo->bit_width) / 8;
}
vi_csi_write(chan, portno, TEGRA_VI_CSI_ERROR_STATUS, 0xffffffff);
vi_csi_write(chan, portno, TEGRA_VI_CSI_IMAGE_DEF,
bypass_pixel_transform |
(format << IMAGE_DEF_FORMAT_OFFSET) |
IMAGE_DEF_DEST_MEM);
vi_csi_write(chan, portno, TEGRA_VI_CSI_IMAGE_DT, data_type);
vi_csi_write(chan, portno, TEGRA_VI_CSI_IMAGE_SIZE_WC, word_count);
vi_csi_write(chan, portno, TEGRA_VI_CSI_IMAGE_SIZE,
(height << IMAGE_SIZE_HEIGHT_OFFSET) | width);
return 0;
}
static void tegra_channel_vi_soft_reset(struct tegra_vi_channel *chan,
u8 portno)
{
/* disable clock gating to enable continuous clock */
tegra_vi_write(chan, TEGRA_VI_CFG_CG_CTRL, 0);
/*
* Soft reset the memory client interface, pixel format logic, sensor
* control logic and shadow copy logic to bring the VI to a clean state.
*/
vi_csi_write(chan, portno, TEGRA_VI_CSI_SW_RESET, 0xf);
usleep_range(100, 200);
vi_csi_write(chan, portno, TEGRA_VI_CSI_SW_RESET, 0x0);
/* enable back VI clock gating */
tegra_vi_write(chan, TEGRA_VI_CFG_CG_CTRL, VI_CG_2ND_LEVEL_EN);
}
static void tegra_channel_capture_error_recover(struct tegra_vi_channel *chan,
u8 portno)
{
struct v4l2_subdev *subdev;
u32 val;
/*
* Recover the VI and CSI hardware blocks in case of missing frame start
* events due to the source not streaming, noisy CSI inputs from the
* external source, or too many outstanding frame start or MW_ACK_DONE
* events, which can cause the CSI and VI hardware to hang.
* This helps to have a clean capture for the next frame.
*/
val = vi_csi_read(chan, portno, TEGRA_VI_CSI_ERROR_STATUS);
dev_dbg(&chan->video.dev, "TEGRA_VI_CSI_ERROR_STATUS 0x%08x\n", val);
vi_csi_write(chan, portno, TEGRA_VI_CSI_ERROR_STATUS, val);
val = tegra_vi_read(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR);
dev_dbg(&chan->video.dev,
"TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR 0x%08x\n", val);
tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR, val);
/* recover VI by issuing software reset and re-setup for capture */
tegra_channel_vi_soft_reset(chan, portno);
tegra_channel_capture_setup(chan, portno);
/* recover CSI block */
subdev = tegra_channel_get_remote_csi_subdev(chan);
tegra_csi_error_recover(subdev);
}
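/*
 * Remove and return the first buffer from the capture done list, or NULL
 * if the list is empty.
 */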
static struct tegra_channel_buffer *
dequeue_buf_done(struct tegra_vi_channel *chan)
{
struct tegra_channel_buffer *buf = NULL;
spin_lock(&chan->done_lock);
if (list_empty(&chan->done)) {
spin_unlock(&chan->done_lock);
return NULL;
}
buf = list_first_entry(&chan->done,
struct tegra_channel_buffer, queue);
if (buf)
list_del_init(&buf->queue);
spin_unlock(&chan->done_lock);
return buf;
}
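/*
 * Fill in the buffer metadata (sequence, field, timestamp) and return the
 * buffer to vb2 with the given state.
 */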
static void release_buffer(struct tegra_vi_channel *chan,
struct tegra_channel_buffer *buf,
enum vb2_buffer_state state)
{
struct vb2_v4l2_buffer *vb = &buf->buf;
vb->sequence = chan->sequence++;
vb->field = V4L2_FIELD_NONE;
vb->vb2_buf.timestamp = ktime_get_ns();
vb2_buffer_done(&vb->vb2_buf, state);
}
static void tegra_channel_vi_buffer_setup(struct tegra_vi_channel *chan,
u8 portno, u32 buf_offset,
struct tegra_channel_buffer *buf)
{
int bytesperline = chan->format.bytesperline;
u32 sizeimage = chan->format.sizeimage;
/* program buffer address by using surface 0 */
vi_csi_write(chan, portno, TEGRA_VI_CSI_SURFACE0_OFFSET_MSB,
((u64)buf->addr + buf_offset) >> 32);
vi_csi_write(chan, portno, TEGRA_VI_CSI_SURFACE0_OFFSET_LSB,
buf->addr + buf_offset);
vi_csi_write(chan, portno, TEGRA_VI_CSI_SURFACE0_STRIDE, bytesperline);
if (chan->fmtinfo->fourcc != V4L2_PIX_FMT_NV16)
return;
/*
* Program surface 1 for the UV plane, offset by the luma plane size
* (sizeimage / 2) from the Y plane.
*/
vi_csi_write(chan, portno, TEGRA_VI_CSI_SURFACE1_OFFSET_MSB,
(((u64)buf->addr + sizeimage / 2) + buf_offset) >> 32);
vi_csi_write(chan, portno, TEGRA_VI_CSI_SURFACE1_OFFSET_LSB,
buf->addr + sizeimage / 2 + buf_offset);
vi_csi_write(chan, portno, TEGRA_VI_CSI_SURFACE1_STRIDE, bytesperline);
}
static int tegra_channel_capture_frame(struct tegra_vi_channel *chan,
struct tegra_channel_buffer *buf)
{
u32 thresh, value, frame_start, mw_ack_done;
u32 fs_thresh[GANG_PORTS_MAX];
u8 *portnos = chan->portnos;
int gang_bpl = (chan->format.width >> 1) * chan->fmtinfo->bpp;
u32 buf_offset;
bool capture_timedout = false;
int err, i;
for (i = 0; i < chan->numgangports; i++) {
/*
* Align buffers side-by-side for all consecutive x4 ports
* in gang ports using bytes per line based on source split
* width.
*/
buf_offset = i * roundup(gang_bpl, SURFACE_ALIGN_BYTES);
tegra_channel_vi_buffer_setup(chan, portnos[i], buf_offset,
buf);
/*
* The Tegra VI block interacts with host1x syncpoints to synchronize
* the programmed conditions with the hardware capture operations.
* The frame start and memory write acknowledge syncpoints each have
* their own FIFO of depth 2.
*
* Syncpoint trigger conditions set through the VI_INCR_SYNCPT
* register are added to the HW syncpt FIFO; when the HW event
* triggers, the condition is removed from the FIFO and the counter at
* the syncpoint index is incremented by the hardware. Software can
* then wait for the counter to reach the threshold to synchronize
* frame capture with the hardware capture events.
*/
/* increase channel syncpoint threshold for FRAME_START */
thresh = host1x_syncpt_incr_max(chan->frame_start_sp[i], 1);
fs_thresh[i] = thresh;
/* Program FRAME_START trigger condition syncpt request */
frame_start = VI_CSI_PP_FRAME_START(portnos[i]);
value = VI_CFG_VI_INCR_SYNCPT_COND(frame_start) |
host1x_syncpt_id(chan->frame_start_sp[i]);
tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT, value);
/* increase channel syncpoint threshold for MW_ACK_DONE */
thresh = host1x_syncpt_incr_max(chan->mw_ack_sp[i], 1);
buf->mw_ack_sp_thresh[i] = thresh;
/* Program MW_ACK_DONE trigger condition syncpt request */
mw_ack_done = VI_CSI_MW_ACK_DONE(portnos[i]);
value = VI_CFG_VI_INCR_SYNCPT_COND(mw_ack_done) |
host1x_syncpt_id(chan->mw_ack_sp[i]);
tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT, value);
}
/* enable single shot capture after all ganged ports are ready */
for (i = 0; i < chan->numgangports; i++)
vi_csi_write(chan, portnos[i], TEGRA_VI_CSI_SINGLE_SHOT,
SINGLE_SHOT_CAPTURE);
for (i = 0; i < chan->numgangports; i++) {
/*
* Wait for syncpt counter to reach frame start event threshold
*/
err = host1x_syncpt_wait(chan->frame_start_sp[i], fs_thresh[i],
TEGRA_VI_SYNCPT_WAIT_TIMEOUT, &value);
if (err) {
capture_timedout = true;
/* increment syncpoint counter for timedout events */
host1x_syncpt_incr(chan->frame_start_sp[i]);
spin_lock(&chan->sp_incr_lock[i]);
host1x_syncpt_incr(chan->mw_ack_sp[i]);
spin_unlock(&chan->sp_incr_lock[i]);
/* clear errors and recover */
tegra_channel_capture_error_recover(chan, portnos[i]);
}
}
if (capture_timedout) {
dev_err_ratelimited(&chan->video.dev,
"frame start syncpt timeout: %d\n", err);
release_buffer(chan, buf, VB2_BUF_STATE_ERROR);
return err;
}
/* move buffer to capture done queue */
spin_lock(&chan->done_lock);
list_add_tail(&buf->queue, &chan->done);
spin_unlock(&chan->done_lock);
/* wake up kthread for capture done */
wake_up_interruptible(&chan->done_wait);
return 0;
}
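/*
 * Wait for the memory write acknowledge syncpoint on each gang port and
 * complete the buffer, marking it as erroneous if any wait timed out.
 */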
static void tegra_channel_capture_done(struct tegra_vi_channel *chan,
struct tegra_channel_buffer *buf)
{
enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
u32 value;
bool capture_timedout = false;
int ret, i;
for (i = 0; i < chan->numgangports; i++) {
/*
* Wait for syncpt counter to reach MW_ACK_DONE event threshold
*/
ret = host1x_syncpt_wait(chan->mw_ack_sp[i],
buf->mw_ack_sp_thresh[i],
TEGRA_VI_SYNCPT_WAIT_TIMEOUT, &value);
if (ret) {
capture_timedout = true;
state = VB2_BUF_STATE_ERROR;
/* increment syncpoint counter for timedout event */
spin_lock(&chan->sp_incr_lock[i]);
host1x_syncpt_incr(chan->mw_ack_sp[i]);
spin_unlock(&chan->sp_incr_lock[i]);
}
}
if (capture_timedout)
dev_err_ratelimited(&chan->video.dev,
"MW_ACK_DONE syncpt timeout: %d\n", ret);
release_buffer(chan, buf, state);
}
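/*
 * Capture start kthread: dequeue buffers from the capture list and program
 * a single shot capture for each until asked to stop.
 */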
static int chan_capture_kthread_start(void *data)
{
struct tegra_vi_channel *chan = data;
struct tegra_channel_buffer *buf;
unsigned int retries = 0;
int err = 0;
while (1) {
/*
* The source is not streaming if the error is non-zero.
* So, do not dequeue buffers on error and let the thread sleep
* until the kthread stop signal is received.
*/
wait_event_interruptible(chan->start_wait,
kthread_should_stop() ||
(!list_empty(&chan->capture) &&
!err));
if (kthread_should_stop())
break;
/* dequeue the buffer and start capture */
spin_lock(&chan->start_lock);
if (list_empty(&chan->capture)) {
spin_unlock(&chan->start_lock);
continue;
}
buf = list_first_entry(&chan->capture,
struct tegra_channel_buffer, queue);
list_del_init(&buf->queue);
spin_unlock(&chan->start_lock);
err = tegra_channel_capture_frame(chan, buf);
if (!err) {
retries = 0;
continue;
}
if (retries++ > chan->syncpt_timeout_retry)
vb2_queue_error(&chan->queue);
else
err = 0;
}
return 0;
}
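/*
 * Capture finish kthread: wait for completed captures on the done list and
 * return the buffers to vb2.
 */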
static int chan_capture_kthread_finish(void *data)
{
struct tegra_vi_channel *chan = data;
struct tegra_channel_buffer *buf;
while (1) {
wait_event_interruptible(chan->done_wait,
!list_empty(&chan->done) ||
kthread_should_stop());
/* dequeue buffers and finish capture */
buf = dequeue_buf_done(chan);
while (buf) {
tegra_channel_capture_done(chan, buf);
buf = dequeue_buf_done(chan);
}
if (kthread_should_stop())
break;
}
return 0;
}
static int tegra210_vi_start_streaming(struct vb2_queue *vq, u32 count)
{
struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
struct media_pipeline *pipe = &chan->video.pipe;
u32 val;
u8 *portnos = chan->portnos;
int ret, i;
tegra_vi_write(chan, TEGRA_VI_CFG_CG_CTRL, VI_CG_2ND_LEVEL_EN);
/* clear syncpt errors */
val = tegra_vi_read(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR);
tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR, val);
/*
* A full syncpoint FIFO stalls the host interface.
* Setting NO_STALL drops INCR_SYNCPT methods when the FIFOs are
* full and sets the corresponding condition bits in the
* INCR_SYNCPT_ERROR register.
* This allows SW to perform error recovery.
*/
tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_CNTRL,
VI_INCR_SYNCPT_NO_STALL);
/* start the pipeline */
ret = video_device_pipeline_start(&chan->video, pipe);
if (ret < 0)
goto error_pipeline_start;
/* clear csi errors and do capture setup for all ports in gang mode */
for (i = 0; i < chan->numgangports; i++) {
val = vi_csi_read(chan, portnos[i], TEGRA_VI_CSI_ERROR_STATUS);
vi_csi_write(chan, portnos[i], TEGRA_VI_CSI_ERROR_STATUS, val);
tegra_channel_capture_setup(chan, portnos[i]);
}
ret = tegra_channel_set_stream(chan, true);
if (ret < 0)
goto error_set_stream;
chan->sequence = 0;
/* start kthreads to capture data to buffer and return them */
chan->kthread_start_capture = kthread_run(chan_capture_kthread_start,
chan, "%s:0",
chan->video.name);
if (IS_ERR(chan->kthread_start_capture)) {
ret = PTR_ERR(chan->kthread_start_capture);
chan->kthread_start_capture = NULL;
dev_err(&chan->video.dev,
"failed to run capture start kthread: %d\n", ret);
goto error_kthread_start;
}
chan->kthread_finish_capture = kthread_run(chan_capture_kthread_finish,
chan, "%s:1",
chan->video.name);
if (IS_ERR(chan->kthread_finish_capture)) {
ret = PTR_ERR(chan->kthread_finish_capture);
chan->kthread_finish_capture = NULL;
dev_err(&chan->video.dev,
"failed to run capture finish kthread: %d\n", ret);
goto error_kthread_done;
}
return 0;
error_kthread_done:
kthread_stop(chan->kthread_start_capture);
error_kthread_start:
tegra_channel_set_stream(chan, false);
error_set_stream:
video_device_pipeline_stop(&chan->video);
error_pipeline_start:
tegra_channel_release_buffers(chan, VB2_BUF_STATE_QUEUED);
return ret;
}
static void tegra210_vi_stop_streaming(struct vb2_queue *vq)
{
struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
if (chan->kthread_start_capture) {
kthread_stop(chan->kthread_start_capture);
chan->kthread_start_capture = NULL;
}
if (chan->kthread_finish_capture) {
kthread_stop(chan->kthread_finish_capture);
chan->kthread_finish_capture = NULL;
}
tegra_channel_release_buffers(chan, VB2_BUF_STATE_ERROR);
tegra_channel_set_stream(chan, false);
video_device_pipeline_stop(&chan->video);
}
/*
* Tegra210 VI pixel memory format enum.
* These format enum values get programmed into the corresponding Tegra
* VI channel register bits.
*/
enum tegra210_image_format {
TEGRA210_IMAGE_FORMAT_T_L8 = 16,
TEGRA210_IMAGE_FORMAT_T_R16_I = 32,
TEGRA210_IMAGE_FORMAT_T_B5G6R5,
TEGRA210_IMAGE_FORMAT_T_R5G6B5,
TEGRA210_IMAGE_FORMAT_T_A1B5G5R5,
TEGRA210_IMAGE_FORMAT_T_A1R5G5B5,
TEGRA210_IMAGE_FORMAT_T_B5G5R5A1,
TEGRA210_IMAGE_FORMAT_T_R5G5B5A1,
TEGRA210_IMAGE_FORMAT_T_A4B4G4R4,
TEGRA210_IMAGE_FORMAT_T_A4R4G4B4,
TEGRA210_IMAGE_FORMAT_T_B4G4R4A4,
TEGRA210_IMAGE_FORMAT_T_R4G4B4A4,
TEGRA210_IMAGE_FORMAT_T_A8B8G8R8 = 64,
TEGRA210_IMAGE_FORMAT_T_A8R8G8B8,
TEGRA210_IMAGE_FORMAT_T_B8G8R8A8,
TEGRA210_IMAGE_FORMAT_T_R8G8B8A8,
TEGRA210_IMAGE_FORMAT_T_A2B10G10R10,
TEGRA210_IMAGE_FORMAT_T_A2R10G10B10,
TEGRA210_IMAGE_FORMAT_T_B10G10R10A2,
TEGRA210_IMAGE_FORMAT_T_R10G10B10A2,
TEGRA210_IMAGE_FORMAT_T_A8Y8U8V8 = 193,
TEGRA210_IMAGE_FORMAT_T_V8U8Y8A8,
TEGRA210_IMAGE_FORMAT_T_A2Y10U10V10 = 197,
TEGRA210_IMAGE_FORMAT_T_V10U10Y10A2,
TEGRA210_IMAGE_FORMAT_T_Y8_U8__Y8_V8,
TEGRA210_IMAGE_FORMAT_T_Y8_V8__Y8_U8,
TEGRA210_IMAGE_FORMAT_T_U8_Y8__V8_Y8,
TEGRA210_IMAGE_FORMAT_T_V8_Y8__U8_Y8,
TEGRA210_IMAGE_FORMAT_T_Y8__U8__V8_N444 = 224,
TEGRA210_IMAGE_FORMAT_T_Y8__U8V8_N444,
TEGRA210_IMAGE_FORMAT_T_Y8__V8U8_N444,
TEGRA210_IMAGE_FORMAT_T_Y8__U8__V8_N422,
TEGRA210_IMAGE_FORMAT_T_Y8__U8V8_N422,
TEGRA210_IMAGE_FORMAT_T_Y8__V8U8_N422,
TEGRA210_IMAGE_FORMAT_T_Y8__U8__V8_N420,
TEGRA210_IMAGE_FORMAT_T_Y8__U8V8_N420,
TEGRA210_IMAGE_FORMAT_T_Y8__V8U8_N420,
TEGRA210_IMAGE_FORMAT_T_X2LC10LB10LA10,
TEGRA210_IMAGE_FORMAT_T_A2R6R6R6R6R6,
};
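/*
 * Helper to declare a Tegra210 video format entry from its CSI data type,
 * bit width, media bus code, bytes per pixel, VI memory format and V4L2
 * pixel format fourcc.
 */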
#define TEGRA210_VIDEO_FMT(DATA_TYPE, BIT_WIDTH, MBUS_CODE, BPP, \
FORMAT, FOURCC) \
{ \
TEGRA_IMAGE_DT_##DATA_TYPE, \
BIT_WIDTH, \
MEDIA_BUS_FMT_##MBUS_CODE, \
BPP, \
TEGRA210_IMAGE_FORMAT_##FORMAT, \
V4L2_PIX_FMT_##FOURCC, \
}
/* Tegra210 supported video formats */
static const struct tegra_video_format tegra210_video_formats[] = {
/* RAW 8 */
TEGRA210_VIDEO_FMT(RAW8, 8, SRGGB8_1X8, 1, T_L8, SRGGB8),
TEGRA210_VIDEO_FMT(RAW8, 8, SGRBG8_1X8, 1, T_L8, SGRBG8),
TEGRA210_VIDEO_FMT(RAW8, 8, SGBRG8_1X8, 1, T_L8, SGBRG8),
TEGRA210_VIDEO_FMT(RAW8, 8, SBGGR8_1X8, 1, T_L8, SBGGR8),
/* RAW 10 */
TEGRA210_VIDEO_FMT(RAW10, 10, SRGGB10_1X10, 2, T_R16_I, SRGGB10),
TEGRA210_VIDEO_FMT(RAW10, 10, SGRBG10_1X10, 2, T_R16_I, SGRBG10),
TEGRA210_VIDEO_FMT(RAW10, 10, SGBRG10_1X10, 2, T_R16_I, SGBRG10),
TEGRA210_VIDEO_FMT(RAW10, 10, SBGGR10_1X10, 2, T_R16_I, SBGGR10),
/* RAW 12 */
TEGRA210_VIDEO_FMT(RAW12, 12, SRGGB12_1X12, 2, T_R16_I, SRGGB12),
TEGRA210_VIDEO_FMT(RAW12, 12, SGRBG12_1X12, 2, T_R16_I, SGRBG12),
TEGRA210_VIDEO_FMT(RAW12, 12, SGBRG12_1X12, 2, T_R16_I, SGBRG12),
TEGRA210_VIDEO_FMT(RAW12, 12, SBGGR12_1X12, 2, T_R16_I, SBGGR12),
/* RGB888 */
TEGRA210_VIDEO_FMT(RGB888, 24, RGB888_1X24, 4, T_A8R8G8B8, XBGR32),
TEGRA210_VIDEO_FMT(RGB888, 24, RGB888_1X32_PADHI, 4, T_A8B8G8R8,
RGBX32),
/* YUV422 */
TEGRA210_VIDEO_FMT(YUV422_8, 16, UYVY8_1X16, 2, T_U8_Y8__V8_Y8, YVYU),
TEGRA210_VIDEO_FMT(YUV422_8, 16, VYUY8_1X16, 2, T_V8_Y8__U8_Y8, YUYV),
TEGRA210_VIDEO_FMT(YUV422_8, 16, YUYV8_1X16, 2, T_Y8_U8__Y8_V8, VYUY),
TEGRA210_VIDEO_FMT(YUV422_8, 16, YVYU8_1X16, 2, T_Y8_V8__Y8_U8, UYVY),
TEGRA210_VIDEO_FMT(YUV422_8, 16, UYVY8_1X16, 1, T_Y8__V8U8_N422, NV16),
TEGRA210_VIDEO_FMT(YUV422_8, 16, UYVY8_2X8, 2, T_U8_Y8__V8_Y8, YVYU),
TEGRA210_VIDEO_FMT(YUV422_8, 16, VYUY8_2X8, 2, T_V8_Y8__U8_Y8, YUYV),
TEGRA210_VIDEO_FMT(YUV422_8, 16, YUYV8_2X8, 2, T_Y8_U8__Y8_V8, VYUY),
TEGRA210_VIDEO_FMT(YUV422_8, 16, YVYU8_2X8, 2, T_Y8_V8__Y8_U8, UYVY),
};
/* Tegra210 VI operations */
static const struct tegra_vi_ops tegra210_vi_ops = {
.channel_host1x_syncpt_init = tegra210_channel_host1x_syncpt_init,
.channel_host1x_syncpt_free = tegra210_channel_host1x_syncpt_free,
.vi_fmt_align = tegra210_fmt_align,
.vi_start_streaming = tegra210_vi_start_streaming,
.vi_stop_streaming = tegra210_vi_stop_streaming,
};
/* Tegra210 VI SoC data */
const struct tegra_vi_soc tegra210_vi_soc = {
.video_formats = tegra210_video_formats,
.nformats = ARRAY_SIZE(tegra210_video_formats),
.ops = &tegra210_vi_ops,
.hw_revision = 3,
.vi_max_channels = 6,
#if IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)
.default_video_format = &tegra210_video_formats[0],
.vi_max_clk_hz = 499200000,
#else
.default_video_format = &tegra210_video_formats[4],
.vi_max_clk_hz = 998400000,
#endif
};
/* Tegra210 CSI PHY registers accessors */
static void csi_write(struct tegra_csi *csi, u8 portno, unsigned int addr,
u32 val)
{
void __iomem *csi_pp_base;
csi_pp_base = csi->iomem + CSI_PP_OFFSET(portno >> 1);
writel_relaxed(val, csi_pp_base + addr);
}
/* Tegra210 CSI Pixel parser registers accessors */
static void pp_write(struct tegra_csi *csi, u8 portno, u32 addr, u32 val)
{
void __iomem *csi_pp_base;
unsigned int offset;
csi_pp_base = csi->iomem + CSI_PP_OFFSET(portno >> 1);
offset = (portno % CSI_PORTS_PER_BRICK) * TEGRA210_CSI_PORT_OFFSET;
writel_relaxed(val, csi_pp_base + offset + addr);
}
static u32 pp_read(struct tegra_csi *csi, u8 portno, u32 addr)
{
void __iomem *csi_pp_base;
unsigned int offset;
csi_pp_base = csi->iomem + CSI_PP_OFFSET(portno >> 1);
offset = (portno % CSI_PORTS_PER_BRICK) * TEGRA210_CSI_PORT_OFFSET;
return readl_relaxed(csi_pp_base + offset + addr);
}
/* Tegra210 CSI CIL A/B port registers accessors */
static void cil_write(struct tegra_csi *csi, u8 portno, u32 addr, u32 val)
{
void __iomem *csi_cil_base;
unsigned int offset;
csi_cil_base = csi->iomem + CSI_PP_OFFSET(portno >> 1) +
TEGRA210_CSI_CIL_OFFSET;
offset = (portno % CSI_PORTS_PER_BRICK) * TEGRA210_CSI_PORT_OFFSET;
writel_relaxed(val, csi_cil_base + offset + addr);
}
static u32 cil_read(struct tegra_csi *csi, u8 portno, u32 addr)
{
void __iomem *csi_cil_base;
unsigned int offset;
csi_cil_base = csi->iomem + CSI_PP_OFFSET(portno >> 1) +
TEGRA210_CSI_CIL_OFFSET;
offset = (portno % CSI_PORTS_PER_BRICK) * TEGRA210_CSI_PORT_OFFSET;
return readl_relaxed(csi_cil_base + offset + addr);
}
/* Tegra210 CSI Test pattern generator registers accessor */
static void tpg_write(struct tegra_csi *csi, u8 portno, unsigned int addr,
u32 val)
{
void __iomem *csi_pp_base;
unsigned int offset;
csi_pp_base = csi->iomem + CSI_PP_OFFSET(portno >> 1);
offset = (portno % CSI_PORTS_PER_BRICK) * TEGRA210_CSI_PORT_OFFSET +
TEGRA210_CSI_TPG_OFFSET;
writel_relaxed(val, csi_pp_base + offset + addr);
}
/*
* Tegra210 CSI operations
*/
static void tegra210_csi_port_recover(struct tegra_csi_channel *csi_chan,
u8 portno)
{
struct tegra_csi *csi = csi_chan->csi;
u32 val;
/*
* Recover the CSI hardware in case of capture errors by issuing a
* software reset to the CSI CIL sensor and pixel parser, and clear
* the errors to have a clean capture on the next streaming.
*/
val = pp_read(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS);
dev_dbg(csi->dev, "TEGRA_CSI_PIXEL_PARSER_STATUS 0x%08x\n", val);
val = cil_read(csi, portno, TEGRA_CSI_CIL_STATUS);
dev_dbg(csi->dev, "TEGRA_CSI_CIL_STATUS 0x%08x\n", val);
val = cil_read(csi, portno, TEGRA_CSI_CILX_STATUS);
dev_dbg(csi->dev, "TEGRA_CSI_CILX_STATUS 0x%08x\n", val);
if (csi_chan->numlanes == 4) {
/* reset CSI CIL sensor */
cil_write(csi, portno, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x1);
cil_write(csi, portno + 1, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x1);
/*
* SW_STATUS_RESET resets all status bits of the PPA, PPB, CILA and
* CILB status registers and the debug counters.
* So, SW_STATUS_RESET can be used only when the CSI brick is in
* x4 mode.
*/
csi_write(csi, portno, TEGRA_CSI_CSI_SW_STATUS_RESET, 0x1);
/* sleep for 20 clock cycles to drain the FIFO */
usleep_range(10, 20);
cil_write(csi, portno + 1, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x0);
cil_write(csi, portno, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x0);
csi_write(csi, portno, TEGRA_CSI_CSI_SW_STATUS_RESET, 0x0);
} else {
/* reset CSICIL sensor */
cil_write(csi, portno, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x1);
usleep_range(10, 20);
cil_write(csi, portno, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x0);
/* clear the errors */
pp_write(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS,
0xffffffff);
cil_write(csi, portno, TEGRA_CSI_CIL_STATUS, 0xffffffff);
cil_write(csi, portno, TEGRA_CSI_CILX_STATUS, 0xffffffff);
}
}
static void tegra210_csi_error_recover(struct tegra_csi_channel *csi_chan)
{
u8 *portnos = csi_chan->csi_port_nums;
int i;
for (i = 0; i < csi_chan->numgangports; i++)
tegra210_csi_port_recover(csi_chan, portnos[i]);
}
static int
tegra210_csi_port_start_streaming(struct tegra_csi_channel *csi_chan,
u8 portno)
{
struct tegra_csi *csi = csi_chan->csi;
u8 clk_settle_time = 0;
u8 ths_settle_time = 10;
u32 val;
if (!csi_chan->pg_mode)
tegra_csi_calc_settle_time(csi_chan, portno, &clk_settle_time,
&ths_settle_time);
csi_write(csi, portno, TEGRA_CSI_CLKEN_OVERRIDE, 0);
/* clean up status */
pp_write(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS, 0xffffffff);
cil_write(csi, portno, TEGRA_CSI_CIL_STATUS, 0xffffffff);
cil_write(csi, portno, TEGRA_CSI_CILX_STATUS, 0xffffffff);
cil_write(csi, portno, TEGRA_CSI_CIL_INTERRUPT_MASK, 0x0);
/* CIL PHY registers setup */
cil_write(csi, portno, TEGRA_CSI_CIL_PAD_CONFIG0, 0x0);
cil_write(csi, portno, TEGRA_CSI_CIL_PHY_CONTROL,
FIELD_PREP(CLK_SETTLE_MASK, clk_settle_time) |
FIELD_PREP(THS_SETTLE_MASK, ths_settle_time));
/*
* The CSI unit provides for connection of up to six cameras in
* the system and is organized as three identical instances of
* two MIPI support blocks, each with a separate 4-lane
* interface that can be configured as a single camera with 4
* lanes or as a dual camera with 2 lanes available for each
* camera.
*/
if (csi_chan->numlanes == 4) {
cil_write(csi, portno + 1, TEGRA_CSI_CIL_STATUS, 0xffffffff);
cil_write(csi, portno + 1, TEGRA_CSI_CILX_STATUS, 0xffffffff);
cil_write(csi, portno + 1, TEGRA_CSI_CIL_INTERRUPT_MASK, 0x0);
cil_write(csi, portno, TEGRA_CSI_CIL_PAD_CONFIG0,
BRICK_CLOCK_A_4X);
cil_write(csi, portno + 1, TEGRA_CSI_CIL_PAD_CONFIG0, 0x0);
cil_write(csi, portno + 1, TEGRA_CSI_CIL_INTERRUPT_MASK, 0x0);
cil_write(csi, portno + 1, TEGRA_CSI_CIL_PHY_CONTROL,
FIELD_PREP(CLK_SETTLE_MASK, clk_settle_time) |
FIELD_PREP(THS_SETTLE_MASK, ths_settle_time));
csi_write(csi, portno, TEGRA_CSI_PHY_CIL_COMMAND,
CSI_A_PHY_CIL_ENABLE | CSI_B_PHY_CIL_ENABLE);
} else {
val = ((portno & 1) == PORT_A) ?
CSI_A_PHY_CIL_ENABLE | CSI_B_PHY_CIL_NOP :
CSI_B_PHY_CIL_ENABLE | CSI_A_PHY_CIL_NOP;
csi_write(csi, portno, TEGRA_CSI_PHY_CIL_COMMAND, val);
}
/* CSI pixel parser registers setup */
pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_PP_COMMAND,
(0xf << CSI_PP_START_MARKER_FRAME_MAX_OFFSET) |
CSI_PP_SINGLE_SHOT_ENABLE | CSI_PP_RST);
pp_write(csi, portno, TEGRA_CSI_PIXEL_PARSER_INTERRUPT_MASK, 0x0);
pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_CONTROL0,
CSI_PP_PACKET_HEADER_SENT |
CSI_PP_DATA_IDENTIFIER_ENABLE |
CSI_PP_WORD_COUNT_SELECT_HEADER |
CSI_PP_CRC_CHECK_ENABLE | CSI_PP_WC_CHECK |
CSI_PP_OUTPUT_FORMAT_STORE | CSI_PPA_PAD_LINE_NOPAD |
CSI_PP_HEADER_EC_DISABLE | CSI_PPA_PAD_FRAME_NOPAD |
(portno & 1));
pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_CONTROL1,
(0x1 << CSI_PP_TOP_FIELD_FRAME_OFFSET) |
(0x1 << CSI_PP_TOP_FIELD_FRAME_MASK_OFFSET));
pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_GAP,
0x14 << PP_FRAME_MIN_GAP_OFFSET);
pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_EXPECTED_FRAME, 0x0);
pp_write(csi, portno, TEGRA_CSI_INPUT_STREAM_CONTROL,
(0x3f << CSI_SKIP_PACKET_THRESHOLD_OFFSET) |
(csi_chan->numlanes - 1));
/* TPG setup */
if (csi_chan->pg_mode) {
tpg_write(csi, portno, TEGRA_CSI_PATTERN_GENERATOR_CTRL,
((csi_chan->pg_mode - 1) << PG_MODE_OFFSET) |
PG_ENABLE);
tpg_write(csi, portno, TEGRA_CSI_PG_BLANK,
csi_chan->v_blank << PG_VBLANK_OFFSET |
csi_chan->h_blank);
tpg_write(csi, portno, TEGRA_CSI_PG_PHASE, 0x0);
tpg_write(csi, portno, TEGRA_CSI_PG_RED_FREQ,
(0x10 << PG_RED_VERT_INIT_FREQ_OFFSET) |
(0x10 << PG_RED_HOR_INIT_FREQ_OFFSET));
tpg_write(csi, portno, TEGRA_CSI_PG_RED_FREQ_RATE, 0x0);
tpg_write(csi, portno, TEGRA_CSI_PG_GREEN_FREQ,
(0x10 << PG_GREEN_VERT_INIT_FREQ_OFFSET) |
(0x10 << PG_GREEN_HOR_INIT_FREQ_OFFSET));
tpg_write(csi, portno, TEGRA_CSI_PG_GREEN_FREQ_RATE, 0x0);
tpg_write(csi, portno, TEGRA_CSI_PG_BLUE_FREQ,
(0x10 << PG_BLUE_VERT_INIT_FREQ_OFFSET) |
(0x10 << PG_BLUE_HOR_INIT_FREQ_OFFSET));
tpg_write(csi, portno, TEGRA_CSI_PG_BLUE_FREQ_RATE, 0x0);
}
pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_PP_COMMAND,
(0xf << CSI_PP_START_MARKER_FRAME_MAX_OFFSET) |
CSI_PP_SINGLE_SHOT_ENABLE | CSI_PP_ENABLE);
return 0;
}
static void
tegra210_csi_port_stop_streaming(struct tegra_csi_channel *csi_chan, u8 portno)
{
struct tegra_csi *csi = csi_chan->csi;
u32 val;
val = pp_read(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS);
dev_dbg(csi->dev, "TEGRA_CSI_PIXEL_PARSER_STATUS 0x%08x\n", val);
pp_write(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS, val);
val = cil_read(csi, portno, TEGRA_CSI_CIL_STATUS);
dev_dbg(csi->dev, "TEGRA_CSI_CIL_STATUS 0x%08x\n", val);
cil_write(csi, portno, TEGRA_CSI_CIL_STATUS, val);
val = cil_read(csi, portno, TEGRA_CSI_CILX_STATUS);
dev_dbg(csi->dev, "TEGRA_CSI_CILX_STATUS 0x%08x\n", val);
cil_write(csi, portno, TEGRA_CSI_CILX_STATUS, val);
pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_PP_COMMAND,
(0xf << CSI_PP_START_MARKER_FRAME_MAX_OFFSET) |
CSI_PP_DISABLE);
if (csi_chan->pg_mode) {
tpg_write(csi, portno, TEGRA_CSI_PATTERN_GENERATOR_CTRL,
PG_DISABLE);
return;
}
if (csi_chan->numlanes == 4) {
csi_write(csi, portno, TEGRA_CSI_PHY_CIL_COMMAND,
CSI_A_PHY_CIL_DISABLE |
CSI_B_PHY_CIL_DISABLE);
} else {
val = ((portno & 1) == PORT_A) ?
CSI_A_PHY_CIL_DISABLE | CSI_B_PHY_CIL_NOP :
CSI_B_PHY_CIL_DISABLE | CSI_A_PHY_CIL_NOP;
csi_write(csi, portno, TEGRA_CSI_PHY_CIL_COMMAND, val);
}
}
static int tegra210_csi_start_streaming(struct tegra_csi_channel *csi_chan)
{
u8 *portnos = csi_chan->csi_port_nums;
int ret, i;
for (i = 0; i < csi_chan->numgangports; i++) {
ret = tegra210_csi_port_start_streaming(csi_chan, portnos[i]);
if (ret)
goto stream_start_fail;
}
return 0;
stream_start_fail:
for (i = i - 1; i >= 0; i--)
tegra210_csi_port_stop_streaming(csi_chan, portnos[i]);
return ret;
}
static void tegra210_csi_stop_streaming(struct tegra_csi_channel *csi_chan)
{
u8 *portnos = csi_chan->csi_port_nums;
int i;
for (i = 0; i < csi_chan->numgangports; i++)
tegra210_csi_port_stop_streaming(csi_chan, portnos[i]);
}
/*
* Tegra210 CSI TPG frame rate table with horizontal and vertical
* blanking intervals for the corresponding format and resolution.
* Blanking intervals are values tuned by the design team for the
* maximum TPG clock rate.
*/
static const struct tpg_framerate tegra210_tpg_frmrate_table[] = {
{
.frmsize = { 1280, 720 },
.code = MEDIA_BUS_FMT_SRGGB10_1X10,
.framerate = 120,
.h_blank = 512,
.v_blank = 8,
},
{
.frmsize = { 1920, 1080 },
.code = MEDIA_BUS_FMT_SRGGB10_1X10,
.framerate = 60,
.h_blank = 512,
.v_blank = 8,
},
{
.frmsize = { 3840, 2160 },
.code = MEDIA_BUS_FMT_SRGGB10_1X10,
.framerate = 20,
.h_blank = 8,
.v_blank = 8,
},
{
.frmsize = { 1280, 720 },
.code = MEDIA_BUS_FMT_RGB888_1X32_PADHI,
.framerate = 60,
.h_blank = 512,
.v_blank = 8,
},
{
.frmsize = { 1920, 1080 },
.code = MEDIA_BUS_FMT_RGB888_1X32_PADHI,
.framerate = 30,
.h_blank = 512,
.v_blank = 8,
},
{
.frmsize = { 3840, 2160 },
.code = MEDIA_BUS_FMT_RGB888_1X32_PADHI,
.framerate = 8,
.h_blank = 8,
.v_blank = 8,
},
};
static const char * const tegra210_csi_cil_clks[] = {
"csi",
"cilab",
"cilcd",
"cile",
#if IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)
"csi_tpg",
#endif
};
/* Tegra210 CSI operations */
static const struct tegra_csi_ops tegra210_csi_ops = {
.csi_start_streaming = tegra210_csi_start_streaming,
.csi_stop_streaming = tegra210_csi_stop_streaming,
.csi_err_recover = tegra210_csi_error_recover,
};
/* Tegra210 CSI SoC data */
const struct tegra_csi_soc tegra210_csi_soc = {
.ops = &tegra210_csi_ops,
.csi_max_channels = 6,
.clk_names = tegra210_csi_cil_clks,
.num_clks = ARRAY_SIZE(tegra210_csi_cil_clks),
.tpg_frmrate_table = tegra210_tpg_frmrate_table,
.tpg_frmrate_table_size = ARRAY_SIZE(tegra210_tpg_frmrate_table),
};
| linux-master | drivers/staging/media/tegra-video/tegra210.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Tegra20-specific VI implementation
*
* Copyright (C) 2023 SKIDATA GmbH
* Author: Luca Ceresoli <[email protected]>
*/
/*
* This source file contains Tegra20 supported video formats,
* VI and VIP SoC specific data, operations and registers accessors.
*/
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/host1x.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/v4l2-mediabus.h>
#include "vip.h"
#include "vi.h"
#define TEGRA_VI_SYNCPT_WAIT_TIMEOUT msecs_to_jiffies(200)
/* These are just good-sense numbers. The actual min/max is not documented. */
#define TEGRA20_MIN_WIDTH 32U
#define TEGRA20_MIN_HEIGHT 32U
#define TEGRA20_MAX_WIDTH 2048U
#define TEGRA20_MAX_HEIGHT 2048U
/* --------------------------------------------------------------------------
* Registers
*/
#define TEGRA_VI_CONT_SYNCPT_OUT_1 0x0060
#define VI_CONT_SYNCPT_OUT_1_CONTINUOUS_SYNCPT BIT(8)
#define VI_CONT_SYNCPT_OUT_1_SYNCPT_IDX_SFT 0
#define TEGRA_VI_VI_INPUT_CONTROL 0x0088
#define VI_INPUT_FIELD_DETECT BIT(27)
#define VI_INPUT_BT656 BIT(25)
#define VI_INPUT_YUV_INPUT_FORMAT_SFT 8 /* bits [9:8] */
#define VI_INPUT_YUV_INPUT_FORMAT_UYVY (0 << VI_INPUT_YUV_INPUT_FORMAT_SFT)
#define VI_INPUT_YUV_INPUT_FORMAT_VYUY (1 << VI_INPUT_YUV_INPUT_FORMAT_SFT)
#define VI_INPUT_YUV_INPUT_FORMAT_YUYV (2 << VI_INPUT_YUV_INPUT_FORMAT_SFT)
#define VI_INPUT_YUV_INPUT_FORMAT_YVYU (3 << VI_INPUT_YUV_INPUT_FORMAT_SFT)
#define VI_INPUT_INPUT_FORMAT_SFT 2 /* bits [5:2] */
#define VI_INPUT_INPUT_FORMAT_YUV422 (0 << VI_INPUT_INPUT_FORMAT_SFT)
#define VI_INPUT_VIP_INPUT_ENABLE BIT(1)
#define TEGRA_VI_VI_CORE_CONTROL 0x008c
#define VI_VI_CORE_CONTROL_PLANAR_CONV_IN_SEL_EXT BIT(31)
#define VI_VI_CORE_CONTROL_CSC_INPUT_SEL_EXT BIT(30)
#define VI_VI_CORE_CONTROL_INPUT_TO_ALT_MUX_SFT 27
#define VI_VI_CORE_CONTROL_INPUT_TO_CORE_EXT_SFT 24
#define VI_VI_CORE_CONTROL_OUTPUT_TO_ISP_EXT_SFT 21
#define VI_VI_CORE_CONTROL_ISP_HOST_STALL_OFF BIT(20)
#define VI_VI_CORE_CONTROL_V_DOWNSCALING BIT(19)
#define VI_VI_CORE_CONTROL_V_AVERAGING BIT(18)
#define VI_VI_CORE_CONTROL_H_DOWNSCALING BIT(17)
#define VI_VI_CORE_CONTROL_H_AVERAGING BIT(16)
#define VI_VI_CORE_CONTROL_CSC_INPUT_SEL BIT(11)
#define VI_VI_CORE_CONTROL_PLANAR_CONV_INPUT_SEL BIT(10)
#define VI_VI_CORE_CONTROL_INPUT_TO_CORE_SFT 8
#define VI_VI_CORE_CONTROL_ISP_DOWNSAMPLE_SFT 5
#define VI_VI_CORE_CONTROL_OUTPUT_TO_EPP_SFT 2
#define VI_VI_CORE_CONTROL_OUTPUT_TO_ISP_SFT 0
#define TEGRA_VI_VI_FIRST_OUTPUT_CONTROL 0x0090
#define VI_OUTPUT_FORMAT_EXT BIT(22)
#define VI_OUTPUT_V_DIRECTION BIT(20)
#define VI_OUTPUT_H_DIRECTION BIT(19)
#define VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT 17
#define VI_OUTPUT_YUV_OUTPUT_FORMAT_UYVY (0 << VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT)
#define VI_OUTPUT_YUV_OUTPUT_FORMAT_VYUY (1 << VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT)
#define VI_OUTPUT_YUV_OUTPUT_FORMAT_YUYV (2 << VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT)
#define VI_OUTPUT_YUV_OUTPUT_FORMAT_YVYU (3 << VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT)
#define VI_OUTPUT_OUTPUT_BYTE_SWAP BIT(16)
#define VI_OUTPUT_LAST_PIXEL_DUPLICATION BIT(8)
#define VI_OUTPUT_OUTPUT_FORMAT_SFT 0
#define VI_OUTPUT_OUTPUT_FORMAT_YUV422POST (3 << VI_OUTPUT_OUTPUT_FORMAT_SFT)
#define VI_OUTPUT_OUTPUT_FORMAT_YUV420PLANAR (6 << VI_OUTPUT_OUTPUT_FORMAT_SFT)
#define TEGRA_VI_VIP_H_ACTIVE 0x00a4
#define VI_VIP_H_ACTIVE_PERIOD_SFT 16 /* active pixels/line, must be even */
#define VI_VIP_H_ACTIVE_START_SFT 0
#define TEGRA_VI_VIP_V_ACTIVE 0x00a8
#define VI_VIP_V_ACTIVE_PERIOD_SFT 16 /* active lines */
#define VI_VIP_V_ACTIVE_START_SFT 0
#define TEGRA_VI_VB0_START_ADDRESS_FIRST 0x00c4
#define TEGRA_VI_VB0_BASE_ADDRESS_FIRST 0x00c8
#define TEGRA_VI_VB0_START_ADDRESS_U 0x00cc
#define TEGRA_VI_VB0_BASE_ADDRESS_U 0x00d0
#define TEGRA_VI_VB0_START_ADDRESS_V 0x00d4
#define TEGRA_VI_VB0_BASE_ADDRESS_V 0x00d8
#define TEGRA_VI_FIRST_OUTPUT_FRAME_SIZE 0x00e0
#define VI_FIRST_OUTPUT_FRAME_HEIGHT_SFT 16
#define VI_FIRST_OUTPUT_FRAME_WIDTH_SFT 0
#define TEGRA_VI_VB0_COUNT_FIRST 0x00e4
#define TEGRA_VI_VB0_SIZE_FIRST 0x00e8
#define VI_VB0_SIZE_FIRST_V_SFT 16
#define VI_VB0_SIZE_FIRST_H_SFT 0
#define TEGRA_VI_VB0_BUFFER_STRIDE_FIRST 0x00ec
#define VI_VB0_BUFFER_STRIDE_FIRST_CHROMA_SFT 30
#define VI_VB0_BUFFER_STRIDE_FIRST_LUMA_SFT 0
#define TEGRA_VI_H_LPF_CONTROL 0x0108
#define VI_H_LPF_CONTROL_CHROMA_SFT 16
#define VI_H_LPF_CONTROL_LUMA_SFT 0
#define TEGRA_VI_H_DOWNSCALE_CONTROL 0x010c
#define TEGRA_VI_V_DOWNSCALE_CONTROL 0x0110
#define TEGRA_VI_VIP_INPUT_STATUS 0x0144
#define TEGRA_VI_VI_DATA_INPUT_CONTROL 0x0168
#define VI_DATA_INPUT_SFT 0 /* [11:0] = mask pin inputs to VI core */
#define TEGRA_VI_PIN_INPUT_ENABLE 0x016c
#define VI_PIN_INPUT_VSYNC BIT(14)
#define VI_PIN_INPUT_HSYNC BIT(13)
#define VI_PIN_INPUT_VD_SFT 0 /* [11:0] = data bin N input enable */
#define TEGRA_VI_PIN_INVERSION 0x0174
#define VI_PIN_INVERSION_VSYNC_ACTIVE_HIGH BIT(1)
#define VI_PIN_INVERSION_HSYNC_ACTIVE_HIGH BIT(0)
#define TEGRA_VI_CAMERA_CONTROL 0x01a0
#define VI_CAMERA_CONTROL_STOP_CAPTURE BIT(2)
#define VI_CAMERA_CONTROL_TEST_MODE BIT(1)
#define VI_CAMERA_CONTROL_VIP_ENABLE BIT(0)
#define TEGRA_VI_VI_ENABLE 0x01a4
#define VI_VI_ENABLE_SW_FLOW_CONTROL_OUT1 BIT(1)
#define VI_VI_ENABLE_FIRST_OUTPUT_TO_MEM_DISABLE BIT(0)
#define TEGRA_VI_VI_RAISE 0x01ac
#define VI_VI_RAISE_ON_EDGE BIT(0)
/* --------------------------------------------------------------------------
* VI
*/
static void tegra20_vi_write(struct tegra_vi_channel *chan, unsigned int addr, u32 val)
{
writel(val, chan->vi->iomem + addr);
}
/*
* Get the main input format (YUV/RGB...) and the YUV variant as values to
* be written into registers for the current VI input mbus code.
*/
static void tegra20_vi_get_input_formats(struct tegra_vi_channel *chan,
unsigned int *main_input_format,
unsigned int *yuv_input_format)
{
unsigned int input_mbus_code = chan->fmtinfo->code;
(*main_input_format) = VI_INPUT_INPUT_FORMAT_YUV422;
switch (input_mbus_code) {
case MEDIA_BUS_FMT_UYVY8_2X8:
(*yuv_input_format) = VI_INPUT_YUV_INPUT_FORMAT_UYVY;
break;
case MEDIA_BUS_FMT_VYUY8_2X8:
(*yuv_input_format) = VI_INPUT_YUV_INPUT_FORMAT_VYUY;
break;
case MEDIA_BUS_FMT_YUYV8_2X8:
(*yuv_input_format) = VI_INPUT_YUV_INPUT_FORMAT_YUYV;
break;
case MEDIA_BUS_FMT_YVYU8_2X8:
(*yuv_input_format) = VI_INPUT_YUV_INPUT_FORMAT_YVYU;
break;
}
}
/*
* Get the main output format (YUV/RGB...) and the YUV variant as values to
* be written into registers for the current VI output pixel format.
*/
static void tegra20_vi_get_output_formats(struct tegra_vi_channel *chan,
unsigned int *main_output_format,
unsigned int *yuv_output_format)
{
u32 output_fourcc = chan->format.pixelformat;
/* Default to YUV422 non-planar (U8Y8V8Y8) after downscaling */
(*main_output_format) = VI_OUTPUT_OUTPUT_FORMAT_YUV422POST;
(*yuv_output_format) = VI_OUTPUT_YUV_OUTPUT_FORMAT_UYVY;
switch (output_fourcc) {
case V4L2_PIX_FMT_UYVY:
(*yuv_output_format) = VI_OUTPUT_YUV_OUTPUT_FORMAT_UYVY;
break;
case V4L2_PIX_FMT_VYUY:
(*yuv_output_format) = VI_OUTPUT_YUV_OUTPUT_FORMAT_VYUY;
break;
case V4L2_PIX_FMT_YUYV:
(*yuv_output_format) = VI_OUTPUT_YUV_OUTPUT_FORMAT_YUYV;
break;
case V4L2_PIX_FMT_YVYU:
(*yuv_output_format) = VI_OUTPUT_YUV_OUTPUT_FORMAT_YVYU;
break;
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
(*main_output_format) = VI_OUTPUT_OUTPUT_FORMAT_YUV420PLANAR;
break;
}
}
/*
* Make the VI accessible (needed on Tegra20).
*
* This function writes an unknown bit into an unknown register. The code
* comes from a downstream 3.1 kernel that has a working VIP driver for
* Tegra20, and removing it makes the VI completely inaccessible. It should
* be rewritten and possibly moved elsewhere, but the appropriate location
* and implementation is unknown due to a total lack of documentation.
*/
static int tegra20_vi_enable(struct tegra_vi *vi, bool on)
{
/* from arch/arm/mach-tegra/iomap.h */
const phys_addr_t TEGRA_APB_MISC_BASE = 0x70000000;
const unsigned long reg_offset = 0x42c;
void __iomem *apb_misc;
u32 val;
apb_misc = ioremap(TEGRA_APB_MISC_BASE, PAGE_SIZE);
if (!apb_misc)
return dev_err_probe(vi->dev, -ENOENT, "cannot access APB_MISC");
val = readl(apb_misc + reg_offset);
val &= ~BIT(0);
val |= on ? BIT(0) : 0;
writel(val, apb_misc + reg_offset);
iounmap(apb_misc);
return 0;
}
static int tegra20_channel_host1x_syncpt_init(struct tegra_vi_channel *chan)
{
struct tegra_vi *vi = chan->vi;
struct host1x_syncpt *out_sp;
out_sp = host1x_syncpt_request(&vi->client, HOST1X_SYNCPT_CLIENT_MANAGED);
if (!out_sp)
return dev_err_probe(vi->dev, -ENOMEM, "failed to request syncpoint\n");
chan->mw_ack_sp[0] = out_sp;
return 0;
}
static void tegra20_channel_host1x_syncpt_free(struct tegra_vi_channel *chan)
{
host1x_syncpt_put(chan->mw_ack_sp[0]);
}
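/*
 * Clamp the requested resolution to the supported range and derive
 * bytesperline/sizeimage for the packed YUV422 and planar YUV420
 * formats.
 */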
static void tegra20_fmt_align(struct v4l2_pix_format *pix, unsigned int bpp)
{
pix->width = clamp(pix->width, TEGRA20_MIN_WIDTH, TEGRA20_MAX_WIDTH);
pix->height = clamp(pix->height, TEGRA20_MIN_HEIGHT, TEGRA20_MAX_HEIGHT);
switch (pix->pixelformat) {
case V4L2_PIX_FMT_UYVY:
case V4L2_PIX_FMT_VYUY:
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
pix->bytesperline = roundup(pix->width, 2) * 2;
pix->sizeimage = roundup(pix->width, 2) * 2 * pix->height;
break;
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
pix->bytesperline = roundup(pix->width, 8);
pix->sizeimage = roundup(pix->width, 8) * pix->height * 3 / 2;
break;
}
}
/*
* Compute buffer offsets once per stream so that
* tegra20_channel_vi_buffer_setup() only has to do very simple maths for
* each buffer.
*/
static void tegra20_channel_queue_setup(struct tegra_vi_channel *chan)
{
unsigned int stride = chan->format.bytesperline;
unsigned int height = chan->format.height;
chan->start_offset = 0;
switch (chan->format.pixelformat) {
case V4L2_PIX_FMT_UYVY:
case V4L2_PIX_FMT_VYUY:
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
if (chan->vflip)
chan->start_offset += stride * (height - 1);
if (chan->hflip)
chan->start_offset += stride - 1;
break;
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
chan->addr_offset_u = stride * height;
chan->addr_offset_v = chan->addr_offset_u + stride * height / 4;
/* For YVU420, we swap the locations of the U and V planes. */
if (chan->format.pixelformat == V4L2_PIX_FMT_YVU420) {
unsigned long temp;
temp = chan->addr_offset_u;
chan->addr_offset_u = chan->addr_offset_v;
chan->addr_offset_v = temp;
}
chan->start_offset_u = chan->addr_offset_u;
chan->start_offset_v = chan->addr_offset_v;
if (chan->vflip) {
chan->start_offset += stride * (height - 1);
chan->start_offset_u += (stride / 2) * ((height / 2) - 1);
chan->start_offset_v += (stride / 2) * ((height / 2) - 1);
}
if (chan->hflip) {
chan->start_offset += stride - 1;
chan->start_offset_u += (stride / 2) - 1;
chan->start_offset_v += (stride / 2) - 1;
}
break;
}
}
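/* Fill in sequence, field and timestamp, then hand the buffer back to vb2. */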
static void release_buffer(struct tegra_vi_channel *chan,
struct tegra_channel_buffer *buf,
enum vb2_buffer_state state)
{
struct vb2_v4l2_buffer *vb = &buf->buf;
vb->sequence = chan->sequence++;
vb->field = V4L2_FIELD_NONE;
vb->vb2_buf.timestamp = ktime_get_ns();
vb2_buffer_done(&vb->vb2_buf, state);
}
static void tegra20_channel_vi_buffer_setup(struct tegra_vi_channel *chan,
struct tegra_channel_buffer *buf)
{
dma_addr_t base = buf->addr;
switch (chan->fmtinfo->fourcc) {
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
tegra20_vi_write(chan, TEGRA_VI_VB0_BASE_ADDRESS_U, base + chan->addr_offset_u);
tegra20_vi_write(chan, TEGRA_VI_VB0_START_ADDRESS_U, base + chan->start_offset_u);
tegra20_vi_write(chan, TEGRA_VI_VB0_BASE_ADDRESS_V, base + chan->addr_offset_v);
tegra20_vi_write(chan, TEGRA_VI_VB0_START_ADDRESS_V, base + chan->start_offset_v);
fallthrough;
case V4L2_PIX_FMT_UYVY:
case V4L2_PIX_FMT_VYUY:
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
tegra20_vi_write(chan, TEGRA_VI_VB0_BASE_ADDRESS_FIRST, base);
tegra20_vi_write(chan, TEGRA_VI_VB0_START_ADDRESS_FIRST, base + chan->start_offset);
break;
}
}
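/*
 * Capture a single frame: program the buffer addresses, enable VIP
 * capture, wait for the frame-start syncpoint, then stop capture and
 * complete the buffer.
 */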
static int tegra20_channel_capture_frame(struct tegra_vi_channel *chan,
struct tegra_channel_buffer *buf)
{
int err;
chan->next_out_sp_idx++;
tegra20_channel_vi_buffer_setup(chan, buf);
tegra20_vi_write(chan, TEGRA_VI_CAMERA_CONTROL, VI_CAMERA_CONTROL_VIP_ENABLE);
/* Wait for syncpt counter to reach frame start event threshold */
err = host1x_syncpt_wait(chan->mw_ack_sp[0], chan->next_out_sp_idx,
TEGRA_VI_SYNCPT_WAIT_TIMEOUT, NULL);
if (err) {
host1x_syncpt_incr(chan->mw_ack_sp[0]);
dev_err_ratelimited(&chan->video.dev, "frame start syncpt timeout: %d\n", err);
release_buffer(chan, buf, VB2_BUF_STATE_ERROR);
return err;
}
tegra20_vi_write(chan, TEGRA_VI_CAMERA_CONTROL,
VI_CAMERA_CONTROL_STOP_CAPTURE | VI_CAMERA_CONTROL_VIP_ENABLE);
release_buffer(chan, buf, VB2_BUF_STATE_DONE);
return 0;
}
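/*
 * Capture kthread: wait for queued buffers, capture one frame per
 * buffer, and report a queue error after too many consecutive
 * syncpoint timeouts.
 */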
static int tegra20_chan_capture_kthread_start(void *data)
{
struct tegra_vi_channel *chan = data;
struct tegra_channel_buffer *buf;
unsigned int retries = 0;
int err = 0;
while (1) {
/*
* Source is not streaming if error is non-zero.
* So, do not dequeue buffers on error and let the thread sleep
* till kthread stop signal is received.
*/
wait_event_interruptible(chan->start_wait,
kthread_should_stop() ||
(!list_empty(&chan->capture) && !err));
if (kthread_should_stop())
break;
/* dequeue the buffer and start capture */
spin_lock(&chan->start_lock);
if (list_empty(&chan->capture)) {
spin_unlock(&chan->start_lock);
continue;
}
buf = list_first_entry(&chan->capture, struct tegra_channel_buffer, queue);
list_del_init(&buf->queue);
spin_unlock(&chan->start_lock);
err = tegra20_channel_capture_frame(chan, buf);
if (!err) {
retries = 0;
continue;
}
if (retries++ > chan->syncpt_timeout_retry)
vb2_queue_error(&chan->queue);
else
err = 0;
}
return 0;
}
static void tegra20_camera_capture_setup(struct tegra_vi_channel *chan)
{
u32 output_fourcc = chan->format.pixelformat;
int width = chan->format.width;
int height = chan->format.height;
int stride_l = chan->format.bytesperline;
int stride_c = (output_fourcc == V4L2_PIX_FMT_YUV420 ||
output_fourcc == V4L2_PIX_FMT_YVU420) ? 1 : 0;
int main_output_format;
int yuv_output_format;
tegra20_vi_get_output_formats(chan, &main_output_format, &yuv_output_format);
/*
* Set up low pass filter. Use 0x240 for chromaticity and 0x240
* for luminance, which is the default and means not to touch
* anything.
*/
tegra20_vi_write(chan, TEGRA_VI_H_LPF_CONTROL,
0x0240 << VI_H_LPF_CONTROL_LUMA_SFT |
0x0240 << VI_H_LPF_CONTROL_CHROMA_SFT);
/* Set up raise-on-edge, so we get an interrupt on end of frame. */
tegra20_vi_write(chan, TEGRA_VI_VI_RAISE, VI_VI_RAISE_ON_EDGE);
tegra20_vi_write(chan, TEGRA_VI_VI_FIRST_OUTPUT_CONTROL,
(chan->vflip ? VI_OUTPUT_V_DIRECTION : 0) |
(chan->hflip ? VI_OUTPUT_H_DIRECTION : 0) |
yuv_output_format << VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT |
main_output_format << VI_OUTPUT_OUTPUT_FORMAT_SFT);
/* Set up frame size */
tegra20_vi_write(chan, TEGRA_VI_FIRST_OUTPUT_FRAME_SIZE,
height << VI_FIRST_OUTPUT_FRAME_HEIGHT_SFT |
width << VI_FIRST_OUTPUT_FRAME_WIDTH_SFT);
/* First output memory enabled */
tegra20_vi_write(chan, TEGRA_VI_VI_ENABLE, 0);
/* Set the number of frames in the buffer */
tegra20_vi_write(chan, TEGRA_VI_VB0_COUNT_FIRST, 1);
/* Set up buffer frame size */
tegra20_vi_write(chan, TEGRA_VI_VB0_SIZE_FIRST,
height << VI_VB0_SIZE_FIRST_V_SFT |
width << VI_VB0_SIZE_FIRST_H_SFT);
tegra20_vi_write(chan, TEGRA_VI_VB0_BUFFER_STRIDE_FIRST,
stride_l << VI_VB0_BUFFER_STRIDE_FIRST_LUMA_SFT |
stride_c << VI_VB0_BUFFER_STRIDE_FIRST_CHROMA_SFT);
tegra20_vi_write(chan, TEGRA_VI_VI_ENABLE, 0);
}
static int tegra20_vi_start_streaming(struct vb2_queue *vq, u32 count)
{
struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
struct media_pipeline *pipe = &chan->video.pipe;
int err;
chan->next_out_sp_idx = host1x_syncpt_read(chan->mw_ack_sp[0]);
err = video_device_pipeline_start(&chan->video, pipe);
if (err)
goto error_pipeline_start;
tegra20_camera_capture_setup(chan);
err = tegra_channel_set_stream(chan, true);
if (err)
goto error_set_stream;
chan->sequence = 0;
chan->kthread_start_capture = kthread_run(tegra20_chan_capture_kthread_start,
chan, "%s:0", chan->video.name);
if (IS_ERR(chan->kthread_start_capture)) {
err = PTR_ERR(chan->kthread_start_capture);
chan->kthread_start_capture = NULL;
dev_err_probe(&chan->video.dev, err, "failed to run capture kthread\n");
goto error_kthread_start;
}
return 0;
error_kthread_start:
tegra_channel_set_stream(chan, false);
error_set_stream:
video_device_pipeline_stop(&chan->video);
error_pipeline_start:
tegra_channel_release_buffers(chan, VB2_BUF_STATE_QUEUED);
return err;
}
static void tegra20_vi_stop_streaming(struct vb2_queue *vq)
{
struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
if (chan->kthread_start_capture) {
kthread_stop(chan->kthread_start_capture);
chan->kthread_start_capture = NULL;
}
tegra_channel_release_buffers(chan, VB2_BUF_STATE_ERROR);
tegra_channel_set_stream(chan, false);
video_device_pipeline_stop(&chan->video);
}
static const struct tegra_vi_ops tegra20_vi_ops = {
.vi_enable = tegra20_vi_enable,
.channel_host1x_syncpt_init = tegra20_channel_host1x_syncpt_init,
.channel_host1x_syncpt_free = tegra20_channel_host1x_syncpt_free,
.vi_fmt_align = tegra20_fmt_align,
.channel_queue_setup = tegra20_channel_queue_setup,
.vi_start_streaming = tegra20_vi_start_streaming,
.vi_stop_streaming = tegra20_vi_stop_streaming,
};
#define TEGRA20_VIDEO_FMT(MBUS_CODE, BPP, FOURCC) \
{ \
.code = MEDIA_BUS_FMT_##MBUS_CODE, \
.bpp = BPP, \
.fourcc = V4L2_PIX_FMT_##FOURCC, \
}
static const struct tegra_video_format tegra20_video_formats[] = {
TEGRA20_VIDEO_FMT(UYVY8_2X8, 2, UYVY),
TEGRA20_VIDEO_FMT(VYUY8_2X8, 2, VYUY),
TEGRA20_VIDEO_FMT(YUYV8_2X8, 2, YUYV),
TEGRA20_VIDEO_FMT(YVYU8_2X8, 2, YVYU),
TEGRA20_VIDEO_FMT(UYVY8_2X8, 1, YUV420),
TEGRA20_VIDEO_FMT(UYVY8_2X8, 1, YVU420),
};
const struct tegra_vi_soc tegra20_vi_soc = {
.video_formats = tegra20_video_formats,
.nformats = ARRAY_SIZE(tegra20_video_formats),
.default_video_format = &tegra20_video_formats[0],
.ops = &tegra20_vi_ops,
.vi_max_channels = 1, /* parallel input (VIP) */
.vi_max_clk_hz = 150000000,
.has_h_v_flip = true,
};
/* --------------------------------------------------------------------------
* VIP
*/
/*
* VIP-specific configuration for stream start.
*
* Whatever is common among VIP and CSI is done by the VI component (see
* tegra20_vi_start_streaming()). Here we do what is VIP-specific.
*/
static int tegra20_vip_start_streaming(struct tegra_vip_channel *vip_chan)
{
struct tegra_vi_channel *vi_chan = v4l2_get_subdev_hostdata(&vip_chan->subdev);
int width = vi_chan->format.width;
int height = vi_chan->format.height;
unsigned int main_input_format;
unsigned int yuv_input_format;
tegra20_vi_get_input_formats(vi_chan, &main_input_format, &yuv_input_format);
tegra20_vi_write(vi_chan, TEGRA_VI_VI_CORE_CONTROL, 0);
tegra20_vi_write(vi_chan, TEGRA_VI_VI_INPUT_CONTROL,
VI_INPUT_VIP_INPUT_ENABLE | main_input_format | yuv_input_format);
tegra20_vi_write(vi_chan, TEGRA_VI_V_DOWNSCALE_CONTROL, 0);
tegra20_vi_write(vi_chan, TEGRA_VI_H_DOWNSCALE_CONTROL, 0);
tegra20_vi_write(vi_chan, TEGRA_VI_VIP_V_ACTIVE, height << VI_VIP_V_ACTIVE_PERIOD_SFT);
tegra20_vi_write(vi_chan, TEGRA_VI_VIP_H_ACTIVE,
roundup(width, 2) << VI_VIP_H_ACTIVE_PERIOD_SFT);
/*
* For VIP, D9..D2 is mapped to the video decoder's P7..P0.
* Disable/mask out the other Dn wires. When not in BT656
* mode we also need the V/H sync.
*/
tegra20_vi_write(vi_chan, TEGRA_VI_PIN_INPUT_ENABLE,
GENMASK(9, 2) << VI_PIN_INPUT_VD_SFT |
VI_PIN_INPUT_HSYNC | VI_PIN_INPUT_VSYNC);
tegra20_vi_write(vi_chan, TEGRA_VI_VI_DATA_INPUT_CONTROL,
GENMASK(9, 2) << VI_DATA_INPUT_SFT);
tegra20_vi_write(vi_chan, TEGRA_VI_PIN_INVERSION, 0);
tegra20_vi_write(vi_chan, TEGRA_VI_CONT_SYNCPT_OUT_1,
VI_CONT_SYNCPT_OUT_1_CONTINUOUS_SYNCPT |
host1x_syncpt_id(vi_chan->mw_ack_sp[0])
<< VI_CONT_SYNCPT_OUT_1_SYNCPT_IDX_SFT);
tegra20_vi_write(vi_chan, TEGRA_VI_CAMERA_CONTROL, VI_CAMERA_CONTROL_STOP_CAPTURE);
return 0;
}
static const struct tegra_vip_ops tegra20_vip_ops = {
.vip_start_streaming = tegra20_vip_start_streaming,
};
const struct tegra_vip_soc tegra20_vip_soc = {
.ops = &tegra20_vip_ops,
};
| linux-master | drivers/staging/media/tegra-video/tegra20.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Parallel video capture module (VIP) for the Tegra VI.
*
* This file implements the VIP-specific infrastructure.
*
* Copyright (C) 2023 SKIDATA GmbH
* Author: Luca Ceresoli <[email protected]>
*/
#include <linux/device.h>
#include <linux/host1x.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-fwnode.h>
#include "vip.h"
#include "video.h"
static inline struct tegra_vip *host1x_client_to_vip(struct host1x_client *client)
{
return container_of(client, struct tegra_vip, client);
}
static inline struct tegra_vip_channel *subdev_to_vip_channel(struct v4l2_subdev *subdev)
{
return container_of(subdev, struct tegra_vip_channel, subdev);
}
static inline struct tegra_vip *vip_channel_to_vip(struct tegra_vip_channel *chan)
{
return container_of(chan, struct tegra_vip, chan);
}
/* Find the previous subdev in the pipeline (i.e. the one connected to our sink pad) */
static struct v4l2_subdev *tegra_vip_channel_get_prev_subdev(struct tegra_vip_channel *chan)
{
struct media_pad *remote_pad;
remote_pad = media_pad_remote_pad_first(&chan->pads[TEGRA_VIP_PAD_SINK]);
if (!remote_pad)
return NULL;
return media_entity_to_v4l2_subdev(remote_pad->entity);
}
static int tegra_vip_enable_stream(struct v4l2_subdev *subdev)
{
struct tegra_vip_channel *vip_chan = subdev_to_vip_channel(subdev);
struct tegra_vip *vip = vip_channel_to_vip(vip_chan);
struct v4l2_subdev *prev_subdev = tegra_vip_channel_get_prev_subdev(vip_chan);
int err;
err = pm_runtime_resume_and_get(vip->dev);
if (err)
return dev_err_probe(vip->dev, err, "failed to get runtime PM\n");
err = vip->soc->ops->vip_start_streaming(vip_chan);
if (err < 0)
goto err_start_streaming;
err = v4l2_subdev_call(prev_subdev, video, s_stream, true);
if (err < 0 && err != -ENOIOCTLCMD)
goto err_prev_subdev_start_stream;
return 0;
err_prev_subdev_start_stream:
err_start_streaming:
pm_runtime_put(vip->dev);
return err;
}
static int tegra_vip_disable_stream(struct v4l2_subdev *subdev)
{
struct tegra_vip_channel *vip_chan = subdev_to_vip_channel(subdev);
struct tegra_vip *vip = vip_channel_to_vip(vip_chan);
struct v4l2_subdev *prev_subdev = tegra_vip_channel_get_prev_subdev(vip_chan);
v4l2_subdev_call(prev_subdev, video, s_stream, false);
pm_runtime_put(vip->dev);
return 0;
}
static int tegra_vip_s_stream(struct v4l2_subdev *subdev, int enable)
{
int err;
if (enable)
err = tegra_vip_enable_stream(subdev);
else
err = tegra_vip_disable_stream(subdev);
return err;
}
static const struct v4l2_subdev_video_ops tegra_vip_video_ops = {
.s_stream = tegra_vip_s_stream,
};
static const struct v4l2_subdev_ops tegra_vip_ops = {
.video = &tegra_vip_video_ops,
};
static int tegra_vip_channel_of_parse(struct tegra_vip *vip)
{
struct device *dev = vip->dev;
struct device_node *np = dev->of_node;
struct v4l2_fwnode_endpoint v4l2_ep = {
.bus_type = V4L2_MBUS_PARALLEL
};
struct fwnode_handle *fwh;
struct device_node *ep;
unsigned int num_pads;
int err;
dev_dbg(dev, "Parsing %pOF", np);
ep = of_graph_get_endpoint_by_regs(np, 0, 0);
if (!ep) {
err = -EINVAL;
dev_err_probe(dev, err, "%pOF: error getting endpoint node\n", np);
goto err_node_put;
}
fwh = of_fwnode_handle(ep);
err = v4l2_fwnode_endpoint_parse(fwh, &v4l2_ep);
of_node_put(ep);
if (err) {
dev_err_probe(dev, err, "%pOF: failed to parse v4l2 endpoint\n", np);
goto err_node_put;
}
num_pads = of_graph_get_endpoint_count(np);
if (num_pads != TEGRA_VIP_PADS_NUM) {
err = -EINVAL;
dev_err_probe(dev, err, "%pOF: need 2 pads, got %d\n", np, num_pads);
goto err_node_put;
}
vip->chan.of_node = of_node_get(np);
vip->chan.pads[TEGRA_VIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
vip->chan.pads[TEGRA_VIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
return 0;
err_node_put:
of_node_put(np);
return err;
}
static int tegra_vip_channel_init(struct tegra_vip *vip)
{
struct v4l2_subdev *subdev;
int err;
subdev = &vip->chan.subdev;
v4l2_subdev_init(subdev, &tegra_vip_ops);
subdev->dev = vip->dev;
snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "%s",
kbasename(vip->chan.of_node->full_name));
v4l2_set_subdevdata(subdev, &vip->chan);
subdev->fwnode = of_fwnode_handle(vip->chan.of_node);
subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
err = media_entity_pads_init(&subdev->entity, TEGRA_VIP_PADS_NUM, vip->chan.pads);
if (err)
return dev_err_probe(vip->dev, err, "failed to initialize media entity\n");
err = v4l2_async_register_subdev(subdev);
if (err) {
dev_err_probe(vip->dev, err, "failed to register subdev\n");
goto err_register_subdev;
}
return 0;
err_register_subdev:
media_entity_cleanup(&subdev->entity);
return err;
}
static int tegra_vip_init(struct host1x_client *client)
{
struct tegra_vip *vip = host1x_client_to_vip(client);
int err;
err = tegra_vip_channel_of_parse(vip);
if (err)
return err;
err = tegra_vip_channel_init(vip);
if (err)
goto err_init;
return 0;
err_init:
of_node_put(vip->chan.of_node);
return err;
}
static int tegra_vip_exit(struct host1x_client *client)
{
struct tegra_vip *vip = host1x_client_to_vip(client);
struct v4l2_subdev *subdev = &vip->chan.subdev;
v4l2_async_unregister_subdev(subdev);
media_entity_cleanup(&subdev->entity);
of_node_put(vip->chan.of_node);
return 0;
}
static const struct host1x_client_ops vip_client_ops = {
.init = tegra_vip_init,
.exit = tegra_vip_exit,
};
static int tegra_vip_probe(struct platform_device *pdev)
{
struct tegra_vip *vip;
int err;
dev_dbg(&pdev->dev, "Probing VIP \"%s\" from %pOF\n", pdev->name, pdev->dev.of_node);
vip = devm_kzalloc(&pdev->dev, sizeof(*vip), GFP_KERNEL);
if (!vip)
return -ENOMEM;
vip->soc = of_device_get_match_data(&pdev->dev);
vip->dev = &pdev->dev;
platform_set_drvdata(pdev, vip);
/* initialize host1x interface */
INIT_LIST_HEAD(&vip->client.list);
vip->client.ops = &vip_client_ops;
vip->client.dev = &pdev->dev;
err = host1x_client_register(&vip->client);
if (err)
return dev_err_probe(&pdev->dev, err, "failed to register host1x client\n");
pm_runtime_enable(&pdev->dev);
return 0;
}
static int tegra_vip_remove(struct platform_device *pdev)
{
struct tegra_vip *vip = platform_get_drvdata(pdev);
host1x_client_unregister(&vip->client);
pm_runtime_disable(&pdev->dev);
return 0;
}
#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
extern const struct tegra_vip_soc tegra20_vip_soc;
#endif
static const struct of_device_id tegra_vip_of_id_table[] = {
#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
{ .compatible = "nvidia,tegra20-vip", .data = &tegra20_vip_soc },
#endif
{ }
};
MODULE_DEVICE_TABLE(of, tegra_vip_of_id_table);
struct platform_driver tegra_vip_driver = {
.driver = {
.name = "tegra-vip",
.of_match_table = tegra_vip_of_id_table,
},
.probe = tegra_vip_probe,
.remove = tegra_vip_remove,
};
| linux-master | drivers/staging/media/tegra-video/vip.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/device.h>
#include <linux/host1x.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-fwnode.h>
#include "csi.h"
#include "video.h"
#define MHZ 1000000
static inline struct tegra_csi *
host1x_client_to_csi(struct host1x_client *client)
{
return container_of(client, struct tegra_csi, client);
}
static inline struct tegra_csi_channel *to_csi_chan(struct v4l2_subdev *subdev)
{
return container_of(subdev, struct tegra_csi_channel, subdev);
}
/*
* CSI is a separate subdevice with 6 source pads used to generate a
* test pattern. The CSI subdevice pad ops are used only for TPG and
* allow the TPG formats listed below.
*/
static const struct v4l2_mbus_framefmt tegra_csi_tpg_fmts[] = {
{
TEGRA_DEF_WIDTH,
TEGRA_DEF_HEIGHT,
MEDIA_BUS_FMT_SRGGB10_1X10,
V4L2_FIELD_NONE,
V4L2_COLORSPACE_SRGB
},
{
TEGRA_DEF_WIDTH,
TEGRA_DEF_HEIGHT,
MEDIA_BUS_FMT_RGB888_1X32_PADHI,
V4L2_FIELD_NONE,
V4L2_COLORSPACE_SRGB
},
};
static const struct v4l2_frmsize_discrete tegra_csi_tpg_sizes[] = {
{ 1280, 720 },
{ 1920, 1080 },
{ 3840, 2160 },
};
/*
* V4L2 Subdevice Pad Operations
*/
static int csi_enum_bus_code(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
return -ENOIOCTLCMD;
if (code->index >= ARRAY_SIZE(tegra_csi_tpg_fmts))
return -EINVAL;
code->code = tegra_csi_tpg_fmts[code->index].code;
return 0;
}
static int csi_get_format(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
return -ENOIOCTLCMD;
fmt->format = csi_chan->format;
return 0;
}
static int csi_get_frmrate_table_index(struct tegra_csi *csi, u32 code,
u32 width, u32 height)
{
const struct tpg_framerate *frmrate;
unsigned int i;
frmrate = csi->soc->tpg_frmrate_table;
for (i = 0; i < csi->soc->tpg_frmrate_table_size; i++) {
if (frmrate[i].code == code &&
frmrate[i].frmsize.width == width &&
frmrate[i].frmsize.height == height) {
return i;
}
}
return -EINVAL;
}
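/* Cache blanking intervals and frame rate for the given TPG format and size. */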
static void csi_chan_update_blank_intervals(struct tegra_csi_channel *csi_chan,
u32 code, u32 width, u32 height)
{
struct tegra_csi *csi = csi_chan->csi;
const struct tpg_framerate *frmrate = csi->soc->tpg_frmrate_table;
int index;
index = csi_get_frmrate_table_index(csi_chan->csi, code,
width, height);
if (index >= 0) {
csi_chan->h_blank = frmrate[index].h_blank;
csi_chan->v_blank = frmrate[index].v_blank;
csi_chan->framerate = frmrate[index].framerate;
}
}
static int csi_enum_framesizes(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
unsigned int i;
if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
return -ENOIOCTLCMD;
if (fse->index >= ARRAY_SIZE(tegra_csi_tpg_sizes))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(tegra_csi_tpg_fmts); i++)
if (fse->code == tegra_csi_tpg_fmts[i].code)
break;
if (i == ARRAY_SIZE(tegra_csi_tpg_fmts))
return -EINVAL;
fse->min_width = tegra_csi_tpg_sizes[fse->index].width;
fse->max_width = tegra_csi_tpg_sizes[fse->index].width;
fse->min_height = tegra_csi_tpg_sizes[fse->index].height;
fse->max_height = tegra_csi_tpg_sizes[fse->index].height;
return 0;
}
static int csi_enum_frameintervals(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
struct tegra_csi *csi = csi_chan->csi;
const struct tpg_framerate *frmrate = csi->soc->tpg_frmrate_table;
int index;
if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
return -ENOIOCTLCMD;
/* one framerate per format and resolution */
if (fie->index > 0)
return -EINVAL;
index = csi_get_frmrate_table_index(csi_chan->csi, fie->code,
fie->width, fie->height);
if (index < 0)
return -EINVAL;
fie->interval.numerator = 1;
fie->interval.denominator = frmrate[index].framerate;
return 0;
}
static int csi_set_format(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
struct v4l2_mbus_framefmt *format = &fmt->format;
const struct v4l2_frmsize_discrete *sizes;
unsigned int i;
if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
return -ENOIOCTLCMD;
sizes = v4l2_find_nearest_size(tegra_csi_tpg_sizes,
ARRAY_SIZE(tegra_csi_tpg_sizes),
width, height,
format->width, format->height);
format->width = sizes->width;
format->height = sizes->height;
for (i = 0; i < ARRAY_SIZE(tegra_csi_tpg_fmts); i++)
if (format->code == tegra_csi_tpg_fmts[i].code)
break;
if (i == ARRAY_SIZE(tegra_csi_tpg_fmts))
i = 0;
format->code = tegra_csi_tpg_fmts[i].code;
format->field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
return 0;
/* update blanking intervals from frame rate table and format */
csi_chan_update_blank_intervals(csi_chan, format->code,
format->width, format->height);
csi_chan->format = *format;
return 0;
}
/*
* V4L2 Subdevice Video Operations
*/
static int tegra_csi_g_frame_interval(struct v4l2_subdev *subdev,
struct v4l2_subdev_frame_interval *vfi)
{
struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
return -ENOIOCTLCMD;
vfi->interval.numerator = 1;
vfi->interval.denominator = csi_chan->framerate;
return 0;
}
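/* Query V4L2_CID_PIXEL_RATE from the remote source subdev (0 if not exposed). */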
static unsigned int csi_get_pixel_rate(struct tegra_csi_channel *csi_chan)
{
struct tegra_vi_channel *chan;
struct v4l2_subdev *src_subdev;
struct v4l2_ctrl *ctrl;
chan = v4l2_get_subdev_hostdata(&csi_chan->subdev);
src_subdev = tegra_channel_get_remote_source_subdev(chan);
ctrl = v4l2_ctrl_find(src_subdev->ctrl_handler, V4L2_CID_PIXEL_RATE);
if (ctrl)
return v4l2_ctrl_g_ctrl_int64(ctrl);
return 0;
}
void tegra_csi_calc_settle_time(struct tegra_csi_channel *csi_chan,
u8 csi_port_num,
u8 *clk_settle_time,
u8 *ths_settle_time)
{
struct tegra_csi *csi = csi_chan->csi;
unsigned int cil_clk_mhz;
unsigned int pix_clk_mhz;
int clk_idx = (csi_port_num >> 1) + 1;
cil_clk_mhz = clk_get_rate(csi->clks[clk_idx].clk) / MHZ;
pix_clk_mhz = csi_get_pixel_rate(csi_chan) / MHZ;
/*
* CLK Settle time is the interval during which HS receiver should
* ignore any clock lane HS transitions, starting from the beginning
* of T-CLK-PREPARE.
* Per DPHY specification, T-CLK-SETTLE should be between 95ns ~ 300ns
*
* 95ns < (clk-settle-programmed + 7) * lp clk period < 300ns
* midpoint = 197.5 ns
*/
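/*
 * With lp_clk_period = 1000 / cil_clk_mhz (in ns), solving
 * (clk-settle-programmed + 7) * lp_clk_period = 197.5 ns gives
 * clk-settle-programmed = ((95 + 300) * cil_clk_mhz - 14000) / 2000,
 * which is the expression computed below.
 */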
*clk_settle_time = ((95 + 300) * cil_clk_mhz - 14000) / 2000;
/*
* THS Settle time is the interval during which HS receiver should
* ignore any data lane HS transitions, starting from the beginning
* of THS-PREPARE.
*
* Per DPHY specification, T-HS-SETTLE should be between 85ns + 6UI
* and 145ns+10UI.
* 85ns + 6UI < (Ths-settle-prog + 5) * lp_clk_period < 145ns + 10UI
* midpoint = 115ns + 8UI
*/
if (pix_clk_mhz)
*ths_settle_time = (115 * cil_clk_mhz + 8000 * cil_clk_mhz
/ (2 * pix_clk_mhz) - 5000) / 1000;
}
static int tegra_csi_enable_stream(struct v4l2_subdev *subdev)
{
struct tegra_vi_channel *chan = v4l2_get_subdev_hostdata(subdev);
struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
struct tegra_csi *csi = csi_chan->csi;
int ret, err;
ret = pm_runtime_resume_and_get(csi->dev);
if (ret < 0) {
dev_err(csi->dev, "failed to get runtime PM: %d\n", ret);
return ret;
}
if (csi_chan->mipi) {
ret = tegra_mipi_enable(csi_chan->mipi);
if (ret < 0) {
dev_err(csi->dev,
"failed to enable MIPI pads: %d\n", ret);
goto rpm_put;
}
/*
* CSI MIPI pads PULLUP, PULLDN and TERM impedances need to
* be calibrated after power on.
* So, trigger the calibration start here and results will
* be latched and applied to the pads when link is in LP11
* state during start of sensor streaming.
*/
ret = tegra_mipi_start_calibration(csi_chan->mipi);
if (ret < 0) {
dev_err(csi->dev,
"failed to start MIPI calibration: %d\n", ret);
goto disable_mipi;
}
}
csi_chan->pg_mode = chan->pg_mode;
/*
* Tegra CSI receiver can detect the first LP to HS transition.
* So, start the CSI stream-on prior to sensor stream-on and
* vice-versa for stream-off.
*/
ret = csi->ops->csi_start_streaming(csi_chan);
if (ret < 0)
goto finish_calibration;
if (csi_chan->mipi) {
struct v4l2_subdev *src_subdev;
/*
* The TRM incorrectly documents waiting for the done status from the
* calibration logic right after CSI interface power-on.
* As per the design, calibration results are latched and applied
* to the pads only when the link is in LP11 state which will happen
* during the sensor stream-on.
* CSI subdev stream-on triggers start of MIPI pads calibration.
* Wait for calibration to finish here after sensor subdev stream-on.
*/
src_subdev = tegra_channel_get_remote_source_subdev(chan);
ret = v4l2_subdev_call(src_subdev, video, s_stream, true);
if (ret < 0 && ret != -ENOIOCTLCMD)
goto disable_csi_stream;
err = tegra_mipi_finish_calibration(csi_chan->mipi);
if (err < 0)
dev_warn(csi->dev, "MIPI calibration failed: %d\n", err);
}
return 0;
disable_csi_stream:
csi->ops->csi_stop_streaming(csi_chan);
finish_calibration:
if (csi_chan->mipi)
tegra_mipi_finish_calibration(csi_chan->mipi);
disable_mipi:
if (csi_chan->mipi) {
err = tegra_mipi_disable(csi_chan->mipi);
if (err < 0)
dev_err(csi->dev,
"failed to disable MIPI pads: %d\n", err);
}
rpm_put:
pm_runtime_put(csi->dev);
return ret;
}
static int tegra_csi_disable_stream(struct v4l2_subdev *subdev)
{
struct tegra_vi_channel *chan = v4l2_get_subdev_hostdata(subdev);
struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
struct tegra_csi *csi = csi_chan->csi;
int err;
/*
* Stream-off subdevices in reverse order to stream-on.
* Remote source subdev in TPG mode is same as CSI subdev.
*/
if (csi_chan->mipi) {
struct v4l2_subdev *src_subdev;
src_subdev = tegra_channel_get_remote_source_subdev(chan);
err = v4l2_subdev_call(src_subdev, video, s_stream, false);
if (err < 0 && err != -ENOIOCTLCMD)
dev_err_probe(csi->dev, err, "source subdev stream off failed\n");
}
csi->ops->csi_stop_streaming(csi_chan);
if (csi_chan->mipi) {
err = tegra_mipi_disable(csi_chan->mipi);
if (err < 0)
dev_err(csi->dev,
"failed to disable MIPI pads: %d\n", err);
}
pm_runtime_put(csi->dev);
return 0;
}
static int tegra_csi_s_stream(struct v4l2_subdev *subdev, int enable)
{
int ret;
if (enable)
ret = tegra_csi_enable_stream(subdev);
else
ret = tegra_csi_disable_stream(subdev);
return ret;
}
/*
* V4L2 Subdevice Operations
*/
static const struct v4l2_subdev_video_ops tegra_csi_video_ops = {
.s_stream = tegra_csi_s_stream,
.g_frame_interval = tegra_csi_g_frame_interval,
.s_frame_interval = tegra_csi_g_frame_interval,
};
static const struct v4l2_subdev_pad_ops tegra_csi_pad_ops = {
.enum_mbus_code = csi_enum_bus_code,
.enum_frame_size = csi_enum_framesizes,
.enum_frame_interval = csi_enum_frameintervals,
.get_fmt = csi_get_format,
.set_fmt = csi_set_format,
};
static const struct v4l2_subdev_ops tegra_csi_ops = {
.video = &tegra_csi_video_ops,
.pad = &tegra_csi_pad_ops,
};
static int tegra_csi_channel_alloc(struct tegra_csi *csi,
struct device_node *node,
unsigned int port_num, unsigned int lanes,
unsigned int num_pads)
{
struct tegra_csi_channel *chan;
int ret = 0, i;
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!chan)
return -ENOMEM;
list_add_tail(&chan->list, &csi->csi_chans);
chan->csi = csi;
/*
* Each CSI brick has a maximum of 4 lanes.
* For more than 4 lanes, gang up multiple adjacent CSI bricks.
*/
if (lanes <= CSI_LANES_PER_BRICK) {
chan->numlanes = lanes;
chan->numgangports = 1;
} else {
chan->numlanes = CSI_LANES_PER_BRICK;
chan->numgangports = lanes / CSI_LANES_PER_BRICK;
}
for (i = 0; i < chan->numgangports; i++)
chan->csi_port_nums[i] = port_num + i * CSI_PORTS_PER_BRICK;
chan->of_node = of_node_get(node);
chan->numpads = num_pads;
if (num_pads & 0x2) {
chan->pads[0].flags = MEDIA_PAD_FL_SINK;
chan->pads[1].flags = MEDIA_PAD_FL_SOURCE;
} else {
chan->pads[0].flags = MEDIA_PAD_FL_SOURCE;
}
if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
return 0;
chan->mipi = tegra_mipi_request(csi->dev, node);
if (IS_ERR(chan->mipi)) {
ret = PTR_ERR(chan->mipi);
chan->mipi = NULL;
dev_err(csi->dev, "failed to get mipi device: %d\n", ret);
}
return ret;
}
static int tegra_csi_tpg_channels_alloc(struct tegra_csi *csi)
{
struct device_node *node = csi->dev->of_node;
unsigned int port_num;
unsigned int tpg_channels = csi->soc->csi_max_channels;
int ret;
/* allocate CSI channel for each CSI x2 ports */
for (port_num = 0; port_num < tpg_channels; port_num++) {
ret = tegra_csi_channel_alloc(csi, node, port_num, 2, 1);
if (ret < 0)
return ret;
}
return 0;
}
static int tegra_csi_channels_alloc(struct tegra_csi *csi)
{
struct device_node *node = csi->dev->of_node;
struct v4l2_fwnode_endpoint v4l2_ep = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
struct fwnode_handle *fwh;
struct device_node *channel;
struct device_node *ep;
unsigned int lanes, portno, num_pads;
int ret;
for_each_child_of_node(node, channel) {
if (!of_node_name_eq(channel, "channel"))
continue;
ret = of_property_read_u32(channel, "reg", &portno);
if (ret < 0)
continue;
if (portno >= csi->soc->csi_max_channels) {
dev_err(csi->dev, "invalid port num %d for %pOF\n",
portno, channel);
ret = -EINVAL;
goto err_node_put;
}
ep = of_graph_get_endpoint_by_regs(channel, 0, 0);
if (!ep)
continue;
fwh = of_fwnode_handle(ep);
ret = v4l2_fwnode_endpoint_parse(fwh, &v4l2_ep);
of_node_put(ep);
if (ret) {
dev_err(csi->dev,
"failed to parse v4l2 endpoint for %pOF: %d\n",
channel, ret);
goto err_node_put;
}
lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
/*
* Each CSI brick has a maximum of 4 data lanes.
* For more than 4 lanes, validate that the lane count is a multiple
* of 4 so that consecutive CSI bricks can be ganged up for
* streaming.
*/
if (!lanes || ((lanes & (lanes - 1)) != 0) ||
(lanes > CSI_LANES_PER_BRICK && ((portno & 1) != 0))) {
dev_err(csi->dev, "invalid data-lanes %d for %pOF\n",
lanes, channel);
ret = -EINVAL;
goto err_node_put;
}
num_pads = of_graph_get_endpoint_count(channel);
if (num_pads == TEGRA_CSI_PADS_NUM) {
ret = tegra_csi_channel_alloc(csi, channel, portno,
lanes, num_pads);
if (ret < 0)
goto err_node_put;
}
}
return 0;
err_node_put:
of_node_put(channel);
return ret;
}
static int tegra_csi_channel_init(struct tegra_csi_channel *chan)
{
struct tegra_csi *csi = chan->csi;
struct v4l2_subdev *subdev;
int ret;
/* initialize the default format */
chan->format.code = MEDIA_BUS_FMT_SRGGB10_1X10;
chan->format.field = V4L2_FIELD_NONE;
chan->format.colorspace = V4L2_COLORSPACE_SRGB;
chan->format.width = TEGRA_DEF_WIDTH;
chan->format.height = TEGRA_DEF_HEIGHT;
csi_chan_update_blank_intervals(chan, chan->format.code,
chan->format.width,
chan->format.height);
/* initialize V4L2 subdevice and media entity */
subdev = &chan->subdev;
v4l2_subdev_init(subdev, &tegra_csi_ops);
subdev->dev = csi->dev;
if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "%s-%d", "tpg",
chan->csi_port_nums[0]);
else
snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "%s",
kbasename(chan->of_node->full_name));
v4l2_set_subdevdata(subdev, chan);
subdev->fwnode = of_fwnode_handle(chan->of_node);
subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
/* initialize media entity pads */
ret = media_entity_pads_init(&subdev->entity, chan->numpads,
chan->pads);
if (ret < 0) {
dev_err(csi->dev,
"failed to initialize media entity: %d\n", ret);
subdev->dev = NULL;
return ret;
}
if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)) {
ret = v4l2_async_register_subdev(subdev);
if (ret < 0) {
dev_err(csi->dev,
"failed to register subdev: %d\n", ret);
return ret;
}
}
return 0;
}
void tegra_csi_error_recover(struct v4l2_subdev *sd)
{
struct tegra_csi_channel *csi_chan = to_csi_chan(sd);
struct tegra_csi *csi = csi_chan->csi;
/* stop streaming during error recovery */
csi->ops->csi_stop_streaming(csi_chan);
csi->ops->csi_err_recover(csi_chan);
csi->ops->csi_start_streaming(csi_chan);
}
static int tegra_csi_channels_init(struct tegra_csi *csi)
{
struct tegra_csi_channel *chan;
int ret;
list_for_each_entry(chan, &csi->csi_chans, list) {
ret = tegra_csi_channel_init(chan);
if (ret) {
dev_err(csi->dev,
"failed to initialize channel-%d: %d\n",
chan->csi_port_nums[0], ret);
return ret;
}
}
return 0;
}
static void tegra_csi_channels_cleanup(struct tegra_csi *csi)
{
struct v4l2_subdev *subdev;
struct tegra_csi_channel *chan, *tmp;
list_for_each_entry_safe(chan, tmp, &csi->csi_chans, list) {
if (chan->mipi)
tegra_mipi_free(chan->mipi);
subdev = &chan->subdev;
if (subdev->dev) {
if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
v4l2_async_unregister_subdev(subdev);
media_entity_cleanup(&subdev->entity);
}
of_node_put(chan->of_node);
list_del(&chan->list);
kfree(chan);
}
}
static int __maybe_unused csi_runtime_suspend(struct device *dev)
{
struct tegra_csi *csi = dev_get_drvdata(dev);
clk_bulk_disable_unprepare(csi->soc->num_clks, csi->clks);
return 0;
}
static int __maybe_unused csi_runtime_resume(struct device *dev)
{
struct tegra_csi *csi = dev_get_drvdata(dev);
int ret;
ret = clk_bulk_prepare_enable(csi->soc->num_clks, csi->clks);
if (ret < 0) {
dev_err(csi->dev, "failed to enable clocks: %d\n", ret);
return ret;
}
return 0;
}
static int tegra_csi_init(struct host1x_client *client)
{
struct tegra_csi *csi = host1x_client_to_csi(client);
struct tegra_video_device *vid = dev_get_drvdata(client->host);
int ret;
INIT_LIST_HEAD(&csi->csi_chans);
if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
ret = tegra_csi_tpg_channels_alloc(csi);
else
ret = tegra_csi_channels_alloc(csi);
if (ret < 0) {
dev_err(csi->dev,
"failed to allocate channels: %d\n", ret);
goto cleanup;
}
ret = tegra_csi_channels_init(csi);
if (ret < 0)
goto cleanup;
vid->csi = csi;
return 0;
cleanup:
tegra_csi_channels_cleanup(csi);
return ret;
}
static int tegra_csi_exit(struct host1x_client *client)
{
struct tegra_csi *csi = host1x_client_to_csi(client);
tegra_csi_channels_cleanup(csi);
return 0;
}
static const struct host1x_client_ops csi_client_ops = {
.init = tegra_csi_init,
.exit = tegra_csi_exit,
};
static int tegra_csi_probe(struct platform_device *pdev)
{
struct tegra_csi *csi;
unsigned int i;
int ret;
csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
if (!csi)
return -ENOMEM;
csi->iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(csi->iomem))
return PTR_ERR(csi->iomem);
csi->soc = of_device_get_match_data(&pdev->dev);
csi->clks = devm_kcalloc(&pdev->dev, csi->soc->num_clks,
sizeof(*csi->clks), GFP_KERNEL);
if (!csi->clks)
return -ENOMEM;
for (i = 0; i < csi->soc->num_clks; i++)
csi->clks[i].id = csi->soc->clk_names[i];
ret = devm_clk_bulk_get(&pdev->dev, csi->soc->num_clks, csi->clks);
if (ret) {
dev_err(&pdev->dev, "failed to get the clocks: %d\n", ret);
return ret;
}
if (!pdev->dev.pm_domain) {
ret = -ENOENT;
dev_warn(&pdev->dev, "PM domain is not attached: %d\n", ret);
return ret;
}
csi->dev = &pdev->dev;
csi->ops = csi->soc->ops;
platform_set_drvdata(pdev, csi);
pm_runtime_enable(&pdev->dev);
/* initialize host1x interface */
INIT_LIST_HEAD(&csi->client.list);
csi->client.ops = &csi_client_ops;
csi->client.dev = &pdev->dev;
ret = host1x_client_register(&csi->client);
if (ret < 0) {
dev_err(&pdev->dev,
"failed to register host1x client: %d\n", ret);
goto rpm_disable;
}
return 0;
rpm_disable:
pm_runtime_disable(&pdev->dev);
return ret;
}
static int tegra_csi_remove(struct platform_device *pdev)
{
struct tegra_csi *csi = platform_get_drvdata(pdev);
host1x_client_unregister(&csi->client);
pm_runtime_disable(&pdev->dev);
return 0;
}
#if defined(CONFIG_ARCH_TEGRA_210_SOC)
extern const struct tegra_csi_soc tegra210_csi_soc;
#endif
static const struct of_device_id tegra_csi_of_id_table[] = {
#if defined(CONFIG_ARCH_TEGRA_210_SOC)
{ .compatible = "nvidia,tegra210-csi", .data = &tegra210_csi_soc },
#endif
{ }
};
MODULE_DEVICE_TABLE(of, tegra_csi_of_id_table);
static const struct dev_pm_ops tegra_csi_pm_ops = {
SET_RUNTIME_PM_OPS(csi_runtime_suspend, csi_runtime_resume, NULL)
};
struct platform_driver tegra_csi_driver = {
.driver = {
.name = "tegra-csi",
.of_match_table = tegra_csi_of_id_table,
.pm = &tegra_csi_pm_ops,
},
.probe = tegra_csi_probe,
.remove = tegra_csi_remove,
};
| linux-master | drivers/staging/media/tegra-video/csi.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2018 BayLibre, SAS
* Author: Maxime Jourdan <[email protected]>
*/
#include <linux/gcd.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-event.h>
#include <media/videobuf2-dma-contig.h>
#include "vdec_helpers.h"
#define NUM_CANVAS_NV12 2
#define NUM_CANVAS_YUV420 3
u32 amvdec_read_dos(struct amvdec_core *core, u32 reg)
{
return readl_relaxed(core->dos_base + reg);
}
EXPORT_SYMBOL_GPL(amvdec_read_dos);
void amvdec_write_dos(struct amvdec_core *core, u32 reg, u32 val)
{
writel_relaxed(val, core->dos_base + reg);
}
EXPORT_SYMBOL_GPL(amvdec_write_dos);
void amvdec_write_dos_bits(struct amvdec_core *core, u32 reg, u32 val)
{
amvdec_write_dos(core, reg, amvdec_read_dos(core, reg) | val);
}
EXPORT_SYMBOL_GPL(amvdec_write_dos_bits);
void amvdec_clear_dos_bits(struct amvdec_core *core, u32 reg, u32 val)
{
amvdec_write_dos(core, reg, amvdec_read_dos(core, reg) & ~val);
}
EXPORT_SYMBOL_GPL(amvdec_clear_dos_bits);
u32 amvdec_read_parser(struct amvdec_core *core, u32 reg)
{
return readl_relaxed(core->esparser_base + reg);
}
EXPORT_SYMBOL_GPL(amvdec_read_parser);
void amvdec_write_parser(struct amvdec_core *core, u32 reg, u32 val)
{
writel_relaxed(val, core->esparser_base + reg);
}
EXPORT_SYMBOL_GPL(amvdec_write_parser);
/* 4 KiB per 64x32 block */
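/* e.g. for 3840x2160: 60 * 68 = 4080 blocks * 4 KiB = ~16 MiB */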
u32 amvdec_am21c_body_size(u32 width, u32 height)
{
u32 width_64 = ALIGN(width, 64) / 64;
u32 height_32 = ALIGN(height, 32) / 32;
return SZ_4K * width_64 * height_32;
}
EXPORT_SYMBOL_GPL(amvdec_am21c_body_size);
/* 32 bytes per 128x64 block */
u32 amvdec_am21c_head_size(u32 width, u32 height)
{
u32 width_128 = ALIGN(width, 128) / 128;
u32 height_64 = ALIGN(height, 64) / 64;
return 32 * width_128 * height_64;
}
EXPORT_SYMBOL_GPL(amvdec_am21c_head_size);
u32 amvdec_am21c_size(u32 width, u32 height)
{
return ALIGN(amvdec_am21c_body_size(width, height) +
amvdec_am21c_head_size(width, height), SZ_64K);
}
EXPORT_SYMBOL_GPL(amvdec_am21c_size);
static int canvas_alloc(struct amvdec_session *sess, u8 *canvas_id)
{
int ret;
if (sess->canvas_num >= MAX_CANVAS) {
dev_err(sess->core->dev, "Reached max number of canvas\n");
return -ENOMEM;
}
ret = meson_canvas_alloc(sess->core->canvas, canvas_id);
if (ret)
return ret;
sess->canvas_alloc[sess->canvas_num++] = *canvas_id;
return 0;
}
static int set_canvas_yuv420m(struct amvdec_session *sess,
struct vb2_buffer *vb, u32 width,
u32 height, u32 reg)
{
struct amvdec_core *core = sess->core;
u8 canvas_id[NUM_CANVAS_YUV420]; /* Y U V */
dma_addr_t buf_paddr[NUM_CANVAS_YUV420]; /* Y U V */
int ret, i;
for (i = 0; i < NUM_CANVAS_YUV420; ++i) {
ret = canvas_alloc(sess, &canvas_id[i]);
if (ret)
return ret;
buf_paddr[i] =
vb2_dma_contig_plane_dma_addr(vb, i);
}
/* Y plane */
meson_canvas_config(core->canvas, canvas_id[0], buf_paddr[0],
width, height, MESON_CANVAS_WRAP_NONE,
MESON_CANVAS_BLKMODE_LINEAR,
MESON_CANVAS_ENDIAN_SWAP64);
/* U plane */
meson_canvas_config(core->canvas, canvas_id[1], buf_paddr[1],
width / 2, height / 2, MESON_CANVAS_WRAP_NONE,
MESON_CANVAS_BLKMODE_LINEAR,
MESON_CANVAS_ENDIAN_SWAP64);
/* V plane */
meson_canvas_config(core->canvas, canvas_id[2], buf_paddr[2],
width / 2, height / 2, MESON_CANVAS_WRAP_NONE,
MESON_CANVAS_BLKMODE_LINEAR,
MESON_CANVAS_ENDIAN_SWAP64);
amvdec_write_dos(core, reg,
((canvas_id[2]) << 16) |
((canvas_id[1]) << 8) |
(canvas_id[0]));
return 0;
}
static int set_canvas_nv12m(struct amvdec_session *sess,
struct vb2_buffer *vb, u32 width,
u32 height, u32 reg)
{
struct amvdec_core *core = sess->core;
u8 canvas_id[NUM_CANVAS_NV12]; /* Y U/V */
dma_addr_t buf_paddr[NUM_CANVAS_NV12]; /* Y U/V */
int ret, i;
for (i = 0; i < NUM_CANVAS_NV12; ++i) {
ret = canvas_alloc(sess, &canvas_id[i]);
if (ret)
return ret;
buf_paddr[i] =
vb2_dma_contig_plane_dma_addr(vb, i);
}
/* Y plane */
meson_canvas_config(core->canvas, canvas_id[0], buf_paddr[0],
width, height, MESON_CANVAS_WRAP_NONE,
MESON_CANVAS_BLKMODE_LINEAR,
MESON_CANVAS_ENDIAN_SWAP64);
/* U/V plane */
meson_canvas_config(core->canvas, canvas_id[1], buf_paddr[1],
width, height / 2, MESON_CANVAS_WRAP_NONE,
MESON_CANVAS_BLKMODE_LINEAR,
MESON_CANVAS_ENDIAN_SWAP64);
amvdec_write_dos(core, reg,
((canvas_id[1]) << 16) |
((canvas_id[1]) << 8) |
(canvas_id[0]));
return 0;
}
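/*
 * Program one canvas set per queued capture buffer, spreading the
 * canvas indices over the caller-provided register banks and
 * recording the firmware-index to vb2-index mapping.
 */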
int amvdec_set_canvases(struct amvdec_session *sess,
u32 reg_base[], u32 reg_num[])
{
struct v4l2_m2m_buffer *buf;
u32 pixfmt = sess->pixfmt_cap;
u32 width = ALIGN(sess->width, 32);
u32 height = ALIGN(sess->height, 32);
u32 reg_cur;
u32 reg_num_cur = 0;
u32 reg_base_cur = 0;
int i = 0;
int ret;
v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) {
if (!reg_base[reg_base_cur])
return -EINVAL;
reg_cur = reg_base[reg_base_cur] + reg_num_cur * 4;
switch (pixfmt) {
case V4L2_PIX_FMT_NV12M:
ret = set_canvas_nv12m(sess, &buf->vb.vb2_buf, width,
height, reg_cur);
if (ret)
return ret;
break;
case V4L2_PIX_FMT_YUV420M:
ret = set_canvas_yuv420m(sess, &buf->vb.vb2_buf, width,
height, reg_cur);
if (ret)
return ret;
break;
default:
dev_err(sess->core->dev, "Unsupported pixfmt %08X\n",
pixfmt);
return -EINVAL;
}
reg_num_cur++;
if (reg_num_cur >= reg_num[reg_base_cur]) {
reg_base_cur++;
reg_num_cur = 0;
}
sess->fw_idx_to_vb2_idx[i++] = buf->vb.vb2_buf.index;
}
return 0;
}
EXPORT_SYMBOL_GPL(amvdec_set_canvases);
int amvdec_add_ts(struct amvdec_session *sess, u64 ts,
struct v4l2_timecode tc, u32 offset, u32 vbuf_flags)
{
struct amvdec_timestamp *new_ts;
unsigned long flags;
new_ts = kzalloc(sizeof(*new_ts), GFP_KERNEL);
if (!new_ts)
return -ENOMEM;
new_ts->ts = ts;
new_ts->tc = tc;
new_ts->offset = offset;
new_ts->flags = vbuf_flags;
spin_lock_irqsave(&sess->ts_spinlock, flags);
list_add_tail(&new_ts->list, &sess->timestamps);
spin_unlock_irqrestore(&sess->ts_spinlock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(amvdec_add_ts);
void amvdec_remove_ts(struct amvdec_session *sess, u64 ts)
{
struct amvdec_timestamp *tmp;
unsigned long flags;
spin_lock_irqsave(&sess->ts_spinlock, flags);
list_for_each_entry(tmp, &sess->timestamps, list) {
if (tmp->ts == ts) {
list_del(&tmp->list);
kfree(tmp);
goto unlock;
}
}
dev_warn(sess->core->dev_dec,
"Couldn't remove buffer with timestamp %llu from list\n", ts);
unlock:
spin_unlock_irqrestore(&sess->ts_spinlock, flags);
}
EXPORT_SYMBOL_GPL(amvdec_remove_ts);
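/*
 * Complete a decoded capture buffer: set the plane payloads for the
 * current pixel format, fill in the buffer metadata and flag EOS/LAST
 * when the session is draining or waiting for a resume after a source
 * change.
 */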
static void dst_buf_done(struct amvdec_session *sess,
struct vb2_v4l2_buffer *vbuf,
u32 field, u64 timestamp,
struct v4l2_timecode timecode, u32 flags)
{
struct device *dev = sess->core->dev_dec;
u32 output_size = amvdec_get_output_size(sess);
switch (sess->pixfmt_cap) {
case V4L2_PIX_FMT_NV12M:
vb2_set_plane_payload(&vbuf->vb2_buf, 0, output_size);
vb2_set_plane_payload(&vbuf->vb2_buf, 1, output_size / 2);
break;
case V4L2_PIX_FMT_YUV420M:
vb2_set_plane_payload(&vbuf->vb2_buf, 0, output_size);
vb2_set_plane_payload(&vbuf->vb2_buf, 1, output_size / 4);
vb2_set_plane_payload(&vbuf->vb2_buf, 2, output_size / 4);
break;
}
vbuf->vb2_buf.timestamp = timestamp;
vbuf->sequence = sess->sequence_cap++;
vbuf->flags = flags;
vbuf->timecode = timecode;
if (sess->should_stop &&
atomic_read(&sess->esparser_queued_bufs) <= 1) {
const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
dev_dbg(dev, "Signaling EOS, sequence_cap = %u\n",
sess->sequence_cap - 1);
v4l2_event_queue_fh(&sess->fh, &ev);
vbuf->flags |= V4L2_BUF_FLAG_LAST;
} else if (sess->status == STATUS_NEEDS_RESUME) {
/* Mark LAST for drained show frames during a source change */
vbuf->flags |= V4L2_BUF_FLAG_LAST;
sess->sequence_cap = 0;
} else if (sess->should_stop)
dev_dbg(dev, "should_stop, %u bufs remain\n",
atomic_read(&sess->esparser_queued_bufs));
dev_dbg(dev, "Buffer %u done, ts = %llu, flags = %08X\n",
vbuf->vb2_buf.index, timestamp, flags);
vbuf->field = field;
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
/* Buffer done probably means the vififo got freed */
schedule_work(&sess->esparser_queue_work);
}
void amvdec_dst_buf_done(struct amvdec_session *sess,
struct vb2_v4l2_buffer *vbuf, u32 field)
{
struct device *dev = sess->core->dev_dec;
struct amvdec_timestamp *tmp;
struct list_head *timestamps = &sess->timestamps;
struct v4l2_timecode timecode;
u64 timestamp;
u32 vbuf_flags;
unsigned long flags;
spin_lock_irqsave(&sess->ts_spinlock, flags);
if (list_empty(timestamps)) {
dev_err(dev, "Buffer %u done but list is empty\n",
vbuf->vb2_buf.index);
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
spin_unlock_irqrestore(&sess->ts_spinlock, flags);
return;
}
tmp = list_first_entry(timestamps, struct amvdec_timestamp, list);
timestamp = tmp->ts;
timecode = tmp->tc;
vbuf_flags = tmp->flags;
list_del(&tmp->list);
kfree(tmp);
spin_unlock_irqrestore(&sess->ts_spinlock, flags);
dst_buf_done(sess, vbuf, field, timestamp, timecode, vbuf_flags);
atomic_dec(&sess->esparser_queued_bufs);
}
EXPORT_SYMBOL_GPL(amvdec_dst_buf_done);
void amvdec_dst_buf_done_offset(struct amvdec_session *sess,
struct vb2_v4l2_buffer *vbuf,
u32 offset, u32 field, bool allow_drop)
{
struct device *dev = sess->core->dev_dec;
struct amvdec_timestamp *match = NULL;
struct amvdec_timestamp *tmp, *n;
struct v4l2_timecode timecode = { 0 };
u64 timestamp = 0;
u32 vbuf_flags = 0;
unsigned long flags;
spin_lock_irqsave(&sess->ts_spinlock, flags);
/* Look for our vififo offset to get the corresponding timestamp. */
list_for_each_entry_safe(tmp, n, &sess->timestamps, list) {
if (tmp->offset > offset) {
/*
* Delete any record that remained unused for 32 match
* checks
*/
if (tmp->used_count++ >= 32) {
list_del(&tmp->list);
kfree(tmp);
}
break;
}
match = tmp;
}
if (!match) {
dev_err(dev, "Buffer %u done but can't match offset (%08X)\n",
vbuf->vb2_buf.index, offset);
} else {
timestamp = match->ts;
timecode = match->tc;
vbuf_flags = match->flags;
list_del(&match->list);
kfree(match);
}
spin_unlock_irqrestore(&sess->ts_spinlock, flags);
dst_buf_done(sess, vbuf, field, timestamp, timecode, vbuf_flags);
if (match)
atomic_dec(&sess->esparser_queued_bufs);
}
EXPORT_SYMBOL_GPL(amvdec_dst_buf_done_offset);
void amvdec_dst_buf_done_idx(struct amvdec_session *sess,
u32 buf_idx, u32 offset, u32 field)
{
struct vb2_v4l2_buffer *vbuf;
struct device *dev = sess->core->dev_dec;
vbuf = v4l2_m2m_dst_buf_remove_by_idx(sess->m2m_ctx,
sess->fw_idx_to_vb2_idx[buf_idx]);
if (!vbuf) {
dev_err(dev,
"Buffer %u done but it doesn't exist in m2m_ctx\n",
buf_idx);
return;
}
if (offset != -1)
amvdec_dst_buf_done_offset(sess, vbuf, offset, field, true);
else
amvdec_dst_buf_done(sess, vbuf, field);
}
EXPORT_SYMBOL_GPL(amvdec_dst_buf_done_idx);
void amvdec_set_par_from_dar(struct amvdec_session *sess,
u32 dar_num, u32 dar_den)
{
u32 div;
sess->pixelaspect.numerator = sess->height * dar_num;
sess->pixelaspect.denominator = sess->width * dar_den;
div = gcd(sess->pixelaspect.numerator, sess->pixelaspect.denominator);
sess->pixelaspect.numerator /= div;
sess->pixelaspect.denominator /= div;
}
EXPORT_SYMBOL_GPL(amvdec_set_par_from_dar);
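/*
 * Worked example (illustrative, not part of the upstream driver):
 * amvdec_set_par_from_dar() derives the pixel aspect ratio as
 * PAR = DAR * height / width, reduced by the GCD. For a 720x480 session
 * signalling a 16:9 display aspect ratio:
 *
 *	numerator   = 480 * 16 = 7680
 *	denominator = 720 *  9 = 6480
 *	gcd(7680, 6480) = 240  ->  PAR = 32:27
 *
 * i.e. a hypothetical caller would do amvdec_set_par_from_dar(sess, 16, 9).
 */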
void amvdec_src_change(struct amvdec_session *sess, u32 width,
u32 height, u32 dpb_size)
{
static const struct v4l2_event ev = {
.type = V4L2_EVENT_SOURCE_CHANGE,
.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION };
v4l2_ctrl_s_ctrl(sess->ctrl_min_buf_capture, dpb_size);
/*
	 * Check if the capture queue is already suitably configured for our
	 * use case. If so, keep decoding with it and do not send the event.
*/
if (sess->streamon_cap &&
sess->width == width &&
sess->height == height &&
dpb_size <= sess->num_dst_bufs) {
sess->fmt_out->codec_ops->resume(sess);
return;
}
sess->changed_format = 0;
sess->width = width;
sess->height = height;
sess->status = STATUS_NEEDS_RESUME;
dev_dbg(sess->core->dev, "Res. changed (%ux%u), DPB size %u\n",
width, height, dpb_size);
v4l2_event_queue_fh(&sess->fh, &ev);
}
EXPORT_SYMBOL_GPL(amvdec_src_change);
void amvdec_abort(struct amvdec_session *sess)
{
dev_info(sess->core->dev, "Aborting decoding session!\n");
vb2_queue_error(&sess->m2m_ctx->cap_q_ctx.q);
vb2_queue_error(&sess->m2m_ctx->out_q_ctx.q);
}
EXPORT_SYMBOL_GPL(amvdec_abort);
| linux-master | drivers/staging/media/meson/vdec/vdec_helpers.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2018 BayLibre, SAS
* Author: Maxime Jourdan <[email protected]>
*/
#include "vdec_platform.h"
#include "vdec.h"
#include "vdec_1.h"
#include "vdec_hevc.h"
#include "codec_mpeg12.h"
#include "codec_h264.h"
#include "codec_vp9.h"
static const struct amvdec_format vdec_formats_gxbb[] = {
{
.pixfmt = V4L2_PIX_FMT_H264,
.min_buffers = 2,
.max_buffers = 24,
.max_width = 1920,
.max_height = 1080,
.vdec_ops = &vdec_1_ops,
.codec_ops = &codec_h264_ops,
.firmware_path = "meson/vdec/gxbb_h264.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
.flags = V4L2_FMT_FLAG_COMPRESSED |
V4L2_FMT_FLAG_DYN_RESOLUTION,
}, {
.pixfmt = V4L2_PIX_FMT_MPEG1,
.min_buffers = 8,
.max_buffers = 8,
.max_width = 1920,
.max_height = 1080,
.vdec_ops = &vdec_1_ops,
.codec_ops = &codec_mpeg12_ops,
.firmware_path = "meson/vdec/gxl_mpeg12.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
.flags = V4L2_FMT_FLAG_COMPRESSED,
}, {
.pixfmt = V4L2_PIX_FMT_MPEG2,
.min_buffers = 8,
.max_buffers = 8,
.max_width = 1920,
.max_height = 1080,
.vdec_ops = &vdec_1_ops,
.codec_ops = &codec_mpeg12_ops,
.firmware_path = "meson/vdec/gxl_mpeg12.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
.flags = V4L2_FMT_FLAG_COMPRESSED,
},
};
static const struct amvdec_format vdec_formats_gxl[] = {
{
.pixfmt = V4L2_PIX_FMT_VP9,
.min_buffers = 16,
.max_buffers = 24,
.max_width = 3840,
.max_height = 2160,
.vdec_ops = &vdec_hevc_ops,
.codec_ops = &codec_vp9_ops,
.firmware_path = "meson/vdec/gxl_vp9.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
.flags = V4L2_FMT_FLAG_COMPRESSED |
V4L2_FMT_FLAG_DYN_RESOLUTION,
}, {
.pixfmt = V4L2_PIX_FMT_H264,
.min_buffers = 2,
.max_buffers = 24,
.max_width = 3840,
.max_height = 2160,
.vdec_ops = &vdec_1_ops,
.codec_ops = &codec_h264_ops,
.firmware_path = "meson/vdec/gxl_h264.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
.flags = V4L2_FMT_FLAG_COMPRESSED |
V4L2_FMT_FLAG_DYN_RESOLUTION,
}, {
.pixfmt = V4L2_PIX_FMT_MPEG1,
.min_buffers = 8,
.max_buffers = 8,
.max_width = 1920,
.max_height = 1080,
.vdec_ops = &vdec_1_ops,
.codec_ops = &codec_mpeg12_ops,
.firmware_path = "meson/vdec/gxl_mpeg12.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
.flags = V4L2_FMT_FLAG_COMPRESSED,
}, {
.pixfmt = V4L2_PIX_FMT_MPEG2,
.min_buffers = 8,
.max_buffers = 8,
.max_width = 1920,
.max_height = 1080,
.vdec_ops = &vdec_1_ops,
.codec_ops = &codec_mpeg12_ops,
.firmware_path = "meson/vdec/gxl_mpeg12.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
.flags = V4L2_FMT_FLAG_COMPRESSED,
},
};
static const struct amvdec_format vdec_formats_gxm[] = {
{
.pixfmt = V4L2_PIX_FMT_VP9,
.min_buffers = 16,
.max_buffers = 24,
.max_width = 3840,
.max_height = 2160,
.vdec_ops = &vdec_hevc_ops,
.codec_ops = &codec_vp9_ops,
.firmware_path = "meson/vdec/gxl_vp9.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
.flags = V4L2_FMT_FLAG_COMPRESSED |
V4L2_FMT_FLAG_DYN_RESOLUTION,
}, {
.pixfmt = V4L2_PIX_FMT_H264,
.min_buffers = 2,
.max_buffers = 24,
.max_width = 3840,
.max_height = 2160,
.vdec_ops = &vdec_1_ops,
.codec_ops = &codec_h264_ops,
.firmware_path = "meson/vdec/gxm_h264.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
.flags = V4L2_FMT_FLAG_COMPRESSED |
V4L2_FMT_FLAG_DYN_RESOLUTION,
}, {
.pixfmt = V4L2_PIX_FMT_MPEG1,
.min_buffers = 8,
.max_buffers = 8,
.max_width = 1920,
.max_height = 1080,
.vdec_ops = &vdec_1_ops,
.codec_ops = &codec_mpeg12_ops,
.firmware_path = "meson/vdec/gxl_mpeg12.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
.flags = V4L2_FMT_FLAG_COMPRESSED,
}, {
.pixfmt = V4L2_PIX_FMT_MPEG2,
.min_buffers = 8,
.max_buffers = 8,
.max_width = 1920,
.max_height = 1080,
.vdec_ops = &vdec_1_ops,
.codec_ops = &codec_mpeg12_ops,
.firmware_path = "meson/vdec/gxl_mpeg12.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
.flags = V4L2_FMT_FLAG_COMPRESSED,
},
};
static const struct amvdec_format vdec_formats_g12a[] = {
{
.pixfmt = V4L2_PIX_FMT_VP9,
.min_buffers = 16,
.max_buffers = 24,
.max_width = 3840,
.max_height = 2160,
.vdec_ops = &vdec_hevc_ops,
.codec_ops = &codec_vp9_ops,
.firmware_path = "meson/vdec/g12a_vp9.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
.flags = V4L2_FMT_FLAG_COMPRESSED |
V4L2_FMT_FLAG_DYN_RESOLUTION,
}, {
.pixfmt = V4L2_PIX_FMT_H264,
.min_buffers = 2,
.max_buffers = 24,
.max_width = 3840,
.max_height = 2160,
.vdec_ops = &vdec_1_ops,
.codec_ops = &codec_h264_ops,
.firmware_path = "meson/vdec/g12a_h264.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
.flags = V4L2_FMT_FLAG_COMPRESSED |
V4L2_FMT_FLAG_DYN_RESOLUTION,
}, {
.pixfmt = V4L2_PIX_FMT_MPEG1,
.min_buffers = 8,
.max_buffers = 8,
.max_width = 1920,
.max_height = 1080,
.vdec_ops = &vdec_1_ops,
.codec_ops = &codec_mpeg12_ops,
.firmware_path = "meson/vdec/gxl_mpeg12.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
.flags = V4L2_FMT_FLAG_COMPRESSED,
}, {
.pixfmt = V4L2_PIX_FMT_MPEG2,
.min_buffers = 8,
.max_buffers = 8,
.max_width = 1920,
.max_height = 1080,
.vdec_ops = &vdec_1_ops,
.codec_ops = &codec_mpeg12_ops,
.firmware_path = "meson/vdec/gxl_mpeg12.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
.flags = V4L2_FMT_FLAG_COMPRESSED,
},
};
static const struct amvdec_format vdec_formats_sm1[] = {
{
.pixfmt = V4L2_PIX_FMT_VP9,
.min_buffers = 16,
.max_buffers = 24,
.max_width = 3840,
.max_height = 2160,
.vdec_ops = &vdec_hevc_ops,
.codec_ops = &codec_vp9_ops,
.firmware_path = "meson/vdec/sm1_vp9_mmu.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
.flags = V4L2_FMT_FLAG_COMPRESSED |
V4L2_FMT_FLAG_DYN_RESOLUTION,
}, {
.pixfmt = V4L2_PIX_FMT_H264,
.min_buffers = 2,
.max_buffers = 24,
.max_width = 3840,
.max_height = 2160,
.vdec_ops = &vdec_1_ops,
.codec_ops = &codec_h264_ops,
.firmware_path = "meson/vdec/g12a_h264.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
.flags = V4L2_FMT_FLAG_COMPRESSED |
V4L2_FMT_FLAG_DYN_RESOLUTION,
}, {
.pixfmt = V4L2_PIX_FMT_MPEG1,
.min_buffers = 8,
.max_buffers = 8,
.max_width = 1920,
.max_height = 1080,
.vdec_ops = &vdec_1_ops,
.codec_ops = &codec_mpeg12_ops,
.firmware_path = "meson/vdec/gxl_mpeg12.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
.flags = V4L2_FMT_FLAG_COMPRESSED,
}, {
.pixfmt = V4L2_PIX_FMT_MPEG2,
.min_buffers = 8,
.max_buffers = 8,
.max_width = 1920,
.max_height = 1080,
.vdec_ops = &vdec_1_ops,
.codec_ops = &codec_mpeg12_ops,
.firmware_path = "meson/vdec/gxl_mpeg12.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
.flags = V4L2_FMT_FLAG_COMPRESSED,
},
};
const struct vdec_platform vdec_platform_gxbb = {
.formats = vdec_formats_gxbb,
.num_formats = ARRAY_SIZE(vdec_formats_gxbb),
.revision = VDEC_REVISION_GXBB,
};
const struct vdec_platform vdec_platform_gxl = {
.formats = vdec_formats_gxl,
.num_formats = ARRAY_SIZE(vdec_formats_gxl),
.revision = VDEC_REVISION_GXL,
};
const struct vdec_platform vdec_platform_gxm = {
.formats = vdec_formats_gxm,
.num_formats = ARRAY_SIZE(vdec_formats_gxm),
.revision = VDEC_REVISION_GXM,
};
const struct vdec_platform vdec_platform_g12a = {
.formats = vdec_formats_g12a,
.num_formats = ARRAY_SIZE(vdec_formats_g12a),
.revision = VDEC_REVISION_G12A,
};
const struct vdec_platform vdec_platform_sm1 = {
.formats = vdec_formats_sm1,
.num_formats = ARRAY_SIZE(vdec_formats_sm1),
.revision = VDEC_REVISION_SM1,
};
MODULE_FIRMWARE("meson/vdec/g12a_h264.bin");
MODULE_FIRMWARE("meson/vdec/g12a_vp9.bin");
MODULE_FIRMWARE("meson/vdec/gxbb_h264.bin");
MODULE_FIRMWARE("meson/vdec/gxl_h264.bin");
MODULE_FIRMWARE("meson/vdec/gxl_mpeg12.bin");
MODULE_FIRMWARE("meson/vdec/gxl_vp9.bin");
MODULE_FIRMWARE("meson/vdec/gxm_h264.bin");
MODULE_FIRMWARE("meson/vdec/sm1_vp9_mmu.bin");
| linux-master | drivers/staging/media/meson/vdec/vdec_platform.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2018 BayLibre, SAS
* Author: Maxime Jourdan <[email protected]>
*
* VDEC_1 is a video decoding block that allows decoding of
* MPEG 1/2/4, H.263, H.264, MJPEG, VC1
*/
#include <linux/firmware.h>
#include <linux/clk.h>
#include "vdec_1.h"
#include "vdec_helpers.h"
#include "dos_regs.h"
/* AO Registers */
#define AO_RTI_GEN_PWR_SLEEP0 0xe8
#define AO_RTI_GEN_PWR_ISO0 0xec
#define GEN_PWR_VDEC_1 (BIT(3) | BIT(2))
#define GEN_PWR_VDEC_1_SM1 (BIT(1))
#define MC_SIZE (4096 * 4)
static int
vdec_1_load_firmware(struct amvdec_session *sess, const char *fwname)
{
const struct firmware *fw;
struct amvdec_core *core = sess->core;
struct device *dev = core->dev_dec;
struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
static void *mc_addr;
static dma_addr_t mc_addr_map;
int ret;
u32 i = 1000;
ret = request_firmware(&fw, fwname, dev);
if (ret < 0)
return -EINVAL;
if (fw->size < MC_SIZE) {
dev_err(dev, "Firmware size %zu is too small. Expected %u.\n",
fw->size, MC_SIZE);
ret = -EINVAL;
goto release_firmware;
}
mc_addr = dma_alloc_coherent(core->dev, MC_SIZE,
&mc_addr_map, GFP_KERNEL);
if (!mc_addr) {
ret = -ENOMEM;
goto release_firmware;
}
memcpy(mc_addr, fw->data, MC_SIZE);
amvdec_write_dos(core, MPSR, 0);
amvdec_write_dos(core, CPSR, 0);
amvdec_clear_dos_bits(core, MDEC_PIC_DC_CTRL, BIT(31));
amvdec_write_dos(core, IMEM_DMA_ADR, mc_addr_map);
amvdec_write_dos(core, IMEM_DMA_COUNT, MC_SIZE / 4);
amvdec_write_dos(core, IMEM_DMA_CTRL, (0x8000 | (7 << 16)));
while (--i && amvdec_read_dos(core, IMEM_DMA_CTRL) & 0x8000);
if (i == 0) {
dev_err(dev, "Firmware load fail (DMA hang?)\n");
ret = -EINVAL;
goto free_mc;
}
if (codec_ops->load_extended_firmware)
ret = codec_ops->load_extended_firmware(sess,
fw->data + MC_SIZE,
fw->size - MC_SIZE);
free_mc:
dma_free_coherent(core->dev, MC_SIZE, mc_addr, mc_addr_map);
release_firmware:
release_firmware(fw);
return ret;
}
static int vdec_1_stbuf_power_up(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
amvdec_write_dos(core, VLD_MEM_VIFIFO_CONTROL, 0);
amvdec_write_dos(core, VLD_MEM_VIFIFO_WRAP_COUNT, 0);
amvdec_write_dos(core, POWER_CTL_VLD, BIT(4));
amvdec_write_dos(core, VLD_MEM_VIFIFO_START_PTR, sess->vififo_paddr);
amvdec_write_dos(core, VLD_MEM_VIFIFO_CURR_PTR, sess->vififo_paddr);
amvdec_write_dos(core, VLD_MEM_VIFIFO_END_PTR,
sess->vififo_paddr + sess->vififo_size - 8);
amvdec_write_dos_bits(core, VLD_MEM_VIFIFO_CONTROL, 1);
amvdec_clear_dos_bits(core, VLD_MEM_VIFIFO_CONTROL, 1);
amvdec_write_dos(core, VLD_MEM_VIFIFO_BUF_CNTL, MEM_BUFCTRL_MANUAL);
amvdec_write_dos(core, VLD_MEM_VIFIFO_WP, sess->vififo_paddr);
amvdec_write_dos_bits(core, VLD_MEM_VIFIFO_BUF_CNTL, 1);
amvdec_clear_dos_bits(core, VLD_MEM_VIFIFO_BUF_CNTL, 1);
amvdec_write_dos_bits(core, VLD_MEM_VIFIFO_CONTROL,
(0x11 << MEM_FIFO_CNT_BIT) | MEM_FILL_ON_LEVEL |
MEM_CTRL_FILL_EN | MEM_CTRL_EMPTY_EN);
return 0;
}
static void vdec_1_conf_esparser(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
/* VDEC_1 specific ESPARSER stuff */
amvdec_write_dos(core, DOS_GEN_CTRL0, 0);
amvdec_write_dos(core, VLD_MEM_VIFIFO_BUF_CNTL, 1);
amvdec_clear_dos_bits(core, VLD_MEM_VIFIFO_BUF_CNTL, 1);
}
static u32 vdec_1_vififo_level(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
return amvdec_read_dos(core, VLD_MEM_VIFIFO_LEVEL);
}
static int vdec_1_stop(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
amvdec_write_dos(core, MPSR, 0);
amvdec_write_dos(core, CPSR, 0);
amvdec_write_dos(core, ASSIST_MBOX1_MASK, 0);
amvdec_write_dos(core, DOS_SW_RESET0, BIT(12) | BIT(11));
amvdec_write_dos(core, DOS_SW_RESET0, 0);
amvdec_read_dos(core, DOS_SW_RESET0);
/* enable vdec1 isolation */
if (core->platform->revision == VDEC_REVISION_SM1)
regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0,
GEN_PWR_VDEC_1_SM1, GEN_PWR_VDEC_1_SM1);
else
regmap_write(core->regmap_ao, AO_RTI_GEN_PWR_ISO0, 0xc0);
/* power off vdec1 memories */
amvdec_write_dos(core, DOS_MEM_PD_VDEC, 0xffffffff);
/* power off vdec1 */
if (core->platform->revision == VDEC_REVISION_SM1)
regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VDEC_1_SM1, GEN_PWR_VDEC_1_SM1);
else
regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VDEC_1, GEN_PWR_VDEC_1);
clk_disable_unprepare(core->vdec_1_clk);
if (sess->priv)
codec_ops->stop(sess);
return 0;
}
static int vdec_1_start(struct amvdec_session *sess)
{
int ret;
struct amvdec_core *core = sess->core;
struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
/* Configure the vdec clk to the maximum available */
clk_set_rate(core->vdec_1_clk, 666666666);
ret = clk_prepare_enable(core->vdec_1_clk);
if (ret)
return ret;
/* Enable power for VDEC_1 */
if (core->platform->revision == VDEC_REVISION_SM1)
regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VDEC_1_SM1, 0);
else
regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VDEC_1, 0);
usleep_range(10, 20);
/* Reset VDEC1 */
amvdec_write_dos(core, DOS_SW_RESET0, 0xfffffffc);
amvdec_write_dos(core, DOS_SW_RESET0, 0x00000000);
amvdec_write_dos(core, DOS_GCLK_EN0, 0x3ff);
/* enable VDEC Memories */
amvdec_write_dos(core, DOS_MEM_PD_VDEC, 0);
/* Remove VDEC1 Isolation */
if (core->platform->revision == VDEC_REVISION_SM1)
regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0,
GEN_PWR_VDEC_1_SM1, 0);
else
regmap_write(core->regmap_ao, AO_RTI_GEN_PWR_ISO0, 0);
/* Reset DOS top registers */
amvdec_write_dos(core, DOS_VDEC_MCRCC_STALL_CTRL, 0);
amvdec_write_dos(core, GCLK_EN, 0x3ff);
amvdec_clear_dos_bits(core, MDEC_PIC_DC_CTRL, BIT(31));
vdec_1_stbuf_power_up(sess);
ret = vdec_1_load_firmware(sess, sess->fmt_out->firmware_path);
if (ret)
goto stop;
ret = codec_ops->start(sess);
if (ret)
goto stop;
/* Enable IRQ */
amvdec_write_dos(core, ASSIST_MBOX1_CLR_REG, 1);
amvdec_write_dos(core, ASSIST_MBOX1_MASK, 1);
/* Enable 2-plane output */
if (sess->pixfmt_cap == V4L2_PIX_FMT_NV12M)
amvdec_write_dos_bits(core, MDEC_PIC_DC_CTRL, BIT(17));
else
amvdec_clear_dos_bits(core, MDEC_PIC_DC_CTRL, BIT(17));
/* Enable firmware processor */
amvdec_write_dos(core, MPSR, 1);
/* Let the firmware settle */
usleep_range(10, 20);
return 0;
stop:
vdec_1_stop(sess);
return ret;
}
struct amvdec_ops vdec_1_ops = {
.start = vdec_1_start,
.stop = vdec_1_stop,
.conf_esparser = vdec_1_conf_esparser,
.vififo_level = vdec_1_vififo_level,
};
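/*
 * Note (descriptive comment, not from the upstream sources): this ops
 * table is referenced as .vdec_ops by the per-SoC format lists in
 * vdec_platform.c, so every format decoded on the VDEC_1 core shares the
 * start/stop, ESPARSER configuration and vififo level paths defined above.
 */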
| linux-master | drivers/staging/media/meson/vdec/vdec_1.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2018 Maxime Jourdan <[email protected]>
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
*/
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
#include "dos_regs.h"
#include "hevc_regs.h"
#include "codec_vp9.h"
#include "vdec_helpers.h"
#include "codec_hevc_common.h"
/* HEVC reg mapping */
#define VP9_DEC_STATUS_REG HEVC_ASSIST_SCRATCH_0
#define VP9_10B_DECODE_SLICE 5
#define VP9_HEAD_PARSER_DONE 0xf0
#define VP9_RPM_BUFFER HEVC_ASSIST_SCRATCH_1
#define VP9_SHORT_TERM_RPS HEVC_ASSIST_SCRATCH_2
#define VP9_ADAPT_PROB_REG HEVC_ASSIST_SCRATCH_3
#define VP9_MMU_MAP_BUFFER HEVC_ASSIST_SCRATCH_4
#define VP9_PPS_BUFFER HEVC_ASSIST_SCRATCH_5
#define VP9_SAO_UP HEVC_ASSIST_SCRATCH_6
#define VP9_STREAM_SWAP_BUFFER HEVC_ASSIST_SCRATCH_7
#define VP9_STREAM_SWAP_BUFFER2 HEVC_ASSIST_SCRATCH_8
#define VP9_PROB_SWAP_BUFFER HEVC_ASSIST_SCRATCH_9
#define VP9_COUNT_SWAP_BUFFER HEVC_ASSIST_SCRATCH_A
#define VP9_SEG_MAP_BUFFER HEVC_ASSIST_SCRATCH_B
#define VP9_SCALELUT HEVC_ASSIST_SCRATCH_D
#define VP9_WAIT_FLAG HEVC_ASSIST_SCRATCH_E
#define LMEM_DUMP_ADR HEVC_ASSIST_SCRATCH_F
#define NAL_SEARCH_CTL HEVC_ASSIST_SCRATCH_I
#define VP9_DECODE_MODE HEVC_ASSIST_SCRATCH_J
#define DECODE_MODE_SINGLE 0
#define DECODE_STOP_POS HEVC_ASSIST_SCRATCH_K
#define HEVC_DECODE_COUNT HEVC_ASSIST_SCRATCH_M
#define HEVC_DECODE_SIZE HEVC_ASSIST_SCRATCH_N
/* VP9 Constants */
#define LCU_SIZE 64
#define MAX_REF_PIC_NUM 24
#define REFS_PER_FRAME 3
#define REF_FRAMES 8
#define MV_MEM_UNIT 0x240
#define ADAPT_PROB_SIZE 0xf80
enum FRAME_TYPE {
KEY_FRAME = 0,
INTER_FRAME = 1,
FRAME_TYPES,
};
/* VP9 Workspace layout */
#define MPRED_MV_BUF_SIZE 0x120000
#define IPP_SIZE 0x4000
#define SAO_ABV_SIZE 0x30000
#define SAO_VB_SIZE 0x30000
#define SH_TM_RPS_SIZE 0x800
#define VPS_SIZE 0x800
#define SPS_SIZE 0x800
#define PPS_SIZE 0x2000
#define SAO_UP_SIZE 0x2800
#define SWAP_BUF_SIZE 0x800
#define SWAP_BUF2_SIZE 0x800
#define SCALELUT_SIZE 0x8000
#define DBLK_PARA_SIZE 0x80000
#define DBLK_DATA_SIZE 0x80000
#define SEG_MAP_SIZE 0xd800
#define PROB_SIZE 0x5000
#define COUNT_SIZE 0x3000
#define MMU_VBH_SIZE 0x5000
#define MPRED_ABV_SIZE 0x10000
#define MPRED_MV_SIZE (MPRED_MV_BUF_SIZE * MAX_REF_PIC_NUM)
#define RPM_BUF_SIZE 0x100
#define LMEM_SIZE 0x800
#define IPP_OFFSET 0x00
#define SAO_ABV_OFFSET (IPP_OFFSET + IPP_SIZE)
#define SAO_VB_OFFSET (SAO_ABV_OFFSET + SAO_ABV_SIZE)
#define SH_TM_RPS_OFFSET (SAO_VB_OFFSET + SAO_VB_SIZE)
#define VPS_OFFSET (SH_TM_RPS_OFFSET + SH_TM_RPS_SIZE)
#define SPS_OFFSET (VPS_OFFSET + VPS_SIZE)
#define PPS_OFFSET (SPS_OFFSET + SPS_SIZE)
#define SAO_UP_OFFSET (PPS_OFFSET + PPS_SIZE)
#define SWAP_BUF_OFFSET (SAO_UP_OFFSET + SAO_UP_SIZE)
#define SWAP_BUF2_OFFSET (SWAP_BUF_OFFSET + SWAP_BUF_SIZE)
#define SCALELUT_OFFSET (SWAP_BUF2_OFFSET + SWAP_BUF2_SIZE)
#define DBLK_PARA_OFFSET (SCALELUT_OFFSET + SCALELUT_SIZE)
#define DBLK_DATA_OFFSET (DBLK_PARA_OFFSET + DBLK_PARA_SIZE)
#define SEG_MAP_OFFSET (DBLK_DATA_OFFSET + DBLK_DATA_SIZE)
#define PROB_OFFSET (SEG_MAP_OFFSET + SEG_MAP_SIZE)
#define COUNT_OFFSET (PROB_OFFSET + PROB_SIZE)
#define MMU_VBH_OFFSET (COUNT_OFFSET + COUNT_SIZE)
#define MPRED_ABV_OFFSET (MMU_VBH_OFFSET + MMU_VBH_SIZE)
#define MPRED_MV_OFFSET (MPRED_ABV_OFFSET + MPRED_ABV_SIZE)
#define RPM_OFFSET (MPRED_MV_OFFSET + MPRED_MV_SIZE)
#define LMEM_OFFSET (RPM_OFFSET + RPM_BUF_SIZE)
#define SIZE_WORKSPACE ALIGN(LMEM_OFFSET + LMEM_SIZE, 64 * SZ_1K)
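/*
 * Note (illustrative, not from the upstream sources): the *_OFFSET values
 * above are a running sum of the *_SIZE values, so the whole VP9 decoder
 * state is carved out of a single contiguous workspace allocation. A
 * hypothetical compile-time check of the layout could be:
 *
 *	BUILD_BUG_ON(LMEM_OFFSET + LMEM_SIZE > SIZE_WORKSPACE);
 *
 * which holds trivially since SIZE_WORKSPACE is that very sum rounded up
 * to a 64 KiB boundary.
 */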
#define NONE -1
#define INTRA_FRAME 0
#define LAST_FRAME 1
#define GOLDEN_FRAME 2
#define ALTREF_FRAME 3
#define MAX_REF_FRAMES 4
/*
 * Defines, declarations and sub-functions for the vp9 de-block loop
 * filter Thr/Lvl table update
 * - struct segmentation is for the loop filter only (fields that are not
 *   needed for loop filtering have been removed)
 * - "vp9_loop_filter_init" and "vp9_loop_filter_frame_init" will be
 *   instantiated in C_Entry
 * - vp9_loop_filter_init runs once before decoding starts
 * - vp9_loop_filter_frame_init runs before every frame decoding starts
 * - setting the video format to VP9 is done in vp9_loop_filter_init
 */
#define MAX_LOOP_FILTER 63
#define MAX_REF_LF_DELTAS 4
#define MAX_MODE_LF_DELTAS 2
#define SEGMENT_DELTADATA 0
#define SEGMENT_ABSDATA 1
#define MAX_SEGMENTS 8
/* VP9 PROB processing defines */
#define VP9_PARTITION_START 0
#define VP9_PARTITION_SIZE_STEP (3 * 4)
#define VP9_PARTITION_ONE_SIZE (4 * VP9_PARTITION_SIZE_STEP)
#define VP9_PARTITION_KEY_START 0
#define VP9_PARTITION_P_START VP9_PARTITION_ONE_SIZE
#define VP9_PARTITION_SIZE (2 * VP9_PARTITION_ONE_SIZE)
#define VP9_SKIP_START (VP9_PARTITION_START + VP9_PARTITION_SIZE)
#define VP9_SKIP_SIZE 4 /* only use 3 */
#define VP9_TX_MODE_START (VP9_SKIP_START + VP9_SKIP_SIZE)
#define VP9_TX_MODE_8_0_OFFSET 0
#define VP9_TX_MODE_8_1_OFFSET 1
#define VP9_TX_MODE_16_0_OFFSET 2
#define VP9_TX_MODE_16_1_OFFSET 4
#define VP9_TX_MODE_32_0_OFFSET 6
#define VP9_TX_MODE_32_1_OFFSET 9
#define VP9_TX_MODE_SIZE 12
#define VP9_COEF_START (VP9_TX_MODE_START + VP9_TX_MODE_SIZE)
#define VP9_COEF_BAND_0_OFFSET 0
#define VP9_COEF_BAND_1_OFFSET (VP9_COEF_BAND_0_OFFSET + 3 * 3 + 1)
#define VP9_COEF_BAND_2_OFFSET (VP9_COEF_BAND_1_OFFSET + 6 * 3)
#define VP9_COEF_BAND_3_OFFSET (VP9_COEF_BAND_2_OFFSET + 6 * 3)
#define VP9_COEF_BAND_4_OFFSET (VP9_COEF_BAND_3_OFFSET + 6 * 3)
#define VP9_COEF_BAND_5_OFFSET (VP9_COEF_BAND_4_OFFSET + 6 * 3)
#define VP9_COEF_SIZE_ONE_SET 100 /* ((3 + 5 * 6) * 3 + 1 padding)*/
#define VP9_COEF_4X4_START (VP9_COEF_START + 0 * VP9_COEF_SIZE_ONE_SET)
#define VP9_COEF_8X8_START (VP9_COEF_START + 4 * VP9_COEF_SIZE_ONE_SET)
#define VP9_COEF_16X16_START (VP9_COEF_START + 8 * VP9_COEF_SIZE_ONE_SET)
#define VP9_COEF_32X32_START (VP9_COEF_START + 12 * VP9_COEF_SIZE_ONE_SET)
#define VP9_COEF_SIZE_PLANE (2 * VP9_COEF_SIZE_ONE_SET)
#define VP9_COEF_SIZE (4 * 2 * 2 * VP9_COEF_SIZE_ONE_SET)
#define VP9_INTER_MODE_START (VP9_COEF_START + VP9_COEF_SIZE)
#define VP9_INTER_MODE_SIZE 24 /* only use 21 (# * 7)*/
#define VP9_INTERP_START (VP9_INTER_MODE_START + VP9_INTER_MODE_SIZE)
#define VP9_INTERP_SIZE 8
#define VP9_INTRA_INTER_START (VP9_INTERP_START + VP9_INTERP_SIZE)
#define VP9_INTRA_INTER_SIZE 4
#define VP9_INTERP_INTRA_INTER_START VP9_INTERP_START
#define VP9_INTERP_INTRA_INTER_SIZE (VP9_INTERP_SIZE + VP9_INTRA_INTER_SIZE)
#define VP9_COMP_INTER_START \
(VP9_INTERP_INTRA_INTER_START + VP9_INTERP_INTRA_INTER_SIZE)
#define VP9_COMP_INTER_SIZE 5
#define VP9_COMP_REF_START (VP9_COMP_INTER_START + VP9_COMP_INTER_SIZE)
#define VP9_COMP_REF_SIZE 5
#define VP9_SINGLE_REF_START (VP9_COMP_REF_START + VP9_COMP_REF_SIZE)
#define VP9_SINGLE_REF_SIZE 10
#define VP9_REF_MODE_START VP9_COMP_INTER_START
#define VP9_REF_MODE_SIZE \
(VP9_COMP_INTER_SIZE + VP9_COMP_REF_SIZE + VP9_SINGLE_REF_SIZE)
#define VP9_IF_Y_MODE_START (VP9_REF_MODE_START + VP9_REF_MODE_SIZE)
#define VP9_IF_Y_MODE_SIZE 36
#define VP9_IF_UV_MODE_START (VP9_IF_Y_MODE_START + VP9_IF_Y_MODE_SIZE)
#define VP9_IF_UV_MODE_SIZE 92 /* only use 90 */
#define VP9_MV_JOINTS_START (VP9_IF_UV_MODE_START + VP9_IF_UV_MODE_SIZE)
#define VP9_MV_JOINTS_SIZE 3
#define VP9_MV_SIGN_0_START (VP9_MV_JOINTS_START + VP9_MV_JOINTS_SIZE)
#define VP9_MV_SIGN_0_SIZE 1
#define VP9_MV_CLASSES_0_START (VP9_MV_SIGN_0_START + VP9_MV_SIGN_0_SIZE)
#define VP9_MV_CLASSES_0_SIZE 10
#define VP9_MV_CLASS0_0_START \
(VP9_MV_CLASSES_0_START + VP9_MV_CLASSES_0_SIZE)
#define VP9_MV_CLASS0_0_SIZE 1
#define VP9_MV_BITS_0_START (VP9_MV_CLASS0_0_START + VP9_MV_CLASS0_0_SIZE)
#define VP9_MV_BITS_0_SIZE 10
#define VP9_MV_SIGN_1_START (VP9_MV_BITS_0_START + VP9_MV_BITS_0_SIZE)
#define VP9_MV_SIGN_1_SIZE 1
#define VP9_MV_CLASSES_1_START \
(VP9_MV_SIGN_1_START + VP9_MV_SIGN_1_SIZE)
#define VP9_MV_CLASSES_1_SIZE 10
#define VP9_MV_CLASS0_1_START \
(VP9_MV_CLASSES_1_START + VP9_MV_CLASSES_1_SIZE)
#define VP9_MV_CLASS0_1_SIZE 1
#define VP9_MV_BITS_1_START \
(VP9_MV_CLASS0_1_START + VP9_MV_CLASS0_1_SIZE)
#define VP9_MV_BITS_1_SIZE 10
#define VP9_MV_CLASS0_FP_0_START \
(VP9_MV_BITS_1_START + VP9_MV_BITS_1_SIZE)
#define VP9_MV_CLASS0_FP_0_SIZE 9
#define VP9_MV_CLASS0_FP_1_START \
(VP9_MV_CLASS0_FP_0_START + VP9_MV_CLASS0_FP_0_SIZE)
#define VP9_MV_CLASS0_FP_1_SIZE 9
#define VP9_MV_CLASS0_HP_0_START \
(VP9_MV_CLASS0_FP_1_START + VP9_MV_CLASS0_FP_1_SIZE)
#define VP9_MV_CLASS0_HP_0_SIZE 2
#define VP9_MV_CLASS0_HP_1_START \
(VP9_MV_CLASS0_HP_0_START + VP9_MV_CLASS0_HP_0_SIZE)
#define VP9_MV_CLASS0_HP_1_SIZE 2
#define VP9_MV_START VP9_MV_JOINTS_START
#define VP9_MV_SIZE 72 /* only use 69 */
#define VP9_TOTAL_SIZE (VP9_MV_START + VP9_MV_SIZE)
/* VP9 COUNT mem processing defines */
#define VP9_COEF_COUNT_START 0
#define VP9_COEF_COUNT_BAND_0_OFFSET 0
#define VP9_COEF_COUNT_BAND_1_OFFSET \
(VP9_COEF_COUNT_BAND_0_OFFSET + 3 * 5)
#define VP9_COEF_COUNT_BAND_2_OFFSET \
(VP9_COEF_COUNT_BAND_1_OFFSET + 6 * 5)
#define VP9_COEF_COUNT_BAND_3_OFFSET \
(VP9_COEF_COUNT_BAND_2_OFFSET + 6 * 5)
#define VP9_COEF_COUNT_BAND_4_OFFSET \
(VP9_COEF_COUNT_BAND_3_OFFSET + 6 * 5)
#define VP9_COEF_COUNT_BAND_5_OFFSET \
(VP9_COEF_COUNT_BAND_4_OFFSET + 6 * 5)
#define VP9_COEF_COUNT_SIZE_ONE_SET 165 /* (3 + 5 * 6) * 5 */
#define VP9_COEF_COUNT_4X4_START \
(VP9_COEF_COUNT_START + 0 * VP9_COEF_COUNT_SIZE_ONE_SET)
#define VP9_COEF_COUNT_8X8_START \
(VP9_COEF_COUNT_START + 4 * VP9_COEF_COUNT_SIZE_ONE_SET)
#define VP9_COEF_COUNT_16X16_START \
(VP9_COEF_COUNT_START + 8 * VP9_COEF_COUNT_SIZE_ONE_SET)
#define VP9_COEF_COUNT_32X32_START \
(VP9_COEF_COUNT_START + 12 * VP9_COEF_COUNT_SIZE_ONE_SET)
#define VP9_COEF_COUNT_SIZE_PLANE (2 * VP9_COEF_COUNT_SIZE_ONE_SET)
#define VP9_COEF_COUNT_SIZE (4 * 2 * 2 * VP9_COEF_COUNT_SIZE_ONE_SET)
#define VP9_INTRA_INTER_COUNT_START \
(VP9_COEF_COUNT_START + VP9_COEF_COUNT_SIZE)
#define VP9_INTRA_INTER_COUNT_SIZE (4 * 2)
#define VP9_COMP_INTER_COUNT_START \
(VP9_INTRA_INTER_COUNT_START + VP9_INTRA_INTER_COUNT_SIZE)
#define VP9_COMP_INTER_COUNT_SIZE (5 * 2)
#define VP9_COMP_REF_COUNT_START \
(VP9_COMP_INTER_COUNT_START + VP9_COMP_INTER_COUNT_SIZE)
#define VP9_COMP_REF_COUNT_SIZE (5 * 2)
#define VP9_SINGLE_REF_COUNT_START \
(VP9_COMP_REF_COUNT_START + VP9_COMP_REF_COUNT_SIZE)
#define VP9_SINGLE_REF_COUNT_SIZE (10 * 2)
#define VP9_TX_MODE_COUNT_START \
(VP9_SINGLE_REF_COUNT_START + VP9_SINGLE_REF_COUNT_SIZE)
#define VP9_TX_MODE_COUNT_SIZE (12 * 2)
#define VP9_SKIP_COUNT_START \
(VP9_TX_MODE_COUNT_START + VP9_TX_MODE_COUNT_SIZE)
#define VP9_SKIP_COUNT_SIZE (3 * 2)
#define VP9_MV_SIGN_0_COUNT_START \
(VP9_SKIP_COUNT_START + VP9_SKIP_COUNT_SIZE)
#define VP9_MV_SIGN_0_COUNT_SIZE (1 * 2)
#define VP9_MV_SIGN_1_COUNT_START \
(VP9_MV_SIGN_0_COUNT_START + VP9_MV_SIGN_0_COUNT_SIZE)
#define VP9_MV_SIGN_1_COUNT_SIZE (1 * 2)
#define VP9_MV_BITS_0_COUNT_START \
(VP9_MV_SIGN_1_COUNT_START + VP9_MV_SIGN_1_COUNT_SIZE)
#define VP9_MV_BITS_0_COUNT_SIZE (10 * 2)
#define VP9_MV_BITS_1_COUNT_START \
(VP9_MV_BITS_0_COUNT_START + VP9_MV_BITS_0_COUNT_SIZE)
#define VP9_MV_BITS_1_COUNT_SIZE (10 * 2)
#define VP9_MV_CLASS0_HP_0_COUNT_START \
(VP9_MV_BITS_1_COUNT_START + VP9_MV_BITS_1_COUNT_SIZE)
#define VP9_MV_CLASS0_HP_0_COUNT_SIZE (2 * 2)
#define VP9_MV_CLASS0_HP_1_COUNT_START \
(VP9_MV_CLASS0_HP_0_COUNT_START + VP9_MV_CLASS0_HP_0_COUNT_SIZE)
#define VP9_MV_CLASS0_HP_1_COUNT_SIZE (2 * 2)
/* Start merge_tree */
#define VP9_INTER_MODE_COUNT_START \
(VP9_MV_CLASS0_HP_1_COUNT_START + VP9_MV_CLASS0_HP_1_COUNT_SIZE)
#define VP9_INTER_MODE_COUNT_SIZE (7 * 4)
#define VP9_IF_Y_MODE_COUNT_START \
(VP9_INTER_MODE_COUNT_START + VP9_INTER_MODE_COUNT_SIZE)
#define VP9_IF_Y_MODE_COUNT_SIZE (10 * 4)
#define VP9_IF_UV_MODE_COUNT_START \
(VP9_IF_Y_MODE_COUNT_START + VP9_IF_Y_MODE_COUNT_SIZE)
#define VP9_IF_UV_MODE_COUNT_SIZE (10 * 10)
#define VP9_PARTITION_P_COUNT_START \
(VP9_IF_UV_MODE_COUNT_START + VP9_IF_UV_MODE_COUNT_SIZE)
#define VP9_PARTITION_P_COUNT_SIZE (4 * 4 * 4)
#define VP9_INTERP_COUNT_START \
(VP9_PARTITION_P_COUNT_START + VP9_PARTITION_P_COUNT_SIZE)
#define VP9_INTERP_COUNT_SIZE (4 * 3)
#define VP9_MV_JOINTS_COUNT_START \
(VP9_INTERP_COUNT_START + VP9_INTERP_COUNT_SIZE)
#define VP9_MV_JOINTS_COUNT_SIZE (1 * 4)
#define VP9_MV_CLASSES_0_COUNT_START \
(VP9_MV_JOINTS_COUNT_START + VP9_MV_JOINTS_COUNT_SIZE)
#define VP9_MV_CLASSES_0_COUNT_SIZE (1 * 11)
#define VP9_MV_CLASS0_0_COUNT_START \
(VP9_MV_CLASSES_0_COUNT_START + VP9_MV_CLASSES_0_COUNT_SIZE)
#define VP9_MV_CLASS0_0_COUNT_SIZE (1 * 2)
#define VP9_MV_CLASSES_1_COUNT_START \
(VP9_MV_CLASS0_0_COUNT_START + VP9_MV_CLASS0_0_COUNT_SIZE)
#define VP9_MV_CLASSES_1_COUNT_SIZE (1 * 11)
#define VP9_MV_CLASS0_1_COUNT_START \
(VP9_MV_CLASSES_1_COUNT_START + VP9_MV_CLASSES_1_COUNT_SIZE)
#define VP9_MV_CLASS0_1_COUNT_SIZE (1 * 2)
#define VP9_MV_CLASS0_FP_0_COUNT_START \
(VP9_MV_CLASS0_1_COUNT_START + VP9_MV_CLASS0_1_COUNT_SIZE)
#define VP9_MV_CLASS0_FP_0_COUNT_SIZE (3 * 4)
#define VP9_MV_CLASS0_FP_1_COUNT_START \
(VP9_MV_CLASS0_FP_0_COUNT_START + VP9_MV_CLASS0_FP_0_COUNT_SIZE)
#define VP9_MV_CLASS0_FP_1_COUNT_SIZE (3 * 4)
#define DC_PRED 0 /* Average of above and left pixels */
#define V_PRED 1 /* Vertical */
#define H_PRED 2 /* Horizontal */
#define D45_PRED 3 /* Directional 45 deg = round(arctan(1/1) * 180/pi) */
#define D135_PRED 4 /* Directional 135 deg = 180 - 45 */
#define D117_PRED 5 /* Directional 117 deg = 180 - 63 */
#define D153_PRED 6 /* Directional 153 deg = 180 - 27 */
#define D207_PRED 7 /* Directional 207 deg = 180 + 27 */
#define D63_PRED 8 /* Directional 63 deg = round(arctan(2/1) * 180/pi) */
#define TM_PRED 9 /* True-motion */
/*
 * Use a static inline rather than a macro so that 'value' and 'num' are
 * only evaluated once
 */
static inline int round_power_of_two(int value, int num)
{
return (value + (1 << (num - 1))) >> num;
}
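/*
 * Worked example (illustrative, not part of the upstream driver):
 * round_power_of_two() is a rounding right shift, i.e. division by 2^num
 * rounded to the nearest integer. It is used below to blend probabilities:
 *
 *	round_power_of_two(100 * (256 - 128) + 200 * 128, 8)
 *		= (38400 + 128) >> 8 = 150
 *
 * i.e. the midpoint of the old probability 100 and the new estimate 200
 * when the update factor is 128 (equal weighting).
 */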
#define MODE_MV_COUNT_SAT 20
static const int count_to_update_factor[MODE_MV_COUNT_SAT + 1] = {
0, 6, 12, 19, 25, 32, 38, 44, 51, 57, 64,
70, 76, 83, 89, 96, 102, 108, 115, 121, 128
};
union rpm_param {
struct {
u16 data[RPM_BUF_SIZE];
} l;
struct {
u16 profile;
u16 show_existing_frame;
u16 frame_to_show_idx;
u16 frame_type; /*1 bit*/
u16 show_frame; /*1 bit*/
u16 error_resilient_mode; /*1 bit*/
u16 intra_only; /*1 bit*/
u16 display_size_present; /*1 bit*/
u16 reset_frame_context;
u16 refresh_frame_flags;
u16 width;
u16 height;
u16 display_width;
u16 display_height;
u16 ref_info;
u16 same_frame_size;
u16 mode_ref_delta_enabled;
u16 ref_deltas[4];
u16 mode_deltas[2];
u16 filter_level;
u16 sharpness_level;
u16 bit_depth;
u16 seg_quant_info[8];
u16 seg_enabled;
u16 seg_abs_delta;
/* bit 15: feature enabled; bit 8, sign; bit[5:0], data */
u16 seg_lf_info[8];
} p;
};
enum SEG_LVL_FEATURES {
SEG_LVL_ALT_Q = 0, /* Use alternate Quantizer */
SEG_LVL_ALT_LF = 1, /* Use alternate loop filter value */
SEG_LVL_REF_FRAME = 2, /* Optional Segment reference frame */
SEG_LVL_SKIP = 3, /* Optional Segment (0,0) + skip mode */
SEG_LVL_MAX = 4 /* Number of features supported */
};
struct segmentation {
u8 enabled;
u8 update_map;
u8 update_data;
u8 abs_delta;
u8 temporal_update;
s16 feature_data[MAX_SEGMENTS][SEG_LVL_MAX];
unsigned int feature_mask[MAX_SEGMENTS];
};
struct loop_filter_thresh {
u8 mblim;
u8 lim;
u8 hev_thr;
};
struct loop_filter_info_n {
struct loop_filter_thresh lfthr[MAX_LOOP_FILTER + 1];
u8 lvl[MAX_SEGMENTS][MAX_REF_FRAMES][MAX_MODE_LF_DELTAS];
};
struct loopfilter {
int filter_level;
int sharpness_level;
int last_sharpness_level;
u8 mode_ref_delta_enabled;
u8 mode_ref_delta_update;
/*0 = Intra, Last, GF, ARF*/
signed char ref_deltas[MAX_REF_LF_DELTAS];
signed char last_ref_deltas[MAX_REF_LF_DELTAS];
/*0 = ZERO_MV, MV*/
signed char mode_deltas[MAX_MODE_LF_DELTAS];
signed char last_mode_deltas[MAX_MODE_LF_DELTAS];
};
struct vp9_frame {
struct list_head list;
struct vb2_v4l2_buffer *vbuf;
int index;
int intra_only;
int show;
int type;
int done;
unsigned int width;
unsigned int height;
};
struct codec_vp9 {
/* VP9 context lock */
struct mutex lock;
/* Common part with the HEVC decoder */
struct codec_hevc_common common;
/* Buffer for the VP9 Workspace */
void *workspace_vaddr;
dma_addr_t workspace_paddr;
	/* Contains much of the information parsed from the bitstream */
union rpm_param rpm_param;
/* Whether we detected the bitstream as 10-bit */
int is_10bit;
/* Coded resolution reported by the hardware */
u32 width, height;
/* All ref frames used by the HW at a given time */
struct list_head ref_frames_list;
u32 frames_num;
/* In case of downsampling (decoding with FBC but outputting in NV12M),
* we need to allocate additional buffers for FBC.
*/
void *fbc_buffer_vaddr[MAX_REF_PIC_NUM];
dma_addr_t fbc_buffer_paddr[MAX_REF_PIC_NUM];
int ref_frame_map[REF_FRAMES];
int next_ref_frame_map[REF_FRAMES];
struct vp9_frame *frame_refs[REFS_PER_FRAME];
u32 lcu_total;
/* loop filter */
int default_filt_lvl;
struct loop_filter_info_n lfi;
struct loopfilter lf;
struct segmentation seg_4lf;
struct vp9_frame *cur_frame;
struct vp9_frame *prev_frame;
};
static int div_r32(s64 m, int n)
{
s64 qu = div_s64(m, n);
return (int)qu;
}
static int clip_prob(int p)
{
return clamp_val(p, 1, 255);
}
static int segfeature_active(struct segmentation *seg, int segment_id,
enum SEG_LVL_FEATURES feature_id)
{
return seg->enabled &&
(seg->feature_mask[segment_id] & (1 << feature_id));
}
static int get_segdata(struct segmentation *seg, int segment_id,
enum SEG_LVL_FEATURES feature_id)
{
return seg->feature_data[segment_id][feature_id];
}
static void vp9_update_sharpness(struct loop_filter_info_n *lfi,
int sharpness_lvl)
{
int lvl;
/* For each possible value for the loop filter fill out limits*/
for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++) {
/* Set loop filter parameters that control sharpness.*/
int block_inside_limit = lvl >> ((sharpness_lvl > 0) +
(sharpness_lvl > 4));
if (sharpness_lvl > 0) {
if (block_inside_limit > (9 - sharpness_lvl))
block_inside_limit = (9 - sharpness_lvl);
}
if (block_inside_limit < 1)
block_inside_limit = 1;
lfi->lfthr[lvl].lim = (u8)block_inside_limit;
lfi->lfthr[lvl].mblim = (u8)(2 * (lvl + 2) +
block_inside_limit);
}
}
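/*
 * Worked example (illustrative, not part of the upstream driver): for
 * filter level 32 and sharpness level 5, the loop above computes
 *
 *	block_inside_limit = 32 >> ((5 > 0) + (5 > 4)) = 32 >> 2 = 8,
 *	clamped to 9 - 5 = 4,
 *	mblim = 2 * (32 + 2) + 4 = 72
 *
 * so lfthr[32] ends up as { .mblim = 72, .lim = 4 } (hev_thr is not
 * touched by this helper).
 */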
/* Instantiate this function once when decode is started */
static void
vp9_loop_filter_init(struct amvdec_core *core, struct codec_vp9 *vp9)
{
struct loop_filter_info_n *lfi = &vp9->lfi;
struct loopfilter *lf = &vp9->lf;
struct segmentation *seg_4lf = &vp9->seg_4lf;
int i;
memset(lfi, 0, sizeof(struct loop_filter_info_n));
memset(lf, 0, sizeof(struct loopfilter));
memset(seg_4lf, 0, sizeof(struct segmentation));
lf->sharpness_level = 0;
vp9_update_sharpness(lfi, lf->sharpness_level);
lf->last_sharpness_level = lf->sharpness_level;
for (i = 0; i < 32; i++) {
unsigned int thr;
thr = ((lfi->lfthr[i * 2 + 1].lim & 0x3f) << 8) |
(lfi->lfthr[i * 2 + 1].mblim & 0xff);
thr = (thr << 16) | ((lfi->lfthr[i * 2].lim & 0x3f) << 8) |
(lfi->lfthr[i * 2].mblim & 0xff);
amvdec_write_dos(core, HEVC_DBLK_CFG9, thr);
}
if (core->platform->revision >= VDEC_REVISION_SM1)
amvdec_write_dos(core, HEVC_DBLK_CFGB,
(0x3 << 14) | /* dw fifo thres r and b */
(0x3 << 12) | /* dw fifo thres r or b */
(0x3 << 10) | /* dw fifo thres not r/b */
BIT(0)); /* VP9 video format */
else if (core->platform->revision >= VDEC_REVISION_G12A)
/* VP9 video format */
amvdec_write_dos(core, HEVC_DBLK_CFGB, (0x54 << 8) | BIT(0));
else
amvdec_write_dos(core, HEVC_DBLK_CFGB, 0x40400001);
}
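/*
 * Note (descriptive comment, not from the upstream sources): each
 * HEVC_DBLK_CFG9 write above packs two consecutive threshold entries into
 * one 32-bit value: bits [29:24]/[23:16] carry lim/mblim for level 2i + 1
 * and bits [13:8]/[7:0] carry lim/mblim for level 2i, so 32 writes cover
 * all 64 loop filter levels.
 */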
static void
vp9_loop_filter_frame_init(struct amvdec_core *core, struct segmentation *seg,
struct loop_filter_info_n *lfi,
struct loopfilter *lf, int default_filt_lvl)
{
int i;
int seg_id;
/*
	 * 'scale' is the multiplier applied to lf_deltas:
	 * - 1 when filter_lvl is between 0 and 31
	 * - 2 when filter_lvl is between 32 and 63
*/
const int scale = 1 << (default_filt_lvl >> 5);
/* update limits if sharpness has changed */
if (lf->last_sharpness_level != lf->sharpness_level) {
vp9_update_sharpness(lfi, lf->sharpness_level);
lf->last_sharpness_level = lf->sharpness_level;
/* Write to register */
for (i = 0; i < 32; i++) {
unsigned int thr;
thr = ((lfi->lfthr[i * 2 + 1].lim & 0x3f) << 8) |
(lfi->lfthr[i * 2 + 1].mblim & 0xff);
thr = (thr << 16) |
((lfi->lfthr[i * 2].lim & 0x3f) << 8) |
(lfi->lfthr[i * 2].mblim & 0xff);
amvdec_write_dos(core, HEVC_DBLK_CFG9, thr);
}
}
for (seg_id = 0; seg_id < MAX_SEGMENTS; seg_id++) {
int lvl_seg = default_filt_lvl;
if (segfeature_active(seg, seg_id, SEG_LVL_ALT_LF)) {
const int data = get_segdata(seg, seg_id,
SEG_LVL_ALT_LF);
lvl_seg = clamp_t(int,
seg->abs_delta == SEGMENT_ABSDATA ?
data : default_filt_lvl + data,
0, MAX_LOOP_FILTER);
}
if (!lf->mode_ref_delta_enabled) {
/*
* We could get rid of this if we assume that deltas
* are set to zero when not in use.
* encoder always uses deltas
*/
memset(lfi->lvl[seg_id], lvl_seg,
sizeof(lfi->lvl[seg_id]));
} else {
int ref, mode;
const int intra_lvl =
lvl_seg + lf->ref_deltas[INTRA_FRAME] * scale;
lfi->lvl[seg_id][INTRA_FRAME][0] =
clamp_val(intra_lvl, 0, MAX_LOOP_FILTER);
for (ref = LAST_FRAME; ref < MAX_REF_FRAMES; ++ref) {
for (mode = 0; mode < MAX_MODE_LF_DELTAS;
++mode) {
const int inter_lvl =
lvl_seg +
lf->ref_deltas[ref] * scale +
lf->mode_deltas[mode] * scale;
lfi->lvl[seg_id][ref][mode] =
clamp_val(inter_lvl, 0,
MAX_LOOP_FILTER);
}
}
}
}
for (i = 0; i < 16; i++) {
unsigned int level;
level = ((lfi->lvl[i >> 1][3][i & 1] & 0x3f) << 24) |
((lfi->lvl[i >> 1][2][i & 1] & 0x3f) << 16) |
((lfi->lvl[i >> 1][1][i & 1] & 0x3f) << 8) |
(lfi->lvl[i >> 1][0][i & 1] & 0x3f);
if (!default_filt_lvl)
level = 0;
amvdec_write_dos(core, HEVC_DBLK_CFGA, level);
}
}
static void codec_vp9_flush_output(struct amvdec_session *sess)
{
struct codec_vp9 *vp9 = sess->priv;
struct vp9_frame *tmp, *n;
mutex_lock(&vp9->lock);
list_for_each_entry_safe(tmp, n, &vp9->ref_frames_list, list) {
if (!tmp->done) {
if (tmp->show)
amvdec_dst_buf_done(sess, tmp->vbuf,
V4L2_FIELD_NONE);
else
v4l2_m2m_buf_queue(sess->m2m_ctx, tmp->vbuf);
vp9->frames_num--;
}
list_del(&tmp->list);
kfree(tmp);
}
mutex_unlock(&vp9->lock);
}
static u32 codec_vp9_num_pending_bufs(struct amvdec_session *sess)
{
struct codec_vp9 *vp9 = sess->priv;
if (!vp9)
return 0;
return vp9->frames_num;
}
static int codec_vp9_alloc_workspace(struct amvdec_core *core,
struct codec_vp9 *vp9)
{
/* Allocate some memory for the VP9 decoder's state */
vp9->workspace_vaddr = dma_alloc_coherent(core->dev, SIZE_WORKSPACE,
&vp9->workspace_paddr,
GFP_KERNEL);
if (!vp9->workspace_vaddr) {
dev_err(core->dev, "Failed to allocate VP9 Workspace\n");
return -ENOMEM;
}
return 0;
}
static void codec_vp9_setup_workspace(struct amvdec_session *sess,
struct codec_vp9 *vp9)
{
struct amvdec_core *core = sess->core;
u32 revision = core->platform->revision;
dma_addr_t wkaddr = vp9->workspace_paddr;
amvdec_write_dos(core, HEVCD_IPP_LINEBUFF_BASE, wkaddr + IPP_OFFSET);
amvdec_write_dos(core, VP9_RPM_BUFFER, wkaddr + RPM_OFFSET);
amvdec_write_dos(core, VP9_SHORT_TERM_RPS, wkaddr + SH_TM_RPS_OFFSET);
amvdec_write_dos(core, VP9_PPS_BUFFER, wkaddr + PPS_OFFSET);
amvdec_write_dos(core, VP9_SAO_UP, wkaddr + SAO_UP_OFFSET);
amvdec_write_dos(core, VP9_STREAM_SWAP_BUFFER,
wkaddr + SWAP_BUF_OFFSET);
amvdec_write_dos(core, VP9_STREAM_SWAP_BUFFER2,
wkaddr + SWAP_BUF2_OFFSET);
amvdec_write_dos(core, VP9_SCALELUT, wkaddr + SCALELUT_OFFSET);
if (core->platform->revision >= VDEC_REVISION_G12A)
amvdec_write_dos(core, HEVC_DBLK_CFGE,
wkaddr + DBLK_PARA_OFFSET);
amvdec_write_dos(core, HEVC_DBLK_CFG4, wkaddr + DBLK_PARA_OFFSET);
amvdec_write_dos(core, HEVC_DBLK_CFG5, wkaddr + DBLK_DATA_OFFSET);
amvdec_write_dos(core, VP9_SEG_MAP_BUFFER, wkaddr + SEG_MAP_OFFSET);
amvdec_write_dos(core, VP9_PROB_SWAP_BUFFER, wkaddr + PROB_OFFSET);
amvdec_write_dos(core, VP9_COUNT_SWAP_BUFFER, wkaddr + COUNT_OFFSET);
amvdec_write_dos(core, LMEM_DUMP_ADR, wkaddr + LMEM_OFFSET);
if (codec_hevc_use_mmu(revision, sess->pixfmt_cap, vp9->is_10bit)) {
amvdec_write_dos(core, HEVC_SAO_MMU_VH0_ADDR,
wkaddr + MMU_VBH_OFFSET);
amvdec_write_dos(core, HEVC_SAO_MMU_VH1_ADDR,
wkaddr + MMU_VBH_OFFSET + (MMU_VBH_SIZE / 2));
if (revision >= VDEC_REVISION_G12A)
amvdec_write_dos(core, HEVC_ASSIST_MMU_MAP_ADDR,
vp9->common.mmu_map_paddr);
else
amvdec_write_dos(core, VP9_MMU_MAP_BUFFER,
vp9->common.mmu_map_paddr);
}
}
static int codec_vp9_start(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
struct codec_vp9 *vp9;
u32 val;
int i;
int ret;
vp9 = kzalloc(sizeof(*vp9), GFP_KERNEL);
if (!vp9)
return -ENOMEM;
ret = codec_vp9_alloc_workspace(core, vp9);
if (ret)
goto free_vp9;
codec_vp9_setup_workspace(sess, vp9);
amvdec_write_dos_bits(core, HEVC_STREAM_CONTROL, BIT(0));
/* stream_fifo_hole */
if (core->platform->revision >= VDEC_REVISION_G12A)
amvdec_write_dos_bits(core, HEVC_STREAM_FIFO_CTL, BIT(29));
val = amvdec_read_dos(core, HEVC_PARSER_INT_CONTROL) & 0x7fffffff;
val |= (3 << 29) | BIT(24) | BIT(22) | BIT(7) | BIT(4) | BIT(0);
amvdec_write_dos(core, HEVC_PARSER_INT_CONTROL, val);
amvdec_write_dos_bits(core, HEVC_SHIFT_STATUS, BIT(0));
amvdec_write_dos(core, HEVC_SHIFT_CONTROL, BIT(10) | BIT(9) |
(3 << 6) | BIT(5) | BIT(2) | BIT(1) | BIT(0));
amvdec_write_dos(core, HEVC_CABAC_CONTROL, BIT(0));
amvdec_write_dos(core, HEVC_PARSER_CORE_CONTROL, BIT(0));
amvdec_write_dos(core, HEVC_SHIFT_STARTCODE, 0x00000001);
amvdec_write_dos(core, VP9_DEC_STATUS_REG, 0);
amvdec_write_dos(core, HEVC_PARSER_CMD_WRITE, BIT(16));
for (i = 0; i < ARRAY_SIZE(vdec_hevc_parser_cmd); ++i)
amvdec_write_dos(core, HEVC_PARSER_CMD_WRITE,
vdec_hevc_parser_cmd[i]);
amvdec_write_dos(core, HEVC_PARSER_CMD_SKIP_0, PARSER_CMD_SKIP_CFG_0);
amvdec_write_dos(core, HEVC_PARSER_CMD_SKIP_1, PARSER_CMD_SKIP_CFG_1);
amvdec_write_dos(core, HEVC_PARSER_CMD_SKIP_2, PARSER_CMD_SKIP_CFG_2);
amvdec_write_dos(core, HEVC_PARSER_IF_CONTROL,
BIT(5) | BIT(2) | BIT(0));
amvdec_write_dos(core, HEVCD_IPP_TOP_CNTL, BIT(0));
amvdec_write_dos(core, HEVCD_IPP_TOP_CNTL, BIT(1));
amvdec_write_dos(core, VP9_WAIT_FLAG, 1);
/* clear mailbox interrupt */
amvdec_write_dos(core, HEVC_ASSIST_MBOX1_CLR_REG, 1);
/* enable mailbox interrupt */
amvdec_write_dos(core, HEVC_ASSIST_MBOX1_MASK, 1);
/* disable PSCALE for hardware sharing */
amvdec_write_dos(core, HEVC_PSCALE_CTRL, 0);
/* Let the uCode do all the parsing */
amvdec_write_dos(core, NAL_SEARCH_CTL, 0x8);
amvdec_write_dos(core, DECODE_STOP_POS, 0);
amvdec_write_dos(core, VP9_DECODE_MODE, DECODE_MODE_SINGLE);
pr_debug("decode_count: %u; decode_size: %u\n",
amvdec_read_dos(core, HEVC_DECODE_COUNT),
amvdec_read_dos(core, HEVC_DECODE_SIZE));
vp9_loop_filter_init(core, vp9);
INIT_LIST_HEAD(&vp9->ref_frames_list);
mutex_init(&vp9->lock);
memset(&vp9->ref_frame_map, -1, sizeof(vp9->ref_frame_map));
memset(&vp9->next_ref_frame_map, -1, sizeof(vp9->next_ref_frame_map));
for (i = 0; i < REFS_PER_FRAME; ++i)
vp9->frame_refs[i] = NULL;
sess->priv = vp9;
return 0;
free_vp9:
kfree(vp9);
return ret;
}
static int codec_vp9_stop(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
struct codec_vp9 *vp9 = sess->priv;
mutex_lock(&vp9->lock);
if (vp9->workspace_vaddr)
dma_free_coherent(core->dev, SIZE_WORKSPACE,
vp9->workspace_vaddr,
vp9->workspace_paddr);
codec_hevc_free_fbc_buffers(sess, &vp9->common);
mutex_unlock(&vp9->lock);
return 0;
}
/*
* Program LAST & GOLDEN frames into the motion compensation reference cache
* controller
*/
static void codec_vp9_set_mcrcc(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
struct codec_vp9 *vp9 = sess->priv;
u32 val;
/* Reset mcrcc */
amvdec_write_dos(core, HEVCD_MCRCC_CTL1, 0x2);
/* Disable on I-frame */
if (vp9->cur_frame->type == KEY_FRAME || vp9->cur_frame->intra_only) {
amvdec_write_dos(core, HEVCD_MCRCC_CTL1, 0x0);
return;
}
amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, BIT(1));
val = amvdec_read_dos(core, HEVCD_MPP_ANC_CANVAS_DATA_ADDR) & 0xffff;
val |= (val << 16);
amvdec_write_dos(core, HEVCD_MCRCC_CTL2, val);
val = amvdec_read_dos(core, HEVCD_MPP_ANC_CANVAS_DATA_ADDR) & 0xffff;
val |= (val << 16);
amvdec_write_dos(core, HEVCD_MCRCC_CTL3, val);
/* Enable mcrcc progressive-mode */
amvdec_write_dos(core, HEVCD_MCRCC_CTL1, 0xff0);
}
static void codec_vp9_set_sao(struct amvdec_session *sess,
struct vb2_buffer *vb)
{
struct amvdec_core *core = sess->core;
struct codec_vp9 *vp9 = sess->priv;
dma_addr_t buf_y_paddr;
dma_addr_t buf_u_v_paddr;
u32 val;
if (codec_hevc_use_downsample(sess->pixfmt_cap, vp9->is_10bit))
buf_y_paddr =
vp9->common.fbc_buffer_paddr[vb->index];
else
buf_y_paddr =
vb2_dma_contig_plane_dma_addr(vb, 0);
if (codec_hevc_use_fbc(sess->pixfmt_cap, vp9->is_10bit)) {
val = amvdec_read_dos(core, HEVC_SAO_CTRL5) & ~0xff0200;
amvdec_write_dos(core, HEVC_SAO_CTRL5, val);
amvdec_write_dos(core, HEVC_CM_BODY_START_ADDR, buf_y_paddr);
}
if (sess->pixfmt_cap == V4L2_PIX_FMT_NV12M) {
buf_y_paddr =
vb2_dma_contig_plane_dma_addr(vb, 0);
buf_u_v_paddr =
vb2_dma_contig_plane_dma_addr(vb, 1);
amvdec_write_dos(core, HEVC_SAO_Y_START_ADDR, buf_y_paddr);
amvdec_write_dos(core, HEVC_SAO_C_START_ADDR, buf_u_v_paddr);
amvdec_write_dos(core, HEVC_SAO_Y_WPTR, buf_y_paddr);
amvdec_write_dos(core, HEVC_SAO_C_WPTR, buf_u_v_paddr);
}
if (codec_hevc_use_mmu(core->platform->revision, sess->pixfmt_cap,
vp9->is_10bit)) {
amvdec_write_dos(core, HEVC_CM_HEADER_START_ADDR,
vp9->common.mmu_header_paddr[vb->index]);
/* use HEVC_CM_HEADER_START_ADDR */
amvdec_write_dos_bits(core, HEVC_SAO_CTRL5, BIT(10));
}
amvdec_write_dos(core, HEVC_SAO_Y_LENGTH,
amvdec_get_output_size(sess));
amvdec_write_dos(core, HEVC_SAO_C_LENGTH,
(amvdec_get_output_size(sess) / 2));
if (core->platform->revision >= VDEC_REVISION_G12A) {
amvdec_clear_dos_bits(core, HEVC_DBLK_CFGB,
BIT(4) | BIT(5) | BIT(8) | BIT(9));
/* enable first, compressed write */
if (codec_hevc_use_fbc(sess->pixfmt_cap, vp9->is_10bit))
amvdec_write_dos_bits(core, HEVC_DBLK_CFGB, BIT(8));
/* enable second, uncompressed write */
if (sess->pixfmt_cap == V4L2_PIX_FMT_NV12M)
amvdec_write_dos_bits(core, HEVC_DBLK_CFGB, BIT(9));
/* dblk pipeline mode=1 for performance */
if (sess->width >= 1280)
amvdec_write_dos_bits(core, HEVC_DBLK_CFGB, BIT(4));
pr_debug("HEVC_DBLK_CFGB: %08X\n",
amvdec_read_dos(core, HEVC_DBLK_CFGB));
}
val = amvdec_read_dos(core, HEVC_SAO_CTRL1) & ~0x3ff0;
val |= 0xff0; /* Set endianness for 2-bytes swaps (nv12) */
if (core->platform->revision < VDEC_REVISION_G12A) {
val &= ~0x3;
if (!codec_hevc_use_fbc(sess->pixfmt_cap, vp9->is_10bit))
val |= BIT(0); /* disable cm compression */
/* TOFIX: Handle Amlogic Framebuffer compression */
}
amvdec_write_dos(core, HEVC_SAO_CTRL1, val);
pr_debug("HEVC_SAO_CTRL1: %08X\n", val);
/* no downscale for NV12 */
val = amvdec_read_dos(core, HEVC_SAO_CTRL5) & ~0xff0000;
amvdec_write_dos(core, HEVC_SAO_CTRL5, val);
val = amvdec_read_dos(core, HEVCD_IPP_AXIIF_CONFIG) & ~0x30;
val |= 0xf;
val &= ~BIT(12); /* NV12 */
amvdec_write_dos(core, HEVCD_IPP_AXIIF_CONFIG, val);
}
static dma_addr_t codec_vp9_get_frame_mv_paddr(struct codec_vp9 *vp9,
struct vp9_frame *frame)
{
return vp9->workspace_paddr + MPRED_MV_OFFSET +
(frame->index * MPRED_MV_BUF_SIZE);
}
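/*
 * Note (descriptive comment, not from the upstream sources): every frame
 * index owns a fixed motion-vector slot inside the workspace, at
 * MPRED_MV_OFFSET + index * MPRED_MV_BUF_SIZE; MPRED_MV_SIZE reserves one
 * such 0x120000-byte slot for each of the MAX_REF_PIC_NUM (24) frames.
 */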
static void codec_vp9_set_mpred_mv(struct amvdec_core *core,
struct codec_vp9 *vp9)
{
int mpred_mv_rd_end_addr;
int use_prev_frame_mvs = vp9->prev_frame->width ==
vp9->cur_frame->width &&
vp9->prev_frame->height ==
vp9->cur_frame->height &&
!vp9->prev_frame->intra_only &&
vp9->prev_frame->show &&
vp9->prev_frame->type != KEY_FRAME;
amvdec_write_dos(core, HEVC_MPRED_CTRL3, 0x24122412);
amvdec_write_dos(core, HEVC_MPRED_ABV_START_ADDR,
vp9->workspace_paddr + MPRED_ABV_OFFSET);
amvdec_clear_dos_bits(core, HEVC_MPRED_CTRL4, BIT(6));
if (use_prev_frame_mvs)
amvdec_write_dos_bits(core, HEVC_MPRED_CTRL4, BIT(6));
amvdec_write_dos(core, HEVC_MPRED_MV_WR_START_ADDR,
codec_vp9_get_frame_mv_paddr(vp9, vp9->cur_frame));
amvdec_write_dos(core, HEVC_MPRED_MV_WPTR,
codec_vp9_get_frame_mv_paddr(vp9, vp9->cur_frame));
amvdec_write_dos(core, HEVC_MPRED_MV_RD_START_ADDR,
codec_vp9_get_frame_mv_paddr(vp9, vp9->prev_frame));
amvdec_write_dos(core, HEVC_MPRED_MV_RPTR,
codec_vp9_get_frame_mv_paddr(vp9, vp9->prev_frame));
mpred_mv_rd_end_addr =
codec_vp9_get_frame_mv_paddr(vp9, vp9->prev_frame) +
(vp9->lcu_total * MV_MEM_UNIT);
amvdec_write_dos(core, HEVC_MPRED_MV_RD_END_ADDR, mpred_mv_rd_end_addr);
}
static void codec_vp9_update_next_ref(struct codec_vp9 *vp9)
{
union rpm_param *param = &vp9->rpm_param;
u32 buf_idx = vp9->cur_frame->index;
int ref_index = 0;
int refresh_frame_flags;
int mask;
refresh_frame_flags = vp9->cur_frame->type == KEY_FRAME ?
0xff : param->p.refresh_frame_flags;
for (mask = refresh_frame_flags; mask; mask >>= 1) {
pr_debug("mask=%08X; ref_index=%d\n", mask, ref_index);
if (mask & 1)
vp9->next_ref_frame_map[ref_index] = buf_idx;
else
vp9->next_ref_frame_map[ref_index] =
vp9->ref_frame_map[ref_index];
++ref_index;
}
for (; ref_index < REF_FRAMES; ++ref_index)
vp9->next_ref_frame_map[ref_index] =
vp9->ref_frame_map[ref_index];
}
static void codec_vp9_save_refs(struct codec_vp9 *vp9)
{
union rpm_param *param = &vp9->rpm_param;
int i;
for (i = 0; i < REFS_PER_FRAME; ++i) {
const int ref = (param->p.ref_info >>
(((REFS_PER_FRAME - i - 1) * 4) + 1)) & 0x7;
if (vp9->ref_frame_map[ref] < 0)
continue;
pr_warn("%s: FIXME, would need to save ref %d\n",
__func__, vp9->ref_frame_map[ref]);
}
}
static void codec_vp9_update_ref(struct codec_vp9 *vp9)
{
union rpm_param *param = &vp9->rpm_param;
int ref_index = 0;
int mask;
int refresh_frame_flags;
if (!vp9->cur_frame)
return;
refresh_frame_flags = vp9->cur_frame->type == KEY_FRAME ?
0xff : param->p.refresh_frame_flags;
for (mask = refresh_frame_flags; mask; mask >>= 1) {
vp9->ref_frame_map[ref_index] =
vp9->next_ref_frame_map[ref_index];
++ref_index;
}
if (param->p.show_existing_frame)
return;
for (; ref_index < REF_FRAMES; ++ref_index)
vp9->ref_frame_map[ref_index] =
vp9->next_ref_frame_map[ref_index];
}
static struct vp9_frame *codec_vp9_get_frame_by_idx(struct codec_vp9 *vp9,
int idx)
{
struct vp9_frame *frame;
list_for_each_entry(frame, &vp9->ref_frames_list, list) {
if (frame->index == idx)
return frame;
}
return NULL;
}
static void codec_vp9_sync_ref(struct codec_vp9 *vp9)
{
union rpm_param *param = &vp9->rpm_param;
int i;
for (i = 0; i < REFS_PER_FRAME; ++i) {
const int ref = (param->p.ref_info >>
(((REFS_PER_FRAME - i - 1) * 4) + 1)) & 0x7;
const int idx = vp9->ref_frame_map[ref];
vp9->frame_refs[i] = codec_vp9_get_frame_by_idx(vp9, idx);
if (!vp9->frame_refs[i])
pr_warn("%s: couldn't find VP9 ref %d\n", __func__,
idx);
}
}
static void codec_vp9_set_refs(struct amvdec_session *sess,
struct codec_vp9 *vp9)
{
struct amvdec_core *core = sess->core;
int i;
for (i = 0; i < REFS_PER_FRAME; ++i) {
struct vp9_frame *frame = vp9->frame_refs[i];
int id_y;
int id_u_v;
if (!frame)
continue;
if (codec_hevc_use_fbc(sess->pixfmt_cap, vp9->is_10bit)) {
id_y = frame->index;
id_u_v = id_y;
} else {
id_y = frame->index * 2;
id_u_v = id_y + 1;
}
amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_DATA_ADDR,
(id_u_v << 16) | (id_u_v << 8) | id_y);
}
}
static void codec_vp9_set_mc(struct amvdec_session *sess,
struct codec_vp9 *vp9)
{
struct amvdec_core *core = sess->core;
u32 scale = 0;
u32 sz;
int i;
amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, 1);
codec_vp9_set_refs(sess, vp9);
amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
(16 << 8) | 1);
codec_vp9_set_refs(sess, vp9);
amvdec_write_dos(core, VP9D_MPP_REFINFO_TBL_ACCCONFIG, BIT(2));
for (i = 0; i < REFS_PER_FRAME; ++i) {
if (!vp9->frame_refs[i])
continue;
if (vp9->frame_refs[i]->width != vp9->width ||
vp9->frame_refs[i]->height != vp9->height)
scale = 1;
sz = amvdec_am21c_body_size(vp9->frame_refs[i]->width,
vp9->frame_refs[i]->height);
amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA,
vp9->frame_refs[i]->width);
amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA,
vp9->frame_refs[i]->height);
amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA,
(vp9->frame_refs[i]->width << 14) /
vp9->width);
amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA,
(vp9->frame_refs[i]->height << 14) /
vp9->height);
amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA, sz >> 5);
}
amvdec_write_dos(core, VP9D_MPP_REF_SCALE_ENBL, scale);
}
static struct vp9_frame *codec_vp9_get_new_frame(struct amvdec_session *sess)
{
struct codec_vp9 *vp9 = sess->priv;
union rpm_param *param = &vp9->rpm_param;
struct vb2_v4l2_buffer *vbuf;
struct vp9_frame *new_frame;
new_frame = kzalloc(sizeof(*new_frame), GFP_KERNEL);
if (!new_frame)
return NULL;
vbuf = v4l2_m2m_dst_buf_remove(sess->m2m_ctx);
if (!vbuf) {
dev_err(sess->core->dev, "No dst buffer available\n");
kfree(new_frame);
return NULL;
}
while (codec_vp9_get_frame_by_idx(vp9, vbuf->vb2_buf.index)) {
struct vb2_v4l2_buffer *old_vbuf = vbuf;
vbuf = v4l2_m2m_dst_buf_remove(sess->m2m_ctx);
v4l2_m2m_buf_queue(sess->m2m_ctx, old_vbuf);
if (!vbuf) {
dev_err(sess->core->dev, "No dst buffer available\n");
kfree(new_frame);
return NULL;
}
}
new_frame->vbuf = vbuf;
new_frame->index = vbuf->vb2_buf.index;
new_frame->intra_only = param->p.intra_only;
new_frame->show = param->p.show_frame;
new_frame->type = param->p.frame_type;
new_frame->width = vp9->width;
new_frame->height = vp9->height;
list_add_tail(&new_frame->list, &vp9->ref_frames_list);
vp9->frames_num++;
return new_frame;
}
static void codec_vp9_show_existing_frame(struct codec_vp9 *vp9)
{
union rpm_param *param = &vp9->rpm_param;
if (!param->p.show_existing_frame)
return;
pr_debug("showing frame %u\n", param->p.frame_to_show_idx);
}
static void codec_vp9_rm_noshow_frame(struct amvdec_session *sess)
{
struct codec_vp9 *vp9 = sess->priv;
struct vp9_frame *tmp;
list_for_each_entry(tmp, &vp9->ref_frames_list, list) {
if (tmp->show)
continue;
pr_debug("rm noshow: %u\n", tmp->index);
v4l2_m2m_buf_queue(sess->m2m_ctx, tmp->vbuf);
list_del(&tmp->list);
kfree(tmp);
vp9->frames_num--;
return;
}
}
static void codec_vp9_process_frame(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
struct codec_vp9 *vp9 = sess->priv;
union rpm_param *param = &vp9->rpm_param;
int intra_only;
if (!param->p.show_frame)
codec_vp9_rm_noshow_frame(sess);
vp9->cur_frame = codec_vp9_get_new_frame(sess);
if (!vp9->cur_frame)
return;
pr_debug("frame %d: type: %08X; show_exist: %u; show: %u, intra_only: %u\n",
vp9->cur_frame->index,
param->p.frame_type, param->p.show_existing_frame,
param->p.show_frame, param->p.intra_only);
if (param->p.frame_type != KEY_FRAME)
codec_vp9_sync_ref(vp9);
codec_vp9_update_next_ref(vp9);
codec_vp9_show_existing_frame(vp9);
if (codec_hevc_use_mmu(core->platform->revision, sess->pixfmt_cap,
vp9->is_10bit))
codec_hevc_fill_mmu_map(sess, &vp9->common,
&vp9->cur_frame->vbuf->vb2_buf);
intra_only = param->p.show_frame ? 0 : param->p.intra_only;
	/* Motion prediction setup; disabled for keyframes / intra-only */
if (param->p.frame_type != KEY_FRAME && !intra_only) {
codec_vp9_set_mc(sess, vp9);
codec_vp9_set_mpred_mv(core, vp9);
} else {
amvdec_clear_dos_bits(core, HEVC_MPRED_CTRL4, BIT(6));
}
amvdec_write_dos(core, HEVC_PARSER_PICTURE_SIZE,
(vp9->height << 16) | vp9->width);
codec_vp9_set_mcrcc(sess);
codec_vp9_set_sao(sess, &vp9->cur_frame->vbuf->vb2_buf);
vp9_loop_filter_frame_init(core, &vp9->seg_4lf,
&vp9->lfi, &vp9->lf,
vp9->default_filt_lvl);
/* ask uCode to start decoding */
amvdec_write_dos(core, VP9_DEC_STATUS_REG, VP9_10B_DECODE_SLICE);
}
static void codec_vp9_process_lf(struct codec_vp9 *vp9)
{
union rpm_param *param = &vp9->rpm_param;
int i;
vp9->lf.mode_ref_delta_enabled = param->p.mode_ref_delta_enabled;
vp9->lf.sharpness_level = param->p.sharpness_level;
vp9->default_filt_lvl = param->p.filter_level;
vp9->seg_4lf.enabled = param->p.seg_enabled;
vp9->seg_4lf.abs_delta = param->p.seg_abs_delta;
for (i = 0; i < 4; i++)
vp9->lf.ref_deltas[i] = param->p.ref_deltas[i];
for (i = 0; i < 2; i++)
vp9->lf.mode_deltas[i] = param->p.mode_deltas[i];
for (i = 0; i < MAX_SEGMENTS; i++)
vp9->seg_4lf.feature_mask[i] =
(param->p.seg_lf_info[i] & 0x8000) ?
(1 << SEG_LVL_ALT_LF) : 0;
for (i = 0; i < MAX_SEGMENTS; i++)
vp9->seg_4lf.feature_data[i][SEG_LVL_ALT_LF] =
(param->p.seg_lf_info[i] & 0x100) ?
-(param->p.seg_lf_info[i] & 0x3f)
: (param->p.seg_lf_info[i] & 0x3f);
}
static void codec_vp9_resume(struct amvdec_session *sess)
{
struct codec_vp9 *vp9 = sess->priv;
mutex_lock(&vp9->lock);
if (codec_hevc_setup_buffers(sess, &vp9->common, vp9->is_10bit)) {
mutex_unlock(&vp9->lock);
amvdec_abort(sess);
return;
}
codec_vp9_setup_workspace(sess, vp9);
codec_hevc_setup_decode_head(sess, vp9->is_10bit);
codec_vp9_process_lf(vp9);
codec_vp9_process_frame(sess);
mutex_unlock(&vp9->lock);
}
/*
* The RPM section within the workspace contains
 * much of the information regarding the parsed bitstream
*/
static void codec_vp9_fetch_rpm(struct amvdec_session *sess)
{
struct codec_vp9 *vp9 = sess->priv;
u16 *rpm_vaddr = vp9->workspace_vaddr + RPM_OFFSET;
int i, j;
for (i = 0; i < RPM_BUF_SIZE; i += 4)
for (j = 0; j < 4; j++)
vp9->rpm_param.l.data[i + j] = rpm_vaddr[i + 3 - j];
}
static int codec_vp9_process_rpm(struct codec_vp9 *vp9)
{
union rpm_param *param = &vp9->rpm_param;
int src_changed = 0;
int is_10bit = 0;
int pic_width_64 = ALIGN(param->p.width, 64);
int pic_height_32 = ALIGN(param->p.height, 32);
int pic_width_lcu = (pic_width_64 % LCU_SIZE) ?
pic_width_64 / LCU_SIZE + 1
: pic_width_64 / LCU_SIZE;
int pic_height_lcu = (pic_height_32 % LCU_SIZE) ?
pic_height_32 / LCU_SIZE + 1
: pic_height_32 / LCU_SIZE;
vp9->lcu_total = pic_width_lcu * pic_height_lcu;
if (param->p.bit_depth == 10)
is_10bit = 1;
if (vp9->width != param->p.width || vp9->height != param->p.height ||
vp9->is_10bit != is_10bit)
src_changed = 1;
vp9->width = param->p.width;
vp9->height = param->p.height;
vp9->is_10bit = is_10bit;
pr_debug("width: %u; height: %u; is_10bit: %d; src_changed: %d\n",
vp9->width, vp9->height, is_10bit, src_changed);
return src_changed;
}
static bool codec_vp9_is_ref(struct codec_vp9 *vp9, struct vp9_frame *frame)
{
int i;
for (i = 0; i < REF_FRAMES; ++i)
if (vp9->ref_frame_map[i] == frame->index)
return true;
return false;
}
static void codec_vp9_show_frame(struct amvdec_session *sess)
{
struct codec_vp9 *vp9 = sess->priv;
struct vp9_frame *tmp, *n;
list_for_each_entry_safe(tmp, n, &vp9->ref_frames_list, list) {
if (!tmp->show || tmp == vp9->cur_frame)
continue;
if (!tmp->done) {
pr_debug("Doning %u\n", tmp->index);
amvdec_dst_buf_done(sess, tmp->vbuf, V4L2_FIELD_NONE);
tmp->done = 1;
vp9->frames_num--;
}
if (codec_vp9_is_ref(vp9, tmp) || tmp == vp9->prev_frame)
continue;
pr_debug("deleting %d\n", tmp->index);
list_del(&tmp->list);
kfree(tmp);
}
}
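/*
 * Merge a single tree node: blend the previous 8-bit probability with the
 * probability derived from the left/right branch counts.
 */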
static void vp9_tree_merge_probs(unsigned int *prev_prob,
unsigned int *cur_prob,
int coef_node_start, int tree_left,
int tree_right,
int tree_i, int node)
{
int prob_32, prob_res, prob_shift;
int pre_prob, new_prob;
int den, m_count, get_prob, factor;
prob_32 = prev_prob[coef_node_start / 4 * 2];
prob_res = coef_node_start & 3;
prob_shift = prob_res * 8;
pre_prob = (prob_32 >> prob_shift) & 0xff;
den = tree_left + tree_right;
if (den == 0) {
new_prob = pre_prob;
} else {
m_count = min(den, MODE_MV_COUNT_SAT);
get_prob =
clip_prob(div_r32(((int64_t)tree_left * 256 +
(den >> 1)),
den));
/* weighted_prob */
factor = count_to_update_factor[m_count];
new_prob = round_power_of_two(pre_prob * (256 - factor) +
get_prob * factor, 8);
}
cur_prob[coef_node_start / 4 * 2] =
(cur_prob[coef_node_start / 4 * 2] & (~(0xff << prob_shift))) |
(new_prob << prob_shift);
}
static void adapt_coef_probs_cxt(unsigned int *prev_prob,
unsigned int *cur_prob,
unsigned int *count,
int update_factor,
int cxt_num,
int coef_cxt_start,
int coef_count_cxt_start)
{
int prob_32, prob_res, prob_shift;
int pre_prob, new_prob;
int num, den, m_count, get_prob, factor;
int node, coef_node_start;
int count_sat = 24;
int cxt;
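/*
 * For each context, build the three binary branch counts and adapt the
 * stored probabilities from them.
 */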
for (cxt = 0; cxt < cxt_num; cxt++) {
const int n0 = count[coef_count_cxt_start];
const int n1 = count[coef_count_cxt_start + 1];
const int n2 = count[coef_count_cxt_start + 2];
const int neob = count[coef_count_cxt_start + 3];
const int nneob = count[coef_count_cxt_start + 4];
const unsigned int branch_ct[3][2] = {
{ neob, nneob },
{ n0, n1 + n2 },
{ n1, n2 }
};
coef_node_start = coef_cxt_start;
for (node = 0 ; node < 3 ; node++) {
prob_32 = prev_prob[coef_node_start / 4 * 2];
prob_res = coef_node_start & 3;
prob_shift = prob_res * 8;
pre_prob = (prob_32 >> prob_shift) & 0xff;
/* get binary prob */
num = branch_ct[node][0];
den = branch_ct[node][0] + branch_ct[node][1];
m_count = min(den, count_sat);
get_prob = (den == 0) ?
128u :
clip_prob(div_r32(((int64_t)num * 256 +
(den >> 1)), den));
factor = update_factor * m_count / count_sat;
new_prob =
round_power_of_two(pre_prob * (256 - factor) +
get_prob * factor, 8);
cur_prob[coef_node_start / 4 * 2] =
(cur_prob[coef_node_start / 4 * 2] &
(~(0xff << prob_shift))) |
(new_prob << prob_shift);
coef_node_start += 1;
}
coef_cxt_start = coef_cxt_start + 3;
coef_count_cxt_start = coef_count_cxt_start + 5;
}
}
static void adapt_coef_probs(int prev_kf, int cur_kf, int pre_fc,
unsigned int *prev_prob, unsigned int *cur_prob,
unsigned int *count)
{
int tx_size, coef_tx_size_start, coef_count_tx_size_start;
int plane, coef_plane_start, coef_count_plane_start;
int type, coef_type_start, coef_count_type_start;
int band, coef_band_start, coef_count_band_start;
int cxt_num;
int coef_cxt_start, coef_count_cxt_start;
int node, coef_node_start, coef_count_node_start;
int tree_i, tree_left, tree_right;
int mvd_i;
int update_factor = cur_kf ? 112 : (prev_kf ? 128 : 112);
int prob_32;
int prob_res;
int prob_shift;
int pre_prob;
int den;
int get_prob;
int m_count;
int factor;
int new_prob;
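/*
 * Adapt the coefficient probabilities for every tx_size / plane / type /
 * band combination.
 */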
for (tx_size = 0 ; tx_size < 4 ; tx_size++) {
coef_tx_size_start = VP9_COEF_START +
tx_size * 4 * VP9_COEF_SIZE_ONE_SET;
coef_count_tx_size_start = VP9_COEF_COUNT_START +
tx_size * 4 * VP9_COEF_COUNT_SIZE_ONE_SET;
coef_plane_start = coef_tx_size_start;
coef_count_plane_start = coef_count_tx_size_start;
for (plane = 0 ; plane < 2 ; plane++) {
coef_type_start = coef_plane_start;
coef_count_type_start = coef_count_plane_start;
for (type = 0 ; type < 2 ; type++) {
coef_band_start = coef_type_start;
coef_count_band_start = coef_count_type_start;
for (band = 0 ; band < 6 ; band++) {
if (band == 0)
cxt_num = 3;
else
cxt_num = 6;
coef_cxt_start = coef_band_start;
coef_count_cxt_start =
coef_count_band_start;
adapt_coef_probs_cxt(prev_prob,
cur_prob,
count,
update_factor,
cxt_num,
coef_cxt_start,
coef_count_cxt_start);
if (band == 0) {
coef_band_start += 10;
coef_count_band_start += 15;
} else {
coef_band_start += 18;
coef_count_band_start += 30;
}
}
coef_type_start += VP9_COEF_SIZE_ONE_SET;
coef_count_type_start +=
VP9_COEF_COUNT_SIZE_ONE_SET;
}
coef_plane_start += 2 * VP9_COEF_SIZE_ONE_SET;
coef_count_plane_start +=
2 * VP9_COEF_COUNT_SIZE_ONE_SET;
}
}
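/*
 * For inter frames, also adapt the mode, motion vector and partition
 * probabilities.
 */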
if (cur_kf == 0) {
/* mode_mv_merge_probs - merge_intra_inter_prob */
for (coef_count_node_start = VP9_INTRA_INTER_COUNT_START;
coef_count_node_start < (VP9_MV_CLASS0_HP_1_COUNT_START +
VP9_MV_CLASS0_HP_1_COUNT_SIZE);
coef_count_node_start += 2) {
if (coef_count_node_start ==
VP9_INTRA_INTER_COUNT_START)
coef_node_start = VP9_INTRA_INTER_START;
else if (coef_count_node_start ==
VP9_COMP_INTER_COUNT_START)
coef_node_start = VP9_COMP_INTER_START;
else if (coef_count_node_start ==
VP9_TX_MODE_COUNT_START)
coef_node_start = VP9_TX_MODE_START;
else if (coef_count_node_start ==
VP9_SKIP_COUNT_START)
coef_node_start = VP9_SKIP_START;
else if (coef_count_node_start ==
VP9_MV_SIGN_0_COUNT_START)
coef_node_start = VP9_MV_SIGN_0_START;
else if (coef_count_node_start ==
VP9_MV_SIGN_1_COUNT_START)
coef_node_start = VP9_MV_SIGN_1_START;
else if (coef_count_node_start ==
VP9_MV_BITS_0_COUNT_START)
coef_node_start = VP9_MV_BITS_0_START;
else if (coef_count_node_start ==
VP9_MV_BITS_1_COUNT_START)
coef_node_start = VP9_MV_BITS_1_START;
else /* node_start == VP9_MV_CLASS0_HP_0_COUNT_START */
coef_node_start = VP9_MV_CLASS0_HP_0_START;
den = count[coef_count_node_start] +
count[coef_count_node_start + 1];
prob_32 = prev_prob[coef_node_start / 4 * 2];
prob_res = coef_node_start & 3;
prob_shift = prob_res * 8;
pre_prob = (prob_32 >> prob_shift) & 0xff;
if (den == 0) {
new_prob = pre_prob;
} else {
m_count = min(den, MODE_MV_COUNT_SAT);
get_prob =
clip_prob(div_r32(((int64_t)
count[coef_count_node_start] * 256 +
(den >> 1)),
den));
/* weighted prob */
factor = count_to_update_factor[m_count];
new_prob =
round_power_of_two(pre_prob *
(256 - factor) +
get_prob * factor,
8);
}
cur_prob[coef_node_start / 4 * 2] =
(cur_prob[coef_node_start / 4 * 2] &
(~(0xff << prob_shift))) |
(new_prob << prob_shift);
coef_node_start = coef_node_start + 1;
}
coef_node_start = VP9_INTER_MODE_START;
coef_count_node_start = VP9_INTER_MODE_COUNT_START;
for (tree_i = 0 ; tree_i < 7 ; tree_i++) {
for (node = 0 ; node < 3 ; node++) {
unsigned int start = coef_count_node_start;
switch (node) {
case 2:
tree_left = count[start + 1];
tree_right = count[start + 3];
break;
case 1:
tree_left = count[start + 0];
tree_right = count[start + 1] +
count[start + 3];
break;
default:
tree_left = count[start + 2];
tree_right = count[start + 0] +
count[start + 1] +
count[start + 3];
break;
}
vp9_tree_merge_probs(prev_prob, cur_prob,
coef_node_start,
tree_left, tree_right,
tree_i, node);
coef_node_start = coef_node_start + 1;
}
coef_count_node_start = coef_count_node_start + 4;
}
coef_node_start = VP9_IF_Y_MODE_START;
coef_count_node_start = VP9_IF_Y_MODE_COUNT_START;
for (tree_i = 0 ; tree_i < 14 ; tree_i++) {
for (node = 0 ; node < 9 ; node++) {
unsigned int start = coef_count_node_start;
switch (node) {
case 8:
tree_left =
count[start + D153_PRED];
tree_right =
count[start + D207_PRED];
break;
case 7:
tree_left =
count[start + D63_PRED];
tree_right =
count[start + D207_PRED] +
count[start + D153_PRED];
break;
case 6:
tree_left =
count[start + D45_PRED];
tree_right =
count[start + D207_PRED] +
count[start + D153_PRED] +
count[start + D63_PRED];
break;
case 5:
tree_left =
count[start + D135_PRED];
tree_right =
count[start + D117_PRED];
break;
case 4:
tree_left =
count[start + H_PRED];
tree_right =
count[start + D117_PRED] +
count[start + D135_PRED];
break;
case 3:
tree_left =
count[start + H_PRED] +
count[start + D117_PRED] +
count[start + D135_PRED];
tree_right =
count[start + D45_PRED] +
count[start + D207_PRED] +
count[start + D153_PRED] +
count[start + D63_PRED];
break;
case 2:
tree_left =
count[start + V_PRED];
tree_right =
count[start + H_PRED] +
count[start + D117_PRED] +
count[start + D135_PRED] +
count[start + D45_PRED] +
count[start + D207_PRED] +
count[start + D153_PRED] +
count[start + D63_PRED];
break;
case 1:
tree_left =
count[start + TM_PRED];
tree_right =
count[start + V_PRED] +
count[start + H_PRED] +
count[start + D117_PRED] +
count[start + D135_PRED] +
count[start + D45_PRED] +
count[start + D207_PRED] +
count[start + D153_PRED] +
count[start + D63_PRED];
break;
default:
tree_left =
count[start + DC_PRED];
tree_right =
count[start + TM_PRED] +
count[start + V_PRED] +
count[start + H_PRED] +
count[start + D117_PRED] +
count[start + D135_PRED] +
count[start + D45_PRED] +
count[start + D207_PRED] +
count[start + D153_PRED] +
count[start + D63_PRED];
break;
}
vp9_tree_merge_probs(prev_prob, cur_prob,
coef_node_start,
tree_left, tree_right,
tree_i, node);
coef_node_start = coef_node_start + 1;
}
coef_count_node_start = coef_count_node_start + 10;
}
coef_node_start = VP9_PARTITION_P_START;
coef_count_node_start = VP9_PARTITION_P_COUNT_START;
for (tree_i = 0 ; tree_i < 16 ; tree_i++) {
for (node = 0 ; node < 3 ; node++) {
unsigned int start = coef_count_node_start;
switch (node) {
case 2:
tree_left = count[start + 2];
tree_right = count[start + 3];
break;
case 1:
tree_left = count[start + 1];
tree_right = count[start + 2] +
count[start + 3];
break;
default:
tree_left = count[start + 0];
tree_right = count[start + 1] +
count[start + 2] +
count[start + 3];
break;
}
vp9_tree_merge_probs(prev_prob, cur_prob,
coef_node_start,
tree_left, tree_right,
tree_i, node);
coef_node_start = coef_node_start + 1;
}
coef_count_node_start = coef_count_node_start + 4;
}
coef_node_start = VP9_INTERP_START;
coef_count_node_start = VP9_INTERP_COUNT_START;
for (tree_i = 0 ; tree_i < 4 ; tree_i++) {
for (node = 0 ; node < 2 ; node++) {
unsigned int start = coef_count_node_start;
switch (node) {
case 1:
tree_left = count[start + 1];
tree_right = count[start + 2];
break;
default:
tree_left = count[start + 0];
tree_right = count[start + 1] +
count[start + 2];
break;
}
vp9_tree_merge_probs(prev_prob, cur_prob,
coef_node_start,
tree_left, tree_right,
tree_i, node);
coef_node_start = coef_node_start + 1;
}
coef_count_node_start = coef_count_node_start + 3;
}
coef_node_start = VP9_MV_JOINTS_START;
coef_count_node_start = VP9_MV_JOINTS_COUNT_START;
for (tree_i = 0 ; tree_i < 1 ; tree_i++) {
for (node = 0 ; node < 3 ; node++) {
unsigned int start = coef_count_node_start;
switch (node) {
case 2:
tree_left = count[start + 2];
tree_right = count[start + 3];
break;
case 1:
tree_left = count[start + 1];
tree_right = count[start + 2] +
count[start + 3];
break;
default:
tree_left = count[start + 0];
tree_right = count[start + 1] +
count[start + 2] +
count[start + 3];
break;
}
vp9_tree_merge_probs(prev_prob, cur_prob,
coef_node_start,
tree_left, tree_right,
tree_i, node);
coef_node_start = coef_node_start + 1;
}
coef_count_node_start = coef_count_node_start + 4;
}
for (mvd_i = 0 ; mvd_i < 2 ; mvd_i++) {
coef_node_start = mvd_i ? VP9_MV_CLASSES_1_START :
VP9_MV_CLASSES_0_START;
coef_count_node_start = mvd_i ?
VP9_MV_CLASSES_1_COUNT_START :
VP9_MV_CLASSES_0_COUNT_START;
tree_i = 0;
for (node = 0; node < 10; node++) {
unsigned int start = coef_count_node_start;
switch (node) {
case 9:
tree_left = count[start + 9];
tree_right = count[start + 10];
break;
case 8:
tree_left = count[start + 7];
tree_right = count[start + 8];
break;
case 7:
tree_left = count[start + 7] +
count[start + 8];
tree_right = count[start + 9] +
count[start + 10];
break;
case 6:
tree_left = count[start + 6];
tree_right = count[start + 7] +
count[start + 8] +
count[start + 9] +
count[start + 10];
break;
case 5:
tree_left = count[start + 4];
tree_right = count[start + 5];
break;
case 4:
tree_left = count[start + 4] +
count[start + 5];
tree_right = count[start + 6] +
count[start + 7] +
count[start + 8] +
count[start + 9] +
count[start + 10];
break;
case 3:
tree_left = count[start + 2];
tree_right = count[start + 3];
break;
case 2:
tree_left = count[start + 2] +
count[start + 3];
tree_right = count[start + 4] +
count[start + 5] +
count[start + 6] +
count[start + 7] +
count[start + 8] +
count[start + 9] +
count[start + 10];
break;
case 1:
tree_left = count[start + 1];
tree_right = count[start + 2] +
count[start + 3] +
count[start + 4] +
count[start + 5] +
count[start + 6] +
count[start + 7] +
count[start + 8] +
count[start + 9] +
count[start + 10];
break;
default:
tree_left = count[start + 0];
tree_right = count[start + 1] +
count[start + 2] +
count[start + 3] +
count[start + 4] +
count[start + 5] +
count[start + 6] +
count[start + 7] +
count[start + 8] +
count[start + 9] +
count[start + 10];
break;
}
vp9_tree_merge_probs(prev_prob, cur_prob,
coef_node_start,
tree_left, tree_right,
tree_i, node);
coef_node_start = coef_node_start + 1;
}
coef_node_start = mvd_i ? VP9_MV_CLASS0_1_START :
VP9_MV_CLASS0_0_START;
coef_count_node_start = mvd_i ?
VP9_MV_CLASS0_1_COUNT_START :
VP9_MV_CLASS0_0_COUNT_START;
tree_i = 0;
node = 0;
tree_left = count[coef_count_node_start + 0];
tree_right = count[coef_count_node_start + 1];
vp9_tree_merge_probs(prev_prob, cur_prob,
coef_node_start,
tree_left, tree_right,
tree_i, node);
coef_node_start = mvd_i ? VP9_MV_CLASS0_FP_1_START :
VP9_MV_CLASS0_FP_0_START;
coef_count_node_start = mvd_i ?
VP9_MV_CLASS0_FP_1_COUNT_START :
VP9_MV_CLASS0_FP_0_COUNT_START;
for (tree_i = 0; tree_i < 3; tree_i++) {
for (node = 0; node < 3; node++) {
unsigned int start =
coef_count_node_start;
switch (node) {
case 2:
tree_left = count[start + 2];
tree_right = count[start + 3];
break;
case 1:
tree_left = count[start + 1];
tree_right = count[start + 2] +
count[start + 3];
break;
default:
tree_left = count[start + 0];
tree_right = count[start + 1] +
count[start + 2] +
count[start + 3];
break;
}
vp9_tree_merge_probs(prev_prob,
cur_prob,
coef_node_start,
tree_left,
tree_right,
tree_i, node);
coef_node_start = coef_node_start + 1;
}
coef_count_node_start =
coef_count_node_start + 4;
}
}
}
}
static irqreturn_t codec_vp9_threaded_isr(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
struct codec_vp9 *vp9 = sess->priv;
u32 dec_status = amvdec_read_dos(core, VP9_DEC_STATUS_REG);
u32 prob_status = amvdec_read_dos(core, VP9_ADAPT_PROB_REG);
int i;
if (!vp9)
return IRQ_HANDLED;
mutex_lock(&vp9->lock);
if (dec_status != VP9_HEAD_PARSER_DONE) {
dev_err(core->dev_dec, "Unrecognized dec_status: %08X\n",
dec_status);
amvdec_abort(sess);
goto unlock;
}
pr_debug("ISR: %08X;%08X\n", dec_status, prob_status);
sess->keyframe_found = 1;
if ((prob_status & 0xff) == 0xfd && vp9->cur_frame) {
/* VP9_REQ_ADAPT_PROB */
u8 *prev_prob_b = ((u8 *)vp9->workspace_vaddr +
PROB_OFFSET) +
((prob_status >> 8) * 0x1000);
u8 *cur_prob_b = ((u8 *)vp9->workspace_vaddr +
PROB_OFFSET) + 0x4000;
u8 *count_b = (u8 *)vp9->workspace_vaddr +
COUNT_OFFSET;
int last_frame_type = vp9->prev_frame ?
vp9->prev_frame->type :
KEY_FRAME;
adapt_coef_probs(last_frame_type == KEY_FRAME,
vp9->cur_frame->type == KEY_FRAME ? 1 : 0,
prob_status >> 8,
(unsigned int *)prev_prob_b,
(unsigned int *)cur_prob_b,
(unsigned int *)count_b);
memcpy(prev_prob_b, cur_prob_b, ADAPT_PROB_SIZE);
amvdec_write_dos(core, VP9_ADAPT_PROB_REG, 0);
}
/* Invalidate first 3 refs */
for (i = 0; i < REFS_PER_FRAME ; ++i)
vp9->frame_refs[i] = NULL;
vp9->prev_frame = vp9->cur_frame;
codec_vp9_update_ref(vp9);
codec_vp9_fetch_rpm(sess);
if (codec_vp9_process_rpm(vp9)) {
amvdec_src_change(sess, vp9->width, vp9->height, 16);
/* No frame is actually processed */
vp9->cur_frame = NULL;
/* Show the remaining frame */
codec_vp9_show_frame(sess);
/* FIXME: Save refs for resized frame */
if (vp9->frames_num)
codec_vp9_save_refs(vp9);
goto unlock;
}
codec_vp9_process_lf(vp9);
codec_vp9_process_frame(sess);
codec_vp9_show_frame(sess);
unlock:
mutex_unlock(&vp9->lock);
return IRQ_HANDLED;
}
static irqreturn_t codec_vp9_isr(struct amvdec_session *sess)
{
return IRQ_WAKE_THREAD;
}
struct amvdec_codec_ops codec_vp9_ops = {
.start = codec_vp9_start,
.stop = codec_vp9_stop,
.isr = codec_vp9_isr,
.threaded_isr = codec_vp9_threaded_isr,
.num_pending_bufs = codec_vp9_num_pending_bufs,
.drain = codec_vp9_flush_output,
.resume = codec_vp9_resume,
};
| linux-master | drivers/staging/media/meson/vdec/codec_vp9.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2018 Maxime Jourdan <[email protected]>
*
* VDEC_HEVC is a video decoding block that allows decoding of
* HEVC and VP9
*/
#include <linux/firmware.h>
#include <linux/clk.h>
#include "vdec_1.h"
#include "vdec_helpers.h"
#include "vdec_hevc.h"
#include "hevc_regs.h"
#include "dos_regs.h"
/* AO Registers */
#define AO_RTI_GEN_PWR_SLEEP0 0xe8
#define AO_RTI_GEN_PWR_ISO0 0xec
#define GEN_PWR_VDEC_HEVC (BIT(7) | BIT(6))
#define GEN_PWR_VDEC_HEVC_SM1 (BIT(2))
#define MC_SIZE (4096 * 4)
static int vdec_hevc_load_firmware(struct amvdec_session *sess,
const char *fwname)
{
struct amvdec_core *core = sess->core;
struct device *dev = core->dev_dec;
const struct firmware *fw;
static void *mc_addr;
static dma_addr_t mc_addr_map;
int ret;
u32 i = 100;
ret = request_firmware(&fw, fwname, dev);
if (ret < 0) {
dev_err(dev, "Unable to request firmware %s\n", fwname);
return ret;
}
if (fw->size < MC_SIZE) {
dev_err(dev, "Firmware size %zu is too small. Expected %u.\n",
fw->size, MC_SIZE);
ret = -EINVAL;
goto release_firmware;
}
mc_addr = dma_alloc_coherent(core->dev, MC_SIZE, &mc_addr_map,
GFP_KERNEL);
if (!mc_addr) {
ret = -ENOMEM;
goto release_firmware;
}
memcpy(mc_addr, fw->data, MC_SIZE);
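/* Disable the firmware processors, then DMA the microcode into IMEM */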
amvdec_write_dos(core, HEVC_MPSR, 0);
amvdec_write_dos(core, HEVC_CPSR, 0);
amvdec_write_dos(core, HEVC_IMEM_DMA_ADR, mc_addr_map);
amvdec_write_dos(core, HEVC_IMEM_DMA_COUNT, MC_SIZE / 4);
amvdec_write_dos(core, HEVC_IMEM_DMA_CTRL, (0x8000 | (7 << 16)));
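/* Busy-wait (bounded) for the IMEM DMA transfer to complete */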
while (i && (readl(core->dos_base + HEVC_IMEM_DMA_CTRL) & 0x8000))
i--;
if (i == 0) {
dev_err(dev, "Firmware load fail (DMA hang?)\n");
ret = -ENODEV;
}
dma_free_coherent(core->dev, MC_SIZE, mc_addr, mc_addr_map);
release_firmware:
release_firmware(fw);
return ret;
}
static void vdec_hevc_stbuf_init(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
amvdec_write_dos(core, HEVC_STREAM_CONTROL,
amvdec_read_dos(core, HEVC_STREAM_CONTROL) & ~1);
amvdec_write_dos(core, HEVC_STREAM_START_ADDR, sess->vififo_paddr);
amvdec_write_dos(core, HEVC_STREAM_END_ADDR,
sess->vififo_paddr + sess->vififo_size);
amvdec_write_dos(core, HEVC_STREAM_RD_PTR, sess->vififo_paddr);
amvdec_write_dos(core, HEVC_STREAM_WR_PTR, sess->vififo_paddr);
}
/* VDEC_HEVC specific ESPARSER configuration */
static void vdec_hevc_conf_esparser(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
/* set vififo_vbuf_rp_sel=>vdec_hevc */
amvdec_write_dos(core, DOS_GEN_CTRL0, 3 << 1);
amvdec_write_dos(core, HEVC_STREAM_CONTROL,
amvdec_read_dos(core, HEVC_STREAM_CONTROL) | BIT(3));
amvdec_write_dos(core, HEVC_STREAM_CONTROL,
amvdec_read_dos(core, HEVC_STREAM_CONTROL) | 1);
amvdec_write_dos(core, HEVC_STREAM_FIFO_CTL,
amvdec_read_dos(core, HEVC_STREAM_FIFO_CTL) | BIT(29));
}
static u32 vdec_hevc_vififo_level(struct amvdec_session *sess)
{
return readl_relaxed(sess->core->dos_base + HEVC_STREAM_LEVEL);
}
static int vdec_hevc_stop(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
/* Disable interrupt */
amvdec_write_dos(core, HEVC_ASSIST_MBOX1_MASK, 0);
/* Disable firmware processor */
amvdec_write_dos(core, HEVC_MPSR, 0);
if (sess->priv)
codec_ops->stop(sess);
/* Enable VDEC_HEVC Isolation */
if (core->platform->revision == VDEC_REVISION_SM1)
regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0,
GEN_PWR_VDEC_HEVC_SM1,
GEN_PWR_VDEC_HEVC_SM1);
else
regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0,
0xc00, 0xc00);
/* Power down VDEC_HEVC memories */
amvdec_write_dos(core, DOS_MEM_PD_HEVC, 0xffffffffUL);
if (core->platform->revision == VDEC_REVISION_SM1)
regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VDEC_HEVC_SM1,
GEN_PWR_VDEC_HEVC_SM1);
else
regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VDEC_HEVC, GEN_PWR_VDEC_HEVC);
clk_disable_unprepare(core->vdec_hevc_clk);
if (core->platform->revision == VDEC_REVISION_G12A ||
core->platform->revision == VDEC_REVISION_SM1)
clk_disable_unprepare(core->vdec_hevcf_clk);
return 0;
}
static int vdec_hevc_start(struct amvdec_session *sess)
{
int ret;
struct amvdec_core *core = sess->core;
struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
if (core->platform->revision == VDEC_REVISION_G12A ||
core->platform->revision == VDEC_REVISION_SM1) {
clk_set_rate(core->vdec_hevcf_clk, 666666666);
ret = clk_prepare_enable(core->vdec_hevcf_clk);
if (ret)
return ret;
}
clk_set_rate(core->vdec_hevc_clk, 666666666);
ret = clk_prepare_enable(core->vdec_hevc_clk);
if (ret) {
if (core->platform->revision == VDEC_REVISION_G12A ||
core->platform->revision == VDEC_REVISION_SM1)
clk_disable_unprepare(core->vdec_hevcf_clk);
return ret;
}
if (core->platform->revision == VDEC_REVISION_SM1)
regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VDEC_HEVC_SM1, 0);
else
regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VDEC_HEVC, 0);
usleep_range(10, 20);
/* Reset VDEC_HEVC */
amvdec_write_dos(core, DOS_SW_RESET3, 0xffffffff);
amvdec_write_dos(core, DOS_SW_RESET3, 0x00000000);
amvdec_write_dos(core, DOS_GCLK_EN3, 0xffffffff);
/* Power up VDEC_HEVC memories */
amvdec_write_dos(core, DOS_MEM_PD_HEVC, 0x00000000);
/* Remove VDEC_HEVC Isolation */
if (core->platform->revision == VDEC_REVISION_SM1)
regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0,
GEN_PWR_VDEC_HEVC_SM1, 0);
else
regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0,
0xc00, 0);
amvdec_write_dos(core, DOS_SW_RESET3, 0xffffffff);
amvdec_write_dos(core, DOS_SW_RESET3, 0x00000000);
vdec_hevc_stbuf_init(sess);
ret = vdec_hevc_load_firmware(sess, sess->fmt_out->firmware_path);
if (ret)
goto stop;
ret = codec_ops->start(sess);
if (ret)
goto stop;
amvdec_write_dos(core, DOS_SW_RESET3, BIT(12) | BIT(11));
amvdec_write_dos(core, DOS_SW_RESET3, 0);
amvdec_read_dos(core, DOS_SW_RESET3);
amvdec_write_dos(core, HEVC_MPSR, 1);
/* Let the firmware settle */
usleep_range(10, 20);
return 0;
stop:
vdec_hevc_stop(sess);
return ret;
}
struct amvdec_ops vdec_hevc_ops = {
.start = vdec_hevc_start,
.stop = vdec_hevc_stop,
.conf_esparser = vdec_hevc_conf_esparser,
.vififo_level = vdec_hevc_vififo_level,
};
| linux-master | drivers/staging/media/meson/vdec/vdec_hevc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2018 BayLibre, SAS
* Author: Maxime Jourdan <[email protected]>
*/
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/videobuf2-dma-contig.h>
#include "vdec.h"
#include "esparser.h"
#include "vdec_helpers.h"
struct dummy_buf {
struct vb2_v4l2_buffer vb;
struct list_head list;
};
/* 16 MiB for parsed bitstream swap exchange */
#define SIZE_VIFIFO SZ_16M
static u32 get_output_size(u32 width, u32 height)
{
return ALIGN(width * height, SZ_64K);
}
u32 amvdec_get_output_size(struct amvdec_session *sess)
{
return get_output_size(sess->width, sess->height);
}
EXPORT_SYMBOL_GPL(amvdec_get_output_size);
static int vdec_codec_needs_recycle(struct amvdec_session *sess)
{
struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
return codec_ops->can_recycle && codec_ops->recycle;
}
static int vdec_recycle_thread(void *data)
{
struct amvdec_session *sess = data;
struct amvdec_core *core = sess->core;
struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
struct amvdec_buffer *tmp, *n;
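/*
 * Periodically hand decoded buffers back to the firmware for reuse, as
 * long as it can accept them.
 */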
while (!kthread_should_stop()) {
mutex_lock(&sess->bufs_recycle_lock);
list_for_each_entry_safe(tmp, n, &sess->bufs_recycle, list) {
if (!codec_ops->can_recycle(core))
break;
codec_ops->recycle(core, tmp->vb->index);
list_del(&tmp->list);
kfree(tmp);
}
mutex_unlock(&sess->bufs_recycle_lock);
usleep_range(5000, 10000);
}
return 0;
}
static int vdec_poweron(struct amvdec_session *sess)
{
int ret;
struct amvdec_ops *vdec_ops = sess->fmt_out->vdec_ops;
ret = clk_prepare_enable(sess->core->dos_parser_clk);
if (ret)
return ret;
ret = clk_prepare_enable(sess->core->dos_clk);
if (ret)
goto disable_dos_parser;
ret = vdec_ops->start(sess);
if (ret)
goto disable_dos;
esparser_power_up(sess);
return 0;
disable_dos:
clk_disable_unprepare(sess->core->dos_clk);
disable_dos_parser:
clk_disable_unprepare(sess->core->dos_parser_clk);
return ret;
}
static void vdec_wait_inactive(struct amvdec_session *sess)
{
/* We consider 50ms with no IRQ to be inactive. */
while (time_is_after_jiffies64(sess->last_irq_jiffies +
msecs_to_jiffies(50)))
msleep(25);
}
static void vdec_poweroff(struct amvdec_session *sess)
{
struct amvdec_ops *vdec_ops = sess->fmt_out->vdec_ops;
struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
sess->should_stop = 1;
vdec_wait_inactive(sess);
if (codec_ops->drain)
codec_ops->drain(sess);
vdec_ops->stop(sess);
clk_disable_unprepare(sess->core->dos_clk);
clk_disable_unprepare(sess->core->dos_parser_clk);
}
static void
vdec_queue_recycle(struct amvdec_session *sess, struct vb2_buffer *vb)
{
struct amvdec_buffer *new_buf;
new_buf = kmalloc(sizeof(*new_buf), GFP_KERNEL);
if (!new_buf)
return;
new_buf->vb = vb;
mutex_lock(&sess->bufs_recycle_lock);
list_add_tail(&new_buf->list, &sess->bufs_recycle);
mutex_unlock(&sess->bufs_recycle_lock);
}
static void vdec_m2m_device_run(void *priv)
{
struct amvdec_session *sess = priv;
schedule_work(&sess->esparser_queue_work);
}
static void vdec_m2m_job_abort(void *priv)
{
struct amvdec_session *sess = priv;
v4l2_m2m_job_finish(sess->m2m_dev, sess->m2m_ctx);
}
static const struct v4l2_m2m_ops vdec_m2m_ops = {
.device_run = vdec_m2m_device_run,
.job_abort = vdec_m2m_job_abort,
};
static void process_num_buffers(struct vb2_queue *q,
struct amvdec_session *sess,
unsigned int *num_buffers,
bool is_reqbufs)
{
const struct amvdec_format *fmt_out = sess->fmt_out;
unsigned int buffers_total = q->num_buffers + *num_buffers;
u32 min_buf_capture = v4l2_ctrl_g_ctrl(sess->ctrl_min_buf_capture);
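/*
 * Clamp the buffer count to the firmware-reported minimum, the format
 * minimum (on REQBUFS) and the format maximum.
 */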
if (q->num_buffers + *num_buffers < min_buf_capture)
*num_buffers = min_buf_capture - q->num_buffers;
if (is_reqbufs && buffers_total < fmt_out->min_buffers)
*num_buffers = fmt_out->min_buffers - q->num_buffers;
if (buffers_total > fmt_out->max_buffers)
*num_buffers = fmt_out->max_buffers - q->num_buffers;
/* We need to program the complete CAPTURE buffer list
 * in registers during start_streaming, and the firmware
 * is free to choose any of them to write frames to. As such,
 * we need all of them to be queued into the driver.
 */
sess->num_dst_bufs = q->num_buffers + *num_buffers;
q->min_buffers_needed = max(fmt_out->min_buffers, sess->num_dst_bufs);
}
static int vdec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
unsigned int *num_planes, unsigned int sizes[],
struct device *alloc_devs[])
{
struct amvdec_session *sess = vb2_get_drv_priv(q);
u32 output_size = amvdec_get_output_size(sess);
if (*num_planes) {
switch (q->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
if (*num_planes != 1 ||
sizes[0] < sess->src_buffer_size)
return -EINVAL;
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
switch (sess->pixfmt_cap) {
case V4L2_PIX_FMT_NV12M:
if (*num_planes != 2 ||
sizes[0] < output_size ||
sizes[1] < output_size / 2)
return -EINVAL;
break;
case V4L2_PIX_FMT_YUV420M:
if (*num_planes != 3 ||
sizes[0] < output_size ||
sizes[1] < output_size / 4 ||
sizes[2] < output_size / 4)
return -EINVAL;
break;
default:
return -EINVAL;
}
process_num_buffers(q, sess, num_buffers, false);
break;
}
return 0;
}
switch (q->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
sizes[0] = sess->src_buffer_size;
*num_planes = 1;
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
switch (sess->pixfmt_cap) {
case V4L2_PIX_FMT_NV12M:
sizes[0] = output_size;
sizes[1] = output_size / 2;
*num_planes = 2;
break;
case V4L2_PIX_FMT_YUV420M:
sizes[0] = output_size;
sizes[1] = output_size / 4;
sizes[2] = output_size / 4;
*num_planes = 3;
break;
default:
return -EINVAL;
}
process_num_buffers(q, sess, num_buffers, true);
break;
default:
return -EINVAL;
}
sess->changed_format = 1;
return 0;
}
static void vdec_vb2_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct amvdec_session *sess = vb2_get_drv_priv(vb->vb2_queue);
struct v4l2_m2m_ctx *m2m_ctx = sess->m2m_ctx;
v4l2_m2m_buf_queue(m2m_ctx, vbuf);
if (!sess->streamon_out)
return;
if (sess->streamon_cap &&
vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
vdec_codec_needs_recycle(sess))
vdec_queue_recycle(sess, vb);
schedule_work(&sess->esparser_queue_work);
}
static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct amvdec_session *sess = vb2_get_drv_priv(q);
struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
struct amvdec_core *core = sess->core;
struct vb2_v4l2_buffer *buf;
int ret;
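/* Only one session may use the decoder hardware at a time */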
if (core->cur_sess && core->cur_sess != sess) {
ret = -EBUSY;
goto bufs_done;
}
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
sess->streamon_out = 1;
else
sess->streamon_cap = 1;
if (!sess->streamon_out)
return 0;
if (sess->status == STATUS_NEEDS_RESUME &&
q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
sess->changed_format) {
codec_ops->resume(sess);
sess->status = STATUS_RUNNING;
return 0;
}
if (sess->status == STATUS_RUNNING ||
sess->status == STATUS_NEEDS_RESUME ||
sess->status == STATUS_INIT)
return 0;
sess->vififo_size = SIZE_VIFIFO;
sess->vififo_vaddr =
dma_alloc_coherent(sess->core->dev, sess->vififo_size,
&sess->vififo_paddr, GFP_KERNEL);
if (!sess->vififo_vaddr) {
dev_err(sess->core->dev, "Failed to request VIFIFO buffer\n");
ret = -ENOMEM;
goto bufs_done;
}
sess->should_stop = 0;
sess->keyframe_found = 0;
sess->last_offset = 0;
sess->wrap_count = 0;
sess->pixelaspect.numerator = 1;
sess->pixelaspect.denominator = 1;
atomic_set(&sess->esparser_queued_bufs, 0);
v4l2_ctrl_s_ctrl(sess->ctrl_min_buf_capture, 1);
ret = vdec_poweron(sess);
if (ret)
goto vififo_free;
sess->sequence_cap = 0;
sess->sequence_out = 0;
if (vdec_codec_needs_recycle(sess))
sess->recycle_thread = kthread_run(vdec_recycle_thread, sess,
"vdec_recycle");
sess->status = STATUS_INIT;
core->cur_sess = sess;
schedule_work(&sess->esparser_queue_work);
return 0;
vififo_free:
dma_free_coherent(sess->core->dev, sess->vififo_size,
sess->vififo_vaddr, sess->vififo_paddr);
bufs_done:
while ((buf = v4l2_m2m_src_buf_remove(sess->m2m_ctx)))
v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
while ((buf = v4l2_m2m_dst_buf_remove(sess->m2m_ctx)))
v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
sess->streamon_out = 0;
else
sess->streamon_cap = 0;
return ret;
}
static void vdec_free_canvas(struct amvdec_session *sess)
{
int i;
for (i = 0; i < sess->canvas_num; ++i)
meson_canvas_free(sess->core->canvas, sess->canvas_alloc[i]);
sess->canvas_num = 0;
}
static void vdec_reset_timestamps(struct amvdec_session *sess)
{
struct amvdec_timestamp *tmp, *n;
list_for_each_entry_safe(tmp, n, &sess->timestamps, list) {
list_del(&tmp->list);
kfree(tmp);
}
}
static void vdec_reset_bufs_recycle(struct amvdec_session *sess)
{
struct amvdec_buffer *tmp, *n;
list_for_each_entry_safe(tmp, n, &sess->bufs_recycle, list) {
list_del(&tmp->list);
kfree(tmp);
}
}
static void vdec_stop_streaming(struct vb2_queue *q)
{
struct amvdec_session *sess = vb2_get_drv_priv(q);
struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
struct amvdec_core *core = sess->core;
struct vb2_v4l2_buffer *buf;
if (sess->status == STATUS_RUNNING ||
sess->status == STATUS_INIT ||
(sess->status == STATUS_NEEDS_RESUME &&
(!sess->streamon_out || !sess->streamon_cap))) {
if (vdec_codec_needs_recycle(sess))
kthread_stop(sess->recycle_thread);
vdec_poweroff(sess);
vdec_free_canvas(sess);
dma_free_coherent(sess->core->dev, sess->vififo_size,
sess->vififo_vaddr, sess->vififo_paddr);
vdec_reset_timestamps(sess);
vdec_reset_bufs_recycle(sess);
kfree(sess->priv);
sess->priv = NULL;
core->cur_sess = NULL;
sess->status = STATUS_STOPPED;
}
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
while ((buf = v4l2_m2m_src_buf_remove(sess->m2m_ctx)))
v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
sess->streamon_out = 0;
} else {
/* Drain remaining refs if the session was still running */
if (sess->status >= STATUS_RUNNING && codec_ops->drain)
codec_ops->drain(sess);
while ((buf = v4l2_m2m_dst_buf_remove(sess->m2m_ctx)))
v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
sess->streamon_cap = 0;
}
}
static int vdec_vb2_buf_prepare(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
vbuf->field = V4L2_FIELD_NONE;
return 0;
}
static const struct vb2_ops vdec_vb2_ops = {
.queue_setup = vdec_queue_setup,
.start_streaming = vdec_start_streaming,
.stop_streaming = vdec_stop_streaming,
.buf_queue = vdec_vb2_buf_queue,
.buf_prepare = vdec_vb2_buf_prepare,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
static int
vdec_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
strscpy(cap->driver, "meson-vdec", sizeof(cap->driver));
strscpy(cap->card, "Amlogic Video Decoder", sizeof(cap->card));
strscpy(cap->bus_info, "platform:meson-vdec", sizeof(cap->bus_info));
return 0;
}
static const struct amvdec_format *
find_format(const struct amvdec_format *fmts, u32 size, u32 pixfmt)
{
unsigned int i;
for (i = 0; i < size; i++) {
if (fmts[i].pixfmt == pixfmt)
return &fmts[i];
}
return NULL;
}
static unsigned int
vdec_supports_pixfmt_cap(const struct amvdec_format *fmt_out, u32 pixfmt_cap)
{
int i;
for (i = 0; fmt_out->pixfmts_cap[i]; i++)
if (fmt_out->pixfmts_cap[i] == pixfmt_cap)
return 1;
return 0;
}
static const struct amvdec_format *
vdec_try_fmt_common(struct amvdec_session *sess, u32 size,
struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt;
const struct amvdec_format *fmts = sess->core->platform->formats;
const struct amvdec_format *fmt_out = NULL;
u32 output_size = 0;
memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved));
memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
switch (f->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
fmt_out = find_format(fmts, size, pixmp->pixelformat);
if (!fmt_out) {
pixmp->pixelformat = V4L2_PIX_FMT_MPEG2;
fmt_out = find_format(fmts, size, pixmp->pixelformat);
}
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
fmt_out = sess->fmt_out;
break;
default:
return NULL;
}
pixmp->width = clamp(pixmp->width, (u32)256, fmt_out->max_width);
pixmp->height = clamp(pixmp->height, (u32)144, fmt_out->max_height);
output_size = get_output_size(pixmp->width, pixmp->height);
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
if (!pfmt[0].sizeimage)
pfmt[0].sizeimage = sess->src_buffer_size;
pfmt[0].bytesperline = 0;
pixmp->num_planes = 1;
} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
fmt_out = sess->fmt_out;
if (!vdec_supports_pixfmt_cap(fmt_out, pixmp->pixelformat))
pixmp->pixelformat = fmt_out->pixfmts_cap[0];
memset(pfmt[1].reserved, 0, sizeof(pfmt[1].reserved));
if (pixmp->pixelformat == V4L2_PIX_FMT_NV12M) {
pfmt[0].sizeimage = output_size;
pfmt[0].bytesperline = ALIGN(pixmp->width, 32);
pfmt[1].sizeimage = output_size / 2;
pfmt[1].bytesperline = ALIGN(pixmp->width, 32);
pixmp->num_planes = 2;
} else if (pixmp->pixelformat == V4L2_PIX_FMT_YUV420M) {
pfmt[0].sizeimage = output_size;
pfmt[0].bytesperline = ALIGN(pixmp->width, 32);
pfmt[1].sizeimage = output_size / 4;
pfmt[1].bytesperline = ALIGN(pixmp->width, 32) / 2;
pfmt[2].sizeimage = output_size / 2;
pfmt[2].bytesperline = ALIGN(pixmp->width, 32) / 2;
pixmp->num_planes = 3;
}
}
if (pixmp->field == V4L2_FIELD_ANY)
pixmp->field = V4L2_FIELD_NONE;
return fmt_out;
}
static int vdec_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct amvdec_session *sess =
container_of(file->private_data, struct amvdec_session, fh);
vdec_try_fmt_common(sess, sess->core->platform->num_formats, f);
return 0;
}
static int vdec_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct amvdec_session *sess =
container_of(file->private_data, struct amvdec_session, fh);
struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
pixmp->pixelformat = sess->pixfmt_cap;
else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
pixmp->pixelformat = sess->fmt_out->pixfmt;
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
pixmp->width = sess->width;
pixmp->height = sess->height;
pixmp->colorspace = sess->colorspace;
pixmp->ycbcr_enc = sess->ycbcr_enc;
pixmp->quantization = sess->quantization;
pixmp->xfer_func = sess->xfer_func;
} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
pixmp->width = sess->width;
pixmp->height = sess->height;
}
vdec_try_fmt_common(sess, sess->core->platform->num_formats, f);
return 0;
}
static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct amvdec_session *sess =
container_of(file->private_data, struct amvdec_session, fh);
struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
u32 num_formats = sess->core->platform->num_formats;
const struct amvdec_format *fmt_out;
struct v4l2_pix_format_mplane orig_pixmp;
struct v4l2_format format;
u32 pixfmt_out = 0, pixfmt_cap = 0;
orig_pixmp = *pixmp;
fmt_out = vdec_try_fmt_common(sess, num_formats, f);
if (!fmt_out)
return -EINVAL;
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
pixfmt_out = pixmp->pixelformat;
pixfmt_cap = sess->pixfmt_cap;
} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
pixfmt_cap = pixmp->pixelformat;
pixfmt_out = sess->fmt_out->pixfmt;
}
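/*
 * Validate the OUTPUT (coded) format first, then derive the CAPTURE
 * format from the negotiated resolution.
 */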
memset(&format, 0, sizeof(format));
format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
format.fmt.pix_mp.pixelformat = pixfmt_out;
format.fmt.pix_mp.width = orig_pixmp.width;
format.fmt.pix_mp.height = orig_pixmp.height;
vdec_try_fmt_common(sess, num_formats, &format);
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
sess->width = format.fmt.pix_mp.width;
sess->height = format.fmt.pix_mp.height;
sess->colorspace = pixmp->colorspace;
sess->ycbcr_enc = pixmp->ycbcr_enc;
sess->quantization = pixmp->quantization;
sess->xfer_func = pixmp->xfer_func;
sess->src_buffer_size = pixmp->plane_fmt[0].sizeimage;
}
memset(&format, 0, sizeof(format));
format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
format.fmt.pix_mp.pixelformat = pixfmt_cap;
format.fmt.pix_mp.width = orig_pixmp.width;
format.fmt.pix_mp.height = orig_pixmp.height;
vdec_try_fmt_common(sess, num_formats, &format);
sess->width = format.fmt.pix_mp.width;
sess->height = format.fmt.pix_mp.height;
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
sess->fmt_out = fmt_out;
else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
sess->pixfmt_cap = format.fmt.pix_mp.pixelformat;
return 0;
}
static int vdec_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
struct amvdec_session *sess =
container_of(file->private_data, struct amvdec_session, fh);
const struct vdec_platform *platform = sess->core->platform;
const struct amvdec_format *fmt_out;
memset(f->reserved, 0, sizeof(f->reserved));
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
if (f->index >= platform->num_formats)
return -EINVAL;
fmt_out = &platform->formats[f->index];
f->pixelformat = fmt_out->pixfmt;
f->flags = fmt_out->flags;
} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
fmt_out = sess->fmt_out;
if (f->index >= 4 || !fmt_out->pixfmts_cap[f->index])
return -EINVAL;
f->pixelformat = fmt_out->pixfmts_cap[f->index];
} else {
return -EINVAL;
}
return 0;
}
static int vdec_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
struct amvdec_session *sess =
container_of(file->private_data, struct amvdec_session, fh);
const struct amvdec_format *formats = sess->core->platform->formats;
const struct amvdec_format *fmt;
u32 num_formats = sess->core->platform->num_formats;
fmt = find_format(formats, num_formats, fsize->pixel_format);
if (!fmt || fsize->index)
return -EINVAL;
fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
fsize->stepwise.min_width = 256;
fsize->stepwise.max_width = fmt->max_width;
fsize->stepwise.step_width = 1;
fsize->stepwise.min_height = 144;
fsize->stepwise.max_height = fmt->max_height;
fsize->stepwise.step_height = 1;
return 0;
}
static int
vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
{
struct amvdec_session *sess =
container_of(file->private_data, struct amvdec_session, fh);
struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
struct device *dev = sess->core->dev;
int ret;
ret = v4l2_m2m_ioctl_try_decoder_cmd(file, fh, cmd);
if (ret)
return ret;
if (!(sess->streamon_out & sess->streamon_cap))
return 0;
if (cmd->cmd == V4L2_DEC_CMD_START) {
v4l2_m2m_clear_state(sess->m2m_ctx);
sess->should_stop = 0;
return 0;
}
/* Should not happen */
if (cmd->cmd != V4L2_DEC_CMD_STOP)
return -EINVAL;
dev_dbg(dev, "Received V4L2_DEC_CMD_STOP\n");
sess->should_stop = 1;
v4l2_m2m_mark_stopped(sess->m2m_ctx);
if (codec_ops->drain) {
vdec_wait_inactive(sess);
codec_ops->drain(sess);
} else if (codec_ops->eos_sequence) {
u32 len;
const u8 *data = codec_ops->eos_sequence(&len);
esparser_queue_eos(sess->core, data, len);
vdec_wait_inactive(sess);
}
return ret;
}
static int vdec_subscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
switch (sub->type) {
case V4L2_EVENT_EOS:
case V4L2_EVENT_SOURCE_CHANGE:
return v4l2_event_subscribe(fh, sub, 0, NULL);
case V4L2_EVENT_CTRL:
return v4l2_ctrl_subscribe_event(fh, sub);
default:
return -EINVAL;
}
}
static int vdec_g_pixelaspect(struct file *file, void *fh, int type,
struct v4l2_fract *f)
{
struct amvdec_session *sess =
container_of(file->private_data, struct amvdec_session, fh);
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
return -EINVAL;
*f = sess->pixelaspect;
return 0;
}
static const struct v4l2_ioctl_ops vdec_ioctl_ops = {
.vidioc_querycap = vdec_querycap,
.vidioc_enum_fmt_vid_cap = vdec_enum_fmt,
.vidioc_enum_fmt_vid_out = vdec_enum_fmt,
.vidioc_s_fmt_vid_cap_mplane = vdec_s_fmt,
.vidioc_s_fmt_vid_out_mplane = vdec_s_fmt,
.vidioc_g_fmt_vid_cap_mplane = vdec_g_fmt,
.vidioc_g_fmt_vid_out_mplane = vdec_g_fmt,
.vidioc_try_fmt_vid_cap_mplane = vdec_try_fmt,
.vidioc_try_fmt_vid_out_mplane = vdec_try_fmt,
.vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
.vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
.vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
.vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_enum_framesizes = vdec_enum_framesizes,
.vidioc_subscribe_event = vdec_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
.vidioc_try_decoder_cmd = v4l2_m2m_ioctl_try_decoder_cmd,
.vidioc_decoder_cmd = vdec_decoder_cmd,
.vidioc_g_pixelaspect = vdec_g_pixelaspect,
};
static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq)
{
struct amvdec_session *sess = priv;
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->ops = &vdec_vb2_ops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->drv_priv = sess;
src_vq->buf_struct_size = sizeof(struct dummy_buf);
src_vq->min_buffers_needed = 1;
src_vq->dev = sess->core->dev;
src_vq->lock = &sess->lock;
ret = vb2_queue_init(src_vq);
if (ret)
return ret;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->ops = &vdec_vb2_ops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->drv_priv = sess;
dst_vq->buf_struct_size = sizeof(struct dummy_buf);
dst_vq->min_buffers_needed = 1;
dst_vq->dev = sess->core->dev;
dst_vq->lock = &sess->lock;
return vb2_queue_init(dst_vq);
}
static int vdec_init_ctrls(struct amvdec_session *sess)
{
struct v4l2_ctrl_handler *ctrl_handler = &sess->ctrl_handler;
int ret;
ret = v4l2_ctrl_handler_init(ctrl_handler, 1);
if (ret)
return ret;
sess->ctrl_min_buf_capture =
v4l2_ctrl_new_std(ctrl_handler, NULL,
V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1,
1);
ret = ctrl_handler->error;
if (ret) {
v4l2_ctrl_handler_free(ctrl_handler);
return ret;
}
return 0;
}
static int vdec_open(struct file *file)
{
struct amvdec_core *core = video_drvdata(file);
struct device *dev = core->dev;
const struct amvdec_format *formats = core->platform->formats;
struct amvdec_session *sess;
int ret;
sess = kzalloc(sizeof(*sess), GFP_KERNEL);
if (!sess)
return -ENOMEM;
sess->core = core;
sess->m2m_dev = v4l2_m2m_init(&vdec_m2m_ops);
if (IS_ERR(sess->m2m_dev)) {
dev_err(dev, "Fail to v4l2_m2m_init\n");
ret = PTR_ERR(sess->m2m_dev);
goto err_free_sess;
}
sess->m2m_ctx = v4l2_m2m_ctx_init(sess->m2m_dev, sess, m2m_queue_init);
if (IS_ERR(sess->m2m_ctx)) {
dev_err(dev, "Fail to v4l2_m2m_ctx_init\n");
ret = PTR_ERR(sess->m2m_ctx);
goto err_m2m_release;
}
ret = vdec_init_ctrls(sess);
if (ret)
goto err_m2m_release;
sess->pixfmt_cap = formats[0].pixfmts_cap[0];
sess->fmt_out = &formats[0];
sess->width = 1280;
sess->height = 720;
sess->pixelaspect.numerator = 1;
sess->pixelaspect.denominator = 1;
sess->src_buffer_size = SZ_1M;
INIT_LIST_HEAD(&sess->timestamps);
INIT_LIST_HEAD(&sess->bufs_recycle);
INIT_WORK(&sess->esparser_queue_work, esparser_queue_all_src);
mutex_init(&sess->lock);
mutex_init(&sess->bufs_recycle_lock);
spin_lock_init(&sess->ts_spinlock);
v4l2_fh_init(&sess->fh, core->vdev_dec);
sess->fh.ctrl_handler = &sess->ctrl_handler;
v4l2_fh_add(&sess->fh);
sess->fh.m2m_ctx = sess->m2m_ctx;
file->private_data = &sess->fh;
return 0;
err_m2m_release:
v4l2_m2m_release(sess->m2m_dev);
err_free_sess:
kfree(sess);
return ret;
}
static int vdec_close(struct file *file)
{
struct amvdec_session *sess =
container_of(file->private_data, struct amvdec_session, fh);
v4l2_m2m_ctx_release(sess->m2m_ctx);
v4l2_m2m_release(sess->m2m_dev);
v4l2_fh_del(&sess->fh);
v4l2_fh_exit(&sess->fh);
mutex_destroy(&sess->lock);
mutex_destroy(&sess->bufs_recycle_lock);
kfree(sess);
return 0;
}
static const struct v4l2_file_operations vdec_fops = {
.owner = THIS_MODULE,
.open = vdec_open,
.release = vdec_close,
.unlocked_ioctl = video_ioctl2,
.poll = v4l2_m2m_fop_poll,
.mmap = v4l2_m2m_fop_mmap,
};
static irqreturn_t vdec_isr(int irq, void *data)
{
struct amvdec_core *core = data;
struct amvdec_session *sess = core->cur_sess;
sess->last_irq_jiffies = get_jiffies_64();
return sess->fmt_out->codec_ops->isr(sess);
}
static irqreturn_t vdec_threaded_isr(int irq, void *data)
{
struct amvdec_core *core = data;
struct amvdec_session *sess = core->cur_sess;
return sess->fmt_out->codec_ops->threaded_isr(sess);
}
static const struct of_device_id vdec_dt_match[] = {
{ .compatible = "amlogic,gxbb-vdec",
.data = &vdec_platform_gxbb },
{ .compatible = "amlogic,gxm-vdec",
.data = &vdec_platform_gxm },
{ .compatible = "amlogic,gxl-vdec",
.data = &vdec_platform_gxl },
{ .compatible = "amlogic,g12a-vdec",
.data = &vdec_platform_g12a },
{ .compatible = "amlogic,sm1-vdec",
.data = &vdec_platform_sm1 },
{}
};
MODULE_DEVICE_TABLE(of, vdec_dt_match);
static int vdec_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct video_device *vdev;
struct amvdec_core *core;
const struct of_device_id *of_id;
int irq;
int ret;
core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
if (!core)
return -ENOMEM;
core->dev = dev;
platform_set_drvdata(pdev, core);
core->dos_base = devm_platform_ioremap_resource_byname(pdev, "dos");
if (IS_ERR(core->dos_base))
return PTR_ERR(core->dos_base);
core->esparser_base = devm_platform_ioremap_resource_byname(pdev, "esparser");
if (IS_ERR(core->esparser_base))
return PTR_ERR(core->esparser_base);
core->regmap_ao =
syscon_regmap_lookup_by_phandle(dev->of_node,
"amlogic,ao-sysctrl");
if (IS_ERR(core->regmap_ao)) {
dev_err(dev, "Couldn't regmap AO sysctrl\n");
return PTR_ERR(core->regmap_ao);
}
core->canvas = meson_canvas_get(dev);
if (IS_ERR(core->canvas))
return PTR_ERR(core->canvas);
of_id = of_match_node(vdec_dt_match, dev->of_node);
core->platform = of_id->data;
if (core->platform->revision == VDEC_REVISION_G12A ||
core->platform->revision == VDEC_REVISION_SM1) {
core->vdec_hevcf_clk = devm_clk_get(dev, "vdec_hevcf");
if (IS_ERR(core->vdec_hevcf_clk))
return -EPROBE_DEFER;
}
core->dos_parser_clk = devm_clk_get(dev, "dos_parser");
if (IS_ERR(core->dos_parser_clk))
return -EPROBE_DEFER;
core->dos_clk = devm_clk_get(dev, "dos");
if (IS_ERR(core->dos_clk))
return -EPROBE_DEFER;
core->vdec_1_clk = devm_clk_get(dev, "vdec_1");
if (IS_ERR(core->vdec_1_clk))
return -EPROBE_DEFER;
core->vdec_hevc_clk = devm_clk_get(dev, "vdec_hevc");
if (IS_ERR(core->vdec_hevc_clk))
return -EPROBE_DEFER;
irq = platform_get_irq_byname(pdev, "vdec");
if (irq < 0)
return irq;
ret = devm_request_threaded_irq(core->dev, irq, vdec_isr,
vdec_threaded_isr, IRQF_ONESHOT,
"vdec", core);
if (ret)
return ret;
ret = esparser_init(pdev, core);
if (ret)
return ret;
ret = v4l2_device_register(dev, &core->v4l2_dev);
if (ret) {
dev_err(dev, "Couldn't register v4l2 device\n");
return -ENOMEM;
}
vdev = video_device_alloc();
if (!vdev) {
ret = -ENOMEM;
goto err_vdev_release;
}
core->vdev_dec = vdev;
core->dev_dec = dev;
mutex_init(&core->lock);
strscpy(vdev->name, "meson-video-decoder", sizeof(vdev->name));
vdev->release = video_device_release;
vdev->fops = &vdec_fops;
vdev->ioctl_ops = &vdec_ioctl_ops;
vdev->vfl_dir = VFL_DIR_M2M;
vdev->v4l2_dev = &core->v4l2_dev;
vdev->lock = &core->lock;
vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
video_set_drvdata(vdev, core);
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(dev, "Failed registering video device\n");
goto err_vdev_release;
}
return 0;
err_vdev_release:
video_device_release(vdev);
v4l2_device_unregister(&core->v4l2_dev);
return ret;
}
static void vdec_remove(struct platform_device *pdev)
{
struct amvdec_core *core = platform_get_drvdata(pdev);
video_unregister_device(core->vdev_dec);
v4l2_device_unregister(&core->v4l2_dev);
}
static struct platform_driver meson_vdec_driver = {
.probe = vdec_probe,
.remove_new = vdec_remove,
.driver = {
.name = "meson-vdec",
.of_match_table = vdec_dt_match,
},
};
module_platform_driver(meson_vdec_driver);
MODULE_DESCRIPTION("Meson video decoder driver for GXBB/GXL/GXM/G12/SM1");
MODULE_AUTHOR("Maxime Jourdan <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/media/meson/vdec/vdec.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2018 BayLibre, SAS
* Author: Maxime Jourdan <[email protected]>
*/
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
#include "codec_mpeg12.h"
#include "dos_regs.h"
#include "vdec_helpers.h"
#define SIZE_WORKSPACE SZ_128K
/* Offset subtracted by the firmware from the workspace paddr */
#define WORKSPACE_OFFSET (5 * SZ_1K)
/* map firmware registers to known MPEG1/2 functions */
#define MREG_SEQ_INFO AV_SCRATCH_4
#define MPEG2_SEQ_DAR_MASK GENMASK(3, 0)
#define MPEG2_DAR_4_3 2
#define MPEG2_DAR_16_9 3
#define MPEG2_DAR_221_100 4
#define MREG_PIC_INFO AV_SCRATCH_5
#define MREG_PIC_WIDTH AV_SCRATCH_6
#define MREG_PIC_HEIGHT AV_SCRATCH_7
#define MREG_BUFFERIN AV_SCRATCH_8
#define MREG_BUFFEROUT AV_SCRATCH_9
#define MREG_CMD AV_SCRATCH_A
#define MREG_CO_MV_START AV_SCRATCH_B
#define MREG_ERROR_COUNT AV_SCRATCH_C
#define MREG_FRAME_OFFSET AV_SCRATCH_D
#define MREG_WAIT_BUFFER AV_SCRATCH_E
#define MREG_FATAL_ERROR AV_SCRATCH_F
#define PICINFO_PROG 0x00008000
#define PICINFO_TOP_FIRST 0x00002000
struct codec_mpeg12 {
/* Buffer for the MPEG1/2 Workspace */
void *workspace_vaddr;
dma_addr_t workspace_paddr;
};
static const u8 eos_sequence[SZ_1K] = { 0x00, 0x00, 0x01, 0xB7 };
static const u8 *codec_mpeg12_eos_sequence(u32 *len)
{
*len = ARRAY_SIZE(eos_sequence);
return eos_sequence;
}
static int codec_mpeg12_can_recycle(struct amvdec_core *core)
{
return !amvdec_read_dos(core, MREG_BUFFERIN);
}
static void codec_mpeg12_recycle(struct amvdec_core *core, u32 buf_idx)
{
amvdec_write_dos(core, MREG_BUFFERIN, buf_idx + 1);
}
static int codec_mpeg12_start(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
struct codec_mpeg12 *mpeg12;
int ret;
mpeg12 = kzalloc(sizeof(*mpeg12), GFP_KERNEL);
if (!mpeg12)
return -ENOMEM;
/* Allocate some memory for the MPEG1/2 decoder's state */
mpeg12->workspace_vaddr = dma_alloc_coherent(core->dev, SIZE_WORKSPACE,
&mpeg12->workspace_paddr,
GFP_KERNEL);
if (!mpeg12->workspace_vaddr) {
dev_err(core->dev, "Failed to request MPEG 1/2 Workspace\n");
ret = -ENOMEM;
goto free_mpeg12;
}
ret = amvdec_set_canvases(sess, (u32[]){ AV_SCRATCH_0, 0 },
(u32[]){ 8, 0 });
if (ret)
goto free_workspace;
amvdec_write_dos(core, POWER_CTL_VLD, BIT(4));
amvdec_write_dos(core, MREG_CO_MV_START,
mpeg12->workspace_paddr + WORKSPACE_OFFSET);
amvdec_write_dos(core, MPEG1_2_REG, 0);
amvdec_write_dos(core, PSCALE_CTRL, 0);
amvdec_write_dos(core, PIC_HEAD_INFO, 0x380);
amvdec_write_dos(core, M4_CONTROL_REG, 0);
amvdec_write_dos(core, MREG_BUFFERIN, 0);
amvdec_write_dos(core, MREG_BUFFEROUT, 0);
amvdec_write_dos(core, MREG_CMD, (sess->width << 16) | sess->height);
amvdec_write_dos(core, MREG_ERROR_COUNT, 0);
amvdec_write_dos(core, MREG_FATAL_ERROR, 0);
amvdec_write_dos(core, MREG_WAIT_BUFFER, 0);
sess->keyframe_found = 1;
sess->priv = mpeg12;
return 0;
free_workspace:
dma_free_coherent(core->dev, SIZE_WORKSPACE, mpeg12->workspace_vaddr,
mpeg12->workspace_paddr);
free_mpeg12:
kfree(mpeg12);
return ret;
}
static int codec_mpeg12_stop(struct amvdec_session *sess)
{
struct codec_mpeg12 *mpeg12 = sess->priv;
struct amvdec_core *core = sess->core;
if (mpeg12->workspace_vaddr)
dma_free_coherent(core->dev, SIZE_WORKSPACE,
mpeg12->workspace_vaddr,
mpeg12->workspace_paddr);
return 0;
}
static void codec_mpeg12_update_dar(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
u32 seq = amvdec_read_dos(core, MREG_SEQ_INFO);
u32 ar = seq & MPEG2_SEQ_DAR_MASK;
switch (ar) {
case MPEG2_DAR_4_3:
amvdec_set_par_from_dar(sess, 4, 3);
break;
case MPEG2_DAR_16_9:
amvdec_set_par_from_dar(sess, 16, 9);
break;
case MPEG2_DAR_221_100:
amvdec_set_par_from_dar(sess, 221, 100);
break;
default:
sess->pixelaspect.numerator = 1;
sess->pixelaspect.denominator = 1;
break;
}
}
static irqreturn_t codec_mpeg12_threaded_isr(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
u32 reg;
u32 pic_info;
u32 is_progressive;
u32 buffer_index;
u32 field = V4L2_FIELD_NONE;
u32 offset;
amvdec_write_dos(core, ASSIST_MBOX1_CLR_REG, 1);
reg = amvdec_read_dos(core, MREG_FATAL_ERROR);
if (reg == 1) {
dev_err(core->dev, "MPEG1/2 fatal error\n");
amvdec_abort(sess);
return IRQ_HANDLED;
}
reg = amvdec_read_dos(core, MREG_BUFFEROUT);
if (!reg)
return IRQ_HANDLED;
/* Unclear what this means */
if ((reg & GENMASK(23, 17)) == GENMASK(23, 17))
goto end;
pic_info = amvdec_read_dos(core, MREG_PIC_INFO);
is_progressive = pic_info & PICINFO_PROG;
if (!is_progressive)
field = (pic_info & PICINFO_TOP_FIRST) ?
V4L2_FIELD_INTERLACED_TB :
V4L2_FIELD_INTERLACED_BT;
codec_mpeg12_update_dar(sess);
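/* The firmware reports a 1-based buffer index in the low nibble */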
buffer_index = ((reg & 0xf) - 1) & 7;
offset = amvdec_read_dos(core, MREG_FRAME_OFFSET);
amvdec_dst_buf_done_idx(sess, buffer_index, offset, field);
end:
amvdec_write_dos(core, MREG_BUFFEROUT, 0);
return IRQ_HANDLED;
}
static irqreturn_t codec_mpeg12_isr(struct amvdec_session *sess)
{
return IRQ_WAKE_THREAD;
}
struct amvdec_codec_ops codec_mpeg12_ops = {
.start = codec_mpeg12_start,
.stop = codec_mpeg12_stop,
.isr = codec_mpeg12_isr,
.threaded_isr = codec_mpeg12_threaded_isr,
.can_recycle = codec_mpeg12_can_recycle,
.recycle = codec_mpeg12_recycle,
.eos_sequence = codec_mpeg12_eos_sequence,
};
| linux-master | drivers/staging/media/meson/vdec/codec_mpeg12.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019 BayLibre, SAS
* Author: Maxime Jourdan <[email protected]>
*/
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
#include "vdec_helpers.h"
#include "dos_regs.h"
#include "codec_h264.h"
#define SIZE_EXT_FW (20 * SZ_1K)
#define SIZE_WORKSPACE 0x1ee000
#define SIZE_SEI (8 * SZ_1K)
/*
* Offset added by the firmware which must be subtracted
* from the workspace physical address
*/
#define WORKSPACE_BUF_OFFSET 0x1000000
/* ISR status */
#define CMD_MASK GENMASK(7, 0)
#define CMD_SRC_CHANGE 1
#define CMD_FRAMES_READY 2
#define CMD_FATAL_ERROR 6
#define CMD_BAD_WIDTH 7
#define CMD_BAD_HEIGHT 8
#define SEI_DATA_READY BIT(15)
/* Picture type */
#define PIC_TOP_BOT 5
#define PIC_BOT_TOP 6
/* Size of Motion Vector per macroblock */
#define MB_MV_SIZE 96
/* Frame status data */
#define PIC_STRUCT_BIT 5
#define PIC_STRUCT_MASK GENMASK(2, 0)
#define BUF_IDX_MASK GENMASK(4, 0)
#define ERROR_FLAG BIT(9)
#define OFFSET_BIT 16
#define OFFSET_MASK GENMASK(15, 0)
/* Bitstream parsed data */
#define MB_TOTAL_BIT 8
#define MB_TOTAL_MASK GENMASK(15, 0)
#define MB_WIDTH_MASK GENMASK(7, 0)
#define MAX_REF_BIT 24
#define MAX_REF_MASK GENMASK(6, 0)
#define AR_IDC_BIT 16
#define AR_IDC_MASK GENMASK(7, 0)
#define AR_PRESENT_FLAG BIT(0)
#define AR_EXTEND 0xff
/*
* Buffer to send to the ESPARSER to signal End Of Stream for H.264.
* This is a 16x16 encoded picture that will trigger drain firmware-side.
* There is no known alternative.
*/
static const u8 eos_sequence[SZ_4K] = {
0x00, 0x00, 0x00, 0x01, 0x06, 0x05, 0xff, 0xe4, 0xdc, 0x45, 0xe9, 0xbd,
0xe6, 0xd9, 0x48, 0xb7, 0x96, 0x2c, 0xd8, 0x20, 0xd9, 0x23, 0xee, 0xef,
0x78, 0x32, 0x36, 0x34, 0x20, 0x2d, 0x20, 0x63, 0x6f, 0x72, 0x65, 0x20,
0x36, 0x37, 0x20, 0x72, 0x31, 0x31, 0x33, 0x30, 0x20, 0x38, 0x34, 0x37,
0x35, 0x39, 0x37, 0x37, 0x20, 0x2d, 0x20, 0x48, 0x2e, 0x32, 0x36, 0x34,
0x2f, 0x4d, 0x50, 0x45, 0x47, 0x2d, 0x34, 0x20, 0x41, 0x56, 0x43, 0x20,
0x63, 0x6f, 0x64, 0x65, 0x63, 0x20, 0x2d, 0x20, 0x43, 0x6f, 0x70, 0x79,
0x6c, 0x65, 0x66, 0x74, 0x20, 0x32, 0x30, 0x30, 0x33, 0x2d, 0x32, 0x30,
0x30, 0x39, 0x20, 0x2d, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f,
0x77, 0x77, 0x77, 0x2e, 0x76, 0x69, 0x64, 0x65, 0x6f, 0x6c, 0x61, 0x6e,
0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x78, 0x32, 0x36, 0x34, 0x2e, 0x68, 0x74,
0x6d, 0x6c, 0x20, 0x2d, 0x20, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
0x3a, 0x20, 0x63, 0x61, 0x62, 0x61, 0x63, 0x3d, 0x31, 0x20, 0x72, 0x65,
0x66, 0x3d, 0x31, 0x20, 0x64, 0x65, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x3d,
0x31, 0x3a, 0x30, 0x3a, 0x30, 0x20, 0x61, 0x6e, 0x61, 0x6c, 0x79, 0x73,
0x65, 0x3d, 0x30, 0x78, 0x31, 0x3a, 0x30, 0x78, 0x31, 0x31, 0x31, 0x20,
0x6d, 0x65, 0x3d, 0x68, 0x65, 0x78, 0x20, 0x73, 0x75, 0x62, 0x6d, 0x65,
0x3d, 0x36, 0x20, 0x70, 0x73, 0x79, 0x5f, 0x72, 0x64, 0x3d, 0x31, 0x2e,
0x30, 0x3a, 0x30, 0x2e, 0x30, 0x20, 0x6d, 0x69, 0x78, 0x65, 0x64, 0x5f,
0x72, 0x65, 0x66, 0x3d, 0x30, 0x20, 0x6d, 0x65, 0x5f, 0x72, 0x61, 0x6e,
0x67, 0x65, 0x3d, 0x31, 0x36, 0x20, 0x63, 0x68, 0x72, 0x6f, 0x6d, 0x61,
0x5f, 0x6d, 0x65, 0x3d, 0x31, 0x20, 0x74, 0x72, 0x65, 0x6c, 0x6c, 0x69,
0x73, 0x3d, 0x30, 0x20, 0x38, 0x78, 0x38, 0x64, 0x63, 0x74, 0x3d, 0x30,
0x20, 0x63, 0x71, 0x6d, 0x3d, 0x30, 0x20, 0x64, 0x65, 0x61, 0x64, 0x7a,
0x6f, 0x6e, 0x65, 0x3d, 0x32, 0x31, 0x2c, 0x31, 0x31, 0x20, 0x63, 0x68,
0x72, 0x6f, 0x6d, 0x61, 0x5f, 0x71, 0x70, 0x5f, 0x6f, 0x66, 0x66, 0x73,
0x65, 0x74, 0x3d, 0x2d, 0x32, 0x20, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64,
0x73, 0x3d, 0x31, 0x20, 0x6e, 0x72, 0x3d, 0x30, 0x20, 0x64, 0x65, 0x63,
0x69, 0x6d, 0x61, 0x74, 0x65, 0x3d, 0x31, 0x20, 0x6d, 0x62, 0x61, 0x66,
0x66, 0x3d, 0x30, 0x20, 0x62, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x3d,
0x30, 0x20, 0x6b, 0x65, 0x79, 0x69, 0x6e, 0x74, 0x3d, 0x32, 0x35, 0x30,
0x20, 0x6b, 0x65, 0x79, 0x69, 0x6e, 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x3d,
0x32, 0x35, 0x20, 0x73, 0x63, 0x65, 0x6e, 0x65, 0x63, 0x75, 0x74, 0x3d,
0x34, 0x30, 0x20, 0x72, 0x63, 0x3d, 0x61, 0x62, 0x72, 0x20, 0x62, 0x69,
0x74, 0x72, 0x61, 0x74, 0x65, 0x3d, 0x31, 0x30, 0x20, 0x72, 0x61, 0x74,
0x65, 0x74, 0x6f, 0x6c, 0x3d, 0x31, 0x2e, 0x30, 0x20, 0x71, 0x63, 0x6f,
0x6d, 0x70, 0x3d, 0x30, 0x2e, 0x36, 0x30, 0x20, 0x71, 0x70, 0x6d, 0x69,
0x6e, 0x3d, 0x31, 0x30, 0x20, 0x71, 0x70, 0x6d, 0x61, 0x78, 0x3d, 0x35,
0x31, 0x20, 0x71, 0x70, 0x73, 0x74, 0x65, 0x70, 0x3d, 0x34, 0x20, 0x69,
0x70, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x3d, 0x31, 0x2e, 0x34, 0x30,
0x20, 0x61, 0x71, 0x3d, 0x31, 0x3a, 0x31, 0x2e, 0x30, 0x30, 0x00, 0x80,
0x00, 0x00, 0x00, 0x01, 0x67, 0x4d, 0x40, 0x0a, 0x9a, 0x74, 0xf4, 0x20,
0x00, 0x00, 0x03, 0x00, 0x20, 0x00, 0x00, 0x06, 0x51, 0xe2, 0x44, 0xd4,
0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x32, 0xc8, 0x00, 0x00, 0x00, 0x01,
0x65, 0x88, 0x80, 0x20, 0x00, 0x08, 0x7f, 0xea, 0x6a, 0xe2, 0x99, 0xb6,
0x57, 0xae, 0x49, 0x30, 0xf5, 0xfe, 0x5e, 0x46, 0x0b, 0x72, 0x44, 0xc4,
0xe1, 0xfc, 0x62, 0xda, 0xf1, 0xfb, 0xa2, 0xdb, 0xd6, 0xbe, 0x5c, 0xd7,
0x24, 0xa3, 0xf5, 0xb9, 0x2f, 0x57, 0x16, 0x49, 0x75, 0x47, 0x77, 0x09,
0x5c, 0xa1, 0xb4, 0xc3, 0x4f, 0x60, 0x2b, 0xb0, 0x0c, 0xc8, 0xd6, 0x66,
0xba, 0x9b, 0x82, 0x29, 0x33, 0x92, 0x26, 0x99, 0x31, 0x1c, 0x7f, 0x9b,
0x00, 0x00, 0x01, 0xff,
};
static const u8 *codec_h264_eos_sequence(u32 *len)
{
*len = ARRAY_SIZE(eos_sequence);
return eos_sequence;
}
struct codec_h264 {
/* H.264 decoder requires an extended firmware */
void *ext_fw_vaddr;
dma_addr_t ext_fw_paddr;
/* Buffer for the H.264 Workspace */
void *workspace_vaddr;
dma_addr_t workspace_paddr;
/* Buffer for the H.264 references MV */
void *ref_vaddr;
dma_addr_t ref_paddr;
u32 ref_size;
/* Buffer for parsed SEI data */
void *sei_vaddr;
dma_addr_t sei_paddr;
u32 mb_width;
u32 mb_height;
u32 max_refs;
};
static int codec_h264_can_recycle(struct amvdec_core *core)
{
return !amvdec_read_dos(core, AV_SCRATCH_7) ||
!amvdec_read_dos(core, AV_SCRATCH_8);
}
static void codec_h264_recycle(struct amvdec_core *core, u32 buf_idx)
{
/*
* Tell the firmware it can recycle this buffer.
* AV_SCRATCH_8 serves the same purpose.
*/
if (!amvdec_read_dos(core, AV_SCRATCH_7))
amvdec_write_dos(core, AV_SCRATCH_7, buf_idx + 1);
else
amvdec_write_dos(core, AV_SCRATCH_8, buf_idx + 1);
}
static int codec_h264_start(struct amvdec_session *sess)
{
u32 workspace_offset;
struct amvdec_core *core = sess->core;
struct codec_h264 *h264 = sess->priv;
/* Allocate some memory for the H.264 decoder's state */
h264->workspace_vaddr =
dma_alloc_coherent(core->dev, SIZE_WORKSPACE,
&h264->workspace_paddr, GFP_KERNEL);
if (!h264->workspace_vaddr)
return -ENOMEM;
/* Allocate some memory for the H.264 SEI dump */
h264->sei_vaddr = dma_alloc_coherent(core->dev, SIZE_SEI,
&h264->sei_paddr, GFP_KERNEL);
if (!h264->sei_vaddr)
return -ENOMEM;
amvdec_write_dos_bits(core, POWER_CTL_VLD, BIT(9) | BIT(6));
workspace_offset = h264->workspace_paddr - WORKSPACE_BUF_OFFSET;
amvdec_write_dos(core, AV_SCRATCH_1, workspace_offset);
amvdec_write_dos(core, AV_SCRATCH_G, h264->ext_fw_paddr);
amvdec_write_dos(core, AV_SCRATCH_I, h264->sei_paddr -
workspace_offset);
/* Enable "error correction" */
amvdec_write_dos(core, AV_SCRATCH_F,
(amvdec_read_dos(core, AV_SCRATCH_F) & 0xffffffc3) |
BIT(4) | BIT(7));
amvdec_write_dos(core, MDEC_PIC_DC_THRESH, 0x404038aa);
return 0;
}
static int codec_h264_stop(struct amvdec_session *sess)
{
struct codec_h264 *h264 = sess->priv;
struct amvdec_core *core = sess->core;
if (h264->ext_fw_vaddr)
dma_free_coherent(core->dev, SIZE_EXT_FW,
h264->ext_fw_vaddr, h264->ext_fw_paddr);
if (h264->workspace_vaddr)
dma_free_coherent(core->dev, SIZE_WORKSPACE,
h264->workspace_vaddr, h264->workspace_paddr);
if (h264->ref_vaddr)
dma_free_coherent(core->dev, h264->ref_size,
h264->ref_vaddr, h264->ref_paddr);
if (h264->sei_vaddr)
dma_free_coherent(core->dev, SIZE_SEI,
h264->sei_vaddr, h264->sei_paddr);
return 0;
}
static int codec_h264_load_extended_firmware(struct amvdec_session *sess,
const u8 *data, u32 len)
{
struct codec_h264 *h264;
struct amvdec_core *core = sess->core;
if (len < SIZE_EXT_FW)
return -EINVAL;
h264 = kzalloc(sizeof(*h264), GFP_KERNEL);
if (!h264)
return -ENOMEM;
h264->ext_fw_vaddr = dma_alloc_coherent(core->dev, SIZE_EXT_FW,
&h264->ext_fw_paddr,
GFP_KERNEL);
if (!h264->ext_fw_vaddr) {
kfree(h264);
return -ENOMEM;
}
memcpy(h264->ext_fw_vaddr, data, SIZE_EXT_FW);
sess->priv = h264;
return 0;
}
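/*
 * Pixel aspect ratios indexed by aspect_ratio_idc, as listed in H.264
 * Annex E (Table E-1); index 0 ("unspecified") is mapped to 1:1 here.
 */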
static const struct v4l2_fract par_table[] = {
{ 1, 1 }, { 1, 1 }, { 12, 11 }, { 10, 11 },
{ 16, 11 }, { 40, 33 }, { 24, 11 }, { 20, 11 },
{ 32, 11 }, { 80, 33 }, { 18, 11 }, { 15, 11 },
{ 64, 33 }, { 160, 99 }, { 4, 3 }, { 3, 2 },
{ 2, 1 }
};
static void codec_h264_set_par(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
u32 seq_info = amvdec_read_dos(core, AV_SCRATCH_2);
u32 ar_idc = (seq_info >> AR_IDC_BIT) & AR_IDC_MASK;
if (!(seq_info & AR_PRESENT_FLAG))
return;
if (ar_idc == AR_EXTEND) {
u32 ar_info = amvdec_read_dos(core, AV_SCRATCH_3);
sess->pixelaspect.numerator = ar_info & 0xffff;
sess->pixelaspect.denominator = (ar_info >> 16) & 0xffff;
return;
}
if (ar_idc >= ARRAY_SIZE(par_table))
return;
sess->pixelaspect = par_table[ar_idc];
}
static void codec_h264_resume(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
struct codec_h264 *h264 = sess->priv;
u32 mb_width, mb_height, mb_total;
amvdec_set_canvases(sess,
(u32[]){ ANC0_CANVAS_ADDR, 0 },
(u32[]){ 24, 0 });
dev_dbg(core->dev, "max_refs = %u; actual_dpb_size = %u\n",
h264->max_refs, sess->num_dst_bufs);
/* Align to a multiple of 4 macroblocks */
mb_width = ALIGN(h264->mb_width, 4);
mb_height = ALIGN(h264->mb_height, 4);
mb_total = mb_width * mb_height;
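/* One MB_MV_SIZE-byte motion vector buffer per macroblock, per reference */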
h264->ref_size = mb_total * MB_MV_SIZE * h264->max_refs;
h264->ref_vaddr = dma_alloc_coherent(core->dev, h264->ref_size,
&h264->ref_paddr, GFP_KERNEL);
if (!h264->ref_vaddr) {
amvdec_abort(sess);
return;
}
/* Address to store the references' MVs */
amvdec_write_dos(core, AV_SCRATCH_1, h264->ref_paddr);
/* End of ref MV */
amvdec_write_dos(core, AV_SCRATCH_4, h264->ref_paddr + h264->ref_size);
amvdec_write_dos(core, AV_SCRATCH_0, (h264->max_refs << 24) |
(sess->num_dst_bufs << 16) |
((h264->max_refs - 1) << 8));
}
/*
* Configure the H.264 decoder when the parser detected a parameter set change
*/
static void codec_h264_src_change(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
struct codec_h264 *h264 = sess->priv;
u32 parsed_info, mb_total;
u32 crop_infor, crop_bottom, crop_right;
u32 frame_width, frame_height;
sess->keyframe_found = 1;
parsed_info = amvdec_read_dos(core, AV_SCRATCH_1);
/* Total number of 16x16 macroblocks */
mb_total = (parsed_info >> MB_TOTAL_BIT) & MB_TOTAL_MASK;
/* Number of macroblocks per line */
h264->mb_width = parsed_info & MB_WIDTH_MASK;
/* Number of macroblock lines */
h264->mb_height = mb_total / h264->mb_width;
h264->max_refs = ((parsed_info >> MAX_REF_BIT) & MAX_REF_MASK) + 1;
crop_infor = amvdec_read_dos(core, AV_SCRATCH_6);
crop_bottom = (crop_infor & 0xff);
crop_right = (crop_infor >> 16) & 0xff;
frame_width = h264->mb_width * 16 - crop_right;
frame_height = h264->mb_height * 16 - crop_bottom;
dev_dbg(core->dev, "frame: %ux%u; crop: %u %u\n",
frame_width, frame_height, crop_right, crop_bottom);
codec_h264_set_par(sess);
amvdec_src_change(sess, frame_width, frame_height, h264->max_refs + 5);
}
/*
* The bitstream offset is split in half across 2 different registers.
* Fetch its MSB here; its location depends on the frame number.
*/
static u32 get_offset_msb(struct amvdec_core *core, int frame_num)
{
int take_msb = frame_num % 2;
int reg_offset = (frame_num / 2) * 4;
u32 offset_msb = amvdec_read_dos(core, AV_SCRATCH_A + reg_offset);
if (take_msb)
return offset_msb & 0xffff0000;
return (offset_msb & 0x0000ffff) << 16;
}
static void codec_h264_frames_ready(struct amvdec_session *sess, u32 status)
{
struct amvdec_core *core = sess->core;
int error_count;
int num_frames;
int i;
error_count = amvdec_read_dos(core, AV_SCRATCH_D);
num_frames = (status >> 8) & 0xff;
if (error_count) {
dev_warn(core->dev,
"decoder error(s) happened, count %d\n", error_count);
amvdec_write_dos(core, AV_SCRATCH_D, 0);
}
for (i = 0; i < num_frames; i++) {
u32 frame_status = amvdec_read_dos(core, AV_SCRATCH_1 + i * 4);
u32 buffer_index = frame_status & BUF_IDX_MASK;
u32 pic_struct = (frame_status >> PIC_STRUCT_BIT) &
PIC_STRUCT_MASK;
u32 offset = (frame_status >> OFFSET_BIT) & OFFSET_MASK;
u32 field = V4L2_FIELD_NONE;
/*
* A buffer decode error means it was decoded,
* but part of the picture will have artifacts.
* A typical reason is a temporarily corrupted bitstream.
*/
if (frame_status & ERROR_FLAG)
dev_dbg(core->dev, "Buffer %d decode error\n",
buffer_index);
if (pic_struct == PIC_TOP_BOT)
field = V4L2_FIELD_INTERLACED_TB;
else if (pic_struct == PIC_BOT_TOP)
field = V4L2_FIELD_INTERLACED_BT;
offset |= get_offset_msb(core, i);
amvdec_dst_buf_done_idx(sess, buffer_index, offset, field);
}
}
static irqreturn_t codec_h264_threaded_isr(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
u32 status;
u32 size;
u8 cmd;
status = amvdec_read_dos(core, AV_SCRATCH_0);
cmd = status & CMD_MASK;
switch (cmd) {
case CMD_SRC_CHANGE:
codec_h264_src_change(sess);
break;
case CMD_FRAMES_READY:
codec_h264_frames_ready(sess, status);
break;
case CMD_FATAL_ERROR:
dev_err(core->dev, "H.264 decoder fatal error\n");
goto abort;
case CMD_BAD_WIDTH:
size = (amvdec_read_dos(core, AV_SCRATCH_1) + 1) * 16;
dev_err(core->dev, "Unsupported video width: %u\n", size);
goto abort;
case CMD_BAD_HEIGHT:
size = (amvdec_read_dos(core, AV_SCRATCH_1) + 1) * 16;
dev_err(core->dev, "Unsupported video height: %u\n", size);
goto abort;
case 0: /* Unused but not worth printing for */
case 9:
break;
default:
dev_info(core->dev, "Unexpected H264 ISR: %08X\n", cmd);
break;
}
if (cmd && cmd != CMD_SRC_CHANGE)
amvdec_write_dos(core, AV_SCRATCH_0, 0);
/* Decoder has some SEI data for us; ignore it */
if (amvdec_read_dos(core, AV_SCRATCH_J) & SEI_DATA_READY)
amvdec_write_dos(core, AV_SCRATCH_J, 0);
return IRQ_HANDLED;
abort:
amvdec_abort(sess);
return IRQ_HANDLED;
}
static irqreturn_t codec_h264_isr(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
amvdec_write_dos(core, ASSIST_MBOX1_CLR_REG, 1);
return IRQ_WAKE_THREAD;
}
struct amvdec_codec_ops codec_h264_ops = {
.start = codec_h264_start,
.stop = codec_h264_stop,
.load_extended_firmware = codec_h264_load_extended_firmware,
.isr = codec_h264_isr,
.threaded_isr = codec_h264_threaded_isr,
.can_recycle = codec_h264_can_recycle,
.recycle = codec_h264_recycle,
.eos_sequence = codec_h264_eos_sequence,
.resume = codec_h264_resume,
};
| linux-master | drivers/staging/media/meson/vdec/codec_h264.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2018 Maxime Jourdan <[email protected]>
*/
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
#include "codec_hevc_common.h"
#include "vdec_helpers.h"
#include "hevc_regs.h"
#define MMU_COMPRESS_HEADER_SIZE 0x48000
#define MMU_MAP_SIZE 0x4800
const u16 vdec_hevc_parser_cmd[] = {
0x0401, 0x8401, 0x0800, 0x0402,
0x9002, 0x1423, 0x8CC3, 0x1423,
0x8804, 0x9825, 0x0800, 0x04FE,
0x8406, 0x8411, 0x1800, 0x8408,
0x8409, 0x8C2A, 0x9C2B, 0x1C00,
0x840F, 0x8407, 0x8000, 0x8408,
0x2000, 0xA800, 0x8410, 0x04DE,
0x840C, 0x840D, 0xAC00, 0xA000,
0x08C0, 0x08E0, 0xA40E, 0xFC00,
0x7C00
};
/* Configure decode head read mode */
void codec_hevc_setup_decode_head(struct amvdec_session *sess, int is_10bit)
{
struct amvdec_core *core = sess->core;
u32 body_size = amvdec_am21c_body_size(sess->width, sess->height);
u32 head_size = amvdec_am21c_head_size(sess->width, sess->height);
if (!codec_hevc_use_fbc(sess->pixfmt_cap, is_10bit)) {
/* Enable 2-plane reference read mode */
amvdec_write_dos(core, HEVCD_MPP_DECOMP_CTL1, BIT(31));
return;
}
if (codec_hevc_use_mmu(core->platform->revision,
sess->pixfmt_cap, is_10bit))
amvdec_write_dos(core, HEVCD_MPP_DECOMP_CTL1, BIT(4));
else
amvdec_write_dos(core, HEVCD_MPP_DECOMP_CTL1, 0);
if (core->platform->revision < VDEC_REVISION_SM1)
amvdec_write_dos(core, HEVCD_MPP_DECOMP_CTL2, body_size / 32);
amvdec_write_dos(core, HEVC_CM_BODY_LENGTH, body_size);
amvdec_write_dos(core, HEVC_CM_HEADER_OFFSET, body_size);
amvdec_write_dos(core, HEVC_CM_HEADER_LENGTH, head_size);
}
EXPORT_SYMBOL_GPL(codec_hevc_setup_decode_head);
static void codec_hevc_setup_buffers_gxbb(struct amvdec_session *sess,
struct codec_hevc_common *comm,
int is_10bit)
{
struct amvdec_core *core = sess->core;
struct v4l2_m2m_buffer *buf;
u32 buf_num = v4l2_m2m_num_dst_bufs_ready(sess->m2m_ctx);
dma_addr_t buf_y_paddr = 0;
dma_addr_t buf_uv_paddr = 0;
u32 idx = 0;
u32 val;
int i;
amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0);
v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) {
struct vb2_buffer *vb = &buf->vb.vb2_buf;
idx = vb->index;
if (codec_hevc_use_downsample(sess->pixfmt_cap, is_10bit))
buf_y_paddr = comm->fbc_buffer_paddr[idx];
else
buf_y_paddr = vb2_dma_contig_plane_dma_addr(vb, 0);
if (codec_hevc_use_fbc(sess->pixfmt_cap, is_10bit)) {
val = buf_y_paddr | (idx << 8) | 1;
amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
val);
} else {
buf_uv_paddr = vb2_dma_contig_plane_dma_addr(vb, 1);
val = buf_y_paddr | ((idx * 2) << 8) | 1;
amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
val);
val = buf_uv_paddr | ((idx * 2 + 1) << 8) | 1;
amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
val);
}
}
if (codec_hevc_use_fbc(sess->pixfmt_cap, is_10bit))
val = buf_y_paddr | (idx << 8) | 1;
else
val = buf_y_paddr | ((idx * 2) << 8) | 1;
/* Fill the remaining unused slots with the last buffer's Y addr */
for (i = buf_num; i < MAX_REF_PIC_NUM; ++i)
amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, val);
amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 1);
amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, 1);
for (i = 0; i < 32; ++i)
amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0);
}
static void codec_hevc_setup_buffers_gxl(struct amvdec_session *sess,
struct codec_hevc_common *comm,
int is_10bit)
{
struct amvdec_core *core = sess->core;
struct v4l2_m2m_buffer *buf;
u32 revision = core->platform->revision;
u32 pixfmt_cap = sess->pixfmt_cap;
int i;
amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR,
BIT(2) | BIT(1));
v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) {
struct vb2_buffer *vb = &buf->vb.vb2_buf;
dma_addr_t buf_y_paddr = 0;
dma_addr_t buf_uv_paddr = 0;
u32 idx = vb->index;
if (codec_hevc_use_mmu(revision, pixfmt_cap, is_10bit))
buf_y_paddr = comm->mmu_header_paddr[idx];
else if (codec_hevc_use_downsample(pixfmt_cap, is_10bit))
buf_y_paddr = comm->fbc_buffer_paddr[idx];
else
buf_y_paddr = vb2_dma_contig_plane_dma_addr(vb, 0);
amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_DATA,
buf_y_paddr >> 5);
if (!codec_hevc_use_fbc(pixfmt_cap, is_10bit)) {
buf_uv_paddr = vb2_dma_contig_plane_dma_addr(vb, 1);
amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_DATA,
buf_uv_paddr >> 5);
}
}
amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 1);
amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, 1);
for (i = 0; i < 32; ++i)
amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0);
}
void codec_hevc_free_fbc_buffers(struct amvdec_session *sess,
struct codec_hevc_common *comm)
{
struct device *dev = sess->core->dev;
u32 am21_size = amvdec_am21c_size(sess->width, sess->height);
int i;
for (i = 0; i < MAX_REF_PIC_NUM; ++i) {
if (comm->fbc_buffer_vaddr[i]) {
dma_free_coherent(dev, am21_size,
comm->fbc_buffer_vaddr[i],
comm->fbc_buffer_paddr[i]);
comm->fbc_buffer_vaddr[i] = NULL;
}
}
}
EXPORT_SYMBOL_GPL(codec_hevc_free_fbc_buffers);
static int codec_hevc_alloc_fbc_buffers(struct amvdec_session *sess,
struct codec_hevc_common *comm)
{
struct device *dev = sess->core->dev;
struct v4l2_m2m_buffer *buf;
u32 am21_size = amvdec_am21c_size(sess->width, sess->height);
v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) {
u32 idx = buf->vb.vb2_buf.index;
dma_addr_t paddr;
void *vaddr = dma_alloc_coherent(dev, am21_size, &paddr,
GFP_KERNEL);
if (!vaddr) {
codec_hevc_free_fbc_buffers(sess, comm);
return -ENOMEM;
}
comm->fbc_buffer_vaddr[idx] = vaddr;
comm->fbc_buffer_paddr[idx] = paddr;
}
return 0;
}
void codec_hevc_free_mmu_headers(struct amvdec_session *sess,
struct codec_hevc_common *comm)
{
struct device *dev = sess->core->dev;
int i;
for (i = 0; i < MAX_REF_PIC_NUM; ++i) {
if (comm->mmu_header_vaddr[i]) {
dma_free_coherent(dev, MMU_COMPRESS_HEADER_SIZE,
comm->mmu_header_vaddr[i],
comm->mmu_header_paddr[i]);
comm->mmu_header_vaddr[i] = NULL;
}
}
if (comm->mmu_map_vaddr) {
dma_free_coherent(dev, MMU_MAP_SIZE,
comm->mmu_map_vaddr,
comm->mmu_map_paddr);
comm->mmu_map_vaddr = NULL;
}
}
EXPORT_SYMBOL_GPL(codec_hevc_free_mmu_headers);
static int codec_hevc_alloc_mmu_headers(struct amvdec_session *sess,
struct codec_hevc_common *comm)
{
struct device *dev = sess->core->dev;
struct v4l2_m2m_buffer *buf;
comm->mmu_map_vaddr = dma_alloc_coherent(dev, MMU_MAP_SIZE,
&comm->mmu_map_paddr,
GFP_KERNEL);
if (!comm->mmu_map_vaddr)
return -ENOMEM;
v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) {
u32 idx = buf->vb.vb2_buf.index;
dma_addr_t paddr;
void *vaddr = dma_alloc_coherent(dev, MMU_COMPRESS_HEADER_SIZE,
&paddr, GFP_KERNEL);
if (!vaddr) {
codec_hevc_free_mmu_headers(sess, comm);
return -ENOMEM;
}
comm->mmu_header_vaddr[idx] = vaddr;
comm->mmu_header_paddr[idx] = paddr;
}
return 0;
}
int codec_hevc_setup_buffers(struct amvdec_session *sess,
struct codec_hevc_common *comm,
int is_10bit)
{
struct amvdec_core *core = sess->core;
int ret;
if (codec_hevc_use_downsample(sess->pixfmt_cap, is_10bit)) {
ret = codec_hevc_alloc_fbc_buffers(sess, comm);
if (ret)
return ret;
}
if (codec_hevc_use_mmu(core->platform->revision,
sess->pixfmt_cap, is_10bit)) {
ret = codec_hevc_alloc_mmu_headers(sess, comm);
if (ret) {
codec_hevc_free_fbc_buffers(sess, comm);
return ret;
}
}
if (core->platform->revision == VDEC_REVISION_GXBB)
codec_hevc_setup_buffers_gxbb(sess, comm, is_10bit);
else
codec_hevc_setup_buffers_gxl(sess, comm, is_10bit);
return 0;
}
EXPORT_SYMBOL_GPL(codec_hevc_setup_buffers);
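/*
 * Fill the decoder MMU map with the list of page frame numbers backing the
 * destination buffer, which is assumed to be physically contiguous.
 */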
void codec_hevc_fill_mmu_map(struct amvdec_session *sess,
struct codec_hevc_common *comm,
struct vb2_buffer *vb)
{
u32 size = amvdec_am21c_size(sess->width, sess->height);
u32 nb_pages = size / PAGE_SIZE;
u32 *mmu_map = comm->mmu_map_vaddr;
u32 first_page;
u32 i;
if (sess->pixfmt_cap == V4L2_PIX_FMT_NV12M)
first_page = comm->fbc_buffer_paddr[vb->index] >> PAGE_SHIFT;
else
first_page = vb2_dma_contig_plane_dma_addr(vb, 0) >> PAGE_SHIFT;
for (i = 0; i < nb_pages; ++i)
mmu_map[i] = first_page + i;
}
EXPORT_SYMBOL_GPL(codec_hevc_fill_mmu_map);
| linux-master | drivers/staging/media/meson/vdec/codec_hevc_common.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2018 BayLibre, SAS
* Author: Maxime Jourdan <[email protected]>
*
* The Elementary Stream Parser is a hardware bitstream parser.
* It reads bitstream buffers and feeds them to the VIFIFO.
*/
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/reset.h>
#include <linux/interrupt.h>
#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-mem2mem.h>
#include "dos_regs.h"
#include "esparser.h"
#include "vdec_helpers.h"
/* PARSER REGS (CBUS) */
#define PARSER_CONTROL 0x00
#define ES_PACK_SIZE_BIT 8
#define ES_WRITE BIT(5)
#define ES_SEARCH BIT(1)
#define ES_PARSER_START BIT(0)
#define PARSER_FETCH_ADDR 0x4
#define PARSER_FETCH_CMD 0x8
#define PARSER_CONFIG 0x14
#define PS_CFG_MAX_FETCH_CYCLE_BIT 0
#define PS_CFG_STARTCODE_WID_24_BIT 10
#define PS_CFG_MAX_ES_WR_CYCLE_BIT 12
#define PS_CFG_PFIFO_EMPTY_CNT_BIT 16
#define PFIFO_WR_PTR 0x18
#define PFIFO_RD_PTR 0x1c
#define PARSER_SEARCH_PATTERN 0x24
#define ES_START_CODE_PATTERN 0x00000100
#define PARSER_SEARCH_MASK 0x28
#define ES_START_CODE_MASK 0xffffff00
#define FETCH_ENDIAN_BIT 27
#define PARSER_INT_ENABLE 0x2c
#define PARSER_INT_HOST_EN_BIT 8
#define PARSER_INT_STATUS 0x30
#define PARSER_INTSTAT_SC_FOUND 1
#define PARSER_ES_CONTROL 0x5c
#define PARSER_VIDEO_START_PTR 0x80
#define PARSER_VIDEO_END_PTR 0x84
#define PARSER_VIDEO_WP 0x88
#define PARSER_VIDEO_HOLE 0x90
#define SEARCH_PATTERN_LEN 512
#define VP9_HEADER_SIZE 16
static DECLARE_WAIT_QUEUE_HEAD(wq);
static int search_done;
static irqreturn_t esparser_isr(int irq, void *dev)
{
int int_status;
struct amvdec_core *core = dev;
int_status = amvdec_read_parser(core, PARSER_INT_STATUS);
amvdec_write_parser(core, PARSER_INT_STATUS, int_status);
if (int_status & PARSER_INTSTAT_SC_FOUND) {
amvdec_write_parser(core, PFIFO_RD_PTR, 0);
amvdec_write_parser(core, PFIFO_WR_PTR, 0);
search_done = 1;
wake_up_interruptible(&wq);
}
return IRQ_HANDLED;
}
/*
* VP9 frame headers need to be appended by a 16-byte long
* Amlogic custom header
*/
static int vp9_update_header(struct amvdec_core *core, struct vb2_buffer *buf)
{
u8 *dp;
u8 marker;
int dsize;
int num_frames, cur_frame;
int cur_mag, mag, mag_ptr;
int frame_size[8], tot_frame_size[8];
int total_datasize = 0;
int new_frame_size;
unsigned char *old_header = NULL;
dp = (uint8_t *)vb2_plane_vaddr(buf, 0);
dsize = vb2_get_plane_payload(buf, 0);
if (dsize == vb2_plane_size(buf, 0)) {
dev_warn(core->dev, "%s: unable to update header\n", __func__);
return 0;
}
marker = dp[dsize - 1];
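/*
 * A 0b110xxxxx marker byte at the end of the buffer indicates a VP9
 * superframe index: bits 2:0 hold the frame count minus one and bits 4:3
 * the number of bytes used to code each frame size.
 */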
if ((marker & 0xe0) == 0xc0) {
num_frames = (marker & 0x7) + 1;
mag = ((marker >> 3) & 0x3) + 1;
mag_ptr = dsize - mag * num_frames - 2;
if (dp[mag_ptr] != marker)
return 0;
mag_ptr++;
for (cur_frame = 0; cur_frame < num_frames; cur_frame++) {
frame_size[cur_frame] = 0;
for (cur_mag = 0; cur_mag < mag; cur_mag++) {
frame_size[cur_frame] |=
(dp[mag_ptr] << (cur_mag * 8));
mag_ptr++;
}
if (cur_frame == 0)
tot_frame_size[cur_frame] =
frame_size[cur_frame];
else
tot_frame_size[cur_frame] =
tot_frame_size[cur_frame - 1] +
frame_size[cur_frame];
total_datasize += frame_size[cur_frame];
}
} else {
num_frames = 1;
frame_size[0] = dsize;
tot_frame_size[0] = dsize;
total_datasize = dsize;
}
new_frame_size = total_datasize + num_frames * VP9_HEADER_SIZE;
if (new_frame_size >= vb2_plane_size(buf, 0)) {
dev_warn(core->dev, "%s: unable to update header\n", __func__);
return 0;
}
for (cur_frame = num_frames - 1; cur_frame >= 0; cur_frame--) {
int framesize = frame_size[cur_frame];
int framesize_header = framesize + 4;
int oldframeoff = tot_frame_size[cur_frame] - framesize;
int outheaderoff = oldframeoff + cur_frame * VP9_HEADER_SIZE;
u8 *fdata = dp + outheaderoff;
u8 *old_framedata = dp + oldframeoff;
memmove(fdata + VP9_HEADER_SIZE, old_framedata, framesize);
fdata[0] = (framesize_header >> 24) & 0xff;
fdata[1] = (framesize_header >> 16) & 0xff;
fdata[2] = (framesize_header >> 8) & 0xff;
fdata[3] = (framesize_header >> 0) & 0xff;
fdata[4] = ((framesize_header >> 24) & 0xff) ^ 0xff;
fdata[5] = ((framesize_header >> 16) & 0xff) ^ 0xff;
fdata[6] = ((framesize_header >> 8) & 0xff) ^ 0xff;
fdata[7] = ((framesize_header >> 0) & 0xff) ^ 0xff;
fdata[8] = 0;
fdata[9] = 0;
fdata[10] = 0;
fdata[11] = 1;
fdata[12] = 'A';
fdata[13] = 'M';
fdata[14] = 'L';
fdata[15] = 'V';
if (!old_header) {
/* nothing */
} else if (old_header > fdata + 16 + framesize) {
dev_dbg(core->dev, "%s: data has gaps, setting to 0\n",
__func__);
memset(fdata + 16 + framesize, 0,
(old_header - fdata + 16 + framesize));
} else if (old_header < fdata + 16 + framesize) {
dev_err(core->dev, "%s: data overwritten\n", __func__);
}
old_header = fdata;
}
return new_frame_size;
}
/* Pad the packet to at least 4 KiB, otherwise the VDEC unit won't trigger
* ISRs.
* Also append a start code 000001ff at the end to trigger
* the ESPARSER interrupt.
*/
static u32 esparser_pad_start_code(struct amvdec_core *core,
struct vb2_buffer *vb,
u32 payload_size)
{
u32 pad_size = 0;
u8 *vaddr = vb2_plane_vaddr(vb, 0);
if (payload_size < ESPARSER_MIN_PACKET_SIZE) {
pad_size = ESPARSER_MIN_PACKET_SIZE - payload_size;
memset(vaddr + payload_size, 0, pad_size);
}
if ((payload_size + pad_size + SEARCH_PATTERN_LEN) >
vb2_plane_size(vb, 0)) {
dev_warn(core->dev, "%s: unable to pad start code\n", __func__);
return pad_size;
}
memset(vaddr + payload_size + pad_size, 0, SEARCH_PATTERN_LEN);
vaddr[payload_size + pad_size] = 0x00;
vaddr[payload_size + pad_size + 1] = 0x00;
vaddr[payload_size + pad_size + 2] = 0x01;
vaddr[payload_size + pad_size + 3] = 0xff;
return pad_size;
}
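/*
 * Push one packet through the ESPARSER: reset the parser FIFO pointers,
 * program a write + search command with the packet size, point the fetch
 * engine at the DMA buffer, then wait (200ms at most) for the
 * start-code-found interrupt to signal completion.
 */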
static int
esparser_write_data(struct amvdec_core *core, dma_addr_t addr, u32 size)
{
amvdec_write_parser(core, PFIFO_RD_PTR, 0);
amvdec_write_parser(core, PFIFO_WR_PTR, 0);
amvdec_write_parser(core, PARSER_CONTROL,
ES_WRITE |
ES_PARSER_START |
ES_SEARCH |
(size << ES_PACK_SIZE_BIT));
amvdec_write_parser(core, PARSER_FETCH_ADDR, addr);
amvdec_write_parser(core, PARSER_FETCH_CMD,
(7 << FETCH_ENDIAN_BIT) |
(size + SEARCH_PATTERN_LEN));
search_done = 0;
return wait_event_interruptible_timeout(wq, search_done, (HZ / 5));
}
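/*
 * Estimate the free space left in the VIFIFO, accounting for the bitstream
 * already queued, the "hole" reported by the parser and its internal FIFO.
 */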
static u32 esparser_vififo_get_free_space(struct amvdec_session *sess)
{
u32 vififo_usage;
struct amvdec_ops *vdec_ops = sess->fmt_out->vdec_ops;
struct amvdec_core *core = sess->core;
vififo_usage = vdec_ops->vififo_level(sess);
vififo_usage += amvdec_read_parser(core, PARSER_VIDEO_HOLE);
vififo_usage += (6 * SZ_1K); /* 6 KiB internal FIFO */
if (vififo_usage > sess->vififo_size) {
dev_warn(sess->core->dev,
"VIFIFO usage (%u) > VIFIFO size (%u)\n",
vififo_usage, sess->vififo_size);
return 0;
}
return sess->vififo_size - vififo_usage;
}
int esparser_queue_eos(struct amvdec_core *core, const u8 *data, u32 len)
{
struct device *dev = core->dev;
void *eos_vaddr;
dma_addr_t eos_paddr;
int ret;
eos_vaddr = dma_alloc_coherent(dev, len + SEARCH_PATTERN_LEN,
&eos_paddr, GFP_KERNEL);
if (!eos_vaddr)
return -ENOMEM;
memcpy(eos_vaddr, data, len);
ret = esparser_write_data(core, eos_paddr, len);
dma_free_coherent(dev, len + SEARCH_PATTERN_LEN,
eos_vaddr, eos_paddr);
return ret;
}
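/*
 * The ESPARSER write pointer wraps around the VIFIFO; keep a monotonically
 * increasing offset by counting the wrap-arounds.
 */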
static u32 esparser_get_offset(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
u32 offset = amvdec_read_parser(core, PARSER_VIDEO_WP) -
sess->vififo_paddr;
if (offset < sess->last_offset)
sess->wrap_count++;
sess->last_offset = offset;
offset += (sess->wrap_count * sess->vififo_size);
return offset;
}
static int
esparser_queue(struct amvdec_session *sess, struct vb2_v4l2_buffer *vbuf)
{
int ret;
struct vb2_buffer *vb = &vbuf->vb2_buf;
struct amvdec_core *core = sess->core;
struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
u32 payload_size = vb2_get_plane_payload(vb, 0);
dma_addr_t phy = vb2_dma_contig_plane_dma_addr(vb, 0);
u32 num_dst_bufs = 0;
u32 offset;
u32 pad_size;
/*
* When the maximum number of reference frames is held by VP9, this should
* be -= 3 to prevent a shortage of CAPTURE buffers on the decoder side.
* In the future, a good enhancement of the way this is handled could be
* to notify the decoding modules of newly queued capture buffers, so that
* they could pause when no capture buffer is available and resume on this
* notification.
*/
if (sess->fmt_out->pixfmt == V4L2_PIX_FMT_VP9) {
if (codec_ops->num_pending_bufs)
num_dst_bufs = codec_ops->num_pending_bufs(sess);
num_dst_bufs += v4l2_m2m_num_dst_bufs_ready(sess->m2m_ctx);
num_dst_bufs -= 3;
if (esparser_vififo_get_free_space(sess) < payload_size ||
atomic_read(&sess->esparser_queued_bufs) >= num_dst_bufs)
return -EAGAIN;
} else if (esparser_vififo_get_free_space(sess) < payload_size) {
return -EAGAIN;
}
v4l2_m2m_src_buf_remove_by_buf(sess->m2m_ctx, vbuf);
offset = esparser_get_offset(sess);
ret = amvdec_add_ts(sess, vb->timestamp, vbuf->timecode, offset, vbuf->flags);
if (ret) {
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
return ret;
}
dev_dbg(core->dev, "esparser: ts = %llu pld_size = %u offset = %08X flags = %08X\n",
vb->timestamp, payload_size, offset, vbuf->flags);
vbuf->flags = 0;
vbuf->field = V4L2_FIELD_NONE;
vbuf->sequence = sess->sequence_out++;
if (sess->fmt_out->pixfmt == V4L2_PIX_FMT_VP9) {
payload_size = vp9_update_header(core, vb);
/* If unable to alter buffer to add headers */
if (payload_size == 0) {
amvdec_remove_ts(sess, vb->timestamp);
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
return 0;
}
}
pad_size = esparser_pad_start_code(core, vb, payload_size);
ret = esparser_write_data(core, phy, payload_size + pad_size);
if (ret <= 0) {
dev_warn(core->dev, "esparser: input parsing error\n");
amvdec_remove_ts(sess, vb->timestamp);
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
amvdec_write_parser(core, PARSER_FETCH_CMD, 0);
return 0;
}
atomic_inc(&sess->esparser_queued_bufs);
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
return 0;
}
void esparser_queue_all_src(struct work_struct *work)
{
struct v4l2_m2m_buffer *buf, *n;
struct amvdec_session *sess =
container_of(work, struct amvdec_session, esparser_queue_work);
mutex_lock(&sess->lock);
v4l2_m2m_for_each_src_buf_safe(sess->m2m_ctx, buf, n) {
if (sess->should_stop)
break;
if (esparser_queue(sess, &buf->vb) < 0)
break;
}
mutex_unlock(&sess->lock);
}
int esparser_power_up(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
struct amvdec_ops *vdec_ops = sess->fmt_out->vdec_ops;
reset_control_reset(core->esparser_reset);
amvdec_write_parser(core, PARSER_CONFIG,
(10 << PS_CFG_PFIFO_EMPTY_CNT_BIT) |
(1 << PS_CFG_MAX_ES_WR_CYCLE_BIT) |
(16 << PS_CFG_MAX_FETCH_CYCLE_BIT));
amvdec_write_parser(core, PFIFO_RD_PTR, 0);
amvdec_write_parser(core, PFIFO_WR_PTR, 0);
amvdec_write_parser(core, PARSER_SEARCH_PATTERN,
ES_START_CODE_PATTERN);
amvdec_write_parser(core, PARSER_SEARCH_MASK, ES_START_CODE_MASK);
amvdec_write_parser(core, PARSER_CONFIG,
(10 << PS_CFG_PFIFO_EMPTY_CNT_BIT) |
(1 << PS_CFG_MAX_ES_WR_CYCLE_BIT) |
(16 << PS_CFG_MAX_FETCH_CYCLE_BIT) |
(2 << PS_CFG_STARTCODE_WID_24_BIT));
amvdec_write_parser(core, PARSER_CONTROL,
(ES_SEARCH | ES_PARSER_START));
amvdec_write_parser(core, PARSER_VIDEO_START_PTR, sess->vififo_paddr);
amvdec_write_parser(core, PARSER_VIDEO_END_PTR,
sess->vififo_paddr + sess->vififo_size - 8);
amvdec_write_parser(core, PARSER_ES_CONTROL,
amvdec_read_parser(core, PARSER_ES_CONTROL) & ~1);
if (vdec_ops->conf_esparser)
vdec_ops->conf_esparser(sess);
amvdec_write_parser(core, PARSER_INT_STATUS, 0xffff);
amvdec_write_parser(core, PARSER_INT_ENABLE,
BIT(PARSER_INT_HOST_EN_BIT));
return 0;
}
int esparser_init(struct platform_device *pdev, struct amvdec_core *core)
{
struct device *dev = &pdev->dev;
int ret;
int irq;
irq = platform_get_irq_byname(pdev, "esparser");
if (irq < 0)
return irq;
ret = devm_request_irq(dev, irq, esparser_isr, IRQF_SHARED,
"esparserirq", core);
if (ret) {
dev_err(dev, "Failed requesting ESPARSER IRQ\n");
return ret;
}
core->esparser_reset =
devm_reset_control_get_exclusive(dev, "esparser");
if (IS_ERR(core->esparser_reset)) {
dev_err(dev, "Failed to get esparser_reset\n");
return PTR_ERR(core->esparser_reset);
}
return 0;
}
| linux-master | drivers/staging/media/meson/vdec/esparser.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cedrus VPU driver
*
* Copyright (c) 2019 Jernej Skrabec <[email protected]>
*/
/*
* VP8 in Cedrus shares the same engine as H264.
*
* Note that it seems necessary to call the bitstream parsing functions
* to parse the frame header, otherwise the decoded image is garbage. This is
* contrary to what the driver is supposed to do. However, the values are not
* really used, so this might be acceptable. It's possible that the bitstream
* parsing functions set some internal VPU state which is later necessary
* for proper decoding. The biggest suspect is the "VP8 probs update" trigger.
*/
#include <linux/delay.h>
#include <linux/types.h>
#include <media/videobuf2-dma-contig.h>
#include "cedrus.h"
#include "cedrus_hw.h"
#include "cedrus_regs.h"
#define CEDRUS_ENTROPY_PROBS_SIZE 0x2400
#define VP8_PROB_HALF 128
#define QUANT_DELTA_COUNT 5
/*
* This table comes from the concatenation of k_coeff_entropy_update_probs,
* kf_ymode_prob, default_mv_context, etc. It is provided in this form in
* order to avoid computing it every time the driver is initialised, and is
* suitable for direct consumption by the hardware.
*/
static const u8 prob_table_init[] = {
/* k_coeff_entropy_update_probs */
/* block 0 */
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xB0, 0xF6, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xDF, 0xF1, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xF9, 0xFD, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xF4, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xEA, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xF6, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xEF, 0xFD, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFE, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xF8, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFB, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFD, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFB, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFE, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFE, 0xFD, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFA, 0xFF, 0xFE, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* block 1 */
0xD9, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xE1, 0xFC, 0xF1, 0xFD, 0xFF, 0xFF, 0xFE, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xEA, 0xFA, 0xF1, 0xFA, 0xFD, 0xFF, 0xFD, 0xFE,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xDF, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xEE, 0xFD, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xF8, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xF9, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xF7, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFD, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFE, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFA, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* block 2 */
0xBA, 0xFB, 0xFA, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xEA, 0xFB, 0xF4, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFB, 0xFB, 0xF3, 0xFD, 0xFE, 0xFF, 0xFE, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFD, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xEC, 0xFD, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFB, 0xFD, 0xFD, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFE, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* block 3 */
0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFA, 0xFE, 0xFC, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xF8, 0xFE, 0xF9, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFD, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xF6, 0xFD, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFC, 0xFE, 0xFB, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFE, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xF8, 0xFE, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFD, 0xFF, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFB, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xF5, 0xFB, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFD, 0xFD, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFB, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFC, 0xFD, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xF9, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFA, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* kf_y_mode_probs */
0x91, 0x9C, 0xA3, 0x80, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* split_mv_probs */
0x6E, 0x6F, 0x96, 0x00, 0x00, 0x00, 0x00, 0x00,
/* bmode_prob */
0x78, 0x5A, 0x4F, 0x85, 0x57, 0x55, 0x50, 0x6F,
0x97, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* sub_mv_ref_prob */
0x93, 0x88, 0x12, 0x00,
0x6A, 0x91, 0x01, 0x00,
0xB3, 0x79, 0x01, 0x00,
0xDF, 0x01, 0x22, 0x00,
0xD0, 0x01, 0x01, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* mv_counts_to_probs */
0x07, 0x01, 0x01, 0x8F,
0x0E, 0x12, 0x0E, 0x6B,
0x87, 0x40, 0x39, 0x44,
0x3C, 0x38, 0x80, 0x41,
0x9F, 0x86, 0x80, 0x22,
0xEA, 0xBC, 0x80, 0x1C,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* kf_y_mode_tree */
0x84, 0x02, 0x04, 0x06, 0x80, 0x81, 0x82, 0x83,
/* y_mode_tree */
0x80, 0x02, 0x04, 0x06, 0x81, 0x82, 0x83, 0x84,
/* uv_mode_tree */
0x80, 0x02, 0x81, 0x04, 0x82, 0x83, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
/* small_mv_tree */
0x02, 0x08, 0x04, 0x06, 0x80, 0x81, 0x82, 0x83,
0x0A, 0x0C, 0x84, 0x85, 0x86, 0x87, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* small_mv_tree again */
0x02, 0x08, 0x04, 0x06, 0x80, 0x81, 0x82, 0x83,
0x0A, 0x0C, 0x84, 0x85, 0x86, 0x87, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* split_mv_tree */
0x83, 0x02, 0x82, 0x04, 0x80, 0x81, 0x00, 0x00,
/* b_mode_tree */
0x80, 0x02, 0x81, 0x04, 0x82, 0x06, 0x08, 0x0C,
0x83, 0x0A, 0x85, 0x86, 0x84, 0x0E, 0x87, 0x10,
0x88, 0x89, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* submv_ref_tree */
0x8A, 0x02, 0x8B, 0x04, 0x8C, 0x8D, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* mv_ref_tree */
0x87, 0x02, 0x85, 0x04, 0x86, 0x06, 0x88, 0x89,
};
/*
* This table is a copy of k_mv_entropy_update_probs from the VP8
* specification.
*
* FIXME: If any other driver uses it, we can consider moving
* this table so it can be shared.
*/
static const u8 k_mv_entropy_update_probs[2][V4L2_VP8_MV_PROB_CNT] = {
{ 237, 246, 253, 253, 254, 254, 254, 254, 254,
254, 254, 254, 254, 254, 250, 250, 252, 254, 254 },
{ 231, 243, 245, 253, 254, 254, 254, 254, 254,
254, 254, 254, 254, 254, 251, 251, 254, 254, 254 }
};
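/*
 * Drive the VPU's boolean entropy decoder to consume bits_count bits with
 * the given probability and return the decoded value. This is how the
 * frame header is walked below without the results actually being used.
 */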
static uint8_t read_bits(struct cedrus_dev *dev, unsigned int bits_count,
unsigned int probability)
{
cedrus_write(dev, VE_H264_TRIGGER_TYPE,
VE_H264_TRIGGER_TYPE_VP8_GET_BITS |
VE_H264_TRIGGER_TYPE_BIN_LENS(bits_count) |
VE_H264_TRIGGER_TYPE_PROBABILITY(probability));
cedrus_wait_for(dev, VE_H264_STATUS, VE_H264_STATUS_VLD_BUSY);
return cedrus_read(dev, VE_H264_BASIC_BITS);
}
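/*
 * A VP8 quantizer delta is coded as a flag bit followed, when the flag is
 * set, by a 4-bit magnitude and a sign bit.
 */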
static void get_delta_q(struct cedrus_dev *dev)
{
if (read_bits(dev, 1, VP8_PROB_HALF)) {
read_bits(dev, 4, VP8_PROB_HALF);
read_bits(dev, 1, VP8_PROB_HALF);
}
}
static void process_segmentation_info(struct cedrus_dev *dev)
{
int update, i;
update = read_bits(dev, 1, VP8_PROB_HALF);
if (read_bits(dev, 1, VP8_PROB_HALF)) {
read_bits(dev, 1, VP8_PROB_HALF);
for (i = 0; i < 4; i++)
if (read_bits(dev, 1, VP8_PROB_HALF)) {
read_bits(dev, 7, VP8_PROB_HALF);
read_bits(dev, 1, VP8_PROB_HALF);
}
for (i = 0; i < 4; i++)
if (read_bits(dev, 1, VP8_PROB_HALF)) {
read_bits(dev, 6, VP8_PROB_HALF);
read_bits(dev, 1, VP8_PROB_HALF);
}
}
if (update)
for (i = 0; i < 3; i++)
if (read_bits(dev, 1, VP8_PROB_HALF))
read_bits(dev, 8, VP8_PROB_HALF);
}
static void process_ref_lf_delta_info(struct cedrus_dev *dev)
{
if (read_bits(dev, 1, VP8_PROB_HALF)) {
int i;
for (i = 0; i < 4; i++)
if (read_bits(dev, 1, VP8_PROB_HALF)) {
read_bits(dev, 6, VP8_PROB_HALF);
read_bits(dev, 1, VP8_PROB_HALF);
}
for (i = 0; i < 4; i++)
if (read_bits(dev, 1, VP8_PROB_HALF)) {
read_bits(dev, 6, VP8_PROB_HALF);
read_bits(dev, 1, VP8_PROB_HALF);
}
}
}
static void process_ref_frame_info(struct cedrus_dev *dev)
{
u8 refresh_golden_frame = read_bits(dev, 1, VP8_PROB_HALF);
u8 refresh_alt_ref_frame = read_bits(dev, 1, VP8_PROB_HALF);
if (!refresh_golden_frame)
read_bits(dev, 2, VP8_PROB_HALF);
if (!refresh_alt_ref_frame)
read_bits(dev, 2, VP8_PROB_HALF);
read_bits(dev, 1, VP8_PROB_HALF);
read_bits(dev, 1, VP8_PROB_HALF);
}
static void cedrus_irq_clear(struct cedrus_dev *dev)
{
cedrus_write(dev, VE_H264_STATUS,
VE_H264_STATUS_INT_MASK);
}
static void cedrus_read_header(struct cedrus_dev *dev,
const struct v4l2_ctrl_vp8_frame *slice)
{
int i, j;
if (V4L2_VP8_FRAME_IS_KEY_FRAME(slice)) {
read_bits(dev, 1, VP8_PROB_HALF);
read_bits(dev, 1, VP8_PROB_HALF);
}
if (read_bits(dev, 1, VP8_PROB_HALF))
process_segmentation_info(dev);
read_bits(dev, 1, VP8_PROB_HALF);
read_bits(dev, 6, VP8_PROB_HALF);
read_bits(dev, 3, VP8_PROB_HALF);
if (read_bits(dev, 1, VP8_PROB_HALF))
process_ref_lf_delta_info(dev);
read_bits(dev, 2, VP8_PROB_HALF);
/* y_ac_qi */
read_bits(dev, 7, VP8_PROB_HALF);
/* Parses y_dc_delta, y2_dc_delta, etc. */
for (i = 0; i < QUANT_DELTA_COUNT; i++)
get_delta_q(dev);
if (!V4L2_VP8_FRAME_IS_KEY_FRAME(slice))
process_ref_frame_info(dev);
read_bits(dev, 1, VP8_PROB_HALF);
if (!V4L2_VP8_FRAME_IS_KEY_FRAME(slice))
read_bits(dev, 1, VP8_PROB_HALF);
cedrus_write(dev, VE_H264_TRIGGER_TYPE, VE_H264_TRIGGER_TYPE_VP8_UPDATE_COEF);
cedrus_wait_for(dev, VE_H264_STATUS, VE_H264_STATUS_VP8_UPPROB_BUSY);
cedrus_irq_clear(dev);
if (read_bits(dev, 1, VP8_PROB_HALF))
read_bits(dev, 8, VP8_PROB_HALF);
if (!V4L2_VP8_FRAME_IS_KEY_FRAME(slice)) {
read_bits(dev, 8, VP8_PROB_HALF);
read_bits(dev, 8, VP8_PROB_HALF);
read_bits(dev, 8, VP8_PROB_HALF);
if (read_bits(dev, 1, VP8_PROB_HALF)) {
read_bits(dev, 8, VP8_PROB_HALF);
read_bits(dev, 8, VP8_PROB_HALF);
read_bits(dev, 8, VP8_PROB_HALF);
read_bits(dev, 8, VP8_PROB_HALF);
}
if (read_bits(dev, 1, VP8_PROB_HALF)) {
read_bits(dev, 8, VP8_PROB_HALF);
read_bits(dev, 8, VP8_PROB_HALF);
read_bits(dev, 8, VP8_PROB_HALF);
}
for (i = 0; i < 2; i++)
for (j = 0; j < V4L2_VP8_MV_PROB_CNT; j++)
if (read_bits(dev, 1, k_mv_entropy_update_probs[i][j]))
read_bits(dev, 7, VP8_PROB_HALF);
}
}
static void cedrus_vp8_update_probs(const struct v4l2_ctrl_vp8_frame *slice,
u8 *prob_table)
{
int i, j, k;
memcpy(&prob_table[0x1008], slice->entropy.y_mode_probs,
sizeof(slice->entropy.y_mode_probs));
memcpy(&prob_table[0x1010], slice->entropy.uv_mode_probs,
sizeof(slice->entropy.uv_mode_probs));
memcpy(&prob_table[0x1018], slice->segment.segment_probs,
sizeof(slice->segment.segment_probs));
prob_table[0x101c] = slice->prob_skip_false;
prob_table[0x101d] = slice->prob_intra;
prob_table[0x101e] = slice->prob_last;
prob_table[0x101f] = slice->prob_gf;
memcpy(&prob_table[0x1020], slice->entropy.mv_probs[0],
V4L2_VP8_MV_PROB_CNT);
memcpy(&prob_table[0x1040], slice->entropy.mv_probs[1],
V4L2_VP8_MV_PROB_CNT);
for (i = 0; i < 4; ++i)
for (j = 0; j < 8; ++j)
for (k = 0; k < 3; ++k)
memcpy(&prob_table[i * 512 + j * 64 + k * 16],
slice->entropy.coeff_probs[i][j][k], 11);
}
static enum cedrus_irq_status
cedrus_vp8_irq_status(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
u32 reg = cedrus_read(dev, VE_H264_STATUS);
if (reg & (VE_H264_STATUS_DECODE_ERR_INT |
VE_H264_STATUS_VLD_DATA_REQ_INT))
return CEDRUS_IRQ_ERROR;
if (reg & VE_H264_CTRL_SLICE_DECODE_INT)
return CEDRUS_IRQ_OK;
return CEDRUS_IRQ_NONE;
}
static void cedrus_vp8_irq_clear(struct cedrus_ctx *ctx)
{
cedrus_irq_clear(ctx->dev);
}
static void cedrus_vp8_irq_disable(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
u32 reg = cedrus_read(dev, VE_H264_CTRL);
cedrus_write(dev, VE_H264_CTRL,
reg & ~VE_H264_CTRL_INT_MASK);
}
static int cedrus_vp8_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
{
const struct v4l2_ctrl_vp8_frame *slice = run->vp8.frame_params;
struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
struct vb2_buffer *src_buf = &run->src->vb2_buf;
struct cedrus_dev *dev = ctx->dev;
dma_addr_t luma_addr, chroma_addr;
dma_addr_t src_buf_addr;
int header_size;
u32 reg;
cedrus_engine_enable(ctx);
cedrus_write(dev, VE_H264_CTRL, VE_H264_CTRL_VP8);
cedrus_vp8_update_probs(slice, ctx->codec.vp8.entropy_probs_buf);
reg = slice->first_part_size * 8;
cedrus_write(dev, VE_VP8_FIRST_DATA_PART_LEN, reg);
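/*
 * Size of the VP8 uncompressed data chunk: the 3-byte frame tag, plus the
 * start code and frame dimensions (7 more bytes) on key frames.
 */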
header_size = V4L2_VP8_FRAME_IS_KEY_FRAME(slice) ? 10 : 3;
reg = slice->first_part_size + header_size;
cedrus_write(dev, VE_VP8_PART_SIZE_OFFSET, reg);
reg = vb2_plane_size(src_buf, 0) * 8;
cedrus_write(dev, VE_H264_VLD_LEN, reg);
/*
* FIXME: There is a problem if the frame header is skipped (by adding
* first_part_header_bits to the offset). It seems that the bitstream
* parsing functions change the internal state of the VPU in a way that
* can't be set otherwise. Maybe this could be bypassed by fixing up the
* probability table buffer somehow?
*/
reg = header_size * 8;
cedrus_write(dev, VE_H264_VLD_OFFSET, reg);
src_buf_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
cedrus_write(dev, VE_H264_VLD_END,
src_buf_addr + vb2_get_plane_payload(src_buf, 0));
cedrus_write(dev, VE_H264_VLD_ADDR,
VE_H264_VLD_ADDR_VAL(src_buf_addr) |
VE_H264_VLD_ADDR_FIRST | VE_H264_VLD_ADDR_VALID |
VE_H264_VLD_ADDR_LAST);
cedrus_write(dev, VE_H264_TRIGGER_TYPE,
VE_H264_TRIGGER_TYPE_INIT_SWDEC);
cedrus_write(dev, VE_VP8_ENTROPY_PROBS_ADDR,
ctx->codec.vp8.entropy_probs_buf_dma);
reg = 0;
switch (slice->version) {
case 1:
reg |= VE_VP8_PPS_FILTER_TYPE_SIMPLE;
reg |= VE_VP8_PPS_BILINEAR_MC_FILTER;
break;
case 2:
reg |= VE_VP8_PPS_LPF_DISABLE;
reg |= VE_VP8_PPS_BILINEAR_MC_FILTER;
break;
case 3:
reg |= VE_VP8_PPS_LPF_DISABLE;
reg |= VE_VP8_PPS_FULL_PIXEL;
break;
}
if (slice->segment.flags & V4L2_VP8_SEGMENT_FLAG_UPDATE_MAP)
reg |= VE_VP8_PPS_UPDATE_MB_SEGMENTATION_MAP;
if (!(slice->segment.flags & V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE))
reg |= VE_VP8_PPS_MB_SEGMENT_ABS_DELTA;
if (slice->segment.flags & V4L2_VP8_SEGMENT_FLAG_ENABLED)
reg |= VE_VP8_PPS_SEGMENTATION_ENABLE;
if (ctx->codec.vp8.last_filter_type)
reg |= VE_VP8_PPS_LAST_LOOP_FILTER_SIMPLE;
reg |= VE_VP8_PPS_SHARPNESS_LEVEL(slice->lf.sharpness_level);
if (slice->lf.flags & V4L2_VP8_LF_FILTER_TYPE_SIMPLE)
reg |= VE_VP8_PPS_LOOP_FILTER_SIMPLE;
reg |= VE_VP8_PPS_LOOP_FILTER_LEVEL(slice->lf.level);
if (slice->lf.flags & V4L2_VP8_LF_ADJ_ENABLE)
reg |= VE_VP8_PPS_MODE_REF_LF_DELTA_ENABLE;
if (slice->lf.flags & V4L2_VP8_LF_DELTA_UPDATE)
reg |= VE_VP8_PPS_MODE_REF_LF_DELTA_UPDATE;
reg |= VE_VP8_PPS_TOKEN_PARTITION(ilog2(slice->num_dct_parts));
if (slice->flags & V4L2_VP8_FRAME_FLAG_MB_NO_SKIP_COEFF)
reg |= VE_VP8_PPS_MB_NO_COEFF_SKIP;
reg |= VE_VP8_PPS_RELOAD_ENTROPY_PROBS;
if (slice->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_GOLDEN)
reg |= VE_VP8_PPS_GOLDEN_SIGN_BIAS;
if (slice->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_ALT)
reg |= VE_VP8_PPS_ALTREF_SIGN_BIAS;
if (ctx->codec.vp8.last_frame_p_type)
reg |= VE_VP8_PPS_LAST_PIC_TYPE_P_FRAME;
reg |= VE_VP8_PPS_LAST_SHARPNESS_LEVEL(ctx->codec.vp8.last_sharpness_level);
if (!(slice->flags & V4L2_VP8_FRAME_FLAG_KEY_FRAME))
reg |= VE_VP8_PPS_PIC_TYPE_P_FRAME;
cedrus_write(dev, VE_VP8_PPS, reg);
cedrus_read_header(dev, slice);
/* reset registers changed by HW */
cedrus_write(dev, VE_H264_CUR_MB_NUM, 0);
cedrus_write(dev, VE_H264_MB_ADDR, 0);
cedrus_write(dev, VE_H264_ERROR_CASE, 0);
reg = 0;
reg |= VE_VP8_QP_INDEX_DELTA_UVAC(slice->quant.uv_ac_delta);
reg |= VE_VP8_QP_INDEX_DELTA_UVDC(slice->quant.uv_dc_delta);
reg |= VE_VP8_QP_INDEX_DELTA_Y2AC(slice->quant.y2_ac_delta);
reg |= VE_VP8_QP_INDEX_DELTA_Y2DC(slice->quant.y2_dc_delta);
reg |= VE_VP8_QP_INDEX_DELTA_Y1DC(slice->quant.y_dc_delta);
reg |= VE_VP8_QP_INDEX_DELTA_BASE_QINDEX(slice->quant.y_ac_qi);
cedrus_write(dev, VE_VP8_QP_INDEX_DELTA, reg);
reg = 0;
reg |= VE_VP8_FSIZE_WIDTH(slice->width);
reg |= VE_VP8_FSIZE_HEIGHT(slice->height);
cedrus_write(dev, VE_VP8_FSIZE, reg);
reg = 0;
reg |= VE_VP8_PICSIZE_WIDTH(slice->width);
reg |= VE_VP8_PICSIZE_HEIGHT(slice->height);
cedrus_write(dev, VE_VP8_PICSIZE, reg);
reg = 0;
reg |= VE_VP8_SEGMENT3(slice->segment.quant_update[3]);
reg |= VE_VP8_SEGMENT2(slice->segment.quant_update[2]);
reg |= VE_VP8_SEGMENT1(slice->segment.quant_update[1]);
reg |= VE_VP8_SEGMENT0(slice->segment.quant_update[0]);
cedrus_write(dev, VE_VP8_SEGMENT_FEAT_MB_LV0, reg);
reg = 0;
reg |= VE_VP8_SEGMENT3(slice->segment.lf_update[3]);
reg |= VE_VP8_SEGMENT2(slice->segment.lf_update[2]);
reg |= VE_VP8_SEGMENT1(slice->segment.lf_update[1]);
reg |= VE_VP8_SEGMENT0(slice->segment.lf_update[0]);
cedrus_write(dev, VE_VP8_SEGMENT_FEAT_MB_LV1, reg);
reg = 0;
reg |= VE_VP8_LF_DELTA3(slice->lf.ref_frm_delta[3]);
reg |= VE_VP8_LF_DELTA2(slice->lf.ref_frm_delta[2]);
reg |= VE_VP8_LF_DELTA1(slice->lf.ref_frm_delta[1]);
reg |= VE_VP8_LF_DELTA0(slice->lf.ref_frm_delta[0]);
cedrus_write(dev, VE_VP8_REF_LF_DELTA, reg);
reg = 0;
reg |= VE_VP8_LF_DELTA3(slice->lf.mb_mode_delta[3]);
reg |= VE_VP8_LF_DELTA2(slice->lf.mb_mode_delta[2]);
reg |= VE_VP8_LF_DELTA1(slice->lf.mb_mode_delta[1]);
reg |= VE_VP8_LF_DELTA0(slice->lf.mb_mode_delta[0]);
cedrus_write(dev, VE_VP8_MODE_LF_DELTA, reg);
luma_addr = cedrus_dst_buf_addr(ctx, &run->dst->vb2_buf, 0);
chroma_addr = cedrus_dst_buf_addr(ctx, &run->dst->vb2_buf, 1);
cedrus_write(dev, VE_VP8_REC_LUMA, luma_addr);
cedrus_write(dev, VE_VP8_REC_CHROMA, chroma_addr);
cedrus_write_ref_buf_addr(ctx, cap_q, slice->last_frame_ts,
VE_VP8_FWD_LUMA, VE_VP8_FWD_CHROMA);
cedrus_write_ref_buf_addr(ctx, cap_q, slice->golden_frame_ts,
VE_VP8_BWD_LUMA, VE_VP8_BWD_CHROMA);
cedrus_write_ref_buf_addr(ctx, cap_q, slice->alt_frame_ts,
VE_VP8_ALT_LUMA, VE_VP8_ALT_CHROMA);
cedrus_write(dev, VE_H264_CTRL, VE_H264_CTRL_VP8 |
VE_H264_CTRL_DECODE_ERR_INT |
VE_H264_CTRL_SLICE_DECODE_INT);
if (slice->lf.level) {
ctx->codec.vp8.last_filter_type =
!!(slice->lf.flags & V4L2_VP8_LF_FILTER_TYPE_SIMPLE);
ctx->codec.vp8.last_frame_p_type =
!V4L2_VP8_FRAME_IS_KEY_FRAME(slice);
ctx->codec.vp8.last_sharpness_level =
slice->lf.sharpness_level;
}
return 0;
}
static int cedrus_vp8_start(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
ctx->codec.vp8.entropy_probs_buf =
dma_alloc_coherent(dev->dev, CEDRUS_ENTROPY_PROBS_SIZE,
&ctx->codec.vp8.entropy_probs_buf_dma,
GFP_KERNEL);
if (!ctx->codec.vp8.entropy_probs_buf)
return -ENOMEM;
/*
* This offset has been discovered by reverse engineering; we don't know
* what it actually means.
*/
memcpy(&ctx->codec.vp8.entropy_probs_buf[2048],
prob_table_init, sizeof(prob_table_init));
return 0;
}
static void cedrus_vp8_stop(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
cedrus_engine_disable(dev);
dma_free_coherent(dev->dev, CEDRUS_ENTROPY_PROBS_SIZE,
ctx->codec.vp8.entropy_probs_buf,
ctx->codec.vp8.entropy_probs_buf_dma);
}
static void cedrus_vp8_trigger(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
cedrus_write(dev, VE_H264_TRIGGER_TYPE,
VE_H264_TRIGGER_TYPE_VP8_SLICE_DECODE);
}
struct cedrus_dec_ops cedrus_dec_ops_vp8 = {
.irq_clear = cedrus_vp8_irq_clear,
.irq_disable = cedrus_vp8_irq_disable,
.irq_status = cedrus_vp8_irq_status,
.setup = cedrus_vp8_setup,
.start = cedrus_vp8_start,
.stop = cedrus_vp8_stop,
.trigger = cedrus_vp8_trigger,
};
| linux-master | drivers/staging/media/sunxi/cedrus/cedrus_vp8.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cedrus VPU driver
*
* Copyright (C) 2013 Jens Kuske <[email protected]>
* Copyright (C) 2018 Paul Kocialkowski <[email protected]>
* Copyright (C) 2018 Bootlin
*/
#include <linux/delay.h>
#include <linux/types.h>
#include <media/videobuf2-dma-contig.h>
#include "cedrus.h"
#include "cedrus_hw.h"
#include "cedrus_regs.h"
/*
* These are the sizes of the side buffers required by the hardware for
* storing internal decoding metadata. They match the values used by early
* BSP implementations, which were initially exposed in libvdpau-sunxi.
* Subsequent BSP implementations seem to double the neighbor info buffer size
* for the H6 SoC, which may be related to 10 bit H265 support.
*/
#define CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE (794 * SZ_1K)
#define CEDRUS_H265_ENTRY_POINTS_BUF_SIZE (4 * SZ_1K)
#define CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE 160
struct cedrus_h265_sram_frame_info {
__le32 top_pic_order_cnt;
__le32 bottom_pic_order_cnt;
__le32 top_mv_col_buf_addr;
__le32 bottom_mv_col_buf_addr;
__le32 luma_addr;
__le32 chroma_addr;
} __packed;
struct cedrus_h265_sram_pred_weight {
__s8 delta_weight;
__s8 offset;
} __packed;
static unsigned int cedrus_h265_2bit_size(unsigned int width,
unsigned int height)
{
/*
* The vendor library additionally aligns width and height to 16, but all
* capture formats are already aligned to that anyway, so we can skip it
* here. All formats are also one form of YUV 4:2:0 or another, so we can
* safely assume a multiplication factor of 1.5.
*/
return ALIGN(width / 4, 32) * height * 3 / 2;
}
static enum cedrus_irq_status cedrus_h265_irq_status(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
u32 reg;
reg = cedrus_read(dev, VE_DEC_H265_STATUS);
reg &= VE_DEC_H265_STATUS_CHECK_MASK;
if (reg & VE_DEC_H265_STATUS_CHECK_ERROR ||
!(reg & VE_DEC_H265_STATUS_SUCCESS))
return CEDRUS_IRQ_ERROR;
return CEDRUS_IRQ_OK;
}
static void cedrus_h265_irq_clear(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
cedrus_write(dev, VE_DEC_H265_STATUS, VE_DEC_H265_STATUS_CHECK_MASK);
}
static void cedrus_h265_irq_disable(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
u32 reg = cedrus_read(dev, VE_DEC_H265_CTRL);
reg &= ~VE_DEC_H265_CTRL_IRQ_MASK;
cedrus_write(dev, VE_DEC_H265_CTRL, reg);
}
static void cedrus_h265_sram_write_offset(struct cedrus_dev *dev, u32 offset)
{
cedrus_write(dev, VE_DEC_H265_SRAM_OFFSET, offset);
}
static void cedrus_h265_sram_write_data(struct cedrus_dev *dev, void *data,
unsigned int size)
{
u32 *word = data;
while (size >= sizeof(u32)) {
cedrus_write(dev, VE_DEC_H265_SRAM_DATA, *word++);
size -= sizeof(u32);
}
}
static inline dma_addr_t
cedrus_h265_frame_info_mv_col_buf_addr(struct vb2_buffer *buf,
unsigned int field)
{
struct cedrus_buffer *cedrus_buf = vb2_to_cedrus_buffer(buf);
return cedrus_buf->codec.h265.mv_col_buf_dma +
field * cedrus_buf->codec.h265.mv_col_buf_size / 2;
}
static void cedrus_h265_frame_info_write_single(struct cedrus_ctx *ctx,
unsigned int index,
bool field_pic,
u32 pic_order_cnt[],
struct vb2_buffer *buf)
{
struct cedrus_dev *dev = ctx->dev;
dma_addr_t dst_luma_addr = cedrus_dst_buf_addr(ctx, buf, 0);
dma_addr_t dst_chroma_addr = cedrus_dst_buf_addr(ctx, buf, 1);
dma_addr_t mv_col_buf_addr[2] = {
cedrus_h265_frame_info_mv_col_buf_addr(buf, 0),
cedrus_h265_frame_info_mv_col_buf_addr(buf, field_pic ? 1 : 0)
};
u32 offset = VE_DEC_H265_SRAM_OFFSET_FRAME_INFO +
VE_DEC_H265_SRAM_OFFSET_FRAME_INFO_UNIT * index;
struct cedrus_h265_sram_frame_info frame_info = {
.top_pic_order_cnt = cpu_to_le32(pic_order_cnt[0]),
.bottom_pic_order_cnt = cpu_to_le32(field_pic ?
pic_order_cnt[1] :
pic_order_cnt[0]),
.top_mv_col_buf_addr =
cpu_to_le32(VE_DEC_H265_SRAM_DATA_ADDR_BASE(mv_col_buf_addr[0])),
.bottom_mv_col_buf_addr = cpu_to_le32(field_pic ?
VE_DEC_H265_SRAM_DATA_ADDR_BASE(mv_col_buf_addr[1]) :
VE_DEC_H265_SRAM_DATA_ADDR_BASE(mv_col_buf_addr[0])),
.luma_addr = cpu_to_le32(VE_DEC_H265_SRAM_DATA_ADDR_BASE(dst_luma_addr)),
.chroma_addr = cpu_to_le32(VE_DEC_H265_SRAM_DATA_ADDR_BASE(dst_chroma_addr)),
};
cedrus_h265_sram_write_offset(dev, offset);
cedrus_h265_sram_write_data(dev, &frame_info, sizeof(frame_info));
}
static void cedrus_h265_frame_info_write_dpb(struct cedrus_ctx *ctx,
const struct v4l2_hevc_dpb_entry *dpb,
u8 num_active_dpb_entries)
{
struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
V4L2_BUF_TYPE_VIDEO_CAPTURE);
unsigned int i;
for (i = 0; i < num_active_dpb_entries; i++) {
struct vb2_buffer *buf = vb2_find_buffer(vq, dpb[i].timestamp);
u32 pic_order_cnt[2] = {
dpb[i].pic_order_cnt_val,
dpb[i].pic_order_cnt_val
};
if (!buf)
continue;
cedrus_h265_frame_info_write_single(ctx, i, dpb[i].field_pic,
pic_order_cnt,
buf);
}
}
static void cedrus_h265_ref_pic_list_write(struct cedrus_dev *dev,
const struct v4l2_hevc_dpb_entry *dpb,
const u8 list[],
u8 num_ref_idx_active,
u32 sram_offset)
{
unsigned int i;
u32 word = 0;
cedrus_h265_sram_write_offset(dev, sram_offset);
for (i = 0; i < num_ref_idx_active; i++) {
unsigned int shift = (i % 4) * 8;
unsigned int index = list[i];
u8 value = list[i];
if (dpb[index].flags & V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE)
value |= VE_DEC_H265_SRAM_REF_PIC_LIST_LT_REF;
/* Each SRAM word gathers up to 4 references. */
word |= value << shift;
/* Write the word to SRAM and clear it for the next batch. */
if ((i % 4) == 3 || i == (num_ref_idx_active - 1)) {
cedrus_h265_sram_write_data(dev, &word, sizeof(word));
word = 0;
}
}
}
static void cedrus_h265_pred_weight_write(struct cedrus_dev *dev,
const s8 delta_luma_weight[],
const s8 luma_offset[],
const s8 delta_chroma_weight[][2],
const s8 chroma_offset[][2],
u8 num_ref_idx_active,
u32 sram_luma_offset,
u32 sram_chroma_offset)
{
struct cedrus_h265_sram_pred_weight pred_weight[2] = { { 0 } };
unsigned int i, j;
cedrus_h265_sram_write_offset(dev, sram_luma_offset);
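/* Luma weights and offsets are packed two entries per 32-bit SRAM word. */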
for (i = 0; i < num_ref_idx_active; i++) {
unsigned int index = i % 2;
pred_weight[index].delta_weight = delta_luma_weight[i];
pred_weight[index].offset = luma_offset[i];
if (index == 1 || i == (num_ref_idx_active - 1))
cedrus_h265_sram_write_data(dev, (u32 *)&pred_weight,
sizeof(pred_weight));
}
cedrus_h265_sram_write_offset(dev, sram_chroma_offset);
for (i = 0; i < num_ref_idx_active; i++) {
for (j = 0; j < 2; j++) {
pred_weight[j].delta_weight = delta_chroma_weight[i][j];
pred_weight[j].offset = chroma_offset[i][j];
}
cedrus_h265_sram_write_data(dev, &pred_weight,
sizeof(pred_weight));
}
}
static void cedrus_h265_skip_bits(struct cedrus_dev *dev, int num)
{
int count = 0;
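/* The flush trigger handles at most 32 bits per request, so skip in chunks. */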
while (count < num) {
int tmp = min(num - count, 32);
cedrus_write(dev, VE_DEC_H265_TRIGGER,
VE_DEC_H265_TRIGGER_FLUSH_BITS |
VE_DEC_H265_TRIGGER_TYPE_N_BITS(tmp));
if (cedrus_wait_for(dev, VE_DEC_H265_STATUS, VE_DEC_H265_STATUS_VLD_BUSY))
dev_err_ratelimited(dev->dev, "timed out waiting to skip bits\n");
count += tmp;
}
}
static u32 cedrus_h265_show_bits(struct cedrus_dev *dev, int num)
{
cedrus_write(dev, VE_DEC_H265_TRIGGER,
VE_DEC_H265_TRIGGER_SHOW_BITS |
VE_DEC_H265_TRIGGER_TYPE_N_BITS(num));
cedrus_wait_for(dev, VE_DEC_H265_STATUS,
VE_DEC_H265_STATUS_VLD_BUSY);
return cedrus_read(dev, VE_DEC_H265_BITS_READ);
}
static void cedrus_h265_write_scaling_list(struct cedrus_ctx *ctx,
struct cedrus_run *run)
{
const struct v4l2_ctrl_hevc_scaling_matrix *scaling;
struct cedrus_dev *dev = ctx->dev;
u32 i, j, k, val;
scaling = run->h265.scaling_matrix;
cedrus_write(dev, VE_DEC_H265_SCALING_LIST_DC_COEF0,
(scaling->scaling_list_dc_coef_32x32[1] << 24) |
(scaling->scaling_list_dc_coef_32x32[0] << 16) |
(scaling->scaling_list_dc_coef_16x16[1] << 8) |
(scaling->scaling_list_dc_coef_16x16[0] << 0));
cedrus_write(dev, VE_DEC_H265_SCALING_LIST_DC_COEF1,
(scaling->scaling_list_dc_coef_16x16[5] << 24) |
(scaling->scaling_list_dc_coef_16x16[4] << 16) |
(scaling->scaling_list_dc_coef_16x16[3] << 8) |
(scaling->scaling_list_dc_coef_16x16[2] << 0));
cedrus_h265_sram_write_offset(dev, VE_DEC_H265_SRAM_OFFSET_SCALING_LISTS);
for (i = 0; i < 6; i++)
for (j = 0; j < 8; j++)
for (k = 0; k < 8; k += 4) {
val = ((u32)scaling->scaling_list_8x8[i][j + (k + 3) * 8] << 24) |
((u32)scaling->scaling_list_8x8[i][j + (k + 2) * 8] << 16) |
((u32)scaling->scaling_list_8x8[i][j + (k + 1) * 8] << 8) |
scaling->scaling_list_8x8[i][j + k * 8];
cedrus_write(dev, VE_DEC_H265_SRAM_DATA, val);
}
for (i = 0; i < 2; i++)
for (j = 0; j < 8; j++)
for (k = 0; k < 8; k += 4) {
val = ((u32)scaling->scaling_list_32x32[i][j + (k + 3) * 8] << 24) |
((u32)scaling->scaling_list_32x32[i][j + (k + 2) * 8] << 16) |
((u32)scaling->scaling_list_32x32[i][j + (k + 1) * 8] << 8) |
scaling->scaling_list_32x32[i][j + k * 8];
cedrus_write(dev, VE_DEC_H265_SRAM_DATA, val);
}
for (i = 0; i < 6; i++)
for (j = 0; j < 8; j++)
for (k = 0; k < 8; k += 4) {
val = ((u32)scaling->scaling_list_16x16[i][j + (k + 3) * 8] << 24) |
((u32)scaling->scaling_list_16x16[i][j + (k + 2) * 8] << 16) |
((u32)scaling->scaling_list_16x16[i][j + (k + 1) * 8] << 8) |
scaling->scaling_list_16x16[i][j + k * 8];
cedrus_write(dev, VE_DEC_H265_SRAM_DATA, val);
}
for (i = 0; i < 6; i++)
for (j = 0; j < 4; j++) {
val = ((u32)scaling->scaling_list_4x4[i][j + 12] << 24) |
((u32)scaling->scaling_list_4x4[i][j + 8] << 16) |
((u32)scaling->scaling_list_4x4[i][j + 4] << 8) |
scaling->scaling_list_4x4[i][j];
cedrus_write(dev, VE_DEC_H265_SRAM_DATA, val);
}
}
static int cedrus_h265_is_low_delay(struct cedrus_run *run)
{
const struct v4l2_ctrl_hevc_slice_params *slice_params;
const struct v4l2_hevc_dpb_entry *dpb;
s32 poc;
int i;
slice_params = run->h265.slice_params;
poc = run->h265.decode_params->pic_order_cnt_val;
dpb = run->h265.decode_params->dpb;
for (i = 0; i < slice_params->num_ref_idx_l0_active_minus1 + 1; i++)
if (dpb[slice_params->ref_idx_l0[i]].pic_order_cnt_val > poc)
return 1;
if (slice_params->slice_type != V4L2_HEVC_SLICE_TYPE_B)
return 0;
for (i = 0; i < slice_params->num_ref_idx_l1_active_minus1 + 1; i++)
if (dpb[slice_params->ref_idx_l1[i]].pic_order_cnt_val > poc)
return 1;
return 0;
}
static void cedrus_h265_write_tiles(struct cedrus_ctx *ctx,
struct cedrus_run *run,
unsigned int ctb_addr_x,
unsigned int ctb_addr_y)
{
const struct v4l2_ctrl_hevc_slice_params *slice_params;
const struct v4l2_ctrl_hevc_pps *pps;
struct cedrus_dev *dev = ctx->dev;
const u32 *entry_points;
u32 *entry_points_buf;
int i, x, tx, y, ty;
pps = run->h265.pps;
slice_params = run->h265.slice_params;
entry_points = run->h265.entry_points;
entry_points_buf = ctx->codec.h265.entry_points_buf;
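/* Find the tile column and row containing the first CTB of this slice. */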
for (x = 0, tx = 0; tx < pps->num_tile_columns_minus1 + 1; tx++) {
if (x + pps->column_width_minus1[tx] + 1 > ctb_addr_x)
break;
x += pps->column_width_minus1[tx] + 1;
}
for (y = 0, ty = 0; ty < pps->num_tile_rows_minus1 + 1; ty++) {
if (y + pps->row_height_minus1[ty] + 1 > ctb_addr_y)
break;
y += pps->row_height_minus1[ty] + 1;
}
cedrus_write(dev, VE_DEC_H265_TILE_START_CTB, (y << 16) | (x << 0));
cedrus_write(dev, VE_DEC_H265_TILE_END_CTB,
((y + pps->row_height_minus1[ty]) << 16) |
((x + pps->column_width_minus1[tx]) << 0));
if (pps->flags & V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED) {
for (i = 0; i < slice_params->num_entry_point_offsets; i++)
entry_points_buf[i] = entry_points[i];
} else {
for (i = 0; i < slice_params->num_entry_point_offsets; i++) {
if (tx + 1 >= pps->num_tile_columns_minus1 + 1) {
x = 0;
tx = 0;
y += pps->row_height_minus1[ty++] + 1;
} else {
x += pps->column_width_minus1[tx++] + 1;
}
entry_points_buf[i * 4 + 0] = entry_points[i];
entry_points_buf[i * 4 + 1] = 0x0;
entry_points_buf[i * 4 + 2] = (y << 16) | (x << 0);
entry_points_buf[i * 4 + 3] =
((y + pps->row_height_minus1[ty]) << 16) |
((x + pps->column_width_minus1[tx]) << 0);
}
}
}
static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
{
struct cedrus_dev *dev = ctx->dev;
const struct v4l2_ctrl_hevc_sps *sps;
const struct v4l2_ctrl_hevc_pps *pps;
const struct v4l2_ctrl_hevc_slice_params *slice_params;
const struct v4l2_ctrl_hevc_decode_params *decode_params;
const struct v4l2_hevc_pred_weight_table *pred_weight_table;
unsigned int width_in_ctb_luma, ctb_size_luma;
unsigned int log2_max_luma_coding_block_size;
unsigned int ctb_addr_x, ctb_addr_y;
struct cedrus_buffer *cedrus_buf;
dma_addr_t src_buf_addr;
dma_addr_t src_buf_end_addr;
u32 chroma_log2_weight_denom;
u32 num_entry_point_offsets;
u32 output_pic_list_index;
u32 pic_order_cnt[2];
u8 padding;
int count;
u32 reg;
sps = run->h265.sps;
pps = run->h265.pps;
slice_params = run->h265.slice_params;
decode_params = run->h265.decode_params;
pred_weight_table = &slice_params->pred_weight_table;
num_entry_point_offsets = slice_params->num_entry_point_offsets;
cedrus_buf = vb2_to_cedrus_buffer(&run->dst->vb2_buf);
/*
* If entry point offsets are present, we expect to receive exactly the
* right amount of them.
*/
if (num_entry_point_offsets &&
num_entry_point_offsets != run->h265.entry_points_count)
return -ERANGE;
log2_max_luma_coding_block_size =
sps->log2_min_luma_coding_block_size_minus3 + 3 +
sps->log2_diff_max_min_luma_coding_block_size;
ctb_size_luma = 1UL << log2_max_luma_coding_block_size;
width_in_ctb_luma =
DIV_ROUND_UP(sps->pic_width_in_luma_samples, ctb_size_luma);
/* MV column buffer size and allocation. */
if (!cedrus_buf->codec.h265.mv_col_buf_size) {
/*
* Each CTB requires an MV col buffer with a specific unit size.
* Since the address is given with missing lsb bits, 1 KiB is
* added to each buffer to ensure proper alignment.
*/
cedrus_buf->codec.h265.mv_col_buf_size =
DIV_ROUND_UP(ctx->src_fmt.width, ctb_size_luma) *
DIV_ROUND_UP(ctx->src_fmt.height, ctb_size_luma) *
CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE + SZ_1K;
/* Buffer is never accessed by CPU, so we can skip kernel mapping. */
cedrus_buf->codec.h265.mv_col_buf =
dma_alloc_attrs(dev->dev,
cedrus_buf->codec.h265.mv_col_buf_size,
&cedrus_buf->codec.h265.mv_col_buf_dma,
GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!cedrus_buf->codec.h265.mv_col_buf) {
cedrus_buf->codec.h265.mv_col_buf_size = 0;
return -ENOMEM;
}
}
/* Activate H265 engine. */
cedrus_engine_enable(ctx);
/* Source offset and length in bits. */
cedrus_write(dev, VE_DEC_H265_BITS_OFFSET, 0);
reg = slice_params->bit_size;
cedrus_write(dev, VE_DEC_H265_BITS_LEN, reg);
/* Source beginning and end addresses. */
src_buf_addr = vb2_dma_contig_plane_dma_addr(&run->src->vb2_buf, 0);
reg = VE_DEC_H265_BITS_ADDR_BASE(src_buf_addr);
reg |= VE_DEC_H265_BITS_ADDR_VALID_SLICE_DATA;
reg |= VE_DEC_H265_BITS_ADDR_LAST_SLICE_DATA;
reg |= VE_DEC_H265_BITS_ADDR_FIRST_SLICE_DATA;
cedrus_write(dev, VE_DEC_H265_BITS_ADDR, reg);
src_buf_end_addr = src_buf_addr +
DIV_ROUND_UP(slice_params->bit_size, 8);
reg = VE_DEC_H265_BITS_END_ADDR_BASE(src_buf_end_addr);
cedrus_write(dev, VE_DEC_H265_BITS_END_ADDR, reg);
/* Coding tree block address */
ctb_addr_x = slice_params->slice_segment_addr % width_in_ctb_luma;
ctb_addr_y = slice_params->slice_segment_addr / width_in_ctb_luma;
reg = VE_DEC_H265_DEC_CTB_ADDR_X(ctb_addr_x);
reg |= VE_DEC_H265_DEC_CTB_ADDR_Y(ctb_addr_y);
cedrus_write(dev, VE_DEC_H265_DEC_CTB_ADDR, reg);
if ((pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED) ||
(pps->flags & V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED)) {
cedrus_h265_write_tiles(ctx, run, ctb_addr_x, ctb_addr_y);
} else {
cedrus_write(dev, VE_DEC_H265_TILE_START_CTB, 0);
cedrus_write(dev, VE_DEC_H265_TILE_END_CTB, 0);
}
/* Clear the number of correctly-decoded coding tree blocks. */
if (ctx->fh.m2m_ctx->new_frame)
cedrus_write(dev, VE_DEC_H265_DEC_CTB_NUM, 0);
/* Initialize bitstream access. */
cedrus_write(dev, VE_DEC_H265_TRIGGER, VE_DEC_H265_TRIGGER_INIT_SWDEC);
/*
* Cedrus expects the bitstream pointer to be at the end of the slice header
* instead of at the start of the slice data. The padding is at most 8 bits
* (one bit set to 1 and at most seven bits set to 0), so we only have to
* inspect one byte before the slice data.
*/
if (slice_params->data_byte_offset == 0)
return -EOPNOTSUPP;
cedrus_h265_skip_bits(dev, (slice_params->data_byte_offset - 1) * 8);
padding = cedrus_h265_show_bits(dev, 8);
/* At least one bit must be set in that byte. */
if (padding == 0)
return -EINVAL;
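/* Locate the trailing stop bit (the lowest set bit) in the padding byte. */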
for (count = 0; count < 8; count++)
if (padding & (1 << count))
break;
/* Include the one bit. */
count++;
cedrus_h265_skip_bits(dev, 8 - count);
/* Bitstream parameters. */
reg = VE_DEC_H265_DEC_NAL_HDR_NAL_UNIT_TYPE(slice_params->nal_unit_type) |
VE_DEC_H265_DEC_NAL_HDR_NUH_TEMPORAL_ID_PLUS1(slice_params->nuh_temporal_id_plus1);
cedrus_write(dev, VE_DEC_H265_DEC_NAL_HDR, reg);
/* SPS. */
reg = VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTRA(sps->max_transform_hierarchy_depth_intra) |
VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTER(sps->max_transform_hierarchy_depth_inter) |
VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_TRANSFORM_BLOCK_SIZE(sps->log2_diff_max_min_luma_transform_block_size) |
VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_TRANSFORM_BLOCK_SIZE_MINUS2(sps->log2_min_luma_transform_block_size_minus2) |
VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_LUMA_CODING_BLOCK_SIZE(sps->log2_diff_max_min_luma_coding_block_size) |
VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_LUMA_CODING_BLOCK_SIZE_MINUS3(sps->log2_min_luma_coding_block_size_minus3) |
VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_CHROMA_MINUS8(sps->bit_depth_chroma_minus8) |
VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_LUMA_MINUS8(sps->bit_depth_luma_minus8) |
VE_DEC_H265_DEC_SPS_HDR_CHROMA_FORMAT_IDC(sps->chroma_format_idc);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_STRONG_INTRA_SMOOTHING_ENABLE,
V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED,
sps->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_SPS_TEMPORAL_MVP_ENABLED,
V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED,
sps->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_SAMPLE_ADAPTIVE_OFFSET_ENABLED,
V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET,
sps->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_AMP_ENABLED,
V4L2_HEVC_SPS_FLAG_AMP_ENABLED, sps->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_SEPARATE_COLOUR_PLANE,
V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE,
sps->flags);
cedrus_write(dev, VE_DEC_H265_DEC_SPS_HDR, reg);
reg = VE_DEC_H265_DEC_PCM_CTRL_LOG2_DIFF_MAX_MIN_PCM_LUMA_CODING_BLOCK_SIZE(sps->log2_diff_max_min_pcm_luma_coding_block_size) |
VE_DEC_H265_DEC_PCM_CTRL_LOG2_MIN_PCM_LUMA_CODING_BLOCK_SIZE_MINUS3(sps->log2_min_pcm_luma_coding_block_size_minus3) |
VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_CHROMA_MINUS1(sps->pcm_sample_bit_depth_chroma_minus1) |
VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_LUMA_MINUS1(sps->pcm_sample_bit_depth_luma_minus1);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_ENABLED,
V4L2_HEVC_SPS_FLAG_PCM_ENABLED, sps->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_LOOP_FILTER_DISABLED,
V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED,
sps->flags);
cedrus_write(dev, VE_DEC_H265_DEC_PCM_CTRL, reg);
/* PPS. */
reg = VE_DEC_H265_DEC_PPS_CTRL0_PPS_CR_QP_OFFSET(pps->pps_cr_qp_offset) |
VE_DEC_H265_DEC_PPS_CTRL0_PPS_CB_QP_OFFSET(pps->pps_cb_qp_offset) |
VE_DEC_H265_DEC_PPS_CTRL0_INIT_QP_MINUS26(pps->init_qp_minus26) |
VE_DEC_H265_DEC_PPS_CTRL0_DIFF_CU_QP_DELTA_DEPTH(pps->diff_cu_qp_delta_depth);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CU_QP_DELTA_ENABLED,
V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED,
pps->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_TRANSFORM_SKIP_ENABLED,
V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED,
pps->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CONSTRAINED_INTRA_PRED,
V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED,
pps->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_SIGN_DATA_HIDING_ENABLED,
V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED,
pps->flags);
cedrus_write(dev, VE_DEC_H265_DEC_PPS_CTRL0, reg);
reg = VE_DEC_H265_DEC_PPS_CTRL1_LOG2_PARALLEL_MERGE_LEVEL_MINUS2(pps->log2_parallel_merge_level_minus2);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED,
V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED,
pps->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED,
V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED,
pps->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_ENTROPY_CODING_SYNC_ENABLED,
V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED,
pps->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TILES_ENABLED,
V4L2_HEVC_PPS_FLAG_TILES_ENABLED,
pps->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TRANSQUANT_BYPASS_ENABLED,
V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED,
pps->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_BIPRED,
V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED, pps->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_PRED,
V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED, pps->flags);
cedrus_write(dev, VE_DEC_H265_DEC_PPS_CTRL1, reg);
/* Slice Parameters. */
reg = VE_DEC_H265_DEC_SLICE_HDR_INFO0_PICTURE_TYPE(slice_params->pic_struct) |
VE_DEC_H265_DEC_SLICE_HDR_INFO0_FIVE_MINUS_MAX_NUM_MERGE_CAND(slice_params->five_minus_max_num_merge_cand) |
VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L1_ACTIVE_MINUS1(slice_params->num_ref_idx_l1_active_minus1) |
VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L0_ACTIVE_MINUS1(slice_params->num_ref_idx_l0_active_minus1) |
VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLLOCATED_REF_IDX(slice_params->collocated_ref_idx) |
VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLOUR_PLANE_ID(slice_params->colour_plane_id) |
VE_DEC_H265_DEC_SLICE_HDR_INFO0_SLICE_TYPE(slice_params->slice_type);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_COLLOCATED_FROM_L0,
V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0,
slice_params->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_CABAC_INIT,
V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT,
slice_params->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_MVD_L1_ZERO,
V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO,
slice_params->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_CHROMA,
V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA,
slice_params->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_LUMA,
V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA,
slice_params->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_TEMPORAL_MVP_ENABLE,
V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED,
slice_params->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_DEPENDENT_SLICE_SEGMENT,
V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT,
slice_params->flags);
if (ctx->fh.m2m_ctx->new_frame)
reg |= VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_FIRST_SLICE_SEGMENT_IN_PIC;
cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO0, reg);
reg = VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_TC_OFFSET_DIV2(slice_params->slice_tc_offset_div2) |
VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_BETA_OFFSET_DIV2(slice_params->slice_beta_offset_div2) |
VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CR_QP_OFFSET(slice_params->slice_cr_qp_offset) |
VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CB_QP_OFFSET(slice_params->slice_cb_qp_offset) |
VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_QP_DELTA(slice_params->slice_qp_delta);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED,
V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED,
slice_params->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED,
V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED,
slice_params->flags);
if (slice_params->slice_type != V4L2_HEVC_SLICE_TYPE_I && !cedrus_h265_is_low_delay(run))
reg |= VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_NOT_LOW_DELAY;
cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO1, reg);
chroma_log2_weight_denom = pred_weight_table->luma_log2_weight_denom +
pred_weight_table->delta_chroma_log2_weight_denom;
reg = VE_DEC_H265_DEC_SLICE_HDR_INFO2_NUM_ENTRY_POINT_OFFSETS(num_entry_point_offsets) |
VE_DEC_H265_DEC_SLICE_HDR_INFO2_CHROMA_LOG2_WEIGHT_DENOM(chroma_log2_weight_denom) |
VE_DEC_H265_DEC_SLICE_HDR_INFO2_LUMA_LOG2_WEIGHT_DENOM(pred_weight_table->luma_log2_weight_denom);
cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO2, reg);
cedrus_write(dev, VE_DEC_H265_ENTRY_POINT_OFFSET_ADDR,
ctx->codec.h265.entry_points_buf_addr >> 8);
/* Decoded picture size. */
reg = VE_DEC_H265_DEC_PIC_SIZE_WIDTH(ctx->src_fmt.width) |
VE_DEC_H265_DEC_PIC_SIZE_HEIGHT(ctx->src_fmt.height);
cedrus_write(dev, VE_DEC_H265_DEC_PIC_SIZE, reg);
/* Scaling list. */
if (sps->flags & V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED) {
cedrus_h265_write_scaling_list(ctx, run);
reg = VE_DEC_H265_SCALING_LIST_CTRL0_FLAG_ENABLED;
} else {
reg = VE_DEC_H265_SCALING_LIST_CTRL0_DEFAULT;
}
cedrus_write(dev, VE_DEC_H265_SCALING_LIST_CTRL0, reg);
/* Neighbor information address. */
reg = VE_DEC_H265_NEIGHBOR_INFO_ADDR_BASE(ctx->codec.h265.neighbor_info_buf_addr);
cedrus_write(dev, VE_DEC_H265_NEIGHBOR_INFO_ADDR, reg);
/* Write decoded picture buffer in pic list. */
cedrus_h265_frame_info_write_dpb(ctx, decode_params->dpb,
decode_params->num_active_dpb_entries);
/* Output frame. */
output_pic_list_index = V4L2_HEVC_DPB_ENTRIES_NUM_MAX;
pic_order_cnt[0] = slice_params->slice_pic_order_cnt;
pic_order_cnt[1] = slice_params->slice_pic_order_cnt;
cedrus_h265_frame_info_write_single(ctx, output_pic_list_index,
slice_params->pic_struct != 0,
pic_order_cnt,
&run->dst->vb2_buf);
cedrus_write(dev, VE_DEC_H265_OUTPUT_FRAME_IDX, output_pic_list_index);
/* Reference picture list 0 (for P/B frames). */
if (slice_params->slice_type != V4L2_HEVC_SLICE_TYPE_I) {
cedrus_h265_ref_pic_list_write(dev, decode_params->dpb,
slice_params->ref_idx_l0,
slice_params->num_ref_idx_l0_active_minus1 + 1,
VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST0);
if ((pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED) ||
(pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED))
cedrus_h265_pred_weight_write(dev,
pred_weight_table->delta_luma_weight_l0,
pred_weight_table->luma_offset_l0,
pred_weight_table->delta_chroma_weight_l0,
pred_weight_table->chroma_offset_l0,
slice_params->num_ref_idx_l0_active_minus1 + 1,
VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L0,
VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L0);
}
/* Reference picture list 1 (for B frames). */
if (slice_params->slice_type == V4L2_HEVC_SLICE_TYPE_B) {
cedrus_h265_ref_pic_list_write(dev, decode_params->dpb,
slice_params->ref_idx_l1,
slice_params->num_ref_idx_l1_active_minus1 + 1,
VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST1);
if (pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED)
cedrus_h265_pred_weight_write(dev,
pred_weight_table->delta_luma_weight_l1,
pred_weight_table->luma_offset_l1,
pred_weight_table->delta_chroma_weight_l1,
pred_weight_table->chroma_offset_l1,
slice_params->num_ref_idx_l1_active_minus1 + 1,
VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L1,
VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L1);
}
if (ctx->bit_depth > 8) {
unsigned int stride = ALIGN(ctx->dst_fmt.width / 4, 32);
reg = ctx->dst_fmt.sizeimage -
cedrus_h265_2bit_size(ctx->dst_fmt.width,
ctx->dst_fmt.height);
cedrus_write(dev, VE_DEC_H265_OFFSET_ADDR_FIRST_OUT, reg);
reg = VE_DEC_H265_10BIT_CONFIGURE_FIRST_2BIT_STRIDE(stride);
cedrus_write(dev, VE_DEC_H265_10BIT_CONFIGURE, reg);
}
/* Enable appropriate interruptions. */
cedrus_write(dev, VE_DEC_H265_CTRL, VE_DEC_H265_CTRL_IRQ_MASK);
return 0;
}
static int cedrus_h265_start(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
/* Buffer is never accessed by CPU, so we can skip kernel mapping. */
ctx->codec.h265.neighbor_info_buf =
dma_alloc_attrs(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
&ctx->codec.h265.neighbor_info_buf_addr,
GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!ctx->codec.h265.neighbor_info_buf)
return -ENOMEM;
ctx->codec.h265.entry_points_buf =
dma_alloc_coherent(dev->dev, CEDRUS_H265_ENTRY_POINTS_BUF_SIZE,
&ctx->codec.h265.entry_points_buf_addr,
GFP_KERNEL);
if (!ctx->codec.h265.entry_points_buf) {
dma_free_attrs(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
ctx->codec.h265.neighbor_info_buf,
ctx->codec.h265.neighbor_info_buf_addr,
DMA_ATTR_NO_KERNEL_MAPPING);
return -ENOMEM;
}
return 0;
}
static void cedrus_h265_stop(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
struct cedrus_buffer *buf;
struct vb2_queue *vq;
unsigned int i;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
for (i = 0; i < vq->num_buffers; i++) {
buf = vb2_to_cedrus_buffer(vb2_get_buffer(vq, i));
if (buf->codec.h265.mv_col_buf_size > 0) {
dma_free_attrs(dev->dev,
buf->codec.h265.mv_col_buf_size,
buf->codec.h265.mv_col_buf,
buf->codec.h265.mv_col_buf_dma,
DMA_ATTR_NO_KERNEL_MAPPING);
buf->codec.h265.mv_col_buf_size = 0;
}
}
dma_free_attrs(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
ctx->codec.h265.neighbor_info_buf,
ctx->codec.h265.neighbor_info_buf_addr,
DMA_ATTR_NO_KERNEL_MAPPING);
dma_free_coherent(dev->dev, CEDRUS_H265_ENTRY_POINTS_BUF_SIZE,
ctx->codec.h265.entry_points_buf,
ctx->codec.h265.entry_points_buf_addr);
}
static void cedrus_h265_trigger(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
cedrus_write(dev, VE_DEC_H265_TRIGGER, VE_DEC_H265_TRIGGER_DEC_SLICE);
}
static unsigned int cedrus_h265_extra_cap_size(struct cedrus_ctx *ctx,
struct v4l2_pix_format *pix_fmt)
{
if (ctx->bit_depth > 8)
return cedrus_h265_2bit_size(pix_fmt->width, pix_fmt->height);
return 0;
}
struct cedrus_dec_ops cedrus_dec_ops_h265 = {
.irq_clear = cedrus_h265_irq_clear,
.irq_disable = cedrus_h265_irq_disable,
.irq_status = cedrus_h265_irq_status,
.setup = cedrus_h265_setup,
.start = cedrus_h265_start,
.stop = cedrus_h265_stop,
.trigger = cedrus_h265_trigger,
.extra_cap_size = cedrus_h265_extra_cap_size,
};
| linux-master | drivers/staging/media/sunxi/cedrus/cedrus_h265.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cedrus VPU driver
*
* Copyright (C) 2016 Florent Revest <[email protected]>
* Copyright (C) 2018 Paul Kocialkowski <[email protected]>
* Copyright (C) 2018 Bootlin
*
* Based on the vim2m driver, that is:
*
* Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
* Pawel Osciak, <[email protected]>
* Marek Szyprowski, <[email protected]>
*/
#include <linux/pm_runtime.h>
#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include "cedrus.h"
#include "cedrus_video.h"
#include "cedrus_dec.h"
#include "cedrus_hw.h"
#define CEDRUS_DECODE_SRC BIT(0)
#define CEDRUS_DECODE_DST BIT(1)
#define CEDRUS_MIN_WIDTH 16U
#define CEDRUS_MIN_HEIGHT 16U
#define CEDRUS_MAX_WIDTH 4096U
#define CEDRUS_MAX_HEIGHT 2304U
static struct cedrus_format cedrus_formats[] = {
{
.pixelformat = V4L2_PIX_FMT_MPEG2_SLICE,
.directions = CEDRUS_DECODE_SRC,
.capabilities = CEDRUS_CAPABILITY_MPEG2_DEC,
},
{
.pixelformat = V4L2_PIX_FMT_H264_SLICE,
.directions = CEDRUS_DECODE_SRC,
.capabilities = CEDRUS_CAPABILITY_H264_DEC,
},
{
.pixelformat = V4L2_PIX_FMT_HEVC_SLICE,
.directions = CEDRUS_DECODE_SRC,
.capabilities = CEDRUS_CAPABILITY_H265_DEC,
},
{
.pixelformat = V4L2_PIX_FMT_VP8_FRAME,
.directions = CEDRUS_DECODE_SRC,
.capabilities = CEDRUS_CAPABILITY_VP8_DEC,
},
{
.pixelformat = V4L2_PIX_FMT_NV12,
.directions = CEDRUS_DECODE_DST,
.capabilities = CEDRUS_CAPABILITY_UNTILED,
},
{
.pixelformat = V4L2_PIX_FMT_NV12_32L32,
.directions = CEDRUS_DECODE_DST,
},
};
#define CEDRUS_FORMATS_COUNT ARRAY_SIZE(cedrus_formats)
static inline struct cedrus_ctx *cedrus_file2ctx(struct file *file)
{
return container_of(file->private_data, struct cedrus_ctx, fh);
}
static struct cedrus_format *cedrus_find_format(struct cedrus_ctx *ctx,
u32 pixelformat, u32 directions)
{
struct cedrus_format *first_valid_fmt = NULL;
struct cedrus_format *fmt;
unsigned int i;
for (i = 0; i < CEDRUS_FORMATS_COUNT; i++) {
fmt = &cedrus_formats[i];
if (!cedrus_is_capable(ctx, fmt->capabilities) ||
!(fmt->directions & directions))
continue;
if (fmt->pixelformat == pixelformat)
break;
if (!first_valid_fmt)
first_valid_fmt = fmt;
}
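/* Fall back to the first usable format when the requested one is unsupported. */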
if (i == CEDRUS_FORMATS_COUNT)
return first_valid_fmt;
return &cedrus_formats[i];
}
void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
{
unsigned int width = pix_fmt->width;
unsigned int height = pix_fmt->height;
unsigned int sizeimage = pix_fmt->sizeimage;
unsigned int bytesperline = pix_fmt->bytesperline;
pix_fmt->field = V4L2_FIELD_NONE;
/* Limit to hardware min/max. */
width = clamp(width, CEDRUS_MIN_WIDTH, CEDRUS_MAX_WIDTH);
height = clamp(height, CEDRUS_MIN_HEIGHT, CEDRUS_MAX_HEIGHT);
switch (pix_fmt->pixelformat) {
case V4L2_PIX_FMT_MPEG2_SLICE:
case V4L2_PIX_FMT_H264_SLICE:
case V4L2_PIX_FMT_HEVC_SLICE:
case V4L2_PIX_FMT_VP8_FRAME:
/* Zero bytes per line for encoded source. */
bytesperline = 0;
/* Choose some minimum size since this can't be 0 */
sizeimage = max_t(u32, SZ_1K, sizeimage);
break;
case V4L2_PIX_FMT_NV12_32L32:
/* 32-aligned stride. */
bytesperline = ALIGN(width, 32);
/* 32-aligned height. */
height = ALIGN(height, 32);
/* Luma plane size. */
sizeimage = bytesperline * height;
/* Chroma plane size. */
sizeimage += bytesperline * ALIGN(height, 64) / 2;
break;
case V4L2_PIX_FMT_NV12:
/* 16-aligned stride. */
bytesperline = ALIGN(width, 16);
/* 16-aligned height. */
height = ALIGN(height, 16);
/* Luma plane size. */
sizeimage = bytesperline * height;
/* Chroma plane size. */
sizeimage += bytesperline * height / 2;
break;
}
pix_fmt->width = width;
pix_fmt->height = height;
pix_fmt->bytesperline = bytesperline;
pix_fmt->sizeimage = sizeimage;
}
static int cedrus_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
strscpy(cap->driver, CEDRUS_NAME, sizeof(cap->driver));
strscpy(cap->card, CEDRUS_NAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"platform:%s", CEDRUS_NAME);
return 0;
}
static int cedrus_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
u32 direction)
{
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
unsigned int i, index;
/* Index among formats that match the requested direction. */
index = 0;
for (i = 0; i < CEDRUS_FORMATS_COUNT; i++) {
if (!cedrus_is_capable(ctx, cedrus_formats[i].capabilities))
continue;
if (!(cedrus_formats[i].directions & direction))
continue;
if (index == f->index)
break;
index++;
}
/* Matched format. */
if (i < CEDRUS_FORMATS_COUNT) {
f->pixelformat = cedrus_formats[i].pixelformat;
return 0;
}
return -EINVAL;
}
static int cedrus_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
return cedrus_enum_fmt(file, f, CEDRUS_DECODE_DST);
}
static int cedrus_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
return cedrus_enum_fmt(file, f, CEDRUS_DECODE_SRC);
}
static int cedrus_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
f->fmt.pix = ctx->dst_fmt;
return 0;
}
static int cedrus_g_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
f->fmt.pix = ctx->src_fmt;
return 0;
}
static int cedrus_try_fmt_vid_cap_p(struct cedrus_ctx *ctx,
struct v4l2_pix_format *pix_fmt)
{
struct cedrus_format *fmt =
cedrus_find_format(ctx, pix_fmt->pixelformat,
CEDRUS_DECODE_DST);
if (!fmt)
return -EINVAL;
pix_fmt->pixelformat = fmt->pixelformat;
pix_fmt->width = ctx->src_fmt.width;
pix_fmt->height = ctx->src_fmt.height;
cedrus_prepare_format(pix_fmt);
if (ctx->current_codec->extra_cap_size)
pix_fmt->sizeimage +=
ctx->current_codec->extra_cap_size(ctx, pix_fmt);
return 0;
}
static int cedrus_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
return cedrus_try_fmt_vid_cap_p(cedrus_file2ctx(file), &f->fmt.pix);
}
static int cedrus_try_fmt_vid_out_p(struct cedrus_ctx *ctx,
struct v4l2_pix_format *pix_fmt)
{
struct cedrus_format *fmt =
cedrus_find_format(ctx, pix_fmt->pixelformat,
CEDRUS_DECODE_SRC);
if (!fmt)
return -EINVAL;
pix_fmt->pixelformat = fmt->pixelformat;
cedrus_prepare_format(pix_fmt);
return 0;
}
static int cedrus_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
return cedrus_try_fmt_vid_out_p(cedrus_file2ctx(file), &f->fmt.pix);
}
static int cedrus_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
struct vb2_queue *vq;
int ret;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (vb2_is_busy(vq))
return -EBUSY;
ret = cedrus_try_fmt_vid_cap(file, priv, f);
if (ret)
return ret;
ctx->dst_fmt = f->fmt.pix;
return 0;
}
void cedrus_reset_cap_format(struct cedrus_ctx *ctx)
{
ctx->dst_fmt.pixelformat = 0;
cedrus_try_fmt_vid_cap_p(ctx, &ctx->dst_fmt);
}
static int cedrus_s_fmt_vid_out_p(struct cedrus_ctx *ctx,
struct v4l2_pix_format *pix_fmt)
{
struct vb2_queue *vq;
int ret;
ret = cedrus_try_fmt_vid_out_p(ctx, pix_fmt);
if (ret)
return ret;
ctx->src_fmt = *pix_fmt;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
switch (ctx->src_fmt.pixelformat) {
case V4L2_PIX_FMT_H264_SLICE:
case V4L2_PIX_FMT_HEVC_SLICE:
vq->subsystem_flags |=
VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
break;
default:
vq->subsystem_flags &=
~VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
break;
}
switch (ctx->src_fmt.pixelformat) {
case V4L2_PIX_FMT_MPEG2_SLICE:
ctx->current_codec = &cedrus_dec_ops_mpeg2;
break;
case V4L2_PIX_FMT_H264_SLICE:
ctx->current_codec = &cedrus_dec_ops_h264;
break;
case V4L2_PIX_FMT_HEVC_SLICE:
ctx->current_codec = &cedrus_dec_ops_h265;
break;
case V4L2_PIX_FMT_VP8_FRAME:
ctx->current_codec = &cedrus_dec_ops_vp8;
break;
}
/* Propagate format information to capture. */
ctx->dst_fmt.colorspace = pix_fmt->colorspace;
ctx->dst_fmt.xfer_func = pix_fmt->xfer_func;
ctx->dst_fmt.ycbcr_enc = pix_fmt->ycbcr_enc;
ctx->dst_fmt.quantization = pix_fmt->quantization;
cedrus_reset_cap_format(ctx);
return 0;
}
void cedrus_reset_out_format(struct cedrus_ctx *ctx)
{
ctx->src_fmt.pixelformat = 0;
cedrus_s_fmt_vid_out_p(ctx, &ctx->src_fmt);
}
static int cedrus_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
struct vb2_queue *vq;
struct vb2_queue *peer_vq;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
/*
* In order to support dynamic resolution changes, the decoder admits a
* resolution change as long as the pixelformat remains the same. This
* can't be done while streaming.
*/
if (vb2_is_streaming(vq) || (vb2_is_busy(vq) &&
f->fmt.pix.pixelformat != ctx->src_fmt.pixelformat))
return -EBUSY;
/*
* Since a format change on the OUTPUT queue will reset the CAPTURE queue,
* we can't allow it when the CAPTURE queue has buffers allocated.
*/
peer_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
V4L2_BUF_TYPE_VIDEO_CAPTURE);
if (vb2_is_busy(peer_vq))
return -EBUSY;
return cedrus_s_fmt_vid_out_p(cedrus_file2ctx(file), &f->fmt.pix);
}
const struct v4l2_ioctl_ops cedrus_ioctl_ops = {
.vidioc_querycap = cedrus_querycap,
.vidioc_enum_fmt_vid_cap = cedrus_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = cedrus_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = cedrus_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = cedrus_s_fmt_vid_cap,
.vidioc_enum_fmt_vid_out = cedrus_enum_fmt_vid_out,
.vidioc_g_fmt_vid_out = cedrus_g_fmt_vid_out,
.vidioc_try_fmt_vid_out = cedrus_try_fmt_vid_out,
.vidioc_s_fmt_vid_out = cedrus_s_fmt_vid_out,
.vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
.vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
.vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
.vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_try_decoder_cmd = v4l2_m2m_ioctl_stateless_try_decoder_cmd,
.vidioc_decoder_cmd = v4l2_m2m_ioctl_stateless_decoder_cmd,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static int cedrus_queue_setup(struct vb2_queue *vq, unsigned int *nbufs,
unsigned int *nplanes, unsigned int sizes[],
struct device *alloc_devs[])
{
struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
struct v4l2_pix_format *pix_fmt;
if (V4L2_TYPE_IS_OUTPUT(vq->type))
pix_fmt = &ctx->src_fmt;
else
pix_fmt = &ctx->dst_fmt;
if (*nplanes) {
if (sizes[0] < pix_fmt->sizeimage)
return -EINVAL;
} else {
sizes[0] = pix_fmt->sizeimage;
*nplanes = 1;
}
return 0;
}
static void cedrus_queue_cleanup(struct vb2_queue *vq, u32 state)
{
struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
struct vb2_v4l2_buffer *vbuf;
for (;;) {
if (V4L2_TYPE_IS_OUTPUT(vq->type))
vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
else
vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
if (!vbuf)
return;
v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
&ctx->hdl);
v4l2_m2m_buf_done(vbuf, state);
}
}
static int cedrus_buf_out_validate(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
vbuf->field = V4L2_FIELD_NONE;
return 0;
}
static int cedrus_buf_prepare(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
struct v4l2_pix_format *pix_fmt;
if (V4L2_TYPE_IS_OUTPUT(vq->type))
pix_fmt = &ctx->src_fmt;
else
pix_fmt = &ctx->dst_fmt;
if (vb2_plane_size(vb, 0) < pix_fmt->sizeimage)
return -EINVAL;
/*
* The buffer's bytesused must be written by the driver for CAPTURE buffers
* (for OUTPUT buffers, if userspace passes 0 as bytesused, the v4l2 core
* sets it to the buffer length).
*/
if (V4L2_TYPE_IS_CAPTURE(vq->type))
vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage);
return 0;
}
static int cedrus_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
struct cedrus_dev *dev = ctx->dev;
int ret = 0;
if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
ret = pm_runtime_resume_and_get(dev->dev);
if (ret < 0)
goto err_cleanup;
if (ctx->current_codec->start) {
ret = ctx->current_codec->start(ctx);
if (ret)
goto err_pm;
}
}
return 0;
err_pm:
pm_runtime_put(dev->dev);
err_cleanup:
cedrus_queue_cleanup(vq, VB2_BUF_STATE_QUEUED);
return ret;
}
static void cedrus_stop_streaming(struct vb2_queue *vq)
{
struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
struct cedrus_dev *dev = ctx->dev;
if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
if (ctx->current_codec->stop)
ctx->current_codec->stop(ctx);
pm_runtime_put(dev->dev);
}
cedrus_queue_cleanup(vq, VB2_BUF_STATE_ERROR);
}
static void cedrus_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cedrus_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
static void cedrus_buf_request_complete(struct vb2_buffer *vb)
{
struct cedrus_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->hdl);
}
static const struct vb2_ops cedrus_qops = {
.queue_setup = cedrus_queue_setup,
.buf_prepare = cedrus_buf_prepare,
.buf_queue = cedrus_buf_queue,
.buf_out_validate = cedrus_buf_out_validate,
.buf_request_complete = cedrus_buf_request_complete,
.start_streaming = cedrus_start_streaming,
.stop_streaming = cedrus_stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq)
{
struct cedrus_ctx *ctx = priv;
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct cedrus_buffer);
src_vq->ops = &cedrus_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->dev->dev_mutex;
src_vq->dev = ctx->dev->dev;
src_vq->supports_requests = true;
src_vq->requires_requests = true;
ret = vb2_queue_init(src_vq);
if (ret)
return ret;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct cedrus_buffer);
dst_vq->ops = &cedrus_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->dev->dev_mutex;
dst_vq->dev = ctx->dev->dev;
return vb2_queue_init(dst_vq);
}
| linux-master | drivers/staging/media/sunxi/cedrus/cedrus_video.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cedrus VPU driver
*
* Copyright (C) 2016 Florent Revest <[email protected]>
* Copyright (C) 2018 Paul Kocialkowski <[email protected]>
* Copyright (C) 2018 Bootlin
*
* Based on the vim2m driver, that is:
*
* Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
* Pawel Osciak, <[email protected]>
* Marek Szyprowski, <[email protected]>
*/
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include "cedrus.h"
#include "cedrus_dec.h"
#include "cedrus_hw.h"
void cedrus_device_run(void *priv)
{
struct cedrus_ctx *ctx = priv;
struct cedrus_dev *dev = ctx->dev;
struct cedrus_run run = {};
struct media_request *src_req;
int error;
run.src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
run.dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
/* Apply request(s) controls if needed. */
src_req = run.src->vb2_buf.req_obj.req;
if (src_req)
v4l2_ctrl_request_setup(src_req, &ctx->hdl);
switch (ctx->src_fmt.pixelformat) {
case V4L2_PIX_FMT_MPEG2_SLICE:
run.mpeg2.sequence = cedrus_find_control_data(ctx,
V4L2_CID_STATELESS_MPEG2_SEQUENCE);
run.mpeg2.picture = cedrus_find_control_data(ctx,
V4L2_CID_STATELESS_MPEG2_PICTURE);
run.mpeg2.quantisation = cedrus_find_control_data(ctx,
V4L2_CID_STATELESS_MPEG2_QUANTISATION);
break;
case V4L2_PIX_FMT_H264_SLICE:
run.h264.decode_params = cedrus_find_control_data(ctx,
V4L2_CID_STATELESS_H264_DECODE_PARAMS);
run.h264.pps = cedrus_find_control_data(ctx,
V4L2_CID_STATELESS_H264_PPS);
run.h264.scaling_matrix = cedrus_find_control_data(ctx,
V4L2_CID_STATELESS_H264_SCALING_MATRIX);
run.h264.slice_params = cedrus_find_control_data(ctx,
V4L2_CID_STATELESS_H264_SLICE_PARAMS);
run.h264.sps = cedrus_find_control_data(ctx,
V4L2_CID_STATELESS_H264_SPS);
run.h264.pred_weights = cedrus_find_control_data(ctx,
V4L2_CID_STATELESS_H264_PRED_WEIGHTS);
break;
case V4L2_PIX_FMT_HEVC_SLICE:
run.h265.sps = cedrus_find_control_data(ctx,
V4L2_CID_STATELESS_HEVC_SPS);
run.h265.pps = cedrus_find_control_data(ctx,
V4L2_CID_STATELESS_HEVC_PPS);
run.h265.slice_params = cedrus_find_control_data(ctx,
V4L2_CID_STATELESS_HEVC_SLICE_PARAMS);
run.h265.decode_params = cedrus_find_control_data(ctx,
V4L2_CID_STATELESS_HEVC_DECODE_PARAMS);
run.h265.scaling_matrix = cedrus_find_control_data(ctx,
V4L2_CID_STATELESS_HEVC_SCALING_MATRIX);
run.h265.entry_points = cedrus_find_control_data(ctx,
V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS);
run.h265.entry_points_count = cedrus_get_num_of_controls(ctx,
V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS);
break;
case V4L2_PIX_FMT_VP8_FRAME:
run.vp8.frame_params = cedrus_find_control_data(ctx,
V4L2_CID_STATELESS_VP8_FRAME);
break;
default:
break;
}
v4l2_m2m_buf_copy_metadata(run.src, run.dst, true);
cedrus_dst_format_set(dev, &ctx->dst_fmt);
error = ctx->current_codec->setup(ctx, &run);
if (error)
v4l2_err(&ctx->dev->v4l2_dev,
"Failed to setup decoding job: %d\n", error);
/* Complete request(s) controls if needed. */
if (src_req)
v4l2_ctrl_request_complete(src_req, &ctx->hdl);
/* Trigger decoding if setup went well, bail out otherwise. */
if (!error) {
/* Start the watchdog timer. */
schedule_delayed_work(&dev->watchdog_work,
msecs_to_jiffies(2000));
ctx->current_codec->trigger(ctx);
} else {
v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev,
ctx->fh.m2m_ctx,
VB2_BUF_STATE_ERROR);
}
}
| linux-master | drivers/staging/media/sunxi/cedrus/cedrus_dec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cedrus VPU driver
*
* Copyright (C) 2016 Florent Revest <[email protected]>
* Copyright (C) 2018 Paul Kocialkowski <[email protected]>
* Copyright (C) 2018 Bootlin
*
* Based on the vim2m driver, that is:
*
* Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
* Pawel Osciak, <[email protected]>
* Marek Szyprowski, <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-mem2mem.h>
#include "cedrus.h"
#include "cedrus_video.h"
#include "cedrus_dec.h"
#include "cedrus_hw.h"
static int cedrus_try_ctrl(struct v4l2_ctrl *ctrl)
{
if (ctrl->id == V4L2_CID_STATELESS_H264_SPS) {
const struct v4l2_ctrl_h264_sps *sps = ctrl->p_new.p_h264_sps;
if (sps->chroma_format_idc != 1)
/* Only 4:2:0 is supported */
return -EINVAL;
if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
/* Luma and chroma bit depth mismatch */
return -EINVAL;
if (sps->bit_depth_luma_minus8 != 0)
/* Only 8-bit is supported */
return -EINVAL;
} else if (ctrl->id == V4L2_CID_STATELESS_HEVC_SPS) {
const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;
struct cedrus_ctx *ctx = container_of(ctrl->handler, struct cedrus_ctx, hdl);
unsigned int bit_depth, max_depth;
struct vb2_queue *vq;
if (sps->chroma_format_idc != 1)
/* Only 4:2:0 is supported */
return -EINVAL;
bit_depth = max(sps->bit_depth_luma_minus8,
sps->bit_depth_chroma_minus8) + 8;
if (cedrus_is_capable(ctx, CEDRUS_CAPABILITY_H265_10_DEC))
max_depth = 10;
else
max_depth = 8;
if (bit_depth > max_depth)
return -EINVAL;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
V4L2_BUF_TYPE_VIDEO_CAPTURE);
/*
* Bit depth can't be higher than currently set once
* buffers are allocated.
*/
if (vb2_is_busy(vq)) {
if (ctx->bit_depth < bit_depth)
return -EINVAL;
} else {
ctx->bit_depth = bit_depth;
cedrus_reset_cap_format(ctx);
}
}
return 0;
}
static const struct v4l2_ctrl_ops cedrus_ctrl_ops = {
.try_ctrl = cedrus_try_ctrl,
};
static const struct cedrus_control cedrus_controls[] = {
{
.cfg = {
.id = V4L2_CID_STATELESS_MPEG2_SEQUENCE,
},
.capabilities = CEDRUS_CAPABILITY_MPEG2_DEC,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_MPEG2_PICTURE,
},
.capabilities = CEDRUS_CAPABILITY_MPEG2_DEC,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_MPEG2_QUANTISATION,
},
.capabilities = CEDRUS_CAPABILITY_MPEG2_DEC,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
},
.capabilities = CEDRUS_CAPABILITY_H264_DEC,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_H264_SLICE_PARAMS,
},
.capabilities = CEDRUS_CAPABILITY_H264_DEC,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_H264_SPS,
.ops = &cedrus_ctrl_ops,
},
.capabilities = CEDRUS_CAPABILITY_H264_DEC,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_H264_PPS,
},
.capabilities = CEDRUS_CAPABILITY_H264_DEC,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_H264_SCALING_MATRIX,
},
.capabilities = CEDRUS_CAPABILITY_H264_DEC,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_H264_PRED_WEIGHTS,
},
.capabilities = CEDRUS_CAPABILITY_H264_DEC,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_H264_DECODE_MODE,
.max = V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED,
.def = V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED,
},
.capabilities = CEDRUS_CAPABILITY_H264_DEC,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_H264_START_CODE,
.max = V4L2_STATELESS_H264_START_CODE_NONE,
.def = V4L2_STATELESS_H264_START_CODE_NONE,
},
.capabilities = CEDRUS_CAPABILITY_H264_DEC,
},
/*
* We only expose information about the supported profiles, not
* levels, as it is not clear what is supported for each
* hardware/core version.
* In any case, TRY/S_FMT will clamp the format resolution
* to the maximum supported.
*/
{
.cfg = {
.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
.min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
.def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
.max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
.menu_skip_mask =
BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
},
.capabilities = CEDRUS_CAPABILITY_H264_DEC,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_HEVC_SPS,
.ops = &cedrus_ctrl_ops,
},
.capabilities = CEDRUS_CAPABILITY_H265_DEC,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_HEVC_PPS,
},
.capabilities = CEDRUS_CAPABILITY_H265_DEC,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_HEVC_SLICE_PARAMS,
/* The driver can only handle 1 entry per slice for now */
.dims = { 1 },
},
.capabilities = CEDRUS_CAPABILITY_H265_DEC,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_HEVC_SCALING_MATRIX,
},
.capabilities = CEDRUS_CAPABILITY_H265_DEC,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS,
/* maximum 256 entry point offsets per slice */
.dims = { 256 },
.max = 0xffffffff,
.step = 1,
},
.capabilities = CEDRUS_CAPABILITY_H265_DEC,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_HEVC_DECODE_MODE,
.max = V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED,
.def = V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED,
},
.capabilities = CEDRUS_CAPABILITY_H265_DEC,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_HEVC_START_CODE,
.max = V4L2_STATELESS_HEVC_START_CODE_NONE,
.def = V4L2_STATELESS_HEVC_START_CODE_NONE,
},
.capabilities = CEDRUS_CAPABILITY_H265_DEC,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_VP8_FRAME,
},
.capabilities = CEDRUS_CAPABILITY_VP8_DEC,
},
{
.cfg = {
.id = V4L2_CID_STATELESS_HEVC_DECODE_PARAMS,
},
.capabilities = CEDRUS_CAPABILITY_H265_DEC,
},
};
#define CEDRUS_CONTROLS_COUNT ARRAY_SIZE(cedrus_controls)
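/*
* Look up a control by ID in the context's NULL-terminated control array and
* return a pointer to its current payload, or NULL if it is not present.
*/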
void *cedrus_find_control_data(struct cedrus_ctx *ctx, u32 id)
{
unsigned int i;
for (i = 0; ctx->ctrls[i]; i++)
if (ctx->ctrls[i]->id == id)
return ctx->ctrls[i]->p_cur.p;
return NULL;
}
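/* Return the number of elements of the control with the given ID, or 0 if it is not present. */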
u32 cedrus_get_num_of_controls(struct cedrus_ctx *ctx, u32 id)
{
unsigned int i;
for (i = 0; ctx->ctrls[i]; i++)
if (ctx->ctrls[i]->id == id)
return ctx->ctrls[i]->elems;
return 0;
}
static int cedrus_init_ctrls(struct cedrus_dev *dev, struct cedrus_ctx *ctx)
{
struct v4l2_ctrl_handler *hdl = &ctx->hdl;
struct v4l2_ctrl *ctrl;
unsigned int ctrl_size;
unsigned int i, j;
v4l2_ctrl_handler_init(hdl, CEDRUS_CONTROLS_COUNT);
if (hdl->error) {
v4l2_err(&dev->v4l2_dev,
"Failed to initialize control handler: %d\n",
hdl->error);
return hdl->error;
}
/* Allocate one extra pointer so the array stays NULL-terminated. */
ctrl_size = sizeof(ctrl) * (CEDRUS_CONTROLS_COUNT + 1);
ctx->ctrls = kzalloc(ctrl_size, GFP_KERNEL);
if (!ctx->ctrls)
return -ENOMEM;
j = 0;
for (i = 0; i < CEDRUS_CONTROLS_COUNT; i++) {
if (!cedrus_is_capable(ctx, cedrus_controls[i].capabilities))
continue;
ctrl = v4l2_ctrl_new_custom(hdl, &cedrus_controls[i].cfg,
NULL);
if (hdl->error) {
v4l2_err(&dev->v4l2_dev,
"Failed to create %s control: %d\n",
v4l2_ctrl_get_name(cedrus_controls[i].cfg.id),
hdl->error);
v4l2_ctrl_handler_free(hdl);
kfree(ctx->ctrls);
ctx->ctrls = NULL;
return hdl->error;
}
ctx->ctrls[j++] = ctrl;
}
ctx->fh.ctrl_handler = hdl;
v4l2_ctrl_handler_setup(hdl);
return 0;
}
static int cedrus_request_validate(struct media_request *req)
{
struct media_request_object *obj;
struct cedrus_ctx *ctx = NULL;
unsigned int count;
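/* Find the driver context from the first buffer object attached to the request. */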
list_for_each_entry(obj, &req->objects, list) {
struct vb2_buffer *vb;
if (vb2_request_object_is_buffer(obj)) {
vb = container_of(obj, struct vb2_buffer, req_obj);
ctx = vb2_get_drv_priv(vb->vb2_queue);
break;
}
}
if (!ctx)
return -ENOENT;
count = vb2_request_buffer_cnt(req);
if (!count) {
v4l2_info(&ctx->dev->v4l2_dev,
"No buffer was provided with the request\n");
return -ENOENT;
} else if (count > 1) {
v4l2_info(&ctx->dev->v4l2_dev,
"More than one buffer was provided with the request\n");
return -EINVAL;
}
return vb2_request_validate(req);
}
static int cedrus_open(struct file *file)
{
struct cedrus_dev *dev = video_drvdata(file);
struct cedrus_ctx *ctx = NULL;
int ret;
if (mutex_lock_interruptible(&dev->dev_mutex))
return -ERESTARTSYS;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
mutex_unlock(&dev->dev_mutex);
return -ENOMEM;
}
v4l2_fh_init(&ctx->fh, video_devdata(file));
file->private_data = &ctx->fh;
ctx->dev = dev;
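/* Default to 8-bit output; the HEVC SPS control may raise this later. */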
ctx->bit_depth = 8;
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
&cedrus_queue_init);
if (IS_ERR(ctx->fh.m2m_ctx)) {
ret = PTR_ERR(ctx->fh.m2m_ctx);
goto err_free;
}
cedrus_reset_out_format(ctx);
ret = cedrus_init_ctrls(dev, ctx);
if (ret)
goto err_m2m_release;
v4l2_fh_add(&ctx->fh);
mutex_unlock(&dev->dev_mutex);
return 0;
err_m2m_release:
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
err_free:
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
return ret;
}
static int cedrus_release(struct file *file)
{
struct cedrus_dev *dev = video_drvdata(file);
struct cedrus_ctx *ctx = container_of(file->private_data,
struct cedrus_ctx, fh);
mutex_lock(&dev->dev_mutex);
v4l2_fh_del(&ctx->fh);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->hdl);
kfree(ctx->ctrls);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
return 0;
}
static const struct v4l2_file_operations cedrus_fops = {
.owner = THIS_MODULE,
.open = cedrus_open,
.release = cedrus_release,
.poll = v4l2_m2m_fop_poll,
.unlocked_ioctl = video_ioctl2,
.mmap = v4l2_m2m_fop_mmap,
};
static const struct video_device cedrus_video_device = {
.name = CEDRUS_NAME,
.vfl_dir = VFL_DIR_M2M,
.fops = &cedrus_fops,
.ioctl_ops = &cedrus_ioctl_ops,
.minor = -1,
.release = video_device_release_empty,
.device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
};
static const struct v4l2_m2m_ops cedrus_m2m_ops = {
.device_run = cedrus_device_run,
};
static const struct media_device_ops cedrus_m2m_media_ops = {
.req_validate = cedrus_request_validate,
.req_queue = v4l2_m2m_request_queue,
};
static int cedrus_probe(struct platform_device *pdev)
{
struct cedrus_dev *dev;
struct video_device *vfd;
int ret;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
platform_set_drvdata(pdev, dev);
dev->vfd = cedrus_video_device;
dev->dev = &pdev->dev;
dev->pdev = pdev;
ret = cedrus_hw_probe(dev);
if (ret) {
dev_err(&pdev->dev, "Failed to probe hardware\n");
return ret;
}
mutex_init(&dev->dev_mutex);
INIT_DELAYED_WORK(&dev->watchdog_work, cedrus_watchdog);
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret) {
dev_err(&pdev->dev, "Failed to register V4L2 device\n");
return ret;
}
vfd = &dev->vfd;
vfd->lock = &dev->dev_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
snprintf(vfd->name, sizeof(vfd->name), "%s", cedrus_video_device.name);
video_set_drvdata(vfd, dev);
dev->m2m_dev = v4l2_m2m_init(&cedrus_m2m_ops);
if (IS_ERR(dev->m2m_dev)) {
v4l2_err(&dev->v4l2_dev,
"Failed to initialize V4L2 M2M device\n");
ret = PTR_ERR(dev->m2m_dev);
goto err_v4l2;
}
dev->mdev.dev = &pdev->dev;
strscpy(dev->mdev.model, CEDRUS_NAME, sizeof(dev->mdev.model));
strscpy(dev->mdev.bus_info, "platform:" CEDRUS_NAME,
sizeof(dev->mdev.bus_info));
media_device_init(&dev->mdev);
dev->mdev.ops = &cedrus_m2m_media_ops;
dev->v4l2_dev.mdev = &dev->mdev;
ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
goto err_m2m;
}
v4l2_info(&dev->v4l2_dev,
"Device registered as /dev/video%d\n", vfd->num);
ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd,
MEDIA_ENT_F_PROC_VIDEO_DECODER);
if (ret) {
v4l2_err(&dev->v4l2_dev,
"Failed to initialize V4L2 M2M media controller\n");
goto err_video;
}
ret = media_device_register(&dev->mdev);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register media device\n");
goto err_m2m_mc;
}
return 0;
err_m2m_mc:
v4l2_m2m_unregister_media_controller(dev->m2m_dev);
err_video:
video_unregister_device(&dev->vfd);
err_m2m:
v4l2_m2m_release(dev->m2m_dev);
err_v4l2:
v4l2_device_unregister(&dev->v4l2_dev);
return ret;
}
static void cedrus_remove(struct platform_device *pdev)
{
struct cedrus_dev *dev = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&dev->watchdog_work);
if (media_devnode_is_registered(dev->mdev.devnode)) {
media_device_unregister(&dev->mdev);
v4l2_m2m_unregister_media_controller(dev->m2m_dev);
media_device_cleanup(&dev->mdev);
}
v4l2_m2m_release(dev->m2m_dev);
video_unregister_device(&dev->vfd);
v4l2_device_unregister(&dev->v4l2_dev);
cedrus_hw_remove(dev);
}
static const struct cedrus_variant sun4i_a10_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_MPEG2_DEC |
CEDRUS_CAPABILITY_H264_DEC |
CEDRUS_CAPABILITY_VP8_DEC,
.mod_rate = 320000000,
};
static const struct cedrus_variant sun5i_a13_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_MPEG2_DEC |
CEDRUS_CAPABILITY_H264_DEC |
CEDRUS_CAPABILITY_VP8_DEC,
.mod_rate = 320000000,
};
static const struct cedrus_variant sun7i_a20_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_MPEG2_DEC |
CEDRUS_CAPABILITY_H264_DEC |
CEDRUS_CAPABILITY_VP8_DEC,
.mod_rate = 320000000,
};
static const struct cedrus_variant sun8i_a33_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED |
CEDRUS_CAPABILITY_MPEG2_DEC |
CEDRUS_CAPABILITY_H264_DEC |
CEDRUS_CAPABILITY_VP8_DEC,
.mod_rate = 320000000,
};
static const struct cedrus_variant sun8i_h3_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED |
CEDRUS_CAPABILITY_MPEG2_DEC |
CEDRUS_CAPABILITY_H264_DEC |
CEDRUS_CAPABILITY_H265_DEC |
CEDRUS_CAPABILITY_VP8_DEC,
.mod_rate = 402000000,
};
static const struct cedrus_variant sun8i_v3s_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED |
CEDRUS_CAPABILITY_H264_DEC,
.mod_rate = 297000000,
};
static const struct cedrus_variant sun8i_r40_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED |
CEDRUS_CAPABILITY_MPEG2_DEC |
CEDRUS_CAPABILITY_H264_DEC |
CEDRUS_CAPABILITY_VP8_DEC,
.mod_rate = 297000000,
};
static const struct cedrus_variant sun20i_d1_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED |
CEDRUS_CAPABILITY_MPEG2_DEC |
CEDRUS_CAPABILITY_H264_DEC |
CEDRUS_CAPABILITY_H265_DEC,
.mod_rate = 432000000,
};
static const struct cedrus_variant sun50i_a64_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED |
CEDRUS_CAPABILITY_MPEG2_DEC |
CEDRUS_CAPABILITY_H264_DEC |
CEDRUS_CAPABILITY_H265_DEC |
CEDRUS_CAPABILITY_VP8_DEC,
.mod_rate = 402000000,
};
static const struct cedrus_variant sun50i_h5_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED |
CEDRUS_CAPABILITY_MPEG2_DEC |
CEDRUS_CAPABILITY_H264_DEC |
CEDRUS_CAPABILITY_H265_DEC |
CEDRUS_CAPABILITY_VP8_DEC,
.mod_rate = 402000000,
};
static const struct cedrus_variant sun50i_h6_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED |
CEDRUS_CAPABILITY_MPEG2_DEC |
CEDRUS_CAPABILITY_H264_DEC |
CEDRUS_CAPABILITY_H265_DEC |
CEDRUS_CAPABILITY_H265_10_DEC |
CEDRUS_CAPABILITY_VP8_DEC,
.mod_rate = 600000000,
};
static const struct of_device_id cedrus_dt_match[] = {
{
.compatible = "allwinner,sun4i-a10-video-engine",
.data = &sun4i_a10_cedrus_variant,
},
{
.compatible = "allwinner,sun5i-a13-video-engine",
.data = &sun5i_a13_cedrus_variant,
},
{
.compatible = "allwinner,sun7i-a20-video-engine",
.data = &sun7i_a20_cedrus_variant,
},
{
.compatible = "allwinner,sun8i-a33-video-engine",
.data = &sun8i_a33_cedrus_variant,
},
{
.compatible = "allwinner,sun8i-h3-video-engine",
.data = &sun8i_h3_cedrus_variant,
},
{
.compatible = "allwinner,sun8i-v3s-video-engine",
.data = &sun8i_v3s_cedrus_variant,
},
{
.compatible = "allwinner,sun8i-r40-video-engine",
.data = &sun8i_r40_cedrus_variant,
},
{
.compatible = "allwinner,sun20i-d1-video-engine",
.data = &sun20i_d1_cedrus_variant,
},
{
.compatible = "allwinner,sun50i-a64-video-engine",
.data = &sun50i_a64_cedrus_variant,
},
{
.compatible = "allwinner,sun50i-h5-video-engine",
.data = &sun50i_h5_cedrus_variant,
},
{
.compatible = "allwinner,sun50i-h6-video-engine",
.data = &sun50i_h6_cedrus_variant,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, cedrus_dt_match);
static const struct dev_pm_ops cedrus_dev_pm_ops = {
SET_RUNTIME_PM_OPS(cedrus_hw_suspend,
cedrus_hw_resume, NULL)
};
static struct platform_driver cedrus_driver = {
.probe = cedrus_probe,
.remove_new = cedrus_remove,
.driver = {
.name = CEDRUS_NAME,
.of_match_table = of_match_ptr(cedrus_dt_match),
.pm = &cedrus_dev_pm_ops,
},
};
module_platform_driver(cedrus_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Florent Revest <[email protected]>");
MODULE_AUTHOR("Paul Kocialkowski <[email protected]>");
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_DESCRIPTION("Cedrus VPU driver");
| linux-master | drivers/staging/media/sunxi/cedrus/cedrus.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cedrus VPU driver
*
* Copyright (c) 2013 Jens Kuske <[email protected]>
* Copyright (c) 2018 Bootlin
*/
#include <linux/delay.h>
#include <linux/types.h>
#include <media/videobuf2-dma-contig.h>
#include "cedrus.h"
#include "cedrus_hw.h"
#include "cedrus_regs.h"
enum cedrus_h264_sram_off {
CEDRUS_SRAM_H264_PRED_WEIGHT_TABLE = 0x000,
CEDRUS_SRAM_H264_FRAMEBUFFER_LIST = 0x100,
CEDRUS_SRAM_H264_REF_LIST_0 = 0x190,
CEDRUS_SRAM_H264_REF_LIST_1 = 0x199,
CEDRUS_SRAM_H264_SCALING_LIST_8x8_0 = 0x200,
CEDRUS_SRAM_H264_SCALING_LIST_8x8_1 = 0x210,
CEDRUS_SRAM_H264_SCALING_LIST_4x4 = 0x220,
};
struct cedrus_h264_sram_ref_pic {
__le32 top_field_order_cnt;
__le32 bottom_field_order_cnt;
__le32 frame_info;
__le32 luma_ptr;
__le32 chroma_ptr;
__le32 mv_col_top_ptr;
__le32 mv_col_bot_ptr;
__le32 reserved;
} __packed;
#define CEDRUS_H264_FRAME_NUM 18
#define CEDRUS_NEIGHBOR_INFO_BUF_SIZE (32 * SZ_1K)
#define CEDRUS_MIN_PIC_INFO_BUF_SIZE (130 * SZ_1K)
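/*
* Note: the SRAM offsets above are programmed into the port offset register
* shifted left by two, i.e. they are expressed in 32-bit words.
*/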
static void cedrus_h264_write_sram(struct cedrus_dev *dev,
enum cedrus_h264_sram_off off,
const void *data, size_t len)
{
const u32 *buffer = data;
size_t count = DIV_ROUND_UP(len, 4);
cedrus_write(dev, VE_AVC_SRAM_PORT_OFFSET, off << 2);
while (count--)
cedrus_write(dev, VE_AVC_SRAM_PORT_DATA, *buffer++);
}
static dma_addr_t cedrus_h264_mv_col_buf_addr(struct cedrus_buffer *buf,
unsigned int field)
{
dma_addr_t addr = buf->codec.h264.mv_col_buf_dma;
/* Adjust for the field */
addr += field * buf->codec.h264.mv_col_buf_size / 2;
return addr;
}
static void cedrus_fill_ref_pic(struct cedrus_ctx *ctx,
struct cedrus_buffer *buf,
unsigned int top_field_order_cnt,
unsigned int bottom_field_order_cnt,
struct cedrus_h264_sram_ref_pic *pic)
{
struct vb2_buffer *vbuf = &buf->m2m_buf.vb.vb2_buf;
pic->top_field_order_cnt = cpu_to_le32(top_field_order_cnt);
pic->bottom_field_order_cnt = cpu_to_le32(bottom_field_order_cnt);
pic->frame_info = cpu_to_le32(buf->codec.h264.pic_type << 8);
pic->luma_ptr = cpu_to_le32(cedrus_buf_addr(vbuf, &ctx->dst_fmt, 0));
pic->chroma_ptr = cpu_to_le32(cedrus_buf_addr(vbuf, &ctx->dst_fmt, 1));
pic->mv_col_top_ptr = cpu_to_le32(cedrus_h264_mv_col_buf_addr(buf, 0));
pic->mv_col_bot_ptr = cpu_to_le32(cedrus_h264_mv_col_buf_addr(buf, 1));
}
static int cedrus_write_frame_list(struct cedrus_ctx *ctx,
struct cedrus_run *run)
{
struct cedrus_h264_sram_ref_pic pic_list[CEDRUS_H264_FRAME_NUM];
const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
const struct v4l2_ctrl_h264_sps *sps = run->h264.sps;
struct vb2_queue *cap_q;
struct cedrus_buffer *output_buf;
struct cedrus_dev *dev = ctx->dev;
unsigned long used_dpbs = 0;
unsigned int position;
int output = -1;
unsigned int i;
cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
memset(pic_list, 0, sizeof(pic_list));
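/*
* Walk the DPB and fill the hardware frame buffer list: every valid entry is
* mapped to its capture buffer, and the slot matching the destination frame
* (if any) is remembered as the output index.
*/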
for (i = 0; i < ARRAY_SIZE(decode->dpb); i++) {
const struct v4l2_h264_dpb_entry *dpb = &decode->dpb[i];
struct cedrus_buffer *cedrus_buf;
struct vb2_buffer *buf;
if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_VALID))
continue;
buf = vb2_find_buffer(cap_q, dpb->reference_ts);
if (!buf)
continue;
cedrus_buf = vb2_to_cedrus_buffer(buf);
position = cedrus_buf->codec.h264.position;
used_dpbs |= BIT(position);
if (run->dst->vb2_buf.timestamp == dpb->reference_ts) {
output = position;
continue;
}
if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
continue;
cedrus_fill_ref_pic(ctx, cedrus_buf,
dpb->top_field_order_cnt,
dpb->bottom_field_order_cnt,
&pic_list[position]);
}
if (output >= 0)
position = output;
else
position = find_first_zero_bit(&used_dpbs, CEDRUS_H264_FRAME_NUM);
output_buf = vb2_to_cedrus_buffer(&run->dst->vb2_buf);
output_buf->codec.h264.position = position;
if (!output_buf->codec.h264.mv_col_buf_size) {
unsigned int field_size;
field_size = DIV_ROUND_UP(ctx->src_fmt.width, 16) *
DIV_ROUND_UP(ctx->src_fmt.height, 16) * 16;
if (!(sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE))
field_size = field_size * 2;
if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY))
field_size = field_size * 2;
output_buf->codec.h264.mv_col_buf_size = field_size * 2;
/* Buffer is never accessed by CPU, so we can skip kernel mapping. */
output_buf->codec.h264.mv_col_buf =
dma_alloc_attrs(dev->dev,
output_buf->codec.h264.mv_col_buf_size,
&output_buf->codec.h264.mv_col_buf_dma,
GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!output_buf->codec.h264.mv_col_buf) {
output_buf->codec.h264.mv_col_buf_size = 0;
return -ENOMEM;
}
}
if (decode->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC)
output_buf->codec.h264.pic_type = CEDRUS_H264_PIC_TYPE_FIELD;
else if (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)
output_buf->codec.h264.pic_type = CEDRUS_H264_PIC_TYPE_MBAFF;
else
output_buf->codec.h264.pic_type = CEDRUS_H264_PIC_TYPE_FRAME;
cedrus_fill_ref_pic(ctx, output_buf,
decode->top_field_order_cnt,
decode->bottom_field_order_cnt,
&pic_list[position]);
cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_FRAMEBUFFER_LIST,
pic_list, sizeof(pic_list));
cedrus_write(dev, VE_H264_OUTPUT_FRAME_IDX, position);
return 0;
}
#define CEDRUS_MAX_REF_IDX 32
static void _cedrus_write_ref_list(struct cedrus_ctx *ctx,
struct cedrus_run *run,
const struct v4l2_h264_reference *ref_list,
u8 num_ref, enum cedrus_h264_sram_off sram)
{
const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
struct vb2_queue *cap_q;
struct cedrus_dev *dev = ctx->dev;
u8 sram_array[CEDRUS_MAX_REF_IDX];
unsigned int i;
size_t size;
cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
memset(sram_array, 0, sizeof(sram_array));
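/*
* Each reference list entry is the DPB slot index shifted left by one, with
* bit 0 set for a bottom-field reference.
*/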
for (i = 0; i < num_ref; i++) {
const struct v4l2_h264_dpb_entry *dpb;
const struct cedrus_buffer *cedrus_buf;
unsigned int position;
struct vb2_buffer *buf;
u8 dpb_idx;
dpb_idx = ref_list[i].index;
dpb = &decode->dpb[dpb_idx];
if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
continue;
buf = vb2_find_buffer(cap_q, dpb->reference_ts);
if (!buf)
continue;
cedrus_buf = vb2_to_cedrus_buffer(buf);
position = cedrus_buf->codec.h264.position;
sram_array[i] |= position << 1;
if (ref_list[i].fields == V4L2_H264_BOTTOM_FIELD_REF)
sram_array[i] |= BIT(0);
}
size = min_t(size_t, ALIGN(num_ref, 4), sizeof(sram_array));
cedrus_h264_write_sram(dev, sram, &sram_array, size);
}
static void cedrus_write_ref_list0(struct cedrus_ctx *ctx,
struct cedrus_run *run)
{
const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params;
_cedrus_write_ref_list(ctx, run,
slice->ref_pic_list0,
slice->num_ref_idx_l0_active_minus1 + 1,
CEDRUS_SRAM_H264_REF_LIST_0);
}
static void cedrus_write_ref_list1(struct cedrus_ctx *ctx,
struct cedrus_run *run)
{
const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params;
_cedrus_write_ref_list(ctx, run,
slice->ref_pic_list1,
slice->num_ref_idx_l1_active_minus1 + 1,
CEDRUS_SRAM_H264_REF_LIST_1);
}
static void cedrus_write_scaling_lists(struct cedrus_ctx *ctx,
struct cedrus_run *run)
{
const struct v4l2_ctrl_h264_scaling_matrix *scaling =
run->h264.scaling_matrix;
const struct v4l2_ctrl_h264_pps *pps = run->h264.pps;
struct cedrus_dev *dev = ctx->dev;
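/*
* Without explicit PPS scaling lists, the default matrices are selected via
* VE_H264_SHS_QP_SCALING_MATRIX_DEFAULT in cedrus_set_params().
*/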
if (!(pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT))
return;
cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_8x8_0,
scaling->scaling_list_8x8[0],
sizeof(scaling->scaling_list_8x8[0]));
cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_8x8_1,
scaling->scaling_list_8x8[1],
sizeof(scaling->scaling_list_8x8[1]));
cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_4x4,
scaling->scaling_list_4x4,
sizeof(scaling->scaling_list_4x4));
}
static void cedrus_write_pred_weight_table(struct cedrus_ctx *ctx,
struct cedrus_run *run)
{
const struct v4l2_ctrl_h264_pred_weights *pred_weight =
run->h264.pred_weights;
struct cedrus_dev *dev = ctx->dev;
int i, j, k;
cedrus_write(dev, VE_H264_SHS_WP,
((pred_weight->chroma_log2_weight_denom & 0x7) << 4) |
((pred_weight->luma_log2_weight_denom & 0x7) << 0));
cedrus_write(dev, VE_AVC_SRAM_PORT_OFFSET,
CEDRUS_SRAM_H264_PRED_WEIGHT_TABLE << 2);
for (i = 0; i < ARRAY_SIZE(pred_weight->weight_factors); i++) {
const struct v4l2_h264_weight_factors *factors =
&pred_weight->weight_factors[i];
for (j = 0; j < ARRAY_SIZE(factors->luma_weight); j++) {
u32 val;
val = (((u32)factors->luma_offset[j] & 0x1ff) << 16) |
(factors->luma_weight[j] & 0x1ff);
cedrus_write(dev, VE_AVC_SRAM_PORT_DATA, val);
}
for (j = 0; j < ARRAY_SIZE(factors->chroma_weight); j++) {
for (k = 0; k < ARRAY_SIZE(factors->chroma_weight[0]); k++) {
u32 val;
val = (((u32)factors->chroma_offset[j][k] & 0x1ff) << 16) |
(factors->chroma_weight[j][k] & 0x1ff);
cedrus_write(dev, VE_AVC_SRAM_PORT_DATA, val);
}
}
}
}
/*
* It turns out that using VE_H264_VLD_OFFSET to skip bits is not reliable. In
* rare cases the frame is not decoded correctly. However, setting the offset
* to 0 and skipping the appropriate amount of bits with the flush bits
* trigger always works.
*/
static void cedrus_skip_bits(struct cedrus_dev *dev, int num)
{
int count = 0;
while (count < num) {
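/* The flush-bits trigger can skip at most 32 bits at a time. */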
int tmp = min(num - count, 32);
cedrus_write(dev, VE_H264_TRIGGER_TYPE,
VE_H264_TRIGGER_TYPE_FLUSH_BITS |
VE_H264_TRIGGER_TYPE_N_BITS(tmp));
while (cedrus_read(dev, VE_H264_STATUS) & VE_H264_STATUS_VLD_BUSY)
udelay(1);
count += tmp;
}
}
static void cedrus_set_params(struct cedrus_ctx *ctx,
struct cedrus_run *run)
{
const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params;
const struct v4l2_ctrl_h264_pps *pps = run->h264.pps;
const struct v4l2_ctrl_h264_sps *sps = run->h264.sps;
struct vb2_buffer *src_buf = &run->src->vb2_buf;
struct cedrus_dev *dev = ctx->dev;
dma_addr_t src_buf_addr;
size_t slice_bytes = vb2_get_plane_payload(src_buf, 0);
unsigned int pic_width_in_mbs;
bool mbaff_pic;
u32 reg;
cedrus_write(dev, VE_H264_VLD_LEN, slice_bytes * 8);
cedrus_write(dev, VE_H264_VLD_OFFSET, 0);
src_buf_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
cedrus_write(dev, VE_H264_VLD_END, src_buf_addr + slice_bytes);
cedrus_write(dev, VE_H264_VLD_ADDR,
VE_H264_VLD_ADDR_VAL(src_buf_addr) |
VE_H264_VLD_ADDR_FIRST | VE_H264_VLD_ADDR_VALID |
VE_H264_VLD_ADDR_LAST);
if (ctx->src_fmt.width > 2048) {
cedrus_write(dev, VE_BUF_CTRL,
VE_BUF_CTRL_INTRAPRED_MIXED_RAM |
VE_BUF_CTRL_DBLK_MIXED_RAM);
cedrus_write(dev, VE_DBLK_DRAM_BUF_ADDR,
ctx->codec.h264.deblk_buf_dma);
cedrus_write(dev, VE_INTRAPRED_DRAM_BUF_ADDR,
ctx->codec.h264.intra_pred_buf_dma);
} else {
cedrus_write(dev, VE_BUF_CTRL,
VE_BUF_CTRL_INTRAPRED_INT_SRAM |
VE_BUF_CTRL_DBLK_INT_SRAM);
}
/*
* FIXME: Since the bitstream parsing is done in software, and
* in userspace, this shouldn't be needed anymore. But it
* turns out that removing it breaks the decoding process,
* without any clear indication why.
*/
cedrus_write(dev, VE_H264_TRIGGER_TYPE,
VE_H264_TRIGGER_TYPE_INIT_SWDEC);
cedrus_skip_bits(dev, slice->header_bit_size);
if (V4L2_H264_CTRL_PRED_WEIGHTS_REQUIRED(pps, slice))
cedrus_write_pred_weight_table(ctx, run);
if ((slice->slice_type == V4L2_H264_SLICE_TYPE_P) ||
(slice->slice_type == V4L2_H264_SLICE_TYPE_SP) ||
(slice->slice_type == V4L2_H264_SLICE_TYPE_B))
cedrus_write_ref_list0(ctx, run);
if (slice->slice_type == V4L2_H264_SLICE_TYPE_B)
cedrus_write_ref_list1(ctx, run);
// picture parameters
reg = 0;
/*
* FIXME: the kernel headers allow the default value to
* be passed, but libva doesn't give us that.
*/
reg |= (slice->num_ref_idx_l0_active_minus1 & 0x1f) << 10;
reg |= (slice->num_ref_idx_l1_active_minus1 & 0x1f) << 5;
reg |= (pps->weighted_bipred_idc & 0x3) << 2;
if (pps->flags & V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE)
reg |= VE_H264_PPS_ENTROPY_CODING_MODE;
if (pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED)
reg |= VE_H264_PPS_WEIGHTED_PRED;
if (pps->flags & V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED)
reg |= VE_H264_PPS_CONSTRAINED_INTRA_PRED;
if (pps->flags & V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE)
reg |= VE_H264_PPS_TRANSFORM_8X8_MODE;
cedrus_write(dev, VE_H264_PPS, reg);
// sequence parameters
reg = 0;
reg |= (sps->chroma_format_idc & 0x7) << 19;
reg |= (sps->pic_width_in_mbs_minus1 & 0xff) << 8;
reg |= sps->pic_height_in_map_units_minus1 & 0xff;
if (sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY)
reg |= VE_H264_SPS_MBS_ONLY;
if (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)
reg |= VE_H264_SPS_MB_ADAPTIVE_FRAME_FIELD;
if (sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE)
reg |= VE_H264_SPS_DIRECT_8X8_INFERENCE;
cedrus_write(dev, VE_H264_SPS, reg);
mbaff_pic = !(decode->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC) &&
(sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD);
pic_width_in_mbs = sps->pic_width_in_mbs_minus1 + 1;
// slice parameters
reg = 0;
reg |= ((slice->first_mb_in_slice % pic_width_in_mbs) & 0xff) << 24;
reg |= (((slice->first_mb_in_slice / pic_width_in_mbs) *
(mbaff_pic + 1)) & 0xff) << 16;
reg |= decode->nal_ref_idc ? BIT(12) : 0;
reg |= (slice->slice_type & 0xf) << 8;
reg |= slice->cabac_init_idc & 0x3;
if (ctx->fh.m2m_ctx->new_frame)
reg |= VE_H264_SHS_FIRST_SLICE_IN_PIC;
if (decode->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC)
reg |= VE_H264_SHS_FIELD_PIC;
if (decode->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)
reg |= VE_H264_SHS_BOTTOM_FIELD;
if (slice->flags & V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED)
reg |= VE_H264_SHS_DIRECT_SPATIAL_MV_PRED;
cedrus_write(dev, VE_H264_SHS, reg);
reg = 0;
reg |= VE_H264_SHS2_NUM_REF_IDX_ACTIVE_OVRD;
reg |= (slice->num_ref_idx_l0_active_minus1 & 0x1f) << 24;
reg |= (slice->num_ref_idx_l1_active_minus1 & 0x1f) << 16;
reg |= (slice->disable_deblocking_filter_idc & 0x3) << 8;
reg |= (slice->slice_alpha_c0_offset_div2 & 0xf) << 4;
reg |= slice->slice_beta_offset_div2 & 0xf;
cedrus_write(dev, VE_H264_SHS2, reg);
reg = 0;
reg |= (pps->second_chroma_qp_index_offset & 0x3f) << 16;
reg |= (pps->chroma_qp_index_offset & 0x3f) << 8;
reg |= (pps->pic_init_qp_minus26 + 26 + slice->slice_qp_delta) & 0x3f;
if (!(pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT))
reg |= VE_H264_SHS_QP_SCALING_MATRIX_DEFAULT;
cedrus_write(dev, VE_H264_SHS_QP, reg);
// clear status flags
cedrus_write(dev, VE_H264_STATUS, cedrus_read(dev, VE_H264_STATUS));
// enable int
cedrus_write(dev, VE_H264_CTRL,
VE_H264_CTRL_SLICE_DECODE_INT |
VE_H264_CTRL_DECODE_ERR_INT |
VE_H264_CTRL_VLD_DATA_REQ_INT);
}
static enum cedrus_irq_status
cedrus_h264_irq_status(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
u32 reg = cedrus_read(dev, VE_H264_STATUS);
if (reg & (VE_H264_STATUS_DECODE_ERR_INT |
VE_H264_STATUS_VLD_DATA_REQ_INT))
return CEDRUS_IRQ_ERROR;
if (reg & VE_H264_CTRL_SLICE_DECODE_INT)
return CEDRUS_IRQ_OK;
return CEDRUS_IRQ_NONE;
}
static void cedrus_h264_irq_clear(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
cedrus_write(dev, VE_H264_STATUS,
VE_H264_STATUS_INT_MASK);
}
static void cedrus_h264_irq_disable(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
u32 reg = cedrus_read(dev, VE_H264_CTRL);
cedrus_write(dev, VE_H264_CTRL,
reg & ~VE_H264_CTRL_INT_MASK);
}
static int cedrus_h264_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
{
struct cedrus_dev *dev = ctx->dev;
int ret;
cedrus_engine_enable(ctx);
cedrus_write(dev, VE_H264_SDROT_CTRL, 0);
cedrus_write(dev, VE_H264_EXTRA_BUFFER1,
ctx->codec.h264.pic_info_buf_dma);
cedrus_write(dev, VE_H264_EXTRA_BUFFER2,
ctx->codec.h264.neighbor_info_buf_dma);
cedrus_write_scaling_lists(ctx, run);
ret = cedrus_write_frame_list(ctx, run);
if (ret)
return ret;
cedrus_set_params(ctx, run);
return 0;
}
static int cedrus_h264_start(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
unsigned int pic_info_size;
int ret;
/*
* NOTE: All buffers allocated here are only used by HW, so we
* can add DMA_ATTR_NO_KERNEL_MAPPING flag when allocating them.
*/
/* Formula for picture buffer size is taken from CedarX source. */
if (ctx->src_fmt.width > 2048)
pic_info_size = CEDRUS_H264_FRAME_NUM * 0x4000;
else
pic_info_size = CEDRUS_H264_FRAME_NUM * 0x1000;
/*
* FIXME: If V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY is set,
* there is no need to multiply by 2.
*/
pic_info_size += ctx->src_fmt.height * 2 * 64;
if (pic_info_size < CEDRUS_MIN_PIC_INFO_BUF_SIZE)
pic_info_size = CEDRUS_MIN_PIC_INFO_BUF_SIZE;
ctx->codec.h264.pic_info_buf_size = pic_info_size;
ctx->codec.h264.pic_info_buf =
dma_alloc_attrs(dev->dev, ctx->codec.h264.pic_info_buf_size,
&ctx->codec.h264.pic_info_buf_dma,
GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!ctx->codec.h264.pic_info_buf)
return -ENOMEM;
/*
* That buffer is supposed to be 16kiB in size, and to be aligned
* on 16kiB as well. However, dma_alloc_attrs provides the
* guarantee that we'll have a DMA address aligned on the
* smallest page order that is greater than or equal to the
* requested size, so we don't have to overallocate.
*/
ctx->codec.h264.neighbor_info_buf =
dma_alloc_attrs(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
&ctx->codec.h264.neighbor_info_buf_dma,
GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!ctx->codec.h264.neighbor_info_buf) {
ret = -ENOMEM;
goto err_pic_buf;
}
if (ctx->src_fmt.width > 2048) {
/*
* Formulas for deblock and intra prediction buffer sizes
* are taken from CedarX source.
*/
ctx->codec.h264.deblk_buf_size =
ALIGN(ctx->src_fmt.width, 32) * 12;
ctx->codec.h264.deblk_buf =
dma_alloc_attrs(dev->dev,
ctx->codec.h264.deblk_buf_size,
&ctx->codec.h264.deblk_buf_dma,
GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!ctx->codec.h264.deblk_buf) {
ret = -ENOMEM;
goto err_neighbor_buf;
}
/*
* NOTE: Multiplying by two deviates from CedarX logic, but it
* is for some unknown reason needed for H264 4K decoding on H6.
*/
ctx->codec.h264.intra_pred_buf_size =
ALIGN(ctx->src_fmt.width, 64) * 5 * 2;
ctx->codec.h264.intra_pred_buf =
dma_alloc_attrs(dev->dev,
ctx->codec.h264.intra_pred_buf_size,
&ctx->codec.h264.intra_pred_buf_dma,
GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!ctx->codec.h264.intra_pred_buf) {
ret = -ENOMEM;
goto err_deblk_buf;
}
}
return 0;
err_deblk_buf:
dma_free_attrs(dev->dev, ctx->codec.h264.deblk_buf_size,
ctx->codec.h264.deblk_buf,
ctx->codec.h264.deblk_buf_dma,
DMA_ATTR_NO_KERNEL_MAPPING);
err_neighbor_buf:
dma_free_attrs(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
ctx->codec.h264.neighbor_info_buf,
ctx->codec.h264.neighbor_info_buf_dma,
DMA_ATTR_NO_KERNEL_MAPPING);
err_pic_buf:
dma_free_attrs(dev->dev, ctx->codec.h264.pic_info_buf_size,
ctx->codec.h264.pic_info_buf,
ctx->codec.h264.pic_info_buf_dma,
DMA_ATTR_NO_KERNEL_MAPPING);
return ret;
}
static void cedrus_h264_stop(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
struct cedrus_buffer *buf;
struct vb2_queue *vq;
unsigned int i;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
for (i = 0; i < vq->num_buffers; i++) {
buf = vb2_to_cedrus_buffer(vb2_get_buffer(vq, i));
if (buf->codec.h264.mv_col_buf_size > 0) {
dma_free_attrs(dev->dev,
buf->codec.h264.mv_col_buf_size,
buf->codec.h264.mv_col_buf,
buf->codec.h264.mv_col_buf_dma,
DMA_ATTR_NO_KERNEL_MAPPING);
buf->codec.h264.mv_col_buf_size = 0;
}
}
dma_free_attrs(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
ctx->codec.h264.neighbor_info_buf,
ctx->codec.h264.neighbor_info_buf_dma,
DMA_ATTR_NO_KERNEL_MAPPING);
dma_free_attrs(dev->dev, ctx->codec.h264.pic_info_buf_size,
ctx->codec.h264.pic_info_buf,
ctx->codec.h264.pic_info_buf_dma,
DMA_ATTR_NO_KERNEL_MAPPING);
if (ctx->codec.h264.deblk_buf_size)
dma_free_attrs(dev->dev, ctx->codec.h264.deblk_buf_size,
ctx->codec.h264.deblk_buf,
ctx->codec.h264.deblk_buf_dma,
DMA_ATTR_NO_KERNEL_MAPPING);
if (ctx->codec.h264.intra_pred_buf_size)
dma_free_attrs(dev->dev, ctx->codec.h264.intra_pred_buf_size,
ctx->codec.h264.intra_pred_buf,
ctx->codec.h264.intra_pred_buf_dma,
DMA_ATTR_NO_KERNEL_MAPPING);
}
static void cedrus_h264_trigger(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
cedrus_write(dev, VE_H264_TRIGGER_TYPE,
VE_H264_TRIGGER_TYPE_AVC_SLICE_DECODE);
}
struct cedrus_dec_ops cedrus_dec_ops_h264 = {
.irq_clear = cedrus_h264_irq_clear,
.irq_disable = cedrus_h264_irq_disable,
.irq_status = cedrus_h264_irq_status,
.setup = cedrus_h264_setup,
.start = cedrus_h264_start,
.stop = cedrus_h264_stop,
.trigger = cedrus_h264_trigger,
};
| linux-master | drivers/staging/media/sunxi/cedrus/cedrus_h264.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cedrus VPU driver
*
* Copyright (C) 2016 Florent Revest <[email protected]>
* Copyright (C) 2018 Paul Kocialkowski <[email protected]>
* Copyright (C) 2018 Bootlin
*
* Based on the vim2m driver, that is:
*
* Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
* Pawel Osciak, <[email protected]>
* Marek Szyprowski, <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/soc/sunxi/sunxi_sram.h>
#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
#include "cedrus.h"
#include "cedrus_hw.h"
#include "cedrus_regs.h"
int cedrus_engine_enable(struct cedrus_ctx *ctx)
{
u32 reg = 0;
/*
* FIXME: This is only valid on 32-bit DDRs; we should test
* it on the A13/A33.
*/
reg |= VE_MODE_REC_WR_MODE_2MB;
reg |= VE_MODE_DDR_MODE_BW_128;
switch (ctx->src_fmt.pixelformat) {
case V4L2_PIX_FMT_MPEG2_SLICE:
reg |= VE_MODE_DEC_MPEG;
break;
/* H.264 and VP8 both use the same decoding mode bit. */
case V4L2_PIX_FMT_H264_SLICE:
case V4L2_PIX_FMT_VP8_FRAME:
reg |= VE_MODE_DEC_H264;
break;
case V4L2_PIX_FMT_HEVC_SLICE:
reg |= VE_MODE_DEC_H265;
break;
default:
return -EINVAL;
}
if (ctx->src_fmt.width == 4096)
reg |= VE_MODE_PIC_WIDTH_IS_4096;
if (ctx->src_fmt.width > 2048)
reg |= VE_MODE_PIC_WIDTH_MORE_2048;
cedrus_write(ctx->dev, VE_MODE, reg);
return 0;
}
void cedrus_engine_disable(struct cedrus_dev *dev)
{
cedrus_write(dev, VE_MODE, VE_MODE_DISABLED);
}
void cedrus_dst_format_set(struct cedrus_dev *dev,
struct v4l2_pix_format *fmt)
{
unsigned int width = fmt->width;
unsigned int height = fmt->height;
u32 chroma_size;
u32 reg;
switch (fmt->pixelformat) {
case V4L2_PIX_FMT_NV12:
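/* Semi-planar 4:2:0: the chroma plane is half the size of the 16-aligned luma plane. */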
chroma_size = ALIGN(width, 16) * ALIGN(height, 16) / 2;
reg = VE_PRIMARY_OUT_FMT_NV12;
cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);
reg = chroma_size / 2;
cedrus_write(dev, VE_PRIMARY_CHROMA_BUF_LEN, reg);
reg = VE_PRIMARY_FB_LINE_STRIDE_LUMA(ALIGN(width, 16)) |
VE_PRIMARY_FB_LINE_STRIDE_CHROMA(ALIGN(width, 16) / 2);
cedrus_write(dev, VE_PRIMARY_FB_LINE_STRIDE, reg);
break;
case V4L2_PIX_FMT_NV12_32L32:
default:
reg = VE_PRIMARY_OUT_FMT_TILED_32_NV12;
cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);
reg = VE_SECONDARY_OUT_FMT_TILED_32_NV12;
cedrus_write(dev, VE_CHROMA_BUF_LEN, reg);
break;
}
}
static irqreturn_t cedrus_irq(int irq, void *data)
{
struct cedrus_dev *dev = data;
struct cedrus_ctx *ctx;
enum vb2_buffer_state state;
enum cedrus_irq_status status;
/*
* If cancel_delayed_work() returns false, it means the watchdog has
* already run and finished the job.
*/
if (!cancel_delayed_work(&dev->watchdog_work))
return IRQ_HANDLED;
ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
if (!ctx) {
v4l2_err(&dev->v4l2_dev,
"Instance released before the end of transaction\n");
return IRQ_NONE;
}
status = ctx->current_codec->irq_status(ctx);
if (status == CEDRUS_IRQ_NONE)
return IRQ_NONE;
ctx->current_codec->irq_disable(ctx);
ctx->current_codec->irq_clear(ctx);
if (status == CEDRUS_IRQ_ERROR)
state = VB2_BUF_STATE_ERROR;
else
state = VB2_BUF_STATE_DONE;
v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
state);
return IRQ_HANDLED;
}
void cedrus_watchdog(struct work_struct *work)
{
struct cedrus_dev *dev;
struct cedrus_ctx *ctx;
dev = container_of(to_delayed_work(work),
struct cedrus_dev, watchdog_work);
ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
if (!ctx)
return;
v4l2_err(&dev->v4l2_dev, "frame processing timed out!\n");
reset_control_reset(dev->rstc);
v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
VB2_BUF_STATE_ERROR);
}
int cedrus_hw_suspend(struct device *device)
{
struct cedrus_dev *dev = dev_get_drvdata(device);
reset_control_assert(dev->rstc);
clk_disable_unprepare(dev->ram_clk);
clk_disable_unprepare(dev->mod_clk);
clk_disable_unprepare(dev->ahb_clk);
return 0;
}
int cedrus_hw_resume(struct device *device)
{
struct cedrus_dev *dev = dev_get_drvdata(device);
int ret;
ret = clk_prepare_enable(dev->ahb_clk);
if (ret) {
dev_err(dev->dev, "Failed to enable AHB clock\n");
return ret;
}
ret = clk_prepare_enable(dev->mod_clk);
if (ret) {
dev_err(dev->dev, "Failed to enable MOD clock\n");
goto err_ahb_clk;
}
ret = clk_prepare_enable(dev->ram_clk);
if (ret) {
dev_err(dev->dev, "Failed to enable RAM clock\n");
goto err_mod_clk;
}
ret = reset_control_reset(dev->rstc);
if (ret) {
dev_err(dev->dev, "Failed to apply reset\n");
goto err_ram_clk;
}
return 0;
err_ram_clk:
clk_disable_unprepare(dev->ram_clk);
err_mod_clk:
clk_disable_unprepare(dev->mod_clk);
err_ahb_clk:
clk_disable_unprepare(dev->ahb_clk);
return ret;
}
int cedrus_hw_probe(struct cedrus_dev *dev)
{
const struct cedrus_variant *variant;
int irq_dec;
int ret;
variant = of_device_get_match_data(dev->dev);
if (!variant)
return -EINVAL;
dev->capabilities = variant->capabilities;
irq_dec = platform_get_irq(dev->pdev, 0);
if (irq_dec <= 0)
return irq_dec;
ret = devm_request_irq(dev->dev, irq_dec, cedrus_irq,
0, dev_name(dev->dev), dev);
if (ret) {
dev_err(dev->dev, "Failed to request IRQ\n");
return ret;
}
ret = of_reserved_mem_device_init(dev->dev);
if (ret && ret != -ENODEV) {
dev_err(dev->dev, "Failed to reserve memory\n");
return ret;
}
ret = sunxi_sram_claim(dev->dev);
if (ret) {
dev_err(dev->dev, "Failed to claim SRAM\n");
goto err_mem;
}
dev->ahb_clk = devm_clk_get(dev->dev, "ahb");
if (IS_ERR(dev->ahb_clk)) {
dev_err(dev->dev, "Failed to get AHB clock\n");
ret = PTR_ERR(dev->ahb_clk);
goto err_sram;
}
dev->mod_clk = devm_clk_get(dev->dev, "mod");
if (IS_ERR(dev->mod_clk)) {
dev_err(dev->dev, "Failed to get MOD clock\n");
ret = PTR_ERR(dev->mod_clk);
goto err_sram;
}
dev->ram_clk = devm_clk_get(dev->dev, "ram");
if (IS_ERR(dev->ram_clk)) {
dev_err(dev->dev, "Failed to get RAM clock\n");
ret = PTR_ERR(dev->ram_clk);
goto err_sram;
}
dev->rstc = devm_reset_control_get(dev->dev, NULL);
if (IS_ERR(dev->rstc)) {
dev_err(dev->dev, "Failed to get reset control\n");
ret = PTR_ERR(dev->rstc);
goto err_sram;
}
dev->base = devm_platform_ioremap_resource(dev->pdev, 0);
if (IS_ERR(dev->base)) {
dev_err(dev->dev, "Failed to map registers\n");
ret = PTR_ERR(dev->base);
goto err_sram;
}
ret = clk_set_rate(dev->mod_clk, variant->mod_rate);
if (ret) {
dev_err(dev->dev, "Failed to set clock rate\n");
goto err_sram;
}
pm_runtime_enable(dev->dev);
if (!pm_runtime_enabled(dev->dev)) {
ret = cedrus_hw_resume(dev->dev);
if (ret)
goto err_pm;
}
return 0;
err_pm:
pm_runtime_disable(dev->dev);
err_sram:
sunxi_sram_release(dev->dev);
err_mem:
of_reserved_mem_device_release(dev->dev);
return ret;
}
void cedrus_hw_remove(struct cedrus_dev *dev)
{
pm_runtime_disable(dev->dev);
if (!pm_runtime_status_suspended(dev->dev))
cedrus_hw_suspend(dev->dev);
sunxi_sram_release(dev->dev);
of_reserved_mem_device_release(dev->dev);
}
| linux-master | drivers/staging/media/sunxi/cedrus/cedrus_hw.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cedrus VPU driver
*
* Copyright (C) 2016 Florent Revest <[email protected]>
* Copyright (C) 2018 Paul Kocialkowski <[email protected]>
* Copyright (C) 2018 Bootlin
*/
#include <media/videobuf2-dma-contig.h>
#include "cedrus.h"
#include "cedrus_hw.h"
#include "cedrus_regs.h"
static enum cedrus_irq_status cedrus_mpeg2_irq_status(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
u32 reg;
reg = cedrus_read(dev, VE_DEC_MPEG_STATUS);
reg &= VE_DEC_MPEG_STATUS_CHECK_MASK;
if (!reg)
return CEDRUS_IRQ_NONE;
if (reg & VE_DEC_MPEG_STATUS_CHECK_ERROR ||
!(reg & VE_DEC_MPEG_STATUS_SUCCESS))
return CEDRUS_IRQ_ERROR;
return CEDRUS_IRQ_OK;
}
static void cedrus_mpeg2_irq_clear(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
cedrus_write(dev, VE_DEC_MPEG_STATUS, VE_DEC_MPEG_STATUS_CHECK_MASK);
}
static void cedrus_mpeg2_irq_disable(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
u32 reg = cedrus_read(dev, VE_DEC_MPEG_CTRL);
reg &= ~VE_DEC_MPEG_CTRL_IRQ_MASK;
cedrus_write(dev, VE_DEC_MPEG_CTRL, reg);
}
static int cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
{
const struct v4l2_ctrl_mpeg2_sequence *seq;
const struct v4l2_ctrl_mpeg2_picture *pic;
const struct v4l2_ctrl_mpeg2_quantisation *quantisation;
dma_addr_t src_buf_addr, dst_luma_addr, dst_chroma_addr;
struct cedrus_dev *dev = ctx->dev;
struct vb2_queue *vq;
const u8 *matrix;
unsigned int i;
u32 reg;
seq = run->mpeg2.sequence;
pic = run->mpeg2.picture;
quantisation = run->mpeg2.quantisation;
/* Activate MPEG engine. */
cedrus_engine_enable(ctx);
/* Set intra quantisation matrix. */
matrix = quantisation->intra_quantiser_matrix;
for (i = 0; i < 64; i++) {
reg = VE_DEC_MPEG_IQMINPUT_WEIGHT(i, matrix[i]);
reg |= VE_DEC_MPEG_IQMINPUT_FLAG_INTRA;
cedrus_write(dev, VE_DEC_MPEG_IQMINPUT, reg);
}
/* Set non-intra quantisation matrix. */
matrix = quantisation->non_intra_quantiser_matrix;
for (i = 0; i < 64; i++) {
reg = VE_DEC_MPEG_IQMINPUT_WEIGHT(i, matrix[i]);
reg |= VE_DEC_MPEG_IQMINPUT_FLAG_NON_INTRA;
cedrus_write(dev, VE_DEC_MPEG_IQMINPUT, reg);
}
/* Set MPEG picture header. */
reg = VE_DEC_MPEG_MP12HDR_SLICE_TYPE(pic->picture_coding_type);
reg |= VE_DEC_MPEG_MP12HDR_F_CODE(0, 0, pic->f_code[0][0]);
reg |= VE_DEC_MPEG_MP12HDR_F_CODE(0, 1, pic->f_code[0][1]);
reg |= VE_DEC_MPEG_MP12HDR_F_CODE(1, 0, pic->f_code[1][0]);
reg |= VE_DEC_MPEG_MP12HDR_F_CODE(1, 1, pic->f_code[1][1]);
reg |= VE_DEC_MPEG_MP12HDR_INTRA_DC_PRECISION(pic->intra_dc_precision);
reg |= VE_DEC_MPEG_MP12HDR_INTRA_PICTURE_STRUCTURE(pic->picture_structure);
reg |= VE_DEC_MPEG_MP12HDR_TOP_FIELD_FIRST(pic->flags & V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST);
reg |= VE_DEC_MPEG_MP12HDR_FRAME_PRED_FRAME_DCT(pic->flags & V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT);
reg |= VE_DEC_MPEG_MP12HDR_CONCEALMENT_MOTION_VECTORS(pic->flags & V4L2_MPEG2_PIC_FLAG_CONCEALMENT_MV);
reg |= VE_DEC_MPEG_MP12HDR_Q_SCALE_TYPE(pic->flags & V4L2_MPEG2_PIC_FLAG_Q_SCALE_TYPE);
reg |= VE_DEC_MPEG_MP12HDR_INTRA_VLC_FORMAT(pic->flags & V4L2_MPEG2_PIC_FLAG_INTRA_VLC);
reg |= VE_DEC_MPEG_MP12HDR_ALTERNATE_SCAN(pic->flags & V4L2_MPEG2_PIC_FLAG_ALT_SCAN);
reg |= VE_DEC_MPEG_MP12HDR_FULL_PEL_FORWARD_VECTOR(0);
reg |= VE_DEC_MPEG_MP12HDR_FULL_PEL_BACKWARD_VECTOR(0);
cedrus_write(dev, VE_DEC_MPEG_MP12HDR, reg);
/* Set frame dimensions. */
reg = VE_DEC_MPEG_PICCODEDSIZE_WIDTH(seq->horizontal_size);
reg |= VE_DEC_MPEG_PICCODEDSIZE_HEIGHT(seq->vertical_size);
cedrus_write(dev, VE_DEC_MPEG_PICCODEDSIZE, reg);
reg = VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(ctx->src_fmt.width);
reg |= VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(ctx->src_fmt.height);
cedrus_write(dev, VE_DEC_MPEG_PICBOUNDSIZE, reg);
/* Forward and backward prediction reference buffers. */
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
cedrus_write_ref_buf_addr(ctx, vq, pic->forward_ref_ts,
VE_DEC_MPEG_FWD_REF_LUMA_ADDR,
VE_DEC_MPEG_FWD_REF_CHROMA_ADDR);
cedrus_write_ref_buf_addr(ctx, vq, pic->backward_ref_ts,
VE_DEC_MPEG_BWD_REF_LUMA_ADDR,
VE_DEC_MPEG_BWD_REF_CHROMA_ADDR);
/* Destination luma and chroma buffers. */
dst_luma_addr = cedrus_dst_buf_addr(ctx, &run->dst->vb2_buf, 0);
dst_chroma_addr = cedrus_dst_buf_addr(ctx, &run->dst->vb2_buf, 1);
cedrus_write(dev, VE_DEC_MPEG_REC_LUMA, dst_luma_addr);
cedrus_write(dev, VE_DEC_MPEG_REC_CHROMA, dst_chroma_addr);
/* Source offset and length in bits. */
cedrus_write(dev, VE_DEC_MPEG_VLD_OFFSET, 0);
reg = vb2_get_plane_payload(&run->src->vb2_buf, 0) * 8;
cedrus_write(dev, VE_DEC_MPEG_VLD_LEN, reg);
/* Source beginning and end addresses. */
src_buf_addr = vb2_dma_contig_plane_dma_addr(&run->src->vb2_buf, 0);
reg = VE_DEC_MPEG_VLD_ADDR_BASE(src_buf_addr);
reg |= VE_DEC_MPEG_VLD_ADDR_VALID_PIC_DATA;
reg |= VE_DEC_MPEG_VLD_ADDR_LAST_PIC_DATA;
reg |= VE_DEC_MPEG_VLD_ADDR_FIRST_PIC_DATA;
cedrus_write(dev, VE_DEC_MPEG_VLD_ADDR, reg);
reg = src_buf_addr + vb2_get_plane_payload(&run->src->vb2_buf, 0);
cedrus_write(dev, VE_DEC_MPEG_VLD_END_ADDR, reg);
/* Macroblock address: start at the beginning. */
reg = VE_DEC_MPEG_MBADDR_Y(0) | VE_DEC_MPEG_MBADDR_X(0);
cedrus_write(dev, VE_DEC_MPEG_MBADDR, reg);
/* Clear previous errors. */
cedrus_write(dev, VE_DEC_MPEG_ERROR, 0);
/* Clear correct macroblocks register. */
cedrus_write(dev, VE_DEC_MPEG_CRTMBADDR, 0);
/* Enable appropriate interruptions and components. */
reg = VE_DEC_MPEG_CTRL_IRQ_MASK | VE_DEC_MPEG_CTRL_MC_NO_WRITEBACK |
VE_DEC_MPEG_CTRL_MC_CACHE_EN;
cedrus_write(dev, VE_DEC_MPEG_CTRL, reg);
return 0;
}
static void cedrus_mpeg2_trigger(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
u32 reg;
/* Trigger MPEG engine. */
reg = VE_DEC_MPEG_TRIGGER_HW_MPEG_VLD | VE_DEC_MPEG_TRIGGER_MPEG2 |
VE_DEC_MPEG_TRIGGER_MB_BOUNDARY;
cedrus_write(dev, VE_DEC_MPEG_TRIGGER, reg);
}
struct cedrus_dec_ops cedrus_dec_ops_mpeg2 = {
.irq_clear = cedrus_mpeg2_irq_clear,
.irq_disable = cedrus_mpeg2_irq_disable,
.irq_status = cedrus_mpeg2_irq_status,
.setup = cedrus_mpeg2_setup,
.trigger = cedrus_mpeg2_trigger,
};
| linux-master | drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2021-2022 Bootlin
* Author: Paul Kocialkowski <[email protected]>
*/
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include "sun6i_isp.h"
#include "sun6i_isp_capture.h"
#include "sun6i_isp_params.h"
#include "sun6i_isp_proc.h"
#include "sun6i_isp_reg.h"
/* Helpers */
void sun6i_isp_proc_dimensions(struct sun6i_isp_device *isp_dev,
unsigned int *width, unsigned int *height)
{
if (width)
*width = isp_dev->proc.mbus_format.width;
if (height)
*height = isp_dev->proc.mbus_format.height;
}
/* Format */
static const struct sun6i_isp_proc_format sun6i_isp_proc_formats[] = {
{
.mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
.input_format = SUN6I_ISP_INPUT_FMT_RAW_BGGR,
},
{
.mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
.input_format = SUN6I_ISP_INPUT_FMT_RAW_GBRG,
},
{
.mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
.input_format = SUN6I_ISP_INPUT_FMT_RAW_GRBG,
},
{
.mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
.input_format = SUN6I_ISP_INPUT_FMT_RAW_RGGB,
},
{
.mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
.input_format = SUN6I_ISP_INPUT_FMT_RAW_BGGR,
},
{
.mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
.input_format = SUN6I_ISP_INPUT_FMT_RAW_GBRG,
},
{
.mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
.input_format = SUN6I_ISP_INPUT_FMT_RAW_GRBG,
},
{
.mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
.input_format = SUN6I_ISP_INPUT_FMT_RAW_RGGB,
},
};
const struct sun6i_isp_proc_format *sun6i_isp_proc_format_find(u32 mbus_code)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(sun6i_isp_proc_formats); i++)
if (sun6i_isp_proc_formats[i].mbus_code == mbus_code)
return &sun6i_isp_proc_formats[i];
return NULL;
}
/* Processor */
static void sun6i_isp_proc_irq_enable(struct sun6i_isp_device *isp_dev)
{
struct regmap *regmap = isp_dev->regmap;
regmap_write(regmap, SUN6I_ISP_FE_INT_EN_REG,
SUN6I_ISP_FE_INT_EN_FINISH |
SUN6I_ISP_FE_INT_EN_START |
SUN6I_ISP_FE_INT_EN_PARA_SAVE |
SUN6I_ISP_FE_INT_EN_PARA_LOAD |
SUN6I_ISP_FE_INT_EN_SRC0_FIFO |
SUN6I_ISP_FE_INT_EN_ROT_FINISH);
}
static void sun6i_isp_proc_irq_disable(struct sun6i_isp_device *isp_dev)
{
struct regmap *regmap = isp_dev->regmap;
regmap_write(regmap, SUN6I_ISP_FE_INT_EN_REG, 0);
}
static void sun6i_isp_proc_irq_clear(struct sun6i_isp_device *isp_dev)
{
struct regmap *regmap = isp_dev->regmap;
regmap_write(regmap, SUN6I_ISP_FE_INT_EN_REG, 0);
regmap_write(regmap, SUN6I_ISP_FE_INT_STA_REG,
SUN6I_ISP_FE_INT_STA_CLEAR);
}
static void sun6i_isp_proc_enable(struct sun6i_isp_device *isp_dev,
struct sun6i_isp_proc_source *source)
{
struct sun6i_isp_proc *proc = &isp_dev->proc;
struct regmap *regmap = isp_dev->regmap;
u8 mode;
/* Frontend */
if (source == &proc->source_csi0)
mode = SUN6I_ISP_SRC_MODE_CSI(0);
else
mode = SUN6I_ISP_SRC_MODE_CSI(1);
regmap_write(regmap, SUN6I_ISP_FE_CFG_REG,
SUN6I_ISP_FE_CFG_EN | SUN6I_ISP_FE_CFG_SRC0_MODE(mode));
regmap_write(regmap, SUN6I_ISP_FE_CTRL_REG,
SUN6I_ISP_FE_CTRL_VCAP_EN | SUN6I_ISP_FE_CTRL_PARA_READY);
}
static void sun6i_isp_proc_disable(struct sun6i_isp_device *isp_dev)
{
struct regmap *regmap = isp_dev->regmap;
/* Frontend */
regmap_write(regmap, SUN6I_ISP_FE_CTRL_REG, 0);
regmap_write(regmap, SUN6I_ISP_FE_CFG_REG, 0);
}
static void sun6i_isp_proc_configure(struct sun6i_isp_device *isp_dev)
{
struct v4l2_mbus_framefmt *mbus_format = &isp_dev->proc.mbus_format;
const struct sun6i_isp_proc_format *format;
u32 value;
/* Module */
value = sun6i_isp_load_read(isp_dev, SUN6I_ISP_MODULE_EN_REG);
value |= SUN6I_ISP_MODULE_EN_SRC0;
sun6i_isp_load_write(isp_dev, SUN6I_ISP_MODULE_EN_REG, value);
/* Input */
format = sun6i_isp_proc_format_find(mbus_format->code);
if (WARN_ON(!format))
return;
sun6i_isp_load_write(isp_dev, SUN6I_ISP_MODE_REG,
SUN6I_ISP_MODE_INPUT_FMT(format->input_format) |
SUN6I_ISP_MODE_INPUT_YUV_SEQ(format->input_yuv_seq) |
SUN6I_ISP_MODE_SHARP(1) |
SUN6I_ISP_MODE_HIST(2));
}
/* V4L2 Subdev */
static int sun6i_isp_proc_s_stream(struct v4l2_subdev *subdev, int on)
{
struct sun6i_isp_device *isp_dev = v4l2_get_subdevdata(subdev);
struct sun6i_isp_proc *proc = &isp_dev->proc;
struct media_pad *local_pad = &proc->pads[SUN6I_ISP_PROC_PAD_SINK_CSI];
struct device *dev = isp_dev->dev;
struct sun6i_isp_proc_source *source;
struct v4l2_subdev *source_subdev;
struct media_pad *remote_pad;
int ret;
/* Source */
remote_pad = media_pad_remote_pad_unique(local_pad);
if (IS_ERR(remote_pad)) {
dev_err(dev,
"zero or more than a single source connected to the bridge\n");
return PTR_ERR(remote_pad);
}
source_subdev = media_entity_to_v4l2_subdev(remote_pad->entity);
if (source_subdev == proc->source_csi0.subdev)
source = &proc->source_csi0;
else
source = &proc->source_csi1;
if (!on) {
sun6i_isp_proc_irq_disable(isp_dev);
v4l2_subdev_call(source_subdev, video, s_stream, 0);
ret = 0;
goto disable;
}
/* PM */
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
/* Clear */
sun6i_isp_proc_irq_clear(isp_dev);
/* Configure */
sun6i_isp_tables_configure(isp_dev);
sun6i_isp_params_configure(isp_dev);
sun6i_isp_proc_configure(isp_dev);
sun6i_isp_capture_configure(isp_dev);
/* State Update */
sun6i_isp_state_update(isp_dev, true);
/* Enable */
sun6i_isp_proc_irq_enable(isp_dev);
sun6i_isp_proc_enable(isp_dev, source);
ret = v4l2_subdev_call(source_subdev, video, s_stream, 1);
if (ret && ret != -ENOIOCTLCMD) {
sun6i_isp_proc_irq_disable(isp_dev);
goto disable;
}
return 0;
disable:
sun6i_isp_proc_disable(isp_dev);
pm_runtime_put(dev);
return ret;
}
static const struct v4l2_subdev_video_ops sun6i_isp_proc_video_ops = {
.s_stream = sun6i_isp_proc_s_stream,
};
static void
sun6i_isp_proc_mbus_format_prepare(struct v4l2_mbus_framefmt *mbus_format)
{
if (!sun6i_isp_proc_format_find(mbus_format->code))
mbus_format->code = sun6i_isp_proc_formats[0].mbus_code;
mbus_format->field = V4L2_FIELD_NONE;
mbus_format->colorspace = V4L2_COLORSPACE_RAW;
mbus_format->quantization = V4L2_QUANTIZATION_DEFAULT;
mbus_format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
}
static int sun6i_isp_proc_init_cfg(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *state)
{
struct sun6i_isp_device *isp_dev = v4l2_get_subdevdata(subdev);
unsigned int pad = SUN6I_ISP_PROC_PAD_SINK_CSI;
struct v4l2_mbus_framefmt *mbus_format =
v4l2_subdev_get_try_format(subdev, state, pad);
struct mutex *lock = &isp_dev->proc.lock;
mutex_lock(lock);
mbus_format->code = sun6i_isp_proc_formats[0].mbus_code;
mbus_format->width = 1280;
mbus_format->height = 720;
sun6i_isp_proc_mbus_format_prepare(mbus_format);
mutex_unlock(lock);
return 0;
}
static int
sun6i_isp_proc_enum_mbus_code(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *state,
struct v4l2_subdev_mbus_code_enum *code_enum)
{
if (code_enum->index >= ARRAY_SIZE(sun6i_isp_proc_formats))
return -EINVAL;
code_enum->code = sun6i_isp_proc_formats[code_enum->index].mbus_code;
return 0;
}
static int sun6i_isp_proc_get_fmt(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
struct sun6i_isp_device *isp_dev = v4l2_get_subdevdata(subdev);
struct v4l2_mbus_framefmt *mbus_format = &format->format;
struct mutex *lock = &isp_dev->proc.lock;
mutex_lock(lock);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
*mbus_format = *v4l2_subdev_get_try_format(subdev, state,
format->pad);
else
*mbus_format = isp_dev->proc.mbus_format;
mutex_unlock(lock);
return 0;
}
static int sun6i_isp_proc_set_fmt(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
struct sun6i_isp_device *isp_dev = v4l2_get_subdevdata(subdev);
struct v4l2_mbus_framefmt *mbus_format = &format->format;
struct mutex *lock = &isp_dev->proc.lock;
mutex_lock(lock);
sun6i_isp_proc_mbus_format_prepare(mbus_format);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
*v4l2_subdev_get_try_format(subdev, state, format->pad) =
*mbus_format;
else
isp_dev->proc.mbus_format = *mbus_format;
mutex_unlock(lock);
return 0;
}
static const struct v4l2_subdev_pad_ops sun6i_isp_proc_pad_ops = {
.init_cfg = sun6i_isp_proc_init_cfg,
.enum_mbus_code = sun6i_isp_proc_enum_mbus_code,
.get_fmt = sun6i_isp_proc_get_fmt,
.set_fmt = sun6i_isp_proc_set_fmt,
};
static const struct v4l2_subdev_ops sun6i_isp_proc_subdev_ops = {
.video = &sun6i_isp_proc_video_ops,
.pad = &sun6i_isp_proc_pad_ops,
};
/* Media Entity */
static const struct media_entity_operations sun6i_isp_proc_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
/* V4L2 Async */
static int sun6i_isp_proc_link(struct sun6i_isp_device *isp_dev,
int sink_pad_index,
struct v4l2_subdev *remote_subdev, bool enabled)
{
struct device *dev = isp_dev->dev;
struct v4l2_subdev *subdev = &isp_dev->proc.subdev;
struct media_entity *sink_entity = &subdev->entity;
struct media_entity *source_entity = &remote_subdev->entity;
int source_pad_index;
int ret;
/* Get the first remote source pad. */
ret = media_entity_get_fwnode_pad(source_entity, remote_subdev->fwnode,
MEDIA_PAD_FL_SOURCE);
if (ret < 0) {
dev_err(dev, "missing source pad in external entity %s\n",
source_entity->name);
return -EINVAL;
}
source_pad_index = ret;
dev_dbg(dev, "creating %s:%u -> %s:%u link\n", source_entity->name,
source_pad_index, sink_entity->name, sink_pad_index);
ret = media_create_pad_link(source_entity, source_pad_index,
sink_entity, sink_pad_index,
enabled ? MEDIA_LNK_FL_ENABLED : 0);
if (ret < 0) {
dev_err(dev, "failed to create %s:%u -> %s:%u link\n",
source_entity->name, source_pad_index,
sink_entity->name, sink_pad_index);
return ret;
}
return 0;
}
static int sun6i_isp_proc_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *remote_subdev,
struct v4l2_async_connection *async_subdev)
{
struct sun6i_isp_device *isp_dev =
container_of(notifier, struct sun6i_isp_device, proc.notifier);
struct sun6i_isp_proc_async_subdev *proc_async_subdev =
container_of(async_subdev, struct sun6i_isp_proc_async_subdev,
async_subdev);
struct sun6i_isp_proc *proc = &isp_dev->proc;
struct sun6i_isp_proc_source *source = proc_async_subdev->source;
bool enabled;
switch (source->endpoint.base.port) {
case SUN6I_ISP_PORT_CSI0:
source = &proc->source_csi0;
enabled = true;
break;
case SUN6I_ISP_PORT_CSI1:
source = &proc->source_csi1;
enabled = !proc->source_csi0.expected;
break;
default:
return -EINVAL;
}
source->subdev = remote_subdev;
return sun6i_isp_proc_link(isp_dev, SUN6I_ISP_PROC_PAD_SINK_CSI,
remote_subdev, enabled);
}
static int
sun6i_isp_proc_notifier_complete(struct v4l2_async_notifier *notifier)
{
struct sun6i_isp_device *isp_dev =
container_of(notifier, struct sun6i_isp_device, proc.notifier);
struct v4l2_device *v4l2_dev = &isp_dev->v4l2.v4l2_dev;
int ret;
ret = v4l2_device_register_subdev_nodes(v4l2_dev);
if (ret)
return ret;
return 0;
}
static const struct v4l2_async_notifier_operations
sun6i_isp_proc_notifier_ops = {
.bound = sun6i_isp_proc_notifier_bound,
.complete = sun6i_isp_proc_notifier_complete,
};
/* Processor */
static int sun6i_isp_proc_source_setup(struct sun6i_isp_device *isp_dev,
struct sun6i_isp_proc_source *source,
u32 port)
{
struct device *dev = isp_dev->dev;
struct v4l2_async_notifier *notifier = &isp_dev->proc.notifier;
struct v4l2_fwnode_endpoint *endpoint = &source->endpoint;
struct sun6i_isp_proc_async_subdev *proc_async_subdev;
struct fwnode_handle *handle = NULL;
int ret;
handle = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), port, 0, 0);
if (!handle)
return -ENODEV;
ret = v4l2_fwnode_endpoint_parse(handle, endpoint);
if (ret)
goto complete;
proc_async_subdev =
v4l2_async_nf_add_fwnode_remote(notifier, handle,
struct
sun6i_isp_proc_async_subdev);
if (IS_ERR(proc_async_subdev)) {
ret = PTR_ERR(proc_async_subdev);
goto complete;
}
proc_async_subdev->source = source;
source->expected = true;
complete:
fwnode_handle_put(handle);
return ret;
}
int sun6i_isp_proc_setup(struct sun6i_isp_device *isp_dev)
{
struct device *dev = isp_dev->dev;
struct sun6i_isp_proc *proc = &isp_dev->proc;
struct v4l2_device *v4l2_dev = &isp_dev->v4l2.v4l2_dev;
struct v4l2_async_notifier *notifier = &proc->notifier;
struct v4l2_subdev *subdev = &proc->subdev;
struct media_pad *pads = proc->pads;
int ret;
mutex_init(&proc->lock);
/* V4L2 Subdev */
v4l2_subdev_init(subdev, &sun6i_isp_proc_subdev_ops);
strscpy(subdev->name, SUN6I_ISP_PROC_NAME, sizeof(subdev->name));
subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
subdev->owner = THIS_MODULE;
subdev->dev = dev;
v4l2_set_subdevdata(subdev, isp_dev);
/* Media Entity */
subdev->entity.function = MEDIA_ENT_F_PROC_VIDEO_ISP;
subdev->entity.ops = &sun6i_isp_proc_entity_ops;
/* Media Pads */
pads[SUN6I_ISP_PROC_PAD_SINK_CSI].flags = MEDIA_PAD_FL_SINK |
MEDIA_PAD_FL_MUST_CONNECT;
pads[SUN6I_ISP_PROC_PAD_SINK_PARAMS].flags = MEDIA_PAD_FL_SINK |
MEDIA_PAD_FL_MUST_CONNECT;
pads[SUN6I_ISP_PROC_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&subdev->entity, SUN6I_ISP_PROC_PAD_COUNT,
pads);
if (ret)
return ret;
/* V4L2 Subdev */
ret = v4l2_device_register_subdev(v4l2_dev, subdev);
if (ret < 0) {
v4l2_err(v4l2_dev, "failed to register v4l2 subdev: %d\n", ret);
goto error_media_entity;
}
/* V4L2 Async */
v4l2_async_nf_init(notifier, v4l2_dev);
notifier->ops = &sun6i_isp_proc_notifier_ops;
sun6i_isp_proc_source_setup(isp_dev, &proc->source_csi0,
SUN6I_ISP_PORT_CSI0);
sun6i_isp_proc_source_setup(isp_dev, &proc->source_csi1,
SUN6I_ISP_PORT_CSI1);
ret = v4l2_async_nf_register(notifier);
if (ret) {
v4l2_err(v4l2_dev,
"failed to register v4l2 async notifier: %d\n", ret);
goto error_v4l2_async_notifier;
}
return 0;
error_v4l2_async_notifier:
v4l2_async_nf_cleanup(notifier);
v4l2_device_unregister_subdev(subdev);
error_media_entity:
media_entity_cleanup(&subdev->entity);
return ret;
}
void sun6i_isp_proc_cleanup(struct sun6i_isp_device *isp_dev)
{
struct v4l2_async_notifier *notifier = &isp_dev->proc.notifier;
struct v4l2_subdev *subdev = &isp_dev->proc.subdev;
v4l2_async_nf_unregister(notifier);
v4l2_async_nf_cleanup(notifier);
v4l2_device_unregister_subdev(subdev);
media_entity_cleanup(&subdev->entity);
}
| linux-master | drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2021-2022 Bootlin
* Author: Paul Kocialkowski <[email protected]>
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mc.h>
#include "sun6i_isp.h"
#include "sun6i_isp_capture.h"
#include "sun6i_isp_params.h"
#include "sun6i_isp_proc.h"
#include "sun6i_isp_reg.h"
/* Helpers */
u32 sun6i_isp_load_read(struct sun6i_isp_device *isp_dev, u32 offset)
{
u32 *data = (u32 *)(isp_dev->tables.load.data + offset);
return *data;
}
void sun6i_isp_load_write(struct sun6i_isp_device *isp_dev, u32 offset,
u32 value)
{
u32 *data = (u32 *)(isp_dev->tables.load.data + offset);
*data = value;
}
/* State */
/*
* The ISP works with a load buffer, which gets copied to the actual registers
* by the hardware before processing a frame when a specific flag is set.
* This is represented by tracking the ISP state in the different parts of
* the code with explicit sync points:
* - state update: to update the load buffer for the next frame if necessary;
* - state complete: to indicate that the state update was applied.
*/
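/*
 * A sketch of the resulting flow, as implemented by the interrupt handler
 * below and by the capture/params buffer queue handlers:
 * 1. a newly queued buffer triggers sun6i_isp_state_update(), which fills
 *    the load buffer and raises SUN6I_ISP_FE_CTRL_PARA_READY;
 * 2. the hardware copies the load buffer before the next frame and signals
 *    SUN6I_ISP_FE_INT_STA_PARA_LOAD;
 * 3. the interrupt handler calls sun6i_isp_state_complete() to return the
 *    applied buffers, then sun6i_isp_state_update() again for the frame
 *    after that.
 */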
static void sun6i_isp_state_ready(struct sun6i_isp_device *isp_dev)
{
struct regmap *regmap = isp_dev->regmap;
u32 value;
regmap_read(regmap, SUN6I_ISP_FE_CTRL_REG, &value);
value |= SUN6I_ISP_FE_CTRL_PARA_READY;
regmap_write(regmap, SUN6I_ISP_FE_CTRL_REG, value);
}
static void sun6i_isp_state_complete(struct sun6i_isp_device *isp_dev)
{
unsigned long flags;
spin_lock_irqsave(&isp_dev->state_lock, flags);
sun6i_isp_capture_state_complete(isp_dev);
sun6i_isp_params_state_complete(isp_dev);
spin_unlock_irqrestore(&isp_dev->state_lock, flags);
}
void sun6i_isp_state_update(struct sun6i_isp_device *isp_dev, bool ready_hold)
{
bool update = false;
unsigned long flags;
spin_lock_irqsave(&isp_dev->state_lock, flags);
sun6i_isp_capture_state_update(isp_dev, &update);
sun6i_isp_params_state_update(isp_dev, &update);
if (update && !ready_hold)
sun6i_isp_state_ready(isp_dev);
spin_unlock_irqrestore(&isp_dev->state_lock, flags);
}
/* Tables */
static int sun6i_isp_table_setup(struct sun6i_isp_device *isp_dev,
struct sun6i_isp_table *table)
{
table->data = dma_alloc_coherent(isp_dev->dev, table->size,
&table->address, GFP_KERNEL);
if (!table->data)
return -ENOMEM;
return 0;
}
static void sun6i_isp_table_cleanup(struct sun6i_isp_device *isp_dev,
struct sun6i_isp_table *table)
{
dma_free_coherent(isp_dev->dev, table->size, table->data,
table->address);
}
void sun6i_isp_tables_configure(struct sun6i_isp_device *isp_dev)
{
struct regmap *regmap = isp_dev->regmap;
regmap_write(regmap, SUN6I_ISP_REG_LOAD_ADDR_REG,
SUN6I_ISP_ADDR_VALUE(isp_dev->tables.load.address));
regmap_write(regmap, SUN6I_ISP_REG_SAVE_ADDR_REG,
SUN6I_ISP_ADDR_VALUE(isp_dev->tables.save.address));
regmap_write(regmap, SUN6I_ISP_LUT_TABLE_ADDR_REG,
SUN6I_ISP_ADDR_VALUE(isp_dev->tables.lut.address));
regmap_write(regmap, SUN6I_ISP_DRC_TABLE_ADDR_REG,
SUN6I_ISP_ADDR_VALUE(isp_dev->tables.drc.address));
regmap_write(regmap, SUN6I_ISP_STATS_ADDR_REG,
SUN6I_ISP_ADDR_VALUE(isp_dev->tables.stats.address));
}
static int sun6i_isp_tables_setup(struct sun6i_isp_device *isp_dev,
const struct sun6i_isp_variant *variant)
{
struct sun6i_isp_tables *tables = &isp_dev->tables;
int ret;
tables->load.size = variant->table_load_save_size;
ret = sun6i_isp_table_setup(isp_dev, &tables->load);
if (ret)
return ret;
tables->save.size = variant->table_load_save_size;
ret = sun6i_isp_table_setup(isp_dev, &tables->save);
if (ret)
return ret;
tables->lut.size = variant->table_lut_size;
ret = sun6i_isp_table_setup(isp_dev, &tables->lut);
if (ret)
return ret;
tables->drc.size = variant->table_drc_size;
ret = sun6i_isp_table_setup(isp_dev, &tables->drc);
if (ret)
return ret;
tables->stats.size = variant->table_stats_size;
ret = sun6i_isp_table_setup(isp_dev, &tables->stats);
if (ret)
return ret;
return 0;
}
static void sun6i_isp_tables_cleanup(struct sun6i_isp_device *isp_dev)
{
struct sun6i_isp_tables *tables = &isp_dev->tables;
sun6i_isp_table_cleanup(isp_dev, &tables->stats);
sun6i_isp_table_cleanup(isp_dev, &tables->drc);
sun6i_isp_table_cleanup(isp_dev, &tables->lut);
sun6i_isp_table_cleanup(isp_dev, &tables->save);
sun6i_isp_table_cleanup(isp_dev, &tables->load);
}
/* Media */
static const struct media_device_ops sun6i_isp_media_ops = {
.link_notify = v4l2_pipeline_link_notify,
};
/* V4L2 */
static int sun6i_isp_v4l2_setup(struct sun6i_isp_device *isp_dev)
{
struct sun6i_isp_v4l2 *v4l2 = &isp_dev->v4l2;
struct v4l2_device *v4l2_dev = &v4l2->v4l2_dev;
struct media_device *media_dev = &v4l2->media_dev;
struct device *dev = isp_dev->dev;
int ret;
/* Media Device */
strscpy(media_dev->model, SUN6I_ISP_DESCRIPTION,
sizeof(media_dev->model));
media_dev->ops = &sun6i_isp_media_ops;
media_dev->hw_revision = 0;
media_dev->dev = dev;
media_device_init(media_dev);
ret = media_device_register(media_dev);
if (ret) {
dev_err(dev, "failed to register media device\n");
return ret;
}
/* V4L2 Device */
v4l2_dev->mdev = media_dev;
ret = v4l2_device_register(dev, v4l2_dev);
if (ret) {
dev_err(dev, "failed to register v4l2 device\n");
goto error_media;
}
return 0;
error_media:
media_device_unregister(media_dev);
media_device_cleanup(media_dev);
return ret;
}
static void sun6i_isp_v4l2_cleanup(struct sun6i_isp_device *isp_dev)
{
struct sun6i_isp_v4l2 *v4l2 = &isp_dev->v4l2;
media_device_unregister(&v4l2->media_dev);
v4l2_device_unregister(&v4l2->v4l2_dev);
media_device_cleanup(&v4l2->media_dev);
}
/* Platform */
static irqreturn_t sun6i_isp_interrupt(int irq, void *private)
{
struct sun6i_isp_device *isp_dev = private;
struct regmap *regmap = isp_dev->regmap;
u32 status = 0, enable = 0;
regmap_read(regmap, SUN6I_ISP_FE_INT_STA_REG, &status);
regmap_read(regmap, SUN6I_ISP_FE_INT_EN_REG, &enable);
if (!status)
return IRQ_NONE;
else if (!(status & enable))
goto complete;
/*
* The ISP working cycle starts with a params-load, which makes the
* state from the load buffer active. Then it starts processing the
* frame and gives a finish interrupt. Soon after that, the next state
* coming from the load buffer will be applied for the next frame,
* giving a params-load as well.
*
* Because both frame finish and params-load are received almost
 * at the same time (one ISR call), handle them in chronological order.
*/
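	/*
	 * Handling the finish interrupt first means the frame sequence
	 * counter is already incremented when the params-load handling
	 * below completes the pending capture and params buffers.
	 */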
if (status & SUN6I_ISP_FE_INT_STA_FINISH)
sun6i_isp_capture_finish(isp_dev);
if (status & SUN6I_ISP_FE_INT_STA_PARA_LOAD) {
sun6i_isp_state_complete(isp_dev);
sun6i_isp_state_update(isp_dev, false);
}
complete:
regmap_write(regmap, SUN6I_ISP_FE_INT_STA_REG, status);
return IRQ_HANDLED;
}
static int sun6i_isp_suspend(struct device *dev)
{
struct sun6i_isp_device *isp_dev = dev_get_drvdata(dev);
reset_control_assert(isp_dev->reset);
clk_disable_unprepare(isp_dev->clock_ram);
clk_disable_unprepare(isp_dev->clock_mod);
return 0;
}
static int sun6i_isp_resume(struct device *dev)
{
struct sun6i_isp_device *isp_dev = dev_get_drvdata(dev);
int ret;
ret = reset_control_deassert(isp_dev->reset);
if (ret) {
dev_err(dev, "failed to deassert reset\n");
return ret;
}
ret = clk_prepare_enable(isp_dev->clock_mod);
if (ret) {
dev_err(dev, "failed to enable module clock\n");
goto error_reset;
}
ret = clk_prepare_enable(isp_dev->clock_ram);
if (ret) {
dev_err(dev, "failed to enable ram clock\n");
goto error_clock_mod;
}
return 0;
error_clock_mod:
clk_disable_unprepare(isp_dev->clock_mod);
error_reset:
reset_control_assert(isp_dev->reset);
return ret;
}
static const struct dev_pm_ops sun6i_isp_pm_ops = {
.runtime_suspend = sun6i_isp_suspend,
.runtime_resume = sun6i_isp_resume,
};
static const struct regmap_config sun6i_isp_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x400,
};
static int sun6i_isp_resources_setup(struct sun6i_isp_device *isp_dev,
struct platform_device *platform_dev)
{
struct device *dev = isp_dev->dev;
void __iomem *io_base;
int irq;
int ret;
/* Registers */
io_base = devm_platform_ioremap_resource(platform_dev, 0);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
isp_dev->regmap = devm_regmap_init_mmio_clk(dev, "bus", io_base,
&sun6i_isp_regmap_config);
if (IS_ERR(isp_dev->regmap)) {
dev_err(dev, "failed to init register map\n");
return PTR_ERR(isp_dev->regmap);
}
/* Clocks */
isp_dev->clock_mod = devm_clk_get(dev, "mod");
if (IS_ERR(isp_dev->clock_mod)) {
dev_err(dev, "failed to acquire module clock\n");
return PTR_ERR(isp_dev->clock_mod);
}
isp_dev->clock_ram = devm_clk_get(dev, "ram");
if (IS_ERR(isp_dev->clock_ram)) {
dev_err(dev, "failed to acquire ram clock\n");
return PTR_ERR(isp_dev->clock_ram);
}
ret = clk_set_rate_exclusive(isp_dev->clock_mod, 297000000);
if (ret) {
dev_err(dev, "failed to set mod clock rate\n");
return ret;
}
/* Reset */
isp_dev->reset = devm_reset_control_get_shared(dev, NULL);
if (IS_ERR(isp_dev->reset)) {
dev_err(dev, "failed to acquire reset\n");
ret = PTR_ERR(isp_dev->reset);
goto error_clock_rate_exclusive;
}
/* Interrupt */
irq = platform_get_irq(platform_dev, 0);
if (irq < 0) {
dev_err(dev, "failed to get interrupt\n");
ret = -ENXIO;
goto error_clock_rate_exclusive;
}
ret = devm_request_irq(dev, irq, sun6i_isp_interrupt, IRQF_SHARED,
SUN6I_ISP_NAME, isp_dev);
if (ret) {
dev_err(dev, "failed to request interrupt\n");
goto error_clock_rate_exclusive;
}
/* Runtime PM */
pm_runtime_enable(dev);
return 0;
error_clock_rate_exclusive:
clk_rate_exclusive_put(isp_dev->clock_mod);
return ret;
}
static void sun6i_isp_resources_cleanup(struct sun6i_isp_device *isp_dev)
{
struct device *dev = isp_dev->dev;
pm_runtime_disable(dev);
clk_rate_exclusive_put(isp_dev->clock_mod);
}
static int sun6i_isp_probe(struct platform_device *platform_dev)
{
struct sun6i_isp_device *isp_dev;
struct device *dev = &platform_dev->dev;
const struct sun6i_isp_variant *variant;
int ret;
variant = of_device_get_match_data(dev);
if (!variant)
return -EINVAL;
isp_dev = devm_kzalloc(dev, sizeof(*isp_dev), GFP_KERNEL);
if (!isp_dev)
return -ENOMEM;
isp_dev->dev = dev;
platform_set_drvdata(platform_dev, isp_dev);
spin_lock_init(&isp_dev->state_lock);
ret = sun6i_isp_resources_setup(isp_dev, platform_dev);
if (ret)
return ret;
ret = sun6i_isp_tables_setup(isp_dev, variant);
if (ret) {
dev_err(dev, "failed to setup tables\n");
goto error_resources;
}
ret = sun6i_isp_v4l2_setup(isp_dev);
if (ret) {
dev_err(dev, "failed to setup v4l2\n");
goto error_tables;
}
ret = sun6i_isp_proc_setup(isp_dev);
if (ret) {
dev_err(dev, "failed to setup proc\n");
goto error_v4l2;
}
ret = sun6i_isp_capture_setup(isp_dev);
if (ret) {
dev_err(dev, "failed to setup capture\n");
goto error_proc;
}
ret = sun6i_isp_params_setup(isp_dev);
if (ret) {
dev_err(dev, "failed to setup params\n");
goto error_capture;
}
return 0;
error_capture:
sun6i_isp_capture_cleanup(isp_dev);
error_proc:
sun6i_isp_proc_cleanup(isp_dev);
error_v4l2:
sun6i_isp_v4l2_cleanup(isp_dev);
error_tables:
sun6i_isp_tables_cleanup(isp_dev);
error_resources:
sun6i_isp_resources_cleanup(isp_dev);
return ret;
}
static void sun6i_isp_remove(struct platform_device *platform_dev)
{
struct sun6i_isp_device *isp_dev = platform_get_drvdata(platform_dev);
sun6i_isp_params_cleanup(isp_dev);
sun6i_isp_capture_cleanup(isp_dev);
sun6i_isp_proc_cleanup(isp_dev);
sun6i_isp_v4l2_cleanup(isp_dev);
sun6i_isp_tables_cleanup(isp_dev);
sun6i_isp_resources_cleanup(isp_dev);
}
/*
* History of sun6i-isp:
* - sun4i-a10-isp: initial ISP tied to the CSI0 controller,
* apparently unused in software implementations;
* - sun6i-a31-isp: separate ISP loosely based on sun4i-a10-isp,
* adding extra modules and features;
* - sun9i-a80-isp: based on sun6i-a31-isp with some register offset changes
* and new modules like saturation and cnr;
* - sun8i-a23-isp/sun8i-h3-isp: based on sun9i-a80-isp with most modules
* related to raw removed;
 * - sun8i-a83t-isp: based on sun9i-a80-isp with some register offset changes;
* - sun8i-v3s-isp: based on sun8i-a83t-isp with a new disc module;
*/
static const struct sun6i_isp_variant sun8i_v3s_isp_variant = {
.table_load_save_size = 0x1000,
.table_lut_size = 0xe00,
.table_drc_size = 0x600,
.table_stats_size = 0x2100,
};
static const struct of_device_id sun6i_isp_of_match[] = {
{
.compatible = "allwinner,sun8i-v3s-isp",
.data = &sun8i_v3s_isp_variant,
},
{},
};
MODULE_DEVICE_TABLE(of, sun6i_isp_of_match);
static struct platform_driver sun6i_isp_platform_driver = {
.probe = sun6i_isp_probe,
.remove_new = sun6i_isp_remove,
.driver = {
.name = SUN6I_ISP_NAME,
.of_match_table = sun6i_isp_of_match,
.pm = &sun6i_isp_pm_ops,
},
};
module_platform_driver(sun6i_isp_platform_driver);
MODULE_DESCRIPTION("Allwinner A31 Image Signal Processor driver");
MODULE_AUTHOR("Paul Kocialkowski <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2021-2022 Bootlin
* Author: Paul Kocialkowski <[email protected]>
*/
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-v4l2.h>
#include "sun6i_isp.h"
#include "sun6i_isp_params.h"
#include "sun6i_isp_reg.h"
#include "uapi/sun6i-isp-config.h"
/* Params */
static const struct sun6i_isp_params_config sun6i_isp_params_config_default = {
.modules_used = SUN6I_ISP_MODULE_BAYER,
.bayer = {
.offset_r = 32,
.offset_gr = 32,
.offset_gb = 32,
.offset_b = 32,
.gain_r = 256,
.gain_gr = 256,
.gain_gb = 256,
.gain_b = 256,
},
.bdnf = {
.in_dis_min = 8,
.in_dis_max = 16,
.coefficients_g = { 15, 4, 1 },
.coefficients_rb = { 15, 4 },
},
};
static void sun6i_isp_params_configure_ob(struct sun6i_isp_device *isp_dev)
{
unsigned int width, height;
sun6i_isp_proc_dimensions(isp_dev, &width, &height);
sun6i_isp_load_write(isp_dev, SUN6I_ISP_OB_SIZE_REG,
SUN6I_ISP_OB_SIZE_WIDTH(width) |
SUN6I_ISP_OB_SIZE_HEIGHT(height));
sun6i_isp_load_write(isp_dev, SUN6I_ISP_OB_VALID_REG,
SUN6I_ISP_OB_VALID_WIDTH(width) |
SUN6I_ISP_OB_VALID_HEIGHT(height));
sun6i_isp_load_write(isp_dev, SUN6I_ISP_OB_SRC0_VALID_START_REG,
SUN6I_ISP_OB_SRC0_VALID_START_HORZ(0) |
SUN6I_ISP_OB_SRC0_VALID_START_VERT(0));
}
static void sun6i_isp_params_configure_ae(struct sun6i_isp_device *isp_dev)
{
/* These are default values that need to be set to get an output. */
sun6i_isp_load_write(isp_dev, SUN6I_ISP_AE_CFG_REG,
SUN6I_ISP_AE_CFG_LOW_BRI_TH(0xff) |
SUN6I_ISP_AE_CFG_HORZ_NUM(8) |
SUN6I_ISP_AE_CFG_HIGH_BRI_TH(0xf00) |
SUN6I_ISP_AE_CFG_VERT_NUM(8));
}
static void
sun6i_isp_params_configure_bayer(struct sun6i_isp_device *isp_dev,
const struct sun6i_isp_params_config *config)
{
const struct sun6i_isp_params_config_bayer *bayer = &config->bayer;
sun6i_isp_load_write(isp_dev, SUN6I_ISP_BAYER_OFFSET0_REG,
SUN6I_ISP_BAYER_OFFSET0_R(bayer->offset_r) |
SUN6I_ISP_BAYER_OFFSET0_GR(bayer->offset_gr));
sun6i_isp_load_write(isp_dev, SUN6I_ISP_BAYER_OFFSET1_REG,
SUN6I_ISP_BAYER_OFFSET1_GB(bayer->offset_gb) |
SUN6I_ISP_BAYER_OFFSET1_B(bayer->offset_b));
sun6i_isp_load_write(isp_dev, SUN6I_ISP_BAYER_GAIN0_REG,
SUN6I_ISP_BAYER_GAIN0_R(bayer->gain_r) |
SUN6I_ISP_BAYER_GAIN0_GR(bayer->gain_gr));
sun6i_isp_load_write(isp_dev, SUN6I_ISP_BAYER_GAIN1_REG,
SUN6I_ISP_BAYER_GAIN1_GB(bayer->gain_gb) |
SUN6I_ISP_BAYER_GAIN1_B(bayer->gain_b));
}
static void sun6i_isp_params_configure_wb(struct sun6i_isp_device *isp_dev)
{
/* These are default values that need to be set to get an output. */
sun6i_isp_load_write(isp_dev, SUN6I_ISP_WB_GAIN0_REG,
SUN6I_ISP_WB_GAIN0_R(256) |
SUN6I_ISP_WB_GAIN0_GR(256));
sun6i_isp_load_write(isp_dev, SUN6I_ISP_WB_GAIN1_REG,
SUN6I_ISP_WB_GAIN1_GB(256) |
SUN6I_ISP_WB_GAIN1_B(256));
sun6i_isp_load_write(isp_dev, SUN6I_ISP_WB_CFG_REG,
SUN6I_ISP_WB_CFG_CLIP(0xfff));
}
static void sun6i_isp_params_configure_base(struct sun6i_isp_device *isp_dev)
{
sun6i_isp_params_configure_ae(isp_dev);
sun6i_isp_params_configure_ob(isp_dev);
sun6i_isp_params_configure_wb(isp_dev);
}
static void
sun6i_isp_params_configure_bdnf(struct sun6i_isp_device *isp_dev,
const struct sun6i_isp_params_config *config)
{
const struct sun6i_isp_params_config_bdnf *bdnf = &config->bdnf;
sun6i_isp_load_write(isp_dev, SUN6I_ISP_BDNF_CFG_REG,
SUN6I_ISP_BDNF_CFG_IN_DIS_MIN(bdnf->in_dis_min) |
SUN6I_ISP_BDNF_CFG_IN_DIS_MAX(bdnf->in_dis_max));
sun6i_isp_load_write(isp_dev, SUN6I_ISP_BDNF_COEF_RB_REG,
SUN6I_ISP_BDNF_COEF_RB(0, bdnf->coefficients_rb[0]) |
SUN6I_ISP_BDNF_COEF_RB(1, bdnf->coefficients_rb[1]) |
SUN6I_ISP_BDNF_COEF_RB(2, bdnf->coefficients_rb[2]) |
SUN6I_ISP_BDNF_COEF_RB(3, bdnf->coefficients_rb[3]) |
SUN6I_ISP_BDNF_COEF_RB(4, bdnf->coefficients_rb[4]));
sun6i_isp_load_write(isp_dev, SUN6I_ISP_BDNF_COEF_G_REG,
SUN6I_ISP_BDNF_COEF_G(0, bdnf->coefficients_g[0]) |
SUN6I_ISP_BDNF_COEF_G(1, bdnf->coefficients_g[1]) |
SUN6I_ISP_BDNF_COEF_G(2, bdnf->coefficients_g[2]) |
SUN6I_ISP_BDNF_COEF_G(3, bdnf->coefficients_g[3]) |
SUN6I_ISP_BDNF_COEF_G(4, bdnf->coefficients_g[4]) |
SUN6I_ISP_BDNF_COEF_G(5, bdnf->coefficients_g[5]) |
SUN6I_ISP_BDNF_COEF_G(6, bdnf->coefficients_g[6]));
}
static void
sun6i_isp_params_configure_modules(struct sun6i_isp_device *isp_dev,
const struct sun6i_isp_params_config *config)
{
u32 value;
if (config->modules_used & SUN6I_ISP_MODULE_BDNF)
sun6i_isp_params_configure_bdnf(isp_dev, config);
if (config->modules_used & SUN6I_ISP_MODULE_BAYER)
sun6i_isp_params_configure_bayer(isp_dev, config);
value = sun6i_isp_load_read(isp_dev, SUN6I_ISP_MODULE_EN_REG);
/* Clear all modules but keep input configuration. */
value &= SUN6I_ISP_MODULE_EN_SRC0 | SUN6I_ISP_MODULE_EN_SRC1;
if (config->modules_used & SUN6I_ISP_MODULE_BDNF)
value |= SUN6I_ISP_MODULE_EN_BDNF;
/* Bayer stage is always enabled. */
sun6i_isp_load_write(isp_dev, SUN6I_ISP_MODULE_EN_REG, value);
}
void sun6i_isp_params_configure(struct sun6i_isp_device *isp_dev)
{
struct sun6i_isp_params_state *state = &isp_dev->params.state;
unsigned long flags;
spin_lock_irqsave(&state->lock, flags);
sun6i_isp_params_configure_base(isp_dev);
/* Default config is only applied at the very first stream start. */
if (state->configured)
goto complete;
sun6i_isp_params_configure_modules(isp_dev,
&sun6i_isp_params_config_default);
state->configured = true;
complete:
spin_unlock_irqrestore(&state->lock, flags);
}
/* State */
static void sun6i_isp_params_state_cleanup(struct sun6i_isp_device *isp_dev,
bool error)
{
struct sun6i_isp_params_state *state = &isp_dev->params.state;
struct sun6i_isp_buffer *isp_buffer;
struct vb2_buffer *vb2_buffer;
unsigned long flags;
spin_lock_irqsave(&state->lock, flags);
if (state->pending) {
vb2_buffer = &state->pending->v4l2_buffer.vb2_buf;
vb2_buffer_done(vb2_buffer, error ? VB2_BUF_STATE_ERROR :
VB2_BUF_STATE_QUEUED);
state->pending = NULL;
}
list_for_each_entry(isp_buffer, &state->queue, list) {
vb2_buffer = &isp_buffer->v4l2_buffer.vb2_buf;
vb2_buffer_done(vb2_buffer, error ? VB2_BUF_STATE_ERROR :
VB2_BUF_STATE_QUEUED);
}
INIT_LIST_HEAD(&state->queue);
spin_unlock_irqrestore(&state->lock, flags);
}
void sun6i_isp_params_state_update(struct sun6i_isp_device *isp_dev,
bool *update)
{
struct sun6i_isp_params_state *state = &isp_dev->params.state;
struct sun6i_isp_buffer *isp_buffer;
struct vb2_buffer *vb2_buffer;
const struct sun6i_isp_params_config *config;
unsigned long flags;
spin_lock_irqsave(&state->lock, flags);
if (list_empty(&state->queue))
goto complete;
if (state->pending)
goto complete;
isp_buffer = list_first_entry(&state->queue, struct sun6i_isp_buffer,
list);
vb2_buffer = &isp_buffer->v4l2_buffer.vb2_buf;
config = vb2_plane_vaddr(vb2_buffer, 0);
sun6i_isp_params_configure_modules(isp_dev, config);
list_del(&isp_buffer->list);
state->pending = isp_buffer;
if (update)
*update = true;
complete:
spin_unlock_irqrestore(&state->lock, flags);
}
void sun6i_isp_params_state_complete(struct sun6i_isp_device *isp_dev)
{
struct sun6i_isp_params_state *state = &isp_dev->params.state;
struct sun6i_isp_buffer *isp_buffer;
struct vb2_buffer *vb2_buffer;
unsigned long flags;
spin_lock_irqsave(&state->lock, flags);
if (!state->pending)
goto complete;
isp_buffer = state->pending;
vb2_buffer = &isp_buffer->v4l2_buffer.vb2_buf;
vb2_buffer->timestamp = ktime_get_ns();
/* Parameters will be applied starting from the next frame. */
isp_buffer->v4l2_buffer.sequence = isp_dev->capture.state.sequence + 1;
vb2_buffer_done(vb2_buffer, VB2_BUF_STATE_DONE);
state->pending = NULL;
complete:
spin_unlock_irqrestore(&state->lock, flags);
}
/* Queue */
static int sun6i_isp_params_queue_setup(struct vb2_queue *queue,
unsigned int *buffers_count,
unsigned int *planes_count,
unsigned int sizes[],
struct device *alloc_devs[])
{
struct sun6i_isp_device *isp_dev = vb2_get_drv_priv(queue);
unsigned int size = isp_dev->params.format.fmt.meta.buffersize;
if (*planes_count)
return sizes[0] < size ? -EINVAL : 0;
*planes_count = 1;
sizes[0] = size;
return 0;
}
static int sun6i_isp_params_buffer_prepare(struct vb2_buffer *vb2_buffer)
{
struct sun6i_isp_device *isp_dev =
vb2_get_drv_priv(vb2_buffer->vb2_queue);
struct v4l2_device *v4l2_dev = &isp_dev->v4l2.v4l2_dev;
unsigned int size = isp_dev->params.format.fmt.meta.buffersize;
if (vb2_plane_size(vb2_buffer, 0) < size) {
v4l2_err(v4l2_dev, "buffer too small (%lu < %u)\n",
vb2_plane_size(vb2_buffer, 0), size);
return -EINVAL;
}
vb2_set_plane_payload(vb2_buffer, 0, size);
return 0;
}
static void sun6i_isp_params_buffer_queue(struct vb2_buffer *vb2_buffer)
{
struct sun6i_isp_device *isp_dev =
vb2_get_drv_priv(vb2_buffer->vb2_queue);
struct sun6i_isp_params_state *state = &isp_dev->params.state;
struct vb2_v4l2_buffer *v4l2_buffer = to_vb2_v4l2_buffer(vb2_buffer);
struct sun6i_isp_buffer *isp_buffer =
container_of(v4l2_buffer, struct sun6i_isp_buffer, v4l2_buffer);
bool capture_streaming = isp_dev->capture.state.streaming;
unsigned long flags;
spin_lock_irqsave(&state->lock, flags);
list_add_tail(&isp_buffer->list, &state->queue);
spin_unlock_irqrestore(&state->lock, flags);
if (state->streaming && capture_streaming)
sun6i_isp_state_update(isp_dev, false);
}
static int sun6i_isp_params_start_streaming(struct vb2_queue *queue,
unsigned int count)
{
struct sun6i_isp_device *isp_dev = vb2_get_drv_priv(queue);
struct sun6i_isp_params_state *state = &isp_dev->params.state;
bool capture_streaming = isp_dev->capture.state.streaming;
state->streaming = true;
/*
* Update the state as soon as possible if capture is streaming,
* otherwise it will be applied when capture starts streaming.
*/
if (capture_streaming)
sun6i_isp_state_update(isp_dev, false);
return 0;
}
static void sun6i_isp_params_stop_streaming(struct vb2_queue *queue)
{
struct sun6i_isp_device *isp_dev = vb2_get_drv_priv(queue);
struct sun6i_isp_params_state *state = &isp_dev->params.state;
state->streaming = false;
sun6i_isp_params_state_cleanup(isp_dev, true);
}
static const struct vb2_ops sun6i_isp_params_queue_ops = {
.queue_setup = sun6i_isp_params_queue_setup,
.buf_prepare = sun6i_isp_params_buffer_prepare,
.buf_queue = sun6i_isp_params_buffer_queue,
.start_streaming = sun6i_isp_params_start_streaming,
.stop_streaming = sun6i_isp_params_stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
/* Video Device */
static int sun6i_isp_params_querycap(struct file *file, void *private,
struct v4l2_capability *capability)
{
struct sun6i_isp_device *isp_dev = video_drvdata(file);
struct video_device *video_dev = &isp_dev->params.video_dev;
strscpy(capability->driver, SUN6I_ISP_NAME, sizeof(capability->driver));
strscpy(capability->card, video_dev->name, sizeof(capability->card));
snprintf(capability->bus_info, sizeof(capability->bus_info),
"platform:%s", dev_name(isp_dev->dev));
return 0;
}
static int sun6i_isp_params_enum_fmt(struct file *file, void *private,
struct v4l2_fmtdesc *fmtdesc)
{
struct sun6i_isp_device *isp_dev = video_drvdata(file);
struct v4l2_meta_format *params_format =
&isp_dev->params.format.fmt.meta;
if (fmtdesc->index > 0)
return -EINVAL;
fmtdesc->pixelformat = params_format->dataformat;
return 0;
}
static int sun6i_isp_params_g_fmt(struct file *file, void *private,
struct v4l2_format *format)
{
struct sun6i_isp_device *isp_dev = video_drvdata(file);
*format = isp_dev->params.format;
return 0;
}
static const struct v4l2_ioctl_ops sun6i_isp_params_ioctl_ops = {
.vidioc_querycap = sun6i_isp_params_querycap,
.vidioc_enum_fmt_meta_out = sun6i_isp_params_enum_fmt,
.vidioc_g_fmt_meta_out = sun6i_isp_params_g_fmt,
.vidioc_s_fmt_meta_out = sun6i_isp_params_g_fmt,
.vidioc_try_fmt_meta_out = sun6i_isp_params_g_fmt,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
};
static const struct v4l2_file_operations sun6i_isp_params_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = video_ioctl2,
.open = v4l2_fh_open,
.release = vb2_fop_release,
.mmap = vb2_fop_mmap,
.poll = vb2_fop_poll,
};
/* Params */
int sun6i_isp_params_setup(struct sun6i_isp_device *isp_dev)
{
struct sun6i_isp_params *params = &isp_dev->params;
struct sun6i_isp_params_state *state = ¶ms->state;
struct v4l2_device *v4l2_dev = &isp_dev->v4l2.v4l2_dev;
struct v4l2_subdev *proc_subdev = &isp_dev->proc.subdev;
struct video_device *video_dev = ¶ms->video_dev;
struct vb2_queue *queue = &isp_dev->params.queue;
struct media_pad *pad = &isp_dev->params.pad;
struct v4l2_format *format = &isp_dev->params.format;
struct v4l2_meta_format *params_format = &format->fmt.meta;
int ret;
/* State */
INIT_LIST_HEAD(&state->queue);
spin_lock_init(&state->lock);
/* Media Pads */
pad->flags = MEDIA_PAD_FL_SOURCE | MEDIA_PAD_FL_MUST_CONNECT;
ret = media_entity_pads_init(&video_dev->entity, 1, pad);
if (ret)
goto error_mutex;
/* Queue */
mutex_init(¶ms->lock);
queue->type = V4L2_BUF_TYPE_META_OUTPUT;
queue->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
queue->buf_struct_size = sizeof(struct sun6i_isp_buffer);
queue->ops = &sun6i_isp_params_queue_ops;
queue->mem_ops = &vb2_vmalloc_memops;
queue->min_buffers_needed = 1;
queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
queue->lock = ¶ms->lock;
queue->dev = isp_dev->dev;
queue->drv_priv = isp_dev;
ret = vb2_queue_init(queue);
if (ret) {
v4l2_err(v4l2_dev, "failed to initialize vb2 queue: %d\n", ret);
goto error_media_entity;
}
/* V4L2 Format */
format->type = queue->type;
params_format->dataformat = V4L2_META_FMT_SUN6I_ISP_PARAMS;
params_format->buffersize = sizeof(struct sun6i_isp_params_config);
/* Video Device */
strscpy(video_dev->name, SUN6I_ISP_PARAMS_NAME,
sizeof(video_dev->name));
video_dev->device_caps = V4L2_CAP_META_OUTPUT | V4L2_CAP_STREAMING;
video_dev->vfl_dir = VFL_DIR_TX;
video_dev->release = video_device_release_empty;
video_dev->fops = &sun6i_isp_params_fops;
video_dev->ioctl_ops = &sun6i_isp_params_ioctl_ops;
video_dev->v4l2_dev = v4l2_dev;
video_dev->queue = queue;
video_dev->lock = ¶ms->lock;
video_set_drvdata(video_dev, isp_dev);
ret = video_register_device(video_dev, VFL_TYPE_VIDEO, -1);
if (ret) {
v4l2_err(v4l2_dev, "failed to register video device: %d\n",
ret);
goto error_media_entity;
}
/* Media Pad Link */
ret = media_create_pad_link(&video_dev->entity, 0,
&proc_subdev->entity,
SUN6I_ISP_PROC_PAD_SINK_PARAMS,
MEDIA_LNK_FL_ENABLED |
MEDIA_LNK_FL_IMMUTABLE);
if (ret < 0) {
v4l2_err(v4l2_dev, "failed to create %s:%u -> %s:%u link\n",
video_dev->entity.name, 0, proc_subdev->entity.name,
SUN6I_ISP_PROC_PAD_SINK_PARAMS);
goto error_video_device;
}
return 0;
error_video_device:
vb2_video_unregister_device(video_dev);
error_media_entity:
media_entity_cleanup(&video_dev->entity);
error_mutex:
mutex_destroy(¶ms->lock);
return ret;
}
void sun6i_isp_params_cleanup(struct sun6i_isp_device *isp_dev)
{
struct sun6i_isp_params *params = &isp_dev->params;
struct video_device *video_dev = ¶ms->video_dev;
vb2_video_unregister_device(video_dev);
media_entity_cleanup(&video_dev->entity);
mutex_destroy(¶ms->lock);
}
| linux-master | drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2021-2022 Bootlin
* Author: Paul Kocialkowski <[email protected]>
*/
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-v4l2.h>
#include "sun6i_isp.h"
#include "sun6i_isp_capture.h"
#include "sun6i_isp_proc.h"
#include "sun6i_isp_reg.h"
/* Helpers */
void sun6i_isp_capture_dimensions(struct sun6i_isp_device *isp_dev,
unsigned int *width, unsigned int *height)
{
if (width)
*width = isp_dev->capture.format.fmt.pix.width;
if (height)
*height = isp_dev->capture.format.fmt.pix.height;
}
void sun6i_isp_capture_format(struct sun6i_isp_device *isp_dev,
u32 *pixelformat)
{
if (pixelformat)
*pixelformat = isp_dev->capture.format.fmt.pix.pixelformat;
}
/* Format */
static const struct sun6i_isp_capture_format sun6i_isp_capture_formats[] = {
{
.pixelformat = V4L2_PIX_FMT_NV12,
.output_format = SUN6I_ISP_OUTPUT_FMT_YUV420SP,
},
{
.pixelformat = V4L2_PIX_FMT_NV21,
.output_format = SUN6I_ISP_OUTPUT_FMT_YVU420SP,
},
};
const struct sun6i_isp_capture_format *
sun6i_isp_capture_format_find(u32 pixelformat)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(sun6i_isp_capture_formats); i++)
if (sun6i_isp_capture_formats[i].pixelformat == pixelformat)
return &sun6i_isp_capture_formats[i];
return NULL;
}
/* Capture */
static void
sun6i_isp_capture_buffer_configure(struct sun6i_isp_device *isp_dev,
struct sun6i_isp_buffer *isp_buffer)
{
const struct v4l2_format_info *info;
struct vb2_buffer *vb2_buffer;
unsigned int width, height;
unsigned int width_aligned;
dma_addr_t address;
u32 pixelformat;
vb2_buffer = &isp_buffer->v4l2_buffer.vb2_buf;
address = vb2_dma_contig_plane_dma_addr(vb2_buffer, 0);
sun6i_isp_load_write(isp_dev, SUN6I_ISP_MCH_Y_ADDR0_REG,
SUN6I_ISP_ADDR_VALUE(address));
sun6i_isp_capture_dimensions(isp_dev, &width, &height);
sun6i_isp_capture_format(isp_dev, &pixelformat);
info = v4l2_format_info(pixelformat);
if (WARN_ON(!info))
return;
/* Stride needs to be aligned to 4. */
width_aligned = ALIGN(width, 2);
if (info->comp_planes > 1) {
address += info->bpp[0] * width_aligned * height;
sun6i_isp_load_write(isp_dev, SUN6I_ISP_MCH_U_ADDR0_REG,
SUN6I_ISP_ADDR_VALUE(address));
}
if (info->comp_planes > 2) {
address += info->bpp[1] *
DIV_ROUND_UP(width_aligned, info->hdiv) *
DIV_ROUND_UP(height, info->vdiv);
sun6i_isp_load_write(isp_dev, SUN6I_ISP_MCH_V_ADDR0_REG,
SUN6I_ISP_ADDR_VALUE(address));
}
}
void sun6i_isp_capture_configure(struct sun6i_isp_device *isp_dev)
{
unsigned int width, height;
unsigned int stride_luma, stride_chroma;
unsigned int stride_luma_div4, stride_chroma_div4 = 0;
const struct sun6i_isp_capture_format *format;
const struct v4l2_format_info *info;
u32 pixelformat;
sun6i_isp_capture_dimensions(isp_dev, &width, &height);
sun6i_isp_capture_format(isp_dev, &pixelformat);
format = sun6i_isp_capture_format_find(pixelformat);
if (WARN_ON(!format))
return;
sun6i_isp_load_write(isp_dev, SUN6I_ISP_MCH_SIZE_CFG_REG,
SUN6I_ISP_MCH_SIZE_CFG_WIDTH(width) |
SUN6I_ISP_MCH_SIZE_CFG_HEIGHT(height));
info = v4l2_format_info(pixelformat);
if (WARN_ON(!info))
return;
stride_luma = width * info->bpp[0];
stride_luma_div4 = DIV_ROUND_UP(stride_luma, 4);
if (info->comp_planes > 1) {
stride_chroma = width * info->bpp[1] / info->hdiv;
stride_chroma_div4 = DIV_ROUND_UP(stride_chroma, 4);
}
sun6i_isp_load_write(isp_dev, SUN6I_ISP_MCH_CFG_REG,
SUN6I_ISP_MCH_CFG_EN |
SUN6I_ISP_MCH_CFG_OUTPUT_FMT(format->output_format) |
SUN6I_ISP_MCH_CFG_STRIDE_Y_DIV4(stride_luma_div4) |
SUN6I_ISP_MCH_CFG_STRIDE_UV_DIV4(stride_chroma_div4));
}
/* State */
static void sun6i_isp_capture_state_cleanup(struct sun6i_isp_device *isp_dev,
bool error)
{
struct sun6i_isp_capture_state *state = &isp_dev->capture.state;
struct sun6i_isp_buffer **isp_buffer_states[] = {
&state->pending, &state->current, &state->complete,
};
struct sun6i_isp_buffer *isp_buffer;
struct vb2_buffer *vb2_buffer;
unsigned long flags;
unsigned int i;
spin_lock_irqsave(&state->lock, flags);
for (i = 0; i < ARRAY_SIZE(isp_buffer_states); i++) {
isp_buffer = *isp_buffer_states[i];
if (!isp_buffer)
continue;
vb2_buffer = &isp_buffer->v4l2_buffer.vb2_buf;
vb2_buffer_done(vb2_buffer, error ? VB2_BUF_STATE_ERROR :
VB2_BUF_STATE_QUEUED);
*isp_buffer_states[i] = NULL;
}
list_for_each_entry(isp_buffer, &state->queue, list) {
vb2_buffer = &isp_buffer->v4l2_buffer.vb2_buf;
vb2_buffer_done(vb2_buffer, error ? VB2_BUF_STATE_ERROR :
VB2_BUF_STATE_QUEUED);
}
INIT_LIST_HEAD(&state->queue);
spin_unlock_irqrestore(&state->lock, flags);
}
void sun6i_isp_capture_state_update(struct sun6i_isp_device *isp_dev,
bool *update)
{
struct sun6i_isp_capture_state *state = &isp_dev->capture.state;
struct sun6i_isp_buffer *isp_buffer;
unsigned long flags;
spin_lock_irqsave(&state->lock, flags);
if (list_empty(&state->queue))
goto complete;
if (state->pending)
goto complete;
isp_buffer = list_first_entry(&state->queue, struct sun6i_isp_buffer,
list);
sun6i_isp_capture_buffer_configure(isp_dev, isp_buffer);
list_del(&isp_buffer->list);
state->pending = isp_buffer;
if (update)
*update = true;
complete:
spin_unlock_irqrestore(&state->lock, flags);
}
void sun6i_isp_capture_state_complete(struct sun6i_isp_device *isp_dev)
{
struct sun6i_isp_capture_state *state = &isp_dev->capture.state;
unsigned long flags;
spin_lock_irqsave(&state->lock, flags);
if (!state->pending)
goto complete;
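	/*
	 * Rotate the buffer states: the buffer that was current becomes
	 * complete and is returned below, while the pending buffer (whose
	 * address was just loaded by the hardware) becomes current.
	 */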
state->complete = state->current;
state->current = state->pending;
state->pending = NULL;
if (state->complete) {
struct sun6i_isp_buffer *isp_buffer = state->complete;
struct vb2_buffer *vb2_buffer =
&isp_buffer->v4l2_buffer.vb2_buf;
vb2_buffer->timestamp = ktime_get_ns();
isp_buffer->v4l2_buffer.sequence = state->sequence;
vb2_buffer_done(vb2_buffer, VB2_BUF_STATE_DONE);
state->complete = NULL;
}
complete:
spin_unlock_irqrestore(&state->lock, flags);
}
void sun6i_isp_capture_finish(struct sun6i_isp_device *isp_dev)
{
struct sun6i_isp_capture_state *state = &isp_dev->capture.state;
unsigned long flags;
spin_lock_irqsave(&state->lock, flags);
state->sequence++;
spin_unlock_irqrestore(&state->lock, flags);
}
/* Queue */
static int sun6i_isp_capture_queue_setup(struct vb2_queue *queue,
unsigned int *buffers_count,
unsigned int *planes_count,
unsigned int sizes[],
struct device *alloc_devs[])
{
struct sun6i_isp_device *isp_dev = vb2_get_drv_priv(queue);
unsigned int size = isp_dev->capture.format.fmt.pix.sizeimage;
if (*planes_count)
return sizes[0] < size ? -EINVAL : 0;
*planes_count = 1;
sizes[0] = size;
return 0;
}
static int sun6i_isp_capture_buffer_prepare(struct vb2_buffer *vb2_buffer)
{
struct sun6i_isp_device *isp_dev =
vb2_get_drv_priv(vb2_buffer->vb2_queue);
struct v4l2_device *v4l2_dev = &isp_dev->v4l2.v4l2_dev;
unsigned int size = isp_dev->capture.format.fmt.pix.sizeimage;
if (vb2_plane_size(vb2_buffer, 0) < size) {
v4l2_err(v4l2_dev, "buffer too small (%lu < %u)\n",
vb2_plane_size(vb2_buffer, 0), size);
return -EINVAL;
}
vb2_set_plane_payload(vb2_buffer, 0, size);
return 0;
}
static void sun6i_isp_capture_buffer_queue(struct vb2_buffer *vb2_buffer)
{
struct sun6i_isp_device *isp_dev =
vb2_get_drv_priv(vb2_buffer->vb2_queue);
struct sun6i_isp_capture_state *state = &isp_dev->capture.state;
struct vb2_v4l2_buffer *v4l2_buffer = to_vb2_v4l2_buffer(vb2_buffer);
struct sun6i_isp_buffer *isp_buffer =
container_of(v4l2_buffer, struct sun6i_isp_buffer, v4l2_buffer);
unsigned long flags;
spin_lock_irqsave(&state->lock, flags);
list_add_tail(&isp_buffer->list, &state->queue);
spin_unlock_irqrestore(&state->lock, flags);
/* Update the state to schedule our buffer as soon as possible. */
if (state->streaming)
sun6i_isp_state_update(isp_dev, false);
}
static int sun6i_isp_capture_start_streaming(struct vb2_queue *queue,
unsigned int count)
{
struct sun6i_isp_device *isp_dev = vb2_get_drv_priv(queue);
struct sun6i_isp_capture_state *state = &isp_dev->capture.state;
struct video_device *video_dev = &isp_dev->capture.video_dev;
struct v4l2_subdev *subdev = &isp_dev->proc.subdev;
int ret;
state->sequence = 0;
ret = video_device_pipeline_alloc_start(video_dev);
if (ret < 0)
goto error_state;
state->streaming = true;
ret = v4l2_subdev_call(subdev, video, s_stream, 1);
if (ret && ret != -ENOIOCTLCMD)
goto error_streaming;
return 0;
error_streaming:
state->streaming = false;
video_device_pipeline_stop(video_dev);
error_state:
sun6i_isp_capture_state_cleanup(isp_dev, false);
return ret;
}
static void sun6i_isp_capture_stop_streaming(struct vb2_queue *queue)
{
struct sun6i_isp_device *isp_dev = vb2_get_drv_priv(queue);
struct sun6i_isp_capture_state *state = &isp_dev->capture.state;
struct video_device *video_dev = &isp_dev->capture.video_dev;
struct v4l2_subdev *subdev = &isp_dev->proc.subdev;
v4l2_subdev_call(subdev, video, s_stream, 0);
state->streaming = false;
video_device_pipeline_stop(video_dev);
sun6i_isp_capture_state_cleanup(isp_dev, true);
}
static const struct vb2_ops sun6i_isp_capture_queue_ops = {
.queue_setup = sun6i_isp_capture_queue_setup,
.buf_prepare = sun6i_isp_capture_buffer_prepare,
.buf_queue = sun6i_isp_capture_buffer_queue,
.start_streaming = sun6i_isp_capture_start_streaming,
.stop_streaming = sun6i_isp_capture_stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
/* Video Device */
static void sun6i_isp_capture_format_prepare(struct v4l2_format *format)
{
struct v4l2_pix_format *pix_format = &format->fmt.pix;
const struct v4l2_format_info *info;
unsigned int width, height;
unsigned int width_aligned;
unsigned int i;
v4l_bound_align_image(&pix_format->width, SUN6I_ISP_CAPTURE_WIDTH_MIN,
SUN6I_ISP_CAPTURE_WIDTH_MAX, 1,
&pix_format->height, SUN6I_ISP_CAPTURE_HEIGHT_MIN,
SUN6I_ISP_CAPTURE_HEIGHT_MAX, 1, 0);
if (!sun6i_isp_capture_format_find(pix_format->pixelformat))
pix_format->pixelformat =
sun6i_isp_capture_formats[0].pixelformat;
info = v4l2_format_info(pix_format->pixelformat);
if (WARN_ON(!info))
return;
width = pix_format->width;
height = pix_format->height;
/* Stride needs to be aligned to 4. */
width_aligned = ALIGN(width, 2);
pix_format->bytesperline = width_aligned * info->bpp[0];
pix_format->sizeimage = 0;
for (i = 0; i < info->comp_planes; i++) {
unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
pix_format->sizeimage += info->bpp[i] *
DIV_ROUND_UP(width_aligned, hdiv) *
DIV_ROUND_UP(height, vdiv);
}
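	/*
	 * Worked example, assuming the v4l2_format_info() entry for NV12
	 * reports bpp = { 1, 2 } and hdiv = vdiv = 2: a 1280x720 NV12 frame
	 * gives bytesperline = 1280 and sizeimage = 1280 * 720 +
	 * 2 * 640 * 360 = 1382400 bytes.
	 */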
pix_format->field = V4L2_FIELD_NONE;
pix_format->colorspace = V4L2_COLORSPACE_RAW;
pix_format->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
pix_format->quantization = V4L2_QUANTIZATION_DEFAULT;
pix_format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
}
static int sun6i_isp_capture_querycap(struct file *file, void *private,
struct v4l2_capability *capability)
{
struct sun6i_isp_device *isp_dev = video_drvdata(file);
struct video_device *video_dev = &isp_dev->capture.video_dev;
strscpy(capability->driver, SUN6I_ISP_NAME, sizeof(capability->driver));
strscpy(capability->card, video_dev->name, sizeof(capability->card));
snprintf(capability->bus_info, sizeof(capability->bus_info),
"platform:%s", dev_name(isp_dev->dev));
return 0;
}
static int sun6i_isp_capture_enum_fmt(struct file *file, void *private,
struct v4l2_fmtdesc *fmtdesc)
{
u32 index = fmtdesc->index;
if (index >= ARRAY_SIZE(sun6i_isp_capture_formats))
return -EINVAL;
fmtdesc->pixelformat = sun6i_isp_capture_formats[index].pixelformat;
return 0;
}
static int sun6i_isp_capture_g_fmt(struct file *file, void *private,
struct v4l2_format *format)
{
struct sun6i_isp_device *isp_dev = video_drvdata(file);
*format = isp_dev->capture.format;
return 0;
}
static int sun6i_isp_capture_s_fmt(struct file *file, void *private,
struct v4l2_format *format)
{
struct sun6i_isp_device *isp_dev = video_drvdata(file);
if (vb2_is_busy(&isp_dev->capture.queue))
return -EBUSY;
sun6i_isp_capture_format_prepare(format);
isp_dev->capture.format = *format;
return 0;
}
static int sun6i_isp_capture_try_fmt(struct file *file, void *private,
struct v4l2_format *format)
{
sun6i_isp_capture_format_prepare(format);
return 0;
}
static int sun6i_isp_capture_enum_input(struct file *file, void *private,
struct v4l2_input *input)
{
if (input->index != 0)
return -EINVAL;
input->type = V4L2_INPUT_TYPE_CAMERA;
strscpy(input->name, "Camera", sizeof(input->name));
return 0;
}
static int sun6i_isp_capture_g_input(struct file *file, void *private,
unsigned int *index)
{
*index = 0;
return 0;
}
static int sun6i_isp_capture_s_input(struct file *file, void *private,
unsigned int index)
{
if (index != 0)
return -EINVAL;
return 0;
}
static const struct v4l2_ioctl_ops sun6i_isp_capture_ioctl_ops = {
.vidioc_querycap = sun6i_isp_capture_querycap,
.vidioc_enum_fmt_vid_cap = sun6i_isp_capture_enum_fmt,
.vidioc_g_fmt_vid_cap = sun6i_isp_capture_g_fmt,
.vidioc_s_fmt_vid_cap = sun6i_isp_capture_s_fmt,
.vidioc_try_fmt_vid_cap = sun6i_isp_capture_try_fmt,
.vidioc_enum_input = sun6i_isp_capture_enum_input,
.vidioc_g_input = sun6i_isp_capture_g_input,
.vidioc_s_input = sun6i_isp_capture_s_input,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
};
static int sun6i_isp_capture_open(struct file *file)
{
struct sun6i_isp_device *isp_dev = video_drvdata(file);
struct video_device *video_dev = &isp_dev->capture.video_dev;
struct mutex *lock = &isp_dev->capture.lock;
int ret;
if (mutex_lock_interruptible(lock))
return -ERESTARTSYS;
ret = v4l2_pipeline_pm_get(&video_dev->entity);
if (ret)
goto error_mutex;
ret = v4l2_fh_open(file);
if (ret)
goto error_pipeline;
mutex_unlock(lock);
return 0;
error_pipeline:
v4l2_pipeline_pm_put(&video_dev->entity);
error_mutex:
mutex_unlock(lock);
return ret;
}
static int sun6i_isp_capture_release(struct file *file)
{
struct sun6i_isp_device *isp_dev = video_drvdata(file);
struct video_device *video_dev = &isp_dev->capture.video_dev;
struct mutex *lock = &isp_dev->capture.lock;
mutex_lock(lock);
_vb2_fop_release(file, NULL);
v4l2_pipeline_pm_put(&video_dev->entity);
mutex_unlock(lock);
return 0;
}
static const struct v4l2_file_operations sun6i_isp_capture_fops = {
.owner = THIS_MODULE,
.open = sun6i_isp_capture_open,
.release = sun6i_isp_capture_release,
.unlocked_ioctl = video_ioctl2,
.poll = vb2_fop_poll,
.mmap = vb2_fop_mmap,
};
/* Media Entity */
static int sun6i_isp_capture_link_validate(struct media_link *link)
{
struct video_device *video_dev =
media_entity_to_video_device(link->sink->entity);
struct sun6i_isp_device *isp_dev = video_get_drvdata(video_dev);
struct v4l2_device *v4l2_dev = &isp_dev->v4l2.v4l2_dev;
unsigned int capture_width, capture_height;
unsigned int proc_width, proc_height;
sun6i_isp_capture_dimensions(isp_dev, &capture_width, &capture_height);
sun6i_isp_proc_dimensions(isp_dev, &proc_width, &proc_height);
/* No cropping/scaling is supported (yet). */
if (capture_width != proc_width || capture_height != proc_height) {
v4l2_err(v4l2_dev,
"invalid input/output dimensions: %ux%u/%ux%u\n",
proc_width, proc_height, capture_width,
capture_height);
return -EINVAL;
}
return 0;
}
static const struct media_entity_operations sun6i_isp_capture_entity_ops = {
.link_validate = sun6i_isp_capture_link_validate,
};
/* Capture */
int sun6i_isp_capture_setup(struct sun6i_isp_device *isp_dev)
{
struct sun6i_isp_capture *capture = &isp_dev->capture;
struct sun6i_isp_capture_state *state = &capture->state;
struct v4l2_device *v4l2_dev = &isp_dev->v4l2.v4l2_dev;
struct v4l2_subdev *proc_subdev = &isp_dev->proc.subdev;
struct video_device *video_dev = &capture->video_dev;
struct vb2_queue *queue = &capture->queue;
struct media_pad *pad = &capture->pad;
struct v4l2_format *format = &capture->format;
struct v4l2_pix_format *pix_format = &format->fmt.pix;
int ret;
/* State */
INIT_LIST_HEAD(&state->queue);
spin_lock_init(&state->lock);
/* Media Entity */
video_dev->entity.ops = &sun6i_isp_capture_entity_ops;
/* Media Pads */
pad->flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
ret = media_entity_pads_init(&video_dev->entity, 1, pad);
if (ret)
goto error_mutex;
/* Queue */
mutex_init(&capture->lock);
queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
queue->io_modes = VB2_MMAP | VB2_DMABUF;
queue->buf_struct_size = sizeof(struct sun6i_isp_buffer);
queue->ops = &sun6i_isp_capture_queue_ops;
queue->mem_ops = &vb2_dma_contig_memops;
queue->min_buffers_needed = 2;
queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
queue->lock = &capture->lock;
queue->dev = isp_dev->dev;
queue->drv_priv = isp_dev;
ret = vb2_queue_init(queue);
if (ret) {
v4l2_err(v4l2_dev, "failed to initialize vb2 queue: %d\n", ret);
goto error_media_entity;
}
/* V4L2 Format */
format->type = queue->type;
pix_format->pixelformat = sun6i_isp_capture_formats[0].pixelformat;
pix_format->width = 1280;
pix_format->height = 720;
sun6i_isp_capture_format_prepare(format);
/* Video Device */
strscpy(video_dev->name, SUN6I_ISP_CAPTURE_NAME,
sizeof(video_dev->name));
video_dev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
video_dev->vfl_dir = VFL_DIR_RX;
video_dev->release = video_device_release_empty;
video_dev->fops = &sun6i_isp_capture_fops;
video_dev->ioctl_ops = &sun6i_isp_capture_ioctl_ops;
video_dev->v4l2_dev = v4l2_dev;
video_dev->queue = queue;
video_dev->lock = &capture->lock;
video_set_drvdata(video_dev, isp_dev);
ret = video_register_device(video_dev, VFL_TYPE_VIDEO, -1);
if (ret) {
v4l2_err(v4l2_dev, "failed to register video device: %d\n",
ret);
goto error_media_entity;
}
/* Media Pad Link */
ret = media_create_pad_link(&proc_subdev->entity,
SUN6I_ISP_PROC_PAD_SOURCE,
&video_dev->entity, 0,
MEDIA_LNK_FL_ENABLED |
MEDIA_LNK_FL_IMMUTABLE);
if (ret < 0) {
v4l2_err(v4l2_dev, "failed to create %s:%u -> %s:%u link\n",
proc_subdev->entity.name, SUN6I_ISP_PROC_PAD_SOURCE,
video_dev->entity.name, 0);
goto error_video_device;
}
return 0;
error_video_device:
vb2_video_unregister_device(video_dev);
error_media_entity:
media_entity_cleanup(&video_dev->entity);
error_mutex:
mutex_destroy(&capture->lock);
return ret;
}
void sun6i_isp_capture_cleanup(struct sun6i_isp_device *isp_dev)
{
struct sun6i_isp_capture *capture = &isp_dev->capture;
struct video_device *video_dev = &capture->video_dev;
vb2_video_unregister_device(video_dev);
media_entity_cleanup(&video_dev->entity);
mutex_destroy(&capture->lock);
}
| linux-master | drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * Maxim MAX96712 Quad GMSL2 Deserializer Driver
*
* Copyright (C) 2021 Renesas Electronics Corporation
* Copyright (C) 2021 Niklas Söderlund
*/
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/regmap.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
#define MAX96712_ID 0x20
#define MAX96712_DPLL_FREQ 1000
enum max96712_pattern {
MAX96712_PATTERN_CHECKERBOARD = 0,
MAX96712_PATTERN_GRADIENT,
};
struct max96712_priv {
struct i2c_client *client;
struct regmap *regmap;
struct gpio_desc *gpiod_pwdn;
bool cphy;
struct v4l2_mbus_config_mipi_csi2 mipi;
struct v4l2_subdev sd;
struct v4l2_ctrl_handler ctrl_handler;
struct media_pad pads[1];
enum max96712_pattern pattern;
};
static int max96712_read(struct max96712_priv *priv, int reg)
{
int ret, val;
ret = regmap_read(priv->regmap, reg, &val);
if (ret) {
dev_err(&priv->client->dev, "read 0x%04x failed\n", reg);
return ret;
}
return val;
}
static int max96712_write(struct max96712_priv *priv, unsigned int reg, u8 val)
{
int ret;
ret = regmap_write(priv->regmap, reg, val);
if (ret)
dev_err(&priv->client->dev, "write 0x%04x failed\n", reg);
return ret;
}
static int max96712_update_bits(struct max96712_priv *priv, unsigned int reg,
u8 mask, u8 val)
{
int ret;
ret = regmap_update_bits(priv->regmap, reg, mask, val);
if (ret)
dev_err(&priv->client->dev, "update 0x%04x failed\n", reg);
return ret;
}
static int max96712_write_bulk(struct max96712_priv *priv, unsigned int reg,
const void *val, size_t val_count)
{
int ret;
ret = regmap_bulk_write(priv->regmap, reg, val, val_count);
if (ret)
dev_err(&priv->client->dev, "bulk write 0x%04x failed\n", reg);
return ret;
}
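/*
 * Write an up-to-4-byte value to consecutive registers, most significant
 * byte first: for example, val = 0x012345 with val_count = 3 writes 0x01,
 * 0x23 and 0x45 to reg, reg + 1 and reg + 2 respectively.
 */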
static int max96712_write_bulk_value(struct max96712_priv *priv,
unsigned int reg, unsigned int val,
size_t val_count)
{
unsigned int i;
u8 values[4];
for (i = 1; i <= val_count; i++)
values[i - 1] = (val >> ((val_count - i) * 8)) & 0xff;
return max96712_write_bulk(priv, reg, &values, val_count);
}
static void max96712_reset(struct max96712_priv *priv)
{
max96712_update_bits(priv, 0x13, 0x40, 0x40);
msleep(20);
}
static void max96712_mipi_enable(struct max96712_priv *priv, bool enable)
{
if (enable) {
max96712_update_bits(priv, 0x40b, 0x02, 0x02);
max96712_update_bits(priv, 0x8a0, 0x80, 0x80);
} else {
max96712_update_bits(priv, 0x8a0, 0x80, 0x00);
max96712_update_bits(priv, 0x40b, 0x02, 0x00);
}
}
static void max96712_mipi_configure(struct max96712_priv *priv)
{
unsigned int i;
u8 phy5 = 0;
max96712_mipi_enable(priv, false);
/* Select 2x4 mode. */
max96712_write(priv, 0x8a0, 0x04);
/* TODO: Add support for 2-lane and 1-lane configurations. */
if (priv->cphy) {
/* Configure a 3-lane C-PHY using PHY0 and PHY1. */
max96712_write(priv, 0x94a, 0xa0);
/* Configure C-PHY timings. */
max96712_write(priv, 0x8ad, 0x3f);
max96712_write(priv, 0x8ae, 0x7d);
} else {
/* Configure a 4-lane D-PHY using PHY0 and PHY1. */
max96712_write(priv, 0x94a, 0xc0);
}
/* Configure lane mapping for PHY0 and PHY1. */
/* TODO: Add support for lane swapping. */
max96712_write(priv, 0x8a3, 0xe4);
/* Configure lane polarity for PHY0 and PHY1. */
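	/*
	 * lane_polarities[0] is the clock lane and maps to bit 5; data lanes
	 * 1-4 map to bits 0, 1, 3 and 4 respectively (see the expression
	 * below).
	 */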
for (i = 0; i < priv->mipi.num_data_lanes + 1; i++)
if (priv->mipi.lane_polarities[i])
phy5 |= BIT(i == 0 ? 5 : i < 3 ? i - 1 : i);
max96712_write(priv, 0x8a5, phy5);
/* Set link frequency for PHY0 and PHY1. */
max96712_update_bits(priv, 0x415, 0x3f,
((MAX96712_DPLL_FREQ / 100) & 0x1f) | BIT(5));
max96712_update_bits(priv, 0x418, 0x3f,
((MAX96712_DPLL_FREQ / 100) & 0x1f) | BIT(5));
/* Enable PHY0 and PHY1 */
max96712_update_bits(priv, 0x8a2, 0xf0, 0x30);
}
static void max96712_pattern_enable(struct max96712_priv *priv, bool enable)
{
const u32 h_active = 1920;
const u32 h_fp = 88;
const u32 h_sw = 44;
const u32 h_bp = 148;
const u32 h_tot = h_active + h_fp + h_sw + h_bp;
const u32 v_active = 1080;
const u32 v_fp = 4;
const u32 v_sw = 5;
const u32 v_bp = 36;
const u32 v_tot = v_active + v_fp + v_sw + v_bp;
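	/*
	 * With these values h_tot = 2200 and v_tot = 1125, so 2200 * 1125 *
	 * 30 fps = 74.25 MHz, matching the ~75 MHz pixel clock selected
	 * below.
	 */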
if (!enable) {
max96712_write(priv, 0x1051, 0x00);
return;
}
/* PCLK 75MHz. */
max96712_write(priv, 0x0009, 0x01);
/* Configure Video Timing Generator for 1920x1080 @ 30 fps. */
max96712_write_bulk_value(priv, 0x1052, 0, 3);
max96712_write_bulk_value(priv, 0x1055, v_sw * h_tot, 3);
	max96712_write_bulk_value(priv, 0x1058,
				  (v_active + v_fp + v_bp) * h_tot, 3);
max96712_write_bulk_value(priv, 0x105b, 0, 3);
max96712_write_bulk_value(priv, 0x105e, h_sw, 2);
max96712_write_bulk_value(priv, 0x1060, h_active + h_fp + h_bp, 2);
max96712_write_bulk_value(priv, 0x1062, v_tot, 2);
max96712_write_bulk_value(priv, 0x1064,
h_tot * (v_sw + v_bp) + (h_sw + h_bp), 3);
max96712_write_bulk_value(priv, 0x1067, h_active, 2);
max96712_write_bulk_value(priv, 0x1069, h_fp + h_sw + h_bp, 2);
max96712_write_bulk_value(priv, 0x106b, v_active, 2);
/* Generate VS, HS and DE in free-running mode. */
max96712_write(priv, 0x1050, 0xfb);
/* Configure Video Pattern Generator. */
if (priv->pattern == MAX96712_PATTERN_CHECKERBOARD) {
/* Set checkerboard pattern size. */
max96712_write(priv, 0x1074, 0x3c);
max96712_write(priv, 0x1075, 0x3c);
max96712_write(priv, 0x1076, 0x3c);
/* Set checkerboard pattern colors. */
max96712_write_bulk_value(priv, 0x106e, 0xfecc00, 3);
max96712_write_bulk_value(priv, 0x1071, 0x006aa7, 3);
/* Generate checkerboard pattern. */
max96712_write(priv, 0x1051, 0x10);
} else {
/* Set gradient increment. */
max96712_write(priv, 0x106d, 0x10);
/* Generate gradient pattern. */
max96712_write(priv, 0x1051, 0x20);
}
}
static int max96712_s_stream(struct v4l2_subdev *sd, int enable)
{
struct max96712_priv *priv = v4l2_get_subdevdata(sd);
if (enable) {
max96712_pattern_enable(priv, true);
max96712_mipi_enable(priv, true);
} else {
max96712_mipi_enable(priv, false);
max96712_pattern_enable(priv, false);
}
return 0;
}
static const struct v4l2_subdev_video_ops max96712_video_ops = {
.s_stream = max96712_s_stream,
};
static int max96712_get_pad_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
format->format.width = 1920;
format->format.height = 1080;
format->format.code = MEDIA_BUS_FMT_RGB888_1X24;
format->format.field = V4L2_FIELD_NONE;
return 0;
}
static const struct v4l2_subdev_pad_ops max96712_pad_ops = {
.get_fmt = max96712_get_pad_format,
.set_fmt = max96712_get_pad_format,
};
static const struct v4l2_subdev_ops max96712_subdev_ops = {
.video = &max96712_video_ops,
.pad = &max96712_pad_ops,
};
static const char * const max96712_test_pattern[] = {
"Checkerboard",
"Gradient",
};
static int max96712_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct max96712_priv *priv =
container_of(ctrl->handler, struct max96712_priv, ctrl_handler);
switch (ctrl->id) {
case V4L2_CID_TEST_PATTERN:
priv->pattern = ctrl->val ?
MAX96712_PATTERN_GRADIENT :
MAX96712_PATTERN_CHECKERBOARD;
break;
}
return 0;
}
static const struct v4l2_ctrl_ops max96712_ctrl_ops = {
.s_ctrl = max96712_s_ctrl,
};
static int max96712_v4l2_register(struct max96712_priv *priv)
{
long pixel_rate;
int ret;
v4l2_i2c_subdev_init(&priv->sd, priv->client, &max96712_subdev_ops);
priv->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
priv->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
v4l2_ctrl_handler_init(&priv->ctrl_handler, 2);
/*
* TODO: Once V4L2_CID_LINK_FREQ is changed from a menu control to an
* INT64 control it should be used here instead of V4L2_CID_PIXEL_RATE.
*/
pixel_rate = MAX96712_DPLL_FREQ / priv->mipi.num_data_lanes * 1000000;
v4l2_ctrl_new_std(&priv->ctrl_handler, NULL, V4L2_CID_PIXEL_RATE,
pixel_rate, pixel_rate, 1, pixel_rate);
v4l2_ctrl_new_std_menu_items(&priv->ctrl_handler, &max96712_ctrl_ops,
V4L2_CID_TEST_PATTERN,
ARRAY_SIZE(max96712_test_pattern) - 1,
0, 0, max96712_test_pattern);
priv->sd.ctrl_handler = &priv->ctrl_handler;
ret = priv->ctrl_handler.error;
if (ret)
goto error;
priv->pads[0].flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&priv->sd.entity, 1, priv->pads);
if (ret)
goto error;
v4l2_set_subdevdata(&priv->sd, priv);
ret = v4l2_async_register_subdev(&priv->sd);
if (ret < 0) {
dev_err(&priv->client->dev, "Unable to register subdevice\n");
goto error;
}
return 0;
error:
v4l2_ctrl_handler_free(&priv->ctrl_handler);
return ret;
}
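/*
 * Parse the MIPI output endpoint (port 4) from firmware and derive the PHY
 * mode: D-PHY implies 4 data lanes, C-PHY implies 3. Any other lane count or
 * bus type is rejected.
 */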
static int max96712_parse_dt(struct max96712_priv *priv)
{
struct fwnode_handle *ep;
struct v4l2_fwnode_endpoint v4l2_ep = {
.bus_type = V4L2_MBUS_UNKNOWN,
};
unsigned int supported_lanes;
int ret;
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(&priv->client->dev), 4,
0, 0);
if (!ep) {
dev_err(&priv->client->dev, "Not connected to subdevice\n");
return -EINVAL;
}
ret = v4l2_fwnode_endpoint_parse(ep, &v4l2_ep);
fwnode_handle_put(ep);
if (ret) {
dev_err(&priv->client->dev, "Could not parse v4l2 endpoint\n");
return -EINVAL;
}
switch (v4l2_ep.bus_type) {
case V4L2_MBUS_CSI2_DPHY:
supported_lanes = 4;
priv->cphy = false;
break;
case V4L2_MBUS_CSI2_CPHY:
supported_lanes = 3;
priv->cphy = true;
break;
default:
dev_err(&priv->client->dev, "Unsupported bus-type %u\n",
v4l2_ep.bus_type);
return -EINVAL;
}
if (v4l2_ep.bus.mipi_csi2.num_data_lanes != supported_lanes) {
dev_err(&priv->client->dev, "Only %u data lanes supported\n",
supported_lanes);
return -EINVAL;
}
priv->mipi = v4l2_ep.bus.mipi_csi2;
return 0;
}
static const struct regmap_config max96712_i2c_regmap = {
.reg_bits = 16,
.val_bits = 8,
.max_register = 0x1f00,
};
static int max96712_probe(struct i2c_client *client)
{
struct max96712_priv *priv;
int ret;
priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->client = client;
i2c_set_clientdata(client, priv);
priv->regmap = devm_regmap_init_i2c(client, &max96712_i2c_regmap);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
priv->gpiod_pwdn = devm_gpiod_get_optional(&client->dev, "enable",
GPIOD_OUT_HIGH);
if (IS_ERR(priv->gpiod_pwdn))
return PTR_ERR(priv->gpiod_pwdn);
gpiod_set_consumer_name(priv->gpiod_pwdn, "max96712-pwdn");
gpiod_set_value_cansleep(priv->gpiod_pwdn, 1);
if (priv->gpiod_pwdn)
usleep_range(4000, 5000);
if (max96712_read(priv, 0x4a) != MAX96712_ID)
return -ENODEV;
max96712_reset(priv);
ret = max96712_parse_dt(priv);
if (ret)
return ret;
max96712_mipi_configure(priv);
return max96712_v4l2_register(priv);
}
static void max96712_remove(struct i2c_client *client)
{
struct max96712_priv *priv = i2c_get_clientdata(client);
v4l2_async_unregister_subdev(&priv->sd);
gpiod_set_value_cansleep(priv->gpiod_pwdn, 0);
}
static const struct of_device_id max96712_of_table[] = {
{ .compatible = "maxim,max96712" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, max96712_of_table);
static struct i2c_driver max96712_i2c_driver = {
.driver = {
.name = "max96712",
.of_match_table = of_match_ptr(max96712_of_table),
},
.probe = max96712_probe,
.remove = max96712_remove,
};
module_i2c_driver(max96712_i2c_driver);
MODULE_DESCRIPTION("Maxim MAX96712 Quad GMSL2 Deserializer Driver");
MODULE_AUTHOR("Niklas Söderlund <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/media/max96712/max96712.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Microchip Image Sensor Controller (ISC) common clock driver setup
*
* Copyright (C) 2016 Microchip Technology, Inc.
*
* Author: Songjun Wu
* Author: Eugen Hristev <[email protected]>
*
*/
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include "atmel-isc-regs.h"
#include "atmel-isc.h"
static int isc_wait_clk_stable(struct clk_hw *hw)
{
struct isc_clk *isc_clk = to_isc_clk(hw);
struct regmap *regmap = isc_clk->regmap;
unsigned long timeout = jiffies + usecs_to_jiffies(1000);
unsigned int status;
while (time_before(jiffies, timeout)) {
regmap_read(regmap, ISC_CLKSR, &status);
if (!(status & ISC_CLKSR_SIP))
return 0;
usleep_range(10, 250);
}
return -ETIMEDOUT;
}
static int isc_clk_prepare(struct clk_hw *hw)
{
struct isc_clk *isc_clk = to_isc_clk(hw);
int ret;
ret = pm_runtime_resume_and_get(isc_clk->dev);
if (ret < 0)
return ret;
return isc_wait_clk_stable(hw);
}
static void isc_clk_unprepare(struct clk_hw *hw)
{
struct isc_clk *isc_clk = to_isc_clk(hw);
isc_wait_clk_stable(hw);
pm_runtime_put_sync(isc_clk->dev);
}
static int isc_clk_enable(struct clk_hw *hw)
{
struct isc_clk *isc_clk = to_isc_clk(hw);
u32 id = isc_clk->id;
struct regmap *regmap = isc_clk->regmap;
unsigned long flags;
unsigned int status;
dev_dbg(isc_clk->dev, "ISC CLK: %s, id = %d, div = %d, parent id = %d\n",
__func__, id, isc_clk->div, isc_clk->parent_id);
spin_lock_irqsave(&isc_clk->lock, flags);
regmap_update_bits(regmap, ISC_CLKCFG,
ISC_CLKCFG_DIV_MASK(id) | ISC_CLKCFG_SEL_MASK(id),
(isc_clk->div << ISC_CLKCFG_DIV_SHIFT(id)) |
(isc_clk->parent_id << ISC_CLKCFG_SEL_SHIFT(id)));
regmap_write(regmap, ISC_CLKEN, ISC_CLK(id));
spin_unlock_irqrestore(&isc_clk->lock, flags);
regmap_read(regmap, ISC_CLKSR, &status);
if (status & ISC_CLK(id))
return 0;
else
return -EINVAL;
}
static void isc_clk_disable(struct clk_hw *hw)
{
struct isc_clk *isc_clk = to_isc_clk(hw);
u32 id = isc_clk->id;
unsigned long flags;
spin_lock_irqsave(&isc_clk->lock, flags);
regmap_write(isc_clk->regmap, ISC_CLKDIS, ISC_CLK(id));
spin_unlock_irqrestore(&isc_clk->lock, flags);
}
static int isc_clk_is_enabled(struct clk_hw *hw)
{
struct isc_clk *isc_clk = to_isc_clk(hw);
u32 status;
int ret;
ret = pm_runtime_resume_and_get(isc_clk->dev);
if (ret < 0)
return 0;
regmap_read(isc_clk->regmap, ISC_CLKSR, &status);
pm_runtime_put_sync(isc_clk->dev);
return status & ISC_CLK(isc_clk->id) ? 1 : 0;
}
static unsigned long
isc_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
struct isc_clk *isc_clk = to_isc_clk(hw);
return DIV_ROUND_CLOSEST(parent_rate, isc_clk->div + 1);
}
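/*
 * Pick the parent clock and divider (1 .. ISC_CLK_MAX_DIV + 1) whose resulting
 * rate is closest to the requested one, preferring an exact match and stopping
 * once the candidate rate drops below the request.
 */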
static int isc_clk_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct isc_clk *isc_clk = to_isc_clk(hw);
long best_rate = -EINVAL;
int best_diff = -1;
unsigned int i, div;
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
struct clk_hw *parent;
unsigned long parent_rate;
parent = clk_hw_get_parent_by_index(hw, i);
if (!parent)
continue;
parent_rate = clk_hw_get_rate(parent);
if (!parent_rate)
continue;
for (div = 1; div < ISC_CLK_MAX_DIV + 2; div++) {
unsigned long rate;
int diff;
rate = DIV_ROUND_CLOSEST(parent_rate, div);
diff = abs(req->rate - rate);
if (best_diff < 0 || best_diff > diff) {
best_rate = rate;
best_diff = diff;
req->best_parent_rate = parent_rate;
req->best_parent_hw = parent;
}
if (!best_diff || rate < req->rate)
break;
}
if (!best_diff)
break;
}
dev_dbg(isc_clk->dev,
"ISC CLK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
__func__, best_rate,
__clk_get_name((req->best_parent_hw)->clk),
req->best_parent_rate);
if (best_rate < 0)
return best_rate;
req->rate = best_rate;
return 0;
}
static int isc_clk_set_parent(struct clk_hw *hw, u8 index)
{
struct isc_clk *isc_clk = to_isc_clk(hw);
if (index >= clk_hw_get_num_parents(hw))
return -EINVAL;
isc_clk->parent_id = index;
return 0;
}
static u8 isc_clk_get_parent(struct clk_hw *hw)
{
struct isc_clk *isc_clk = to_isc_clk(hw);
return isc_clk->parent_id;
}
static int isc_clk_set_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long parent_rate)
{
struct isc_clk *isc_clk = to_isc_clk(hw);
u32 div;
if (!rate)
return -EINVAL;
div = DIV_ROUND_CLOSEST(parent_rate, rate);
if (div > (ISC_CLK_MAX_DIV + 1) || !div)
return -EINVAL;
isc_clk->div = div - 1;
return 0;
}
static const struct clk_ops isc_clk_ops = {
.prepare = isc_clk_prepare,
.unprepare = isc_clk_unprepare,
.enable = isc_clk_enable,
.disable = isc_clk_disable,
.is_enabled = isc_clk_is_enabled,
.recalc_rate = isc_clk_recalc_rate,
.determine_rate = isc_clk_determine_rate,
.set_parent = isc_clk_set_parent,
.get_parent = isc_clk_get_parent,
.set_rate = isc_clk_set_rate,
};
static int isc_clk_register(struct isc_device *isc, unsigned int id)
{
struct regmap *regmap = isc->regmap;
struct device_node *np = isc->dev->of_node;
struct isc_clk *isc_clk;
struct clk_init_data init;
const char *clk_name = np->name;
const char *parent_names[3];
int num_parents;
if (id == ISC_ISPCK && !isc->ispck_required)
return 0;
num_parents = of_clk_get_parent_count(np);
if (num_parents < 1 || num_parents > 3)
return -EINVAL;
if (num_parents > 2 && id == ISC_ISPCK)
num_parents = 2;
of_clk_parent_fill(np, parent_names, num_parents);
if (id == ISC_MCK)
of_property_read_string(np, "clock-output-names", &clk_name);
else
clk_name = "isc-ispck";
init.parent_names = parent_names;
init.num_parents = num_parents;
init.name = clk_name;
init.ops = &isc_clk_ops;
init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
isc_clk = &isc->isc_clks[id];
isc_clk->hw.init = &init;
isc_clk->regmap = regmap;
isc_clk->id = id;
isc_clk->dev = isc->dev;
spin_lock_init(&isc_clk->lock);
isc_clk->clk = clk_register(isc->dev, &isc_clk->hw);
if (IS_ERR(isc_clk->clk)) {
dev_err(isc->dev, "%s: clock register fail\n", clk_name);
return PTR_ERR(isc_clk->clk);
} else if (id == ISC_MCK) {
of_clk_add_provider(np, of_clk_src_simple_get, isc_clk->clk);
}
return 0;
}
int atmel_isc_clk_init(struct isc_device *isc)
{
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++)
isc->isc_clks[i].clk = ERR_PTR(-EINVAL);
for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++) {
ret = isc_clk_register(isc, i);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(atmel_isc_clk_init);
void atmel_isc_clk_cleanup(struct isc_device *isc)
{
unsigned int i;
of_clk_del_provider(isc->dev->of_node);
for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++) {
struct isc_clk *isc_clk = &isc->isc_clks[i];
if (!IS_ERR(isc_clk->clk))
clk_unregister(isc_clk->clk);
}
}
EXPORT_SYMBOL_GPL(atmel_isc_clk_cleanup);
| linux-master | drivers/staging/media/deprecated/atmel/atmel-isc-clk.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Microchip Image Sensor Controller (ISC) driver
*
* Copyright (C) 2016-2019 Microchip Technology, Inc.
*
* Author: Songjun Wu
* Author: Eugen Hristev <[email protected]>
*
*
* Sensor-->PFE-->WB-->CFA-->CC-->GAM-->CSC-->CBC-->SUB-->RLP-->DMA
*
* ISC video pipeline integrates the following submodules:
* PFE: Parallel Front End to sample the camera sensor input stream
* WB: Programmable white balance in the Bayer domain
* CFA: Color filter array interpolation module
* CC: Programmable color correction
* GAM: Gamma correction
* CSC: Programmable color space conversion
* CBC: Contrast and Brightness control
* SUB: This module performs YCbCr444 to YCbCr420 chrominance subsampling
* RLP: This module performs rounding, range limiting
* and packing of the incoming data
*/
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/videodev2.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-image-sizes.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-dma-contig.h>
#include "atmel-isc-regs.h"
#include "atmel-isc.h"
#define ISC_SAMA5D2_MAX_SUPPORT_WIDTH 2592
#define ISC_SAMA5D2_MAX_SUPPORT_HEIGHT 1944
#define ISC_SAMA5D2_PIPELINE \
(WB_ENABLE | CFA_ENABLE | CC_ENABLE | GAM_ENABLES | CSC_ENABLE | \
CBC_ENABLE | SUB422_ENABLE | SUB420_ENABLE)
/* This is a list of the formats that the ISC can *output* */
static const struct isc_format sama5d2_controller_formats[] = {
{
.fourcc = V4L2_PIX_FMT_ARGB444,
}, {
.fourcc = V4L2_PIX_FMT_ARGB555,
}, {
.fourcc = V4L2_PIX_FMT_RGB565,
}, {
.fourcc = V4L2_PIX_FMT_ABGR32,
}, {
.fourcc = V4L2_PIX_FMT_XBGR32,
}, {
.fourcc = V4L2_PIX_FMT_YUV420,
}, {
.fourcc = V4L2_PIX_FMT_YUYV,
}, {
.fourcc = V4L2_PIX_FMT_YUV422P,
}, {
.fourcc = V4L2_PIX_FMT_GREY,
}, {
.fourcc = V4L2_PIX_FMT_Y10,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR8,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG8,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG8,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB8,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR10,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG10,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG10,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB10,
},
};
/* This is a list of formats that the ISC can receive as *input* */
static struct isc_format sama5d2_formats_list[] = {
{
.fourcc = V4L2_PIX_FMT_SBGGR8,
.mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
.cfa_baycfg = ISC_BAY_CFG_BGBG,
},
{
.fourcc = V4L2_PIX_FMT_SGBRG8,
.mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
.cfa_baycfg = ISC_BAY_CFG_GBGB,
},
{
.fourcc = V4L2_PIX_FMT_SGRBG8,
.mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
.cfa_baycfg = ISC_BAY_CFG_GRGR,
},
{
.fourcc = V4L2_PIX_FMT_SRGGB8,
.mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
.cfa_baycfg = ISC_BAY_CFG_RGRG,
},
{
.fourcc = V4L2_PIX_FMT_SBGGR10,
.mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
.cfa_baycfg = ISC_BAY_CFG_RGRG,
},
{
.fourcc = V4L2_PIX_FMT_SGBRG10,
.mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
.cfa_baycfg = ISC_BAY_CFG_GBGB,
},
{
.fourcc = V4L2_PIX_FMT_SGRBG10,
.mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
.cfa_baycfg = ISC_BAY_CFG_GRGR,
},
{
.fourcc = V4L2_PIX_FMT_SRGGB10,
.mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
.cfa_baycfg = ISC_BAY_CFG_RGRG,
},
{
.fourcc = V4L2_PIX_FMT_SBGGR12,
.mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
.cfa_baycfg = ISC_BAY_CFG_BGBG,
},
{
.fourcc = V4L2_PIX_FMT_SGBRG12,
.mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
.cfa_baycfg = ISC_BAY_CFG_GBGB,
},
{
.fourcc = V4L2_PIX_FMT_SGRBG12,
.mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
.cfa_baycfg = ISC_BAY_CFG_GRGR,
},
{
.fourcc = V4L2_PIX_FMT_SRGGB12,
.mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
.cfa_baycfg = ISC_BAY_CFG_RGRG,
},
{
.fourcc = V4L2_PIX_FMT_GREY,
.mbus_code = MEDIA_BUS_FMT_Y8_1X8,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
},
{
.fourcc = V4L2_PIX_FMT_YUYV,
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
},
{
.fourcc = V4L2_PIX_FMT_RGB565,
.mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
},
{
.fourcc = V4L2_PIX_FMT_Y10,
.mbus_code = MEDIA_BUS_FMT_Y10_1X10,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
},
};
static void isc_sama5d2_config_csc(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
/* Convert RGB to YUV */
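/*
 * The fixed-point coefficients below match the classic ITU-R BT.601 integer
 * approximation: Y = (66R + 129G + 25B)/256 + 16,
 * Cb = (-38R - 74G + 112B)/256 + 128, Cr = (112R - 94G - 18B)/256 + 128.
 * Negative coefficients are encoded in two's complement.
 */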
regmap_write(regmap, ISC_CSC_YR_YG + isc->offsets.csc,
0x42 | (0x81 << 16));
regmap_write(regmap, ISC_CSC_YB_OY + isc->offsets.csc,
0x19 | (0x10 << 16));
regmap_write(regmap, ISC_CSC_CBR_CBG + isc->offsets.csc,
0xFDA | (0xFB6 << 16));
regmap_write(regmap, ISC_CSC_CBB_OCB + isc->offsets.csc,
0x70 | (0x80 << 16));
regmap_write(regmap, ISC_CSC_CRR_CRG + isc->offsets.csc,
0x70 | (0xFA2 << 16));
regmap_write(regmap, ISC_CSC_CRB_OCR + isc->offsets.csc,
0xFEE | (0x80 << 16));
}
static void isc_sama5d2_config_cbc(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
regmap_write(regmap, ISC_CBC_BRIGHT + isc->offsets.cbc,
isc->ctrls.brightness);
regmap_write(regmap, ISC_CBC_CONTRAST + isc->offsets.cbc,
isc->ctrls.contrast);
}
static void isc_sama5d2_config_cc(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
/* Configure each register at the neutral fixed point 1.0 or 0.0 */
regmap_write(regmap, ISC_CC_RR_RG, (1 << 8));
regmap_write(regmap, ISC_CC_RB_OR, 0);
regmap_write(regmap, ISC_CC_GR_GG, (1 << 8) << 16);
regmap_write(regmap, ISC_CC_GB_OG, 0);
regmap_write(regmap, ISC_CC_BR_BG, 0);
regmap_write(regmap, ISC_CC_BB_OB, (1 << 8));
}
static void isc_sama5d2_config_ctrls(struct isc_device *isc,
const struct v4l2_ctrl_ops *ops)
{
struct isc_ctrls *ctrls = &isc->ctrls;
struct v4l2_ctrl_handler *hdl = &ctrls->handler;
ctrls->contrast = 256;
v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, -2048, 2047, 1, 256);
}
static void isc_sama5d2_config_dpc(struct isc_device *isc)
{
/* This module is not present in the sama5d2 pipeline */
}
static void isc_sama5d2_config_gam(struct isc_device *isc)
{
/* No specific gamma configuration */
}
static void isc_sama5d2_config_rlp(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
u32 rlp_mode = isc->config.rlp_cfg_mode;
/*
* In sama5d2, the YUV planar modes and the YUYV modes are treated
* in the same way by the RLP register.
* Normally, YYCC mode should be Luma(n) - Color B(n) - Color R(n)
* and YCYC should be Luma(n + 1) - Color B(n) - Luma(n) - Color R(n),
* but on sama5d2 the YCYC mode does not exist, and YYCC must be
* selected for both planar and interleaved modes, as in fact
* both modes are supported.
*
* Thus, if the YCYC mode is selected, replace it with the
* sama5d2-compliant mode, which is YYCC.
*/
if ((rlp_mode & ISC_RLP_CFG_MODE_MASK) == ISC_RLP_CFG_MODE_YCYC) {
rlp_mode &= ~ISC_RLP_CFG_MODE_MASK;
rlp_mode |= ISC_RLP_CFG_MODE_YYCC;
}
regmap_update_bits(regmap, ISC_RLP_CFG + isc->offsets.rlp,
ISC_RLP_CFG_MODE_MASK, rlp_mode);
}
static void isc_sama5d2_adapt_pipeline(struct isc_device *isc)
{
isc->try_config.bits_pipeline &= ISC_SAMA5D2_PIPELINE;
}
/* Gamma tables for gamma 1/1.8, 1/2 and 1/2.2 */
static const u32 isc_sama5d2_gamma_table[][GAMMA_ENTRIES] = {
/* 0 --> gamma 1/1.8 */
{ 0x65, 0x66002F, 0x950025, 0xBB0020, 0xDB001D, 0xF8001A,
0x1130018, 0x12B0017, 0x1420016, 0x1580014, 0x16D0013, 0x1810012,
0x1940012, 0x1A60012, 0x1B80011, 0x1C90010, 0x1DA0010, 0x1EA000F,
0x1FA000F, 0x209000F, 0x218000F, 0x227000E, 0x235000E, 0x243000E,
0x251000E, 0x25F000D, 0x26C000D, 0x279000D, 0x286000D, 0x293000C,
0x2A0000C, 0x2AC000C, 0x2B8000C, 0x2C4000C, 0x2D0000B, 0x2DC000B,
0x2E7000B, 0x2F3000B, 0x2FE000B, 0x309000B, 0x314000B, 0x31F000A,
0x32A000A, 0x334000B, 0x33F000A, 0x349000A, 0x354000A, 0x35E000A,
0x368000A, 0x372000A, 0x37C000A, 0x386000A, 0x3900009, 0x399000A,
0x3A30009, 0x3AD0009, 0x3B60009, 0x3BF000A, 0x3C90009, 0x3D20009,
0x3DB0009, 0x3E40009, 0x3ED0009, 0x3F60009 },
/* 1 --> gamma 1/2 */
{ 0x7F, 0x800034, 0xB50028, 0xDE0021, 0x100001E, 0x11E001B,
0x1390019, 0x1520017, 0x16A0015, 0x1800014, 0x1940014, 0x1A80013,
0x1BB0012, 0x1CD0011, 0x1DF0010, 0x1EF0010, 0x200000F, 0x20F000F,
0x21F000E, 0x22D000F, 0x23C000E, 0x24A000E, 0x258000D, 0x265000D,
0x273000C, 0x27F000D, 0x28C000C, 0x299000C, 0x2A5000C, 0x2B1000B,
0x2BC000C, 0x2C8000B, 0x2D3000C, 0x2DF000B, 0x2EA000A, 0x2F5000A,
0x2FF000B, 0x30A000A, 0x314000B, 0x31F000A, 0x329000A, 0x333000A,
0x33D0009, 0x3470009, 0x350000A, 0x35A0009, 0x363000A, 0x36D0009,
0x3760009, 0x37F0009, 0x3880009, 0x3910009, 0x39A0009, 0x3A30009,
0x3AC0008, 0x3B40009, 0x3BD0008, 0x3C60008, 0x3CE0008, 0x3D60009,
0x3DF0008, 0x3E70008, 0x3EF0008, 0x3F70008 },
/* 2 --> gamma 1/2.2 */
{ 0x99, 0x9B0038, 0xD4002A, 0xFF0023, 0x122001F, 0x141001B,
0x15D0019, 0x1760017, 0x18E0015, 0x1A30015, 0x1B80013, 0x1CC0012,
0x1DE0011, 0x1F00010, 0x2010010, 0x2110010, 0x221000F, 0x230000F,
0x23F000E, 0x24D000E, 0x25B000D, 0x269000C, 0x276000C, 0x283000C,
0x28F000C, 0x29B000C, 0x2A7000C, 0x2B3000B, 0x2BF000B, 0x2CA000B,
0x2D5000B, 0x2E0000A, 0x2EB000A, 0x2F5000A, 0x2FF000A, 0x30A000A,
0x3140009, 0x31E0009, 0x327000A, 0x3310009, 0x33A0009, 0x3440009,
0x34D0009, 0x3560009, 0x35F0009, 0x3680008, 0x3710008, 0x3790009,
0x3820008, 0x38A0008, 0x3930008, 0x39B0008, 0x3A30008, 0x3AB0008,
0x3B30008, 0x3BB0008, 0x3C30008, 0x3CB0007, 0x3D20008, 0x3DA0007,
0x3E20007, 0x3E90007, 0x3F00008, 0x3F80007 },
};
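/*
 * Walk all endpoints of the ISC node, translate the parallel bus polarity
 * flags (and BT.656 mode) into ISC_PFE_CFG0 bits and build the list of
 * subdevice entities to bind against.
 */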
static int isc_parse_dt(struct device *dev, struct isc_device *isc)
{
struct device_node *np = dev->of_node;
struct device_node *epn = NULL;
struct isc_subdev_entity *subdev_entity;
unsigned int flags;
int ret;
INIT_LIST_HEAD(&isc->subdev_entities);
while (1) {
struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 };
epn = of_graph_get_next_endpoint(np, epn);
if (!epn)
return 0;
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn),
&v4l2_epn);
if (ret) {
ret = -EINVAL;
dev_err(dev, "Could not parse the endpoint\n");
break;
}
subdev_entity = devm_kzalloc(dev, sizeof(*subdev_entity),
GFP_KERNEL);
if (!subdev_entity) {
ret = -ENOMEM;
break;
}
subdev_entity->epn = epn;
flags = v4l2_epn.bus.parallel.flags;
if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
subdev_entity->pfe_cfg0 = ISC_PFE_CFG0_HPOL_LOW;
if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_VPOL_LOW;
if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_PPOL_LOW;
if (v4l2_epn.bus_type == V4L2_MBUS_BT656)
subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_CCIR_CRC |
ISC_PFE_CFG0_CCIR656;
list_add_tail(&subdev_entity->list, &isc->subdev_entities);
}
of_node_put(epn);
return ret;
}
static int atmel_isc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct isc_device *isc;
void __iomem *io_base;
struct isc_subdev_entity *subdev_entity;
int irq;
int ret;
u32 ver;
isc = devm_kzalloc(dev, sizeof(*isc), GFP_KERNEL);
if (!isc)
return -ENOMEM;
platform_set_drvdata(pdev, isc);
isc->dev = dev;
io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
isc->regmap = devm_regmap_init_mmio(dev, io_base, &atmel_isc_regmap_config);
if (IS_ERR(isc->regmap)) {
ret = PTR_ERR(isc->regmap);
dev_err(dev, "failed to init register map: %d\n", ret);
return ret;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = devm_request_irq(dev, irq, atmel_isc_interrupt, 0,
"atmel-sama5d2-isc", isc);
if (ret < 0) {
dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n",
irq, ret);
return ret;
}
isc->gamma_table = isc_sama5d2_gamma_table;
isc->gamma_max = 2;
isc->max_width = ISC_SAMA5D2_MAX_SUPPORT_WIDTH;
isc->max_height = ISC_SAMA5D2_MAX_SUPPORT_HEIGHT;
isc->config_dpc = isc_sama5d2_config_dpc;
isc->config_csc = isc_sama5d2_config_csc;
isc->config_cbc = isc_sama5d2_config_cbc;
isc->config_cc = isc_sama5d2_config_cc;
isc->config_gam = isc_sama5d2_config_gam;
isc->config_rlp = isc_sama5d2_config_rlp;
isc->config_ctrls = isc_sama5d2_config_ctrls;
isc->adapt_pipeline = isc_sama5d2_adapt_pipeline;
isc->offsets.csc = ISC_SAMA5D2_CSC_OFFSET;
isc->offsets.cbc = ISC_SAMA5D2_CBC_OFFSET;
isc->offsets.sub422 = ISC_SAMA5D2_SUB422_OFFSET;
isc->offsets.sub420 = ISC_SAMA5D2_SUB420_OFFSET;
isc->offsets.rlp = ISC_SAMA5D2_RLP_OFFSET;
isc->offsets.his = ISC_SAMA5D2_HIS_OFFSET;
isc->offsets.dma = ISC_SAMA5D2_DMA_OFFSET;
isc->offsets.version = ISC_SAMA5D2_VERSION_OFFSET;
isc->offsets.his_entry = ISC_SAMA5D2_HIS_ENTRY_OFFSET;
isc->controller_formats = sama5d2_controller_formats;
isc->controller_formats_size = ARRAY_SIZE(sama5d2_controller_formats);
isc->formats_list = sama5d2_formats_list;
isc->formats_list_size = ARRAY_SIZE(sama5d2_formats_list);
/* sama5d2-isc - 8 bits per beat */
isc->dcfg = ISC_DCFG_YMBSIZE_BEATS8 | ISC_DCFG_CMBSIZE_BEATS8;
/* sama5d2-isc: ISPCK is required */
isc->ispck_required = true;
ret = atmel_isc_pipeline_init(isc);
if (ret)
return ret;
isc->hclock = devm_clk_get(dev, "hclock");
if (IS_ERR(isc->hclock)) {
ret = PTR_ERR(isc->hclock);
dev_err(dev, "failed to get hclock: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(isc->hclock);
if (ret) {
dev_err(dev, "failed to enable hclock: %d\n", ret);
return ret;
}
ret = atmel_isc_clk_init(isc);
if (ret) {
dev_err(dev, "failed to init isc clock: %d\n", ret);
goto unprepare_hclk;
}
ret = v4l2_device_register(dev, &isc->v4l2_dev);
if (ret) {
dev_err(dev, "unable to register v4l2 device.\n");
goto unprepare_clk;
}
ret = isc_parse_dt(dev, isc);
if (ret) {
dev_err(dev, "fail to parse device tree\n");
goto unregister_v4l2_device;
}
if (list_empty(&isc->subdev_entities)) {
dev_err(dev, "no subdev found\n");
ret = -ENODEV;
goto unregister_v4l2_device;
}
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
struct v4l2_async_connection *asd;
struct fwnode_handle *fwnode =
of_fwnode_handle(subdev_entity->epn);
v4l2_async_nf_init(&subdev_entity->notifier, &isc->v4l2_dev);
asd = v4l2_async_nf_add_fwnode_remote(&subdev_entity->notifier,
fwnode,
struct v4l2_async_connection);
of_node_put(subdev_entity->epn);
subdev_entity->epn = NULL;
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto cleanup_subdev;
}
subdev_entity->notifier.ops = &atmel_isc_async_ops;
ret = v4l2_async_nf_register(&subdev_entity->notifier);
if (ret) {
dev_err(dev, "fail to register async notifier\n");
goto cleanup_subdev;
}
if (video_is_registered(&isc->video_dev))
break;
}
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
pm_request_idle(dev);
isc->ispck = isc->isc_clks[ISC_ISPCK].clk;
ret = clk_prepare_enable(isc->ispck);
if (ret) {
dev_err(dev, "failed to enable ispck: %d\n", ret);
goto disable_pm;
}
/* ispck should be greater than or equal to hclock */
ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock));
if (ret) {
dev_err(dev, "failed to set ispck rate: %d\n", ret);
goto unprepare_clk;
}
regmap_read(isc->regmap, ISC_VERSION + isc->offsets.version, &ver);
dev_info(dev, "Microchip ISC version %x\n", ver);
return 0;
unprepare_clk:
clk_disable_unprepare(isc->ispck);
disable_pm:
pm_runtime_disable(dev);
cleanup_subdev:
atmel_isc_subdev_cleanup(isc);
unregister_v4l2_device:
v4l2_device_unregister(&isc->v4l2_dev);
unprepare_hclk:
clk_disable_unprepare(isc->hclock);
atmel_isc_clk_cleanup(isc);
return ret;
}
static void atmel_isc_remove(struct platform_device *pdev)
{
struct isc_device *isc = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
atmel_isc_subdev_cleanup(isc);
v4l2_device_unregister(&isc->v4l2_dev);
clk_disable_unprepare(isc->ispck);
clk_disable_unprepare(isc->hclock);
atmel_isc_clk_cleanup(isc);
}
static int __maybe_unused isc_runtime_suspend(struct device *dev)
{
struct isc_device *isc = dev_get_drvdata(dev);
clk_disable_unprepare(isc->ispck);
clk_disable_unprepare(isc->hclock);
return 0;
}
static int __maybe_unused isc_runtime_resume(struct device *dev)
{
struct isc_device *isc = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(isc->hclock);
if (ret)
return ret;
ret = clk_prepare_enable(isc->ispck);
if (ret)
clk_disable_unprepare(isc->hclock);
return ret;
}
static const struct dev_pm_ops atmel_isc_dev_pm_ops = {
SET_RUNTIME_PM_OPS(isc_runtime_suspend, isc_runtime_resume, NULL)
};
#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id atmel_isc_of_match[] = {
{ .compatible = "atmel,sama5d2-isc" },
{ }
};
MODULE_DEVICE_TABLE(of, atmel_isc_of_match);
#endif
static struct platform_driver atmel_isc_driver = {
.probe = atmel_isc_probe,
.remove_new = atmel_isc_remove,
.driver = {
.name = "atmel-sama5d2-isc",
.pm = &atmel_isc_dev_pm_ops,
.of_match_table = of_match_ptr(atmel_isc_of_match),
},
};
module_platform_driver(atmel_isc_driver);
MODULE_AUTHOR("Songjun Wu");
MODULE_DESCRIPTION("The V4L2 driver for Atmel-ISC");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Microchip eXtended Image Sensor Controller (XISC) driver
*
* Copyright (C) 2019-2021 Microchip Technology, Inc. and its subsidiaries
*
* Author: Eugen Hristev <[email protected]>
*
* Sensor-->PFE-->DPC-->WB-->CFA-->CC-->GAM-->VHXS-->CSC-->CBHS-->SUB-->RLP-->DMA-->HIS
*
* ISC video pipeline integrates the following submodules:
* PFE: Parallel Front End to sample the camera sensor input stream
* DPC: Defective Pixel Correction with black offset correction, green disparity
* correction and defective pixel correction (3 modules total)
* WB: Programmable white balance in the Bayer domain
* CFA: Color filter array interpolation module
* CC: Programmable color correction
* GAM: Gamma correction
* VHXS: Vertical and Horizontal Scaler
* CSC: Programmable color space conversion
* CBHS: Contrast Brightness Hue and Saturation control
* SUB: This module performs YCbCr444 to YCbCr420 chrominance subsampling
* RLP: This module performs rounding, range limiting
* and packing of the incoming data
* DMA: This module performs DMA master accesses to write frames to external RAM
* HIS: Histogram module performs statistic counters on the frames
*/
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/videodev2.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-image-sizes.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-dma-contig.h>
#include "atmel-isc-regs.h"
#include "atmel-isc.h"
#define ISC_SAMA7G5_MAX_SUPPORT_WIDTH 3264
#define ISC_SAMA7G5_MAX_SUPPORT_HEIGHT 2464
#define ISC_SAMA7G5_PIPELINE \
(WB_ENABLE | CFA_ENABLE | CC_ENABLE | GAM_ENABLES | CSC_ENABLE | \
CBC_ENABLE | SUB422_ENABLE | SUB420_ENABLE)
/* This is a list of the formats that the ISC can *output* */
static const struct isc_format sama7g5_controller_formats[] = {
{
.fourcc = V4L2_PIX_FMT_ARGB444,
}, {
.fourcc = V4L2_PIX_FMT_ARGB555,
}, {
.fourcc = V4L2_PIX_FMT_RGB565,
}, {
.fourcc = V4L2_PIX_FMT_ABGR32,
}, {
.fourcc = V4L2_PIX_FMT_XBGR32,
}, {
.fourcc = V4L2_PIX_FMT_YUV420,
}, {
.fourcc = V4L2_PIX_FMT_UYVY,
}, {
.fourcc = V4L2_PIX_FMT_VYUY,
}, {
.fourcc = V4L2_PIX_FMT_YUYV,
}, {
.fourcc = V4L2_PIX_FMT_YUV422P,
}, {
.fourcc = V4L2_PIX_FMT_GREY,
}, {
.fourcc = V4L2_PIX_FMT_Y10,
}, {
.fourcc = V4L2_PIX_FMT_Y16,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR8,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG8,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG8,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB8,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR10,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG10,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG10,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB10,
},
};
/* This is a list of formats that the ISC can receive as *input* */
static struct isc_format sama7g5_formats_list[] = {
{
.fourcc = V4L2_PIX_FMT_SBGGR8,
.mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
.cfa_baycfg = ISC_BAY_CFG_BGBG,
},
{
.fourcc = V4L2_PIX_FMT_SGBRG8,
.mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
.cfa_baycfg = ISC_BAY_CFG_GBGB,
},
{
.fourcc = V4L2_PIX_FMT_SGRBG8,
.mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
.cfa_baycfg = ISC_BAY_CFG_GRGR,
},
{
.fourcc = V4L2_PIX_FMT_SRGGB8,
.mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
.cfa_baycfg = ISC_BAY_CFG_RGRG,
},
{
.fourcc = V4L2_PIX_FMT_SBGGR10,
.mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
.cfa_baycfg = ISC_BAY_CFG_RGRG,
},
{
.fourcc = V4L2_PIX_FMT_SGBRG10,
.mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
.cfa_baycfg = ISC_BAY_CFG_GBGB,
},
{
.fourcc = V4L2_PIX_FMT_SGRBG10,
.mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
.cfa_baycfg = ISC_BAY_CFG_GRGR,
},
{
.fourcc = V4L2_PIX_FMT_SRGGB10,
.mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
.cfa_baycfg = ISC_BAY_CFG_RGRG,
},
{
.fourcc = V4L2_PIX_FMT_SBGGR12,
.mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
.cfa_baycfg = ISC_BAY_CFG_BGBG,
},
{
.fourcc = V4L2_PIX_FMT_SGBRG12,
.mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
.cfa_baycfg = ISC_BAY_CFG_GBGB,
},
{
.fourcc = V4L2_PIX_FMT_SGRBG12,
.mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
.cfa_baycfg = ISC_BAY_CFG_GRGR,
},
{
.fourcc = V4L2_PIX_FMT_SRGGB12,
.mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
.cfa_baycfg = ISC_BAY_CFG_RGRG,
},
{
.fourcc = V4L2_PIX_FMT_GREY,
.mbus_code = MEDIA_BUS_FMT_Y8_1X8,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
},
{
.fourcc = V4L2_PIX_FMT_YUYV,
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
},
{
.fourcc = V4L2_PIX_FMT_UYVY,
.mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
},
{
.fourcc = V4L2_PIX_FMT_RGB565,
.mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
},
{
.fourcc = V4L2_PIX_FMT_Y10,
.mbus_code = MEDIA_BUS_FMT_Y10_1X10,
.pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
},
};
static void isc_sama7g5_config_csc(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
/* Convert RGB to YUV */
regmap_write(regmap, ISC_CSC_YR_YG + isc->offsets.csc,
0x42 | (0x81 << 16));
regmap_write(regmap, ISC_CSC_YB_OY + isc->offsets.csc,
0x19 | (0x10 << 16));
regmap_write(regmap, ISC_CSC_CBR_CBG + isc->offsets.csc,
0xFDA | (0xFB6 << 16));
regmap_write(regmap, ISC_CSC_CBB_OCB + isc->offsets.csc,
0x70 | (0x80 << 16));
regmap_write(regmap, ISC_CSC_CRR_CRG + isc->offsets.csc,
0x70 | (0xFA2 << 16));
regmap_write(regmap, ISC_CSC_CRB_OCR + isc->offsets.csc,
0xFEE | (0x80 << 16));
}
static void isc_sama7g5_config_cbc(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
/* Configure what is set via v4l2 ctrls */
regmap_write(regmap, ISC_CBC_BRIGHT + isc->offsets.cbc, isc->ctrls.brightness);
regmap_write(regmap, ISC_CBC_CONTRAST + isc->offsets.cbc, isc->ctrls.contrast);
/* Configure Hue and Saturation as neutral midpoint */
regmap_write(regmap, ISC_CBCHS_HUE, 0);
regmap_write(regmap, ISC_CBCHS_SAT, (1 << 4));
}
static void isc_sama7g5_config_cc(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
/* Configure each register at the neutral fixed point 1.0 or 0.0 */
regmap_write(regmap, ISC_CC_RR_RG, (1 << 8));
regmap_write(regmap, ISC_CC_RB_OR, 0);
regmap_write(regmap, ISC_CC_GR_GG, (1 << 8) << 16);
regmap_write(regmap, ISC_CC_GB_OG, 0);
regmap_write(regmap, ISC_CC_BR_BG, 0);
regmap_write(regmap, ISC_CC_BB_OB, (1 << 8));
}
static void isc_sama7g5_config_ctrls(struct isc_device *isc,
const struct v4l2_ctrl_ops *ops)
{
struct isc_ctrls *ctrls = &isc->ctrls;
struct v4l2_ctrl_handler *hdl = &ctrls->handler;
ctrls->contrast = 16;
v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, -2048, 2047, 1, 16);
}
static void isc_sama7g5_config_dpc(struct isc_device *isc)
{
u32 bay_cfg = isc->config.sd_format->cfa_baycfg;
struct regmap *regmap = isc->regmap;
regmap_update_bits(regmap, ISC_DPC_CFG, ISC_DPC_CFG_BLOFF_MASK,
(64 << ISC_DPC_CFG_BLOFF_SHIFT));
regmap_update_bits(regmap, ISC_DPC_CFG, ISC_DPC_CFG_BAYCFG_MASK,
(bay_cfg << ISC_DPC_CFG_BAYCFG_SHIFT));
}
static void isc_sama7g5_config_gam(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
regmap_update_bits(regmap, ISC_GAM_CTRL, ISC_GAM_CTRL_BIPART,
ISC_GAM_CTRL_BIPART);
}
static void isc_sama7g5_config_rlp(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
u32 rlp_mode = isc->config.rlp_cfg_mode;
regmap_update_bits(regmap, ISC_RLP_CFG + isc->offsets.rlp,
ISC_RLP_CFG_MODE_MASK | ISC_RLP_CFG_LSH |
ISC_RLP_CFG_YMODE_MASK, rlp_mode);
}
static void isc_sama7g5_adapt_pipeline(struct isc_device *isc)
{
isc->try_config.bits_pipeline &= ISC_SAMA7G5_PIPELINE;
}
/* Gamma table with gamma 1/2.2 */
static const u32 isc_sama7g5_gamma_table[][GAMMA_ENTRIES] = {
/* index 0 --> gamma bipartite */
{
0x980, 0x4c0320, 0x650260, 0x7801e0, 0x8701a0, 0x940180,
0xa00160, 0xab0120, 0xb40120, 0xbd0120, 0xc60100, 0xce0100,
0xd600e0, 0xdd00e0, 0xe400e0, 0xeb00c0, 0xf100c0, 0xf700c0,
0xfd00c0, 0x10300a0, 0x10800c0, 0x10e00a0, 0x11300a0, 0x11800a0,
0x11d00a0, 0x12200a0, 0x12700a0, 0x12c0080, 0x13000a0, 0x1350080,
0x13900a0, 0x13e0080, 0x1420076, 0x17d0062, 0x1ae0054, 0x1d8004a,
0x1fd0044, 0x21f003e, 0x23e003a, 0x25b0036, 0x2760032, 0x28f0030,
0x2a7002e, 0x2be002c, 0x2d4002c, 0x2ea0028, 0x2fe0028, 0x3120026,
0x3250024, 0x3370024, 0x3490022, 0x35a0022, 0x36b0020, 0x37b0020,
0x38b0020, 0x39b001e, 0x3aa001e, 0x3b9001c, 0x3c7001c, 0x3d5001c,
0x3e3001c, 0x3f1001c, 0x3ff001a, 0x40c001a },
};
static int xisc_parse_dt(struct device *dev, struct isc_device *isc)
{
struct device_node *np = dev->of_node;
struct device_node *epn = NULL;
struct isc_subdev_entity *subdev_entity;
unsigned int flags;
int ret;
bool mipi_mode;
INIT_LIST_HEAD(&isc->subdev_entities);
mipi_mode = of_property_read_bool(np, "microchip,mipi-mode");
while (1) {
struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 };
epn = of_graph_get_next_endpoint(np, epn);
if (!epn)
return 0;
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn),
&v4l2_epn);
if (ret) {
ret = -EINVAL;
dev_err(dev, "Could not parse the endpoint\n");
break;
}
subdev_entity = devm_kzalloc(dev, sizeof(*subdev_entity),
GFP_KERNEL);
if (!subdev_entity) {
ret = -ENOMEM;
break;
}
subdev_entity->epn = epn;
flags = v4l2_epn.bus.parallel.flags;
if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
subdev_entity->pfe_cfg0 = ISC_PFE_CFG0_HPOL_LOW;
if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_VPOL_LOW;
if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_PPOL_LOW;
if (v4l2_epn.bus_type == V4L2_MBUS_BT656)
subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_CCIR_CRC |
ISC_PFE_CFG0_CCIR656;
if (mipi_mode)
subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_MIPI;
list_add_tail(&subdev_entity->list, &isc->subdev_entities);
}
of_node_put(epn);
return ret;
}
static int microchip_xisc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct isc_device *isc;
void __iomem *io_base;
struct isc_subdev_entity *subdev_entity;
int irq;
int ret;
u32 ver;
isc = devm_kzalloc(dev, sizeof(*isc), GFP_KERNEL);
if (!isc)
return -ENOMEM;
platform_set_drvdata(pdev, isc);
isc->dev = dev;
io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
isc->regmap = devm_regmap_init_mmio(dev, io_base, &atmel_isc_regmap_config);
if (IS_ERR(isc->regmap)) {
ret = PTR_ERR(isc->regmap);
dev_err(dev, "failed to init register map: %d\n", ret);
return ret;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = devm_request_irq(dev, irq, atmel_isc_interrupt, 0,
"microchip-sama7g5-xisc", isc);
if (ret < 0) {
dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n",
irq, ret);
return ret;
}
isc->gamma_table = isc_sama7g5_gamma_table;
isc->gamma_max = 0;
isc->max_width = ISC_SAMA7G5_MAX_SUPPORT_WIDTH;
isc->max_height = ISC_SAMA7G5_MAX_SUPPORT_HEIGHT;
isc->config_dpc = isc_sama7g5_config_dpc;
isc->config_csc = isc_sama7g5_config_csc;
isc->config_cbc = isc_sama7g5_config_cbc;
isc->config_cc = isc_sama7g5_config_cc;
isc->config_gam = isc_sama7g5_config_gam;
isc->config_rlp = isc_sama7g5_config_rlp;
isc->config_ctrls = isc_sama7g5_config_ctrls;
isc->adapt_pipeline = isc_sama7g5_adapt_pipeline;
isc->offsets.csc = ISC_SAMA7G5_CSC_OFFSET;
isc->offsets.cbc = ISC_SAMA7G5_CBC_OFFSET;
isc->offsets.sub422 = ISC_SAMA7G5_SUB422_OFFSET;
isc->offsets.sub420 = ISC_SAMA7G5_SUB420_OFFSET;
isc->offsets.rlp = ISC_SAMA7G5_RLP_OFFSET;
isc->offsets.his = ISC_SAMA7G5_HIS_OFFSET;
isc->offsets.dma = ISC_SAMA7G5_DMA_OFFSET;
isc->offsets.version = ISC_SAMA7G5_VERSION_OFFSET;
isc->offsets.his_entry = ISC_SAMA7G5_HIS_ENTRY_OFFSET;
isc->controller_formats = sama7g5_controller_formats;
isc->controller_formats_size = ARRAY_SIZE(sama7g5_controller_formats);
isc->formats_list = sama7g5_formats_list;
isc->formats_list_size = ARRAY_SIZE(sama7g5_formats_list);
/* sama7g5-isc RAM access port is full AXI4 - 32 bits per beat */
isc->dcfg = ISC_DCFG_YMBSIZE_BEATS32 | ISC_DCFG_CMBSIZE_BEATS32;
/* sama7g5-isc: ISPCK does not exist, the ISC is clocked by MCK */
isc->ispck_required = false;
ret = atmel_isc_pipeline_init(isc);
if (ret)
return ret;
isc->hclock = devm_clk_get(dev, "hclock");
if (IS_ERR(isc->hclock)) {
ret = PTR_ERR(isc->hclock);
dev_err(dev, "failed to get hclock: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(isc->hclock);
if (ret) {
dev_err(dev, "failed to enable hclock: %d\n", ret);
return ret;
}
ret = atmel_isc_clk_init(isc);
if (ret) {
dev_err(dev, "failed to init isc clock: %d\n", ret);
goto unprepare_hclk;
}
ret = v4l2_device_register(dev, &isc->v4l2_dev);
if (ret) {
dev_err(dev, "unable to register v4l2 device.\n");
goto unprepare_hclk;
}
ret = xisc_parse_dt(dev, isc);
if (ret) {
dev_err(dev, "fail to parse device tree\n");
goto unregister_v4l2_device;
}
if (list_empty(&isc->subdev_entities)) {
dev_err(dev, "no subdev found\n");
ret = -ENODEV;
goto unregister_v4l2_device;
}
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
struct v4l2_async_connection *asd;
struct fwnode_handle *fwnode =
of_fwnode_handle(subdev_entity->epn);
v4l2_async_nf_init(&subdev_entity->notifier, &isc->v4l2_dev);
asd = v4l2_async_nf_add_fwnode_remote(&subdev_entity->notifier,
fwnode,
struct v4l2_async_connection);
of_node_put(subdev_entity->epn);
subdev_entity->epn = NULL;
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto cleanup_subdev;
}
subdev_entity->notifier.ops = &atmel_isc_async_ops;
ret = v4l2_async_nf_register(&subdev_entity->notifier);
if (ret) {
dev_err(dev, "fail to register async notifier\n");
goto cleanup_subdev;
}
if (video_is_registered(&isc->video_dev))
break;
}
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
pm_request_idle(dev);
regmap_read(isc->regmap, ISC_VERSION + isc->offsets.version, &ver);
dev_info(dev, "Microchip XISC version %x\n", ver);
return 0;
cleanup_subdev:
atmel_isc_subdev_cleanup(isc);
unregister_v4l2_device:
v4l2_device_unregister(&isc->v4l2_dev);
unprepare_hclk:
clk_disable_unprepare(isc->hclock);
atmel_isc_clk_cleanup(isc);
return ret;
}
static void microchip_xisc_remove(struct platform_device *pdev)
{
struct isc_device *isc = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
atmel_isc_subdev_cleanup(isc);
v4l2_device_unregister(&isc->v4l2_dev);
clk_disable_unprepare(isc->hclock);
atmel_isc_clk_cleanup(isc);
}
static int __maybe_unused xisc_runtime_suspend(struct device *dev)
{
struct isc_device *isc = dev_get_drvdata(dev);
clk_disable_unprepare(isc->hclock);
return 0;
}
static int __maybe_unused xisc_runtime_resume(struct device *dev)
{
struct isc_device *isc = dev_get_drvdata(dev);
return clk_prepare_enable(isc->hclock);
}
static const struct dev_pm_ops microchip_xisc_dev_pm_ops = {
SET_RUNTIME_PM_OPS(xisc_runtime_suspend, xisc_runtime_resume, NULL)
};
#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id microchip_xisc_of_match[] = {
{ .compatible = "microchip,sama7g5-isc" },
{ }
};
MODULE_DEVICE_TABLE(of, microchip_xisc_of_match);
#endif
static struct platform_driver microchip_xisc_driver = {
.probe = microchip_xisc_probe,
.remove_new = microchip_xisc_remove,
.driver = {
.name = "microchip-sama7g5-xisc",
.pm = µchip_xisc_dev_pm_ops,
.of_match_table = of_match_ptr(microchip_xisc_of_match),
},
};
module_platform_driver(microchip_xisc_driver);
MODULE_AUTHOR("Eugen Hristev <[email protected]>");
MODULE_DESCRIPTION("The V4L2 driver for Microchip-XISC");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Microchip Image Sensor Controller (ISC) common driver base
*
* Copyright (C) 2016-2019 Microchip Technology, Inc.
*
* Author: Songjun Wu
* Author: Eugen Hristev <[email protected]>
*
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/videodev2.h>
#include <linux/atmel-isc-media.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-image-sizes.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-dma-contig.h>
#include "atmel-isc-regs.h"
#include "atmel-isc.h"
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug level (0-2)");
static unsigned int sensor_preferred = 1;
module_param(sensor_preferred, uint, 0644);
MODULE_PARM_DESC(sensor_preferred,
"Sensor is preferred to output the specified format (1-on 0-off), default 1");
#define ISC_IS_FORMAT_RAW(mbus_code) \
(((mbus_code) & 0xf000) == 0x3000)
#define ISC_IS_FORMAT_GREY(mbus_code) \
(((mbus_code) == MEDIA_BUS_FMT_Y10_1X10) | \
(((mbus_code) == MEDIA_BUS_FMT_Y8_1X8)))
static inline void isc_update_v4l2_ctrls(struct isc_device *isc)
{
struct isc_ctrls *ctrls = &isc->ctrls;
/* In here we set the v4l2 controls w.r.t. our pipeline config */
v4l2_ctrl_s_ctrl(isc->r_gain_ctrl, ctrls->gain[ISC_HIS_CFG_MODE_R]);
v4l2_ctrl_s_ctrl(isc->b_gain_ctrl, ctrls->gain[ISC_HIS_CFG_MODE_B]);
v4l2_ctrl_s_ctrl(isc->gr_gain_ctrl, ctrls->gain[ISC_HIS_CFG_MODE_GR]);
v4l2_ctrl_s_ctrl(isc->gb_gain_ctrl, ctrls->gain[ISC_HIS_CFG_MODE_GB]);
v4l2_ctrl_s_ctrl(isc->r_off_ctrl, ctrls->offset[ISC_HIS_CFG_MODE_R]);
v4l2_ctrl_s_ctrl(isc->b_off_ctrl, ctrls->offset[ISC_HIS_CFG_MODE_B]);
v4l2_ctrl_s_ctrl(isc->gr_off_ctrl, ctrls->offset[ISC_HIS_CFG_MODE_GR]);
v4l2_ctrl_s_ctrl(isc->gb_off_ctrl, ctrls->offset[ISC_HIS_CFG_MODE_GB]);
}
static inline void isc_update_awb_ctrls(struct isc_device *isc)
{
struct isc_ctrls *ctrls = &isc->ctrls;
/* In here we set our actual hw pipeline config */
regmap_write(isc->regmap, ISC_WB_O_RGR,
((ctrls->offset[ISC_HIS_CFG_MODE_R])) |
((ctrls->offset[ISC_HIS_CFG_MODE_GR]) << 16));
regmap_write(isc->regmap, ISC_WB_O_BGB,
((ctrls->offset[ISC_HIS_CFG_MODE_B])) |
((ctrls->offset[ISC_HIS_CFG_MODE_GB]) << 16));
regmap_write(isc->regmap, ISC_WB_G_RGR,
ctrls->gain[ISC_HIS_CFG_MODE_R] |
(ctrls->gain[ISC_HIS_CFG_MODE_GR] << 16));
regmap_write(isc->regmap, ISC_WB_G_BGB,
ctrls->gain[ISC_HIS_CFG_MODE_B] |
(ctrls->gain[ISC_HIS_CFG_MODE_GB] << 16));
}
static inline void isc_reset_awb_ctrls(struct isc_device *isc)
{
unsigned int c;
for (c = ISC_HIS_CFG_MODE_GR; c <= ISC_HIS_CFG_MODE_B; c++) {
/* gains have a fixed point at 9 decimals */
isc->ctrls.gain[c] = 1 << 9;
/* offsets are in 2's complements */
isc->ctrls.offset[c] = 0;
}
}
static int isc_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], struct device *alloc_devs[])
{
struct isc_device *isc = vb2_get_drv_priv(vq);
unsigned int size = isc->fmt.fmt.pix.sizeimage;
if (*nplanes)
return sizes[0] < size ? -EINVAL : 0;
*nplanes = 1;
sizes[0] = size;
return 0;
}
static int isc_buffer_prepare(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct isc_device *isc = vb2_get_drv_priv(vb->vb2_queue);
unsigned long size = isc->fmt.fmt.pix.sizeimage;
if (vb2_plane_size(vb, 0) < size) {
v4l2_err(&isc->v4l2_dev, "buffer too small (%lu < %lu)\n",
vb2_plane_size(vb, 0), size);
return -EINVAL;
}
vb2_set_plane_payload(vb, 0, size);
vbuf->field = isc->fmt.fmt.pix.field;
return 0;
}
static void isc_crop_pfe(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
u32 h, w;
h = isc->fmt.fmt.pix.height;
w = isc->fmt.fmt.pix.width;
/*
* In case the sensor is not RAW, it will output a pixel (12-16 bits)
* as two samples on the ISC data bus (which is 8-12 bits wide).
* The ISC counts each sample, so we need to multiply these values
* by two to get the real number of samples for the required pixels.
*/
if (!ISC_IS_FORMAT_RAW(isc->config.sd_format->mbus_code)) {
h <<= 1;
w <<= 1;
}
/*
* We limit the column/row count that the ISC will output according
* to the configured resolution that we want.
* This will avoid the situation where the sensor is misconfigured,
* sending more data, and the ISC will just take it and DMA to memory,
* causing corruption.
*/
regmap_write(regmap, ISC_PFE_CFG1,
(ISC_PFE_CFG1_COLMIN(0) & ISC_PFE_CFG1_COLMIN_MASK) |
(ISC_PFE_CFG1_COLMAX(w - 1) & ISC_PFE_CFG1_COLMAX_MASK));
regmap_write(regmap, ISC_PFE_CFG2,
(ISC_PFE_CFG2_ROWMIN(0) & ISC_PFE_CFG2_ROWMIN_MASK) |
(ISC_PFE_CFG2_ROWMAX(h - 1) & ISC_PFE_CFG2_ROWMAX_MASK));
regmap_update_bits(regmap, ISC_PFE_CFG0,
ISC_PFE_CFG0_COLEN | ISC_PFE_CFG0_ROWEN,
ISC_PFE_CFG0_COLEN | ISC_PFE_CFG0_ROWEN);
}
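/*
 * Program the DMA destination addresses and start capture. For the planar
 * formats the chroma planes follow the luma plane inside the same buffer:
 * YUV420 places Cb at 2/3 and Cr at 5/6 of sizeimage, while YUV422P places
 * them at 1/2 and 3/4 respectively.
 */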
static void isc_start_dma(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
u32 sizeimage = isc->fmt.fmt.pix.sizeimage;
u32 dctrl_dview;
dma_addr_t addr0;
addr0 = vb2_dma_contig_plane_dma_addr(&isc->cur_frm->vb.vb2_buf, 0);
regmap_write(regmap, ISC_DAD0 + isc->offsets.dma, addr0);
switch (isc->config.fourcc) {
case V4L2_PIX_FMT_YUV420:
regmap_write(regmap, ISC_DAD1 + isc->offsets.dma,
addr0 + (sizeimage * 2) / 3);
regmap_write(regmap, ISC_DAD2 + isc->offsets.dma,
addr0 + (sizeimage * 5) / 6);
break;
case V4L2_PIX_FMT_YUV422P:
regmap_write(regmap, ISC_DAD1 + isc->offsets.dma,
addr0 + sizeimage / 2);
regmap_write(regmap, ISC_DAD2 + isc->offsets.dma,
addr0 + (sizeimage * 3) / 4);
break;
default:
break;
}
dctrl_dview = isc->config.dctrl_dview;
regmap_write(regmap, ISC_DCTRL + isc->offsets.dma,
dctrl_dview | ISC_DCTRL_IE_IS);
spin_lock(&isc->awb_lock);
regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_CAPTURE);
spin_unlock(&isc->awb_lock);
}
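/*
 * Enable or bypass each processing stage according to the 'pipeline' bitmask,
 * program the Bayer configuration for white balance and CFA, load the gamma
 * LUTs and invoke the product-specific configuration hooks.
 */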
static void isc_set_pipeline(struct isc_device *isc, u32 pipeline)
{
struct regmap *regmap = isc->regmap;
struct isc_ctrls *ctrls = &isc->ctrls;
u32 val, bay_cfg;
const u32 *gamma;
unsigned int i;
/* WB-->CFA-->CC-->GAM-->CSC-->CBC-->SUB422-->SUB420 */
for (i = 0; i < ISC_PIPE_LINE_NODE_NUM; i++) {
val = pipeline & BIT(i) ? 1 : 0;
regmap_field_write(isc->pipeline[i], val);
}
if (!pipeline)
return;
bay_cfg = isc->config.sd_format->cfa_baycfg;
regmap_write(regmap, ISC_WB_CFG, bay_cfg);
isc_update_awb_ctrls(isc);
isc_update_v4l2_ctrls(isc);
regmap_write(regmap, ISC_CFA_CFG, bay_cfg | ISC_CFA_CFG_EITPOL);
gamma = &isc->gamma_table[ctrls->gamma_index][0];
regmap_bulk_write(regmap, ISC_GAM_BENTRY, gamma, GAMMA_ENTRIES);
regmap_bulk_write(regmap, ISC_GAM_GENTRY, gamma, GAMMA_ENTRIES);
regmap_bulk_write(regmap, ISC_GAM_RENTRY, gamma, GAMMA_ENTRIES);
isc->config_dpc(isc);
isc->config_csc(isc);
isc->config_cbc(isc);
isc->config_cc(isc);
isc->config_gam(isc);
}
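/*
 * Request a profile update so the shadow configuration is latched into the
 * hardware, then poll ISC_CTRLSR until the UPPRO bit clears (up to ~100
 * iterations of 1-2 ms each).
 */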
static int isc_update_profile(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
u32 sr;
int counter = 100;
regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_UPPRO);
regmap_read(regmap, ISC_CTRLSR, &sr);
while ((sr & ISC_CTRL_UPPRO) && counter--) {
usleep_range(1000, 2000);
regmap_read(regmap, ISC_CTRLSR, &sr);
}
if (counter < 0) {
v4l2_warn(&isc->v4l2_dev, "Time out to update profile\n");
return -ETIMEDOUT;
}
return 0;
}
static void isc_set_histogram(struct isc_device *isc, bool enable)
{
struct regmap *regmap = isc->regmap;
struct isc_ctrls *ctrls = &isc->ctrls;
if (enable) {
regmap_write(regmap, ISC_HIS_CFG + isc->offsets.his,
ISC_HIS_CFG_MODE_GR |
(isc->config.sd_format->cfa_baycfg
<< ISC_HIS_CFG_BAYSEL_SHIFT) |
ISC_HIS_CFG_RAR);
regmap_write(regmap, ISC_HIS_CTRL + isc->offsets.his,
ISC_HIS_CTRL_EN);
regmap_write(regmap, ISC_INTEN, ISC_INT_HISDONE);
ctrls->hist_id = ISC_HIS_CFG_MODE_GR;
isc_update_profile(isc);
regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_HISREQ);
ctrls->hist_stat = HIST_ENABLED;
} else {
regmap_write(regmap, ISC_INTDIS, ISC_INT_HISDONE);
regmap_write(regmap, ISC_HIS_CTRL + isc->offsets.his,
ISC_HIS_CTRL_DIS);
ctrls->hist_stat = HIST_DISABLED;
}
}
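/*
 * Apply the complete configuration for the selected format: parallel front
 * end, RLP, DMA settings and pipeline, enable the histogram when white
 * balance is possible on a raw Bayer input, and finally latch everything
 * with a profile update.
 */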
static int isc_configure(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
u32 pfe_cfg0, dcfg, mask, pipeline;
struct isc_subdev_entity *subdev = isc->current_subdev;
pfe_cfg0 = isc->config.sd_format->pfe_cfg0_bps;
pipeline = isc->config.bits_pipeline;
dcfg = isc->config.dcfg_imode | isc->dcfg;
pfe_cfg0 |= subdev->pfe_cfg0 | ISC_PFE_CFG0_MODE_PROGRESSIVE;
mask = ISC_PFE_CFG0_BPS_MASK | ISC_PFE_CFG0_HPOL_LOW |
ISC_PFE_CFG0_VPOL_LOW | ISC_PFE_CFG0_PPOL_LOW |
ISC_PFE_CFG0_MODE_MASK | ISC_PFE_CFG0_CCIR_CRC |
ISC_PFE_CFG0_CCIR656 | ISC_PFE_CFG0_MIPI;
regmap_update_bits(regmap, ISC_PFE_CFG0, mask, pfe_cfg0);
isc->config_rlp(isc);
regmap_write(regmap, ISC_DCFG + isc->offsets.dma, dcfg);
/* Set the pipeline */
isc_set_pipeline(isc, pipeline);
/*
* The currently implemented histogram is available for the RAW R, B, GB, GR
* channels. We need to check if the sensor is outputting RAW Bayer.
*/
if (isc->ctrls.awb &&
ISC_IS_FORMAT_RAW(isc->config.sd_format->mbus_code))
isc_set_histogram(isc, true);
else
isc_set_histogram(isc, false);
/* Update profile */
return isc_update_profile(isc);
}
static int isc_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct isc_device *isc = vb2_get_drv_priv(vq);
struct regmap *regmap = isc->regmap;
struct isc_buffer *buf;
unsigned long flags;
int ret;
/* Enable stream on the sub device */
ret = v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 1);
if (ret && ret != -ENOIOCTLCMD) {
v4l2_err(&isc->v4l2_dev, "stream on failed in subdev %d\n",
ret);
goto err_start_stream;
}
ret = pm_runtime_resume_and_get(isc->dev);
if (ret < 0) {
v4l2_err(&isc->v4l2_dev, "RPM resume failed in subdev %d\n",
ret);
goto err_pm_get;
}
ret = isc_configure(isc);
if (unlikely(ret))
goto err_configure;
/* Enable DMA interrupt */
regmap_write(regmap, ISC_INTEN, ISC_INT_DDONE);
spin_lock_irqsave(&isc->dma_queue_lock, flags);
isc->sequence = 0;
isc->stop = false;
reinit_completion(&isc->comp);
isc->cur_frm = list_first_entry(&isc->dma_queue,
struct isc_buffer, list);
list_del(&isc->cur_frm->list);
isc_crop_pfe(isc);
isc_start_dma(isc);
spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
/* if we are streaming from RAW, we can do a one-shot white balance adjustment */
if (ISC_IS_FORMAT_RAW(isc->config.sd_format->mbus_code))
v4l2_ctrl_activate(isc->do_wb_ctrl, true);
return 0;
err_configure:
pm_runtime_put_sync(isc->dev);
err_pm_get:
v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 0);
err_start_stream:
spin_lock_irqsave(&isc->dma_queue_lock, flags);
list_for_each_entry(buf, &isc->dma_queue, list)
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
INIT_LIST_HEAD(&isc->dma_queue);
spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
return ret;
}
static void isc_stop_streaming(struct vb2_queue *vq)
{
struct isc_device *isc = vb2_get_drv_priv(vq);
unsigned long flags;
struct isc_buffer *buf;
int ret;
mutex_lock(&isc->awb_mutex);
v4l2_ctrl_activate(isc->do_wb_ctrl, false);
isc->stop = true;
/* Wait until the end of the current frame */
if (isc->cur_frm && !wait_for_completion_timeout(&isc->comp, 5 * HZ))
v4l2_err(&isc->v4l2_dev,
"Timeout waiting for end of the capture\n");
mutex_unlock(&isc->awb_mutex);
/* Disable DMA interrupt */
regmap_write(isc->regmap, ISC_INTDIS, ISC_INT_DDONE);
pm_runtime_put_sync(isc->dev);
/* Disable stream on the sub device */
ret = v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 0);
if (ret && ret != -ENOIOCTLCMD)
v4l2_err(&isc->v4l2_dev, "stream off failed in subdev\n");
/* Release all active buffers */
spin_lock_irqsave(&isc->dma_queue_lock, flags);
if (unlikely(isc->cur_frm)) {
vb2_buffer_done(&isc->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
isc->cur_frm = NULL;
}
list_for_each_entry(buf, &isc->dma_queue, list)
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
INIT_LIST_HEAD(&isc->dma_queue);
spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
}
static void isc_buffer_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct isc_buffer *buf = container_of(vbuf, struct isc_buffer, vb);
struct isc_device *isc = vb2_get_drv_priv(vb->vb2_queue);
unsigned long flags;
spin_lock_irqsave(&isc->dma_queue_lock, flags);
if (!isc->cur_frm && list_empty(&isc->dma_queue) &&
vb2_start_streaming_called(vb->vb2_queue)) {
isc->cur_frm = buf;
isc_start_dma(isc);
} else
list_add_tail(&buf->list, &isc->dma_queue);
spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
}
static struct isc_format *find_format_by_fourcc(struct isc_device *isc,
unsigned int fourcc)
{
unsigned int num_formats = isc->num_user_formats;
struct isc_format *fmt;
unsigned int i;
for (i = 0; i < num_formats; i++) {
fmt = isc->user_formats[i];
if (fmt->fourcc == fourcc)
return fmt;
}
return NULL;
}
static const struct vb2_ops isc_vb2_ops = {
.queue_setup = isc_queue_setup,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
.buf_prepare = isc_buffer_prepare,
.start_streaming = isc_start_streaming,
.stop_streaming = isc_stop_streaming,
.buf_queue = isc_buffer_queue,
};
static int isc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct isc_device *isc = video_drvdata(file);
strscpy(cap->driver, "microchip-isc", sizeof(cap->driver));
strscpy(cap->card, "Atmel Image Sensor Controller", sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"platform:%s", isc->v4l2_dev.name);
return 0;
}
static int isc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
struct isc_device *isc = video_drvdata(file);
u32 index = f->index;
u32 i, supported_index;
if (index < isc->controller_formats_size) {
f->pixelformat = isc->controller_formats[index].fourcc;
return 0;
}
index -= isc->controller_formats_size;
supported_index = 0;
for (i = 0; i < isc->formats_list_size; i++) {
if (!ISC_IS_FORMAT_RAW(isc->formats_list[i].mbus_code) ||
!isc->formats_list[i].sd_support)
continue;
if (supported_index == index) {
f->pixelformat = isc->formats_list[i].fourcc;
return 0;
}
supported_index++;
}
return -EINVAL;
}
static int isc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *fmt)
{
struct isc_device *isc = video_drvdata(file);
*fmt = isc->fmt;
return 0;
}
/*
* Checks whether the ISC can output the currently configured format,
* considering which type of format it receives from the sensor.
*/
static int isc_try_validate_formats(struct isc_device *isc)
{
int ret;
bool bayer = false, yuv = false, rgb = false, grey = false;
/* all formats supported by the RLP module are OK */
switch (isc->try_config.fourcc) {
case V4L2_PIX_FMT_SBGGR8:
case V4L2_PIX_FMT_SGBRG8:
case V4L2_PIX_FMT_SGRBG8:
case V4L2_PIX_FMT_SRGGB8:
case V4L2_PIX_FMT_SBGGR10:
case V4L2_PIX_FMT_SGBRG10:
case V4L2_PIX_FMT_SGRBG10:
case V4L2_PIX_FMT_SRGGB10:
case V4L2_PIX_FMT_SBGGR12:
case V4L2_PIX_FMT_SGBRG12:
case V4L2_PIX_FMT_SGRBG12:
case V4L2_PIX_FMT_SRGGB12:
ret = 0;
bayer = true;
break;
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YUV422P:
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
case V4L2_PIX_FMT_VYUY:
ret = 0;
yuv = true;
break;
case V4L2_PIX_FMT_RGB565:
case V4L2_PIX_FMT_ABGR32:
case V4L2_PIX_FMT_XBGR32:
case V4L2_PIX_FMT_ARGB444:
case V4L2_PIX_FMT_ARGB555:
ret = 0;
rgb = true;
break;
case V4L2_PIX_FMT_GREY:
case V4L2_PIX_FMT_Y10:
case V4L2_PIX_FMT_Y16:
ret = 0;
grey = true;
break;
default:
/* any other formats are not supported */
ret = -EINVAL;
}
v4l2_dbg(1, debug, &isc->v4l2_dev,
"Format validation, requested rgb=%u, yuv=%u, grey=%u, bayer=%u\n",
rgb, yuv, grey, bayer);
/* we cannot output RAW if we do not receive RAW */
if ((bayer) && !ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code))
return -EINVAL;
/* we cannot output GREY if we do not receive RAW/GREY */
if (grey && !ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code) &&
!ISC_IS_FORMAT_GREY(isc->try_config.sd_format->mbus_code))
return -EINVAL;
return ret;
}
/*
* Configures the RLP and DMA modules, depending on the output format
* configured for the ISC.
* If direct_dump == true, just dump raw data 8/16 bits depending on format.
*/
static int isc_try_configure_rlp_dma(struct isc_device *isc, bool direct_dump)
{
isc->try_config.rlp_cfg_mode = 0;
switch (isc->try_config.fourcc) {
case V4L2_PIX_FMT_SBGGR8:
case V4L2_PIX_FMT_SGBRG8:
case V4L2_PIX_FMT_SGRBG8:
case V4L2_PIX_FMT_SRGGB8:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED8;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 8;
isc->try_config.bpp_v4l2 = 8;
break;
case V4L2_PIX_FMT_SBGGR10:
case V4L2_PIX_FMT_SGBRG10:
case V4L2_PIX_FMT_SGRBG10:
case V4L2_PIX_FMT_SRGGB10:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 16;
isc->try_config.bpp_v4l2 = 16;
break;
case V4L2_PIX_FMT_SBGGR12:
case V4L2_PIX_FMT_SGBRG12:
case V4L2_PIX_FMT_SGRBG12:
case V4L2_PIX_FMT_SRGGB12:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 16;
isc->try_config.bpp_v4l2 = 16;
break;
case V4L2_PIX_FMT_RGB565:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_RGB565;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 16;
isc->try_config.bpp_v4l2 = 16;
break;
case V4L2_PIX_FMT_ARGB444:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB444;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 16;
isc->try_config.bpp_v4l2 = 16;
break;
case V4L2_PIX_FMT_ARGB555:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB555;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 16;
isc->try_config.bpp_v4l2 = 16;
break;
case V4L2_PIX_FMT_ABGR32:
case V4L2_PIX_FMT_XBGR32:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB32;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 32;
isc->try_config.bpp_v4l2 = 32;
break;
case V4L2_PIX_FMT_YUV420:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_YC420P;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PLANAR;
isc->try_config.bpp = 12;
isc->try_config.bpp_v4l2 = 8; /* only first plane */
break;
case V4L2_PIX_FMT_YUV422P:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_YC422P;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PLANAR;
isc->try_config.bpp = 16;
isc->try_config.bpp_v4l2 = 8; /* only first plane */
break;
case V4L2_PIX_FMT_YUYV:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YCYC | ISC_RLP_CFG_YMODE_YUYV;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 16;
isc->try_config.bpp_v4l2 = 16;
break;
case V4L2_PIX_FMT_UYVY:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YCYC | ISC_RLP_CFG_YMODE_UYVY;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 16;
isc->try_config.bpp_v4l2 = 16;
break;
case V4L2_PIX_FMT_VYUY:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YCYC | ISC_RLP_CFG_YMODE_VYUY;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 16;
isc->try_config.bpp_v4l2 = 16;
break;
case V4L2_PIX_FMT_GREY:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DATY8;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED8;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 8;
isc->try_config.bpp_v4l2 = 8;
break;
case V4L2_PIX_FMT_Y16:
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DATY10 | ISC_RLP_CFG_LSH;
fallthrough;
case V4L2_PIX_FMT_Y10:
isc->try_config.rlp_cfg_mode |= ISC_RLP_CFG_MODE_DATY10;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 16;
isc->try_config.bpp_v4l2 = 16;
break;
default:
return -EINVAL;
}
if (direct_dump) {
isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED8;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
return 0;
}
return 0;
}
/*
* Configures the pipeline modules, depending on which format the ISC outputs
* and which format it receives as input from the sensor.
*/
static int isc_try_configure_pipeline(struct isc_device *isc)
{
switch (isc->try_config.fourcc) {
case V4L2_PIX_FMT_RGB565:
case V4L2_PIX_FMT_ARGB555:
case V4L2_PIX_FMT_ARGB444:
case V4L2_PIX_FMT_ABGR32:
case V4L2_PIX_FMT_XBGR32:
/* if sensor format is RAW, we convert inside ISC */
if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
isc->try_config.bits_pipeline = CFA_ENABLE |
WB_ENABLE | GAM_ENABLES | DPC_BLCENABLE |
CC_ENABLE;
} else {
isc->try_config.bits_pipeline = 0x0;
}
break;
case V4L2_PIX_FMT_YUV420:
/* if sensor format is RAW, we convert inside ISC */
if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
isc->try_config.bits_pipeline = CFA_ENABLE |
CSC_ENABLE | GAM_ENABLES | WB_ENABLE |
SUB420_ENABLE | SUB422_ENABLE | CBC_ENABLE |
DPC_BLCENABLE;
} else {
isc->try_config.bits_pipeline = 0x0;
}
break;
case V4L2_PIX_FMT_YUV422P:
/* if sensor format is RAW, we convert inside ISC */
if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
isc->try_config.bits_pipeline = CFA_ENABLE |
CSC_ENABLE | WB_ENABLE | GAM_ENABLES |
SUB422_ENABLE | CBC_ENABLE | DPC_BLCENABLE;
} else {
isc->try_config.bits_pipeline = 0x0;
}
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
case V4L2_PIX_FMT_VYUY:
/* if sensor format is RAW, we convert inside ISC */
if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
isc->try_config.bits_pipeline = CFA_ENABLE |
CSC_ENABLE | WB_ENABLE | GAM_ENABLES |
SUB422_ENABLE | CBC_ENABLE | DPC_BLCENABLE;
} else {
isc->try_config.bits_pipeline = 0x0;
}
break;
case V4L2_PIX_FMT_GREY:
case V4L2_PIX_FMT_Y16:
/* if sensor format is RAW, we convert inside ISC */
if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
isc->try_config.bits_pipeline = CFA_ENABLE |
CSC_ENABLE | WB_ENABLE | GAM_ENABLES |
CBC_ENABLE | DPC_BLCENABLE;
} else {
isc->try_config.bits_pipeline = 0x0;
}
break;
default:
if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code))
isc->try_config.bits_pipeline = WB_ENABLE | DPC_BLCENABLE;
else
isc->try_config.bits_pipeline = 0x0;
}
/* Tune the pipeline to product specific */
isc->adapt_pipeline(isc);
return 0;
}
static void isc_try_fse(struct isc_device *isc,
struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_frame_size_enum fse = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
int ret;
/*
* If we do not know yet which format the subdev is using, we cannot
* do anything.
*/
if (!isc->try_config.sd_format)
return;
fse.code = isc->try_config.sd_format->mbus_code;
ret = v4l2_subdev_call(isc->current_subdev->sd, pad, enum_frame_size,
sd_state, &fse);
/*
* Attempt to obtain format size from subdev. If not available,
* just use the maximum ISC can receive.
*/
if (ret) {
sd_state->pads->try_crop.width = isc->max_width;
sd_state->pads->try_crop.height = isc->max_height;
} else {
sd_state->pads->try_crop.width = fse.max_width;
sd_state->pads->try_crop.height = fse.max_height;
}
}
static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
u32 *code)
{
int i;
struct isc_format *sd_fmt = NULL, *direct_fmt = NULL;
struct v4l2_pix_format *pixfmt = &f->fmt.pix;
struct v4l2_subdev_pad_config pad_cfg = {};
struct v4l2_subdev_state pad_state = {
.pads = &pad_cfg,
};
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
u32 mbus_code;
int ret;
bool rlp_dma_direct_dump = false;
if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
/* Step 1: find a RAW format that is supported */
for (i = 0; i < isc->num_user_formats; i++) {
if (ISC_IS_FORMAT_RAW(isc->user_formats[i]->mbus_code)) {
sd_fmt = isc->user_formats[i];
break;
}
}
/* Step 2: We can continue with this RAW format, or we can look
* for something better: maybe the sensor directly supports what we need.
*/
direct_fmt = find_format_by_fourcc(isc, pixfmt->pixelformat);
/* Step 3: We have both. We decide given the module parameter which
* one to use.
*/
if (direct_fmt && sd_fmt && sensor_preferred)
sd_fmt = direct_fmt;
/* Step 4: we do not have RAW but we have a direct format. Use it. */
if (direct_fmt && !sd_fmt)
sd_fmt = direct_fmt;
/* Step 5: if we are using a direct format, we need to package
* everything as 8 bit data and just dump it
*/
if (sd_fmt == direct_fmt)
rlp_dma_direct_dump = true;
/* Step 6: We have no format. This can happen if the userspace
* requests some weird/invalid format.
* In this case, default to whatever we have
*/
if (!sd_fmt && !direct_fmt) {
sd_fmt = isc->user_formats[isc->num_user_formats - 1];
v4l2_dbg(1, debug, &isc->v4l2_dev,
"Sensor not supporting %.4s, using %.4s\n",
(char *)&pixfmt->pixelformat, (char *)&sd_fmt->fourcc);
}
if (!sd_fmt) {
ret = -EINVAL;
goto isc_try_fmt_err;
}
/* Step 7: Print out what we decided for debugging */
v4l2_dbg(1, debug, &isc->v4l2_dev,
"Preferring to have sensor using format %.4s\n",
(char *)&sd_fmt->fourcc);
/* Step 8: at this moment we decided which format the subdev will use */
isc->try_config.sd_format = sd_fmt;
/* Limit to Atmel ISC hardware capabilities */
if (pixfmt->width > isc->max_width)
pixfmt->width = isc->max_width;
if (pixfmt->height > isc->max_height)
pixfmt->height = isc->max_height;
/*
* The mbus format is the one the subdev outputs.
* The pixels will be transferred in this format Sensor -> ISC
*/
mbus_code = sd_fmt->mbus_code;
/*
* Validate formats. If the required format is not OK, default to raw.
*/
isc->try_config.fourcc = pixfmt->pixelformat;
if (isc_try_validate_formats(isc)) {
pixfmt->pixelformat = isc->try_config.fourcc = sd_fmt->fourcc;
/* Re-try to validate the new format */
ret = isc_try_validate_formats(isc);
if (ret)
goto isc_try_fmt_err;
}
ret = isc_try_configure_rlp_dma(isc, rlp_dma_direct_dump);
if (ret)
goto isc_try_fmt_err;
ret = isc_try_configure_pipeline(isc);
if (ret)
goto isc_try_fmt_err;
/* Obtain frame sizes if possible to have crop requirements ready */
isc_try_fse(isc, &pad_state);
v4l2_fill_mbus_format(&format.format, pixfmt, mbus_code);
ret = v4l2_subdev_call(isc->current_subdev->sd, pad, set_fmt,
&pad_state, &format);
if (ret < 0)
goto isc_try_fmt_subdev_err;
v4l2_fill_pix_format(pixfmt, &format.format);
/* Limit to Atmel ISC hardware capabilities */
if (pixfmt->width > isc->max_width)
pixfmt->width = isc->max_width;
if (pixfmt->height > isc->max_height)
pixfmt->height = isc->max_height;
pixfmt->field = V4L2_FIELD_NONE;
pixfmt->bytesperline = (pixfmt->width * isc->try_config.bpp_v4l2) >> 3;
pixfmt->sizeimage = ((pixfmt->width * isc->try_config.bpp) >> 3) *
pixfmt->height;
if (code)
*code = mbus_code;
return 0;
isc_try_fmt_err:
v4l2_err(&isc->v4l2_dev, "Could not find any possible format for a working pipeline\n");
isc_try_fmt_subdev_err:
memset(&isc->try_config, 0, sizeof(isc->try_config));
return ret;
}
static int isc_set_fmt(struct isc_device *isc, struct v4l2_format *f)
{
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
u32 mbus_code = 0;
int ret;
ret = isc_try_fmt(isc, f, &mbus_code);
if (ret)
return ret;
v4l2_fill_mbus_format(&format.format, &f->fmt.pix, mbus_code);
ret = v4l2_subdev_call(isc->current_subdev->sd, pad,
set_fmt, NULL, &format);
if (ret < 0)
return ret;
/* Limit to Atmel ISC hardware capabilities */
if (f->fmt.pix.width > isc->max_width)
f->fmt.pix.width = isc->max_width;
if (f->fmt.pix.height > isc->max_height)
f->fmt.pix.height = isc->max_height;
isc->fmt = *f;
if (isc->try_config.sd_format && isc->config.sd_format &&
isc->try_config.sd_format != isc->config.sd_format) {
isc->ctrls.hist_stat = HIST_INIT;
isc_reset_awb_ctrls(isc);
isc_update_v4l2_ctrls(isc);
}
/* make the try configuration active */
isc->config = isc->try_config;
v4l2_dbg(1, debug, &isc->v4l2_dev, "New ISC configuration in place\n");
return 0;
}
static int isc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct isc_device *isc = video_drvdata(file);
if (vb2_is_busy(&isc->vb2_vidq))
return -EBUSY;
return isc_set_fmt(isc, f);
}
static int isc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct isc_device *isc = video_drvdata(file);
return isc_try_fmt(isc, f, NULL);
}
static int isc_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
{
if (inp->index != 0)
return -EINVAL;
inp->type = V4L2_INPUT_TYPE_CAMERA;
inp->std = 0;
strscpy(inp->name, "Camera", sizeof(inp->name));
return 0;
}
static int isc_g_input(struct file *file, void *priv, unsigned int *i)
{
*i = 0;
return 0;
}
static int isc_s_input(struct file *file, void *priv, unsigned int i)
{
if (i > 0)
return -EINVAL;
return 0;
}
static int isc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
{
struct isc_device *isc = video_drvdata(file);
return v4l2_g_parm_cap(video_devdata(file), isc->current_subdev->sd, a);
}
static int isc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
{
struct isc_device *isc = video_drvdata(file);
return v4l2_s_parm_cap(video_devdata(file), isc->current_subdev->sd, a);
}
static int isc_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
struct isc_device *isc = video_drvdata(file);
int ret = -EINVAL;
int i;
if (fsize->index)
return -EINVAL;
for (i = 0; i < isc->num_user_formats; i++)
if (isc->user_formats[i]->fourcc == fsize->pixel_format)
ret = 0;
for (i = 0; i < isc->controller_formats_size; i++)
if (isc->controller_formats[i].fourcc == fsize->pixel_format)
ret = 0;
if (ret)
return ret;
fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
fsize->stepwise.min_width = 16;
fsize->stepwise.max_width = isc->max_width;
fsize->stepwise.min_height = 16;
fsize->stepwise.max_height = isc->max_height;
fsize->stepwise.step_width = 1;
fsize->stepwise.step_height = 1;
return 0;
}
static const struct v4l2_ioctl_ops isc_ioctl_ops = {
.vidioc_querycap = isc_querycap,
.vidioc_enum_fmt_vid_cap = isc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = isc_g_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = isc_s_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = isc_try_fmt_vid_cap,
.vidioc_enum_input = isc_enum_input,
.vidioc_g_input = isc_g_input,
.vidioc_s_input = isc_s_input,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_g_parm = isc_g_parm,
.vidioc_s_parm = isc_s_parm,
.vidioc_enum_framesizes = isc_enum_framesizes,
.vidioc_log_status = v4l2_ctrl_log_status,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static int isc_open(struct file *file)
{
struct isc_device *isc = video_drvdata(file);
struct v4l2_subdev *sd = isc->current_subdev->sd;
int ret;
if (mutex_lock_interruptible(&isc->lock))
return -ERESTARTSYS;
ret = v4l2_fh_open(file);
if (ret < 0)
goto unlock;
if (!v4l2_fh_is_singular_file(file))
goto unlock;
ret = v4l2_subdev_call(sd, core, s_power, 1);
if (ret < 0 && ret != -ENOIOCTLCMD) {
v4l2_fh_release(file);
goto unlock;
}
ret = isc_set_fmt(isc, &isc->fmt);
if (ret) {
v4l2_subdev_call(sd, core, s_power, 0);
v4l2_fh_release(file);
}
unlock:
mutex_unlock(&isc->lock);
return ret;
}
static int isc_release(struct file *file)
{
struct isc_device *isc = video_drvdata(file);
struct v4l2_subdev *sd = isc->current_subdev->sd;
bool fh_singular;
int ret;
mutex_lock(&isc->lock);
fh_singular = v4l2_fh_is_singular_file(file);
ret = _vb2_fop_release(file, NULL);
if (fh_singular)
v4l2_subdev_call(sd, core, s_power, 0);
mutex_unlock(&isc->lock);
return ret;
}
static const struct v4l2_file_operations isc_fops = {
.owner = THIS_MODULE,
.open = isc_open,
.release = isc_release,
.unlocked_ioctl = video_ioctl2,
.read = vb2_fop_read,
.mmap = vb2_fop_mmap,
.poll = vb2_fop_poll,
};
irqreturn_t atmel_isc_interrupt(int irq, void *dev_id)
{
struct isc_device *isc = (struct isc_device *)dev_id;
struct regmap *regmap = isc->regmap;
u32 isc_intsr, isc_intmask, pending;
irqreturn_t ret = IRQ_NONE;
regmap_read(regmap, ISC_INTSR, &isc_intsr);
regmap_read(regmap, ISC_INTMASK, &isc_intmask);
pending = isc_intsr & isc_intmask;
if (likely(pending & ISC_INT_DDONE)) {
spin_lock(&isc->dma_queue_lock);
if (isc->cur_frm) {
struct vb2_v4l2_buffer *vbuf = &isc->cur_frm->vb;
struct vb2_buffer *vb = &vbuf->vb2_buf;
vb->timestamp = ktime_get_ns();
vbuf->sequence = isc->sequence++;
vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
isc->cur_frm = NULL;
}
if (!list_empty(&isc->dma_queue) && !isc->stop) {
isc->cur_frm = list_first_entry(&isc->dma_queue,
struct isc_buffer, list);
list_del(&isc->cur_frm->list);
isc_start_dma(isc);
}
if (isc->stop)
complete(&isc->comp);
ret = IRQ_HANDLED;
spin_unlock(&isc->dma_queue_lock);
}
if (pending & ISC_INT_HISDONE) {
schedule_work(&isc->awb_work);
ret = IRQ_HANDLED;
}
return ret;
}
EXPORT_SYMBOL_GPL(atmel_isc_interrupt);
static void isc_hist_count(struct isc_device *isc, u32 *min, u32 *max)
{
struct regmap *regmap = isc->regmap;
struct isc_ctrls *ctrls = &isc->ctrls;
u32 *hist_count = &ctrls->hist_count[ctrls->hist_id];
u32 *hist_entry = &ctrls->hist_entry[0];
u32 i;
*min = 0;
*max = HIST_ENTRIES;
regmap_bulk_read(regmap, ISC_HIS_ENTRY + isc->offsets.his_entry,
hist_entry, HIST_ENTRIES);
*hist_count = 0;
/*
* we deliberately ignore the end of the histogram,
* the most white pixels
*/
for (i = 1; i < HIST_ENTRIES; i++) {
if (*hist_entry && !*min)
*min = i;
if (*hist_entry)
*max = i;
*hist_count += i * (*hist_entry++);
}
if (!*min)
*min = 1;
v4l2_dbg(1, debug, &isc->v4l2_dev,
"isc wb: hist_id %u, hist_count %u",
ctrls->hist_id, *hist_count);
}
static void isc_wb_update(struct isc_ctrls *ctrls)
{
struct isc_device *isc = container_of(ctrls, struct isc_device, ctrls);
u32 *hist_count = &ctrls->hist_count[0];
u32 c, offset[4];
u64 avg = 0;
/* We compute two gains, stretch gain and grey world gain */
u32 s_gain[4], gw_gain[4];
/*
* According to Grey World, we need to set gains for R/B to normalize
* them towards the green channel.
* Thus we want to keep Green fixed and adjust only Red/Blue.
* Compute the average of both green channels first.
*/
avg = (u64)hist_count[ISC_HIS_CFG_MODE_GR] +
(u64)hist_count[ISC_HIS_CFG_MODE_GB];
avg >>= 1;
v4l2_dbg(1, debug, &isc->v4l2_dev,
"isc wb: green components average %llu\n", avg);
/* Green histogram is null, nothing to do */
if (!avg)
return;
for (c = ISC_HIS_CFG_MODE_GR; c <= ISC_HIS_CFG_MODE_B; c++) {
/*
* The color offset is the minimum value of the histogram.
* We stretch this color to the full range by subtracting
* this value from the color component.
*/
offset[c] = ctrls->hist_minmax[c][HIST_MIN_INDEX];
/*
* The offset is always at least 1. If the offset is 1, we do
* not need to adjust it, so our result must be zero.
* The offset is computed from a 9 bit histogram (0..511),
* but the offset in the register is based on the
* 12 bit pipeline (0..4095).
* We need to shift by the 3 bits that the histogram is
* ignoring.
*/
ctrls->offset[c] = (offset[c] - 1) << 3;
/*
* The offset is then converted to 2's complement,
* and must be negative, as we subtract this value from the
* color components.
*/
ctrls->offset[c] = -ctrls->offset[c];
/*
* The stretch gain is the total number of histogram bins
* divided by the actual range of the color component (Max - Min).
* If we compute the gain like this, the actual color component
* will be stretched to the full histogram.
* We need to shift by 9 bits for precision; we have 9 bits for
* decimals.
*/
s_gain[c] = (HIST_ENTRIES << 9) /
(ctrls->hist_minmax[c][HIST_MAX_INDEX] -
ctrls->hist_minmax[c][HIST_MIN_INDEX] + 1);
/*
* Now we have to compute the gain w.r.t. the average.
* Add/lose gain to the component towards the average.
* If it happens that the component is zero, use the
* fixed point value: 1.0 gain.
*/
if (hist_count[c])
gw_gain[c] = div_u64(avg << 9, hist_count[c]);
else
gw_gain[c] = 1 << 9;
v4l2_dbg(1, debug, &isc->v4l2_dev,
"isc wb: component %d, s_gain %u, gw_gain %u\n",
c, s_gain[c], gw_gain[c]);
/* multiply both gains and adjust for decimals */
ctrls->gain[c] = s_gain[c] * gw_gain[c];
ctrls->gain[c] >>= 9;
/* make sure we are not out of range */
ctrls->gain[c] = clamp_val(ctrls->gain[c], 0, GENMASK(12, 0));
v4l2_dbg(1, debug, &isc->v4l2_dev,
"isc wb: component %d, final gain %u\n",
c, ctrls->gain[c]);
}
}
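/*
 * Illustrative worked example (editor's sketch, not part of the driver),
 * assuming HIST_ENTRIES is 512 and using hypothetical histogram numbers:
 * suppose the red channel has min = 17, max = 400, hist_count[R] = 3000000,
 * and the green average avg = 3600000. Then:
 *
 *   ctrls->offset[R] = -((17 - 1) << 3)       = -128
 *   s_gain[R]  = (512 << 9) / (400 - 17 + 1)  = 682   (~1.33, with 512 == 1.0)
 *   gw_gain[R] = (3600000 << 9) / 3000000     = 614   (~1.20)
 *   gain[R]    = (682 * 614) >> 9             = 817   (~1.60)
 *
 * i.e. the red component is both stretched to the full histogram range and
 * scaled towards the green average before being clamped to 13 bits.
 */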
static void isc_awb_work(struct work_struct *w)
{
struct isc_device *isc =
container_of(w, struct isc_device, awb_work);
struct regmap *regmap = isc->regmap;
struct isc_ctrls *ctrls = &isc->ctrls;
u32 hist_id = ctrls->hist_id;
u32 baysel;
unsigned long flags;
u32 min, max;
int ret;
if (ctrls->hist_stat != HIST_ENABLED)
return;
isc_hist_count(isc, &min, &max);
v4l2_dbg(1, debug, &isc->v4l2_dev,
"isc wb mode %d: hist min %u , max %u\n", hist_id, min, max);
ctrls->hist_minmax[hist_id][HIST_MIN_INDEX] = min;
ctrls->hist_minmax[hist_id][HIST_MAX_INDEX] = max;
if (hist_id != ISC_HIS_CFG_MODE_B) {
hist_id++;
} else {
isc_wb_update(ctrls);
hist_id = ISC_HIS_CFG_MODE_GR;
}
ctrls->hist_id = hist_id;
baysel = isc->config.sd_format->cfa_baycfg << ISC_HIS_CFG_BAYSEL_SHIFT;
ret = pm_runtime_resume_and_get(isc->dev);
if (ret < 0)
return;
/*
* only update if we have all the required histograms and controls
* if awb has been disabled, we need to reset registers as well.
*/
if (hist_id == ISC_HIS_CFG_MODE_GR || ctrls->awb == ISC_WB_NONE) {
/*
* It may happen that the DMA Done IRQ will trigger while we are
* updating the white balance registers here.
* In that case, only part of the controls would have been updated.
* We can avoid that by locking the section.
*/
spin_lock_irqsave(&isc->awb_lock, flags);
isc_update_awb_ctrls(isc);
spin_unlock_irqrestore(&isc->awb_lock, flags);
/*
* if we are doing just the one time white balance adjustment,
* we are basically done.
*/
if (ctrls->awb == ISC_WB_ONETIME) {
v4l2_info(&isc->v4l2_dev,
"Completed one time white-balance adjustment.\n");
/* update the v4l2 controls values */
isc_update_v4l2_ctrls(isc);
ctrls->awb = ISC_WB_NONE;
}
}
regmap_write(regmap, ISC_HIS_CFG + isc->offsets.his,
hist_id | baysel | ISC_HIS_CFG_RAR);
/*
* We have to make sure the streaming has not stopped meanwhile.
* ISC requires a frame to clock the internal profile update.
* To avoid issues, lock the sequence with a mutex
*/
mutex_lock(&isc->awb_mutex);
/* streaming is not active anymore */
if (isc->stop) {
mutex_unlock(&isc->awb_mutex);
return;
}
isc_update_profile(isc);
mutex_unlock(&isc->awb_mutex);
/* if awb has been disabled, we don't need to start another histogram */
if (ctrls->awb)
regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_HISREQ);
pm_runtime_put_sync(isc->dev);
}
static int isc_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct isc_device *isc = container_of(ctrl->handler,
struct isc_device, ctrls.handler);
struct isc_ctrls *ctrls = &isc->ctrls;
if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
return 0;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
ctrls->brightness = ctrl->val & ISC_CBC_BRIGHT_MASK;
break;
case V4L2_CID_CONTRAST:
ctrls->contrast = ctrl->val & ISC_CBC_CONTRAST_MASK;
break;
case V4L2_CID_GAMMA:
ctrls->gamma_index = ctrl->val;
break;
default:
return -EINVAL;
}
return 0;
}
static const struct v4l2_ctrl_ops isc_ctrl_ops = {
.s_ctrl = isc_s_ctrl,
};
static int isc_s_awb_ctrl(struct v4l2_ctrl *ctrl)
{
struct isc_device *isc = container_of(ctrl->handler,
struct isc_device, ctrls.handler);
struct isc_ctrls *ctrls = &isc->ctrls;
if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
return 0;
switch (ctrl->id) {
case V4L2_CID_AUTO_WHITE_BALANCE:
if (ctrl->val == 1)
ctrls->awb = ISC_WB_AUTO;
else
ctrls->awb = ISC_WB_NONE;
/* configure the controls with new values from v4l2 */
if (ctrl->cluster[ISC_CTRL_R_GAIN]->is_new)
ctrls->gain[ISC_HIS_CFG_MODE_R] = isc->r_gain_ctrl->val;
if (ctrl->cluster[ISC_CTRL_B_GAIN]->is_new)
ctrls->gain[ISC_HIS_CFG_MODE_B] = isc->b_gain_ctrl->val;
if (ctrl->cluster[ISC_CTRL_GR_GAIN]->is_new)
ctrls->gain[ISC_HIS_CFG_MODE_GR] = isc->gr_gain_ctrl->val;
if (ctrl->cluster[ISC_CTRL_GB_GAIN]->is_new)
ctrls->gain[ISC_HIS_CFG_MODE_GB] = isc->gb_gain_ctrl->val;
if (ctrl->cluster[ISC_CTRL_R_OFF]->is_new)
ctrls->offset[ISC_HIS_CFG_MODE_R] = isc->r_off_ctrl->val;
if (ctrl->cluster[ISC_CTRL_B_OFF]->is_new)
ctrls->offset[ISC_HIS_CFG_MODE_B] = isc->b_off_ctrl->val;
if (ctrl->cluster[ISC_CTRL_GR_OFF]->is_new)
ctrls->offset[ISC_HIS_CFG_MODE_GR] = isc->gr_off_ctrl->val;
if (ctrl->cluster[ISC_CTRL_GB_OFF]->is_new)
ctrls->offset[ISC_HIS_CFG_MODE_GB] = isc->gb_off_ctrl->val;
isc_update_awb_ctrls(isc);
mutex_lock(&isc->awb_mutex);
if (vb2_is_streaming(&isc->vb2_vidq)) {
/*
* If we are streaming, we can update profile to
* have the new settings in place.
*/
isc_update_profile(isc);
} else {
/*
* The auto cluster will automatically activate this
* control. It has to be deactivated when not
* streaming.
*/
v4l2_ctrl_activate(isc->do_wb_ctrl, false);
}
mutex_unlock(&isc->awb_mutex);
/* if we have auto white balance on, start the histogram procedure */
if (ctrls->awb == ISC_WB_AUTO &&
vb2_is_streaming(&isc->vb2_vidq) &&
ISC_IS_FORMAT_RAW(isc->config.sd_format->mbus_code))
isc_set_histogram(isc, true);
/*
* For a one-time white balance adjustment, check the button;
* if it is pressed, perform the one-time operation.
*/
if (ctrls->awb == ISC_WB_NONE &&
ctrl->cluster[ISC_CTRL_DO_WB]->is_new &&
!(ctrl->cluster[ISC_CTRL_DO_WB]->flags &
V4L2_CTRL_FLAG_INACTIVE)) {
ctrls->awb = ISC_WB_ONETIME;
isc_set_histogram(isc, true);
v4l2_dbg(1, debug, &isc->v4l2_dev,
"One time white-balance started.\n");
}
return 0;
}
return 0;
}
static int isc_g_volatile_awb_ctrl(struct v4l2_ctrl *ctrl)
{
struct isc_device *isc = container_of(ctrl->handler,
struct isc_device, ctrls.handler);
struct isc_ctrls *ctrls = &isc->ctrls;
switch (ctrl->id) {
/* being a cluster, this id will be called for every control */
case V4L2_CID_AUTO_WHITE_BALANCE:
ctrl->cluster[ISC_CTRL_R_GAIN]->val =
ctrls->gain[ISC_HIS_CFG_MODE_R];
ctrl->cluster[ISC_CTRL_B_GAIN]->val =
ctrls->gain[ISC_HIS_CFG_MODE_B];
ctrl->cluster[ISC_CTRL_GR_GAIN]->val =
ctrls->gain[ISC_HIS_CFG_MODE_GR];
ctrl->cluster[ISC_CTRL_GB_GAIN]->val =
ctrls->gain[ISC_HIS_CFG_MODE_GB];
ctrl->cluster[ISC_CTRL_R_OFF]->val =
ctrls->offset[ISC_HIS_CFG_MODE_R];
ctrl->cluster[ISC_CTRL_B_OFF]->val =
ctrls->offset[ISC_HIS_CFG_MODE_B];
ctrl->cluster[ISC_CTRL_GR_OFF]->val =
ctrls->offset[ISC_HIS_CFG_MODE_GR];
ctrl->cluster[ISC_CTRL_GB_OFF]->val =
ctrls->offset[ISC_HIS_CFG_MODE_GB];
break;
}
return 0;
}
static const struct v4l2_ctrl_ops isc_awb_ops = {
.s_ctrl = isc_s_awb_ctrl,
.g_volatile_ctrl = isc_g_volatile_awb_ctrl,
};
#define ISC_CTRL_OFF(_name, _id, _name_str) \
static const struct v4l2_ctrl_config _name = { \
.ops = &isc_awb_ops, \
.id = _id, \
.name = _name_str, \
.type = V4L2_CTRL_TYPE_INTEGER, \
.flags = V4L2_CTRL_FLAG_SLIDER, \
.min = -4095, \
.max = 4095, \
.step = 1, \
.def = 0, \
}
ISC_CTRL_OFF(isc_r_off_ctrl, ISC_CID_R_OFFSET, "Red Component Offset");
ISC_CTRL_OFF(isc_b_off_ctrl, ISC_CID_B_OFFSET, "Blue Component Offset");
ISC_CTRL_OFF(isc_gr_off_ctrl, ISC_CID_GR_OFFSET, "Green Red Component Offset");
ISC_CTRL_OFF(isc_gb_off_ctrl, ISC_CID_GB_OFFSET, "Green Blue Component Offset");
#define ISC_CTRL_GAIN(_name, _id, _name_str) \
static const struct v4l2_ctrl_config _name = { \
.ops = &isc_awb_ops, \
.id = _id, \
.name = _name_str, \
.type = V4L2_CTRL_TYPE_INTEGER, \
.flags = V4L2_CTRL_FLAG_SLIDER, \
.min = 0, \
.max = 8191, \
.step = 1, \
.def = 512, \
}
ISC_CTRL_GAIN(isc_r_gain_ctrl, ISC_CID_R_GAIN, "Red Component Gain");
ISC_CTRL_GAIN(isc_b_gain_ctrl, ISC_CID_B_GAIN, "Blue Component Gain");
ISC_CTRL_GAIN(isc_gr_gain_ctrl, ISC_CID_GR_GAIN, "Green Red Component Gain");
ISC_CTRL_GAIN(isc_gb_gain_ctrl, ISC_CID_GB_GAIN, "Green Blue Component Gain");
static int isc_ctrl_init(struct isc_device *isc)
{
const struct v4l2_ctrl_ops *ops = &isc_ctrl_ops;
struct isc_ctrls *ctrls = &isc->ctrls;
struct v4l2_ctrl_handler *hdl = &ctrls->handler;
int ret;
ctrls->hist_stat = HIST_INIT;
isc_reset_awb_ctrls(isc);
ret = v4l2_ctrl_handler_init(hdl, 13);
if (ret < 0)
return ret;
/* Initialize product specific controls. For example, contrast */
isc->config_ctrls(isc, ops);
ctrls->brightness = 0;
v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS, -1024, 1023, 1, 0);
v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAMMA, 0, isc->gamma_max, 1,
isc->gamma_max);
isc->awb_ctrl = v4l2_ctrl_new_std(hdl, &isc_awb_ops,
V4L2_CID_AUTO_WHITE_BALANCE,
0, 1, 1, 1);
/* do_white_balance is a button, so min,max,step,default are ignored */
isc->do_wb_ctrl = v4l2_ctrl_new_std(hdl, &isc_awb_ops,
V4L2_CID_DO_WHITE_BALANCE,
0, 0, 0, 0);
if (!isc->do_wb_ctrl) {
ret = hdl->error;
v4l2_ctrl_handler_free(hdl);
return ret;
}
v4l2_ctrl_activate(isc->do_wb_ctrl, false);
isc->r_gain_ctrl = v4l2_ctrl_new_custom(hdl, &isc_r_gain_ctrl, NULL);
isc->b_gain_ctrl = v4l2_ctrl_new_custom(hdl, &isc_b_gain_ctrl, NULL);
isc->gr_gain_ctrl = v4l2_ctrl_new_custom(hdl, &isc_gr_gain_ctrl, NULL);
isc->gb_gain_ctrl = v4l2_ctrl_new_custom(hdl, &isc_gb_gain_ctrl, NULL);
isc->r_off_ctrl = v4l2_ctrl_new_custom(hdl, &isc_r_off_ctrl, NULL);
isc->b_off_ctrl = v4l2_ctrl_new_custom(hdl, &isc_b_off_ctrl, NULL);
isc->gr_off_ctrl = v4l2_ctrl_new_custom(hdl, &isc_gr_off_ctrl, NULL);
isc->gb_off_ctrl = v4l2_ctrl_new_custom(hdl, &isc_gb_off_ctrl, NULL);
/*
* The cluster is in auto mode with auto white balance enabled,
* and in manual mode otherwise.
*/
v4l2_ctrl_auto_cluster(10, &isc->awb_ctrl, 0, true);
v4l2_ctrl_handler_setup(hdl);
return 0;
}
static int isc_async_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_connection *asd)
{
struct isc_device *isc = container_of(notifier->v4l2_dev,
struct isc_device, v4l2_dev);
struct isc_subdev_entity *subdev_entity =
container_of(notifier, struct isc_subdev_entity, notifier);
if (video_is_registered(&isc->video_dev)) {
v4l2_err(&isc->v4l2_dev, "only supports one sub-device.\n");
return -EBUSY;
}
subdev_entity->sd = subdev;
return 0;
}
static void isc_async_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_connection *asd)
{
struct isc_device *isc = container_of(notifier->v4l2_dev,
struct isc_device, v4l2_dev);
mutex_destroy(&isc->awb_mutex);
cancel_work_sync(&isc->awb_work);
video_unregister_device(&isc->video_dev);
v4l2_ctrl_handler_free(&isc->ctrls.handler);
}
static struct isc_format *find_format_by_code(struct isc_device *isc,
unsigned int code, int *index)
{
struct isc_format *fmt = &isc->formats_list[0];
unsigned int i;
for (i = 0; i < isc->formats_list_size; i++) {
if (fmt->mbus_code == code) {
*index = i;
return fmt;
}
fmt++;
}
return NULL;
}
static int isc_formats_init(struct isc_device *isc)
{
struct isc_format *fmt;
struct v4l2_subdev *subdev = isc->current_subdev->sd;
unsigned int num_fmts, i, j;
u32 list_size = isc->formats_list_size;
struct v4l2_subdev_mbus_code_enum mbus_code = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
num_fmts = 0;
while (!v4l2_subdev_call(subdev, pad, enum_mbus_code,
NULL, &mbus_code)) {
mbus_code.index++;
fmt = find_format_by_code(isc, mbus_code.code, &i);
if (!fmt) {
v4l2_warn(&isc->v4l2_dev, "Mbus code %x not supported\n",
mbus_code.code);
continue;
}
fmt->sd_support = true;
num_fmts++;
}
if (!num_fmts)
return -ENXIO;
isc->num_user_formats = num_fmts;
isc->user_formats = devm_kcalloc(isc->dev,
num_fmts, sizeof(*isc->user_formats),
GFP_KERNEL);
if (!isc->user_formats)
return -ENOMEM;
fmt = &isc->formats_list[0];
for (i = 0, j = 0; i < list_size; i++) {
if (fmt->sd_support)
isc->user_formats[j++] = fmt;
fmt++;
}
return 0;
}
static int isc_set_default_fmt(struct isc_device *isc)
{
struct v4l2_format f = {
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
.fmt.pix = {
.width = VGA_WIDTH,
.height = VGA_HEIGHT,
.field = V4L2_FIELD_NONE,
.pixelformat = isc->user_formats[0]->fourcc,
},
};
int ret;
ret = isc_try_fmt(isc, &f, NULL);
if (ret)
return ret;
isc->fmt = f;
return 0;
}
static int isc_async_complete(struct v4l2_async_notifier *notifier)
{
struct isc_device *isc = container_of(notifier->v4l2_dev,
struct isc_device, v4l2_dev);
struct video_device *vdev = &isc->video_dev;
struct vb2_queue *q = &isc->vb2_vidq;
int ret = 0;
INIT_WORK(&isc->awb_work, isc_awb_work);
ret = v4l2_device_register_subdev_nodes(&isc->v4l2_dev);
if (ret < 0) {
v4l2_err(&isc->v4l2_dev, "Failed to register subdev nodes\n");
return ret;
}
isc->current_subdev = container_of(notifier,
struct isc_subdev_entity, notifier);
mutex_init(&isc->lock);
mutex_init(&isc->awb_mutex);
init_completion(&isc->comp);
/* Initialize videobuf2 queue */
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
q->drv_priv = isc;
q->buf_struct_size = sizeof(struct isc_buffer);
q->ops = &isc_vb2_ops;
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &isc->lock;
q->min_buffers_needed = 1;
q->dev = isc->dev;
ret = vb2_queue_init(q);
if (ret < 0) {
v4l2_err(&isc->v4l2_dev,
"vb2_queue_init() failed: %d\n", ret);
goto isc_async_complete_err;
}
/* Init video dma queues */
INIT_LIST_HEAD(&isc->dma_queue);
spin_lock_init(&isc->dma_queue_lock);
spin_lock_init(&isc->awb_lock);
ret = isc_formats_init(isc);
if (ret < 0) {
v4l2_err(&isc->v4l2_dev,
"Init format failed: %d\n", ret);
goto isc_async_complete_err;
}
ret = isc_set_default_fmt(isc);
if (ret) {
v4l2_err(&isc->v4l2_dev, "Could not set default format\n");
goto isc_async_complete_err;
}
ret = isc_ctrl_init(isc);
if (ret) {
v4l2_err(&isc->v4l2_dev, "Init isc ctrols failed: %d\n", ret);
goto isc_async_complete_err;
}
/* Register video device */
strscpy(vdev->name, KBUILD_MODNAME, sizeof(vdev->name));
vdev->release = video_device_release_empty;
vdev->fops = &isc_fops;
vdev->ioctl_ops = &isc_ioctl_ops;
vdev->v4l2_dev = &isc->v4l2_dev;
vdev->vfl_dir = VFL_DIR_RX;
vdev->queue = q;
vdev->lock = &isc->lock;
vdev->ctrl_handler = &isc->ctrls.handler;
vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
video_set_drvdata(vdev, isc);
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret < 0) {
v4l2_err(&isc->v4l2_dev,
"video_register_device failed: %d\n", ret);
goto isc_async_complete_err;
}
return 0;
isc_async_complete_err:
mutex_destroy(&isc->awb_mutex);
mutex_destroy(&isc->lock);
return ret;
}
const struct v4l2_async_notifier_operations atmel_isc_async_ops = {
.bound = isc_async_bound,
.unbind = isc_async_unbind,
.complete = isc_async_complete,
};
EXPORT_SYMBOL_GPL(atmel_isc_async_ops);
void atmel_isc_subdev_cleanup(struct isc_device *isc)
{
struct isc_subdev_entity *subdev_entity;
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
v4l2_async_nf_unregister(&subdev_entity->notifier);
v4l2_async_nf_cleanup(&subdev_entity->notifier);
}
INIT_LIST_HEAD(&isc->subdev_entities);
}
EXPORT_SYMBOL_GPL(atmel_isc_subdev_cleanup);
int atmel_isc_pipeline_init(struct isc_device *isc)
{
struct device *dev = isc->dev;
struct regmap *regmap = isc->regmap;
struct regmap_field *regs;
unsigned int i;
/*
* DPCEN-->GDCEN-->BLCEN-->WB-->CFA-->CC-->
* GAM-->VHXS-->CSC-->CBC-->SUB422-->SUB420
*/
const struct reg_field regfields[ISC_PIPE_LINE_NODE_NUM] = {
REG_FIELD(ISC_DPC_CTRL, 0, 0),
REG_FIELD(ISC_DPC_CTRL, 1, 1),
REG_FIELD(ISC_DPC_CTRL, 2, 2),
REG_FIELD(ISC_WB_CTRL, 0, 0),
REG_FIELD(ISC_CFA_CTRL, 0, 0),
REG_FIELD(ISC_CC_CTRL, 0, 0),
REG_FIELD(ISC_GAM_CTRL, 0, 0),
REG_FIELD(ISC_GAM_CTRL, 1, 1),
REG_FIELD(ISC_GAM_CTRL, 2, 2),
REG_FIELD(ISC_GAM_CTRL, 3, 3),
REG_FIELD(ISC_VHXS_CTRL, 0, 0),
REG_FIELD(ISC_CSC_CTRL + isc->offsets.csc, 0, 0),
REG_FIELD(ISC_CBC_CTRL + isc->offsets.cbc, 0, 0),
REG_FIELD(ISC_SUB422_CTRL + isc->offsets.sub422, 0, 0),
REG_FIELD(ISC_SUB420_CTRL + isc->offsets.sub420, 0, 0),
};
for (i = 0; i < ISC_PIPE_LINE_NODE_NUM; i++) {
regs = devm_regmap_field_alloc(dev, regmap, regfields[i]);
if (IS_ERR(regs))
return PTR_ERR(regs);
isc->pipeline[i] = regs;
}
return 0;
}
EXPORT_SYMBOL_GPL(atmel_isc_pipeline_init);
/* regmap configuration */
#define ATMEL_ISC_REG_MAX 0xd5c
const struct regmap_config atmel_isc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = ATMEL_ISC_REG_MAX,
};
EXPORT_SYMBOL_GPL(atmel_isc_regmap_config);
MODULE_AUTHOR("Songjun Wu");
MODULE_AUTHOR("Eugen Hristev");
MODULE_DESCRIPTION("Atmel ISC common code base");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/staging/media/deprecated/atmel/atmel-isc-base.c |
/*
* Copyright (c) 2016 Intel Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include <linux/uaccess.h>
#include <drm/drm_atomic.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
/**
* DOC: overview
*
* Color management or color space adjustments is supported through a set of 5
* properties on the &drm_crtc object. They are set up by calling
* drm_crtc_enable_color_mgmt().
*
* "DEGAMMA_LUT”:
* Blob property to set the degamma lookup table (LUT) mapping pixel data
* from the framebuffer before it is given to the transformation matrix.
* The data is interpreted as an array of &struct drm_color_lut elements.
* Hardware might choose not to use the full precision of the LUT elements
* nor use all the elements of the LUT (for example the hardware might
* choose to interpolate between LUT[0] and LUT[4]).
*
* Setting this to NULL (blob property value set to 0) means a
* linear/pass-thru gamma table should be used. This is generally the
* driver boot-up state too. Drivers can access this blob through
* &drm_crtc_state.degamma_lut.
*
* “DEGAMMA_LUT_SIZE”:
* Unsigned range property to give the size of the lookup table to be set
* on the DEGAMMA_LUT property (the size depends on the underlying
* hardware). If drivers support multiple LUT sizes then they should
* publish the largest size, and sub-sample smaller sized LUTs (e.g. for
* split-gamma modes) appropriately.
*
* “CTM”:
* Blob property to set the current transformation matrix (CTM) applied to
* pixel data after the lookup through the degamma LUT and before the
* lookup through the gamma LUT. The data is interpreted as a struct
* &drm_color_ctm.
*
* Setting this to NULL (blob property value set to 0) means a
* unit/pass-thru matrix should be used. This is generally the driver
* boot-up state too. Drivers can access the blob for the color conversion
* matrix through &drm_crtc_state.ctm.
*
* “GAMMA_LUT”:
* Blob property to set the gamma lookup table (LUT) mapping pixel data
* after the transformation matrix to data sent to the connector. The
* data is interpreted as an array of &struct drm_color_lut elements.
* Hardware might choose not to use the full precision of the LUT elements
* nor use all the elements of the LUT (for example the hardware might
* choose to interpolate between LUT[0] and LUT[4]).
*
* Setting this to NULL (blob property value set to 0) means a
* linear/pass-thru gamma table should be used. This is generally the
* driver boot-up state too. Drivers can access this blob through
* &drm_crtc_state.gamma_lut.
*
* Note that for mostly historical reasons stemming from Xorg heritage,
* this is also used to store the color map (also sometimes color lut, CLUT
* or color palette) for indexed formats like DRM_FORMAT_C8.
*
* “GAMMA_LUT_SIZE”:
* Unsigned range property to give the size of the lookup table to be set
* on the GAMMA_LUT property (the size depends on the underlying hardware).
* If drivers support multiple LUT sizes then they should publish the
* largest size, and sub-sample smaller sized LUTs (e.g. for split-gamma
* modes) appropriately.
*
* There is also support for a legacy gamma table, which is set up by calling
* drm_mode_crtc_set_gamma_size(). The DRM core will then alias the legacy gamma
* ramp with "GAMMA_LUT" or, if that is unavailable, "DEGAMMA_LUT".
*
* Support for different non RGB color encodings is controlled through
* &drm_plane specific COLOR_ENCODING and COLOR_RANGE properties. They
* are set up by calling drm_plane_create_color_properties().
*
* "COLOR_ENCODING":
* Optional plane enum property to support different non RGB
* color encodings. The driver can provide a subset of standard
* enum values supported by the DRM plane.
*
* "COLOR_RANGE":
* Optional plane enum property to support different non RGB
* color parameter ranges. The driver can provide a subset of
* standard enum values supported by the DRM plane.
*/
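/*
 * Editor's sketch of how userspace might program the GAMMA_LUT property
 * through libdrm; fd, crtc_id and gamma_lut_prop_id are assumed to have been
 * obtained already (the property id e.g. via drmModeObjectGetProperties()):
 *
 *	struct drm_color_lut lut[256] = { 0 };
 *	uint32_t blob_id;
 *	int i;
 *
 *	for (i = 0; i < 256; i++) {
 *		uint16_t v = (i << 8) | i;	// identity ramp
 *		lut[i].red = lut[i].green = lut[i].blue = v;
 *	}
 *	drmModeCreatePropertyBlob(fd, lut, sizeof(lut), &blob_id);
 *	drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC,
 *				 gamma_lut_prop_id, blob_id);
 *
 * Setting the property value back to 0 restores the linear/pass-thru table
 * described above.
 */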
/**
* drm_color_ctm_s31_32_to_qm_n
*
* @user_input: input value
* @m: number of integer bits, only support m <= 32, include the sign-bit
* @n: number of fractional bits, only support n <= 32
*
* Convert and clamp S31.32 sign-magnitude to Qm.n (signed 2's complement).
* The sign-bit BIT(m+n-1) and above are 0 for a positive value and 1 for a
* negative one. The range of values is [-2^(m-1), 2^(m-1) - 2^-n].
*
* For example
* A Q3.12 format number:
* - required bits: 3 + 12 = 15 bits
* - range: [-2^2, 2^2 - 2^-12]
*
* NOTE: m can be zero if all bits of precision are used for the fractional
* part, as in Q0.32
*/
u64 drm_color_ctm_s31_32_to_qm_n(u64 user_input, u32 m, u32 n)
{
u64 mag = (user_input & ~BIT_ULL(63)) >> (32 - n);
bool negative = !!(user_input & BIT_ULL(63));
s64 val;
WARN_ON(m > 32 || n > 32);
val = clamp_val(mag, 0, negative ?
BIT_ULL(n + m - 1) : BIT_ULL(n + m - 1) - 1);
return negative ? -val : val;
}
EXPORT_SYMBOL(drm_color_ctm_s31_32_to_qm_n);
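/*
 * Editor's worked example (a sketch, not kernel documentation): for hardware
 * CTM coefficients in Q3.12 (m = 3, n = 12), a userspace S31.32 value of 1.0
 * is 1ULL << 32, and -0.5 is BIT_ULL(63) | (1ULL << 31). Then:
 *
 *   drm_color_ctm_s31_32_to_qm_n(1ULL << 32, 3, 12)
 *     mag = (1ULL << 32) >> 20 = 4096, positive, clamped to at most 16383
 *     returns 4096  (0x1000 == 1.0 in Q3.12)
 *
 *   drm_color_ctm_s31_32_to_qm_n(BIT_ULL(63) | (1ULL << 31), 3, 12)
 *     mag = (1ULL << 31) >> 20 = 2048, negative
 *     returns -2048 (== -0.5 in Q3.12)
 */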
/**
* drm_crtc_enable_color_mgmt - enable color management properties
* @crtc: DRM CRTC
* @degamma_lut_size: the size of the degamma lut (before CSC)
* @has_ctm: whether to attach ctm_property for CSC matrix
* @gamma_lut_size: the size of the gamma lut (after CSC)
*
* This function lets the driver enable the color correction
* properties on a CRTC. This includes 3 properties (degamma, CSC and gamma)
* that userspace can set and 2 size properties to inform
* userspace of the LUT sizes. Each of the properties is
* optional. The gamma and degamma properties are only attached if
* their size is not 0, and ctm_property is only attached if has_ctm is
* true.
*/
void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
uint degamma_lut_size,
bool has_ctm,
uint gamma_lut_size)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *config = &dev->mode_config;
if (degamma_lut_size) {
drm_object_attach_property(&crtc->base,
config->degamma_lut_property, 0);
drm_object_attach_property(&crtc->base,
config->degamma_lut_size_property,
degamma_lut_size);
}
if (has_ctm)
drm_object_attach_property(&crtc->base,
config->ctm_property, 0);
if (gamma_lut_size) {
drm_object_attach_property(&crtc->base,
config->gamma_lut_property, 0);
drm_object_attach_property(&crtc->base,
config->gamma_lut_size_property,
gamma_lut_size);
}
}
EXPORT_SYMBOL(drm_crtc_enable_color_mgmt);
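/*
 * Editor's sketch of the typical driver-side setup (the sizes are
 * illustrative): a driver with no degamma LUT, a CSC matrix and a 256-entry
 * gamma LUT could call, during CRTC initialization:
 *
 *	drm_mode_crtc_set_gamma_size(crtc, 256);
 *	drm_crtc_enable_color_mgmt(crtc, 0, true, 256);
 *
 * so that the legacy gamma ioctls described below are routed through the
 * GAMMA_LUT property.
 */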
/**
* drm_mode_crtc_set_gamma_size - set the gamma table size
* @crtc: CRTC to set the gamma table size for
* @gamma_size: size of the gamma table
*
* Drivers which support gamma tables should set this to the supported gamma
* table size when initializing the CRTC. Currently the drm core only supports a
* fixed gamma table size.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size)
{
uint16_t *r_base, *g_base, *b_base;
int i;
crtc->gamma_size = gamma_size;
crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
GFP_KERNEL);
if (!crtc->gamma_store) {
crtc->gamma_size = 0;
return -ENOMEM;
}
r_base = crtc->gamma_store;
g_base = r_base + gamma_size;
b_base = g_base + gamma_size;
for (i = 0; i < gamma_size; i++) {
r_base[i] = i << 8;
g_base[i] = i << 8;
b_base[i] = i << 8;
}
return 0;
}
EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
/**
* drm_crtc_supports_legacy_gamma - does the crtc support legacy gamma correction table
* @crtc: CRTC object
*
* Returns true if the given crtc supports setting the legacy gamma
* correction table, false otherwise.
*/
static bool drm_crtc_supports_legacy_gamma(struct drm_crtc *crtc)
{
u32 gamma_id = crtc->dev->mode_config.gamma_lut_property->base.id;
u32 degamma_id = crtc->dev->mode_config.degamma_lut_property->base.id;
if (!crtc->gamma_size)
return false;
if (crtc->funcs->gamma_set)
return true;
return !!(drm_mode_obj_find_prop_id(&crtc->base, gamma_id) ||
drm_mode_obj_find_prop_id(&crtc->base, degamma_id));
}
/**
* drm_crtc_legacy_gamma_set - set the legacy gamma correction table
* @crtc: CRTC object
* @red: red correction table
* @green: green correction table
* @blue: blue correction table
* @size: size of the tables
* @ctx: lock acquire context
*
* Implements support for legacy gamma correction table for drivers
* that have set drm_crtc_funcs.gamma_set or that support color management
* through the DEGAMMA_LUT/GAMMA_LUT properties. See
* drm_crtc_enable_color_mgmt() and the containing chapter for
* how the atomic color management and gamma tables work.
*
* This function sets the gamma using drm_crtc_funcs.gamma_set if set, or
* alternatively using crtc color management properties.
*/
static int drm_crtc_legacy_gamma_set(struct drm_crtc *crtc,
u16 *red, u16 *green, u16 *blue,
u32 size,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = crtc->dev;
struct drm_atomic_state *state;
struct drm_crtc_state *crtc_state;
struct drm_property_blob *blob;
struct drm_color_lut *blob_data;
u32 gamma_id = dev->mode_config.gamma_lut_property->base.id;
u32 degamma_id = dev->mode_config.degamma_lut_property->base.id;
bool use_gamma_lut;
int i, ret = 0;
bool replaced;
if (crtc->funcs->gamma_set)
return crtc->funcs->gamma_set(crtc, red, green, blue, size, ctx);
if (drm_mode_obj_find_prop_id(&crtc->base, gamma_id))
use_gamma_lut = true;
else if (drm_mode_obj_find_prop_id(&crtc->base, degamma_id))
use_gamma_lut = false;
else
return -ENODEV;
state = drm_atomic_state_alloc(crtc->dev);
if (!state)
return -ENOMEM;
blob = drm_property_create_blob(dev,
sizeof(struct drm_color_lut) * size,
NULL);
if (IS_ERR(blob)) {
ret = PTR_ERR(blob);
blob = NULL;
goto fail;
}
/* Prepare GAMMA_LUT with the legacy values. */
blob_data = blob->data;
for (i = 0; i < size; i++) {
blob_data[i].red = red[i];
blob_data[i].green = green[i];
blob_data[i].blue = blue[i];
}
state->acquire_ctx = ctx;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
goto fail;
}
/* Set GAMMA_LUT and reset DEGAMMA_LUT and CTM */
replaced = drm_property_replace_blob(&crtc_state->degamma_lut,
use_gamma_lut ? NULL : blob);
replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
replaced |= drm_property_replace_blob(&crtc_state->gamma_lut,
use_gamma_lut ? blob : NULL);
crtc_state->color_mgmt_changed |= replaced;
ret = drm_atomic_commit(state);
fail:
drm_atomic_state_put(state);
drm_property_blob_put(blob);
return ret;
}
/**
* drm_mode_gamma_set_ioctl - set the gamma table
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* Set the gamma table of a CRTC to the one passed in by the user. Userspace can
* inquire the required gamma table size through drm_mode_gamma_get_ioctl.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc_lut *crtc_lut = data;
struct drm_crtc *crtc;
void *r_base, *g_base, *b_base;
int size;
struct drm_modeset_acquire_ctx ctx;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
crtc = drm_crtc_find(dev, file_priv, crtc_lut->crtc_id);
if (!crtc)
return -ENOENT;
if (!drm_crtc_supports_legacy_gamma(crtc))
return -ENOSYS;
/* memcpy into gamma store */
if (crtc_lut->gamma_size != crtc->gamma_size)
return -EINVAL;
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
size = crtc_lut->gamma_size * (sizeof(uint16_t));
r_base = crtc->gamma_store;
if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
ret = -EFAULT;
goto out;
}
g_base = r_base + size;
if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
ret = -EFAULT;
goto out;
}
b_base = g_base + size;
if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
ret = -EFAULT;
goto out;
}
ret = drm_crtc_legacy_gamma_set(crtc, r_base, g_base, b_base,
crtc->gamma_size, &ctx);
out:
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
return ret;
}
/**
* drm_mode_gamma_get_ioctl - get the gamma table
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* Copy the current gamma table into the storage provided. This also provides
* the gamma table size the driver expects, which can be used to size the
* allocated storage.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_gamma_get_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc_lut *crtc_lut = data;
struct drm_crtc *crtc;
void *r_base, *g_base, *b_base;
int size;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
crtc = drm_crtc_find(dev, file_priv, crtc_lut->crtc_id);
if (!crtc)
return -ENOENT;
/* memcpy out of gamma store */
if (crtc_lut->gamma_size != crtc->gamma_size)
return -EINVAL;
drm_modeset_lock(&crtc->mutex, NULL);
size = crtc_lut->gamma_size * (sizeof(uint16_t));
r_base = crtc->gamma_store;
if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
ret = -EFAULT;
goto out;
}
g_base = r_base + size;
if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
ret = -EFAULT;
goto out;
}
b_base = g_base + size;
if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
ret = -EFAULT;
goto out;
}
out:
drm_modeset_unlock(&crtc->mutex);
return ret;
}
static const char * const color_encoding_name[] = {
[DRM_COLOR_YCBCR_BT601] = "ITU-R BT.601 YCbCr",
[DRM_COLOR_YCBCR_BT709] = "ITU-R BT.709 YCbCr",
[DRM_COLOR_YCBCR_BT2020] = "ITU-R BT.2020 YCbCr",
};
static const char * const color_range_name[] = {
[DRM_COLOR_YCBCR_FULL_RANGE] = "YCbCr full range",
[DRM_COLOR_YCBCR_LIMITED_RANGE] = "YCbCr limited range",
};
/**
* drm_get_color_encoding_name - return a string for color encoding
* @encoding: color encoding to compute name of
*
 * In contrast to the other drm_get_*_name functions, this one returns a
 * const pointer and hence is threadsafe.
*/
const char *drm_get_color_encoding_name(enum drm_color_encoding encoding)
{
if (WARN_ON(encoding >= ARRAY_SIZE(color_encoding_name)))
return "unknown";
return color_encoding_name[encoding];
}
/**
* drm_get_color_range_name - return a string for color range
* @range: color range to compute name of
*
 * In contrast to the other drm_get_*_name functions, this one returns a
 * const pointer and hence is threadsafe.
*/
const char *drm_get_color_range_name(enum drm_color_range range)
{
if (WARN_ON(range >= ARRAY_SIZE(color_range_name)))
return "unknown";
return color_range_name[range];
}
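/*
 * Illustrative sketch (not part of the original source): because the returned
 * strings are const, a driver may use these helpers directly in debug output,
 * e.g. when dumping plane state:
 *
 *	drm_dbg_kms(plane->dev, "color encoding: %s, range: %s\n",
 *		    drm_get_color_encoding_name(plane->state->color_encoding),
 *		    drm_get_color_range_name(plane->state->color_range));
 */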
/**
* drm_plane_create_color_properties - color encoding related plane properties
* @plane: plane object
* @supported_encodings: bitfield indicating supported color encodings
 * @supported_ranges: bitfield indicating supported color ranges
* @default_encoding: default color encoding
* @default_range: default color range
*
* Create and attach plane specific COLOR_ENCODING and COLOR_RANGE
* properties to @plane. The supported encodings and ranges should
* be provided in supported_encodings and supported_ranges bitmasks.
 * Each bit set in a bitmask indicates that the enum value matching that
 * bit's position is supported.
*/
int drm_plane_create_color_properties(struct drm_plane *plane,
u32 supported_encodings,
u32 supported_ranges,
enum drm_color_encoding default_encoding,
enum drm_color_range default_range)
{
struct drm_device *dev = plane->dev;
struct drm_property *prop;
struct drm_prop_enum_list enum_list[max_t(int, DRM_COLOR_ENCODING_MAX,
DRM_COLOR_RANGE_MAX)];
int i, len;
if (WARN_ON(supported_encodings == 0 ||
(supported_encodings & -BIT(DRM_COLOR_ENCODING_MAX)) != 0 ||
(supported_encodings & BIT(default_encoding)) == 0))
return -EINVAL;
if (WARN_ON(supported_ranges == 0 ||
(supported_ranges & -BIT(DRM_COLOR_RANGE_MAX)) != 0 ||
(supported_ranges & BIT(default_range)) == 0))
return -EINVAL;
len = 0;
for (i = 0; i < DRM_COLOR_ENCODING_MAX; i++) {
if ((supported_encodings & BIT(i)) == 0)
continue;
enum_list[len].type = i;
enum_list[len].name = color_encoding_name[i];
len++;
}
prop = drm_property_create_enum(dev, 0, "COLOR_ENCODING",
enum_list, len);
if (!prop)
return -ENOMEM;
plane->color_encoding_property = prop;
drm_object_attach_property(&plane->base, prop, default_encoding);
if (plane->state)
plane->state->color_encoding = default_encoding;
len = 0;
for (i = 0; i < DRM_COLOR_RANGE_MAX; i++) {
if ((supported_ranges & BIT(i)) == 0)
continue;
enum_list[len].type = i;
enum_list[len].name = color_range_name[i];
len++;
}
prop = drm_property_create_enum(dev, 0, "COLOR_RANGE",
enum_list, len);
if (!prop)
return -ENOMEM;
plane->color_range_property = prop;
drm_object_attach_property(&plane->base, prop, default_range);
if (plane->state)
plane->state->color_range = default_range;
return 0;
}
EXPORT_SYMBOL(drm_plane_create_color_properties);
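/*
 * Illustrative sketch (not part of the original source): a driver whose
 * planes accept BT.601 and BT.709 in both ranges might attach the properties
 * during plane initialization like this, with error handling abbreviated:
 *
 *	ret = drm_plane_create_color_properties(plane,
 *						BIT(DRM_COLOR_YCBCR_BT601) |
 *						BIT(DRM_COLOR_YCBCR_BT709),
 *						BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
 *						BIT(DRM_COLOR_YCBCR_FULL_RANGE),
 *						DRM_COLOR_YCBCR_BT709,
 *						DRM_COLOR_YCBCR_LIMITED_RANGE);
 *	if (ret)
 *		return ret;
 */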
/**
* drm_color_lut_check - check validity of lookup table
* @lut: property blob containing LUT to check
* @tests: bitmask of tests to run
*
* Helper to check whether a userspace-provided lookup table is valid and
* satisfies hardware requirements. Drivers pass a bitmask indicating which of
* the tests in &drm_color_lut_tests should be performed.
*
* Returns 0 on success, -EINVAL on failure.
*/
int drm_color_lut_check(const struct drm_property_blob *lut, u32 tests)
{
const struct drm_color_lut *entry;
int i;
if (!lut || !tests)
return 0;
entry = lut->data;
for (i = 0; i < drm_color_lut_size(lut); i++) {
if (tests & DRM_COLOR_LUT_EQUAL_CHANNELS) {
if (entry[i].red != entry[i].blue ||
entry[i].red != entry[i].green) {
DRM_DEBUG_KMS("All LUT entries must have equal r/g/b\n");
return -EINVAL;
}
}
if (i > 0 && tests & DRM_COLOR_LUT_NON_DECREASING) {
if (entry[i].red < entry[i - 1].red ||
entry[i].green < entry[i - 1].green ||
entry[i].blue < entry[i - 1].blue) {
DRM_DEBUG_KMS("LUT entries must never decrease.\n");
return -EINVAL;
}
}
}
return 0;
}
EXPORT_SYMBOL(drm_color_lut_check);
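/*
 * Illustrative sketch (not part of the original source): a driver whose gamma
 * hardware requires identical channels and a monotonic ramp might validate
 * the userspace LUT from its CRTC atomic_check hook roughly as follows:
 *
 *	if (crtc_state->gamma_lut) {
 *		ret = drm_color_lut_check(crtc_state->gamma_lut,
 *					  DRM_COLOR_LUT_EQUAL_CHANNELS |
 *					  DRM_COLOR_LUT_NON_DECREASING);
 *		if (ret)
 *			return ret;
 *	}
 */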
| linux-master | drivers/gpu/drm/drm_color_mgmt.c |
/*
* Copyright (c) 2006-2008 Intel Corporation
* Copyright (c) 2007 Dave Airlie <[email protected]>
*
* DRM core CRTC related functions
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*
* Authors:
* Keith Packard
* Eric Anholt <[email protected]>
* Dave Airlie <[email protected]>
* Jesse Barnes <[email protected]>
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/dynamic_debug.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "drm_crtc_helper_internal.h"
DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
"DRM_UT_CORE",
"DRM_UT_DRIVER",
"DRM_UT_KMS",
"DRM_UT_PRIME",
"DRM_UT_ATOMIC",
"DRM_UT_VBL",
"DRM_UT_STATE",
"DRM_UT_LEASE",
"DRM_UT_DP",
"DRM_UT_DRMRES");
/**
* DOC: overview
*
* The CRTC modeset helper library provides a default set_config implementation
* in drm_crtc_helper_set_config(). Plus a few other convenience functions using
* the same callbacks which drivers can use to e.g. restore the modeset
* configuration on resume with drm_helper_resume_force_mode().
*
* Note that this helper library doesn't track the current power state of CRTCs
* and encoders. It can call callbacks like &drm_encoder_helper_funcs.dpms even
* though the hardware is already in the desired state. This deficiency has been
* fixed in the atomic helpers.
*
* The driver callbacks are mostly compatible with the atomic modeset helpers,
* except for the handling of the primary plane: Atomic helpers require that the
* primary plane is implemented as a real standalone plane and not directly tied
* to the CRTC state. For easier transition this library provides functions to
* implement the old semantics required by the CRTC helpers using the new plane
* and atomic helper callbacks.
*
* Drivers are strongly urged to convert to the atomic helpers (by way of first
* converting to the plane helpers). New drivers must not use these functions
* but need to implement the atomic interface instead, potentially using the
* atomic helpers for that.
*
* These legacy modeset helpers use the same function table structures as
* all other modesetting helpers. See the documentation for struct
* &drm_crtc_helper_funcs, &struct drm_encoder_helper_funcs and struct
* &drm_connector_helper_funcs.
*/
/**
* drm_helper_encoder_in_use - check if a given encoder is in use
* @encoder: encoder to check
*
 * Checks whether @encoder is in use by any connector in the current mode setting
 * output configuration. This doesn't mean that it is actually enabled since
* the DPMS state is tracked separately.
*
* Returns:
* True if @encoder is used, false otherwise.
*/
bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
{
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
struct drm_device *dev = encoder->dev;
WARN_ON(drm_drv_uses_atomic_modeset(dev));
/*
* We can expect this mutex to be locked if we are not panicking.
* Locking is currently fubar in the panic handler.
*/
if (!oops_in_progress) {
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
}
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->encoder == encoder) {
drm_connector_list_iter_end(&conn_iter);
return true;
}
}
drm_connector_list_iter_end(&conn_iter);
return false;
}
EXPORT_SYMBOL(drm_helper_encoder_in_use);
/**
* drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
* @crtc: CRTC to check
*
 * Checks whether @crtc is in use by any connector in the current mode setting
 * output configuration. This doesn't mean that it is actually enabled since
* the DPMS state is tracked separately.
*
* Returns:
* True if @crtc is used, false otherwise.
*/
bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
struct drm_device *dev = crtc->dev;
WARN_ON(drm_drv_uses_atomic_modeset(dev));
/*
* We can expect this mutex to be locked if we are not panicking.
* Locking is currently fubar in the panic handler.
*/
if (!oops_in_progress)
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
drm_for_each_encoder(encoder, dev)
if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
return true;
return false;
}
EXPORT_SYMBOL(drm_helper_crtc_in_use);
static void
drm_encoder_disable(struct drm_encoder *encoder)
{
const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
if (!encoder_funcs)
return;
if (encoder_funcs->disable)
(*encoder_funcs->disable)(encoder);
else if (encoder_funcs->dpms)
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
}
static void __drm_helper_disable_unused_functions(struct drm_device *dev)
{
struct drm_encoder *encoder;
struct drm_crtc *crtc;
drm_warn_on_modeset_not_all_locked(dev);
drm_for_each_encoder(encoder, dev) {
if (!drm_helper_encoder_in_use(encoder)) {
drm_encoder_disable(encoder);
/* disconnect encoder from any connector */
encoder->crtc = NULL;
}
}
drm_for_each_crtc(crtc, dev) {
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled) {
if (crtc_funcs->disable)
(*crtc_funcs->disable)(crtc);
else
(*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
crtc->primary->fb = NULL;
}
}
}
/**
* drm_helper_disable_unused_functions - disable unused objects
* @dev: DRM device
*
* This function walks through the entire mode setting configuration of @dev. It
* will remove any CRTC links of unused encoders and encoder links of
* disconnected connectors. Then it will disable all unused encoders and CRTCs
* either by calling their disable callback if available or by calling their
* dpms callback with DRM_MODE_DPMS_OFF.
*
* NOTE:
*
* This function is part of the legacy modeset helper library and will cause
* major confusion with atomic drivers. This is because atomic helpers guarantee
* to never call ->disable() hooks on a disabled function, or ->enable() hooks
 * on an enabled function. drm_helper_disable_unused_functions() on the other
* hand throws such guarantees into the wind and calls disable hooks
* unconditionally on unused functions.
*/
void drm_helper_disable_unused_functions(struct drm_device *dev)
{
WARN_ON(drm_drv_uses_atomic_modeset(dev));
drm_modeset_lock_all(dev);
__drm_helper_disable_unused_functions(dev);
drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_helper_disable_unused_functions);
/*
* Check the CRTC we're going to map each output to vs. its current
* CRTC. If they don't match, we have to disable the output and the CRTC
* since the driver will have to re-route things.
*/
static void
drm_crtc_prepare_encoders(struct drm_device *dev)
{
const struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_encoder *encoder;
drm_for_each_encoder(encoder, dev) {
encoder_funcs = encoder->helper_private;
if (!encoder_funcs)
continue;
/* Disable unused encoders */
if (encoder->crtc == NULL)
drm_encoder_disable(encoder);
}
}
/**
* drm_crtc_helper_set_mode - internal helper to set a mode
* @crtc: CRTC to program
* @mode: mode to use
* @x: horizontal offset into the surface
* @y: vertical offset into the surface
* @old_fb: old framebuffer, for cleanup
*
* Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
* to fixup or reject the mode prior to trying to set it. This is an internal
* helper that drivers could e.g. use to update properties that require the
* entire output pipe to be disabled and re-enabled in a new configuration. For
* example for changing whether audio is enabled on a hdmi link or for changing
* panel fitter or dither attributes. It is also called by the
* drm_crtc_helper_set_config() helper function to drive the mode setting
* sequence.
*
* Returns:
* True if the mode was set successfully, false otherwise.
*/
bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
const struct drm_encoder_helper_funcs *encoder_funcs;
int saved_x, saved_y;
bool saved_enabled;
struct drm_encoder *encoder;
bool ret = true;
WARN_ON(drm_drv_uses_atomic_modeset(dev));
drm_warn_on_modeset_not_all_locked(dev);
saved_enabled = crtc->enabled;
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled)
return true;
adjusted_mode = drm_mode_duplicate(dev, mode);
if (!adjusted_mode) {
crtc->enabled = saved_enabled;
return false;
}
drm_mode_init(&saved_mode, &crtc->mode);
drm_mode_init(&saved_hwmode, &crtc->hwmode);
saved_x = crtc->x;
saved_y = crtc->y;
/* Update crtc values up front so the driver can rely on them for mode
* setting.
*/
drm_mode_copy(&crtc->mode, mode);
crtc->x = x;
crtc->y = y;
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
drm_for_each_encoder(encoder, dev) {
if (encoder->crtc != crtc)
continue;
encoder_funcs = encoder->helper_private;
if (!encoder_funcs)
continue;
if (encoder_funcs->mode_fixup) {
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
adjusted_mode))) {
DRM_DEBUG_KMS("Encoder fixup failed\n");
goto done;
}
}
}
if (crtc_funcs->mode_fixup) {
if (!(ret = crtc_funcs->mode_fixup(crtc, mode,
adjusted_mode))) {
DRM_DEBUG_KMS("CRTC fixup failed\n");
goto done;
}
}
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
drm_mode_copy(&crtc->hwmode, adjusted_mode);
/* Prepare the encoders and CRTCs before setting the mode. */
drm_for_each_encoder(encoder, dev) {
if (encoder->crtc != crtc)
continue;
encoder_funcs = encoder->helper_private;
if (!encoder_funcs)
continue;
/* Disable the encoders as the first thing we do. */
if (encoder_funcs->prepare)
encoder_funcs->prepare(encoder);
}
drm_crtc_prepare_encoders(dev);
crtc_funcs->prepare(crtc);
/* Set up the DPLL and any encoders state that needs to adjust or depend
* on the DPLL.
*/
ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
if (!ret)
goto done;
drm_for_each_encoder(encoder, dev) {
if (encoder->crtc != crtc)
continue;
encoder_funcs = encoder->helper_private;
if (!encoder_funcs)
continue;
DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%s]\n",
encoder->base.id, encoder->name, mode->name);
if (encoder_funcs->mode_set)
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
crtc_funcs->commit(crtc);
drm_for_each_encoder(encoder, dev) {
if (encoder->crtc != crtc)
continue;
encoder_funcs = encoder->helper_private;
if (!encoder_funcs)
continue;
if (encoder_funcs->commit)
encoder_funcs->commit(encoder);
}
/* Calculate and store various constants which
* are later needed by vblank and swap-completion
* timestamping. They are derived from true hwmode.
*/
drm_calc_timestamping_constants(crtc, &crtc->hwmode);
/* FIXME: add subpixel order */
done:
drm_mode_destroy(dev, adjusted_mode);
if (!ret) {
crtc->enabled = saved_enabled;
drm_mode_copy(&crtc->mode, &saved_mode);
drm_mode_copy(&crtc->hwmode, &saved_hwmode);
crtc->x = saved_x;
crtc->y = saved_y;
}
return ret;
}
EXPORT_SYMBOL(drm_crtc_helper_set_mode);
/**
* drm_crtc_helper_atomic_check() - Helper to check CRTC atomic-state
* @crtc: CRTC to check
* @state: atomic state object
*
* Provides a default CRTC-state check handler for CRTCs that only have
* one primary plane attached to it.
*
* This is often the case for the CRTC of simple framebuffers. See also
* drm_plane_helper_atomic_check() for the respective plane-state check
* helper function.
*
* RETURNS:
* Zero on success, or an errno code otherwise.
*/
int drm_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (!new_crtc_state->enable)
return 0;
return drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
}
EXPORT_SYMBOL(drm_crtc_helper_atomic_check);
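/*
 * Illustrative sketch (not part of the original source): a simple driver with
 * a single primary plane per CRTC can plug this helper straight into its CRTC
 * helper funcs; the structure name is an assumption:
 *
 *	static const struct drm_crtc_helper_funcs my_crtc_helper_funcs = {
 *		.atomic_check = drm_crtc_helper_atomic_check,
 *	};
 */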
static void
drm_crtc_helper_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_encoder *encoder;
/* Decouple all encoders and their attached connectors from this crtc */
drm_for_each_encoder(encoder, dev) {
struct drm_connector_list_iter conn_iter;
if (encoder->crtc != crtc)
continue;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->encoder != encoder)
continue;
connector->encoder = NULL;
/*
* drm_helper_disable_unused_functions() ought to be
* doing this, but since we've decoupled the encoder
* from the connector above, the required connection
* between them is henceforth no longer available.
*/
connector->dpms = DRM_MODE_DPMS_OFF;
/* we keep a reference while the encoder is bound */
drm_connector_put(connector);
}
drm_connector_list_iter_end(&conn_iter);
}
__drm_helper_disable_unused_functions(dev);
}
/*
* For connectors that support multiple encoders, either the
* .atomic_best_encoder() or .best_encoder() operation must be implemented.
*/
struct drm_encoder *
drm_connector_get_single_encoder(struct drm_connector *connector)
{
struct drm_encoder *encoder;
WARN_ON(hweight32(connector->possible_encoders) > 1);
drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
}
/**
* drm_crtc_helper_set_config - set a new config from userspace
* @set: mode set configuration
* @ctx: lock acquire context, not used here
*
 * The drm_crtc_helper_set_config() helper function implements the
* &drm_crtc_funcs.set_config callback for drivers using the legacy CRTC
* helpers.
*
* It first tries to locate the best encoder for each connector by calling the
 * connector's &drm_connector_helper_funcs.best_encoder helper operation.
*
* After locating the appropriate encoders, the helper function will call the
* mode_fixup encoder and CRTC helper operations to adjust the requested mode,
* or reject it completely in which case an error will be returned to the
* application. If the new configuration after mode adjustment is identical to
* the current configuration the helper function will return without performing
* any other operation.
*
* If the adjusted mode is identical to the current mode but changes to the
* frame buffer need to be applied, the drm_crtc_helper_set_config() function
* will call the CRTC &drm_crtc_helper_funcs.mode_set_base helper operation.
*
* If the adjusted mode differs from the current mode, or if the
* ->mode_set_base() helper operation is not provided, the helper function
* performs a full mode set sequence by calling the ->prepare(), ->mode_set()
* and ->commit() CRTC and encoder helper operations, in that order.
* Alternatively it can also use the dpms and disable helper operations. For
* details see &struct drm_crtc_helper_funcs and struct
* &drm_encoder_helper_funcs.
*
* This function is deprecated. New drivers must implement atomic modeset
* support, for which this function is unsuitable. Instead drivers should use
* drm_atomic_helper_set_config().
*
* Returns:
* Returns 0 on success, negative errno numbers on failure.
*/
int drm_crtc_helper_set_config(struct drm_mode_set *set,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev;
struct drm_crtc **save_encoder_crtcs, *new_crtc;
struct drm_encoder **save_connector_encoders, *new_encoder, *encoder;
bool mode_changed = false; /* if true do a full mode set */
bool fb_changed = false; /* if true and !mode_changed just do a flip */
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
int count = 0, ro, fail = 0;
const struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_mode_set save_set;
int ret;
int i;
DRM_DEBUG_KMS("\n");
BUG_ON(!set);
BUG_ON(!set->crtc);
BUG_ON(!set->crtc->helper_private);
/* Enforce sane interface api - has been abused by the fb helper. */
BUG_ON(!set->mode && set->fb);
BUG_ON(set->fb && set->num_connectors == 0);
crtc_funcs = set->crtc->helper_private;
dev = set->crtc->dev;
WARN_ON(drm_drv_uses_atomic_modeset(dev));
if (!set->mode)
set->fb = NULL;
if (set->fb) {
DRM_DEBUG_KMS("[CRTC:%d:%s] [FB:%d] #connectors=%d (x y) (%i %i)\n",
set->crtc->base.id, set->crtc->name,
set->fb->base.id,
(int)set->num_connectors, set->x, set->y);
} else {
DRM_DEBUG_KMS("[CRTC:%d:%s] [NOFB]\n",
set->crtc->base.id, set->crtc->name);
drm_crtc_helper_disable(set->crtc);
return 0;
}
drm_warn_on_modeset_not_all_locked(dev);
/*
* Allocate space for the backup of all (non-pointer) encoder and
* connector data.
*/
save_encoder_crtcs = kcalloc(dev->mode_config.num_encoder,
sizeof(struct drm_crtc *), GFP_KERNEL);
if (!save_encoder_crtcs)
return -ENOMEM;
save_connector_encoders = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_encoder *), GFP_KERNEL);
if (!save_connector_encoders) {
kfree(save_encoder_crtcs);
return -ENOMEM;
}
/*
* Copy data. Note that driver private data is not affected.
* Should anything bad happen only the expected state is
* restored, not the drivers personal bookkeeping.
*/
count = 0;
drm_for_each_encoder(encoder, dev) {
save_encoder_crtcs[count++] = encoder->crtc;
}
count = 0;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
save_connector_encoders[count++] = connector->encoder;
drm_connector_list_iter_end(&conn_iter);
save_set.crtc = set->crtc;
save_set.mode = &set->crtc->mode;
save_set.x = set->crtc->x;
save_set.y = set->crtc->y;
save_set.fb = set->crtc->primary->fb;
/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
if (set->crtc->primary->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
if (set->crtc->primary->fb == NULL) {
DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
mode_changed = true;
} else if (set->fb->format != set->crtc->primary->fb->format) {
mode_changed = true;
} else
fb_changed = true;
}
if (set->x != set->crtc->x || set->y != set->crtc->y)
fb_changed = true;
if (!drm_mode_equal(set->mode, &set->crtc->mode)) {
DRM_DEBUG_KMS("modes are different, full mode set\n");
drm_mode_debug_printmodeline(&set->crtc->mode);
drm_mode_debug_printmodeline(set->mode);
mode_changed = true;
}
/* take a reference on all unbound connectors in set, reuse the
* already taken reference for bound connectors
*/
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro]->encoder)
continue;
drm_connector_get(set->connectors[ro]);
}
/* a) traverse passed in connector list and get encoders for them */
count = 0;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
const struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
new_encoder = connector->encoder;
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro] == connector) {
if (connector_funcs->best_encoder)
new_encoder = connector_funcs->best_encoder(connector);
else
new_encoder = drm_connector_get_single_encoder(connector);
/* if we can't get an encoder for a connector
we are setting now - then fail */
if (new_encoder == NULL)
					/* don't break so fail path works correctly */
fail = 1;
if (connector->dpms != DRM_MODE_DPMS_ON) {
DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
mode_changed = true;
}
break;
}
}
if (new_encoder != connector->encoder) {
DRM_DEBUG_KMS("encoder changed, full mode switch\n");
mode_changed = true;
/* If the encoder is reused for another connector, then
* the appropriate crtc will be set later.
*/
if (connector->encoder)
connector->encoder->crtc = NULL;
connector->encoder = new_encoder;
}
}
drm_connector_list_iter_end(&conn_iter);
if (fail) {
ret = -EINVAL;
goto fail;
}
count = 0;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (!connector->encoder)
continue;
if (connector->encoder->crtc == set->crtc)
new_crtc = NULL;
else
new_crtc = connector->encoder->crtc;
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro] == connector)
new_crtc = set->crtc;
}
/* Make sure the new CRTC will work with the encoder */
if (new_crtc &&
!drm_encoder_crtc_ok(connector->encoder, new_crtc)) {
ret = -EINVAL;
drm_connector_list_iter_end(&conn_iter);
goto fail;
}
if (new_crtc != connector->encoder->crtc) {
DRM_DEBUG_KMS("crtc changed, full mode switch\n");
mode_changed = true;
connector->encoder->crtc = new_crtc;
}
if (new_crtc) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d:%s]\n",
connector->base.id, connector->name,
new_crtc->base.id, new_crtc->name);
} else {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
connector->base.id, connector->name);
}
}
drm_connector_list_iter_end(&conn_iter);
/* mode_set_base is not a required function */
if (fb_changed && !crtc_funcs->mode_set_base)
mode_changed = true;
if (mode_changed) {
if (drm_helper_crtc_in_use(set->crtc)) {
DRM_DEBUG_KMS("attempting to set mode from"
" userspace\n");
drm_mode_debug_printmodeline(set->mode);
set->crtc->primary->fb = set->fb;
if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
set->x, set->y,
save_set.fb)) {
DRM_ERROR("failed to set mode on [CRTC:%d:%s]\n",
set->crtc->base.id, set->crtc->name);
set->crtc->primary->fb = save_set.fb;
ret = -EINVAL;
goto fail;
}
DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
for (i = 0; i < set->num_connectors; i++) {
DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
set->connectors[i]->name);
set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
}
}
__drm_helper_disable_unused_functions(dev);
} else if (fb_changed) {
set->crtc->x = set->x;
set->crtc->y = set->y;
set->crtc->primary->fb = set->fb;
ret = crtc_funcs->mode_set_base(set->crtc,
set->x, set->y, save_set.fb);
if (ret != 0) {
set->crtc->x = save_set.x;
set->crtc->y = save_set.y;
set->crtc->primary->fb = save_set.fb;
goto fail;
}
}
kfree(save_connector_encoders);
kfree(save_encoder_crtcs);
return 0;
fail:
/* Restore all previous data. */
count = 0;
drm_for_each_encoder(encoder, dev) {
encoder->crtc = save_encoder_crtcs[count++];
}
count = 0;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
connector->encoder = save_connector_encoders[count++];
drm_connector_list_iter_end(&conn_iter);
/* after fail drop reference on all unbound connectors in set, let
* bound connectors keep their reference
*/
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro]->encoder)
continue;
drm_connector_put(set->connectors[ro]);
}
/* Try to restore the config */
if (mode_changed &&
!drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
save_set.y, save_set.fb))
DRM_ERROR("failed to restore config after modeset failure\n");
kfree(save_connector_encoders);
kfree(save_encoder_crtcs);
return ret;
}
EXPORT_SYMBOL(drm_crtc_helper_set_config);
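/*
 * Illustrative sketch (not part of the original source): a legacy (non-atomic)
 * driver typically wires this helper into its CRTC funcs; names other than the
 * DRM symbols are assumptions:
 *
 *	static const struct drm_crtc_funcs my_legacy_crtc_funcs = {
 *		.set_config = drm_crtc_helper_set_config,
 *		.destroy = drm_crtc_cleanup,
 *	};
 */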
static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
{
int dpms = DRM_MODE_DPMS_OFF;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
struct drm_device *dev = encoder->dev;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
if (connector->encoder == encoder)
if (connector->dpms < dpms)
dpms = connector->dpms;
drm_connector_list_iter_end(&conn_iter);
return dpms;
}
/* Helper which handles bridge ordering around encoder dpms */
static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode)
{
const struct drm_encoder_helper_funcs *encoder_funcs;
encoder_funcs = encoder->helper_private;
if (!encoder_funcs)
return;
if (encoder_funcs->dpms)
encoder_funcs->dpms(encoder, mode);
}
static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
{
int dpms = DRM_MODE_DPMS_OFF;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
struct drm_device *dev = crtc->dev;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
if (connector->encoder && connector->encoder->crtc == crtc)
if (connector->dpms < dpms)
dpms = connector->dpms;
drm_connector_list_iter_end(&conn_iter);
return dpms;
}
/**
* drm_helper_connector_dpms() - connector dpms helper implementation
* @connector: affected connector
* @mode: DPMS mode
*
* The drm_helper_connector_dpms() helper function implements the
* &drm_connector_funcs.dpms callback for drivers using the legacy CRTC
* helpers.
*
* This is the main helper function provided by the CRTC helper framework for
* implementing the DPMS connector attribute. It computes the new desired DPMS
* state for all encoders and CRTCs in the output mesh and calls the
* &drm_crtc_helper_funcs.dpms and &drm_encoder_helper_funcs.dpms callbacks
* provided by the driver.
*
* This function is deprecated. New drivers must implement atomic modeset
* support, where DPMS is handled in the DRM core.
*
* Returns:
* Always returns 0.
*/
int drm_helper_connector_dpms(struct drm_connector *connector, int mode)
{
struct drm_encoder *encoder = connector->encoder;
struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
WARN_ON(drm_drv_uses_atomic_modeset(connector->dev));
if (mode == connector->dpms)
return 0;
old_dpms = connector->dpms;
connector->dpms = mode;
if (encoder)
encoder_dpms = drm_helper_choose_encoder_dpms(encoder);
/* from off to on, do crtc then encoder */
if (mode < old_dpms) {
if (crtc) {
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
if (crtc_funcs->dpms)
(*crtc_funcs->dpms) (crtc,
drm_helper_choose_crtc_dpms(crtc));
}
if (encoder)
drm_helper_encoder_dpms(encoder, encoder_dpms);
}
/* from on to off, do encoder then crtc */
if (mode > old_dpms) {
if (encoder)
drm_helper_encoder_dpms(encoder, encoder_dpms);
if (crtc) {
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
if (crtc_funcs->dpms)
(*crtc_funcs->dpms) (crtc,
drm_helper_choose_crtc_dpms(crtc));
}
}
return 0;
}
EXPORT_SYMBOL(drm_helper_connector_dpms);
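/*
 * Illustrative sketch (not part of the original source): legacy drivers
 * usually install this helper directly as their connector DPMS callback;
 * names other than the DRM symbols are assumptions:
 *
 *	static const struct drm_connector_funcs my_connector_funcs = {
 *		.dpms = drm_helper_connector_dpms,
 *		.fill_modes = drm_helper_probe_single_connector_modes,
 *		.destroy = drm_connector_cleanup,
 *	};
 */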
/**
* drm_helper_resume_force_mode - force-restore mode setting configuration
* @dev: drm_device which should be restored
*
* Drivers which use the mode setting helpers can use this function to
* force-restore the mode setting configuration e.g. on resume or when something
* else might have trampled over the hw state (like some overzealous old BIOSen
* tended to do).
*
 * This helper doesn't provide an error return value since restoring the old
* config should never fail due to resource allocation issues since the driver
* has successfully set the restored configuration already. Hence this should
* boil down to the equivalent of a few dpms on calls, which also don't provide
* an error code.
*
* Drivers where simply restoring an old configuration again might fail (e.g.
* due to slight differences in allocating shared resources when the
* configuration is restored in a different order than when userspace set it up)
* need to use their own restore logic.
*
* This function is deprecated. New drivers should implement atomic mode-
* setting and use the atomic suspend/resume helpers.
*
* See also:
* drm_atomic_helper_suspend(), drm_atomic_helper_resume()
*/
void drm_helper_resume_force_mode(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_encoder *encoder;
const struct drm_crtc_helper_funcs *crtc_funcs;
int encoder_dpms;
bool ret;
WARN_ON(drm_drv_uses_atomic_modeset(dev));
drm_modeset_lock_all(dev);
drm_for_each_crtc(crtc, dev) {
if (!crtc->enabled)
continue;
ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->primary->fb);
/* Restoring the old config should never fail! */
if (ret == false)
DRM_ERROR("failed to set mode on crtc %p\n", crtc);
/* Turn off outputs that were already powered off */
if (drm_helper_choose_crtc_dpms(crtc)) {
drm_for_each_encoder(encoder, dev) {
				if (encoder->crtc != crtc)
continue;
encoder_dpms = drm_helper_choose_encoder_dpms(
encoder);
drm_helper_encoder_dpms(encoder, encoder_dpms);
}
crtc_funcs = crtc->helper_private;
if (crtc_funcs->dpms)
(*crtc_funcs->dpms) (crtc,
drm_helper_choose_crtc_dpms(crtc));
}
}
/* disable the unused connectors while restoring the modesetting */
__drm_helper_disable_unused_functions(dev);
drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
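/*
 * Illustrative sketch (not part of the original source): a legacy driver
 * might call this at the end of its system resume callback, once clocks and
 * hardware state have been restored; my_pm_resume() and the drvdata layout
 * are assumptions about the driver:
 *
 *	static int my_pm_resume(struct device *dev)
 *	{
 *		struct drm_device *drm = dev_get_drvdata(dev);
 *
 *		drm_helper_resume_force_mode(drm);
 *
 *		return 0;
 *	}
 */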
/**
* drm_helper_force_disable_all - Forcibly turn off all enabled CRTCs
* @dev: DRM device whose CRTCs to turn off
*
* Drivers may want to call this on unload to ensure that all displays are
* unlit and the GPU is in a consistent, low power state. Takes modeset locks.
*
* Note: This should only be used by non-atomic legacy drivers. For an atomic
* version look at drm_atomic_helper_shutdown().
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_helper_force_disable_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
int ret = 0;
drm_modeset_lock_all(dev);
drm_for_each_crtc(crtc, dev)
if (crtc->enabled) {
struct drm_mode_set set = {
.crtc = crtc,
};
ret = drm_mode_set_config_internal(&set);
if (ret)
goto out;
}
out:
drm_modeset_unlock_all(dev);
return ret;
}
EXPORT_SYMBOL(drm_helper_force_disable_all);
| linux-master | drivers/gpu/drm/drm_crtc_helper.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/module.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
/**
* DOC: overview
*
* This library provides helper functions for gem objects backed by
* ttm.
*/
/**
* drm_gem_ttm_print_info() - Print &ttm_buffer_object info for debugfs
* @p: DRM printer
* @indent: Tab indentation level
* @gem: GEM object
*
* This function can be used as &drm_gem_object_funcs.print_info
* callback.
*/
void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *gem)
{
static const char * const plname[] = {
[ TTM_PL_SYSTEM ] = "system",
[ TTM_PL_TT ] = "tt",
[ TTM_PL_VRAM ] = "vram",
[ TTM_PL_PRIV ] = "priv",
[ 16 ] = "cached",
[ 17 ] = "uncached",
[ 18 ] = "wc",
[ 19 ] = "contig",
[ 21 ] = "pinned", /* NO_EVICT */
[ 22 ] = "topdown",
};
const struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
drm_printf_indent(p, indent, "placement=");
drm_print_bits(p, bo->resource->placement, plname, ARRAY_SIZE(plname));
drm_printf(p, "\n");
if (bo->resource->bus.is_iomem)
drm_printf_indent(p, indent, "bus.offset=%lx\n",
(unsigned long)bo->resource->bus.offset);
}
EXPORT_SYMBOL(drm_gem_ttm_print_info);
/**
* drm_gem_ttm_vmap() - vmap &ttm_buffer_object
* @gem: GEM object.
* @map: [out] returns the dma-buf mapping.
*
* Maps a GEM object with ttm_bo_vmap(). This function can be used as
* &drm_gem_object_funcs.vmap callback.
*
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
int drm_gem_ttm_vmap(struct drm_gem_object *gem,
struct iosys_map *map)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
return ttm_bo_vmap(bo, map);
}
EXPORT_SYMBOL(drm_gem_ttm_vmap);
/**
* drm_gem_ttm_vunmap() - vunmap &ttm_buffer_object
* @gem: GEM object.
* @map: dma-buf mapping.
*
* Unmaps a GEM object with ttm_bo_vunmap(). This function can be used as
 * &drm_gem_object_funcs.vunmap callback.
*/
void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
struct iosys_map *map)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
ttm_bo_vunmap(bo, map);
}
EXPORT_SYMBOL(drm_gem_ttm_vunmap);
/**
* drm_gem_ttm_mmap() - mmap &ttm_buffer_object
* @gem: GEM object.
* @vma: vm area.
*
* This function can be used as &drm_gem_object_funcs.mmap
* callback.
*/
int drm_gem_ttm_mmap(struct drm_gem_object *gem,
struct vm_area_struct *vma)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
int ret;
ret = ttm_bo_mmap_obj(vma, bo);
if (ret < 0)
return ret;
/*
* ttm has its own object refcounting, so drop gem reference
 * to avoid double accounting.
*/
drm_gem_object_put(gem);
return 0;
}
EXPORT_SYMBOL(drm_gem_ttm_mmap);
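/*
 * Illustrative sketch (not part of the original source): a TTM-based driver
 * can point its GEM object function table at these helpers; the structure
 * name is an assumption:
 *
 *	static const struct drm_gem_object_funcs my_ttm_gem_funcs = {
 *		.print_info = drm_gem_ttm_print_info,
 *		.vmap = drm_gem_ttm_vmap,
 *		.vunmap = drm_gem_ttm_vunmap,
 *		.mmap = drm_gem_ttm_mmap,
 *	};
 */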
/**
* drm_gem_ttm_dumb_map_offset() - Implements struct &drm_driver.dumb_map_offset
* @file: DRM file pointer.
* @dev: DRM device.
* @handle: GEM handle
* @offset: Returns the mapping's memory offset on success
*
* Provides an implementation of struct &drm_driver.dumb_map_offset for
* TTM-based GEM drivers. TTM allocates the offset internally and
* drm_gem_ttm_dumb_map_offset() returns it for dumb-buffer implementations.
*
* See struct &drm_driver.dumb_map_offset.
*
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
int drm_gem_ttm_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset)
{
struct drm_gem_object *gem;
gem = drm_gem_object_lookup(file, handle);
if (!gem)
return -ENOENT;
*offset = drm_vma_node_offset_addr(&gem->vma_node);
drm_gem_object_put(gem);
return 0;
}
EXPORT_SYMBOL(drm_gem_ttm_dumb_map_offset);
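/*
 * Illustrative sketch (not part of the original source): in struct drm_driver
 * a TTM-based driver can use this helper for dumb-buffer mapping;
 * my_dumb_create() and the chosen feature flags are assumptions:
 *
 *	static const struct drm_driver my_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		.dumb_create = my_dumb_create,
 *		.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
 *	};
 */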
MODULE_DESCRIPTION("DRM gem ttm helpers");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/drm_gem_ttm_helper.c |
/*
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_modeset_lock.h>
#include <drm/drm_print.h>
/**
* DOC: kms locking
*
* As KMS moves toward more fine grained locking, and atomic ioctl where
* userspace can indirectly control locking order, it becomes necessary
* to use &ww_mutex and acquire-contexts to avoid deadlocks. But because
* the locking is more distributed around the driver code, we want a bit
* of extra utility/tracking out of our acquire-ctx. This is provided
* by &struct drm_modeset_lock and &struct drm_modeset_acquire_ctx.
*
* For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.rst
*
* The basic usage pattern is to::
*
* drm_modeset_acquire_init(ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
* retry:
* foreach (lock in random_ordered_set_of_locks) {
* ret = drm_modeset_lock(lock, ctx)
* if (ret == -EDEADLK) {
* ret = drm_modeset_backoff(ctx);
* if (!ret)
* goto retry;
* }
* if (ret)
* goto out;
* }
* ... do stuff ...
* out:
* drm_modeset_drop_locks(ctx);
* drm_modeset_acquire_fini(ctx);
*
* For convenience this control flow is implemented in
* DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END() for the case
* where all modeset locks need to be taken through drm_modeset_lock_all_ctx().
*
* If all that is needed is a single modeset lock, then the &struct
* drm_modeset_acquire_ctx is not needed and the locking can be simplified
* by passing a NULL instead of ctx in the drm_modeset_lock() call or
* calling drm_modeset_lock_single_interruptible(). To unlock afterwards
* call drm_modeset_unlock().
*
* On top of these per-object locks using &ww_mutex there's also an overall
* &drm_mode_config.mutex, for protecting everything else. Mostly this means
* probe state of connectors, and preventing hotplug add/removal of connectors.
*
* Finally there's a bunch of dedicated locks to protect drm core internal
* lists and lookup data structures.
*/
static DEFINE_WW_CLASS(crtc_ww_class);
#if IS_ENABLED(CONFIG_DRM_DEBUG_MODESET_LOCK)
static noinline depot_stack_handle_t __drm_stack_depot_save(void)
{
unsigned long entries[8];
unsigned int n;
n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}
static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
{
struct drm_printer p = drm_debug_printer("drm_modeset_lock");
unsigned long *entries;
unsigned int nr_entries;
char *buf;
buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
if (!buf)
return;
nr_entries = stack_depot_fetch(stack_depot, &entries);
stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 2);
drm_printf(&p, "attempting to lock a contended lock without backoff:\n%s", buf);
kfree(buf);
}
static void __drm_stack_depot_init(void)
{
stack_depot_init();
}
#else /* CONFIG_DRM_DEBUG_MODESET_LOCK */
static depot_stack_handle_t __drm_stack_depot_save(void)
{
return 0;
}
static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
{
}
static void __drm_stack_depot_init(void)
{
}
#endif /* CONFIG_DRM_DEBUG_MODESET_LOCK */
/**
* drm_modeset_lock_all - take all modeset locks
* @dev: DRM device
*
* This function takes all modeset locks, suitable where a more fine-grained
* scheme isn't (yet) implemented. Locks must be dropped by calling the
* drm_modeset_unlock_all() function.
*
* This function is deprecated. It allocates a lock acquisition context and
 * stores it in &drm_device.mode_config. This facilitates conversion of
* existing code because it removes the need to manually deal with the
* acquisition context, but it is also brittle because the context is global
* and care must be taken not to nest calls. New code should use the
* drm_modeset_lock_all_ctx() function and pass in the context explicitly.
*/
void drm_modeset_lock_all(struct drm_device *dev)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_modeset_acquire_ctx *ctx;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
if (WARN_ON(!ctx))
return;
mutex_lock(&config->mutex);
drm_modeset_acquire_init(ctx, 0);
retry:
ret = drm_modeset_lock_all_ctx(dev, ctx);
if (ret < 0) {
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
goto retry;
}
drm_modeset_acquire_fini(ctx);
kfree(ctx);
return;
}
ww_acquire_done(&ctx->ww_ctx);
WARN_ON(config->acquire_ctx);
/*
* We hold the locks now, so it is safe to stash the acquisition
* context for drm_modeset_unlock_all().
*/
config->acquire_ctx = ctx;
drm_warn_on_modeset_not_all_locked(dev);
}
EXPORT_SYMBOL(drm_modeset_lock_all);
/**
* drm_modeset_unlock_all - drop all modeset locks
* @dev: DRM device
*
* This function drops all modeset locks taken by a previous call to the
* drm_modeset_lock_all() function.
*
* This function is deprecated. It uses the lock acquisition context stored
* in &drm_device.mode_config. This facilitates conversion of existing
* code because it removes the need to manually deal with the acquisition
* context, but it is also brittle because the context is global and care must
* be taken not to nest calls. New code should pass the acquisition context
* directly to the drm_modeset_drop_locks() function.
*/
void drm_modeset_unlock_all(struct drm_device *dev)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
if (WARN_ON(!ctx))
return;
config->acquire_ctx = NULL;
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
kfree(ctx);
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock_all);
/**
* drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
* @dev: device
*
* Useful as a debug assert.
*/
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
struct drm_crtc *crtc;
/* Locking is currently fubar in the panic handler. */
if (oops_in_progress)
return;
drm_for_each_crtc(crtc, dev)
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
/**
* drm_modeset_acquire_init - initialize acquire context
* @ctx: the acquire context
* @flags: 0 or %DRM_MODESET_ACQUIRE_INTERRUPTIBLE
*
* When passing %DRM_MODESET_ACQUIRE_INTERRUPTIBLE to @flags,
* all calls to drm_modeset_lock() will perform an interruptible
* wait.
*/
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
uint32_t flags)
{
memset(ctx, 0, sizeof(*ctx));
ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
INIT_LIST_HEAD(&ctx->locked);
if (flags & DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
ctx->interruptible = true;
}
EXPORT_SYMBOL(drm_modeset_acquire_init);
/**
* drm_modeset_acquire_fini - cleanup acquire context
* @ctx: the acquire context
*/
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
{
ww_acquire_fini(&ctx->ww_ctx);
}
EXPORT_SYMBOL(drm_modeset_acquire_fini);
/**
* drm_modeset_drop_locks - drop all locks
* @ctx: the acquire context
*
* Drop all locks currently held against this acquire context.
*/
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
{
if (WARN_ON(ctx->contended))
__drm_stack_depot_print(ctx->stack_depot);
while (!list_empty(&ctx->locked)) {
struct drm_modeset_lock *lock;
lock = list_first_entry(&ctx->locked,
struct drm_modeset_lock, head);
drm_modeset_unlock(lock);
}
}
EXPORT_SYMBOL(drm_modeset_drop_locks);
static inline int modeset_lock(struct drm_modeset_lock *lock,
struct drm_modeset_acquire_ctx *ctx,
bool interruptible, bool slow)
{
int ret;
if (WARN_ON(ctx->contended))
__drm_stack_depot_print(ctx->stack_depot);
if (ctx->trylock_only) {
lockdep_assert_held(&ctx->ww_ctx);
if (!ww_mutex_trylock(&lock->mutex, NULL))
return -EBUSY;
else
return 0;
} else if (interruptible && slow) {
ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
} else if (interruptible) {
ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
} else if (slow) {
ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
ret = 0;
} else {
ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
}
if (!ret) {
WARN_ON(!list_empty(&lock->head));
list_add(&lock->head, &ctx->locked);
} else if (ret == -EALREADY) {
/* we already hold the lock.. this is fine. For atomic
* we will need to be able to drm_modeset_lock() things
* without having to keep track of what is already locked
* or not.
*/
ret = 0;
} else if (ret == -EDEADLK) {
ctx->contended = lock;
ctx->stack_depot = __drm_stack_depot_save();
}
return ret;
}
/**
* drm_modeset_backoff - deadlock avoidance backoff
* @ctx: the acquire context
*
* If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
* you must call this function to drop all currently held locks and
* block until the contended lock becomes available.
*
* This function returns 0 on success, or -ERESTARTSYS if this context
* is initialized with %DRM_MODESET_ACQUIRE_INTERRUPTIBLE and the
* wait has been interrupted.
*/
int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
{
struct drm_modeset_lock *contended = ctx->contended;
ctx->contended = NULL;
ctx->stack_depot = 0;
if (WARN_ON(!contended))
return 0;
drm_modeset_drop_locks(ctx);
return modeset_lock(contended, ctx, ctx->interruptible, true);
}
EXPORT_SYMBOL(drm_modeset_backoff);
/**
* drm_modeset_lock_init - initialize lock
* @lock: lock to init
*/
void drm_modeset_lock_init(struct drm_modeset_lock *lock)
{
ww_mutex_init(&lock->mutex, &crtc_ww_class);
INIT_LIST_HEAD(&lock->head);
__drm_stack_depot_init();
}
EXPORT_SYMBOL(drm_modeset_lock_init);
/**
* drm_modeset_lock - take modeset lock
* @lock: lock to take
* @ctx: acquire ctx
*
* If @ctx is not NULL, then its ww acquire context is used and the
* lock will be tracked by the context and can be released by calling
* drm_modeset_drop_locks(). If -EDEADLK is returned, this means a
* deadlock scenario has been detected and it is an error to attempt
* to take any more locks without first calling drm_modeset_backoff().
*
* If the @ctx is not NULL and initialized with
* %DRM_MODESET_ACQUIRE_INTERRUPTIBLE, this function will fail with
* -ERESTARTSYS when interrupted.
*
* If @ctx is NULL then the function call behaves like a normal,
* uninterruptible non-nesting mutex_lock() call.
*/
int drm_modeset_lock(struct drm_modeset_lock *lock,
struct drm_modeset_acquire_ctx *ctx)
{
if (ctx)
return modeset_lock(lock, ctx, ctx->interruptible, false);
ww_mutex_lock(&lock->mutex, NULL);
return 0;
}
EXPORT_SYMBOL(drm_modeset_lock);
/**
* drm_modeset_lock_single_interruptible - take a single modeset lock
* @lock: lock to take
*
* This function behaves as drm_modeset_lock() with a NULL context,
* but performs interruptible waits.
*
* This function returns 0 on success, or -ERESTARTSYS when interrupted.
*/
int drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock)
{
return ww_mutex_lock_interruptible(&lock->mutex, NULL);
}
EXPORT_SYMBOL(drm_modeset_lock_single_interruptible);
/**
* drm_modeset_unlock - drop modeset lock
* @lock: lock to release
*/
void drm_modeset_unlock(struct drm_modeset_lock *lock)
{
list_del_init(&lock->head);
ww_mutex_unlock(&lock->mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock);
/**
* drm_modeset_lock_all_ctx - take all modeset locks
* @dev: DRM device
* @ctx: lock acquisition context
*
* This function takes all modeset locks, suitable where a more fine-grained
* scheme isn't (yet) implemented.
*
* Unlike drm_modeset_lock_all(), it doesn't take the &drm_mode_config.mutex
* since that lock isn't required for modeset state changes. Callers which
* need to grab that lock too need to do so outside of the acquire context
* @ctx.
*
* Locks acquired with this function should be released by calling the
* drm_modeset_drop_locks() function on @ctx.
*
* See also: DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END()
*
* Returns: 0 on success or a negative error-code on failure.
*/
int drm_modeset_lock_all_ctx(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_private_obj *privobj;
struct drm_crtc *crtc;
struct drm_plane *plane;
int ret;
ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
if (ret)
return ret;
drm_for_each_crtc(crtc, dev) {
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
return ret;
}
drm_for_each_plane(plane, dev) {
ret = drm_modeset_lock(&plane->mutex, ctx);
if (ret)
return ret;
}
drm_for_each_privobj(privobj, dev) {
ret = drm_modeset_lock(&privobj->lock, ctx);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL(drm_modeset_lock_all_ctx);
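/*
 * Illustrative sketch (not part of the original source): most callers reach
 * this function through the convenience macros, which also handle the
 * -EDEADLK backoff/retry dance; do_something_with_locked_state() stands in
 * for whatever state access the caller needs while the locks are held:
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	int ret;
 *
 *	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
 *
 *	ret = do_something_with_locked_state(dev, &ctx);
 *
 *	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
 *	return ret;
 */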
| linux-master | drivers/gpu/drm/drm_modeset_lock.c |
/*
* drm_irq.c IRQ and vblank support
*
* \author Rickard E. (Rik) Faith <[email protected]>
* \author Gareth Hughes <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/moduleparam.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "drm_internal.h"
#include "drm_trace.h"
/**
* DOC: vblank handling
*
* From the computer's perspective, every time the monitor displays
* a new frame the scanout engine has "scanned out" the display image
* from top to bottom, one row of pixels at a time. The current row
* of pixels is referred to as the current scanline.
*
* In addition to the display's visible area, there's usually a couple of
* extra scanlines which aren't actually displayed on the screen.
* These extra scanlines don't contain image data and are occasionally used
* for features like audio and infoframes. The region made up of these
* scanlines is referred to as the vertical blanking region, or vblank for
* short.
*
* For historical reference, the vertical blanking period was designed to
* give the electron gun (on CRTs) enough time to move back to the top of
* the screen to start scanning out the next frame. Similar for horizontal
* blanking periods. They were designed to give the electron gun enough
* time to move back to the other side of the screen to start scanning the
* next scanline.
*
* ::
*
*
* physical → ⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽
* top of | |
* display | |
* | New frame |
* | |
* |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓|
* |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~| ← Scanline,
* |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓| updates the
* | | frame as it
* | | travels down
* | | ("scan out")
* | Old frame |
* | |
* | |
* | |
* | | physical
* | | bottom of
* vertical |⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽| ← display
* blanking ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆
* region → ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆
* ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆
* start of → ⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽
* new frame
*
* "Physical top of display" is the reference point for the high-precision/
* corrected timestamp.
*
* On a lot of display hardware, programming needs to take effect during the
* vertical blanking period so that settings like gamma, the image buffer
 * to be scanned out, etc. can safely be changed without showing
* any visual artifacts on the screen. In some unforgiving hardware, some of
* this programming has to both start and end in the same vblank. To help
* with the timing of the hardware programming, an interrupt is usually
* available to notify the driver when it can start the updating of registers.
* The interrupt is in this context named the vblank interrupt.
*
* The vblank interrupt may be fired at different points depending on the
* hardware. Some hardware implementations will fire the interrupt when the
 * new frame starts; other implementations will fire the interrupt at different
* points in time.
*
* Vertical blanking plays a major role in graphics rendering. To achieve
* tear-free display, users must synchronize page flips and/or rendering to
* vertical blanking. The DRM API offers ioctls to perform page flips
* synchronized to vertical blanking and wait for vertical blanking.
*
* The DRM core handles most of the vertical blanking management logic, which
* involves filtering out spurious interrupts, keeping race-free blanking
* counters, coping with counter wrap-around and resets and keeping use counts.
* It relies on the driver to generate vertical blanking interrupts and
* optionally provide a hardware vertical blanking counter.
*
* Drivers must initialize the vertical blanking handling core with a call to
* drm_vblank_init(). Minimally, a driver needs to implement
* &drm_crtc_funcs.enable_vblank and &drm_crtc_funcs.disable_vblank plus call
* drm_crtc_handle_vblank() in its vblank interrupt handler for working vblank
* support.
*
* Vertical blanking interrupts can be enabled by the DRM core or by drivers
* themselves (for instance to handle page flipping operations). The DRM core
* maintains a vertical blanking use count to ensure that the interrupts are not
* disabled while a user still needs them. To increment the use count, drivers
* call drm_crtc_vblank_get() and release the vblank reference again with
* drm_crtc_vblank_put(). In between these two calls vblank interrupts are
* guaranteed to be enabled.
*
* On a lot of hardware, disabling the vblank interrupt cannot be done in a race-free
* manner, see &drm_driver.vblank_disable_immediate and
* &drm_driver.max_vblank_count. In that case the vblank core only disables the
* vblanks after a timer has expired, which can be configured through the
* ``vblankoffdelay`` module parameter.
*
* Drivers for hardware without support for vertical-blanking interrupts
* must not call drm_vblank_init(). For such drivers, atomic helpers will
* automatically generate fake vblank events as part of the display update.
* This functionality can also be controlled by the driver by setting and
* clearing &drm_crtc_state.no_vblank.
*/
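/*
 * A minimal wiring sketch for the API described above (not taken from any
 * particular driver; the foo_* names and register helpers are hypothetical):
 * the driver implements the enable/disable hooks, forwards its vblank
 * interrupt to drm_crtc_handle_vblank() and initializes the core with
 * drm_vblank_init() at probe time.
 *
 *	static int foo_enable_vblank(struct drm_crtc *crtc)
 *	{
 *		foo_unmask_vblank_irq(crtc);
 *		return 0;
 *	}
 *
 *	static void foo_disable_vblank(struct drm_crtc *crtc)
 *	{
 *		foo_mask_vblank_irq(crtc);
 *	}
 *
 *	static const struct drm_crtc_funcs foo_crtc_funcs = {
 *		...
 *		.enable_vblank = foo_enable_vblank,
 *		.disable_vblank = foo_disable_vblank,
 *	};
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *arg)
 *	{
 *		struct foo_device *foo = arg;
 *
 *		if (foo_irq_is_vblank(foo))
 *			drm_crtc_handle_vblank(&foo->crtc);
 *		return IRQ_HANDLED;
 *	}
 */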
/* Retry timestamp calculation up to 3 times to satisfy
* drm_timestamp_precision before giving up.
*/
#define DRM_TIMESTAMP_MAXRETRIES 3
/* Threshold in nanoseconds for detection of redundant
* vblank irq in drm_handle_vblank(). 1 msec should be ok.
*/
#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
static bool
drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
ktime_t *tvblank, bool in_vblank_irq);
static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
static int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
static void store_vblank(struct drm_device *dev, unsigned int pipe,
u32 vblank_count_inc,
ktime_t t_vblank, u32 last)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
assert_spin_locked(&dev->vblank_time_lock);
vblank->last = last;
write_seqlock(&vblank->seqlock);
vblank->time = t_vblank;
atomic64_add(vblank_count_inc, &vblank->count);
write_sequnlock(&vblank->seqlock);
}
static u32 drm_max_vblank_count(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
return vblank->max_vblank_count ?: dev->max_vblank_count;
}
/*
* "No hw counter" fallback implementation of .get_vblank_counter() hook,
* if there is no usable hardware frame counter available.
*/
static u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe)
{
drm_WARN_ON_ONCE(dev, drm_max_vblank_count(dev, pipe) != 0);
return 0;
}
static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
if (drm_WARN_ON(dev, !crtc))
return 0;
if (crtc->funcs->get_vblank_counter)
return crtc->funcs->get_vblank_counter(crtc);
}
#ifdef CONFIG_DRM_LEGACY
else if (dev->driver->get_vblank_counter) {
return dev->driver->get_vblank_counter(dev, pipe);
}
#endif
return drm_vblank_no_hw_counter(dev, pipe);
}
/*
* Reset the stored timestamp for the current vblank count to correspond
* to the last vblank that occurred.
*
* Only to be called from drm_crtc_vblank_on() and
* drm_legacy_vblank_post_modeset().
*
* Note: caller must hold &drm_device.vbl_lock since this reads & writes
* device vblank fields.
*/
static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe)
{
u32 cur_vblank;
bool rc;
ktime_t t_vblank;
int count = DRM_TIMESTAMP_MAXRETRIES;
spin_lock(&dev->vblank_time_lock);
/*
* sample the current counter to avoid random jumps
* when drm_vblank_enable() applies the diff
*/
do {
cur_vblank = __get_vblank_counter(dev, pipe);
rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, false);
} while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0);
/*
* Only reinitialize corresponding vblank timestamp if high-precision query
* available and didn't fail. Otherwise reinitialize delayed at next vblank
* interrupt and assign 0 for now, to mark the vblank timestamp as invalid.
*/
if (!rc)
t_vblank = 0;
/*
* +1 to make sure user will never see the same
* vblank counter value before and after a modeset
*/
store_vblank(dev, pipe, 1, t_vblank, cur_vblank);
spin_unlock(&dev->vblank_time_lock);
}
/*
* Call back into the driver to update the appropriate vblank counter
* (specified by @pipe). Deal with wraparound, if it occurred, and
* update the last read value so we can deal with wraparound on the next
* call if necessary.
*
* Only necessary when going from off->on, to account for frames we
* didn't get an interrupt for.
*
* Note: caller must hold &drm_device.vbl_lock since this reads & writes
* device vblank fields.
*/
static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
bool in_vblank_irq)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
u32 cur_vblank, diff;
bool rc;
ktime_t t_vblank;
int count = DRM_TIMESTAMP_MAXRETRIES;
int framedur_ns = vblank->framedur_ns;
u32 max_vblank_count = drm_max_vblank_count(dev, pipe);
/*
* Interrupts were disabled prior to this call, so deal with counter
* wrap if needed.
* NOTE! It's possible we lost a full dev->max_vblank_count + 1 events
* here if the register is small or we had vblank interrupts off for
* a long time.
*
* We repeat the hardware vblank counter & timestamp query until
* we get consistent results. This is to prevent races between the gpu
* updating its hardware counter and us retrieving the
* corresponding vblank timestamp.
*/
do {
cur_vblank = __get_vblank_counter(dev, pipe);
rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, in_vblank_irq);
} while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0);
if (max_vblank_count) {
/* trust the hw counter when it's around */
diff = (cur_vblank - vblank->last) & max_vblank_count;
} else if (rc && framedur_ns) {
u64 diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time));
/*
* Figure out how many vblanks we've missed based
* on the difference in the timestamps and the
* frame/field duration.
*/
drm_dbg_vbl(dev, "crtc %u: Calculating number of vblanks."
" diff_ns = %lld, framedur_ns = %d)\n",
pipe, (long long)diff_ns, framedur_ns);
diff = DIV_ROUND_CLOSEST_ULL(diff_ns, framedur_ns);
if (diff == 0 && in_vblank_irq)
drm_dbg_vbl(dev, "crtc %u: Redundant vblirq ignored\n",
pipe);
} else {
/* some kind of default for drivers w/o accurate vbl timestamping */
diff = in_vblank_irq ? 1 : 0;
}
/*
* Within a drm_vblank_pre_modeset - drm_vblank_post_modeset
* interval? If so then vblank irqs keep running and it will likely
* happen that the hardware vblank counter is not trustworthy as it
* might reset at some point in that interval and vblank timestamps
* are not trustworthy either in that interval. In other words, this can result
* in a bogus diff >> 1 which must be avoided as it would cause
* random large forward jumps of the software vblank counter.
*/
if (diff > 1 && (vblank->inmodeset & 0x2)) {
drm_dbg_vbl(dev,
"clamping vblank bump to 1 on crtc %u: diffr=%u"
" due to pre-modeset.\n", pipe, diff);
diff = 1;
}
drm_dbg_vbl(dev, "updating vblank count on crtc %u:"
" current=%llu, diff=%u, hw=%u hw_last=%u\n",
pipe, (unsigned long long)atomic64_read(&vblank->count),
diff, cur_vblank, vblank->last);
if (diff == 0) {
drm_WARN_ON_ONCE(dev, cur_vblank != vblank->last);
return;
}
/*
* Only reinitialize corresponding vblank timestamp if high-precision query
* available and didn't fail, or we were called from the vblank interrupt.
* Otherwise reinitialize delayed at next vblank interrupt and assign 0
* for now, to mark the vblank timestamp as invalid.
*/
if (!rc && !in_vblank_irq)
t_vblank = 0;
store_vblank(dev, pipe, diff, t_vblank, cur_vblank);
}
u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
u64 count;
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return 0;
count = atomic64_read(&vblank->count);
/*
* This read barrier corresponds to the implicit write barrier of the
* write seqlock in store_vblank(). Note that this is the only place
* where we need an explicit barrier, since all other access goes
* through drm_vblank_count_and_time(), which already has the required
* read barrier courtesy of the read seqlock.
*/
smp_rmb();
return count;
}
/**
* drm_crtc_accurate_vblank_count - retrieve the master vblank counter
* @crtc: which counter to retrieve
*
* This function is similar to drm_crtc_vblank_count() but this function
* interpolates to handle a race with vblank interrupts using the high precision
* timestamping support.
*
* This is mostly useful for hardware that can obtain the scanout position, but
* doesn't have a hardware frame counter.
*/
u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
u64 vblank;
unsigned long flags;
drm_WARN_ONCE(dev, drm_debug_enabled(DRM_UT_VBL) &&
!crtc->funcs->get_vblank_timestamp,
"This function requires support for accurate vblank timestamps.");
spin_lock_irqsave(&dev->vblank_time_lock, flags);
drm_update_vblank_count(dev, pipe, false);
vblank = drm_vblank_count(dev, pipe);
spin_unlock_irqrestore(&dev->vblank_time_lock, flags);
return vblank;
}
EXPORT_SYMBOL(drm_crtc_accurate_vblank_count);
static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
{
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
if (drm_WARN_ON(dev, !crtc))
return;
if (crtc->funcs->disable_vblank)
crtc->funcs->disable_vblank(crtc);
}
#ifdef CONFIG_DRM_LEGACY
else {
dev->driver->disable_vblank(dev, pipe);
}
#endif
}
/*
* Disable vblank irqs on crtc, make sure that the last vblank count
* of the hardware and the corresponding consistent software vblank counter
* are preserved, even if there are any spurious vblank irqs after
* disable.
*/
void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
assert_spin_locked(&dev->vbl_lock);
/* Prevent vblank irq processing while disabling vblank irqs,
* so no updates of timestamps or count can happen after we've
* disabled. Needed to prevent races in case of delayed irqs.
*/
spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
/*
* Update vblank count and disable vblank interrupts only if the
* interrupts were enabled. This avoids calling the ->disable_vblank()
* operation in atomic context with the hardware potentially runtime
* suspended.
*/
if (!vblank->enabled)
goto out;
/*
* Update the count and timestamp to maintain the
* appearance that the counter has been ticking all along until
* this time. This makes the count account for the entire time
* between drm_crtc_vblank_on() and drm_crtc_vblank_off().
*/
drm_update_vblank_count(dev, pipe, false);
__disable_vblank(dev, pipe);
vblank->enabled = false;
out:
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
}
static void vblank_disable_fn(struct timer_list *t)
{
struct drm_vblank_crtc *vblank = from_timer(vblank, t, disable_timer);
struct drm_device *dev = vblank->dev;
unsigned int pipe = vblank->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
drm_dbg_core(dev, "disabling vblank on crtc %u\n", pipe);
drm_vblank_disable_and_save(dev, pipe);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
static void drm_vblank_init_release(struct drm_device *dev, void *ptr)
{
struct drm_vblank_crtc *vblank = ptr;
drm_WARN_ON(dev, READ_ONCE(vblank->enabled) &&
drm_core_check_feature(dev, DRIVER_MODESET));
drm_vblank_destroy_worker(vblank);
del_timer_sync(&vblank->disable_timer);
}
/**
* drm_vblank_init - initialize vblank support
* @dev: DRM device
* @num_crtcs: number of CRTCs supported by @dev
*
* This function initializes vblank support for @num_crtcs display pipelines.
* Cleanup is handled automatically through a cleanup function added with
* drmm_add_action_or_reset().
*
* Returns:
* Zero on success or a negative error code on failure.
*/
int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
{
int ret;
unsigned int i;
spin_lock_init(&dev->vbl_lock);
spin_lock_init(&dev->vblank_time_lock);
dev->vblank = drmm_kcalloc(dev, num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
if (!dev->vblank)
return -ENOMEM;
dev->num_crtcs = num_crtcs;
for (i = 0; i < num_crtcs; i++) {
struct drm_vblank_crtc *vblank = &dev->vblank[i];
vblank->dev = dev;
vblank->pipe = i;
init_waitqueue_head(&vblank->queue);
timer_setup(&vblank->disable_timer, vblank_disable_fn, 0);
seqlock_init(&vblank->seqlock);
ret = drmm_add_action_or_reset(dev, drm_vblank_init_release,
vblank);
if (ret)
return ret;
ret = drm_vblank_worker_init(vblank);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL(drm_vblank_init);
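/*
 * Example probe-time call (a sketch; FOO_NUM_CRTCS is a hypothetical
 * per-driver constant):
 *
 *	ret = drm_vblank_init(drm, FOO_NUM_CRTCS);
 *	if (ret)
 *		return ret;
 *
 * No explicit teardown is needed since cleanup is registered via
 * drmm_add_action_or_reset() above.
 */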
/**
* drm_dev_has_vblank - test if vblanking has been initialized for
* a device
* @dev: the device
*
* Drivers may call this function to test if vblank support is
* initialized for a device. For most hardware this means that vblanking
* can also be enabled.
*
* Atomic helpers use this function to initialize
* &drm_crtc_state.no_vblank. See also drm_atomic_helper_check_modeset().
*
* Returns:
* True if vblanking has been initialized for the given device, false
* otherwise.
*/
bool drm_dev_has_vblank(const struct drm_device *dev)
{
return dev->num_crtcs != 0;
}
EXPORT_SYMBOL(drm_dev_has_vblank);
/**
* drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
* @crtc: which CRTC's vblank waitqueue to retrieve
*
* This function returns a pointer to the vblank waitqueue for the CRTC.
* Drivers can use this to implement vblank waits using wait_event() and related
* functions.
*/
wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc)
{
return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
}
EXPORT_SYMBOL(drm_crtc_vblank_waitqueue);
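/*
 * Illustrative wait built on the waitqueue (a sketch, mirroring what
 * drm_wait_one_vblank() does further down): hold a vblank reference so
 * the interrupt stays enabled, sample the counter and wait for it to
 * advance.
 *
 *	wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(crtc);
 *	u64 last;
 *
 *	if (drm_crtc_vblank_get(crtc))
 *		return;
 *	last = drm_crtc_vblank_count(crtc);
 *	wait_event_timeout(*wq, drm_crtc_vblank_count(crtc) != last,
 *			   msecs_to_jiffies(100));
 *	drm_crtc_vblank_put(crtc);
 */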
/**
* drm_calc_timestamping_constants - calculate vblank timestamp constants
* @crtc: drm_crtc whose timestamp constants should be updated.
* @mode: display mode containing the scanout timings
*
* Calculate and store various constants which are later needed by vblank and
* swap-completion timestamping, e.g. by
* drm_crtc_vblank_helper_get_vblank_timestamp(). They are derived from
* CRTC's true scanout timing, so they take things like panel scaling or
* other adjustments into account.
*/
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
int linedur_ns = 0, framedur_ns = 0;
int dotclock = mode->crtc_clock;
if (!drm_dev_has_vblank(dev))
return;
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
/* Valid dotclock? */
if (dotclock > 0) {
int frame_size = mode->crtc_htotal * mode->crtc_vtotal;
/*
* Convert scanline length in pixels and video
* dot clock to line duration and frame duration
* in nanoseconds:
*/
linedur_ns = div_u64((u64) mode->crtc_htotal * 1000000, dotclock);
framedur_ns = div_u64((u64) frame_size * 1000000, dotclock);
/*
* Fields of interlaced scanout modes are only half a frame duration.
*/
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
framedur_ns /= 2;
} else {
drm_err(dev, "crtc %u: Can't calculate constants, dotclock = 0!\n",
crtc->base.id);
}
vblank->linedur_ns = linedur_ns;
vblank->framedur_ns = framedur_ns;
drm_mode_copy(&vblank->hwmode, mode);
drm_dbg_core(dev,
"crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
crtc->base.id, mode->crtc_htotal,
mode->crtc_vtotal, mode->crtc_vdisplay);
drm_dbg_core(dev, "crtc %u: clock %d kHz framedur %d linedur %d\n",
crtc->base.id, dotclock, framedur_ns, linedur_ns);
}
EXPORT_SYMBOL(drm_calc_timestamping_constants);
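/*
 * Worked example for the constants above (values assumed for illustration,
 * not taken from real hardware): a 1920x1080@60 mode with crtc_htotal = 2200,
 * crtc_vtotal = 1125 and crtc_clock = 148500 kHz gives
 *
 *	linedur_ns  = 2200 * 1000000 / 148500        ~= 14815 ns
 *	framedur_ns = 2200 * 1125 * 1000000 / 148500 ~= 16666667 ns (~16.7 ms)
 *
 * i.e. one frame every 1/60 s, as expected.
 */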
/**
* drm_crtc_vblank_helper_get_vblank_timestamp_internal - precise vblank
* timestamp helper
* @crtc: CRTC whose vblank timestamp to retrieve
* @max_error: Desired maximum allowable error in timestamps (nanosecs)
* On return contains true maximum error of timestamp
* @vblank_time: Pointer to time which should receive the timestamp
* @in_vblank_irq:
* True when called from drm_crtc_handle_vblank(). Some drivers
* need to apply some workarounds for gpu-specific vblank irq quirks
* if flag is set.
* @get_scanout_position:
* Callback function to retrieve the scanout position. See
* &struct drm_crtc_helper_funcs.get_scanout_position.
*
* Implements calculation of exact vblank timestamps from given drm_display_mode
* timings and current video scanout position of a CRTC.
*
* The current implementation only handles standard video modes. For double scan
* and interlaced modes the driver is supposed to adjust the hardware mode
* (taken from &drm_crtc_state.adjusted_mode for atomic modeset drivers) to
* match the scanout position reported.
*
* Note that atomic drivers must call drm_calc_timestamping_constants() before
* enabling a CRTC. The atomic helpers already take care of that in
* drm_atomic_helper_calc_timestamping_constants().
*
* Returns:
*
* Returns true on success, and false on failure, i.e. when no accurate
* timestamp could be acquired.
*/
bool
drm_crtc_vblank_helper_get_vblank_timestamp_internal(
struct drm_crtc *crtc, int *max_error, ktime_t *vblank_time,
bool in_vblank_irq,
drm_vblank_get_scanout_position_func get_scanout_position)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct timespec64 ts_etime, ts_vblank_time;
ktime_t stime, etime;
bool vbl_status;
const struct drm_display_mode *mode;
int vpos, hpos, i;
int delta_ns, duration_ns;
if (pipe >= dev->num_crtcs) {
drm_err(dev, "Invalid crtc %u\n", pipe);
return false;
}
/* Scanout position query not supported? Should not happen. */
if (!get_scanout_position) {
drm_err(dev, "Called from CRTC w/o get_scanout_position()!?\n");
return false;
}
if (drm_drv_uses_atomic_modeset(dev))
mode = &vblank->hwmode;
else
mode = &crtc->hwmode;
/* If mode timing undefined, just return as no-op:
* Happens during initial modesetting of a crtc.
*/
if (mode->crtc_clock == 0) {
drm_dbg_core(dev, "crtc %u: Noop due to uninitialized mode.\n",
pipe);
drm_WARN_ON_ONCE(dev, drm_drv_uses_atomic_modeset(dev));
return false;
}
/* Get current scanout position with system timestamp.
* Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
* if single query takes longer than max_error nanoseconds.
*
* This guarantees a tight bound on maximum error if
* code gets preempted or delayed for some reason.
*/
for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
/*
* Get vertical and horizontal scanout position vpos, hpos,
* and bounding timestamps stime, etime, pre/post query.
*/
vbl_status = get_scanout_position(crtc, in_vblank_irq,
&vpos, &hpos,
&stime, &etime,
mode);
/* Return as no-op if scanout query unsupported or failed. */
if (!vbl_status) {
drm_dbg_core(dev,
"crtc %u : scanoutpos query failed.\n",
pipe);
return false;
}
/* Compute uncertainty in timestamp of scanout position query. */
duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
/* Accept result with < max_error nsecs timing uncertainty. */
if (duration_ns <= *max_error)
break;
}
/* Noisy system timing? */
if (i == DRM_TIMESTAMP_MAXRETRIES) {
drm_dbg_core(dev,
"crtc %u: Noisy timestamp %d us > %d us [%d reps].\n",
pipe, duration_ns / 1000, *max_error / 1000, i);
}
/* Return upper bound of timestamp precision error. */
*max_error = duration_ns;
/* Convert scanout position into elapsed time at raw_time query
* since start of scanout at first display scanline. delta_ns
* can be negative if start of scanout hasn't happened yet.
*/
delta_ns = div_s64(1000000LL * (vpos * mode->crtc_htotal + hpos),
mode->crtc_clock);
/* Subtract time delta from raw timestamp to get final
* vblank_time timestamp for end of vblank.
*/
*vblank_time = ktime_sub_ns(etime, delta_ns);
if (!drm_debug_enabled(DRM_UT_VBL))
return true;
ts_etime = ktime_to_timespec64(etime);
ts_vblank_time = ktime_to_timespec64(*vblank_time);
drm_dbg_vbl(dev,
"crtc %u : v p(%d,%d)@ %lld.%06ld -> %lld.%06ld [e %d us, %d rep]\n",
pipe, hpos, vpos,
(u64)ts_etime.tv_sec, ts_etime.tv_nsec / 1000,
(u64)ts_vblank_time.tv_sec, ts_vblank_time.tv_nsec / 1000,
duration_ns / 1000, i);
return true;
}
EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_internal);
/**
* drm_crtc_vblank_helper_get_vblank_timestamp - precise vblank timestamp
* helper
* @crtc: CRTC whose vblank timestamp to retrieve
* @max_error: Desired maximum allowable error in timestamps (nanosecs)
* On return contains true maximum error of timestamp
* @vblank_time: Pointer to time which should receive the timestamp
* @in_vblank_irq:
* True when called from drm_crtc_handle_vblank(). Some drivers
* need to apply some workarounds for gpu-specific vblank irq quirks
* if flag is set.
*
* Implements calculation of exact vblank timestamps from given drm_display_mode
* timings and current video scanout position of a CRTC. This can be directly
* used as the &drm_crtc_funcs.get_vblank_timestamp implementation of a kms
* driver if &drm_crtc_helper_funcs.get_scanout_position is implemented.
*
* The current implementation only handles standard video modes. For double scan
* and interlaced modes the driver is supposed to adjust the hardware mode
* (taken from &drm_crtc_state.adjusted_mode for atomic modeset drivers) to
* match the scanout position reported.
*
* Note that atomic drivers must call drm_calc_timestamping_constants() before
* enabling a CRTC. The atomic helpers already take care of that in
* drm_atomic_helper_calc_timestamping_constants().
*
* Returns:
*
* Returns true on success, and false on failure, i.e. when no accurate
* timestamp could be acquired.
*/
bool drm_crtc_vblank_helper_get_vblank_timestamp(struct drm_crtc *crtc,
int *max_error,
ktime_t *vblank_time,
bool in_vblank_irq)
{
return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
crtc, max_error, vblank_time, in_vblank_irq,
crtc->helper_private->get_scanout_position);
}
EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp);
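/*
 * Typical hookup for a driver that can report the scanout position (a
 * sketch; foo_crtc_get_scanout_position and the struct names are assumed,
 * not prescribed):
 *
 *	static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
 *		...
 *		.get_scanout_position = foo_crtc_get_scanout_position,
 *	};
 *
 *	static const struct drm_crtc_funcs foo_crtc_funcs = {
 *		...
 *		.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
 *	};
 */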
/**
* drm_crtc_get_last_vbltimestamp - retrieve raw timestamp for the most
* recent vblank interval
* @crtc: CRTC whose vblank timestamp to retrieve
* @tvblank: Pointer to target time which should receive the timestamp
* @in_vblank_irq:
* True when called from drm_crtc_handle_vblank(). Some drivers
* need to apply some workarounds for gpu-specific vblank irq quirks
* if flag is set.
*
* Fetches the system timestamp corresponding to the time of the most recent
* vblank interval on specified CRTC. May call into kms-driver to
* compute the timestamp with a high-precision GPU specific method.
*
* Returns false if the timestamp originates from an uncorrected ktime_get()
* call, i.e., it isn't very precisely locked to the true vblank.
*
* Returns:
* True if timestamp is considered to be very precise, false otherwise.
*/
static bool
drm_crtc_get_last_vbltimestamp(struct drm_crtc *crtc, ktime_t *tvblank,
bool in_vblank_irq)
{
bool ret = false;
/* Define requested maximum error on timestamps (nanoseconds). */
int max_error = (int) drm_timestamp_precision * 1000;
/* Query driver if possible and precision timestamping enabled. */
if (crtc && crtc->funcs->get_vblank_timestamp && max_error > 0) {
ret = crtc->funcs->get_vblank_timestamp(crtc, &max_error,
tvblank, in_vblank_irq);
}
/* GPU high precision timestamp query unsupported or failed.
* Return current monotonic/gettimeofday timestamp as best estimate.
*/
if (!ret)
*tvblank = ktime_get();
return ret;
}
static bool
drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
ktime_t *tvblank, bool in_vblank_irq)
{
struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
return drm_crtc_get_last_vbltimestamp(crtc, tvblank, in_vblank_irq);
}
/**
* drm_crtc_vblank_count - retrieve "cooked" vblank counter value
* @crtc: which counter to retrieve
*
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
* modesetting activity. Note that this value isn't correct against a racing
* vblank interrupt (since it only reports the software vblank counter), see
* drm_crtc_accurate_vblank_count() for such use-cases.
*
* Note that for a given vblank counter value drm_crtc_handle_vblank()
* and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time()
* provide a barrier: Any writes done before calling
* drm_crtc_handle_vblank() will be visible to callers of the later
* functions, if the vblank count is the same or a later one.
*
* See also &drm_vblank_crtc.count.
*
* Returns:
* The software vblank counter.
*/
u64 drm_crtc_vblank_count(struct drm_crtc *crtc)
{
return drm_vblank_count(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_count);
/**
* drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the
* system timestamp corresponding to that vblank counter value.
* @dev: DRM device
* @pipe: index of CRTC whose counter to retrieve
* @vblanktime: Pointer to ktime_t to receive the vblank timestamp.
*
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
* modesetting activity. Returns corresponding system timestamp of the time
* of the vblank interval that corresponds to the current vblank counter value.
*
* This is the legacy version of drm_crtc_vblank_count_and_time().
*/
static u64 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
ktime_t *vblanktime)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
u64 vblank_count;
unsigned int seq;
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) {
*vblanktime = 0;
return 0;
}
do {
seq = read_seqbegin(&vblank->seqlock);
vblank_count = atomic64_read(&vblank->count);
*vblanktime = vblank->time;
} while (read_seqretry(&vblank->seqlock, seq));
return vblank_count;
}
/**
* drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value
* and the system timestamp corresponding to that vblank counter value
* @crtc: which counter to retrieve
* @vblanktime: Pointer to time to receive the vblank timestamp.
*
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
* modesetting activity. Returns corresponding system timestamp of the time
* of the vblank interval that corresponds to the current vblank counter value.
*
* Note that for a given vblank counter value drm_crtc_handle_vblank()
* and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time()
* provide a barrier: Any writes done before calling
* drm_crtc_handle_vblank() will be visible to callers of the later
* functions, if the vblank count is the same or a later one.
*
* See also &drm_vblank_crtc.count.
*/
u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
ktime_t *vblanktime)
{
return drm_vblank_count_and_time(crtc->dev, drm_crtc_index(crtc),
vblanktime);
}
EXPORT_SYMBOL(drm_crtc_vblank_count_and_time);
/**
* drm_crtc_next_vblank_start - calculate the time of the next vblank
* @crtc: the crtc for which to calculate next vblank time
* @vblanktime: pointer to time to receive the next vblank timestamp.
*
* Calculate the expected time of the start of the next vblank period,
* based on the time of the previous vblank and the frame duration.
*/
int drm_crtc_next_vblank_start(struct drm_crtc *crtc, ktime_t *vblanktime)
{
unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank;
struct drm_display_mode *mode;
u64 vblank_start;
if (!drm_dev_has_vblank(crtc->dev))
return -EINVAL;
vblank = &crtc->dev->vblank[pipe];
mode = &vblank->hwmode;
if (!vblank->framedur_ns || !vblank->linedur_ns)
return -EINVAL;
if (!drm_crtc_get_last_vbltimestamp(crtc, vblanktime, false))
return -EINVAL;
vblank_start = DIV_ROUND_DOWN_ULL(
(u64)vblank->framedur_ns * mode->crtc_vblank_start,
mode->crtc_vtotal);
*vblanktime = ktime_add(*vblanktime, ns_to_ktime(vblank_start));
return 0;
}
EXPORT_SYMBOL(drm_crtc_next_vblank_start);
static void send_vblank_event(struct drm_device *dev,
struct drm_pending_vblank_event *e,
u64 seq, ktime_t now)
{
struct timespec64 tv;
switch (e->event.base.type) {
case DRM_EVENT_VBLANK:
case DRM_EVENT_FLIP_COMPLETE:
tv = ktime_to_timespec64(now);
e->event.vbl.sequence = seq;
/*
* e->event is a user space structure, with hardcoded unsigned
* 32-bit seconds/microseconds. This is safe as we always use
* monotonic timestamps since linux-4.15
*/
e->event.vbl.tv_sec = tv.tv_sec;
e->event.vbl.tv_usec = tv.tv_nsec / 1000;
break;
case DRM_EVENT_CRTC_SEQUENCE:
if (seq)
e->event.seq.sequence = seq;
e->event.seq.time_ns = ktime_to_ns(now);
break;
}
trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe, seq);
/*
* Use the same timestamp for any associated fence signal to avoid
* mismatch in timestamps for vsync & fence events triggered by the
* same HW event. Frameworks like SurfaceFlinger in Android expect the
* retire-fence timestamp to match the HW vsync exactly, as they use it
* for their software vsync modeling.
*/
drm_send_event_timestamp_locked(dev, &e->base, now);
}
/**
* drm_crtc_arm_vblank_event - arm vblank event after pageflip
* @crtc: the source CRTC of the vblank event
* @e: the event to send
*
* A lot of drivers need to generate vblank events for the very next vblank
* interrupt. For example when the page flip interrupt happens when the page
* flip gets armed, but not when it actually executes within the next vblank
* period. This helper function implements exactly the required vblank arming
* behaviour.
*
* NOTE: Drivers using this to send out the &drm_crtc_state.event as part of an
* atomic commit must ensure that the next vblank happens at exactly the same
* time as the atomic commit is committed to the hardware. This function itself
* does **not** protect against the next vblank interrupt racing with either this
* function call or the atomic commit operation. A possible sequence could be:
*
* 1. Driver commits new hardware state into vblank-synchronized registers.
* 2. A vblank happens, committing the hardware state. Also the corresponding
* vblank interrupt is fired off and fully processed by the interrupt
* handler.
* 3. The atomic commit operation proceeds to call drm_crtc_arm_vblank_event().
* 4. The event is only sent out for the next vblank, which is wrong.
*
* An equivalent race can happen when the driver calls
* drm_crtc_arm_vblank_event() before writing out the new hardware state.
*
* The only way to make this work safely is to prevent the vblank from firing
* (and the hardware from committing anything else) until the entire atomic
* commit sequence has run to completion. If the hardware does not have such a
* feature (e.g. using a "go" bit), then it is unsafe to use this function.
* Instead drivers need to manually send out the event from their interrupt
* handler by calling drm_crtc_send_vblank_event() and make sure that there's no
* possible race with the hardware committing the atomic update.
*
* Caller must hold a vblank reference for the event @e acquired by a
* drm_crtc_vblank_get(), which will be dropped when the next vblank arrives.
*/
void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
assert_spin_locked(&dev->event_lock);
e->pipe = pipe;
e->sequence = drm_crtc_accurate_vblank_count(crtc) + 1;
list_add_tail(&e->base.link, &dev->vblank_event_list);
}
EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
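/*
 * Sketch of the usual atomic-commit pattern built on the helpers above and
 * below (driver specifics assumed, not prescribed): arm the event when a
 * vblank reference can be taken, otherwise send it out immediately.
 *
 *	struct drm_pending_vblank_event *event = crtc->state->event;
 *
 *	if (event) {
 *		crtc->state->event = NULL;
 *
 *		spin_lock_irq(&crtc->dev->event_lock);
 *		if (drm_crtc_vblank_get(crtc) == 0)
 *			drm_crtc_arm_vblank_event(crtc, event);
 *		else
 *			drm_crtc_send_vblank_event(crtc, event);
 *		spin_unlock_irq(&crtc->dev->event_lock);
 *	}
 */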
/**
* drm_crtc_send_vblank_event - helper to send vblank event after pageflip
* @crtc: the source CRTC of the vblank event
* @e: the event to send
*
* Updates sequence # and timestamp on event for the most recently processed
* vblank, and sends it to userspace. Caller must hold event lock.
*
* See drm_crtc_arm_vblank_event() for a helper which can be used in certain
* situations, especially to send out events for atomic commit operations.
*/
void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e)
{
struct drm_device *dev = crtc->dev;
u64 seq;
unsigned int pipe = drm_crtc_index(crtc);
ktime_t now;
if (drm_dev_has_vblank(dev)) {
seq = drm_vblank_count_and_time(dev, pipe, &now);
} else {
seq = 0;
now = ktime_get();
}
e->pipe = pipe;
send_vblank_event(dev, e, seq, now);
}
EXPORT_SYMBOL(drm_crtc_send_vblank_event);
static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
{
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
if (drm_WARN_ON(dev, !crtc))
return 0;
if (crtc->funcs->enable_vblank)
return crtc->funcs->enable_vblank(crtc);
}
#ifdef CONFIG_DRM_LEGACY
else if (dev->driver->enable_vblank) {
return dev->driver->enable_vblank(dev, pipe);
}
#endif
return -EINVAL;
}
static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
int ret = 0;
assert_spin_locked(&dev->vbl_lock);
spin_lock(&dev->vblank_time_lock);
if (!vblank->enabled) {
/*
* Enable vblank irqs under vblank_time_lock protection.
* All vblank count & timestamp updates are held off
* until we are done reinitializing master counter and
* timestamps. Filter code in drm_handle_vblank() will
* prevent double-accounting of same vblank interval.
*/
ret = __enable_vblank(dev, pipe);
drm_dbg_core(dev, "enabling vblank on crtc %u, ret: %d\n",
pipe, ret);
if (ret) {
atomic_dec(&vblank->refcount);
} else {
drm_update_vblank_count(dev, pipe, 0);
/* drm_update_vblank_count() includes a wmb so we just
* need to ensure that the compiler emits the write
* to mark the vblank as enabled after the call
* to drm_update_vblank_count().
*/
WRITE_ONCE(vblank->enabled, true);
}
}
spin_unlock(&dev->vblank_time_lock);
return ret;
}
int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
int ret = 0;
if (!drm_dev_has_vblank(dev))
return -EINVAL;
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return -EINVAL;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
if (atomic_add_return(1, &vblank->refcount) == 1) {
ret = drm_vblank_enable(dev, pipe);
} else {
if (!vblank->enabled) {
atomic_dec(&vblank->refcount);
ret = -EINVAL;
}
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
return ret;
}
/**
* drm_crtc_vblank_get - get a reference count on vblank events
* @crtc: which CRTC to own
*
* Acquire a reference count on vblank events to avoid having them disabled
* while in use.
*
* Returns:
* Zero on success or a negative error code on failure.
*/
int drm_crtc_vblank_get(struct drm_crtc *crtc)
{
return drm_vblank_get(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_get);
void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
if (drm_WARN_ON(dev, atomic_read(&vblank->refcount) == 0))
return;
/* Last user schedules interrupt disable */
if (atomic_dec_and_test(&vblank->refcount)) {
if (drm_vblank_offdelay == 0)
return;
else if (drm_vblank_offdelay < 0)
vblank_disable_fn(&vblank->disable_timer);
else if (!dev->vblank_disable_immediate)
mod_timer(&vblank->disable_timer,
jiffies + ((drm_vblank_offdelay * HZ)/1000));
}
}
/**
* drm_crtc_vblank_put - give up ownership of vblank events
* @crtc: which counter to give up
*
* Release ownership of a given vblank counter, turning off interrupts
* if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
*/
void drm_crtc_vblank_put(struct drm_crtc *crtc)
{
drm_vblank_put(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_put);
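/*
 * Typical reference pattern (a sketch, driver specifics assumed): take a
 * reference before programming work that completes at a later vblank and
 * drop it once the corresponding interrupt or event has been handled, so
 * the interrupt stays enabled in between.
 *
 *	if (drm_crtc_vblank_get(crtc))
 *		return -EINVAL;
 *	foo_program_flip(crtc, fb);
 *
 * Later, from the flip-done/vblank handler:
 *
 *	drm_crtc_vblank_put(crtc);
 */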
/**
* drm_wait_one_vblank - wait for one vblank
* @dev: DRM device
* @pipe: CRTC index
*
* This waits for one vblank to pass on @pipe, using the irq driver interfaces.
* It is a failure to call this when the vblank irq for @pipe is disabled, e.g.
* due to lack of driver support or because the crtc is off.
*
* This is the legacy version of drm_crtc_wait_one_vblank().
*/
void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
int ret;
u64 last;
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
ret = drm_vblank_get(dev, pipe);
if (drm_WARN(dev, ret, "vblank not available on crtc %i, ret=%i\n",
pipe, ret))
return;
last = drm_vblank_count(dev, pipe);
ret = wait_event_timeout(vblank->queue,
last != drm_vblank_count(dev, pipe),
msecs_to_jiffies(100));
drm_WARN(dev, ret == 0, "vblank wait timed out on crtc %i\n", pipe);
drm_vblank_put(dev, pipe);
}
EXPORT_SYMBOL(drm_wait_one_vblank);
/**
* drm_crtc_wait_one_vblank - wait for one vblank
* @crtc: DRM crtc
*
* This waits for one vblank to pass on @crtc, using the irq driver interfaces.
* It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
* due to lack of driver support or because the crtc is off.
*/
void drm_crtc_wait_one_vblank(struct drm_crtc *crtc)
{
drm_wait_one_vblank(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
/**
* drm_crtc_vblank_off - disable vblank events on a CRTC
* @crtc: CRTC in question
*
* Drivers can use this function to shut down the vblank interrupt handling when
* disabling a crtc. This function ensures that the latest vblank frame count is
* stored so that drm_crtc_vblank_on() can restore it again.
*
* Drivers must use this function when the hardware vblank counter can get
* reset, e.g. when suspending or disabling the @crtc in general.
*/
void drm_crtc_vblank_off(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct drm_pending_vblank_event *e, *t;
ktime_t now;
u64 seq;
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
/*
* Grab event_lock early to prevent vblank work from being scheduled
* while we're in the middle of shutting down vblank interrupts
*/
spin_lock_irq(&dev->event_lock);
spin_lock(&dev->vbl_lock);
drm_dbg_vbl(dev, "crtc %d, vblank enabled %d, inmodeset %d\n",
pipe, vblank->enabled, vblank->inmodeset);
/* Avoid redundant vblank disables without previous
* drm_crtc_vblank_on(). */
if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
drm_vblank_disable_and_save(dev, pipe);
wake_up(&vblank->queue);
/*
* Prevent subsequent drm_vblank_get() from re-enabling
* the vblank interrupt by bumping the refcount.
*/
if (!vblank->inmodeset) {
atomic_inc(&vblank->refcount);
vblank->inmodeset = 1;
}
spin_unlock(&dev->vbl_lock);
/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, pipe, &now);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != pipe)
continue;
drm_dbg_core(dev, "Sending premature vblank event on disable: "
"wanted %llu, current %llu\n",
e->sequence, seq);
list_del(&e->base.link);
drm_vblank_put(dev, pipe);
send_vblank_event(dev, e, seq, now);
}
/* Cancel any leftover pending vblank work */
drm_vblank_cancel_pending_works(vblank);
spin_unlock_irq(&dev->event_lock);
/* Will be reset by the modeset helpers when re-enabling the crtc by
* calling drm_calc_timestamping_constants(). */
vblank->hwmode.crtc_clock = 0;
/* Wait for any vblank work that's still executing to finish */
drm_vblank_flush_worker(vblank);
}
EXPORT_SYMBOL(drm_crtc_vblank_off);
/**
* drm_crtc_vblank_reset - reset vblank state to off on a CRTC
* @crtc: CRTC in question
*
* Drivers can use this function to reset the vblank state to off at load time.
* Drivers should use this together with the drm_crtc_vblank_off() and
* drm_crtc_vblank_on() functions. The difference compared to
* drm_crtc_vblank_off() is that this function doesn't save the vblank counter
* and hence doesn't need to call any driver hooks.
*
* This is useful for recovering driver state e.g. on driver load, or on resume.
*/
void drm_crtc_vblank_reset(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
spin_lock_irq(&dev->vbl_lock);
/*
* Prevent subsequent drm_vblank_get() from enabling the vblank
* interrupt by bumping the refcount.
*/
if (!vblank->inmodeset) {
atomic_inc(&vblank->refcount);
vblank->inmodeset = 1;
}
spin_unlock_irq(&dev->vbl_lock);
drm_WARN_ON(dev, !list_empty(&dev->vblank_event_list));
drm_WARN_ON(dev, !list_empty(&vblank->pending_work));
}
EXPORT_SYMBOL(drm_crtc_vblank_reset);
/**
* drm_crtc_set_max_vblank_count - configure the hw max vblank counter value
* @crtc: CRTC in question
* @max_vblank_count: max hardware vblank counter value
*
* Update the maximum hardware vblank counter value for @crtc
* at runtime. Useful for hardware where the operation of the
* hardware vblank counter depends on the currently active
* display configuration.
*
* For example, if the hardware vblank counter does not work
* when a specific connector is active the maximum can be set
* to zero. And when that specific connector isn't active the
* maximum can again be set to the appropriate non-zero value.
*
* If used, must be called before drm_crtc_vblank_on().
*/
void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
u32 max_vblank_count)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
drm_WARN_ON(dev, dev->max_vblank_count);
drm_WARN_ON(dev, !READ_ONCE(vblank->inmodeset));
vblank->max_vblank_count = max_vblank_count;
}
EXPORT_SYMBOL(drm_crtc_set_max_vblank_count);
/**
* drm_crtc_vblank_on - enable vblank events on a CRTC
* @crtc: CRTC in question
*
* This function restores the vblank interrupt state captured with
* drm_crtc_vblank_off() again and is generally called when enabling @crtc. Note
* that calls to drm_crtc_vblank_on() and drm_crtc_vblank_off() can be
* unbalanced and so can also be unconditionally called in driver load code to
* reflect the current hardware state of the crtc.
*/
void drm_crtc_vblank_on(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
spin_lock_irq(&dev->vbl_lock);
drm_dbg_vbl(dev, "crtc %d, vblank enabled %d, inmodeset %d\n",
pipe, vblank->enabled, vblank->inmodeset);
/* Drop our private "prevent drm_vblank_get" refcount */
if (vblank->inmodeset) {
atomic_dec(&vblank->refcount);
vblank->inmodeset = 0;
}
drm_reset_vblank_timestamp(dev, pipe);
/*
* re-enable interrupts if there are users left, or the
* user wishes vblank interrupts to be enabled all the time.
*/
if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
drm_WARN_ON(dev, drm_vblank_enable(dev, pipe));
spin_unlock_irq(&dev->vbl_lock);
}
EXPORT_SYMBOL(drm_crtc_vblank_on);
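/*
 * In atomic drivers the off/on pair typically brackets the CRTC
 * enable/disable hooks (a sketch; the foo_hw_* helpers are hypothetical):
 *
 *	static void foo_crtc_atomic_enable(struct drm_crtc *crtc,
 *					   struct drm_atomic_state *state)
 *	{
 *		foo_hw_enable(crtc);
 *		drm_crtc_vblank_on(crtc);
 *	}
 *
 *	static void foo_crtc_atomic_disable(struct drm_crtc *crtc,
 *					    struct drm_atomic_state *state)
 *	{
 *		drm_crtc_vblank_off(crtc);
 *		foo_hw_disable(crtc);
 *	}
 */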
static void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
{
ktime_t t_vblank;
struct drm_vblank_crtc *vblank;
int framedur_ns;
u64 diff_ns;
u32 cur_vblank, diff = 1;
int count = DRM_TIMESTAMP_MAXRETRIES;
u32 max_vblank_count = drm_max_vblank_count(dev, pipe);
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
assert_spin_locked(&dev->vbl_lock);
assert_spin_locked(&dev->vblank_time_lock);
vblank = &dev->vblank[pipe];
drm_WARN_ONCE(dev,
drm_debug_enabled(DRM_UT_VBL) && !vblank->framedur_ns,
"Cannot compute missed vblanks without frame duration\n");
framedur_ns = vblank->framedur_ns;
do {
cur_vblank = __get_vblank_counter(dev, pipe);
drm_get_last_vbltimestamp(dev, pipe, &t_vblank, false);
} while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0);
diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time));
if (framedur_ns)
diff = DIV_ROUND_CLOSEST_ULL(diff_ns, framedur_ns);
drm_dbg_vbl(dev,
"missed %d vblanks in %lld ns, frame duration=%d ns, hw_diff=%d\n",
diff, diff_ns, framedur_ns, cur_vblank - vblank->last);
vblank->last = (cur_vblank - diff) & max_vblank_count;
}
/**
* drm_crtc_vblank_restore - estimate missed vblanks and update vblank count.
* @crtc: CRTC in question
*
* Power management features can cause frame counter resets between vblank
* disable and enable. Drivers can use this function in their
* &drm_crtc_funcs.enable_vblank implementation to estimate missed vblanks since
* the last &drm_crtc_funcs.disable_vblank using timestamps and update the
* vblank counter.
*
* Note that drivers must have race-free high-precision timestamping support,
* i.e. &drm_crtc_funcs.get_vblank_timestamp must be hooked up and
* &drm_driver.vblank_disable_immediate must be set to indicate the
* time-stamping functions are race-free against vblank hardware counter
* increments.
*/
void drm_crtc_vblank_restore(struct drm_crtc *crtc)
{
WARN_ON_ONCE(!crtc->funcs->get_vblank_timestamp);
WARN_ON_ONCE(!crtc->dev->vblank_disable_immediate);
drm_vblank_restore(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_restore);
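/*
 * Sketch of using the restore helper from a driver's enable_vblank hook on
 * hardware whose frame counter may reset while vblank irqs are disabled
 * (for example across a runtime power gate); the foo_* names are
 * hypothetical:
 *
 *	static int foo_enable_vblank(struct drm_crtc *crtc)
 *	{
 *		if (foo_counter_may_have_reset(crtc))
 *			drm_crtc_vblank_restore(crtc);
 *		foo_unmask_vblank_irq(crtc);
 *		return 0;
 *	}
 */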
static void drm_legacy_vblank_pre_modeset(struct drm_device *dev,
unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
/* vblank is not initialized (IRQ not installed ?), or has been freed */
if (!drm_dev_has_vblank(dev))
return;
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
/*
* To avoid all the problems that might happen if interrupts
* were enabled/disabled around or between these calls, we just
* have the kernel take a reference on the CRTC (just once though
* to avoid corrupting the count if multiple, mismatched calls occur),
* so that interrupts remain enabled in the interim.
*/
if (!vblank->inmodeset) {
vblank->inmodeset = 0x1;
if (drm_vblank_get(dev, pipe) == 0)
vblank->inmodeset |= 0x2;
}
}
static void drm_legacy_vblank_post_modeset(struct drm_device *dev,
unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
/* vblank is not initialized (IRQ not installed ?), or has been freed */
if (!drm_dev_has_vblank(dev))
return;
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
if (vblank->inmodeset) {
spin_lock_irq(&dev->vbl_lock);
drm_reset_vblank_timestamp(dev, pipe);
spin_unlock_irq(&dev->vbl_lock);
if (vblank->inmodeset & 0x2)
drm_vblank_put(dev, pipe);
vblank->inmodeset = 0;
}
}
int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
unsigned int pipe;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!drm_dev_has_vblank(dev))
return 0;
/* KMS drivers handle this internally */
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return 0;
pipe = modeset->crtc;
if (pipe >= dev->num_crtcs)
return -EINVAL;
switch (modeset->cmd) {
case _DRM_PRE_MODESET:
drm_legacy_vblank_pre_modeset(dev, pipe);
break;
case _DRM_POST_MODESET:
drm_legacy_vblank_post_modeset(dev, pipe);
break;
default:
return -EINVAL;
}
return 0;
}
static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
u64 req_seq,
union drm_wait_vblank *vblwait,
struct drm_file *file_priv)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct drm_pending_vblank_event *e;
ktime_t now;
u64 seq;
int ret;
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL) {
ret = -ENOMEM;
goto err_put;
}
e->pipe = pipe;
e->event.base.type = DRM_EVENT_VBLANK;
e->event.base.length = sizeof(e->event.vbl);
e->event.vbl.user_data = vblwait->request.signal;
e->event.vbl.crtc_id = 0;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
if (crtc)
e->event.vbl.crtc_id = crtc->base.id;
}
spin_lock_irq(&dev->event_lock);
/*
* drm_crtc_vblank_off() might have been called after we called
* drm_vblank_get(). drm_crtc_vblank_off() holds event_lock around the
* vblank disable, so no need for further locking. The reference from
* drm_vblank_get() protects against vblank disable from another source.
*/
if (!READ_ONCE(vblank->enabled)) {
ret = -EINVAL;
goto err_unlock;
}
ret = drm_event_reserve_init_locked(dev, file_priv, &e->base,
&e->event.base);
if (ret)
goto err_unlock;
seq = drm_vblank_count_and_time(dev, pipe, &now);
drm_dbg_core(dev, "event on vblank count %llu, current %llu, crtc %u\n",
req_seq, seq, pipe);
trace_drm_vblank_event_queued(file_priv, pipe, req_seq);
e->sequence = req_seq;
if (drm_vblank_passed(seq, req_seq)) {
drm_vblank_put(dev, pipe);
send_vblank_event(dev, e, seq, now);
vblwait->reply.sequence = seq;
} else {
/* drm_handle_vblank_events will call drm_vblank_put */
list_add_tail(&e->base.link, &dev->vblank_event_list);
vblwait->reply.sequence = req_seq;
}
spin_unlock_irq(&dev->event_lock);
return 0;
err_unlock:
spin_unlock_irq(&dev->event_lock);
kfree(e);
err_put:
drm_vblank_put(dev, pipe);
return ret;
}
static bool drm_wait_vblank_is_query(union drm_wait_vblank *vblwait)
{
if (vblwait->request.sequence)
return false;
return _DRM_VBLANK_RELATIVE ==
(vblwait->request.type & (_DRM_VBLANK_TYPES_MASK |
_DRM_VBLANK_EVENT |
_DRM_VBLANK_NEXTONMISS));
}
/*
* Widen a 32-bit param to 64-bits.
*
* \param narrow 32-bit value (missing upper 32 bits)
* \param near 64-bit value that the result should be 'close' to
*
* This function returns a 64-bit value using the lower 32-bits from
* 'narrow' and constructing the upper 32-bits so that the result is
* as close as possible to 'near'.
*/
static u64 widen_32_to_64(u32 narrow, u64 near)
{
return near + (s32) (narrow - near);
}
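/*
 * Worked example of the widening above: with near = 0x100000010 and
 * narrow = 0x00000005, (narrow - near) truncated to s32 is -11, so the
 * result is 0x100000005, the 64-bit value closest to 'near' whose low
 * 32 bits equal 'narrow'.
 */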
static void drm_wait_vblank_reply(struct drm_device *dev, unsigned int pipe,
struct drm_wait_vblank_reply *reply)
{
ktime_t now;
struct timespec64 ts;
/*
* drm_wait_vblank_reply is a UAPI structure that uses 'long'
* to store the seconds. This is safe as we always use monotonic
* timestamps since linux-4.15.
*/
reply->sequence = drm_vblank_count_and_time(dev, pipe, &now);
ts = ktime_to_timespec64(now);
reply->tval_sec = (u32)ts.tv_sec;
reply->tval_usec = ts.tv_nsec / 1000;
}
static bool drm_wait_vblank_supported(struct drm_device *dev)
{
#if IS_ENABLED(CONFIG_DRM_LEGACY)
if (unlikely(drm_core_check_feature(dev, DRIVER_LEGACY)))
return dev->irq_enabled;
#endif
return drm_dev_has_vblank(dev);
}
int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_crtc *crtc;
struct drm_vblank_crtc *vblank;
union drm_wait_vblank *vblwait = data;
int ret;
u64 req_seq, seq;
unsigned int pipe_index;
unsigned int flags, pipe, high_pipe;
if (!drm_wait_vblank_supported(dev))
return -EOPNOTSUPP;
if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
return -EINVAL;
if (vblwait->request.type &
~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
_DRM_VBLANK_HIGH_CRTC_MASK)) {
drm_dbg_core(dev,
"Unsupported type value 0x%x, supported mask 0x%x\n",
vblwait->request.type,
(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
_DRM_VBLANK_HIGH_CRTC_MASK));
return -EINVAL;
}
flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
high_pipe = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
if (high_pipe)
pipe_index = high_pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
else
pipe_index = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
/* Convert lease-relative crtc index into global crtc index */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
pipe = 0;
drm_for_each_crtc(crtc, dev) {
if (drm_lease_held(file_priv, crtc->base.id)) {
if (pipe_index == 0)
break;
pipe_index--;
}
pipe++;
}
} else {
pipe = pipe_index;
}
if (pipe >= dev->num_crtcs)
return -EINVAL;
vblank = &dev->vblank[pipe];
/* If the counter is currently enabled and accurate, short-circuit
* queries to return the cached timestamp of the last vblank.
*/
if (dev->vblank_disable_immediate &&
drm_wait_vblank_is_query(vblwait) &&
READ_ONCE(vblank->enabled)) {
drm_wait_vblank_reply(dev, pipe, &vblwait->reply);
return 0;
}
ret = drm_vblank_get(dev, pipe);
if (ret) {
drm_dbg_core(dev,
"crtc %d failed to acquire vblank counter, %d\n",
pipe, ret);
return ret;
}
seq = drm_vblank_count(dev, pipe);
switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
case _DRM_VBLANK_RELATIVE:
req_seq = seq + vblwait->request.sequence;
vblwait->request.sequence = req_seq;
vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
break;
case _DRM_VBLANK_ABSOLUTE:
req_seq = widen_32_to_64(vblwait->request.sequence, seq);
break;
default:
ret = -EINVAL;
goto done;
}
if ((flags & _DRM_VBLANK_NEXTONMISS) &&
drm_vblank_passed(seq, req_seq)) {
req_seq = seq + 1;
vblwait->request.type &= ~_DRM_VBLANK_NEXTONMISS;
vblwait->request.sequence = req_seq;
}
if (flags & _DRM_VBLANK_EVENT) {
/* must hold on to the vblank ref until the event fires
* drm_vblank_put will be called asynchronously
*/
return drm_queue_vblank_event(dev, pipe, req_seq, vblwait, file_priv);
}
if (req_seq != seq) {
int wait;
drm_dbg_core(dev, "waiting on vblank count %llu, crtc %u\n",
req_seq, pipe);
wait = wait_event_interruptible_timeout(vblank->queue,
drm_vblank_passed(drm_vblank_count(dev, pipe), req_seq) ||
!READ_ONCE(vblank->enabled),
msecs_to_jiffies(3000));
switch (wait) {
case 0:
/* timeout */
ret = -EBUSY;
break;
case -ERESTARTSYS:
/* interrupted by signal */
ret = -EINTR;
break;
default:
ret = 0;
break;
}
}
if (ret != -EINTR) {
drm_wait_vblank_reply(dev, pipe, &vblwait->reply);
drm_dbg_core(dev, "crtc %d returning %u to client\n",
pipe, vblwait->reply.sequence);
} else {
drm_dbg_core(dev, "crtc %d vblank wait interrupted by signal\n",
pipe);
}
done:
drm_vblank_put(dev, pipe);
return ret;
}
static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
{
struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
bool high_prec = false;
struct drm_pending_vblank_event *e, *t;
ktime_t now;
u64 seq;
assert_spin_locked(&dev->event_lock);
seq = drm_vblank_count_and_time(dev, pipe, &now);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != pipe)
continue;
if (!drm_vblank_passed(seq, e->sequence))
continue;
drm_dbg_core(dev, "vblank event on %llu, current %llu\n",
e->sequence, seq);
list_del(&e->base.link);
drm_vblank_put(dev, pipe);
send_vblank_event(dev, e, seq, now);
}
if (crtc && crtc->funcs->get_vblank_timestamp)
high_prec = true;
trace_drm_vblank_event(pipe, seq, now, high_prec);
}
/**
* drm_handle_vblank - handle a vblank event
* @dev: DRM device
* @pipe: index of CRTC where this event occurred
*
* Drivers should call this routine in their vblank interrupt handlers to
* update the vblank counter and send any signals that may be pending.
*
* This is the legacy version of drm_crtc_handle_vblank().
*/
bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
bool disable_irq;
if (drm_WARN_ON_ONCE(dev, !drm_dev_has_vblank(dev)))
return false;
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return false;
spin_lock_irqsave(&dev->event_lock, irqflags);
/* Need timestamp lock to prevent concurrent execution with
* vblank enable/disable, as this would cause inconsistent
* or corrupted timestamps and vblank counts.
*/
spin_lock(&dev->vblank_time_lock);
/* Vblank irq handling disabled. Nothing to do. */
if (!vblank->enabled) {
spin_unlock(&dev->vblank_time_lock);
spin_unlock_irqrestore(&dev->event_lock, irqflags);
return false;
}
drm_update_vblank_count(dev, pipe, true);
spin_unlock(&dev->vblank_time_lock);
wake_up(&vblank->queue);
/* With instant-off, we defer disabling the interrupt until after
* we finish processing the following vblank after all events have
* been signaled. The disable has to be last (after
* drm_handle_vblank_events) so that the timestamp is always accurate.
*/
disable_irq = (dev->vblank_disable_immediate &&
drm_vblank_offdelay > 0 &&
!atomic_read(&vblank->refcount));
drm_handle_vblank_events(dev, pipe);
drm_handle_vblank_works(vblank);
spin_unlock_irqrestore(&dev->event_lock, irqflags);
if (disable_irq)
vblank_disable_fn(&vblank->disable_timer);
return true;
}
EXPORT_SYMBOL(drm_handle_vblank);
/**
* drm_crtc_handle_vblank - handle a vblank event
* @crtc: where this event occurred
*
* Drivers should call this routine in their vblank interrupt handlers to
* update the vblank counter and send any signals that may be pending.
*
* This is the native KMS version of drm_handle_vblank().
*
* Note that for a given vblank counter value drm_crtc_handle_vblank()
* and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time()
* provide a barrier: Any writes done before calling
* drm_crtc_handle_vblank() will be visible to callers of the later
* functions, if the vblank count is the same or a later one.
*
* See also &drm_vblank_crtc.count.
*
* Returns:
* True if the event was successfully handled, false on failure.
*/
bool drm_crtc_handle_vblank(struct drm_crtc *crtc)
{
return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_handle_vblank);
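/*
 * Hedged usage sketch (not part of the original file): a hypothetical
 * driver's vblank interrupt handler forwarding the event to the DRM core.
 * The foo_* name, passing the CRTC as the IRQ cookie and the implied
 * #include <linux/interrupt.h> are assumptions made purely for illustration.
 */
static irqreturn_t foo_vblank_irq_sketch(int irq, void *arg)
{
	struct drm_crtc *crtc = arg;	/* assumed: CRTC registered as IRQ cookie */

	drm_crtc_handle_vblank(crtc);

	return IRQ_HANDLED;
}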
/*
* Get crtc VBLANK count.
*
* \param dev DRM device
* \param data user argument, pointing to a drm_crtc_get_sequence structure.
* \param file_priv drm file private for the user's open file descriptor
*/
int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_crtc *crtc;
struct drm_vblank_crtc *vblank;
int pipe;
struct drm_crtc_get_sequence *get_seq = data;
ktime_t now;
bool vblank_enabled;
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
if (!drm_dev_has_vblank(dev))
return -EOPNOTSUPP;
crtc = drm_crtc_find(dev, file_priv, get_seq->crtc_id);
if (!crtc)
return -ENOENT;
pipe = drm_crtc_index(crtc);
vblank = &dev->vblank[pipe];
vblank_enabled = dev->vblank_disable_immediate && READ_ONCE(vblank->enabled);
if (!vblank_enabled) {
ret = drm_crtc_vblank_get(crtc);
if (ret) {
drm_dbg_core(dev,
"crtc %d failed to acquire vblank counter, %d\n",
pipe, ret);
return ret;
}
}
drm_modeset_lock(&crtc->mutex, NULL);
if (crtc->state)
get_seq->active = crtc->state->enable;
else
get_seq->active = crtc->enabled;
drm_modeset_unlock(&crtc->mutex);
get_seq->sequence = drm_vblank_count_and_time(dev, pipe, &now);
get_seq->sequence_ns = ktime_to_ns(now);
if (!vblank_enabled)
drm_crtc_vblank_put(crtc);
return 0;
}
/*
 * Queue an event for a VBLANK sequence
*
* \param dev DRM device
* \param data user argument, pointing to a drm_crtc_queue_sequence structure.
* \param file_priv drm file private for the user's open file descriptor
*/
int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_crtc *crtc;
struct drm_vblank_crtc *vblank;
int pipe;
struct drm_crtc_queue_sequence *queue_seq = data;
ktime_t now;
struct drm_pending_vblank_event *e;
u32 flags;
u64 seq;
u64 req_seq;
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
if (!drm_dev_has_vblank(dev))
return -EOPNOTSUPP;
crtc = drm_crtc_find(dev, file_priv, queue_seq->crtc_id);
if (!crtc)
return -ENOENT;
flags = queue_seq->flags;
/* Check valid flag bits */
if (flags & ~(DRM_CRTC_SEQUENCE_RELATIVE|
DRM_CRTC_SEQUENCE_NEXT_ON_MISS))
return -EINVAL;
pipe = drm_crtc_index(crtc);
vblank = &dev->vblank[pipe];
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL)
return -ENOMEM;
ret = drm_crtc_vblank_get(crtc);
if (ret) {
drm_dbg_core(dev,
"crtc %d failed to acquire vblank counter, %d\n",
pipe, ret);
goto err_free;
}
seq = drm_vblank_count_and_time(dev, pipe, &now);
req_seq = queue_seq->sequence;
if (flags & DRM_CRTC_SEQUENCE_RELATIVE)
req_seq += seq;
if ((flags & DRM_CRTC_SEQUENCE_NEXT_ON_MISS) && drm_vblank_passed(seq, req_seq))
req_seq = seq + 1;
e->pipe = pipe;
e->event.base.type = DRM_EVENT_CRTC_SEQUENCE;
e->event.base.length = sizeof(e->event.seq);
e->event.seq.user_data = queue_seq->user_data;
spin_lock_irq(&dev->event_lock);
/*
* drm_crtc_vblank_off() might have been called after we called
* drm_crtc_vblank_get(). drm_crtc_vblank_off() holds event_lock around the
* vblank disable, so no need for further locking. The reference from
* drm_crtc_vblank_get() protects against vblank disable from another source.
*/
if (!READ_ONCE(vblank->enabled)) {
ret = -EINVAL;
goto err_unlock;
}
ret = drm_event_reserve_init_locked(dev, file_priv, &e->base,
&e->event.base);
if (ret)
goto err_unlock;
e->sequence = req_seq;
if (drm_vblank_passed(seq, req_seq)) {
drm_crtc_vblank_put(crtc);
send_vblank_event(dev, e, seq, now);
queue_seq->sequence = seq;
} else {
/* drm_handle_vblank_events will call drm_vblank_put */
list_add_tail(&e->base.link, &dev->vblank_event_list);
queue_seq->sequence = req_seq;
}
spin_unlock_irq(&dev->event_lock);
return 0;
err_unlock:
spin_unlock_irq(&dev->event_lock);
drm_crtc_vblank_put(crtc);
err_free:
kfree(e);
return ret;
}
| linux-master | drivers/gpu/drm/drm_vblank.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/component.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/media-bus-format.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
/**
* DOC: overview
*
* A set of helper functions to aid DRM drivers in parsing standard DT
* properties.
*/
/**
* drm_of_crtc_port_mask - find the mask of a registered CRTC by port OF node
* @dev: DRM device
* @port: port OF node
*
* Given a port OF node, return the possible mask of the corresponding
* CRTC within a device's list of CRTCs. Returns zero if not found.
*/
uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
struct device_node *port)
{
unsigned int index = 0;
struct drm_crtc *tmp;
drm_for_each_crtc(tmp, dev) {
if (tmp->port == port)
return 1 << index;
index++;
}
return 0;
}
EXPORT_SYMBOL(drm_of_crtc_port_mask);
/**
* drm_of_find_possible_crtcs - find the possible CRTCs for an encoder port
* @dev: DRM device
* @port: encoder port to scan for endpoints
*
* Scan all endpoints attached to a port, locate their attached CRTCs,
* and generate the DRM mask of CRTCs which may be attached to this
* encoder.
*
* See Documentation/devicetree/bindings/graph.txt for the bindings.
*/
uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
struct device_node *port)
{
struct device_node *remote_port, *ep;
uint32_t possible_crtcs = 0;
for_each_endpoint_of_node(port, ep) {
remote_port = of_graph_get_remote_port(ep);
if (!remote_port) {
of_node_put(ep);
return 0;
}
possible_crtcs |= drm_of_crtc_port_mask(dev, remote_port);
of_node_put(remote_port);
}
return possible_crtcs;
}
EXPORT_SYMBOL(drm_of_find_possible_crtcs);
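/*
 * Hedged usage sketch (not part of the original file): filling in an
 * encoder's possible_crtcs mask from its OF port node at bind time. The
 * foo_* name and the fallback to the first CRTC are assumptions for
 * illustration only.
 */
static void foo_encoder_set_possible_crtcs_sketch(struct drm_device *drm,
						  struct drm_encoder *encoder,
						  struct device_node *port)
{
	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, port);

	/* assumption: fall back to the first CRTC if nothing was found */
	if (!encoder->possible_crtcs)
		encoder->possible_crtcs = 1;
}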
/**
* drm_of_component_match_add - Add a component helper OF node match rule
* @master: master device
* @matchptr: component match pointer
* @compare: compare function used for matching component
* @node: of_node
*/
void drm_of_component_match_add(struct device *master,
struct component_match **matchptr,
int (*compare)(struct device *, void *),
struct device_node *node)
{
of_node_get(node);
component_match_add_release(master, matchptr, component_release_of,
compare, node);
}
EXPORT_SYMBOL_GPL(drm_of_component_match_add);
/**
* drm_of_component_probe - Generic probe function for a component based master
* @dev: master device containing the OF node
* @compare_of: compare function used for matching components
* @m_ops: component master ops to be used
*
* Parse the platform device OF node and bind all the components associated
* with the master. Interface ports are added before the encoders in order to
 * satisfy their .bind requirements.
* See Documentation/devicetree/bindings/graph.txt for the bindings.
*
* Returns zero if successful, or one of the standard error codes if it fails.
*/
int drm_of_component_probe(struct device *dev,
int (*compare_of)(struct device *, void *),
const struct component_master_ops *m_ops)
{
struct device_node *ep, *port, *remote;
struct component_match *match = NULL;
int i;
if (!dev->of_node)
return -EINVAL;
/*
* Bind the crtc's ports first, so that drm_of_find_possible_crtcs()
* called from encoder's .bind callbacks works as expected
*/
for (i = 0; ; i++) {
port = of_parse_phandle(dev->of_node, "ports", i);
if (!port)
break;
if (of_device_is_available(port->parent))
drm_of_component_match_add(dev, &match, compare_of,
port);
of_node_put(port);
}
if (i == 0) {
dev_err(dev, "missing 'ports' property\n");
return -ENODEV;
}
if (!match) {
dev_err(dev, "no available port\n");
return -ENODEV;
}
/*
* For bound crtcs, bind the encoders attached to their remote endpoint
*/
for (i = 0; ; i++) {
port = of_parse_phandle(dev->of_node, "ports", i);
if (!port)
break;
if (!of_device_is_available(port->parent)) {
of_node_put(port);
continue;
}
for_each_child_of_node(port, ep) {
remote = of_graph_get_remote_port_parent(ep);
if (!remote || !of_device_is_available(remote)) {
of_node_put(remote);
continue;
} else if (!of_device_is_available(remote->parent)) {
dev_warn(dev, "parent device of %pOF is not available\n",
remote);
of_node_put(remote);
continue;
}
drm_of_component_match_add(dev, &match, compare_of,
remote);
of_node_put(remote);
}
of_node_put(port);
}
return component_master_add_with_match(dev, m_ops, match);
}
EXPORT_SYMBOL(drm_of_component_probe);
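/*
 * Hedged usage sketch (not part of the original file): a platform driver
 * probe delegating component matching to drm_of_component_probe(). The
 * foo_* names, the empty master ops placeholder and the implied
 * #include <linux/platform_device.h> are assumptions; a real driver
 * supplies its own .bind/.unbind callbacks.
 */
static int foo_compare_of_sketch(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static const struct component_master_ops foo_master_ops_sketch = {
	/* a real driver fills in .bind and .unbind here */
};

static int foo_platform_probe_sketch(struct platform_device *pdev)
{
	return drm_of_component_probe(&pdev->dev, foo_compare_of_sketch,
				      &foo_master_ops_sketch);
}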
/*
* drm_of_encoder_active_endpoint - return the active encoder endpoint
* @node: device tree node containing encoder input ports
* @encoder: drm_encoder
*
* Given an encoder device node and a drm_encoder with a connected crtc,
* parse the encoder endpoint connecting to the crtc port.
*/
int drm_of_encoder_active_endpoint(struct device_node *node,
struct drm_encoder *encoder,
struct of_endpoint *endpoint)
{
struct device_node *ep;
struct drm_crtc *crtc = encoder->crtc;
struct device_node *port;
int ret;
if (!node || !crtc)
return -EINVAL;
for_each_endpoint_of_node(node, ep) {
port = of_graph_get_remote_port(ep);
of_node_put(port);
if (port == crtc->port) {
ret = of_graph_parse_endpoint(ep, endpoint);
of_node_put(ep);
return ret;
}
}
return -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
/**
* drm_of_find_panel_or_bridge - return connected panel or bridge device
* @np: device tree node containing encoder output ports
* @port: port in the device tree node
* @endpoint: endpoint in the device tree node
* @panel: pointer to hold returned drm_panel
* @bridge: pointer to hold returned drm_bridge
*
* Given a DT node's port and endpoint number, find the connected node and
 * return either the associated struct drm_panel or drm_bridge device. At least
 * one of @panel or @bridge must be non-NULL.
*
* This function is deprecated and should not be used in new drivers. Use
* devm_drm_of_get_bridge() instead.
*
* Returns zero if successful, or one of the standard error codes if it fails.
*/
int drm_of_find_panel_or_bridge(const struct device_node *np,
int port, int endpoint,
struct drm_panel **panel,
struct drm_bridge **bridge)
{
int ret = -EPROBE_DEFER;
struct device_node *remote;
if (!panel && !bridge)
return -EINVAL;
if (panel)
*panel = NULL;
/*
* of_graph_get_remote_node() produces a noisy error message if port
* node isn't found and the absence of the port is a legit case here,
* so at first we silently check whether graph presents in the
* device-tree node.
*/
if (!of_graph_is_present(np))
return -ENODEV;
remote = of_graph_get_remote_node(np, port, endpoint);
if (!remote)
return -ENODEV;
if (panel) {
*panel = of_drm_find_panel(remote);
if (!IS_ERR(*panel))
ret = 0;
else
*panel = NULL;
}
/* No panel found yet, check for a bridge next. */
if (bridge) {
if (ret) {
*bridge = of_drm_find_bridge(remote);
if (*bridge)
ret = 0;
} else {
*bridge = NULL;
}
}
of_node_put(remote);
return ret;
}
EXPORT_SYMBOL_GPL(drm_of_find_panel_or_bridge);
enum drm_of_lvds_pixels {
DRM_OF_LVDS_EVEN = BIT(0),
DRM_OF_LVDS_ODD = BIT(1),
};
static int drm_of_lvds_get_port_pixels_type(struct device_node *port_node)
{
bool even_pixels =
of_property_read_bool(port_node, "dual-lvds-even-pixels");
bool odd_pixels =
of_property_read_bool(port_node, "dual-lvds-odd-pixels");
return (even_pixels ? DRM_OF_LVDS_EVEN : 0) |
(odd_pixels ? DRM_OF_LVDS_ODD : 0);
}
static int drm_of_lvds_get_remote_pixels_type(
const struct device_node *port_node)
{
struct device_node *endpoint = NULL;
int pixels_type = -EPIPE;
for_each_child_of_node(port_node, endpoint) {
struct device_node *remote_port;
int current_pt;
if (!of_node_name_eq(endpoint, "endpoint"))
continue;
remote_port = of_graph_get_remote_port(endpoint);
if (!remote_port) {
of_node_put(endpoint);
return -EPIPE;
}
current_pt = drm_of_lvds_get_port_pixels_type(remote_port);
of_node_put(remote_port);
if (pixels_type < 0)
pixels_type = current_pt;
/*
* Sanity check, ensure that all remote endpoints have the same
* pixel type. We may lift this restriction later if we need to
* support multiple sinks with different dual-link
* configurations by passing the endpoints explicitly to
* drm_of_lvds_get_dual_link_pixel_order().
*/
if (!current_pt || pixels_type != current_pt) {
of_node_put(endpoint);
return -EINVAL;
}
}
return pixels_type;
}
/**
* drm_of_lvds_get_dual_link_pixel_order - Get LVDS dual-link pixel order
* @port1: First DT port node of the Dual-link LVDS source
* @port2: Second DT port node of the Dual-link LVDS source
*
* An LVDS dual-link connection is made of two links, with even pixels
* transitting on one link, and odd pixels on the other link. This function
* returns, for two ports of an LVDS dual-link source, which port shall transmit
* the even and odd pixels, based on the requirements of the connected sink.
*
* The pixel order is determined from the dual-lvds-even-pixels and
* dual-lvds-odd-pixels properties in the sink's DT port nodes. If those
* properties are not present, or if their usage is not valid, this function
* returns -EINVAL.
*
* If either port is not connected, this function returns -EPIPE.
*
* @port1 and @port2 are typically DT sibling nodes, but may have different
* parents when, for instance, two separate LVDS encoders carry the even and odd
* pixels.
*
* Return:
* * DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS - @port1 carries even pixels and @port2
* carries odd pixels
* * DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS - @port1 carries odd pixels and @port2
* carries even pixels
* * -EINVAL - @port1 and @port2 are not connected to a dual-link LVDS sink, or
* the sink configuration is invalid
* * -EPIPE - when @port1 or @port2 are not connected
*/
int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
const struct device_node *port2)
{
int remote_p1_pt, remote_p2_pt;
if (!port1 || !port2)
return -EINVAL;
remote_p1_pt = drm_of_lvds_get_remote_pixels_type(port1);
if (remote_p1_pt < 0)
return remote_p1_pt;
remote_p2_pt = drm_of_lvds_get_remote_pixels_type(port2);
if (remote_p2_pt < 0)
return remote_p2_pt;
/*
 * A valid dual-LVDS bus is found when one remote port is marked with
 * "dual-lvds-even-pixels", and the other remote port is marked with
 * "dual-lvds-odd-pixels"; bail out if the markers are not right.
*/
if (remote_p1_pt + remote_p2_pt != DRM_OF_LVDS_EVEN + DRM_OF_LVDS_ODD)
return -EINVAL;
return remote_p1_pt == DRM_OF_LVDS_EVEN ?
DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS :
DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS;
}
EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order);
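/*
 * Hedged usage sketch (not part of the original file): an LVDS encoder
 * with two sink-facing ports asking which link carries the even pixels.
 * The port numbers 1 and 2 and the foo_* name are assumptions for
 * illustration only.
 */
static int foo_lvds_pixel_order_sketch(struct device_node *np)
{
	struct device_node *port1, *port2;
	int order;

	port1 = of_graph_get_port_by_id(np, 1);
	port2 = of_graph_get_port_by_id(np, 2);

	/* NULL ports are handled by the helper and yield -EINVAL */
	order = drm_of_lvds_get_dual_link_pixel_order(port1, port2);

	of_node_put(port1);
	of_node_put(port2);

	return order;	/* DRM_LVDS_DUAL_LINK_*_PIXELS or a negative error */
}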
/**
* drm_of_lvds_get_data_mapping - Get LVDS data mapping
* @port: DT port node of the LVDS source or sink
*
* Convert DT "data-mapping" property string value into media bus format value.
*
* Return:
* * MEDIA_BUS_FMT_RGB666_1X7X3_SPWG - data-mapping is "jeida-18"
* * MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA - data-mapping is "jeida-24"
* * MEDIA_BUS_FMT_RGB888_1X7X4_SPWG - data-mapping is "vesa-24"
* * -EINVAL - the "data-mapping" property is unsupported
* * -ENODEV - the "data-mapping" property is missing
*/
int drm_of_lvds_get_data_mapping(const struct device_node *port)
{
const char *mapping;
int ret;
ret = of_property_read_string(port, "data-mapping", &mapping);
if (ret < 0)
return -ENODEV;
if (!strcmp(mapping, "jeida-18"))
return MEDIA_BUS_FMT_RGB666_1X7X3_SPWG;
if (!strcmp(mapping, "jeida-24"))
return MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA;
if (!strcmp(mapping, "vesa-24"))
return MEDIA_BUS_FMT_RGB888_1X7X4_SPWG;
return -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_of_lvds_get_data_mapping);
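/*
 * Hedged usage sketch (not part of the original file): mapping a panel
 * port's "data-mapping" string to a media bus format, with a hypothetical
 * jeida-24 default when the property is absent. The foo_* name and the
 * fallback policy are assumptions for illustration only.
 */
static u32 foo_lvds_bus_format_sketch(const struct device_node *port)
{
	int ret = drm_of_lvds_get_data_mapping(port);

	if (ret == -ENODEV)
		return MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA;	/* assumed default */
	if (ret < 0)
		return 0;	/* unsupported mapping, let the caller decide */

	return ret;
}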
/**
* drm_of_get_data_lanes_count - Get DSI/(e)DP data lane count
* @endpoint: DT endpoint node of the DSI/(e)DP source or sink
* @min: minimum supported number of data lanes
* @max: maximum supported number of data lanes
*
* Count DT "data-lanes" property elements and check for validity.
*
* Return:
 * * min..max - positive integer count of "data-lanes" elements
 * * -ve - the "data-lanes" property is missing or invalid
 * * -EINVAL - the number of "data-lanes" elements is outside the min..max range
*/
int drm_of_get_data_lanes_count(const struct device_node *endpoint,
const unsigned int min, const unsigned int max)
{
int ret;
ret = of_property_count_u32_elems(endpoint, "data-lanes");
if (ret < 0)
return ret;
if (ret < min || ret > max)
return -EINVAL;
return ret;
}
EXPORT_SYMBOL_GPL(drm_of_get_data_lanes_count);
/**
* drm_of_get_data_lanes_count_ep - Get DSI/(e)DP data lane count by endpoint
* @port: DT port node of the DSI/(e)DP source or sink
* @port_reg: identifier (value of reg property) of the parent port node
* @reg: identifier (value of reg property) of the endpoint node
* @min: minimum supported number of data lanes
* @max: maximum supported number of data lanes
*
* Count DT "data-lanes" property elements and check for validity.
* This variant uses endpoint specifier.
*
* Return:
 * * min..max - positive integer count of "data-lanes" elements
 * * -ve - the "data-lanes" property is missing or invalid
 * * -EINVAL - the number of "data-lanes" elements is outside the min..max range
*/
int drm_of_get_data_lanes_count_ep(const struct device_node *port,
int port_reg, int reg,
const unsigned int min,
const unsigned int max)
{
struct device_node *endpoint;
int ret;
endpoint = of_graph_get_endpoint_by_regs(port, port_reg, reg);
ret = drm_of_get_data_lanes_count(endpoint, min, max);
of_node_put(endpoint);
return ret;
}
EXPORT_SYMBOL_GPL(drm_of_get_data_lanes_count_ep);
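/*
 * Hedged usage sketch (not part of the original file): validating a DSI
 * endpoint's "data-lanes" against a hypothetical 1..4 lane controller
 * limit and storing the result in the peripheral. The foo_* name and the
 * lane limits are assumptions for illustration only.
 */
static int foo_dsi_set_lanes_sketch(const struct device_node *endpoint,
				    struct mipi_dsi_device *dsi)
{
	int lanes = drm_of_get_data_lanes_count(endpoint, 1, 4);

	if (lanes < 0)
		return lanes;	/* property missing, malformed or out of range */

	dsi->lanes = lanes;

	return 0;
}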
#if IS_ENABLED(CONFIG_DRM_MIPI_DSI)
/**
* drm_of_get_dsi_bus - find the DSI bus for a given device
* @dev: parent device of display (SPI, I2C)
*
* Gets parent DSI bus for a DSI device controlled through a bus other
* than MIPI-DCS (SPI, I2C, etc.) using the Device Tree.
*
 * Returns a pointer to the mipi_dsi_host if successful, or an ERR_PTR:
 * -ENODEV if the device has no graph endpoint or remote DSI host node,
 * -EPROBE_DEFER if the DSI host is found but not yet available.
*/
struct mipi_dsi_host *drm_of_get_dsi_bus(struct device *dev)
{
struct mipi_dsi_host *dsi_host;
struct device_node *endpoint, *dsi_host_node;
/*
* Get first endpoint child from device.
*/
endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
if (!endpoint)
return ERR_PTR(-ENODEV);
/*
* Follow the first endpoint to get the DSI host node and then
* release the endpoint since we no longer need it.
*/
dsi_host_node = of_graph_get_remote_port_parent(endpoint);
of_node_put(endpoint);
if (!dsi_host_node)
return ERR_PTR(-ENODEV);
/*
* Get the DSI host from the DSI host node. If we get an error
* or the return is null assume we're not ready to probe just
* yet. Release the DSI host node since we're done with it.
*/
dsi_host = of_find_mipi_dsi_host_by_node(dsi_host_node);
of_node_put(dsi_host_node);
if (IS_ERR_OR_NULL(dsi_host))
return ERR_PTR(-EPROBE_DEFER);
return dsi_host;
}
EXPORT_SYMBOL_GPL(drm_of_get_dsi_bus);
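/*
 * Hedged usage sketch (not part of the original file): an SPI- or
 * I2C-controlled panel locating its parent DSI host during probe. The
 * foo_* name is an assumption for illustration only.
 */
static int foo_panel_find_dsi_host_sketch(struct device *dev,
					  struct mipi_dsi_host **host)
{
	*host = drm_of_get_dsi_bus(dev);
	if (IS_ERR(*host))
		return PTR_ERR(*host);	/* typically -EPROBE_DEFER or -ENODEV */

	return 0;
}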
#endif /* CONFIG_DRM_MIPI_DSI */
| linux-master | drivers/gpu/drm/drm_of.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
*/
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <drm/drm_buddy.h>
static struct kmem_cache *slab_blocks;
static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
struct drm_buddy_block *parent,
unsigned int order,
u64 offset)
{
struct drm_buddy_block *block;
BUG_ON(order > DRM_BUDDY_MAX_ORDER);
block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
if (!block)
return NULL;
block->header = offset;
block->header |= order;
block->parent = parent;
BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
return block;
}
static void drm_block_free(struct drm_buddy *mm,
struct drm_buddy_block *block)
{
kmem_cache_free(slab_blocks, block);
}
static void list_insert_sorted(struct drm_buddy *mm,
struct drm_buddy_block *block)
{
struct drm_buddy_block *node;
struct list_head *head;
head = &mm->free_list[drm_buddy_block_order(block)];
if (list_empty(head)) {
list_add(&block->link, head);
return;
}
list_for_each_entry(node, head, link)
if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
break;
__list_add(&block->link, node->link.prev, &node->link);
}
static void mark_allocated(struct drm_buddy_block *block)
{
block->header &= ~DRM_BUDDY_HEADER_STATE;
block->header |= DRM_BUDDY_ALLOCATED;
list_del(&block->link);
}
static void mark_free(struct drm_buddy *mm,
struct drm_buddy_block *block)
{
block->header &= ~DRM_BUDDY_HEADER_STATE;
block->header |= DRM_BUDDY_FREE;
list_insert_sorted(mm, block);
}
static void mark_split(struct drm_buddy_block *block)
{
block->header &= ~DRM_BUDDY_HEADER_STATE;
block->header |= DRM_BUDDY_SPLIT;
list_del(&block->link);
}
/**
* drm_buddy_init - init memory manager
*
* @mm: DRM buddy manager to initialize
* @size: size in bytes to manage
* @chunk_size: minimum page size in bytes for our allocations
*
* Initializes the memory manager and its resources.
*
* Returns:
* 0 on success, error code on failure.
*/
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
unsigned int i;
u64 offset;
if (size < chunk_size)
return -EINVAL;
if (chunk_size < PAGE_SIZE)
return -EINVAL;
if (!is_power_of_2(chunk_size))
return -EINVAL;
size = round_down(size, chunk_size);
mm->size = size;
mm->avail = size;
mm->chunk_size = chunk_size;
mm->max_order = ilog2(size) - ilog2(chunk_size);
BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);
mm->free_list = kmalloc_array(mm->max_order + 1,
sizeof(struct list_head),
GFP_KERNEL);
if (!mm->free_list)
return -ENOMEM;
for (i = 0; i <= mm->max_order; ++i)
INIT_LIST_HEAD(&mm->free_list[i]);
mm->n_roots = hweight64(size);
mm->roots = kmalloc_array(mm->n_roots,
sizeof(struct drm_buddy_block *),
GFP_KERNEL);
if (!mm->roots)
goto out_free_list;
offset = 0;
i = 0;
/*
* Split into power-of-two blocks, in case we are given a size that is
* not itself a power-of-two.
*/
do {
struct drm_buddy_block *root;
unsigned int order;
u64 root_size;
order = ilog2(size) - ilog2(chunk_size);
root_size = chunk_size << order;
root = drm_block_alloc(mm, NULL, order, offset);
if (!root)
goto out_free_roots;
mark_free(mm, root);
BUG_ON(i > mm->max_order);
BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);
mm->roots[i] = root;
offset += root_size;
size -= root_size;
i++;
} while (size);
return 0;
out_free_roots:
while (i--)
drm_block_free(mm, mm->roots[i]);
kfree(mm->roots);
out_free_list:
kfree(mm->free_list);
return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);
/**
* drm_buddy_fini - tear down the memory manager
*
* @mm: DRM buddy manager to free
*
* Cleanup memory manager resources and the freelist
*/
void drm_buddy_fini(struct drm_buddy *mm)
{
int i;
for (i = 0; i < mm->n_roots; ++i) {
WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
drm_block_free(mm, mm->roots[i]);
}
WARN_ON(mm->avail != mm->size);
kfree(mm->roots);
kfree(mm->free_list);
}
EXPORT_SYMBOL(drm_buddy_fini);
static int split_block(struct drm_buddy *mm,
struct drm_buddy_block *block)
{
unsigned int block_order = drm_buddy_block_order(block) - 1;
u64 offset = drm_buddy_block_offset(block);
BUG_ON(!drm_buddy_block_is_free(block));
BUG_ON(!drm_buddy_block_order(block));
block->left = drm_block_alloc(mm, block, block_order, offset);
if (!block->left)
return -ENOMEM;
block->right = drm_block_alloc(mm, block, block_order,
offset + (mm->chunk_size << block_order));
if (!block->right) {
drm_block_free(mm, block->left);
return -ENOMEM;
}
mark_free(mm, block->left);
mark_free(mm, block->right);
mark_split(block);
return 0;
}
static struct drm_buddy_block *
__get_buddy(struct drm_buddy_block *block)
{
struct drm_buddy_block *parent;
parent = block->parent;
if (!parent)
return NULL;
if (parent->left == block)
return parent->right;
return parent->left;
}
/**
* drm_get_buddy - get buddy address
*
* @block: DRM buddy block
*
* Returns the corresponding buddy block for @block, or NULL
* if this is a root block and can't be merged further.
* Requires some kind of locking to protect against
* any concurrent allocate and free operations.
*/
struct drm_buddy_block *
drm_get_buddy(struct drm_buddy_block *block)
{
return __get_buddy(block);
}
EXPORT_SYMBOL(drm_get_buddy);
static void __drm_buddy_free(struct drm_buddy *mm,
struct drm_buddy_block *block)
{
struct drm_buddy_block *parent;
while ((parent = block->parent)) {
struct drm_buddy_block *buddy;
buddy = __get_buddy(block);
if (!drm_buddy_block_is_free(buddy))
break;
list_del(&buddy->link);
drm_block_free(mm, block);
drm_block_free(mm, buddy);
block = parent;
}
mark_free(mm, block);
}
/**
* drm_buddy_free_block - free a block
*
* @mm: DRM buddy manager
* @block: block to be freed
*/
void drm_buddy_free_block(struct drm_buddy *mm,
struct drm_buddy_block *block)
{
BUG_ON(!drm_buddy_block_is_allocated(block));
mm->avail += drm_buddy_block_size(mm, block);
__drm_buddy_free(mm, block);
}
EXPORT_SYMBOL(drm_buddy_free_block);
/**
* drm_buddy_free_list - free blocks
*
* @mm: DRM buddy manager
* @objects: input list head to free blocks
*/
void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
{
struct drm_buddy_block *block, *on;
list_for_each_entry_safe(block, on, objects, link) {
drm_buddy_free_block(mm, block);
cond_resched();
}
INIT_LIST_HEAD(objects);
}
EXPORT_SYMBOL(drm_buddy_free_list);
static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
return s1 <= e2 && e1 >= s2;
}
static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
return s1 <= s2 && e1 >= e2;
}
static struct drm_buddy_block *
alloc_range_bias(struct drm_buddy *mm,
u64 start, u64 end,
unsigned int order)
{
struct drm_buddy_block *block;
struct drm_buddy_block *buddy;
LIST_HEAD(dfs);
int err;
int i;
end = end - 1;
for (i = 0; i < mm->n_roots; ++i)
list_add_tail(&mm->roots[i]->tmp_link, &dfs);
do {
u64 block_start;
u64 block_end;
block = list_first_entry_or_null(&dfs,
struct drm_buddy_block,
tmp_link);
if (!block)
break;
list_del(&block->tmp_link);
if (drm_buddy_block_order(block) < order)
continue;
block_start = drm_buddy_block_offset(block);
block_end = block_start + drm_buddy_block_size(mm, block) - 1;
if (!overlaps(start, end, block_start, block_end))
continue;
if (drm_buddy_block_is_allocated(block))
continue;
if (contains(start, end, block_start, block_end) &&
order == drm_buddy_block_order(block)) {
/*
* Find the free block within the range.
*/
if (drm_buddy_block_is_free(block))
return block;
continue;
}
if (!drm_buddy_block_is_split(block)) {
err = split_block(mm, block);
if (unlikely(err))
goto err_undo;
}
list_add(&block->right->tmp_link, &dfs);
list_add(&block->left->tmp_link, &dfs);
} while (1);
return ERR_PTR(-ENOSPC);
err_undo:
/*
* We really don't want to leave around a bunch of split blocks, since
* bigger is better, so make sure we merge everything back before we
* free the allocated blocks.
*/
buddy = __get_buddy(block);
if (buddy &&
(drm_buddy_block_is_free(block) &&
drm_buddy_block_is_free(buddy)))
__drm_buddy_free(mm, block);
return ERR_PTR(err);
}
static struct drm_buddy_block *
get_maxblock(struct drm_buddy *mm, unsigned int order)
{
struct drm_buddy_block *max_block = NULL, *node;
unsigned int i;
for (i = order; i <= mm->max_order; ++i) {
if (!list_empty(&mm->free_list[i])) {
node = list_last_entry(&mm->free_list[i],
struct drm_buddy_block,
link);
if (!max_block) {
max_block = node;
continue;
}
if (drm_buddy_block_offset(node) >
drm_buddy_block_offset(max_block)) {
max_block = node;
}
}
}
return max_block;
}
static struct drm_buddy_block *
alloc_from_freelist(struct drm_buddy *mm,
unsigned int order,
unsigned long flags)
{
struct drm_buddy_block *block = NULL;
unsigned int tmp;
int err;
if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
block = get_maxblock(mm, order);
if (block)
/* Store the obtained block order */
tmp = drm_buddy_block_order(block);
} else {
for (tmp = order; tmp <= mm->max_order; ++tmp) {
if (!list_empty(&mm->free_list[tmp])) {
block = list_last_entry(&mm->free_list[tmp],
struct drm_buddy_block,
link);
if (block)
break;
}
}
}
if (!block)
return ERR_PTR(-ENOSPC);
BUG_ON(!drm_buddy_block_is_free(block));
while (tmp != order) {
err = split_block(mm, block);
if (unlikely(err))
goto err_undo;
block = block->right;
tmp--;
}
return block;
err_undo:
if (tmp != order)
__drm_buddy_free(mm, block);
return ERR_PTR(err);
}
static int __alloc_range(struct drm_buddy *mm,
struct list_head *dfs,
u64 start, u64 size,
struct list_head *blocks)
{
struct drm_buddy_block *block;
struct drm_buddy_block *buddy;
LIST_HEAD(allocated);
u64 end;
int err;
end = start + size - 1;
do {
u64 block_start;
u64 block_end;
block = list_first_entry_or_null(dfs,
struct drm_buddy_block,
tmp_link);
if (!block)
break;
list_del(&block->tmp_link);
block_start = drm_buddy_block_offset(block);
block_end = block_start + drm_buddy_block_size(mm, block) - 1;
if (!overlaps(start, end, block_start, block_end))
continue;
if (drm_buddy_block_is_allocated(block)) {
err = -ENOSPC;
goto err_free;
}
if (contains(start, end, block_start, block_end)) {
if (!drm_buddy_block_is_free(block)) {
err = -ENOSPC;
goto err_free;
}
mark_allocated(block);
mm->avail -= drm_buddy_block_size(mm, block);
list_add_tail(&block->link, &allocated);
continue;
}
if (!drm_buddy_block_is_split(block)) {
err = split_block(mm, block);
if (unlikely(err))
goto err_undo;
}
list_add(&block->right->tmp_link, dfs);
list_add(&block->left->tmp_link, dfs);
} while (1);
list_splice_tail(&allocated, blocks);
return 0;
err_undo:
/*
* We really don't want to leave around a bunch of split blocks, since
* bigger is better, so make sure we merge everything back before we
* free the allocated blocks.
*/
buddy = __get_buddy(block);
if (buddy &&
(drm_buddy_block_is_free(block) &&
drm_buddy_block_is_free(buddy)))
__drm_buddy_free(mm, block);
err_free:
drm_buddy_free_list(mm, &allocated);
return err;
}
static int __drm_buddy_alloc_range(struct drm_buddy *mm,
u64 start,
u64 size,
struct list_head *blocks)
{
LIST_HEAD(dfs);
int i;
for (i = 0; i < mm->n_roots; ++i)
list_add_tail(&mm->roots[i]->tmp_link, &dfs);
return __alloc_range(mm, &dfs, start, size, blocks);
}
/**
* drm_buddy_block_trim - free unused pages
*
* @mm: DRM buddy manager
* @new_size: original size requested
* @blocks: Input and output list of allocated blocks.
* MUST contain single block as input to be trimmed.
* On success will contain the newly allocated blocks
* making up the @new_size. Blocks always appear in
* ascending order
*
 * For a contiguous allocation, the size is rounded up to the nearest
 * power-of-two value; drivers consume the *actual* size, so the remaining
 * portions are unused and can optionally be freed with this function.
*
* Returns:
* 0 on success, error code on failure.
*/
int drm_buddy_block_trim(struct drm_buddy *mm,
u64 new_size,
struct list_head *blocks)
{
struct drm_buddy_block *parent;
struct drm_buddy_block *block;
LIST_HEAD(dfs);
u64 new_start;
int err;
if (!list_is_singular(blocks))
return -EINVAL;
block = list_first_entry(blocks,
struct drm_buddy_block,
link);
if (WARN_ON(!drm_buddy_block_is_allocated(block)))
return -EINVAL;
if (new_size > drm_buddy_block_size(mm, block))
return -EINVAL;
if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
return -EINVAL;
if (new_size == drm_buddy_block_size(mm, block))
return 0;
list_del(&block->link);
mark_free(mm, block);
mm->avail += drm_buddy_block_size(mm, block);
/* Prevent recursively freeing this node */
parent = block->parent;
block->parent = NULL;
new_start = drm_buddy_block_offset(block);
list_add(&block->tmp_link, &dfs);
err = __alloc_range(mm, &dfs, new_start, new_size, blocks);
if (err) {
mark_allocated(block);
mm->avail -= drm_buddy_block_size(mm, block);
list_add(&block->link, blocks);
}
block->parent = parent;
return err;
}
EXPORT_SYMBOL(drm_buddy_block_trim);
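/*
 * Hedged usage sketch (not part of the original file): a contiguous
 * allocation was rounded up to a power of two, and the unused tail is
 * given back once the real size is known. The foo_* name is illustrative
 * only.
 */
static int foo_buddy_trim_sketch(struct drm_buddy *mm,
				 struct list_head *blocks, u64 actual_size)
{
	/* the helper itself insists on a single allocated block as input */
	if (!list_is_singular(blocks))
		return -EINVAL;

	return drm_buddy_block_trim(mm, actual_size, blocks);
}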
/**
* drm_buddy_alloc_blocks - allocate power-of-two blocks
*
* @mm: DRM buddy manager to allocate from
* @start: start of the allowed range for this block
* @end: end of the allowed range for this block
* @size: size of the allocation
* @min_page_size: alignment of the allocation
* @blocks: output list head to add allocated blocks
* @flags: DRM_BUDDY_*_ALLOCATION flags
*
 * alloc_range_bias() is called when range limitations are given; it traverses
 * the tree and returns the desired block.
 *
 * alloc_from_freelist() is called when *no* range restrictions
 * are enforced; it picks the block from the freelist.
*
* Returns:
* 0 on success, error code on failure.
*/
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
u64 start, u64 end, u64 size,
u64 min_page_size,
struct list_head *blocks,
unsigned long flags)
{
struct drm_buddy_block *block = NULL;
unsigned int min_order, order;
unsigned long pages;
LIST_HEAD(allocated);
int err;
if (size < mm->chunk_size)
return -EINVAL;
if (min_page_size < mm->chunk_size)
return -EINVAL;
if (!is_power_of_2(min_page_size))
return -EINVAL;
if (!IS_ALIGNED(start | end | size, mm->chunk_size))
return -EINVAL;
if (end > mm->size)
return -EINVAL;
if (range_overflows(start, size, mm->size))
return -EINVAL;
/* Actual range allocation */
if (start + size == end)
return __drm_buddy_alloc_range(mm, start, size, blocks);
if (!IS_ALIGNED(size, min_page_size))
return -EINVAL;
pages = size >> ilog2(mm->chunk_size);
order = fls(pages) - 1;
min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
do {
order = min(order, (unsigned int)fls(pages) - 1);
BUG_ON(order > mm->max_order);
BUG_ON(order < min_order);
do {
if (flags & DRM_BUDDY_RANGE_ALLOCATION)
/* Allocate traversing within the range */
block = alloc_range_bias(mm, start, end, order);
else
/* Allocate from freelist */
block = alloc_from_freelist(mm, order, flags);
if (!IS_ERR(block))
break;
if (order-- == min_order) {
err = -ENOSPC;
goto err_free;
}
} while (1);
mark_allocated(block);
mm->avail -= drm_buddy_block_size(mm, block);
kmemleak_update_trace(block);
list_add_tail(&block->link, &allocated);
pages -= BIT(order);
if (!pages)
break;
} while (1);
list_splice_tail(&allocated, blocks);
return 0;
err_free:
drm_buddy_free_list(mm, &allocated);
return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);
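/*
 * Hedged usage sketch (not part of the original file): managing a 64 MiB
 * range with PAGE_SIZE granularity, allocating 1 MiB from the freelist
 * and releasing it again. The sizes, the zero flags and the foo_* name
 * are illustrative assumptions only.
 */
static int foo_buddy_demo_sketch(void)
{
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	int err;

	err = drm_buddy_init(&mm, SZ_64M, PAGE_SIZE);
	if (err)
		return err;

	err = drm_buddy_alloc_blocks(&mm, 0, SZ_64M, SZ_1M,
				     PAGE_SIZE, &blocks, 0);
	if (!err)
		drm_buddy_free_list(&mm, &blocks);

	drm_buddy_fini(&mm);

	return err;
}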
/**
* drm_buddy_block_print - print block information
*
* @mm: DRM buddy manager
* @block: DRM buddy block
* @p: DRM printer to use
*/
void drm_buddy_block_print(struct drm_buddy *mm,
struct drm_buddy_block *block,
struct drm_printer *p)
{
u64 start = drm_buddy_block_offset(block);
u64 size = drm_buddy_block_size(mm, block);
drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
}
EXPORT_SYMBOL(drm_buddy_block_print);
/**
* drm_buddy_print - print allocator state
*
* @mm: DRM buddy manager
* @p: DRM printer to use
*/
void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
{
int order;
drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n",
mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20);
for (order = mm->max_order; order >= 0; order--) {
struct drm_buddy_block *block;
u64 count = 0, free;
list_for_each_entry(block, &mm->free_list[order], link) {
BUG_ON(!drm_buddy_block_is_free(block));
count++;
}
drm_printf(p, "order-%2d ", order);
free = count * (mm->chunk_size << order);
if (free < SZ_1M)
drm_printf(p, "free: %8llu KiB", free >> 10);
else
drm_printf(p, "free: %8llu MiB", free >> 20);
drm_printf(p, ", blocks: %llu\n", count);
}
}
EXPORT_SYMBOL(drm_buddy_print);
static void drm_buddy_module_exit(void)
{
kmem_cache_destroy(slab_blocks);
}
static int __init drm_buddy_module_init(void)
{
slab_blocks = KMEM_CACHE(drm_buddy_block, 0);
if (!slab_blocks)
return -ENOMEM;
return 0;
}
module_init(drm_buddy_module_init);
module_exit(drm_buddy_module_exit);
MODULE_DESCRIPTION("DRM Buddy Allocator");
MODULE_LICENSE("Dual MIT/GPL");
| linux-master | drivers/gpu/drm/drm_buddy.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* drm kms/fb dma helper functions
*
* Copyright (C) 2012 Analog Devices Inc.
* Author: Lars-Peter Clausen <[email protected]>
*
* Based on udl_fbdev.c
* Copyright (C) 2012 Red Hat
*/
#include <drm/drm_damage_helper.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
/**
* DOC: framebuffer dma helper functions
*
* Provides helper functions for creating a DMA-contiguous framebuffer.
*
* Depending on the platform, the buffers may be physically non-contiguous and
* mapped through an IOMMU or a similar mechanism, or allocated from
* physically-contiguous memory (using, for instance, CMA or a pool of memory
* reserved at early boot). This is handled behind the scenes by the DMA mapping
* API.
*
* drm_gem_fb_create() is used in the &drm_mode_config_funcs.fb_create
* callback function to create a DMA-contiguous framebuffer.
*/
/**
* drm_fb_dma_get_gem_obj() - Get DMA GEM object for framebuffer
* @fb: The framebuffer
* @plane: Which plane
*
* Return the DMA GEM object for given framebuffer.
*
* This function will usually be called from the CRTC callback functions.
*/
struct drm_gem_dma_object *drm_fb_dma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane)
{
struct drm_gem_object *gem;
gem = drm_gem_fb_get_obj(fb, plane);
if (!gem)
return NULL;
return to_drm_gem_dma_obj(gem);
}
EXPORT_SYMBOL_GPL(drm_fb_dma_get_gem_obj);
/**
 * drm_fb_dma_get_gem_addr() - Get DMA (bus) address for framebuffer. For pixel
 * formats where values are grouped in blocks, this will get you the beginning
 * of the block.
* @fb: The framebuffer
* @state: Which state of drm plane
* @plane: Which plane
* Return the DMA GEM address for given framebuffer.
*
* This function will usually be called from the PLANE callback functions.
*/
dma_addr_t drm_fb_dma_get_gem_addr(struct drm_framebuffer *fb,
struct drm_plane_state *state,
unsigned int plane)
{
struct drm_gem_dma_object *obj;
dma_addr_t dma_addr;
u8 h_div = 1, v_div = 1;
u32 block_w = drm_format_info_block_width(fb->format, plane);
u32 block_h = drm_format_info_block_height(fb->format, plane);
u32 block_size = fb->format->char_per_block[plane];
u32 sample_x;
u32 sample_y;
u32 block_start_y;
u32 num_hblocks;
obj = drm_fb_dma_get_gem_obj(fb, plane);
if (!obj)
return 0;
dma_addr = obj->dma_addr + fb->offsets[plane];
if (plane > 0) {
h_div = fb->format->hsub;
v_div = fb->format->vsub;
}
sample_x = (state->src_x >> 16) / h_div;
sample_y = (state->src_y >> 16) / v_div;
block_start_y = (sample_y / block_h) * block_h;
num_hblocks = sample_x / block_w;
dma_addr += fb->pitches[plane] * block_start_y;
dma_addr += block_size * num_hblocks;
return dma_addr;
}
EXPORT_SYMBOL_GPL(drm_fb_dma_get_gem_addr);
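/*
 * Hedged usage sketch (not part of the original file): a plane
 * .atomic_update computing the plane-0 scanout address for a hypothetical
 * register write. The foo_* names and the commented-out register access
 * are assumptions for illustration only.
 */
static void foo_plane_update_sketch(struct drm_plane_state *new_state)
{
	dma_addr_t paddr;

	paddr = drm_fb_dma_get_gem_addr(new_state->fb, new_state, 0);

	/* foo_write_scanout_addr(paddr); -- driver-specific register write */
	(void)paddr;
}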
/**
* drm_fb_dma_sync_non_coherent - Sync GEM object to non-coherent backing
* memory
* @drm: DRM device
* @old_state: Old plane state
* @state: New plane state
*
* This function can be used by drivers that use damage clips and have
* DMA GEM objects backed by non-coherent memory. Calling this function
* in a plane's .atomic_update ensures that all the data in the backing
* memory have been written to RAM.
*/
void drm_fb_dma_sync_non_coherent(struct drm_device *drm,
struct drm_plane_state *old_state,
struct drm_plane_state *state)
{
const struct drm_format_info *finfo = state->fb->format;
struct drm_atomic_helper_damage_iter iter;
const struct drm_gem_dma_object *dma_obj;
unsigned int offset, i;
struct drm_rect clip;
dma_addr_t daddr;
size_t nb_bytes;
for (i = 0; i < finfo->num_planes; i++) {
dma_obj = drm_fb_dma_get_gem_obj(state->fb, i);
if (!dma_obj->map_noncoherent)
continue;
daddr = drm_fb_dma_get_gem_addr(state->fb, state, i);
drm_atomic_helper_damage_iter_init(&iter, old_state, state);
drm_atomic_for_each_plane_damage(&iter, &clip) {
/* Ignore x1/x2 values, invalidate complete lines */
offset = clip.y1 * state->fb->pitches[i];
nb_bytes = (clip.y2 - clip.y1) * state->fb->pitches[i];
dma_sync_single_for_device(drm->dev, daddr + offset,
nb_bytes, DMA_TO_DEVICE);
}
}
}
EXPORT_SYMBOL_GPL(drm_fb_dma_sync_non_coherent);
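/*
 * Hedged usage sketch (not part of the original file): a plane
 * .atomic_update for buffers kept in non-coherent memory, flushing the
 * damaged lines before the scanout hardware reads them. The foo_* name is
 * an assumption for illustration only.
 */
static void foo_noncoherent_update_sketch(struct drm_device *drm,
					  struct drm_plane_state *old_state,
					  struct drm_plane_state *new_state)
{
	if (new_state->fb)
		drm_fb_dma_sync_non_coherent(drm, old_state, new_state);
}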
| linux-master | drivers/gpu/drm/drm_fb_dma_helper.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drm_sysfs.c - Modifications to drm_sysfs_class.c to support
* extra sysfs attribute from DRM. Normal drm_sysfs_class
* does not allow adding attributes.
*
* Copyright (c) 2004 Jon Smirl <[email protected]>
* Copyright (c) 2003-2004 Greg Kroah-Hartman <[email protected]>
* Copyright (c) 2003-2004 IBM Corp.
*/
#include <linux/acpi.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/i2c.h>
#include <linux/kdev_t.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <drm/drm_accel.h>
#include <drm/drm_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_modes.h>
#include <drm/drm_print.h>
#include <drm/drm_property.h>
#include <drm/drm_sysfs.h>
#include "drm_internal.h"
#include "drm_crtc_internal.h"
#define to_drm_minor(d) dev_get_drvdata(d)
#define to_drm_connector(d) dev_get_drvdata(d)
/**
* DOC: overview
*
* DRM provides very little additional support to drivers for sysfs
* interactions, beyond just all the standard stuff. Drivers who want to expose
* additional sysfs properties and property groups can attach them at either
* &drm_device.dev or &drm_connector.kdev.
*
* Registration is automatically handled when calling drm_dev_register(), or
* drm_connector_register() in case of hot-plugged connectors. Unregistration is
* also automatically handled by drm_dev_unregister() and
* drm_connector_unregister().
*/
static struct device_type drm_sysfs_device_minor = {
.name = "drm_minor"
};
static struct device_type drm_sysfs_device_connector = {
.name = "drm_connector",
};
struct class *drm_class;
#ifdef CONFIG_ACPI
static bool drm_connector_acpi_bus_match(struct device *dev)
{
return dev->type == &drm_sysfs_device_connector;
}
static struct acpi_device *drm_connector_acpi_find_companion(struct device *dev)
{
struct drm_connector *connector = to_drm_connector(dev);
return to_acpi_device_node(connector->fwnode);
}
static struct acpi_bus_type drm_connector_acpi_bus = {
.name = "drm_connector",
.match = drm_connector_acpi_bus_match,
.find_companion = drm_connector_acpi_find_companion,
};
static void drm_sysfs_acpi_register(void)
{
register_acpi_bus_type(&drm_connector_acpi_bus);
}
static void drm_sysfs_acpi_unregister(void)
{
unregister_acpi_bus_type(&drm_connector_acpi_bus);
}
#else
static void drm_sysfs_acpi_register(void) { }
static void drm_sysfs_acpi_unregister(void) { }
#endif
static char *drm_devnode(const struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
}
static int typec_connector_bind(struct device *dev,
struct device *typec_connector, void *data)
{
int ret;
ret = sysfs_create_link(&dev->kobj, &typec_connector->kobj, "typec_connector");
if (ret)
return ret;
ret = sysfs_create_link(&typec_connector->kobj, &dev->kobj, "drm_connector");
if (ret)
sysfs_remove_link(&dev->kobj, "typec_connector");
return ret;
}
static void typec_connector_unbind(struct device *dev,
struct device *typec_connector, void *data)
{
sysfs_remove_link(&typec_connector->kobj, "drm_connector");
sysfs_remove_link(&dev->kobj, "typec_connector");
}
static const struct component_ops typec_connector_ops = {
.bind = typec_connector_bind,
.unbind = typec_connector_unbind,
};
static CLASS_ATTR_STRING(version, S_IRUGO, "drm 1.1.0 20060810");
/**
* drm_sysfs_init - initialize sysfs helpers
*
* This is used to create the DRM class, which is the implicit parent of any
* other top-level DRM sysfs objects.
*
* You must call drm_sysfs_destroy() to release the allocated resources.
*
* Return: 0 on success, negative error code on failure.
*/
int drm_sysfs_init(void)
{
int err;
drm_class = class_create("drm");
if (IS_ERR(drm_class))
return PTR_ERR(drm_class);
err = class_create_file(drm_class, &class_attr_version.attr);
if (err) {
class_destroy(drm_class);
drm_class = NULL;
return err;
}
drm_class->devnode = drm_devnode;
drm_sysfs_acpi_register();
return 0;
}
/**
* drm_sysfs_destroy - destroys DRM class
*
* Destroy the DRM device class.
*/
void drm_sysfs_destroy(void)
{
if (IS_ERR_OR_NULL(drm_class))
return;
drm_sysfs_acpi_unregister();
class_remove_file(drm_class, &class_attr_version.attr);
class_destroy(drm_class);
drm_class = NULL;
}
static void drm_sysfs_release(struct device *dev)
{
kfree(dev);
}
/*
* Connector properties
*/
static ssize_t status_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct drm_connector *connector = to_drm_connector(device);
struct drm_device *dev = connector->dev;
enum drm_connector_force old_force;
int ret;
ret = mutex_lock_interruptible(&dev->mode_config.mutex);
if (ret)
return ret;
old_force = connector->force;
if (sysfs_streq(buf, "detect"))
connector->force = 0;
else if (sysfs_streq(buf, "on"))
connector->force = DRM_FORCE_ON;
else if (sysfs_streq(buf, "on-digital"))
connector->force = DRM_FORCE_ON_DIGITAL;
else if (sysfs_streq(buf, "off"))
connector->force = DRM_FORCE_OFF;
else
ret = -EINVAL;
if (old_force != connector->force || !connector->force) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force updated from %d to %d or reprobing\n",
connector->base.id,
connector->name,
old_force, connector->force);
connector->funcs->fill_modes(connector,
dev->mode_config.max_width,
dev->mode_config.max_height);
}
mutex_unlock(&dev->mode_config.mutex);
return ret ? ret : count;
}
static ssize_t status_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct drm_connector *connector = to_drm_connector(device);
enum drm_connector_status status;
status = READ_ONCE(connector->status);
return sysfs_emit(buf, "%s\n",
drm_get_connector_status_name(status));
}
static ssize_t dpms_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct drm_connector *connector = to_drm_connector(device);
int dpms;
dpms = READ_ONCE(connector->dpms);
return sysfs_emit(buf, "%s\n", drm_get_dpms_name(dpms));
}
static ssize_t enabled_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct drm_connector *connector = to_drm_connector(device);
bool enabled;
enabled = READ_ONCE(connector->encoder);
return sysfs_emit(buf, enabled ? "enabled\n" : "disabled\n");
}
static ssize_t edid_show(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
struct device *connector_dev = kobj_to_dev(kobj);
struct drm_connector *connector = to_drm_connector(connector_dev);
unsigned char *edid;
size_t size;
ssize_t ret = 0;
mutex_lock(&connector->dev->mode_config.mutex);
if (!connector->edid_blob_ptr)
goto unlock;
edid = connector->edid_blob_ptr->data;
size = connector->edid_blob_ptr->length;
if (!edid)
goto unlock;
if (off >= size)
goto unlock;
if (off + count > size)
count = size - off;
memcpy(buf, edid + off, count);
ret = count;
unlock:
mutex_unlock(&connector->dev->mode_config.mutex);
return ret;
}
static ssize_t modes_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct drm_connector *connector = to_drm_connector(device);
struct drm_display_mode *mode;
int written = 0;
mutex_lock(&connector->dev->mode_config.mutex);
list_for_each_entry(mode, &connector->modes, head) {
written += scnprintf(buf + written, PAGE_SIZE - written, "%s\n",
mode->name);
}
mutex_unlock(&connector->dev->mode_config.mutex);
return written;
}
static ssize_t connector_id_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct drm_connector *connector = to_drm_connector(device);
return sysfs_emit(buf, "%d\n", connector->base.id);
}
static DEVICE_ATTR_RW(status);
static DEVICE_ATTR_RO(enabled);
static DEVICE_ATTR_RO(dpms);
static DEVICE_ATTR_RO(modes);
static DEVICE_ATTR_RO(connector_id);
static struct attribute *connector_dev_attrs[] = {
&dev_attr_status.attr,
&dev_attr_enabled.attr,
&dev_attr_dpms.attr,
&dev_attr_modes.attr,
&dev_attr_connector_id.attr,
NULL
};
static struct bin_attribute edid_attr = {
.attr.name = "edid",
.attr.mode = 0444,
.size = 0,
.read = edid_show,
};
static struct bin_attribute *connector_bin_attrs[] = {
&edid_attr,
NULL
};
static const struct attribute_group connector_dev_group = {
.attrs = connector_dev_attrs,
.bin_attrs = connector_bin_attrs,
};
static const struct attribute_group *connector_dev_groups[] = {
&connector_dev_group,
NULL
};
int drm_sysfs_connector_add(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct device *kdev;
int r;
if (connector->kdev)
return 0;
kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
if (!kdev)
return -ENOMEM;
device_initialize(kdev);
kdev->class = drm_class;
kdev->type = &drm_sysfs_device_connector;
kdev->parent = dev->primary->kdev;
kdev->groups = connector_dev_groups;
kdev->release = drm_sysfs_release;
dev_set_drvdata(kdev, connector);
r = dev_set_name(kdev, "card%d-%s", dev->primary->index, connector->name);
if (r)
goto err_free;
DRM_DEBUG("adding \"%s\" to sysfs\n",
connector->name);
r = device_add(kdev);
if (r) {
drm_err(dev, "failed to register connector device: %d\n", r);
goto err_free;
}
connector->kdev = kdev;
if (dev_fwnode(kdev)) {
r = component_add(kdev, &typec_connector_ops);
if (r)
drm_err(dev, "failed to add component to create link to typec connector\n");
}
if (connector->ddc)
return sysfs_create_link(&connector->kdev->kobj,
&connector->ddc->dev.kobj, "ddc");
return 0;
err_free:
put_device(kdev);
return r;
}
void drm_sysfs_connector_remove(struct drm_connector *connector)
{
if (!connector->kdev)
return;
if (connector->ddc)
sysfs_remove_link(&connector->kdev->kobj, "ddc");
if (dev_fwnode(connector->kdev))
component_del(connector->kdev, &typec_connector_ops);
DRM_DEBUG("removing \"%s\" from sysfs\n",
connector->name);
device_unregister(connector->kdev);
connector->kdev = NULL;
}
void drm_sysfs_lease_event(struct drm_device *dev)
{
char *event_string = "LEASE=1";
char *envp[] = { event_string, NULL };
DRM_DEBUG("generating lease event\n");
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
}
/**
* drm_sysfs_hotplug_event - generate a DRM uevent
* @dev: DRM device
*
* Send a uevent for the DRM device specified by @dev. Currently we only
* set HOTPLUG=1 in the uevent environment, but this could be expanded to
* deal with other types of events.
*
 * Any new uapi should be using the drm_sysfs_connector_property_event()
 * for uevents on connector status change.
*/
void drm_sysfs_hotplug_event(struct drm_device *dev)
{
char *event_string = "HOTPLUG=1";
char *envp[] = { event_string, NULL };
DRM_DEBUG("generating hotplug event\n");
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(drm_sysfs_hotplug_event);
/**
* drm_sysfs_connector_hotplug_event - generate a DRM uevent for any connector
* change
* @connector: connector which has changed
*
* Send a uevent for the DRM connector specified by @connector. This will send
* a uevent with the properties HOTPLUG=1 and CONNECTOR.
*/
void drm_sysfs_connector_hotplug_event(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
char hotplug_str[] = "HOTPLUG=1", conn_id[21];
char *envp[] = { hotplug_str, conn_id, NULL };
snprintf(conn_id, sizeof(conn_id),
"CONNECTOR=%u", connector->base.id);
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] generating connector hotplug event\n",
connector->base.id, connector->name);
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(drm_sysfs_connector_hotplug_event);
/**
* drm_sysfs_connector_property_event - generate a DRM uevent for connector
* property change
* @connector: connector on which property changed
* @property: connector property which has changed.
*
* Send a uevent for the specified DRM connector and property. Currently we
* set HOTPLUG=1 and connector id along with the attached property id
* related to the change.
*/
void drm_sysfs_connector_property_event(struct drm_connector *connector,
struct drm_property *property)
{
struct drm_device *dev = connector->dev;
char hotplug_str[] = "HOTPLUG=1", conn_id[21], prop_id[21];
char *envp[4] = { hotplug_str, conn_id, prop_id, NULL };
WARN_ON(!drm_mode_obj_find_prop_id(&connector->base,
property->base.id));
snprintf(conn_id, ARRAY_SIZE(conn_id),
"CONNECTOR=%u", connector->base.id);
snprintf(prop_id, ARRAY_SIZE(prop_id),
"PROPERTY=%u", property->base.id);
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] generating connector property event for [PROP:%d:%s]\n",
connector->base.id, connector->name,
property->base.id, property->name);
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(drm_sysfs_connector_property_event);
struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
{
const char *minor_str;
struct device *kdev;
int r;
kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
if (!kdev)
return ERR_PTR(-ENOMEM);
device_initialize(kdev);
if (minor->type == DRM_MINOR_ACCEL) {
minor_str = "accel%d";
accel_set_device_instance_params(kdev, minor->index);
} else {
if (minor->type == DRM_MINOR_RENDER)
minor_str = "renderD%d";
else
minor_str = "card%d";
kdev->devt = MKDEV(DRM_MAJOR, minor->index);
kdev->class = drm_class;
kdev->type = &drm_sysfs_device_minor;
}
kdev->parent = minor->dev->dev;
kdev->release = drm_sysfs_release;
dev_set_drvdata(kdev, minor);
r = dev_set_name(kdev, minor_str, minor->index);
if (r < 0)
goto err_free;
return kdev;
err_free:
put_device(kdev);
return ERR_PTR(r);
}
/**
* drm_class_device_register - register new device with the DRM sysfs class
* @dev: device to register
*
* Registers a new &struct device within the DRM sysfs class. Essentially only
* used by ttm to have a place for its global settings. Drivers should never use
* this.
*/
int drm_class_device_register(struct device *dev)
{
if (!drm_class || IS_ERR(drm_class))
return -ENOENT;
dev->class = drm_class;
return device_register(dev);
}
EXPORT_SYMBOL_GPL(drm_class_device_register);
/**
* drm_class_device_unregister - unregister device with the DRM sysfs class
* @dev: device to unregister
*
* Unregisters a &struct device from the DRM sysfs class. Essentially only used
* by ttm to have a place for its global settings. Drivers should never use
* this.
*/
void drm_class_device_unregister(struct device *dev)
{
return device_unregister(dev);
}
EXPORT_SYMBOL_GPL(drm_class_device_unregister);
| linux-master | drivers/gpu/drm/drm_sysfs.c |
/*
* drm_irq.c IRQ and vblank support
*
* \author Rickard E. (Rik) Faith <[email protected]>
* \author Gareth Hughes <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* Created: Fri Mar 19 14:30:16 1999 by [email protected]
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/export.h>
#include <linux/interrupt.h> /* For task queue support */
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_legacy.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "drm_internal.h"
static int drm_legacy_irq_install(struct drm_device *dev, int irq)
{
int ret;
unsigned long sh_flags = 0;
if (irq == 0)
return -EINVAL;
if (dev->irq_enabled)
return -EBUSY;
dev->irq_enabled = true;
DRM_DEBUG("irq=%d\n", irq);
/* Before installing handler */
if (dev->driver->irq_preinstall)
dev->driver->irq_preinstall(dev);
/* PCI devices require shared interrupts. */
if (dev_is_pci(dev->dev))
sh_flags = IRQF_SHARED;
ret = request_irq(irq, dev->driver->irq_handler,
sh_flags, dev->driver->name, dev);
if (ret < 0) {
dev->irq_enabled = false;
return ret;
}
/* After installing handler */
if (dev->driver->irq_postinstall)
ret = dev->driver->irq_postinstall(dev);
if (ret < 0) {
dev->irq_enabled = false;
if (drm_core_check_feature(dev, DRIVER_LEGACY))
vga_client_unregister(to_pci_dev(dev->dev));
free_irq(irq, dev);
} else {
dev->irq = irq;
}
return ret;
}
int drm_legacy_irq_uninstall(struct drm_device *dev)
{
unsigned long irqflags;
bool irq_enabled;
int i;
irq_enabled = dev->irq_enabled;
dev->irq_enabled = false;
/*
* Wake up any waiters so they don't hang. This is just to paper over
* issues for UMS drivers which aren't in full control of their
* vblank/irq handling. KMS drivers must ensure that vblanks are all
* disabled when uninstalling the irq handler.
*/
if (drm_dev_has_vblank(dev)) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
for (i = 0; i < dev->num_crtcs; i++) {
struct drm_vblank_crtc *vblank = &dev->vblank[i];
if (!vblank->enabled)
continue;
WARN_ON(drm_core_check_feature(dev, DRIVER_MODESET));
drm_vblank_disable_and_save(dev, i);
wake_up(&vblank->queue);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
if (!irq_enabled)
return -EINVAL;
DRM_DEBUG("irq=%d\n", dev->irq);
if (drm_core_check_feature(dev, DRIVER_LEGACY))
vga_client_unregister(to_pci_dev(dev->dev));
if (dev->driver->irq_uninstall)
dev->driver->irq_uninstall(dev);
free_irq(dev->irq, dev);
return 0;
}
EXPORT_SYMBOL(drm_legacy_irq_uninstall);
int drm_legacy_irq_control(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_control *ctl = data;
int ret = 0, irq;
struct pci_dev *pdev;
/* If we don't have an IRQ, we fall back for compatibility reasons -
* this used to be a separate function in drm_dma.h
*/
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return 0;
/* UMS was only ever supported on pci devices. */
if (WARN_ON(!dev_is_pci(dev->dev)))
return -EINVAL;
switch (ctl->func) {
case DRM_INST_HANDLER:
pdev = to_pci_dev(dev->dev);
irq = pdev->irq;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
ctl->irq != irq)
return -EINVAL;
mutex_lock(&dev->struct_mutex);
ret = drm_legacy_irq_install(dev, irq);
mutex_unlock(&dev->struct_mutex);
return ret;
case DRM_UNINST_HANDLER:
mutex_lock(&dev->struct_mutex);
ret = drm_legacy_irq_uninstall(dev);
mutex_unlock(&dev->struct_mutex);
return ret;
default:
return -EINVAL;
}
}
| linux-master | drivers/gpu/drm/drm_irq.c |
/*
* \file drm_ioc32.c
*
* 32-bit ioctl compatibility routines for the DRM.
*
* \author Paul Mackerras <[email protected]>
*
* Copyright (C) Paul Mackerras 2005.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
#include "drm_legacy.h"
#define DRM_IOCTL_VERSION32 DRM_IOWR(0x00, drm_version32_t)
#define DRM_IOCTL_GET_UNIQUE32 DRM_IOWR(0x01, drm_unique32_t)
#define DRM_IOCTL_GET_MAP32 DRM_IOWR(0x04, drm_map32_t)
#define DRM_IOCTL_GET_CLIENT32 DRM_IOWR(0x05, drm_client32_t)
#define DRM_IOCTL_GET_STATS32 DRM_IOR( 0x06, drm_stats32_t)
#define DRM_IOCTL_SET_UNIQUE32 DRM_IOW( 0x10, drm_unique32_t)
#define DRM_IOCTL_ADD_MAP32 DRM_IOWR(0x15, drm_map32_t)
#define DRM_IOCTL_ADD_BUFS32 DRM_IOWR(0x16, drm_buf_desc32_t)
#define DRM_IOCTL_MARK_BUFS32 DRM_IOW( 0x17, drm_buf_desc32_t)
#define DRM_IOCTL_INFO_BUFS32 DRM_IOWR(0x18, drm_buf_info32_t)
#define DRM_IOCTL_MAP_BUFS32 DRM_IOWR(0x19, drm_buf_map32_t)
#define DRM_IOCTL_FREE_BUFS32 DRM_IOW( 0x1a, drm_buf_free32_t)
#define DRM_IOCTL_RM_MAP32 DRM_IOW( 0x1b, drm_map32_t)
#define DRM_IOCTL_SET_SAREA_CTX32 DRM_IOW( 0x1c, drm_ctx_priv_map32_t)
#define DRM_IOCTL_GET_SAREA_CTX32 DRM_IOWR(0x1d, drm_ctx_priv_map32_t)
#define DRM_IOCTL_RES_CTX32 DRM_IOWR(0x26, drm_ctx_res32_t)
#define DRM_IOCTL_DMA32 DRM_IOWR(0x29, drm_dma32_t)
#define DRM_IOCTL_AGP_ENABLE32 DRM_IOW( 0x32, drm_agp_mode32_t)
#define DRM_IOCTL_AGP_INFO32 DRM_IOR( 0x33, drm_agp_info32_t)
#define DRM_IOCTL_AGP_ALLOC32 DRM_IOWR(0x34, drm_agp_buffer32_t)
#define DRM_IOCTL_AGP_FREE32 DRM_IOW( 0x35, drm_agp_buffer32_t)
#define DRM_IOCTL_AGP_BIND32 DRM_IOW( 0x36, drm_agp_binding32_t)
#define DRM_IOCTL_AGP_UNBIND32 DRM_IOW( 0x37, drm_agp_binding32_t)
#define DRM_IOCTL_SG_ALLOC32 DRM_IOW( 0x38, drm_scatter_gather32_t)
#define DRM_IOCTL_SG_FREE32 DRM_IOW( 0x39, drm_scatter_gather32_t)
#define DRM_IOCTL_UPDATE_DRAW32 DRM_IOW( 0x3f, drm_update_draw32_t)
#define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t)
#define DRM_IOCTL_MODE_ADDFB232 DRM_IOWR(0xb8, drm_mode_fb_cmd232_t)
typedef struct drm_version_32 {
int version_major; /* Major version */
int version_minor; /* Minor version */
int version_patchlevel; /* Patch level */
u32 name_len; /* Length of name buffer */
u32 name; /* Name of driver */
u32 date_len; /* Length of date buffer */
u32 date; /* User-space buffer to hold date */
u32 desc_len; /* Length of desc buffer */
u32 desc; /* User-space buffer to hold desc */
} drm_version32_t;
static int compat_drm_version(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_version32_t v32;
struct drm_version v;
int err;
if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
return -EFAULT;
memset(&v, 0, sizeof(v));
v = (struct drm_version) {
.name_len = v32.name_len,
.name = compat_ptr(v32.name),
.date_len = v32.date_len,
.date = compat_ptr(v32.date),
.desc_len = v32.desc_len,
.desc = compat_ptr(v32.desc),
};
err = drm_ioctl_kernel(file, drm_version, &v,
DRM_RENDER_ALLOW);
if (err)
return err;
v32.version_major = v.version_major;
v32.version_minor = v.version_minor;
v32.version_patchlevel = v.version_patchlevel;
v32.name_len = v.name_len;
v32.date_len = v.date_len;
v32.desc_len = v.desc_len;
if (copy_to_user((void __user *)arg, &v32, sizeof(v32)))
return -EFAULT;
return 0;
}
typedef struct drm_unique32 {
u32 unique_len; /* Length of unique */
u32 unique; /* Unique name for driver instantiation */
} drm_unique32_t;
static int compat_drm_getunique(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_unique32_t uq32;
struct drm_unique uq;
int err;
if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
return -EFAULT;
memset(&uq, 0, sizeof(uq));
uq = (struct drm_unique){
.unique_len = uq32.unique_len,
.unique = compat_ptr(uq32.unique),
};
err = drm_ioctl_kernel(file, drm_getunique, &uq, 0);
if (err)
return err;
uq32.unique_len = uq.unique_len;
if (copy_to_user((void __user *)arg, &uq32, sizeof(uq32)))
return -EFAULT;
return 0;
}
static int compat_drm_setunique(struct file *file, unsigned int cmd,
unsigned long arg)
{
/* it's dead */
return -EINVAL;
}
#if IS_ENABLED(CONFIG_DRM_LEGACY)
typedef struct drm_map32 {
u32 offset; /* Requested physical address (0 for SAREA) */
u32 size; /* Requested physical size (bytes) */
enum drm_map_type type; /* Type of memory to map */
enum drm_map_flags flags; /* Flags */
u32 handle; /* User-space: "Handle" to pass to mmap() */
int mtrr; /* MTRR slot used */
} drm_map32_t;
static int compat_drm_getmap(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_map32_t __user *argp = (void __user *)arg;
drm_map32_t m32;
struct drm_map map;
int err;
if (copy_from_user(&m32, argp, sizeof(m32)))
return -EFAULT;
map.offset = m32.offset;
err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, 0);
if (err)
return err;
m32.offset = map.offset;
m32.size = map.size;
m32.type = map.type;
m32.flags = map.flags;
m32.handle = ptr_to_compat((void __user *)map.handle);
m32.mtrr = map.mtrr;
if (copy_to_user(argp, &m32, sizeof(m32)))
return -EFAULT;
return 0;
}
static int compat_drm_addmap(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_map32_t __user *argp = (void __user *)arg;
drm_map32_t m32;
struct drm_map map;
int err;
if (copy_from_user(&m32, argp, sizeof(m32)))
return -EFAULT;
map.offset = m32.offset;
map.size = m32.size;
map.type = m32.type;
map.flags = m32.flags;
err = drm_ioctl_kernel(file, drm_legacy_addmap_ioctl, &map,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
if (err)
return err;
m32.offset = map.offset;
m32.mtrr = map.mtrr;
m32.handle = ptr_to_compat((void __user *)map.handle);
if (map.handle != compat_ptr(m32.handle))
pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n",
map.handle, m32.type, m32.offset);
if (copy_to_user(argp, &m32, sizeof(m32)))
return -EFAULT;
return 0;
}
static int compat_drm_rmmap(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_map32_t __user *argp = (void __user *)arg;
struct drm_map map;
u32 handle;
if (get_user(handle, &argp->handle))
return -EFAULT;
map.handle = compat_ptr(handle);
return drm_ioctl_kernel(file, drm_legacy_rmmap_ioctl, &map, DRM_AUTH);
}
#endif
typedef struct drm_client32 {
int idx; /* Which client desired? */
int auth; /* Is client authenticated? */
u32 pid; /* Process ID */
u32 uid; /* User ID */
u32 magic; /* Magic */
u32 iocs; /* Ioctl count */
} drm_client32_t;
static int compat_drm_getclient(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_client32_t c32;
drm_client32_t __user *argp = (void __user *)arg;
struct drm_client client;
int err;
if (copy_from_user(&c32, argp, sizeof(c32)))
return -EFAULT;
memset(&client, 0, sizeof(client));
client.idx = c32.idx;
err = drm_ioctl_kernel(file, drm_getclient, &client, 0);
if (err)
return err;
c32.idx = client.idx;
c32.auth = client.auth;
c32.pid = client.pid;
c32.uid = client.uid;
c32.magic = client.magic;
c32.iocs = client.iocs;
if (copy_to_user(argp, &c32, sizeof(c32)))
return -EFAULT;
return 0;
}
typedef struct drm_stats32 {
u32 count;
struct {
u32 value;
enum drm_stat_type type;
} data[15];
} drm_stats32_t;
static int compat_drm_getstats(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_stats32_t __user *argp = (void __user *)arg;
/* getstats is defunct, just clear */
if (clear_user(argp, sizeof(drm_stats32_t)))
return -EFAULT;
return 0;
}
#if IS_ENABLED(CONFIG_DRM_LEGACY)
typedef struct drm_buf_desc32 {
int count; /* Number of buffers of this size */
int size; /* Size in bytes */
int low_mark; /* Low water mark */
int high_mark; /* High water mark */
int flags;
u32 agp_start; /* Start address in the AGP aperture */
} drm_buf_desc32_t;
static int compat_drm_addbufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_desc32_t __user *argp = (void __user *)arg;
drm_buf_desc32_t desc32;
struct drm_buf_desc desc;
int err;
if (copy_from_user(&desc32, argp, sizeof(drm_buf_desc32_t)))
return -EFAULT;
desc = (struct drm_buf_desc){
desc32.count, desc32.size, desc32.low_mark, desc32.high_mark,
desc32.flags, desc32.agp_start
};
err = drm_ioctl_kernel(file, drm_legacy_addbufs, &desc,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
if (err)
return err;
desc32 = (drm_buf_desc32_t){
desc.count, desc.size, desc.low_mark, desc.high_mark,
desc.flags, desc.agp_start
};
if (copy_to_user(argp, &desc32, sizeof(drm_buf_desc32_t)))
return -EFAULT;
return 0;
}
static int compat_drm_markbufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_desc32_t b32;
drm_buf_desc32_t __user *argp = (void __user *)arg;
struct drm_buf_desc buf;
if (copy_from_user(&b32, argp, sizeof(b32)))
return -EFAULT;
buf.size = b32.size;
buf.low_mark = b32.low_mark;
buf.high_mark = b32.high_mark;
return drm_ioctl_kernel(file, drm_legacy_markbufs, &buf,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
typedef struct drm_buf_info32 {
int count; /**< Entries in list */
u32 list;
} drm_buf_info32_t;
static int copy_one_buf32(void *data, int count, struct drm_buf_entry *from)
{
drm_buf_info32_t *request = data;
drm_buf_desc32_t __user *to = compat_ptr(request->list);
drm_buf_desc32_t v = {.count = from->buf_count,
.size = from->buf_size,
.low_mark = from->low_mark,
.high_mark = from->high_mark};
if (copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags)))
return -EFAULT;
return 0;
}
static int drm_legacy_infobufs32(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_buf_info32_t *request = data;
return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf32);
}
static int compat_drm_infobufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_info32_t req32;
drm_buf_info32_t __user *argp = (void __user *)arg;
int err;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
if (req32.count < 0)
req32.count = 0;
err = drm_ioctl_kernel(file, drm_legacy_infobufs32, &req32, DRM_AUTH);
if (err)
return err;
if (put_user(req32.count, &argp->count))
return -EFAULT;
return 0;
}
typedef struct drm_buf_pub32 {
int idx; /**< Index into the master buffer list */
int total; /**< Buffer size */
int used; /**< Amount of buffer in use (for DMA) */
u32 address; /**< Address of buffer */
} drm_buf_pub32_t;
typedef struct drm_buf_map32 {
int count; /**< Length of the buffer list */
u32 virtual; /**< Mmap'd area in user-virtual */
u32 list; /**< Buffer information */
} drm_buf_map32_t;
static int map_one_buf32(void *data, int idx, unsigned long virtual,
struct drm_buf *buf)
{
drm_buf_map32_t *request = data;
drm_buf_pub32_t __user *to = compat_ptr(request->list) + idx;
drm_buf_pub32_t v;
v.idx = buf->idx;
v.total = buf->total;
v.used = 0;
v.address = virtual + buf->offset;
if (copy_to_user(to, &v, sizeof(v)))
return -EFAULT;
return 0;
}
static int drm_legacy_mapbufs32(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_buf_map32_t *request = data;
void __user *v;
int err = __drm_legacy_mapbufs(dev, data, &request->count,
&v, map_one_buf32,
file_priv);
request->virtual = ptr_to_compat(v);
return err;
}
static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_map32_t __user *argp = (void __user *)arg;
drm_buf_map32_t req32;
int err;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
if (req32.count < 0)
return -EINVAL;
err = drm_ioctl_kernel(file, drm_legacy_mapbufs32, &req32, DRM_AUTH);
if (err)
return err;
if (put_user(req32.count, &argp->count)
|| put_user(req32.virtual, &argp->virtual))
return -EFAULT;
return 0;
}
typedef struct drm_buf_free32 {
int count;
u32 list;
} drm_buf_free32_t;
static int compat_drm_freebufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_free32_t req32;
struct drm_buf_free request;
drm_buf_free32_t __user *argp = (void __user *)arg;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
request.count = req32.count;
request.list = compat_ptr(req32.list);
return drm_ioctl_kernel(file, drm_legacy_freebufs, &request, DRM_AUTH);
}
typedef struct drm_ctx_priv_map32 {
unsigned int ctx_id; /**< Context requesting private mapping */
u32 handle; /**< Handle of map */
} drm_ctx_priv_map32_t;
static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_ctx_priv_map32_t req32;
struct drm_ctx_priv_map request;
drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
request.ctx_id = req32.ctx_id;
request.handle = compat_ptr(req32.handle);
return drm_ioctl_kernel(file, drm_legacy_setsareactx, &request,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct drm_ctx_priv_map req;
drm_ctx_priv_map32_t req32;
drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
int err;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
req.ctx_id = req32.ctx_id;
err = drm_ioctl_kernel(file, drm_legacy_getsareactx, &req, DRM_AUTH);
if (err)
return err;
req32.handle = ptr_to_compat((void __user *)req.handle);
if (copy_to_user(argp, &req32, sizeof(req32)))
return -EFAULT;
return 0;
}
typedef struct drm_ctx_res32 {
int count;
u32 contexts;
} drm_ctx_res32_t;
static int compat_drm_resctx(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_ctx_res32_t __user *argp = (void __user *)arg;
drm_ctx_res32_t res32;
struct drm_ctx_res res;
int err;
if (copy_from_user(&res32, argp, sizeof(res32)))
return -EFAULT;
res.count = res32.count;
res.contexts = compat_ptr(res32.contexts);
err = drm_ioctl_kernel(file, drm_legacy_resctx, &res, DRM_AUTH);
if (err)
return err;
res32.count = res.count;
if (copy_to_user(argp, &res32, sizeof(res32)))
return -EFAULT;
return 0;
}
typedef struct drm_dma32 {
int context; /**< Context handle */
int send_count; /**< Number of buffers to send */
u32 send_indices; /**< List of handles to buffers */
u32 send_sizes; /**< Lengths of data to send */
enum drm_dma_flags flags; /**< Flags */
int request_count; /**< Number of buffers requested */
int request_size; /**< Desired size for buffers */
u32 request_indices; /**< Buffer information */
u32 request_sizes;
int granted_count; /**< Number of buffers granted */
} drm_dma32_t;
static int compat_drm_dma(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_dma32_t d32;
drm_dma32_t __user *argp = (void __user *)arg;
struct drm_dma d;
int err;
if (copy_from_user(&d32, argp, sizeof(d32)))
return -EFAULT;
d.context = d32.context;
d.send_count = d32.send_count;
d.send_indices = compat_ptr(d32.send_indices);
d.send_sizes = compat_ptr(d32.send_sizes);
d.flags = d32.flags;
d.request_count = d32.request_count;
d.request_indices = compat_ptr(d32.request_indices);
d.request_sizes = compat_ptr(d32.request_sizes);
err = drm_ioctl_kernel(file, drm_legacy_dma_ioctl, &d, DRM_AUTH);
if (err)
return err;
if (put_user(d.request_size, &argp->request_size)
|| put_user(d.granted_count, &argp->granted_count))
return -EFAULT;
return 0;
}
#endif
#if IS_ENABLED(CONFIG_DRM_LEGACY)
#if IS_ENABLED(CONFIG_AGP)
typedef struct drm_agp_mode32 {
u32 mode; /**< AGP mode */
} drm_agp_mode32_t;
static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_mode32_t __user *argp = (void __user *)arg;
struct drm_agp_mode mode;
if (get_user(mode.mode, &argp->mode))
return -EFAULT;
return drm_ioctl_kernel(file, drm_legacy_agp_enable_ioctl, &mode,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
typedef struct drm_agp_info32 {
int agp_version_major;
int agp_version_minor;
u32 mode;
u32 aperture_base; /* physical address */
u32 aperture_size; /* bytes */
u32 memory_allowed; /* bytes */
u32 memory_used;
/* PCI information */
unsigned short id_vendor;
unsigned short id_device;
} drm_agp_info32_t;
static int compat_drm_agp_info(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_info32_t __user *argp = (void __user *)arg;
drm_agp_info32_t i32;
struct drm_agp_info info;
int err;
err = drm_ioctl_kernel(file, drm_legacy_agp_info_ioctl, &info, DRM_AUTH);
if (err)
return err;
i32.agp_version_major = info.agp_version_major;
i32.agp_version_minor = info.agp_version_minor;
i32.mode = info.mode;
i32.aperture_base = info.aperture_base;
i32.aperture_size = info.aperture_size;
i32.memory_allowed = info.memory_allowed;
i32.memory_used = info.memory_used;
i32.id_vendor = info.id_vendor;
i32.id_device = info.id_device;
if (copy_to_user(argp, &i32, sizeof(i32)))
return -EFAULT;
return 0;
}
typedef struct drm_agp_buffer32 {
u32 size; /**< In bytes -- will round to page boundary */
u32 handle; /**< Used for binding / unbinding */
u32 type; /**< Type of memory to allocate */
u32 physical; /**< Physical used by i810 */
} drm_agp_buffer32_t;
static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_buffer32_t __user *argp = (void __user *)arg;
drm_agp_buffer32_t req32;
struct drm_agp_buffer request;
int err;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
request.size = req32.size;
request.type = req32.type;
err = drm_ioctl_kernel(file, drm_legacy_agp_alloc_ioctl, &request,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
if (err)
return err;
req32.handle = request.handle;
req32.physical = request.physical;
if (copy_to_user(argp, &req32, sizeof(req32))) {
drm_ioctl_kernel(file, drm_legacy_agp_free_ioctl, &request,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
return -EFAULT;
}
return 0;
}
static int compat_drm_agp_free(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_buffer32_t __user *argp = (void __user *)arg;
struct drm_agp_buffer request;
if (get_user(request.handle, &argp->handle))
return -EFAULT;
return drm_ioctl_kernel(file, drm_legacy_agp_free_ioctl, &request,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
typedef struct drm_agp_binding32 {
u32 handle; /**< From drm_agp_buffer */
u32 offset; /**< In bytes -- will round to page boundary */
} drm_agp_binding32_t;
static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_binding32_t __user *argp = (void __user *)arg;
drm_agp_binding32_t req32;
struct drm_agp_binding request;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
request.handle = req32.handle;
request.offset = req32.offset;
return drm_ioctl_kernel(file, drm_legacy_agp_bind_ioctl, &request,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_binding32_t __user *argp = (void __user *)arg;
struct drm_agp_binding request;
if (get_user(request.handle, &argp->handle))
return -EFAULT;
return drm_ioctl_kernel(file, drm_legacy_agp_unbind_ioctl, &request,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
#endif /* CONFIG_AGP */
typedef struct drm_scatter_gather32 {
u32 size; /**< In bytes -- will round to page boundary */
u32 handle; /**< Used for mapping / unmapping */
} drm_scatter_gather32_t;
static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_scatter_gather32_t __user *argp = (void __user *)arg;
struct drm_scatter_gather request;
int err;
if (get_user(request.size, &argp->size))
return -EFAULT;
err = drm_ioctl_kernel(file, drm_legacy_sg_alloc, &request,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
if (err)
return err;
/* XXX not sure about the handle conversion here... */
if (put_user(request.handle >> PAGE_SHIFT, &argp->handle))
return -EFAULT;
return 0;
}
static int compat_drm_sg_free(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_scatter_gather32_t __user *argp = (void __user *)arg;
struct drm_scatter_gather request;
unsigned long x;
if (get_user(x, &argp->handle))
return -EFAULT;
request.handle = x << PAGE_SHIFT;
return drm_ioctl_kernel(file, drm_legacy_sg_free, &request,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
#endif
#if defined(CONFIG_X86)
typedef struct drm_update_draw32 {
drm_drawable_t handle;
unsigned int type;
unsigned int num;
/* 64-bit version has a 32-bit pad here */
u64 data; /**< Pointer */
} __attribute__((packed)) drm_update_draw32_t;
static int compat_drm_update_draw(struct file *file, unsigned int cmd,
unsigned long arg)
{
/* update_draw is defunct */
return 0;
}
#endif
struct drm_wait_vblank_request32 {
enum drm_vblank_seq_type type;
unsigned int sequence;
u32 signal;
};
struct drm_wait_vblank_reply32 {
enum drm_vblank_seq_type type;
unsigned int sequence;
s32 tval_sec;
s32 tval_usec;
};
typedef union drm_wait_vblank32 {
struct drm_wait_vblank_request32 request;
struct drm_wait_vblank_reply32 reply;
} drm_wait_vblank32_t;
static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_wait_vblank32_t __user *argp = (void __user *)arg;
drm_wait_vblank32_t req32;
union drm_wait_vblank req;
int err;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
memset(&req, 0, sizeof(req));
req.request.type = req32.request.type;
req.request.sequence = req32.request.sequence;
req.request.signal = req32.request.signal;
err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
req32.reply.type = req.reply.type;
req32.reply.sequence = req.reply.sequence;
req32.reply.tval_sec = req.reply.tval_sec;
req32.reply.tval_usec = req.reply.tval_usec;
if (copy_to_user(argp, &req32, sizeof(req32)))
return -EFAULT;
return err;
}
#if defined(CONFIG_X86)
typedef struct drm_mode_fb_cmd232 {
u32 fb_id;
u32 width;
u32 height;
u32 pixel_format;
u32 flags;
u32 handles[4];
u32 pitches[4];
u32 offsets[4];
u64 modifier[4];
} __attribute__((packed)) drm_mode_fb_cmd232_t;
static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct drm_mode_fb_cmd232 __user *argp = (void __user *)arg;
struct drm_mode_fb_cmd2 req64;
int err;
memset(&req64, 0, sizeof(req64));
if (copy_from_user(&req64, argp,
offsetof(drm_mode_fb_cmd232_t, modifier)))
return -EFAULT;
if (copy_from_user(&req64.modifier, &argp->modifier,
sizeof(req64.modifier)))
return -EFAULT;
err = drm_ioctl_kernel(file, drm_mode_addfb2, &req64, 0);
if (err)
return err;
if (put_user(req64.fb_id, &argp->fb_id))
return -EFAULT;
return 0;
}
#endif
static struct {
drm_ioctl_compat_t *fn;
char *name;
} drm_compat_ioctls[] = {
#define DRM_IOCTL32_DEF(n, f) [DRM_IOCTL_NR(n##32)] = {.fn = f, .name = #n}
DRM_IOCTL32_DEF(DRM_IOCTL_VERSION, compat_drm_version),
DRM_IOCTL32_DEF(DRM_IOCTL_GET_UNIQUE, compat_drm_getunique),
#if IS_ENABLED(CONFIG_DRM_LEGACY)
DRM_IOCTL32_DEF(DRM_IOCTL_GET_MAP, compat_drm_getmap),
#endif
DRM_IOCTL32_DEF(DRM_IOCTL_GET_CLIENT, compat_drm_getclient),
DRM_IOCTL32_DEF(DRM_IOCTL_GET_STATS, compat_drm_getstats),
DRM_IOCTL32_DEF(DRM_IOCTL_SET_UNIQUE, compat_drm_setunique),
#if IS_ENABLED(CONFIG_DRM_LEGACY)
DRM_IOCTL32_DEF(DRM_IOCTL_ADD_MAP, compat_drm_addmap),
DRM_IOCTL32_DEF(DRM_IOCTL_ADD_BUFS, compat_drm_addbufs),
DRM_IOCTL32_DEF(DRM_IOCTL_MARK_BUFS, compat_drm_markbufs),
DRM_IOCTL32_DEF(DRM_IOCTL_INFO_BUFS, compat_drm_infobufs),
DRM_IOCTL32_DEF(DRM_IOCTL_MAP_BUFS, compat_drm_mapbufs),
DRM_IOCTL32_DEF(DRM_IOCTL_FREE_BUFS, compat_drm_freebufs),
DRM_IOCTL32_DEF(DRM_IOCTL_RM_MAP, compat_drm_rmmap),
DRM_IOCTL32_DEF(DRM_IOCTL_SET_SAREA_CTX, compat_drm_setsareactx),
DRM_IOCTL32_DEF(DRM_IOCTL_GET_SAREA_CTX, compat_drm_getsareactx),
DRM_IOCTL32_DEF(DRM_IOCTL_RES_CTX, compat_drm_resctx),
DRM_IOCTL32_DEF(DRM_IOCTL_DMA, compat_drm_dma),
#if IS_ENABLED(CONFIG_AGP)
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ENABLE, compat_drm_agp_enable),
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_INFO, compat_drm_agp_info),
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ALLOC, compat_drm_agp_alloc),
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_FREE, compat_drm_agp_free),
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_BIND, compat_drm_agp_bind),
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_UNBIND, compat_drm_agp_unbind),
#endif
#endif
#if IS_ENABLED(CONFIG_DRM_LEGACY)
DRM_IOCTL32_DEF(DRM_IOCTL_SG_ALLOC, compat_drm_sg_alloc),
DRM_IOCTL32_DEF(DRM_IOCTL_SG_FREE, compat_drm_sg_free),
#endif
#if defined(CONFIG_X86)
DRM_IOCTL32_DEF(DRM_IOCTL_UPDATE_DRAW, compat_drm_update_draw),
#endif
DRM_IOCTL32_DEF(DRM_IOCTL_WAIT_VBLANK, compat_drm_wait_vblank),
#if defined(CONFIG_X86)
DRM_IOCTL32_DEF(DRM_IOCTL_MODE_ADDFB2, compat_drm_mode_addfb2),
#endif
};
/**
* drm_compat_ioctl - 32bit IOCTL compatibility handler for DRM drivers
* @filp: file this ioctl is called on
* @cmd: ioctl cmd number
* @arg: user argument
*
* Compatibility handler for 32 bit userspace running on 64 bit kernels. All
* actual IOCTL handling is forwarded to drm_ioctl(), while marshalling
* structures as appropriate. Note that this only handles DRM core IOCTLs; if
* the driver has botched IOCTLs of its own, it must handle those by wrapping
* this function.
*
* Returns:
* Zero on success, negative error code on failure.
*/
long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev;
drm_ioctl_compat_t *fn;
int ret;
/* Assume that ioctls without an explicit compat routine will just
* work. This may not always be a good assumption, but it's better
* than always failing.
*/
if (nr >= ARRAY_SIZE(drm_compat_ioctls))
return drm_ioctl(filp, cmd, arg);
fn = drm_compat_ioctls[nr].fn;
if (!fn)
return drm_ioctl(filp, cmd, arg);
drm_dbg_core(dev, "comm=\"%s\", pid=%d, dev=0x%lx, auth=%d, %s\n",
current->comm, task_pid_nr(current),
(long)old_encode_dev(file_priv->minor->kdev->devt),
file_priv->authenticated,
drm_compat_ioctls[nr].name);
ret = (*fn)(filp, cmd, arg);
if (ret)
drm_dbg_core(dev, "ret = %d\n", ret);
return ret;
}
EXPORT_SYMBOL(drm_compat_ioctl);
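/*
* Typical wiring (sketch): a driver without driver-private 32-bit IOCTLs just
* points its file_operations at this helper, either by hand as below or via
* the DEFINE_DRM_GEM_FOPS() convenience macro, which expands to an
* equivalent table ("my_driver_fops" is a placeholder name):
*
*	static const struct file_operations my_driver_fops = {
*		.owner		= THIS_MODULE,
*		.open		= drm_open,
*		.release	= drm_release,
*		.unlocked_ioctl	= drm_ioctl,
*		.compat_ioctl	= drm_compat_ioctl,
*		.poll		= drm_poll,
*		.read		= drm_read,
*		.llseek		= noop_llseek,
*		.mmap		= drm_gem_mmap,
*	};
*/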
| linux-master | drivers/gpu/drm/drm_ioc32.c |
/*
* Copyright (c) 2006-2008 Intel Corporation
* Copyright (c) 2007 Dave Airlie <[email protected]>
* Copyright (c) 2008 Red Hat Inc.
*
* DRM core CRTC related functions
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*
* Authors:
* Keith Packard
* Eric Anholt <[email protected]>
* Dave Airlie <[email protected]>
* Jesse Barnes <[email protected]>
*/
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dma-fence.h>
#include <linux/uaccess.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_lock.h>
#include <drm/drm_atomic.h>
#include <drm/drm_auth.h>
#include <drm/drm_debugfs_crc.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_file.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
/**
* DOC: overview
*
* A CRTC represents the overall display pipeline. It receives pixel data from
* &drm_plane and blends them together. The &drm_display_mode is also attached
* to the CRTC, specifying display timings. On the output side the data is fed
* to one or more &drm_encoder, which are then each connected to one
* &drm_connector.
*
* To create a CRTC, a KMS driver allocates and zeroes an instance of
* &struct drm_crtc (possibly as part of a larger structure) and registers it
* with a call to drm_crtc_init_with_planes().
*
* The CRTC is also the entry point for legacy modeset operations, see
* &drm_crtc_funcs.set_config, legacy plane operations, see
* &drm_crtc_funcs.page_flip and &drm_crtc_funcs.cursor_set2, and other legacy
* operations like &drm_crtc_funcs.gamma_set. For atomic drivers all these
* features are controlled through &drm_property and
* &drm_mode_config_funcs.atomic_check.
*/
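/*
* Pipeline wiring sketch (hypothetical driver, error handling and funcs
* definitions omitted): once the planes and the CRTC exist, the output side
* described above is typically tied together like this:
*
*	drm_encoder_init(dev, &my_encoder, &my_encoder_funcs,
*			 DRM_MODE_ENCODER_NONE, NULL);
*	my_encoder.possible_crtcs = drm_crtc_mask(&my_crtc);
*
*	drm_connector_init(dev, &my_connector, &my_connector_funcs,
*			   DRM_MODE_CONNECTOR_HDMIA);
*	drm_connector_attach_encoder(&my_connector, &my_encoder);
*/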
/**
* drm_crtc_from_index - find the registered CRTC at an index
* @dev: DRM device
* @idx: index of the registered CRTC to find
*
* Given a CRTC index, return the registered CRTC from DRM device's
* list of CRTCs with matching index. This is the inverse of drm_crtc_index().
* It's useful in the vblank callbacks (like &drm_driver.enable_vblank or
* &drm_driver.disable_vblank), since that still deals with indices instead
* of pointers to &struct drm_crtc.
*/
struct drm_crtc *drm_crtc_from_index(struct drm_device *dev, int idx)
{
struct drm_crtc *crtc;
drm_for_each_crtc(crtc, dev)
if (idx == crtc->index)
return crtc;
return NULL;
}
EXPORT_SYMBOL(drm_crtc_from_index);
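/*
* Example (sketch, hypothetical driver code): a vblank callback that is
* handed a pipe/CRTC index can map it back to the CRTC like this
* (my_hw_enable_vblank_irq() is a made-up hardware helper):
*
*	struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
*
*	if (!crtc)
*		return -EINVAL;
*
*	return my_hw_enable_vblank_irq(crtc);
*/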
int drm_crtc_force_disable(struct drm_crtc *crtc)
{
struct drm_mode_set set = {
.crtc = crtc,
};
WARN_ON(drm_drv_uses_atomic_modeset(crtc->dev));
return drm_mode_set_config_internal(&set);
}
static unsigned int drm_num_crtcs(struct drm_device *dev)
{
unsigned int num = 0;
struct drm_crtc *tmp;
drm_for_each_crtc(tmp, dev) {
num++;
}
return num;
}
int drm_crtc_register_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
int ret = 0;
drm_for_each_crtc(crtc, dev) {
drm_debugfs_crtc_add(crtc);
if (crtc->funcs->late_register)
ret = crtc->funcs->late_register(crtc);
if (ret)
return ret;
}
return 0;
}
void drm_crtc_unregister_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
drm_for_each_crtc(crtc, dev) {
if (crtc->funcs->early_unregister)
crtc->funcs->early_unregister(crtc);
drm_debugfs_crtc_remove(crtc);
}
}
static int drm_crtc_crc_init(struct drm_crtc *crtc)
{
#ifdef CONFIG_DEBUG_FS
spin_lock_init(&crtc->crc.lock);
init_waitqueue_head(&crtc->crc.wq);
crtc->crc.source = kstrdup("auto", GFP_KERNEL);
if (!crtc->crc.source)
return -ENOMEM;
#endif
return 0;
}
static void drm_crtc_crc_fini(struct drm_crtc *crtc)
{
#ifdef CONFIG_DEBUG_FS
kfree(crtc->crc.source);
#endif
}
static const struct dma_fence_ops drm_crtc_fence_ops;
static struct drm_crtc *fence_to_crtc(struct dma_fence *fence)
{
BUG_ON(fence->ops != &drm_crtc_fence_ops);
return container_of(fence->lock, struct drm_crtc, fence_lock);
}
static const char *drm_crtc_fence_get_driver_name(struct dma_fence *fence)
{
struct drm_crtc *crtc = fence_to_crtc(fence);
return crtc->dev->driver->name;
}
static const char *drm_crtc_fence_get_timeline_name(struct dma_fence *fence)
{
struct drm_crtc *crtc = fence_to_crtc(fence);
return crtc->timeline_name;
}
static const struct dma_fence_ops drm_crtc_fence_ops = {
.get_driver_name = drm_crtc_fence_get_driver_name,
.get_timeline_name = drm_crtc_fence_get_timeline_name,
};
struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
{
struct dma_fence *fence;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return NULL;
dma_fence_init(fence, &drm_crtc_fence_ops, &crtc->fence_lock,
crtc->fence_context, ++crtc->fence_seqno);
return fence;
}
/**
* DOC: standard CRTC properties
*
* DRM CRTCs have a few standardized properties:
*
* ACTIVE:
* Atomic property for setting the power state of the CRTC. When set to 1
* the CRTC will actively display content. When set to 0 the CRTC will be
* powered off. There is no expectation that user-space will reset CRTC
* resources like the mode and planes when setting ACTIVE to 0.
*
* User-space can rely on an ACTIVE change to 1 to never fail an atomic
* test as long as no other property has changed. If a change to ACTIVE
* fails an atomic test, this is a driver bug. For this reason setting
* ACTIVE to 0 must not release internal resources (like reserved memory
* bandwidth or clock generators).
*
* Note that the legacy DPMS property on connectors is internally routed
* to control this property for atomic drivers.
* MODE_ID:
* Atomic property for setting the CRTC display timings. The value is the
* ID of a blob containing the DRM mode info. To disable the CRTC,
* user-space must set this property to 0.
*
* Setting MODE_ID to 0 will release reserved resources for the CRTC.
* SCALING_FILTER:
* Atomic property for setting the scaling filter for CRTC scaler
*
* The value of this property can be one of the following:
*
* Default:
* Driver's default scaling filter
* Nearest Neighbor:
* Nearest Neighbor scaling filter
*/
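/*
* User-space view (sketch, using libdrm; looking up the property IDs via
* drmModeObjectGetProperties() and building the mode are omitted): a full
* modeset that lights up a CRTC sets MODE_ID and ACTIVE in one atomic commit:
*
*	drmModeAtomicReqPtr req = drmModeAtomicAlloc();
*	uint32_t mode_blob;
*
*	drmModeCreatePropertyBlob(fd, &mode, sizeof(mode), &mode_blob);
*	drmModeAtomicAddProperty(req, crtc_id, prop_mode_id, mode_blob);
*	drmModeAtomicAddProperty(req, crtc_id, prop_active, 1);
*	drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
*	drmModeAtomicFree(req);
*/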
__printf(6, 0)
static int __drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary,
struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs,
const char *name, va_list ap)
{
struct drm_mode_config *config = &dev->mode_config;
int ret;
WARN_ON(primary && primary->type != DRM_PLANE_TYPE_PRIMARY);
WARN_ON(cursor && cursor->type != DRM_PLANE_TYPE_CURSOR);
/* crtc index is used with 32bit bitmasks */
if (WARN_ON(config->num_crtc >= 32))
return -EINVAL;
WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
(!funcs->atomic_destroy_state ||
!funcs->atomic_duplicate_state));
crtc->dev = dev;
crtc->funcs = funcs;
INIT_LIST_HEAD(&crtc->commit_list);
spin_lock_init(&crtc->commit_lock);
drm_modeset_lock_init(&crtc->mutex);
ret = drm_mode_object_add(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
if (ret)
return ret;
if (name) {
crtc->name = kvasprintf(GFP_KERNEL, name, ap);
} else {
crtc->name = kasprintf(GFP_KERNEL, "crtc-%d",
drm_num_crtcs(dev));
}
if (!crtc->name) {
drm_mode_object_unregister(dev, &crtc->base);
return -ENOMEM;
}
crtc->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&crtc->fence_lock);
snprintf(crtc->timeline_name, sizeof(crtc->timeline_name),
"CRTC:%d-%s", crtc->base.id, crtc->name);
crtc->base.properties = &crtc->properties;
list_add_tail(&crtc->head, &config->crtc_list);
crtc->index = config->num_crtc++;
crtc->primary = primary;
crtc->cursor = cursor;
if (primary && !primary->possible_crtcs)
primary->possible_crtcs = drm_crtc_mask(crtc);
if (cursor && !cursor->possible_crtcs)
cursor->possible_crtcs = drm_crtc_mask(crtc);
ret = drm_crtc_crc_init(crtc);
if (ret) {
drm_mode_object_unregister(dev, &crtc->base);
return ret;
}
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&crtc->base, config->prop_active, 0);
drm_object_attach_property(&crtc->base, config->prop_mode_id, 0);
drm_object_attach_property(&crtc->base,
config->prop_out_fence_ptr, 0);
drm_object_attach_property(&crtc->base,
config->prop_vrr_enabled, 0);
}
return 0;
}
/**
* drm_crtc_init_with_planes - Initialise a new CRTC object with
* specified primary and cursor planes.
* @dev: DRM device
* @crtc: CRTC object to init
* @primary: Primary plane for CRTC
* @cursor: Cursor plane for CRTC
* @funcs: callbacks for the new CRTC
* @name: printf style format string for the CRTC name, or NULL for default name
*
* Inits a new object created as base part of a driver crtc object. Drivers
* should use this function instead of drm_crtc_init(), which is only provided
* for backwards compatibility with drivers which do not yet support universal
* planes. For really simple hardware which has only one plane, look at
* drm_simple_display_pipe_init() instead.
* The &drm_crtc_funcs.destroy hook should call drm_crtc_cleanup() and kfree()
* the crtc structure. The crtc structure should not be allocated with
* devm_kzalloc().
*
* The @primary and @cursor planes are only relevant for legacy uAPI, see
* &drm_crtc.primary and &drm_crtc.cursor.
*
* Note: consider using drmm_crtc_alloc_with_planes() or
* drmm_crtc_init_with_planes() instead of drm_crtc_init_with_planes()
* to let the DRM managed resource infrastructure take care of cleanup
* and deallocation.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary,
struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs,
const char *name, ...)
{
va_list ap;
int ret;
WARN_ON(!funcs->destroy);
va_start(ap, name);
ret = __drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
name, ap);
va_end(ap);
return ret;
}
EXPORT_SYMBOL(drm_crtc_init_with_planes);
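/*
* Minimal usage sketch (hypothetical driver, error handling trimmed): the
* CRTC is embedded in a driver structure, initialized with already created
* primary/cursor planes, and freed from the mandatory destroy hook:
*
*	static void my_crtc_destroy(struct drm_crtc *crtc)
*	{
*		drm_crtc_cleanup(crtc);
*		kfree(container_of(crtc, struct my_crtc, base));
*	}
*
*	static const struct drm_crtc_funcs my_crtc_funcs = {
*		.destroy		= my_crtc_destroy,
*		.set_config		= drm_atomic_helper_set_config,
*		.page_flip		= drm_atomic_helper_page_flip,
*		.reset			= drm_atomic_helper_crtc_reset,
*		.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
*		.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
*	};
*
*	ret = drm_crtc_init_with_planes(dev, &my_crtc->base, primary, cursor,
*					&my_crtc_funcs, "my-crtc");
*/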
static void drmm_crtc_init_with_planes_cleanup(struct drm_device *dev,
void *ptr)
{
struct drm_crtc *crtc = ptr;
drm_crtc_cleanup(crtc);
}
__printf(6, 0)
static int __drmm_crtc_init_with_planes(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_plane *primary,
struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs,
const char *name,
va_list args)
{
int ret;
drm_WARN_ON(dev, funcs && funcs->destroy);
ret = __drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
name, args);
if (ret)
return ret;
ret = drmm_add_action_or_reset(dev, drmm_crtc_init_with_planes_cleanup,
crtc);
if (ret)
return ret;
return 0;
}
/**
* drmm_crtc_init_with_planes - Initialise a new CRTC object with
* specified primary and cursor planes.
* @dev: DRM device
* @crtc: CRTC object to init
* @primary: Primary plane for CRTC
* @cursor: Cursor plane for CRTC
* @funcs: callbacks for the new CRTC
* @name: printf style format string for the CRTC name, or NULL for default name
*
* Inits a new object created as base part of a driver crtc object. Drivers
* should use this function instead of drm_crtc_init(), which is only provided
* for backwards compatibility with drivers which do not yet support universal
* planes. For really simple hardware which has only one plane, look at
* drm_simple_display_pipe_init() instead.
*
* Cleanup is automatically handled through registering
* drmm_crtc_cleanup() with drmm_add_action(). The crtc structure should
* be allocated with drmm_kzalloc().
*
* The @drm_crtc_funcs.destroy hook must be NULL.
*
* The @primary and @cursor planes are only relevant for legacy uAPI, see
* &drm_crtc.primary and &drm_crtc.cursor.
*
* Returns:
* Zero on success, error code on failure.
*/
int drmm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary,
struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs,
const char *name, ...)
{
va_list ap;
int ret;
va_start(ap, name);
ret = __drmm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
name, ap);
va_end(ap);
if (ret)
return ret;
return 0;
}
EXPORT_SYMBOL(drmm_crtc_init_with_planes);
void *__drmm_crtc_alloc_with_planes(struct drm_device *dev,
size_t size, size_t offset,
struct drm_plane *primary,
struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs,
const char *name, ...)
{
void *container;
struct drm_crtc *crtc;
va_list ap;
int ret;
if (WARN_ON(!funcs || funcs->destroy))
return ERR_PTR(-EINVAL);
container = drmm_kzalloc(dev, size, GFP_KERNEL);
if (!container)
return ERR_PTR(-ENOMEM);
crtc = container + offset;
va_start(ap, name);
ret = __drmm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
name, ap);
va_end(ap);
if (ret)
return ERR_PTR(ret);
return container;
}
EXPORT_SYMBOL(__drmm_crtc_alloc_with_planes);
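/*
* Drivers normally reach this through the drmm_crtc_alloc_with_planes()
* macro from <drm/drm_crtc.h>. A sketch, assuming a hypothetical struct
* my_crtc with an embedded &drm_crtc member named "base" and a funcs table
* without a destroy hook:
*
*	struct my_crtc *mc;
*
*	mc = drmm_crtc_alloc_with_planes(dev, struct my_crtc, base,
*					 primary, cursor,
*					 &my_managed_crtc_funcs, "my-crtc");
*	if (IS_ERR(mc))
*		return PTR_ERR(mc);
*/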
/**
* drm_crtc_cleanup - Clean up the core crtc usage
* @crtc: CRTC to cleanup
*
* This function cleans up @crtc and removes it from the DRM mode setting
* core. Note that the function does *not* free the crtc structure itself,
* this is the responsibility of the caller.
*/
void drm_crtc_cleanup(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
/* Note that the crtc_list is considered to be static; should we
* remove the drm_crtc at runtime we would have to decrement all
* the indices on the drm_crtc after us in the crtc_list.
*/
drm_crtc_crc_fini(crtc);
kfree(crtc->gamma_store);
crtc->gamma_store = NULL;
drm_modeset_lock_fini(&crtc->mutex);
drm_mode_object_unregister(dev, &crtc->base);
list_del(&crtc->head);
dev->mode_config.num_crtc--;
WARN_ON(crtc->state && !crtc->funcs->atomic_destroy_state);
if (crtc->state && crtc->funcs->atomic_destroy_state)
crtc->funcs->atomic_destroy_state(crtc, crtc->state);
kfree(crtc->name);
memset(crtc, 0, sizeof(*crtc));
}
EXPORT_SYMBOL(drm_crtc_cleanup);
/**
* drm_mode_getcrtc - get CRTC configuration
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* Construct a CRTC configuration structure to return to the user.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_getcrtc(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc *crtc_resp = data;
struct drm_crtc *crtc;
struct drm_plane *plane;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
crtc = drm_crtc_find(dev, file_priv, crtc_resp->crtc_id);
if (!crtc)
return -ENOENT;
plane = crtc->primary;
crtc_resp->gamma_size = crtc->gamma_size;
drm_modeset_lock(&plane->mutex, NULL);
if (plane->state && plane->state->fb)
crtc_resp->fb_id = plane->state->fb->base.id;
else if (!plane->state && plane->fb)
crtc_resp->fb_id = plane->fb->base.id;
else
crtc_resp->fb_id = 0;
if (plane->state) {
crtc_resp->x = plane->state->src_x >> 16;
crtc_resp->y = plane->state->src_y >> 16;
}
drm_modeset_unlock(&plane->mutex);
drm_modeset_lock(&crtc->mutex, NULL);
if (crtc->state) {
if (crtc->state->enable) {
drm_mode_convert_to_umode(&crtc_resp->mode, &crtc->state->mode);
crtc_resp->mode_valid = 1;
} else {
crtc_resp->mode_valid = 0;
}
} else {
crtc_resp->x = crtc->x;
crtc_resp->y = crtc->y;
if (crtc->enabled) {
drm_mode_convert_to_umode(&crtc_resp->mode, &crtc->mode);
crtc_resp->mode_valid = 1;
} else {
crtc_resp->mode_valid = 0;
}
}
if (!file_priv->aspect_ratio_allowed)
crtc_resp->mode.flags &= ~DRM_MODE_FLAG_PIC_AR_MASK;
drm_modeset_unlock(&crtc->mutex);
return 0;
}
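/*
* User-space reaches this through DRM_IOCTL_MODE_GETCRTC, typically via the
* libdrm wrapper (sketch):
*
*	drmModeCrtcPtr c = drmModeGetCrtc(fd, crtc_id);
*
*	if (c) {
*		printf("%ux%u at (%u,%u), fb %u\n", c->width, c->height,
*		       c->x, c->y, c->buffer_id);
*		drmModeFreeCrtc(c);
*	}
*/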
static int __drm_mode_set_config_internal(struct drm_mode_set *set,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_crtc *crtc = set->crtc;
struct drm_framebuffer *fb;
struct drm_crtc *tmp;
int ret;
WARN_ON(drm_drv_uses_atomic_modeset(crtc->dev));
/*
* NOTE: ->set_config can also disable other crtcs (if we steal all
* connectors from it), hence we need to refcount the fbs across all
* crtcs. Atomic modeset will have saner semantics ...
*/
drm_for_each_crtc(tmp, crtc->dev) {
struct drm_plane *plane = tmp->primary;
plane->old_fb = plane->fb;
}
fb = set->fb;
ret = crtc->funcs->set_config(set, ctx);
if (ret == 0) {
struct drm_plane *plane = crtc->primary;
plane->crtc = fb ? crtc : NULL;
plane->fb = fb;
}
drm_for_each_crtc(tmp, crtc->dev) {
struct drm_plane *plane = tmp->primary;
if (plane->fb)
drm_framebuffer_get(plane->fb);
if (plane->old_fb)
drm_framebuffer_put(plane->old_fb);
plane->old_fb = NULL;
}
return ret;
}
/**
* drm_mode_set_config_internal - helper to call &drm_crtc_funcs.set_config
* @set: modeset config to set
*
* This is a little helper to wrap internal calls to the
* &drm_crtc_funcs.set_config driver interface. The only thing it adds is the
* correct refcounting dance.
*
* This should only be used by non-atomic legacy drivers.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_set_config_internal(struct drm_mode_set *set)
{
WARN_ON(drm_drv_uses_atomic_modeset(set->crtc->dev));
return __drm_mode_set_config_internal(set, NULL);
}
EXPORT_SYMBOL(drm_mode_set_config_internal);
/**
* drm_crtc_check_viewport - Checks that a framebuffer is big enough for the
* CRTC viewport
* @crtc: CRTC that framebuffer will be displayed on
* @x: x panning
* @y: y panning
* @mode: mode that framebuffer will be displayed under
* @fb: framebuffer to check size of
*/
int drm_crtc_check_viewport(const struct drm_crtc *crtc,
int x, int y,
const struct drm_display_mode *mode,
const struct drm_framebuffer *fb)
{
int hdisplay, vdisplay;
drm_mode_get_hv_timing(mode, &hdisplay, &vdisplay);
if (crtc->state &&
drm_rotation_90_or_270(crtc->primary->state->rotation))
swap(hdisplay, vdisplay);
return drm_framebuffer_check_src_coords(x << 16, y << 16,
hdisplay << 16, vdisplay << 16,
fb);
}
EXPORT_SYMBOL(drm_crtc_check_viewport);
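/*
* Usage sketch (hypothetical driver code): validate the requested scanout
* offsets against the framebuffer before programming the hardware, and
* propagate the error when the framebuffer is too small:
*
*	ret = drm_crtc_check_viewport(crtc, x, y, mode, fb);
*	if (ret)
*		return ret;
*/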
/**
* drm_mode_setcrtc - set CRTC configuration
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* Build a new CRTC configuration based on user request.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_mode_crtc *crtc_req = data;
struct drm_crtc *crtc;
struct drm_plane *plane;
struct drm_connector **connector_set = NULL, *connector;
struct drm_framebuffer *fb = NULL;
struct drm_display_mode *mode = NULL;
struct drm_mode_set set;
uint32_t __user *set_connectors_ptr;
struct drm_modeset_acquire_ctx ctx;
int ret;
int i;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
/*
* Universal plane src offsets are only 16.16, prevent havoc for
* drivers using universal plane code internally.
*/
if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
return -ERANGE;
crtc = drm_crtc_find(dev, file_priv, crtc_req->crtc_id);
if (!crtc) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
return -ENOENT;
}
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
plane = crtc->primary;
/* allow disabling with the primary plane leased */
if (crtc_req->mode_valid && !drm_lease_held(file_priv, plane->base.id))
return -EACCES;
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx,
DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
if (crtc_req->mode_valid) {
/* If we have a mode we need a framebuffer. */
/* If we pass -1, set the mode with the currently bound fb */
if (crtc_req->fb_id == -1) {
struct drm_framebuffer *old_fb;
if (plane->state)
old_fb = plane->state->fb;
else
old_fb = plane->fb;
if (!old_fb) {
DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
ret = -EINVAL;
goto out;
}
fb = old_fb;
/* Make refcounting symmetric with the lookup path. */
drm_framebuffer_get(fb);
} else {
fb = drm_framebuffer_lookup(dev, file_priv, crtc_req->fb_id);
if (!fb) {
DRM_DEBUG_KMS("Unknown FB ID%d\n",
crtc_req->fb_id);
ret = -ENOENT;
goto out;
}
}
mode = drm_mode_create(dev);
if (!mode) {
ret = -ENOMEM;
goto out;
}
if (!file_priv->aspect_ratio_allowed &&
(crtc_req->mode.flags & DRM_MODE_FLAG_PIC_AR_MASK) != DRM_MODE_FLAG_PIC_AR_NONE) {
DRM_DEBUG_KMS("Unexpected aspect-ratio flag bits\n");
ret = -EINVAL;
goto out;
}
ret = drm_mode_convert_umode(dev, mode, &crtc_req->mode);
if (ret) {
DRM_DEBUG_KMS("Invalid mode (ret=%d, status=%s)\n",
ret, drm_get_mode_status_name(mode->status));
drm_mode_debug_printmodeline(mode);
goto out;
}
/*
* Check whether the primary plane supports the fb pixel format.
* Drivers not implementing the universal planes API use a
* default formats list provided by the DRM core which doesn't
* match real hardware capabilities. Skip the check in that
* case.
*/
if (!plane->format_default) {
ret = drm_plane_check_pixel_format(plane,
fb->format->format,
fb->modifier);
if (ret) {
DRM_DEBUG_KMS("Invalid pixel format %p4cc, modifier 0x%llx\n",
&fb->format->format,
fb->modifier);
goto out;
}
}
ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
mode, fb);
if (ret)
goto out;
}
if (crtc_req->count_connectors == 0 && mode) {
DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
ret = -EINVAL;
goto out;
}
if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
crtc_req->count_connectors);
ret = -EINVAL;
goto out;
}
if (crtc_req->count_connectors > 0) {
u32 out_id;
/* Avoid unbounded kernel memory allocation */
if (crtc_req->count_connectors > config->num_connector) {
ret = -EINVAL;
goto out;
}
connector_set = kmalloc_array(crtc_req->count_connectors,
sizeof(struct drm_connector *),
GFP_KERNEL);
if (!connector_set) {
ret = -ENOMEM;
goto out;
}
for (i = 0; i < crtc_req->count_connectors; i++) {
connector_set[i] = NULL;
set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
if (get_user(out_id, &set_connectors_ptr[i])) {
ret = -EFAULT;
goto out;
}
connector = drm_connector_lookup(dev, file_priv, out_id);
if (!connector) {
DRM_DEBUG_KMS("Connector id %d unknown\n",
out_id);
ret = -ENOENT;
goto out;
}
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id,
connector->name);
connector_set[i] = connector;
}
}
set.crtc = crtc;
set.x = crtc_req->x;
set.y = crtc_req->y;
set.mode = mode;
set.connectors = connector_set;
set.num_connectors = crtc_req->count_connectors;
set.fb = fb;
if (drm_drv_uses_atomic_modeset(dev))
ret = crtc->funcs->set_config(&set, &ctx);
else
ret = __drm_mode_set_config_internal(&set, &ctx);
out:
if (fb)
drm_framebuffer_put(fb);
if (connector_set) {
for (i = 0; i < crtc_req->count_connectors; i++) {
if (connector_set[i])
drm_connector_put(connector_set[i]);
}
}
kfree(connector_set);
drm_mode_destroy(dev, mode);
/* In case we need to retry... */
connector_set = NULL;
fb = NULL;
mode = NULL;
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
return ret;
}
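/*
* User-space counterpart (sketch, via libdrm): a legacy modeset on one
* connector looks roughly like this, with "mode" usually taken from the
* connector's probed mode list:
*
*	drmModeSetCrtc(fd, crtc_id, fb_id, 0, 0, &connector_id, 1, &mode);
*/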
int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
{
int ret = -EINVAL;
struct drm_crtc *crtc = obj_to_crtc(obj);
if (crtc->funcs->set_property)
ret = crtc->funcs->set_property(crtc, property, value);
if (!ret)
drm_object_property_set_value(obj, property, value);
return ret;
}
/**
* drm_crtc_create_scaling_filter_property - create a new scaling filter
* property
*
* @crtc: drm CRTC
* @supported_filters: bitmask of supported scaling filters, must include
* BIT(DRM_SCALING_FILTER_DEFAULT).
*
* This function lets drivers enable the scaling filter property on a given
* CRTC.
*
* RETURNS:
* Zero for success or -errno
*/
int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc,
unsigned int supported_filters)
{
struct drm_property *prop =
drm_create_scaling_filter_prop(crtc->dev, supported_filters);
if (IS_ERR(prop))
return PTR_ERR(prop);
drm_object_attach_property(&crtc->base, prop,
DRM_SCALING_FILTER_DEFAULT);
crtc->scaling_filter_property = prop;
return 0;
}
EXPORT_SYMBOL(drm_crtc_create_scaling_filter_property);
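/*
* Usage sketch (hypothetical driver): expose the property with the mandatory
* default filter plus nearest-neighbour scaling:
*
*	ret = drm_crtc_create_scaling_filter_property(crtc,
*			BIT(DRM_SCALING_FILTER_DEFAULT) |
*			BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
*	if (ret)
*		return ret;
*/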
| linux-master | drivers/gpu/drm/drm_crtc.c |
/*
* Created: Sun Dec 21 13:08:50 2008 by [email protected]
*
* Copyright 2008 Ben Gamari <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <drm/drm_atomic.h>
#include <drm/drm_auth.h>
#include <drm/drm_bridge.h>
#include <drm/drm_client.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_gpuva_mgr.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
#if defined(CONFIG_DEBUG_FS)
/***************************************************
* Initialization, etc.
**************************************************/
static int drm_name_info(struct seq_file *m, void *data)
{
struct drm_debugfs_entry *entry = m->private;
struct drm_device *dev = entry->dev;
struct drm_master *master;
mutex_lock(&dev->master_mutex);
master = dev->master;
seq_printf(m, "%s", dev->driver->name);
if (dev->dev)
seq_printf(m, " dev=%s", dev_name(dev->dev));
if (master && master->unique)
seq_printf(m, " master=%s", master->unique);
if (dev->unique)
seq_printf(m, " unique=%s", dev->unique);
seq_printf(m, "\n");
mutex_unlock(&dev->master_mutex);
return 0;
}
static int drm_clients_info(struct seq_file *m, void *data)
{
struct drm_debugfs_entry *entry = m->private;
struct drm_device *dev = entry->dev;
struct drm_file *priv;
kuid_t uid;
seq_printf(m,
"%20s %5s %3s master a %5s %10s\n",
"command",
"tgid",
"dev",
"uid",
"magic");
/* dev->filelist is sorted youngest first, but we want to present
	 * oldest first (i.e. kernel, servers, clients), so walk backwards.
*/
mutex_lock(&dev->filelist_mutex);
list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
struct task_struct *task;
bool is_current_master = drm_is_current_master(priv);
rcu_read_lock(); /* locks pid_task()->comm */
task = pid_task(priv->pid, PIDTYPE_TGID);
uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
seq_printf(m, "%20s %5d %3d %c %c %5d %10u\n",
task ? task->comm : "<unknown>",
pid_vnr(priv->pid),
priv->minor->index,
is_current_master ? 'y' : 'n',
priv->authenticated ? 'y' : 'n',
from_kuid_munged(seq_user_ns(m), uid),
priv->magic);
rcu_read_unlock();
}
mutex_unlock(&dev->filelist_mutex);
return 0;
}
static int drm_gem_one_name_info(int id, void *ptr, void *data)
{
struct drm_gem_object *obj = ptr;
struct seq_file *m = data;
seq_printf(m, "%6d %8zd %7d %8d\n",
obj->name, obj->size,
obj->handle_count,
kref_read(&obj->refcount));
return 0;
}
static int drm_gem_name_info(struct seq_file *m, void *data)
{
struct drm_debugfs_entry *entry = m->private;
struct drm_device *dev = entry->dev;
seq_printf(m, " name size handles refcount\n");
mutex_lock(&dev->object_name_lock);
idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
mutex_unlock(&dev->object_name_lock);
return 0;
}
static const struct drm_debugfs_info drm_debugfs_list[] = {
{"name", drm_name_info, 0},
{"clients", drm_clients_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
};
#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
static int drm_debugfs_open(struct inode *inode, struct file *file)
{
struct drm_info_node *node = inode->i_private;
return single_open(file, node->info_ent->show, node);
}
static int drm_debugfs_entry_open(struct inode *inode, struct file *file)
{
struct drm_debugfs_entry *entry = inode->i_private;
struct drm_debugfs_info *node = &entry->file;
return single_open(file, node->show, entry);
}
static const struct file_operations drm_debugfs_entry_fops = {
.owner = THIS_MODULE,
.open = drm_debugfs_entry_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations drm_debugfs_fops = {
.owner = THIS_MODULE,
.open = drm_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/**
* drm_debugfs_gpuva_info - dump the given DRM GPU VA space
* @m: pointer to the &seq_file to write
* @mgr: the &drm_gpuva_manager representing the GPU VA space
*
* Dumps the GPU VA mappings of a given DRM GPU VA manager.
*
* For each DRM GPU VA space drivers should call this function from their
* &drm_info_list's show callback.
*
* Returns: 0 on success, -ENODEV if the &mgr is not initialized
*/
int drm_debugfs_gpuva_info(struct seq_file *m,
struct drm_gpuva_manager *mgr)
{
struct drm_gpuva *va, *kva = &mgr->kernel_alloc_node;
if (!mgr->name)
return -ENODEV;
seq_printf(m, "DRM GPU VA space (%s) [0x%016llx;0x%016llx]\n",
mgr->name, mgr->mm_start, mgr->mm_start + mgr->mm_range);
seq_printf(m, "Kernel reserved node [0x%016llx;0x%016llx]\n",
kva->va.addr, kva->va.addr + kva->va.range);
seq_puts(m, "\n");
seq_puts(m, " VAs | start | range | end | object | object offset\n");
seq_puts(m, "-------------------------------------------------------------------------------------------------------------\n");
drm_gpuva_for_each_va(va, mgr) {
if (unlikely(va == kva))
continue;
seq_printf(m, " | 0x%016llx | 0x%016llx | 0x%016llx | 0x%016llx | 0x%016llx\n",
va->va.addr, va->va.range, va->va.addr + va->va.range,
(u64)(uintptr_t)va->gem.obj, va->gem.offset);
}
return 0;
}
EXPORT_SYMBOL(drm_debugfs_gpuva_info);
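/*
 * Example (hypothetical driver sketch): calling drm_debugfs_gpuva_info() from
 * a &drm_info_list show callback. Stashing the &drm_gpuva_manager in the
 * entry's data pointer is an assumption made purely for illustration; drivers
 * may instead reach it through their device structure.
 */
static int foo_gpuva_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_gpuva_manager *mgr = node->info_ent->data;

	return drm_debugfs_gpuva_info(m, mgr);
}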
/**
* drm_debugfs_create_files - Initialize a given set of debugfs files for DRM
* minor
* @files: The array of files to create
* @count: The number of files given
* @root: DRI debugfs dir entry.
* @minor: device minor number
*
* Create a given set of debugfs files represented by an array of
* &struct drm_info_list in the given root directory. These files will be removed
* automatically on drm_debugfs_cleanup().
*/
void drm_debugfs_create_files(const struct drm_info_list *files, int count,
struct dentry *root, struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct drm_info_node *tmp;
int i;
for (i = 0; i < count; i++) {
u32 features = files[i].driver_features;
if (features && !drm_core_check_all_features(dev, features))
continue;
tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
if (tmp == NULL)
continue;
tmp->minor = minor;
tmp->dent = debugfs_create_file(files[i].name,
0444, root, tmp,
&drm_debugfs_fops);
tmp->info_ent = &files[i];
mutex_lock(&minor->debugfs_lock);
list_add(&tmp->list, &minor->debugfs_list);
mutex_unlock(&minor->debugfs_lock);
}
}
EXPORT_SYMBOL(drm_debugfs_create_files);
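/*
 * Example (hypothetical driver sketch): a minimal &drm_info_list registered
 * from a driver's &drm_driver.debugfs_init hook. All "foo_" names are
 * illustrative assumptions.
 */
static int foo_stats_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;

	seq_printf(m, "minor index: %d\n", node->minor->index);
	return 0;
}

static const struct drm_info_list foo_info_list[] = {
	{ "foo_stats", foo_stats_show, 0, NULL },
};

static void foo_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(foo_info_list, ARRAY_SIZE(foo_info_list),
				 minor->debugfs_root, minor);
}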
int drm_debugfs_init(struct drm_minor *minor, int minor_id,
struct dentry *root)
{
struct drm_device *dev = minor->dev;
struct drm_debugfs_entry *entry, *tmp;
char name[64];
INIT_LIST_HEAD(&minor->debugfs_list);
mutex_init(&minor->debugfs_lock);
sprintf(name, "%d", minor_id);
minor->debugfs_root = debugfs_create_dir(name, root);
drm_debugfs_add_files(minor->dev, drm_debugfs_list, DRM_DEBUGFS_ENTRIES);
if (drm_drv_uses_atomic_modeset(dev)) {
drm_atomic_debugfs_init(minor);
drm_bridge_debugfs_init(minor);
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
drm_framebuffer_debugfs_init(minor);
drm_client_debugfs_init(minor);
}
if (dev->driver->debugfs_init)
dev->driver->debugfs_init(minor);
list_for_each_entry_safe(entry, tmp, &dev->debugfs_list, list) {
debugfs_create_file(entry->file.name, 0444,
minor->debugfs_root, entry, &drm_debugfs_entry_fops);
list_del(&entry->list);
}
return 0;
}
void drm_debugfs_late_register(struct drm_device *dev)
{
struct drm_minor *minor = dev->primary;
struct drm_debugfs_entry *entry, *tmp;
if (!minor)
return;
list_for_each_entry_safe(entry, tmp, &dev->debugfs_list, list) {
debugfs_create_file(entry->file.name, 0444,
minor->debugfs_root, entry, &drm_debugfs_entry_fops);
list_del(&entry->list);
}
}
int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
struct drm_minor *minor)
{
struct list_head *pos, *q;
struct drm_info_node *tmp;
int i;
mutex_lock(&minor->debugfs_lock);
for (i = 0; i < count; i++) {
list_for_each_safe(pos, q, &minor->debugfs_list) {
tmp = list_entry(pos, struct drm_info_node, list);
if (tmp->info_ent == &files[i]) {
debugfs_remove(tmp->dent);
list_del(pos);
kfree(tmp);
}
}
}
mutex_unlock(&minor->debugfs_lock);
return 0;
}
EXPORT_SYMBOL(drm_debugfs_remove_files);
static void drm_debugfs_remove_all_files(struct drm_minor *minor)
{
struct drm_info_node *node, *tmp;
mutex_lock(&minor->debugfs_lock);
list_for_each_entry_safe(node, tmp, &minor->debugfs_list, list) {
debugfs_remove(node->dent);
list_del(&node->list);
kfree(node);
}
mutex_unlock(&minor->debugfs_lock);
}
void drm_debugfs_cleanup(struct drm_minor *minor)
{
if (!minor->debugfs_root)
return;
drm_debugfs_remove_all_files(minor);
debugfs_remove_recursive(minor->debugfs_root);
minor->debugfs_root = NULL;
}
/**
* drm_debugfs_add_file - Add a given file to the DRM device debugfs file list
* @dev: drm device for the ioctl
* @name: debugfs file name
* @show: show callback
* @data: driver-private data, should not be device-specific
*
* Add a given file entry to the DRM device debugfs file list to be created on
* drm_debugfs_init.
*/
void drm_debugfs_add_file(struct drm_device *dev, const char *name,
int (*show)(struct seq_file*, void*), void *data)
{
struct drm_debugfs_entry *entry = drmm_kzalloc(dev, sizeof(*entry), GFP_KERNEL);
if (!entry)
return;
entry->file.name = name;
entry->file.show = show;
entry->file.data = data;
entry->dev = dev;
mutex_lock(&dev->debugfs_mutex);
list_add(&entry->list, &dev->debugfs_list);
mutex_unlock(&dev->debugfs_mutex);
}
EXPORT_SYMBOL(drm_debugfs_add_file);
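/*
 * Example (hypothetical driver sketch): a device-managed debugfs file queued
 * from the driver's probe path; drm_debugfs_init() materializes it later.
 * The "foo_" names are illustrative assumptions.
 */
static int foo_state_show(struct seq_file *m, void *data)
{
	struct drm_debugfs_entry *entry = m->private;
	struct drm_device *dev = entry->dev;

	seq_printf(m, "driver: %s\n", dev->driver->name);
	return 0;
}

static void foo_add_debugfs(struct drm_device *dev)
{
	drm_debugfs_add_file(dev, "foo_state", foo_state_show, NULL);
}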
/**
* drm_debugfs_add_files - Add an array of files to the DRM device debugfs file list
* @dev: drm device for the ioctl
* @files: The array of files to create
* @count: The number of files given
*
* Add a given set of debugfs files represented by an array of
* &struct drm_debugfs_info in the DRM device debugfs file list.
*/
void drm_debugfs_add_files(struct drm_device *dev, const struct drm_debugfs_info *files, int count)
{
int i;
for (i = 0; i < count; i++)
drm_debugfs_add_file(dev, files[i].name, files[i].show, files[i].data);
}
EXPORT_SYMBOL(drm_debugfs_add_files);
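/*
 * Example (hypothetical driver sketch): the same foo_state_show() callback
 * from the sketch above, registered through a &drm_debugfs_info array.
 */
static const struct drm_debugfs_info foo_debugfs_files[] = {
	{ "foo_state", foo_state_show, 0, NULL },
};

static void foo_add_debugfs_files(struct drm_device *dev)
{
	drm_debugfs_add_files(dev, foo_debugfs_files,
			      ARRAY_SIZE(foo_debugfs_files));
}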
static int connector_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
seq_printf(m, "%s\n", drm_get_connector_force_name(connector->force));
return 0;
}
static int connector_open(struct inode *inode, struct file *file)
{
struct drm_connector *dev = inode->i_private;
return single_open(file, connector_show, dev);
}
static ssize_t connector_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
struct drm_connector *connector = m->private;
char buf[12];
if (len > sizeof(buf) - 1)
return -EINVAL;
if (copy_from_user(buf, ubuf, len))
return -EFAULT;
buf[len] = '\0';
if (sysfs_streq(buf, "on"))
connector->force = DRM_FORCE_ON;
else if (sysfs_streq(buf, "digital"))
connector->force = DRM_FORCE_ON_DIGITAL;
else if (sysfs_streq(buf, "off"))
connector->force = DRM_FORCE_OFF;
else if (sysfs_streq(buf, "unspecified"))
connector->force = DRM_FORCE_UNSPECIFIED;
else
return -EINVAL;
return len;
}
static int edid_show(struct seq_file *m, void *data)
{
return drm_edid_override_show(m->private, m);
}
static int edid_open(struct inode *inode, struct file *file)
{
struct drm_connector *dev = inode->i_private;
return single_open(file, edid_show, dev);
}
static ssize_t edid_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
struct drm_connector *connector = m->private;
char *buf;
int ret;
buf = memdup_user(ubuf, len);
if (IS_ERR(buf))
return PTR_ERR(buf);
if (len == 5 && !strncmp(buf, "reset", 5))
ret = drm_edid_override_reset(connector);
else
ret = drm_edid_override_set(connector, buf, len);
kfree(buf);
return ret ? ret : len;
}
/*
* Returns the min and max vrr vfreq through the connector's debugfs file.
* Example usage: cat /sys/kernel/debug/dri/0/DP-1/vrr_range
*/
static int vrr_range_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
if (connector->status != connector_status_connected)
return -ENODEV;
seq_printf(m, "Min: %u\n", connector->display_info.monitor_range.min_vfreq);
seq_printf(m, "Max: %u\n", connector->display_info.monitor_range.max_vfreq);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(vrr_range);
/*
* Returns Connector's max supported bpc through debugfs file.
* Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc
*/
static int output_bpc_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
if (connector->status != connector_status_connected)
return -ENODEV;
seq_printf(m, "Maximum: %u\n", connector->display_info.bpc);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(output_bpc);
static const struct file_operations drm_edid_fops = {
.owner = THIS_MODULE,
.open = edid_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = edid_write
};
static const struct file_operations drm_connector_fops = {
.owner = THIS_MODULE,
.open = connector_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = connector_write
};
void drm_debugfs_connector_add(struct drm_connector *connector)
{
struct drm_minor *minor = connector->dev->primary;
struct dentry *root;
if (!minor->debugfs_root)
return;
root = debugfs_create_dir(connector->name, minor->debugfs_root);
connector->debugfs_entry = root;
/* force */
debugfs_create_file("force", 0644, root, connector,
&drm_connector_fops);
/* edid */
debugfs_create_file("edid_override", 0644, root, connector,
&drm_edid_fops);
/* vrr range */
debugfs_create_file("vrr_range", 0444, root, connector,
&vrr_range_fops);
/* max bpc */
debugfs_create_file("output_bpc", 0444, root, connector,
&output_bpc_fops);
if (connector->funcs->debugfs_init)
connector->funcs->debugfs_init(connector, root);
}
void drm_debugfs_connector_remove(struct drm_connector *connector)
{
if (!connector->debugfs_entry)
return;
debugfs_remove_recursive(connector->debugfs_entry);
connector->debugfs_entry = NULL;
}
void drm_debugfs_crtc_add(struct drm_crtc *crtc)
{
struct drm_minor *minor = crtc->dev->primary;
struct dentry *root;
char *name;
name = kasprintf(GFP_KERNEL, "crtc-%d", crtc->index);
if (!name)
return;
root = debugfs_create_dir(name, minor->debugfs_root);
kfree(name);
crtc->debugfs_entry = root;
drm_debugfs_crtc_crc_add(crtc);
}
void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
{
debugfs_remove_recursive(crtc->debugfs_entry);
crtc->debugfs_entry = NULL;
}
#endif /* CONFIG_DEBUG_FS */
| linux-master | drivers/gpu/drm/drm_debugfs.c |
/*
* Copyright (C) 2016 Red Hat
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rob Clark <[email protected]>
*/
#include <linux/stdarg.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/dynamic_debug.h>
#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
/*
* __drm_debug: Enable debug output.
* Bitmask of DRM_UT_x. See include/drm/drm_print.h for details.
*/
unsigned long __drm_debug;
EXPORT_SYMBOL(__drm_debug);
MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n"
"\t\tBit 8 (0x100) will enable DP messages (displayport code)");
#if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
module_param_named(debug, __drm_debug, ulong, 0600);
#else
/* classnames must match vals of enum drm_debug_category */
DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
"DRM_UT_CORE",
"DRM_UT_DRIVER",
"DRM_UT_KMS",
"DRM_UT_PRIME",
"DRM_UT_ATOMIC",
"DRM_UT_VBL",
"DRM_UT_STATE",
"DRM_UT_LEASE",
"DRM_UT_DP",
"DRM_UT_DRMRES");
static struct ddebug_class_param drm_debug_bitmap = {
.bits = &__drm_debug,
.flags = "p",
.map = &drm_debug_classes,
};
module_param_cb(debug, &param_ops_dyndbg_classes, &drm_debug_bitmap, 0600);
#endif
void __drm_puts_coredump(struct drm_printer *p, const char *str)
{
struct drm_print_iterator *iterator = p->arg;
ssize_t len;
if (!iterator->remain)
return;
if (iterator->offset < iterator->start) {
ssize_t copy;
len = strlen(str);
if (iterator->offset + len <= iterator->start) {
iterator->offset += len;
return;
}
copy = len - (iterator->start - iterator->offset);
if (copy > iterator->remain)
copy = iterator->remain;
/* Copy out the bit of the string that we need */
memcpy(iterator->data,
str + (iterator->start - iterator->offset), copy);
iterator->offset = iterator->start + copy;
iterator->remain -= copy;
} else {
ssize_t pos = iterator->offset - iterator->start;
len = min_t(ssize_t, strlen(str), iterator->remain);
memcpy(iterator->data + pos, str, len);
iterator->offset += len;
iterator->remain -= len;
}
}
EXPORT_SYMBOL(__drm_puts_coredump);
void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf)
{
struct drm_print_iterator *iterator = p->arg;
size_t len;
char *buf;
if (!iterator->remain)
return;
/* Figure out how big the string will be */
len = snprintf(NULL, 0, "%pV", vaf);
/* This is the easiest path, we've already advanced beyond the offset */
if (iterator->offset + len <= iterator->start) {
iterator->offset += len;
return;
}
/* Then check if we can directly copy into the target buffer */
if ((iterator->offset >= iterator->start) && (len < iterator->remain)) {
ssize_t pos = iterator->offset - iterator->start;
snprintf(((char *) iterator->data) + pos,
iterator->remain, "%pV", vaf);
iterator->offset += len;
iterator->remain -= len;
return;
}
/*
* Finally, hit the slow path and make a temporary string to copy over
* using _drm_puts_coredump
*/
buf = kmalloc(len + 1, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
if (!buf)
return;
snprintf(buf, len + 1, "%pV", vaf);
__drm_puts_coredump(p, (const char *) buf);
kfree(buf);
}
EXPORT_SYMBOL(__drm_printfn_coredump);
void __drm_puts_seq_file(struct drm_printer *p, const char *str)
{
seq_puts(p->arg, str);
}
EXPORT_SYMBOL(__drm_puts_seq_file);
void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf)
{
seq_printf(p->arg, "%pV", vaf);
}
EXPORT_SYMBOL(__drm_printfn_seq_file);
void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf)
{
dev_info(p->arg, "[" DRM_NAME "] %pV", vaf);
}
EXPORT_SYMBOL(__drm_printfn_info);
void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf)
{
/* pr_debug callsite decorations are unhelpful here */
printk(KERN_DEBUG "%s %pV", p->prefix, vaf);
}
EXPORT_SYMBOL(__drm_printfn_debug);
void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf)
{
pr_err("*ERROR* %s %pV", p->prefix, vaf);
}
EXPORT_SYMBOL(__drm_printfn_err);
/**
* drm_puts - print a const string to a &drm_printer stream
* @p: the &drm printer
* @str: const string
*
 * Allow &drm_printer types that have a dedicated constant-string handler
 * (&drm_printer.puts) to use it; fall back to drm_printf() otherwise.
*/
void drm_puts(struct drm_printer *p, const char *str)
{
if (p->puts)
p->puts(p, str);
else
drm_printf(p, "%s", str);
}
EXPORT_SYMBOL(drm_puts);
/**
* drm_printf - print to a &drm_printer stream
* @p: the &drm_printer
* @f: format string
*/
void drm_printf(struct drm_printer *p, const char *f, ...)
{
va_list args;
va_start(args, f);
drm_vprintf(p, f, &args);
va_end(args);
}
EXPORT_SYMBOL(drm_printf);
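/*
 * Example (hypothetical driver sketch): routing output through a
 * &drm_printer, here one created with drm_info_printer(), so the same dump
 * helper could also target debugfs or a coredump iterator. The "foo_" name
 * and the printed value are illustrative assumptions.
 */
static void foo_dump_status(struct drm_device *dev)
{
	struct drm_printer p = drm_info_printer(dev->dev);

	drm_puts(&p, "foo status:\n");
	drm_printf(&p, "\tqueued jobs: %u\n", 0);
}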
/**
* drm_print_bits - print bits to a &drm_printer stream
*
* Print bits (in flag fields for example) in human readable form.
*
* @p: the &drm_printer
* @value: field value.
* @bits: Array with bit names.
* @nbits: Size of bit names array.
*/
void drm_print_bits(struct drm_printer *p, unsigned long value,
const char * const bits[], unsigned int nbits)
{
bool first = true;
unsigned int i;
if (WARN_ON_ONCE(nbits > BITS_PER_TYPE(value)))
nbits = BITS_PER_TYPE(value);
for_each_set_bit(i, &value, nbits) {
if (WARN_ON_ONCE(!bits[i]))
continue;
drm_printf(p, "%s%s", first ? "" : ",",
bits[i]);
first = false;
}
if (first)
drm_printf(p, "(none)");
}
EXPORT_SYMBOL(drm_print_bits);
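/*
 * Example (hypothetical sketch): the bit-name array is indexed by bit
 * position, so names must be listed from bit 0 upwards. The "foo_" names are
 * illustrative assumptions.
 */
static void foo_print_caps(struct drm_printer *p, unsigned long caps)
{
	static const char * const foo_cap_names[] = {
		"scaling", "rotation", "compression",
	};

	drm_print_bits(p, caps, foo_cap_names, ARRAY_SIZE(foo_cap_names));
	drm_printf(p, "\n");
}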
void drm_dev_printk(const struct device *dev, const char *level,
const char *format, ...)
{
struct va_format vaf;
va_list args;
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
if (dev)
dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
__builtin_return_address(0), &vaf);
else
printk("%s" "[" DRM_NAME ":%ps] %pV",
level, __builtin_return_address(0), &vaf);
va_end(args);
}
EXPORT_SYMBOL(drm_dev_printk);
void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev,
enum drm_debug_category category, const char *format, ...)
{
struct va_format vaf;
va_list args;
if (!__drm_debug_enabled(category))
return;
/* we know we are printing for either syslog, tracefs, or both */
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
if (dev)
dev_printk(KERN_DEBUG, dev, "[" DRM_NAME ":%ps] %pV",
__builtin_return_address(0), &vaf);
else
printk(KERN_DEBUG "[" DRM_NAME ":%ps] %pV",
__builtin_return_address(0), &vaf);
va_end(args);
}
EXPORT_SYMBOL(__drm_dev_dbg);
void ___drm_dbg(struct _ddebug *desc, enum drm_debug_category category, const char *format, ...)
{
struct va_format vaf;
va_list args;
if (!__drm_debug_enabled(category))
return;
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
printk(KERN_DEBUG "[" DRM_NAME ":%ps] %pV",
__builtin_return_address(0), &vaf);
va_end(args);
}
EXPORT_SYMBOL(___drm_dbg);
void __drm_err(const char *format, ...)
{
struct va_format vaf;
va_list args;
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
printk(KERN_ERR "[" DRM_NAME ":%ps] *ERROR* %pV",
__builtin_return_address(0), &vaf);
va_end(args);
}
EXPORT_SYMBOL(__drm_err);
/**
* drm_print_regset32 - print the contents of registers to a
* &drm_printer stream.
*
* @p: the &drm printer
* @regset: the list of registers to print.
*
 * Often in driver debug, it's useful to be able to capture the contents
 * of registers either in the steady state using debugfs or at specific
 * points during operation. This lets the driver have a
* single list of registers for both.
*/
void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset)
{
int namelen = 0;
int i;
for (i = 0; i < regset->nregs; i++)
namelen = max(namelen, (int)strlen(regset->regs[i].name));
for (i = 0; i < regset->nregs; i++) {
drm_printf(p, "%*s = 0x%08x\n",
namelen, regset->regs[i].name,
readl(regset->base + regset->regs[i].offset));
}
}
EXPORT_SYMBOL(drm_print_regset32);
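/*
 * Example (hypothetical driver sketch, assuming the &struct debugfs_reg32
 * definitions from <linux/debugfs.h> are in scope): one register list shared
 * between debugfs dumps and error-state capture. All "foo_" names and
 * register offsets are illustrative assumptions.
 */
static const struct debugfs_reg32 foo_regs[] = {
	{ "CTRL",   0x0000 },
	{ "STATUS", 0x0004 },
};

static void foo_dump_regs(struct drm_printer *p, void __iomem *mmio)
{
	struct debugfs_regset32 regset = {
		.regs = foo_regs,
		.nregs = ARRAY_SIZE(foo_regs),
		.base = mmio,
	};

	drm_print_regset32(p, &regset);
}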
| linux-master | drivers/gpu/drm/drm_print.c |
// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Noralf Trønnes
* Copyright (c) 2006-2009 Red Hat Inc.
* Copyright (c) 2006-2008 Intel Corporation
* Jesse Barnes <[email protected]>
* Copyright (c) 2007 Dave Airlie <[email protected]>
*/
#include "drm/drm_modeset_lock.h"
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <drm/drm_atomic.h>
#include <drm/drm_client.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
#define DRM_CLIENT_MAX_CLONED_CONNECTORS 8
struct drm_client_offset {
int x, y;
};
int drm_client_modeset_create(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
unsigned int num_crtc = dev->mode_config.num_crtc;
unsigned int max_connector_count = 1;
struct drm_mode_set *modeset;
struct drm_crtc *crtc;
unsigned int i = 0;
/* Add terminating zero entry to enable index less iteration */
client->modesets = kcalloc(num_crtc + 1, sizeof(*client->modesets), GFP_KERNEL);
if (!client->modesets)
return -ENOMEM;
mutex_init(&client->modeset_mutex);
drm_for_each_crtc(crtc, dev)
client->modesets[i++].crtc = crtc;
/* Cloning is only supported in the single crtc case. */
if (num_crtc == 1)
max_connector_count = DRM_CLIENT_MAX_CLONED_CONNECTORS;
for (modeset = client->modesets; modeset->crtc; modeset++) {
modeset->connectors = kcalloc(max_connector_count,
sizeof(*modeset->connectors), GFP_KERNEL);
if (!modeset->connectors)
goto err_free;
}
return 0;
err_free:
drm_client_modeset_free(client);
return -ENOMEM;
}
static void drm_client_modeset_release(struct drm_client_dev *client)
{
struct drm_mode_set *modeset;
unsigned int i;
drm_client_for_each_modeset(modeset, client) {
drm_mode_destroy(client->dev, modeset->mode);
modeset->mode = NULL;
modeset->fb = NULL;
for (i = 0; i < modeset->num_connectors; i++) {
drm_connector_put(modeset->connectors[i]);
modeset->connectors[i] = NULL;
}
modeset->num_connectors = 0;
}
}
void drm_client_modeset_free(struct drm_client_dev *client)
{
struct drm_mode_set *modeset;
mutex_lock(&client->modeset_mutex);
drm_client_modeset_release(client);
drm_client_for_each_modeset(modeset, client)
kfree(modeset->connectors);
mutex_unlock(&client->modeset_mutex);
mutex_destroy(&client->modeset_mutex);
kfree(client->modesets);
}
static struct drm_mode_set *
drm_client_find_modeset(struct drm_client_dev *client, struct drm_crtc *crtc)
{
struct drm_mode_set *modeset;
drm_client_for_each_modeset(modeset, client)
if (modeset->crtc == crtc)
return modeset;
return NULL;
}
static struct drm_display_mode *
drm_connector_get_tiled_mode(struct drm_connector *connector)
{
struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay == connector->tile_h_size &&
mode->vdisplay == connector->tile_v_size)
return mode;
}
return NULL;
}
static struct drm_display_mode *
drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
{
struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay == connector->tile_h_size &&
mode->vdisplay == connector->tile_v_size)
continue;
return mode;
}
return NULL;
}
static struct drm_display_mode *
drm_connector_has_preferred_mode(struct drm_connector *connector, int width, int height)
{
struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay > width ||
mode->vdisplay > height)
continue;
if (mode->type & DRM_MODE_TYPE_PREFERRED)
return mode;
}
return NULL;
}
static struct drm_display_mode *drm_connector_pick_cmdline_mode(struct drm_connector *connector)
{
struct drm_cmdline_mode *cmdline_mode;
struct drm_display_mode *mode;
bool prefer_non_interlace;
/*
* Find a user-defined mode. If the user gave us a valid
* mode on the kernel command line, it will show up in this
* list.
*/
list_for_each_entry(mode, &connector->modes, head) {
if (mode->type & DRM_MODE_TYPE_USERDEF)
return mode;
}
cmdline_mode = &connector->cmdline_mode;
if (cmdline_mode->specified == false)
return NULL;
/*
* Attempt to find a matching mode in the list of modes we
* have gotten so far.
*/
prefer_non_interlace = !cmdline_mode->interlace;
again:
list_for_each_entry(mode, &connector->modes, head) {
/* check width/height */
if (mode->hdisplay != cmdline_mode->xres ||
mode->vdisplay != cmdline_mode->yres)
continue;
if (cmdline_mode->refresh_specified) {
if (drm_mode_vrefresh(mode) != cmdline_mode->refresh)
continue;
}
if (cmdline_mode->interlace) {
if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
continue;
} else if (prefer_non_interlace) {
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
continue;
}
return mode;
}
if (prefer_non_interlace) {
prefer_non_interlace = false;
goto again;
}
return NULL;
}
static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
bool enable;
if (connector->display_info.non_desktop)
return false;
if (strict)
enable = connector->status == connector_status_connected;
else
enable = connector->status != connector_status_disconnected;
return enable;
}
static void drm_client_connectors_enabled(struct drm_connector **connectors,
unsigned int connector_count,
bool *enabled)
{
bool any_enabled = false;
struct drm_connector *connector;
int i = 0;
for (i = 0; i < connector_count; i++) {
connector = connectors[i];
enabled[i] = drm_connector_enabled(connector, true);
DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
connector->display_info.non_desktop ? "non desktop" : str_yes_no(enabled[i]));
any_enabled |= enabled[i];
}
if (any_enabled)
return;
for (i = 0; i < connector_count; i++)
enabled[i] = drm_connector_enabled(connectors[i], false);
}
static bool drm_client_target_cloned(struct drm_device *dev,
struct drm_connector **connectors,
unsigned int connector_count,
struct drm_display_mode **modes,
struct drm_client_offset *offsets,
bool *enabled, int width, int height)
{
int count, i, j;
bool can_clone = false;
struct drm_display_mode *dmt_mode, *mode;
/* only contemplate cloning in the single crtc case */
if (dev->mode_config.num_crtc > 1)
return false;
count = 0;
for (i = 0; i < connector_count; i++) {
if (enabled[i])
count++;
}
/* only contemplate cloning if more than one connector is enabled */
if (count <= 1)
return false;
/* check the command line or if nothing common pick 1024x768 */
can_clone = true;
for (i = 0; i < connector_count; i++) {
if (!enabled[i])
continue;
modes[i] = drm_connector_pick_cmdline_mode(connectors[i]);
if (!modes[i]) {
can_clone = false;
break;
}
for (j = 0; j < i; j++) {
if (!enabled[j])
continue;
if (!drm_mode_match(modes[j], modes[i],
DRM_MODE_MATCH_TIMINGS |
DRM_MODE_MATCH_CLOCK |
DRM_MODE_MATCH_FLAGS |
DRM_MODE_MATCH_3D_FLAGS))
can_clone = false;
}
}
if (can_clone) {
DRM_DEBUG_KMS("can clone using command line\n");
return true;
}
/* try and find a 1024x768 mode on each connector */
can_clone = true;
dmt_mode = drm_mode_find_dmt(dev, 1024, 768, 60, false);
if (!dmt_mode)
goto fail;
for (i = 0; i < connector_count; i++) {
if (!enabled[i])
continue;
list_for_each_entry(mode, &connectors[i]->modes, head) {
if (drm_mode_match(mode, dmt_mode,
DRM_MODE_MATCH_TIMINGS |
DRM_MODE_MATCH_CLOCK |
DRM_MODE_MATCH_FLAGS |
DRM_MODE_MATCH_3D_FLAGS))
modes[i] = mode;
}
if (!modes[i])
can_clone = false;
}
kfree(dmt_mode);
if (can_clone) {
DRM_DEBUG_KMS("can clone using 1024x768\n");
return true;
}
fail:
DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
return false;
}
static int drm_client_get_tile_offsets(struct drm_connector **connectors,
unsigned int connector_count,
struct drm_display_mode **modes,
struct drm_client_offset *offsets,
int idx,
int h_idx, int v_idx)
{
struct drm_connector *connector;
int i;
int hoffset = 0, voffset = 0;
for (i = 0; i < connector_count; i++) {
connector = connectors[i];
if (!connector->has_tile)
continue;
if (!modes[i] && (h_idx || v_idx)) {
DRM_DEBUG_KMS("no modes for connector tiled %d %d\n", i,
connector->base.id);
continue;
}
if (connector->tile_h_loc < h_idx)
hoffset += modes[i]->hdisplay;
if (connector->tile_v_loc < v_idx)
voffset += modes[i]->vdisplay;
}
offsets[idx].x = hoffset;
offsets[idx].y = voffset;
DRM_DEBUG_KMS("returned %d %d for %d %d\n", hoffset, voffset, h_idx, v_idx);
return 0;
}
static bool drm_client_target_preferred(struct drm_connector **connectors,
unsigned int connector_count,
struct drm_display_mode **modes,
struct drm_client_offset *offsets,
bool *enabled, int width, int height)
{
const u64 mask = BIT_ULL(connector_count) - 1;
struct drm_connector *connector;
u64 conn_configured = 0;
int tile_pass = 0;
int num_tiled_conns = 0;
int i;
for (i = 0; i < connector_count; i++) {
if (connectors[i]->has_tile &&
connectors[i]->status == connector_status_connected)
num_tiled_conns++;
}
retry:
for (i = 0; i < connector_count; i++) {
connector = connectors[i];
if (conn_configured & BIT_ULL(i))
continue;
if (enabled[i] == false) {
conn_configured |= BIT_ULL(i);
continue;
}
/* first pass over all the untiled connectors */
if (tile_pass == 0 && connector->has_tile)
continue;
if (tile_pass == 1) {
if (connector->tile_h_loc != 0 ||
connector->tile_v_loc != 0)
continue;
} else {
if (connector->tile_h_loc != tile_pass - 1 &&
connector->tile_v_loc != tile_pass - 1)
/* if this tile_pass doesn't cover any of the tiles - keep going */
continue;
/*
* find the tile offsets for this pass - need to find
* all tiles left and above
*/
drm_client_get_tile_offsets(connectors, connector_count, modes, offsets, i,
connector->tile_h_loc, connector->tile_v_loc);
}
DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
connector->base.id);
		/* go for command line mode first */
modes[i] = drm_connector_pick_cmdline_mode(connector);
if (!modes[i]) {
DRM_DEBUG_KMS("looking for preferred mode on connector %d %d\n",
connector->base.id, connector->tile_group ? connector->tile_group->id : 0);
modes[i] = drm_connector_has_preferred_mode(connector, width, height);
}
/* No preferred modes, pick one off the list */
if (!modes[i] && !list_empty(&connector->modes)) {
list_for_each_entry(modes[i], &connector->modes, head)
break;
}
/*
		 * In the case of tiled modes, if not all tiles are present,
		 * fall back to the first available non-tiled mode.
		 * Once all tiles are present, try to find the tiled mode for
		 * all of them; if the tiled mode is not present due to fbcon
		 * size limitations, use the first non-tiled mode only for
		 * tile 0,0 and set no mode for all other tiles.
*/
if (connector->has_tile) {
if (num_tiled_conns <
connector->num_h_tile * connector->num_v_tile ||
(connector->tile_h_loc == 0 &&
connector->tile_v_loc == 0 &&
!drm_connector_get_tiled_mode(connector))) {
DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
connector->base.id);
modes[i] = drm_connector_fallback_non_tiled_mode(connector);
} else {
modes[i] = drm_connector_get_tiled_mode(connector);
}
}
DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
"none");
conn_configured |= BIT_ULL(i);
}
if ((conn_configured & mask) != mask) {
tile_pass++;
goto retry;
}
return true;
}
static bool connector_has_possible_crtc(struct drm_connector *connector,
struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->possible_crtcs & drm_crtc_mask(crtc))
return true;
}
return false;
}
static int drm_client_pick_crtcs(struct drm_client_dev *client,
struct drm_connector **connectors,
unsigned int connector_count,
struct drm_crtc **best_crtcs,
struct drm_display_mode **modes,
int n, int width, int height)
{
struct drm_device *dev = client->dev;
struct drm_connector *connector;
int my_score, best_score, score;
struct drm_crtc **crtcs, *crtc;
struct drm_mode_set *modeset;
int o;
if (n == connector_count)
return 0;
connector = connectors[n];
best_crtcs[n] = NULL;
best_score = drm_client_pick_crtcs(client, connectors, connector_count,
best_crtcs, modes, n + 1, width, height);
if (modes[n] == NULL)
return best_score;
crtcs = kcalloc(connector_count, sizeof(*crtcs), GFP_KERNEL);
if (!crtcs)
return best_score;
my_score = 1;
if (connector->status == connector_status_connected)
my_score++;
if (connector->cmdline_mode.specified)
my_score++;
if (drm_connector_has_preferred_mode(connector, width, height))
my_score++;
/*
* select a crtc for this connector and then attempt to configure
* remaining connectors
*/
drm_client_for_each_modeset(modeset, client) {
crtc = modeset->crtc;
if (!connector_has_possible_crtc(connector, crtc))
continue;
for (o = 0; o < n; o++)
if (best_crtcs[o] == crtc)
break;
if (o < n) {
/* ignore cloning unless only a single crtc */
if (dev->mode_config.num_crtc > 1)
continue;
if (!drm_mode_equal(modes[o], modes[n]))
continue;
}
crtcs[n] = crtc;
memcpy(crtcs, best_crtcs, n * sizeof(*crtcs));
score = my_score + drm_client_pick_crtcs(client, connectors, connector_count,
crtcs, modes, n + 1, width, height);
if (score > best_score) {
best_score = score;
memcpy(best_crtcs, crtcs, connector_count * sizeof(*crtcs));
}
}
kfree(crtcs);
return best_score;
}
/* Try to read the BIOS display configuration and use it for the initial config */
static bool drm_client_firmware_config(struct drm_client_dev *client,
struct drm_connector **connectors,
unsigned int connector_count,
struct drm_crtc **crtcs,
struct drm_display_mode **modes,
struct drm_client_offset *offsets,
bool *enabled, int width, int height)
{
const int count = min_t(unsigned int, connector_count, BITS_PER_LONG);
unsigned long conn_configured, conn_seq, mask;
struct drm_device *dev = client->dev;
int i, j;
bool *save_enabled;
bool fallback = true, ret = true;
int num_connectors_enabled = 0;
int num_connectors_detected = 0;
int num_tiled_conns = 0;
struct drm_modeset_acquire_ctx ctx;
if (!drm_drv_uses_atomic_modeset(dev))
return false;
if (WARN_ON(count <= 0))
return false;
save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
if (!save_enabled)
return false;
drm_modeset_acquire_init(&ctx, 0);
while (drm_modeset_lock_all_ctx(dev, &ctx) != 0)
drm_modeset_backoff(&ctx);
memcpy(save_enabled, enabled, count);
mask = GENMASK(count - 1, 0);
conn_configured = 0;
for (i = 0; i < count; i++) {
if (connectors[i]->has_tile &&
connectors[i]->status == connector_status_connected)
num_tiled_conns++;
}
retry:
conn_seq = conn_configured;
for (i = 0; i < count; i++) {
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_crtc *new_crtc;
connector = connectors[i];
if (conn_configured & BIT(i))
continue;
if (conn_seq == 0 && !connector->has_tile)
continue;
if (connector->status == connector_status_connected)
num_connectors_detected++;
if (!enabled[i]) {
DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
connector->name);
conn_configured |= BIT(i);
continue;
}
if (connector->force == DRM_FORCE_OFF) {
DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n",
connector->name);
enabled[i] = false;
continue;
}
encoder = connector->state->best_encoder;
if (!encoder || WARN_ON(!connector->state->crtc)) {
if (connector->force > DRM_FORCE_OFF)
goto bail;
DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
connector->name);
enabled[i] = false;
conn_configured |= BIT(i);
continue;
}
num_connectors_enabled++;
new_crtc = connector->state->crtc;
/*
* Make sure we're not trying to drive multiple connectors
* with a single CRTC, since our cloning support may not
* match the BIOS.
*/
for (j = 0; j < count; j++) {
if (crtcs[j] == new_crtc) {
DRM_DEBUG_KMS("fallback: cloned configuration\n");
goto bail;
}
}
DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n",
connector->name);
/* go for command line mode first */
modes[i] = drm_connector_pick_cmdline_mode(connector);
/* try for preferred next */
if (!modes[i]) {
DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
connector->name, connector->has_tile);
modes[i] = drm_connector_has_preferred_mode(connector, width, height);
}
/* No preferred mode marked by the EDID? Are there any modes? */
if (!modes[i] && !list_empty(&connector->modes)) {
DRM_DEBUG_KMS("using first mode listed on connector %s\n",
connector->name);
modes[i] = list_first_entry(&connector->modes,
struct drm_display_mode,
head);
}
/* last resort: use current mode */
if (!modes[i]) {
/*
* IMPORTANT: We want to use the adjusted mode (i.e.
* after the panel fitter upscaling) as the initial
* config, not the input mode, which is what crtc->mode
* usually contains. But since our current
* code puts a mode derived from the post-pfit timings
* into crtc->mode this works out correctly.
*
* This is crtc->mode and not crtc->state->mode for the
* fastboot check to work correctly.
*/
DRM_DEBUG_KMS("looking for current mode on connector %s\n",
connector->name);
modes[i] = &connector->state->crtc->mode;
}
/*
		 * In the case of tiled modes, if not all tiles are present,
		 * fall back to a non-tiled mode.
*/
if (connector->has_tile &&
num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
connector->base.id);
modes[i] = drm_connector_fallback_non_tiled_mode(connector);
}
crtcs[i] = new_crtc;
DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
connector->name,
connector->state->crtc->base.id,
connector->state->crtc->name,
modes[i]->hdisplay, modes[i]->vdisplay,
modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "");
fallback = false;
conn_configured |= BIT(i);
}
if ((conn_configured & mask) != mask && conn_configured != conn_seq)
goto retry;
/*
	 * If the BIOS didn't enable everything it could, fall back to
	 * lighting up as many outputs as possible, matching the behaviour of
	 * the fbdev helper library.
*/
if (num_connectors_enabled != num_connectors_detected &&
num_connectors_enabled < dev->mode_config.num_crtc) {
DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
num_connectors_detected);
fallback = true;
}
if (fallback) {
bail:
DRM_DEBUG_KMS("Not using firmware configuration\n");
memcpy(enabled, save_enabled, count);
ret = false;
}
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
kfree(save_enabled);
return ret;
}
/**
* drm_client_modeset_probe() - Probe for displays
* @client: DRM client
* @width: Maximum display mode width (optional)
* @height: Maximum display mode height (optional)
*
* This function sets up display pipelines for enabled connectors and stores the
* config in the client's modeset array.
*
* Returns:
* Zero on success or negative error code on failure.
*/
int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, unsigned int height)
{
struct drm_connector *connector, **connectors = NULL;
struct drm_connector_list_iter conn_iter;
struct drm_device *dev = client->dev;
unsigned int total_modes_count = 0;
struct drm_client_offset *offsets;
unsigned int connector_count = 0;
struct drm_display_mode **modes;
struct drm_crtc **crtcs;
int i, ret = 0;
bool *enabled;
DRM_DEBUG_KMS("\n");
if (!width)
width = dev->mode_config.max_width;
if (!height)
height = dev->mode_config.max_height;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_client_for_each_connector_iter(connector, &conn_iter) {
struct drm_connector **tmp;
tmp = krealloc(connectors, (connector_count + 1) * sizeof(*connectors), GFP_KERNEL);
if (!tmp) {
ret = -ENOMEM;
goto free_connectors;
}
connectors = tmp;
drm_connector_get(connector);
connectors[connector_count++] = connector;
}
drm_connector_list_iter_end(&conn_iter);
if (!connector_count)
return 0;
crtcs = kcalloc(connector_count, sizeof(*crtcs), GFP_KERNEL);
modes = kcalloc(connector_count, sizeof(*modes), GFP_KERNEL);
offsets = kcalloc(connector_count, sizeof(*offsets), GFP_KERNEL);
enabled = kcalloc(connector_count, sizeof(bool), GFP_KERNEL);
if (!crtcs || !modes || !enabled || !offsets) {
DRM_ERROR("Memory allocation failed\n");
ret = -ENOMEM;
goto out;
}
mutex_lock(&client->modeset_mutex);
mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < connector_count; i++)
total_modes_count += connectors[i]->funcs->fill_modes(connectors[i], width, height);
if (!total_modes_count)
DRM_DEBUG_KMS("No connectors reported connected with modes\n");
drm_client_connectors_enabled(connectors, connector_count, enabled);
if (!drm_client_firmware_config(client, connectors, connector_count, crtcs,
modes, offsets, enabled, width, height)) {
memset(modes, 0, connector_count * sizeof(*modes));
memset(crtcs, 0, connector_count * sizeof(*crtcs));
memset(offsets, 0, connector_count * sizeof(*offsets));
if (!drm_client_target_cloned(dev, connectors, connector_count, modes,
offsets, enabled, width, height) &&
!drm_client_target_preferred(connectors, connector_count, modes,
offsets, enabled, width, height))
DRM_ERROR("Unable to find initial modes\n");
DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n",
width, height);
drm_client_pick_crtcs(client, connectors, connector_count,
crtcs, modes, 0, width, height);
}
mutex_unlock(&dev->mode_config.mutex);
drm_client_modeset_release(client);
for (i = 0; i < connector_count; i++) {
struct drm_display_mode *mode = modes[i];
struct drm_crtc *crtc = crtcs[i];
struct drm_client_offset *offset = &offsets[i];
if (mode && crtc) {
struct drm_mode_set *modeset = drm_client_find_modeset(client, crtc);
struct drm_connector *connector = connectors[i];
DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
mode->name, crtc->base.id, offset->x, offset->y);
if (WARN_ON_ONCE(modeset->num_connectors == DRM_CLIENT_MAX_CLONED_CONNECTORS ||
(dev->mode_config.num_crtc > 1 && modeset->num_connectors == 1))) {
ret = -EINVAL;
break;
}
kfree(modeset->mode);
modeset->mode = drm_mode_duplicate(dev, mode);
drm_connector_get(connector);
modeset->connectors[modeset->num_connectors++] = connector;
modeset->x = offset->x;
modeset->y = offset->y;
}
}
mutex_unlock(&client->modeset_mutex);
out:
kfree(crtcs);
kfree(modes);
kfree(offsets);
kfree(enabled);
free_connectors:
for (i = 0; i < connector_count; i++)
drm_connector_put(connectors[i]);
kfree(connectors);
return ret;
}
EXPORT_SYMBOL(drm_client_modeset_probe);
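/*
 * Example (hypothetical in-kernel client sketch): probe a configuration for
 * all connectors at the mode_config maximum size and then light it up. The
 * "foo_" name is an illustrative assumption.
 */
static int foo_client_setup_display(struct drm_client_dev *client)
{
	int ret;

	ret = drm_client_modeset_probe(client, 0, 0);
	if (ret)
		return ret;

	return drm_client_modeset_commit(client);
}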
/**
* drm_client_rotation() - Check the initial rotation value
* @modeset: DRM modeset
* @rotation: Returned rotation value
*
* This function checks if the primary plane in @modeset can hw rotate
* to match the rotation needed on its connector.
*
* Note: Currently only 0 and 180 degrees are supported.
*
* Return:
* True if the plane can do the rotation, false otherwise.
*/
bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation)
{
struct drm_connector *connector = modeset->connectors[0];
struct drm_plane *plane = modeset->crtc->primary;
struct drm_cmdline_mode *cmdline;
u64 valid_mask = 0;
unsigned int i;
if (!modeset->num_connectors)
return false;
switch (connector->display_info.panel_orientation) {
case DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP:
*rotation = DRM_MODE_ROTATE_180;
break;
case DRM_MODE_PANEL_ORIENTATION_LEFT_UP:
*rotation = DRM_MODE_ROTATE_90;
break;
case DRM_MODE_PANEL_ORIENTATION_RIGHT_UP:
*rotation = DRM_MODE_ROTATE_270;
break;
default:
*rotation = DRM_MODE_ROTATE_0;
}
	/*
	 * The panel already defined the default rotation
	 * through its orientation. Whatever has been provided
	 * on the command line needs to be added to that.
	 *
	 * Unfortunately, the rotations are at different bit
	 * indices, so adding them up is not as trivial as it
	 * could be.
	 *
	 * Reflections, on the other hand, are pretty trivial to deal with; a
	 * simple XOR between the two handles the addition nicely.
*/
cmdline = &connector->cmdline_mode;
if (cmdline->specified && cmdline->rotation_reflection) {
unsigned int cmdline_rest, panel_rest;
unsigned int cmdline_rot, panel_rot;
unsigned int sum_rot, sum_rest;
panel_rot = ilog2(*rotation & DRM_MODE_ROTATE_MASK);
cmdline_rot = ilog2(cmdline->rotation_reflection & DRM_MODE_ROTATE_MASK);
sum_rot = (panel_rot + cmdline_rot) % 4;
panel_rest = *rotation & ~DRM_MODE_ROTATE_MASK;
cmdline_rest = cmdline->rotation_reflection & ~DRM_MODE_ROTATE_MASK;
sum_rest = panel_rest ^ cmdline_rest;
*rotation = (1 << sum_rot) | sum_rest;
}
/*
* TODO: support 90 / 270 degree hardware rotation,
* depending on the hardware this may require the framebuffer
* to be in a specific tiling format.
*/
if (((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0 &&
(*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180) ||
!plane->rotation_property)
return false;
for (i = 0; i < plane->rotation_property->num_values; i++)
valid_mask |= (1ULL << plane->rotation_property->values[i]);
if (!(*rotation & valid_mask))
return false;
return true;
}
EXPORT_SYMBOL(drm_client_rotation);
static int drm_client_modeset_commit_atomic(struct drm_client_dev *client, bool active, bool check)
{
struct drm_device *dev = client->dev;
struct drm_plane *plane;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
struct drm_mode_set *mode_set;
int ret;
drm_modeset_acquire_init(&ctx, 0);
state = drm_atomic_state_alloc(dev);
if (!state) {
ret = -ENOMEM;
goto out_ctx;
}
state->acquire_ctx = &ctx;
retry:
drm_for_each_plane(plane, dev) {
struct drm_plane_state *plane_state;
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
goto out_state;
}
plane_state->rotation = DRM_MODE_ROTATE_0;
/* disable non-primary: */
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
continue;
ret = __drm_atomic_helper_disable_plane(plane, plane_state);
if (ret != 0)
goto out_state;
}
drm_client_for_each_modeset(mode_set, client) {
struct drm_plane *primary = mode_set->crtc->primary;
unsigned int rotation;
if (drm_client_rotation(mode_set, &rotation)) {
struct drm_plane_state *plane_state;
/* Cannot fail as we've already gotten the plane state above */
plane_state = drm_atomic_get_new_plane_state(state, primary);
plane_state->rotation = rotation;
}
ret = __drm_atomic_helper_set_config(mode_set, state);
if (ret != 0)
goto out_state;
/*
* __drm_atomic_helper_set_config() sets active when a
* mode is set, unconditionally clear it if we force DPMS off
*/
if (!active) {
struct drm_crtc *crtc = mode_set->crtc;
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
crtc_state->active = false;
}
}
if (check)
ret = drm_atomic_check_only(state);
else
ret = drm_atomic_commit(state);
out_state:
if (ret == -EDEADLK)
goto backoff;
drm_atomic_state_put(state);
out_ctx:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
backoff:
drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
goto retry;
}
static int drm_client_modeset_commit_legacy(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
struct drm_mode_set *mode_set;
struct drm_plane *plane;
int ret = 0;
drm_modeset_lock_all(dev);
drm_for_each_plane(plane, dev) {
if (plane->type != DRM_PLANE_TYPE_PRIMARY)
drm_plane_force_disable(plane);
if (plane->rotation_property)
drm_mode_plane_set_obj_prop(plane,
plane->rotation_property,
DRM_MODE_ROTATE_0);
}
drm_client_for_each_modeset(mode_set, client) {
struct drm_crtc *crtc = mode_set->crtc;
if (crtc->funcs->cursor_set2) {
ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
if (ret)
goto out;
} else if (crtc->funcs->cursor_set) {
ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
if (ret)
goto out;
}
ret = drm_mode_set_config_internal(mode_set);
if (ret)
goto out;
}
out:
drm_modeset_unlock_all(dev);
return ret;
}
/**
* drm_client_modeset_check() - Check modeset configuration
* @client: DRM client
*
* Check modeset configuration.
*
* Returns:
* Zero on success or negative error code on failure.
*/
int drm_client_modeset_check(struct drm_client_dev *client)
{
int ret;
if (!drm_drv_uses_atomic_modeset(client->dev))
return 0;
mutex_lock(&client->modeset_mutex);
ret = drm_client_modeset_commit_atomic(client, true, true);
mutex_unlock(&client->modeset_mutex);
return ret;
}
EXPORT_SYMBOL(drm_client_modeset_check);
/**
* drm_client_modeset_commit_locked() - Force commit CRTC configuration
* @client: DRM client
*
* Commit modeset configuration to crtcs without checking if there is a DRM
* master. The assumption is that the caller already holds an internal DRM
* master reference acquired with drm_master_internal_acquire().
*
* Returns:
* Zero on success or negative error code on failure.
*/
int drm_client_modeset_commit_locked(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
int ret;
mutex_lock(&client->modeset_mutex);
if (drm_drv_uses_atomic_modeset(dev))
ret = drm_client_modeset_commit_atomic(client, true, false);
else
ret = drm_client_modeset_commit_legacy(client);
mutex_unlock(&client->modeset_mutex);
return ret;
}
EXPORT_SYMBOL(drm_client_modeset_commit_locked);
/**
* drm_client_modeset_commit() - Commit CRTC configuration
* @client: DRM client
*
* Commit modeset configuration to crtcs.
*
* Returns:
* Zero on success or negative error code on failure.
*/
int drm_client_modeset_commit(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
int ret;
if (!drm_master_internal_acquire(dev))
return -EBUSY;
ret = drm_client_modeset_commit_locked(client);
drm_master_internal_release(dev);
return ret;
}
EXPORT_SYMBOL(drm_client_modeset_commit);
static void drm_client_modeset_dpms_legacy(struct drm_client_dev *client, int dpms_mode)
{
struct drm_device *dev = client->dev;
struct drm_connector *connector;
struct drm_mode_set *modeset;
struct drm_modeset_acquire_ctx ctx;
int j;
int ret;
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
drm_client_for_each_modeset(modeset, client) {
if (!modeset->crtc->enabled)
continue;
for (j = 0; j < modeset->num_connectors; j++) {
connector = modeset->connectors[j];
connector->funcs->dpms(connector, dpms_mode);
drm_object_property_set_value(&connector->base,
dev->mode_config.dpms_property, dpms_mode);
}
}
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
}
/**
* drm_client_modeset_dpms() - Set DPMS mode
* @client: DRM client
* @mode: DPMS mode
*
* Note: For atomic drivers @mode is reduced to on/off.
*
* Returns:
* Zero on success or negative error code on failure.
*/
int drm_client_modeset_dpms(struct drm_client_dev *client, int mode)
{
struct drm_device *dev = client->dev;
int ret = 0;
if (!drm_master_internal_acquire(dev))
return -EBUSY;
mutex_lock(&client->modeset_mutex);
if (drm_drv_uses_atomic_modeset(dev))
ret = drm_client_modeset_commit_atomic(client, mode == DRM_MODE_DPMS_ON, false);
else
drm_client_modeset_dpms_legacy(client, mode);
mutex_unlock(&client->modeset_mutex);
drm_master_internal_release(dev);
return ret;
}
EXPORT_SYMBOL(drm_client_modeset_dpms);
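/*
 * Example (hypothetical client sketch): blanking and unblanking the
 * configured displays around a suspend/resume cycle. The "foo_" names are
 * illustrative assumptions.
 */
static void foo_client_suspend_display(struct drm_client_dev *client)
{
	drm_client_modeset_dpms(client, DRM_MODE_DPMS_OFF);
}

static void foo_client_resume_display(struct drm_client_dev *client)
{
	drm_client_modeset_dpms(client, DRM_MODE_DPMS_ON);
}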
#ifdef CONFIG_DRM_KUNIT_TEST
#include "tests/drm_client_modeset_test.c"
#endif
| linux-master | drivers/gpu/drm/drm_client_modeset.c |
/*
* Copyright © 2008 Intel Corporation
* Copyright © 2016 Collabora Ltd
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Based on code from the i915 driver.
* Original author: Damien Lespiau <[email protected]>
*
*/
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs_crc.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include "drm_internal.h"
/**
* DOC: CRC ABI
*
* DRM device drivers can provide to userspace CRC information of each frame as
* it reached a given hardware component (a CRC sampling "source").
*
* Userspace can control generation of CRCs in a given CRTC by writing to the
* file dri/0/crtc-N/crc/control in debugfs, with N being the :ref:`index of
* the CRTC<crtc_index>`. Accepted values are source names (which are
* driver-specific) and the "auto" keyword, which will let the driver select a
* default source of frame CRCs for this CRTC.
*
* Once frame CRC generation is enabled, userspace can capture them by reading
* the dri/0/crtc-N/crc/data file. Each line in that file contains the frame
* number in the first field and then a number of unsigned integer fields
* containing the CRC data. Fields are separated by a single space and the number
* of CRC fields is source-specific.
*
* Note that though in some cases the CRC is computed in a specified way and on
* the frame contents as supplied by userspace (eDP 1.3), in general the CRC
 * computation is performed in an unspecified way and on frame contents that have
 * already been processed in an equally unspecified way, and thus userspace cannot
* rely on being able to generate matching CRC values for the frame contents that
* it submits. In this general case, the maximum userspace can do is to compare
* the reported CRCs of frames that should have the same contents.
*
* On the driver side the implementation effort is minimal, drivers only need to
* implement &drm_crtc_funcs.set_crc_source and &drm_crtc_funcs.verify_crc_source.
* The debugfs files are automatically set up if those vfuncs are set. CRC samples
* need to be captured in the driver by calling drm_crtc_add_crc_entry().
* Depending on the driver and HW requirements, &drm_crtc_funcs.set_crc_source
* may result in a commit (even a full modeset).
*
* CRC results must be reliable across non-full-modeset atomic commits, so if a
* commit via DRM_IOCTL_MODE_ATOMIC would disable or otherwise interfere with
* CRC generation, then the driver must mark that commit as a full modeset
* (drm_atomic_crtc_needs_modeset() should return true). As a result, to ensure
* consistent results, generic userspace must re-setup CRC generation after a
* legacy SETCRTC or an atomic commit with DRM_MODE_ATOMIC_ALLOW_MODESET.
*/
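/*
 * Example (userspace sketch, not kernel code; the device path and the "auto"
 * source name are illustrative assumptions): selecting a CRC source and
 * reading entries could look roughly like this:
 *
 *	int fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/control", O_WRONLY);
 *	write(fd, "auto", 4);
 *	close(fd);
 *
 *	FILE *data = fopen("/sys/kernel/debug/dri/0/crtc-0/crc/data", "r");
 *	char line[128];
 *	while (fgets(line, sizeof(line), data))
 *		;	(each line is "<frame> <crc0> [<crc1> ...]")
 *	fclose(data);
 */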
static int crc_control_show(struct seq_file *m, void *data)
{
struct drm_crtc *crtc = m->private;
if (crtc->funcs->get_crc_sources) {
size_t count;
const char *const *sources = crtc->funcs->get_crc_sources(crtc,
&count);
size_t values_cnt;
int i;
if (count == 0 || !sources)
goto out;
for (i = 0; i < count; i++)
if (!crtc->funcs->verify_crc_source(crtc, sources[i],
&values_cnt)) {
if (strcmp(sources[i], crtc->crc.source))
seq_printf(m, "%s\n", sources[i]);
else
seq_printf(m, "%s*\n", sources[i]);
}
}
return 0;
out:
seq_printf(m, "%s*\n", crtc->crc.source);
return 0;
}
static int crc_control_open(struct inode *inode, struct file *file)
{
struct drm_crtc *crtc = inode->i_private;
return single_open(file, crc_control_show, crtc);
}
static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
struct drm_crtc *crtc = m->private;
struct drm_crtc_crc *crc = &crtc->crc;
char *source;
size_t values_cnt;
int ret;
if (len == 0)
return 0;
if (len > PAGE_SIZE - 1) {
DRM_DEBUG_KMS("Expected < %lu bytes into crtc crc control\n",
PAGE_SIZE);
return -E2BIG;
}
source = memdup_user_nul(ubuf, len);
if (IS_ERR(source))
return PTR_ERR(source);
if (source[len - 1] == '\n')
source[len - 1] = '\0';
ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
if (ret) {
kfree(source);
return ret;
}
spin_lock_irq(&crc->lock);
if (crc->opened) {
spin_unlock_irq(&crc->lock);
kfree(source);
return -EBUSY;
}
kfree(crc->source);
crc->source = source;
spin_unlock_irq(&crc->lock);
*offp += len;
return len;
}
static const struct file_operations drm_crtc_crc_control_fops = {
.owner = THIS_MODULE,
.open = crc_control_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = crc_control_write
};
static int crtc_crc_data_count(struct drm_crtc_crc *crc)
{
assert_spin_locked(&crc->lock);
return CIRC_CNT(crc->head, crc->tail, DRM_CRC_ENTRIES_NR);
}
static void crtc_crc_cleanup(struct drm_crtc_crc *crc)
{
kfree(crc->entries);
crc->overflow = false;
crc->entries = NULL;
crc->head = 0;
crc->tail = 0;
crc->values_cnt = 0;
crc->opened = false;
}
static int crtc_crc_open(struct inode *inode, struct file *filep)
{
struct drm_crtc *crtc = inode->i_private;
struct drm_crtc_crc *crc = &crtc->crc;
struct drm_crtc_crc_entry *entries = NULL;
size_t values_cnt;
int ret = 0;
if (drm_drv_uses_atomic_modeset(crtc->dev)) {
ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
if (ret)
return ret;
if (!crtc->state->active)
ret = -EIO;
drm_modeset_unlock(&crtc->mutex);
if (ret)
return ret;
}
ret = crtc->funcs->verify_crc_source(crtc, crc->source, &values_cnt);
if (ret)
return ret;
if (WARN_ON(values_cnt > DRM_MAX_CRC_NR))
return -EINVAL;
if (WARN_ON(values_cnt == 0))
return -EINVAL;
entries = kcalloc(DRM_CRC_ENTRIES_NR, sizeof(*entries), GFP_KERNEL);
if (!entries)
return -ENOMEM;
spin_lock_irq(&crc->lock);
if (!crc->opened) {
crc->opened = true;
crc->entries = entries;
crc->values_cnt = values_cnt;
} else {
ret = -EBUSY;
}
spin_unlock_irq(&crc->lock);
if (ret) {
kfree(entries);
return ret;
}
ret = crtc->funcs->set_crc_source(crtc, crc->source);
if (ret)
goto err;
return 0;
err:
spin_lock_irq(&crc->lock);
crtc_crc_cleanup(crc);
spin_unlock_irq(&crc->lock);
return ret;
}
static int crtc_crc_release(struct inode *inode, struct file *filep)
{
struct drm_crtc *crtc = filep->f_inode->i_private;
struct drm_crtc_crc *crc = &crtc->crc;
/* terminate the infinite while loop if 'drm_dp_aux_crc_work' is running */
spin_lock_irq(&crc->lock);
crc->opened = false;
spin_unlock_irq(&crc->lock);
crtc->funcs->set_crc_source(crtc, NULL);
spin_lock_irq(&crc->lock);
crtc_crc_cleanup(crc);
spin_unlock_irq(&crc->lock);
return 0;
}
/*
* One 10-char frame field, then one field per CRC value (a separating space
* plus 10 chars each), a trailing newline and a NUL terminator.
*/
#define LINE_LEN(values_cnt) (10 + 11 * values_cnt + 1 + 1)
#define MAX_LINE_LEN (LINE_LEN(DRM_MAX_CRC_NR))
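/*
 * Worked illustration (values are made up): with values_cnt == 2 a line is
 * "0x00000042 0xdeadbeef 0xcafebabe\n" plus the NUL terminator, i.e.
 * LINE_LEN(2) = 10 + 11 * 2 + 1 + 1 = 34 bytes.
 */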
static ssize_t crtc_crc_read(struct file *filep, char __user *user_buf,
size_t count, loff_t *pos)
{
struct drm_crtc *crtc = filep->f_inode->i_private;
struct drm_crtc_crc *crc = &crtc->crc;
struct drm_crtc_crc_entry *entry;
char buf[MAX_LINE_LEN];
int ret, i;
spin_lock_irq(&crc->lock);
if (!crc->source) {
spin_unlock_irq(&crc->lock);
return 0;
}
/* Nothing to read? */
while (crtc_crc_data_count(crc) == 0) {
if (filep->f_flags & O_NONBLOCK) {
spin_unlock_irq(&crc->lock);
return -EAGAIN;
}
ret = wait_event_interruptible_lock_irq(crc->wq,
crtc_crc_data_count(crc),
crc->lock);
if (ret) {
spin_unlock_irq(&crc->lock);
return ret;
}
}
/* We know we have an entry to be read */
entry = &crc->entries[crc->tail];
if (count < LINE_LEN(crc->values_cnt)) {
spin_unlock_irq(&crc->lock);
return -EINVAL;
}
BUILD_BUG_ON_NOT_POWER_OF_2(DRM_CRC_ENTRIES_NR);
crc->tail = (crc->tail + 1) & (DRM_CRC_ENTRIES_NR - 1);
spin_unlock_irq(&crc->lock);
if (entry->has_frame_counter)
sprintf(buf, "0x%08x", entry->frame);
else
sprintf(buf, "XXXXXXXXXX");
for (i = 0; i < crc->values_cnt; i++)
sprintf(buf + 10 + i * 11, " 0x%08x", entry->crcs[i]);
sprintf(buf + 10 + crc->values_cnt * 11, "\n");
if (copy_to_user(user_buf, buf, LINE_LEN(crc->values_cnt)))
return -EFAULT;
return LINE_LEN(crc->values_cnt);
}
static __poll_t crtc_crc_poll(struct file *file, poll_table *wait)
{
struct drm_crtc *crtc = file->f_inode->i_private;
struct drm_crtc_crc *crc = &crtc->crc;
__poll_t ret = 0;
poll_wait(file, &crc->wq, wait);
spin_lock_irq(&crc->lock);
if (crc->source && crtc_crc_data_count(crc))
ret |= EPOLLIN | EPOLLRDNORM;
spin_unlock_irq(&crc->lock);
return ret;
}
static const struct file_operations drm_crtc_crc_data_fops = {
.owner = THIS_MODULE,
.open = crtc_crc_open,
.read = crtc_crc_read,
.poll = crtc_crc_poll,
.release = crtc_crc_release,
};
void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
{
struct dentry *crc_ent;
if (!crtc->funcs->set_crc_source || !crtc->funcs->verify_crc_source)
return;
crc_ent = debugfs_create_dir("crc", crtc->debugfs_entry);
debugfs_create_file("control", S_IRUGO | S_IWUSR, crc_ent, crtc,
&drm_crtc_crc_control_fops);
debugfs_create_file("data", S_IRUGO, crc_ent, crtc,
&drm_crtc_crc_data_fops);
}
/**
* drm_crtc_add_crc_entry - Add entry with CRC information for a frame
* @crtc: CRTC to which the frame belongs
* @has_frame: whether this entry has a frame number to go with
* @frame: number of the frame these CRCs are about
* @crcs: array of CRC values, with length matching #drm_crtc_crc.values_cnt
*
* For each frame, the driver polls the source of CRCs for new data and calls
* this function to add them to the buffer from where userspace reads.
*/
int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
uint32_t frame, uint32_t *crcs)
{
struct drm_crtc_crc *crc = &crtc->crc;
struct drm_crtc_crc_entry *entry;
int head, tail;
unsigned long flags;
spin_lock_irqsave(&crc->lock, flags);
/* Caller may not have noticed yet that userspace has stopped reading */
if (!crc->entries) {
spin_unlock_irqrestore(&crc->lock, flags);
return -EINVAL;
}
head = crc->head;
tail = crc->tail;
if (CIRC_SPACE(head, tail, DRM_CRC_ENTRIES_NR) < 1) {
bool was_overflow = crc->overflow;
crc->overflow = true;
spin_unlock_irqrestore(&crc->lock, flags);
if (!was_overflow)
DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
return -ENOBUFS;
}
entry = &crc->entries[head];
entry->frame = frame;
entry->has_frame_counter = has_frame;
memcpy(&entry->crcs, crcs, sizeof(*crcs) * crc->values_cnt);
head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1);
crc->head = head;
spin_unlock_irqrestore(&crc->lock, flags);
wake_up_interruptible(&crc->wq);
return 0;
}
EXPORT_SYMBOL_GPL(drm_crtc_add_crc_entry);
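/*
 * Illustrative driver-side sketch (my_crtc_frame_done(), my_read_hw_crc() and
 * my_read_hw_frame_counter() are hypothetical driver helpers): once CRC
 * generation has been enabled via set_crc_source, a driver typically feeds one
 * entry per frame from its frame-done handler:
 *
 *	static void my_crtc_frame_done(struct drm_crtc *crtc)
 *	{
 *		uint32_t crcs[1];
 *
 *		crcs[0] = my_read_hw_crc(crtc);
 *		drm_crtc_add_crc_entry(crtc, true,
 *				       my_read_hw_frame_counter(crtc), crcs);
 *	}
 */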
| linux-master | drivers/gpu/drm/drm_debugfs_crc.c |
/*
* Copyright (C) 2011-2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
/**
* drm_rect_intersect - intersect two rectangles
* @r1: first rectangle
* @r2: second rectangle
*
* Calculate the intersection of rectangles @r1 and @r2.
* @r1 will be overwritten with the intersection.
*
* RETURNS:
* %true if rectangle @r1 is still visible after the operation,
* %false otherwise.
*/
bool drm_rect_intersect(struct drm_rect *r1, const struct drm_rect *r2)
{
r1->x1 = max(r1->x1, r2->x1);
r1->y1 = max(r1->y1, r2->y1);
r1->x2 = min(r1->x2, r2->x2);
r1->y2 = min(r1->y2, r2->y2);
return drm_rect_visible(r1);
}
EXPORT_SYMBOL(drm_rect_intersect);
static u32 clip_scaled(int src, int dst, int *clip)
{
u64 tmp;
if (dst == 0)
return 0;
/* Only clip what we have. Keeps the result bounded. */
*clip = min(*clip, dst);
tmp = mul_u32_u32(src, dst - *clip);
/*
* Round toward 1.0 when clipping so that we don't accidentally
* change upscaling to downscaling or vice versa.
*/
if (src < (dst << 16))
return DIV_ROUND_UP_ULL(tmp, dst);
else
return DIV_ROUND_DOWN_ULL(tmp, dst);
}
/**
* drm_rect_clip_scaled - perform a scaled clip operation
* @src: source window rectangle
* @dst: destination window rectangle
* @clip: clip rectangle
*
* Clip rectangle @dst by rectangle @clip. Clip rectangle @src by
* the corresponding amounts, retaining the vertical and horizontal scaling
* factors from @src to @dst.
*
* RETURNS:
*
* %true if rectangle @dst is still visible after being clipped,
* %false otherwise.
*/
bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
const struct drm_rect *clip)
{
int diff;
diff = clip->x1 - dst->x1;
if (diff > 0) {
u32 new_src_w = clip_scaled(drm_rect_width(src),
drm_rect_width(dst), &diff);
src->x1 = src->x2 - new_src_w;
dst->x1 += diff;
}
diff = clip->y1 - dst->y1;
if (diff > 0) {
u32 new_src_h = clip_scaled(drm_rect_height(src),
drm_rect_height(dst), &diff);
src->y1 = src->y2 - new_src_h;
dst->y1 += diff;
}
diff = dst->x2 - clip->x2;
if (diff > 0) {
u32 new_src_w = clip_scaled(drm_rect_width(src),
drm_rect_width(dst), &diff);
src->x2 = src->x1 + new_src_w;
dst->x2 -= diff;
}
diff = dst->y2 - clip->y2;
if (diff > 0) {
u32 new_src_h = clip_scaled(drm_rect_height(src),
drm_rect_height(dst), &diff);
src->y2 = src->y1 + new_src_h;
dst->y2 -= diff;
}
return drm_rect_visible(dst);
}
EXPORT_SYMBOL(drm_rect_clip_scaled);
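/*
 * Illustrative sketch (the sizes and offsets are made-up values): a 4096x4096
 * source in 16.16 fixed point scanned out to a partially off-screen 1920x1080
 * destination, clipped against the CRTC area while keeping the scaling factors
 * intact:
 *
 *	struct drm_rect src = DRM_RECT_INIT(0, 0, 4096 << 16, 4096 << 16);
 *	struct drm_rect dst = DRM_RECT_INIT(-100, 0, 1920, 1080);
 *	struct drm_rect clip = DRM_RECT_INIT(0, 0, 1920, 1080);
 *
 *	if (drm_rect_clip_scaled(&src, &dst, &clip)) {
 *		// dst.x1 is now 0 and src.x1 grew by the scaled amount
 *	}
 */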
static int drm_calc_scale(int src, int dst)
{
int scale = 0;
if (WARN_ON(src < 0 || dst < 0))
return -EINVAL;
if (dst == 0)
return 0;
if (src > (dst << 16))
return DIV_ROUND_UP(src, dst);
else
scale = src / dst;
return scale;
}
/**
* drm_rect_calc_hscale - calculate the horizontal scaling factor
* @src: source window rectangle
* @dst: destination window rectangle
* @min_hscale: minimum allowed horizontal scaling factor
* @max_hscale: maximum allowed horizontal scaling factor
*
* Calculate the horizontal scaling factor as
* (@src width) / (@dst width).
*
* If the scale is below 1 << 16, round down. If the scale is above
* 1 << 16, round up. This will calculate the scale with the most
* pessimistic limit calculation.
*
* RETURNS:
* The horizontal scaling factor, or an errno if out of limits.
*/
int drm_rect_calc_hscale(const struct drm_rect *src,
const struct drm_rect *dst,
int min_hscale, int max_hscale)
{
int src_w = drm_rect_width(src);
int dst_w = drm_rect_width(dst);
int hscale = drm_calc_scale(src_w, dst_w);
if (hscale < 0 || dst_w == 0)
return hscale;
if (hscale < min_hscale || hscale > max_hscale)
return -ERANGE;
return hscale;
}
EXPORT_SYMBOL(drm_rect_calc_hscale);
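/*
 * Worked illustration (the widths and limits are assumptions): a 2048-pixel
 * wide source in 16.16 fixed point (2048 << 16) scanned out to a 1024-pixel
 * wide destination yields hscale = (2048 << 16) / 1024 = 2 << 16, i.e. a 2x
 * downscale. A plane check hook could reject it against hypothetical hardware
 * limits of at most 2x up- or downscaling like this:
 *
 *	hscale = drm_rect_calc_hscale(&plane_state->src, &plane_state->dst,
 *				      1 << 15, 2 << 16);
 *	if (hscale < 0)
 *		return hscale;
 */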
/**
* drm_rect_calc_vscale - calculate the vertical scaling factor
* @src: source window rectangle
* @dst: destination window rectangle
* @min_vscale: minimum allowed vertical scaling factor
* @max_vscale: maximum allowed vertical scaling factor
*
* Calculate the vertical scaling factor as
* (@src height) / (@dst height).
*
* If the scale is below 1 << 16, round down. If the scale is above
* 1 << 16, round up. This will calculate the scale with the most
* pessimistic limit calculation.
*
* RETURNS:
* The vertical scaling factor, or an errno if out of limits.
*/
int drm_rect_calc_vscale(const struct drm_rect *src,
const struct drm_rect *dst,
int min_vscale, int max_vscale)
{
int src_h = drm_rect_height(src);
int dst_h = drm_rect_height(dst);
int vscale = drm_calc_scale(src_h, dst_h);
if (vscale < 0 || dst_h == 0)
return vscale;
if (vscale < min_vscale || vscale > max_vscale)
return -ERANGE;
return vscale;
}
EXPORT_SYMBOL(drm_rect_calc_vscale);
/**
* drm_rect_debug_print - print the rectangle information
* @prefix: prefix string
* @r: rectangle to print
* @fixed_point: rectangle is in 16.16 fixed point format
*/
void drm_rect_debug_print(const char *prefix, const struct drm_rect *r, bool fixed_point)
{
if (fixed_point)
DRM_DEBUG_KMS("%s" DRM_RECT_FP_FMT "\n", prefix, DRM_RECT_FP_ARG(r));
else
DRM_DEBUG_KMS("%s" DRM_RECT_FMT "\n", prefix, DRM_RECT_ARG(r));
}
EXPORT_SYMBOL(drm_rect_debug_print);
/**
* drm_rect_rotate - Rotate the rectangle
* @r: rectangle to be rotated
* @width: Width of the coordinate space
* @height: Height of the coordinate space
* @rotation: Transformation to be applied
*
* Apply @rotation to the coordinates of rectangle @r.
*
* @width and @height combined with @rotation define
* the location of the new origin.
*
* @width corresponds to the horizontal and @height
* to the vertical axis of the untransformed coordinate
* space.
*/
void drm_rect_rotate(struct drm_rect *r,
int width, int height,
unsigned int rotation)
{
struct drm_rect tmp;
if (rotation & (DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y)) {
tmp = *r;
if (rotation & DRM_MODE_REFLECT_X) {
r->x1 = width - tmp.x2;
r->x2 = width - tmp.x1;
}
if (rotation & DRM_MODE_REFLECT_Y) {
r->y1 = height - tmp.y2;
r->y2 = height - tmp.y1;
}
}
switch (rotation & DRM_MODE_ROTATE_MASK) {
case DRM_MODE_ROTATE_0:
break;
case DRM_MODE_ROTATE_90:
tmp = *r;
r->x1 = tmp.y1;
r->x2 = tmp.y2;
r->y1 = width - tmp.x2;
r->y2 = width - tmp.x1;
break;
case DRM_MODE_ROTATE_180:
tmp = *r;
r->x1 = width - tmp.x2;
r->x2 = width - tmp.x1;
r->y1 = height - tmp.y2;
r->y2 = height - tmp.y1;
break;
case DRM_MODE_ROTATE_270:
tmp = *r;
r->x1 = height - tmp.y2;
r->x2 = height - tmp.y1;
r->y1 = tmp.x1;
r->y2 = tmp.x2;
break;
default:
break;
}
}
EXPORT_SYMBOL(drm_rect_rotate);
/**
* drm_rect_rotate_inv - Inverse rotate the rectangle
* @r: rectangle to be rotated
* @width: Width of the coordinate space
* @height: Height of the coordinate space
* @rotation: Transformation whose inverse is to be applied
*
* Apply the inverse of @rotation to the coordinates
* of rectangle @r.
*
* @width and @height combined with @rotation define
* the location of the new origin.
*
* @width corresponds to the horizontal and @height
* to the vertical axis of the original untransformed
* coordinate space, so that you never have to flip
* them when doing a rotation and its inverse.
* That is, if you do ::
*
* drm_rect_rotate(&r, width, height, rotation);
* drm_rect_rotate_inv(&r, width, height, rotation);
*
* you will always get back the original rectangle.
*/
void drm_rect_rotate_inv(struct drm_rect *r,
int width, int height,
unsigned int rotation)
{
struct drm_rect tmp;
switch (rotation & DRM_MODE_ROTATE_MASK) {
case DRM_MODE_ROTATE_0:
break;
case DRM_MODE_ROTATE_90:
tmp = *r;
r->x1 = width - tmp.y2;
r->x2 = width - tmp.y1;
r->y1 = tmp.x1;
r->y2 = tmp.x2;
break;
case DRM_MODE_ROTATE_180:
tmp = *r;
r->x1 = width - tmp.x2;
r->x2 = width - tmp.x1;
r->y1 = height - tmp.y2;
r->y2 = height - tmp.y1;
break;
case DRM_MODE_ROTATE_270:
tmp = *r;
r->x1 = tmp.y1;
r->x2 = tmp.y2;
r->y1 = height - tmp.x2;
r->y2 = height - tmp.x1;
break;
default:
break;
}
if (rotation & (DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y)) {
tmp = *r;
if (rotation & DRM_MODE_REFLECT_X) {
r->x1 = width - tmp.x2;
r->x2 = width - tmp.x1;
}
if (rotation & DRM_MODE_REFLECT_Y) {
r->y1 = height - tmp.y2;
r->y2 = height - tmp.y1;
}
}
}
EXPORT_SYMBOL(drm_rect_rotate_inv);
| linux-master | drivers/gpu/drm/drm_rect.c |
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
* Copyright 2016 Intel Corporation
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
/*
* Generic simple memory manager implementation. Intended to be used as a base
* class implementation for more advanced memory managers.
*
* Note that the algorithm used is quite simple and there might be substantial
* performance gains if a smarter free list is implemented. Currently it is
* just an unordered stack of free regions; this could easily be improved by
* using an RB-tree instead, at least if we expect heavy fragmentation.
*
* Aligned allocations can also see improvement.
*
* Authors:
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <drm/drm_mm.h>
/**
* DOC: Overview
*
* drm_mm provides a simple range allocator. Drivers are free to use the
* resource allocator from the Linux core if it suits them; the upside of drm_mm
* is that it lives in the DRM core, which means that it's easier to extend for
* some of the crazier special-purpose needs of GPUs.
*
* The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
* Drivers are free to embed either of them into their own suitable
* datastructures. drm_mm itself will not do any memory allocations of its own,
* so if drivers choose not to embed nodes they still need to allocate them
* themselves.
*
* The range allocator also supports reservation of preallocated blocks. This is
* useful for taking over initial mode setting configurations from the firmware,
* where an object needs to be created which exactly matches the firmware's
* scanout target. As long as the range is still free it can be inserted anytime
* after the allocator is initialized, which helps with avoiding looped
* dependencies in the driver load sequence.
*
* drm_mm maintains a stack of most recently freed holes, which of all
* simplistic datastructures seems to be a fairly decent approach to clustering
* allocations and avoiding too much fragmentation. This means free-space
* searches are O(num_holes). Given all the fancy features drm_mm supports,
* something better would be fairly complex, and since gfx thrashing is a fairly
* steep cliff anyway it is not a real concern. Removing a node again is O(1).
*
* drm_mm supports a few features: Alignment and range restrictions can be
* supplied. Furthermore every &drm_mm_node has a color value (which is just an
* opaque unsigned long) which in conjunction with a driver callback can be used
* to implement sophisticated placement restrictions. The i915 DRM driver uses
* this to implement guard pages between incompatible caching domains in the
* graphics TT.
*
* Two behaviors are supported for searching and allocating: bottom-up and
* top-down. The default is bottom-up. Top-down allocation can be used if the
* memory area has different restrictions, or just to reduce fragmentation.
*
* Finally iteration helpers to walk all nodes and all holes are provided as are
* some basic allocator dumpers for debugging.
*
* Note that this range allocator is not thread-safe, drivers need to protect
* modifications with their own locking. The idea behind this is that for a full
* memory manager additional data needs to be protected anyway, hence internal
* locking would be fully redundant.
*/
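/*
 * Illustrative usage sketch (my_mm and my_node are hypothetical; a driver
 * would embed them in its own structures and guard all calls with its own
 * lock):
 *
 *	struct drm_mm my_mm;
 *	struct drm_mm_node my_node = {};
 *	int ret;
 *
 *	drm_mm_init(&my_mm, 0, SZ_1G);
 *	ret = drm_mm_insert_node_in_range(&my_mm, &my_node, SZ_4K, SZ_4K, 0,
 *					  0, SZ_1G, DRM_MM_INSERT_BEST);
 *	if (!ret) {
 *		// my_node.start now holds the allocated offset
 *		drm_mm_remove_node(&my_node);
 *	}
 *	drm_mm_takedown(&my_mm);
 */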
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#define STACKDEPTH 32
#define BUFSZ 4096
static noinline void save_stack(struct drm_mm_node *node)
{
unsigned long entries[STACKDEPTH];
unsigned int n;
n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
/* May be called under spinlock, so avoid sleeping */
node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}
static void show_leaks(struct drm_mm *mm)
{
struct drm_mm_node *node;
char *buf;
buf = kmalloc(BUFSZ, GFP_KERNEL);
if (!buf)
return;
list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
if (!node->stack) {
DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
node->start, node->size);
continue;
}
stack_depot_snprint(node->stack, buf, BUFSZ, 0);
DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
node->start, node->size, buf);
}
kfree(buf);
}
#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif
#define START(node) ((node)->start)
#define LAST(node) ((node)->start + (node)->size - 1)
INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
u64, __subtree_last,
START, LAST, static inline, drm_mm_interval_tree)
struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);
static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
struct drm_mm_node *node)
{
struct drm_mm *mm = hole_node->mm;
struct rb_node **link, *rb;
struct drm_mm_node *parent;
bool leftmost;
node->__subtree_last = LAST(node);
if (drm_mm_node_allocated(hole_node)) {
rb = &hole_node->rb;
while (rb) {
parent = rb_entry(rb, struct drm_mm_node, rb);
if (parent->__subtree_last >= node->__subtree_last)
break;
parent->__subtree_last = node->__subtree_last;
rb = rb_parent(rb);
}
rb = &hole_node->rb;
link = &hole_node->rb.rb_right;
leftmost = false;
} else {
rb = NULL;
link = &mm->interval_tree.rb_root.rb_node;
leftmost = true;
}
while (*link) {
rb = *link;
parent = rb_entry(rb, struct drm_mm_node, rb);
if (parent->__subtree_last < node->__subtree_last)
parent->__subtree_last = node->__subtree_last;
if (node->start < parent->start) {
link = &parent->rb.rb_left;
} else {
link = &parent->rb.rb_right;
leftmost = false;
}
}
rb_link_node(&node->rb, rb, link);
rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
&drm_mm_interval_tree_augment);
}
#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
static u64 rb_to_hole_size(struct rb_node *rb)
{
return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}
static void insert_hole_size(struct rb_root_cached *root,
struct drm_mm_node *node)
{
struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
u64 x = node->hole_size;
bool first = true;
while (*link) {
rb = *link;
if (x > rb_to_hole_size(rb)) {
link = &rb->rb_left;
} else {
link = &rb->rb_right;
first = false;
}
}
rb_link_node(&node->rb_hole_size, rb, link);
rb_insert_color_cached(&node->rb_hole_size, root, first);
}
RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
struct drm_mm_node, rb_hole_addr,
u64, subtree_max_hole, HOLE_SIZE)
static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
{
struct rb_node **link = &root->rb_node, *rb_parent = NULL;
u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
struct drm_mm_node *parent;
while (*link) {
rb_parent = *link;
parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr);
if (parent->subtree_max_hole < subtree_max_hole)
parent->subtree_max_hole = subtree_max_hole;
if (start < HOLE_ADDR(parent))
link = &parent->rb_hole_addr.rb_left;
else
link = &parent->rb_hole_addr.rb_right;
}
rb_link_node(&node->rb_hole_addr, rb_parent, link);
rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
}
static void add_hole(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
node->hole_size =
__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
node->subtree_max_hole = node->hole_size;
DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
insert_hole_size(&mm->holes_size, node);
insert_hole_addr(&mm->holes_addr, node);
list_add(&node->hole_stack, &mm->hole_stack);
}
static void rm_hole(struct drm_mm_node *node)
{
DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
list_del(&node->hole_stack);
rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
&augment_callbacks);
node->hole_size = 0;
node->subtree_max_hole = 0;
DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}
static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}
static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}
static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
struct rb_node *rb = mm->holes_size.rb_root.rb_node;
struct drm_mm_node *best = NULL;
do {
struct drm_mm_node *node =
rb_entry(rb, struct drm_mm_node, rb_hole_size);
if (size <= node->hole_size) {
best = node;
rb = rb->rb_right;
} else {
rb = rb->rb_left;
}
} while (rb);
return best;
}
static bool usable_hole_addr(struct rb_node *rb, u64 size)
{
return rb && rb_hole_addr_to_node(rb)->subtree_max_hole >= size;
}
static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
{
struct rb_node *rb = mm->holes_addr.rb_node;
struct drm_mm_node *node = NULL;
while (rb) {
u64 hole_start;
if (!usable_hole_addr(rb, size))
break;
node = rb_hole_addr_to_node(rb);
hole_start = __drm_mm_hole_node_start(node);
if (addr < hole_start)
rb = node->rb_hole_addr.rb_left;
else if (addr > hole_start + node->hole_size)
rb = node->rb_hole_addr.rb_right;
else
break;
}
return node;
}
static struct drm_mm_node *
first_hole(struct drm_mm *mm,
u64 start, u64 end, u64 size,
enum drm_mm_insert_mode mode)
{
switch (mode) {
default:
case DRM_MM_INSERT_BEST:
return best_hole(mm, size);
case DRM_MM_INSERT_LOW:
return find_hole_addr(mm, start, size);
case DRM_MM_INSERT_HIGH:
return find_hole_addr(mm, end, size);
case DRM_MM_INSERT_EVICT:
return list_first_entry_or_null(&mm->hole_stack,
struct drm_mm_node,
hole_stack);
}
}
/**
* DECLARE_NEXT_HOLE_ADDR - macro to declare next hole functions
* @name: name of function to declare
* @first: first rb member to traverse (either rb_left or rb_right).
* @last: last rb member to traverse (either rb_right or rb_left).
*
* This macro declares a function to return the next hole of the addr rb tree.
* While traversing the tree we take the searched size into account and only
* visit branches with potentially big enough holes.
*/
#define DECLARE_NEXT_HOLE_ADDR(name, first, last) \
static struct drm_mm_node *name(struct drm_mm_node *entry, u64 size) \
{ \
struct rb_node *parent, *node = &entry->rb_hole_addr; \
\
if (!entry || RB_EMPTY_NODE(node)) \
return NULL; \
\
if (usable_hole_addr(node->first, size)) { \
node = node->first; \
while (usable_hole_addr(node->last, size)) \
node = node->last; \
return rb_hole_addr_to_node(node); \
} \
\
while ((parent = rb_parent(node)) && node == parent->first) \
node = parent; \
\
return rb_hole_addr_to_node(parent); \
}
DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right)
DECLARE_NEXT_HOLE_ADDR(next_hole_low_addr, rb_right, rb_left)
static struct drm_mm_node *
next_hole(struct drm_mm *mm,
struct drm_mm_node *node,
u64 size,
enum drm_mm_insert_mode mode)
{
switch (mode) {
default:
case DRM_MM_INSERT_BEST:
return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
case DRM_MM_INSERT_LOW:
return next_hole_low_addr(node, size);
case DRM_MM_INSERT_HIGH:
return next_hole_high_addr(node, size);
case DRM_MM_INSERT_EVICT:
node = list_next_entry(node, hole_stack);
return &node->hole_stack == &mm->hole_stack ? NULL : node;
}
}
/**
* drm_mm_reserve_node - insert a pre-initialized node
* @mm: drm_mm allocator to insert @node into
* @node: drm_mm_node to insert
*
* This function inserts an already set-up &drm_mm_node into the allocator,
* meaning that start, size and color must be set by the caller. All other
* fields must be cleared to 0. This is useful to initialize the allocator with
* preallocated objects which must be set-up before the range allocator can be
* set-up, e.g. when taking over a firmware framebuffer.
*
* Returns:
* 0 on success, -ENOSPC if there's no hole where @node is.
*/
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
struct drm_mm_node *hole;
u64 hole_start, hole_end;
u64 adj_start, adj_end;
u64 end;
end = node->start + node->size;
if (unlikely(end <= node->start))
return -ENOSPC;
/* Find the relevant hole to add our node to */
hole = find_hole_addr(mm, node->start, 0);
if (!hole)
return -ENOSPC;
adj_start = hole_start = __drm_mm_hole_node_start(hole);
adj_end = hole_end = hole_start + hole->hole_size;
if (mm->color_adjust)
mm->color_adjust(hole, node->color, &adj_start, &adj_end);
if (adj_start > node->start || adj_end < end)
return -ENOSPC;
node->mm = mm;
__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
list_add(&node->node_list, &hole->node_list);
drm_mm_interval_tree_add_node(hole, node);
node->hole_size = 0;
rm_hole(hole);
if (node->start > hole_start)
add_hole(hole);
if (end < hole_end)
add_hole(node);
save_stack(node);
return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
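/*
 * Illustrative sketch (the 16 MiB size, the start offset and my_mm are
 * assumptions): taking over a firmware-programmed scanout buffer before
 * regular allocations start:
 *
 *	struct drm_mm_node fw_fb = {
 *		.start = 0,
 *		.size = 16 * SZ_1M,
 *	};
 *
 *	ret = drm_mm_reserve_node(&my_mm, &fw_fb);
 *	if (ret == -ENOSPC)
 *		return ret;	// range already in use
 */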
static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
{
return rb ? rb_to_hole_size(rb) : 0;
}
/**
* drm_mm_insert_node_in_range - ranged search for space and insert @node
* @mm: drm_mm to allocate from
* @node: preallocate node to insert
* @size: size of the allocation
* @alignment: alignment of the allocation
* @color: opaque tag value to use for this node
* @range_start: start of the allowed range for this node
* @range_end: end of the allowed range for this node
* @mode: fine-tune the allocation search and placement
*
* The preallocated @node must be cleared to 0.
*
* Returns:
* 0 on success, -ENOSPC if there's no suitable hole.
*/
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
struct drm_mm_node * const node,
u64 size, u64 alignment,
unsigned long color,
u64 range_start, u64 range_end,
enum drm_mm_insert_mode mode)
{
struct drm_mm_node *hole;
u64 remainder_mask;
bool once;
DRM_MM_BUG_ON(range_start > range_end);
if (unlikely(size == 0 || range_end - range_start < size))
return -ENOSPC;
if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
return -ENOSPC;
if (alignment <= 1)
alignment = 0;
once = mode & DRM_MM_INSERT_ONCE;
mode &= ~DRM_MM_INSERT_ONCE;
remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
for (hole = first_hole(mm, range_start, range_end, size, mode);
hole;
hole = once ? NULL : next_hole(mm, hole, size, mode)) {
u64 hole_start = __drm_mm_hole_node_start(hole);
u64 hole_end = hole_start + hole->hole_size;
u64 adj_start, adj_end;
u64 col_start, col_end;
if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
break;
if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
break;
col_start = hole_start;
col_end = hole_end;
if (mm->color_adjust)
mm->color_adjust(hole, color, &col_start, &col_end);
adj_start = max(col_start, range_start);
adj_end = min(col_end, range_end);
if (adj_end <= adj_start || adj_end - adj_start < size)
continue;
if (mode == DRM_MM_INSERT_HIGH)
adj_start = adj_end - size;
if (alignment) {
u64 rem;
if (likely(remainder_mask))
rem = adj_start & remainder_mask;
else
div64_u64_rem(adj_start, alignment, &rem);
if (rem) {
adj_start -= rem;
if (mode != DRM_MM_INSERT_HIGH)
adj_start += alignment;
if (adj_start < max(col_start, range_start) ||
min(col_end, range_end) - adj_start < size)
continue;
if (adj_end <= adj_start ||
adj_end - adj_start < size)
continue;
}
}
node->mm = mm;
node->size = size;
node->start = adj_start;
node->color = color;
node->hole_size = 0;
__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
list_add(&node->node_list, &hole->node_list);
drm_mm_interval_tree_add_node(hole, node);
rm_hole(hole);
if (adj_start > hole_start)
add_hole(hole);
if (adj_start + size < hole_end)
add_hole(node);
save_stack(node);
return 0;
}
return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
{
return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
}
/**
* drm_mm_remove_node - Remove a memory node from the allocator.
* @node: drm_mm_node to remove
*
* This just removes a node from its drm_mm allocator. The node does not need to
* be cleared again before it can be re-inserted into this or any other drm_mm
* allocator. It is a bug to call this function on an unallocated node.
*/
void drm_mm_remove_node(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
prev_node = list_prev_entry(node, node_list);
if (drm_mm_hole_follows(node))
rm_hole(node);
drm_mm_interval_tree_remove(node, &mm->interval_tree);
list_del(&node->node_list);
if (drm_mm_hole_follows(prev_node))
rm_hole(prev_node);
add_hole(prev_node);
clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);
/**
* drm_mm_replace_node - move an allocation from @old to @new
* @old: drm_mm_node to remove from the allocator
* @new: drm_mm_node which should inherit @old's allocation
*
* This is useful when drivers embed the drm_mm_node structure and hence
* can't move allocations by reassigning pointers. It's a combination of remove
* and insert with the guarantee that the allocation start will match.
*/
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
struct drm_mm *mm = old->mm;
DRM_MM_BUG_ON(!drm_mm_node_allocated(old));
*new = *old;
__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
list_replace(&old->node_list, &new->node_list);
rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
if (drm_mm_hole_follows(old)) {
list_replace(&old->hole_stack, &new->hole_stack);
rb_replace_node_cached(&old->rb_hole_size,
&new->rb_hole_size,
&mm->holes_size);
rb_replace_node(&old->rb_hole_addr,
&new->rb_hole_addr,
&mm->holes_addr);
}
clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
}
EXPORT_SYMBOL(drm_mm_replace_node);
/**
* DOC: lru scan roster
*
* Very often GPUs need contiguous allocations for a given object. When
* evicting objects to make space for a new one it is therefore not the most
* efficient approach to simply select objects from the tail of an LRU
* until there's a suitable hole: especially for big objects or nodes that
* otherwise have special allocation constraints there's a good chance we evict
* lots of (smaller) objects unnecessarily.
*
* The DRM range allocator supports this use-case through the scanning
* interfaces. First a scan operation needs to be initialized with
* drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
* objects to the roster, probably by walking an LRU list, but this can be
* freely implemented. Eviction candidates are added using
* drm_mm_scan_add_block() until a suitable hole is found or there are no
* further evictable objects. Eviction roster metadata is tracked in &struct
* drm_mm_scan.
*
* The driver must walk through all objects again in exactly the reverse
* order to restore the allocator state. Note that while the allocator is used
* in the scan mode no other operation is allowed.
*
* Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
* reported true) in the scan, and any overlapping nodes after color adjustment
* (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
* since freeing a node is also O(1) the overall complexity is
* O(scanned_objects). So, like the free stack which needs to be walked before a
* scan operation even begins, this is linear in the number of objects. It
* doesn't seem to hurt too badly.
*/
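/*
 * Illustrative eviction sketch (my_mm, my_lru, my_evict() and the obj members
 * lru_link, evict_link and node are hypothetical, and size is assumed to be
 * set up by the caller): scan the LRU until a hole is found, then unwind in
 * exactly the reverse order and evict only the nodes the scan selected:
 *
 *	struct drm_mm_scan scan;
 *	struct my_obj *obj, *next;
 *	LIST_HEAD(evict_list);
 *
 *	drm_mm_scan_init(&scan, &my_mm, size, 0, 0, DRM_MM_INSERT_BEST);
 *	list_for_each_entry(obj, &my_lru, lru_link) {
 *		list_add(&obj->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node))
 *			break;
 *	}
 *	list_for_each_entry_safe(obj, next, &evict_list, evict_link) {
 *		if (!drm_mm_scan_remove_block(&scan, &obj->node))
 *			continue;	// not needed for the hole, keep it
 *		my_evict(obj);		// frees obj->node
 *	}
 */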
/**
* drm_mm_scan_init_with_range - initialize range-restricted lru scanning
* @scan: scan state
* @mm: drm_mm to scan
* @size: size of the allocation
* @alignment: alignment of the allocation
* @color: opaque tag value to use for the allocation
* @start: start of the allowed range for the allocation
* @end: end of the allowed range for the allocation
* @mode: fine-tune the allocation search and placement
*
* This simply sets up the scanning routines with the parameters for the desired
* hole.
*
* Warning:
* As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
struct drm_mm *mm,
u64 size,
u64 alignment,
unsigned long color,
u64 start,
u64 end,
enum drm_mm_insert_mode mode)
{
DRM_MM_BUG_ON(start >= end);
DRM_MM_BUG_ON(!size || size > end - start);
DRM_MM_BUG_ON(mm->scan_active);
scan->mm = mm;
if (alignment <= 1)
alignment = 0;
scan->color = color;
scan->alignment = alignment;
scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
scan->size = size;
scan->mode = mode;
DRM_MM_BUG_ON(end <= start);
scan->range_start = start;
scan->range_end = end;
scan->hit_start = U64_MAX;
scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);
/**
* drm_mm_scan_add_block - add a node to the scan list
* @scan: the active drm_mm scanner
* @node: drm_mm_node to add
*
* Add a node to the scan list that might be freed to make space for the desired
* hole.
*
* Returns:
* True if a hole has been found, false otherwise.
*/
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
struct drm_mm_node *node)
{
struct drm_mm *mm = scan->mm;
struct drm_mm_node *hole;
u64 hole_start, hole_end;
u64 col_start, col_end;
u64 adj_start, adj_end;
DRM_MM_BUG_ON(node->mm != mm);
DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
__set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
mm->scan_active++;
/* Remove this block from the node_list so that we enlarge the hole
* (distance between the end of our previous node and the start of
* our next), without poisoning the link so that we can restore it
* later in drm_mm_scan_remove_block().
*/
hole = list_prev_entry(node, node_list);
DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
__list_del_entry(&node->node_list);
hole_start = __drm_mm_hole_node_start(hole);
hole_end = __drm_mm_hole_node_end(hole);
col_start = hole_start;
col_end = hole_end;
if (mm->color_adjust)
mm->color_adjust(hole, scan->color, &col_start, &col_end);
adj_start = max(col_start, scan->range_start);
adj_end = min(col_end, scan->range_end);
if (adj_end <= adj_start || adj_end - adj_start < scan->size)
return false;
if (scan->mode == DRM_MM_INSERT_HIGH)
adj_start = adj_end - scan->size;
if (scan->alignment) {
u64 rem;
if (likely(scan->remainder_mask))
rem = adj_start & scan->remainder_mask;
else
div64_u64_rem(adj_start, scan->alignment, &rem);
if (rem) {
adj_start -= rem;
if (scan->mode != DRM_MM_INSERT_HIGH)
adj_start += scan->alignment;
if (adj_start < max(col_start, scan->range_start) ||
min(col_end, scan->range_end) - adj_start < scan->size)
return false;
if (adj_end <= adj_start ||
adj_end - adj_start < scan->size)
return false;
}
}
scan->hit_start = adj_start;
scan->hit_end = adj_start + scan->size;
DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
DRM_MM_BUG_ON(scan->hit_start < hole_start);
DRM_MM_BUG_ON(scan->hit_end > hole_end);
return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
/**
* drm_mm_scan_remove_block - remove a node from the scan list
* @scan: the active drm_mm scanner
* @node: drm_mm_node to remove
*
* Nodes **must** be removed in exactly the reverse order from the scan list as
* they have been added (e.g. using list_add() as they are added and then
* list_for_each() over that eviction list to remove), otherwise the internal
* state of the memory manager will be corrupted.
*
* When the scan list is empty, the selected memory nodes can be freed. An
* immediately following drm_mm_insert_node_in_range() with
* DRM_MM_INSERT_EVICT will then return the just freed block (because it's at
* the top of the hole_stack list).
*
* Returns:
* True if this block should be evicted, false otherwise. Will always
* return false when no hole has been found.
*/
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
struct drm_mm_node *node)
{
struct drm_mm_node *prev_node;
DRM_MM_BUG_ON(node->mm != scan->mm);
DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
__clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
DRM_MM_BUG_ON(!node->mm->scan_active);
node->mm->scan_active--;
/* During drm_mm_scan_add_block() we decoupled this node leaving
* its pointers intact. Now that the caller is walking back along
* the eviction list we can restore this block into its rightful
* place on the full node_list. To confirm that the caller is walking
* backwards correctly we check that prev_node->next == node->next,
* i.e. both believe the same node should be on the other side of the
* hole.
*/
prev_node = list_prev_entry(node, node_list);
DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
list_next_entry(node, node_list));
list_add(&node->node_list, &prev_node->node_list);
return (node->start + node->size > scan->hit_start &&
node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
/**
* drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
* @scan: drm_mm scan with target hole
*
* After completing an eviction scan and removing the selected nodes, we may
* need to remove a few more nodes from either side of the target hole if
* mm.color_adjust is being used.
*
* Returns:
* A node to evict, or NULL if there are no overlapping nodes.
*/
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
struct drm_mm *mm = scan->mm;
struct drm_mm_node *hole;
u64 hole_start, hole_end;
DRM_MM_BUG_ON(list_empty(&mm->hole_stack));
if (!mm->color_adjust)
return NULL;
/*
* The hole found during scanning should ideally be the first element
* in the hole_stack list, but due to side-effects in the driver it
* may not be.
*/
list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
hole_start = __drm_mm_hole_node_start(hole);
hole_end = hole_start + hole->hole_size;
if (hole_start <= scan->hit_start &&
hole_end >= scan->hit_end)
break;
}
/* We should only be called after we found the hole previously */
DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
if (unlikely(&hole->hole_stack == &mm->hole_stack))
return NULL;
DRM_MM_BUG_ON(hole_start > scan->hit_start);
DRM_MM_BUG_ON(hole_end < scan->hit_end);
mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
if (hole_start > scan->hit_start)
return hole;
if (hole_end < scan->hit_end)
return list_next_entry(hole, node_list);
return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);
/**
* drm_mm_init - initialize a drm-mm allocator
* @mm: the drm_mm structure to initialize
* @start: start of the range managed by @mm
* @size: size of the range managed by @mm
*
* Note that @mm must be cleared to 0 before calling this function.
*/
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
DRM_MM_BUG_ON(start + size <= start);
mm->color_adjust = NULL;
INIT_LIST_HEAD(&mm->hole_stack);
mm->interval_tree = RB_ROOT_CACHED;
mm->holes_size = RB_ROOT_CACHED;
mm->holes_addr = RB_ROOT;
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
mm->head_node.flags = 0;
mm->head_node.mm = mm;
mm->head_node.start = start + size;
mm->head_node.size = -size;
add_hole(&mm->head_node);
mm->scan_active = 0;
#ifdef CONFIG_DRM_DEBUG_MM
stack_depot_init();
#endif
}
EXPORT_SYMBOL(drm_mm_init);
/**
* drm_mm_takedown - clean up a drm_mm allocator
* @mm: drm_mm allocator to clean up
*
* Note that it is a bug to call this function on an allocator which is not
* clean.
*/
void drm_mm_takedown(struct drm_mm *mm)
{
if (WARN(!drm_mm_clean(mm),
"Memory manager not clean during takedown.\n"))
show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);
static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
u64 start, size;
size = entry->hole_size;
if (size) {
start = drm_mm_hole_node_start(entry);
drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
start, start + size, size);
}
return size;
}
/**
* drm_mm_print - print allocator state
* @mm: drm_mm allocator to print
* @p: DRM printer to use
*/
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
const struct drm_mm_node *entry;
u64 total_used = 0, total_free = 0, total = 0;
total_free += drm_mm_dump_hole(p, &mm->head_node);
drm_mm_for_each_node(entry, mm) {
drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
entry->start + entry->size, entry->size);
total_used += entry->size;
total_free += drm_mm_dump_hole(p, entry);
}
total = total_free + total_used;
drm_printf(p, "total: %llu, used %llu free %llu\n", total,
total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
| linux-master | drivers/gpu/drm/drm_mm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/dma-resv.h>
#include <linux/dma-fence-chain.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "drm_internal.h"
/**
* DOC: overview
*
* The GEM atomic helpers library implements generic atomic-commit
* functions for drivers that use GEM objects. Currently, it provides
* synchronization helpers, and plane state and framebuffer BO mappings
* for planes with shadow buffers.
*
* Before scanout, a plane's framebuffer needs to be synchronized with
* possible writers that draw into the framebuffer. All drivers should
* call drm_gem_plane_helper_prepare_fb() from their implementation of
* struct &drm_plane_helper.prepare_fb . It sets the plane's fence from
* the framebuffer so that the DRM core can synchronize access automatically.
* drm_gem_plane_helper_prepare_fb() can also be used directly as
* implementation of prepare_fb.
*
* .. code-block:: c
*
* #include <drm/drm_gem_atomic_helper.h>
*
* struct drm_plane_helper_funcs driver_plane_helper_funcs = {
* ...,
* .prepare_fb = drm_gem_plane_helper_prepare_fb,
* };
*
* A driver using a shadow buffer copies the content of the shadow buffers
* into the HW's framebuffer memory during an atomic update. This requires
* a mapping of the shadow buffer into kernel address space. The mappings
* cannot be established by commit-tail functions, such as atomic_update,
* as this would violate locking rules around dma_buf_vmap().
*
* The helpers for shadow-buffered planes establish and release mappings,
* and provide struct drm_shadow_plane_state, which stores the plane's mapping
* for commit-tail functions.
*
* Shadow-buffered planes can easily be enabled by using the provided macros
* %DRM_GEM_SHADOW_PLANE_FUNCS and %DRM_GEM_SHADOW_PLANE_HELPER_FUNCS.
* These macros set up the plane and plane-helper callbacks to point to the
* shadow-buffer helpers.
*
* .. code-block:: c
*
* #include <drm/drm_gem_atomic_helper.h>
*
* struct drm_plane_funcs driver_plane_funcs = {
* ...,
* DRM_GEM_SHADOW_PLANE_FUNCS,
* };
*
* struct drm_plane_helper_funcs driver_plane_helper_funcs = {
* ...,
* DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
* };
*
* In the driver's atomic-update function, shadow-buffer mappings are available
* from the plane state. Use to_drm_shadow_plane_state() to upcast from
* struct drm_plane_state.
*
* .. code-block:: c
*
* void driver_plane_atomic_update(struct drm_plane *plane,
* struct drm_plane_state *old_plane_state)
* {
* struct drm_plane_state *plane_state = plane->state;
* struct drm_shadow_plane_state *shadow_plane_state =
* to_drm_shadow_plane_state(plane_state);
*
* // access shadow buffer via shadow_plane_state->map
* }
*
* A mapping address for each of the framebuffer's buffer objects is stored in
* struct &drm_shadow_plane_state.map. The mappings are valid while the state
* is being used.
*
* Drivers that use struct drm_simple_display_pipe can use
* %DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS to initialize the respective
* callbacks. Access to shadow-buffer mappings is similar to regular
* atomic_update.
*
* .. code-block:: c
*
* struct drm_simple_display_pipe_funcs driver_pipe_funcs = {
* ...,
* DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
* };
*
* void driver_pipe_enable(struct drm_simple_display_pipe *pipe,
* struct drm_crtc_state *crtc_state,
* struct drm_plane_state *plane_state)
* {
* struct drm_shadow_plane_state *shadow_plane_state =
* to_drm_shadow_plane_state(plane_state);
*
* // access shadow buffer via shadow_plane_state->map
* }
*/
/*
* Plane Helpers
*/
/**
* drm_gem_plane_helper_prepare_fb() - Prepare a GEM backed framebuffer
* @plane: Plane
* @state: Plane state the fence will be attached to
*
* This function extracts the exclusive fence from &drm_gem_object.resv and
* attaches it to plane state for the atomic helper to wait on. This is
* necessary to correctly implement implicit synchronization for any buffers
* shared as a struct &dma_buf. This function can be used as the
* &drm_plane_helper_funcs.prepare_fb callback.
*
* There is no need for &drm_plane_helper_funcs.cleanup_fb hook for simple
* GEM based framebuffer drivers which have their buffers always pinned in
* memory.
*
* This function is the default implementation for GEM drivers of
* &drm_plane_helper_funcs.prepare_fb if no callback is provided.
*/
int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct dma_fence *fence = dma_fence_get(state->fence);
enum dma_resv_usage usage;
size_t i;
int ret;
if (!state->fb)
return 0;
/*
* Only add the kernel fences here if there is already a fence set via
* explicit fencing interfaces on the atomic ioctl.
*
* This way explicit fencing can be used to overrule implicit fencing,
* which is important to make explicit fencing use-cases work: One
* example is using one buffer for 2 screens with different refresh
* rates. Implicit fencing will clamp rendering to the refresh rate of
* the slower screen, whereas explicit fencing allows 2 independent
* render and display loops on a single buffer. If a driver
* obeys both implicit and explicit fences for plane updates, then it
* will break all the benefits of explicit fencing.
*/
usage = fence ? DMA_RESV_USAGE_KERNEL : DMA_RESV_USAGE_WRITE;
for (i = 0; i < state->fb->format->num_planes; ++i) {
struct drm_gem_object *obj = drm_gem_fb_get_obj(state->fb, i);
struct dma_fence *new;
if (!obj) {
ret = -EINVAL;
goto error;
}
ret = dma_resv_get_singleton(obj->resv, usage, &new);
if (ret)
goto error;
if (new && fence) {
struct dma_fence_chain *chain = dma_fence_chain_alloc();
if (!chain) {
ret = -ENOMEM;
goto error;
}
dma_fence_chain_init(chain, fence, new, 1);
fence = &chain->base;
} else if (new) {
fence = new;
}
}
dma_fence_put(state->fence);
state->fence = fence;
return 0;
error:
dma_fence_put(fence);
return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb);
/*
* Shadow-buffered Planes
*/
/**
* __drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
* @plane: the plane
* @new_shadow_plane_state: the new shadow-buffered plane state
*
* This function duplicates shadow-buffered plane state. This is helpful for drivers
* that subclass struct drm_shadow_plane_state.
*
* The function does not duplicate existing mappings of the shadow buffers.
* Mappings are maintained during the atomic commit by the plane's prepare_fb
* and cleanup_fb helpers. See drm_gem_prepare_shadow_fb() and drm_gem_cleanup_shadow_fb()
* for corresponding helpers.
*/
void
__drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane,
struct drm_shadow_plane_state *new_shadow_plane_state)
{
__drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base);
}
EXPORT_SYMBOL(__drm_gem_duplicate_shadow_plane_state);
/**
* drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
* @plane: the plane
*
* This function implements struct &drm_plane_funcs.atomic_duplicate_state for
* shadow-buffered planes. It assumes the existing state to be of type
* struct drm_shadow_plane_state and it allocates the new state to be of this
* type.
*
* The function does not duplicate existing mappings of the shadow buffers.
* Mappings are maintained during the atomic commit by the plane's prepare_fb
* and cleanup_fb helpers. See drm_gem_begin_shadow_fb_access() and
* drm_gem_end_shadow_fb_access() for the corresponding helpers.
*
* Returns:
* A pointer to a new plane state on success, or NULL otherwise.
*/
struct drm_plane_state *
drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane)
{
struct drm_plane_state *plane_state = plane->state;
struct drm_shadow_plane_state *new_shadow_plane_state;
if (!plane_state)
return NULL;
new_shadow_plane_state = kzalloc(sizeof(*new_shadow_plane_state), GFP_KERNEL);
if (!new_shadow_plane_state)
return NULL;
__drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);
return &new_shadow_plane_state->base;
}
EXPORT_SYMBOL(drm_gem_duplicate_shadow_plane_state);
/**
* __drm_gem_destroy_shadow_plane_state - cleans up shadow-buffered plane state
* @shadow_plane_state: the shadow-buffered plane state
*
* This function cleans up shadow-buffered plane state. Helpful for drivers that
* subclass struct drm_shadow_plane_state.
*/
void __drm_gem_destroy_shadow_plane_state(struct drm_shadow_plane_state *shadow_plane_state)
{
__drm_atomic_helper_plane_destroy_state(&shadow_plane_state->base);
}
EXPORT_SYMBOL(__drm_gem_destroy_shadow_plane_state);
/**
* drm_gem_destroy_shadow_plane_state - deletes shadow-buffered plane state
* @plane: the plane
* @plane_state: the plane state of type struct drm_shadow_plane_state
*
* This function implements struct &drm_plane_funcs.atomic_destroy_state
* for shadow-buffered planes. It expects that mappings of shadow buffers
* have been released already.
*/
void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane,
struct drm_plane_state *plane_state)
{
struct drm_shadow_plane_state *shadow_plane_state =
to_drm_shadow_plane_state(plane_state);
__drm_gem_destroy_shadow_plane_state(shadow_plane_state);
kfree(shadow_plane_state);
}
EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state);
/**
* __drm_gem_reset_shadow_plane - resets a shadow-buffered plane
* @plane: the plane
* @shadow_plane_state: the shadow-buffered plane state
*
* This function resets state for shadow-buffered planes. Helpful
* for drivers that subclass struct drm_shadow_plane_state.
*/
void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
struct drm_shadow_plane_state *shadow_plane_state)
{
__drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
}
EXPORT_SYMBOL(__drm_gem_reset_shadow_plane);
/**
* drm_gem_reset_shadow_plane - resets a shadow-buffered plane
* @plane: the plane
*
* This function implements struct &drm_plane_funcs.reset for
* shadow-buffered planes. It assumes the current plane state to be
* of type struct drm_shadow_plane_state and it allocates the new state
* of this type.
*/
void drm_gem_reset_shadow_plane(struct drm_plane *plane)
{
struct drm_shadow_plane_state *shadow_plane_state;
if (plane->state) {
drm_gem_destroy_shadow_plane_state(plane, plane->state);
plane->state = NULL; /* must be set to NULL here */
}
shadow_plane_state = kzalloc(sizeof(*shadow_plane_state), GFP_KERNEL);
if (!shadow_plane_state)
return;
__drm_gem_reset_shadow_plane(plane, shadow_plane_state);
}
EXPORT_SYMBOL(drm_gem_reset_shadow_plane);
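/*
 * A minimal usage sketch of plugging the three state helpers above into a
 * driver's plane functions (the example_* names are hypothetical):
 *
 *	static const struct drm_plane_funcs example_plane_funcs = {
 *		.update_plane		= drm_atomic_helper_update_plane,
 *		.disable_plane		= drm_atomic_helper_disable_plane,
 *		.destroy		= drm_plane_cleanup,
 *		.reset			= drm_gem_reset_shadow_plane,
 *		.atomic_duplicate_state	= drm_gem_duplicate_shadow_plane_state,
 *		.atomic_destroy_state	= drm_gem_destroy_shadow_plane_state,
 *	};
 */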
/**
* drm_gem_begin_shadow_fb_access - prepares shadow framebuffers for CPU access
* @plane: the plane
* @plane_state: the plane state of type struct drm_shadow_plane_state
*
* This function implements struct &drm_plane_helper_funcs.begin_fb_access. It
* maps all buffer objects of the plane's framebuffer into kernel address
* space and stores them in struct &drm_shadow_plane_state.map. The first data
* bytes are available in struct &drm_shadow_plane_state.data.
*
* See drm_gem_end_shadow_fb_access() for cleanup.
*
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
int drm_gem_begin_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state)
{
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
if (!fb)
return 0;
return drm_gem_fb_vmap(fb, shadow_plane_state->map, shadow_plane_state->data);
}
EXPORT_SYMBOL(drm_gem_begin_shadow_fb_access);
/**
* drm_gem_end_shadow_fb_access - releases shadow framebuffers from CPU access
* @plane: the plane
* @plane_state: the plane state of type struct drm_shadow_plane_state
*
* This function implements struct &drm_plane_helper_funcs.end_fb_access. It
* undoes all effects of drm_gem_begin_shadow_fb_access() in reverse order.
*
* See drm_gem_begin_shadow_fb_access() for more information.
*/
void drm_gem_end_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state)
{
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
if (!fb)
return;
drm_gem_fb_vunmap(fb, shadow_plane_state->map);
}
EXPORT_SYMBOL(drm_gem_end_shadow_fb_access);
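/*
 * A minimal usage sketch of wiring the two helpers above into a driver's
 * plane helper functions (the example_* callbacks are hypothetical):
 *
 *	static const struct drm_plane_helper_funcs example_plane_helper_funcs = {
 *		.atomic_check	 = example_plane_atomic_check,
 *		.atomic_update	 = example_plane_atomic_update,
 *		.begin_fb_access = drm_gem_begin_shadow_fb_access,
 *		.end_fb_access	 = drm_gem_end_shadow_fb_access,
 *	};
 */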
/**
* drm_gem_simple_kms_begin_shadow_fb_access - prepares shadow framebuffers for CPU access
* @pipe: the simple display pipe
* @plane_state: the plane state of type struct drm_shadow_plane_state
*
 * This function implements struct drm_simple_display_pipe_funcs.begin_fb_access.
*
* See drm_gem_begin_shadow_fb_access() for details and
 * drm_gem_simple_kms_end_shadow_fb_access() for cleanup.
*
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
int drm_gem_simple_kms_begin_shadow_fb_access(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
return drm_gem_begin_shadow_fb_access(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_kms_begin_shadow_fb_access);
/**
* drm_gem_simple_kms_end_shadow_fb_access - releases shadow framebuffers from CPU access
* @pipe: the simple display pipe
* @plane_state: the plane state of type struct drm_shadow_plane_state
*
 * This function implements struct drm_simple_display_pipe_funcs.end_fb_access.
* It undoes all effects of drm_gem_simple_kms_begin_shadow_fb_access() in
* reverse order.
*
* See drm_gem_simple_kms_begin_shadow_fb_access().
*/
void drm_gem_simple_kms_end_shadow_fb_access(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
drm_gem_end_shadow_fb_access(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_kms_end_shadow_fb_access);
/**
* drm_gem_simple_kms_reset_shadow_plane - resets a shadow-buffered plane
* @pipe: the simple display pipe
*
 * This function implements struct drm_simple_display_pipe_funcs.reset_plane
* for shadow-buffered planes.
*/
void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe)
{
drm_gem_reset_shadow_plane(&pipe->plane);
}
EXPORT_SYMBOL(drm_gem_simple_kms_reset_shadow_plane);
/**
* drm_gem_simple_kms_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
* @pipe: the simple display pipe
*
 * This function implements struct drm_simple_display_pipe_funcs.duplicate_plane_state
 * for shadow-buffered planes. It does not duplicate existing mappings of the shadow
 * buffers. Mappings are maintained during the atomic commit by the plane's
 * begin_fb_access and end_fb_access helpers.
*
* Returns:
* A pointer to a new plane state on success, or NULL otherwise.
*/
struct drm_plane_state *
drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe)
{
return drm_gem_duplicate_shadow_plane_state(&pipe->plane);
}
EXPORT_SYMBOL(drm_gem_simple_kms_duplicate_shadow_plane_state);
/**
 * drm_gem_simple_kms_destroy_shadow_plane_state - destroys shadow-buffered plane state
* @pipe: the simple display pipe
* @plane_state: the plane state of type struct drm_shadow_plane_state
*
 * This function implements struct drm_simple_display_pipe_funcs.destroy_plane_state
* for shadow-buffered planes. It expects that mappings of shadow buffers
* have been released already.
*/
void drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
drm_gem_destroy_shadow_plane_state(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_kms_destroy_shadow_plane_state);
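/*
 * A minimal usage sketch of the simple-KMS wrappers above in a driver's
 * display pipe functions (the driver's own enable/disable/update callbacks
 * are omitted here):
 *
 *	static const struct drm_simple_display_pipe_funcs example_pipe_funcs = {
 *		.begin_fb_access	= drm_gem_simple_kms_begin_shadow_fb_access,
 *		.end_fb_access		= drm_gem_simple_kms_end_shadow_fb_access,
 *		.reset_plane		= drm_gem_simple_kms_reset_shadow_plane,
 *		.duplicate_plane_state	= drm_gem_simple_kms_duplicate_shadow_plane_state,
 *		.destroy_plane_state	= drm_gem_simple_kms_destroy_shadow_plane_state,
 *	};
 */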
| linux-master | drivers/gpu/drm/drm_gem_atomic_helper.c |
/*
* Copyright 2017 Red Hat
* Parts ported from amdgpu (fence wait code).
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
*
*/
/**
* DOC: Overview
*
* DRM synchronisation objects (syncobj, see struct &drm_syncobj) provide a
* container for a synchronization primitive which can be used by userspace
* to explicitly synchronize GPU commands, can be shared between userspace
* processes, and can be shared between different DRM drivers.
* Their primary use-case is to implement Vulkan fences and semaphores.
* The syncobj userspace API provides ioctls for several operations:
*
* - Creation and destruction of syncobjs
* - Import and export of syncobjs to/from a syncobj file descriptor
* - Import and export a syncobj's underlying fence to/from a sync file
* - Reset a syncobj (set its fence to NULL)
* - Signal a syncobj (set a trivially signaled fence)
* - Wait for a syncobj's fence to appear and be signaled
*
* The syncobj userspace API also provides operations to manipulate a syncobj
* in terms of a timeline of struct &dma_fence_chain rather than a single
* struct &dma_fence, through the following operations:
*
* - Signal a given point on the timeline
* - Wait for a given point to appear and/or be signaled
* - Import and export from/to a given point of a timeline
*
 * At its core, a syncobj is simply a wrapper around a pointer to a struct
* &dma_fence which may be NULL.
* When a syncobj is first created, its pointer is either NULL or a pointer
* to an already signaled fence depending on whether the
* &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
* &DRM_IOCTL_SYNCOBJ_CREATE.
*
* If the syncobj is considered as a binary (its state is either signaled or
* unsignaled) primitive, when GPU work is enqueued in a DRM driver to signal
* the syncobj, the syncobj's fence is replaced with a fence which will be
* signaled by the completion of that work.
* If the syncobj is considered as a timeline primitive, when GPU work is
 * enqueued in a DRM driver to signal a given point of the syncobj, a new
 * struct &dma_fence_chain is created, pointing to the DRM driver's fence and
 * also pointing to the previous fence that was in the syncobj. The new struct
 * &dma_fence_chain fence replaces the syncobj's fence and will be signaled by
 * completion of the DRM driver's work and also of any work associated with the
 * fence previously in the syncobj.
*
* When GPU work which waits on a syncobj is enqueued in a DRM driver, at the
* time the work is enqueued, it waits on the syncobj's fence before
* submitting the work to hardware. That fence is either :
*
* - The syncobj's current fence if the syncobj is considered as a binary
* primitive.
* - The struct &dma_fence associated with a given point if the syncobj is
* considered as a timeline primitive.
*
* If the syncobj's fence is NULL or not present in the syncobj's timeline,
* the enqueue operation is expected to fail.
*
* With binary syncobj, all manipulation of the syncobjs's fence happens in
* terms of the current fence at the time the ioctl is called by userspace
* regardless of whether that operation is an immediate host-side operation
 * (signal or reset) or an operation which is enqueued in some driver
* queue. &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used
* to manipulate a syncobj from the host by resetting its pointer to NULL or
* setting its pointer to a fence which is already signaled.
*
 * With a timeline syncobj, all manipulation of the syncobj's fence happens in
 * terms of a u64 value referring to a point in the timeline. See
* dma_fence_chain_find_seqno() to see how a given point is found in the
* timeline.
*
 * Note that applications should be careful to always use the timeline set of
 * ioctls when dealing with a syncobj considered as a timeline. Using the
 * binary set of ioctls with a syncobj considered as a timeline could result
 * in incorrect synchronization. The use of binary syncobjs is supported
 * through the timeline set of ioctls by using a point value of 0, which
 * reproduces the behavior of the binary set of ioctls (for example replacing
 * the syncobj's fence when signaling).
*
*
* Host-side wait on syncobjs
* --------------------------
*
* &DRM_IOCTL_SYNCOBJ_WAIT takes an array of syncobj handles and does a
* host-side wait on all of the syncobj fences simultaneously.
* If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL is set, the wait ioctl will wait on
* all of the syncobj fences to be signaled before it returns.
* Otherwise, it returns once at least one syncobj fence has been signaled
* and the index of a signaled fence is written back to the client.
*
* Unlike the enqueued GPU work dependencies which fail if they see a NULL
* fence in a syncobj, if &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set,
* the host-side wait will first wait for the syncobj to receive a non-NULL
* fence and then wait on that fence.
* If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is not set and any one of the
* syncobjs in the array has a NULL fence, -EINVAL will be returned.
* Assuming the syncobj starts off with a NULL fence, this allows a client
* to do a host wait in one thread (or process) which waits on GPU work
* submitted in another thread (or process) without having to manually
* synchronize between the two.
* This requirement is inherited from the Vulkan fence API.
*
* Similarly, &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT takes an array of syncobj
* handles as well as an array of u64 points and does a host-side wait on all
 * of the syncobj fences at the given points simultaneously.
*
* &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT also adds the ability to wait for a given
* fence to materialize on the timeline without waiting for the fence to be
* signaled by using the &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE flag. This
* requirement is inherited from the wait-before-signal behavior required by
* the Vulkan timeline semaphore API.
*
* Alternatively, &DRM_IOCTL_SYNCOBJ_EVENTFD can be used to wait without
* blocking: an eventfd will be signaled when the syncobj is. This is useful to
* integrate the wait in an event loop.
*
*
* Import/export of syncobjs
* -------------------------
*
* &DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE and &DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
* provide two mechanisms for import/export of syncobjs.
*
* The first lets the client import or export an entire syncobj to a file
* descriptor.
 * These fds are opaque and have no use case other than passing the
* syncobj between processes.
* All exported file descriptors and any syncobj handles created as a
* result of importing those file descriptors own a reference to the
* same underlying struct &drm_syncobj and the syncobj can be used
* persistently across all the processes with which it is shared.
* The syncobj is freed only once the last reference is dropped.
* Unlike dma-buf, importing a syncobj creates a new handle (with its own
* reference) for every import instead of de-duplicating.
* The primary use-case of this persistent import/export is for shared
* Vulkan fences and semaphores.
*
* The second import/export mechanism, which is indicated by
* &DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE or
* &DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE lets the client
* import/export the syncobj's current fence from/to a &sync_file.
* When a syncobj is exported to a sync file, that sync file wraps the
 * syncobj's fence at the time of export and any later signal or reset
* operations on the syncobj will not affect the exported sync file.
* When a sync file is imported into a syncobj, the syncobj's fence is set
* to the fence wrapped by that sync file.
* Because sync files are immutable, resetting or signaling the syncobj
* will not affect any sync files whose fences have been imported into the
* syncobj.
*
*
* Import/export of timeline points in timeline syncobjs
* -----------------------------------------------------
*
* &DRM_IOCTL_SYNCOBJ_TRANSFER provides a mechanism to transfer a struct
* &dma_fence_chain of a syncobj at a given u64 point to another u64 point
* into another syncobj.
*
* Note that if you want to transfer a struct &dma_fence_chain from a given
* point on a timeline syncobj from/into a binary syncobj, you can use the
* point 0 to mean take/replace the fence in the syncobj.
*/
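/*
 * A minimal usage sketch for signaling a binary syncobj at command submission
 * time (the "args" and "job" structures are hypothetical driver state, not
 * part of this API):
 *
 *	struct drm_syncobj *syncobj;
 *
 *	syncobj = drm_syncobj_find(file_priv, args->out_syncobj_handle);
 *	if (!syncobj)
 *		return -ENOENT;
 *	drm_syncobj_replace_fence(syncobj, job->done_fence);
 *	drm_syncobj_put(syncobj);
 */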
#include <linux/anon_inodes.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>
#include "drm_internal.h"
struct syncobj_wait_entry {
struct list_head node;
struct task_struct *task;
struct dma_fence *fence;
struct dma_fence_cb fence_cb;
u64 point;
};
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
struct syncobj_wait_entry *wait);
struct syncobj_eventfd_entry {
struct list_head node;
struct dma_fence *fence;
struct dma_fence_cb fence_cb;
struct drm_syncobj *syncobj;
struct eventfd_ctx *ev_fd_ctx;
u64 point;
u32 flags;
};
static void
syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
struct syncobj_eventfd_entry *entry);
/**
* drm_syncobj_find - lookup and reference a sync object.
* @file_private: drm file private pointer
* @handle: sync object handle to lookup.
*
* Returns a reference to the syncobj pointed to by handle or NULL. The
* reference must be released by calling drm_syncobj_put().
*/
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
u32 handle)
{
struct drm_syncobj *syncobj;
spin_lock(&file_private->syncobj_table_lock);
/* Check if we currently have a reference on the object */
syncobj = idr_find(&file_private->syncobj_idr, handle);
if (syncobj)
drm_syncobj_get(syncobj);
spin_unlock(&file_private->syncobj_table_lock);
return syncobj;
}
EXPORT_SYMBOL(drm_syncobj_find);
static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
struct syncobj_wait_entry *wait)
{
struct dma_fence *fence;
if (wait->fence)
return;
spin_lock(&syncobj->lock);
/* We've already tried once to get a fence and failed. Now that we
* have the lock, try one more time just to be sure we don't add a
* callback when a fence has already been set.
*/
fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
dma_fence_put(fence);
list_add_tail(&wait->node, &syncobj->cb_list);
} else if (!fence) {
wait->fence = dma_fence_get_stub();
} else {
wait->fence = fence;
}
spin_unlock(&syncobj->lock);
}
static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
struct syncobj_wait_entry *wait)
{
if (!wait->node.next)
return;
spin_lock(&syncobj->lock);
list_del_init(&wait->node);
spin_unlock(&syncobj->lock);
}
static void
syncobj_eventfd_entry_free(struct syncobj_eventfd_entry *entry)
{
eventfd_ctx_put(entry->ev_fd_ctx);
dma_fence_put(entry->fence);
/* This happens either inside the syncobj lock, or after the node has
* already been removed from the list.
*/
list_del(&entry->node);
kfree(entry);
}
static void
drm_syncobj_add_eventfd(struct drm_syncobj *syncobj,
struct syncobj_eventfd_entry *entry)
{
spin_lock(&syncobj->lock);
list_add_tail(&entry->node, &syncobj->ev_fd_list);
syncobj_eventfd_entry_func(syncobj, entry);
spin_unlock(&syncobj->lock);
}
/**
* drm_syncobj_add_point - add new timeline point to the syncobj
 * @syncobj: sync object to add the timeline point to
* @chain: chain node to use to add the point
* @fence: fence to encapsulate in the chain node
* @point: sequence number to use for the point
*
 * Add the chain node as a new timeline point to the syncobj.
*/
void drm_syncobj_add_point(struct drm_syncobj *syncobj,
struct dma_fence_chain *chain,
struct dma_fence *fence,
uint64_t point)
{
struct syncobj_wait_entry *wait_cur, *wait_tmp;
struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
struct dma_fence *prev;
dma_fence_get(fence);
spin_lock(&syncobj->lock);
prev = drm_syncobj_fence_get(syncobj);
/* Adding an unordered point to the timeline could cause the payload returned from query_ioctl to be 0! */
if (prev && prev->seqno >= point)
	DRM_DEBUG("You are adding an unordered point to the timeline!\n");
dma_fence_chain_init(chain, prev, fence, point);
rcu_assign_pointer(syncobj->fence, &chain->base);
list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node)
syncobj_wait_syncobj_func(syncobj, wait_cur);
list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
syncobj_eventfd_entry_func(syncobj, ev_fd_cur);
spin_unlock(&syncobj->lock);
/* Walk the chain once to trigger garbage collection */
dma_fence_chain_for_each(fence, prev);
dma_fence_put(prev);
}
EXPORT_SYMBOL(drm_syncobj_add_point);
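/*
 * A minimal usage sketch for adding a timeline point, mirroring what
 * drm_syncobj_timeline_signal_ioctl() below does with a stub fence. The chain
 * node is allocated up front and its ownership passes to the syncobj:
 *
 *	struct dma_fence_chain *chain = dma_fence_chain_alloc();
 *
 *	if (!chain)
 *		return -ENOMEM;
 *	drm_syncobj_add_point(syncobj, chain, fence, point);
 */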
/**
* drm_syncobj_replace_fence - replace fence in a sync object.
* @syncobj: Sync object to replace fence in
 * @fence: fence to install in the sync object.
*
* This replaces the fence on a sync object.
*/
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
struct dma_fence *fence)
{
struct dma_fence *old_fence;
struct syncobj_wait_entry *wait_cur, *wait_tmp;
struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
if (fence)
dma_fence_get(fence);
spin_lock(&syncobj->lock);
old_fence = rcu_dereference_protected(syncobj->fence,
lockdep_is_held(&syncobj->lock));
rcu_assign_pointer(syncobj->fence, fence);
if (fence != old_fence) {
list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node)
syncobj_wait_syncobj_func(syncobj, wait_cur);
list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
syncobj_eventfd_entry_func(syncobj, ev_fd_cur);
}
spin_unlock(&syncobj->lock);
dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
/**
* drm_syncobj_assign_null_handle - assign a stub fence to the sync object
* @syncobj: sync object to assign the fence on
*
 * Assign an already signaled stub fence to the sync object.
*/
static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
struct dma_fence *fence = dma_fence_allocate_private_stub(ktime_get());
if (!fence)
return -ENOMEM;
drm_syncobj_replace_fence(syncobj, fence);
dma_fence_put(fence);
return 0;
}
/* 5s default for wait submission */
#define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
/**
* drm_syncobj_find_fence - lookup and reference the fence in a sync object
* @file_private: drm file private pointer
* @handle: sync object handle to lookup.
* @point: timeline point
* @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
* @fence: out parameter for the fence
*
* This is just a convenience function that combines drm_syncobj_find() and
* drm_syncobj_fence_get().
*
* Returns 0 on success or a negative error value on failure. On success @fence
* contains a reference to the fence, which must be released by calling
* dma_fence_put().
*/
int drm_syncobj_find_fence(struct drm_file *file_private,
u32 handle, u64 point, u64 flags,
struct dma_fence **fence)
{
struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
struct syncobj_wait_entry wait;
u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
int ret;
if (!syncobj)
return -ENOENT;
/* Waiting for userspace with locks held is illegal because it can
 * trivially deadlock with page faults, for example. Make lockdep complain
* about it early on.
*/
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
might_sleep();
lockdep_assert_none_held_once();
}
*fence = drm_syncobj_fence_get(syncobj);
if (*fence) {
ret = dma_fence_chain_find_seqno(fence, point);
if (!ret) {
/* If the requested seqno is already signaled
* drm_syncobj_find_fence may return a NULL
* fence. To make sure the recipient gets
* signalled, use a new fence instead.
*/
if (!*fence)
*fence = dma_fence_get_stub();
goto out;
}
dma_fence_put(*fence);
} else {
ret = -EINVAL;
}
if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
goto out;
memset(&wait, 0, sizeof(wait));
wait.task = current;
wait.point = point;
drm_syncobj_fence_add_wait(syncobj, &wait);
do {
set_current_state(TASK_INTERRUPTIBLE);
if (wait.fence) {
ret = 0;
break;
}
if (timeout == 0) {
ret = -ETIME;
break;
}
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
timeout = schedule_timeout(timeout);
} while (1);
__set_current_state(TASK_RUNNING);
*fence = wait.fence;
if (wait.node.next)
drm_syncobj_remove_wait(syncobj, &wait);
out:
drm_syncobj_put(syncobj);
return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
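/*
 * A minimal usage sketch for resolving a syncobj handle into a fence
 * dependency at submission time ("handle" and "point" stand in for
 * hypothetical userspace arguments):
 *
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	ret = drm_syncobj_find_fence(file_priv, handle, point,
 *				     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
 *				     &fence);
 *	if (ret)
 *		return ret;
 *	(add the fence as a job dependency here)
 *	dma_fence_put(fence);
 */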
/**
* drm_syncobj_free - free a sync object.
* @kref: kref to free.
*
* Only to be called from kref_put in drm_syncobj_put.
*/
void drm_syncobj_free(struct kref *kref)
{
struct drm_syncobj *syncobj = container_of(kref,
struct drm_syncobj,
refcount);
struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
drm_syncobj_replace_fence(syncobj, NULL);
list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
syncobj_eventfd_entry_free(ev_fd_cur);
kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);
/**
* drm_syncobj_create - create a new syncobj
* @out_syncobj: returned syncobj
* @flags: DRM_SYNCOBJ_* flags
* @fence: if non-NULL, the syncobj will represent this fence
*
* This is the first function to create a sync object. After creating, drivers
* probably want to make it available to userspace, either through
* drm_syncobj_get_handle() or drm_syncobj_get_fd().
*
* Returns 0 on success or a negative error value on failure.
*/
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
struct dma_fence *fence)
{
int ret;
struct drm_syncobj *syncobj;
syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
if (!syncobj)
return -ENOMEM;
kref_init(&syncobj->refcount);
INIT_LIST_HEAD(&syncobj->cb_list);
INIT_LIST_HEAD(&syncobj->ev_fd_list);
spin_lock_init(&syncobj->lock);
if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
ret = drm_syncobj_assign_null_handle(syncobj);
if (ret < 0) {
drm_syncobj_put(syncobj);
return ret;
}
}
if (fence)
drm_syncobj_replace_fence(syncobj, fence);
*out_syncobj = syncobj;
return 0;
}
EXPORT_SYMBOL(drm_syncobj_create);
/**
* drm_syncobj_get_handle - get a handle from a syncobj
* @file_private: drm file private pointer
* @syncobj: Sync object to export
* @handle: out parameter with the new handle
*
* Exports a sync object created with drm_syncobj_create() as a handle on
* @file_private to userspace.
*
* Returns 0 on success or a negative error value on failure.
*/
int drm_syncobj_get_handle(struct drm_file *file_private,
struct drm_syncobj *syncobj, u32 *handle)
{
int ret;
/* take a reference to put in the idr */
drm_syncobj_get(syncobj);
idr_preload(GFP_KERNEL);
spin_lock(&file_private->syncobj_table_lock);
ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
spin_unlock(&file_private->syncobj_table_lock);
idr_preload_end();
if (ret < 0) {
drm_syncobj_put(syncobj);
return ret;
}
*handle = ret;
return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_handle);
static int drm_syncobj_create_as_handle(struct drm_file *file_private,
u32 *handle, uint32_t flags)
{
int ret;
struct drm_syncobj *syncobj;
ret = drm_syncobj_create(&syncobj, flags, NULL);
if (ret)
return ret;
ret = drm_syncobj_get_handle(file_private, syncobj, handle);
drm_syncobj_put(syncobj);
return ret;
}
static int drm_syncobj_destroy(struct drm_file *file_private,
u32 handle)
{
struct drm_syncobj *syncobj;
spin_lock(&file_private->syncobj_table_lock);
syncobj = idr_remove(&file_private->syncobj_idr, handle);
spin_unlock(&file_private->syncobj_table_lock);
if (!syncobj)
return -EINVAL;
drm_syncobj_put(syncobj);
return 0;
}
static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
struct drm_syncobj *syncobj = file->private_data;
drm_syncobj_put(syncobj);
return 0;
}
static const struct file_operations drm_syncobj_file_fops = {
.release = drm_syncobj_file_release,
};
/**
* drm_syncobj_get_fd - get a file descriptor from a syncobj
* @syncobj: Sync object to export
* @p_fd: out parameter with the new file descriptor
*
* Exports a sync object created with drm_syncobj_create() as a file descriptor.
*
* Returns 0 on success or a negative error value on failure.
*/
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
struct file *file;
int fd;
fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0)
return fd;
file = anon_inode_getfile("syncobj_file",
&drm_syncobj_file_fops,
syncobj, 0);
if (IS_ERR(file)) {
put_unused_fd(fd);
return PTR_ERR(file);
}
drm_syncobj_get(syncobj);
fd_install(fd, file);
*p_fd = fd;
return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_fd);
static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
u32 handle, int *p_fd)
{
struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
int ret;
if (!syncobj)
return -EINVAL;
ret = drm_syncobj_get_fd(syncobj, p_fd);
drm_syncobj_put(syncobj);
return ret;
}
static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
int fd, u32 *handle)
{
struct drm_syncobj *syncobj;
struct fd f = fdget(fd);
int ret;
if (!f.file)
return -EINVAL;
if (f.file->f_op != &drm_syncobj_file_fops) {
fdput(f);
return -EINVAL;
}
/* take a reference to put in the idr */
syncobj = f.file->private_data;
drm_syncobj_get(syncobj);
idr_preload(GFP_KERNEL);
spin_lock(&file_private->syncobj_table_lock);
ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
spin_unlock(&file_private->syncobj_table_lock);
idr_preload_end();
if (ret > 0) {
*handle = ret;
ret = 0;
} else
drm_syncobj_put(syncobj);
fdput(f);
return ret;
}
static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
int fd, int handle)
{
struct dma_fence *fence = sync_file_get_fence(fd);
struct drm_syncobj *syncobj;
if (!fence)
return -EINVAL;
syncobj = drm_syncobj_find(file_private, handle);
if (!syncobj) {
dma_fence_put(fence);
return -ENOENT;
}
drm_syncobj_replace_fence(syncobj, fence);
dma_fence_put(fence);
drm_syncobj_put(syncobj);
return 0;
}
static int drm_syncobj_export_sync_file(struct drm_file *file_private,
int handle, int *p_fd)
{
int ret;
struct dma_fence *fence;
struct sync_file *sync_file;
int fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0)
return fd;
ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
if (ret)
goto err_put_fd;
sync_file = sync_file_create(fence);
dma_fence_put(fence);
if (!sync_file) {
ret = -EINVAL;
goto err_put_fd;
}
fd_install(fd, sync_file->file);
*p_fd = fd;
return 0;
err_put_fd:
put_unused_fd(fd);
return ret;
}
/**
* drm_syncobj_open - initializes syncobj file-private structures at devnode open time
* @file_private: drm file-private structure to set up
*
* Called at device open time, sets up the structure for handling refcounting
* of sync objects.
*/
void
drm_syncobj_open(struct drm_file *file_private)
{
idr_init_base(&file_private->syncobj_idr, 1);
spin_lock_init(&file_private->syncobj_table_lock);
}
static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
struct drm_syncobj *syncobj = ptr;
drm_syncobj_put(syncobj);
return 0;
}
/**
* drm_syncobj_release - release file-private sync object resources
* @file_private: drm file-private structure to clean up
*
* Called at close time when the filp is going away.
*
* Releases any remaining references on objects by this filp.
*/
void
drm_syncobj_release(struct drm_file *file_private)
{
idr_for_each(&file_private->syncobj_idr,
&drm_syncobj_release_handle, file_private);
idr_destroy(&file_private->syncobj_idr);
}
int
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_create *args = data;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
/* no valid flags yet */
if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
return -EINVAL;
return drm_syncobj_create_as_handle(file_private,
&args->handle, args->flags);
}
int
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_destroy *args = data;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
/* make sure padding is empty */
if (args->pad)
return -EINVAL;
return drm_syncobj_destroy(file_private, args->handle);
}
int
drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_handle *args = data;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
if (args->pad)
return -EINVAL;
if (args->flags != 0 &&
args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
return -EINVAL;
if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
return drm_syncobj_export_sync_file(file_private, args->handle,
&args->fd);
return drm_syncobj_handle_to_fd(file_private, args->handle,
&args->fd);
}
int
drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_handle *args = data;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
if (args->pad)
return -EINVAL;
if (args->flags != 0 &&
args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
return -EINVAL;
if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
return drm_syncobj_import_sync_file_fence(file_private,
args->fd,
args->handle);
return drm_syncobj_fd_to_handle(file_private, args->fd,
&args->handle);
}
static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
struct drm_syncobj_transfer *args)
{
struct drm_syncobj *timeline_syncobj = NULL;
struct dma_fence *fence, *tmp;
struct dma_fence_chain *chain;
int ret;
timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
if (!timeline_syncobj) {
return -ENOENT;
}
ret = drm_syncobj_find_fence(file_private, args->src_handle,
args->src_point, args->flags,
&tmp);
if (ret)
goto err_put_timeline;
fence = dma_fence_unwrap_merge(tmp);
dma_fence_put(tmp);
if (!fence) {
ret = -ENOMEM;
goto err_put_timeline;
}
chain = dma_fence_chain_alloc();
if (!chain) {
ret = -ENOMEM;
goto err_free_fence;
}
drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
err_free_fence:
dma_fence_put(fence);
err_put_timeline:
drm_syncobj_put(timeline_syncobj);
return ret;
}
static int
drm_syncobj_transfer_to_binary(struct drm_file *file_private,
struct drm_syncobj_transfer *args)
{
struct drm_syncobj *binary_syncobj = NULL;
struct dma_fence *fence;
int ret;
binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
if (!binary_syncobj)
return -ENOENT;
ret = drm_syncobj_find_fence(file_private, args->src_handle,
args->src_point, args->flags, &fence);
if (ret)
goto err;
drm_syncobj_replace_fence(binary_syncobj, fence);
dma_fence_put(fence);
err:
drm_syncobj_put(binary_syncobj);
return ret;
}
int
drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_transfer *args = data;
int ret;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
return -EOPNOTSUPP;
if (args->pad)
return -EINVAL;
if (args->dst_point)
ret = drm_syncobj_transfer_to_timeline(file_private, args);
else
ret = drm_syncobj_transfer_to_binary(file_private, args);
return ret;
}
static void syncobj_wait_fence_func(struct dma_fence *fence,
struct dma_fence_cb *cb)
{
struct syncobj_wait_entry *wait =
container_of(cb, struct syncobj_wait_entry, fence_cb);
wake_up_process(wait->task);
}
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
struct syncobj_wait_entry *wait)
{
struct dma_fence *fence;
/* This happens inside the syncobj lock */
fence = rcu_dereference_protected(syncobj->fence,
lockdep_is_held(&syncobj->lock));
dma_fence_get(fence);
if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
dma_fence_put(fence);
return;
} else if (!fence) {
wait->fence = dma_fence_get_stub();
} else {
wait->fence = fence;
}
wake_up_process(wait->task);
list_del_init(&wait->node);
}
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
void __user *user_points,
uint32_t count,
uint32_t flags,
signed long timeout,
uint32_t *idx)
{
struct syncobj_wait_entry *entries;
struct dma_fence *fence;
uint64_t *points;
uint32_t signaled_count, i;
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
lockdep_assert_none_held_once();
points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
if (points == NULL)
return -ENOMEM;
if (!user_points) {
memset(points, 0, count * sizeof(uint64_t));
} else if (copy_from_user(points, user_points,
sizeof(uint64_t) * count)) {
timeout = -EFAULT;
goto err_free_points;
}
entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
if (!entries) {
timeout = -ENOMEM;
goto err_free_points;
}
/* Walk the list of sync objects and initialize entries. We do
* this up-front so that we can properly return -EINVAL if there is
* a syncobj with a missing fence and then never have the chance of
* returning -EINVAL again.
*/
signaled_count = 0;
for (i = 0; i < count; ++i) {
struct dma_fence *fence;
entries[i].task = current;
entries[i].point = points[i];
fence = drm_syncobj_fence_get(syncobjs[i]);
if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
dma_fence_put(fence);
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
continue;
} else {
timeout = -EINVAL;
goto cleanup_entries;
}
}
if (fence)
entries[i].fence = fence;
else
entries[i].fence = dma_fence_get_stub();
if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
dma_fence_is_signaled(entries[i].fence)) {
if (signaled_count == 0 && idx)
*idx = i;
signaled_count++;
}
}
if (signaled_count == count ||
(signaled_count > 0 &&
!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
goto cleanup_entries;
/* There's a very annoying laxness in the dma_fence API here, in
* that backends are not required to automatically report when a
* fence is signaled prior to fence->ops->enable_signaling() being
* called. So here if we fail to match signaled_count, we need to
 * fall through and try a 0 timeout wait!
*/
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
for (i = 0; i < count; ++i)
drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
}
do {
set_current_state(TASK_INTERRUPTIBLE);
signaled_count = 0;
for (i = 0; i < count; ++i) {
fence = entries[i].fence;
if (!fence)
continue;
if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
dma_fence_is_signaled(fence) ||
(!entries[i].fence_cb.func &&
dma_fence_add_callback(fence,
&entries[i].fence_cb,
syncobj_wait_fence_func))) {
/* The fence has been signaled */
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
signaled_count++;
} else {
if (idx)
*idx = i;
goto done_waiting;
}
}
}
if (signaled_count == count)
goto done_waiting;
if (timeout == 0) {
timeout = -ETIME;
goto done_waiting;
}
if (signal_pending(current)) {
timeout = -ERESTARTSYS;
goto done_waiting;
}
timeout = schedule_timeout(timeout);
} while (1);
done_waiting:
__set_current_state(TASK_RUNNING);
cleanup_entries:
for (i = 0; i < count; ++i) {
drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
if (entries[i].fence_cb.func)
dma_fence_remove_callback(entries[i].fence,
&entries[i].fence_cb);
dma_fence_put(entries[i].fence);
}
kfree(entries);
err_free_points:
kfree(points);
return timeout;
}
/**
* drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
*
 * @timeout_nsec: absolute timeout in nanoseconds, 0 for poll
 *
 * Calculate the timeout in jiffies from an absolute time in nanoseconds.
*/
signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
ktime_t abs_timeout, now;
u64 timeout_ns, timeout_jiffies64;
/* a 0 timeout means poll - absolute 0 doesn't seem valid */
if (timeout_nsec == 0)
return 0;
abs_timeout = ns_to_ktime(timeout_nsec);
now = ktime_get();
if (!ktime_after(abs_timeout, now))
return 0;
timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));
timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
/* clamp timeout to avoid infinite timeout */
if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
return MAX_SCHEDULE_TIMEOUT - 1;
return timeout_jiffies64 + 1;
}
EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
static int drm_syncobj_array_wait(struct drm_device *dev,
struct drm_file *file_private,
struct drm_syncobj_wait *wait,
struct drm_syncobj_timeline_wait *timeline_wait,
struct drm_syncobj **syncobjs, bool timeline)
{
signed long timeout = 0;
uint32_t first = ~0;
if (!timeline) {
timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
timeout = drm_syncobj_array_wait_timeout(syncobjs,
NULL,
wait->count_handles,
wait->flags,
timeout, &first);
if (timeout < 0)
return timeout;
wait->first_signaled = first;
} else {
timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
timeout = drm_syncobj_array_wait_timeout(syncobjs,
u64_to_user_ptr(timeline_wait->points),
timeline_wait->count_handles,
timeline_wait->flags,
timeout, &first);
if (timeout < 0)
return timeout;
timeline_wait->first_signaled = first;
}
return 0;
}
static int drm_syncobj_array_find(struct drm_file *file_private,
void __user *user_handles,
uint32_t count_handles,
struct drm_syncobj ***syncobjs_out)
{
uint32_t i, *handles;
struct drm_syncobj **syncobjs;
int ret;
handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
if (handles == NULL)
return -ENOMEM;
if (copy_from_user(handles, user_handles,
sizeof(uint32_t) * count_handles)) {
ret = -EFAULT;
goto err_free_handles;
}
syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
if (syncobjs == NULL) {
ret = -ENOMEM;
goto err_free_handles;
}
for (i = 0; i < count_handles; i++) {
syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
if (!syncobjs[i]) {
ret = -ENOENT;
goto err_put_syncobjs;
}
}
kfree(handles);
*syncobjs_out = syncobjs;
return 0;
err_put_syncobjs:
while (i-- > 0)
drm_syncobj_put(syncobjs[i]);
kfree(syncobjs);
err_free_handles:
kfree(handles);
return ret;
}
static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
uint32_t count)
{
uint32_t i;
for (i = 0; i < count; i++)
drm_syncobj_put(syncobjs[i]);
kfree(syncobjs);
}
int
drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_wait *args = data;
struct drm_syncobj **syncobjs;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
return -EINVAL;
if (args->count_handles == 0)
return -EINVAL;
ret = drm_syncobj_array_find(file_private,
u64_to_user_ptr(args->handles),
args->count_handles,
&syncobjs);
if (ret < 0)
return ret;
ret = drm_syncobj_array_wait(dev, file_private,
args, NULL, syncobjs, false);
drm_syncobj_array_free(syncobjs, args->count_handles);
return ret;
}
int
drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_timeline_wait *args = data;
struct drm_syncobj **syncobjs;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
return -EOPNOTSUPP;
if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
return -EINVAL;
if (args->count_handles == 0)
return -EINVAL;
ret = drm_syncobj_array_find(file_private,
u64_to_user_ptr(args->handles),
args->count_handles,
&syncobjs);
if (ret < 0)
return ret;
ret = drm_syncobj_array_wait(dev, file_private,
NULL, args, syncobjs, true);
drm_syncobj_array_free(syncobjs, args->count_handles);
return ret;
}
static void syncobj_eventfd_entry_fence_func(struct dma_fence *fence,
struct dma_fence_cb *cb)
{
struct syncobj_eventfd_entry *entry =
container_of(cb, struct syncobj_eventfd_entry, fence_cb);
eventfd_signal(entry->ev_fd_ctx, 1);
syncobj_eventfd_entry_free(entry);
}
static void
syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
struct syncobj_eventfd_entry *entry)
{
int ret;
struct dma_fence *fence;
/* This happens inside the syncobj lock */
fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
ret = dma_fence_chain_find_seqno(&fence, entry->point);
if (ret != 0 || !fence) {
dma_fence_put(fence);
return;
}
list_del_init(&entry->node);
entry->fence = fence;
if (entry->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) {
eventfd_signal(entry->ev_fd_ctx, 1);
syncobj_eventfd_entry_free(entry);
} else {
ret = dma_fence_add_callback(fence, &entry->fence_cb,
syncobj_eventfd_entry_fence_func);
if (ret == -ENOENT) {
eventfd_signal(entry->ev_fd_ctx, 1);
syncobj_eventfd_entry_free(entry);
}
}
}
int
drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_eventfd *args = data;
struct drm_syncobj *syncobj;
struct eventfd_ctx *ev_fd_ctx;
struct syncobj_eventfd_entry *entry;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
return -EOPNOTSUPP;
if (args->flags & ~DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)
return -EINVAL;
if (args->pad)
return -EINVAL;
syncobj = drm_syncobj_find(file_private, args->handle);
if (!syncobj)
return -ENOENT;
ev_fd_ctx = eventfd_ctx_fdget(args->fd);
if (IS_ERR(ev_fd_ctx))
return PTR_ERR(ev_fd_ctx);
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
eventfd_ctx_put(ev_fd_ctx);
return -ENOMEM;
}
entry->syncobj = syncobj;
entry->ev_fd_ctx = ev_fd_ctx;
entry->point = args->point;
entry->flags = args->flags;
drm_syncobj_add_eventfd(syncobj, entry);
drm_syncobj_put(syncobj);
return 0;
}
int
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_array *args = data;
struct drm_syncobj **syncobjs;
uint32_t i;
int ret;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
if (args->pad != 0)
return -EINVAL;
if (args->count_handles == 0)
return -EINVAL;
ret = drm_syncobj_array_find(file_private,
u64_to_user_ptr(args->handles),
args->count_handles,
&syncobjs);
if (ret < 0)
return ret;
for (i = 0; i < args->count_handles; i++)
drm_syncobj_replace_fence(syncobjs[i], NULL);
drm_syncobj_array_free(syncobjs, args->count_handles);
return 0;
}
int
drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_array *args = data;
struct drm_syncobj **syncobjs;
uint32_t i;
int ret;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
if (args->pad != 0)
return -EINVAL;
if (args->count_handles == 0)
return -EINVAL;
ret = drm_syncobj_array_find(file_private,
u64_to_user_ptr(args->handles),
args->count_handles,
&syncobjs);
if (ret < 0)
return ret;
for (i = 0; i < args->count_handles; i++) {
ret = drm_syncobj_assign_null_handle(syncobjs[i]);
if (ret < 0)
break;
}
drm_syncobj_array_free(syncobjs, args->count_handles);
return ret;
}
int
drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_timeline_array *args = data;
struct drm_syncobj **syncobjs;
struct dma_fence_chain **chains;
uint64_t *points;
uint32_t i, j;
int ret;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
return -EOPNOTSUPP;
if (args->flags != 0)
return -EINVAL;
if (args->count_handles == 0)
return -EINVAL;
ret = drm_syncobj_array_find(file_private,
u64_to_user_ptr(args->handles),
args->count_handles,
&syncobjs);
if (ret < 0)
return ret;
points = kmalloc_array(args->count_handles, sizeof(*points),
GFP_KERNEL);
if (!points) {
ret = -ENOMEM;
goto out;
}
if (!u64_to_user_ptr(args->points)) {
memset(points, 0, args->count_handles * sizeof(uint64_t));
} else if (copy_from_user(points, u64_to_user_ptr(args->points),
sizeof(uint64_t) * args->count_handles)) {
ret = -EFAULT;
goto err_points;
}
chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);
if (!chains) {
ret = -ENOMEM;
goto err_points;
}
for (i = 0; i < args->count_handles; i++) {
chains[i] = dma_fence_chain_alloc();
if (!chains[i]) {
for (j = 0; j < i; j++)
dma_fence_chain_free(chains[j]);
ret = -ENOMEM;
goto err_chains;
}
}
for (i = 0; i < args->count_handles; i++) {
struct dma_fence *fence = dma_fence_get_stub();
drm_syncobj_add_point(syncobjs[i], chains[i],
fence, points[i]);
dma_fence_put(fence);
}
err_chains:
kfree(chains);
err_points:
kfree(points);
out:
drm_syncobj_array_free(syncobjs, args->count_handles);
return ret;
}
int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_timeline_array *args = data;
struct drm_syncobj **syncobjs;
uint64_t __user *points = u64_to_user_ptr(args->points);
uint32_t i;
int ret;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
return -EOPNOTSUPP;
if (args->flags & ~DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED)
return -EINVAL;
if (args->count_handles == 0)
return -EINVAL;
ret = drm_syncobj_array_find(file_private,
u64_to_user_ptr(args->handles),
args->count_handles,
&syncobjs);
if (ret < 0)
return ret;
for (i = 0; i < args->count_handles; i++) {
struct dma_fence_chain *chain;
struct dma_fence *fence;
uint64_t point;
fence = drm_syncobj_fence_get(syncobjs[i]);
chain = to_dma_fence_chain(fence);
if (chain) {
struct dma_fence *iter, *last_signaled =
dma_fence_get(fence);
if (args->flags &
DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) {
point = fence->seqno;
} else {
dma_fence_chain_for_each(iter, fence) {
if (iter->context != fence->context) {
dma_fence_put(iter);
/* It is most likely that the timeline has
 * unordered points. */
break;
}
dma_fence_put(last_signaled);
last_signaled = dma_fence_get(iter);
}
point = dma_fence_is_signaled(last_signaled) ?
last_signaled->seqno :
to_dma_fence_chain(last_signaled)->prev_seqno;
}
dma_fence_put(last_signaled);
} else {
point = 0;
}
dma_fence_put(fence);
ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
ret = ret ? -EFAULT : 0;
if (ret)
break;
}
drm_syncobj_array_free(syncobjs, args->count_handles);
return ret;
}
| linux-master | drivers/gpu/drm/drm_syncobj.c |
#include <drm/drm_file.h>
#define CREATE_TRACE_POINTS
#include "drm_trace.h"
| linux-master | drivers/gpu/drm/drm_trace_points.c |
/*
* Copyright (c) 2016 Intel Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include <linux/export.h>
#include <linux/uaccess.h>
#include <drm/drm_atomic.h>
#include <drm/drm_drv.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
/*
* Internal function to assign a slot in the object idr and optionally
* register the object into the idr.
*/
int __drm_mode_object_add(struct drm_device *dev, struct drm_mode_object *obj,
uint32_t obj_type, bool register_obj,
void (*obj_free_cb)(struct kref *kref))
{
int ret;
WARN_ON(!dev->driver->load && dev->registered && !obj_free_cb);
mutex_lock(&dev->mode_config.idr_mutex);
ret = idr_alloc(&dev->mode_config.object_idr, register_obj ? obj : NULL,
1, 0, GFP_KERNEL);
if (ret >= 0) {
/*
* Set up the object linking under the protection of the idr
* lock so that other users can't see inconsistent state.
*/
obj->id = ret;
obj->type = obj_type;
if (obj_free_cb) {
obj->free_cb = obj_free_cb;
kref_init(&obj->refcount);
}
}
mutex_unlock(&dev->mode_config.idr_mutex);
return ret < 0 ? ret : 0;
}
/**
* drm_mode_object_add - allocate a new modeset identifier
* @dev: DRM device
* @obj: object pointer, used to generate unique ID
* @obj_type: object type
*
 * Create a unique identifier based on @obj in @dev's identifier space. Used
* for tracking modes, CRTCs and connectors.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_mode_object_add(struct drm_device *dev,
struct drm_mode_object *obj, uint32_t obj_type)
{
return __drm_mode_object_add(dev, obj, obj_type, true, NULL);
}
void drm_mode_object_register(struct drm_device *dev,
struct drm_mode_object *obj)
{
mutex_lock(&dev->mode_config.idr_mutex);
idr_replace(&dev->mode_config.object_idr, obj, obj->id);
mutex_unlock(&dev->mode_config.idr_mutex);
}
/**
* drm_mode_object_unregister - free a modeset identifier
* @dev: DRM device
* @object: object to free
*
 * Free the identifier of @object from @dev's unique identifier pool.
* This function can be called multiple times, and guards against
* multiple removals.
* These modeset identifiers are _not_ reference counted. Hence don't use this
* for reference counted modeset objects like framebuffers.
*/
void drm_mode_object_unregister(struct drm_device *dev,
struct drm_mode_object *object)
{
WARN_ON(!dev->driver->load && dev->registered && !object->free_cb);
mutex_lock(&dev->mode_config.idr_mutex);
if (object->id) {
idr_remove(&dev->mode_config.object_idr, object->id);
object->id = 0;
}
mutex_unlock(&dev->mode_config.idr_mutex);
}
/**
* drm_mode_object_lease_required - check types which must be leased to be used
* @type: type of object
*
* Returns whether the provided type of drm_mode_object must
* be owned or leased to be used by a process.
*/
bool drm_mode_object_lease_required(uint32_t type)
{
switch(type) {
case DRM_MODE_OBJECT_CRTC:
case DRM_MODE_OBJECT_CONNECTOR:
case DRM_MODE_OBJECT_PLANE:
return true;
default:
return false;
}
}
struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t id, uint32_t type)
{
struct drm_mode_object *obj = NULL;
mutex_lock(&dev->mode_config.idr_mutex);
obj = idr_find(&dev->mode_config.object_idr, id);
if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
obj = NULL;
if (obj && obj->id != id)
obj = NULL;
if (obj && drm_mode_object_lease_required(obj->type) &&
!_drm_lease_held(file_priv, obj->id)) {
drm_dbg_kms(dev, "[OBJECT:%d] not included in lease", id);
obj = NULL;
}
if (obj && obj->free_cb) {
if (!kref_get_unless_zero(&obj->refcount))
obj = NULL;
}
mutex_unlock(&dev->mode_config.idr_mutex);
return obj;
}
/**
* drm_mode_object_find - look up a drm object with static lifetime
* @dev: drm device
* @file_priv: drm file
* @id: id of the mode object
* @type: type of the mode object
*
* This function is used to look up a modeset object. It will acquire a
* reference for reference counted objects. This reference must be dropped again
 * by calling drm_mode_object_put().
*/
struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t id, uint32_t type)
{
struct drm_mode_object *obj = NULL;
obj = __drm_mode_object_find(dev, file_priv, id, type);
return obj;
}
EXPORT_SYMBOL(drm_mode_object_find);
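/*
 * A minimal usage sketch for looking up a CRTC by object id; the returned
 * reference is dropped again with drm_mode_object_put(), which is a no-op
 * for non-refcounted objects such as CRTCs:
 *
 *	struct drm_mode_object *obj;
 *	struct drm_crtc *crtc;
 *
 *	obj = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_CRTC);
 *	if (!obj)
 *		return -ENOENT;
 *	crtc = obj_to_crtc(obj);
 *	drm_mode_object_put(obj);
 */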
/**
* drm_mode_object_put - release a mode object reference
* @obj: DRM mode object
*
* This function decrements the object's refcount if it is a refcounted modeset
* object. It is a no-op on any other object. This is used to drop references
* acquired with drm_mode_object_get().
*/
void drm_mode_object_put(struct drm_mode_object *obj)
{
if (obj->free_cb) {
DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, kref_read(&obj->refcount));
kref_put(&obj->refcount, obj->free_cb);
}
}
EXPORT_SYMBOL(drm_mode_object_put);
/**
* drm_mode_object_get - acquire a mode object reference
* @obj: DRM mode object
*
* This function increments the object's refcount if it is a refcounted modeset
* object. It is a no-op on any other object. References should be dropped again
* by calling drm_mode_object_put().
*/
void drm_mode_object_get(struct drm_mode_object *obj)
{
if (obj->free_cb) {
DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, kref_read(&obj->refcount));
kref_get(&obj->refcount);
}
}
EXPORT_SYMBOL(drm_mode_object_get);
/**
* drm_object_attach_property - attach a property to a modeset object
* @obj: drm modeset object
* @property: property to attach
* @init_val: initial value of the property
*
* This attaches the given property to the modeset object with the given initial
* value. Currently this function cannot fail since the properties are stored in
* a statically sized array.
*
* Note that all properties must be attached before the object itself is
* registered and accessible from userspace.
*/
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val)
{
int count = obj->properties->count;
struct drm_device *dev = property->dev;
if (obj->type == DRM_MODE_OBJECT_CONNECTOR) {
struct drm_connector *connector = obj_to_connector(obj);
WARN_ON(!dev->driver->load &&
connector->registration_state == DRM_CONNECTOR_REGISTERED);
} else {
WARN_ON(!dev->driver->load && dev->registered);
}
if (count == DRM_OBJECT_MAX_PROPERTY) {
WARN(1, "Failed to attach object property (type: 0x%x). Please "
"increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
"you see this message on the same object type.\n",
obj->type);
return;
}
obj->properties->properties[count] = property;
obj->properties->values[count] = init_val;
obj->properties->count++;
}
EXPORT_SYMBOL(drm_object_attach_property);
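/*
 * A minimal usage sketch for creating and attaching a property at init time,
 * before the object is registered (the "zpos"-style range property is just an
 * illustration):
 *
 *	struct drm_property *prop;
 *
 *	prop = drm_property_create_range(dev, 0, "zpos", 0, 255);
 *	if (!prop)
 *		return -ENOMEM;
 *	drm_object_attach_property(&plane->base, prop, 0);
 */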
/**
* drm_object_property_set_value - set the value of a property
* @obj: drm mode object to set property value for
* @property: property to set
* @val: value the property should be set to
*
* This function sets a given property on a given object. This function only
* changes the software state of the property, it does not call into the
* driver's ->set_property callback.
*
* Note that atomic drivers should not have any need to call this, the core will
* ensure consistency of values reported back to userspace through the
* appropriate ->atomic_get_property callback. Only legacy drivers should call
* this function to update the tracked value (after clamping and other
* restrictions have been applied).
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_object_property_set_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t val)
{
int i;
WARN_ON(drm_drv_uses_atomic_modeset(property->dev) &&
!(property->flags & DRM_MODE_PROP_IMMUTABLE));
for (i = 0; i < obj->properties->count; i++) {
if (obj->properties->properties[i] == property) {
obj->properties->values[i] = val;
return 0;
}
}
return -EINVAL;
}
EXPORT_SYMBOL(drm_object_property_set_value);
static int __drm_object_property_get_prop_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t *val)
{
int i;
for (i = 0; i < obj->properties->count; i++) {
if (obj->properties->properties[i] == property) {
*val = obj->properties->values[i];
return 0;
}
}
return -EINVAL;
}
static int __drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t *val)
{
/* Read-only properties bypass the atomic mechanism and still store their
* value in obj->properties->values[], mostly to avoid having to deal with
* EDID and similar props in atomic paths.
*/
if (drm_drv_uses_atomic_modeset(property->dev) &&
!(property->flags & DRM_MODE_PROP_IMMUTABLE))
return drm_atomic_get_property(obj, property, val);
return __drm_object_property_get_prop_value(obj, property, val);
}
/**
* drm_object_property_get_value - retrieve the value of a property
* @obj: drm mode object to get property value from
* @property: property to retrieve
* @val: storage for the property value
*
* This function retrieves the software state of the given property for the
* given object. Since there is no driver callback to retrieve the current property
* value this might be out of sync with the hardware, depending upon the driver
* and property.
*
* Atomic drivers should never call this function directly, the core will read
* out property values through the various ->atomic_get_property callbacks.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val)
{
WARN_ON(drm_drv_uses_atomic_modeset(property->dev));
return __drm_object_property_get_value(obj, property, val);
}
EXPORT_SYMBOL(drm_object_property_get_value);
/**
* drm_object_property_get_default_value - retrieve the default value of a
* property when in atomic mode.
* @obj: drm mode object to get property value from
* @property: property to retrieve
* @val: storage for the property value
*
* This function retrieves the default value of the given property as passed in
* to drm_object_attach_property().
*
* Only atomic drivers should call this function directly, as for non-atomic
* drivers it will return the current value.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_object_property_get_default_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t *val)
{
WARN_ON(!drm_drv_uses_atomic_modeset(property->dev));
return __drm_object_property_get_prop_value(obj, property, val);
}
EXPORT_SYMBOL(drm_object_property_get_default_value);
/* helper for getconnector and getproperties ioctls */
int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
uint32_t __user *prop_ptr,
uint64_t __user *prop_values,
uint32_t *arg_count_props)
{
int i, ret, count;
for (i = 0, count = 0; i < obj->properties->count; i++) {
struct drm_property *prop = obj->properties->properties[i];
uint64_t val;
if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic)
continue;
if (*arg_count_props > count) {
ret = __drm_object_property_get_value(obj, prop, &val);
if (ret)
return ret;
if (put_user(prop->base.id, prop_ptr + count))
return -EFAULT;
if (put_user(val, prop_values + count))
return -EFAULT;
}
count++;
}
*arg_count_props = count;
return 0;
}
/**
* drm_mode_obj_get_properties_ioctl - get the current value of an object's property
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* This function retrieves the current value for an object's property. Compared
* to the connector specific ioctl this one is extended to also work on crtc and
* plane objects.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_obj_get_properties *arg = data;
struct drm_mode_object *obj;
struct drm_modeset_acquire_ctx ctx;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
obj = drm_mode_object_find(dev, file_priv, arg->obj_id, arg->obj_type);
if (!obj) {
ret = -ENOENT;
goto out;
}
if (!obj->properties) {
ret = -EINVAL;
goto out_unref;
}
ret = drm_mode_object_get_properties(obj, file_priv->atomic,
(uint32_t __user *)(unsigned long)(arg->props_ptr),
(uint64_t __user *)(unsigned long)(arg->prop_values_ptr),
&arg->count_props);
out_unref:
drm_mode_object_put(obj);
out:
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
return ret;
}
struct drm_property *drm_mode_obj_find_prop_id(struct drm_mode_object *obj,
uint32_t prop_id)
{
int i;
for (i = 0; i < obj->properties->count; i++)
if (obj->properties->properties[i]->base.id == prop_id)
return obj->properties->properties[i];
return NULL;
}
static int set_property_legacy(struct drm_mode_object *obj,
struct drm_property *prop,
uint64_t prop_value)
{
struct drm_device *dev = prop->dev;
struct drm_mode_object *ref;
struct drm_modeset_acquire_ctx ctx;
int ret = -EINVAL;
if (!drm_property_change_valid_get(prop, prop_value, &ref))
return -EINVAL;
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
switch (obj->type) {
case DRM_MODE_OBJECT_CONNECTOR:
ret = drm_connector_set_obj_prop(obj, prop, prop_value);
break;
case DRM_MODE_OBJECT_CRTC:
ret = drm_mode_crtc_set_obj_prop(obj, prop, prop_value);
break;
case DRM_MODE_OBJECT_PLANE:
ret = drm_mode_plane_set_obj_prop(obj_to_plane(obj),
prop, prop_value);
break;
}
drm_property_change_valid_put(prop, ref);
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
return ret;
}
static int set_property_atomic(struct drm_mode_object *obj,
struct drm_file *file_priv,
struct drm_property *prop,
uint64_t prop_value)
{
struct drm_device *dev = prop->dev;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
int ret;
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
drm_modeset_acquire_init(&ctx, 0);
state->acquire_ctx = &ctx;
retry:
if (prop == state->dev->mode_config.dpms_property) {
if (obj->type != DRM_MODE_OBJECT_CONNECTOR) {
ret = -EINVAL;
goto out;
}
ret = drm_atomic_connector_commit_dpms(state,
obj_to_connector(obj),
prop_value);
} else {
ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value);
if (ret)
goto out;
ret = drm_atomic_commit(state);
}
out:
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
goto retry;
}
drm_atomic_state_put(state);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
}
int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_obj_set_property *arg = data;
struct drm_mode_object *arg_obj;
struct drm_property *property;
int ret = -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
arg_obj = drm_mode_object_find(dev, file_priv, arg->obj_id, arg->obj_type);
if (!arg_obj)
return -ENOENT;
if (!arg_obj->properties)
goto out_unref;
property = drm_mode_obj_find_prop_id(arg_obj, arg->prop_id);
if (!property)
goto out_unref;
if (drm_drv_uses_atomic_modeset(property->dev))
ret = set_property_atomic(arg_obj, file_priv, property, arg->value);
else
ret = set_property_legacy(arg_obj, property, arg->value);
out_unref:
drm_mode_object_put(arg_obj);
return ret;
}
| linux-master | drivers/gpu/drm/drm_mode_object.c |
/*
* Copyright (c) 2016 Intel Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include <linux/export.h>
#include <linux/uaccess.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_auth.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_util.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
/**
* DOC: overview
*
* Frame buffers are abstract memory objects that provide a source of pixels to
* scanout to a CRTC. Applications explicitly request the creation of frame
* buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls and receive an opaque
* handle that can be passed to the KMS CRTC control, plane configuration and
* page flip functions.
*
* Frame buffers rely on the underlying memory manager for allocating backing
* storage. When creating a frame buffer applications pass a memory handle
* (or a list of memory handles for multi-planar formats) through the
* &struct drm_mode_fb_cmd2 argument. For drivers using GEM as their userspace
* buffer management interface this would be a GEM handle. Drivers are however
* free to use their own backing storage object handles, e.g. vmwgfx directly
* exposes special TTM handles to userspace and so expects TTM handles in the
* create ioctl and not GEM handles.
*
* Framebuffers are tracked with &struct drm_framebuffer. They are published
* using drm_framebuffer_init() - after calling that function userspace can use
* and access the framebuffer object. The helper function
* drm_helper_mode_fill_fb_struct() can be used to pre-fill the required
* metadata fields.
*
* The lifetime of a drm framebuffer is controlled with a reference count,
* drivers can grab additional references with drm_framebuffer_get() and drop
* them again with drm_framebuffer_put(). For driver-private framebuffers for
* which the last reference is never dropped (e.g. for the fbdev framebuffer
* when the struct &struct drm_framebuffer is embedded into the fbdev helper
* struct) drivers can manually clean up a framebuffer at module unload time
* with drm_framebuffer_unregister_private(). But doing this is not
* recommended, and it's better to have a normal free-standing &struct
* drm_framebuffer.
*/
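/*
* Editor's note: an illustrative sketch, not part of the original file. A
* driver that needs a framebuffer to stay around after the code that looked
* it up has returned pairs drm_framebuffer_get() with drm_framebuffer_put();
* the asynchronous user below is assumed:
*
*	drm_framebuffer_get(fb);	// extra reference for the worker
*	example_queue_flip_work(fb);	// assumed asynchronous user of fb
*	...
*	drm_framebuffer_put(fb);	// dropped once the work completes
*/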
int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
const struct drm_framebuffer *fb)
{
unsigned int fb_width, fb_height;
fb_width = fb->width << 16;
fb_height = fb->height << 16;
/* Make sure source coordinates are inside the fb. */
if (src_w > fb_width ||
src_x > fb_width - src_w ||
src_h > fb_height ||
src_y > fb_height - src_h) {
drm_dbg_kms(fb->dev, "Invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
src_y >> 16, ((src_y & 0xffff) * 15625) >> 10,
fb->width, fb->height);
return -ENOSPC;
}
return 0;
}
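/*
* Editor's note: a worked example, not from the original source. The source
* coordinates above are in 16.16 fixed point. For a 1920x1080 framebuffer,
* fb_width is 1920 << 16. A request with src_x = 0 and src_w = 1920 << 16
* passes the check, while src_x = 1 << 16 with the same src_w fails because
* src_x > fb_width - src_w, i.e. the source rectangle would extend one pixel
* past the right edge of the framebuffer.
*/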
/**
* drm_mode_addfb - add an FB to the graphics configuration
* @dev: drm device for the ioctl
* @or: pointer to request structure
* @file_priv: drm file
*
* Add a new FB to the specified CRTC, given a user request. This is the
* original addfb ioctl which only supported RGB formats.
*
* Called by the user via ioctl, or by an in-kernel client.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or,
struct drm_file *file_priv)
{
struct drm_mode_fb_cmd2 r = {};
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
r.pixel_format = drm_driver_legacy_fb_format(dev, or->bpp, or->depth);
if (r.pixel_format == DRM_FORMAT_INVALID) {
drm_dbg_kms(dev, "bad {bpp:%d, depth:%d}\n", or->bpp, or->depth);
return -EINVAL;
}
/* convert to new format and call new ioctl */
r.fb_id = or->fb_id;
r.width = or->width;
r.height = or->height;
r.pitches[0] = or->pitch;
r.handles[0] = or->handle;
ret = drm_mode_addfb2(dev, &r, file_priv);
if (ret)
return ret;
or->fb_id = r.fb_id;
return 0;
}
int drm_mode_addfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
return drm_mode_addfb(dev, data, file_priv);
}
static int fb_plane_width(int width,
const struct drm_format_info *format, int plane)
{
if (plane == 0)
return width;
return DIV_ROUND_UP(width, format->hsub);
}
static int fb_plane_height(int height,
const struct drm_format_info *format, int plane)
{
if (plane == 0)
return height;
return DIV_ROUND_UP(height, format->vsub);
}
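/*
* Editor's note: a worked example, not from the original source. For NV12,
* hsub == vsub == 2, so a 1920x1080 framebuffer has a 1920x1080 luma plane
* (plane 0) and a 960x540 chroma plane (plane 1) as computed by
* fb_plane_width() and fb_plane_height() above.
*/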
static int framebuffer_check(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *r)
{
const struct drm_format_info *info;
int i;
/* check if the format is supported at all */
if (!__drm_format_info(r->pixel_format)) {
drm_dbg_kms(dev, "bad framebuffer format %p4cc\n",
&r->pixel_format);
return -EINVAL;
}
if (r->width == 0) {
drm_dbg_kms(dev, "bad framebuffer width %u\n", r->width);
return -EINVAL;
}
if (r->height == 0) {
drm_dbg_kms(dev, "bad framebuffer height %u\n", r->height);
return -EINVAL;
}
/* now let the driver pick its own format info */
info = drm_get_format_info(dev, r);
for (i = 0; i < info->num_planes; i++) {
unsigned int width = fb_plane_width(r->width, info, i);
unsigned int height = fb_plane_height(r->height, info, i);
unsigned int block_size = info->char_per_block[i];
u64 min_pitch = drm_format_info_min_pitch(info, i, width);
if (!block_size && (r->modifier[i] == DRM_FORMAT_MOD_LINEAR)) {
drm_dbg_kms(dev, "Format requires non-linear modifier for plane %d\n", i);
return -EINVAL;
}
if (!r->handles[i]) {
drm_dbg_kms(dev, "no buffer object handle for plane %d\n", i);
return -EINVAL;
}
if (min_pitch > UINT_MAX)
return -ERANGE;
if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
return -ERANGE;
if (block_size && r->pitches[i] < min_pitch) {
drm_dbg_kms(dev, "bad pitch %u for plane %d\n", r->pitches[i], i);
return -EINVAL;
}
if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) {
drm_dbg_kms(dev, "bad fb modifier %llu for plane %d\n",
r->modifier[i], i);
return -EINVAL;
}
if (r->flags & DRM_MODE_FB_MODIFIERS &&
r->modifier[i] != r->modifier[0]) {
drm_dbg_kms(dev, "bad fb modifier %llu for plane %d\n",
r->modifier[i], i);
return -EINVAL;
}
/* modifier specific checks: */
switch (r->modifier[i]) {
case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
/* NOTE: the pitch restriction may be lifted later if it turns
* out that no hw has this restriction:
*/
if (r->pixel_format != DRM_FORMAT_NV12 ||
width % 128 || height % 32 ||
r->pitches[i] % 128) {
drm_dbg_kms(dev, "bad modifier data for plane %d\n", i);
return -EINVAL;
}
break;
default:
break;
}
}
for (i = info->num_planes; i < 4; i++) {
if (r->modifier[i]) {
drm_dbg_kms(dev, "non-zero modifier for unused plane %d\n", i);
return -EINVAL;
}
/* Pre-FB_MODIFIERS userspace didn't clear the structs properly. */
if (!(r->flags & DRM_MODE_FB_MODIFIERS))
continue;
if (r->handles[i]) {
drm_dbg_kms(dev, "buffer object handle for unused plane %d\n", i);
return -EINVAL;
}
if (r->pitches[i]) {
drm_dbg_kms(dev, "non-zero pitch for unused plane %d\n", i);
return -EINVAL;
}
if (r->offsets[i]) {
drm_dbg_kms(dev, "non-zero offset for unused plane %d\n", i);
return -EINVAL;
}
}
return 0;
}
struct drm_framebuffer *
drm_internal_framebuffer_create(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *r,
struct drm_file *file_priv)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_framebuffer *fb;
int ret;
if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
drm_dbg_kms(dev, "bad framebuffer flags 0x%08x\n", r->flags);
return ERR_PTR(-EINVAL);
}
if ((config->min_width > r->width) || (r->width > config->max_width)) {
drm_dbg_kms(dev, "bad framebuffer width %d, should be >= %d && <= %d\n",
r->width, config->min_width, config->max_width);
return ERR_PTR(-EINVAL);
}
if ((config->min_height > r->height) || (r->height > config->max_height)) {
drm_dbg_kms(dev, "bad framebuffer height %d, should be >= %d && <= %d\n",
r->height, config->min_height, config->max_height);
return ERR_PTR(-EINVAL);
}
if (r->flags & DRM_MODE_FB_MODIFIERS &&
dev->mode_config.fb_modifiers_not_supported) {
drm_dbg_kms(dev, "driver does not support fb modifiers\n");
return ERR_PTR(-EINVAL);
}
ret = framebuffer_check(dev, r);
if (ret)
return ERR_PTR(ret);
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
drm_dbg_kms(dev, "could not create framebuffer\n");
return fb;
}
return fb;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_internal_framebuffer_create);
/**
* drm_mode_addfb2 - add an FB to the graphics configuration
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* Add a new FB to the specified CRTC, given a user request with format. This is
* the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
* and uses fourcc codes as pixel format specifiers.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_addfb2(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_fb_cmd2 *r = data;
struct drm_framebuffer *fb;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
fb = drm_internal_framebuffer_create(dev, r, file_priv);
if (IS_ERR(fb))
return PTR_ERR(fb);
drm_dbg_kms(dev, "[FB:%d]\n", fb->base.id);
r->fb_id = fb->base.id;
/* Transfer ownership to the filp for reaping on close */
mutex_lock(&file_priv->fbs_lock);
list_add(&fb->filp_head, &file_priv->fbs);
mutex_unlock(&file_priv->fbs_lock);
return 0;
}
int drm_mode_addfb2_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
#ifdef __BIG_ENDIAN
if (!dev->mode_config.quirk_addfb_prefer_host_byte_order) {
/*
* Drivers must set the
* quirk_addfb_prefer_host_byte_order quirk to make
* the drm_mode_addfb() compat code work correctly on
* bigendian machines.
*
* If they don't, they interpret pixel_format values
* incorrectly for bug compatibility, which in turn
* implies that the ADDFB2 ioctl does not work
* correctly either. So block it to make userspace
* fall back to ADDFB.
*/
drm_dbg_kms(dev, "addfb2 broken on bigendian");
return -EOPNOTSUPP;
}
#endif
return drm_mode_addfb2(dev, data, file_priv);
}
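/*
* Editor's note: an illustrative userspace sketch, not part of the original
* file. Assuming <sys/ioctl.h>, <drm/drm.h> and <drm/drm_fourcc.h> are
* included and a buffer object handle, pitch and size already exist, a
* single-plane XRGB8888 framebuffer is registered through ADDFB2 like this:
*
*	struct drm_mode_fb_cmd2 cmd = {
*		.width = width,
*		.height = height,
*		.pixel_format = DRM_FORMAT_XRGB8888,
*		.handles = { bo_handle },
*		.pitches = { pitch },
*	};
*
*	if (ioctl(drm_fd, DRM_IOCTL_MODE_ADDFB2, &cmd) == 0)
*		fb_id = cmd.fb_id;
*/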
struct drm_mode_rmfb_work {
struct work_struct work;
struct list_head fbs;
};
static void drm_mode_rmfb_work_fn(struct work_struct *w)
{
struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work);
while (!list_empty(&arg->fbs)) {
struct drm_framebuffer *fb =
list_first_entry(&arg->fbs, typeof(*fb), filp_head);
drm_dbg_kms(fb->dev,
"Removing [FB:%d] from all active usage due to RMFB ioctl\n",
fb->base.id);
list_del_init(&fb->filp_head);
drm_framebuffer_remove(fb);
}
}
/**
* drm_mode_rmfb - remove an FB from the configuration
* @dev: drm device
* @fb_id: id of framebuffer to remove
* @file_priv: drm file
*
* Remove the specified FB.
*
* Called by the user via ioctl, or by an in-kernel client.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
struct drm_file *file_priv)
{
struct drm_framebuffer *fb = NULL;
struct drm_framebuffer *fbl = NULL;
int found = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
fb = drm_framebuffer_lookup(dev, file_priv, fb_id);
if (!fb)
return -ENOENT;
mutex_lock(&file_priv->fbs_lock);
list_for_each_entry(fbl, &file_priv->fbs, filp_head)
if (fb == fbl)
found = 1;
if (!found) {
mutex_unlock(&file_priv->fbs_lock);
goto fail_unref;
}
list_del_init(&fb->filp_head);
mutex_unlock(&file_priv->fbs_lock);
/* drop the reference we picked up in framebuffer lookup */
drm_framebuffer_put(fb);
/*
* we now own the reference that was stored in the fbs list
*
* drm_framebuffer_remove may fail with -EINTR on pending signals,
* so run this in a separate stack as there's no way to correctly
* handle this after the fb is already removed from the lookup table.
*/
if (drm_framebuffer_read_refcount(fb) > 1) {
struct drm_mode_rmfb_work arg;
INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
INIT_LIST_HEAD(&arg.fbs);
list_add_tail(&fb->filp_head, &arg.fbs);
schedule_work(&arg.work);
flush_work(&arg.work);
destroy_work_on_stack(&arg.work);
} else
drm_framebuffer_put(fb);
return 0;
fail_unref:
drm_framebuffer_put(fb);
return -ENOENT;
}
int drm_mode_rmfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
uint32_t *fb_id = data;
return drm_mode_rmfb(dev, *fb_id, file_priv);
}
/**
* drm_mode_getfb - get FB info
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* Look up the FB given its ID and return info about it.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_fb_cmd *r = data;
struct drm_framebuffer *fb;
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
if (!fb)
return -ENOENT;
/* Multi-planar framebuffers need getfb2. */
if (fb->format->num_planes > 1) {
ret = -EINVAL;
goto out;
}
if (!fb->funcs->create_handle) {
ret = -ENODEV;
goto out;
}
r->height = fb->height;
r->width = fb->width;
r->depth = fb->format->depth;
r->bpp = drm_format_info_bpp(fb->format, 0);
r->pitch = fb->pitches[0];
/* GET_FB() is an unprivileged ioctl so we must not return a
* buffer-handle to non-master processes! For
* backwards-compatibility reasons, we cannot make GET_FB() privileged,
* so just return an invalid handle for non-masters.
*/
if (!drm_is_current_master(file_priv) && !capable(CAP_SYS_ADMIN)) {
r->handle = 0;
ret = 0;
goto out;
}
ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
out:
drm_framebuffer_put(fb);
return ret;
}
/**
* drm_mode_getfb2_ioctl - get extended FB info
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* Look up the FB given its ID and return info about it.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_getfb2_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_fb_cmd2 *r = data;
struct drm_framebuffer *fb;
unsigned int i;
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
if (!fb)
return -ENOENT;
/* For multi-plane framebuffers, we require the driver to place the
* GEM objects directly in the drm_framebuffer. For single-plane
* framebuffers, we can fall back to create_handle.
*/
if (!fb->obj[0] &&
(fb->format->num_planes > 1 || !fb->funcs->create_handle)) {
ret = -ENODEV;
goto out;
}
r->height = fb->height;
r->width = fb->width;
r->pixel_format = fb->format->format;
r->flags = 0;
if (!dev->mode_config.fb_modifiers_not_supported)
r->flags |= DRM_MODE_FB_MODIFIERS;
for (i = 0; i < ARRAY_SIZE(r->handles); i++) {
r->handles[i] = 0;
r->pitches[i] = 0;
r->offsets[i] = 0;
r->modifier[i] = 0;
}
for (i = 0; i < fb->format->num_planes; i++) {
r->pitches[i] = fb->pitches[i];
r->offsets[i] = fb->offsets[i];
if (!dev->mode_config.fb_modifiers_not_supported)
r->modifier[i] = fb->modifier;
}
/* GET_FB2() is an unprivileged ioctl so we must not return a
* buffer-handle to non master/root processes! To match GET_FB()
* just return invalid handles (0) for non masters/root
* rather than making GET_FB2() privileged.
*/
if (!drm_is_current_master(file_priv) && !capable(CAP_SYS_ADMIN)) {
ret = 0;
goto out;
}
for (i = 0; i < fb->format->num_planes; i++) {
int j;
/* If we reuse the same object for multiple planes, also
* return the same handle.
*/
for (j = 0; j < i; j++) {
if (fb->obj[i] == fb->obj[j]) {
r->handles[i] = r->handles[j];
break;
}
}
if (r->handles[i])
continue;
if (fb->obj[i]) {
ret = drm_gem_handle_create(file_priv, fb->obj[i],
&r->handles[i]);
} else {
WARN_ON(i > 0);
ret = fb->funcs->create_handle(fb, file_priv,
&r->handles[i]);
}
if (ret != 0)
goto out;
}
out:
if (ret != 0) {
/* Delete any previously-created handles on failure. */
for (i = 0; i < ARRAY_SIZE(r->handles); i++) {
int j;
if (r->handles[i])
drm_gem_handle_delete(file_priv, r->handles[i]);
/* Zero out any handles identical to the one we just
* deleted.
*/
for (j = i + 1; j < ARRAY_SIZE(r->handles); j++) {
if (r->handles[j] == r->handles[i])
r->handles[j] = 0;
}
}
}
drm_framebuffer_put(fb);
return ret;
}
/**
* drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* Look up the FB and flush out the damaged area supplied by userspace as a clip
* rectangle list. Generic userspace which does frontbuffer rendering must call
* this ioctl to flush out the changes on manual-update display outputs, e.g.
* usb display-link, mipi manual update panels or edp panel self refresh modes.
*
* Modesetting drivers which always update the frontbuffer do not need to
* implement the corresponding &drm_framebuffer_funcs.dirty callback.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_clip_rect __user *clips_ptr;
struct drm_clip_rect *clips = NULL;
struct drm_mode_fb_dirty_cmd *r = data;
struct drm_framebuffer *fb;
unsigned flags;
int num_clips;
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
if (!fb)
return -ENOENT;
num_clips = r->num_clips;
clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
if (!num_clips != !clips_ptr) {
ret = -EINVAL;
goto out_err1;
}
flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
/* If userspace annotates copy, clips must come in pairs */
if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
ret = -EINVAL;
goto out_err1;
}
if (num_clips && clips_ptr) {
if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
ret = -EINVAL;
goto out_err1;
}
clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
if (!clips) {
ret = -ENOMEM;
goto out_err1;
}
ret = copy_from_user(clips, clips_ptr,
num_clips * sizeof(*clips));
if (ret) {
ret = -EFAULT;
goto out_err2;
}
}
if (fb->funcs->dirty) {
ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
clips, num_clips);
} else {
ret = -ENOSYS;
}
out_err2:
kfree(clips);
out_err1:
drm_framebuffer_put(fb);
return ret;
}
/**
* drm_fb_release - remove and free the FBs on this file
* @priv: drm file whose framebuffers should be destroyed
*
* Destroy all the FBs associated with @priv. This is called when the DRM file
* is closed, not via an ioctl, and it does not return a value.
*/
void drm_fb_release(struct drm_file *priv)
{
struct drm_framebuffer *fb, *tfb;
struct drm_mode_rmfb_work arg;
INIT_LIST_HEAD(&arg.fbs);
/*
* When the file gets released that means no one else can access the fb
* list any more, so no need to grab fpriv->fbs_lock. And we need to
* avoid upsetting lockdep since the universal cursor code adds a
* framebuffer while holding mutex locks.
*
* Note that a real deadlock between fpriv->fbs_lock and the modeset
* locks is impossible here since no one else but this function can get
* at it any more.
*/
list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
if (drm_framebuffer_read_refcount(fb) > 1) {
list_move_tail(&fb->filp_head, &arg.fbs);
} else {
list_del_init(&fb->filp_head);
/* This drops the fpriv->fbs reference. */
drm_framebuffer_put(fb);
}
}
if (!list_empty(&arg.fbs)) {
INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
schedule_work(&arg.work);
flush_work(&arg.work);
destroy_work_on_stack(&arg.work);
}
}
void drm_framebuffer_free(struct kref *kref)
{
struct drm_framebuffer *fb =
container_of(kref, struct drm_framebuffer, base.refcount);
struct drm_device *dev = fb->dev;
/*
* The lookup idr holds a weak reference, which has not necessarily been
* removed at this point. Check for that.
*/
drm_mode_object_unregister(dev, &fb->base);
fb->funcs->destroy(fb);
}
/**
* drm_framebuffer_init - initialize a framebuffer
* @dev: DRM device
* @fb: framebuffer to be initialized
* @funcs: ... with these functions
*
* Allocates an ID for the framebuffer's parent mode object, sets its mode
* functions & device file and adds it to the master fd list.
*
* IMPORTANT:
* This function publishes the fb and makes it available for concurrent access
* by other users. Which means by this point the fb _must_ be fully set up -
* since all the fb attributes are invariant over its lifetime, no further
* locking but only correct reference counting is required.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
const struct drm_framebuffer_funcs *funcs)
{
int ret;
if (WARN_ON_ONCE(fb->dev != dev || !fb->format))
return -EINVAL;
INIT_LIST_HEAD(&fb->filp_head);
fb->funcs = funcs;
strcpy(fb->comm, current->comm);
ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB,
false, drm_framebuffer_free);
if (ret)
goto out;
mutex_lock(&dev->mode_config.fb_lock);
dev->mode_config.num_fb++;
list_add(&fb->head, &dev->mode_config.fb_list);
mutex_unlock(&dev->mode_config.fb_lock);
drm_mode_object_register(dev, &fb->base);
out:
return ret;
}
EXPORT_SYMBOL(drm_framebuffer_init);
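/*
* Editor's note: an illustrative sketch, not part of the original file. A
* typical &drm_mode_config_funcs.fb_create implementation embeds
* &struct drm_framebuffer in a driver structure, fills in the metadata (e.g.
* with drm_helper_mode_fill_fb_struct()) and only then publishes the fb; the
* example_fb structure and funcs are assumed:
*
*	struct example_fb *efb = kzalloc(sizeof(*efb), GFP_KERNEL);
*
*	if (!efb)
*		return ERR_PTR(-ENOMEM);
*	drm_helper_mode_fill_fb_struct(dev, &efb->base, mode_cmd);
*	ret = drm_framebuffer_init(dev, &efb->base, &example_fb_funcs);
*	if (ret) {
*		kfree(efb);
*		return ERR_PTR(ret);
*	}
*	return &efb->base;
*/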
/**
* drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
* @dev: drm device
* @file_priv: drm file to check for lease against.
* @id: id of the fb object
*
* If successful, this grabs an additional reference to the framebuffer -
* callers need to make sure to eventually unreference the returned framebuffer
* again, using drm_framebuffer_put().
*/
struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t id)
{
struct drm_mode_object *obj;
struct drm_framebuffer *fb = NULL;
obj = __drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_FB);
if (obj)
fb = obj_to_fb(obj);
return fb;
}
EXPORT_SYMBOL(drm_framebuffer_lookup);
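/*
* Editor's note: an illustrative sketch, not part of the original file. Every
* successful lookup must be balanced with drm_framebuffer_put():
*
*	fb = drm_framebuffer_lookup(dev, file_priv, fb_id);
*	if (!fb)
*		return -ENOENT;
*	... use fb ...
*	drm_framebuffer_put(fb);
*/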
/**
* drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
* @fb: fb to unregister
*
* Drivers need to call this when cleaning up driver-private framebuffers, e.g.
* those used for fbdev. Note that the caller must hold a reference of its own,
* i.e. the object may not be destroyed through this call (since it'll lead to a
* locking inversion).
*
* NOTE: This function is deprecated. For driver-private framebuffers it is not
* recommended to embed a framebuffer struct into the fbdev struct; instead, a
* framebuffer pointer is preferred and drm_framebuffer_put() should be called
* when the framebuffer is to be cleaned up.
*/
void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
{
struct drm_device *dev;
if (!fb)
return;
dev = fb->dev;
/* Mark fb as reaped and drop idr ref. */
drm_mode_object_unregister(dev, &fb->base);
}
EXPORT_SYMBOL(drm_framebuffer_unregister_private);
/**
* drm_framebuffer_cleanup - remove a framebuffer object
* @fb: framebuffer to remove
*
* Cleanup framebuffer. This function is intended to be used from the drivers
* &drm_framebuffer_funcs.destroy callback. It can also be used to clean up
* driver private framebuffers embedded into a larger structure.
*
* Note that this function does not remove the fb from active usage - if it is
* still used anywhere, hilarity can ensue since userspace could call getfb on
* the id and get back -EINVAL. Obviously no concern at driver unload time.
*
* Also, the framebuffer will not be removed from the lookup idr - for
* user-created framebuffers this will happen in the rmfb ioctl. For
* driver-private objects (e.g. for fbdev) drivers need to explicitly call
* drm_framebuffer_unregister_private.
*/
void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
mutex_lock(&dev->mode_config.fb_lock);
list_del(&fb->head);
dev->mode_config.num_fb--;
mutex_unlock(&dev->mode_config.fb_lock);
}
EXPORT_SYMBOL(drm_framebuffer_cleanup);
static int atomic_remove_fb(struct drm_framebuffer *fb)
{
struct drm_modeset_acquire_ctx ctx;
struct drm_device *dev = fb->dev;
struct drm_atomic_state *state;
struct drm_plane *plane;
struct drm_connector *conn __maybe_unused;
struct drm_connector_state *conn_state;
int i, ret;
unsigned plane_mask;
bool disable_crtcs = false;
retry_disable:
drm_modeset_acquire_init(&ctx, 0);
state = drm_atomic_state_alloc(dev);
if (!state) {
ret = -ENOMEM;
goto out;
}
state->acquire_ctx = &ctx;
retry:
plane_mask = 0;
ret = drm_modeset_lock_all_ctx(dev, &ctx);
if (ret)
goto unlock;
drm_for_each_plane(plane, dev) {
struct drm_plane_state *plane_state;
if (plane->state->fb != fb)
continue;
drm_dbg_kms(dev,
"Disabling [PLANE:%d:%s] because [FB:%d] is removed\n",
plane->base.id, plane->name, fb->base.id);
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
goto unlock;
}
if (disable_crtcs && plane_state->crtc->primary == plane) {
struct drm_crtc_state *crtc_state;
drm_dbg_kms(dev,
"Disabling [CRTC:%d:%s] because [FB:%d] is removed\n",
plane_state->crtc->base.id,
plane_state->crtc->name, fb->base.id);
crtc_state = drm_atomic_get_existing_crtc_state(state, plane_state->crtc);
ret = drm_atomic_add_affected_connectors(state, plane_state->crtc);
if (ret)
goto unlock;
crtc_state->active = false;
ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
if (ret)
goto unlock;
}
drm_atomic_set_fb_for_plane(plane_state, NULL);
ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
if (ret)
goto unlock;
plane_mask |= drm_plane_mask(plane);
}
/* This list is only filled when disable_crtcs is set. */
for_each_new_connector_in_state(state, conn, conn_state, i) {
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
if (ret)
goto unlock;
}
if (plane_mask)
ret = drm_atomic_commit(state);
unlock:
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
goto retry;
}
drm_atomic_state_put(state);
out:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
if (ret == -EINVAL && !disable_crtcs) {
disable_crtcs = true;
goto retry_disable;
}
return ret;
}
static void legacy_remove_fb(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
struct drm_crtc *crtc;
struct drm_plane *plane;
drm_modeset_lock_all(dev);
/* remove from any CRTC */
drm_for_each_crtc(crtc, dev) {
if (crtc->primary->fb == fb) {
drm_dbg_kms(dev,
"Disabling [CRTC:%d:%s] because [FB:%d] is removed\n",
crtc->base.id, crtc->name, fb->base.id);
/* should turn off the crtc */
if (drm_crtc_force_disable(crtc))
DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
}
}
drm_for_each_plane(plane, dev) {
if (plane->fb == fb) {
drm_dbg_kms(dev,
"Disabling [PLANE:%d:%s] because [FB:%d] is removed\n",
plane->base.id, plane->name, fb->base.id);
drm_plane_force_disable(plane);
}
}
drm_modeset_unlock_all(dev);
}
/**
* drm_framebuffer_remove - remove and unreference a framebuffer object
* @fb: framebuffer to remove
*
* Scans all the CRTCs and planes of @fb's DRM device. If they're
* using @fb, removes it, setting it to NULL. Then drops the reference to the
* passed-in framebuffer. Might take the modeset locks.
*
* Note that this function optimizes the cleanup away if the caller holds the
* last reference to the framebuffer. It is also guaranteed to not take the
* modeset locks in this case.
*/
void drm_framebuffer_remove(struct drm_framebuffer *fb)
{
struct drm_device *dev;
if (!fb)
return;
dev = fb->dev;
WARN_ON(!list_empty(&fb->filp_head));
/*
* drm ABI mandates that we remove any deleted framebuffers from active
* usage. But since most sane clients only remove framebuffers they no
* longer need, try to optimize this away.
*
* Since we're holding a reference ourselves, observing a refcount of 1
* means that we're the last holder and can skip it. Also, the refcount
* can never increase from 1 again, so we don't need any barriers or
* locks.
*
* Note that userspace could try to race with use and instate a new
* usage _after_ we've cleared all current ones. End result will be an
* in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot
* in this manner.
*/
if (drm_framebuffer_read_refcount(fb) > 1) {
if (drm_drv_uses_atomic_modeset(dev)) {
int ret = atomic_remove_fb(fb);
WARN(ret, "atomic remove_fb failed with %i\n", ret);
} else
legacy_remove_fb(fb);
}
drm_framebuffer_put(fb);
}
EXPORT_SYMBOL(drm_framebuffer_remove);
/**
* drm_framebuffer_plane_width - width of the plane given the first plane
* @width: width of the first plane
* @fb: the framebuffer
* @plane: plane index
*
* Returns:
* The width of @plane, given that the width of the first plane is @width.
*/
int drm_framebuffer_plane_width(int width,
const struct drm_framebuffer *fb, int plane)
{
if (plane >= fb->format->num_planes)
return 0;
return fb_plane_width(width, fb->format, plane);
}
EXPORT_SYMBOL(drm_framebuffer_plane_width);
/**
* drm_framebuffer_plane_height - height of the plane given the first plane
* @height: height of the first plane
* @fb: the framebuffer
* @plane: plane index
*
* Returns:
* The height of @plane, given that the height of the first plane is @height.
*/
int drm_framebuffer_plane_height(int height,
const struct drm_framebuffer *fb, int plane)
{
if (plane >= fb->format->num_planes)
return 0;
return fb_plane_height(height, fb->format, plane);
}
EXPORT_SYMBOL(drm_framebuffer_plane_height);
void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_framebuffer *fb)
{
unsigned int i;
drm_printf_indent(p, indent, "allocated by = %s\n", fb->comm);
drm_printf_indent(p, indent, "refcount=%u\n",
drm_framebuffer_read_refcount(fb));
drm_printf_indent(p, indent, "format=%p4cc\n", &fb->format->format);
drm_printf_indent(p, indent, "modifier=0x%llx\n", fb->modifier);
drm_printf_indent(p, indent, "size=%ux%u\n", fb->width, fb->height);
drm_printf_indent(p, indent, "layers:\n");
for (i = 0; i < fb->format->num_planes; i++) {
drm_printf_indent(p, indent + 1, "size[%u]=%dx%d\n", i,
drm_framebuffer_plane_width(fb->width, fb, i),
drm_framebuffer_plane_height(fb->height, fb, i));
drm_printf_indent(p, indent + 1, "pitch[%u]=%u\n", i, fb->pitches[i]);
drm_printf_indent(p, indent + 1, "offset[%u]=%u\n", i, fb->offsets[i]);
drm_printf_indent(p, indent + 1, "obj[%u]:%s\n", i,
fb->obj[i] ? "" : "(null)");
if (fb->obj[i])
drm_gem_print_info(p, indent + 2, fb->obj[i]);
}
}
#ifdef CONFIG_DEBUG_FS
static int drm_framebuffer_info(struct seq_file *m, void *data)
{
struct drm_debugfs_entry *entry = m->private;
struct drm_device *dev = entry->dev;
struct drm_printer p = drm_seq_file_printer(m);
struct drm_framebuffer *fb;
mutex_lock(&dev->mode_config.fb_lock);
drm_for_each_fb(fb, dev) {
drm_printf(&p, "framebuffer[%u]:\n", fb->base.id);
drm_framebuffer_print_info(&p, 1, fb);
}
mutex_unlock(&dev->mode_config.fb_lock);
return 0;
}
static const struct drm_debugfs_info drm_framebuffer_debugfs_list[] = {
{ "framebuffer", drm_framebuffer_info, 0 },
};
void drm_framebuffer_debugfs_init(struct drm_minor *minor)
{
drm_debugfs_add_files(minor->dev, drm_framebuffer_debugfs_list,
ARRAY_SIZE(drm_framebuffer_debugfs_list));
}
#endif
| linux-master | drivers/gpu/drm/drm_framebuffer.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright © 2017 Keith Packard <[email protected]>
*/
#include <linux/file.h>
#include <linux/uaccess.h>
#include <drm/drm_auth.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_lease.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
/**
* DOC: drm leasing
*
* DRM leases provide information about whether a DRM master may control a DRM
* mode setting object. This enables the creation of multiple DRM masters that
* manage subsets of display resources.
*
* The original DRM master of a device 'owns' the available drm resources. It
* may create additional DRM masters and 'lease' resources which it controls
* to the new DRM master. This gives the new DRM master control over the
* leased resources until the owner revokes the lease, or the new DRM master
* is closed. Some helpful terminology:
*
* - An 'owner' is a &struct drm_master that is not leasing objects from
* another &struct drm_master, and hence 'owns' the objects. The owner can be
* identified as the &struct drm_master for which &drm_master.lessor is NULL.
*
* - A 'lessor' is a &struct drm_master which is leasing objects to one or more
* other &struct drm_master. Currently, lessees are not allowed to
* create sub-leases, hence the lessor is the same as the owner.
*
* - A 'lessee' is a &struct drm_master which is leasing objects from some
* other &struct drm_master. Each lessee only leases resources from a single
* lessor recorded in &drm_master.lessor, and holds the set of objects that
* it is leasing in &drm_master.leases.
*
* - A 'lease' is a contract between the lessor and lessee that identifies
* which resources may be controlled by the lessee. All of the resources
* that are leased must be owned by or leased to the lessor, and lessors are
* not permitted to lease the same object to multiple lessees.
*
* The set of objects any &struct drm_master 'controls' is limited to the set
* of objects it leases (for lessees) or all objects (for owners).
*
* Objects not controlled by a &struct drm_master cannot be modified through
* the various state manipulating ioctls, and any state reported back to user
* space will be edited to make them appear idle and/or unusable. For
* instance, connectors always report 'disconnected', while encoders
* report no possible crtcs or clones.
*
* Since each lessee may lease objects from a single lessor, display resource
* leases form a tree of &struct drm_master. As lessees are currently not
* allowed to create sub-leases, the tree depth is limited to 1. All of
* these get activated simultaneously when the top level device owner changes
* through the SETMASTER or DROPMASTER IOCTL, so &drm_device.master points to
* the owner at the top of the lease tree (i.e. the &struct drm_master for which
* &drm_master.lessor is NULL). The full list of lessees that are leasing
* objects from the owner can be searched via the owner's
* &drm_master.lessee_idr.
*/
#define drm_for_each_lessee(lessee, lessor) \
list_for_each_entry((lessee), &(lessor)->lessees, lessee_list)
static uint64_t drm_lease_idr_object;
struct drm_master *drm_lease_owner(struct drm_master *master)
{
while (master->lessor != NULL)
master = master->lessor;
return master;
}
static struct drm_master*
_drm_find_lessee(struct drm_master *master, int lessee_id)
{
lockdep_assert_held(&master->dev->mode_config.idr_mutex);
return idr_find(&drm_lease_owner(master)->lessee_idr, lessee_id);
}
static int _drm_lease_held_master(struct drm_master *master, int id)
{
lockdep_assert_held(&master->dev->mode_config.idr_mutex);
if (master->lessor)
return idr_find(&master->leases, id) != NULL;
return true;
}
/* Checks if the given object has been leased to some lessee of drm_master */
static bool _drm_has_leased(struct drm_master *master, int id)
{
struct drm_master *lessee;
lockdep_assert_held(&master->dev->mode_config.idr_mutex);
drm_for_each_lessee(lessee, master)
if (_drm_lease_held_master(lessee, id))
return true;
return false;
}
/* Called with idr_mutex held */
bool _drm_lease_held(struct drm_file *file_priv, int id)
{
bool ret;
struct drm_master *master;
if (!file_priv)
return true;
master = drm_file_get_master(file_priv);
if (!master)
return true;
ret = _drm_lease_held_master(master, id);
drm_master_put(&master);
return ret;
}
bool drm_lease_held(struct drm_file *file_priv, int id)
{
struct drm_master *master;
bool ret;
if (!file_priv)
return true;
master = drm_file_get_master(file_priv);
if (!master)
return true;
if (!master->lessor) {
ret = true;
goto out;
}
mutex_lock(&master->dev->mode_config.idr_mutex);
ret = _drm_lease_held_master(master, id);
mutex_unlock(&master->dev->mode_config.idr_mutex);
out:
drm_master_put(&master);
return ret;
}
/*
* Given a bitmask of crtcs to check, reconstructs a crtc mask based on the
* crtcs which are visible through the specified file.
*/
uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in)
{
struct drm_master *master;
struct drm_device *dev;
struct drm_crtc *crtc;
int count_in, count_out;
uint32_t crtcs_out = 0;
if (!file_priv)
return crtcs_in;
master = drm_file_get_master(file_priv);
if (!master)
return crtcs_in;
if (!master->lessor) {
crtcs_out = crtcs_in;
goto out;
}
dev = master->dev;
count_in = count_out = 0;
mutex_lock(&master->dev->mode_config.idr_mutex);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (_drm_lease_held_master(master, crtc->base.id)) {
uint32_t mask_in = 1ul << count_in;
if ((crtcs_in & mask_in) != 0) {
uint32_t mask_out = 1ul << count_out;
crtcs_out |= mask_out;
}
count_out++;
}
count_in++;
}
mutex_unlock(&master->dev->mode_config.idr_mutex);
out:
drm_master_put(&master);
return crtcs_out;
}
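/*
* Editor's note: a worked example, not from the original source. Suppose the
* device has crtcs A, B and C at indices 0, 1 and 2 of crtc_list, and the
* lessee only holds A and C. In the lessee's view A is crtc 0 and C is crtc 1.
* The loop above therefore turns a device-wide mask crtcs_in == 0x4 (only C)
* into crtcs_out == 0x2, and crtcs_in == 0x5 (A and C) into crtcs_out == 0x3;
* the bit for B is dropped because the lessee cannot see that crtc.
*/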
/*
* Uses drm_master_create to allocate a new drm_master, then checks to
* make sure all of the desired objects can be leased, atomically
* leasing them to the new drm_master.
*
* ERR_PTR(-EACCES) some other master holds the title to any object
* ERR_PTR(-ENOENT) some object is not a valid DRM object for this device
* ERR_PTR(-EBUSY) some other lessee holds title to this object
* ERR_PTR(-EEXIST) same object specified more than once in the provided list
* ERR_PTR(-ENOMEM) allocation failed
*/
static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr *leases)
{
struct drm_device *dev = lessor->dev;
int error;
struct drm_master *lessee;
int object;
int id;
void *entry;
drm_dbg_lease(dev, "lessor %d\n", lessor->lessee_id);
lessee = drm_master_create(lessor->dev);
if (!lessee) {
drm_dbg_lease(dev, "drm_master_create failed\n");
return ERR_PTR(-ENOMEM);
}
mutex_lock(&dev->mode_config.idr_mutex);
idr_for_each_entry(leases, entry, object) {
error = 0;
if (!idr_find(&dev->mode_config.object_idr, object))
error = -ENOENT;
else if (_drm_has_leased(lessor, object))
error = -EBUSY;
if (error != 0) {
drm_dbg_lease(dev, "object %d failed %d\n", object, error);
goto out_lessee;
}
}
/* Insert the new lessee into the tree */
id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL);
if (id < 0) {
error = id;
goto out_lessee;
}
lessee->lessee_id = id;
lessee->lessor = drm_master_get(lessor);
list_add_tail(&lessee->lessee_list, &lessor->lessees);
/* Move the leases over */
lessee->leases = *leases;
drm_dbg_lease(dev, "new lessee %d %p, lessor %d %p\n",
lessee->lessee_id, lessee, lessor->lessee_id, lessor);
mutex_unlock(&dev->mode_config.idr_mutex);
return lessee;
out_lessee:
mutex_unlock(&dev->mode_config.idr_mutex);
drm_master_put(&lessee);
return ERR_PTR(error);
}
void drm_lease_destroy(struct drm_master *master)
{
struct drm_device *dev = master->dev;
mutex_lock(&dev->mode_config.idr_mutex);
drm_dbg_lease(dev, "drm_lease_destroy %d\n", master->lessee_id);
/* This master is referenced by all lessees, hence it cannot be destroyed
* until all of them have been destroyed.
*/
WARN_ON(!list_empty(&master->lessees));
/* Remove this master from the lessee idr in the owner */
if (master->lessee_id != 0) {
drm_dbg_lease(dev, "remove master %d from device list of lessees\n",
master->lessee_id);
idr_remove(&(drm_lease_owner(master)->lessee_idr), master->lessee_id);
}
/* Remove this master from any lessee list it may be on */
list_del(&master->lessee_list);
mutex_unlock(&dev->mode_config.idr_mutex);
if (master->lessor) {
/* Tell the master to check the lessee list */
drm_sysfs_lease_event(dev);
drm_master_put(&master->lessor);
}
drm_dbg_lease(dev, "drm_lease_destroy done %d\n", master->lessee_id);
}
static void _drm_lease_revoke(struct drm_master *top)
{
int object;
void *entry;
struct drm_master *master = top;
lockdep_assert_held(&top->dev->mode_config.idr_mutex);
/*
* Walk the tree starting at 'top' emptying all leases. Because
* the tree is fully connected, we can do this without recursing
*/
for (;;) {
drm_dbg_lease(master->dev, "revoke leases for %p %d\n",
master, master->lessee_id);
/* Evacuate the lease */
idr_for_each_entry(&master->leases, entry, object)
idr_remove(&master->leases, object);
/* Depth-first list walk */
/* Down */
if (!list_empty(&master->lessees)) {
master = list_first_entry(&master->lessees, struct drm_master, lessee_list);
} else {
/* Up */
while (master != top && master == list_last_entry(&master->lessor->lessees, struct drm_master, lessee_list))
master = master->lessor;
if (master == top)
break;
/* Over */
master = list_next_entry(master, lessee_list);
}
}
}
void drm_lease_revoke(struct drm_master *top)
{
mutex_lock(&top->dev->mode_config.idr_mutex);
_drm_lease_revoke(top);
mutex_unlock(&top->dev->mode_config.idr_mutex);
}
static int validate_lease(struct drm_device *dev,
int object_count,
struct drm_mode_object **objects,
bool universal_planes)
{
int o;
int has_crtc = -1;
int has_connector = -1;
int has_plane = -1;
/* We want to confirm that there is at least one crtc and one connector
* object, plus at least one plane object if universal planes are enabled. */
for (o = 0; o < object_count; o++) {
if (objects[o]->type == DRM_MODE_OBJECT_CRTC && has_crtc == -1) {
has_crtc = o;
}
if (objects[o]->type == DRM_MODE_OBJECT_CONNECTOR && has_connector == -1)
has_connector = o;
if (universal_planes) {
if (objects[o]->type == DRM_MODE_OBJECT_PLANE && has_plane == -1)
has_plane = o;
}
}
if (has_crtc == -1 || has_connector == -1)
return -EINVAL;
if (universal_planes && has_plane == -1)
return -EINVAL;
return 0;
}
static int fill_object_idr(struct drm_device *dev,
struct drm_file *lessor_priv,
struct idr *leases,
int object_count,
u32 *object_ids)
{
struct drm_mode_object **objects;
u32 o;
int ret;
bool universal_planes = READ_ONCE(lessor_priv->universal_planes);
objects = kcalloc(object_count, sizeof(struct drm_mode_object *),
GFP_KERNEL);
if (!objects)
return -ENOMEM;
/* step one - get references to all the mode objects
and check for validity. */
for (o = 0; o < object_count; o++) {
objects[o] = drm_mode_object_find(dev, lessor_priv,
object_ids[o],
DRM_MODE_OBJECT_ANY);
if (!objects[o]) {
ret = -ENOENT;
goto out_free_objects;
}
if (!drm_mode_object_lease_required(objects[o]->type)) {
DRM_DEBUG_KMS("invalid object for lease\n");
ret = -EINVAL;
goto out_free_objects;
}
}
ret = validate_lease(dev, object_count, objects, universal_planes);
if (ret) {
drm_dbg_lease(dev, "lease validation failed\n");
goto out_free_objects;
}
/* add their IDs to the lease request - taking into account
universal planes */
for (o = 0; o < object_count; o++) {
struct drm_mode_object *obj = objects[o];
u32 object_id = objects[o]->id;
drm_dbg_lease(dev, "Adding object %d to lease\n", object_id);
/*
* We're using an IDR to hold the set of leased
* objects, but we don't need to point at the object's
* data structure from the lease as the main object_idr
* will be used to actually find that. Instead, all we
* really want is a 'leased/not-leased' result, for
* which any non-NULL pointer will work fine.
*/
ret = idr_alloc(leases, &drm_lease_idr_object , object_id, object_id + 1, GFP_KERNEL);
if (ret < 0) {
drm_dbg_lease(dev, "Object %d cannot be inserted into leases (%d)\n",
object_id, ret);
goto out_free_objects;
}
if (obj->type == DRM_MODE_OBJECT_CRTC && !universal_planes) {
struct drm_crtc *crtc = obj_to_crtc(obj);
ret = idr_alloc(leases, &drm_lease_idr_object, crtc->primary->base.id, crtc->primary->base.id + 1, GFP_KERNEL);
if (ret < 0) {
drm_dbg_lease(dev, "Object primary plane %d cannot be inserted into leases (%d)\n",
object_id, ret);
goto out_free_objects;
}
if (crtc->cursor) {
ret = idr_alloc(leases, &drm_lease_idr_object, crtc->cursor->base.id, crtc->cursor->base.id + 1, GFP_KERNEL);
if (ret < 0) {
drm_dbg_lease(dev, "Object cursor plane %d cannot be inserted into leases (%d)\n",
object_id, ret);
goto out_free_objects;
}
}
}
}
ret = 0;
out_free_objects:
for (o = 0; o < object_count; o++) {
if (objects[o])
drm_mode_object_put(objects[o]);
}
kfree(objects);
return ret;
}
/*
* The master associated with the specified file will have a lease
* created containing the objects specified in the ioctl structure.
* A file descriptor will be allocated for that and returned to the
* application.
*/
int drm_mode_create_lease_ioctl(struct drm_device *dev,
void *data, struct drm_file *lessor_priv)
{
struct drm_mode_create_lease *cl = data;
size_t object_count;
int ret = 0;
struct idr leases;
struct drm_master *lessor;
struct drm_master *lessee = NULL;
struct file *lessee_file = NULL;
struct file *lessor_file = lessor_priv->filp;
struct drm_file *lessee_priv;
int fd = -1;
uint32_t *object_ids;
/* Can't lease without MODESET */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
if (cl->flags && (cl->flags & ~(O_CLOEXEC | O_NONBLOCK))) {
drm_dbg_lease(dev, "invalid flags\n");
return -EINVAL;
}
lessor = drm_file_get_master(lessor_priv);
/* Do not allow sub-leases */
if (lessor->lessor) {
drm_dbg_lease(dev, "recursive leasing not allowed\n");
ret = -EINVAL;
goto out_lessor;
}
object_count = cl->object_count;
/* Handle leased objects, if any */
idr_init(&leases);
if (object_count != 0) {
object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
array_size(object_count, sizeof(__u32)));
if (IS_ERR(object_ids)) {
ret = PTR_ERR(object_ids);
idr_destroy(&leases);
goto out_lessor;
}
/* fill and validate the object idr */
ret = fill_object_idr(dev, lessor_priv, &leases,
object_count, object_ids);
kfree(object_ids);
if (ret) {
drm_dbg_lease(dev, "lease object lookup failed: %i\n", ret);
idr_destroy(&leases);
goto out_lessor;
}
}
/* Allocate a file descriptor for the lease */
fd = get_unused_fd_flags(cl->flags & (O_CLOEXEC | O_NONBLOCK));
if (fd < 0) {
idr_destroy(&leases);
ret = fd;
goto out_lessor;
}
drm_dbg_lease(dev, "Creating lease\n");
/* lessee will take the ownership of leases */
lessee = drm_lease_create(lessor, &leases);
if (IS_ERR(lessee)) {
ret = PTR_ERR(lessee);
idr_destroy(&leases);
goto out_leases;
}
/* Clone the lessor file to create a new file for us */
drm_dbg_lease(dev, "Allocating lease file\n");
lessee_file = file_clone_open(lessor_file);
if (IS_ERR(lessee_file)) {
ret = PTR_ERR(lessee_file);
goto out_lessee;
}
lessee_priv = lessee_file->private_data;
/* Change the file to a master one */
drm_master_put(&lessee_priv->master);
lessee_priv->master = lessee;
lessee_priv->is_master = 1;
lessee_priv->authenticated = 1;
/* Pass fd back to userspace */
drm_dbg_lease(dev, "Returning fd %d id %d\n", fd, lessee->lessee_id);
cl->fd = fd;
cl->lessee_id = lessee->lessee_id;
/* Hook up the fd */
fd_install(fd, lessee_file);
drm_master_put(&lessor);
drm_dbg_lease(dev, "drm_mode_create_lease_ioctl succeeded\n");
return 0;
out_lessee:
drm_master_put(&lessee);
out_leases:
put_unused_fd(fd);
out_lessor:
drm_master_put(&lessor);
drm_dbg_lease(dev, "drm_mode_create_lease_ioctl failed: %d\n", ret);
return ret;
}
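/*
* Editor's note: an illustrative userspace sketch, not part of the original
* file. Assuming <stdint.h>, <fcntl.h>, <sys/ioctl.h> and <drm/drm.h> are
* included and a DRM master fd plus the ids of one connector, one crtc and
* (with universal planes enabled) its primary plane are known, a lease is
* created like this:
*
*	uint32_t objects[] = { connector_id, crtc_id, plane_id };
*	struct drm_mode_create_lease cl = {
*		.object_ids = (uintptr_t)objects,
*		.object_count = 3,
*		.flags = O_CLOEXEC,
*	};
*
*	if (ioctl(master_fd, DRM_IOCTL_MODE_CREATE_LEASE, &cl) == 0) {
*		lease_fd = cl.fd;		// new DRM master fd for the lessee
*		lessee_id = cl.lessee_id;	// id usable with the list/revoke ioctls
*	}
*/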
int drm_mode_list_lessees_ioctl(struct drm_device *dev,
void *data, struct drm_file *lessor_priv)
{
struct drm_mode_list_lessees *arg = data;
__u32 __user *lessee_ids = (__u32 __user *) (uintptr_t) (arg->lessees_ptr);
__u32 count_lessees = arg->count_lessees;
struct drm_master *lessor, *lessee;
int count;
int ret = 0;
if (arg->pad)
return -EINVAL;
/* Can't lease without MODESET */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
lessor = drm_file_get_master(lessor_priv);
drm_dbg_lease(dev, "List lessees for %d\n", lessor->lessee_id);
mutex_lock(&dev->mode_config.idr_mutex);
count = 0;
drm_for_each_lessee(lessee, lessor) {
/* Only list un-revoked leases */
if (!idr_is_empty(&lessee->leases)) {
if (count_lessees > count) {
drm_dbg_lease(dev, "Add lessee %d\n",
lessee->lessee_id);
ret = put_user(lessee->lessee_id, lessee_ids + count);
if (ret)
break;
}
count++;
}
}
drm_dbg_lease(dev, "Lessor leases to %d\n", count);
if (ret == 0)
arg->count_lessees = count;
mutex_unlock(&dev->mode_config.idr_mutex);
drm_master_put(&lessor);
return ret;
}
/* Return the list of leased objects for the specified lessee */
int drm_mode_get_lease_ioctl(struct drm_device *dev,
void *data, struct drm_file *lessee_priv)
{
struct drm_mode_get_lease *arg = data;
__u32 __user *object_ids = (__u32 __user *) (uintptr_t) (arg->objects_ptr);
__u32 count_objects = arg->count_objects;
struct drm_master *lessee;
struct idr *object_idr;
int count;
void *entry;
int object;
int ret = 0;
if (arg->pad)
return -EINVAL;
/* Can't lease without MODESET */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
lessee = drm_file_get_master(lessee_priv);
drm_dbg_lease(dev, "get lease for %d\n", lessee->lessee_id);
mutex_lock(&dev->mode_config.idr_mutex);
if (lessee->lessor == NULL)
/* owner can use all objects */
object_idr = &lessee->dev->mode_config.object_idr;
else
/* lessee can only use allowed objects */
object_idr = &lessee->leases;
count = 0;
idr_for_each_entry(object_idr, entry, object) {
if (count_objects > count) {
drm_dbg_lease(dev, "adding object %d\n", object);
ret = put_user(object, object_ids + count);
if (ret)
break;
}
count++;
}
DRM_DEBUG("lease holds %d objects\n", count);
if (ret == 0)
arg->count_objects = count;
mutex_unlock(&dev->mode_config.idr_mutex);
drm_master_put(&lessee);
return ret;
}
/*
* This removes all of the objects from the lease without
* actually getting rid of the lease itself; that way all
* references to it still work correctly
*/
int drm_mode_revoke_lease_ioctl(struct drm_device *dev,
void *data, struct drm_file *lessor_priv)
{
struct drm_mode_revoke_lease *arg = data;
struct drm_master *lessor;
struct drm_master *lessee;
int ret = 0;
drm_dbg_lease(dev, "revoke lease for %d\n", arg->lessee_id);
/* Can't lease without MODESET */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
lessor = drm_file_get_master(lessor_priv);
mutex_lock(&dev->mode_config.idr_mutex);
lessee = _drm_find_lessee(lessor, arg->lessee_id);
/* No such lessee */
if (!lessee) {
ret = -ENOENT;
goto fail;
}
/* Lease is not held by lessor */
if (lessee->lessor != lessor) {
ret = -EACCES;
goto fail;
}
_drm_lease_revoke(lessee);
fail:
mutex_unlock(&dev->mode_config.idr_mutex);
drm_master_put(&lessor);
return ret;
}
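/*
* Illustrative sketch (not part of the driver code above): how user space
* typically exercises these lease ioctls. The object IDs, the "drm_fd"
* variable and the error handling are hypothetical placeholders; the
* structure layout and ioctl numbers are the ones declared in the DRM uAPI
* headers.
*
*	__u32 objects[3] = { crtc_id, connector_id, plane_id };
*	struct drm_mode_create_lease cl = {
*		.object_ids = (__u64)(uintptr_t)objects,
*		.object_count = 3,
*		.flags = O_CLOEXEC,
*	};
*
*	if (ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_LEASE, &cl) == 0) {
*		// cl.fd is the lessee's DRM file descriptor and cl.lessee_id
*		// its id; the lessor can later pass that id to
*		// DRM_IOCTL_MODE_REVOKE_LEASE to take the objects back.
*	}
*/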
| linux-master | drivers/gpu/drm/drm_lease.c |
/*
* Copyright (c) 2006-2009 Red Hat Inc.
* Copyright (c) 2006-2008 Intel Corporation
* Copyright (c) 2007 Dave Airlie <[email protected]>
*
* DRM framebuffer helper functions
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*
* Authors:
* Dave Airlie <[email protected]>
* Jesse Barnes <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/sysrq.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_atomic.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "drm_internal.h"
static bool drm_fbdev_emulation = true;
module_param_named(fbdev_emulation, drm_fbdev_emulation, bool, 0600);
MODULE_PARM_DESC(fbdev_emulation,
"Enable legacy fbdev emulation [default=true]");
static int drm_fbdev_overalloc = CONFIG_DRM_FBDEV_OVERALLOC;
module_param(drm_fbdev_overalloc, int, 0444);
MODULE_PARM_DESC(drm_fbdev_overalloc,
"Overallocation of the fbdev buffer (%) [default="
__MODULE_STRING(CONFIG_DRM_FBDEV_OVERALLOC) "]");
/*
* In order to keep user-space compatibility, we want in certain use-cases
* to keep leaking the fbdev physical address to the user-space program
* handling the fbdev buffer.
*
* This is a bad habit, essentially kept to support closed-source OpenGL
* drivers that should really be moved into open-source upstream projects
* instead of using legacy physical addresses in user space to communicate
* with other out-of-tree kernel modules.
*
* This module_param *should* be removed as soon as possible and be
* considered as a broken and legacy behaviour from a modern fbdev device.
*/
static bool drm_leak_fbdev_smem;
#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
module_param_unsafe(drm_leak_fbdev_smem, bool, 0600);
MODULE_PARM_DESC(drm_leak_fbdev_smem,
"Allow unsafe leaking fbdev physical smem address [default=false]");
#endif
static LIST_HEAD(kernel_fb_helper_list);
static DEFINE_MUTEX(kernel_fb_helper_lock);
/**
* DOC: fbdev helpers
*
* The fb helper functions are useful to provide an fbdev on top of a drm kernel
* mode setting driver. They can be used mostly independently from the crtc
* helper functions used by many drivers to implement the kernel mode setting
* interfaces.
*
* Drivers that support a dumb buffer with a virtual address and mmap support,
* should try out the generic fbdev emulation using drm_fbdev_generic_setup().
* It will automatically set up deferred I/O if the driver requires a shadow
* buffer.
*
* Existing fbdev implementations should restore the fbdev console by using
* drm_fb_helper_lastclose() as their &drm_driver.lastclose callback.
* They should also notify the fb helper code of updates to the output
* configuration by using drm_fb_helper_output_poll_changed() as their
* &drm_mode_config_funcs.output_poll_changed callback. New implementations
* of fbdev should be built on top of struct &drm_client_funcs, which handles
* this automatically. Setting the old callbacks should be avoided.
*
* For suspend/resume consider using drm_mode_config_helper_suspend() and
* drm_mode_config_helper_resume() which takes care of fbdev as well.
*
* All other functions exported by the fb helper library can be used to
* implement the fbdev driver interface by the driver.
*
* It is possible, though perhaps somewhat tricky, to implement race-free
* hotplug detection using the fbdev helpers. The drm_fb_helper_prepare()
* helper must be called first to initialize the minimum required to make
* hotplug detection work. Drivers also need to make sure to properly set up
* the &drm_mode_config.funcs member. After calling drm_kms_helper_poll_init()
* it is safe to enable interrupts and start processing hotplug events. At the
* same time, drivers should initialize all modeset objects such as CRTCs,
* encoders and connectors. To finish up the fbdev helper initialization, the
* drm_fb_helper_init() function is called. To probe for all attached displays
* and set up an initial configuration using the detected hardware, drivers
* should call drm_fb_helper_initial_config().
*
* If &drm_framebuffer_funcs.dirty is set, the
* drm_fb_helper_{cfb,sys}_{write,fillrect,copyarea,imageblit} functions will
* accumulate changes and schedule &drm_fb_helper.dirty_work to run right
* away. This worker then calls the dirty() function ensuring that it will
* always run in process context since the fb_*() function could be running in
* atomic context. If drm_fb_helper_deferred_io() is used as the deferred_io
* callback it will also schedule dirty_work with the damage collected from the
* mmap page writes.
*
* Deferred I/O is not compatible with SHMEM. Such drivers should request an
* fbdev shadow buffer and call drm_fbdev_generic_setup() instead.
*/
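/*
* A minimal, hedged sketch of the init order described above, shown as
* pseudo driver code rather than a buildable example. All "my_*" names are
* placeholders invented for illustration; the drm_* calls are the helpers
* documented in this file and in drm_probe_helper.c.
*
*	drm_fb_helper_prepare(dev, &my_fb_helper, 32, &my_fb_helper_funcs);
*	dev->mode_config.funcs = &my_mode_config_funcs;
*	// ... create CRTCs, encoders and connectors ...
*	drm_kms_helper_poll_init(dev);	// hotplug processing may start here
*	ret = drm_fb_helper_init(dev, &my_fb_helper);
*	if (!ret)
*		ret = drm_fb_helper_initial_config(&my_fb_helper);
*/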
static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
{
uint16_t *r_base, *g_base, *b_base;
if (crtc->funcs->gamma_set == NULL)
return;
r_base = crtc->gamma_store;
g_base = r_base + crtc->gamma_size;
b_base = g_base + crtc->gamma_size;
crtc->funcs->gamma_set(crtc, r_base, g_base, b_base,
crtc->gamma_size, NULL);
}
/**
* drm_fb_helper_debug_enter - implementation for &fb_ops.fb_debug_enter
* @info: fbdev registered by the helper
*/
int drm_fb_helper_debug_enter(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
const struct drm_crtc_helper_funcs *funcs;
struct drm_mode_set *mode_set;
list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
mutex_lock(&helper->client.modeset_mutex);
drm_client_for_each_modeset(mode_set, &helper->client) {
if (!mode_set->crtc->enabled)
continue;
funcs = mode_set->crtc->helper_private;
if (funcs->mode_set_base_atomic == NULL)
continue;
if (drm_drv_uses_atomic_modeset(mode_set->crtc->dev))
continue;
funcs->mode_set_base_atomic(mode_set->crtc,
mode_set->fb,
mode_set->x,
mode_set->y,
ENTER_ATOMIC_MODE_SET);
}
mutex_unlock(&helper->client.modeset_mutex);
}
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_debug_enter);
/**
* drm_fb_helper_debug_leave - implementation for &fb_ops.fb_debug_leave
* @info: fbdev registered by the helper
*/
int drm_fb_helper_debug_leave(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
struct drm_client_dev *client = &helper->client;
struct drm_device *dev = helper->dev;
struct drm_crtc *crtc;
const struct drm_crtc_helper_funcs *funcs;
struct drm_mode_set *mode_set;
struct drm_framebuffer *fb;
mutex_lock(&client->modeset_mutex);
drm_client_for_each_modeset(mode_set, client) {
crtc = mode_set->crtc;
if (drm_drv_uses_atomic_modeset(crtc->dev))
continue;
funcs = crtc->helper_private;
fb = crtc->primary->fb;
if (!crtc->enabled)
continue;
if (!fb) {
drm_err(dev, "no fb to restore?\n");
continue;
}
if (funcs->mode_set_base_atomic == NULL)
continue;
drm_fb_helper_restore_lut_atomic(mode_set->crtc);
funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
crtc->y, LEAVE_ATOMIC_MODE_SET);
}
mutex_unlock(&client->modeset_mutex);
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_debug_leave);
static int
__drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
bool force)
{
bool do_delayed;
int ret;
if (!drm_fbdev_emulation || !fb_helper)
return -ENODEV;
if (READ_ONCE(fb_helper->deferred_setup))
return 0;
mutex_lock(&fb_helper->lock);
if (force) {
/*
* Yes this is the _locked version which expects the master lock
* to be held. But for forced restores we're intentionally
* racing here, see drm_fb_helper_set_par().
*/
ret = drm_client_modeset_commit_locked(&fb_helper->client);
} else {
ret = drm_client_modeset_commit(&fb_helper->client);
}
do_delayed = fb_helper->delayed_hotplug;
if (do_delayed)
fb_helper->delayed_hotplug = false;
mutex_unlock(&fb_helper->lock);
if (do_delayed)
drm_fb_helper_hotplug_event(fb_helper);
return ret;
}
/**
* drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
* @fb_helper: driver-allocated fbdev helper, can be NULL
*
* This should be called from driver's drm &drm_driver.lastclose callback
* when implementing an fbcon on top of kms using this helper. This ensures that
* the user isn't greeted with a black screen when e.g. X dies.
*
* RETURNS:
* Zero if everything went ok, negative error code otherwise.
*/
int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
{
return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, false);
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
#ifdef CONFIG_MAGIC_SYSRQ
/* emergency restore, don't bother with error reporting */
static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
{
struct drm_fb_helper *helper;
mutex_lock(&kernel_fb_helper_lock);
list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
struct drm_device *dev = helper->dev;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
continue;
mutex_lock(&helper->lock);
drm_client_modeset_commit_locked(&helper->client);
mutex_unlock(&helper->lock);
}
mutex_unlock(&kernel_fb_helper_lock);
}
static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
static void drm_fb_helper_sysrq(u8 dummy1)
{
schedule_work(&drm_fb_helper_restore_work);
}
static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
.handler = drm_fb_helper_sysrq,
.help_msg = "force-fb(v)",
.action_msg = "Restore framebuffer console",
};
#else
static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
#endif
static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
{
struct drm_fb_helper *fb_helper = info->par;
mutex_lock(&fb_helper->lock);
drm_client_modeset_dpms(&fb_helper->client, dpms_mode);
mutex_unlock(&fb_helper->lock);
}
/**
* drm_fb_helper_blank - implementation for &fb_ops.fb_blank
* @blank: desired blanking state
* @info: fbdev registered by the helper
*/
int drm_fb_helper_blank(int blank, struct fb_info *info)
{
if (oops_in_progress)
return -EBUSY;
switch (blank) {
/* Display: On; HSync: On, VSync: On */
case FB_BLANK_UNBLANK:
drm_fb_helper_dpms(info, DRM_MODE_DPMS_ON);
break;
/* Display: Off; HSync: On, VSync: On */
case FB_BLANK_NORMAL:
drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY);
break;
/* Display: Off; HSync: Off, VSync: On */
case FB_BLANK_HSYNC_SUSPEND:
drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY);
break;
/* Display: Off; HSync: On, VSync: Off */
case FB_BLANK_VSYNC_SUSPEND:
drm_fb_helper_dpms(info, DRM_MODE_DPMS_SUSPEND);
break;
/* Display: Off; HSync: Off, VSync: Off */
case FB_BLANK_POWERDOWN:
drm_fb_helper_dpms(info, DRM_MODE_DPMS_OFF);
break;
}
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_blank);
static void drm_fb_helper_resume_worker(struct work_struct *work)
{
struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
resume_work);
console_lock();
fb_set_suspend(helper->info, 0);
console_unlock();
}
static void drm_fb_helper_fb_dirty(struct drm_fb_helper *helper)
{
struct drm_device *dev = helper->dev;
struct drm_clip_rect *clip = &helper->damage_clip;
struct drm_clip_rect clip_copy;
unsigned long flags;
int ret;
if (drm_WARN_ON_ONCE(dev, !helper->funcs->fb_dirty))
return;
spin_lock_irqsave(&helper->damage_lock, flags);
clip_copy = *clip;
clip->x1 = clip->y1 = ~0;
clip->x2 = clip->y2 = 0;
spin_unlock_irqrestore(&helper->damage_lock, flags);
ret = helper->funcs->fb_dirty(helper, &clip_copy);
if (ret)
goto err;
return;
err:
/*
* Restore damage clip rectangle on errors. The next run
* of the damage worker will perform the update.
*/
spin_lock_irqsave(&helper->damage_lock, flags);
clip->x1 = min_t(u32, clip->x1, clip_copy.x1);
clip->y1 = min_t(u32, clip->y1, clip_copy.y1);
clip->x2 = max_t(u32, clip->x2, clip_copy.x2);
clip->y2 = max_t(u32, clip->y2, clip_copy.y2);
spin_unlock_irqrestore(&helper->damage_lock, flags);
}
static void drm_fb_helper_damage_work(struct work_struct *work)
{
struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, damage_work);
drm_fb_helper_fb_dirty(helper);
}
/**
* drm_fb_helper_prepare - setup a drm_fb_helper structure
* @dev: DRM device
* @helper: driver-allocated fbdev helper structure to set up
* @preferred_bpp: Preferred bits per pixel for the device.
* @funcs: pointer to structure of functions associated with this helper
*
* Sets up the bare minimum to make the framebuffer helper usable. This is
* useful to implement race-free initialization of the polling helpers.
*/
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
unsigned int preferred_bpp,
const struct drm_fb_helper_funcs *funcs)
{
/*
* Pick a preferred bpp of 32 if no value has been given. This
* will select XRGB8888 for the framebuffer formats. All drivers
* have to support XRGB8888 for backwards compatibility with legacy
* userspace, so it's the safe choice here.
*
* TODO: Replace struct drm_mode_config.preferred_depth and this
* bpp value with a preferred format that is given as struct
* drm_format_info. Then derive all other values from the
* format.
*/
if (!preferred_bpp)
preferred_bpp = 32;
INIT_LIST_HEAD(&helper->kernel_fb_list);
spin_lock_init(&helper->damage_lock);
INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker);
INIT_WORK(&helper->damage_work, drm_fb_helper_damage_work);
helper->damage_clip.x1 = helper->damage_clip.y1 = ~0;
mutex_init(&helper->lock);
helper->funcs = funcs;
helper->dev = dev;
helper->preferred_bpp = preferred_bpp;
}
EXPORT_SYMBOL(drm_fb_helper_prepare);
/**
* drm_fb_helper_unprepare - clean up a drm_fb_helper structure
* @fb_helper: driver-allocated fbdev helper structure to set up
*
* Cleans up the framebuffer helper. Inverse of drm_fb_helper_prepare().
*/
void drm_fb_helper_unprepare(struct drm_fb_helper *fb_helper)
{
mutex_destroy(&fb_helper->lock);
}
EXPORT_SYMBOL(drm_fb_helper_unprepare);
/**
* drm_fb_helper_init - initialize a &struct drm_fb_helper
* @dev: drm device
* @fb_helper: driver-allocated fbdev helper structure to initialize
*
* This allocates the structures for the fbdev helper with the given limits.
* Note that this won't yet touch the hardware (through the driver interfaces)
* nor register the fbdev. This is only done in drm_fb_helper_initial_config()
* to allow driver writers more control over the exact init sequence.
*
* Drivers must call drm_fb_helper_prepare() before calling this function.
*
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
int drm_fb_helper_init(struct drm_device *dev,
struct drm_fb_helper *fb_helper)
{
int ret;
/*
* If this is not the generic fbdev client, initialize a drm_client
* without callbacks so we can use the modesets.
*/
if (!fb_helper->client.funcs) {
ret = drm_client_init(dev, &fb_helper->client, "drm_fb_helper", NULL);
if (ret)
return ret;
}
dev->fb_helper = fb_helper;
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_init);
/**
* drm_fb_helper_alloc_info - allocate fb_info and some of its members
* @fb_helper: driver-allocated fbdev helper
*
* A helper to alloc fb_info and the member cmap. Called by the driver
* within the fb_probe fb_helper callback function. Drivers do not
* need to release the allocated fb_info structure themselves, this is
* automatically done when calling drm_fb_helper_fini().
*
* RETURNS:
* fb_info pointer if things went okay, pointer containing error code
* otherwise
*/
struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
{
struct device *dev = fb_helper->dev->dev;
struct fb_info *info;
int ret;
info = framebuffer_alloc(0, dev);
if (!info)
return ERR_PTR(-ENOMEM);
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret)
goto err_release;
fb_helper->info = info;
info->skip_vt_switch = true;
return info;
err_release:
framebuffer_release(info);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_fb_helper_alloc_info);
/**
* drm_fb_helper_release_info - release fb_info and its members
* @fb_helper: driver-allocated fbdev helper
*
* A helper to release fb_info and the member cmap. Drivers do not
* need to release the allocated fb_info structure themselves, this is
* automatically done when calling drm_fb_helper_fini().
*/
void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper)
{
struct fb_info *info = fb_helper->info;
if (!info)
return;
fb_helper->info = NULL;
if (info->cmap.len)
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
EXPORT_SYMBOL(drm_fb_helper_release_info);
/**
* drm_fb_helper_unregister_info - unregister fb_info framebuffer device
* @fb_helper: driver-allocated fbdev helper, can be NULL
*
* A wrapper around unregister_framebuffer, to release the fb_info
* framebuffer device. This must be called before releasing all resources for
* @fb_helper by calling drm_fb_helper_fini().
*/
void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper)
{
if (fb_helper && fb_helper->info)
unregister_framebuffer(fb_helper->info);
}
EXPORT_SYMBOL(drm_fb_helper_unregister_info);
/**
* drm_fb_helper_fini - finalize a &struct drm_fb_helper
* @fb_helper: driver-allocated fbdev helper, can be NULL
*
* This cleans up all remaining resources associated with @fb_helper.
*/
void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
{
if (!fb_helper)
return;
fb_helper->dev->fb_helper = NULL;
if (!drm_fbdev_emulation)
return;
cancel_work_sync(&fb_helper->resume_work);
cancel_work_sync(&fb_helper->damage_work);
drm_fb_helper_release_info(fb_helper);
mutex_lock(&kernel_fb_helper_lock);
if (!list_empty(&fb_helper->kernel_fb_list)) {
list_del(&fb_helper->kernel_fb_list);
if (list_empty(&kernel_fb_helper_list))
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
mutex_unlock(&kernel_fb_helper_lock);
if (!fb_helper->client.funcs)
drm_client_release(&fb_helper->client);
}
EXPORT_SYMBOL(drm_fb_helper_fini);
static void drm_fb_helper_add_damage_clip(struct drm_fb_helper *helper, u32 x, u32 y,
u32 width, u32 height)
{
struct drm_clip_rect *clip = &helper->damage_clip;
unsigned long flags;
spin_lock_irqsave(&helper->damage_lock, flags);
clip->x1 = min_t(u32, clip->x1, x);
clip->y1 = min_t(u32, clip->y1, y);
clip->x2 = max_t(u32, clip->x2, x + width);
clip->y2 = max_t(u32, clip->y2, y + height);
spin_unlock_irqrestore(&helper->damage_lock, flags);
}
static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y,
u32 width, u32 height)
{
drm_fb_helper_add_damage_clip(helper, x, y, width, height);
schedule_work(&helper->damage_work);
}
/*
* Convert memory region into area of scanlines and pixels per
* scanline. The parameters off and len must not reach beyond
* the end of the framebuffer.
*/
static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off, size_t len,
struct drm_rect *clip)
{
u32 line_length = info->fix.line_length;
u32 fb_height = info->var.yres;
off_t end = off + len;
u32 x1 = 0;
u32 y1 = off / line_length;
u32 x2 = info->var.xres;
u32 y2 = DIV_ROUND_UP(end, line_length);
/* Don't allow any of them beyond the bottom bound of display area */
if (y1 > fb_height)
y1 = fb_height;
if (y2 > fb_height)
y2 = fb_height;
if ((y2 - y1) == 1) {
/*
* We've only written to a single scanline. Try to reduce
* the number of horizontal pixels that need an update.
*/
off_t bit_off = (off % line_length) * 8;
off_t bit_end = (end % line_length) * 8;
x1 = bit_off / info->var.bits_per_pixel;
x2 = DIV_ROUND_UP(bit_end, info->var.bits_per_pixel);
}
drm_rect_init(clip, x1, y1, x2 - x1, y2 - y1);
}
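/*
* Worked example (illustrative numbers only): for a 1024x768 XRGB8888
* framebuffer, line_length = 4096 and bits_per_pixel = 32. A write with
* off = 100 * 4096 + 400 = 410000 and len = 64 gives y1 = 100 and
* y2 = DIV_ROUND_UP(410064, 4096) = 101, i.e. a single scanline. The
* horizontal reduction then yields x1 = (400 * 8) / 32 = 100 and
* x2 = DIV_ROUND_UP(464 * 8, 32) = 116, so the clip becomes the 16x1
* pixel rectangle at (100, 100).
*/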
/* Don't use in new code. */
void drm_fb_helper_damage_range(struct fb_info *info, off_t off, size_t len)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_rect damage_area;
drm_fb_helper_memory_range_to_clip(info, off, len, &damage_area);
drm_fb_helper_damage(fb_helper, damage_area.x1, damage_area.y1,
drm_rect_width(&damage_area),
drm_rect_height(&damage_area));
}
EXPORT_SYMBOL(drm_fb_helper_damage_range);
/* Don't use in new code. */
void drm_fb_helper_damage_area(struct fb_info *info, u32 x, u32 y, u32 width, u32 height)
{
struct drm_fb_helper *fb_helper = info->par;
drm_fb_helper_damage(fb_helper, x, y, width, height);
}
EXPORT_SYMBOL(drm_fb_helper_damage_area);
/**
* drm_fb_helper_deferred_io() - fbdev deferred_io callback function
* @info: fb_info struct pointer
* @pagereflist: list of mmap framebuffer pages that have to be flushed
*
* This function is used as the &fb_deferred_io.deferred_io
* callback function for flushing the fbdev mmap writes.
*/
void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist)
{
struct drm_fb_helper *helper = info->par;
unsigned long start, end, min_off, max_off, total_size;
struct fb_deferred_io_pageref *pageref;
struct drm_rect damage_area;
min_off = ULONG_MAX;
max_off = 0;
list_for_each_entry(pageref, pagereflist, list) {
start = pageref->offset;
end = start + PAGE_SIZE;
min_off = min(min_off, start);
max_off = max(max_off, end);
}
/*
* As we can only track pages, we might reach beyond the end
* of the screen and account for non-existing scanlines. Hence,
* keep the covered memory area within the screen buffer.
*/
if (info->screen_size)
total_size = info->screen_size;
else
total_size = info->fix.smem_len;
max_off = min(max_off, total_size);
if (min_off < max_off) {
drm_fb_helper_memory_range_to_clip(info, min_off, max_off - min_off, &damage_area);
drm_fb_helper_damage(helper, damage_area.x1, damage_area.y1,
drm_rect_width(&damage_area),
drm_rect_height(&damage_area));
}
}
EXPORT_SYMBOL(drm_fb_helper_deferred_io);
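/*
* Hedged sketch of how a driver-private fbdev setup might wire this callback
* into the fbdev deferred-I/O machinery. The "my_fbdefio" name is made up for
* illustration; struct fb_deferred_io and fb_deferred_io_init() are provided
* by the fbdev core (linux/fb.h).
*
*	static struct fb_deferred_io my_fbdefio = {
*		.delay		= HZ / 20,
*		.deferred_io	= drm_fb_helper_deferred_io,
*	};
*
*	info->fbdefio = &my_fbdefio;
*	ret = fb_deferred_io_init(info);
*/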
/**
* drm_fb_helper_set_suspend - wrapper around fb_set_suspend
* @fb_helper: driver-allocated fbdev helper, can be NULL
* @suspend: whether to suspend or resume
*
* A wrapper around fb_set_suspend implemented by fbdev core.
* Use drm_fb_helper_set_suspend_unlocked() if you don't need to take
* the lock yourself
*/
void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend)
{
if (fb_helper && fb_helper->info)
fb_set_suspend(fb_helper->info, suspend);
}
EXPORT_SYMBOL(drm_fb_helper_set_suspend);
/**
* drm_fb_helper_set_suspend_unlocked - wrapper around fb_set_suspend that also
* takes the console lock
* @fb_helper: driver-allocated fbdev helper, can be NULL
* @suspend: whether to suspend or resume
*
* A wrapper around fb_set_suspend() that takes the console lock. If the lock
* isn't available on resume, a worker is tasked with waiting for the lock
* to become available. The console lock can be pretty contended on resume
* due to all the printk activity.
*
* This function can be called multiple times with the same state since
* &fb_info.state is checked to see if fbdev is running or not before locking.
*
* Use drm_fb_helper_set_suspend() if you need to take the lock yourself.
*/
void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
bool suspend)
{
if (!fb_helper || !fb_helper->info)
return;
/* make sure there's no pending/ongoing resume */
flush_work(&fb_helper->resume_work);
if (suspend) {
if (fb_helper->info->state != FBINFO_STATE_RUNNING)
return;
console_lock();
} else {
if (fb_helper->info->state == FBINFO_STATE_RUNNING)
return;
if (!console_trylock()) {
schedule_work(&fb_helper->resume_work);
return;
}
}
fb_set_suspend(fb_helper->info, suspend);
console_unlock();
}
EXPORT_SYMBOL(drm_fb_helper_set_suspend_unlocked);
static int setcmap_pseudo_palette(struct fb_cmap *cmap, struct fb_info *info)
{
u32 *palette = (u32 *)info->pseudo_palette;
int i;
if (cmap->start + cmap->len > 16)
return -EINVAL;
for (i = 0; i < cmap->len; ++i) {
u16 red = cmap->red[i];
u16 green = cmap->green[i];
u16 blue = cmap->blue[i];
u32 value;
red >>= 16 - info->var.red.length;
green >>= 16 - info->var.green.length;
blue >>= 16 - info->var.blue.length;
value = (red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset);
if (info->var.transp.length > 0) {
u32 mask = (1 << info->var.transp.length) - 1;
mask <<= info->var.transp.offset;
value |= mask;
}
palette[cmap->start + i] = value;
}
return 0;
}
static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_mode_set *modeset;
struct drm_crtc *crtc;
u16 *r, *g, *b;
int ret = 0;
drm_modeset_lock_all(fb_helper->dev);
drm_client_for_each_modeset(modeset, &fb_helper->client) {
crtc = modeset->crtc;
if (!crtc->funcs->gamma_set || !crtc->gamma_size) {
ret = -EINVAL;
goto out;
}
if (cmap->start + cmap->len > crtc->gamma_size) {
ret = -EINVAL;
goto out;
}
r = crtc->gamma_store;
g = r + crtc->gamma_size;
b = g + crtc->gamma_size;
memcpy(r + cmap->start, cmap->red, cmap->len * sizeof(*r));
memcpy(g + cmap->start, cmap->green, cmap->len * sizeof(*g));
memcpy(b + cmap->start, cmap->blue, cmap->len * sizeof(*b));
ret = crtc->funcs->gamma_set(crtc, r, g, b,
crtc->gamma_size, NULL);
if (ret)
goto out;
}
out:
drm_modeset_unlock_all(fb_helper->dev);
return ret;
}
static struct drm_property_blob *setcmap_new_gamma_lut(struct drm_crtc *crtc,
struct fb_cmap *cmap)
{
struct drm_device *dev = crtc->dev;
struct drm_property_blob *gamma_lut;
struct drm_color_lut *lut;
int size = crtc->gamma_size;
int i;
if (!size || cmap->start + cmap->len > size)
return ERR_PTR(-EINVAL);
gamma_lut = drm_property_create_blob(dev, sizeof(*lut) * size, NULL);
if (IS_ERR(gamma_lut))
return gamma_lut;
lut = gamma_lut->data;
if (cmap->start || cmap->len != size) {
u16 *r = crtc->gamma_store;
u16 *g = r + crtc->gamma_size;
u16 *b = g + crtc->gamma_size;
for (i = 0; i < cmap->start; i++) {
lut[i].red = r[i];
lut[i].green = g[i];
lut[i].blue = b[i];
}
for (i = cmap->start + cmap->len; i < size; i++) {
lut[i].red = r[i];
lut[i].green = g[i];
lut[i].blue = b[i];
}
}
for (i = 0; i < cmap->len; i++) {
lut[cmap->start + i].red = cmap->red[i];
lut[cmap->start + i].green = cmap->green[i];
lut[cmap->start + i].blue = cmap->blue[i];
}
return gamma_lut;
}
static int setcmap_atomic(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_property_blob *gamma_lut = NULL;
struct drm_modeset_acquire_ctx ctx;
struct drm_crtc_state *crtc_state;
struct drm_atomic_state *state;
struct drm_mode_set *modeset;
struct drm_crtc *crtc;
u16 *r, *g, *b;
bool replaced;
int ret = 0;
drm_modeset_acquire_init(&ctx, 0);
state = drm_atomic_state_alloc(dev);
if (!state) {
ret = -ENOMEM;
goto out_ctx;
}
state->acquire_ctx = &ctx;
retry:
drm_client_for_each_modeset(modeset, &fb_helper->client) {
crtc = modeset->crtc;
if (!gamma_lut)
gamma_lut = setcmap_new_gamma_lut(crtc, cmap);
if (IS_ERR(gamma_lut)) {
ret = PTR_ERR(gamma_lut);
gamma_lut = NULL;
goto out_state;
}
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
goto out_state;
}
/*
* FIXME: This always uses gamma_lut. Some HW have only
* degamma_lut, in which case we should reset gamma_lut and set
* degamma_lut. See drm_crtc_legacy_gamma_set().
*/
replaced = drm_property_replace_blob(&crtc_state->degamma_lut,
NULL);
replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
replaced |= drm_property_replace_blob(&crtc_state->gamma_lut,
gamma_lut);
crtc_state->color_mgmt_changed |= replaced;
}
ret = drm_atomic_commit(state);
if (ret)
goto out_state;
drm_client_for_each_modeset(modeset, &fb_helper->client) {
crtc = modeset->crtc;
r = crtc->gamma_store;
g = r + crtc->gamma_size;
b = g + crtc->gamma_size;
memcpy(r + cmap->start, cmap->red, cmap->len * sizeof(*r));
memcpy(g + cmap->start, cmap->green, cmap->len * sizeof(*g));
memcpy(b + cmap->start, cmap->blue, cmap->len * sizeof(*b));
}
out_state:
if (ret == -EDEADLK)
goto backoff;
drm_property_blob_put(gamma_lut);
drm_atomic_state_put(state);
out_ctx:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
backoff:
drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
goto retry;
}
/**
* drm_fb_helper_setcmap - implementation for &fb_ops.fb_setcmap
* @cmap: cmap to set
* @info: fbdev registered by the helper
*/
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
int ret;
if (oops_in_progress)
return -EBUSY;
mutex_lock(&fb_helper->lock);
if (!drm_master_internal_acquire(dev)) {
ret = -EBUSY;
goto unlock;
}
mutex_lock(&fb_helper->client.modeset_mutex);
if (info->fix.visual == FB_VISUAL_TRUECOLOR)
ret = setcmap_pseudo_palette(cmap, info);
else if (drm_drv_uses_atomic_modeset(fb_helper->dev))
ret = setcmap_atomic(cmap, info);
else
ret = setcmap_legacy(cmap, info);
mutex_unlock(&fb_helper->client.modeset_mutex);
drm_master_internal_release(dev);
unlock:
mutex_unlock(&fb_helper->lock);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_setcmap);
/**
* drm_fb_helper_ioctl - legacy ioctl implementation
* @info: fbdev registered by the helper
* @cmd: ioctl command
* @arg: ioctl argument
*
* A helper to implement the standard fbdev ioctl. Only
* FBIO_WAITFORVSYNC is implemented for now.
*/
int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
int ret = 0;
mutex_lock(&fb_helper->lock);
if (!drm_master_internal_acquire(dev)) {
ret = -EBUSY;
goto unlock;
}
switch (cmd) {
case FBIO_WAITFORVSYNC:
/*
* Only consider the first CRTC.
*
* This ioctl is supposed to take the CRTC number as
* an argument, but in fbdev times, what that number
* was supposed to be was quite unclear, different
* drivers were passing that argument differently
* (some by reference, some by value), and most of the
* userspace applications were just hardcoding 0 as an
* argument.
*
* The first CRTC should be the integrated panel on
* most drivers, so this is the best choice we can
* make. If we're not smart enough here, one should
* just consider switching the userspace to KMS.
*/
crtc = fb_helper->client.modesets[0].crtc;
/*
* Only wait for a vblank event if the CRTC is
* enabled, otherwise just don't do anything,
* not even report an error.
*/
ret = drm_crtc_vblank_get(crtc);
if (!ret) {
drm_crtc_wait_one_vblank(crtc);
drm_crtc_vblank_put(crtc);
}
ret = 0;
break;
default:
ret = -ENOTTY;
}
drm_master_internal_release(dev);
unlock:
mutex_unlock(&fb_helper->lock);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_ioctl);
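/*
* Hedged sketch of an fbdev ops table built from the helpers in this file;
* the "my_fb_ops" name is invented for illustration, and a real driver would
* also provide mmap, read/write and drawing callbacks suitable for its
* framebuffer memory.
*
*	static const struct fb_ops my_fb_ops = {
*		.owner		= THIS_MODULE,
*		.fb_check_var	= drm_fb_helper_check_var,
*		.fb_set_par	= drm_fb_helper_set_par,
*		.fb_setcmap	= drm_fb_helper_setcmap,
*		.fb_blank	= drm_fb_helper_blank,
*		.fb_pan_display	= drm_fb_helper_pan_display,
*		.fb_ioctl	= drm_fb_helper_ioctl,
*		.fb_debug_enter	= drm_fb_helper_debug_enter,
*		.fb_debug_leave	= drm_fb_helper_debug_leave,
*	};
*/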
static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1,
const struct fb_var_screeninfo *var_2)
{
return var_1->bits_per_pixel == var_2->bits_per_pixel &&
var_1->grayscale == var_2->grayscale &&
var_1->red.offset == var_2->red.offset &&
var_1->red.length == var_2->red.length &&
var_1->red.msb_right == var_2->red.msb_right &&
var_1->green.offset == var_2->green.offset &&
var_1->green.length == var_2->green.length &&
var_1->green.msb_right == var_2->green.msb_right &&
var_1->blue.offset == var_2->blue.offset &&
var_1->blue.length == var_2->blue.length &&
var_1->blue.msb_right == var_2->blue.msb_right &&
var_1->transp.offset == var_2->transp.offset &&
var_1->transp.length == var_2->transp.length &&
var_1->transp.msb_right == var_2->transp.msb_right;
}
static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var,
const struct drm_format_info *format)
{
u8 depth = format->depth;
if (format->is_color_indexed) {
var->red.offset = 0;
var->green.offset = 0;
var->blue.offset = 0;
var->red.length = depth;
var->green.length = depth;
var->blue.length = depth;
var->transp.offset = 0;
var->transp.length = 0;
return;
}
switch (depth) {
case 15:
var->red.offset = 10;
var->green.offset = 5;
var->blue.offset = 0;
var->red.length = 5;
var->green.length = 5;
var->blue.length = 5;
var->transp.offset = 15;
var->transp.length = 1;
break;
case 16:
var->red.offset = 11;
var->green.offset = 5;
var->blue.offset = 0;
var->red.length = 5;
var->green.length = 6;
var->blue.length = 5;
var->transp.offset = 0;
break;
case 24:
var->red.offset = 16;
var->green.offset = 8;
var->blue.offset = 0;
var->red.length = 8;
var->green.length = 8;
var->blue.length = 8;
var->transp.offset = 0;
var->transp.length = 0;
break;
case 32:
var->red.offset = 16;
var->green.offset = 8;
var->blue.offset = 0;
var->red.length = 8;
var->green.length = 8;
var->blue.length = 8;
var->transp.offset = 24;
var->transp.length = 8;
break;
default:
break;
}
}
static void __fill_var(struct fb_var_screeninfo *var, struct fb_info *info,
struct drm_framebuffer *fb)
{
int i;
var->xres_virtual = fb->width;
var->yres_virtual = fb->height;
var->accel_flags = 0;
var->bits_per_pixel = drm_format_info_bpp(fb->format, 0);
var->height = info->var.height;
var->width = info->var.width;
var->left_margin = var->right_margin = 0;
var->upper_margin = var->lower_margin = 0;
var->hsync_len = var->vsync_len = 0;
var->sync = var->vmode = 0;
var->rotate = 0;
var->colorspace = 0;
for (i = 0; i < 4; i++)
var->reserved[i] = 0;
}
/**
* drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var
* @var: screeninfo to check
* @info: fbdev registered by the helper
*/
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_framebuffer *fb = fb_helper->fb;
const struct drm_format_info *format = fb->format;
struct drm_device *dev = fb_helper->dev;
unsigned int bpp;
if (in_dbg_master())
return -EINVAL;
if (var->pixclock != 0) {
drm_dbg_kms(dev, "fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
var->pixclock = 0;
}
switch (format->format) {
case DRM_FORMAT_C1:
case DRM_FORMAT_C2:
case DRM_FORMAT_C4:
/* supported format with sub-byte pixels */
break;
default:
if ((drm_format_info_block_width(format, 0) > 1) ||
(drm_format_info_block_height(format, 0) > 1))
return -EINVAL;
break;
}
/*
* Changes to struct fb_var_screeninfo are currently not pushed back
* to KMS, hence fail if different settings are requested.
*/
bpp = drm_format_info_bpp(format, 0);
if (var->bits_per_pixel > bpp ||
var->xres > fb->width || var->yres > fb->height ||
var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
drm_dbg_kms(dev, "fb requested width/height/bpp can't fit in current fb "
"request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
var->xres, var->yres, var->bits_per_pixel,
var->xres_virtual, var->yres_virtual,
fb->width, fb->height, bpp);
return -EINVAL;
}
__fill_var(var, info, fb);
/*
* fb_pan_display() validates this, but fb_set_par() doesn't and just
* falls over. Note that __fill_var above adjusts y/res_virtual.
*/
if (var->yoffset > var->yres_virtual - var->yres ||
var->xoffset > var->xres_virtual - var->xres)
return -EINVAL;
/* We neither support grayscale nor FOURCC (also stored in here). */
if (var->grayscale > 0)
return -EINVAL;
if (var->nonstd)
return -EINVAL;
/*
* Workaround for SDL 1.2, which is known to be setting all pixel format
* fields values to zero in some cases. We treat this situation as a
* kind of "use some reasonable autodetected values".
*/
if (!var->red.offset && !var->green.offset &&
!var->blue.offset && !var->transp.offset &&
!var->red.length && !var->green.length &&
!var->blue.length && !var->transp.length &&
!var->red.msb_right && !var->green.msb_right &&
!var->blue.msb_right && !var->transp.msb_right) {
drm_fb_helper_fill_pixel_fmt(var, format);
}
/*
* drm fbdev emulation doesn't support changing the pixel format at all,
* so reject all pixel format changing requests.
*/
if (!drm_fb_pixel_format_equal(var, &info->var)) {
drm_dbg_kms(dev, "fbdev emulation doesn't support changing the pixel format\n");
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_check_var);
/**
* drm_fb_helper_set_par - implementation for &fb_ops.fb_set_par
* @info: fbdev registered by the helper
*
* This will let fbcon do the mode init and is called at initialization time by
* the fbdev core when registering the driver, and later on through the hotplug
* callback.
*/
int drm_fb_helper_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct fb_var_screeninfo *var = &info->var;
bool force;
if (oops_in_progress)
return -EBUSY;
/*
* Normally we want to make sure that a kms master takes precedence over
* fbdev, to avoid fbdev flickering and occasionally stealing the
* display status. But Xorg first sets the vt back to text mode using
* the KDSET IOCTL with KD_TEXT, and only after that drops the master
* status when exiting.
*
* In the past this was caught by drm_fb_helper_lastclose(), but on
* modern systems where logind always keeps a drm fd open to orchestrate
* the vt switching, this doesn't work.
*
* To not break the userspace ABI we have this special case here, which
* is only used for the above case. Everything else uses the normal
* commit function, which ensures that we never steal the display from
* an active drm master.
*/
force = var->activate & FB_ACTIVATE_KD_TEXT;
__drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force);
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
static void pan_set(struct drm_fb_helper *fb_helper, int x, int y)
{
struct drm_mode_set *mode_set;
mutex_lock(&fb_helper->client.modeset_mutex);
drm_client_for_each_modeset(mode_set, &fb_helper->client) {
mode_set->x = x;
mode_set->y = y;
}
mutex_unlock(&fb_helper->client.modeset_mutex);
}
static int pan_display_atomic(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
int ret;
pan_set(fb_helper, var->xoffset, var->yoffset);
ret = drm_client_modeset_commit_locked(&fb_helper->client);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
} else
pan_set(fb_helper, info->var.xoffset, info->var.yoffset);
return ret;
}
static int pan_display_legacy(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_client_dev *client = &fb_helper->client;
struct drm_mode_set *modeset;
int ret = 0;
mutex_lock(&client->modeset_mutex);
drm_modeset_lock_all(fb_helper->dev);
drm_client_for_each_modeset(modeset, client) {
modeset->x = var->xoffset;
modeset->y = var->yoffset;
if (modeset->num_connectors) {
ret = drm_mode_set_config_internal(modeset);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
}
}
}
drm_modeset_unlock_all(fb_helper->dev);
mutex_unlock(&client->modeset_mutex);
return ret;
}
/**
* drm_fb_helper_pan_display - implementation for &fb_ops.fb_pan_display
* @var: updated screen information
* @info: fbdev registered by the helper
*/
int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
int ret;
if (oops_in_progress)
return -EBUSY;
mutex_lock(&fb_helper->lock);
if (!drm_master_internal_acquire(dev)) {
ret = -EBUSY;
goto unlock;
}
if (drm_drv_uses_atomic_modeset(dev))
ret = pan_display_atomic(var, info);
else
ret = pan_display_legacy(var, info);
drm_master_internal_release(dev);
unlock:
mutex_unlock(&fb_helper->lock);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_pan_display);
static uint32_t drm_fb_helper_find_format(struct drm_fb_helper *fb_helper, const uint32_t *formats,
size_t format_count, uint32_t bpp, uint32_t depth)
{
struct drm_device *dev = fb_helper->dev;
uint32_t format;
size_t i;
/*
* Do not consider YUV or other complicated formats
* for framebuffers. This means only legacy formats
* are supported (fmt->depth is a legacy field), but
* the framebuffer emulation can only deal with such
* formats, specifically RGB/BGR formats.
*/
format = drm_mode_legacy_fb_format(bpp, depth);
if (!format)
goto err;
for (i = 0; i < format_count; ++i) {
if (formats[i] == format)
return format;
}
err:
/* We found nothing. */
drm_warn(dev, "bpp/depth value of %u/%u not supported\n", bpp, depth);
return DRM_FORMAT_INVALID;
}
static uint32_t drm_fb_helper_find_color_mode_format(struct drm_fb_helper *fb_helper,
const uint32_t *formats, size_t format_count,
unsigned int color_mode)
{
struct drm_device *dev = fb_helper->dev;
uint32_t bpp, depth;
switch (color_mode) {
case 1:
case 2:
case 4:
case 8:
case 16:
case 24:
bpp = depth = color_mode;
break;
case 15:
bpp = 16;
depth = 15;
break;
case 32:
bpp = 32;
depth = 24;
break;
default:
drm_info(dev, "unsupported color mode of %d\n", color_mode);
return DRM_FORMAT_INVALID;
}
return drm_fb_helper_find_format(fb_helper, formats, format_count, bpp, depth);
}
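/*
* Worked example: a connector command line such as video=HDMI-A-1:800x600-15
* sets cmdline_mode->bpp to 15, which maps to bpp 16 / depth 15 and therefore
* (via drm_mode_legacy_fb_format()) to DRM_FORMAT_XRGB1555, while color_mode
* 32 maps to bpp 32 / depth 24, i.e. DRM_FORMAT_XRGB8888. The format is only
* returned if the primary plane actually advertises it.
*/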
static int __drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
int crtc_count = 0;
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
struct drm_mode_set *mode_set;
uint32_t surface_format = DRM_FORMAT_INVALID;
const struct drm_format_info *info;
memset(sizes, 0, sizeof(*sizes));
sizes->fb_width = (u32)-1;
sizes->fb_height = (u32)-1;
drm_client_for_each_modeset(mode_set, client) {
struct drm_crtc *crtc = mode_set->crtc;
struct drm_plane *plane = crtc->primary;
drm_dbg_kms(dev, "test CRTC %u primary plane\n", drm_crtc_index(crtc));
drm_connector_list_iter_begin(fb_helper->dev, &conn_iter);
drm_client_for_each_connector_iter(connector, &conn_iter) {
struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode;
if (!cmdline_mode->bpp_specified)
continue;
surface_format = drm_fb_helper_find_color_mode_format(fb_helper,
plane->format_types,
plane->format_count,
cmdline_mode->bpp);
if (surface_format != DRM_FORMAT_INVALID)
break; /* found supported format */
}
drm_connector_list_iter_end(&conn_iter);
if (surface_format != DRM_FORMAT_INVALID)
break; /* found supported format */
/* try preferred color mode */
surface_format = drm_fb_helper_find_color_mode_format(fb_helper,
plane->format_types,
plane->format_count,
fb_helper->preferred_bpp);
if (surface_format != DRM_FORMAT_INVALID)
break; /* found supported format */
}
if (surface_format == DRM_FORMAT_INVALID) {
/*
* If none of the given color modes works, fall back
* to XRGB8888. Drivers are expected to provide this
* format for compatibility with legacy applications.
*/
drm_warn(dev, "No compatible format found\n");
surface_format = drm_driver_legacy_fb_format(dev, 32, 24);
}
info = drm_format_info(surface_format);
sizes->surface_bpp = drm_format_info_bpp(info, 0);
sizes->surface_depth = info->depth;
/* first up get a count of crtcs now in use and new min/maxes width/heights */
crtc_count = 0;
drm_client_for_each_modeset(mode_set, client) {
struct drm_display_mode *desired_mode;
int x, y, j;
/* in case of tile group, are we the last tile vert or horiz?
* If no tile group you are always the last one both vertically
* and horizontally
*/
bool lastv = true, lasth = true;
desired_mode = mode_set->mode;
if (!desired_mode)
continue;
crtc_count++;
x = mode_set->x;
y = mode_set->y;
sizes->surface_width =
max_t(u32, desired_mode->hdisplay + x, sizes->surface_width);
sizes->surface_height =
max_t(u32, desired_mode->vdisplay + y, sizes->surface_height);
for (j = 0; j < mode_set->num_connectors; j++) {
struct drm_connector *connector = mode_set->connectors[j];
if (connector->has_tile &&
desired_mode->hdisplay == connector->tile_h_size &&
desired_mode->vdisplay == connector->tile_v_size) {
lasth = (connector->tile_h_loc == (connector->num_h_tile - 1));
lastv = (connector->tile_v_loc == (connector->num_v_tile - 1));
/* cloning to multiple tiles is just crazy-talk, so: */
break;
}
}
if (lasth)
sizes->fb_width = min_t(u32, desired_mode->hdisplay + x, sizes->fb_width);
if (lastv)
sizes->fb_height = min_t(u32, desired_mode->vdisplay + y, sizes->fb_height);
}
if (crtc_count == 0 || sizes->fb_width == -1 || sizes->fb_height == -1) {
drm_info(dev, "Cannot find any crtc or sizes\n");
return -EAGAIN;
}
return 0;
}
static int drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
struct drm_mode_config *config = &dev->mode_config;
int ret;
mutex_lock(&client->modeset_mutex);
ret = __drm_fb_helper_find_sizes(fb_helper, sizes);
mutex_unlock(&client->modeset_mutex);
if (ret)
return ret;
/* Handle our overallocation */
sizes->surface_height *= drm_fbdev_overalloc;
sizes->surface_height /= 100;
if (sizes->surface_height > config->max_height) {
drm_dbg_kms(dev, "Fbdev over-allocation too large; clamping height to %d\n",
config->max_height);
sizes->surface_height = config->max_height;
}
return 0;
}
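/*
* Example: with the default drm_fbdev_overalloc of 100 the height is left
* unchanged, while a value of 200 doubles surface_height (e.g. 768 -> 1536),
* giving fbcon a second virtual screen to pan into for double buffering,
* subject to the mode_config.max_height clamp above.
*/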
/*
* Allocates the backing storage and sets up the fbdev info structure through
* the ->fb_probe callback.
*/
static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
struct drm_fb_helper_surface_size sizes;
int ret;
ret = drm_fb_helper_find_sizes(fb_helper, &sizes);
if (ret) {
/* First time: disable all crtc's.. */
if (!fb_helper->deferred_setup)
drm_client_modeset_commit(client);
return ret;
}
/* push down into drivers */
ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
if (ret < 0)
return ret;
strcpy(fb_helper->fb->comm, "[fbcon]");
/* Set the fb info for vgaswitcheroo clients. Does nothing otherwise. */
if (dev_is_pci(dev->dev))
vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), fb_helper->info);
return 0;
}
static void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
bool is_color_indexed)
{
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = is_color_indexed ? FB_VISUAL_PSEUDOCOLOR
: FB_VISUAL_TRUECOLOR;
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;
info->fix.type_aux = 0;
info->fix.xpanstep = 1; /* doing it in hw */
info->fix.ypanstep = 1; /* doing it in hw */
info->fix.ywrapstep = 0;
info->fix.accel = FB_ACCEL_NONE;
info->fix.line_length = pitch;
}
static void drm_fb_helper_fill_var(struct fb_info *info,
struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
struct drm_framebuffer *fb = fb_helper->fb;
const struct drm_format_info *format = fb->format;
switch (format->format) {
case DRM_FORMAT_C1:
case DRM_FORMAT_C2:
case DRM_FORMAT_C4:
/* supported format with sub-byte pixels */
break;
default:
WARN_ON((drm_format_info_block_width(format, 0) > 1) ||
(drm_format_info_block_height(format, 0) > 1));
break;
}
info->pseudo_palette = fb_helper->pseudo_palette;
info->var.xoffset = 0;
info->var.yoffset = 0;
__fill_var(&info->var, info, fb);
info->var.activate = FB_ACTIVATE_NOW;
drm_fb_helper_fill_pixel_fmt(&info->var, format);
info->var.xres = fb_width;
info->var.yres = fb_height;
}
/**
* drm_fb_helper_fill_info - initializes fbdev information
* @info: fbdev instance to set up
* @fb_helper: fb helper instance to use as template
* @sizes: describes fbdev size and scanout surface size
*
* Sets up the variable and fixed fbdev metainformation from the given fb helper
* instance and the drm framebuffer allocated in &drm_fb_helper.fb.
*
* Drivers should call this (or their equivalent setup code) from their
* &drm_fb_helper_funcs.fb_probe callback after having allocated the fbdev
* backing storage framebuffer.
*/
void drm_fb_helper_fill_info(struct fb_info *info,
struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_framebuffer *fb = fb_helper->fb;
drm_fb_helper_fill_fix(info, fb->pitches[0],
fb->format->is_color_indexed);
drm_fb_helper_fill_var(info, fb_helper,
sizes->fb_width, sizes->fb_height);
info->par = fb_helper;
/*
* The DRM drivers fbdev emulation device name can be confusing if the
* driver name also has a "drm" suffix on it, leading to names such as
* "simpledrmdrmfb" in /proc/fb. Unfortunately, it's an uAPI and can't
* be changed due to user-space tools (e.g. pm-utils) matching against it.
*/
snprintf(info->fix.id, sizeof(info->fix.id), "%sdrmfb",
fb_helper->dev->driver->name);
}
EXPORT_SYMBOL(drm_fb_helper_fill_info);
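/*
* Hedged sketch of the tail end of a &drm_fb_helper_funcs.fb_probe
* implementation that uses the helpers above. Buffer allocation and the
* "my_fb_ops"/"vaddr" names are placeholders; error handling is omitted.
*
*	info = drm_fb_helper_alloc_info(fb_helper);
*	if (IS_ERR(info))
*		return PTR_ERR(info);
*
*	fb_helper->fb = fb;			// driver-created framebuffer
*	info->fbops = &my_fb_ops;
*	info->screen_buffer = vaddr;		// kernel mapping of the buffer
*	info->fix.smem_len = fb->height * fb->pitches[0];
*	drm_fb_helper_fill_info(info, fb_helper, sizes);
*/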
/*
* This is a continuation of drm_setup_crtcs() that sets up anything related
* to the framebuffer. During initialization, drm_setup_crtcs() is called before
* the framebuffer has been allocated (fb_helper->fb and fb_helper->info).
* So, any setup that touches those fields needs to be done here instead of in
* drm_setup_crtcs().
*/
static void drm_setup_crtcs_fb(struct drm_fb_helper *fb_helper)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_connector_list_iter conn_iter;
struct fb_info *info = fb_helper->info;
unsigned int rotation, sw_rotations = 0;
struct drm_connector *connector;
struct drm_mode_set *modeset;
mutex_lock(&client->modeset_mutex);
drm_client_for_each_modeset(modeset, client) {
if (!modeset->num_connectors)
continue;
modeset->fb = fb_helper->fb;
if (drm_client_rotation(modeset, &rotation))
/* Rotating in hardware, fbcon should not rotate */
sw_rotations |= DRM_MODE_ROTATE_0;
else
sw_rotations |= rotation;
}
mutex_unlock(&client->modeset_mutex);
drm_connector_list_iter_begin(fb_helper->dev, &conn_iter);
drm_client_for_each_connector_iter(connector, &conn_iter) {
/* use first connected connector for the physical dimensions */
if (connector->status == connector_status_connected) {
info->var.width = connector->display_info.width_mm;
info->var.height = connector->display_info.height_mm;
break;
}
}
drm_connector_list_iter_end(&conn_iter);
switch (sw_rotations) {
case DRM_MODE_ROTATE_0:
info->fbcon_rotate_hint = FB_ROTATE_UR;
break;
case DRM_MODE_ROTATE_90:
info->fbcon_rotate_hint = FB_ROTATE_CCW;
break;
case DRM_MODE_ROTATE_180:
info->fbcon_rotate_hint = FB_ROTATE_UD;
break;
case DRM_MODE_ROTATE_270:
info->fbcon_rotate_hint = FB_ROTATE_CW;
break;
default:
/*
* Multiple bits are set / multiple rotations requested.
* fbcon cannot handle separate rotation settings per
* output, so fall back to unrotated.
*/
info->fbcon_rotate_hint = FB_ROTATE_UR;
}
}
/* Note: Drops fb_helper->lock before returning. */
static int
__drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
struct fb_info *info;
unsigned int width, height;
int ret;
width = dev->mode_config.max_width;
height = dev->mode_config.max_height;
drm_client_modeset_probe(&fb_helper->client, width, height);
ret = drm_fb_helper_single_fb_probe(fb_helper);
if (ret < 0) {
if (ret == -EAGAIN) {
fb_helper->deferred_setup = true;
ret = 0;
}
mutex_unlock(&fb_helper->lock);
return ret;
}
drm_setup_crtcs_fb(fb_helper);
fb_helper->deferred_setup = false;
info = fb_helper->info;
info->var.pixclock = 0;
if (!drm_leak_fbdev_smem)
info->flags |= FBINFO_HIDE_SMEM_START;
/* Need to drop locks to avoid recursive deadlock in
* register_framebuffer. This is ok because the only thing left to do is
* register the fbdev emulation instance in kernel_fb_helper_list. */
mutex_unlock(&fb_helper->lock);
ret = register_framebuffer(info);
if (ret < 0)
return ret;
drm_info(dev, "fb%d: %s frame buffer device\n",
info->node, info->fix.id);
mutex_lock(&kernel_fb_helper_lock);
if (list_empty(&kernel_fb_helper_list))
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
mutex_unlock(&kernel_fb_helper_lock);
return 0;
}
/**
* drm_fb_helper_initial_config - setup a sane initial connector configuration
* @fb_helper: fb_helper device struct
*
* Scans the CRTCs and connectors and tries to put together an initial setup.
* At the moment, this is a cloned configuration across all heads with
* a new framebuffer object as the backing store.
*
* Note that this also registers the fbdev and so allows userspace to call into
* the driver through the fbdev interfaces.
*
* This function will call down into the &drm_fb_helper_funcs.fb_probe callback
* to let the driver allocate and initialize the fbdev info structure and the
* drm framebuffer used to back the fbdev. drm_fb_helper_fill_info() is provided
* as a helper to setup simple default values for the fbdev info structure.
*
* HANG DEBUGGING:
*
* When you have fbcon support built-in or already loaded, this function will do
* a full modeset to setup the fbdev console. Due to locking misdesign in the
* VT/fbdev subsystem that entire modeset sequence has to be done while holding
* console_lock. Until console_unlock is called no dmesg lines will be sent out
* to consoles, not even serial console. This means when your driver crashes,
* you will see absolutely nothing else but a system stuck in this function,
* with no further output. Any kind of printk() you place within your own driver
* or in the drm core modeset code will also never show up.
*
* Standard debug practice is to run the fbcon setup without taking the
* console_lock as a hack, to be able to see backtraces and crashes on the
* serial line. This can be done by setting the fb.lockless_register_fb=1 kernel
* cmdline option.
*
* The other option is to just disable fbdev emulation since very likely the
* first modeset from userspace will crash in the same way, and is even easier
* to debug. This can be done by setting the drm_kms_helper.fbdev_emulation=0
* kernel cmdline option.
*
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper)
{
int ret;
if (!drm_fbdev_emulation)
return 0;
mutex_lock(&fb_helper->lock);
ret = __drm_fb_helper_initial_config_and_unlock(fb_helper);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_initial_config);
/**
* drm_fb_helper_hotplug_event - respond to a hotplug notification by
* probing all the outputs attached to the fb
* @fb_helper: driver-allocated fbdev helper, can be NULL
*
* Scan the connectors attached to the fb_helper and try to put together a
* setup after notification of a change in output configuration.
*
* Called at runtime, takes the mode config locks to be able to check/change the
* modeset configuration. Must be run from process context (which usually means
* either the output polling work or a work item launched from the driver's
* hotplug interrupt).
*
* Note that drivers may call this even before calling
* drm_fb_helper_initial_config but only after drm_fb_helper_init. This allows
* for a race-free fbcon setup and will make sure that the fbdev emulation will
* not miss any hotplug events.
*
* RETURNS:
* 0 on success and a non-zero error code otherwise.
*/
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
{
int err = 0;
if (!drm_fbdev_emulation || !fb_helper)
return 0;
mutex_lock(&fb_helper->lock);
if (fb_helper->deferred_setup) {
err = __drm_fb_helper_initial_config_and_unlock(fb_helper);
return err;
}
if (!fb_helper->fb || !drm_master_internal_acquire(fb_helper->dev)) {
fb_helper->delayed_hotplug = true;
mutex_unlock(&fb_helper->lock);
return err;
}
drm_master_internal_release(fb_helper->dev);
drm_dbg_kms(fb_helper->dev, "\n");
drm_client_modeset_probe(&fb_helper->client, fb_helper->fb->width, fb_helper->fb->height);
drm_setup_crtcs_fb(fb_helper);
mutex_unlock(&fb_helper->lock);
drm_fb_helper_set_par(fb_helper->info);
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
/**
* drm_fb_helper_lastclose - DRM driver lastclose helper for fbdev emulation
* @dev: DRM device
*
* This function can be used as the &drm_driver->lastclose callback for drivers
* that only need to call drm_fb_helper_restore_fbdev_mode_unlocked().
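 *
 * Typical wiring in a driver (sketch; only the .lastclose field name is taken
 * from &struct drm_driver), alongside the driver's other callbacks::
 *
 *     static const struct drm_driver my_driver = {
 *         .lastclose = drm_fb_helper_lastclose,
 *     };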
*/
void drm_fb_helper_lastclose(struct drm_device *dev)
{
drm_fb_helper_restore_fbdev_mode_unlocked(dev->fb_helper);
}
EXPORT_SYMBOL(drm_fb_helper_lastclose);
/**
* drm_fb_helper_output_poll_changed - DRM mode config \.output_poll_changed
* helper for fbdev emulation
* @dev: DRM device
*
* This function can be used as the
* &drm_mode_config_funcs.output_poll_changed callback for drivers that only
 * need to call drm_fb_helper_hotplug_event().
*/
void drm_fb_helper_output_poll_changed(struct drm_device *dev)
{
drm_fb_helper_hotplug_event(dev->fb_helper);
}
EXPORT_SYMBOL(drm_fb_helper_output_poll_changed);
| linux-master | drivers/gpu/drm/drm_fb_helper.c |
/*
* Copyright (C) 2013, NVIDIA Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/module.h>
#include <drm/drm_crtc.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
static DEFINE_MUTEX(panel_lock);
static LIST_HEAD(panel_list);
/**
* DOC: drm panel
*
* The DRM panel helpers allow drivers to register panel objects with a
* central registry and provide functions to retrieve those panels in display
* drivers.
*
* For easy integration into drivers using the &drm_bridge infrastructure please
 * take a look at drm_panel_bridge_add() and devm_drm_panel_bridge_add().
*/
/**
* drm_panel_init - initialize a panel
* @panel: DRM panel
* @dev: parent device of the panel
* @funcs: panel operations
* @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to
* the panel interface
*
* Initialize the panel structure for subsequent registration with
* drm_panel_add().
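 *
 * A probe-time sketch (the connector type and "priv" are hypothetical)::
 *
 *     drm_panel_init(&priv->panel, dev, &my_panel_funcs,
 *                    DRM_MODE_CONNECTOR_DSI);
 *     drm_panel_add(&priv->panel);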
*/
void drm_panel_init(struct drm_panel *panel, struct device *dev,
const struct drm_panel_funcs *funcs, int connector_type)
{
INIT_LIST_HEAD(&panel->list);
INIT_LIST_HEAD(&panel->followers);
mutex_init(&panel->follower_lock);
panel->dev = dev;
panel->funcs = funcs;
panel->connector_type = connector_type;
}
EXPORT_SYMBOL(drm_panel_init);
/**
* drm_panel_add - add a panel to the global registry
* @panel: panel to add
*
* Add a panel to the global registry so that it can be looked up by display
* drivers.
*/
void drm_panel_add(struct drm_panel *panel)
{
mutex_lock(&panel_lock);
list_add_tail(&panel->list, &panel_list);
mutex_unlock(&panel_lock);
}
EXPORT_SYMBOL(drm_panel_add);
/**
* drm_panel_remove - remove a panel from the global registry
* @panel: DRM panel
*
* Removes a panel from the global registry.
*/
void drm_panel_remove(struct drm_panel *panel)
{
mutex_lock(&panel_lock);
list_del_init(&panel->list);
mutex_unlock(&panel_lock);
}
EXPORT_SYMBOL(drm_panel_remove);
/**
* drm_panel_prepare - power on a panel
* @panel: DRM panel
*
* Calling this function will enable power and deassert any reset signals to
* the panel. After this has completed it is possible to communicate with any
* integrated circuitry via a command bus.
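 *
 * In a display pipeline the usual ordering is (a sketch, not lifted from any
 * particular driver): prepare before the video stream starts, enable once it
 * is running, and the reverse order on the way down::
 *
 *     drm_panel_prepare(panel);
 *     drm_panel_enable(panel);
 *
 *     drm_panel_disable(panel);
 *     drm_panel_unprepare(panel);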
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_panel_prepare(struct drm_panel *panel)
{
struct drm_panel_follower *follower;
int ret;
if (!panel)
return -EINVAL;
if (panel->prepared) {
dev_warn(panel->dev, "Skipping prepare of already prepared panel\n");
return 0;
}
mutex_lock(&panel->follower_lock);
if (panel->funcs && panel->funcs->prepare) {
ret = panel->funcs->prepare(panel);
if (ret < 0)
goto exit;
}
panel->prepared = true;
list_for_each_entry(follower, &panel->followers, list) {
ret = follower->funcs->panel_prepared(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
follower->funcs->panel_prepared, ret);
}
ret = 0;
exit:
mutex_unlock(&panel->follower_lock);
return ret;
}
EXPORT_SYMBOL(drm_panel_prepare);
/**
* drm_panel_unprepare - power off a panel
* @panel: DRM panel
*
* Calling this function will completely power off a panel (assert the panel's
* reset, turn off power supplies, ...). After this function has completed, it
* is usually no longer possible to communicate with the panel until another
* call to drm_panel_prepare().
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_panel_unprepare(struct drm_panel *panel)
{
struct drm_panel_follower *follower;
int ret;
if (!panel)
return -EINVAL;
if (!panel->prepared) {
dev_warn(panel->dev, "Skipping unprepare of already unprepared panel\n");
return 0;
}
mutex_lock(&panel->follower_lock);
list_for_each_entry(follower, &panel->followers, list) {
ret = follower->funcs->panel_unpreparing(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
follower->funcs->panel_unpreparing, ret);
}
if (panel->funcs && panel->funcs->unprepare) {
ret = panel->funcs->unprepare(panel);
if (ret < 0)
goto exit;
}
panel->prepared = false;
ret = 0;
exit:
mutex_unlock(&panel->follower_lock);
return ret;
}
EXPORT_SYMBOL(drm_panel_unprepare);
/**
* drm_panel_enable - enable a panel
* @panel: DRM panel
*
* Calling this function will cause the panel display drivers to be turned on
* and the backlight to be enabled. Content will be visible on screen after
* this call completes.
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_panel_enable(struct drm_panel *panel)
{
int ret;
if (!panel)
return -EINVAL;
if (panel->enabled) {
dev_warn(panel->dev, "Skipping enable of already enabled panel\n");
return 0;
}
if (panel->funcs && panel->funcs->enable) {
ret = panel->funcs->enable(panel);
if (ret < 0)
return ret;
}
panel->enabled = true;
ret = backlight_enable(panel->backlight);
if (ret < 0)
DRM_DEV_INFO(panel->dev, "failed to enable backlight: %d\n",
ret);
return 0;
}
EXPORT_SYMBOL(drm_panel_enable);
/**
* drm_panel_disable - disable a panel
* @panel: DRM panel
*
* This will typically turn off the panel's backlight or disable the display
* drivers. For smart panels it should still be possible to communicate with
* the integrated circuitry via any command bus after this call.
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_panel_disable(struct drm_panel *panel)
{
int ret;
if (!panel)
return -EINVAL;
if (!panel->enabled) {
dev_warn(panel->dev, "Skipping disable of already disabled panel\n");
return 0;
}
ret = backlight_disable(panel->backlight);
if (ret < 0)
DRM_DEV_INFO(panel->dev, "failed to disable backlight: %d\n",
ret);
if (panel->funcs && panel->funcs->disable) {
ret = panel->funcs->disable(panel);
if (ret < 0)
return ret;
}
panel->enabled = false;
return 0;
}
EXPORT_SYMBOL(drm_panel_disable);
/**
* drm_panel_get_modes - probe the available display modes of a panel
* @panel: DRM panel
* @connector: DRM connector
*
* The modes probed from the panel are automatically added to the connector
* that the panel is attached to.
*
* Return: The number of modes available from the panel on success or a
* negative error code on failure.
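 *
 * A hypothetical connector .get_modes implementation forwarding to the panel
 * (names other than drm_panel_get_modes() are assumptions)::
 *
 *     static int my_connector_get_modes(struct drm_connector *connector)
 *     {
 *         struct my_output *out = connector_to_my_output(connector);
 *
 *         return drm_panel_get_modes(out->panel, connector);
 *     }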
*/
int drm_panel_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
if (!panel)
return -EINVAL;
if (panel->funcs && panel->funcs->get_modes)
return panel->funcs->get_modes(panel, connector);
return -EOPNOTSUPP;
}
EXPORT_SYMBOL(drm_panel_get_modes);
#ifdef CONFIG_OF
/**
* of_drm_find_panel - look up a panel using a device tree node
* @np: device tree node of the panel
*
* Searches the set of registered panels for one that matches the given device
* tree node. If a matching panel is found, return a pointer to it.
*
* Return: A pointer to the panel registered for the specified device tree
* node or an ERR_PTR() if no panel matching the device tree node can be found.
*
* Possible error codes returned by this function:
*
* - EPROBE_DEFER: the panel device has not been probed yet, and the caller
* should retry later
* - ENODEV: the device is not available (status != "okay" or "ok")
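 *
 * A lookup sketch mirroring drm_panel_add_follower() below; the error may be
 * -EPROBE_DEFER and should usually be propagated::
 *
 *     np = of_parse_phandle(dev->of_node, "panel", 0);
 *     panel = of_drm_find_panel(np);
 *     of_node_put(np);
 *     if (IS_ERR(panel))
 *         return PTR_ERR(panel);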
*/
struct drm_panel *of_drm_find_panel(const struct device_node *np)
{
struct drm_panel *panel;
if (!of_device_is_available(np))
return ERR_PTR(-ENODEV);
mutex_lock(&panel_lock);
list_for_each_entry(panel, &panel_list, list) {
if (panel->dev->of_node == np) {
mutex_unlock(&panel_lock);
return panel;
}
}
mutex_unlock(&panel_lock);
return ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL(of_drm_find_panel);
/**
* of_drm_get_panel_orientation - look up the orientation of the panel through
* the "rotation" binding from a device tree node
* @np: device tree node of the panel
* @orientation: orientation enum to be filled in
*
* Looks up the rotation of a panel in the device tree. The orientation of the
* panel is expressed as a property name "rotation" in the device tree. The
 * rotation in the device tree is counterclockwise.
*
* Return: 0 when a valid rotation value (0, 90, 180, or 270) is read or the
* rotation property doesn't exist. Return a negative error code on failure.
*/
int of_drm_get_panel_orientation(const struct device_node *np,
enum drm_panel_orientation *orientation)
{
int rotation, ret;
ret = of_property_read_u32(np, "rotation", &rotation);
if (ret == -EINVAL) {
/* Don't return an error if there's no rotation property. */
*orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
return 0;
}
if (ret < 0)
return ret;
if (rotation == 0)
*orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
else if (rotation == 90)
*orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
else if (rotation == 180)
*orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
else if (rotation == 270)
*orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
else
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(of_drm_get_panel_orientation);
#endif
/**
* drm_is_panel_follower() - Check if the device is a panel follower
* @dev: The 'struct device' to check
*
* This checks to see if a device needs to be power sequenced together with
* a panel using the panel follower API.
* At the moment panels can only be followed on device tree enabled systems.
* The "panel" property of the follower points to the panel to be followed.
*
* Return: true if we should be power sequenced with a panel; false otherwise.
*/
bool drm_is_panel_follower(struct device *dev)
{
/*
* The "panel" property is actually a phandle, but for simplicity we
* don't bother trying to parse it here. We just need to know if the
* property is there.
*/
return of_property_read_bool(dev->of_node, "panel");
}
EXPORT_SYMBOL(drm_is_panel_follower);
/**
* drm_panel_add_follower() - Register something to follow panel state.
* @follower_dev: The 'struct device' for the follower.
* @follower: The panel follower descriptor for the follower.
*
* A panel follower is called right after preparing the panel and right before
 * unpreparing the panel. Its primary intention is to power on an associated
 * touchscreen, though it could be used for any similar device. Multiple
 * devices are allowed to follow the same panel.
*
* If a follower is added to a panel that's already been turned on, the
* follower's prepare callback is called right away.
*
* At the moment panels can only be followed on device tree enabled systems.
* The "panel" property of the follower points to the panel to be followed.
*
* Return: 0 or an error code. Note that -ENODEV means that we detected that
* follower_dev is not actually following a panel. The caller may
* choose to ignore this return value if following a panel is optional.
*/
int drm_panel_add_follower(struct device *follower_dev,
struct drm_panel_follower *follower)
{
struct device_node *panel_np;
struct drm_panel *panel;
int ret;
panel_np = of_parse_phandle(follower_dev->of_node, "panel", 0);
if (!panel_np)
return -ENODEV;
panel = of_drm_find_panel(panel_np);
of_node_put(panel_np);
if (IS_ERR(panel))
return PTR_ERR(panel);
get_device(panel->dev);
follower->panel = panel;
mutex_lock(&panel->follower_lock);
list_add_tail(&follower->list, &panel->followers);
if (panel->prepared) {
ret = follower->funcs->panel_prepared(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
follower->funcs->panel_prepared, ret);
}
mutex_unlock(&panel->follower_lock);
return 0;
}
EXPORT_SYMBOL(drm_panel_add_follower);
/**
* drm_panel_remove_follower() - Reverse drm_panel_add_follower().
* @follower: The panel follower descriptor for the follower.
*
* Undo drm_panel_add_follower(). This includes calling the follower's
* unprepare function if we're removed from a panel that's currently prepared.
*/
void drm_panel_remove_follower(struct drm_panel_follower *follower)
{
struct drm_panel *panel = follower->panel;
int ret;
mutex_lock(&panel->follower_lock);
if (panel->prepared) {
ret = follower->funcs->panel_unpreparing(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
follower->funcs->panel_unpreparing, ret);
}
list_del_init(&follower->list);
mutex_unlock(&panel->follower_lock);
put_device(panel->dev);
}
EXPORT_SYMBOL(drm_panel_remove_follower);
static void drm_panel_remove_follower_void(void *follower)
{
drm_panel_remove_follower(follower);
}
/**
* devm_drm_panel_add_follower() - devm version of drm_panel_add_follower()
* @follower_dev: The 'struct device' for the follower.
* @follower: The panel follower descriptor for the follower.
*
* Handles calling drm_panel_remove_follower() using devm on the follower_dev.
*
* Return: 0 or an error code.
*/
int devm_drm_panel_add_follower(struct device *follower_dev,
struct drm_panel_follower *follower)
{
int ret;
ret = drm_panel_add_follower(follower_dev, follower);
if (ret)
return ret;
return devm_add_action_or_reset(follower_dev,
drm_panel_remove_follower_void, follower);
}
EXPORT_SYMBOL(devm_drm_panel_add_follower);
#if IS_REACHABLE(CONFIG_BACKLIGHT_CLASS_DEVICE)
/**
* drm_panel_of_backlight - use backlight device node for backlight
* @panel: DRM panel
*
* Use this function to enable backlight handling if your panel
* uses device tree and has a backlight phandle.
*
 * When the panel is enabled, the backlight will be enabled after a
 * successful call to &drm_panel_funcs.enable().
 *
 * When the panel is disabled, the backlight will be disabled before the
 * call to &drm_panel_funcs.disable().
*
* A typical implementation for a panel driver supporting device tree
* will call this function at probe time. Backlight will then be handled
* transparently without requiring any intervention from the driver.
* drm_panel_of_backlight() must be called after the call to drm_panel_init().
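 *
 * Probe-time sketch (ordering per the note above; "priv" is hypothetical)::
 *
 *     drm_panel_init(&priv->panel, dev, &my_panel_funcs,
 *                    DRM_MODE_CONNECTOR_DPI);
 *     err = drm_panel_of_backlight(&priv->panel);
 *     if (err)
 *         return err;
 *     drm_panel_add(&priv->panel);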
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_panel_of_backlight(struct drm_panel *panel)
{
struct backlight_device *backlight;
if (!panel || !panel->dev)
return -EINVAL;
backlight = devm_of_find_backlight(panel->dev);
if (IS_ERR(backlight))
return PTR_ERR(backlight);
panel->backlight = backlight;
return 0;
}
EXPORT_SYMBOL(drm_panel_of_backlight);
#endif
MODULE_AUTHOR("Thierry Reding <[email protected]>");
MODULE_DESCRIPTION("DRM panel infrastructure");
MODULE_LICENSE("GPL and additional rights");
| linux-master | drivers/gpu/drm/drm_panel.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* Copyright (c) 2012 David Airlie <[email protected]>
* Copyright (c) 2013 David Herrmann <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
/**
* DOC: vma offset manager
*
* The vma-manager is responsible to map arbitrary driver-dependent memory
* regions into the linear user address-space. It provides offsets to the
* caller which can then be used on the address_space of the drm-device. It
* takes care to not overlap regions, size them appropriately and to not
* confuse mm-core by inconsistent fake vm_pgoff fields.
* Drivers shouldn't use this for object placement in VMEM. This manager should
* only be used to manage mappings into linear user-space VMs.
*
* We use drm_mm as backend to manage object allocations. But it is highly
* optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
* speed up offset lookups.
*
* You must not use multiple offset managers on a single address_space.
* Otherwise, mm-core will be unable to tear down memory mappings as the VM will
* no longer be linear.
*
* This offset manager works on page-based addresses. That is, every argument
* and return code (with the exception of drm_vma_node_offset_addr()) is given
* in number of pages, not number of bytes. That means, object sizes and offsets
* must always be page-aligned (as usual).
* If you want to get a valid byte-based user-space address for a given offset,
* please see drm_vma_node_offset_addr().
*
* Additionally to offset management, the vma offset manager also handles access
* management. For every open-file context that is allowed to access a given
* node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
* open-file with the offset of the node will fail with -EACCES. To revoke
* access again, use drm_vma_node_revoke(). However, the caller is responsible
* for destroying already existing mappings, if required.
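 *
 * Putting the pieces together, a sketched buffer-object lifecycle (the object
 * layout is hypothetical) is: add the node at object creation, allow each
 * open-file that may mmap it, hand the offset to user-space, and undo both on
 * teardown::
 *
 *     ret = drm_vma_offset_add(mgr, &obj->vma_node, obj->size >> PAGE_SHIFT);
 *     ret = drm_vma_node_allow(&obj->vma_node, file);
 *     offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 *     drm_vma_node_revoke(&obj->vma_node, file);
 *     drm_vma_offset_remove(mgr, &obj->vma_node);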
*/
/**
* drm_vma_offset_manager_init - Initialize new offset-manager
* @mgr: Manager object
* @page_offset: Offset of available memory area (page-based)
* @size: Size of available address space range (page-based)
*
* Initialize a new offset-manager. The offset and area size available for the
* manager are given as @page_offset and @size. Both are interpreted as
* page-numbers, not bytes.
*
* Adding/removing nodes from the manager is locked internally and protected
* against concurrent access. However, node allocation and destruction is left
* for the caller. While calling into the vma-manager, a given node must
* always be guaranteed to be referenced.
*/
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
unsigned long page_offset, unsigned long size)
{
rwlock_init(&mgr->vm_lock);
drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);
/**
* drm_vma_offset_manager_destroy() - Destroy offset manager
* @mgr: Manager object
*
* Destroy an object manager which was previously created via
* drm_vma_offset_manager_init(). The caller must remove all allocated nodes
* before destroying the manager. Otherwise, drm_mm will refuse to free the
* requested resources.
*
* The manager must not be accessed after this function is called.
*/
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
drm_mm_takedown(&mgr->vm_addr_space_mm);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
/**
* drm_vma_offset_lookup_locked() - Find node in offset space
* @mgr: Manager object
* @start: Start address for object (page-based)
* @pages: Size of object (page-based)
*
* Find a node given a start address and object size. This returns the _best_
* match for the given node. That is, @start may point somewhere into a valid
* region and the given node will be returned, as long as the node spans the
* whole requested area (given the size in number of pages as @pages).
*
* Note that before lookup the vma offset manager lookup lock must be acquired
* with drm_vma_offset_lock_lookup(). See there for an example. This can then be
* used to implement weakly referenced lookups using kref_get_unless_zero().
*
* Example:
*
* ::
*
* drm_vma_offset_lock_lookup(mgr);
 *     node = drm_vma_offset_lookup_locked(mgr, start, pages);
* if (node)
* kref_get_unless_zero(container_of(node, sth, entr));
* drm_vma_offset_unlock_lookup(mgr);
*
* RETURNS:
* Returns NULL if no suitable node can be found. Otherwise, the best match
* is returned. It's the caller's responsibility to make sure the node doesn't
* get destroyed before the caller can access it.
*/
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages)
{
struct drm_mm_node *node, *best;
struct rb_node *iter;
unsigned long offset;
iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node;
best = NULL;
while (likely(iter)) {
node = rb_entry(iter, struct drm_mm_node, rb);
offset = node->start;
if (start >= offset) {
iter = iter->rb_right;
best = node;
if (start == offset)
break;
} else {
iter = iter->rb_left;
}
}
/* verify that the node spans the requested area */
if (best) {
offset = best->start + best->size;
if (offset < start + pages)
best = NULL;
}
if (!best)
return NULL;
return container_of(best, struct drm_vma_offset_node, vm_node);
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
/**
* drm_vma_offset_add() - Add offset node to manager
* @mgr: Manager object
* @node: Node to be added
* @pages: Allocation size visible to user-space (in number of pages)
*
* Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
* pages.
* After this call succeeds, you can access the offset of the node until it
* is removed again.
*
* If this call fails, it is safe to retry the operation or call
* drm_vma_offset_remove(), anyway. However, no cleanup is required in that
* case.
*
* @pages is not required to be the same size as the underlying memory object
* that you want to map. It only limits the size that user-space can map into
* their address space.
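 *
 * Example (sketch; the object layout is hypothetical, the conversion from a
 * byte size to a page count is the important part)::
 *
 *     ret = drm_vma_offset_add(mgr, &obj->vma_node,
 *                              PAGE_ALIGN(obj->size) >> PAGE_SHIFT);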
*
* RETURNS:
* 0 on success, negative error code on failure.
*/
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
struct drm_vma_offset_node *node, unsigned long pages)
{
int ret = 0;
write_lock(&mgr->vm_lock);
if (!drm_mm_node_allocated(&node->vm_node))
ret = drm_mm_insert_node(&mgr->vm_addr_space_mm,
&node->vm_node, pages);
write_unlock(&mgr->vm_lock);
return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);
/**
* drm_vma_offset_remove() - Remove offset node from manager
* @mgr: Manager object
* @node: Node to be removed
*
* Remove a node from the offset manager. If the node wasn't added before, this
* does nothing. After this call returns, the offset and size will be 0 until a
* new offset is allocated via drm_vma_offset_add() again. Helper functions like
* drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
* offset is allocated.
*/
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
struct drm_vma_offset_node *node)
{
write_lock(&mgr->vm_lock);
if (drm_mm_node_allocated(&node->vm_node)) {
drm_mm_remove_node(&node->vm_node);
memset(&node->vm_node, 0, sizeof(node->vm_node));
}
write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);
static int vma_node_allow(struct drm_vma_offset_node *node,
struct drm_file *tag, bool ref_counted)
{
struct rb_node **iter;
struct rb_node *parent = NULL;
struct drm_vma_offset_file *new, *entry;
int ret = 0;
/* Preallocate entry to avoid atomic allocations below. It is quite
* unlikely that an open-file is added twice to a single node so we
* don't optimize for this case. OOM is checked below only if the entry
* is actually used. */
new = kmalloc(sizeof(*entry), GFP_KERNEL);
write_lock(&node->vm_lock);
iter = &node->vm_files.rb_node;
while (likely(*iter)) {
parent = *iter;
entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
if (tag == entry->vm_tag) {
if (ref_counted)
entry->vm_count++;
goto unlock;
} else if (tag > entry->vm_tag) {
iter = &(*iter)->rb_right;
} else {
iter = &(*iter)->rb_left;
}
}
if (!new) {
ret = -ENOMEM;
goto unlock;
}
new->vm_tag = tag;
new->vm_count = 1;
rb_link_node(&new->vm_rb, parent, iter);
rb_insert_color(&new->vm_rb, &node->vm_files);
new = NULL;
unlock:
write_unlock(&node->vm_lock);
kfree(new);
return ret;
}
/**
* drm_vma_node_allow - Add open-file to list of allowed users
* @node: Node to modify
 * @tag: Tag of file to add
*
* Add @tag to the list of allowed open-files for this node. If @tag is
* already on this list, the ref-count is incremented.
*
* The list of allowed-users is preserved across drm_vma_offset_add() and
* drm_vma_offset_remove() calls. You may even call it if the node is currently
* not added to any offset-manager.
*
* You must remove all open-files the same number of times as you added them
* before destroying the node. Otherwise, you will leak memory.
*
* This is locked against concurrent access internally.
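 *
 * A common pairing (sketch, hook names hypothetical) is to call this from the
 * driver's GEM open hook and drm_vma_node_revoke() from the matching close
 * hook::
 *
 *     ret = drm_vma_node_allow(&obj->vma_node, file);
 *     ...
 *     drm_vma_node_revoke(&obj->vma_node, file);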
*
* RETURNS:
* 0 on success, negative error code on internal failure (out-of-mem)
*/
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
{
return vma_node_allow(node, tag, true);
}
EXPORT_SYMBOL(drm_vma_node_allow);
/**
* drm_vma_node_allow_once - Add open-file to list of allowed users
* @node: Node to modify
 * @tag: Tag of file to add
*
* Add @tag to the list of allowed open-files for this node.
*
* The list of allowed-users is preserved across drm_vma_offset_add() and
* drm_vma_offset_remove() calls. You may even call it if the node is currently
* not added to any offset-manager.
*
 * Unlike drm_vma_node_allow(), this is not ref-counted; hence
 * drm_vma_node_revoke() should only be called once after this.
*
* This is locked against concurrent access internally.
*
* RETURNS:
* 0 on success, negative error code on internal failure (out-of-mem)
*/
int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag)
{
return vma_node_allow(node, tag, false);
}
EXPORT_SYMBOL(drm_vma_node_allow_once);
/**
* drm_vma_node_revoke - Remove open-file from list of allowed users
* @node: Node to modify
* @tag: Tag of file to remove
*
* Decrement the ref-count of @tag in the list of allowed open-files on @node.
* If the ref-count drops to zero, remove @tag from the list. You must call
* this once for every drm_vma_node_allow() on @tag.
*
* This is locked against concurrent access internally.
*
* If @tag is not on the list, nothing is done.
*/
void drm_vma_node_revoke(struct drm_vma_offset_node *node,
struct drm_file *tag)
{
struct drm_vma_offset_file *entry;
struct rb_node *iter;
write_lock(&node->vm_lock);
iter = node->vm_files.rb_node;
while (likely(iter)) {
entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
if (tag == entry->vm_tag) {
if (!--entry->vm_count) {
rb_erase(&entry->vm_rb, &node->vm_files);
kfree(entry);
}
break;
} else if (tag > entry->vm_tag) {
iter = iter->rb_right;
} else {
iter = iter->rb_left;
}
}
write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);
/**
* drm_vma_node_is_allowed - Check whether an open-file is granted access
* @node: Node to check
 * @tag: Tag of file to check
*
* Search the list in @node whether @tag is currently on the list of allowed
* open-files (see drm_vma_node_allow()).
*
* This is locked against concurrent access internally.
*
* RETURNS:
 * true if @tag is on the list
*/
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
struct drm_file *tag)
{
struct drm_vma_offset_file *entry;
struct rb_node *iter;
read_lock(&node->vm_lock);
iter = node->vm_files.rb_node;
while (likely(iter)) {
entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
if (tag == entry->vm_tag)
break;
else if (tag > entry->vm_tag)
iter = iter->rb_right;
else
iter = iter->rb_left;
}
read_unlock(&node->vm_lock);
return iter;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);
| linux-master | drivers/gpu/drm/drm_vma_manager.c |
/*
* \file drm_memory.c
* Memory management wrappers for DRM
*
* \author Rickard E. (Rik) Faith <[email protected]>
* \author Gareth Hughes <[email protected]>
*/
/*
* Created: Thu Feb 4 14:00:34 1999 by [email protected]
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include "drm_legacy.h"
#if IS_ENABLED(CONFIG_AGP)
#ifdef HAVE_PAGE_AGP
# include <asm/agp.h>
#else
# ifdef __powerpc__
# define PAGE_AGP pgprot_noncached_wc(PAGE_KERNEL)
# else
# define PAGE_AGP PAGE_KERNEL
# endif
#endif
static void *agp_remap(unsigned long offset, unsigned long size,
struct drm_device *dev)
{
unsigned long i, num_pages =
PAGE_ALIGN(size) / PAGE_SIZE;
struct drm_agp_mem *agpmem;
struct page **page_map;
struct page **phys_page_map;
void *addr;
size = PAGE_ALIGN(size);
#ifdef __alpha__
offset -= dev->hose->mem_space->start;
#endif
list_for_each_entry(agpmem, &dev->agp->memory, head)
if (agpmem->bound <= offset
&& (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
(offset + size))
break;
if (&agpmem->head == &dev->agp->memory)
return NULL;
/*
* OK, we're mapping AGP space on a chipset/platform on which memory accesses by
* the CPU do not get remapped by the GART. We fix this by using the kernel's
* page-table instead (that's probably faster anyhow...).
*/
/* note: use vmalloc() because num_pages could be large... */
page_map = vmalloc(array_size(num_pages, sizeof(struct page *)));
if (!page_map)
return NULL;
phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE);
for (i = 0; i < num_pages; ++i)
page_map[i] = phys_page_map[i];
addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
vfree(page_map);
return addr;
}
#else /* CONFIG_AGP */
static inline void *agp_remap(unsigned long offset, unsigned long size,
struct drm_device *dev)
{
return NULL;
}
#endif /* CONFIG_AGP */
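/*
 * Map a legacy map into kernel virtual address space. When the chipset cannot
 * access the AGP aperture directly (agp->cant_use_aperture), the mapping goes
 * through agp_remap() and the kernel page tables instead of ioremap().
 */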
void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev)
{
if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
map->handle = agp_remap(map->offset, map->size, dev);
else
map->handle = ioremap(map->offset, map->size);
}
EXPORT_SYMBOL(drm_legacy_ioremap);
void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
{
if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
map->handle = agp_remap(map->offset, map->size, dev);
else
map->handle = ioremap_wc(map->offset, map->size);
}
EXPORT_SYMBOL(drm_legacy_ioremap_wc);
void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
{
if (!map->handle || !map->size)
return;
if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
vunmap(map->handle);
else
iounmap(map->handle);
}
EXPORT_SYMBOL(drm_legacy_ioremapfree);
| linux-master | drivers/gpu/drm/drm_memory.c |
/*
* Copyright (c) 2006 Luc Verhaegen (quirks list)
* Copyright (c) 2007-2008 Intel Corporation
* Jesse Barnes <[email protected]>
* Copyright 2010 Red Hat, Inc.
*
* DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
* FB layer.
* Copyright (C) 2006 Dennis Munsie <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/bitfield.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_displayid.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
static int oui(u8 first, u8 second, u8 third)
{
return (first << 16) | (second << 8) | third;
}
#define EDID_EST_TIMINGS 16
#define EDID_STD_TIMINGS 8
#define EDID_DETAILED_TIMINGS 4
/*
* EDID blocks out in the wild have a variety of bugs, try to collect
* them here (note that userspace may work around broken monitors first,
* but fixes should make their way here so that the kernel "just works"
* on as many displays as possible).
*/
/* First detailed mode wrong, use largest 60Hz mode */
#define EDID_QUIRK_PREFER_LARGE_60 (1 << 0)
/* Reported 135MHz pixel clock is too high, needs adjustment */
#define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1)
/* Prefer the largest mode at 75 Hz */
#define EDID_QUIRK_PREFER_LARGE_75 (1 << 2)
/* Detail timing is in cm not mm */
#define EDID_QUIRK_DETAILED_IN_CM (1 << 3)
/* Detailed timing descriptors have bogus size values, so just take the
* maximum size and use that.
*/
#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
/* Force reduced-blanking timings for detailed modes */
#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
/* Force 8bpc */
#define EDID_QUIRK_FORCE_8BPC (1 << 8)
/* Force 12bpc */
#define EDID_QUIRK_FORCE_12BPC (1 << 9)
/* Force 6bpc */
#define EDID_QUIRK_FORCE_6BPC (1 << 10)
/* Force 10bpc */
#define EDID_QUIRK_FORCE_10BPC (1 << 11)
/* Non desktop display (i.e. HMD) */
#define EDID_QUIRK_NON_DESKTOP (1 << 12)
/* Cap the DSC target bitrate to 15bpp */
#define EDID_QUIRK_CAP_DSC_15BPP (1 << 13)
#define MICROSOFT_IEEE_OUI 0xca125c
struct detailed_mode_closure {
struct drm_connector *connector;
const struct drm_edid *drm_edid;
bool preferred;
int modes;
};
#define LEVEL_DMT 0
#define LEVEL_GTF 1
#define LEVEL_GTF2 2
#define LEVEL_CVT 3
#define EDID_QUIRK(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _quirks) \
{ \
.panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
product_id), \
.quirks = _quirks \
}
static const struct edid_quirk {
u32 panel_id;
u32 quirks;
} edid_quirk_list[] = {
/* Acer AL1706 */
EDID_QUIRK('A', 'C', 'R', 44358, EDID_QUIRK_PREFER_LARGE_60),
/* Acer F51 */
EDID_QUIRK('A', 'P', 'I', 0x7602, EDID_QUIRK_PREFER_LARGE_60),
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
EDID_QUIRK('A', 'E', 'O', 0, EDID_QUIRK_FORCE_6BPC),
/* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */
EDID_QUIRK('B', 'O', 'E', 0x78b, EDID_QUIRK_FORCE_6BPC),
/* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
EDID_QUIRK('C', 'P', 'T', 0x17df, EDID_QUIRK_FORCE_6BPC),
/* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
EDID_QUIRK('S', 'D', 'C', 0x3652, EDID_QUIRK_FORCE_6BPC),
/* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */
EDID_QUIRK('B', 'O', 'E', 0x0771, EDID_QUIRK_FORCE_6BPC),
/* Belinea 10 15 55 */
EDID_QUIRK('M', 'A', 'X', 1516, EDID_QUIRK_PREFER_LARGE_60),
EDID_QUIRK('M', 'A', 'X', 0x77e, EDID_QUIRK_PREFER_LARGE_60),
/* Envision Peripherals, Inc. EN-7100e */
EDID_QUIRK('E', 'P', 'I', 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH),
/* Envision EN2028 */
EDID_QUIRK('E', 'P', 'I', 8232, EDID_QUIRK_PREFER_LARGE_60),
/* Funai Electronics PM36B */
EDID_QUIRK('F', 'C', 'M', 13600, EDID_QUIRK_PREFER_LARGE_75 |
EDID_QUIRK_DETAILED_IN_CM),
/* LG 27GP950 */
EDID_QUIRK('G', 'S', 'M', 0x5bbf, EDID_QUIRK_CAP_DSC_15BPP),
/* LG 27GN950 */
EDID_QUIRK('G', 'S', 'M', 0x5b9a, EDID_QUIRK_CAP_DSC_15BPP),
/* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
EDID_QUIRK('L', 'G', 'D', 764, EDID_QUIRK_FORCE_10BPC),
/* LG Philips LCD LP154W01-A5 */
EDID_QUIRK('L', 'P', 'L', 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
EDID_QUIRK('L', 'P', 'L', 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
/* Samsung SyncMaster 205BW. Note: irony */
EDID_QUIRK('S', 'A', 'M', 541, EDID_QUIRK_DETAILED_SYNC_PP),
/* Samsung SyncMaster 22[5-6]BW */
EDID_QUIRK('S', 'A', 'M', 596, EDID_QUIRK_PREFER_LARGE_60),
EDID_QUIRK('S', 'A', 'M', 638, EDID_QUIRK_PREFER_LARGE_60),
/* Sony PVM-2541A does up to 12 bpc, but only reports max 8 bpc */
EDID_QUIRK('S', 'N', 'Y', 0x2541, EDID_QUIRK_FORCE_12BPC),
/* ViewSonic VA2026w */
EDID_QUIRK('V', 'S', 'C', 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING),
/* Medion MD 30217 PG */
EDID_QUIRK('M', 'E', 'D', 0x7b8, EDID_QUIRK_PREFER_LARGE_75),
/* Lenovo G50 */
EDID_QUIRK('S', 'D', 'C', 18514, EDID_QUIRK_FORCE_6BPC),
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
EDID_QUIRK('S', 'E', 'C', 0xd033, EDID_QUIRK_FORCE_8BPC),
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
EDID_QUIRK('E', 'T', 'R', 13896, EDID_QUIRK_FORCE_8BPC),
/* Valve Index Headset */
EDID_QUIRK('V', 'L', 'V', 0x91a8, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('V', 'L', 'V', 0x91b0, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('V', 'L', 'V', 0x91b1, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('V', 'L', 'V', 0x91b2, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('V', 'L', 'V', 0x91b3, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('V', 'L', 'V', 0x91b4, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('V', 'L', 'V', 0x91b5, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('V', 'L', 'V', 0x91b6, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('V', 'L', 'V', 0x91b7, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('V', 'L', 'V', 0x91b8, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('V', 'L', 'V', 0x91b9, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('V', 'L', 'V', 0x91ba, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('V', 'L', 'V', 0x91bb, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('V', 'L', 'V', 0x91bc, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('V', 'L', 'V', 0x91bd, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('V', 'L', 'V', 0x91be, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('V', 'L', 'V', 0x91bf, EDID_QUIRK_NON_DESKTOP),
/* HTC Vive and Vive Pro VR Headsets */
EDID_QUIRK('H', 'V', 'R', 0xaa01, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('H', 'V', 'R', 0xaa02, EDID_QUIRK_NON_DESKTOP),
/* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */
EDID_QUIRK('O', 'V', 'R', 0x0001, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('O', 'V', 'R', 0x0003, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('O', 'V', 'R', 0x0004, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('O', 'V', 'R', 0x0012, EDID_QUIRK_NON_DESKTOP),
/* Windows Mixed Reality Headsets */
EDID_QUIRK('A', 'C', 'R', 0x7fce, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('L', 'E', 'N', 0x0408, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('F', 'U', 'J', 0x1970, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('D', 'E', 'L', 0x7fce, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('S', 'E', 'C', 0x144a, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('A', 'U', 'S', 0xc102, EDID_QUIRK_NON_DESKTOP),
/* Sony PlayStation VR Headset */
EDID_QUIRK('S', 'N', 'Y', 0x0704, EDID_QUIRK_NON_DESKTOP),
/* Sensics VR Headsets */
EDID_QUIRK('S', 'E', 'N', 0x1019, EDID_QUIRK_NON_DESKTOP),
/* OSVR HDK and HDK2 VR Headsets */
EDID_QUIRK('S', 'V', 'R', 0x1019, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('A', 'U', 'O', 0x1111, EDID_QUIRK_NON_DESKTOP),
};
/*
* Autogenerated from the DMT spec.
* This table is copied from xfree86/modes/xf86EdidModes.c.
*/
static const struct drm_display_mode drm_dmt_modes[] = {
/* 0x01 - 640x350@85Hz */
{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
736, 832, 0, 350, 382, 385, 445, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x02 - 640x400@85Hz */
{ DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
736, 832, 0, 400, 401, 404, 445, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x03 - 720x400@85Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
828, 936, 0, 400, 401, 404, 446, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x04 - 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x05 - 640x480@72Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
704, 832, 0, 480, 489, 492, 520, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x06 - 640x480@75Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
720, 840, 0, 480, 481, 484, 500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x07 - 640x480@85Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
752, 832, 0, 480, 481, 484, 509, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x08 - 800x600@56Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
896, 1024, 0, 600, 601, 603, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x09 - 800x600@60Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x0a - 800x600@72Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
976, 1040, 0, 600, 637, 643, 666, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x0b - 800x600@75Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
896, 1056, 0, 600, 601, 604, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x0c - 800x600@85Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
896, 1048, 0, 600, 601, 604, 631, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x0d - 800x600@120Hz RB */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
880, 960, 0, 600, 603, 607, 636, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x0e - 848x480@60Hz */
{ DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
976, 1088, 0, 480, 486, 494, 517, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x0f - 1024x768@43Hz, interlace */
{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
1208, 1264, 0, 768, 768, 776, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 0x10 - 1024x768@60Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x11 - 1024x768@70Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
1184, 1328, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x12 - 1024x768@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
1136, 1312, 0, 768, 769, 772, 800, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x13 - 1024x768@85Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
1168, 1376, 0, 768, 769, 772, 808, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x14 - 1024x768@120Hz RB */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
1104, 1184, 0, 768, 771, 775, 813, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x15 - 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x55 - 1280x720@60Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x16 - 1280x768@60Hz RB */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
1360, 1440, 0, 768, 771, 778, 790, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x17 - 1280x768@60Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
1472, 1664, 0, 768, 771, 778, 798, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x18 - 1280x768@75Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
1488, 1696, 0, 768, 771, 778, 805, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x19 - 1280x768@85Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
1496, 1712, 0, 768, 771, 778, 809, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x1a - 1280x768@120Hz RB */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
1360, 1440, 0, 768, 771, 778, 813, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x1b - 1280x800@60Hz RB */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
1360, 1440, 0, 800, 803, 809, 823, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x1c - 1280x800@60Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
1480, 1680, 0, 800, 803, 809, 831, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x1d - 1280x800@75Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
1488, 1696, 0, 800, 803, 809, 838, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x1e - 1280x800@85Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
1496, 1712, 0, 800, 803, 809, 843, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x1f - 1280x800@120Hz RB */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
1360, 1440, 0, 800, 803, 809, 847, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x20 - 1280x960@60Hz */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
1488, 1800, 0, 960, 961, 964, 1000, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x21 - 1280x960@85Hz */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
1504, 1728, 0, 960, 961, 964, 1011, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x22 - 1280x960@120Hz RB */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
1360, 1440, 0, 960, 963, 967, 1017, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x23 - 1280x1024@60Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x24 - 1280x1024@75Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x25 - 1280x1024@85Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x26 - 1280x1024@120Hz RB */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x27 - 1360x768@60Hz */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
1536, 1792, 0, 768, 771, 777, 795, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x28 - 1360x768@120Hz RB */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
1440, 1520, 0, 768, 771, 776, 813, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x51 - 1366x768@60Hz */
{ DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 85500, 1366, 1436,
1579, 1792, 0, 768, 771, 774, 798, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x56 - 1366x768@60Hz */
{ DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 72000, 1366, 1380,
1436, 1500, 0, 768, 769, 772, 800, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x29 - 1400x1050@60Hz RB */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x2a - 1400x1050@60Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x2b - 1400x1050@75Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x2c - 1400x1050@85Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x2d - 1400x1050@120Hz RB */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x2e - 1440x900@60Hz RB */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
1520, 1600, 0, 900, 903, 909, 926, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x2f - 1440x900@60Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
1672, 1904, 0, 900, 903, 909, 934, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x30 - 1440x900@75Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
1688, 1936, 0, 900, 903, 909, 942, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x31 - 1440x900@85Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
1696, 1952, 0, 900, 903, 909, 948, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x32 - 1440x900@120Hz RB */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
1520, 1600, 0, 900, 903, 909, 953, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x53 - 1600x900@60Hz */
{ DRM_MODE("1600x900", DRM_MODE_TYPE_DRIVER, 108000, 1600, 1624,
1704, 1800, 0, 900, 901, 904, 1000, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x33 - 1600x1200@60Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x34 - 1600x1200@65Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x35 - 1600x1200@70Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x36 - 1600x1200@75Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x37 - 1600x1200@85Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x38 - 1600x1200@120Hz RB */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x39 - 1680x1050@60Hz RB */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x3a - 1680x1050@60Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x3b - 1680x1050@75Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x3c - 1680x1050@85Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x3d - 1680x1050@120Hz RB */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x3e - 1792x1344@60Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x3f - 1792x1344@75Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x40 - 1792x1344@120Hz RB */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x41 - 1856x1392@60Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x42 - 1856x1392@75Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
2208, 2560, 0, 1392, 1393, 1396, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x43 - 1856x1392@120Hz RB */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x52 - 1920x1080@60Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x44 - 1920x1200@60Hz RB */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x45 - 1920x1200@60Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x46 - 1920x1200@75Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x47 - 1920x1200@85Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x48 - 1920x1200@120Hz RB */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x49 - 1920x1440@60Hz */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x4a - 1920x1440@75Hz */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x4b - 1920x1440@120Hz RB */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x54 - 2048x1152@60Hz */
{ DRM_MODE("2048x1152", DRM_MODE_TYPE_DRIVER, 162000, 2048, 2074,
2154, 2250, 0, 1152, 1153, 1156, 1200, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x4c - 2560x1600@60Hz RB */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x4d - 2560x1600@60Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x4e - 2560x1600@75Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x4f - 2560x1600@85Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x50 - 2560x1600@120Hz RB */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x57 - 4096x2160@60Hz RB */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 556744, 4096, 4104,
4136, 4176, 0, 2160, 2208, 2216, 2222, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x58 - 4096x2160@59.94Hz RB */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 556188, 4096, 4104,
4136, 4176, 0, 2160, 2208, 2216, 2222, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
};
/*
* These more or less come from the DMT spec. The 720x400 modes are
* inferred from historical 80x25 practice. The 640x480@67 and 832x624@75
* modes are old-school Mac modes. The EDID spec says the 1152x864@75 mode
* should be 1152x870, again for the Mac, but instead we use the x864 DMT
* mode.
*
* The DMT modes have been fact-checked; the rest are mild guesses.
*/
static const struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
896, 1024, 0, 600, 601, 603, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
720, 840, 0, 480, 481, 484, 500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
704, 832, 0, 480, 489, 492, 520, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
768, 864, 0, 480, 483, 486, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
846, 900, 0, 400, 421, 423, 449, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
846, 900, 0, 400, 412, 414, 449, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
1136, 1312, 0, 768, 769, 772, 800, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
1184, 1328, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
1208, 1264, 0, 768, 768, 776, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
928, 1152, 0, 624, 625, 628, 667, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
896, 1056, 0, 600, 601, 604, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
976, 1040, 0, 600, 637, 643, 666, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
};
struct minimode {
short w;
short h;
short r;
short rb;
};
static const struct minimode est3_modes[] = {
/* byte 6 */
{ 640, 350, 85, 0 },
{ 640, 400, 85, 0 },
{ 720, 400, 85, 0 },
{ 640, 480, 85, 0 },
{ 848, 480, 60, 0 },
{ 800, 600, 85, 0 },
{ 1024, 768, 85, 0 },
{ 1152, 864, 75, 0 },
/* byte 7 */
{ 1280, 768, 60, 1 },
{ 1280, 768, 60, 0 },
{ 1280, 768, 75, 0 },
{ 1280, 768, 85, 0 },
{ 1280, 960, 60, 0 },
{ 1280, 960, 85, 0 },
{ 1280, 1024, 60, 0 },
{ 1280, 1024, 85, 0 },
/* byte 8 */
{ 1360, 768, 60, 0 },
{ 1440, 900, 60, 1 },
{ 1440, 900, 60, 0 },
{ 1440, 900, 75, 0 },
{ 1440, 900, 85, 0 },
{ 1400, 1050, 60, 1 },
{ 1400, 1050, 60, 0 },
{ 1400, 1050, 75, 0 },
/* byte 9 */
{ 1400, 1050, 85, 0 },
{ 1680, 1050, 60, 1 },
{ 1680, 1050, 60, 0 },
{ 1680, 1050, 75, 0 },
{ 1680, 1050, 85, 0 },
{ 1600, 1200, 60, 0 },
{ 1600, 1200, 65, 0 },
{ 1600, 1200, 70, 0 },
/* byte 10 */
{ 1600, 1200, 75, 0 },
{ 1600, 1200, 85, 0 },
{ 1792, 1344, 60, 0 },
{ 1792, 1344, 75, 0 },
{ 1856, 1392, 60, 0 },
{ 1856, 1392, 75, 0 },
{ 1920, 1200, 60, 1 },
{ 1920, 1200, 60, 0 },
/* byte 11 */
{ 1920, 1200, 75, 0 },
{ 1920, 1200, 85, 0 },
{ 1920, 1440, 60, 0 },
{ 1920, 1440, 75, 0 },
};
static const struct minimode extra_modes[] = {
{ 1024, 576, 60, 0 },
{ 1366, 768, 60, 0 },
{ 1600, 900, 60, 0 },
{ 1680, 945, 60, 0 },
{ 1920, 1080, 60, 0 },
{ 2048, 1152, 60, 0 },
{ 2048, 1536, 60, 0 },
};
/*
* From CEA/CTA-861 spec.
*
* Do not access directly; always use cea_mode_for_vic() instead.
*/
static const struct drm_display_mode edid_cea_modes_1[] = {
/* 1 - 640x480@60Hz 4:3 */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 2 - 720x480@60Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 3 - 720x480@60Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 4 - 1280x720@60Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 5 - 1920x1080i@60Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 6 - 720(1440)x480i@60Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 7 - 720(1440)x480i@60Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 8 - 720(1440)x240@60Hz 4:3 */
{ DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 9 - 720(1440)x240@60Hz 16:9 */
{ DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 10 - 2880x480i@60Hz 4:3 */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 11 - 2880x480i@60Hz 16:9 */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 12 - 2880x240@60Hz 4:3 */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 13 - 2880x240@60Hz 16:9 */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 14 - 1440x480@60Hz 4:3 */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 15 - 1440x480@60Hz 16:9 */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 16 - 1920x1080@60Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 17 - 720x576@50Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 18 - 720x576@50Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 19 - 1280x720@50Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 20 - 1920x1080i@50Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 21 - 720(1440)x576i@50Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 22 - 720(1440)x576i@50Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 23 - 720(1440)x288@50Hz 4:3 */
{ DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 24 - 720(1440)x288@50Hz 16:9 */
{ DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 25 - 2880x576i@50Hz 4:3 */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 26 - 2880x576i@50Hz 16:9 */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 27 - 2880x288@50Hz 4:3 */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 28 - 2880x288@50Hz 16:9 */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 29 - 1440x576@50Hz 4:3 */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 30 - 1440x576@50Hz 16:9 */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 31 - 1920x1080@50Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 32 - 1920x1080@24Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 33 - 1920x1080@25Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 34 - 1920x1080@30Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 35 - 2880x480@60Hz 4:3 */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 36 - 2880x480@60Hz 16:9 */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 37 - 2880x576@50Hz 4:3 */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 38 - 2880x576@50Hz 16:9 */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 39 - 1920x1080i@50Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 40 - 1920x1080i@100Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 41 - 1280x720@100Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 42 - 720x576@100Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 43 - 720x576@100Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 44 - 720(1440)x576i@100Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 45 - 720(1440)x576i@100Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 46 - 1920x1080i@120Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 47 - 1280x720@120Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 48 - 720x480@120Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 49 - 720x480@120Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 50 - 720(1440)x480i@120Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 51 - 720(1440)x480i@120Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 52 - 720x576@200Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 53 - 720x576@200Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 54 - 720(1440)x576i@200Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 55 - 720(1440)x576i@200Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 56 - 720x480@240Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 57 - 720x480@240Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 58 - 720(1440)x480i@240Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 59 - 720(1440)x480i@240Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 60 - 1280x720@24Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 61 - 1280x720@25Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 62 - 1280x720@30Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 63 - 1920x1080@120Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 64 - 1920x1080@100Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 65 - 1280x720@24Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 66 - 1280x720@25Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 67 - 1280x720@30Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 68 - 1280x720@50Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 69 - 1280x720@60Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 70 - 1280x720@100Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 71 - 1280x720@120Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 72 - 1920x1080@24Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 73 - 1920x1080@25Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 74 - 1920x1080@30Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 75 - 1920x1080@50Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 76 - 1920x1080@60Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 77 - 1920x1080@100Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 78 - 1920x1080@120Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 79 - 1680x720@24Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 80 - 1680x720@25Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908,
2948, 3168, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 81 - 1680x720@30Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380,
2420, 2640, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 82 - 1680x720@50Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940,
1980, 2200, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 83 - 1680x720@60Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940,
1980, 2200, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 84 - 1680x720@100Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740,
1780, 2000, 0, 720, 725, 730, 825, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 85 - 1680x720@120Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740,
1780, 2000, 0, 720, 725, 730, 825, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 86 - 2560x1080@24Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558,
3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 87 - 2560x1080@25Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008,
3052, 3200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 88 - 2560x1080@30Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328,
3372, 3520, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 89 - 2560x1080@50Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108,
3152, 3300, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 90 - 2560x1080@60Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808,
2852, 3000, 0, 1080, 1084, 1089, 1100, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 91 - 2560x1080@100Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778,
2822, 2970, 0, 1080, 1084, 1089, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 92 - 2560x1080@120Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108,
3152, 3300, 0, 1080, 1084, 1089, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 93 - 3840x2160@24Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 94 - 3840x2160@25Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 95 - 3840x2160@30Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 96 - 3840x2160@50Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 97 - 3840x2160@60Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 98 - 4096x2160@24Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
/* 99 - 4096x2160@25Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
/* 100 - 4096x2160@30Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 4184,
4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
/* 101 - 4096x2160@50Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
/* 102 - 4096x2160@60Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184,
4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
/* 103 - 3840x2160@24Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 104 - 3840x2160@25Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 105 - 3840x2160@30Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 106 - 3840x2160@50Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 107 - 3840x2160@60Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 108 - 1280x720@48Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 90000, 1280, 2240,
2280, 2500, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 109 - 1280x720@48Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 90000, 1280, 2240,
2280, 2500, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 110 - 1680x720@48Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 2490,
2530, 2750, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 111 - 1920x1080@48Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 112 - 1920x1080@48Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 113 - 2560x1080@48Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 3558,
3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 114 - 3840x2160@48Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 115 - 4096x2160@48Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
/* 116 - 3840x2160@48Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 117 - 3840x2160@100Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 118 - 3840x2160@120Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 119 - 3840x2160@100Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 120 - 3840x2160@120Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 121 - 5120x2160@24Hz 64:27 */
{ DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 7116,
7204, 7500, 0, 2160, 2168, 2178, 2200, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 122 - 5120x2160@25Hz 64:27 */
{ DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 6816,
6904, 7200, 0, 2160, 2168, 2178, 2200, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 123 - 5120x2160@30Hz 64:27 */
{ DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 5784,
5872, 6000, 0, 2160, 2168, 2178, 2200, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 124 - 5120x2160@48Hz 64:27 */
{ DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 5866,
5954, 6250, 0, 2160, 2168, 2178, 2475, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 125 - 5120x2160@50Hz 64:27 */
{ DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 6216,
6304, 6600, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 126 - 5120x2160@60Hz 64:27 */
{ DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 5284,
5372, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 127 - 5120x2160@100Hz 64:27 */
{ DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 1485000, 5120, 6216,
6304, 6600, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
};
/*
* From CEA/CTA-861 spec.
*
* Do not access directly; always use cea_mode_for_vic() instead.
*/
static const struct drm_display_mode edid_cea_modes_193[] = {
/* 193 - 5120x2160@120Hz 64:27 */
{ DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 1485000, 5120, 5284,
5372, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 194 - 7680x4320@24Hz 16:9 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10232,
10408, 11000, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 195 - 7680x4320@25Hz 16:9 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10032,
10208, 10800, 0, 4320, 4336, 4356, 4400, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 196 - 7680x4320@30Hz 16:9 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 8232,
8408, 9000, 0, 4320, 4336, 4356, 4400, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 197 - 7680x4320@48Hz 16:9 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10232,
10408, 11000, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 198 - 7680x4320@50Hz 16:9 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10032,
10208, 10800, 0, 4320, 4336, 4356, 4400, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 199 - 7680x4320@60Hz 16:9 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 8232,
8408, 9000, 0, 4320, 4336, 4356, 4400, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 200 - 7680x4320@100Hz 16:9 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 9792,
9968, 10560, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 201 - 7680x4320@120Hz 16:9 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 8032,
8208, 8800, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 202 - 7680x4320@24Hz 64:27 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10232,
10408, 11000, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 203 - 7680x4320@25Hz 64:27 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10032,
10208, 10800, 0, 4320, 4336, 4356, 4400, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 204 - 7680x4320@30Hz 64:27 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 8232,
8408, 9000, 0, 4320, 4336, 4356, 4400, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 205 - 7680x4320@48Hz 64:27 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10232,
10408, 11000, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 206 - 7680x4320@50Hz 64:27 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10032,
10208, 10800, 0, 4320, 4336, 4356, 4400, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 207 - 7680x4320@60Hz 64:27 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 8232,
8408, 9000, 0, 4320, 4336, 4356, 4400, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 208 - 7680x4320@100Hz 64:27 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 9792,
9968, 10560, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 209 - 7680x4320@120Hz 64:27 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 8032,
8208, 8800, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 210 - 10240x4320@24Hz 64:27 */
{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 1485000, 10240, 11732,
11908, 12500, 0, 4320, 4336, 4356, 4950, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 211 - 10240x4320@25Hz 64:27 */
{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 1485000, 10240, 12732,
12908, 13500, 0, 4320, 4336, 4356, 4400, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 212 - 10240x4320@30Hz 64:27 */
{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 1485000, 10240, 10528,
10704, 11000, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 213 - 10240x4320@48Hz 64:27 */
{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 2970000, 10240, 11732,
11908, 12500, 0, 4320, 4336, 4356, 4950, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 214 - 10240x4320@50Hz 64:27 */
{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 2970000, 10240, 12732,
12908, 13500, 0, 4320, 4336, 4356, 4400, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 215 - 10240x4320@60Hz 64:27 */
{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 2970000, 10240, 10528,
10704, 11000, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 216 - 10240x4320@100Hz 64:27 */
{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 5940000, 10240, 12432,
12608, 13200, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 217 - 10240x4320@120Hz 64:27 */
{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 5940000, 10240, 10528,
10704, 11000, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 218 - 4096x2160@100Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 1188000, 4096, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
/* 219 - 4096x2160@120Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 1188000, 4096, 4184,
4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
};
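/*
* Illustrative sketch only: the two CEA mode tables above are split by VIC
* range, and their entries are ordered by VIC, so a lookup helper along the
* lines of cea_mode_for_vic() (defined elsewhere in this file; its actual
* implementation is not shown here) can index them as VIC minus the first
* VIC of each table:
*
*	if (vic >= 1 && vic < 1 + ARRAY_SIZE(edid_cea_modes_1))
*		return &edid_cea_modes_1[vic - 1];
*	if (vic >= 193 && vic < 193 + ARRAY_SIZE(edid_cea_modes_193))
*		return &edid_cea_modes_193[vic - 193];
*	return NULL;
*
* For example, VIC 16 (1920x1080@60Hz 16:9) is edid_cea_modes_1[15] and
* VIC 194 (7680x4320@24Hz 16:9) is edid_cea_modes_193[1].
*/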
/*
* HDMI 1.4 4k modes. Index using the VIC.
*/
static const struct drm_display_mode edid_4k_modes[] = {
/* 0 - dummy, VICs start at 1 */
{ },
/* 1 - 3840x2160@30Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
3840, 4016, 4104, 4400, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 2 - 3840x2160@25Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
3840, 4896, 4984, 5280, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 3 - 3840x2160@24Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
3840, 5116, 5204, 5500, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 4 - 4096x2160@24Hz (SMPTE) */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000,
4096, 5116, 5204, 5500, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
};
/*** DDC fetch and block validation ***/
/*
* The opaque EDID type, internal to drm_edid.c.
*/
struct drm_edid {
/* Size allocated for edid */
size_t size;
const struct edid *edid;
};
static int edid_hfeeodb_extension_block_count(const struct edid *edid);
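/*
* HF-EEODB reports the number of extension blocks; add one for the base
* block to get the total block count, or return 0 if there is no HF-EEODB.
*/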
static int edid_hfeeodb_block_count(const struct edid *edid)
{
int eeodb = edid_hfeeodb_extension_block_count(edid);
return eeodb ? eeodb + 1 : 0;
}
static int edid_extension_block_count(const struct edid *edid)
{
return edid->extensions;
}
static int edid_block_count(const struct edid *edid)
{
return edid_extension_block_count(edid) + 1;
}
static int edid_size_by_blocks(int num_blocks)
{
return num_blocks * EDID_LENGTH;
}
static int edid_size(const struct edid *edid)
{
return edid_size_by_blocks(edid_block_count(edid));
}
static const void *edid_block_data(const struct edid *edid, int index)
{
BUILD_BUG_ON(sizeof(*edid) != EDID_LENGTH);
return edid + index;
}
static const void *edid_extension_block_data(const struct edid *edid, int index)
{
return edid_block_data(edid, index + 1);
}
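/*
* Worked example (illustrative only): because sizeof(struct edid) ==
* EDID_LENGTH (128 bytes, see the BUILD_BUG_ON above), pointer arithmetic on
* a struct edid pointer steps in whole blocks. For an EDID reporting one
* extension (edid->extensions == 1):
*
*	edid_block_count(edid)			== 2
*	edid_size(edid)				== 256
*	edid_block_data(edid, 1)		== (const u8 *)edid + 128
*	edid_extension_block_data(edid, 0)	== edid_block_data(edid, 1)
*/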
/* EDID block count indicated in EDID, may exceed allocated size */
static int __drm_edid_block_count(const struct drm_edid *drm_edid)
{
int num_blocks;
/* Starting point */
num_blocks = edid_block_count(drm_edid->edid);
/* HF-EEODB override */
if (drm_edid->size >= edid_size_by_blocks(2)) {
int eeodb;
/*
* Note: HF-EEODB may specify a smaller extension count than the
* regular one. Unlike in buffer allocation, here we can use it.
*/
eeodb = edid_hfeeodb_block_count(drm_edid->edid);
if (eeodb)
num_blocks = eeodb;
}
return num_blocks;
}
/* EDID block count, limited by allocated size */
static int drm_edid_block_count(const struct drm_edid *drm_edid)
{
/* Limit by allocated size */
return min(__drm_edid_block_count(drm_edid),
(int)drm_edid->size / EDID_LENGTH);
}
/* EDID extension block count, limited by allocated size */
static int drm_edid_extension_block_count(const struct drm_edid *drm_edid)
{
return drm_edid_block_count(drm_edid) - 1;
}
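/*
* Worked example (illustrative only): for a 256-byte allocation
* (drm_edid->size == 2 * EDID_LENGTH) whose HF-EEODB claims three extension
* blocks, __drm_edid_block_count() reports the indicated total of 4 blocks,
* while drm_edid_block_count() clamps that to the 2 blocks actually
* allocated and drm_edid_extension_block_count() returns 1.
*/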
static const void *drm_edid_block_data(const struct drm_edid *drm_edid, int index)
{
return edid_block_data(drm_edid->edid, index);
}
static const void *drm_edid_extension_block_data(const struct drm_edid *drm_edid,
int index)
{
return edid_extension_block_data(drm_edid->edid, index);
}
/*
* Initializer helper for legacy interfaces, where we have no choice but to
* trust edid size. Not for general purpose use.
*/
static const struct drm_edid *drm_edid_legacy_init(struct drm_edid *drm_edid,
const struct edid *edid)
{
if (!edid)
return NULL;
memset(drm_edid, 0, sizeof(*drm_edid));
drm_edid->edid = edid;
drm_edid->size = edid_size(edid);
return drm_edid;
}
/*
* EDID base and extension block iterator.
*
* struct drm_edid_iter iter;
* const u8 *block;
*
* drm_edid_iter_begin(drm_edid, &iter);
* drm_edid_iter_for_each(block, &iter) {
* // do stuff with block
* }
* drm_edid_iter_end(&iter);
*/
struct drm_edid_iter {
const struct drm_edid *drm_edid;
/* Current block index. */
int index;
};
static void drm_edid_iter_begin(const struct drm_edid *drm_edid,
struct drm_edid_iter *iter)
{
memset(iter, 0, sizeof(*iter));
iter->drm_edid = drm_edid;
}
static const void *__drm_edid_iter_next(struct drm_edid_iter *iter)
{
const void *block = NULL;
if (!iter->drm_edid)
return NULL;
if (iter->index < drm_edid_block_count(iter->drm_edid))
block = drm_edid_block_data(iter->drm_edid, iter->index++);
return block;
}
#define drm_edid_iter_for_each(__block, __iter) \
while (((__block) = __drm_edid_iter_next(__iter)))
static void drm_edid_iter_end(struct drm_edid_iter *iter)
{
memset(iter, 0, sizeof(*iter));
}
static const u8 edid_header[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
static void edid_header_fix(void *edid)
{
memcpy(edid, edid_header, sizeof(edid_header));
}
/**
* drm_edid_header_is_valid - sanity check the header of the base EDID block
* @_edid: pointer to raw base EDID block
*
* Sanity check the header of the base EDID block.
*
* Return: 8 if the header is perfect, down to 0 if it's totally wrong.
*/
int drm_edid_header_is_valid(const void *_edid)
{
const struct edid *edid = _edid;
int i, score = 0;
for (i = 0; i < sizeof(edid_header); i++) {
if (edid->header[i] == edid_header[i])
score++;
}
return score;
}
EXPORT_SYMBOL(drm_edid_header_is_valid);
static int edid_fixup __read_mostly = 6;
module_param_named(edid_fixup, edid_fixup, int, 0400);
MODULE_PARM_DESC(edid_fixup,
"Minimum number of valid EDID header bytes (0-8, default 6)");
static int edid_block_compute_checksum(const void *_block)
{
const u8 *block = _block;
int i;
u8 csum = 0, crc = 0;
for (i = 0; i < EDID_LENGTH - 1; i++)
csum += block[i];
crc = 0x100 - csum;
return crc;
}
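/*
* Worked example (illustrative only): a valid block's 128 bytes sum to 0
* modulo 256, so the expected checksum is 0x100 minus the (u8-truncated) sum
* of the first 127 bytes. If those bytes sum to 0x37f, csum ends up as 0x7f
* and the function returns 0x81; storing 0x81 in the final byte makes the
* whole block sum 0x100 == 0 (mod 256). If csum is already 0, the 0x100
* result truncates to 0 in the u8 crc, which is again the correct checksum.
*/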
static int edid_block_get_checksum(const void *_block)
{
const struct edid *block = _block;
return block->checksum;
}
static int edid_block_tag(const void *_block)
{
const u8 *block = _block;
return block[0];
}
static bool edid_block_is_zero(const void *edid)
{
return !memchr_inv(edid, 0, EDID_LENGTH);
}
/**
* drm_edid_are_equal - compare two edid blobs.
* @edid1: pointer to first blob
* @edid2: pointer to second blob
* This helper can be used during probing to determine if
* the EDID has changed.
*/
bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2)
{
int edid1_len, edid2_len;
bool edid1_present = edid1 != NULL;
bool edid2_present = edid2 != NULL;
if (edid1_present != edid2_present)
return false;
if (edid1) {
edid1_len = edid_size(edid1);
edid2_len = edid_size(edid2);
if (edid1_len != edid2_len)
return false;
if (memcmp(edid1, edid2, edid1_len))
return false;
}
return true;
}
EXPORT_SYMBOL(drm_edid_are_equal);
enum edid_block_status {
EDID_BLOCK_OK = 0,
EDID_BLOCK_READ_FAIL,
EDID_BLOCK_NULL,
EDID_BLOCK_ZERO,
EDID_BLOCK_HEADER_CORRUPT,
EDID_BLOCK_HEADER_REPAIR,
EDID_BLOCK_HEADER_FIXED,
EDID_BLOCK_CHECKSUM,
EDID_BLOCK_VERSION,
};
static enum edid_block_status edid_block_check(const void *_block,
bool is_base_block)
{
const struct edid *block = _block;
if (!block)
return EDID_BLOCK_NULL;
if (is_base_block) {
int score = drm_edid_header_is_valid(block);
if (score < clamp(edid_fixup, 0, 8)) {
if (edid_block_is_zero(block))
return EDID_BLOCK_ZERO;
else
return EDID_BLOCK_HEADER_CORRUPT;
}
if (score < 8)
return EDID_BLOCK_HEADER_REPAIR;
}
if (edid_block_compute_checksum(block) != edid_block_get_checksum(block)) {
if (edid_block_is_zero(block))
return EDID_BLOCK_ZERO;
else
return EDID_BLOCK_CHECKSUM;
}
if (is_base_block) {
if (block->version != 1)
return EDID_BLOCK_VERSION;
}
return EDID_BLOCK_OK;
}
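/*
* Worked example (illustrative only): a base block whose first header byte
* was corrupted to 0xff scores 7 in drm_edid_header_is_valid(). With the
* default edid_fixup threshold of 6 that is acceptable but imperfect, so
* edid_block_check() returns EDID_BLOCK_HEADER_REPAIR; drm_edid_block_valid()
* below then rewrites the canonical header via edid_header_fix() and re-runs
* the check. A score below the threshold yields EDID_BLOCK_ZERO or
* EDID_BLOCK_HEADER_CORRUPT instead.
*/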
static bool edid_block_status_valid(enum edid_block_status status, int tag)
{
return status == EDID_BLOCK_OK ||
status == EDID_BLOCK_HEADER_FIXED ||
(status == EDID_BLOCK_CHECKSUM && tag == CEA_EXT);
}
static bool edid_block_valid(const void *block, bool base)
{
return edid_block_status_valid(edid_block_check(block, base),
edid_block_tag(block));
}
static void edid_block_status_print(enum edid_block_status status,
const struct edid *block,
int block_num)
{
switch (status) {
case EDID_BLOCK_OK:
break;
case EDID_BLOCK_READ_FAIL:
pr_debug("EDID block %d read failed\n", block_num);
break;
case EDID_BLOCK_NULL:
pr_debug("EDID block %d pointer is NULL\n", block_num);
break;
case EDID_BLOCK_ZERO:
pr_notice("EDID block %d is all zeroes\n", block_num);
break;
case EDID_BLOCK_HEADER_CORRUPT:
pr_notice("EDID has corrupt header\n");
break;
case EDID_BLOCK_HEADER_REPAIR:
pr_debug("EDID corrupt header needs repair\n");
break;
case EDID_BLOCK_HEADER_FIXED:
pr_debug("EDID corrupt header fixed\n");
break;
case EDID_BLOCK_CHECKSUM:
if (edid_block_status_valid(status, edid_block_tag(block))) {
pr_debug("EDID block %d (tag 0x%02x) checksum is invalid, remainder is %d, ignoring\n",
block_num, edid_block_tag(block),
edid_block_compute_checksum(block));
} else {
pr_notice("EDID block %d (tag 0x%02x) checksum is invalid, remainder is %d\n",
block_num, edid_block_tag(block),
edid_block_compute_checksum(block));
}
break;
case EDID_BLOCK_VERSION:
pr_notice("EDID has major version %d, instead of 1\n",
block->version);
break;
default:
WARN(1, "EDID block %d unknown edid block status code %d\n",
block_num, status);
break;
}
}
static void edid_block_dump(const char *level, const void *block, int block_num)
{
enum edid_block_status status;
char prefix[20];
status = edid_block_check(block, block_num == 0);
if (status == EDID_BLOCK_ZERO)
sprintf(prefix, "\t[%02x] ZERO ", block_num);
else if (!edid_block_status_valid(status, edid_block_tag(block)))
sprintf(prefix, "\t[%02x] BAD ", block_num);
else
sprintf(prefix, "\t[%02x] GOOD ", block_num);
print_hex_dump(level, prefix, DUMP_PREFIX_NONE, 16, 1,
block, EDID_LENGTH, false);
}
/**
* drm_edid_block_valid - Sanity check the EDID block (base or extension)
* @_block: pointer to raw EDID block
* @block_num: the EDID block number (0 for the base block, >0 for extension blocks)
* @print_bad_edid: if true, dump bad EDID blocks to the console
* @edid_corrupt: optional out-parameter, set to true if the header or checksum is invalid
*
* Validate a base or extension EDID block and optionally dump bad blocks to
* the console.
*
* Return: True if the block is valid, false otherwise.
*/
bool drm_edid_block_valid(u8 *_block, int block_num, bool print_bad_edid,
bool *edid_corrupt)
{
struct edid *block = (struct edid *)_block;
enum edid_block_status status;
bool is_base_block = block_num == 0;
bool valid;
if (WARN_ON(!block))
return false;
status = edid_block_check(block, is_base_block);
if (status == EDID_BLOCK_HEADER_REPAIR) {
DRM_DEBUG_KMS("Fixing EDID header, your hardware may be failing\n");
edid_header_fix(block);
/* Retry with fixed header, update status if that worked. */
status = edid_block_check(block, is_base_block);
if (status == EDID_BLOCK_OK)
status = EDID_BLOCK_HEADER_FIXED;
}
if (edid_corrupt) {
/*
* Unknown major version isn't corrupt but we can't use it. Only
* the base block can reset edid_corrupt to false.
*/
if (is_base_block &&
(status == EDID_BLOCK_OK || status == EDID_BLOCK_VERSION))
*edid_corrupt = false;
else if (status != EDID_BLOCK_OK)
*edid_corrupt = true;
}
edid_block_status_print(status, block, block_num);
/* Determine whether we can use this block with this status. */
valid = edid_block_status_valid(status, edid_block_tag(block));
if (!valid && print_bad_edid && status != EDID_BLOCK_ZERO) {
pr_notice("Raw EDID:\n");
edid_block_dump(KERN_NOTICE, block, block_num);
}
return valid;
}
EXPORT_SYMBOL(drm_edid_block_valid);
/**
* drm_edid_is_valid - sanity check EDID data
* @edid: EDID data
*
* Sanity-check an entire EDID record (including extensions)
*
* Return: True if the EDID data is valid, false otherwise.
*/
bool drm_edid_is_valid(struct edid *edid)
{
int i;
if (!edid)
return false;
for (i = 0; i < edid_block_count(edid); i++) {
void *block = (void *)edid_block_data(edid, i);
if (!drm_edid_block_valid(block, i, true, NULL))
return false;
}
return true;
}
EXPORT_SYMBOL(drm_edid_is_valid);
/**
* drm_edid_valid - sanity check EDID data
* @drm_edid: EDID data
*
* Sanity check an EDID. Cross check block count against allocated size and
* checksum the blocks.
*
* Return: True if the EDID data is valid, false otherwise.
*/
bool drm_edid_valid(const struct drm_edid *drm_edid)
{
int i;
if (!drm_edid)
return false;
if (edid_size_by_blocks(__drm_edid_block_count(drm_edid)) != drm_edid->size)
return false;
for (i = 0; i < drm_edid_block_count(drm_edid); i++) {
const void *block = drm_edid_block_data(drm_edid, i);
if (!edid_block_valid(block, i == 0))
return false;
}
return true;
}
EXPORT_SYMBOL(drm_edid_valid);
static struct edid *edid_filter_invalid_blocks(struct edid *edid,
size_t *alloc_size)
{
struct edid *new;
int i, valid_blocks = 0;
/*
* Note: If the EDID uses HF-EEODB but has invalid blocks, we'll revert
* to the regular extension count here. We don't want to start
* modifying the HF-EEODB extension too.
*/
for (i = 0; i < edid_block_count(edid); i++) {
const void *src_block = edid_block_data(edid, i);
if (edid_block_valid(src_block, i == 0)) {
void *dst_block = (void *)edid_block_data(edid, valid_blocks);
memmove(dst_block, src_block, EDID_LENGTH);
valid_blocks++;
}
}
/* We already trusted the base block to be valid here... */
if (WARN_ON(!valid_blocks)) {
kfree(edid);
return NULL;
}
edid->extensions = valid_blocks - 1;
edid->checksum = edid_block_compute_checksum(edid);
*alloc_size = edid_size_by_blocks(valid_blocks);
new = krealloc(edid, *alloc_size, GFP_KERNEL);
if (!new)
kfree(edid);
return new;
}
#define DDC_SEGMENT_ADDR 0x30
/**
* drm_do_probe_ddc_edid() - get EDID information via I2C
* @data: I2C device adapter
* @buf: EDID data buffer to be filled
* @block: 128 byte EDID block to start fetching from
* @len: EDID data buffer length to fetch
*
* Try to fetch EDID information by calling I2C driver functions.
*
* Return: 0 on success or -1 on failure.
*/
static int
drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len)
{
struct i2c_adapter *adapter = data;
unsigned char start = block * EDID_LENGTH;
unsigned char segment = block >> 1;
unsigned char xfers = segment ? 3 : 2;
int ret, retries = 5;
/*
* The core I2C driver will automatically retry the transfer if the
* adapter reports EAGAIN. However, we find that bit-banging transfers
* are susceptible to errors under a heavily loaded machine and
* generate spurious NAKs and timeouts. Retrying the transfer
* of the individual block a few times seems to overcome this.
*/
do {
struct i2c_msg msgs[] = {
{
.addr = DDC_SEGMENT_ADDR,
.flags = 0,
.len = 1,
.buf = &segment,
}, {
.addr = DDC_ADDR,
.flags = 0,
.len = 1,
.buf = &start,
}, {
.addr = DDC_ADDR,
.flags = I2C_M_RD,
.len = len,
.buf = buf,
}
};
/*
* Avoid sending the segment addr to not upset non-compliant
* DDC monitors.
*/
ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers);
if (ret == -ENXIO) {
DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n",
adapter->name);
break;
}
} while (ret != xfers && --retries);
return ret == xfers ? 0 : -1;
}
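/*
 * Worked example (illustrative, not from the original source): for block 3,
 * segment = 3 >> 1 = 1 and start = (3 * EDID_LENGTH) & 0xff = 128, i.e. the
 * second 128-byte half of E-DDC segment 1. Blocks 0 and 1 map to segment 0,
 * so xfers is 2 and the segment pointer write in msgs[0] is skipped.
 */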
static void connector_bad_edid(struct drm_connector *connector,
const struct edid *edid, int num_blocks)
{
int i;
u8 last_block;
/*
* 0x7e in the EDID is the number of extension blocks. The EDID
* is 1 (base block) + num_ext_blocks big. That means we can think
 * of 0x7e in the EDID as the _index_ of the last block in the
* combined chunk of memory.
*/
last_block = edid->extensions;
/* Calculate real checksum for the last edid extension block data */
if (last_block < num_blocks)
connector->real_edid_checksum =
edid_block_compute_checksum(edid + last_block);
if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
return;
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] EDID is invalid:\n",
connector->base.id, connector->name);
for (i = 0; i < num_blocks; i++)
edid_block_dump(KERN_DEBUG, edid + i, i);
}
/* Get override or firmware EDID */
static const struct drm_edid *drm_edid_override_get(struct drm_connector *connector)
{
const struct drm_edid *override = NULL;
mutex_lock(&connector->edid_override_mutex);
if (connector->edid_override)
override = drm_edid_dup(connector->edid_override);
mutex_unlock(&connector->edid_override_mutex);
if (!override)
override = drm_edid_load_firmware(connector);
return IS_ERR(override) ? NULL : override;
}
/* For debugfs edid_override implementation */
int drm_edid_override_show(struct drm_connector *connector, struct seq_file *m)
{
const struct drm_edid *drm_edid;
mutex_lock(&connector->edid_override_mutex);
drm_edid = connector->edid_override;
if (drm_edid)
seq_write(m, drm_edid->edid, drm_edid->size);
mutex_unlock(&connector->edid_override_mutex);
return 0;
}
/* For debugfs edid_override implementation */
int drm_edid_override_set(struct drm_connector *connector, const void *edid,
size_t size)
{
const struct drm_edid *drm_edid;
drm_edid = drm_edid_alloc(edid, size);
if (!drm_edid_valid(drm_edid)) {
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] EDID override invalid\n",
connector->base.id, connector->name);
drm_edid_free(drm_edid);
return -EINVAL;
}
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] EDID override set\n",
connector->base.id, connector->name);
mutex_lock(&connector->edid_override_mutex);
drm_edid_free(connector->edid_override);
connector->edid_override = drm_edid;
mutex_unlock(&connector->edid_override_mutex);
return 0;
}
/* For debugfs edid_override implementation */
int drm_edid_override_reset(struct drm_connector *connector)
{
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] EDID override reset\n",
connector->base.id, connector->name);
mutex_lock(&connector->edid_override_mutex);
drm_edid_free(connector->edid_override);
connector->edid_override = NULL;
mutex_unlock(&connector->edid_override_mutex);
return 0;
}
/**
* drm_edid_override_connector_update - add modes from override/firmware EDID
* @connector: connector we're probing
*
* Add modes from the override/firmware EDID, if available. Only to be used from
* drm_helper_probe_single_connector_modes() as a fallback for when DDC probe
* failed during drm_get_edid() and caused the override/firmware EDID to be
* skipped.
*
* Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_edid_override_connector_update(struct drm_connector *connector)
{
const struct drm_edid *override;
int num_modes = 0;
override = drm_edid_override_get(connector);
if (override) {
num_modes = drm_edid_connector_update(connector, override);
drm_edid_free(override);
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] adding %d modes via fallback override/firmware EDID\n",
connector->base.id, connector->name, num_modes);
}
return num_modes;
}
EXPORT_SYMBOL(drm_edid_override_connector_update);
typedef int read_block_fn(void *context, u8 *buf, unsigned int block, size_t len);
static enum edid_block_status edid_block_read(void *block, unsigned int block_num,
read_block_fn read_block,
void *context)
{
enum edid_block_status status;
bool is_base_block = block_num == 0;
int try;
for (try = 0; try < 4; try++) {
if (read_block(context, block, block_num, EDID_LENGTH))
return EDID_BLOCK_READ_FAIL;
status = edid_block_check(block, is_base_block);
if (status == EDID_BLOCK_HEADER_REPAIR) {
edid_header_fix(block);
/* Retry with fixed header, update status if that worked. */
status = edid_block_check(block, is_base_block);
if (status == EDID_BLOCK_OK)
status = EDID_BLOCK_HEADER_FIXED;
}
if (edid_block_status_valid(status, edid_block_tag(block)))
break;
/* Fail early for unrepairable base block all zeros. */
if (try == 0 && is_base_block && status == EDID_BLOCK_ZERO)
break;
}
return status;
}
static struct edid *_drm_do_get_edid(struct drm_connector *connector,
read_block_fn read_block, void *context,
size_t *size)
{
enum edid_block_status status;
int i, num_blocks, invalid_blocks = 0;
const struct drm_edid *override;
struct edid *edid, *new;
size_t alloc_size = EDID_LENGTH;
override = drm_edid_override_get(connector);
if (override) {
alloc_size = override->size;
edid = kmemdup(override->edid, alloc_size, GFP_KERNEL);
drm_edid_free(override);
if (!edid)
return NULL;
goto ok;
}
edid = kmalloc(alloc_size, GFP_KERNEL);
if (!edid)
return NULL;
status = edid_block_read(edid, 0, read_block, context);
edid_block_status_print(status, edid, 0);
if (status == EDID_BLOCK_READ_FAIL)
goto fail;
/* FIXME: Clarify what a corrupt EDID actually means. */
if (status == EDID_BLOCK_OK || status == EDID_BLOCK_VERSION)
connector->edid_corrupt = false;
else
connector->edid_corrupt = true;
if (!edid_block_status_valid(status, edid_block_tag(edid))) {
if (status == EDID_BLOCK_ZERO)
connector->null_edid_counter++;
connector_bad_edid(connector, edid, 1);
goto fail;
}
if (!edid_extension_block_count(edid))
goto ok;
alloc_size = edid_size(edid);
new = krealloc(edid, alloc_size, GFP_KERNEL);
if (!new)
goto fail;
edid = new;
num_blocks = edid_block_count(edid);
for (i = 1; i < num_blocks; i++) {
void *block = (void *)edid_block_data(edid, i);
status = edid_block_read(block, i, read_block, context);
edid_block_status_print(status, block, i);
if (!edid_block_status_valid(status, edid_block_tag(block))) {
if (status == EDID_BLOCK_READ_FAIL)
goto fail;
invalid_blocks++;
} else if (i == 1) {
/*
* If the first EDID extension is a CTA extension, and
* the first Data Block is HF-EEODB, override the
* extension block count.
*
* Note: HF-EEODB could specify a smaller extension
* count too, but we can't risk allocating a smaller
* amount.
*/
int eeodb = edid_hfeeodb_block_count(edid);
if (eeodb > num_blocks) {
num_blocks = eeodb;
alloc_size = edid_size_by_blocks(num_blocks);
new = krealloc(edid, alloc_size, GFP_KERNEL);
if (!new)
goto fail;
edid = new;
}
}
}
if (invalid_blocks) {
connector_bad_edid(connector, edid, num_blocks);
edid = edid_filter_invalid_blocks(edid, &alloc_size);
}
ok:
if (size)
*size = alloc_size;
return edid;
fail:
kfree(edid);
return NULL;
}
/**
* drm_do_get_edid - get EDID data using a custom EDID block read function
* @connector: connector we're probing
* @read_block: EDID block read function
* @context: private data passed to the block read function
*
* When the I2C adapter connected to the DDC bus is hidden behind a device that
* exposes a different interface to read EDID blocks this function can be used
* to get EDID data using a custom block read function.
*
* As in the general case the DDC bus is accessible by the kernel at the I2C
* level, drivers must make all reasonable efforts to expose it as an I2C
* adapter and use drm_get_edid() instead of abusing this function.
*
* The EDID may be overridden using debugfs override_edid or firmware EDID
* (drm_edid_load_firmware() and drm.edid_firmware parameter), in this priority
* order. Having either of them bypasses actual EDID reads.
*
* Return: Pointer to valid EDID or NULL if we couldn't find any.
*/
struct edid *drm_do_get_edid(struct drm_connector *connector,
read_block_fn read_block,
void *context)
{
return _drm_do_get_edid(connector, read_block, context, NULL);
}
EXPORT_SYMBOL_GPL(drm_do_get_edid);
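/*
 * Illustrative sketch (not part of the original source): a driver whose EDID
 * is only reachable through some device-specific transport could plug in a
 * custom block reader roughly like this. "foo", "foo_read_edid_block" and
 * "foo_hw_read" are hypothetical names; the callback returns 0 on success
 * and non-zero on failure.
 *
 *	static int foo_read_edid_block(void *context, u8 *buf,
 *				       unsigned int block, size_t len)
 *	{
 *		struct foo *foo = context;
 *
 *		return foo_hw_read(foo, block * EDID_LENGTH, buf, len);
 *	}
 *
 *	edid = drm_do_get_edid(connector, foo_read_edid_block, foo);
 */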
/**
* drm_edid_raw - Get a pointer to the raw EDID data.
* @drm_edid: drm_edid container
*
* Get a pointer to the raw EDID data.
*
* This is for transition only. Avoid using this like the plague.
*
* Return: Pointer to raw EDID data.
*/
const struct edid *drm_edid_raw(const struct drm_edid *drm_edid)
{
if (!drm_edid || !drm_edid->size)
return NULL;
/*
* Do not return pointers where relying on EDID extension count would
* lead to buffer overflow.
*/
if (WARN_ON(edid_size(drm_edid->edid) > drm_edid->size))
return NULL;
return drm_edid->edid;
}
EXPORT_SYMBOL(drm_edid_raw);
/* Allocate struct drm_edid container *without* duplicating the edid data */
static const struct drm_edid *_drm_edid_alloc(const void *edid, size_t size)
{
struct drm_edid *drm_edid;
if (!edid || !size || size < EDID_LENGTH)
return NULL;
drm_edid = kzalloc(sizeof(*drm_edid), GFP_KERNEL);
if (drm_edid) {
drm_edid->edid = edid;
drm_edid->size = size;
}
return drm_edid;
}
/**
* drm_edid_alloc - Allocate a new drm_edid container
* @edid: Pointer to raw EDID data
* @size: Size of memory allocated for EDID
*
 * Allocate a new drm_edid container. Do not calculate the EDID size from the data; pass
* the actual size that has been allocated for the data. There is no validation
* of the raw EDID data against the size, but at least the EDID base block must
* fit in the buffer.
*
* The returned pointer must be freed using drm_edid_free().
*
* Return: drm_edid container, or NULL on errors
*/
const struct drm_edid *drm_edid_alloc(const void *edid, size_t size)
{
const struct drm_edid *drm_edid;
if (!edid || !size || size < EDID_LENGTH)
return NULL;
edid = kmemdup(edid, size, GFP_KERNEL);
if (!edid)
return NULL;
drm_edid = _drm_edid_alloc(edid, size);
if (!drm_edid)
kfree(edid);
return drm_edid;
}
EXPORT_SYMBOL(drm_edid_alloc);
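/*
 * Illustrative sketch (not part of the original source): wrapping a raw EDID
 * blob of known allocated size in a drm_edid container and releasing it;
 * "blob", "blob_size" and "use_the_edid" are assumed caller-side names.
 *
 *	const struct drm_edid *drm_edid = drm_edid_alloc(blob, blob_size);
 *
 *	if (drm_edid_valid(drm_edid))
 *		use_the_edid(drm_edid);
 *	drm_edid_free(drm_edid);
 */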
/**
* drm_edid_dup - Duplicate a drm_edid container
* @drm_edid: EDID to duplicate
*
* The returned pointer must be freed using drm_edid_free().
*
* Returns: drm_edid container copy, or NULL on errors
*/
const struct drm_edid *drm_edid_dup(const struct drm_edid *drm_edid)
{
if (!drm_edid)
return NULL;
return drm_edid_alloc(drm_edid->edid, drm_edid->size);
}
EXPORT_SYMBOL(drm_edid_dup);
/**
* drm_edid_free - Free the drm_edid container
* @drm_edid: EDID to free
*/
void drm_edid_free(const struct drm_edid *drm_edid)
{
if (!drm_edid)
return;
kfree(drm_edid->edid);
kfree(drm_edid);
}
EXPORT_SYMBOL(drm_edid_free);
/**
* drm_probe_ddc() - probe DDC presence
* @adapter: I2C adapter to probe
*
* Return: True on success, false on failure.
*/
bool
drm_probe_ddc(struct i2c_adapter *adapter)
{
unsigned char out;
return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
}
EXPORT_SYMBOL(drm_probe_ddc);
/**
* drm_get_edid - get EDID data, if available
* @connector: connector we're probing
* @adapter: I2C adapter to use for DDC
*
* Poke the given I2C channel to grab EDID data if possible. If found,
* attach it to the connector.
*
* Return: Pointer to valid EDID or NULL if we couldn't find any.
*/
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid;
if (connector->force == DRM_FORCE_OFF)
return NULL;
if (connector->force == DRM_FORCE_UNSPECIFIED && !drm_probe_ddc(adapter))
return NULL;
edid = _drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter, NULL);
drm_connector_update_edid_property(connector, edid);
return edid;
}
EXPORT_SYMBOL(drm_get_edid);
/**
* drm_edid_read_custom - Read EDID data using given EDID block read function
* @connector: Connector to use
* @read_block: EDID block read function
* @context: Private data passed to the block read function
*
* When the I2C adapter connected to the DDC bus is hidden behind a device that
* exposes a different interface to read EDID blocks this function can be used
* to get EDID data using a custom block read function.
*
* As in the general case the DDC bus is accessible by the kernel at the I2C
* level, drivers must make all reasonable efforts to expose it as an I2C
* adapter and use drm_edid_read() or drm_edid_read_ddc() instead of abusing
* this function.
*
* The EDID may be overridden using debugfs override_edid or firmware EDID
* (drm_edid_load_firmware() and drm.edid_firmware parameter), in this priority
* order. Having either of them bypasses actual EDID reads.
*
* The returned pointer must be freed using drm_edid_free().
*
* Return: Pointer to EDID, or NULL if probe/read failed.
*/
const struct drm_edid *drm_edid_read_custom(struct drm_connector *connector,
read_block_fn read_block,
void *context)
{
const struct drm_edid *drm_edid;
struct edid *edid;
size_t size = 0;
edid = _drm_do_get_edid(connector, read_block, context, &size);
if (!edid)
return NULL;
/* Sanity check for now */
drm_WARN_ON(connector->dev, !size);
drm_edid = _drm_edid_alloc(edid, size);
if (!drm_edid)
kfree(edid);
return drm_edid;
}
EXPORT_SYMBOL(drm_edid_read_custom);
/**
* drm_edid_read_ddc - Read EDID data using given I2C adapter
* @connector: Connector to use
* @adapter: I2C adapter to use for DDC
*
* Read EDID using the given I2C adapter.
*
* The EDID may be overridden using debugfs override_edid or firmware EDID
* (drm_edid_load_firmware() and drm.edid_firmware parameter), in this priority
* order. Having either of them bypasses actual EDID reads.
*
* Prefer initializing connector->ddc with drm_connector_init_with_ddc() and
* using drm_edid_read() instead of this function.
*
* The returned pointer must be freed using drm_edid_free().
*
* Return: Pointer to EDID, or NULL if probe/read failed.
*/
const struct drm_edid *drm_edid_read_ddc(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
const struct drm_edid *drm_edid;
if (connector->force == DRM_FORCE_OFF)
return NULL;
if (connector->force == DRM_FORCE_UNSPECIFIED && !drm_probe_ddc(adapter))
return NULL;
drm_edid = drm_edid_read_custom(connector, drm_do_probe_ddc_edid, adapter);
/* Note: Do *not* call connector updates here. */
return drm_edid;
}
EXPORT_SYMBOL(drm_edid_read_ddc);
/**
* drm_edid_read - Read EDID data using connector's I2C adapter
* @connector: Connector to use
*
* Read EDID using the connector's I2C adapter.
*
* The EDID may be overridden using debugfs override_edid or firmware EDID
* (drm_edid_load_firmware() and drm.edid_firmware parameter), in this priority
* order. Having either of them bypasses actual EDID reads.
*
* The returned pointer must be freed using drm_edid_free().
*
* Return: Pointer to EDID, or NULL if probe/read failed.
*/
const struct drm_edid *drm_edid_read(struct drm_connector *connector)
{
if (drm_WARN_ON(connector->dev, !connector->ddc))
return NULL;
return drm_edid_read_ddc(connector, connector->ddc);
}
EXPORT_SYMBOL(drm_edid_read);
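/*
 * Illustrative sketch (not part of the original source): a typical
 * .get_modes() flow, assuming the connector was initialized with
 * drm_connector_init_with_ddc() so connector->ddc is set.
 *
 *	const struct drm_edid *drm_edid = drm_edid_read(connector);
 *	int count = drm_edid_connector_update(connector, drm_edid);
 *
 *	drm_edid_free(drm_edid);
 *	return count;
 */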
static u32 edid_extract_panel_id(const struct edid *edid)
{
/*
* We represent the ID as a 32-bit number so it can easily be compared
* with "==".
*
* NOTE that we deal with endianness differently for the top half
* of this ID than for the bottom half. The bottom half (the product
* id) gets decoded as little endian by the EDID_PRODUCT_ID because
* that's how everyone seems to interpret it. The top half (the mfg_id)
* gets stored as big endian because that makes
* drm_edid_encode_panel_id() and drm_edid_decode_panel_id() easier
* to write (it's easier to extract the ASCII). It doesn't really
* matter, though, as long as the number here is unique.
*/
return (u32)edid->mfg_id[0] << 24 |
(u32)edid->mfg_id[1] << 16 |
(u32)EDID_PRODUCT_ID(edid);
}
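/*
 * Illustrative layout note (not part of the original source): the resulting
 * ID packs mfg_id[0] into bits 31:24, mfg_id[1] into bits 23:16 and the
 * little-endian product ID into bits 15:0. For example, mfg_id bytes
 * 0x09 0xe5 (PNP ID "BOE") with product ID 0x0a5c yield 0x09e50a5c.
 */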
/**
* drm_edid_get_panel_id - Get a panel's ID through DDC
* @adapter: I2C adapter to use for DDC
*
* This function reads the first block of the EDID of a panel and (assuming
* that the EDID is valid) extracts the ID out of it. The ID is a 32-bit value
* (16 bits of manufacturer ID and 16 bits of per-manufacturer ID) that's
 * supposed to be different for each different model of panel.
*
* This function is intended to be used during early probing on devices where
* more than one panel might be present. Because of its intended use it must
* assume that the EDID of the panel is correct, at least as far as the ID
* is concerned (in other words, we don't process any overrides here).
*
* NOTE: it's expected that this function and drm_do_get_edid() will both
 * read the EDID, but there is no caching between them. Since we're only
* reading the first block, hopefully this extra overhead won't be too big.
*
* Return: A 32-bit ID that should be different for each make/model of panel.
* See the functions drm_edid_encode_panel_id() and
* drm_edid_decode_panel_id() for some details on the structure of this
* ID.
*/
u32 drm_edid_get_panel_id(struct i2c_adapter *adapter)
{
enum edid_block_status status;
void *base_block;
u32 panel_id = 0;
/*
* There are no manufacturer IDs of 0, so if there is a problem reading
* the EDID then we'll just return 0.
*/
base_block = kzalloc(EDID_LENGTH, GFP_KERNEL);
if (!base_block)
return 0;
status = edid_block_read(base_block, 0, drm_do_probe_ddc_edid, adapter);
edid_block_status_print(status, base_block, 0);
if (edid_block_status_valid(status, edid_block_tag(base_block)))
panel_id = edid_extract_panel_id(base_block);
else
edid_block_dump(KERN_NOTICE, base_block, 0);
kfree(base_block);
return panel_id;
}
EXPORT_SYMBOL(drm_edid_get_panel_id);
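/*
 * Illustrative sketch (not part of the original source): early panel probe
 * code can key a lookup off the returned ID; 0 means the base block could
 * not be read or was invalid. "ddc_adapter" and "find_panel_desc_by_id" are
 * hypothetical.
 *
 *	u32 panel_id = drm_edid_get_panel_id(ddc_adapter);
 *
 *	if (panel_id)
 *		desc = find_panel_desc_by_id(panel_id);
 */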
/**
* drm_get_edid_switcheroo - get EDID data for a vga_switcheroo output
* @connector: connector we're probing
* @adapter: I2C adapter to use for DDC
*
* Wrapper around drm_get_edid() for laptops with dual GPUs using one set of
* outputs. The wrapper adds the requisite vga_switcheroo calls to temporarily
* switch DDC to the GPU which is retrieving EDID.
*
* Return: Pointer to valid EDID or %NULL if we couldn't find any.
*/
struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct drm_device *dev = connector->dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct edid *edid;
if (drm_WARN_ON_ONCE(dev, !dev_is_pci(dev->dev)))
return NULL;
vga_switcheroo_lock_ddc(pdev);
edid = drm_get_edid(connector, adapter);
vga_switcheroo_unlock_ddc(pdev);
return edid;
}
EXPORT_SYMBOL(drm_get_edid_switcheroo);
/**
* drm_edid_read_switcheroo - get EDID data for a vga_switcheroo output
* @connector: connector we're probing
* @adapter: I2C adapter to use for DDC
*
* Wrapper around drm_edid_read_ddc() for laptops with dual GPUs using one set
* of outputs. The wrapper adds the requisite vga_switcheroo calls to
* temporarily switch DDC to the GPU which is retrieving EDID.
*
* Return: Pointer to valid EDID or %NULL if we couldn't find any.
*/
const struct drm_edid *drm_edid_read_switcheroo(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct drm_device *dev = connector->dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
const struct drm_edid *drm_edid;
if (drm_WARN_ON_ONCE(dev, !dev_is_pci(dev->dev)))
return NULL;
vga_switcheroo_lock_ddc(pdev);
drm_edid = drm_edid_read_ddc(connector, adapter);
vga_switcheroo_unlock_ddc(pdev);
return drm_edid;
}
EXPORT_SYMBOL(drm_edid_read_switcheroo);
/**
* drm_edid_duplicate - duplicate an EDID and the extensions
* @edid: EDID to duplicate
*
* Return: Pointer to duplicated EDID or NULL on allocation failure.
*/
struct edid *drm_edid_duplicate(const struct edid *edid)
{
if (!edid)
return NULL;
return kmemdup(edid, edid_size(edid), GFP_KERNEL);
}
EXPORT_SYMBOL(drm_edid_duplicate);
/*** EDID parsing ***/
/**
* edid_get_quirks - return quirk flags for a given EDID
* @drm_edid: EDID to process
*
* This tells subsequent routines what fixes they need to apply.
*/
static u32 edid_get_quirks(const struct drm_edid *drm_edid)
{
u32 panel_id = edid_extract_panel_id(drm_edid->edid);
const struct edid_quirk *quirk;
int i;
for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
quirk = &edid_quirk_list[i];
if (quirk->panel_id == panel_id)
return quirk->quirks;
}
return 0;
}
#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
#define MODE_REFRESH_DIFF(c,t) (abs((c) - (t)))
/*
* Walk the mode list for connector, clearing the preferred status on existing
 * modes and setting it anew for the right mode per the quirks.
*/
static void edid_fixup_preferred(struct drm_connector *connector)
{
const struct drm_display_info *info = &connector->display_info;
struct drm_display_mode *t, *cur_mode, *preferred_mode;
int target_refresh = 0;
int cur_vrefresh, preferred_vrefresh;
if (list_empty(&connector->probed_modes))
return;
if (info->quirks & EDID_QUIRK_PREFER_LARGE_60)
target_refresh = 60;
if (info->quirks & EDID_QUIRK_PREFER_LARGE_75)
target_refresh = 75;
preferred_mode = list_first_entry(&connector->probed_modes,
struct drm_display_mode, head);
list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
if (cur_mode == preferred_mode)
continue;
/* Largest mode is preferred */
if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
preferred_mode = cur_mode;
cur_vrefresh = drm_mode_vrefresh(cur_mode);
preferred_vrefresh = drm_mode_vrefresh(preferred_mode);
/* At a given size, try to get closest to target refresh */
if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
MODE_REFRESH_DIFF(cur_vrefresh, target_refresh) <
MODE_REFRESH_DIFF(preferred_vrefresh, target_refresh)) {
preferred_mode = cur_mode;
}
}
preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
static bool
mode_is_rb(const struct drm_display_mode *mode)
{
return (mode->htotal - mode->hdisplay == 160) &&
(mode->hsync_end - mode->hdisplay == 80) &&
(mode->hsync_end - mode->hsync_start == 32) &&
(mode->vsync_start - mode->vdisplay == 3);
}
/*
* drm_mode_find_dmt - Create a copy of a mode if present in DMT
* @dev: Device to duplicate against
* @hsize: Mode width
* @vsize: Mode height
* @fresh: Mode refresh rate
* @rb: Mode reduced-blanking-ness
*
* Walk the DMT mode list looking for a match for the given parameters.
*
* Return: A newly allocated copy of the mode, or NULL if not found.
*/
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh,
bool rb)
{
int i;
for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hsize != ptr->hdisplay)
continue;
if (vsize != ptr->vdisplay)
continue;
if (fresh != drm_mode_vrefresh(ptr))
continue;
if (rb != mode_is_rb(ptr))
continue;
return drm_mode_duplicate(dev, ptr);
}
return NULL;
}
EXPORT_SYMBOL(drm_mode_find_dmt);
static bool is_display_descriptor(const struct detailed_timing *descriptor, u8 type)
{
BUILD_BUG_ON(offsetof(typeof(*descriptor), pixel_clock) != 0);
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.pad1) != 2);
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.type) != 3);
return descriptor->pixel_clock == 0 &&
descriptor->data.other_data.pad1 == 0 &&
descriptor->data.other_data.type == type;
}
static bool is_detailed_timing_descriptor(const struct detailed_timing *descriptor)
{
BUILD_BUG_ON(offsetof(typeof(*descriptor), pixel_clock) != 0);
return descriptor->pixel_clock != 0;
}
typedef void detailed_cb(const struct detailed_timing *timing, void *closure);
static void
cea_for_each_detailed_block(const u8 *ext, detailed_cb *cb, void *closure)
{
int i, n;
u8 d = ext[0x02];
const u8 *det_base = ext + d;
if (d < 4 || d > 127)
return;
n = (127 - d) / 18;
for (i = 0; i < n; i++)
cb((const struct detailed_timing *)(det_base + 18 * i), closure);
}
static void
vtb_for_each_detailed_block(const u8 *ext, detailed_cb *cb, void *closure)
{
unsigned int i, n = min((int)ext[0x02], 6);
const u8 *det_base = ext + 5;
if (ext[0x01] != 1)
return; /* unknown version */
for (i = 0; i < n; i++)
cb((const struct detailed_timing *)(det_base + 18 * i), closure);
}
static void drm_for_each_detailed_block(const struct drm_edid *drm_edid,
detailed_cb *cb, void *closure)
{
struct drm_edid_iter edid_iter;
const u8 *ext;
int i;
if (!drm_edid)
return;
for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
cb(&drm_edid->edid->detailed_timings[i], closure);
drm_edid_iter_begin(drm_edid, &edid_iter);
drm_edid_iter_for_each(ext, &edid_iter) {
switch (*ext) {
case CEA_EXT:
cea_for_each_detailed_block(ext, cb, closure);
break;
case VTB_EXT:
vtb_for_each_detailed_block(ext, cb, closure);
break;
default:
break;
}
}
drm_edid_iter_end(&edid_iter);
}
static void
is_rb(const struct detailed_timing *descriptor, void *data)
{
bool *res = data;
if (!is_display_descriptor(descriptor, EDID_DETAIL_MONITOR_RANGE))
return;
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.flags) != 10);
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.formula.cvt.flags) != 15);
if (descriptor->data.other_data.data.range.flags == DRM_EDID_CVT_SUPPORT_FLAG &&
descriptor->data.other_data.data.range.formula.cvt.flags & DRM_EDID_CVT_FLAGS_REDUCED_BLANKING)
*res = true;
}
/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
static bool
drm_monitor_supports_rb(const struct drm_edid *drm_edid)
{
if (drm_edid->edid->revision >= 4) {
bool ret = false;
drm_for_each_detailed_block(drm_edid, is_rb, &ret);
return ret;
}
return ((drm_edid->edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
}
static void
find_gtf2(const struct detailed_timing *descriptor, void *data)
{
const struct detailed_timing **res = data;
if (!is_display_descriptor(descriptor, EDID_DETAIL_MONITOR_RANGE))
return;
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.flags) != 10);
if (descriptor->data.other_data.data.range.flags == DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG)
*res = descriptor;
}
/* Secondary GTF curve kicks in above some break frequency */
static int
drm_gtf2_hbreak(const struct drm_edid *drm_edid)
{
const struct detailed_timing *descriptor = NULL;
drm_for_each_detailed_block(drm_edid, find_gtf2, &descriptor);
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.formula.gtf2.hfreq_start_khz) != 12);
return descriptor ? descriptor->data.other_data.data.range.formula.gtf2.hfreq_start_khz * 2 : 0;
}
static int
drm_gtf2_2c(const struct drm_edid *drm_edid)
{
const struct detailed_timing *descriptor = NULL;
drm_for_each_detailed_block(drm_edid, find_gtf2, &descriptor);
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.formula.gtf2.c) != 13);
return descriptor ? descriptor->data.other_data.data.range.formula.gtf2.c : 0;
}
static int
drm_gtf2_m(const struct drm_edid *drm_edid)
{
const struct detailed_timing *descriptor = NULL;
drm_for_each_detailed_block(drm_edid, find_gtf2, &descriptor);
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.formula.gtf2.m) != 14);
return descriptor ? le16_to_cpu(descriptor->data.other_data.data.range.formula.gtf2.m) : 0;
}
static int
drm_gtf2_k(const struct drm_edid *drm_edid)
{
const struct detailed_timing *descriptor = NULL;
drm_for_each_detailed_block(drm_edid, find_gtf2, &descriptor);
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.formula.gtf2.k) != 16);
return descriptor ? descriptor->data.other_data.data.range.formula.gtf2.k : 0;
}
static int
drm_gtf2_2j(const struct drm_edid *drm_edid)
{
const struct detailed_timing *descriptor = NULL;
drm_for_each_detailed_block(drm_edid, find_gtf2, &descriptor);
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.formula.gtf2.j) != 17);
return descriptor ? descriptor->data.other_data.data.range.formula.gtf2.j : 0;
}
static void
get_timing_level(const struct detailed_timing *descriptor, void *data)
{
int *res = data;
if (!is_display_descriptor(descriptor, EDID_DETAIL_MONITOR_RANGE))
return;
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.flags) != 10);
switch (descriptor->data.other_data.data.range.flags) {
case DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG:
*res = LEVEL_GTF;
break;
case DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG:
*res = LEVEL_GTF2;
break;
case DRM_EDID_CVT_SUPPORT_FLAG:
*res = LEVEL_CVT;
break;
default:
break;
}
}
/* Get standard timing level (CVT/GTF/DMT). */
static int standard_timing_level(const struct drm_edid *drm_edid)
{
const struct edid *edid = drm_edid->edid;
if (edid->revision >= 4) {
/*
* If the range descriptor doesn't
 * indicate otherwise, default to CVT
*/
int ret = LEVEL_CVT;
drm_for_each_detailed_block(drm_edid, get_timing_level, &ret);
return ret;
} else if (edid->revision >= 3 && drm_gtf2_hbreak(drm_edid)) {
return LEVEL_GTF2;
} else if (edid->revision >= 2) {
return LEVEL_GTF;
} else {
return LEVEL_DMT;
}
}
/*
* 0 is reserved. The spec says 0x01 fill for unused timings. Some old
 * monitors fill with ASCII space (0x20) instead.
*/
static int
bad_std_timing(u8 a, u8 b)
{
return (a == 0x00 && b == 0x00) ||
(a == 0x01 && b == 0x01) ||
(a == 0x20 && b == 0x20);
}
static int drm_mode_hsync(const struct drm_display_mode *mode)
{
if (mode->htotal <= 0)
return 0;
return DIV_ROUND_CLOSEST(mode->clock, mode->htotal);
}
static struct drm_display_mode *
drm_gtf2_mode(struct drm_device *dev,
const struct drm_edid *drm_edid,
int hsize, int vsize, int vrefresh_rate)
{
struct drm_display_mode *mode;
/*
* This is potentially wrong if there's ever a monitor with
* more than one ranges section, each claiming a different
* secondary GTF curve. Please don't do that.
*/
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
if (!mode)
return NULL;
if (drm_mode_hsync(mode) > drm_gtf2_hbreak(drm_edid)) {
drm_mode_destroy(dev, mode);
mode = drm_gtf_mode_complex(dev, hsize, vsize,
vrefresh_rate, 0, 0,
drm_gtf2_m(drm_edid),
drm_gtf2_2c(drm_edid),
drm_gtf2_k(drm_edid),
drm_gtf2_2j(drm_edid));
}
return mode;
}
/*
* Take the standard timing params (in this case width, aspect, and refresh)
* and convert them into a real mode using CVT/GTF/DMT.
*/
static struct drm_display_mode *drm_mode_std(struct drm_connector *connector,
const struct drm_edid *drm_edid,
const struct std_timing *t)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *m, *mode = NULL;
int hsize, vsize;
int vrefresh_rate;
unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
>> EDID_TIMING_ASPECT_SHIFT;
unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
>> EDID_TIMING_VFREQ_SHIFT;
int timing_level = standard_timing_level(drm_edid);
if (bad_std_timing(t->hsize, t->vfreq_aspect))
return NULL;
/* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
hsize = t->hsize * 8 + 248;
/* vrefresh_rate = vfreq + 60 */
vrefresh_rate = vfreq + 60;
/* the vdisplay is calculated based on the aspect ratio */
if (aspect_ratio == 0) {
if (drm_edid->edid->revision < 3)
vsize = hsize;
else
vsize = (hsize * 10) / 16;
} else if (aspect_ratio == 1)
vsize = (hsize * 3) / 4;
else if (aspect_ratio == 2)
vsize = (hsize * 4) / 5;
else
vsize = (hsize * 9) / 16;
/* HDTV hack, part 1 */
if (vrefresh_rate == 60 &&
((hsize == 1360 && vsize == 765) ||
(hsize == 1368 && vsize == 769))) {
hsize = 1366;
vsize = 768;
}
/*
* If this connector already has a mode for this size and refresh
* rate (because it came from detailed or CVT info), use that
* instead. This way we don't have to guess at interlace or
* reduced blanking.
*/
list_for_each_entry(m, &connector->probed_modes, head)
if (m->hdisplay == hsize && m->vdisplay == vsize &&
drm_mode_vrefresh(m) == vrefresh_rate)
return NULL;
/* HDTV hack, part 2 */
if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
false);
if (!mode)
return NULL;
mode->hdisplay = 1366;
mode->hsync_start = mode->hsync_start - 1;
mode->hsync_end = mode->hsync_end - 1;
return mode;
}
/* check whether it can be found in default mode table */
if (drm_monitor_supports_rb(drm_edid)) {
mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate,
true);
if (mode)
return mode;
}
mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false);
if (mode)
return mode;
/* okay, generate it */
switch (timing_level) {
case LEVEL_DMT:
break;
case LEVEL_GTF:
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
break;
case LEVEL_GTF2:
mode = drm_gtf2_mode(dev, drm_edid, hsize, vsize, vrefresh_rate);
break;
case LEVEL_CVT:
mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
false);
break;
}
return mode;
}
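/*
 * Worked example (illustrative, not from the original source): a standard
 * timing with hsize byte 0x81 and vfreq_aspect 0x00 decodes to
 * hdisplay = 0x81 * 8 + 248 = 1280, aspect code 0 (16:10 on EDID 1.3+)
 * giving vdisplay = 1280 * 10 / 16 = 800, at a refresh of 0 + 60 = 60 Hz.
 */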
/*
* EDID is delightfully ambiguous about how interlaced modes are to be
* encoded. Our internal representation is of frame height, but some
* HDTV detailed timings are encoded as field height.
*
* The format list here is from CEA, in frame size. Technically we
* should be checking refresh rate too. Whatever.
*/
static void
drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
const struct detailed_pixel_timing *pt)
{
int i;
static const struct {
int w, h;
} cea_interlaced[] = {
{ 1920, 1080 },
{ 720, 480 },
{ 1440, 480 },
{ 2880, 480 },
{ 720, 576 },
{ 1440, 576 },
{ 2880, 576 },
};
if (!(pt->misc & DRM_EDID_PT_INTERLACED))
return;
for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) {
if ((mode->hdisplay == cea_interlaced[i].w) &&
(mode->vdisplay == cea_interlaced[i].h / 2)) {
mode->vdisplay *= 2;
mode->vsync_start *= 2;
mode->vsync_end *= 2;
mode->vtotal *= 2;
mode->vtotal |= 1;
}
}
mode->flags |= DRM_MODE_FLAG_INTERLACE;
}
/*
* Create a new mode from an EDID detailed timing section. An EDID detailed
* timing block contains enough info for us to create and return a new struct
* drm_display_mode.
*/
static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connector,
const struct drm_edid *drm_edid,
const struct detailed_timing *timing)
{
const struct drm_display_info *info = &connector->display_info;
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
const struct detailed_pixel_timing *pt = &timing->data.pixel_data;
unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
/* ignore tiny modes */
if (hactive < 64 || vactive < 64)
return NULL;
if (pt->misc & DRM_EDID_PT_STEREO) {
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Stereo mode not supported\n",
connector->base.id, connector->name);
return NULL;
}
if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Composite sync not supported\n",
connector->base.id, connector->name);
}
	/* a zero hsync/vsync pulse width is invalid */
if (!hsync_pulse_width || !vsync_pulse_width) {
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Incorrect Detailed timing. Wrong Hsync/Vsync pulse width\n",
connector->base.id, connector->name);
return NULL;
}
if (info->quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
if (!mode)
return NULL;
goto set_size;
}
mode = drm_mode_create(dev);
if (!mode)
return NULL;
if (info->quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
mode->clock = 1088 * 10;
else
mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
mode->hdisplay = hactive;
mode->hsync_start = mode->hdisplay + hsync_offset;
mode->hsync_end = mode->hsync_start + hsync_pulse_width;
mode->htotal = mode->hdisplay + hblank;
mode->vdisplay = vactive;
mode->vsync_start = mode->vdisplay + vsync_offset;
mode->vsync_end = mode->vsync_start + vsync_pulse_width;
mode->vtotal = mode->vdisplay + vblank;
/* Some EDIDs have bogus h/vtotal values */
if (mode->hsync_end > mode->htotal)
mode->htotal = mode->hsync_end + 1;
if (mode->vsync_end > mode->vtotal)
mode->vtotal = mode->vsync_end + 1;
drm_mode_do_interlace_quirk(mode, pt);
if (info->quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC;
} else {
mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
}
set_size:
mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
if (info->quirks & EDID_QUIRK_DETAILED_IN_CM) {
mode->width_mm *= 10;
mode->height_mm *= 10;
}
if (info->quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
mode->width_mm = drm_edid->edid->width_cm * 10;
mode->height_mm = drm_edid->edid->height_cm * 10;
}
mode->type = DRM_MODE_TYPE_DRIVER;
drm_mode_set_name(mode);
return mode;
}
static bool
mode_in_hsync_range(const struct drm_display_mode *mode,
const struct edid *edid, const u8 *t)
{
int hsync, hmin, hmax;
hmin = t[7];
if (edid->revision >= 4)
hmin += ((t[4] & 0x04) ? 255 : 0);
hmax = t[8];
if (edid->revision >= 4)
hmax += ((t[4] & 0x08) ? 255 : 0);
hsync = drm_mode_hsync(mode);
return (hsync <= hmax && hsync >= hmin);
}
static bool
mode_in_vsync_range(const struct drm_display_mode *mode,
const struct edid *edid, const u8 *t)
{
int vsync, vmin, vmax;
vmin = t[5];
if (edid->revision >= 4)
vmin += ((t[4] & 0x01) ? 255 : 0);
vmax = t[6];
if (edid->revision >= 4)
vmax += ((t[4] & 0x02) ? 255 : 0);
vsync = drm_mode_vrefresh(mode);
return (vsync <= vmax && vsync >= vmin);
}
static u32
range_pixel_clock(const struct edid *edid, const u8 *t)
{
/* unspecified */
if (t[9] == 0 || t[9] == 255)
return 0;
/* 1.4 with CVT support gives us real precision, yay */
if (edid->revision >= 4 && t[10] == DRM_EDID_CVT_SUPPORT_FLAG)
return (t[9] * 10000) - ((t[12] >> 2) * 250);
/* 1.3 is pathetic, so fuzz up a bit */
return t[9] * 10000 + 5001;
}
static bool mode_in_range(const struct drm_display_mode *mode,
const struct drm_edid *drm_edid,
const struct detailed_timing *timing)
{
const struct edid *edid = drm_edid->edid;
u32 max_clock;
const u8 *t = (const u8 *)timing;
if (!mode_in_hsync_range(mode, edid, t))
return false;
if (!mode_in_vsync_range(mode, edid, t))
return false;
if ((max_clock = range_pixel_clock(edid, t)))
if (mode->clock > max_clock)
return false;
/* 1.4 max horizontal check */
if (edid->revision >= 4 && t[10] == DRM_EDID_CVT_SUPPORT_FLAG)
if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
return false;
if (mode_is_rb(mode) && !drm_monitor_supports_rb(drm_edid))
return false;
return true;
}
static bool valid_inferred_mode(const struct drm_connector *connector,
const struct drm_display_mode *mode)
{
const struct drm_display_mode *m;
bool ok = false;
list_for_each_entry(m, &connector->probed_modes, head) {
if (mode->hdisplay == m->hdisplay &&
mode->vdisplay == m->vdisplay &&
drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
return false; /* duplicated */
if (mode->hdisplay <= m->hdisplay &&
mode->vdisplay <= m->vdisplay)
ok = true;
}
return ok;
}
static int drm_dmt_modes_for_range(struct drm_connector *connector,
const struct drm_edid *drm_edid,
const struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
if (mode_in_range(drm_dmt_modes + i, drm_edid, timing) &&
valid_inferred_mode(connector, drm_dmt_modes + i)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
}
}
}
return modes;
}
/* fix up 1366x768 mode from 1368x768;
 * GTF/CVT can't express a 1366 width, which isn't divisible by 8
*/
void drm_mode_fixup_1366x768(struct drm_display_mode *mode)
{
if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
mode->hdisplay = 1366;
mode->hsync_start--;
mode->hsync_end--;
drm_mode_set_name(mode);
}
}
static int drm_gtf_modes_for_range(struct drm_connector *connector,
const struct drm_edid *drm_edid,
const struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
const struct minimode *m = &extra_modes[i];
newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
if (!newmode)
return modes;
drm_mode_fixup_1366x768(newmode);
if (!mode_in_range(newmode, drm_edid, timing) ||
!valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
continue;
}
drm_mode_probed_add(connector, newmode);
modes++;
}
return modes;
}
static int drm_gtf2_modes_for_range(struct drm_connector *connector,
const struct drm_edid *drm_edid,
const struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
const struct minimode *m = &extra_modes[i];
newmode = drm_gtf2_mode(dev, drm_edid, m->w, m->h, m->r);
if (!newmode)
return modes;
drm_mode_fixup_1366x768(newmode);
if (!mode_in_range(newmode, drm_edid, timing) ||
!valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
continue;
}
drm_mode_probed_add(connector, newmode);
modes++;
}
return modes;
}
static int drm_cvt_modes_for_range(struct drm_connector *connector,
const struct drm_edid *drm_edid,
const struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
bool rb = drm_monitor_supports_rb(drm_edid);
for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
const struct minimode *m = &extra_modes[i];
newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
if (!newmode)
return modes;
drm_mode_fixup_1366x768(newmode);
if (!mode_in_range(newmode, drm_edid, timing) ||
!valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
continue;
}
drm_mode_probed_add(connector, newmode);
modes++;
}
return modes;
}
static void
do_inferred_modes(const struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
const struct detailed_non_pixel *data = &timing->data.other_data;
const struct detailed_data_monitor_range *range = &data->data.range;
if (!is_display_descriptor(timing, EDID_DETAIL_MONITOR_RANGE))
return;
closure->modes += drm_dmt_modes_for_range(closure->connector,
closure->drm_edid,
timing);
if (closure->drm_edid->edid->revision < 2)
return; /* GTF not defined yet */
switch (range->flags) {
case DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG:
closure->modes += drm_gtf2_modes_for_range(closure->connector,
closure->drm_edid,
timing);
break;
case DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG:
closure->modes += drm_gtf_modes_for_range(closure->connector,
closure->drm_edid,
timing);
break;
case DRM_EDID_CVT_SUPPORT_FLAG:
if (closure->drm_edid->edid->revision < 4)
break;
closure->modes += drm_cvt_modes_for_range(closure->connector,
closure->drm_edid,
timing);
break;
case DRM_EDID_RANGE_LIMITS_ONLY_FLAG:
default:
break;
}
}
static int add_inferred_modes(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
struct detailed_mode_closure closure = {
.connector = connector,
.drm_edid = drm_edid,
};
if (drm_edid->edid->revision >= 1)
drm_for_each_detailed_block(drm_edid, do_inferred_modes, &closure);
return closure.modes;
}
static int
drm_est3_modes(struct drm_connector *connector, const struct detailed_timing *timing)
{
int i, j, m, modes = 0;
struct drm_display_mode *mode;
const u8 *est = ((const u8 *)timing) + 6;
for (i = 0; i < 6; i++) {
for (j = 7; j >= 0; j--) {
m = (i * 8) + (7 - j);
if (m >= ARRAY_SIZE(est3_modes))
break;
if (est[i] & (1 << j)) {
mode = drm_mode_find_dmt(connector->dev,
est3_modes[m].w,
est3_modes[m].h,
est3_modes[m].r,
est3_modes[m].rb);
if (mode) {
drm_mode_probed_add(connector, mode);
modes++;
}
}
}
}
return modes;
}
static void
do_established_modes(const struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
if (!is_display_descriptor(timing, EDID_DETAIL_EST_TIMINGS))
return;
closure->modes += drm_est3_modes(closure->connector, timing);
}
/*
* Get established modes from EDID and add them. Each EDID block contains a
* bitmap of the supported "established modes" list (defined above). Tease them
* out and add them to the global modes list.
*/
static int add_established_modes(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
struct drm_device *dev = connector->dev;
const struct edid *edid = drm_edid->edid;
unsigned long est_bits = edid->established_timings.t1 |
(edid->established_timings.t2 << 8) |
((edid->established_timings.mfg_rsvd & 0x80) << 9);
int i, modes = 0;
struct detailed_mode_closure closure = {
.connector = connector,
.drm_edid = drm_edid,
};
for (i = 0; i <= EDID_EST_TIMINGS; i++) {
if (est_bits & (1<<i)) {
struct drm_display_mode *newmode;
newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
}
}
}
if (edid->revision >= 1)
drm_for_each_detailed_block(drm_edid, do_established_modes,
&closure);
return modes + closure.modes;
}
static void
do_standard_modes(const struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
const struct detailed_non_pixel *data = &timing->data.other_data;
struct drm_connector *connector = closure->connector;
int i;
if (!is_display_descriptor(timing, EDID_DETAIL_STD_MODES))
return;
for (i = 0; i < 6; i++) {
const struct std_timing *std = &data->data.timings[i];
struct drm_display_mode *newmode;
newmode = drm_mode_std(connector, closure->drm_edid, std);
if (newmode) {
drm_mode_probed_add(connector, newmode);
closure->modes++;
}
}
}
/*
* Get standard modes from EDID and add them. Standard modes can be calculated
* using the appropriate standard (DMT, GTF, or CVT). Grab them from EDID and
* add them to the list.
*/
static int add_standard_modes(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
int i, modes = 0;
struct detailed_mode_closure closure = {
.connector = connector,
.drm_edid = drm_edid,
};
for (i = 0; i < EDID_STD_TIMINGS; i++) {
struct drm_display_mode *newmode;
newmode = drm_mode_std(connector, drm_edid,
&drm_edid->edid->standard_timings[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
}
}
if (drm_edid->edid->revision >= 1)
drm_for_each_detailed_block(drm_edid, do_standard_modes,
&closure);
/* XXX should also look for standard codes in VTB blocks */
return modes + closure.modes;
}
static int drm_cvt_modes(struct drm_connector *connector,
const struct detailed_timing *timing)
{
int i, j, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
const struct cvt_timing *cvt;
static const int rates[] = { 60, 85, 75, 60, 50 };
const u8 empty[3] = { 0, 0, 0 };
for (i = 0; i < 4; i++) {
int width, height;
cvt = &(timing->data.other_data.data.cvt[i]);
if (!memcmp(cvt->code, empty, 3))
continue;
height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
switch (cvt->code[1] & 0x0c) {
/* default - because compiler doesn't see that we've enumerated all cases */
default:
case 0x00:
width = height * 4 / 3;
break;
case 0x04:
width = height * 16 / 9;
break;
case 0x08:
width = height * 16 / 10;
break;
case 0x0c:
width = height * 15 / 9;
break;
}
for (j = 1; j < 5; j++) {
if (cvt->code[2] & (1 << j)) {
newmode = drm_cvt_mode(dev, width, height,
rates[j], j == 0,
false, false);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
}
}
}
}
return modes;
}
static void
do_cvt_mode(const struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
if (!is_display_descriptor(timing, EDID_DETAIL_CVT_3BYTE))
return;
closure->modes += drm_cvt_modes(closure->connector, timing);
}
static int
add_cvt_modes(struct drm_connector *connector, const struct drm_edid *drm_edid)
{
struct detailed_mode_closure closure = {
.connector = connector,
.drm_edid = drm_edid,
};
if (drm_edid->edid->revision >= 3)
drm_for_each_detailed_block(drm_edid, do_cvt_mode, &closure);
/* XXX should also look for CVT codes in VTB blocks */
return closure.modes;
}
static void fixup_detailed_cea_mode_clock(struct drm_connector *connector,
struct drm_display_mode *mode);
static void
do_detailed_mode(const struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
struct drm_display_mode *newmode;
if (!is_detailed_timing_descriptor(timing))
return;
newmode = drm_mode_detailed(closure->connector,
closure->drm_edid, timing);
if (!newmode)
return;
if (closure->preferred)
newmode->type |= DRM_MODE_TYPE_PREFERRED;
/*
* Detailed modes are limited to 10kHz pixel clock resolution,
 * so fix up anything that looks like a CEA/HDMI mode whose clock
 * is just slightly off.
*/
fixup_detailed_cea_mode_clock(closure->connector, newmode);
drm_mode_probed_add(closure->connector, newmode);
closure->modes++;
closure->preferred = false;
}
/*
* add_detailed_modes - Add modes from detailed timings
* @connector: attached connector
* @drm_edid: EDID block to scan
*/
static int add_detailed_modes(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
struct detailed_mode_closure closure = {
.connector = connector,
.drm_edid = drm_edid,
};
if (drm_edid->edid->revision >= 4)
closure.preferred = true; /* first detailed timing is always preferred */
else
closure.preferred =
drm_edid->edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING;
drm_for_each_detailed_block(drm_edid, do_detailed_mode, &closure);
return closure.modes;
}
/* CTA-861-H Table 60 - CTA Tag Codes */
#define CTA_DB_AUDIO 1
#define CTA_DB_VIDEO 2
#define CTA_DB_VENDOR 3
#define CTA_DB_SPEAKER 4
#define CTA_DB_EXTENDED_TAG 7
/* CTA-861-H Table 62 - CTA Extended Tag Codes */
#define CTA_EXT_DB_VIDEO_CAP 0
#define CTA_EXT_DB_VENDOR 1
#define CTA_EXT_DB_HDR_STATIC_METADATA 6
#define CTA_EXT_DB_420_VIDEO_DATA 14
#define CTA_EXT_DB_420_VIDEO_CAP_MAP 15
#define CTA_EXT_DB_HF_EEODB 0x78
#define CTA_EXT_DB_HF_SCDB 0x79
#define EDID_BASIC_AUDIO (1 << 6)
#define EDID_CEA_YCRCB444 (1 << 5)
#define EDID_CEA_YCRCB422 (1 << 4)
#define EDID_CEA_VCDB_QS (1 << 6)
/*
* Search EDID for CEA extension block.
*
* FIXME: Prefer not returning pointers to raw EDID data.
*/
const u8 *drm_find_edid_extension(const struct drm_edid *drm_edid,
int ext_id, int *ext_index)
{
const u8 *edid_ext = NULL;
int i;
/* No EDID or EDID extensions */
if (!drm_edid || !drm_edid_extension_block_count(drm_edid))
return NULL;
/* Find CEA extension */
for (i = *ext_index; i < drm_edid_extension_block_count(drm_edid); i++) {
edid_ext = drm_edid_extension_block_data(drm_edid, i);
if (edid_block_tag(edid_ext) == ext_id)
break;
}
if (i >= drm_edid_extension_block_count(drm_edid))
return NULL;
*ext_index = i + 1;
return edid_ext;
}
/* Return true if the EDID has a CTA extension or a DisplayID CTA data block */
static bool drm_edid_has_cta_extension(const struct drm_edid *drm_edid)
{
const struct displayid_block *block;
struct displayid_iter iter;
int ext_index = 0;
bool found = false;
/* Look for a top level CEA extension block */
if (drm_find_edid_extension(drm_edid, CEA_EXT, &ext_index))
return true;
/* CEA blocks can also be found embedded in a DisplayID block */
displayid_iter_edid_begin(drm_edid, &iter);
displayid_iter_for_each(block, &iter) {
if (block->tag == DATA_BLOCK_CTA) {
found = true;
break;
}
}
displayid_iter_end(&iter);
return found;
}
static __always_inline const struct drm_display_mode *cea_mode_for_vic(u8 vic)
{
BUILD_BUG_ON(1 + ARRAY_SIZE(edid_cea_modes_1) - 1 != 127);
BUILD_BUG_ON(193 + ARRAY_SIZE(edid_cea_modes_193) - 1 != 219);
if (vic >= 1 && vic < 1 + ARRAY_SIZE(edid_cea_modes_1))
return &edid_cea_modes_1[vic - 1];
if (vic >= 193 && vic < 193 + ARRAY_SIZE(edid_cea_modes_193))
return &edid_cea_modes_193[vic - 193];
return NULL;
}
static u8 cea_num_vics(void)
{
return 193 + ARRAY_SIZE(edid_cea_modes_193);
}
static u8 cea_next_vic(u8 vic)
{
if (++vic == 1 + ARRAY_SIZE(edid_cea_modes_1))
vic = 193;
return vic;
}
/*
* Calculate the alternate clock for the CEA mode
* (60Hz vs. 59.94Hz etc.)
*/
static unsigned int
cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
{
unsigned int clock = cea_mode->clock;
if (drm_mode_vrefresh(cea_mode) % 6 != 0)
return clock;
/*
* edid_cea_modes contains the 59.94Hz
* variant for 240 and 480 line modes,
* and the 60Hz variant otherwise.
*/
if (cea_mode->vdisplay == 240 || cea_mode->vdisplay == 480)
clock = DIV_ROUND_CLOSEST(clock * 1001, 1000);
else
clock = DIV_ROUND_CLOSEST(clock * 1000, 1001);
return clock;
}
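/*
 * Worked example (illustrative, not from the original source): VIC 16
 * (1920x1080@60Hz) has a 148500 kHz clock, so the 59.94 Hz alternate is
 * DIV_ROUND_CLOSEST(148500 * 1000, 1001) = 148352 kHz; 240/480-line modes
 * go the other way, e.g. 27000 kHz becomes 27027 kHz.
 */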
static bool
cea_mode_alternate_timings(u8 vic, struct drm_display_mode *mode)
{
/*
* For certain VICs the spec allows the vertical
* front porch to vary by one or two lines.
*
* cea_modes[] stores the variant with the shortest
* vertical front porch. We can adjust the mode to
* get the other variants by simply increasing the
* vertical front porch length.
*/
BUILD_BUG_ON(cea_mode_for_vic(8)->vtotal != 262 ||
cea_mode_for_vic(9)->vtotal != 262 ||
cea_mode_for_vic(12)->vtotal != 262 ||
cea_mode_for_vic(13)->vtotal != 262 ||
cea_mode_for_vic(23)->vtotal != 312 ||
cea_mode_for_vic(24)->vtotal != 312 ||
cea_mode_for_vic(27)->vtotal != 312 ||
cea_mode_for_vic(28)->vtotal != 312);
if (((vic == 8 || vic == 9 ||
vic == 12 || vic == 13) && mode->vtotal < 263) ||
((vic == 23 || vic == 24 ||
vic == 27 || vic == 28) && mode->vtotal < 314)) {
mode->vsync_start++;
mode->vsync_end++;
mode->vtotal++;
return true;
}
return false;
}
static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match,
unsigned int clock_tolerance)
{
unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS;
u8 vic;
if (!to_match->clock)
return 0;
if (to_match->picture_aspect_ratio)
match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
for (vic = 1; vic < cea_num_vics(); vic = cea_next_vic(vic)) {
struct drm_display_mode cea_mode;
unsigned int clock1, clock2;
drm_mode_init(&cea_mode, cea_mode_for_vic(vic));
/* Check both 60Hz and 59.94Hz */
clock1 = cea_mode.clock;
clock2 = cea_mode_alternate_clock(&cea_mode);
if (abs(to_match->clock - clock1) > clock_tolerance &&
abs(to_match->clock - clock2) > clock_tolerance)
continue;
do {
if (drm_mode_match(to_match, &cea_mode, match_flags))
return vic;
} while (cea_mode_alternate_timings(vic, &cea_mode));
}
return 0;
}
/**
* drm_match_cea_mode - look for a CEA mode matching given mode
* @to_match: display mode
*
* Return: The CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861
* mode.
*/
u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
{
unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS;
u8 vic;
if (!to_match->clock)
return 0;
if (to_match->picture_aspect_ratio)
match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
for (vic = 1; vic < cea_num_vics(); vic = cea_next_vic(vic)) {
struct drm_display_mode cea_mode;
unsigned int clock1, clock2;
drm_mode_init(&cea_mode, cea_mode_for_vic(vic));
/* Check both 60Hz and 59.94Hz */
clock1 = cea_mode.clock;
clock2 = cea_mode_alternate_clock(&cea_mode);
if (KHZ2PICOS(to_match->clock) != KHZ2PICOS(clock1) &&
KHZ2PICOS(to_match->clock) != KHZ2PICOS(clock2))
continue;
do {
if (drm_mode_match(to_match, &cea_mode, match_flags))
return vic;
} while (cea_mode_alternate_timings(vic, &cea_mode));
}
return 0;
}
EXPORT_SYMBOL(drm_match_cea_mode);
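/*
 * Illustrative use of drm_match_cea_mode() (a sketch, not taken from an
 * actual driver): the returned VIC tells the caller whether a mode needs
 * CTA-861 treatment, e.g.:
 *
 *	u8 vic = drm_match_cea_mode(adjusted_mode);
 *
 *	if (vic) {
 *		// program the VIC into the AVI infoframe
 *	} else {
 *		// handle as a non-CEA (DMT/detailed) mode
 *	}
 */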
static bool drm_valid_cea_vic(u8 vic)
{
return cea_mode_for_vic(vic) != NULL;
}
static enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
{
const struct drm_display_mode *mode = cea_mode_for_vic(video_code);
if (mode)
return mode->picture_aspect_ratio;
return HDMI_PICTURE_ASPECT_NONE;
}
static enum hdmi_picture_aspect drm_get_hdmi_aspect_ratio(const u8 video_code)
{
return edid_4k_modes[video_code].picture_aspect_ratio;
}
/*
* Calculate the alternate clock for HDMI modes (those from the HDMI vendor
* specific block).
*/
static unsigned int
hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
{
return cea_mode_alternate_clock(hdmi_mode);
}
static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_match,
unsigned int clock_tolerance)
{
unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS;
u8 vic;
if (!to_match->clock)
return 0;
if (to_match->picture_aspect_ratio)
match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) {
const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic];
unsigned int clock1, clock2;
/* Make sure to also match alternate clocks */
clock1 = hdmi_mode->clock;
clock2 = hdmi_mode_alternate_clock(hdmi_mode);
if (abs(to_match->clock - clock1) > clock_tolerance &&
abs(to_match->clock - clock2) > clock_tolerance)
continue;
if (drm_mode_match(to_match, hdmi_mode, match_flags))
return vic;
}
return 0;
}
/*
 * drm_match_hdmi_mode - look for an HDMI mode matching a given mode
* @to_match: display mode
*
* An HDMI mode is one defined in the HDMI vendor specific block.
*
* Returns the HDMI Video ID (VIC) of the mode or 0 if it isn't one.
*/
static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
{
unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS;
u8 vic;
if (!to_match->clock)
return 0;
if (to_match->picture_aspect_ratio)
match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) {
const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic];
unsigned int clock1, clock2;
/* Make sure to also match alternate clocks */
clock1 = hdmi_mode->clock;
clock2 = hdmi_mode_alternate_clock(hdmi_mode);
if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
drm_mode_match(to_match, hdmi_mode, match_flags))
return vic;
}
return 0;
}
static bool drm_valid_hdmi_vic(u8 vic)
{
return vic > 0 && vic < ARRAY_SIZE(edid_4k_modes);
}
static int add_alternate_cea_modes(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *tmp;
LIST_HEAD(list);
int modes = 0;
/* Don't add CTA modes if the CTA extension block is missing */
if (!drm_edid_has_cta_extension(drm_edid))
return 0;
/*
* Go through all probed modes and create a new mode
* with the alternate clock for certain CEA modes.
*/
list_for_each_entry(mode, &connector->probed_modes, head) {
const struct drm_display_mode *cea_mode = NULL;
struct drm_display_mode *newmode;
u8 vic = drm_match_cea_mode(mode);
unsigned int clock1, clock2;
if (drm_valid_cea_vic(vic)) {
cea_mode = cea_mode_for_vic(vic);
clock2 = cea_mode_alternate_clock(cea_mode);
} else {
vic = drm_match_hdmi_mode(mode);
if (drm_valid_hdmi_vic(vic)) {
cea_mode = &edid_4k_modes[vic];
clock2 = hdmi_mode_alternate_clock(cea_mode);
}
}
if (!cea_mode)
continue;
clock1 = cea_mode->clock;
if (clock1 == clock2)
continue;
if (mode->clock != clock1 && mode->clock != clock2)
continue;
newmode = drm_mode_duplicate(dev, cea_mode);
if (!newmode)
continue;
/* Carry over the stereo flags */
newmode->flags |= mode->flags & DRM_MODE_FLAG_3D_MASK;
/*
* The current mode could be either variant. Make
* sure to pick the "other" clock for the new mode.
*/
if (mode->clock != clock1)
newmode->clock = clock1;
else
newmode->clock = clock2;
list_add_tail(&newmode->head, &list);
}
list_for_each_entry_safe(mode, tmp, &list, head) {
list_del(&mode->head);
drm_mode_probed_add(connector, mode);
modes++;
}
return modes;
}
static u8 svd_to_vic(u8 svd)
{
	/* bits 0-6: VIC, bit 7: native mode indicator */
if ((svd >= 1 && svd <= 64) || (svd >= 129 && svd <= 192))
return svd & 127;
return svd;
}
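/*
 * Worked example for svd_to_vic(): an SVD of 0x84 (132) falls in the
 * 129-192 range, so bit 7 merely flags the mode as "native" and the VIC is
 * 132 & 127 = 4. An SVD of 193 or above is returned unchanged, since those
 * VICs use the full byte and carry no native-mode flag.
 */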
/*
* Return a display mode for the 0-based vic_index'th VIC across all CTA VDBs in
* the EDID, or NULL on errors.
*/
static struct drm_display_mode *
drm_display_mode_from_vic_index(struct drm_connector *connector, int vic_index)
{
const struct drm_display_info *info = &connector->display_info;
struct drm_device *dev = connector->dev;
if (!info->vics || vic_index >= info->vics_len || !info->vics[vic_index])
return NULL;
return drm_display_mode_from_cea_vic(dev, info->vics[vic_index]);
}
/*
* do_y420vdb_modes - Parse YCBCR 420 only modes
* @connector: connector corresponding to the HDMI sink
* @svds: start of the data block of CEA YCBCR 420 VDB
 * @svds_len: length of the CEA YCBCR 420 VDB
 *
 * Parse the CEA-861-F YCBCR 420 Video Data Block (Y420VDB), which contains
 * modes that can be supported in YCBCR 420 output format only.
*/
static int do_y420vdb_modes(struct drm_connector *connector,
const u8 *svds, u8 svds_len)
{
struct drm_device *dev = connector->dev;
int modes = 0, i;
for (i = 0; i < svds_len; i++) {
u8 vic = svd_to_vic(svds[i]);
struct drm_display_mode *newmode;
if (!drm_valid_cea_vic(vic))
continue;
newmode = drm_mode_duplicate(dev, cea_mode_for_vic(vic));
if (!newmode)
break;
drm_mode_probed_add(connector, newmode);
modes++;
}
return modes;
}
/**
* drm_display_mode_from_cea_vic() - return a mode for CEA VIC
* @dev: DRM device
* @video_code: CEA VIC of the mode
*
* Creates a new mode matching the specified CEA VIC.
*
* Returns: A new drm_display_mode on success or NULL on failure
*/
struct drm_display_mode *
drm_display_mode_from_cea_vic(struct drm_device *dev,
u8 video_code)
{
const struct drm_display_mode *cea_mode;
struct drm_display_mode *newmode;
cea_mode = cea_mode_for_vic(video_code);
if (!cea_mode)
return NULL;
newmode = drm_mode_duplicate(dev, cea_mode);
if (!newmode)
return NULL;
return newmode;
}
EXPORT_SYMBOL(drm_display_mode_from_cea_vic);
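/*
 * Usage sketch for drm_display_mode_from_cea_vic() (illustrative only; the
 * VIC value is an assumption based on the standard CTA-861 table):
 *
 *	struct drm_display_mode *mode;
 *
 *	mode = drm_display_mode_from_cea_vic(dev, 16); // 1920x1080p60
 *	if (mode)
 *		drm_mode_probed_add(connector, mode);
 */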
/* Add modes based on VICs parsed in parse_cta_vdb() */
static int add_cta_vdb_modes(struct drm_connector *connector)
{
const struct drm_display_info *info = &connector->display_info;
int i, modes = 0;
if (!info->vics)
return 0;
for (i = 0; i < info->vics_len; i++) {
struct drm_display_mode *mode;
mode = drm_display_mode_from_vic_index(connector, i);
if (mode) {
drm_mode_probed_add(connector, mode);
modes++;
}
}
return modes;
}
struct stereo_mandatory_mode {
int width, height, vrefresh;
unsigned int flags;
};
static const struct stereo_mandatory_mode stereo_mandatory_modes[] = {
{ 1920, 1080, 24, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
{ 1920, 1080, 24, DRM_MODE_FLAG_3D_FRAME_PACKING },
{ 1920, 1080, 50,
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
{ 1920, 1080, 60,
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
{ 1280, 720, 50, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
{ 1280, 720, 50, DRM_MODE_FLAG_3D_FRAME_PACKING },
{ 1280, 720, 60, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
{ 1280, 720, 60, DRM_MODE_FLAG_3D_FRAME_PACKING }
};
static bool
stereo_match_mandatory(const struct drm_display_mode *mode,
const struct stereo_mandatory_mode *stereo_mode)
{
unsigned int interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
return mode->hdisplay == stereo_mode->width &&
mode->vdisplay == stereo_mode->height &&
interlaced == (stereo_mode->flags & DRM_MODE_FLAG_INTERLACE) &&
drm_mode_vrefresh(mode) == stereo_mode->vrefresh;
}
static int add_hdmi_mandatory_stereo_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
const struct drm_display_mode *mode;
struct list_head stereo_modes;
int modes = 0, i;
INIT_LIST_HEAD(&stereo_modes);
list_for_each_entry(mode, &connector->probed_modes, head) {
for (i = 0; i < ARRAY_SIZE(stereo_mandatory_modes); i++) {
const struct stereo_mandatory_mode *mandatory;
struct drm_display_mode *new_mode;
if (!stereo_match_mandatory(mode,
&stereo_mandatory_modes[i]))
continue;
mandatory = &stereo_mandatory_modes[i];
new_mode = drm_mode_duplicate(dev, mode);
if (!new_mode)
continue;
new_mode->flags |= mandatory->flags;
list_add_tail(&new_mode->head, &stereo_modes);
modes++;
}
}
list_splice_tail(&stereo_modes, &connector->probed_modes);
return modes;
}
static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *newmode;
if (!drm_valid_hdmi_vic(vic)) {
drm_err(connector->dev, "[CONNECTOR:%d:%s] Unknown HDMI VIC: %d\n",
connector->base.id, connector->name, vic);
return 0;
}
newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
if (!newmode)
return 0;
drm_mode_probed_add(connector, newmode);
return 1;
}
static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
int vic_index)
{
struct drm_display_mode *newmode;
int modes = 0;
if (structure & (1 << 0)) {
newmode = drm_display_mode_from_vic_index(connector, vic_index);
if (newmode) {
newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
drm_mode_probed_add(connector, newmode);
modes++;
}
}
if (structure & (1 << 6)) {
newmode = drm_display_mode_from_vic_index(connector, vic_index);
if (newmode) {
newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
drm_mode_probed_add(connector, newmode);
modes++;
}
}
if (structure & (1 << 8)) {
newmode = drm_display_mode_from_vic_index(connector, vic_index);
if (newmode) {
newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
drm_mode_probed_add(connector, newmode);
modes++;
}
}
return modes;
}
static bool hdmi_vsdb_latency_present(const u8 *db)
{
return db[8] & BIT(7);
}
static bool hdmi_vsdb_i_latency_present(const u8 *db)
{
return hdmi_vsdb_latency_present(db) && db[8] & BIT(6);
}
static int hdmi_vsdb_latency_length(const u8 *db)
{
if (hdmi_vsdb_i_latency_present(db))
return 4;
else if (hdmi_vsdb_latency_present(db))
return 2;
else
return 0;
}
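/*
 * Rough layout of the HDMI VSDB as consumed by do_hdmi_vsdb_modes() below
 * (a sketch based on HDMI 1.4b; db[0] is the tag/length byte):
 *
 *	db[1-3]: IEEE OUI
 *	db[4-5]: source physical address
 *	db[6-7]: optional flags and Max_TMDS_Clock
 *	db[8]:   Latency_Fields_Present (bit 7), I_Latency_Fields_Present
 *	         (bit 6), HDMI_Video_present (bit 5)
 *	db[9+]:  optional latency bytes (0, 2 or 4, see
 *	         hdmi_vsdb_latency_length()), then the 3D_present /
 *	         3D_Multi_present byte, the HDMI_VIC_LEN / HDMI_3D_LEN byte,
 *	         the HDMI VIC list and the 3D structure data.
 */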
/*
* do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
* @connector: connector corresponding to the HDMI sink
* @db: start of the CEA vendor specific block
 * @len: length of the CEA block payload, i.e. one can access up to db[len]
*
* Parses the HDMI VSDB looking for modes to add to @connector. This function
* also adds the stereo 3d modes when applicable.
*/
static int
do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
{
int modes = 0, offset = 0, i, multi_present = 0, multi_len;
u8 vic_len, hdmi_3d_len = 0;
u16 mask;
u16 structure_all;
if (len < 8)
goto out;
/* no HDMI_Video_Present */
if (!(db[8] & (1 << 5)))
goto out;
offset += hdmi_vsdb_latency_length(db);
	/* the declared length is not long enough for the first 2 bytes
	 * of additional video format capabilities */
if (len < (8 + offset + 2))
goto out;
/* 3D_Present */
offset++;
if (db[8 + offset] & (1 << 7)) {
modes += add_hdmi_mandatory_stereo_modes(connector);
/* 3D_Multi_present */
multi_present = (db[8 + offset] & 0x60) >> 5;
}
offset++;
vic_len = db[8 + offset] >> 5;
hdmi_3d_len = db[8 + offset] & 0x1f;
for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
u8 vic;
vic = db[9 + offset + i];
modes += add_hdmi_mode(connector, vic);
}
offset += 1 + vic_len;
if (multi_present == 1)
multi_len = 2;
else if (multi_present == 2)
multi_len = 4;
else
multi_len = 0;
if (len < (8 + offset + hdmi_3d_len - 1))
goto out;
if (hdmi_3d_len < multi_len)
goto out;
if (multi_present == 1 || multi_present == 2) {
/* 3D_Structure_ALL */
structure_all = (db[8 + offset] << 8) | db[9 + offset];
/* check if 3D_MASK is present */
if (multi_present == 2)
mask = (db[10 + offset] << 8) | db[11 + offset];
else
mask = 0xffff;
for (i = 0; i < 16; i++) {
if (mask & (1 << i))
modes += add_3d_struct_modes(connector,
structure_all, i);
}
}
offset += multi_len;
for (i = 0; i < (hdmi_3d_len - multi_len); i++) {
int vic_index;
struct drm_display_mode *newmode = NULL;
unsigned int newflag = 0;
bool detail_present;
detail_present = ((db[8 + offset + i] & 0x0f) > 7);
if (detail_present && (i + 1 == hdmi_3d_len - multi_len))
break;
/* 2D_VIC_order_X */
vic_index = db[8 + offset + i] >> 4;
/* 3D_Structure_X */
switch (db[8 + offset + i] & 0x0f) {
case 0:
newflag = DRM_MODE_FLAG_3D_FRAME_PACKING;
break;
case 6:
newflag = DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
break;
case 8:
/* 3D_Detail_X */
if ((db[9 + offset + i] >> 4) == 1)
newflag = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
break;
}
if (newflag != 0) {
newmode = drm_display_mode_from_vic_index(connector,
vic_index);
if (newmode) {
newmode->flags |= newflag;
drm_mode_probed_add(connector, newmode);
modes++;
}
}
if (detail_present)
i++;
}
out:
return modes;
}
static int
cea_revision(const u8 *cea)
{
/*
* FIXME is this correct for the DispID variant?
* The DispID spec doesn't really specify whether
* this is the revision of the CEA extension or
* the DispID CEA data block. And the only value
* given as an example is 0.
*/
return cea[1];
}
/*
* CTA Data Block iterator.
*
* Iterate through all CTA Data Blocks in both EDID CTA Extensions and DisplayID
* CTA Data Blocks.
*
 * struct cea_db *db;
* struct cea_db_iter iter;
*
* cea_db_iter_edid_begin(edid, &iter);
* cea_db_iter_for_each(db, &iter) {
* // do stuff with db
* }
* cea_db_iter_end(&iter);
*/
struct cea_db_iter {
struct drm_edid_iter edid_iter;
struct displayid_iter displayid_iter;
/* Current Data Block Collection. */
const u8 *collection;
/* Current Data Block index in current collection. */
int index;
/* End index in current collection. */
int end;
};
/* CTA-861-H section 7.4 CTA Data Block Collection */
struct cea_db {
u8 tag_length;
u8 data[];
} __packed;
static int cea_db_tag(const struct cea_db *db)
{
return db->tag_length >> 5;
}
static int cea_db_payload_len(const void *_db)
{
/* FIXME: Transition to passing struct cea_db * everywhere. */
const struct cea_db *db = _db;
return db->tag_length & 0x1f;
}
static const void *cea_db_data(const struct cea_db *db)
{
return db->data;
}
static bool cea_db_is_extended_tag(const struct cea_db *db, int tag)
{
return cea_db_tag(db) == CTA_DB_EXTENDED_TAG &&
cea_db_payload_len(db) >= 1 &&
db->data[0] == tag;
}
static bool cea_db_is_vendor(const struct cea_db *db, int vendor_oui)
{
const u8 *data = cea_db_data(db);
return cea_db_tag(db) == CTA_DB_VENDOR &&
cea_db_payload_len(db) >= 3 &&
oui(data[2], data[1], data[0]) == vendor_oui;
}
static void cea_db_iter_edid_begin(const struct drm_edid *drm_edid,
struct cea_db_iter *iter)
{
memset(iter, 0, sizeof(*iter));
drm_edid_iter_begin(drm_edid, &iter->edid_iter);
displayid_iter_edid_begin(drm_edid, &iter->displayid_iter);
}
static const struct cea_db *
__cea_db_iter_current_block(const struct cea_db_iter *iter)
{
const struct cea_db *db;
if (!iter->collection)
return NULL;
db = (const struct cea_db *)&iter->collection[iter->index];
if (iter->index + sizeof(*db) <= iter->end &&
iter->index + sizeof(*db) + cea_db_payload_len(db) <= iter->end)
return db;
return NULL;
}
/*
* References:
* - CTA-861-H section 7.3.3 CTA Extension Version 3
*/
static int cea_db_collection_size(const u8 *cta)
{
u8 d = cta[2];
if (d < 4 || d > 127)
return 0;
return d - 4;
}
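/*
 * Worked example for cea_db_collection_size(): with cta[2] = 0x20 (32), the
 * Detailed Timing Descriptors start at byte 32, so the data block collection
 * occupies bytes 4..31 and the function returns 32 - 4 = 28 bytes. A value
 * of 4 means an empty collection, and values below 4 (or above 127) are
 * treated as having no collection at all.
 */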
/*
* References:
* - VESA E-EDID v1.4
* - CTA-861-H section 7.3.3 CTA Extension Version 3
*/
static const void *__cea_db_iter_edid_next(struct cea_db_iter *iter)
{
const u8 *ext;
drm_edid_iter_for_each(ext, &iter->edid_iter) {
int size;
/* Only support CTA Extension revision 3+ */
if (ext[0] != CEA_EXT || cea_revision(ext) < 3)
continue;
size = cea_db_collection_size(ext);
if (!size)
continue;
iter->index = 4;
iter->end = iter->index + size;
return ext;
}
return NULL;
}
/*
* References:
* - DisplayID v1.3 Appendix C: CEA Data Block within a DisplayID Data Block
* - DisplayID v2.0 section 4.10 CTA DisplayID Data Block
*
* Note that the above do not specify any connection between DisplayID Data
* Block revision and CTA Extension versions.
*/
static const void *__cea_db_iter_displayid_next(struct cea_db_iter *iter)
{
const struct displayid_block *block;
displayid_iter_for_each(block, &iter->displayid_iter) {
if (block->tag != DATA_BLOCK_CTA)
continue;
/*
* The displayid iterator has already verified the block bounds
* in displayid_iter_block().
*/
iter->index = sizeof(*block);
iter->end = iter->index + block->num_bytes;
return block;
}
return NULL;
}
static const struct cea_db *__cea_db_iter_next(struct cea_db_iter *iter)
{
const struct cea_db *db;
if (iter->collection) {
/* Current collection should always be valid. */
db = __cea_db_iter_current_block(iter);
if (WARN_ON(!db)) {
iter->collection = NULL;
return NULL;
}
/* Next block in CTA Data Block Collection */
iter->index += sizeof(*db) + cea_db_payload_len(db);
db = __cea_db_iter_current_block(iter);
if (db)
return db;
}
for (;;) {
/*
* Find the next CTA Data Block Collection. First iterate all
* the EDID CTA Extensions, then all the DisplayID CTA blocks.
*
* Per DisplayID v1.3 Appendix B: DisplayID as an EDID
* Extension, it's recommended that DisplayID extensions are
* exposed after all of the CTA Extensions.
*/
iter->collection = __cea_db_iter_edid_next(iter);
if (!iter->collection)
iter->collection = __cea_db_iter_displayid_next(iter);
if (!iter->collection)
return NULL;
db = __cea_db_iter_current_block(iter);
if (db)
return db;
}
}
#define cea_db_iter_for_each(__db, __iter) \
while (((__db) = __cea_db_iter_next(__iter)))
static void cea_db_iter_end(struct cea_db_iter *iter)
{
displayid_iter_end(&iter->displayid_iter);
drm_edid_iter_end(&iter->edid_iter);
memset(iter, 0, sizeof(*iter));
}
static bool cea_db_is_hdmi_vsdb(const struct cea_db *db)
{
return cea_db_is_vendor(db, HDMI_IEEE_OUI) &&
cea_db_payload_len(db) >= 5;
}
static bool cea_db_is_hdmi_forum_vsdb(const struct cea_db *db)
{
return cea_db_is_vendor(db, HDMI_FORUM_IEEE_OUI) &&
cea_db_payload_len(db) >= 7;
}
static bool cea_db_is_hdmi_forum_eeodb(const void *db)
{
return cea_db_is_extended_tag(db, CTA_EXT_DB_HF_EEODB) &&
cea_db_payload_len(db) >= 2;
}
static bool cea_db_is_microsoft_vsdb(const struct cea_db *db)
{
return cea_db_is_vendor(db, MICROSOFT_IEEE_OUI) &&
cea_db_payload_len(db) == 21;
}
static bool cea_db_is_vcdb(const struct cea_db *db)
{
return cea_db_is_extended_tag(db, CTA_EXT_DB_VIDEO_CAP) &&
cea_db_payload_len(db) == 2;
}
static bool cea_db_is_hdmi_forum_scdb(const struct cea_db *db)
{
return cea_db_is_extended_tag(db, CTA_EXT_DB_HF_SCDB) &&
cea_db_payload_len(db) >= 7;
}
static bool cea_db_is_y420cmdb(const struct cea_db *db)
{
return cea_db_is_extended_tag(db, CTA_EXT_DB_420_VIDEO_CAP_MAP);
}
static bool cea_db_is_y420vdb(const struct cea_db *db)
{
return cea_db_is_extended_tag(db, CTA_EXT_DB_420_VIDEO_DATA);
}
static bool cea_db_is_hdmi_hdr_metadata_block(const struct cea_db *db)
{
return cea_db_is_extended_tag(db, CTA_EXT_DB_HDR_STATIC_METADATA) &&
cea_db_payload_len(db) >= 3;
}
/*
* Get the HF-EEODB override extension block count from EDID.
*
* The passed in EDID may be partially read, as long as it has at least two
* blocks (base block and one extension block) if EDID extension count is > 0.
*
* Note that this is *not* how you should parse CTA Data Blocks in general; this
* is only to handle partially read EDIDs. Normally, use the CTA Data Block
* iterators instead.
*
* References:
* - HDMI 2.1 section 10.3.6 HDMI Forum EDID Extension Override Data Block
*/
static int edid_hfeeodb_extension_block_count(const struct edid *edid)
{
const u8 *cta;
/* No extensions according to base block, no HF-EEODB. */
if (!edid_extension_block_count(edid))
return 0;
/* HF-EEODB is always in the first EDID extension block only */
cta = edid_extension_block_data(edid, 0);
if (edid_block_tag(cta) != CEA_EXT || cea_revision(cta) < 3)
return 0;
/* Need to have the data block collection, and at least 3 bytes. */
if (cea_db_collection_size(cta) < 3)
return 0;
/*
* Sinks that include the HF-EEODB in their E-EDID shall include one and
* only one instance of the HF-EEODB in the E-EDID, occupying bytes 4
* through 6 of Block 1 of the E-EDID.
*/
if (!cea_db_is_hdmi_forum_eeodb(&cta[4]))
return 0;
return cta[4 + 2];
}
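/*
 * Worked example (a sketch of the byte layout per HDMI 2.1): cta[4] is the
 * tag/length byte (0xe2: extended tag, payload length 2), cta[5] is the
 * HF-EEODB extended tag code, and cta[6] is the override extension block
 * count. If the base block advertises 1 extension but cta[6] reads 3, the
 * EDID actually consists of the base block plus 3 extension blocks.
 */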
/*
* CTA-861 YCbCr 4:2:0 Capability Map Data Block (CTA Y420CMDB)
*
 * The Y420CMDB contains a bitmap which indicates, by index, which of the CTA
 * modes from the CTA VDB can also support YCbCr 4:2:0 sampling output (in
 * addition to RGB/YCbCr 4:4:4 etc.). For example, if bit 0 in the bitmap is
 * set, the first mode in the VDB can also support YCbCr 4:2:0 output.
*/
static void parse_cta_y420cmdb(struct drm_connector *connector,
const struct cea_db *db, u64 *y420cmdb_map)
{
struct drm_display_info *info = &connector->display_info;
int i, map_len = cea_db_payload_len(db) - 1;
const u8 *data = cea_db_data(db) + 1;
u64 map = 0;
if (map_len == 0) {
		/* All CEA modes support YCbCr 4:2:0 sampling as well. */
map = U64_MAX;
goto out;
}
/*
	 * This map indicates which of the existing CEA modes from the
	 * VDB can also support YCbCr 4:2:0 output: if bit 0 is set, the
	 * first mode from the VDB can support YCbCr 4:2:0 output too.
	 * Parse and keep this map before parsing the VDB itself, to
	 * avoid going through the same block again and again.
	 *
	 * The spec is not clear about the maximum possible size of this
	 * block, so clamp the bitmap at 8 bytes. Every byte addresses
	 * 8 CEA modes, so the map can cover the first 8 * 8 = 64 SVDs.
*/
if (WARN_ON_ONCE(map_len > 8))
map_len = 8;
for (i = 0; i < map_len; i++)
map |= (u64)data[i] << (8 * i);
out:
if (map)
info->color_formats |= DRM_COLOR_FORMAT_YCBCR420;
*y420cmdb_map = map;
}
static int add_cea_modes(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
const struct cea_db *db;
struct cea_db_iter iter;
int modes;
/* CTA VDB block VICs parsed earlier */
modes = add_cta_vdb_modes(connector);
cea_db_iter_edid_begin(drm_edid, &iter);
cea_db_iter_for_each(db, &iter) {
if (cea_db_is_hdmi_vsdb(db)) {
modes += do_hdmi_vsdb_modes(connector, (const u8 *)db,
cea_db_payload_len(db));
} else if (cea_db_is_y420vdb(db)) {
const u8 *vdb420 = cea_db_data(db) + 1;
			/* Add 4:2:0-only modes present in the EDID */
modes += do_y420vdb_modes(connector, vdb420,
cea_db_payload_len(db) - 1);
}
}
cea_db_iter_end(&iter);
return modes;
}
static void fixup_detailed_cea_mode_clock(struct drm_connector *connector,
struct drm_display_mode *mode)
{
const struct drm_display_mode *cea_mode;
int clock1, clock2, clock;
u8 vic;
const char *type;
/*
	 * Allow a 5 kHz clock difference either way to account for
	 * the 10 kHz clock resolution limit of detailed timings.
*/
vic = drm_match_cea_mode_clock_tolerance(mode, 5);
if (drm_valid_cea_vic(vic)) {
type = "CEA";
cea_mode = cea_mode_for_vic(vic);
clock1 = cea_mode->clock;
clock2 = cea_mode_alternate_clock(cea_mode);
} else {
vic = drm_match_hdmi_mode_clock_tolerance(mode, 5);
if (drm_valid_hdmi_vic(vic)) {
type = "HDMI";
cea_mode = &edid_4k_modes[vic];
clock1 = cea_mode->clock;
clock2 = hdmi_mode_alternate_clock(cea_mode);
} else {
return;
}
}
/* pick whichever is closest */
if (abs(mode->clock - clock1) < abs(mode->clock - clock2))
clock = clock1;
else
clock = clock2;
if (mode->clock == clock)
return;
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] detailed mode matches %s VIC %d, adjusting clock %d -> %d\n",
connector->base.id, connector->name,
type, vic, mode->clock, clock);
mode->clock = clock;
}
static void drm_calculate_luminance_range(struct drm_connector *connector)
{
struct hdr_static_metadata *hdr_metadata = &connector->hdr_sink_metadata.hdmi_type1;
struct drm_luminance_range_info *luminance_range =
&connector->display_info.luminance_range;
static const u8 pre_computed_values[] = {
50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98
};
u32 max_avg, min_cll, max, min, q, r;
if (!(hdr_metadata->metadata_type & BIT(HDMI_STATIC_METADATA_TYPE1)))
return;
max_avg = hdr_metadata->max_fall;
min_cll = hdr_metadata->min_cll;
/*
	 * From the specification (CTA-861-G), the maximum luminance is
	 * calculated as:
	 *	Luminance = 50 * 2^(CV/32)
	 * where CV is a one-byte value.
	 * Evaluating this expression directly would need floating point
	 * precision; to avoid that, take advantage of the fact that CV is
	 * divided by a constant. From Euclid's division algorithm, CV can
	 * be written as CV = 32*q + r. Substituting CV in the luminance
	 * expression gives 50 * (2^q) * (2^(r/32)), so only the value of
	 * 2^(r/32) needs to be pre-computed. The pre-computed values were
	 * generated with the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results can be verified against pre_computed_values[].
*/
q = max_avg >> 5;
r = max_avg % 32;
max = (1 << q) * pre_computed_values[r];
/* min luminance: maxLum * (CV/255)^2 / 100 */
q = DIV_ROUND_CLOSEST(min_cll, 255);
min = max * DIV_ROUND_CLOSEST((q * q), 100);
luminance_range->min_luminance = min;
luminance_range->max_luminance = max;
}
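/*
 * Worked example for the max luminance calculation above: with
 * max_fall CV = 97, q = 97 >> 5 = 3 and r = 97 % 32 = 1, so
 * max = (1 << 3) * pre_computed_values[1] = 8 * 51 = 408 cd/m^2, which
 * matches 50 * 2^(97/32) ~= 409 up to the rounding of the pre-computed
 * table.
 */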
static uint8_t eotf_supported(const u8 *edid_ext)
{
return edid_ext[2] &
(BIT(HDMI_EOTF_TRADITIONAL_GAMMA_SDR) |
BIT(HDMI_EOTF_TRADITIONAL_GAMMA_HDR) |
BIT(HDMI_EOTF_SMPTE_ST2084) |
BIT(HDMI_EOTF_BT_2100_HLG));
}
static uint8_t hdr_metadata_type(const u8 *edid_ext)
{
return edid_ext[3] &
BIT(HDMI_STATIC_METADATA_TYPE1);
}
static void
drm_parse_hdr_metadata_block(struct drm_connector *connector, const u8 *db)
{
u16 len;
len = cea_db_payload_len(db);
connector->hdr_sink_metadata.hdmi_type1.eotf =
eotf_supported(db);
connector->hdr_sink_metadata.hdmi_type1.metadata_type =
hdr_metadata_type(db);
if (len >= 4)
connector->hdr_sink_metadata.hdmi_type1.max_cll = db[4];
if (len >= 5)
connector->hdr_sink_metadata.hdmi_type1.max_fall = db[5];
if (len >= 6) {
connector->hdr_sink_metadata.hdmi_type1.min_cll = db[6];
/* Calculate only when all values are available */
drm_calculate_luminance_range(connector);
}
}
/* HDMI Vendor-Specific Data Block (HDMI VSDB, H14b-VSDB) */
static void
drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db)
{
u8 len = cea_db_payload_len(db);
if (len >= 6 && (db[6] & (1 << 7)))
connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_SUPPORTS_AI;
if (len >= 10 && hdmi_vsdb_latency_present(db)) {
connector->latency_present[0] = true;
connector->video_latency[0] = db[9];
connector->audio_latency[0] = db[10];
}
if (len >= 12 && hdmi_vsdb_i_latency_present(db)) {
connector->latency_present[1] = true;
connector->video_latency[1] = db[11];
connector->audio_latency[1] = db[12];
}
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] HDMI: latency present %d %d, video latency %d %d, audio latency %d %d\n",
connector->base.id, connector->name,
connector->latency_present[0], connector->latency_present[1],
connector->video_latency[0], connector->video_latency[1],
connector->audio_latency[0], connector->audio_latency[1]);
}
static void
monitor_name(const struct detailed_timing *timing, void *data)
{
const char **res = data;
if (!is_display_descriptor(timing, EDID_DETAIL_MONITOR_NAME))
return;
*res = timing->data.other_data.data.str.str;
}
static int get_monitor_name(const struct drm_edid *drm_edid, char name[13])
{
const char *edid_name = NULL;
int mnl;
if (!drm_edid || !name)
return 0;
drm_for_each_detailed_block(drm_edid, monitor_name, &edid_name);
for (mnl = 0; edid_name && mnl < 13; mnl++) {
if (edid_name[mnl] == 0x0a)
break;
name[mnl] = edid_name[mnl];
}
return mnl;
}
/**
* drm_edid_get_monitor_name - fetch the monitor name from the edid
* @edid: monitor EDID information
* @name: pointer to a character array to hold the name of the monitor
 * @bufsize: The size of the name buffer (should be at least 14 chars to hold
 *	the 13-character name and a terminating NUL)
*
*/
void drm_edid_get_monitor_name(const struct edid *edid, char *name, int bufsize)
{
int name_length = 0;
if (bufsize <= 0)
return;
if (edid) {
char buf[13];
struct drm_edid drm_edid = {
.edid = edid,
.size = edid_size(edid),
};
name_length = min(get_monitor_name(&drm_edid, buf), bufsize - 1);
memcpy(name, buf, name_length);
}
name[name_length] = '\0';
}
EXPORT_SYMBOL(drm_edid_get_monitor_name);
static void clear_eld(struct drm_connector *connector)
{
memset(connector->eld, 0, sizeof(connector->eld));
connector->latency_present[0] = false;
connector->latency_present[1] = false;
connector->video_latency[0] = 0;
connector->audio_latency[0] = 0;
connector->video_latency[1] = 0;
connector->audio_latency[1] = 0;
}
/*
* drm_edid_to_eld - build ELD from EDID
* @connector: connector corresponding to the HDMI/DP sink
* @drm_edid: EDID to parse
*
* Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The
* HDCP and Port_ID ELD fields are left for the graphics driver to fill in.
*/
static void drm_edid_to_eld(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
const struct drm_display_info *info = &connector->display_info;
const struct cea_db *db;
struct cea_db_iter iter;
uint8_t *eld = connector->eld;
int total_sad_count = 0;
int mnl;
if (!drm_edid)
return;
mnl = get_monitor_name(drm_edid, &eld[DRM_ELD_MONITOR_NAME_STRING]);
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] ELD monitor %s\n",
connector->base.id, connector->name,
&eld[DRM_ELD_MONITOR_NAME_STRING]);
eld[DRM_ELD_CEA_EDID_VER_MNL] = info->cea_rev << DRM_ELD_CEA_EDID_VER_SHIFT;
eld[DRM_ELD_CEA_EDID_VER_MNL] |= mnl;
eld[DRM_ELD_VER] = DRM_ELD_VER_CEA861D;
eld[DRM_ELD_MANUFACTURER_NAME0] = drm_edid->edid->mfg_id[0];
eld[DRM_ELD_MANUFACTURER_NAME1] = drm_edid->edid->mfg_id[1];
eld[DRM_ELD_PRODUCT_CODE0] = drm_edid->edid->prod_code[0];
eld[DRM_ELD_PRODUCT_CODE1] = drm_edid->edid->prod_code[1];
cea_db_iter_edid_begin(drm_edid, &iter);
cea_db_iter_for_each(db, &iter) {
const u8 *data = cea_db_data(db);
int len = cea_db_payload_len(db);
int sad_count;
switch (cea_db_tag(db)) {
case CTA_DB_AUDIO:
/* Audio Data Block, contains SADs */
sad_count = min(len / 3, 15 - total_sad_count);
if (sad_count >= 1)
memcpy(&eld[DRM_ELD_CEA_SAD(mnl, total_sad_count)],
data, sad_count * 3);
total_sad_count += sad_count;
break;
case CTA_DB_SPEAKER:
/* Speaker Allocation Data Block */
if (len >= 1)
eld[DRM_ELD_SPEAKER] = data[0];
break;
case CTA_DB_VENDOR:
/* HDMI Vendor-Specific Data Block */
if (cea_db_is_hdmi_vsdb(db))
drm_parse_hdmi_vsdb_audio(connector, (const u8 *)db);
break;
default:
break;
}
}
cea_db_iter_end(&iter);
eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= total_sad_count << DRM_ELD_SAD_COUNT_SHIFT;
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP)
eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_DP;
else
eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_HDMI;
eld[DRM_ELD_BASELINE_ELD_LEN] =
DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] ELD size %d, SAD count %d\n",
connector->base.id, connector->name,
drm_eld_size(eld), total_sad_count);
}
static int _drm_edid_to_sad(const struct drm_edid *drm_edid,
struct cea_sad **sads)
{
const struct cea_db *db;
struct cea_db_iter iter;
int count = 0;
cea_db_iter_edid_begin(drm_edid, &iter);
cea_db_iter_for_each(db, &iter) {
if (cea_db_tag(db) == CTA_DB_AUDIO) {
int j;
count = cea_db_payload_len(db) / 3; /* SAD is 3B */
*sads = kcalloc(count, sizeof(**sads), GFP_KERNEL);
if (!*sads)
return -ENOMEM;
for (j = 0; j < count; j++) {
const u8 *sad = &db->data[j * 3];
(*sads)[j].format = (sad[0] & 0x78) >> 3;
(*sads)[j].channels = sad[0] & 0x7;
(*sads)[j].freq = sad[1] & 0x7F;
(*sads)[j].byte2 = sad[2];
}
break;
}
}
cea_db_iter_end(&iter);
DRM_DEBUG_KMS("Found %d Short Audio Descriptors\n", count);
return count;
}
/**
* drm_edid_to_sad - extracts SADs from EDID
* @edid: EDID to parse
* @sads: pointer that will be set to the extracted SADs
*
* Looks for CEA EDID block and extracts SADs (Short Audio Descriptors) from it.
*
* Note: The returned pointer needs to be freed using kfree().
*
* Return: The number of found SADs or negative number on error.
*/
int drm_edid_to_sad(const struct edid *edid, struct cea_sad **sads)
{
struct drm_edid drm_edid;
return _drm_edid_to_sad(drm_edid_legacy_init(&drm_edid, edid), sads);
}
EXPORT_SYMBOL(drm_edid_to_sad);
static int _drm_edid_to_speaker_allocation(const struct drm_edid *drm_edid,
u8 **sadb)
{
const struct cea_db *db;
struct cea_db_iter iter;
int count = 0;
cea_db_iter_edid_begin(drm_edid, &iter);
cea_db_iter_for_each(db, &iter) {
if (cea_db_tag(db) == CTA_DB_SPEAKER &&
cea_db_payload_len(db) == 3) {
*sadb = kmemdup(db->data, cea_db_payload_len(db),
GFP_KERNEL);
if (!*sadb)
return -ENOMEM;
count = cea_db_payload_len(db);
break;
}
}
cea_db_iter_end(&iter);
DRM_DEBUG_KMS("Found %d Speaker Allocation Data Blocks\n", count);
return count;
}
/**
* drm_edid_to_speaker_allocation - extracts Speaker Allocation Data Blocks from EDID
* @edid: EDID to parse
* @sadb: pointer to the speaker block
*
* Looks for CEA EDID block and extracts the Speaker Allocation Data Block from it.
*
* Note: The returned pointer needs to be freed using kfree().
*
* Return: The number of found Speaker Allocation Blocks or negative number on
* error.
*/
int drm_edid_to_speaker_allocation(const struct edid *edid, u8 **sadb)
{
struct drm_edid drm_edid;
return _drm_edid_to_speaker_allocation(drm_edid_legacy_init(&drm_edid, edid),
sadb);
}
EXPORT_SYMBOL(drm_edid_to_speaker_allocation);
/**
* drm_av_sync_delay - compute the HDMI/DP sink audio-video sync delay
* @connector: connector associated with the HDMI/DP sink
* @mode: the display mode
*
* Return: The HDMI/DP sink's audio-video sync delay in milliseconds or 0 if
* the sink doesn't support audio or video.
*/
int drm_av_sync_delay(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
int a, v;
if (!connector->latency_present[0])
return 0;
if (!connector->latency_present[1])
i = 0;
a = connector->audio_latency[i];
v = connector->video_latency[i];
/*
* HDMI/DP sink doesn't support audio or video?
*/
if (a == 255 || v == 255)
return 0;
/*
	 * Convert raw EDID values to milliseconds.
	 * Treat unknown latency as 0 ms.
*/
if (a)
a = min(2 * (a - 1), 500);
if (v)
v = min(2 * (v - 1), 500);
return max(v - a, 0);
}
EXPORT_SYMBOL(drm_av_sync_delay);
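/*
 * Worked example for drm_av_sync_delay(): with raw EDID latencies
 * video = 40 and audio = 20, the conversion above gives
 * v = 2 * (40 - 1) = 78 ms and a = 2 * (20 - 1) = 38 ms, so the function
 * returns max(78 - 38, 0) = 40 ms of audio delay. A raw value of 0 means
 * unknown latency (treated as 0 ms), and 255 means audio or video is not
 * supported, in which case 0 is returned.
 */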
static bool _drm_detect_hdmi_monitor(const struct drm_edid *drm_edid)
{
const struct cea_db *db;
struct cea_db_iter iter;
bool hdmi = false;
/*
* Because HDMI identifier is in Vendor Specific Block,
* search it from all data blocks of CEA extension.
*/
cea_db_iter_edid_begin(drm_edid, &iter);
cea_db_iter_for_each(db, &iter) {
if (cea_db_is_hdmi_vsdb(db)) {
hdmi = true;
break;
}
}
cea_db_iter_end(&iter);
return hdmi;
}
/**
* drm_detect_hdmi_monitor - detect whether monitor is HDMI
* @edid: monitor EDID information
*
* Parse the CEA extension according to CEA-861-B.
*
* Drivers that have added the modes parsed from EDID to drm_display_info
* should use &drm_display_info.is_hdmi instead of calling this function.
*
* Return: True if the monitor is HDMI, false if not or unknown.
*/
bool drm_detect_hdmi_monitor(const struct edid *edid)
{
struct drm_edid drm_edid;
return _drm_detect_hdmi_monitor(drm_edid_legacy_init(&drm_edid, edid));
}
EXPORT_SYMBOL(drm_detect_hdmi_monitor);
static bool _drm_detect_monitor_audio(const struct drm_edid *drm_edid)
{
struct drm_edid_iter edid_iter;
const struct cea_db *db;
struct cea_db_iter iter;
const u8 *edid_ext;
bool has_audio = false;
drm_edid_iter_begin(drm_edid, &edid_iter);
drm_edid_iter_for_each(edid_ext, &edid_iter) {
if (edid_ext[0] == CEA_EXT) {
has_audio = edid_ext[3] & EDID_BASIC_AUDIO;
if (has_audio)
break;
}
}
drm_edid_iter_end(&edid_iter);
if (has_audio) {
DRM_DEBUG_KMS("Monitor has basic audio support\n");
goto end;
}
cea_db_iter_edid_begin(drm_edid, &iter);
cea_db_iter_for_each(db, &iter) {
if (cea_db_tag(db) == CTA_DB_AUDIO) {
const u8 *data = cea_db_data(db);
int i;
for (i = 0; i < cea_db_payload_len(db); i += 3)
DRM_DEBUG_KMS("CEA audio format %d\n",
(data[i] >> 3) & 0xf);
has_audio = true;
break;
}
}
cea_db_iter_end(&iter);
end:
return has_audio;
}
/**
* drm_detect_monitor_audio - check monitor audio capability
* @edid: EDID block to scan
*
 * The monitor should have a CEA extension block.
 * If the monitor has 'basic audio' but no CEA audio blocks, it's 'basic
 * audio' only. If there is any audio extension block with a supported
 * audio format, assume at least 'basic audio' support, even if 'basic
 * audio' is not defined in the EDID.
*
* Return: True if the monitor supports audio, false otherwise.
*/
bool drm_detect_monitor_audio(const struct edid *edid)
{
struct drm_edid drm_edid;
return _drm_detect_monitor_audio(drm_edid_legacy_init(&drm_edid, edid));
}
EXPORT_SYMBOL(drm_detect_monitor_audio);
/**
* drm_default_rgb_quant_range - default RGB quantization range
* @mode: display mode
*
* Determine the default RGB quantization range for the mode,
* as specified in CEA-861.
*
* Return: The default RGB quantization range for the mode
*/
enum hdmi_quantization_range
drm_default_rgb_quant_range(const struct drm_display_mode *mode)
{
/* All CEA modes other than VIC 1 use limited quantization range. */
return drm_match_cea_mode(mode) > 1 ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL;
}
EXPORT_SYMBOL(drm_default_rgb_quant_range);
/* CTA-861 Video Data Block (CTA VDB) */
static void parse_cta_vdb(struct drm_connector *connector, const struct cea_db *db)
{
struct drm_display_info *info = &connector->display_info;
int i, vic_index, len = cea_db_payload_len(db);
const u8 *svds = cea_db_data(db);
u8 *vics;
if (!len)
return;
/* Gracefully handle multiple VDBs, however unlikely that is */
vics = krealloc(info->vics, info->vics_len + len, GFP_KERNEL);
if (!vics)
return;
vic_index = info->vics_len;
info->vics_len += len;
info->vics = vics;
for (i = 0; i < len; i++) {
u8 vic = svd_to_vic(svds[i]);
if (!drm_valid_cea_vic(vic))
vic = 0;
info->vics[vic_index++] = vic;
}
}
/*
* Update y420_cmdb_modes based on previously parsed CTA VDB and Y420CMDB.
*
* Translate the y420cmdb_map based on VIC indexes to y420_cmdb_modes indexed
* using the VICs themselves.
*/
static void update_cta_y420cmdb(struct drm_connector *connector, u64 y420cmdb_map)
{
struct drm_display_info *info = &connector->display_info;
struct drm_hdmi_info *hdmi = &info->hdmi;
int i, len = min_t(int, info->vics_len, BITS_PER_TYPE(y420cmdb_map));
for (i = 0; i < len; i++) {
u8 vic = info->vics[i];
if (vic && y420cmdb_map & BIT_ULL(i))
bitmap_set(hdmi->y420_cmdb_modes, vic, 1);
}
}
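/*
 * Worked example for update_cta_y420cmdb() (the VIC meaning is an assumption
 * based on the standard CTA-861 table): if parse_cta_vdb() recorded
 * info->vics[] = { 16, 95, 97 } and bit 2 of y420cmdb_map is set, then
 * vic = info->vics[2] = 97 and bit 97 is set in y420_cmdb_modes, i.e. the
 * third SVD (3840x2160p60) may also be driven in YCbCr 4:2:0.
 */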
static bool cta_vdb_has_vic(const struct drm_connector *connector, u8 vic)
{
const struct drm_display_info *info = &connector->display_info;
int i;
if (!vic || !info->vics)
return false;
for (i = 0; i < info->vics_len; i++) {
if (info->vics[i] == vic)
return true;
}
return false;
}
/* CTA-861-H YCbCr 4:2:0 Video Data Block (CTA Y420VDB) */
static void parse_cta_y420vdb(struct drm_connector *connector,
const struct cea_db *db)
{
struct drm_display_info *info = &connector->display_info;
struct drm_hdmi_info *hdmi = &info->hdmi;
const u8 *svds = cea_db_data(db) + 1;
int i;
for (i = 0; i < cea_db_payload_len(db) - 1; i++) {
u8 vic = svd_to_vic(svds[i]);
if (!drm_valid_cea_vic(vic))
continue;
bitmap_set(hdmi->y420_vdb_modes, vic, 1);
info->color_formats |= DRM_COLOR_FORMAT_YCBCR420;
}
}
static void drm_parse_vcdb(struct drm_connector *connector, const u8 *db)
{
struct drm_display_info *info = &connector->display_info;
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] CEA VCDB 0x%02x\n",
connector->base.id, connector->name, db[2]);
if (db[2] & EDID_CEA_VCDB_QS)
info->rgb_quant_range_selectable = true;
}
static
void drm_get_max_frl_rate(int max_frl_rate, u8 *max_lanes, u8 *max_rate_per_lane)
{
switch (max_frl_rate) {
case 1:
*max_lanes = 3;
*max_rate_per_lane = 3;
break;
case 2:
*max_lanes = 3;
*max_rate_per_lane = 6;
break;
case 3:
*max_lanes = 4;
*max_rate_per_lane = 6;
break;
case 4:
*max_lanes = 4;
*max_rate_per_lane = 8;
break;
case 5:
*max_lanes = 4;
*max_rate_per_lane = 10;
break;
case 6:
*max_lanes = 4;
*max_rate_per_lane = 12;
break;
case 0:
default:
*max_lanes = 0;
*max_rate_per_lane = 0;
}
}
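/*
 * The cases above map the Max_FRL_Rate field from the HF-VSDB/HF-SCDB to an
 * HDMI 2.1 FRL configuration, with the rate expressed in Gbps per lane: for
 * example, a field value of 3 means 4 lanes at 6 Gbps each (24 Gbps total)
 * and 6 means 4 lanes at 12 Gbps each (48 Gbps total).
 */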
static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector,
const u8 *db)
{
u8 dc_mask;
struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
dc_mask = db[7] & DRM_EDID_YCBCR420_DC_MASK;
hdmi->y420_dc_modes = dc_mask;
}
static void drm_parse_dsc_info(struct drm_hdmi_dsc_cap *hdmi_dsc,
const u8 *hf_scds)
{
hdmi_dsc->v_1p2 = hf_scds[11] & DRM_EDID_DSC_1P2;
if (!hdmi_dsc->v_1p2)
return;
hdmi_dsc->native_420 = hf_scds[11] & DRM_EDID_DSC_NATIVE_420;
hdmi_dsc->all_bpp = hf_scds[11] & DRM_EDID_DSC_ALL_BPP;
if (hf_scds[11] & DRM_EDID_DSC_16BPC)
hdmi_dsc->bpc_supported = 16;
else if (hf_scds[11] & DRM_EDID_DSC_12BPC)
hdmi_dsc->bpc_supported = 12;
else if (hf_scds[11] & DRM_EDID_DSC_10BPC)
hdmi_dsc->bpc_supported = 10;
else
		/* A minimum of 8 bpc is supported if DSC 1.2 is supported */
hdmi_dsc->bpc_supported = 8;
if (cea_db_payload_len(hf_scds) >= 12 && hf_scds[12]) {
u8 dsc_max_slices;
u8 dsc_max_frl_rate;
dsc_max_frl_rate = (hf_scds[12] & DRM_EDID_DSC_MAX_FRL_RATE_MASK) >> 4;
drm_get_max_frl_rate(dsc_max_frl_rate, &hdmi_dsc->max_lanes,
&hdmi_dsc->max_frl_rate_per_lane);
dsc_max_slices = hf_scds[12] & DRM_EDID_DSC_MAX_SLICES;
switch (dsc_max_slices) {
case 1:
hdmi_dsc->max_slices = 1;
hdmi_dsc->clk_per_slice = 340;
break;
case 2:
hdmi_dsc->max_slices = 2;
hdmi_dsc->clk_per_slice = 340;
break;
case 3:
hdmi_dsc->max_slices = 4;
hdmi_dsc->clk_per_slice = 340;
break;
case 4:
hdmi_dsc->max_slices = 8;
hdmi_dsc->clk_per_slice = 340;
break;
case 5:
hdmi_dsc->max_slices = 8;
hdmi_dsc->clk_per_slice = 400;
break;
case 6:
hdmi_dsc->max_slices = 12;
hdmi_dsc->clk_per_slice = 400;
break;
case 7:
hdmi_dsc->max_slices = 16;
hdmi_dsc->clk_per_slice = 400;
break;
case 0:
default:
hdmi_dsc->max_slices = 0;
hdmi_dsc->clk_per_slice = 0;
}
}
if (cea_db_payload_len(hf_scds) >= 13 && hf_scds[13])
hdmi_dsc->total_chunk_kbytes = hf_scds[13] & DRM_EDID_DSC_TOTAL_CHUNK_KBYTES;
}
/* Sink Capability Data Structure */
static void drm_parse_hdmi_forum_scds(struct drm_connector *connector,
const u8 *hf_scds)
{
struct drm_display_info *info = &connector->display_info;
struct drm_hdmi_info *hdmi = &info->hdmi;
struct drm_hdmi_dsc_cap *hdmi_dsc = &hdmi->dsc_cap;
int max_tmds_clock = 0;
u8 max_frl_rate = 0;
bool dsc_support = false;
info->has_hdmi_infoframe = true;
if (hf_scds[6] & 0x80) {
hdmi->scdc.supported = true;
if (hf_scds[6] & 0x40)
hdmi->scdc.read_request = true;
}
/*
* All HDMI 2.0 monitors must support scrambling at rates > 340 MHz.
	 * As per the spec, three factors confirm this:
	 * * Availability of an HF-VSDB block in the EDID (already checked)
	 * * A non-zero Max_TMDS_Char_Rate field in the HF-VSDB (checked below)
	 * * SCDC support available (checked below)
*/
if (hf_scds[5]) {
struct drm_scdc *scdc = &hdmi->scdc;
		/* max clock is 5000 kHz times the field value */
max_tmds_clock = hf_scds[5] * 5000;
if (max_tmds_clock > 340000) {
info->max_tmds_clock = max_tmds_clock;
}
if (scdc->supported) {
scdc->scrambling.supported = true;
			/* Some sinks also support scrambling for TMDS clocks < 340 MHz */
if ((hf_scds[6] & 0x8))
scdc->scrambling.low_rates = true;
}
}
if (hf_scds[7]) {
max_frl_rate = (hf_scds[7] & DRM_EDID_MAX_FRL_RATE_MASK) >> 4;
drm_get_max_frl_rate(max_frl_rate, &hdmi->max_lanes,
&hdmi->max_frl_rate_per_lane);
}
drm_parse_ycbcr420_deep_color_info(connector, hf_scds);
if (cea_db_payload_len(hf_scds) >= 11 && hf_scds[11]) {
drm_parse_dsc_info(hdmi_dsc, hf_scds);
dsc_support = true;
}
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] HF-VSDB: max TMDS clock: %d KHz, HDMI 2.1 support: %s, DSC 1.2 support: %s\n",
connector->base.id, connector->name,
max_tmds_clock, str_yes_no(max_frl_rate), str_yes_no(dsc_support));
}
static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
const u8 *hdmi)
{
struct drm_display_info *info = &connector->display_info;
unsigned int dc_bpc = 0;
/* HDMI supports at least 8 bpc */
info->bpc = 8;
if (cea_db_payload_len(hdmi) < 6)
return;
if (hdmi[6] & DRM_EDID_HDMI_DC_30) {
dc_bpc = 10;
info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_30;
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] HDMI sink does deep color 30.\n",
connector->base.id, connector->name);
}
if (hdmi[6] & DRM_EDID_HDMI_DC_36) {
dc_bpc = 12;
info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_36;
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] HDMI sink does deep color 36.\n",
connector->base.id, connector->name);
}
if (hdmi[6] & DRM_EDID_HDMI_DC_48) {
dc_bpc = 16;
info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_48;
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] HDMI sink does deep color 48.\n",
connector->base.id, connector->name);
}
if (dc_bpc == 0) {
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] No deep color support on this HDMI sink.\n",
connector->base.id, connector->name);
return;
}
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Assigning HDMI sink color depth as %d bpc.\n",
connector->base.id, connector->name, dc_bpc);
info->bpc = dc_bpc;
/* YCRCB444 is optional according to spec. */
if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) {
info->edid_hdmi_ycbcr444_dc_modes = info->edid_hdmi_rgb444_dc_modes;
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] HDMI sink does YCRCB444 in deep color.\n",
connector->base.id, connector->name);
}
/*
* Spec says that if any deep color mode is supported at all,
* then deep color 36 bit must be supported.
*/
if (!(hdmi[6] & DRM_EDID_HDMI_DC_36)) {
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] HDMI sink should do DC_36, but does not!\n",
connector->base.id, connector->name);
}
}
/* HDMI Vendor-Specific Data Block (HDMI VSDB, H14b-VSDB) */
static void
drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
{
struct drm_display_info *info = &connector->display_info;
u8 len = cea_db_payload_len(db);
info->is_hdmi = true;
if (len >= 6)
info->dvi_dual = db[6] & 1;
if (len >= 7)
info->max_tmds_clock = db[7] * 5000;
/*
* Try to infer whether the sink supports HDMI infoframes.
*
* HDMI infoframe support was first added in HDMI 1.4. Assume the sink
* supports infoframes if HDMI_Video_present is set.
*/
if (len >= 8 && db[8] & BIT(5))
info->has_hdmi_infoframe = true;
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] HDMI: DVI dual %d, max TMDS clock %d kHz\n",
connector->base.id, connector->name,
info->dvi_dual, info->max_tmds_clock);
drm_parse_hdmi_deep_color_info(connector, db);
}
/*
* See EDID extension for head-mounted and specialized monitors, specified at:
* https://docs.microsoft.com/en-us/windows-hardware/drivers/display/specialized-monitors-edid-extension
*/
static void drm_parse_microsoft_vsdb(struct drm_connector *connector,
const u8 *db)
{
struct drm_display_info *info = &connector->display_info;
u8 version = db[4];
bool desktop_usage = db[5] & BIT(6);
	/* Versions 1 and 2 are for HMDs; version 3 flags desktop usage explicitly */
if (version == 1 || version == 2 || (version == 3 && !desktop_usage))
info->non_desktop = true;
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] HMD or specialized display VSDB version %u: 0x%02x\n",
connector->base.id, connector->name, version, db[5]);
}
static void drm_parse_cea_ext(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
struct drm_display_info *info = &connector->display_info;
struct drm_edid_iter edid_iter;
const struct cea_db *db;
struct cea_db_iter iter;
const u8 *edid_ext;
u64 y420cmdb_map = 0;
drm_edid_iter_begin(drm_edid, &edid_iter);
drm_edid_iter_for_each(edid_ext, &edid_iter) {
if (edid_ext[0] != CEA_EXT)
continue;
if (!info->cea_rev)
info->cea_rev = edid_ext[1];
if (info->cea_rev != edid_ext[1])
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] CEA extension version mismatch %u != %u\n",
connector->base.id, connector->name,
info->cea_rev, edid_ext[1]);
/* The existence of a CTA extension should imply RGB support */
info->color_formats = DRM_COLOR_FORMAT_RGB444;
if (edid_ext[3] & EDID_CEA_YCRCB444)
info->color_formats |= DRM_COLOR_FORMAT_YCBCR444;
if (edid_ext[3] & EDID_CEA_YCRCB422)
info->color_formats |= DRM_COLOR_FORMAT_YCBCR422;
if (edid_ext[3] & EDID_BASIC_AUDIO)
info->has_audio = true;
}
drm_edid_iter_end(&edid_iter);
cea_db_iter_edid_begin(drm_edid, &iter);
cea_db_iter_for_each(db, &iter) {
/* FIXME: convert parsers to use struct cea_db */
const u8 *data = (const u8 *)db;
if (cea_db_is_hdmi_vsdb(db))
drm_parse_hdmi_vsdb_video(connector, data);
else if (cea_db_is_hdmi_forum_vsdb(db) ||
cea_db_is_hdmi_forum_scdb(db))
drm_parse_hdmi_forum_scds(connector, data);
else if (cea_db_is_microsoft_vsdb(db))
drm_parse_microsoft_vsdb(connector, data);
else if (cea_db_is_y420cmdb(db))
parse_cta_y420cmdb(connector, db, &y420cmdb_map);
else if (cea_db_is_y420vdb(db))
parse_cta_y420vdb(connector, db);
else if (cea_db_is_vcdb(db))
drm_parse_vcdb(connector, data);
else if (cea_db_is_hdmi_hdr_metadata_block(db))
drm_parse_hdr_metadata_block(connector, data);
else if (cea_db_tag(db) == CTA_DB_VIDEO)
parse_cta_vdb(connector, db);
else if (cea_db_tag(db) == CTA_DB_AUDIO)
info->has_audio = true;
}
cea_db_iter_end(&iter);
if (y420cmdb_map)
update_cta_y420cmdb(connector, y420cmdb_map);
}
static
void get_monitor_range(const struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
struct drm_display_info *info = &closure->connector->display_info;
struct drm_monitor_range_info *monitor_range = &info->monitor_range;
const struct detailed_non_pixel *data = &timing->data.other_data;
const struct detailed_data_monitor_range *range = &data->data.range;
const struct edid *edid = closure->drm_edid->edid;
if (!is_display_descriptor(timing, EDID_DETAIL_MONITOR_RANGE))
return;
/*
* These limits are used to determine the VRR refresh
* rate range. Only the "range limits only" variant
* of the range descriptor seems to guarantee that
* any and all timings are accepted by the sink, as
* opposed to just timings conforming to the indicated
* formula (GTF/GTF2/CVT). Thus other variants of the
* range descriptor are not accepted here.
*/
if (range->flags != DRM_EDID_RANGE_LIMITS_ONLY_FLAG)
return;
monitor_range->min_vfreq = range->min_vfreq;
monitor_range->max_vfreq = range->max_vfreq;
if (edid->revision >= 4) {
if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
monitor_range->min_vfreq += 255;
if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
monitor_range->max_vfreq += 255;
}
}
static void drm_get_monitor_range(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
const struct drm_display_info *info = &connector->display_info;
struct detailed_mode_closure closure = {
.connector = connector,
.drm_edid = drm_edid,
};
if (drm_edid->edid->revision < 4)
return;
if (!(drm_edid->edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ))
return;
drm_for_each_detailed_block(drm_edid, get_monitor_range, &closure);
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] Supported Monitor Refresh rate range is %d Hz - %d Hz\n",
connector->base.id, connector->name,
info->monitor_range.min_vfreq, info->monitor_range.max_vfreq);
}
static void drm_parse_vesa_mso_data(struct drm_connector *connector,
const struct displayid_block *block)
{
struct displayid_vesa_vendor_specific_block *vesa =
(struct displayid_vesa_vendor_specific_block *)block;
struct drm_display_info *info = &connector->display_info;
if (block->num_bytes < 3) {
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] Unexpected vendor block size %u\n",
connector->base.id, connector->name, block->num_bytes);
return;
}
if (oui(vesa->oui[0], vesa->oui[1], vesa->oui[2]) != VESA_IEEE_OUI)
return;
if (sizeof(*vesa) != sizeof(*block) + block->num_bytes) {
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] Unexpected VESA vendor block size\n",
connector->base.id, connector->name);
return;
}
switch (FIELD_GET(DISPLAYID_VESA_MSO_MODE, vesa->mso)) {
default:
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Reserved MSO mode value\n",
connector->base.id, connector->name);
fallthrough;
case 0:
info->mso_stream_count = 0;
break;
case 1:
info->mso_stream_count = 2; /* 2 or 4 links */
break;
case 2:
info->mso_stream_count = 4; /* 4 links */
break;
}
if (!info->mso_stream_count) {
info->mso_pixel_overlap = 0;
return;
}
info->mso_pixel_overlap = FIELD_GET(DISPLAYID_VESA_MSO_OVERLAP, vesa->mso);
if (info->mso_pixel_overlap > 8) {
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] Reserved MSO pixel overlap value %u\n",
connector->base.id, connector->name,
info->mso_pixel_overlap);
info->mso_pixel_overlap = 8;
}
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] MSO stream count %u, pixel overlap %u\n",
connector->base.id, connector->name,
info->mso_stream_count, info->mso_pixel_overlap);
}
static void drm_update_mso(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
const struct displayid_block *block;
struct displayid_iter iter;
displayid_iter_edid_begin(drm_edid, &iter);
displayid_iter_for_each(block, &iter) {
if (block->tag == DATA_BLOCK_2_VENDOR_SPECIFIC)
drm_parse_vesa_mso_data(connector, block);
}
displayid_iter_end(&iter);
}
/*
 * A connector has no EDID information, so we've got no EDID to compute
 * quirks from. Reset all of the values which would have been set from
 * the EDID.
 */
static void drm_reset_display_info(struct drm_connector *connector)
{
struct drm_display_info *info = &connector->display_info;
info->width_mm = 0;
info->height_mm = 0;
info->bpc = 0;
info->color_formats = 0;
info->cea_rev = 0;
info->max_tmds_clock = 0;
info->dvi_dual = false;
info->is_hdmi = false;
info->has_audio = false;
info->has_hdmi_infoframe = false;
info->rgb_quant_range_selectable = false;
memset(&info->hdmi, 0, sizeof(info->hdmi));
info->edid_hdmi_rgb444_dc_modes = 0;
info->edid_hdmi_ycbcr444_dc_modes = 0;
info->non_desktop = 0;
memset(&info->monitor_range, 0, sizeof(info->monitor_range));
memset(&info->luminance_range, 0, sizeof(info->luminance_range));
info->mso_stream_count = 0;
info->mso_pixel_overlap = 0;
info->max_dsc_bpp = 0;
kfree(info->vics);
info->vics = NULL;
info->vics_len = 0;
info->quirks = 0;
}
static void update_displayid_info(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
struct drm_display_info *info = &connector->display_info;
const struct displayid_block *block;
struct displayid_iter iter;
displayid_iter_edid_begin(drm_edid, &iter);
displayid_iter_for_each(block, &iter) {
if (displayid_version(&iter) == DISPLAY_ID_STRUCTURE_VER_20 &&
(displayid_primary_use(&iter) == PRIMARY_USE_HEAD_MOUNTED_VR ||
displayid_primary_use(&iter) == PRIMARY_USE_HEAD_MOUNTED_AR))
info->non_desktop = true;
/*
* We're only interested in the base section here, no need to
* iterate further.
*/
break;
}
displayid_iter_end(&iter);
}
static void update_display_info(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
struct drm_display_info *info = &connector->display_info;
const struct edid *edid;
drm_reset_display_info(connector);
clear_eld(connector);
if (!drm_edid)
return;
edid = drm_edid->edid;
info->quirks = edid_get_quirks(drm_edid);
info->width_mm = edid->width_cm * 10;
info->height_mm = edid->height_cm * 10;
drm_get_monitor_range(connector, drm_edid);
if (edid->revision < 3)
goto out;
if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
goto out;
info->color_formats |= DRM_COLOR_FORMAT_RGB444;
drm_parse_cea_ext(connector, drm_edid);
update_displayid_info(connector, drm_edid);
/*
* Digital sink with "DFP 1.x compliant TMDS" according to EDID 1.3?
*
* For such displays, the DFP spec 1.0, section 3.10 "EDID support"
* tells us to assume 8 bpc color depth if the EDID doesn't have
* extensions which tell otherwise.
*/
if (info->bpc == 0 && edid->revision == 3 &&
edid->input & DRM_EDID_DIGITAL_DFP_1_X) {
info->bpc = 8;
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] Assigning DFP sink color depth as %d bpc.\n",
connector->base.id, connector->name, info->bpc);
}
/* Only defined for 1.4 with digital displays */
if (edid->revision < 4)
goto out;
switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
case DRM_EDID_DIGITAL_DEPTH_6:
info->bpc = 6;
break;
case DRM_EDID_DIGITAL_DEPTH_8:
info->bpc = 8;
break;
case DRM_EDID_DIGITAL_DEPTH_10:
info->bpc = 10;
break;
case DRM_EDID_DIGITAL_DEPTH_12:
info->bpc = 12;
break;
case DRM_EDID_DIGITAL_DEPTH_14:
info->bpc = 14;
break;
case DRM_EDID_DIGITAL_DEPTH_16:
info->bpc = 16;
break;
case DRM_EDID_DIGITAL_DEPTH_UNDEF:
default:
info->bpc = 0;
break;
}
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
connector->base.id, connector->name, info->bpc);
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
info->color_formats |= DRM_COLOR_FORMAT_YCBCR444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
info->color_formats |= DRM_COLOR_FORMAT_YCBCR422;
drm_update_mso(connector, drm_edid);
out:
if (info->quirks & EDID_QUIRK_NON_DESKTOP) {
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Non-desktop display%s\n",
connector->base.id, connector->name,
info->non_desktop ? " (redundant quirk)" : "");
info->non_desktop = true;
}
if (info->quirks & EDID_QUIRK_CAP_DSC_15BPP)
info->max_dsc_bpp = 15;
if (info->quirks & EDID_QUIRK_FORCE_6BPC)
info->bpc = 6;
if (info->quirks & EDID_QUIRK_FORCE_8BPC)
info->bpc = 8;
if (info->quirks & EDID_QUIRK_FORCE_10BPC)
info->bpc = 10;
if (info->quirks & EDID_QUIRK_FORCE_12BPC)
info->bpc = 12;
/* Depends on info->cea_rev set by drm_parse_cea_ext() above */
drm_edid_to_eld(connector, drm_edid);
}
static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *dev,
struct displayid_detailed_timings_1 *timings,
bool type_7)
{
struct drm_display_mode *mode;
unsigned pixel_clock = (timings->pixel_clock[0] |
(timings->pixel_clock[1] << 8) |
(timings->pixel_clock[2] << 16)) + 1;
unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
unsigned hsync_width = (timings->hsw[0] | timings->hsw[1] << 8) + 1;
unsigned vactive = (timings->vactive[0] | timings->vactive[1] << 8) + 1;
unsigned vblank = (timings->vblank[0] | timings->vblank[1] << 8) + 1;
unsigned vsync = (timings->vsync[0] | (timings->vsync[1] & 0x7f) << 8) + 1;
unsigned vsync_width = (timings->vsw[0] | timings->vsw[1] << 8) + 1;
bool hsync_positive = (timings->hsync[1] >> 7) & 0x1;
bool vsync_positive = (timings->vsync[1] >> 7) & 0x1;
mode = drm_mode_create(dev);
if (!mode)
return NULL;
/* resolution is kHz for type VII, and 10 kHz for type I */
mode->clock = type_7 ? pixel_clock : pixel_clock * 10;
mode->hdisplay = hactive;
mode->hsync_start = mode->hdisplay + hsync;
mode->hsync_end = mode->hsync_start + hsync_width;
mode->htotal = mode->hdisplay + hblank;
mode->vdisplay = vactive;
mode->vsync_start = mode->vdisplay + vsync;
mode->vsync_end = mode->vsync_start + vsync_width;
mode->vtotal = mode->vdisplay + vblank;
mode->flags = 0;
mode->flags |= hsync_positive ? DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
mode->flags |= vsync_positive ? DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
mode->type = DRM_MODE_TYPE_DRIVER;
if (timings->flags & 0x80)
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_set_name(mode);
return mode;
}
static int add_displayid_detailed_1_modes(struct drm_connector *connector,
const struct displayid_block *block)
{
struct displayid_detailed_timing_block *det = (struct displayid_detailed_timing_block *)block;
int i;
int num_timings;
struct drm_display_mode *newmode;
int num_modes = 0;
bool type_7 = block->tag == DATA_BLOCK_2_TYPE_7_DETAILED_TIMING;
	/* blocks must be a multiple of 20 bytes in length */
if (block->num_bytes % 20)
return 0;
num_timings = block->num_bytes / 20;
for (i = 0; i < num_timings; i++) {
struct displayid_detailed_timings_1 *timings = &det->timings[i];
newmode = drm_mode_displayid_detailed(connector->dev, timings, type_7);
if (!newmode)
continue;
drm_mode_probed_add(connector, newmode);
num_modes++;
}
return num_modes;
}
static int add_displayid_detailed_modes(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
const struct displayid_block *block;
struct displayid_iter iter;
int num_modes = 0;
displayid_iter_edid_begin(drm_edid, &iter);
displayid_iter_for_each(block, &iter) {
if (block->tag == DATA_BLOCK_TYPE_1_DETAILED_TIMING ||
block->tag == DATA_BLOCK_2_TYPE_7_DETAILED_TIMING)
num_modes += add_displayid_detailed_1_modes(connector, block);
}
displayid_iter_end(&iter);
return num_modes;
}
static int _drm_edid_connector_add_modes(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
const struct drm_display_info *info = &connector->display_info;
int num_modes = 0;
if (!drm_edid)
return 0;
/*
* EDID spec says modes should be preferred in this order:
* - preferred detailed mode
* - other detailed modes from base block
* - detailed modes from extension blocks
* - CVT 3-byte code modes
* - standard timing codes
* - established timing codes
* - modes inferred from GTF or CVT range information
*
* We get this pretty much right.
*
* XXX order for additional mode types in extension blocks?
*/
num_modes += add_detailed_modes(connector, drm_edid);
num_modes += add_cvt_modes(connector, drm_edid);
num_modes += add_standard_modes(connector, drm_edid);
num_modes += add_established_modes(connector, drm_edid);
num_modes += add_cea_modes(connector, drm_edid);
num_modes += add_alternate_cea_modes(connector, drm_edid);
num_modes += add_displayid_detailed_modes(connector, drm_edid);
if (drm_edid->edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ)
num_modes += add_inferred_modes(connector, drm_edid);
if (info->quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector);
return num_modes;
}
static void _drm_update_tile_info(struct drm_connector *connector,
const struct drm_edid *drm_edid);
static int _drm_edid_connector_property_update(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
struct drm_device *dev = connector->dev;
int ret;
if (connector->edid_blob_ptr) {
const struct edid *old_edid = connector->edid_blob_ptr->data;
if (old_edid) {
if (!drm_edid_are_equal(drm_edid ? drm_edid->edid : NULL, old_edid)) {
connector->epoch_counter++;
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] EDID changed, epoch counter %llu\n",
connector->base.id, connector->name,
connector->epoch_counter);
}
}
}
ret = drm_property_replace_global_blob(dev,
&connector->edid_blob_ptr,
drm_edid ? drm_edid->size : 0,
drm_edid ? drm_edid->edid : NULL,
&connector->base,
dev->mode_config.edid_property);
if (ret) {
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] EDID property update failed (%d)\n",
connector->base.id, connector->name, ret);
goto out;
}
ret = drm_object_property_set_value(&connector->base,
dev->mode_config.non_desktop_property,
connector->display_info.non_desktop);
if (ret) {
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Non-desktop property update failed (%d)\n",
connector->base.id, connector->name, ret);
goto out;
}
ret = drm_connector_set_tile_property(connector);
if (ret) {
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Tile property update failed (%d)\n",
connector->base.id, connector->name, ret);
goto out;
}
out:
return ret;
}
/**
* drm_edid_connector_update - Update connector information from EDID
* @connector: Connector
* @drm_edid: EDID
*
* Update the connector display info, ELD, HDR metadata, relevant properties,
* etc. from the passed in EDID.
*
* If EDID is NULL, reset the information.
*
* Must be called before calling drm_edid_connector_add_modes().
*
* Return: 0 on success, negative error on errors.
*/
int drm_edid_connector_update(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
update_display_info(connector, drm_edid);
_drm_update_tile_info(connector, drm_edid);
return _drm_edid_connector_property_update(connector, drm_edid);
}
EXPORT_SYMBOL(drm_edid_connector_update);
/**
* drm_edid_connector_add_modes - Update probed modes from the EDID property
* @connector: Connector
*
* Add the modes from the previously updated EDID property to the connector
* probed modes list.
*
* drm_edid_connector_update() must have been called before this to update the
* EDID property.
*
* Return: The number of modes added, or 0 if we couldn't find any.
*/
int drm_edid_connector_add_modes(struct drm_connector *connector)
{
const struct drm_edid *drm_edid = NULL;
int count;
if (connector->edid_blob_ptr)
drm_edid = drm_edid_alloc(connector->edid_blob_ptr->data,
connector->edid_blob_ptr->length);
count = _drm_edid_connector_add_modes(connector, drm_edid);
drm_edid_free(drm_edid);
return count;
}
EXPORT_SYMBOL(drm_edid_connector_add_modes);
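/*
 * Usage sketch (illustrative only, not part of the core code): a minimal
 * &drm_connector_helper_funcs.get_modes implementation built around the
 * drm_edid_connector_update() / drm_edid_connector_add_modes() pair. The
 * drm_edid_read() call stands in for whatever EDID transport the driver
 * actually has (DDC, DP AUX, firmware blob, ...), and the function name is a
 * made-up example.
 *
 *	static int example_connector_get_modes(struct drm_connector *connector)
 *	{
 *		const struct drm_edid *drm_edid;
 *		int count;
 *
 *		drm_edid = drm_edid_read(connector);
 *
 *		// Update display info and properties first; a NULL EDID
 *		// resets any stale information.
 *		drm_edid_connector_update(connector, drm_edid);
 *
 *		// Then add the probed modes from the stored EDID property.
 *		count = drm_edid_connector_add_modes(connector);
 *
 *		drm_edid_free(drm_edid);
 *
 *		return count;
 *	}
 */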
/**
* drm_connector_update_edid_property - update the edid property of a connector
* @connector: drm connector
* @edid: new value of the edid property
*
* This function creates a new blob modeset object and assigns its id to the
* connector's edid property.
 * Since we also parse tile information from the EDID's DisplayID block, the
 * connector's tile property is set here as well. See
 * drm_connector_set_tile_property() for more details.
*
* This function is deprecated. Use drm_edid_connector_update() instead.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_connector_update_edid_property(struct drm_connector *connector,
const struct edid *edid)
{
struct drm_edid drm_edid;
return drm_edid_connector_update(connector, drm_edid_legacy_init(&drm_edid, edid));
}
EXPORT_SYMBOL(drm_connector_update_edid_property);
/**
* drm_add_edid_modes - add modes from EDID data, if available
* @connector: connector we're probing
* @edid: EDID data
*
* Add the specified modes to the connector's mode list. Also fills out the
* &drm_display_info structure and ELD in @connector with any information which
* can be derived from the edid.
*
* This function is deprecated. Use drm_edid_connector_add_modes() instead.
*
* Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
{
struct drm_edid _drm_edid;
const struct drm_edid *drm_edid;
if (edid && !drm_edid_is_valid(edid)) {
drm_warn(connector->dev, "[CONNECTOR:%d:%s] EDID invalid.\n",
connector->base.id, connector->name);
edid = NULL;
}
drm_edid = drm_edid_legacy_init(&_drm_edid, edid);
update_display_info(connector, drm_edid);
return _drm_edid_connector_add_modes(connector, drm_edid);
}
EXPORT_SYMBOL(drm_add_edid_modes);
/**
* drm_add_modes_noedid - add modes for the connectors without EDID
* @connector: connector we're probing
* @hdisplay: the horizontal display limit
* @vdisplay: the vertical display limit
*
 * Add the specified modes to the connector's mode list. A mode is only added
 * when its hdisplay/vdisplay does not exceed the given limits.
*
* Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay)
{
int i, count, num_modes = 0;
struct drm_display_mode *mode;
struct drm_device *dev = connector->dev;
count = ARRAY_SIZE(drm_dmt_modes);
if (hdisplay < 0)
hdisplay = 0;
if (vdisplay < 0)
vdisplay = 0;
for (i = 0; i < count; i++) {
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hdisplay && vdisplay) {
/*
			 * Only when both limits are valid are they used to
			 * decide whether the mode should be added to the
			 * connector's mode list.
*/
if (ptr->hdisplay > hdisplay ||
ptr->vdisplay > vdisplay)
continue;
}
if (drm_mode_vrefresh(ptr) > 61)
continue;
mode = drm_mode_duplicate(dev, ptr);
if (mode) {
drm_mode_probed_add(connector, mode);
num_modes++;
}
}
return num_modes;
}
EXPORT_SYMBOL(drm_add_modes_noedid);
/**
* drm_set_preferred_mode - Sets the preferred mode of a connector
* @connector: connector whose mode list should be processed
* @hpref: horizontal resolution of preferred mode
* @vpref: vertical resolution of preferred mode
*
* Marks a mode as preferred if it matches the resolution specified by @hpref
* and @vpref.
*/
void drm_set_preferred_mode(struct drm_connector *connector,
int hpref, int vpref)
{
struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->probed_modes, head) {
if (mode->hdisplay == hpref &&
mode->vdisplay == vpref)
mode->type |= DRM_MODE_TYPE_PREFERRED;
}
}
EXPORT_SYMBOL(drm_set_preferred_mode);
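/*
 * Usage sketch (illustrative only): connectors without an EDID, e.g. on
 * virtual or headless devices, typically combine drm_add_modes_noedid() with
 * drm_set_preferred_mode() in their .get_modes hook. The 4096x4096 limit and
 * the 1024x768 preferred mode below are arbitrary example values.
 *
 *	static int example_noedid_get_modes(struct drm_connector *connector)
 *	{
 *		int count;
 *
 *		// Add all DMT modes that fit within the given limits.
 *		count = drm_add_modes_noedid(connector, 4096, 4096);
 *
 *		// Mark a sane default as preferred for userspace.
 *		drm_set_preferred_mode(connector, 1024, 768);
 *
 *		return count;
 *	}
 */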
static bool is_hdmi2_sink(const struct drm_connector *connector)
{
/*
* FIXME: sil-sii8620 doesn't have a connector around when
* we need one, so we have to be prepared for a NULL connector.
*/
if (!connector)
return true;
return connector->display_info.hdmi.scdc.supported ||
connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR420;
}
static u8 drm_mode_hdmi_vic(const struct drm_connector *connector,
const struct drm_display_mode *mode)
{
bool has_hdmi_infoframe = connector ?
connector->display_info.has_hdmi_infoframe : false;
if (!has_hdmi_infoframe)
return 0;
/* No HDMI VIC when signalling 3D video format */
if (mode->flags & DRM_MODE_FLAG_3D_MASK)
return 0;
return drm_match_hdmi_mode(mode);
}
static u8 drm_mode_cea_vic(const struct drm_connector *connector,
const struct drm_display_mode *mode)
{
/*
	 * The HDMI spec says that if a mode is found among the HDMI 1.4b 4K
	 * modes we should send its VIC in the vendor infoframe, otherwise
	 * send the VIC in the AVI infoframe. Let's check whether this mode
	 * is present in the HDMI 1.4b 4K modes.
*/
if (drm_mode_hdmi_vic(connector, mode))
return 0;
return drm_match_cea_mode(mode);
}
/*
* Avoid sending VICs defined in HDMI 2.0 in AVI infoframes to sinks that
* conform to HDMI 1.4.
*
* HDMI 1.4 (CTA-861-D) VIC range: [1..64]
* HDMI 2.0 (CTA-861-F) VIC range: [1..107]
*
* If the sink lists the VIC in CTA VDB, assume it's fine, regardless of HDMI
* version.
*/
static u8 vic_for_avi_infoframe(const struct drm_connector *connector, u8 vic)
{
if (!is_hdmi2_sink(connector) && vic > 64 &&
!cta_vdb_has_vic(connector, vic))
return 0;
return vic;
}
/**
* drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
* data from a DRM display mode
* @frame: HDMI AVI infoframe
* @connector: the connector
* @mode: DRM display mode
*
* Return: 0 on success or a negative error code on failure.
*/
int
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
const struct drm_connector *connector,
const struct drm_display_mode *mode)
{
enum hdmi_picture_aspect picture_aspect;
u8 vic, hdmi_vic;
if (!frame || !mode)
return -EINVAL;
hdmi_avi_infoframe_init(frame);
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
frame->pixel_repeat = 1;
vic = drm_mode_cea_vic(connector, mode);
hdmi_vic = drm_mode_hdmi_vic(connector, mode);
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
/*
* As some drivers don't support atomic, we can't use connector state.
* So just initialize the frame with default values, just the same way
* as it's done with other properties here.
*/
frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
frame->itc = 0;
/*
* Populate picture aspect ratio from either
* user input (if specified) or from the CEA/HDMI mode lists.
*/
picture_aspect = mode->picture_aspect_ratio;
if (picture_aspect == HDMI_PICTURE_ASPECT_NONE) {
if (vic)
picture_aspect = drm_get_cea_aspect_ratio(vic);
else if (hdmi_vic)
picture_aspect = drm_get_hdmi_aspect_ratio(hdmi_vic);
}
/*
* The infoframe can't convey anything but none, 4:3
* and 16:9, so if the user has asked for anything else
* we can only satisfy it by specifying the right VIC.
*/
if (picture_aspect > HDMI_PICTURE_ASPECT_16_9) {
if (vic) {
if (picture_aspect != drm_get_cea_aspect_ratio(vic))
return -EINVAL;
} else if (hdmi_vic) {
if (picture_aspect != drm_get_hdmi_aspect_ratio(hdmi_vic))
return -EINVAL;
} else {
return -EINVAL;
}
picture_aspect = HDMI_PICTURE_ASPECT_NONE;
}
frame->video_code = vic_for_avi_infoframe(connector, vic);
frame->picture_aspect = picture_aspect;
frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
return 0;
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
/**
* drm_hdmi_avi_infoframe_quant_range() - fill the HDMI AVI infoframe
* quantization range information
* @frame: HDMI AVI infoframe
* @connector: the connector
* @mode: DRM display mode
* @rgb_quant_range: RGB quantization range (Q)
*/
void
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
const struct drm_connector *connector,
const struct drm_display_mode *mode,
enum hdmi_quantization_range rgb_quant_range)
{
const struct drm_display_info *info = &connector->display_info;
/*
* CEA-861:
* "A Source shall not send a non-zero Q value that does not correspond
* to the default RGB Quantization Range for the transmitted Picture
* unless the Sink indicates support for the Q bit in a Video
* Capabilities Data Block."
*
* HDMI 2.0 recommends sending non-zero Q when it does match the
* default RGB quantization range for the mode, even when QS=0.
*/
if (info->rgb_quant_range_selectable ||
rgb_quant_range == drm_default_rgb_quant_range(mode))
frame->quantization_range = rgb_quant_range;
else
frame->quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
/*
* CEA-861-F:
* "When transmitting any RGB colorimetry, the Source should set the
* YQ-field to match the RGB Quantization Range being transmitted
* (e.g., when Limited Range RGB, set YQ=0 or when Full Range RGB,
* set YQ=1) and the Sink shall ignore the YQ-field."
*
	 * Unfortunately certain sinks (e.g. VIZ Model 67/E261VA) get confused
* by non-zero YQ when receiving RGB. There doesn't seem to be any
* good way to tell which version of CEA-861 the sink supports, so
* we limit non-zero YQ to HDMI 2.0 sinks only as HDMI 2.0 is based
* on CEA-861-F.
*/
if (!is_hdmi2_sink(connector) ||
rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
frame->ycc_quantization_range =
HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
else
frame->ycc_quantization_range =
HDMI_YCC_QUANTIZATION_RANGE_FULL;
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_quant_range);
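/*
 * Usage sketch (illustrative only): an encoder typically fills the AVI
 * infoframe from the mode, fixes up the quantization range and packs the
 * result into the buffer its hardware expects. The full-range selection and
 * the buffer handling below are example assumptions, not requirements.
 *
 *	static int example_fill_avi_infoframe(struct drm_connector *connector,
 *					      const struct drm_display_mode *mode,
 *					      u8 *buf, size_t len)
 *	{
 *		struct hdmi_avi_infoframe frame;
 *		ssize_t packed;
 *		int ret;
 *
 *		ret = drm_hdmi_avi_infoframe_from_display_mode(&frame,
 *							       connector, mode);
 *		if (ret)
 *			return ret;
 *
 *		drm_hdmi_avi_infoframe_quant_range(&frame, connector, mode,
 *						   HDMI_QUANTIZATION_RANGE_FULL);
 *
 *		// Pack into wire format for the hardware's infoframe buffer.
 *		packed = hdmi_avi_infoframe_pack(&frame, buf, len);
 *
 *		return packed < 0 ? packed : 0;
 *	}
 */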
static enum hdmi_3d_structure
s3d_structure_from_display_mode(const struct drm_display_mode *mode)
{
u32 layout = mode->flags & DRM_MODE_FLAG_3D_MASK;
switch (layout) {
case DRM_MODE_FLAG_3D_FRAME_PACKING:
return HDMI_3D_STRUCTURE_FRAME_PACKING;
case DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE:
return HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE;
case DRM_MODE_FLAG_3D_LINE_ALTERNATIVE:
return HDMI_3D_STRUCTURE_LINE_ALTERNATIVE;
case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL:
return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL;
case DRM_MODE_FLAG_3D_L_DEPTH:
return HDMI_3D_STRUCTURE_L_DEPTH;
case DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH:
return HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH;
case DRM_MODE_FLAG_3D_TOP_AND_BOTTOM:
return HDMI_3D_STRUCTURE_TOP_AND_BOTTOM;
case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF:
return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF;
default:
return HDMI_3D_STRUCTURE_INVALID;
}
}
/**
* drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
* data from a DRM display mode
* @frame: HDMI vendor infoframe
* @connector: the connector
* @mode: DRM display mode
*
 * Note that an HDMI vendor infoframe only needs to be sent when using a 4k
 * or stereoscopic 3D mode. For any other mode this function returns -EINVAL,
 * an error that can safely be ignored.
*
* Return: 0 on success or a negative error code on failure.
*/
int
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
const struct drm_connector *connector,
const struct drm_display_mode *mode)
{
/*
* FIXME: sil-sii8620 doesn't have a connector around when
* we need one, so we have to be prepared for a NULL connector.
*/
bool has_hdmi_infoframe = connector ?
connector->display_info.has_hdmi_infoframe : false;
int err;
if (!frame || !mode)
return -EINVAL;
if (!has_hdmi_infoframe)
return -EINVAL;
err = hdmi_vendor_infoframe_init(frame);
if (err < 0)
return err;
/*
* Even if it's not absolutely necessary to send the infoframe
* (ie.vic==0 and s3d_struct==0) we will still send it if we
* know that the sink can handle it. This is based on a
* suggestion in HDMI 2.0 Appendix F. Apparently some sinks
* have trouble realizing that they should switch from 3D to 2D
* mode if the source simply stops sending the infoframe when
* it wants to switch from 3D to 2D.
*/
frame->vic = drm_mode_hdmi_vic(connector, mode);
frame->s3d_struct = s3d_structure_from_display_mode(mode);
return 0;
}
EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
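/*
 * Usage sketch (illustrative only): as documented above, -EINVAL simply means
 * no HDMI vendor infoframe is needed for the given mode and can be ignored.
 * The buffer handling is an example assumption.
 *
 *	static void example_fill_vendor_infoframe(struct drm_connector *connector,
 *						  const struct drm_display_mode *mode,
 *						  u8 *buf, size_t len)
 *	{
 *		struct hdmi_vendor_infoframe frame;
 *
 *		if (drm_hdmi_vendor_infoframe_from_display_mode(&frame,
 *								connector, mode))
 *			return; // not a 4k/3D mode, nothing to send
 *
 *		hdmi_vendor_infoframe_pack(&frame, buf, len);
 *	}
 */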
static void drm_parse_tiled_block(struct drm_connector *connector,
const struct displayid_block *block)
{
const struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
u16 w, h;
u8 tile_v_loc, tile_h_loc;
u8 num_v_tile, num_h_tile;
struct drm_tile_group *tg;
w = tile->tile_size[0] | tile->tile_size[1] << 8;
h = tile->tile_size[2] | tile->tile_size[3] << 8;
num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30);
num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30);
tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4);
tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4);
connector->has_tile = true;
if (tile->tile_cap & 0x80)
connector->tile_is_single_monitor = true;
connector->num_h_tile = num_h_tile + 1;
connector->num_v_tile = num_v_tile + 1;
connector->tile_h_loc = tile_h_loc;
connector->tile_v_loc = tile_v_loc;
connector->tile_h_size = w + 1;
connector->tile_v_size = h + 1;
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] tile cap 0x%x, size %dx%d, num tiles %dx%d, location %dx%d, vend %c%c%c",
connector->base.id, connector->name,
tile->tile_cap,
connector->tile_h_size, connector->tile_v_size,
connector->num_h_tile, connector->num_v_tile,
connector->tile_h_loc, connector->tile_v_loc,
tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]);
tg = drm_mode_get_tile_group(connector->dev, tile->topology_id);
if (!tg)
tg = drm_mode_create_tile_group(connector->dev, tile->topology_id);
if (!tg)
return;
if (connector->tile_group != tg) {
		/*
		 * If we don't already hold a pointer to this tile group, keep
		 * the reference we just took and drop the reference to the
		 * old tile group.
		 */
if (connector->tile_group)
drm_mode_put_tile_group(connector->dev, connector->tile_group);
connector->tile_group = tg;
} else {
/* if same tile group, then release the ref we just took. */
drm_mode_put_tile_group(connector->dev, tg);
}
}
static bool displayid_is_tiled_block(const struct displayid_iter *iter,
const struct displayid_block *block)
{
return (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_12 &&
block->tag == DATA_BLOCK_TILED_DISPLAY) ||
(displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_20 &&
block->tag == DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY);
}
static void _drm_update_tile_info(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
const struct displayid_block *block;
struct displayid_iter iter;
connector->has_tile = false;
displayid_iter_edid_begin(drm_edid, &iter);
displayid_iter_for_each(block, &iter) {
if (displayid_is_tiled_block(&iter, block))
drm_parse_tiled_block(connector, block);
}
displayid_iter_end(&iter);
if (!connector->has_tile && connector->tile_group) {
drm_mode_put_tile_group(connector->dev, connector->tile_group);
connector->tile_group = NULL;
}
}
| linux-master | drivers/gpu/drm/drm_edid.c |
// SPDX-License-Identifier: MIT
/*
* Copyright (C) 2020 Red Hat, Inc.
*
* Authors:
* Hans de Goede <[email protected]>
*/
#include <linux/acpi.h>
#include <drm/drm_privacy_screen_machine.h>
#ifdef CONFIG_X86
static struct drm_privacy_screen_lookup arch_lookup;
struct arch_init_data {
struct drm_privacy_screen_lookup lookup;
bool (*detect)(void);
};
#if IS_ENABLED(CONFIG_THINKPAD_ACPI)
static acpi_status __init acpi_set_handle(acpi_handle handle, u32 level,
void *context, void **return_value)
{
*(acpi_handle *)return_value = handle;
return AE_CTRL_TERMINATE;
}
static bool __init detect_thinkpad_privacy_screen(void)
{
union acpi_object obj = { .type = ACPI_TYPE_INTEGER };
struct acpi_object_list args = { .count = 1, .pointer = &obj, };
acpi_handle ec_handle = NULL;
unsigned long long output;
acpi_status status;
if (acpi_disabled)
return false;
/* Get embedded-controller handle */
status = acpi_get_devices("PNP0C09", acpi_set_handle, NULL, &ec_handle);
if (ACPI_FAILURE(status) || !ec_handle)
return false;
/* And call the privacy-screen get-status method */
status = acpi_evaluate_integer(ec_handle, "HKEY.GSSS", &args, &output);
if (ACPI_FAILURE(status))
return false;
return (output & 0x10000) ? true : false;
}
#endif
#if IS_ENABLED(CONFIG_CHROMEOS_PRIVACY_SCREEN)
static bool __init detect_chromeos_privacy_screen(void)
{
return acpi_dev_present("GOOG0010", NULL, -1);
}
#endif
static const struct arch_init_data arch_init_data[] __initconst = {
#if IS_ENABLED(CONFIG_THINKPAD_ACPI)
{
.lookup = {
.dev_id = NULL,
.con_id = NULL,
.provider = "privacy_screen-thinkpad_acpi",
},
.detect = detect_thinkpad_privacy_screen,
},
#endif
#if IS_ENABLED(CONFIG_CHROMEOS_PRIVACY_SCREEN)
{
.lookup = {
.dev_id = NULL,
.con_id = NULL,
.provider = "privacy_screen-GOOG0010:00",
},
.detect = detect_chromeos_privacy_screen,
},
#endif
};
void __init drm_privacy_screen_lookup_init(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(arch_init_data); i++) {
if (!arch_init_data[i].detect())
continue;
pr_info("Found '%s' privacy-screen provider\n",
arch_init_data[i].lookup.provider);
/* Make a copy because arch_init_data is __initconst */
arch_lookup = arch_init_data[i].lookup;
drm_privacy_screen_lookup_add(&arch_lookup);
break;
}
}
void drm_privacy_screen_lookup_exit(void)
{
if (arch_lookup.provider)
drm_privacy_screen_lookup_remove(&arch_lookup);
}
#endif /* ifdef CONFIG_X86 */
| linux-master | drivers/gpu/drm/drm_privacy_screen_x86.c |
/*
* Created: Tue Feb 2 08:37:54 1999 by [email protected]
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Author Rickard E. (Rik) Faith <[email protected]>
* Author Gareth Hughes <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/slab.h>
#include <drm/drm_auth.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_lease.h>
#include <drm/drm_print.h>
#include "drm_internal.h"
#include "drm_legacy.h"
/**
* DOC: master and authentication
*
* &struct drm_master is used to track groups of clients with open
 * primary/legacy device nodes. For every &struct drm_file which has at least
 * once successfully become the device master (either through the SET_MASTER
 * IOCTL, or implicitly by opening the primary device node while no one else
 * was the current master at the time) there exists one &drm_master.
* This is noted in &drm_file.is_master. All other clients have just a pointer
* to the &drm_master they are associated with.
*
* In addition only one &drm_master can be the current master for a &drm_device.
* It can be switched through the DROP_MASTER and SET_MASTER IOCTL, or
* implicitly through closing/opening the primary device node. See also
* drm_is_current_master().
*
* Clients can authenticate against the current master (if it matches their own)
* using the GETMAGIC and AUTHMAGIC IOCTLs. Together with exchanging masters,
* this allows controlled access to the device for an entire group of mutually
* trusted clients.
*/
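/*
 * Userspace view of the GETMAGIC/AUTHMAGIC handshake described above
 * (illustrative sketch only, error handling omitted; this is not kernel
 * code). The client obtains a magic for its own fd and hands it to the
 * master, which then authenticates it:
 *
 *	struct drm_auth auth = { 0 };
 *
 *	// In the client process:
 *	ioctl(client_fd, DRM_IOCTL_GET_MAGIC, &auth);
 *	// ... pass auth.magic to the master, e.g. over a socket ...
 *
 *	// In the master process:
 *	ioctl(master_fd, DRM_IOCTL_AUTH_MAGIC, &auth);
 *	// client_fd is now authenticated against the current master
 */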
static bool drm_is_current_master_locked(struct drm_file *fpriv)
{
lockdep_assert_once(lockdep_is_held(&fpriv->master_lookup_lock) ||
lockdep_is_held(&fpriv->minor->dev->master_mutex));
return fpriv->is_master && drm_lease_owner(fpriv->master) == fpriv->minor->dev->master;
}
/**
 * drm_is_current_master - checks whether @fpriv is the current master
* @fpriv: DRM file private
*
* Checks whether @fpriv is current master on its device. This decides whether a
* client is allowed to run DRM_MASTER IOCTLs.
*
 * Most of the modern IOCTLs which require DRM_MASTER are for kernel modesetting
* - the current master is assumed to own the non-shareable display hardware.
*/
bool drm_is_current_master(struct drm_file *fpriv)
{
bool ret;
spin_lock(&fpriv->master_lookup_lock);
ret = drm_is_current_master_locked(fpriv);
spin_unlock(&fpriv->master_lookup_lock);
return ret;
}
EXPORT_SYMBOL(drm_is_current_master);
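/*
 * Usage sketch (illustrative only): master-only code paths typically gate on
 * this check, e.g.:
 *
 *	if (!drm_is_current_master(file_priv))
 *		return -EACCES;
 */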
int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_auth *auth = data;
int ret = 0;
mutex_lock(&dev->master_mutex);
if (!file_priv->magic) {
ret = idr_alloc(&file_priv->master->magic_map, file_priv,
1, 0, GFP_KERNEL);
if (ret >= 0)
file_priv->magic = ret;
}
auth->magic = file_priv->magic;
mutex_unlock(&dev->master_mutex);
drm_dbg_core(dev, "%u\n", auth->magic);
return ret < 0 ? ret : 0;
}
int drm_authmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_auth *auth = data;
struct drm_file *file;
drm_dbg_core(dev, "%u\n", auth->magic);
mutex_lock(&dev->master_mutex);
file = idr_find(&file_priv->master->magic_map, auth->magic);
if (file) {
file->authenticated = 1;
idr_replace(&file_priv->master->magic_map, NULL, auth->magic);
}
mutex_unlock(&dev->master_mutex);
return file ? 0 : -EINVAL;
}
struct drm_master *drm_master_create(struct drm_device *dev)
{
struct drm_master *master;
master = kzalloc(sizeof(*master), GFP_KERNEL);
if (!master)
return NULL;
kref_init(&master->refcount);
drm_master_legacy_init(master);
idr_init_base(&master->magic_map, 1);
master->dev = dev;
/* initialize the tree of output resource lessees */
INIT_LIST_HEAD(&master->lessees);
INIT_LIST_HEAD(&master->lessee_list);
idr_init(&master->leases);
idr_init_base(&master->lessee_idr, 1);
return master;
}
static void drm_set_master(struct drm_device *dev, struct drm_file *fpriv,
bool new_master)
{
dev->master = drm_master_get(fpriv->master);
if (dev->driver->master_set)
dev->driver->master_set(dev, fpriv, new_master);
fpriv->was_master = true;
}
static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
{
struct drm_master *old_master;
struct drm_master *new_master;
lockdep_assert_held_once(&dev->master_mutex);
WARN_ON(fpriv->is_master);
old_master = fpriv->master;
new_master = drm_master_create(dev);
if (!new_master)
return -ENOMEM;
spin_lock(&fpriv->master_lookup_lock);
fpriv->master = new_master;
spin_unlock(&fpriv->master_lookup_lock);
fpriv->is_master = 1;
fpriv->authenticated = 1;
drm_set_master(dev, fpriv, true);
if (old_master)
drm_master_put(&old_master);
return 0;
}
/*
* In the olden days the SET/DROP_MASTER ioctls used to return EACCES when
* CAP_SYS_ADMIN was not set. This was used to prevent rogue applications
* from becoming master and/or failing to release it.
*
* At the same time, the first client (for a given VT) is _always_ master.
* Thus in order for the ioctls to succeed, one had to _explicitly_ run the
* application as root or flip the setuid bit.
*
* If the CAP_SYS_ADMIN was missing, no other client could become master...
* EVER :-( Leading to a) the graphics session dying badly or b) a completely
* locked session.
*
*
 * At some point systemd-logind was introduced to orchestrate and delegate
 * master as applicable. It does so by opening the fd and passing it to users,
 * while logind itself a) does the set/drop master per the users' request and
 * b) implicitly drops master on VT switch.
*
* Even though logind looks like the future, there are a few issues:
 * - some platforms don't have an equivalent (Android, CrOS, some BSDs) so
* root is required _solely_ for SET/DROP MASTER.
* - applications may not be updated to use it,
* - any client which fails to drop master* can DoS the application using
* logind, to a varying degree.
*
 * * Either due to a missing CAP_SYS_ADMIN or simply not calling DROP_MASTER.
*
*
* Here we implement the next best thing:
* - ensure the logind style of fd passing works unchanged, and
* - allow a client to drop/set master, iff it is/was master at a given point
* in time.
*
* Note: DROP_MASTER cannot be free for all, as an arbitrator user could:
* - DoS/crash the arbitrator - details would be implementation specific
* - open the node, become master implicitly and cause issues
*
* As a result this fixes the following when using root-less build w/o logind
* - startx
* - weston
* - various compositors based on wlroots
*/
static int
drm_master_check_perm(struct drm_device *dev, struct drm_file *file_priv)
{
if (file_priv->pid == task_pid(current) && file_priv->was_master)
return 0;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
return 0;
}
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret;
mutex_lock(&dev->master_mutex);
ret = drm_master_check_perm(dev, file_priv);
if (ret)
goto out_unlock;
if (drm_is_current_master_locked(file_priv))
goto out_unlock;
if (dev->master) {
ret = -EBUSY;
goto out_unlock;
}
if (!file_priv->master) {
ret = -EINVAL;
goto out_unlock;
}
if (!file_priv->is_master) {
ret = drm_new_set_master(dev, file_priv);
goto out_unlock;
}
if (file_priv->master->lessor != NULL) {
drm_dbg_lease(dev,
"Attempt to set lessee %d as master\n",
file_priv->master->lessee_id);
ret = -EINVAL;
goto out_unlock;
}
drm_set_master(dev, file_priv, false);
out_unlock:
mutex_unlock(&dev->master_mutex);
return ret;
}
static void drm_drop_master(struct drm_device *dev,
struct drm_file *fpriv)
{
if (dev->driver->master_drop)
dev->driver->master_drop(dev, fpriv);
drm_master_put(&dev->master);
}
int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret;
mutex_lock(&dev->master_mutex);
ret = drm_master_check_perm(dev, file_priv);
if (ret)
goto out_unlock;
if (!drm_is_current_master_locked(file_priv)) {
ret = -EINVAL;
goto out_unlock;
}
if (!dev->master) {
ret = -EINVAL;
goto out_unlock;
}
if (file_priv->master->lessor != NULL) {
drm_dbg_lease(dev,
"Attempt to drop lessee %d as master\n",
file_priv->master->lessee_id);
ret = -EINVAL;
goto out_unlock;
}
drm_drop_master(dev, file_priv);
out_unlock:
mutex_unlock(&dev->master_mutex);
return ret;
}
int drm_master_open(struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
int ret = 0;
	/* If there is no current master make this fd the master, but do not
	 * create any master object for render clients.
	 */
mutex_lock(&dev->master_mutex);
if (!dev->master) {
ret = drm_new_set_master(dev, file_priv);
} else {
spin_lock(&file_priv->master_lookup_lock);
file_priv->master = drm_master_get(dev->master);
spin_unlock(&file_priv->master_lookup_lock);
}
mutex_unlock(&dev->master_mutex);
return ret;
}
void drm_master_release(struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
struct drm_master *master;
mutex_lock(&dev->master_mutex);
master = file_priv->master;
if (file_priv->magic)
idr_remove(&file_priv->master->magic_map, file_priv->magic);
if (!drm_is_current_master_locked(file_priv))
goto out;
drm_legacy_lock_master_cleanup(dev, master);
if (dev->master == file_priv->master)
drm_drop_master(dev, file_priv);
out:
if (drm_core_check_feature(dev, DRIVER_MODESET) && file_priv->is_master) {
/* Revoke any leases held by this or lessees, but only if
* this is the "real" master
*/
drm_lease_revoke(master);
}
/* drop the master reference held by the file priv */
if (file_priv->master)
drm_master_put(&file_priv->master);
mutex_unlock(&dev->master_mutex);
}
/**
* drm_master_get - reference a master pointer
* @master: &struct drm_master
*
* Increments the reference count of @master and returns a pointer to @master.
*/
struct drm_master *drm_master_get(struct drm_master *master)
{
kref_get(&master->refcount);
return master;
}
EXPORT_SYMBOL(drm_master_get);
/**
* drm_file_get_master - reference &drm_file.master of @file_priv
* @file_priv: DRM file private
*
* Increments the reference count of @file_priv's &drm_file.master and returns
* the &drm_file.master. If @file_priv has no &drm_file.master, returns NULL.
*
* Master pointers returned from this function should be unreferenced using
* drm_master_put().
*/
struct drm_master *drm_file_get_master(struct drm_file *file_priv)
{
struct drm_master *master = NULL;
spin_lock(&file_priv->master_lookup_lock);
if (!file_priv->master)
goto unlock;
master = drm_master_get(file_priv->master);
unlock:
spin_unlock(&file_priv->master_lookup_lock);
return master;
}
EXPORT_SYMBOL(drm_file_get_master);
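/*
 * Usage sketch (illustrative only): callers that need the master outside of
 * &drm_device.master_mutex take a reference and drop it again with
 * drm_master_put() when they are done:
 *
 *	struct drm_master *master = drm_file_get_master(file_priv);
 *
 *	if (master) {
 *		// ... inspect master-owned data ...
 *		drm_master_put(&master);
 *	}
 */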
static void drm_master_destroy(struct kref *kref)
{
struct drm_master *master = container_of(kref, struct drm_master, refcount);
struct drm_device *dev = master->dev;
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_lease_destroy(master);
drm_legacy_master_rmmaps(dev, master);
idr_destroy(&master->magic_map);
idr_destroy(&master->leases);
idr_destroy(&master->lessee_idr);
kfree(master->unique);
kfree(master);
}
/**
* drm_master_put - unreference and clear a master pointer
* @master: pointer to a pointer of &struct drm_master
*
* This decrements the &drm_master behind @master and sets it to NULL.
*/
void drm_master_put(struct drm_master **master)
{
kref_put(&(*master)->refcount, drm_master_destroy);
*master = NULL;
}
EXPORT_SYMBOL(drm_master_put);
/* Used by drm_client and drm_fb_helper */
bool drm_master_internal_acquire(struct drm_device *dev)
{
mutex_lock(&dev->master_mutex);
if (dev->master) {
mutex_unlock(&dev->master_mutex);
return false;
}
return true;
}
EXPORT_SYMBOL(drm_master_internal_acquire);
/* Used by drm_client and drm_fb_helper */
void drm_master_internal_release(struct drm_device *dev)
{
mutex_unlock(&dev->master_mutex);
}
EXPORT_SYMBOL(drm_master_internal_release);
| linux-master | drivers/gpu/drm/drm_auth.c |
/*
* Copyright (C) 2018 Intel Corp.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rob Clark <[email protected]>
* Daniel Vetter <[email protected]>
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_plane.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>
#include <linux/slab.h>
#include <linux/dma-fence.h>
/**
* DOC: atomic state reset and initialization
*
* Both the drm core and the atomic helpers assume that there is always the full
* and correct atomic software state for all connectors, CRTCs and planes
 * available, which is a bit of a problem on driver load and also after system
* suspend. One way to solve this is to have a hardware state read-out
* infrastructure which reconstructs the full software state (e.g. the i915
* driver).
*
* The simpler solution is to just reset the software state to everything off,
* which is easiest to do by calling drm_mode_config_reset(). To facilitate this
* the atomic helpers provide default reset implementations for all hooks.
*
* On the upside the precise state tracking of atomic simplifies system suspend
* and resume a lot. For drivers using drm_mode_config_reset() a complete recipe
* is implemented in drm_atomic_helper_suspend() and drm_atomic_helper_resume().
* For other drivers the building blocks are split out, see the documentation
* for these functions.
*/
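/*
 * Usage sketch (illustrative only): drivers that don't subclass the state
 * structures simply wire the default helpers into their funcs tables and call
 * drm_mode_config_reset() at load time. The CRTC funcs below are a partial
 * example; the remaining hooks are elided.
 *
 *	static const struct drm_crtc_funcs example_crtc_funcs = {
 *		.reset			= drm_atomic_helper_crtc_reset,
 *		.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
 *		.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
 *		// .set_config, .page_flip, ... elided
 *	};
 */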
/**
* __drm_atomic_helper_crtc_state_reset - reset the CRTC state
* @crtc_state: atomic CRTC state, must not be NULL
* @crtc: CRTC object, must not be NULL
*
* Initializes the newly allocated @crtc_state with default
* values. This is useful for drivers that subclass the CRTC state.
*/
void
__drm_atomic_helper_crtc_state_reset(struct drm_crtc_state *crtc_state,
struct drm_crtc *crtc)
{
crtc_state->crtc = crtc;
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_state_reset);
/**
* __drm_atomic_helper_crtc_reset - reset state on CRTC
* @crtc: drm CRTC
* @crtc_state: CRTC state to assign
*
* Initializes the newly allocated @crtc_state and assigns it to
* the &drm_crtc->state pointer of @crtc, usually required when
* initializing the drivers or when called from the &drm_crtc_funcs.reset
* hook.
*
* This is useful for drivers that subclass the CRTC state.
*/
void
__drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
if (crtc_state)
__drm_atomic_helper_crtc_state_reset(crtc_state, crtc);
if (drm_dev_has_vblank(crtc->dev))
drm_crtc_vblank_reset(crtc);
crtc->state = crtc_state;
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_reset);
/**
* drm_atomic_helper_crtc_reset - default &drm_crtc_funcs.reset hook for CRTCs
* @crtc: drm CRTC
*
* Resets the atomic state for @crtc by freeing the state pointer (which might
* be NULL, e.g. at driver load time) and allocating a new empty state object.
*/
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
{
struct drm_crtc_state *crtc_state =
kzalloc(sizeof(*crtc->state), GFP_KERNEL);
if (crtc->state)
crtc->funcs->atomic_destroy_state(crtc, crtc->state);
__drm_atomic_helper_crtc_reset(crtc, crtc_state);
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
/**
* __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state
* @crtc: CRTC object
* @state: atomic CRTC state
*
* Copies atomic state from a CRTC's current state and resets inferred values.
* This is useful for drivers that subclass the CRTC state.
*/
void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
memcpy(state, crtc->state, sizeof(*state));
if (state->mode_blob)
drm_property_blob_get(state->mode_blob);
if (state->degamma_lut)
drm_property_blob_get(state->degamma_lut);
if (state->ctm)
drm_property_blob_get(state->ctm);
if (state->gamma_lut)
drm_property_blob_get(state->gamma_lut);
state->mode_changed = false;
state->active_changed = false;
state->planes_changed = false;
state->connectors_changed = false;
state->color_mgmt_changed = false;
state->zpos_changed = false;
state->commit = NULL;
state->event = NULL;
state->async_flip = false;
/* Self refresh should be canceled when a new update is available */
state->active = drm_atomic_crtc_effectively_active(state);
state->self_refresh_active = false;
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
/**
* drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
* @crtc: drm CRTC
*
* Default CRTC state duplicate hook for drivers which don't have their own
* subclassed CRTC state structure.
*/
struct drm_crtc_state *
drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct drm_crtc_state *state;
if (WARN_ON(!crtc->state))
return NULL;
state = kmalloc(sizeof(*state), GFP_KERNEL);
if (state)
__drm_atomic_helper_crtc_duplicate_state(crtc, state);
return state;
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
/**
* __drm_atomic_helper_crtc_destroy_state - release CRTC state
* @state: CRTC state object to release
*
* Releases all resources stored in the CRTC state without actually freeing
* the memory of the CRTC state. This is useful for drivers that subclass the
* CRTC state.
*/
void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
{
if (state->commit) {
/*
* In the event that a non-blocking commit returns
* -ERESTARTSYS before the commit_tail work is queued, we will
* have an extra reference to the commit object. Release it, if
* the event has not been consumed by the worker.
*
* state->event may be freed, so we can't directly look at
* state->event->base.completion.
*/
if (state->event && state->commit->abort_completion)
drm_crtc_commit_put(state->commit);
kfree(state->commit->event);
state->commit->event = NULL;
drm_crtc_commit_put(state->commit);
}
drm_property_blob_put(state->mode_blob);
drm_property_blob_put(state->degamma_lut);
drm_property_blob_put(state->ctm);
drm_property_blob_put(state->gamma_lut);
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
/**
* drm_atomic_helper_crtc_destroy_state - default state destroy hook
* @crtc: drm CRTC
* @state: CRTC state object to release
*
* Default CRTC state destroy hook for drivers which don't have their own
* subclassed CRTC state structure.
*/
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
__drm_atomic_helper_crtc_destroy_state(state);
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
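/*
 * Usage sketch (illustrative only): a driver that subclasses the CRTC state
 * embeds &drm_crtc_state and forwards to the __drm_atomic_helper_crtc_*()
 * helpers from its own hooks. struct example_crtc_state and its extra member
 * are made up for illustration.
 *
 *	struct example_crtc_state {
 *		struct drm_crtc_state base;
 *		u32 example_hw_field;
 *	};
 *
 *	static struct drm_crtc_state *
 *	example_crtc_duplicate_state(struct drm_crtc *crtc)
 *	{
 *		struct example_crtc_state *state;
 *
 *		state = kzalloc(sizeof(*state), GFP_KERNEL);
 *		if (!state)
 *			return NULL;
 *
 *		__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
 *		// copy or reset driver-private members here
 *		return &state->base;
 *	}
 *
 *	static void example_crtc_destroy_state(struct drm_crtc *crtc,
 *					       struct drm_crtc_state *state)
 *	{
 *		__drm_atomic_helper_crtc_destroy_state(state);
 *		kfree(container_of(state, struct example_crtc_state, base));
 *	}
 */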
/**
* __drm_atomic_helper_plane_state_reset - resets plane state to default values
* @plane_state: atomic plane state, must not be NULL
* @plane: plane object, must not be NULL
*
* Initializes the newly allocated @plane_state with default
 * values. This is useful for drivers that subclass the plane state.
*/
void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state,
struct drm_plane *plane)
{
u64 val;
plane_state->plane = plane;
plane_state->rotation = DRM_MODE_ROTATE_0;
plane_state->alpha = DRM_BLEND_ALPHA_OPAQUE;
plane_state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
if (plane->color_encoding_property) {
if (!drm_object_property_get_default_value(&plane->base,
plane->color_encoding_property,
&val))
plane_state->color_encoding = val;
}
if (plane->color_range_property) {
if (!drm_object_property_get_default_value(&plane->base,
plane->color_range_property,
&val))
plane_state->color_range = val;
}
if (plane->zpos_property) {
if (!drm_object_property_get_default_value(&plane->base,
plane->zpos_property,
&val)) {
plane_state->zpos = val;
plane_state->normalized_zpos = val;
}
}
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_state_reset);
/**
* __drm_atomic_helper_plane_reset - reset state on plane
* @plane: drm plane
* @plane_state: plane state to assign
*
* Initializes the newly allocated @plane_state and assigns it to
 * the &drm_plane->state pointer of @plane, usually required when
* initializing the drivers or when called from the &drm_plane_funcs.reset
* hook.
*
* This is useful for drivers that subclass the plane state.
*/
void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
struct drm_plane_state *plane_state)
{
if (plane_state)
__drm_atomic_helper_plane_state_reset(plane_state, plane);
plane->state = plane_state;
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_reset);
/**
* drm_atomic_helper_plane_reset - default &drm_plane_funcs.reset hook for planes
* @plane: drm plane
*
* Resets the atomic state for @plane by freeing the state pointer (which might
* be NULL, e.g. at driver load time) and allocating a new empty state object.
*/
void drm_atomic_helper_plane_reset(struct drm_plane *plane)
{
if (plane->state)
__drm_atomic_helper_plane_destroy_state(plane->state);
kfree(plane->state);
plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
if (plane->state)
__drm_atomic_helper_plane_reset(plane, plane->state);
}
EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
/**
* __drm_atomic_helper_plane_duplicate_state - copy atomic plane state
* @plane: plane object
* @state: atomic plane state
*
* Copies atomic state from a plane's current state. This is useful for
* drivers that subclass the plane state.
*/
void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
memcpy(state, plane->state, sizeof(*state));
if (state->fb)
drm_framebuffer_get(state->fb);
state->fence = NULL;
state->commit = NULL;
state->fb_damage_clips = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
/**
* drm_atomic_helper_plane_duplicate_state - default state duplicate hook
* @plane: drm plane
*
* Default plane state duplicate hook for drivers which don't have their own
* subclassed plane state structure.
*/
struct drm_plane_state *
drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
{
struct drm_plane_state *state;
if (WARN_ON(!plane->state))
return NULL;
state = kmalloc(sizeof(*state), GFP_KERNEL);
if (state)
__drm_atomic_helper_plane_duplicate_state(plane, state);
return state;
}
EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
/**
* __drm_atomic_helper_plane_destroy_state - release plane state
* @state: plane state object to release
*
* Releases all resources stored in the plane state without actually freeing
* the memory of the plane state. This is useful for drivers that subclass the
* plane state.
*/
void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
{
if (state->fb)
drm_framebuffer_put(state->fb);
if (state->fence)
dma_fence_put(state->fence);
if (state->commit)
drm_crtc_commit_put(state->commit);
drm_property_blob_put(state->fb_damage_clips);
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
/**
* drm_atomic_helper_plane_destroy_state - default state destroy hook
* @plane: drm plane
* @state: plane state object to release
*
* Default plane state destroy hook for drivers which don't have their own
* subclassed plane state structure.
*/
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
__drm_atomic_helper_plane_destroy_state(state);
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
/**
* __drm_atomic_helper_connector_state_reset - reset the connector state
* @conn_state: atomic connector state, must not be NULL
 * @connector: connector object, must not be NULL
*
* Initializes the newly allocated @conn_state with default
* values. This is useful for drivers that subclass the connector state.
*/
void
__drm_atomic_helper_connector_state_reset(struct drm_connector_state *conn_state,
struct drm_connector *connector)
{
conn_state->connector = connector;
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_state_reset);
/**
* __drm_atomic_helper_connector_reset - reset state on connector
* @connector: drm connector
* @conn_state: connector state to assign
*
* Initializes the newly allocated @conn_state and assigns it to
* the &drm_connector->state pointer of @connector, usually required when
* initializing the drivers or when called from the &drm_connector_funcs.reset
* hook.
*
* This is useful for drivers that subclass the connector state.
*/
void
__drm_atomic_helper_connector_reset(struct drm_connector *connector,
struct drm_connector_state *conn_state)
{
if (conn_state)
__drm_atomic_helper_connector_state_reset(conn_state, connector);
connector->state = conn_state;
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_reset);
/**
* drm_atomic_helper_connector_reset - default &drm_connector_funcs.reset hook for connectors
* @connector: drm connector
*
* Resets the atomic state for @connector by freeing the state pointer (which
* might be NULL, e.g. at driver load time) and allocating a new empty state
* object.
*/
void drm_atomic_helper_connector_reset(struct drm_connector *connector)
{
struct drm_connector_state *conn_state =
kzalloc(sizeof(*conn_state), GFP_KERNEL);
if (connector->state)
__drm_atomic_helper_connector_destroy_state(connector->state);
kfree(connector->state);
__drm_atomic_helper_connector_reset(connector, conn_state);
}
EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
/**
* drm_atomic_helper_connector_tv_margins_reset - Resets TV connector properties
* @connector: DRM connector
*
* Resets the TV-related properties attached to a connector.
*/
void drm_atomic_helper_connector_tv_margins_reset(struct drm_connector *connector)
{
struct drm_cmdline_mode *cmdline = &connector->cmdline_mode;
struct drm_connector_state *state = connector->state;
state->tv.margins.left = cmdline->tv_margins.left;
state->tv.margins.right = cmdline->tv_margins.right;
state->tv.margins.top = cmdline->tv_margins.top;
state->tv.margins.bottom = cmdline->tv_margins.bottom;
}
EXPORT_SYMBOL(drm_atomic_helper_connector_tv_margins_reset);
/**
* drm_atomic_helper_connector_tv_reset - Resets Analog TV connector properties
* @connector: DRM connector
*
* Resets the analog TV properties attached to a connector
*/
void drm_atomic_helper_connector_tv_reset(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_cmdline_mode *cmdline = &connector->cmdline_mode;
struct drm_connector_state *state = connector->state;
struct drm_property *prop;
uint64_t val;
prop = dev->mode_config.tv_mode_property;
if (prop)
if (!drm_object_property_get_default_value(&connector->base,
prop, &val))
state->tv.mode = val;
if (cmdline->tv_mode_specified)
state->tv.mode = cmdline->tv_mode;
prop = dev->mode_config.tv_select_subconnector_property;
if (prop)
if (!drm_object_property_get_default_value(&connector->base,
prop, &val))
state->tv.select_subconnector = val;
prop = dev->mode_config.tv_subconnector_property;
if (prop)
if (!drm_object_property_get_default_value(&connector->base,
prop, &val))
state->tv.subconnector = val;
prop = dev->mode_config.tv_brightness_property;
if (prop)
if (!drm_object_property_get_default_value(&connector->base,
prop, &val))
state->tv.brightness = val;
prop = dev->mode_config.tv_contrast_property;
if (prop)
if (!drm_object_property_get_default_value(&connector->base,
prop, &val))
state->tv.contrast = val;
prop = dev->mode_config.tv_flicker_reduction_property;
if (prop)
if (!drm_object_property_get_default_value(&connector->base,
prop, &val))
state->tv.flicker_reduction = val;
prop = dev->mode_config.tv_overscan_property;
if (prop)
if (!drm_object_property_get_default_value(&connector->base,
prop, &val))
state->tv.overscan = val;
prop = dev->mode_config.tv_saturation_property;
if (prop)
if (!drm_object_property_get_default_value(&connector->base,
prop, &val))
state->tv.saturation = val;
prop = dev->mode_config.tv_hue_property;
if (prop)
if (!drm_object_property_get_default_value(&connector->base,
prop, &val))
state->tv.hue = val;
drm_atomic_helper_connector_tv_margins_reset(connector);
}
EXPORT_SYMBOL(drm_atomic_helper_connector_tv_reset);
/**
* drm_atomic_helper_connector_tv_check - Validate an analog TV connector state
* @connector: DRM Connector
* @state: the DRM State object
*
* Checks the state object to see if the requested state is valid for an
* analog TV connector.
*
* Return:
* %0 for success, a negative error code on error.
*/
int drm_atomic_helper_connector_tv_check(struct drm_connector *connector,
struct drm_atomic_state *state)
{
struct drm_connector_state *old_conn_state =
drm_atomic_get_old_connector_state(state, connector);
struct drm_connector_state *new_conn_state =
drm_atomic_get_new_connector_state(state, connector);
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
crtc = new_conn_state->crtc;
if (!crtc)
return 0;
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state)
return -EINVAL;
if (old_conn_state->tv.mode != new_conn_state->tv.mode)
crtc_state->mode_changed = true;
if (old_conn_state->tv.margins.left != new_conn_state->tv.margins.left ||
old_conn_state->tv.margins.right != new_conn_state->tv.margins.right ||
old_conn_state->tv.margins.top != new_conn_state->tv.margins.top ||
old_conn_state->tv.margins.bottom != new_conn_state->tv.margins.bottom ||
old_conn_state->tv.mode != new_conn_state->tv.mode ||
old_conn_state->tv.brightness != new_conn_state->tv.brightness ||
old_conn_state->tv.contrast != new_conn_state->tv.contrast ||
old_conn_state->tv.flicker_reduction != new_conn_state->tv.flicker_reduction ||
old_conn_state->tv.overscan != new_conn_state->tv.overscan ||
old_conn_state->tv.saturation != new_conn_state->tv.saturation ||
old_conn_state->tv.hue != new_conn_state->tv.hue)
crtc_state->connectors_changed = true;
return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_connector_tv_check);
/**
* __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
* @connector: connector object
* @state: atomic connector state
*
* Copies atomic state from a connector's current state. This is useful for
* drivers that subclass the connector state.
*/
void
__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
struct drm_connector_state *state)
{
memcpy(state, connector->state, sizeof(*state));
if (state->crtc)
drm_connector_get(connector);
state->commit = NULL;
if (state->hdr_output_metadata)
drm_property_blob_get(state->hdr_output_metadata);
	/* Don't copy over the writeback job; jobs are used only once */
state->writeback_job = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
/**
* drm_atomic_helper_connector_duplicate_state - default state duplicate hook
* @connector: drm connector
*
* Default connector state duplicate hook for drivers which don't have their own
* subclassed connector state structure.
*/
struct drm_connector_state *
drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
{
struct drm_connector_state *state;
if (WARN_ON(!connector->state))
return NULL;
state = kmalloc(sizeof(*state), GFP_KERNEL);
if (state)
__drm_atomic_helper_connector_duplicate_state(connector, state);
return state;
}
EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
/**
* __drm_atomic_helper_connector_destroy_state - release connector state
* @state: connector state object to release
*
* Releases all resources stored in the connector state without actually
* freeing the memory of the connector state. This is useful for drivers that
* subclass the connector state.
*/
void
__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
{
if (state->crtc)
drm_connector_put(state->connector);
if (state->commit)
drm_crtc_commit_put(state->commit);
if (state->writeback_job)
drm_writeback_cleanup_job(state->writeback_job);
drm_property_blob_put(state->hdr_output_metadata);
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
/**
* drm_atomic_helper_connector_destroy_state - default state destroy hook
* @connector: drm connector
* @state: connector state object to release
*
* Default connector state destroy hook for drivers which don't have their own
* subclassed connector state structure.
*/
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state)
{
__drm_atomic_helper_connector_destroy_state(state);
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
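/*
 * Illustrative sketch only (not taken from a real driver): the two default
 * hooks above are typically wired into &drm_connector_funcs as follows, with
 * the remaining, driver specific callbacks elided:
 *
 *	static const struct drm_connector_funcs driver_connector_funcs = {
 *		...
 *		.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
 *		.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
 *	};
 */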
/**
* __drm_atomic_helper_private_obj_duplicate_state - copy atomic private state
 * @obj: private object
* @state: new private object state
*
 * Copies atomic state from a private object's current state and resets inferred values.
* This is useful for drivers that subclass the private state.
*/
void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
memcpy(state, obj->state, sizeof(*state));
}
EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);
/**
* __drm_atomic_helper_bridge_duplicate_state() - Copy atomic bridge state
* @bridge: bridge object
* @state: atomic bridge state
*
* Copies atomic state from a bridge's current state and resets inferred values.
* This is useful for drivers that subclass the bridge state.
*/
void __drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge,
struct drm_bridge_state *state)
{
__drm_atomic_helper_private_obj_duplicate_state(&bridge->base,
&state->base);
state->bridge = bridge;
}
EXPORT_SYMBOL(__drm_atomic_helper_bridge_duplicate_state);
/**
* drm_atomic_helper_bridge_duplicate_state() - Duplicate a bridge state object
* @bridge: bridge object
*
* Allocates a new bridge state and initializes it with the current bridge
* state values. This helper is meant to be used as a bridge
* &drm_bridge_funcs.atomic_duplicate_state hook for bridges that don't
* subclass the bridge state.
*/
struct drm_bridge_state *
drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge)
{
struct drm_bridge_state *new;
if (WARN_ON(!bridge->base.state))
return NULL;
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (new)
__drm_atomic_helper_bridge_duplicate_state(bridge, new);
return new;
}
EXPORT_SYMBOL(drm_atomic_helper_bridge_duplicate_state);
/**
* drm_atomic_helper_bridge_destroy_state() - Destroy a bridge state object
* @bridge: the bridge this state refers to
* @state: bridge state to destroy
*
* Destroys a bridge state previously created by
* &drm_atomic_helper_bridge_reset() or
* &drm_atomic_helper_bridge_duplicate_state(). This helper is meant to be
* used as a bridge &drm_bridge_funcs.atomic_destroy_state hook for bridges
* that don't subclass the bridge state.
*/
void drm_atomic_helper_bridge_destroy_state(struct drm_bridge *bridge,
struct drm_bridge_state *state)
{
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_bridge_destroy_state);
/**
* __drm_atomic_helper_bridge_reset() - Initialize a bridge state to its
* default
* @bridge: the bridge this state refers to
* @state: bridge state to initialize
*
* Initializes the bridge state to default values. This is meant to be called
* by the bridge &drm_bridge_funcs.atomic_reset hook for bridges that subclass
* the bridge state.
*/
void __drm_atomic_helper_bridge_reset(struct drm_bridge *bridge,
struct drm_bridge_state *state)
{
memset(state, 0, sizeof(*state));
state->bridge = bridge;
}
EXPORT_SYMBOL(__drm_atomic_helper_bridge_reset);
/**
* drm_atomic_helper_bridge_reset() - Allocate and initialize a bridge state
* to its default
* @bridge: the bridge this state refers to
*
* Allocates the bridge state and initializes it to default values. This helper
* is meant to be used as a bridge &drm_bridge_funcs.atomic_reset hook for
* bridges that don't subclass the bridge state.
*/
struct drm_bridge_state *
drm_atomic_helper_bridge_reset(struct drm_bridge *bridge)
{
struct drm_bridge_state *bridge_state;
bridge_state = kzalloc(sizeof(*bridge_state), GFP_KERNEL);
if (!bridge_state)
return ERR_PTR(-ENOMEM);
__drm_atomic_helper_bridge_reset(bridge, bridge_state);
return bridge_state;
}
EXPORT_SYMBOL(drm_atomic_helper_bridge_reset);
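/*
 * Illustrative sketch only (hypothetical driver, other callbacks elided): the
 * three bridge helpers above combine into &drm_bridge_funcs for a bridge that
 * doesn't subclass its state:
 *
 *	static const struct drm_bridge_funcs driver_bridge_funcs = {
 *		...
 *		.atomic_reset		= drm_atomic_helper_bridge_reset,
 *		.atomic_duplicate_state	= drm_atomic_helper_bridge_duplicate_state,
 *		.atomic_destroy_state	= drm_atomic_helper_bridge_destroy_state,
 *	};
 */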
| linux-master | drivers/gpu/drm/drm_atomic_state_helper.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
#include <drm/drm_exec.h>
#include <drm/drm_gem.h>
#include <linux/dma-resv.h>
/**
* DOC: Overview
*
* This component mainly abstracts the retry loop necessary for locking
* multiple GEM objects while preparing hardware operations (e.g. command
* submissions, page table updates etc..).
*
* If a contention is detected while locking a GEM object the cleanup procedure
* unlocks all previously locked GEM objects and locks the contended one first
* before locking any further objects.
*
* After an object is locked fences slots can optionally be reserved on the
* dma_resv object inside the GEM object.
*
* A typical usage pattern should look like this::
*
* struct drm_gem_object *obj;
* struct drm_exec exec;
* unsigned long index;
* int ret;
*
* drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
* drm_exec_until_all_locked(&exec) {
* ret = drm_exec_prepare_obj(&exec, boA, 1);
* drm_exec_retry_on_contention(&exec);
* if (ret)
* goto error;
*
* ret = drm_exec_prepare_obj(&exec, boB, 1);
* drm_exec_retry_on_contention(&exec);
* if (ret)
* goto error;
* }
*
* drm_exec_for_each_locked_object(&exec, index, obj) {
* dma_resv_add_fence(obj->resv, fence, DMA_RESV_USAGE_READ);
* ...
* }
* drm_exec_fini(&exec);
*
 * See struct drm_exec for more details.
*/
/* Dummy value used to initially enter the retry loop */
#define DRM_EXEC_DUMMY ((void *)~0)
/* Unlock all objects and drop references */
static void drm_exec_unlock_all(struct drm_exec *exec)
{
struct drm_gem_object *obj;
unsigned long index;
drm_exec_for_each_locked_object_reverse(exec, index, obj) {
dma_resv_unlock(obj->resv);
drm_gem_object_put(obj);
}
drm_gem_object_put(exec->prelocked);
exec->prelocked = NULL;
}
/**
* drm_exec_init - initialize a drm_exec object
* @exec: the drm_exec object to initialize
* @flags: controls locking behavior, see DRM_EXEC_* defines
*
* Initialize the object and make sure that we can track locked objects.
*/
void drm_exec_init(struct drm_exec *exec, uint32_t flags)
{
exec->flags = flags;
exec->objects = kmalloc(PAGE_SIZE, GFP_KERNEL);
/* If allocation here fails, just delay that till the first use */
exec->max_objects = exec->objects ? PAGE_SIZE / sizeof(void *) : 0;
exec->num_objects = 0;
exec->contended = DRM_EXEC_DUMMY;
exec->prelocked = NULL;
}
EXPORT_SYMBOL(drm_exec_init);
/**
* drm_exec_fini - finalize a drm_exec object
* @exec: the drm_exec object to finalize
*
* Unlock all locked objects, drop the references to objects and free all memory
* used for tracking the state.
*/
void drm_exec_fini(struct drm_exec *exec)
{
drm_exec_unlock_all(exec);
kvfree(exec->objects);
if (exec->contended != DRM_EXEC_DUMMY) {
drm_gem_object_put(exec->contended);
ww_acquire_fini(&exec->ticket);
}
}
EXPORT_SYMBOL(drm_exec_fini);
/**
* drm_exec_cleanup - cleanup when contention is detected
* @exec: the drm_exec object to cleanup
*
* Cleanup the current state and return true if we should stay inside the retry
* loop, false if there wasn't any contention detected and we can keep the
* objects locked.
*/
bool drm_exec_cleanup(struct drm_exec *exec)
{
if (likely(!exec->contended)) {
ww_acquire_done(&exec->ticket);
return false;
}
if (likely(exec->contended == DRM_EXEC_DUMMY)) {
exec->contended = NULL;
ww_acquire_init(&exec->ticket, &reservation_ww_class);
return true;
}
drm_exec_unlock_all(exec);
exec->num_objects = 0;
return true;
}
EXPORT_SYMBOL(drm_exec_cleanup);
/* Track the locked object in the array */
static int drm_exec_obj_locked(struct drm_exec *exec,
struct drm_gem_object *obj)
{
if (unlikely(exec->num_objects == exec->max_objects)) {
size_t size = exec->max_objects * sizeof(void *);
void *tmp;
tmp = kvrealloc(exec->objects, size, size + PAGE_SIZE,
GFP_KERNEL);
if (!tmp)
return -ENOMEM;
exec->objects = tmp;
exec->max_objects += PAGE_SIZE / sizeof(void *);
}
drm_gem_object_get(obj);
exec->objects[exec->num_objects++] = obj;
return 0;
}
/* Make sure the contended object is locked first */
static int drm_exec_lock_contended(struct drm_exec *exec)
{
struct drm_gem_object *obj = exec->contended;
int ret;
if (likely(!obj))
return 0;
/* Always cleanup the contention so that error handling can kick in */
exec->contended = NULL;
if (exec->flags & DRM_EXEC_INTERRUPTIBLE_WAIT) {
ret = dma_resv_lock_slow_interruptible(obj->resv,
&exec->ticket);
if (unlikely(ret))
goto error_dropref;
} else {
dma_resv_lock_slow(obj->resv, &exec->ticket);
}
ret = drm_exec_obj_locked(exec, obj);
if (unlikely(ret))
goto error_unlock;
exec->prelocked = obj;
return 0;
error_unlock:
dma_resv_unlock(obj->resv);
error_dropref:
drm_gem_object_put(obj);
return ret;
}
/**
* drm_exec_lock_obj - lock a GEM object for use
* @exec: the drm_exec object with the state
* @obj: the GEM object to lock
*
* Lock a GEM object for use and grab a reference to it.
*
* Returns: -EDEADLK if a contention is detected, -EALREADY when object is
* already locked (can be suppressed by setting the DRM_EXEC_IGNORE_DUPLICATES
* flag), -ENOMEM when memory allocation failed and zero for success.
*/
int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj)
{
int ret;
ret = drm_exec_lock_contended(exec);
if (unlikely(ret))
return ret;
if (exec->prelocked == obj) {
drm_gem_object_put(exec->prelocked);
exec->prelocked = NULL;
return 0;
}
if (exec->flags & DRM_EXEC_INTERRUPTIBLE_WAIT)
ret = dma_resv_lock_interruptible(obj->resv, &exec->ticket);
else
ret = dma_resv_lock(obj->resv, &exec->ticket);
if (unlikely(ret == -EDEADLK)) {
drm_gem_object_get(obj);
exec->contended = obj;
return -EDEADLK;
}
if (unlikely(ret == -EALREADY) &&
exec->flags & DRM_EXEC_IGNORE_DUPLICATES)
return 0;
if (unlikely(ret))
return ret;
ret = drm_exec_obj_locked(exec, obj);
if (ret)
goto error_unlock;
return 0;
error_unlock:
dma_resv_unlock(obj->resv);
return ret;
}
EXPORT_SYMBOL(drm_exec_lock_obj);
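/*
 * A minimal sketch, mirroring the usage pattern from the DOC comment above,
 * for callers that only need the lock and no fence slots ("bo" and the error
 * label are hypothetical):
 *
 *	drm_exec_until_all_locked(&exec) {
 *		ret = drm_exec_lock_obj(&exec, bo);
 *		drm_exec_retry_on_contention(&exec);
 *		if (ret)
 *			goto error;
 *	}
 */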
/**
* drm_exec_unlock_obj - unlock a GEM object in this exec context
* @exec: the drm_exec object with the state
* @obj: the GEM object to unlock
*
* Unlock the GEM object and remove it from the collection of locked objects.
* Should only be used to unlock the most recently locked objects. It's not time
* efficient to unlock objects locked long ago.
*/
void drm_exec_unlock_obj(struct drm_exec *exec, struct drm_gem_object *obj)
{
unsigned int i;
for (i = exec->num_objects; i--;) {
if (exec->objects[i] == obj) {
dma_resv_unlock(obj->resv);
for (++i; i < exec->num_objects; ++i)
exec->objects[i - 1] = exec->objects[i];
--exec->num_objects;
drm_gem_object_put(obj);
return;
}
}
}
EXPORT_SYMBOL(drm_exec_unlock_obj);
/**
* drm_exec_prepare_obj - prepare a GEM object for use
* @exec: the drm_exec object with the state
* @obj: the GEM object to prepare
* @num_fences: how many fences to reserve
*
* Prepare a GEM object for use by locking it and reserving fence slots.
*
* Returns: -EDEADLK if a contention is detected, -EALREADY when object is
* already locked, -ENOMEM when memory allocation failed and zero for success.
*/
int drm_exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj,
unsigned int num_fences)
{
int ret;
ret = drm_exec_lock_obj(exec, obj);
if (ret)
return ret;
ret = dma_resv_reserve_fences(obj->resv, num_fences);
if (ret) {
drm_exec_unlock_obj(exec, obj);
return ret;
}
return 0;
}
EXPORT_SYMBOL(drm_exec_prepare_obj);
/**
* drm_exec_prepare_array - helper to prepare an array of objects
* @exec: the drm_exec object with the state
* @objects: array of GEM object to prepare
* @num_objects: number of GEM objects in the array
* @num_fences: number of fences to reserve on each GEM object
*
* Prepares all GEM objects in an array, aborts on first error.
* Reserves @num_fences on each GEM object after locking it.
*
 * Returns: -EDEADLK on contention, -EALREADY when object is already locked,
* -ENOMEM when memory allocation failed and zero for success.
*/
int drm_exec_prepare_array(struct drm_exec *exec,
struct drm_gem_object **objects,
unsigned int num_objects,
unsigned int num_fences)
{
int ret;
for (unsigned int i = 0; i < num_objects; ++i) {
ret = drm_exec_prepare_obj(exec, objects[i], num_fences);
if (unlikely(ret))
return ret;
}
return 0;
}
EXPORT_SYMBOL(drm_exec_prepare_array);
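/*
 * Hedged example (the "bos" array, its size and the error handling are made
 * up) of preparing a whole array inside the retry loop from the DOC comment
 * above, reserving one fence slot per object:
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
 *	drm_exec_until_all_locked(&exec) {
 *		ret = drm_exec_prepare_array(&exec, bos, num_bos, 1);
 *		drm_exec_retry_on_contention(&exec);
 *		if (ret)
 *			goto error;
 *	}
 *	...
 *	drm_exec_fini(&exec);
 */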
MODULE_DESCRIPTION("DRM execution context");
MODULE_LICENSE("Dual MIT/GPL");
| linux-master | drivers/gpu/drm/drm_exec.c |
/*
* \file drm_scatter.c
* IOCTLs to manage scatter/gather memory
*
* \author Gareth Hughes <[email protected]>
*/
/*
* Created: Mon Dec 18 23:20:54 2000 by [email protected]
*
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include "drm_legacy.h"
#define DEBUG_SCATTER 0
static void drm_sg_cleanup(struct drm_sg_mem * entry)
{
struct page *page;
int i;
for (i = 0; i < entry->pages; i++) {
page = entry->pagelist[i];
if (page)
ClearPageReserved(page);
}
vfree(entry->virtual);
kfree(entry->busaddr);
kfree(entry->pagelist);
kfree(entry);
}
void drm_legacy_sg_cleanup(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
drm_core_check_feature(dev, DRIVER_LEGACY)) {
drm_sg_cleanup(dev->sg);
dev->sg = NULL;
}
}
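/*
 * Fold a kernel virtual address into the 32-bit handle passed through the
 * legacy scatter/gather ioctls; on 64-bit kernels the upper and lower halves
 * of the address are simply summed.
 */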
#ifdef _LP64
# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
#else
# define ScatterHandle(x) (unsigned int)(x)
#endif
int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_scatter_gather *request = data;
struct drm_sg_mem *entry;
unsigned long pages, i, j;
DRM_DEBUG("\n");
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EOPNOTSUPP;
if (request->size > SIZE_MAX - PAGE_SIZE)
return -EINVAL;
if (dev->sg)
return -EINVAL;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
entry->pages = pages;
entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL);
if (!entry->pagelist) {
kfree(entry);
return -ENOMEM;
}
entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL);
if (!entry->busaddr) {
kfree(entry->pagelist);
kfree(entry);
return -ENOMEM;
}
entry->virtual = vmalloc_32(pages << PAGE_SHIFT);
if (!entry->virtual) {
kfree(entry->busaddr);
kfree(entry->pagelist);
kfree(entry);
return -ENOMEM;
}
/* This also forces the mapping of COW pages, so our page list
* will be valid. Please don't remove it...
*/
memset(entry->virtual, 0, pages << PAGE_SHIFT);
entry->handle = ScatterHandle((unsigned long)entry->virtual);
DRM_DEBUG("handle = %08lx\n", entry->handle);
DRM_DEBUG("virtual = %p\n", entry->virtual);
for (i = (unsigned long)entry->virtual, j = 0; j < pages;
i += PAGE_SIZE, j++) {
entry->pagelist[j] = vmalloc_to_page((void *)i);
if (!entry->pagelist[j])
goto failed;
SetPageReserved(entry->pagelist[j]);
}
request->handle = entry->handle;
dev->sg = entry;
#if DEBUG_SCATTER
/* Verify that each page points to its virtual address, and vice
* versa.
*/
{
int error = 0;
for (i = 0; i < pages; i++) {
unsigned long *tmp;
tmp = page_address(entry->pagelist[i]);
for (j = 0;
j < PAGE_SIZE / sizeof(unsigned long);
j++, tmp++) {
*tmp = 0xcafebabe;
}
tmp = (unsigned long *)((u8 *) entry->virtual +
(PAGE_SIZE * i));
for (j = 0;
j < PAGE_SIZE / sizeof(unsigned long);
j++, tmp++) {
if (*tmp != 0xcafebabe && error == 0) {
error = 1;
DRM_ERROR("Scatter allocation error, "
"pagelist does not match "
"virtual mapping\n");
}
}
tmp = page_address(entry->pagelist[i]);
for (j = 0;
j < PAGE_SIZE / sizeof(unsigned long);
j++, tmp++) {
*tmp = 0;
}
}
if (error == 0)
DRM_ERROR("Scatter allocation matches pagelist\n");
}
#endif
return 0;
failed:
drm_sg_cleanup(entry);
return -ENOMEM;
}
int drm_legacy_sg_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_scatter_gather *request = data;
struct drm_sg_mem *entry;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EOPNOTSUPP;
entry = dev->sg;
dev->sg = NULL;
if (!entry || entry->handle != request->handle)
return -EINVAL;
DRM_DEBUG("virtual = %p\n", entry->virtual);
drm_sg_cleanup(entry);
return 0;
}
| linux-master | drivers/gpu/drm/drm_scatter.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Red Hat.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Danilo Krummrich <[email protected]>
*
*/
#include <drm/drm_gpuva_mgr.h>
#include <linux/interval_tree_generic.h>
#include <linux/mm.h>
/**
* DOC: Overview
*
* The DRM GPU VA Manager, represented by struct drm_gpuva_manager keeps track
* of a GPU's virtual address (VA) space and manages the corresponding virtual
* mappings represented by &drm_gpuva objects. It also keeps track of the
* mapping's backing &drm_gem_object buffers.
*
* &drm_gem_object buffers maintain a list of &drm_gpuva objects representing
* all existent GPU VA mappings using this &drm_gem_object as backing buffer.
*
* GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also
* keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'.
*
* The GPU VA manager internally uses a rb-tree to manage the
* &drm_gpuva mappings within a GPU's virtual address space.
*
* The &drm_gpuva_manager contains a special &drm_gpuva representing the
* portion of VA space reserved by the kernel. This node is initialized together
* with the GPU VA manager instance and removed when the GPU VA manager is
* destroyed.
*
 * In a typical application drivers would embed struct drm_gpuva_manager and
 * struct drm_gpuva within their own driver specific structures; this way the
 * manager performs no memory allocations of its own, nor any allocations of
 * &drm_gpuva entries.
*
* The data structures needed to store &drm_gpuvas within the &drm_gpuva_manager
* are contained within struct drm_gpuva already. Hence, for inserting
* &drm_gpuva entries from within dma-fence signalling critical sections it is
* enough to pre-allocate the &drm_gpuva structures.
*/
/**
* DOC: Split and Merge
*
* Besides its capability to manage and represent a GPU VA space, the
* &drm_gpuva_manager also provides functions to let the &drm_gpuva_manager
* calculate a sequence of operations to satisfy a given map or unmap request.
*
* Therefore the DRM GPU VA manager provides an algorithm implementing splitting
* and merging of existent GPU VA mappings with the ones that are requested to
* be mapped or unmapped. This feature is required by the Vulkan API to
 * implement Vulkan 'Sparse Memory Bindings' - driver UAPIs often refer to this
* as VM BIND.
*
* Drivers can call drm_gpuva_sm_map() to receive a sequence of callbacks
* containing map, unmap and remap operations for a given newly requested
* mapping. The sequence of callbacks represents the set of operations to
* execute in order to integrate the new mapping cleanly into the current state
* of the GPU VA space.
*
* Depending on how the new GPU VA mapping intersects with the existent mappings
* of the GPU VA space the &drm_gpuva_fn_ops callbacks contain an arbitrary
* amount of unmap operations, a maximum of two remap operations and a single
* map operation. The caller might receive no callback at all if no operation is
* required, e.g. if the requested mapping already exists in the exact same way.
*
* The single map operation represents the original map operation requested by
* the caller.
*
* &drm_gpuva_op_unmap contains a 'keep' field, which indicates whether the
* &drm_gpuva to unmap is physically contiguous with the original mapping
* request. Optionally, if 'keep' is set, drivers may keep the actual page table
* entries for this &drm_gpuva, adding the missing page table entries only and
* update the &drm_gpuva_manager's view of things accordingly.
*
* Drivers may do the same optimization, namely delta page table updates, also
* for remap operations. This is possible since &drm_gpuva_op_remap consists of
* one unmap operation and one or two map operations, such that drivers can
* derive the page table update delta accordingly.
*
* Note that there can't be more than two existent mappings to split up, one at
* the beginning and one at the end of the new mapping, hence there is a
* maximum of two remap operations.
*
* Analogous to drm_gpuva_sm_map() drm_gpuva_sm_unmap() uses &drm_gpuva_fn_ops
* to call back into the driver in order to unmap a range of GPU VA space. The
* logic behind this function is way simpler though: For all existent mappings
 * enclosed by the given range, unmap operations are created. For mappings which
 * are only partially located within the given range, remap operations are
 * created such that those mappings are split up and re-mapped partially.
*
* As an alternative to drm_gpuva_sm_map() and drm_gpuva_sm_unmap(),
* drm_gpuva_sm_map_ops_create() and drm_gpuva_sm_unmap_ops_create() can be used
* to directly obtain an instance of struct drm_gpuva_ops containing a list of
* &drm_gpuva_op, which can be iterated with drm_gpuva_for_each_op(). This list
* contains the &drm_gpuva_ops analogous to the callbacks one would receive when
* calling drm_gpuva_sm_map() or drm_gpuva_sm_unmap(). While this way requires
* more memory (to allocate the &drm_gpuva_ops), it provides drivers a way to
* iterate the &drm_gpuva_op multiple times, e.g. once in a context where memory
* allocations are possible (e.g. to allocate GPU page tables) and once in the
* dma-fence signalling critical path.
*
* To update the &drm_gpuva_manager's view of the GPU VA space
* drm_gpuva_insert() and drm_gpuva_remove() may be used. These functions can
* safely be used from &drm_gpuva_fn_ops callbacks originating from
* drm_gpuva_sm_map() or drm_gpuva_sm_unmap(). However, it might be more
* convenient to use the provided helper functions drm_gpuva_map(),
* drm_gpuva_remap() and drm_gpuva_unmap() instead.
*
* The following diagram depicts the basic relationships of existent GPU VA
* mappings, a newly requested mapping and the resulting mappings as implemented
* by drm_gpuva_sm_map() - it doesn't cover any arbitrary combinations of these.
*
* 1) Requested mapping is identical. Replace it, but indicate the backing PTEs
* could be kept.
*
* ::
*
* 0 a 1
* old: |-----------| (bo_offset=n)
*
* 0 a 1
* req: |-----------| (bo_offset=n)
*
* 0 a 1
* new: |-----------| (bo_offset=n)
*
*
* 2) Requested mapping is identical, except for the BO offset, hence replace
* the mapping.
*
* ::
*
* 0 a 1
* old: |-----------| (bo_offset=n)
*
* 0 a 1
* req: |-----------| (bo_offset=m)
*
* 0 a 1
* new: |-----------| (bo_offset=m)
*
*
* 3) Requested mapping is identical, except for the backing BO, hence replace
* the mapping.
*
* ::
*
* 0 a 1
* old: |-----------| (bo_offset=n)
*
* 0 b 1
* req: |-----------| (bo_offset=n)
*
* 0 b 1
* new: |-----------| (bo_offset=n)
*
*
* 4) Existent mapping is a left aligned subset of the requested one, hence
* replace the existent one.
*
* ::
*
* 0 a 1
* old: |-----| (bo_offset=n)
*
* 0 a 2
* req: |-----------| (bo_offset=n)
*
* 0 a 2
* new: |-----------| (bo_offset=n)
*
* .. note::
* We expect to see the same result for a request with a different BO
* and/or non-contiguous BO offset.
*
*
* 5) Requested mapping's range is a left aligned subset of the existent one,
* but backed by a different BO. Hence, map the requested mapping and split
* the existent one adjusting its BO offset.
*
* ::
*
* 0 a 2
* old: |-----------| (bo_offset=n)
*
* 0 b 1
* req: |-----| (bo_offset=n)
*
* 0 b 1 a' 2
* new: |-----|-----| (b.bo_offset=n, a.bo_offset=n+1)
*
* .. note::
* We expect to see the same result for a request with a different BO
* and/or non-contiguous BO offset.
*
*
* 6) Existent mapping is a superset of the requested mapping. Split it up, but
* indicate that the backing PTEs could be kept.
*
* ::
*
* 0 a 2
* old: |-----------| (bo_offset=n)
*
* 0 a 1
* req: |-----| (bo_offset=n)
*
* 0 a 1 a' 2
* new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1)
*
*
* 7) Requested mapping's range is a right aligned subset of the existent one,
* but backed by a different BO. Hence, map the requested mapping and split
* the existent one, without adjusting the BO offset.
*
* ::
*
* 0 a 2
* old: |-----------| (bo_offset=n)
*
* 1 b 2
* req: |-----| (bo_offset=m)
*
* 0 a 1 b 2
* new: |-----|-----| (a.bo_offset=n,b.bo_offset=m)
*
*
* 8) Existent mapping is a superset of the requested mapping. Split it up, but
* indicate that the backing PTEs could be kept.
*
* ::
*
* 0 a 2
* old: |-----------| (bo_offset=n)
*
* 1 a 2
* req: |-----| (bo_offset=n+1)
*
* 0 a' 1 a 2
* new: |-----|-----| (a'.bo_offset=n, a.bo_offset=n+1)
*
*
* 9) Existent mapping is overlapped at the end by the requested mapping backed
* by a different BO. Hence, map the requested mapping and split up the
* existent one, without adjusting the BO offset.
*
* ::
*
* 0 a 2
* old: |-----------| (bo_offset=n)
*
* 1 b 3
* req: |-----------| (bo_offset=m)
*
* 0 a 1 b 3
* new: |-----|-----------| (a.bo_offset=n,b.bo_offset=m)
*
*
* 10) Existent mapping is overlapped by the requested mapping, both having the
* same backing BO with a contiguous offset. Indicate the backing PTEs of
* the old mapping could be kept.
*
* ::
*
* 0 a 2
* old: |-----------| (bo_offset=n)
*
* 1 a 3
* req: |-----------| (bo_offset=n+1)
*
* 0 a' 1 a 3
* new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1)
*
*
* 11) Requested mapping's range is a centered subset of the existent one
* having a different backing BO. Hence, map the requested mapping and split
* up the existent one in two mappings, adjusting the BO offset of the right
* one accordingly.
*
* ::
*
* 0 a 3
* old: |-----------------| (bo_offset=n)
*
* 1 b 2
* req: |-----| (bo_offset=m)
*
* 0 a 1 b 2 a' 3
* new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2)
*
*
* 12) Requested mapping is a contiguous subset of the existent one. Split it
* up, but indicate that the backing PTEs could be kept.
*
* ::
*
* 0 a 3
* old: |-----------------| (bo_offset=n)
*
* 1 a 2
* req: |-----| (bo_offset=n+1)
*
* 0 a' 1 a 2 a'' 3
 * new: |-----|-----|-----| (a'.bo_offset=n, a.bo_offset=n+1, a''.bo_offset=n+2)
*
*
* 13) Existent mapping is a right aligned subset of the requested one, hence
* replace the existent one.
*
* ::
*
* 1 a 2
* old: |-----| (bo_offset=n+1)
*
* 0 a 2
* req: |-----------| (bo_offset=n)
*
* 0 a 2
* new: |-----------| (bo_offset=n)
*
* .. note::
* We expect to see the same result for a request with a different bo
* and/or non-contiguous bo_offset.
*
*
* 14) Existent mapping is a centered subset of the requested one, hence
* replace the existent one.
*
* ::
*
* 1 a 2
* old: |-----| (bo_offset=n+1)
*
* 0 a 3
* req: |----------------| (bo_offset=n)
*
* 0 a 3
* new: |----------------| (bo_offset=n)
*
* .. note::
* We expect to see the same result for a request with a different bo
* and/or non-contiguous bo_offset.
*
*
 * 15) Existent mapping is overlapped at the beginning by the requested mapping
* backed by a different BO. Hence, map the requested mapping and split up
* the existent one, adjusting its BO offset accordingly.
*
* ::
*
* 1 a 3
* old: |-----------| (bo_offset=n)
*
* 0 b 2
* req: |-----------| (bo_offset=m)
*
* 0 b 2 a' 3
* new: |-----------|-----| (b.bo_offset=m,a.bo_offset=n+2)
*/
/**
* DOC: Locking
*
 * Generally, the GPU VA manager does not take care of locking itself; it is
 * the driver's responsibility to take care of locking. Drivers might want to
* protect the following operations: inserting, removing and iterating
* &drm_gpuva objects as well as generating all kinds of operations, such as
* split / merge or prefetch.
*
* The GPU VA manager also does not take care of the locking of the backing
 * &drm_gem_object buffers' GPU VA lists by itself; drivers are responsible for
 * enforcing mutual exclusion using either the GEM's dma_resv lock or alternatively
* a driver specific external lock. For the latter see also
* drm_gem_gpuva_set_lock().
*
* However, the GPU VA manager contains lockdep checks to ensure callers of its
* API hold the corresponding lock whenever the &drm_gem_objects GPU VA list is
* accessed by functions such as drm_gpuva_link() or drm_gpuva_unlink().
*/
/**
* DOC: Examples
*
* This section gives two examples on how to let the DRM GPUVA Manager generate
* &drm_gpuva_op in order to satisfy a given map or unmap request and how to
* make use of them.
*
 * The code below is strictly limited to illustrating the generic usage pattern.
 * To maintain simplicity, it doesn't make use of any abstractions for common
 * code, different (asynchronous) stages with fence signalling critical paths,
* any other helpers or error handling in terms of freeing memory and dropping
* previously taken locks.
*
* 1) Obtain a list of &drm_gpuva_op to create a new mapping::
*
* // Allocates a new &drm_gpuva.
* struct drm_gpuva * driver_gpuva_alloc(void);
*
 * // Typically drivers would embed the &drm_gpuva_manager and &drm_gpuva
* // structure in individual driver structures and lock the dma-resv with
* // drm_exec or similar helpers.
* int driver_mapping_create(struct drm_gpuva_manager *mgr,
* u64 addr, u64 range,
* struct drm_gem_object *obj, u64 offset)
* {
* struct drm_gpuva_ops *ops;
 * struct drm_gpuva_op *op;
*
* driver_lock_va_space();
* ops = drm_gpuva_sm_map_ops_create(mgr, addr, range,
* obj, offset);
* if (IS_ERR(ops))
* return PTR_ERR(ops);
*
* drm_gpuva_for_each_op(op, ops) {
* struct drm_gpuva *va;
*
* switch (op->op) {
* case DRM_GPUVA_OP_MAP:
* va = driver_gpuva_alloc();
* if (!va)
* ; // unwind previous VA space updates,
* // free memory and unlock
*
* driver_vm_map();
* drm_gpuva_map(mgr, va, &op->map);
* drm_gpuva_link(va);
*
* break;
* case DRM_GPUVA_OP_REMAP: {
* struct drm_gpuva *prev = NULL, *next = NULL;
*
* va = op->remap.unmap->va;
*
* if (op->remap.prev) {
* prev = driver_gpuva_alloc();
* if (!prev)
* ; // unwind previous VA space
* // updates, free memory and
* // unlock
* }
*
* if (op->remap.next) {
* next = driver_gpuva_alloc();
* if (!next)
* ; // unwind previous VA space
* // updates, free memory and
* // unlock
* }
*
* driver_vm_remap();
* drm_gpuva_remap(prev, next, &op->remap);
*
* drm_gpuva_unlink(va);
* if (prev)
* drm_gpuva_link(prev);
* if (next)
* drm_gpuva_link(next);
*
* break;
* }
* case DRM_GPUVA_OP_UNMAP:
* va = op->unmap->va;
*
* driver_vm_unmap();
* drm_gpuva_unlink(va);
* drm_gpuva_unmap(&op->unmap);
*
* break;
* default:
* break;
* }
* }
* driver_unlock_va_space();
*
* return 0;
* }
*
* 2) Receive a callback for each &drm_gpuva_op to create a new mapping::
*
* struct driver_context {
* struct drm_gpuva_manager *mgr;
* struct drm_gpuva *new_va;
* struct drm_gpuva *prev_va;
* struct drm_gpuva *next_va;
* };
*
* // ops to pass to drm_gpuva_manager_init()
* static const struct drm_gpuva_fn_ops driver_gpuva_ops = {
* .sm_step_map = driver_gpuva_map,
* .sm_step_remap = driver_gpuva_remap,
* .sm_step_unmap = driver_gpuva_unmap,
* };
*
 * // Typically drivers would embed the &drm_gpuva_manager and &drm_gpuva
* // structure in individual driver structures and lock the dma-resv with
* // drm_exec or similar helpers.
* int driver_mapping_create(struct drm_gpuva_manager *mgr,
* u64 addr, u64 range,
* struct drm_gem_object *obj, u64 offset)
* {
* struct driver_context ctx;
* struct drm_gpuva_ops *ops;
* struct drm_gpuva_op *op;
* int ret = 0;
*
* ctx.mgr = mgr;
*
* ctx.new_va = kzalloc(sizeof(*ctx.new_va), GFP_KERNEL);
* ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL);
* ctx.next_va = kzalloc(sizeof(*ctx.next_va), GFP_KERNEL);
* if (!ctx.new_va || !ctx.prev_va || !ctx.next_va) {
* ret = -ENOMEM;
* goto out;
* }
*
* driver_lock_va_space();
* ret = drm_gpuva_sm_map(mgr, &ctx, addr, range, obj, offset);
* driver_unlock_va_space();
*
* out:
* kfree(ctx.new_va);
* kfree(ctx.prev_va);
* kfree(ctx.next_va);
* return ret;
* }
*
* int driver_gpuva_map(struct drm_gpuva_op *op, void *__ctx)
* {
* struct driver_context *ctx = __ctx;
*
* drm_gpuva_map(ctx->mgr, ctx->new_va, &op->map);
*
* drm_gpuva_link(ctx->new_va);
*
* // prevent the new GPUVA from being freed in
* // driver_mapping_create()
* ctx->new_va = NULL;
*
* return 0;
* }
*
* int driver_gpuva_remap(struct drm_gpuva_op *op, void *__ctx)
* {
* struct driver_context *ctx = __ctx;
*
* drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
*
* drm_gpuva_unlink(op->remap.unmap->va);
* kfree(op->remap.unmap->va);
*
* if (op->remap.prev) {
* drm_gpuva_link(ctx->prev_va);
* ctx->prev_va = NULL;
* }
*
* if (op->remap.next) {
* drm_gpuva_link(ctx->next_va);
* ctx->next_va = NULL;
* }
*
* return 0;
* }
*
* int driver_gpuva_unmap(struct drm_gpuva_op *op, void *__ctx)
* {
* drm_gpuva_unlink(op->unmap.va);
* drm_gpuva_unmap(&op->unmap);
* kfree(op->unmap.va);
*
* return 0;
* }
*/
#define to_drm_gpuva(__node) container_of((__node), struct drm_gpuva, rb.node)
#define GPUVA_START(node) ((node)->va.addr)
#define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1)
/* We do not actually use drm_gpuva_it_next(), tell the compiler to not complain
* about this.
*/
INTERVAL_TREE_DEFINE(struct drm_gpuva, rb.node, u64, rb.__subtree_last,
GPUVA_START, GPUVA_LAST, static __maybe_unused,
drm_gpuva_it)
static int __drm_gpuva_insert(struct drm_gpuva_manager *mgr,
struct drm_gpuva *va);
static void __drm_gpuva_remove(struct drm_gpuva *va);
static bool
drm_gpuva_check_overflow(u64 addr, u64 range)
{
u64 end;
return WARN(check_add_overflow(addr, range, &end),
"GPUVA address limited to %zu bytes.\n", sizeof(end));
}
static bool
drm_gpuva_in_mm_range(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
{
u64 end = addr + range;
u64 mm_start = mgr->mm_start;
u64 mm_end = mm_start + mgr->mm_range;
return addr >= mm_start && end <= mm_end;
}
static bool
drm_gpuva_in_kernel_node(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
{
u64 end = addr + range;
u64 kstart = mgr->kernel_alloc_node.va.addr;
u64 krange = mgr->kernel_alloc_node.va.range;
u64 kend = kstart + krange;
return krange && addr < kend && kstart < end;
}
static bool
drm_gpuva_range_valid(struct drm_gpuva_manager *mgr,
u64 addr, u64 range)
{
return !drm_gpuva_check_overflow(addr, range) &&
drm_gpuva_in_mm_range(mgr, addr, range) &&
!drm_gpuva_in_kernel_node(mgr, addr, range);
}
/**
* drm_gpuva_manager_init() - initialize a &drm_gpuva_manager
* @mgr: pointer to the &drm_gpuva_manager to initialize
* @name: the name of the GPU VA space
* @start_offset: the start offset of the GPU VA space
* @range: the size of the GPU VA space
* @reserve_offset: the start of the kernel reserved GPU VA area
* @reserve_range: the size of the kernel reserved GPU VA area
* @ops: &drm_gpuva_fn_ops called on &drm_gpuva_sm_map / &drm_gpuva_sm_unmap
*
* The &drm_gpuva_manager must be initialized with this function before use.
*
* Note that @mgr must be cleared to 0 before calling this function. The given
* &name is expected to be managed by the surrounding driver structures.
*/
void
drm_gpuva_manager_init(struct drm_gpuva_manager *mgr,
const char *name,
u64 start_offset, u64 range,
u64 reserve_offset, u64 reserve_range,
const struct drm_gpuva_fn_ops *ops)
{
mgr->rb.tree = RB_ROOT_CACHED;
INIT_LIST_HEAD(&mgr->rb.list);
drm_gpuva_check_overflow(start_offset, range);
mgr->mm_start = start_offset;
mgr->mm_range = range;
mgr->name = name ? name : "unknown";
mgr->ops = ops;
memset(&mgr->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
if (reserve_range) {
mgr->kernel_alloc_node.va.addr = reserve_offset;
mgr->kernel_alloc_node.va.range = reserve_range;
if (likely(!drm_gpuva_check_overflow(reserve_offset,
reserve_range)))
__drm_gpuva_insert(mgr, &mgr->kernel_alloc_node);
}
}
EXPORT_SYMBOL_GPL(drm_gpuva_manager_init);
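/*
 * A minimal sketch (the name, sizes and ops structure are made up) reserving
 * the first page of a 4 GiB VA space for the kernel:
 *
 *	drm_gpuva_manager_init(&mgr, "example-va-space",
 *			       0, 1ull << 32,
 *			       0, PAGE_SIZE,
 *			       &driver_gpuva_ops);
 */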
/**
* drm_gpuva_manager_destroy() - cleanup a &drm_gpuva_manager
* @mgr: pointer to the &drm_gpuva_manager to clean up
*
* Note that it is a bug to call this function on a manager that still
* holds GPU VA mappings.
*/
void
drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr)
{
mgr->name = NULL;
if (mgr->kernel_alloc_node.va.range)
__drm_gpuva_remove(&mgr->kernel_alloc_node);
WARN(!RB_EMPTY_ROOT(&mgr->rb.tree.rb_root),
"GPUVA tree is not empty, potentially leaking memory.");
}
EXPORT_SYMBOL_GPL(drm_gpuva_manager_destroy);
static int
__drm_gpuva_insert(struct drm_gpuva_manager *mgr,
struct drm_gpuva *va)
{
struct rb_node *node;
struct list_head *head;
if (drm_gpuva_it_iter_first(&mgr->rb.tree,
GPUVA_START(va),
GPUVA_LAST(va)))
return -EEXIST;
va->mgr = mgr;
drm_gpuva_it_insert(va, &mgr->rb.tree);
node = rb_prev(&va->rb.node);
if (node)
head = &(to_drm_gpuva(node))->rb.entry;
else
head = &mgr->rb.list;
list_add(&va->rb.entry, head);
return 0;
}
/**
* drm_gpuva_insert() - insert a &drm_gpuva
* @mgr: the &drm_gpuva_manager to insert the &drm_gpuva in
* @va: the &drm_gpuva to insert
*
* Insert a &drm_gpuva with a given address and range into a
* &drm_gpuva_manager.
*
* It is safe to use this function using the safe versions of iterating the GPU
* VA space, such as drm_gpuva_for_each_va_safe() and
* drm_gpuva_for_each_va_range_safe().
*
* Returns: 0 on success, negative error code on failure.
*/
int
drm_gpuva_insert(struct drm_gpuva_manager *mgr,
struct drm_gpuva *va)
{
u64 addr = va->va.addr;
u64 range = va->va.range;
if (unlikely(!drm_gpuva_range_valid(mgr, addr, range)))
return -EINVAL;
return __drm_gpuva_insert(mgr, va);
}
EXPORT_SYMBOL_GPL(drm_gpuva_insert);
static void
__drm_gpuva_remove(struct drm_gpuva *va)
{
drm_gpuva_it_remove(va, &va->mgr->rb.tree);
list_del_init(&va->rb.entry);
}
/**
* drm_gpuva_remove() - remove a &drm_gpuva
* @va: the &drm_gpuva to remove
*
 * This removes the given &va from the underlying tree.
*
* It is safe to use this function using the safe versions of iterating the GPU
* VA space, such as drm_gpuva_for_each_va_safe() and
* drm_gpuva_for_each_va_range_safe().
*/
void
drm_gpuva_remove(struct drm_gpuva *va)
{
struct drm_gpuva_manager *mgr = va->mgr;
if (unlikely(va == &mgr->kernel_alloc_node)) {
WARN(1, "Can't destroy kernel reserved node.\n");
return;
}
__drm_gpuva_remove(va);
}
EXPORT_SYMBOL_GPL(drm_gpuva_remove);
/**
* drm_gpuva_link() - link a &drm_gpuva
* @va: the &drm_gpuva to link
*
* This adds the given &va to the GPU VA list of the &drm_gem_object it is
* associated with.
*
* This function expects the caller to protect the GEM's GPUVA list against
* concurrent access using the GEMs dma_resv lock.
*/
void
drm_gpuva_link(struct drm_gpuva *va)
{
struct drm_gem_object *obj = va->gem.obj;
if (unlikely(!obj))
return;
drm_gem_gpuva_assert_lock_held(obj);
list_add_tail(&va->gem.entry, &obj->gpuva.list);
}
EXPORT_SYMBOL_GPL(drm_gpuva_link);
/**
* drm_gpuva_unlink() - unlink a &drm_gpuva
* @va: the &drm_gpuva to unlink
*
* This removes the given &va from the GPU VA list of the &drm_gem_object it is
* associated with.
*
* This function expects the caller to protect the GEM's GPUVA list against
* concurrent access using the GEMs dma_resv lock.
*/
void
drm_gpuva_unlink(struct drm_gpuva *va)
{
struct drm_gem_object *obj = va->gem.obj;
if (unlikely(!obj))
return;
drm_gem_gpuva_assert_lock_held(obj);
list_del_init(&va->gem.entry);
}
EXPORT_SYMBOL_GPL(drm_gpuva_unlink);
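/*
 * Minimal sketch ("obj", "va" and "ret" are hypothetical) of linking a
 * &drm_gpuva while holding the GEM's dma_resv lock, as required by the
 * documentation above:
 *
 *	ret = dma_resv_lock(obj->resv, NULL);
 *	if (!ret) {
 *		drm_gpuva_link(va);
 *		dma_resv_unlock(obj->resv);
 *	}
 */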
/**
* drm_gpuva_find_first() - find the first &drm_gpuva in the given range
* @mgr: the &drm_gpuva_manager to search in
* @addr: the &drm_gpuvas address
* @range: the &drm_gpuvas range
*
* Returns: the first &drm_gpuva within the given range
*/
struct drm_gpuva *
drm_gpuva_find_first(struct drm_gpuva_manager *mgr,
u64 addr, u64 range)
{
u64 last = addr + range - 1;
return drm_gpuva_it_iter_first(&mgr->rb.tree, addr, last);
}
EXPORT_SYMBOL_GPL(drm_gpuva_find_first);
/**
* drm_gpuva_find() - find a &drm_gpuva
* @mgr: the &drm_gpuva_manager to search in
* @addr: the &drm_gpuvas address
* @range: the &drm_gpuvas range
*
* Returns: the &drm_gpuva at a given &addr and with a given &range
*/
struct drm_gpuva *
drm_gpuva_find(struct drm_gpuva_manager *mgr,
u64 addr, u64 range)
{
struct drm_gpuva *va;
va = drm_gpuva_find_first(mgr, addr, range);
if (!va)
goto out;
if (va->va.addr != addr ||
va->va.range != range)
goto out;
return va;
out:
return NULL;
}
EXPORT_SYMBOL_GPL(drm_gpuva_find);
/**
* drm_gpuva_find_prev() - find the &drm_gpuva before the given address
* @mgr: the &drm_gpuva_manager to search in
* @start: the given GPU VA's start address
*
* Find the adjacent &drm_gpuva before the GPU VA with given &start address.
*
* Note that if there is any free space between the GPU VA mappings no mapping
* is returned.
*
* Returns: a pointer to the found &drm_gpuva or NULL if none was found
*/
struct drm_gpuva *
drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start)
{
if (!drm_gpuva_range_valid(mgr, start - 1, 1))
return NULL;
return drm_gpuva_it_iter_first(&mgr->rb.tree, start - 1, start);
}
EXPORT_SYMBOL_GPL(drm_gpuva_find_prev);
/**
* drm_gpuva_find_next() - find the &drm_gpuva after the given address
* @mgr: the &drm_gpuva_manager to search in
* @end: the given GPU VA's end address
*
* Find the adjacent &drm_gpuva after the GPU VA with given &end address.
*
* Note that if there is any free space between the GPU VA mappings no mapping
* is returned.
*
* Returns: a pointer to the found &drm_gpuva or NULL if none was found
*/
struct drm_gpuva *
drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end)
{
if (!drm_gpuva_range_valid(mgr, end, 1))
return NULL;
return drm_gpuva_it_iter_first(&mgr->rb.tree, end, end + 1);
}
EXPORT_SYMBOL_GPL(drm_gpuva_find_next);
/**
* drm_gpuva_interval_empty() - indicate whether a given interval of the VA space
* is empty
* @mgr: the &drm_gpuva_manager to check the range for
* @addr: the start address of the range
* @range: the range of the interval
*
* Returns: true if the interval is empty, false otherwise
*/
bool
drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
{
return !drm_gpuva_find_first(mgr, addr, range);
}
EXPORT_SYMBOL_GPL(drm_gpuva_interval_empty);
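/*
 * Hedged sketch ("addr", "range" and the chosen error code are driver
 * specific and made up here) of probing whether a candidate region is still
 * free before setting up a new mapping for it:
 *
 *	if (!drm_gpuva_interval_empty(mgr, addr, range))
 *		return -EBUSY;
 */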
/**
* drm_gpuva_map() - helper to insert a &drm_gpuva according to a
* &drm_gpuva_op_map
* @mgr: the &drm_gpuva_manager
* @va: the &drm_gpuva to insert
* @op: the &drm_gpuva_op_map to initialize @va with
*
* Initializes the @va from the @op and inserts it into the given @mgr.
*/
void
drm_gpuva_map(struct drm_gpuva_manager *mgr,
struct drm_gpuva *va,
struct drm_gpuva_op_map *op)
{
drm_gpuva_init_from_op(va, op);
drm_gpuva_insert(mgr, va);
}
EXPORT_SYMBOL_GPL(drm_gpuva_map);
/**
* drm_gpuva_remap() - helper to remap a &drm_gpuva according to a
* &drm_gpuva_op_remap
* @prev: the &drm_gpuva to remap when keeping the start of a mapping
* @next: the &drm_gpuva to remap when keeping the end of a mapping
* @op: the &drm_gpuva_op_remap to initialize @prev and @next with
*
* Removes the currently mapped &drm_gpuva and remaps it using @prev and/or
* @next.
*/
void
drm_gpuva_remap(struct drm_gpuva *prev,
struct drm_gpuva *next,
struct drm_gpuva_op_remap *op)
{
struct drm_gpuva *curr = op->unmap->va;
struct drm_gpuva_manager *mgr = curr->mgr;
drm_gpuva_remove(curr);
if (op->prev) {
drm_gpuva_init_from_op(prev, op->prev);
drm_gpuva_insert(mgr, prev);
}
if (op->next) {
drm_gpuva_init_from_op(next, op->next);
drm_gpuva_insert(mgr, next);
}
}
EXPORT_SYMBOL_GPL(drm_gpuva_remap);
/**
* drm_gpuva_unmap() - helper to remove a &drm_gpuva according to a
* &drm_gpuva_op_unmap
* @op: the &drm_gpuva_op_unmap specifying the &drm_gpuva to remove
*
* Removes the &drm_gpuva associated with the &drm_gpuva_op_unmap.
*/
void
drm_gpuva_unmap(struct drm_gpuva_op_unmap *op)
{
drm_gpuva_remove(op->va);
}
EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
static int
op_map_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
u64 addr, u64 range,
struct drm_gem_object *obj, u64 offset)
{
struct drm_gpuva_op op = {};
op.op = DRM_GPUVA_OP_MAP;
op.map.va.addr = addr;
op.map.va.range = range;
op.map.gem.obj = obj;
op.map.gem.offset = offset;
return fn->sm_step_map(&op, priv);
}
static int
op_remap_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
struct drm_gpuva_op_map *prev,
struct drm_gpuva_op_map *next,
struct drm_gpuva_op_unmap *unmap)
{
struct drm_gpuva_op op = {};
struct drm_gpuva_op_remap *r;
op.op = DRM_GPUVA_OP_REMAP;
r = &op.remap;
r->prev = prev;
r->next = next;
r->unmap = unmap;
return fn->sm_step_remap(&op, priv);
}
static int
op_unmap_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
struct drm_gpuva *va, bool merge)
{
struct drm_gpuva_op op = {};
op.op = DRM_GPUVA_OP_UNMAP;
op.unmap.va = va;
op.unmap.keep = merge;
return fn->sm_step_unmap(&op, priv);
}
static int
__drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
const struct drm_gpuva_fn_ops *ops, void *priv,
u64 req_addr, u64 req_range,
struct drm_gem_object *req_obj, u64 req_offset)
{
struct drm_gpuva *va, *next;
u64 req_end = req_addr + req_range;
int ret;
if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range)))
return -EINVAL;
drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) {
struct drm_gem_object *obj = va->gem.obj;
u64 offset = va->gem.offset;
u64 addr = va->va.addr;
u64 range = va->va.range;
u64 end = addr + range;
bool merge = !!va->gem.obj;
if (addr == req_addr) {
merge &= obj == req_obj &&
offset == req_offset;
if (end == req_end) {
ret = op_unmap_cb(ops, priv, va, merge);
if (ret)
return ret;
break;
}
if (end < req_end) {
ret = op_unmap_cb(ops, priv, va, merge);
if (ret)
return ret;
continue;
}
if (end > req_end) {
struct drm_gpuva_op_map n = {
.va.addr = req_end,
.va.range = range - req_range,
.gem.obj = obj,
.gem.offset = offset + req_range,
};
struct drm_gpuva_op_unmap u = {
.va = va,
.keep = merge,
};
ret = op_remap_cb(ops, priv, NULL, &n, &u);
if (ret)
return ret;
break;
}
} else if (addr < req_addr) {
u64 ls_range = req_addr - addr;
struct drm_gpuva_op_map p = {
.va.addr = addr,
.va.range = ls_range,
.gem.obj = obj,
.gem.offset = offset,
};
struct drm_gpuva_op_unmap u = { .va = va };
merge &= obj == req_obj &&
offset + ls_range == req_offset;
u.keep = merge;
if (end == req_end) {
ret = op_remap_cb(ops, priv, &p, NULL, &u);
if (ret)
return ret;
break;
}
if (end < req_end) {
ret = op_remap_cb(ops, priv, &p, NULL, &u);
if (ret)
return ret;
continue;
}
if (end > req_end) {
struct drm_gpuva_op_map n = {
.va.addr = req_end,
.va.range = end - req_end,
.gem.obj = obj,
.gem.offset = offset + ls_range +
req_range,
};
ret = op_remap_cb(ops, priv, &p, &n, &u);
if (ret)
return ret;
break;
}
} else if (addr > req_addr) {
merge &= obj == req_obj &&
offset == req_offset +
(addr - req_addr);
if (end == req_end) {
ret = op_unmap_cb(ops, priv, va, merge);
if (ret)
return ret;
break;
}
if (end < req_end) {
ret = op_unmap_cb(ops, priv, va, merge);
if (ret)
return ret;
continue;
}
if (end > req_end) {
struct drm_gpuva_op_map n = {
.va.addr = req_end,
.va.range = end - req_end,
.gem.obj = obj,
.gem.offset = offset + req_end - addr,
};
struct drm_gpuva_op_unmap u = {
.va = va,
.keep = merge,
};
ret = op_remap_cb(ops, priv, NULL, &n, &u);
if (ret)
return ret;
break;
}
}
}
return op_map_cb(ops, priv,
req_addr, req_range,
req_obj, req_offset);
}
static int
__drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr,
const struct drm_gpuva_fn_ops *ops, void *priv,
u64 req_addr, u64 req_range)
{
struct drm_gpuva *va, *next;
u64 req_end = req_addr + req_range;
int ret;
if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range)))
return -EINVAL;
drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) {
struct drm_gpuva_op_map prev = {}, next = {};
bool prev_split = false, next_split = false;
struct drm_gem_object *obj = va->gem.obj;
u64 offset = va->gem.offset;
u64 addr = va->va.addr;
u64 range = va->va.range;
u64 end = addr + range;
if (addr < req_addr) {
prev.va.addr = addr;
prev.va.range = req_addr - addr;
prev.gem.obj = obj;
prev.gem.offset = offset;
prev_split = true;
}
if (end > req_end) {
next.va.addr = req_end;
next.va.range = end - req_end;
next.gem.obj = obj;
next.gem.offset = offset + (req_end - addr);
next_split = true;
}
if (prev_split || next_split) {
struct drm_gpuva_op_unmap unmap = { .va = va };
ret = op_remap_cb(ops, priv,
prev_split ? &prev : NULL,
next_split ? &next : NULL,
&unmap);
if (ret)
return ret;
} else {
ret = op_unmap_cb(ops, priv, va, false);
if (ret)
return ret;
}
}
return 0;
}
/**
* drm_gpuva_sm_map() - creates the &drm_gpuva_op split/merge steps
* @mgr: the &drm_gpuva_manager representing the GPU VA space
* @req_addr: the start address of the new mapping
* @req_range: the range of the new mapping
* @req_obj: the &drm_gem_object to map
* @req_offset: the offset within the &drm_gem_object
* @priv: pointer to a driver private data structure
*
* This function iterates the given range of the GPU VA space. It utilizes the
* &drm_gpuva_fn_ops to call back into the driver providing the split and merge
* steps.
*
* Drivers may use these callbacks to update the GPU VA space right away within
* the callback. In case the driver decides to copy and store the operations for
* later processing neither this function nor &drm_gpuva_sm_unmap is allowed to
* be called before the &drm_gpuva_manager's view of the GPU VA space was
* updated with the previous set of operations. To update the
 * &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert() and
 * drm_gpuva_remove() should be used.
*
* A sequence of callbacks can contain map, unmap and remap operations, but
* the sequence of callbacks might also be empty if no operation is required,
* e.g. if the requested mapping already exists in the exact same way.
*
* There can be an arbitrary amount of unmap operations, a maximum of two remap
* operations and a single map operation. The latter one represents the original
* map operation requested by the caller.
*
* Returns: 0 on success or a negative error code
*/
int
drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv,
u64 req_addr, u64 req_range,
struct drm_gem_object *req_obj, u64 req_offset)
{
const struct drm_gpuva_fn_ops *ops = mgr->ops;
if (unlikely(!(ops && ops->sm_step_map &&
ops->sm_step_remap &&
ops->sm_step_unmap)))
return -EINVAL;
return __drm_gpuva_sm_map(mgr, ops, priv,
req_addr, req_range,
req_obj, req_offset);
}
EXPORT_SYMBOL_GPL(drm_gpuva_sm_map);
/**
* drm_gpuva_sm_unmap() - creates the &drm_gpuva_ops to split on unmap
* @mgr: the &drm_gpuva_manager representing the GPU VA space
* @priv: pointer to a driver private data structure
* @req_addr: the start address of the range to unmap
* @req_range: the range of the mappings to unmap
*
* This function iterates the given range of the GPU VA space. It utilizes the
* &drm_gpuva_fn_ops to call back into the driver providing the operations to
* unmap and, if required, split existent mappings.
*
* Drivers may use these callbacks to update the GPU VA space right away within
* the callback. In case the driver decides to copy and store the operations for
* later processing neither this function nor &drm_gpuva_sm_map is allowed to be
* called before the &drm_gpuva_manager's view of the GPU VA space was updated
* with the previous set of operations. To update the &drm_gpuva_manager's view
 * of the GPU VA space drm_gpuva_insert() and drm_gpuva_remove() should be
 * used.
*
* A sequence of callbacks can contain unmap and remap operations, depending on
* whether there are actual overlapping mappings to split.
*
* There can be an arbitrary amount of unmap operations and a maximum of two
* remap operations.
*
* Returns: 0 on success or a negative error code
*/
int
drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv,
u64 req_addr, u64 req_range)
{
const struct drm_gpuva_fn_ops *ops = mgr->ops;
if (unlikely(!(ops && ops->sm_step_remap &&
ops->sm_step_unmap)))
return -EINVAL;
return __drm_gpuva_sm_unmap(mgr, ops, priv,
req_addr, req_range);
}
EXPORT_SYMBOL_GPL(drm_gpuva_sm_unmap);
static struct drm_gpuva_op *
gpuva_op_alloc(struct drm_gpuva_manager *mgr)
{
const struct drm_gpuva_fn_ops *fn = mgr->ops;
struct drm_gpuva_op *op;
if (fn && fn->op_alloc)
op = fn->op_alloc();
else
op = kzalloc(sizeof(*op), GFP_KERNEL);
if (unlikely(!op))
return NULL;
return op;
}
static void
gpuva_op_free(struct drm_gpuva_manager *mgr,
struct drm_gpuva_op *op)
{
const struct drm_gpuva_fn_ops *fn = mgr->ops;
if (fn && fn->op_free)
fn->op_free(op);
else
kfree(op);
}
static int
drm_gpuva_sm_step(struct drm_gpuva_op *__op,
void *priv)
{
struct {
struct drm_gpuva_manager *mgr;
struct drm_gpuva_ops *ops;
} *args = priv;
struct drm_gpuva_manager *mgr = args->mgr;
struct drm_gpuva_ops *ops = args->ops;
struct drm_gpuva_op *op;
op = gpuva_op_alloc(mgr);
if (unlikely(!op))
goto err;
memcpy(op, __op, sizeof(*op));
if (op->op == DRM_GPUVA_OP_REMAP) {
struct drm_gpuva_op_remap *__r = &__op->remap;
struct drm_gpuva_op_remap *r = &op->remap;
r->unmap = kmemdup(__r->unmap, sizeof(*r->unmap),
GFP_KERNEL);
if (unlikely(!r->unmap))
goto err_free_op;
if (__r->prev) {
r->prev = kmemdup(__r->prev, sizeof(*r->prev),
GFP_KERNEL);
if (unlikely(!r->prev))
goto err_free_unmap;
}
if (__r->next) {
r->next = kmemdup(__r->next, sizeof(*r->next),
GFP_KERNEL);
if (unlikely(!r->next))
goto err_free_prev;
}
}
list_add_tail(&op->entry, &ops->list);
return 0;
err_free_prev:
	kfree(op->remap.prev);
err_free_unmap:
	kfree(op->remap.unmap);
err_free_op:
gpuva_op_free(mgr, op);
err:
return -ENOMEM;
}
static const struct drm_gpuva_fn_ops gpuva_list_ops = {
.sm_step_map = drm_gpuva_sm_step,
.sm_step_remap = drm_gpuva_sm_step,
.sm_step_unmap = drm_gpuva_sm_step,
};
/**
* drm_gpuva_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
* @mgr: the &drm_gpuva_manager representing the GPU VA space
* @req_addr: the start address of the new mapping
* @req_range: the range of the new mapping
* @req_obj: the &drm_gem_object to map
* @req_offset: the offset within the &drm_gem_object
*
* This function creates a list of operations to perform splitting and merging
* of existent mapping(s) with the newly requested one.
*
 * The list can be iterated with &drm_gpuva_for_each_op and must be processed
 * in the given order. It can contain map, unmap and remap operations, but it
 * can also be empty if no operation is required, e.g. if the requested mapping
 * already exists in the exact same way.
 *
 * There can be an arbitrary number of unmap operations, a maximum of two remap
 * operations and a single map operation. The latter represents the original
 * map operation requested by the caller.
*
* Note that before calling this function again with another mapping request it
* is necessary to update the &drm_gpuva_manager's view of the GPU VA space. The
* previously obtained operations must be either processed or abandoned. To
* update the &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
* drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
* used.
*
 * After the caller has finished processing the returned &drm_gpuva_ops, they
 * must be freed with &drm_gpuva_ops_free.
*
* Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
*/
struct drm_gpuva_ops *
drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr,
u64 req_addr, u64 req_range,
struct drm_gem_object *req_obj, u64 req_offset)
{
struct drm_gpuva_ops *ops;
struct {
struct drm_gpuva_manager *mgr;
struct drm_gpuva_ops *ops;
} args;
int ret;
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (unlikely(!ops))
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&ops->list);
args.mgr = mgr;
args.ops = ops;
ret = __drm_gpuva_sm_map(mgr, &gpuva_list_ops, &args,
req_addr, req_range,
req_obj, req_offset);
if (ret)
goto err_free_ops;
return ops;
err_free_ops:
drm_gpuva_ops_free(mgr, ops);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gpuva_sm_map_ops_create);
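/*
 * Illustrative sketch (not part of the original file): a typical consumer of
 * drm_gpuva_sm_map_ops_create() walks the returned list, applies each
 * operation to its page tables and to the &drm_gpuva_manager, and finally
 * frees the list. The example_apply_*() helpers below are hypothetical
 * driver-specific functions.
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *	int ret = 0;
 *
 *	ops = drm_gpuva_sm_map_ops_create(mgr, addr, range, obj, offset);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		switch (op->op) {
 *		case DRM_GPUVA_OP_MAP:
 *			ret = example_apply_map(mgr, &op->map);
 *			break;
 *		case DRM_GPUVA_OP_REMAP:
 *			ret = example_apply_remap(mgr, &op->remap);
 *			break;
 *		case DRM_GPUVA_OP_UNMAP:
 *			ret = example_apply_unmap(mgr, op->unmap.va);
 *			break;
 *		default:
 *			ret = -EINVAL;
 *			break;
 *		}
 *		if (ret)
 *			break;
 *	}
 *
 *	drm_gpuva_ops_free(mgr, ops);
 */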
/**
* drm_gpuva_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
* unmap
* @mgr: the &drm_gpuva_manager representing the GPU VA space
* @req_addr: the start address of the range to unmap
* @req_range: the range of the mappings to unmap
*
* This function creates a list of operations to perform unmapping and, if
* required, splitting of the mappings overlapping the unmap range.
*
* The list can be iterated with &drm_gpuva_for_each_op and must be processed
* in the given order. It can contain unmap and remap operations, depending on
* whether there are actual overlapping mappings to split.
*
 * There can be an arbitrary number of unmap operations and a maximum of two
 * remap operations.
*
* Note that before calling this function again with another range to unmap it
* is necessary to update the &drm_gpuva_manager's view of the GPU VA space. The
* previously obtained operations must be processed or abandoned. To update the
* &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
* drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
* used.
*
 * After the caller has finished processing the returned &drm_gpuva_ops, they
 * must be freed with &drm_gpuva_ops_free.
*
* Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
*/
struct drm_gpuva_ops *
drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr,
u64 req_addr, u64 req_range)
{
struct drm_gpuva_ops *ops;
struct {
struct drm_gpuva_manager *mgr;
struct drm_gpuva_ops *ops;
} args;
int ret;
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (unlikely(!ops))
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&ops->list);
args.mgr = mgr;
args.ops = ops;
ret = __drm_gpuva_sm_unmap(mgr, &gpuva_list_ops, &args,
req_addr, req_range);
if (ret)
goto err_free_ops;
return ops;
err_free_ops:
drm_gpuva_ops_free(mgr, ops);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gpuva_sm_unmap_ops_create);
/**
* drm_gpuva_prefetch_ops_create() - creates the &drm_gpuva_ops to prefetch
* @mgr: the &drm_gpuva_manager representing the GPU VA space
* @addr: the start address of the range to prefetch
* @range: the range of the mappings to prefetch
*
* This function creates a list of operations to perform prefetching.
*
 * The list can be iterated with &drm_gpuva_for_each_op and must be processed
 * in the given order. It only contains prefetch operations.
 *
 * There can be an arbitrary number of prefetch operations.
*
 * After the caller has finished processing the returned &drm_gpuva_ops, they
 * must be freed with &drm_gpuva_ops_free.
*
* Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
*/
struct drm_gpuva_ops *
drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr,
u64 addr, u64 range)
{
struct drm_gpuva_ops *ops;
struct drm_gpuva_op *op;
struct drm_gpuva *va;
u64 end = addr + range;
int ret;
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (!ops)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&ops->list);
drm_gpuva_for_each_va_range(va, mgr, addr, end) {
op = gpuva_op_alloc(mgr);
if (!op) {
ret = -ENOMEM;
goto err_free_ops;
}
op->op = DRM_GPUVA_OP_PREFETCH;
op->prefetch.va = va;
list_add_tail(&op->entry, &ops->list);
}
return ops;
err_free_ops:
drm_gpuva_ops_free(mgr, ops);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gpuva_prefetch_ops_create);
/**
* drm_gpuva_gem_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
* @mgr: the &drm_gpuva_manager representing the GPU VA space
* @obj: the &drm_gem_object to unmap
*
* This function creates a list of operations to perform unmapping for every
* GPUVA attached to a GEM.
*
 * The list can be iterated with &drm_gpuva_for_each_op and consists of an
 * arbitrary number of unmap operations.
*
 * After the caller has finished processing the returned &drm_gpuva_ops, they
 * must be freed with &drm_gpuva_ops_free.
*
 * It is the caller's responsibility to protect the GEM's GPUVA list against
 * concurrent access using the GEM's dma_resv lock.
*
* Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
*/
struct drm_gpuva_ops *
drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr,
struct drm_gem_object *obj)
{
struct drm_gpuva_ops *ops;
struct drm_gpuva_op *op;
struct drm_gpuva *va;
int ret;
drm_gem_gpuva_assert_lock_held(obj);
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (!ops)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&ops->list);
drm_gem_for_each_gpuva(va, obj) {
op = gpuva_op_alloc(mgr);
if (!op) {
ret = -ENOMEM;
goto err_free_ops;
}
op->op = DRM_GPUVA_OP_UNMAP;
op->unmap.va = va;
list_add_tail(&op->entry, &ops->list);
}
return ops;
err_free_ops:
drm_gpuva_ops_free(mgr, ops);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gpuva_gem_unmap_ops_create);
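/*
 * Illustrative sketch (not part of the original file): since the GEM's GPUVA
 * list must be protected by the GEM's dma_resv lock, a caller would typically
 * wrap the call roughly as below. example_process_unmaps() is a hypothetical
 * driver-specific helper.
 *
 *	struct drm_gpuva_ops *ops;
 *	int ret;
 *
 *	ret = dma_resv_lock(obj->resv, NULL);
 *	if (ret)
 *		return ret;
 *
 *	ops = drm_gpuva_gem_unmap_ops_create(mgr, obj);
 *	dma_resv_unlock(obj->resv);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	ret = example_process_unmaps(mgr, ops);
 *	drm_gpuva_ops_free(mgr, ops);
 *	return ret;
 */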
/**
* drm_gpuva_ops_free() - free the given &drm_gpuva_ops
* @mgr: the &drm_gpuva_manager the ops were created for
* @ops: the &drm_gpuva_ops to free
*
* Frees the given &drm_gpuva_ops structure including all the ops associated
* with it.
*/
void
drm_gpuva_ops_free(struct drm_gpuva_manager *mgr,
struct drm_gpuva_ops *ops)
{
struct drm_gpuva_op *op, *next;
drm_gpuva_for_each_op_safe(op, next, ops) {
list_del(&op->entry);
if (op->op == DRM_GPUVA_OP_REMAP) {
kfree(op->remap.prev);
kfree(op->remap.next);
kfree(op->remap.unmap);
}
gpuva_op_free(mgr, op);
}
kfree(ops);
}
EXPORT_SYMBOL_GPL(drm_gpuva_ops_free);
| linux-master | drivers/gpu/drm/drm_gpuva_mgr.c |
/*
* Copyright (c) 2006-2008 Intel Corporation
* Copyright (c) 2007 Dave Airlie <[email protected]>
* Copyright (c) 2008 Red Hat Inc.
* Copyright (c) 2016 Intel Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_mode.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
/**
* DOC: overview
*
* The KMS API doesn't standardize backing storage object creation and leaves it
* to driver-specific ioctls. Furthermore actually creating a buffer object even
* for GEM-based drivers is done through a driver-specific ioctl - GEM only has
* a common userspace interface for sharing and destroying objects. While not an
* issue for full-fledged graphics stacks that include device-specific userspace
* components (in libdrm for instance), this limit makes DRM-based early boot
* graphics unnecessarily complex.
*
* Dumb objects partly alleviate the problem by providing a standard API to
* create dumb buffers suitable for scanout, which can then be used to create
* KMS frame buffers.
*
* To support dumb objects drivers must implement the &drm_driver.dumb_create
* and &drm_driver.dumb_map_offset operations (the latter defaults to
* drm_gem_dumb_map_offset() if not set). Drivers that don't use GEM handles
* additionally need to implement the &drm_driver.dumb_destroy operation. See
* the callbacks for further details.
*
* Note that dumb objects may not be used for gpu acceleration, as has been
* attempted on some ARM embedded platforms. Such drivers really must have
* a hardware-specific ioctl to allocate suitable buffer objects.
*/
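/*
 * Illustrative sketch (not part of the original file): a GEM-based driver
 * typically only has to wire up &drm_driver.dumb_create; the mmap offset
 * lookup falls back to drm_gem_dumb_map_offset() when dumb_map_offset is not
 * set, and destroying a dumb buffer defaults to deleting the GEM handle.
 * example_dumb_create and "example" are hypothetical driver-specific names.
 *
 *	static const struct drm_driver example_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		.dumb_create = example_dumb_create,
 *		.name = "example",
 *	};
 */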
int drm_mode_create_dumb(struct drm_device *dev,
struct drm_mode_create_dumb *args,
struct drm_file *file_priv)
{
u32 cpp, stride, size;
if (!dev->driver->dumb_create)
return -ENOSYS;
if (!args->width || !args->height || !args->bpp)
return -EINVAL;
/* overflow checks for 32bit size calculations */
if (args->bpp > U32_MAX - 8)
return -EINVAL;
cpp = DIV_ROUND_UP(args->bpp, 8);
if (cpp > U32_MAX / args->width)
return -EINVAL;
stride = cpp * args->width;
if (args->height > U32_MAX / stride)
return -EINVAL;
/* test for wrap-around */
size = args->height * stride;
if (PAGE_ALIGN(size) == 0)
return -EINVAL;
/*
* handle, pitch and size are output parameters. Zero them out to
* prevent drivers from accidentally using uninitialized data. Since
	 * not all existing userspace clears these fields properly, we
	 * cannot reject the IOCTL if they contain garbage.
*/
args->handle = 0;
args->pitch = 0;
args->size = 0;
return dev->driver->dumb_create(file_priv, dev, args);
}
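/*
 * Worked example (illustrative, not part of the original file): for a
 * 1920x1080 XRGB8888 dumb buffer request (bpp = 32) the checks above yield
 * cpp = DIV_ROUND_UP(32, 8) = 4, stride = 4 * 1920 = 7680 bytes and
 * size = 1080 * 7680 = 8294400 bytes, all comfortably below U32_MAX, so the
 * request is handed to the driver's dumb_create implementation.
 */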
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
return drm_mode_create_dumb(dev, data, file_priv);
}
/**
* drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* Allocate an offset in the drm device node's address space to be able to
* memory map a dumb buffer.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_map_dumb *args = data;
if (!dev->driver->dumb_create)
return -ENOSYS;
if (dev->driver->dumb_map_offset)
return dev->driver->dumb_map_offset(file_priv, dev,
args->handle,
&args->offset);
else
return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
&args->offset);
}
int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle,
struct drm_file *file_priv)
{
if (!dev->driver->dumb_create)
return -ENOSYS;
return drm_gem_handle_delete(file_priv, handle);
}
int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_destroy_dumb *args = data;
return drm_mode_destroy_dumb(dev, args->handle, file_priv);
}
| linux-master | drivers/gpu/drm/drm_dumb_buffers.c |
/*
* Copyright (c) 2016 Intel Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include <drm/drm_auth.h>
#include <drm/drm_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_file.h>
#include <drm/drm_managed.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_sysfs.h>
#include <drm/drm_utils.h>
#include <linux/property.h>
#include <linux/uaccess.h>
#include <video/cmdline.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
/**
* DOC: overview
*
 * In DRM connectors are the general abstraction for display sinks, and also
 * include fixed panels or anything else that can display pixels in some form.
 * As opposed to all other KMS objects representing hardware (like CRTC,
 * encoder or plane abstractions), connectors can be hotplugged and unplugged
 * at runtime.
* Hence they are reference-counted using drm_connector_get() and
* drm_connector_put().
*
 * A KMS driver must create, initialize, register and attach a &struct
 * drm_connector for each such sink. The instance is created as other KMS
 * objects and initialized by setting the following fields. The connector is
* initialized with a call to drm_connector_init() with a pointer to the
* &struct drm_connector_funcs and a connector type, and then exposed to
* userspace with a call to drm_connector_register().
*
* Connectors must be attached to an encoder to be used. For devices that map
* connectors to encoders 1:1, the connector should be attached at
* initialization time with a call to drm_connector_attach_encoder(). The
* driver must also set the &drm_connector.encoder field to point to the
* attached encoder.
*
 * For connectors which are not fixed (unlike built-in panels) the driver needs
 * to support hotplug notifications. The simplest way to do that is by using the
* probe helpers, see drm_kms_helper_poll_init() for connectors which don't have
* hardware support for hotplug interrupts. Connectors with hardware hotplug
* support can instead use e.g. drm_helper_hpd_irq_event().
*/
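/*
 * Illustrative sketch (not part of the original file): for a driver with a
 * fixed 1:1 connector/encoder mapping, the init flow described above boils
 * down to something like the following. example_connector_funcs and the
 * connector/encoder pointers are hypothetical driver-specific objects;
 * registration normally happens implicitly via drm_dev_register().
 *
 *	ret = drm_connector_init(dev, connector, &example_connector_funcs,
 *				 DRM_MODE_CONNECTOR_HDMIA);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_connector_attach_encoder(connector, encoder);
 *	if (ret)
 *		return ret;
 */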
/*
* Global connector list for drm_connector_find_by_fwnode().
* Note drm_connector_[un]register() first take connector->lock and then
* take the connector_list_lock.
*/
static DEFINE_MUTEX(connector_list_lock);
static LIST_HEAD(connector_list);
struct drm_conn_prop_enum_list {
int type;
const char *name;
struct ida ida;
};
/*
* Connector and encoder types.
*/
static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
{ DRM_MODE_CONNECTOR_Unknown, "Unknown" },
{ DRM_MODE_CONNECTOR_VGA, "VGA" },
{ DRM_MODE_CONNECTOR_DVII, "DVI-I" },
{ DRM_MODE_CONNECTOR_DVID, "DVI-D" },
{ DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
{ DRM_MODE_CONNECTOR_Composite, "Composite" },
{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
{ DRM_MODE_CONNECTOR_LVDS, "LVDS" },
{ DRM_MODE_CONNECTOR_Component, "Component" },
{ DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
{ DRM_MODE_CONNECTOR_DisplayPort, "DP" },
{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
{ DRM_MODE_CONNECTOR_TV, "TV" },
{ DRM_MODE_CONNECTOR_eDP, "eDP" },
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
{ DRM_MODE_CONNECTOR_DSI, "DSI" },
{ DRM_MODE_CONNECTOR_DPI, "DPI" },
{ DRM_MODE_CONNECTOR_WRITEBACK, "Writeback" },
{ DRM_MODE_CONNECTOR_SPI, "SPI" },
{ DRM_MODE_CONNECTOR_USB, "USB" },
};
void drm_connector_ida_init(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
ida_init(&drm_connector_enum_list[i].ida);
}
void drm_connector_ida_destroy(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
ida_destroy(&drm_connector_enum_list[i].ida);
}
/**
* drm_get_connector_type_name - return a string for connector type
* @type: The connector type (DRM_MODE_CONNECTOR_*)
*
* Returns: the name of the connector type, or NULL if the type is not valid.
*/
const char *drm_get_connector_type_name(unsigned int type)
{
if (type < ARRAY_SIZE(drm_connector_enum_list))
return drm_connector_enum_list[type].name;
return NULL;
}
EXPORT_SYMBOL(drm_get_connector_type_name);
/**
* drm_connector_get_cmdline_mode - reads the user's cmdline mode
* @connector: connector to query
*
* The kernel supports per-connector configuration of its consoles through
* use of the video= parameter. This function parses that option and
* extracts the user's specified mode (or enable/disable status) for a
* particular connector. This is typically only used during the early fbdev
* setup.
*/
static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
{
struct drm_cmdline_mode *mode = &connector->cmdline_mode;
const char *option;
option = video_get_options(connector->name);
if (!option)
return;
if (!drm_mode_parse_command_line_for_connector(option,
connector,
mode))
return;
if (mode->force) {
DRM_INFO("forcing %s connector %s\n", connector->name,
drm_get_connector_force_name(mode->force));
connector->force = mode->force;
}
if (mode->panel_orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) {
DRM_INFO("cmdline forces connector %s panel_orientation to %d\n",
connector->name, mode->panel_orientation);
drm_connector_set_panel_orientation(connector,
mode->panel_orientation);
}
DRM_DEBUG_KMS("cmdline mode for connector %s %s %dx%d@%dHz%s%s%s\n",
connector->name, mode->name,
mode->xres, mode->yres,
mode->refresh_specified ? mode->refresh : 60,
mode->rb ? " reduced blanking" : "",
mode->margins ? " with margins" : "",
mode->interlace ? " interlaced" : "");
}
static void drm_connector_free(struct kref *kref)
{
struct drm_connector *connector =
container_of(kref, struct drm_connector, base.refcount);
struct drm_device *dev = connector->dev;
drm_mode_object_unregister(dev, &connector->base);
connector->funcs->destroy(connector);
}
void drm_connector_free_work_fn(struct work_struct *work)
{
struct drm_connector *connector, *n;
struct drm_device *dev =
container_of(work, struct drm_device, mode_config.connector_free_work);
struct drm_mode_config *config = &dev->mode_config;
unsigned long flags;
struct llist_node *freed;
spin_lock_irqsave(&config->connector_list_lock, flags);
freed = llist_del_all(&config->connector_free_list);
spin_unlock_irqrestore(&config->connector_list_lock, flags);
llist_for_each_entry_safe(connector, n, freed, free_node) {
drm_mode_object_unregister(dev, &connector->base);
connector->funcs->destroy(connector);
}
}
static int __drm_connector_init(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type,
struct i2c_adapter *ddc)
{
struct drm_mode_config *config = &dev->mode_config;
int ret;
struct ida *connector_ida =
&drm_connector_enum_list[connector_type].ida;
WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
(!funcs->atomic_destroy_state ||
!funcs->atomic_duplicate_state));
ret = __drm_mode_object_add(dev, &connector->base,
DRM_MODE_OBJECT_CONNECTOR,
false, drm_connector_free);
if (ret)
return ret;
connector->base.properties = &connector->properties;
connector->dev = dev;
connector->funcs = funcs;
/* connector index is used with 32bit bitmasks */
ret = ida_alloc_max(&config->connector_ida, 31, GFP_KERNEL);
if (ret < 0) {
DRM_DEBUG_KMS("Failed to allocate %s connector index: %d\n",
drm_connector_enum_list[connector_type].name,
ret);
goto out_put;
}
connector->index = ret;
ret = 0;
connector->connector_type = connector_type;
connector->connector_type_id =
ida_alloc_min(connector_ida, 1, GFP_KERNEL);
if (connector->connector_type_id < 0) {
ret = connector->connector_type_id;
goto out_put_id;
}
connector->name =
kasprintf(GFP_KERNEL, "%s-%d",
drm_connector_enum_list[connector_type].name,
connector->connector_type_id);
if (!connector->name) {
ret = -ENOMEM;
goto out_put_type_id;
}
/* provide ddc symlink in sysfs */
connector->ddc = ddc;
INIT_LIST_HEAD(&connector->global_connector_list_entry);
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
mutex_init(&connector->mutex);
mutex_init(&connector->edid_override_mutex);
connector->edid_blob_ptr = NULL;
connector->epoch_counter = 0;
connector->tile_blob_ptr = NULL;
connector->status = connector_status_unknown;
connector->display_info.panel_orientation =
DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
drm_connector_get_cmdline_mode(connector);
/* We should add connectors at the end to avoid upsetting the connector
* index too much.
*/
spin_lock_irq(&config->connector_list_lock);
list_add_tail(&connector->head, &config->connector_list);
config->num_connector++;
spin_unlock_irq(&config->connector_list_lock);
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL &&
connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
drm_connector_attach_edid_property(connector);
drm_object_attach_property(&connector->base,
config->dpms_property, 0);
drm_object_attach_property(&connector->base,
config->link_status_property,
0);
drm_object_attach_property(&connector->base,
config->non_desktop_property,
0);
drm_object_attach_property(&connector->base,
config->tile_property,
0);
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
}
connector->debugfs_entry = NULL;
out_put_type_id:
if (ret)
ida_free(connector_ida, connector->connector_type_id);
out_put_id:
if (ret)
ida_free(&config->connector_ida, connector->index);
out_put:
if (ret)
drm_mode_object_unregister(dev, &connector->base);
return ret;
}
/**
* drm_connector_init - Init a preallocated connector
* @dev: DRM device
* @connector: the connector to init
* @funcs: callbacks for this connector
* @connector_type: user visible type of the connector
*
* Initialises a preallocated connector. Connectors should be
* subclassed as part of driver connector objects.
*
* At driver unload time the driver's &drm_connector_funcs.destroy hook
* should call drm_connector_cleanup() and free the connector structure.
* The connector structure should not be allocated with devm_kzalloc().
*
* Note: consider using drmm_connector_init() instead of
* drm_connector_init() to let the DRM managed resource infrastructure
* take care of cleanup and deallocation.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_connector_init(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type)
{
if (drm_WARN_ON(dev, !(funcs && funcs->destroy)))
return -EINVAL;
return __drm_connector_init(dev, connector, funcs, connector_type, NULL);
}
EXPORT_SYMBOL(drm_connector_init);
/**
* drm_connector_init_with_ddc - Init a preallocated connector
* @dev: DRM device
* @connector: the connector to init
* @funcs: callbacks for this connector
* @connector_type: user visible type of the connector
* @ddc: pointer to the associated ddc adapter
*
* Initialises a preallocated connector. Connectors should be
* subclassed as part of driver connector objects.
*
* At driver unload time the driver's &drm_connector_funcs.destroy hook
* should call drm_connector_cleanup() and free the connector structure.
* The connector structure should not be allocated with devm_kzalloc().
*
* Ensures that the ddc field of the connector is correctly set.
*
* Note: consider using drmm_connector_init() instead of
* drm_connector_init_with_ddc() to let the DRM managed resource
* infrastructure take care of cleanup and deallocation.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_connector_init_with_ddc(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type,
struct i2c_adapter *ddc)
{
if (drm_WARN_ON(dev, !(funcs && funcs->destroy)))
return -EINVAL;
return __drm_connector_init(dev, connector, funcs, connector_type, ddc);
}
EXPORT_SYMBOL(drm_connector_init_with_ddc);
static void drm_connector_cleanup_action(struct drm_device *dev,
void *ptr)
{
struct drm_connector *connector = ptr;
drm_connector_cleanup(connector);
}
/**
* drmm_connector_init - Init a preallocated connector
* @dev: DRM device
* @connector: the connector to init
* @funcs: callbacks for this connector
* @connector_type: user visible type of the connector
* @ddc: optional pointer to the associated ddc adapter
*
* Initialises a preallocated connector. Connectors should be
* subclassed as part of driver connector objects.
*
* Cleanup is automatically handled with a call to
* drm_connector_cleanup() in a DRM-managed action.
*
* The connector structure should be allocated with drmm_kzalloc().
*
* Returns:
* Zero on success, error code on failure.
*/
int drmm_connector_init(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type,
struct i2c_adapter *ddc)
{
int ret;
if (drm_WARN_ON(dev, funcs && funcs->destroy))
return -EINVAL;
ret = __drm_connector_init(dev, connector, funcs, connector_type, ddc);
if (ret)
return ret;
ret = drmm_add_action_or_reset(dev, drm_connector_cleanup_action,
connector);
if (ret)
return ret;
return 0;
}
EXPORT_SYMBOL(drmm_connector_init);
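/*
 * Illustrative sketch (not part of the original file): with the DRM-managed
 * variant the connector memory is allocated with drmm_kzalloc() and cleanup
 * happens automatically, so &drm_connector_funcs.destroy must not be set.
 * struct example_output and example_connector_funcs are hypothetical
 * driver-specific names.
 *
 *	struct example_output *output;
 *	int ret;
 *
 *	output = drmm_kzalloc(dev, sizeof(*output), GFP_KERNEL);
 *	if (!output)
 *		return -ENOMEM;
 *
 *	ret = drmm_connector_init(dev, &output->connector,
 *				  &example_connector_funcs,
 *				  DRM_MODE_CONNECTOR_DSI, NULL);
 *	if (ret)
 *		return ret;
 */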
/**
* drm_connector_attach_edid_property - attach edid property.
* @connector: the connector
*
 * Some connector types like DRM_MODE_CONNECTOR_VIRTUAL do not get an
 * edid property attached by default. This function can be used to
* explicitly enable the edid property in these cases.
*/
void drm_connector_attach_edid_property(struct drm_connector *connector)
{
struct drm_mode_config *config = &connector->dev->mode_config;
drm_object_attach_property(&connector->base,
config->edid_property,
0);
}
EXPORT_SYMBOL(drm_connector_attach_edid_property);
/**
* drm_connector_attach_encoder - attach a connector to an encoder
* @connector: connector to attach
* @encoder: encoder to attach @connector to
*
* This function links up a connector to an encoder. Note that the routing
* restrictions between encoders and crtcs are exposed to userspace through the
* possible_clones and possible_crtcs bitmasks.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
{
/*
* In the past, drivers have attempted to model the static association
* of connector to encoder in simple connector/encoder devices using a
* direct assignment of connector->encoder = encoder. This connection
* is a logical one and the responsibility of the core, so drivers are
* expected not to mess with this.
*
* Note that the error return should've been enough here, but a large
* majority of drivers ignores the return value, so add in a big WARN
* to get people's attention.
*/
if (WARN_ON(connector->encoder))
return -EINVAL;
connector->possible_encoders |= drm_encoder_mask(encoder);
return 0;
}
EXPORT_SYMBOL(drm_connector_attach_encoder);
/**
* drm_connector_has_possible_encoder - check if the connector and encoder are
* associated with each other
* @connector: the connector
* @encoder: the encoder
*
* Returns:
* True if @encoder is one of the possible encoders for @connector.
*/
bool drm_connector_has_possible_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
{
return connector->possible_encoders & drm_encoder_mask(encoder);
}
EXPORT_SYMBOL(drm_connector_has_possible_encoder);
static void drm_mode_remove(struct drm_connector *connector,
struct drm_display_mode *mode)
{
list_del(&mode->head);
drm_mode_destroy(connector->dev, mode);
}
/**
* drm_connector_cleanup - cleans up an initialised connector
* @connector: connector to cleanup
*
* Cleans up the connector but doesn't free the object.
*/
void drm_connector_cleanup(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *t;
/* The connector should have been removed from userspace long before
* it is finally destroyed.
*/
if (WARN_ON(connector->registration_state ==
DRM_CONNECTOR_REGISTERED))
drm_connector_unregister(connector);
if (connector->privacy_screen) {
drm_privacy_screen_put(connector->privacy_screen);
connector->privacy_screen = NULL;
}
if (connector->tile_group) {
drm_mode_put_tile_group(dev, connector->tile_group);
connector->tile_group = NULL;
}
list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
drm_mode_remove(connector, mode);
list_for_each_entry_safe(mode, t, &connector->modes, head)
drm_mode_remove(connector, mode);
ida_free(&drm_connector_enum_list[connector->connector_type].ida,
connector->connector_type_id);
ida_free(&dev->mode_config.connector_ida, connector->index);
kfree(connector->display_info.bus_formats);
kfree(connector->display_info.vics);
drm_mode_object_unregister(dev, &connector->base);
kfree(connector->name);
connector->name = NULL;
fwnode_handle_put(connector->fwnode);
connector->fwnode = NULL;
spin_lock_irq(&dev->mode_config.connector_list_lock);
list_del(&connector->head);
dev->mode_config.num_connector--;
spin_unlock_irq(&dev->mode_config.connector_list_lock);
WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
if (connector->state && connector->funcs->atomic_destroy_state)
connector->funcs->atomic_destroy_state(connector,
connector->state);
mutex_destroy(&connector->mutex);
memset(connector, 0, sizeof(*connector));
if (dev->registered)
drm_sysfs_hotplug_event(dev);
}
EXPORT_SYMBOL(drm_connector_cleanup);
/**
* drm_connector_register - register a connector
* @connector: the connector to register
*
* Register userspace interfaces for a connector. Only call this for connectors
* which can be hotplugged after drm_dev_register() has been called already,
* e.g. DP MST connectors. All other connectors will be registered automatically
* when calling drm_dev_register().
*
* When the connector is no longer available, callers must call
* drm_connector_unregister().
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_connector_register(struct drm_connector *connector)
{
int ret = 0;
if (!connector->dev->registered)
return 0;
mutex_lock(&connector->mutex);
if (connector->registration_state != DRM_CONNECTOR_INITIALIZING)
goto unlock;
ret = drm_sysfs_connector_add(connector);
if (ret)
goto unlock;
drm_debugfs_connector_add(connector);
if (connector->funcs->late_register) {
ret = connector->funcs->late_register(connector);
if (ret)
goto err_debugfs;
}
drm_mode_object_register(connector->dev, &connector->base);
connector->registration_state = DRM_CONNECTOR_REGISTERED;
/* Let userspace know we have a new connector */
drm_sysfs_connector_hotplug_event(connector);
if (connector->privacy_screen)
drm_privacy_screen_register_notifier(connector->privacy_screen,
&connector->privacy_screen_notifier);
mutex_lock(&connector_list_lock);
list_add_tail(&connector->global_connector_list_entry, &connector_list);
mutex_unlock(&connector_list_lock);
goto unlock;
err_debugfs:
drm_debugfs_connector_remove(connector);
drm_sysfs_connector_remove(connector);
unlock:
mutex_unlock(&connector->mutex);
return ret;
}
EXPORT_SYMBOL(drm_connector_register);
/**
* drm_connector_unregister - unregister a connector
* @connector: the connector to unregister
*
* Unregister userspace interfaces for a connector. Only call this for
* connectors which have been registered explicitly by calling
* drm_connector_register().
*/
void drm_connector_unregister(struct drm_connector *connector)
{
mutex_lock(&connector->mutex);
if (connector->registration_state != DRM_CONNECTOR_REGISTERED) {
mutex_unlock(&connector->mutex);
return;
}
mutex_lock(&connector_list_lock);
list_del_init(&connector->global_connector_list_entry);
mutex_unlock(&connector_list_lock);
if (connector->privacy_screen)
drm_privacy_screen_unregister_notifier(
connector->privacy_screen,
&connector->privacy_screen_notifier);
if (connector->funcs->early_unregister)
connector->funcs->early_unregister(connector);
drm_sysfs_connector_remove(connector);
drm_debugfs_connector_remove(connector);
connector->registration_state = DRM_CONNECTOR_UNREGISTERED;
mutex_unlock(&connector->mutex);
}
EXPORT_SYMBOL(drm_connector_unregister);
void drm_connector_unregister_all(struct drm_device *dev)
{
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
drm_connector_unregister(connector);
drm_connector_list_iter_end(&conn_iter);
}
int drm_connector_register_all(struct drm_device *dev)
{
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
int ret = 0;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
ret = drm_connector_register(connector);
if (ret)
break;
}
drm_connector_list_iter_end(&conn_iter);
if (ret)
drm_connector_unregister_all(dev);
return ret;
}
/**
* drm_get_connector_status_name - return a string for connector status
* @status: connector status to compute name of
*
* In contrast to the other drm_get_*_name functions this one here returns a
* const pointer and hence is threadsafe.
*
* Returns: connector status string
*/
const char *drm_get_connector_status_name(enum drm_connector_status status)
{
if (status == connector_status_connected)
return "connected";
else if (status == connector_status_disconnected)
return "disconnected";
else
return "unknown";
}
EXPORT_SYMBOL(drm_get_connector_status_name);
/**
* drm_get_connector_force_name - return a string for connector force
* @force: connector force to get name of
*
* Returns: const pointer to name.
*/
const char *drm_get_connector_force_name(enum drm_connector_force force)
{
switch (force) {
case DRM_FORCE_UNSPECIFIED:
return "unspecified";
case DRM_FORCE_OFF:
return "off";
case DRM_FORCE_ON:
return "on";
case DRM_FORCE_ON_DIGITAL:
return "digital";
default:
return "unknown";
}
}
#ifdef CONFIG_LOCKDEP
static struct lockdep_map connector_list_iter_dep_map = {
.name = "drm_connector_list_iter"
};
#endif
/**
* drm_connector_list_iter_begin - initialize a connector_list iterator
* @dev: DRM device
* @iter: connector_list iterator
*
* Sets @iter up to walk the &drm_mode_config.connector_list of @dev. @iter
* must always be cleaned up again by calling drm_connector_list_iter_end().
* Iteration itself happens using drm_connector_list_iter_next() or
* drm_for_each_connector_iter().
*/
void drm_connector_list_iter_begin(struct drm_device *dev,
struct drm_connector_list_iter *iter)
{
iter->dev = dev;
iter->conn = NULL;
lock_acquire_shared_recursive(&connector_list_iter_dep_map, 0, 1, NULL, _RET_IP_);
}
EXPORT_SYMBOL(drm_connector_list_iter_begin);
/*
* Extra-safe connector put function that works in any context. Should only be
* used from the connector_iter functions, where we never really expect to
* actually release the connector when dropping our final reference.
*/
static void
__drm_connector_put_safe(struct drm_connector *conn)
{
struct drm_mode_config *config = &conn->dev->mode_config;
lockdep_assert_held(&config->connector_list_lock);
if (!refcount_dec_and_test(&conn->base.refcount.refcount))
return;
llist_add(&conn->free_node, &config->connector_free_list);
schedule_work(&config->connector_free_work);
}
/**
* drm_connector_list_iter_next - return next connector
* @iter: connector_list iterator
*
* Returns: the next connector for @iter, or NULL when the list walk has
* completed.
*/
struct drm_connector *
drm_connector_list_iter_next(struct drm_connector_list_iter *iter)
{
struct drm_connector *old_conn = iter->conn;
struct drm_mode_config *config = &iter->dev->mode_config;
struct list_head *lhead;
unsigned long flags;
spin_lock_irqsave(&config->connector_list_lock, flags);
lhead = old_conn ? &old_conn->head : &config->connector_list;
do {
if (lhead->next == &config->connector_list) {
iter->conn = NULL;
break;
}
lhead = lhead->next;
iter->conn = list_entry(lhead, struct drm_connector, head);
/* loop until it's not a zombie connector */
} while (!kref_get_unless_zero(&iter->conn->base.refcount));
if (old_conn)
__drm_connector_put_safe(old_conn);
spin_unlock_irqrestore(&config->connector_list_lock, flags);
return iter->conn;
}
EXPORT_SYMBOL(drm_connector_list_iter_next);
/**
* drm_connector_list_iter_end - tear down a connector_list iterator
* @iter: connector_list iterator
*
 * Tears down @iter and releases any resources (like &drm_connector references)
 * acquired while walking the list. This must always be called, both when the
 * iteration completes fully and when it is aborted without walking the entire
 * list.
*/
void drm_connector_list_iter_end(struct drm_connector_list_iter *iter)
{
struct drm_mode_config *config = &iter->dev->mode_config;
unsigned long flags;
iter->dev = NULL;
if (iter->conn) {
spin_lock_irqsave(&config->connector_list_lock, flags);
__drm_connector_put_safe(iter->conn);
spin_unlock_irqrestore(&config->connector_list_lock, flags);
}
lock_release(&connector_list_iter_dep_map, _RET_IP_);
}
EXPORT_SYMBOL(drm_connector_list_iter_end);
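/*
 * Illustrative sketch (not part of the original file): the begin/next/end
 * helpers above are meant to be used as a bracketed iteration, typically via
 * the drm_for_each_connector_iter() macro:
 *
 *	struct drm_connector_list_iter iter;
 *	struct drm_connector *connector;
 *
 *	drm_connector_list_iter_begin(dev, &iter);
 *	drm_for_each_connector_iter(connector, &iter) {
 *		// e.g. inspect connector->status or connector->name
 *	}
 *	drm_connector_list_iter_end(&iter);
 */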
static const struct drm_prop_enum_list drm_subpixel_enum_list[] = {
{ SubPixelUnknown, "Unknown" },
{ SubPixelHorizontalRGB, "Horizontal RGB" },
{ SubPixelHorizontalBGR, "Horizontal BGR" },
{ SubPixelVerticalRGB, "Vertical RGB" },
{ SubPixelVerticalBGR, "Vertical BGR" },
{ SubPixelNone, "None" },
};
/**
* drm_get_subpixel_order_name - return a string for a given subpixel enum
* @order: enum of subpixel_order
*
* Note you could abuse this and return something out of bounds, but that
* would be a caller error. No unscrubbed user data should make it here.
*
* Returns: string describing an enumerated subpixel property
*/
const char *drm_get_subpixel_order_name(enum subpixel_order order)
{
return drm_subpixel_enum_list[order].name;
}
EXPORT_SYMBOL(drm_get_subpixel_order_name);
static const struct drm_prop_enum_list drm_dpms_enum_list[] = {
{ DRM_MODE_DPMS_ON, "On" },
{ DRM_MODE_DPMS_STANDBY, "Standby" },
{ DRM_MODE_DPMS_SUSPEND, "Suspend" },
{ DRM_MODE_DPMS_OFF, "Off" }
};
DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
static const struct drm_prop_enum_list drm_link_status_enum_list[] = {
{ DRM_MODE_LINK_STATUS_GOOD, "Good" },
{ DRM_MODE_LINK_STATUS_BAD, "Bad" },
};
/**
* drm_display_info_set_bus_formats - set the supported bus formats
* @info: display info to store bus formats in
* @formats: array containing the supported bus formats
 * @num_formats: the number of entries in the @formats array
*
* Store the supported bus formats in display info structure.
* See MEDIA_BUS_FMT_* definitions in include/uapi/linux/media-bus-format.h for
* a full list of available formats.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
int drm_display_info_set_bus_formats(struct drm_display_info *info,
const u32 *formats,
unsigned int num_formats)
{
u32 *fmts = NULL;
if (!formats && num_formats)
return -EINVAL;
if (formats && num_formats) {
fmts = kmemdup(formats, sizeof(*formats) * num_formats,
GFP_KERNEL);
if (!fmts)
return -ENOMEM;
}
kfree(info->bus_formats);
info->bus_formats = fmts;
info->num_bus_formats = num_formats;
return 0;
}
EXPORT_SYMBOL(drm_display_info_set_bus_formats);
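/*
 * Illustrative sketch (not part of the original file): a panel or bridge
 * driver would typically advertise its wire format(s) roughly like this;
 * the single RGB888 format below is just an example.
 *
 *	static const u32 example_bus_formats[] = {
 *		MEDIA_BUS_FMT_RGB888_1X24,
 *	};
 *
 *	ret = drm_display_info_set_bus_formats(&connector->display_info,
 *					       example_bus_formats,
 *					       ARRAY_SIZE(example_bus_formats));
 */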
/* Optional connector properties. */
static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] = {
{ DRM_MODE_SCALE_NONE, "None" },
{ DRM_MODE_SCALE_FULLSCREEN, "Full" },
{ DRM_MODE_SCALE_CENTER, "Center" },
{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
};
static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
{ DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" },
{ DRM_MODE_PICTURE_ASPECT_4_3, "4:3" },
{ DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
};
static const struct drm_prop_enum_list drm_content_type_enum_list[] = {
{ DRM_MODE_CONTENT_TYPE_NO_DATA, "No Data" },
{ DRM_MODE_CONTENT_TYPE_GRAPHICS, "Graphics" },
{ DRM_MODE_CONTENT_TYPE_PHOTO, "Photo" },
{ DRM_MODE_CONTENT_TYPE_CINEMA, "Cinema" },
{ DRM_MODE_CONTENT_TYPE_GAME, "Game" },
};
static const struct drm_prop_enum_list drm_panel_orientation_enum_list[] = {
{ DRM_MODE_PANEL_ORIENTATION_NORMAL, "Normal" },
{ DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP, "Upside Down" },
{ DRM_MODE_PANEL_ORIENTATION_LEFT_UP, "Left Side Up" },
{ DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, "Right Side Up" },
};
static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
{ DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
};
DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I, TV-out and DP */
{ DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
{ DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
};
DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
drm_dvi_i_subconnector_enum_list)
static const struct drm_prop_enum_list drm_tv_mode_enum_list[] = {
{ DRM_MODE_TV_MODE_NTSC, "NTSC" },
{ DRM_MODE_TV_MODE_NTSC_443, "NTSC-443" },
{ DRM_MODE_TV_MODE_NTSC_J, "NTSC-J" },
{ DRM_MODE_TV_MODE_PAL, "PAL" },
{ DRM_MODE_TV_MODE_PAL_M, "PAL-M" },
{ DRM_MODE_TV_MODE_PAL_N, "PAL-N" },
{ DRM_MODE_TV_MODE_SECAM, "SECAM" },
};
DRM_ENUM_NAME_FN(drm_get_tv_mode_name, drm_tv_mode_enum_list)
/**
* drm_get_tv_mode_from_name - Translates a TV mode name into its enum value
* @name: TV Mode name we want to convert
* @len: Length of @name
*
* Translates @name into an enum drm_connector_tv_mode.
*
* Returns: the enum value on success, a negative errno otherwise.
*/
int drm_get_tv_mode_from_name(const char *name, size_t len)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(drm_tv_mode_enum_list); i++) {
const struct drm_prop_enum_list *item = &drm_tv_mode_enum_list[i];
if (strlen(item->name) == len && !strncmp(item->name, name, len))
return item->type;
}
return -EINVAL;
}
EXPORT_SYMBOL(drm_get_tv_mode_from_name);
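/*
 * Illustrative sketch (not part of the original file): a command line parser
 * holding a TV mode name could resolve it like this:
 *
 *	int tv_mode = drm_get_tv_mode_from_name("PAL", strlen("PAL"));
 *
 *	if (tv_mode < 0)
 *		return tv_mode;	// -EINVAL for an unknown name
 */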
static const struct drm_prop_enum_list drm_tv_select_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
{ DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
{ DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
};
DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I, TV-out and DP */
{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
{ DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
{ DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
};
DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
drm_tv_subconnector_enum_list)
static const struct drm_prop_enum_list drm_dp_subconnector_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I, TV-out and DP */
{ DRM_MODE_SUBCONNECTOR_VGA, "VGA" }, /* DP */
{ DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DP */
{ DRM_MODE_SUBCONNECTOR_HDMIA, "HDMI" }, /* DP */
{ DRM_MODE_SUBCONNECTOR_DisplayPort, "DP" }, /* DP */
{ DRM_MODE_SUBCONNECTOR_Wireless, "Wireless" }, /* DP */
{ DRM_MODE_SUBCONNECTOR_Native, "Native" }, /* DP */
};
DRM_ENUM_NAME_FN(drm_get_dp_subconnector_name,
drm_dp_subconnector_enum_list)
static const char * const colorspace_names[] = {
/* For Default case, driver will set the colorspace */
[DRM_MODE_COLORIMETRY_DEFAULT] = "Default",
/* Standard Definition Colorimetry based on CEA 861 */
[DRM_MODE_COLORIMETRY_SMPTE_170M_YCC] = "SMPTE_170M_YCC",
[DRM_MODE_COLORIMETRY_BT709_YCC] = "BT709_YCC",
/* Standard Definition Colorimetry based on IEC 61966-2-4 */
[DRM_MODE_COLORIMETRY_XVYCC_601] = "XVYCC_601",
/* High Definition Colorimetry based on IEC 61966-2-4 */
[DRM_MODE_COLORIMETRY_XVYCC_709] = "XVYCC_709",
/* Colorimetry based on IEC 61966-2-1/Amendment 1 */
[DRM_MODE_COLORIMETRY_SYCC_601] = "SYCC_601",
/* Colorimetry based on IEC 61966-2-5 [33] */
[DRM_MODE_COLORIMETRY_OPYCC_601] = "opYCC_601",
/* Colorimetry based on IEC 61966-2-5 */
[DRM_MODE_COLORIMETRY_OPRGB] = "opRGB",
/* Colorimetry based on ITU-R BT.2020 */
[DRM_MODE_COLORIMETRY_BT2020_CYCC] = "BT2020_CYCC",
/* Colorimetry based on ITU-R BT.2020 */
[DRM_MODE_COLORIMETRY_BT2020_RGB] = "BT2020_RGB",
/* Colorimetry based on ITU-R BT.2020 */
[DRM_MODE_COLORIMETRY_BT2020_YCC] = "BT2020_YCC",
/* Added as part of Additional Colorimetry Extension in 861.G */
[DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65] = "DCI-P3_RGB_D65",
[DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER] = "DCI-P3_RGB_Theater",
[DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED] = "RGB_WIDE_FIXED",
/* Colorimetry based on scRGB (IEC 61966-2-2) */
[DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT] = "RGB_WIDE_FLOAT",
[DRM_MODE_COLORIMETRY_BT601_YCC] = "BT601_YCC",
};
/**
* drm_get_colorspace_name - return a string for color encoding
* @colorspace: color space to compute name of
*
* In contrast to the other drm_get_*_name functions this one here returns a
* const pointer and hence is threadsafe.
*/
const char *drm_get_colorspace_name(enum drm_colorspace colorspace)
{
if (colorspace < ARRAY_SIZE(colorspace_names) && colorspace_names[colorspace])
return colorspace_names[colorspace];
else
return "(null)";
}
static const u32 hdmi_colorspaces =
BIT(DRM_MODE_COLORIMETRY_SMPTE_170M_YCC) |
BIT(DRM_MODE_COLORIMETRY_BT709_YCC) |
BIT(DRM_MODE_COLORIMETRY_XVYCC_601) |
BIT(DRM_MODE_COLORIMETRY_XVYCC_709) |
BIT(DRM_MODE_COLORIMETRY_SYCC_601) |
BIT(DRM_MODE_COLORIMETRY_OPYCC_601) |
BIT(DRM_MODE_COLORIMETRY_OPRGB) |
BIT(DRM_MODE_COLORIMETRY_BT2020_CYCC) |
BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
BIT(DRM_MODE_COLORIMETRY_BT2020_YCC) |
BIT(DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65) |
BIT(DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER);
/*
* As per DP 1.4a spec, 2.2.5.7.5 VSC SDP Payload for Pixel Encoding/Colorimetry
* Format Table 2-120
*/
static const u32 dp_colorspaces =
BIT(DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED) |
BIT(DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT) |
BIT(DRM_MODE_COLORIMETRY_OPRGB) |
BIT(DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65) |
BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
BIT(DRM_MODE_COLORIMETRY_BT601_YCC) |
BIT(DRM_MODE_COLORIMETRY_BT709_YCC) |
BIT(DRM_MODE_COLORIMETRY_XVYCC_601) |
BIT(DRM_MODE_COLORIMETRY_XVYCC_709) |
BIT(DRM_MODE_COLORIMETRY_SYCC_601) |
BIT(DRM_MODE_COLORIMETRY_OPYCC_601) |
BIT(DRM_MODE_COLORIMETRY_BT2020_CYCC) |
BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
/**
* DOC: standard connector properties
*
* DRM connectors have a few standardized properties:
*
* EDID:
* Blob property which contains the current EDID read from the sink. This
* is useful to parse sink identification information like vendor, model
* and serial. Drivers should update this property by calling
* drm_connector_update_edid_property(), usually after having parsed
* the EDID using drm_add_edid_modes(). Userspace cannot change this
* property.
*
* User-space should not parse the EDID to obtain information exposed via
* other KMS properties (because the kernel might apply limits, quirks or
* fixups to the EDID). For instance, user-space should not try to parse
* mode lists from the EDID.
* DPMS:
* Legacy property for setting the power state of the connector. For atomic
* drivers this is only provided for backwards compatibility with existing
* drivers, it remaps to controlling the "ACTIVE" property on the CRTC the
* connector is linked to. Drivers should never set this property directly,
* it is handled by the DRM core by calling the &drm_connector_funcs.dpms
* callback. For atomic drivers the remapping to the "ACTIVE" property is
* implemented in the DRM core.
*
* Note that this property cannot be set through the MODE_ATOMIC ioctl,
* userspace must use "ACTIVE" on the CRTC instead.
*
* WARNING:
*
* For userspace also running on legacy drivers the "DPMS" semantics are a
* lot more complicated. First, userspace cannot rely on the "DPMS" value
* returned by the GETCONNECTOR actually reflecting reality, because many
* drivers fail to update it. For atomic drivers this is taken care of in
* drm_atomic_helper_update_legacy_modeset_state().
*
 * The second issue is that the DPMS state is only well-defined when the
 * connector is connected to a CRTC. In atomic the DRM core enforces that
 * "ACTIVE" is off in such a case; no such check exists for "DPMS".
*
* Finally, when enabling an output using the legacy SETCONFIG ioctl then
* "DPMS" is forced to ON. But see above, that might not be reflected in
* the software value on legacy drivers.
*
* Summarizing: Only set "DPMS" when the connector is known to be enabled,
* assume that a successful SETCONFIG call also sets "DPMS" to on, and
* never read back the value of "DPMS" because it can be incorrect.
* PATH:
* Connector path property to identify how this sink is physically
* connected. Used by DP MST. This should be set by calling
* drm_connector_set_path_property(), in the case of DP MST with the
* path property the MST manager created. Userspace cannot change this
* property.
* TILE:
* Connector tile group property to indicate how a set of DRM connector
* compose together into one logical screen. This is used by both high-res
* external screens (often only using a single cable, but exposing multiple
* DP MST sinks), or high-res integrated panels (like dual-link DSI) which
* are not gen-locked. Note that for tiled panels which are genlocked, like
* dual-link LVDS or dual-link DSI, the driver should try to not expose the
* tiling and virtualise both &drm_crtc and &drm_plane if needed. Drivers
* should update this value using drm_connector_set_tile_property().
* Userspace cannot change this property.
* link-status:
* Connector link-status property to indicate the status of link. The
* default value of link-status is "GOOD". If something fails during or
* after modeset, the kernel driver may set this to "BAD" and issue a
* hotplug uevent. Drivers should update this value using
* drm_connector_set_link_status_property().
*
* When user-space receives the hotplug uevent and detects a "BAD"
* link-status, the sink doesn't receive pixels anymore (e.g. the screen
* becomes completely black). The list of available modes may have
* changed. User-space is expected to pick a new mode if the current one
* has disappeared and perform a new modeset with link-status set to
* "GOOD" to re-enable the connector.
*
 * If multiple connectors share the same CRTC and one of them gets a "BAD"
 * link-status, the others are unaffected (i.e. those sinks still continue to
 * receive pixels).
*
* When user-space performs an atomic commit on a connector with a "BAD"
* link-status without resetting the property to "GOOD", the sink may
* still not receive pixels. When user-space performs an atomic commit
* which resets the link-status property to "GOOD" without the
* ALLOW_MODESET flag set, it might fail because a modeset is required.
*
* User-space can only change link-status to "GOOD", changing it to "BAD"
* is a no-op.
*
* For backwards compatibility with non-atomic userspace the kernel
* tries to automatically set the link-status back to "GOOD" in the
* SETCRTC IOCTL. This might fail if the mode is no longer valid, similar
* to how it might fail if a different screen has been connected in the
* interim.
* non_desktop:
* Indicates the output should be ignored for purposes of displaying a
* standard desktop environment or console. This is most likely because
* the output device is not rectilinear.
* Content Protection:
* This property is used by userspace to request the kernel protect future
* content communicated over the link. When requested, kernel will apply
* the appropriate means of protection (most often HDCP), and use the
* property to tell userspace the protection is active.
*
* Drivers can set this up by calling
* drm_connector_attach_content_protection_property() on initialization.
*
* The value of this property can be one of the following:
*
* DRM_MODE_CONTENT_PROTECTION_UNDESIRED = 0
* The link is not protected, content is transmitted in the clear.
* DRM_MODE_CONTENT_PROTECTION_DESIRED = 1
* Userspace has requested content protection, but the link is not
* currently protected. When in this state, kernel should enable
* Content Protection as soon as possible.
* DRM_MODE_CONTENT_PROTECTION_ENABLED = 2
* Userspace has requested content protection, and the link is
* protected. Only the driver can set the property to this value.
* If userspace attempts to set to ENABLED, kernel will return
* -EINVAL.
*
* A few guidelines:
*
* - DESIRED state should be preserved until userspace de-asserts it by
* setting the property to UNDESIRED. This means ENABLED should only
* transition to UNDESIRED when the user explicitly requests it.
* - If the state is DESIRED, kernel should attempt to re-authenticate the
* link whenever possible. This includes across disable/enable, dpms,
* hotplug, downstream device changes, link status failures, etc..
* - Kernel sends uevent with the connector id and property id through
* @drm_hdcp_update_content_protection, upon below kernel triggered
* scenarios:
*
* - DESIRED -> ENABLED (authentication success)
* - ENABLED -> DESIRED (termination of authentication)
* - Please note no uevents for userspace triggered property state changes,
* which can't fail such as
*
* - DESIRED/ENABLED -> UNDESIRED
* - UNDESIRED -> DESIRED
 * - Userspace is responsible for polling the property or listening for uevents
* to determine when the value transitions from ENABLED to DESIRED.
* This signifies the link is no longer protected and userspace should
* take appropriate action (whatever that might be).
*
* HDCP Content Type:
* This Enum property is used by the userspace to declare the content type
* of the display stream, to kernel. Here display stream stands for any
* display content that userspace intended to display through HDCP
* encryption.
*
* Content Type of a stream is decided by the owner of the stream, as
* "HDCP Type0" or "HDCP Type1".
*
* The value of the property can be one of the below:
* - "HDCP Type0": DRM_MODE_HDCP_CONTENT_TYPE0 = 0
* - "HDCP Type1": DRM_MODE_HDCP_CONTENT_TYPE1 = 1
*
* When kernel starts the HDCP authentication (see "Content Protection"
* for details), it uses the content type in "HDCP Content Type"
* for performing the HDCP authentication with the display sink.
*
 * Please note that a link can be authenticated with HDCP 2.2 for Content
 * Type 0 or Content Type 1, whereas a link can be authenticated with
 * HDCP 1.4 only for Content Type 0 (this is implicit in nature, as there is
 * no notion of Content Type in HDCP 1.4).
 *
 * The HDCP 2.2 authentication protocol itself takes the "Content Type" as a
 * parameter, which is an input for the DP HDCP 2.2 encryption algorithm.
*
 * In case of a Type 0 content protection request, the kernel driver can choose
 * either of the HDCP spec versions 1.4 and 2.2. When HDCP 2.2 is used for
 * "HDCP Type 0", an HDCP 2.2 capable repeater in the downstream can send
 * that content to an HDCP 1.4 authenticated HDCP sink (Type 0 link).
 * But if the content is classified as "HDCP Type 1", the above mentioned
 * HDCP 2.2 repeater won't send the content to the HDCP sink, as it can't
 * authenticate the HDCP 1.4 capable sink for "HDCP Type 1".
*
* Please note userspace can be ignorant of the HDCP versions used by the
* kernel driver to achieve the "HDCP Content Type".
*
 * In the current scenario, classifying content as Type 1 ensures that the
 * content will be displayed only through an HDCP 2.2 encrypted link.
*
* Note that the HDCP Content Type property is introduced at HDCP 2.2, and
* defaults to type 0. It is only exposed by drivers supporting HDCP 2.2
* (hence supporting Type 0 and Type 1). Based on how next versions of
* HDCP specs are defined content Type could be used for higher versions
* too.
*
* If content type is changed when "Content Protection" is not UNDESIRED,
* then kernel will disable the HDCP and re-enable with new type in the
* same atomic commit. And when "Content Protection" is ENABLED, it means
* that link is HDCP authenticated and encrypted, for the transmission of
* the Type of stream mentioned at "HDCP Content Type".
*
* HDR_OUTPUT_METADATA:
* Connector property that enables userspace to send HDR metadata to the
* driver. This metadata is based on the composition and blending
* policies decided by the user, taking into account the hardware and
* sink capabilities. The driver gets this metadata and creates a
* Dynamic Range and Mastering Infoframe (DRM) in case of HDMI, or an
* SDP packet (Non-audio INFOFRAME SDP v1.3) for DP. This is then sent
* to the sink, notifying it of the upcoming frame's color encoding and
* luminance parameters.
*
* Userspace first needs to detect the HDR capabilities of the sink by
* reading and parsing the EDID. Details of the HDR metadata for HDMI
* are given in the CTA-861-G spec; for DP, they are defined in the
* VESA DP Standard v1.4. Userspace then needs to get the metadata of
* the video/game/app content that is encoded in HDR (basically using
* HDR transfer functions). With this information it decides on a
* blending policy and composes the relevant layers/overlays into a
* common format. Once this blending is done, userspace knows the
* metadata of the composed frame to be sent to the sink. It then uses
* this property to communicate that metadata to the driver, which
* builds an infoframe packet and sends it to the sink based on the
* type of encoder connected.
*
* Userspace is responsible for tone mapping when:
* - some layers are HDR and others are SDR
* - the HDR layers' luminance does not match the sink's
*
* It also needs to do colorspace conversion to bring all layers into
* one common colorspace for blending. It can use the GL, media, or
* display engine for this, depending on the capabilities of the
* associated hardware.
*
* The driver expects userspace to supply the metadata in a &struct
* hdr_output_metadata structure. This is received as a blob and stored in
* &drm_connector_state.hdr_output_metadata. The driver parses the EDID and
* saves the sink metadata in &struct hdr_sink_metadata, as
* &drm_connector.hdr_sink_metadata. For an HDMI encoder, the driver uses the
* drm_hdmi_infoframe_set_hdr_metadata() helper to set the HDR metadata and
* hdmi_drm_infoframe_pack() to pack the infoframe as per spec.
*
* max bpc:
* This range property is used by userspace to limit the bit depth. When
* set, the driver limits the bpc in accordance with the valid range
* supported by the hardware and sink. Drivers should use
* drm_connector_attach_max_bpc_property() to create and attach the
* property to the connector during initialization.
*
* Connectors also have one standardized atomic property:
*
* CRTC_ID:
* Mode object ID of the &drm_crtc this connector should be connected to.
*
* Connectors for LCD panels may also have one standardized property:
*
* panel orientation:
* On some devices the LCD panel is mounted in the casing in such a way
* that the up/top side of the panel does not match with the top side of
* the device. Userspace can use this property to check for this.
* Note that input coordinates from touchscreens (input devices with
* INPUT_PROP_DIRECT) will still map 1:1 to the actual LCD panel
* coordinates, so if userspace rotates the picture to adjust for
* the orientation it must also apply the same transformation to the
* touchscreen input coordinates. This property is initialized by calling
* drm_connector_set_panel_orientation() or
* drm_connector_set_panel_orientation_with_quirk().
*
* scaling mode:
* This property defines how a non-native mode is upscaled to the native
* mode of an LCD panel:
*
* None:
* No upscaling happens, scaling is left to the panel. Not all
* drivers expose this mode.
* Full:
* The output is upscaled to the full resolution of the panel,
* ignoring the aspect ratio.
* Center:
* No upscaling happens, the output is centered within the native
* resolution of the panel.
* Full aspect:
* The output is upscaled to maximize either the width or height
* while retaining the aspect ratio.
*
* This property should be set up by calling
* drm_connector_attach_scaling_mode_property(). Note that drivers
* can also expose this property to external outputs, in which case they
* must support "None", which should be the default (since external screens
* have a built-in scaler).
*
* subconnector:
* This property is used by DVI-I, TVout and DisplayPort to indicate
* different connector subtypes. Enum values more or less match those of the
* main connector types.
* For DVI-I and TVout there is also a matching property "select
* subconnector" that allows switching between signal types.
* The DP subconnector corresponds to a downstream port.
*
* privacy-screen sw-state, privacy-screen hw-state:
* These 2 optional properties can be used to query the state of the
* electronic privacy screen that is available on some displays; and in
* some cases also control the state. If a driver implements these
* properties then both properties must be present.
*
* "privacy-screen hw-state" is read-only and reflects the actual state
* of the privacy-screen, possible values: "Enabled", "Disabled",
* "Enabled-locked", "Disabled-locked". The locked states indicate
* that the state cannot be changed through the DRM API. E.g. there
* might be devices where the firmware-setup options, or a hardware
* slider-switch, offer always on / off modes.
*
* "privacy-screen sw-state" can be set to change the privacy-screen state
* when not locked. In this case the driver must update the hw-state
* property to reflect the new state on completion of the commit of the
* sw-state property. Setting the sw-state property when the hw-state is
* locked must be interpreted by the driver as a request to change the
* state to the set state when the hw-state becomes unlocked. E.g. if
* "privacy-screen hw-state" is "Enabled-locked" and the sw-state
* gets set to "Disabled" followed by the user unlocking the state by
* changing the slider-switch position, then the driver must set the
* state to "Disabled" upon receiving the unlock event.
*
* In some cases the privacy-screen's actual state might change outside of
* control of the DRM code. E.g. there might be a firmware handled hotkey
* which toggles the actual state, or the actual state might be changed
* through another userspace API such as writing /proc/acpi/ibm/lcdshadow.
* In this case the driver must update both the hw-state and the sw-state
* to reflect the new value, overwriting any pending state requests in the
* sw-state. Any pending sw-state requests are thus discarded.
*
* Note that the ability for the state to change outside of control of
* the DRM master process means that userspace must not cache the value
* of the sw-state. Caching the sw-state value and including it in later
* atomic commits may lead to overriding a state change done through e.g.
* a firmware handled hotkey. Therefore userspace must not include the
* privacy-screen sw-state in an atomic commit unless it wants to change
* its value.
*
* left margin, right margin, top margin, bottom margin:
* Add margins to the connector's viewport. This is typically used to
* mitigate overscan on TVs.
*
* The value is the size in pixels of the black border which will be
* added. The attached CRTC's content will be scaled to fill the whole
* area inside the margin.
*
* The margins configuration might be sent to the sink, e.g. via HDMI AVI
* InfoFrames.
*
* Drivers can set up these properties by calling
* drm_mode_create_tv_margin_properties().
*/
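/*
 * Illustrative sketch only, not part of the DRM core: one way a driver's HDCP
 * worker could report the result of a kernel-triggered authentication attempt
 * to userspace via the "Content Protection" uevent helper mentioned above.
 * Assumes the HDCP helper header is included; foo_hdcp_auth_done() and its
 * arguments are hypothetical names.
 */
static void foo_hdcp_auth_done(struct drm_connector *connector, bool success)
{
	struct drm_device *dev = connector->dev;

	/* The helper expects connection_mutex to be held while it updates state. */
	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	/* DESIRED -> ENABLED on success, ENABLED -> DESIRED on termination. */
	drm_hdcp_update_content_protection(connector,
					   success ?
					   DRM_MODE_CONTENT_PROTECTION_ENABLED :
					   DRM_MODE_CONTENT_PROTECTION_DESIRED);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}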
int drm_connector_create_standard_properties(struct drm_device *dev)
{
struct drm_property *prop;
prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
DRM_MODE_PROP_IMMUTABLE,
"EDID", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.edid_property = prop;
prop = drm_property_create_enum(dev, 0,
"DPMS", drm_dpms_enum_list,
ARRAY_SIZE(drm_dpms_enum_list));
if (!prop)
return -ENOMEM;
dev->mode_config.dpms_property = prop;
prop = drm_property_create(dev,
DRM_MODE_PROP_BLOB |
DRM_MODE_PROP_IMMUTABLE,
"PATH", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.path_property = prop;
prop = drm_property_create(dev,
DRM_MODE_PROP_BLOB |
DRM_MODE_PROP_IMMUTABLE,
"TILE", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.tile_property = prop;
prop = drm_property_create_enum(dev, 0, "link-status",
drm_link_status_enum_list,
ARRAY_SIZE(drm_link_status_enum_list));
if (!prop)
return -ENOMEM;
dev->mode_config.link_status_property = prop;
prop = drm_property_create_bool(dev, DRM_MODE_PROP_IMMUTABLE, "non-desktop");
if (!prop)
return -ENOMEM;
dev->mode_config.non_desktop_property = prop;
prop = drm_property_create(dev, DRM_MODE_PROP_BLOB,
"HDR_OUTPUT_METADATA", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.hdr_output_metadata_property = prop;
return 0;
}
/**
* drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
* @dev: DRM device
*
* Called by a driver the first time a DVI-I connector is made.
*
* Returns: %0
*/
int drm_mode_create_dvi_i_properties(struct drm_device *dev)
{
struct drm_property *dvi_i_selector;
struct drm_property *dvi_i_subconnector;
if (dev->mode_config.dvi_i_select_subconnector_property)
return 0;
dvi_i_selector =
drm_property_create_enum(dev, 0,
"select subconnector",
drm_dvi_i_select_enum_list,
ARRAY_SIZE(drm_dvi_i_select_enum_list));
dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
"subconnector",
drm_dvi_i_subconnector_enum_list,
ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
return 0;
}
EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
/**
* drm_connector_attach_dp_subconnector_property - create subconnector property for DP
* @connector: drm_connector to attach property
*
* Called by a driver when DP connector is created.
*/
void drm_connector_attach_dp_subconnector_property(struct drm_connector *connector)
{
struct drm_mode_config *mode_config = &connector->dev->mode_config;
if (!mode_config->dp_subconnector_property)
mode_config->dp_subconnector_property =
drm_property_create_enum(connector->dev,
DRM_MODE_PROP_IMMUTABLE,
"subconnector",
drm_dp_subconnector_enum_list,
ARRAY_SIZE(drm_dp_subconnector_enum_list));
drm_object_attach_property(&connector->base,
mode_config->dp_subconnector_property,
DRM_MODE_SUBCONNECTOR_Unknown);
}
EXPORT_SYMBOL(drm_connector_attach_dp_subconnector_property);
/**
* DOC: HDMI connector properties
*
* content type (HDMI specific):
* Indicates content type setting to be used in HDMI infoframes to indicate
* content type for the external device, so that it adjusts its display
* settings accordingly.
*
* The value of this property can be one of the following:
*
* No Data:
* Content type is unknown
* Graphics:
* Content type is graphics
* Photo:
* Content type is photo
* Cinema:
* Content type is cinema
* Game:
* Content type is game
*
* The meaning of each content type is defined in CTA-861-G table 15.
*
* Drivers can set up this property by calling
* drm_connector_attach_content_type_property(). Decoding to
* infoframe values is done through drm_hdmi_avi_infoframe_content_type().
*/
/*
* TODO: Document the properties:
* - brightness
* - contrast
* - flicker reduction
* - hue
* - mode
* - overscan
* - saturation
* - select subconnector
*/
/**
* DOC: Analog TV Connector Properties
*
* TV Mode:
* Indicates the TV Mode used on an analog TV connector. The value
* of this property can be one of the following:
*
* NTSC:
* TV Mode is CCIR System M (aka 525-lines) together with
* the NTSC Color Encoding.
*
* NTSC-443:
*
* TV Mode is CCIR System M (aka 525-lines) together with
* the NTSC Color Encoding, but with a color subcarrier
* frequency of 4.43MHz
*
* NTSC-J:
*
* TV Mode is CCIR System M (aka 525-lines) together with
* the NTSC Color Encoding, but with a black level equal to
* the blanking level.
*
* PAL:
*
* TV Mode is CCIR System B (aka 625-lines) together with
* the PAL Color Encoding.
*
* PAL-M:
*
* TV Mode is CCIR System M (aka 525-lines) together with
* the PAL Color Encoding.
*
* PAL-N:
*
* TV Mode is CCIR System N together with the PAL Color
* Encoding, a color subcarrier frequency of 3.58MHz, the
* SECAM color space, and narrower channels than other PAL
* variants.
*
* SECAM:
*
* TV Mode is CCIR System B (aka 625-lines) together with
* the SECAM Color Encoding.
*
* Drivers can set up this property by calling
* drm_mode_create_tv_properties().
*/
/**
* drm_connector_attach_content_type_property - attach content-type property
* @connector: connector to attach content type property on.
*
* Called by a driver the first time an HDMI connector is made.
*
* Returns: %0
*/
int drm_connector_attach_content_type_property(struct drm_connector *connector)
{
if (!drm_mode_create_content_type_property(connector->dev))
drm_object_attach_property(&connector->base,
connector->dev->mode_config.content_type_property,
DRM_MODE_CONTENT_TYPE_NO_DATA);
return 0;
}
EXPORT_SYMBOL(drm_connector_attach_content_type_property);
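/*
 * Illustrative sketch only, not part of this file: an HDMI encoder driver
 * would typically attach the property from its connector init path.
 * foo_hdmi_connector_init() is a hypothetical name.
 */
static int foo_hdmi_connector_init(struct drm_connector *connector)
{
	/* Exposes "content type" with an initial value of "No Data". */
	return drm_connector_attach_content_type_property(connector);
}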
/**
* drm_connector_attach_tv_margin_properties - attach TV connector margin
* properties
* @connector: DRM connector
*
* Called by a driver when it needs to attach TV margin props to a connector.
* Typically used on SDTV and HDMI connectors.
*/
void drm_connector_attach_tv_margin_properties(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
drm_object_attach_property(&connector->base,
dev->mode_config.tv_left_margin_property,
0);
drm_object_attach_property(&connector->base,
dev->mode_config.tv_right_margin_property,
0);
drm_object_attach_property(&connector->base,
dev->mode_config.tv_top_margin_property,
0);
drm_object_attach_property(&connector->base,
dev->mode_config.tv_bottom_margin_property,
0);
}
EXPORT_SYMBOL(drm_connector_attach_tv_margin_properties);
/**
* drm_mode_create_tv_margin_properties - create TV connector margin properties
* @dev: DRM device
*
* Called by a driver's HDMI connector initialization routine, this function
* creates the TV margin properties for a given device. No need to call this
* function for an SDTV connector, it's already called from
* drm_mode_create_tv_properties_legacy().
*
* Returns:
* 0 on success or a negative error code on failure.
*/
int drm_mode_create_tv_margin_properties(struct drm_device *dev)
{
if (dev->mode_config.tv_left_margin_property)
return 0;
dev->mode_config.tv_left_margin_property =
drm_property_create_range(dev, 0, "left margin", 0, 100);
if (!dev->mode_config.tv_left_margin_property)
return -ENOMEM;
dev->mode_config.tv_right_margin_property =
drm_property_create_range(dev, 0, "right margin", 0, 100);
if (!dev->mode_config.tv_right_margin_property)
return -ENOMEM;
dev->mode_config.tv_top_margin_property =
drm_property_create_range(dev, 0, "top margin", 0, 100);
if (!dev->mode_config.tv_top_margin_property)
return -ENOMEM;
dev->mode_config.tv_bottom_margin_property =
drm_property_create_range(dev, 0, "bottom margin", 0, 100);
if (!dev->mode_config.tv_bottom_margin_property)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL(drm_mode_create_tv_margin_properties);
/**
* drm_mode_create_tv_properties_legacy - create TV specific connector properties
* @dev: DRM device
* @num_modes: number of different TV formats (modes) supported
* @modes: array of pointers to strings containing name of each format
*
* Called by a driver's TV initialization routine, this function creates
* the TV specific connector properties for a given device. Caller is
* responsible for allocating a list of format names and passing them to
* this routine.
*
* NOTE: This function registers the deprecated "mode" connector
* property to select the analog TV mode (ie, NTSC, PAL, etc.). New
* drivers must use drm_mode_create_tv_properties() instead.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
int drm_mode_create_tv_properties_legacy(struct drm_device *dev,
unsigned int num_modes,
const char * const modes[])
{
struct drm_property *tv_selector;
struct drm_property *tv_subconnector;
unsigned int i;
if (dev->mode_config.tv_select_subconnector_property)
return 0;
/*
* Basic connector properties
*/
tv_selector = drm_property_create_enum(dev, 0,
"select subconnector",
drm_tv_select_enum_list,
ARRAY_SIZE(drm_tv_select_enum_list));
if (!tv_selector)
goto nomem;
dev->mode_config.tv_select_subconnector_property = tv_selector;
tv_subconnector =
drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
"subconnector",
drm_tv_subconnector_enum_list,
ARRAY_SIZE(drm_tv_subconnector_enum_list));
if (!tv_subconnector)
goto nomem;
dev->mode_config.tv_subconnector_property = tv_subconnector;
/*
* Other, TV specific properties: margins & TV modes.
*/
if (drm_mode_create_tv_margin_properties(dev))
goto nomem;
if (num_modes) {
dev->mode_config.legacy_tv_mode_property =
drm_property_create(dev, DRM_MODE_PROP_ENUM,
"mode", num_modes);
if (!dev->mode_config.legacy_tv_mode_property)
goto nomem;
for (i = 0; i < num_modes; i++)
drm_property_add_enum(dev->mode_config.legacy_tv_mode_property,
i, modes[i]);
}
dev->mode_config.tv_brightness_property =
drm_property_create_range(dev, 0, "brightness", 0, 100);
if (!dev->mode_config.tv_brightness_property)
goto nomem;
dev->mode_config.tv_contrast_property =
drm_property_create_range(dev, 0, "contrast", 0, 100);
if (!dev->mode_config.tv_contrast_property)
goto nomem;
dev->mode_config.tv_flicker_reduction_property =
drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
if (!dev->mode_config.tv_flicker_reduction_property)
goto nomem;
dev->mode_config.tv_overscan_property =
drm_property_create_range(dev, 0, "overscan", 0, 100);
if (!dev->mode_config.tv_overscan_property)
goto nomem;
dev->mode_config.tv_saturation_property =
drm_property_create_range(dev, 0, "saturation", 0, 100);
if (!dev->mode_config.tv_saturation_property)
goto nomem;
dev->mode_config.tv_hue_property =
drm_property_create_range(dev, 0, "hue", 0, 100);
if (!dev->mode_config.tv_hue_property)
goto nomem;
return 0;
nomem:
return -ENOMEM;
}
EXPORT_SYMBOL(drm_mode_create_tv_properties_legacy);
/**
* drm_mode_create_tv_properties - create TV specific connector properties
* @dev: DRM device
* @supported_tv_modes: Bitmask of TV modes supported (See DRM_MODE_TV_MODE_*)
*
* Called by a driver's TV initialization routine, this function creates
* the TV specific connector properties for a given device.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
int drm_mode_create_tv_properties(struct drm_device *dev,
unsigned int supported_tv_modes)
{
struct drm_prop_enum_list tv_mode_list[DRM_MODE_TV_MODE_MAX];
struct drm_property *tv_mode;
unsigned int i, len = 0;
if (dev->mode_config.tv_mode_property)
return 0;
for (i = 0; i < DRM_MODE_TV_MODE_MAX; i++) {
if (!(supported_tv_modes & BIT(i)))
continue;
tv_mode_list[len].type = i;
tv_mode_list[len].name = drm_get_tv_mode_name(i);
len++;
}
tv_mode = drm_property_create_enum(dev, 0, "TV mode",
tv_mode_list, len);
if (!tv_mode)
return -ENOMEM;
dev->mode_config.tv_mode_property = tv_mode;
return drm_mode_create_tv_properties_legacy(dev, 0, NULL);
}
EXPORT_SYMBOL(drm_mode_create_tv_properties);
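/*
 * Illustrative sketch only, not part of this file: an analog TV driver that
 * supports NTSC and PAL could create the properties and attach the "TV mode"
 * enum to its connector roughly as below. foo_tv_connector_init() is a
 * hypothetical name.
 */
static int foo_tv_connector_init(struct drm_device *dev,
				 struct drm_connector *connector)
{
	int ret;

	ret = drm_mode_create_tv_properties(dev,
					    BIT(DRM_MODE_TV_MODE_NTSC) |
					    BIT(DRM_MODE_TV_MODE_PAL));
	if (ret)
		return ret;

	/* Default to NTSC; userspace may change it through the property. */
	drm_object_attach_property(&connector->base,
				   dev->mode_config.tv_mode_property,
				   DRM_MODE_TV_MODE_NTSC);

	return 0;
}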
/**
* drm_mode_create_scaling_mode_property - create scaling mode property
* @dev: DRM device
*
* Called by a driver the first time it's needed, must be attached to desired
* connectors.
*
* Atomic drivers should use drm_connector_attach_scaling_mode_property()
* instead to correctly assign &drm_connector_state.scaling_mode
* in the atomic state.
*
* Returns: %0
*/
int drm_mode_create_scaling_mode_property(struct drm_device *dev)
{
struct drm_property *scaling_mode;
if (dev->mode_config.scaling_mode_property)
return 0;
scaling_mode =
drm_property_create_enum(dev, 0, "scaling mode",
drm_scaling_mode_enum_list,
ARRAY_SIZE(drm_scaling_mode_enum_list));
dev->mode_config.scaling_mode_property = scaling_mode;
return 0;
}
EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
/**
* DOC: Variable refresh properties
*
* Variable refresh rate capable displays can dynamically adjust their
* refresh rate by extending the duration of their vertical front porch
* until page flip or timeout occurs. This can reduce or remove stuttering
* and latency in scenarios where the page flip does not align with the
* vblank interval.
*
* An example scenario would be an application flipping at a constant rate
* of 48Hz on a 60Hz display. The page flip will frequently miss the vblank
* interval and the same contents will be displayed twice. This can be
* observed as stuttering for content with motion.
*
* If variable refresh rate was active on a display that supported a
* variable refresh range from 35Hz to 60Hz no stuttering would be observable
* for the example scenario. The minimum supported variable refresh rate of
* 35Hz is below the page flip frequency and the vertical front porch can
* be extended until the page flip occurs. The vblank interval will be
* directly aligned to the page flip rate.
*
* Not all userspace content is suitable for use with variable refresh rate.
* Large and frequent changes in vertical front porch duration may worsen
* perceived stuttering for input sensitive applications.
*
* Panel brightness will also vary with vertical front porch duration. Some
* panels may have noticeable differences in brightness between the minimum
* vertical front porch duration and the maximum vertical front porch duration.
* Large and frequent changes in vertical front porch duration may produce
* observable flickering for such panels.
*
* Userspace control for variable refresh rate is supported via properties
* on the &drm_connector and &drm_crtc objects.
*
* "vrr_capable":
* Optional &drm_connector boolean property that drivers should attach
* with drm_connector_attach_vrr_capable_property() on connectors that
* could support variable refresh rates. Drivers should update the
* property value by calling drm_connector_set_vrr_capable_property().
*
* Absence of the property should indicate absence of support.
*
* "VRR_ENABLED":
* Default &drm_crtc boolean property that notifies the driver that the
* content on the CRTC is suitable for variable refresh rate presentation.
* The driver will take this property as a hint to enable variable
* refresh rate support if the receiver supports it, ie. if the
* "vrr_capable" property is true on the &drm_connector object. The
* vertical front porch duration will be extended until page-flip or
* timeout when enabled.
*
* The minimum vertical front porch duration is defined as the vertical
* front porch duration for the current mode.
*
* The maximum vertical front porch duration is greater than or equal to
* the minimum vertical front porch duration. The duration is derived
* from the minimum supported variable refresh rate for the connector.
*
* The driver may place further restrictions within these minimum
* and maximum bounds.
*/
/**
* drm_connector_attach_vrr_capable_property - creates the
* vrr_capable property
* @connector: connector to create the vrr_capable property on.
*
* This is used by atomic drivers to add support for querying
* variable refresh rate capability for a connector.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_connector_attach_vrr_capable_property(
struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_property *prop;
if (!connector->vrr_capable_property) {
prop = drm_property_create_bool(dev, DRM_MODE_PROP_IMMUTABLE,
"vrr_capable");
if (!prop)
return -ENOMEM;
connector->vrr_capable_property = prop;
drm_object_attach_property(&connector->base, prop, 0);
}
return 0;
}
EXPORT_SYMBOL(drm_connector_attach_vrr_capable_property);
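/*
 * Illustrative sketch only, not part of this file: a driver might attach the
 * property at connector init and update it once the EDID has been parsed.
 * The monitor-range comparison is just one plausible heuristic; the foo_*
 * names are hypothetical.
 */
static int foo_connector_init_vrr(struct drm_connector *connector)
{
	return drm_connector_attach_vrr_capable_property(connector);
}

static void foo_connector_update_vrr(struct drm_connector *connector)
{
	const struct drm_monitor_range_info *range =
		&connector->display_info.monitor_range;

	drm_connector_set_vrr_capable_property(connector,
					       range->max_vfreq > range->min_vfreq);
}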
/**
* drm_connector_attach_scaling_mode_property - attach atomic scaling mode property
* @connector: connector to attach scaling mode property on.
* @scaling_mode_mask: or'ed mask of BIT(%DRM_MODE_SCALE_\*).
*
* This is used to add support for scaling mode to atomic drivers.
* The scaling mode will be set to &drm_connector_state.scaling_mode
* and can be used from &drm_connector_helper_funcs->atomic_check for validation.
*
* This is the atomic version of drm_mode_create_scaling_mode_property().
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
u32 scaling_mode_mask)
{
struct drm_device *dev = connector->dev;
struct drm_property *scaling_mode_property;
int i;
const unsigned valid_scaling_mode_mask =
(1U << ARRAY_SIZE(drm_scaling_mode_enum_list)) - 1;
if (WARN_ON(hweight32(scaling_mode_mask) < 2 ||
scaling_mode_mask & ~valid_scaling_mode_mask))
return -EINVAL;
scaling_mode_property =
drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode",
hweight32(scaling_mode_mask));
if (!scaling_mode_property)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++) {
int ret;
if (!(BIT(i) & scaling_mode_mask))
continue;
ret = drm_property_add_enum(scaling_mode_property,
drm_scaling_mode_enum_list[i].type,
drm_scaling_mode_enum_list[i].name);
if (ret) {
drm_property_destroy(dev, scaling_mode_property);
return ret;
}
}
drm_object_attach_property(&connector->base,
scaling_mode_property, 0);
connector->scaling_mode_property = scaling_mode_property;
return 0;
}
EXPORT_SYMBOL(drm_connector_attach_scaling_mode_property);
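/*
 * Illustrative sketch only, not part of this file: a panel driver exposing a
 * choice between aspect-preserving and full-screen upscaling could attach the
 * property as below (the mask must contain at least two modes).
 * foo_panel_connector_init() is a hypothetical name.
 */
static int foo_panel_connector_init(struct drm_connector *connector)
{
	return drm_connector_attach_scaling_mode_property(connector,
							  BIT(DRM_MODE_SCALE_ASPECT) |
							  BIT(DRM_MODE_SCALE_FULLSCREEN));
}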
/**
* drm_mode_create_aspect_ratio_property - create aspect ratio property
* @dev: DRM device
*
* Called by a driver the first time it's needed, must be attached to desired
* connectors.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
{
if (dev->mode_config.aspect_ratio_property)
return 0;
dev->mode_config.aspect_ratio_property =
drm_property_create_enum(dev, 0, "aspect ratio",
drm_aspect_ratio_enum_list,
ARRAY_SIZE(drm_aspect_ratio_enum_list));
if (dev->mode_config.aspect_ratio_property == NULL)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
/**
* DOC: standard connector properties
*
* Colorspace:
* This property helps select a suitable colorspace based on the sink
* capability. Modern sink devices support wider gamut like BT2020.
* This helps switch to BT2020 mode if a BT2020-encoded video stream is
* being played by the user, and likewise for any other colorspace, thereby
* giving a good visual experience to users.
*
* The expectation from userspace is that it should parse the EDID to get
* the supported colorspaces, and then use this property to switch to one of
* them. Sink-supported colorspaces should be retrieved by userspace from
* the EDID; the driver will not explicitly expose them.
*
* Basically the expectation from userspace is:
* - Set up CRTC DEGAMMA/CTM/GAMMA to convert to some sink
* colorspace
* - Set this new property to let the sink know what it
* converted the CRTC output to.
* - This property is just to inform sink what colorspace
* source is trying to drive.
*
* Because HDMI and DP have different colorspaces,
* drm_mode_create_hdmi_colorspace_property() is used for HDMI connectors and
* drm_mode_create_dp_colorspace_property() for DP connectors.
*/
static int drm_mode_create_colorspace_property(struct drm_connector *connector,
u32 supported_colorspaces)
{
struct drm_device *dev = connector->dev;
u32 colorspaces = supported_colorspaces | BIT(DRM_MODE_COLORIMETRY_DEFAULT);
struct drm_prop_enum_list enum_list[DRM_MODE_COLORIMETRY_COUNT];
int i, len;
if (connector->colorspace_property)
return 0;
if (!supported_colorspaces) {
drm_err(dev, "No supported colorspaces provided on [CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
return -EINVAL;
}
if ((supported_colorspaces & -BIT(DRM_MODE_COLORIMETRY_COUNT)) != 0) {
drm_err(dev, "Unknown colorspace provided on [CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
return -EINVAL;
}
len = 0;
for (i = 0; i < DRM_MODE_COLORIMETRY_COUNT; i++) {
if ((colorspaces & BIT(i)) == 0)
continue;
enum_list[len].type = i;
enum_list[len].name = colorspace_names[i];
len++;
}
connector->colorspace_property =
drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Colorspace",
enum_list,
len);
if (!connector->colorspace_property)
return -ENOMEM;
return 0;
}
/**
* drm_mode_create_hdmi_colorspace_property - create hdmi colorspace property
* @connector: connector to create the Colorspace property on.
* @supported_colorspaces: bitmap of supported color spaces
*
* Called by a driver the first time it's needed, must be attached to desired
* HDMI connectors.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector,
u32 supported_colorspaces)
{
u32 colorspaces;
if (supported_colorspaces)
colorspaces = supported_colorspaces & hdmi_colorspaces;
else
colorspaces = hdmi_colorspaces;
return drm_mode_create_colorspace_property(connector, colorspaces);
}
EXPORT_SYMBOL(drm_mode_create_hdmi_colorspace_property);
/**
* drm_mode_create_dp_colorspace_property - create dp colorspace property
* @connector: connector to create the Colorspace property on.
* @supported_colorspaces: bitmap of supported color spaces
*
* Called by a driver the first time it's needed, must be attached to desired
* DP connectors.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_create_dp_colorspace_property(struct drm_connector *connector,
u32 supported_colorspaces)
{
u32 colorspaces;
if (supported_colorspaces)
colorspaces = supported_colorspaces & dp_colorspaces;
else
colorspaces = dp_colorspaces;
return drm_mode_create_colorspace_property(connector, colorspaces);
}
EXPORT_SYMBOL(drm_mode_create_dp_colorspace_property);
/**
* drm_mode_create_content_type_property - create content type property
* @dev: DRM device
*
* Called by a driver the first time it's needed, must be attached to desired
* connectors.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_create_content_type_property(struct drm_device *dev)
{
if (dev->mode_config.content_type_property)
return 0;
dev->mode_config.content_type_property =
drm_property_create_enum(dev, 0, "content type",
drm_content_type_enum_list,
ARRAY_SIZE(drm_content_type_enum_list));
if (dev->mode_config.content_type_property == NULL)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL(drm_mode_create_content_type_property);
/**
* drm_mode_create_suggested_offset_properties - create suggested offset properties
* @dev: DRM device
*
* Create the suggested x/y offset property for connectors.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
{
if (dev->mode_config.suggested_x_property && dev->mode_config.suggested_y_property)
return 0;
dev->mode_config.suggested_x_property =
drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested X", 0, 0xffffffff);
dev->mode_config.suggested_y_property =
drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested Y", 0, 0xffffffff);
if (dev->mode_config.suggested_x_property == NULL ||
dev->mode_config.suggested_y_property == NULL)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
/**
* drm_connector_set_path_property - set path property on connector
* @connector: connector to set property on.
* @path: path to use for property; must not be NULL.
*
* This creates a property to expose to userspace to specify a
* connector path. This is mainly used for DisplayPort MST where
* connectors have a topology and we want to allow userspace to give
* them more meaningful names.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_connector_set_path_property(struct drm_connector *connector,
const char *path)
{
struct drm_device *dev = connector->dev;
int ret;
ret = drm_property_replace_global_blob(dev,
&connector->path_blob_ptr,
strlen(path) + 1,
path,
&connector->base,
dev->mode_config.path_property);
return ret;
}
EXPORT_SYMBOL(drm_connector_set_path_property);
/**
* drm_connector_set_tile_property - set tile property on connector
* @connector: connector to set property on.
*
* This looks up the tile information for a connector, and creates a
* property for userspace to parse if it exists. The property is of
* the form of 8 integers using ':' as a separator.
* This is used for dual port tiled displays with DisplayPort SST
* or DisplayPort MST connectors.
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_connector_set_tile_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
char tile[256];
int ret;
if (!connector->has_tile) {
ret = drm_property_replace_global_blob(dev,
&connector->tile_blob_ptr,
0,
NULL,
&connector->base,
dev->mode_config.tile_property);
return ret;
}
snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d",
connector->tile_group->id, connector->tile_is_single_monitor,
connector->num_h_tile, connector->num_v_tile,
connector->tile_h_loc, connector->tile_v_loc,
connector->tile_h_size, connector->tile_v_size);
ret = drm_property_replace_global_blob(dev,
&connector->tile_blob_ptr,
strlen(tile) + 1,
tile,
&connector->base,
dev->mode_config.tile_property);
return ret;
}
EXPORT_SYMBOL(drm_connector_set_tile_property);
/**
* drm_connector_set_link_status_property - Set link status property of a connector
* @connector: drm connector
* @link_status: new value of link status property (0: Good, 1: Bad)
*
* In the usual working scenario, this link status property will always be
* set to "GOOD". If something fails during or after a mode set, the kernel
* driver may set this link status property to "BAD". The caller then needs
* to send a hotplug uevent for userspace to re-check the valid modes through
* GET_CONNECTOR_IOCTL and retry the modeset.
*
* Note: Drivers cannot rely on userspace to support this property and
* issue a modeset. As such, they may choose to handle issues (like
* re-training a link) without userspace's intervention.
*
* The reason for adding this property is to handle link training failures, but
* it is not limited to DP or link training. For example, if we implement
* asynchronous setcrtc, this property can be used to report any failures in that.
*/
void drm_connector_set_link_status_property(struct drm_connector *connector,
uint64_t link_status)
{
struct drm_device *dev = connector->dev;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
connector->state->link_status = link_status;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
EXPORT_SYMBOL(drm_connector_set_link_status_property);
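/*
 * Illustrative sketch only, not part of this file: after a link-training
 * failure a driver might mark the link as bad and then notify userspace so it
 * can re-probe the connector and retry the modeset, as described above.
 * Assumes <drm/drm_probe_helper.h> is available; foo_link_train_failed() is a
 * hypothetical name.
 */
static void foo_link_train_failed(struct drm_connector *connector)
{
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
	/* Ask userspace to re-check the valid modes and issue a new modeset. */
	drm_kms_helper_hotplug_event(connector->dev);
}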
/**
* drm_connector_attach_max_bpc_property - attach "max bpc" property
* @connector: connector to attach max bpc property on.
* @min: The minimum bit depth supported by the connector.
* @max: The maximum bit depth supported by the connector.
*
* This is used to add support for limiting the bit depth on a connector.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
int min, int max)
{
struct drm_device *dev = connector->dev;
struct drm_property *prop;
prop = connector->max_bpc_property;
if (!prop) {
prop = drm_property_create_range(dev, 0, "max bpc", min, max);
if (!prop)
return -ENOMEM;
connector->max_bpc_property = prop;
}
drm_object_attach_property(&connector->base, prop, max);
connector->state->max_requested_bpc = max;
connector->state->max_bpc = max;
return 0;
}
EXPORT_SYMBOL(drm_connector_attach_max_bpc_property);
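/*
 * Illustrative sketch only, not part of this file: a driver whose pipeline
 * supports 8 to 12 bits per component could attach the property like this.
 * foo_connector_init_bpc() is a hypothetical name.
 */
static int foo_connector_init_bpc(struct drm_connector *connector)
{
	return drm_connector_attach_max_bpc_property(connector, 8, 12);
}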
/**
* drm_connector_attach_hdr_output_metadata_property - attach "HDR_OUTPUT_METADATA" property
* @connector: connector to attach the property on.
*
* This is used to allow the userspace to send HDR Metadata to the
* driver.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_connector_attach_hdr_output_metadata_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_property *prop = dev->mode_config.hdr_output_metadata_property;
drm_object_attach_property(&connector->base, prop, 0);
return 0;
}
EXPORT_SYMBOL(drm_connector_attach_hdr_output_metadata_property);
/**
* drm_connector_attach_colorspace_property - attach "Colorspace" property
* @connector: connector to attach the property on.
*
* This is used to allow the userspace to signal the output colorspace
* to the driver.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_connector_attach_colorspace_property(struct drm_connector *connector)
{
struct drm_property *prop = connector->colorspace_property;
drm_object_attach_property(&connector->base, prop, DRM_MODE_COLORIMETRY_DEFAULT);
return 0;
}
EXPORT_SYMBOL(drm_connector_attach_colorspace_property);
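/*
 * Illustrative sketch only, not part of this file: an HDMI driver advertising
 * BT.2020 support could create and attach the property in one go; the core
 * always adds "Default" to the enum. foo_hdmi_attach_colorspace() is a
 * hypothetical name.
 */
static int foo_hdmi_attach_colorspace(struct drm_connector *connector)
{
	int ret;

	ret = drm_mode_create_hdmi_colorspace_property(connector,
						       BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
						       BIT(DRM_MODE_COLORIMETRY_BT2020_YCC));
	if (ret)
		return ret;

	return drm_connector_attach_colorspace_property(connector);
}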
/**
* drm_connector_atomic_hdr_metadata_equal - checks if the hdr metadata changed
* @old_state: old connector state to compare
* @new_state: new connector state to compare
*
* This is used by HDR-enabled drivers to test whether the HDR metadata
* has changed between two different connector states (and thus probably
* requires a full-blown mode change).
*
* Returns:
* True if the metadata are equal, False otherwise
*/
bool drm_connector_atomic_hdr_metadata_equal(struct drm_connector_state *old_state,
struct drm_connector_state *new_state)
{
struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
if (!old_blob || !new_blob)
return old_blob == new_blob;
if (old_blob->length != new_blob->length)
return false;
return !memcmp(old_blob->data, new_blob->data, old_blob->length);
}
EXPORT_SYMBOL(drm_connector_atomic_hdr_metadata_equal);
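/*
 * Illustrative sketch only, not part of this file: a driver's connector
 * .atomic_check could force a full modeset when the HDR metadata changed,
 * e.g. because the infoframe can only be reprogrammed during a modeset on
 * that hardware. The foo_* name is hypothetical; the drm_atomic_get_*_state()
 * helpers come from <drm/drm_atomic.h>.
 */
static int foo_connector_atomic_check(struct drm_connector *connector,
				      struct drm_atomic_state *state)
{
	struct drm_connector_state *old_state =
		drm_atomic_get_old_connector_state(state, connector);
	struct drm_connector_state *new_state =
		drm_atomic_get_new_connector_state(state, connector);
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc)
		return 0;

	crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);

	if (!drm_connector_atomic_hdr_metadata_equal(old_state, new_state))
		crtc_state->mode_changed = true;

	return 0;
}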
/**
* drm_connector_set_vrr_capable_property - sets the variable refresh rate
* capable property for a connector
* @connector: drm connector
* @capable: True if the connector is variable refresh rate capable
*
* Should be used by atomic drivers to update the indicated support for
* variable refresh rate over a connector.
*/
void drm_connector_set_vrr_capable_property(
struct drm_connector *connector, bool capable)
{
if (!connector->vrr_capable_property)
return;
drm_object_property_set_value(&connector->base,
connector->vrr_capable_property,
capable);
}
EXPORT_SYMBOL(drm_connector_set_vrr_capable_property);
/**
* drm_connector_set_panel_orientation - sets the connector's panel_orientation
* @connector: connector for which to set the panel-orientation property.
* @panel_orientation: drm_panel_orientation value to set
*
* This function sets the connector's panel_orientation and attaches
* a "panel orientation" property to the connector.
*
* Calling this function on a connector where the panel_orientation has
* already been set is a no-op (e.g. the orientation has been overridden with
* a kernel commandline option).
*
* It is allowed to call this function with a panel_orientation of
* DRM_MODE_PANEL_ORIENTATION_UNKNOWN, in which case it is a no-op.
*
* This function should not be called by the panel driver after the DRM
* device has been registered (i.e. after drm_dev_register() has been called).
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_connector_set_panel_orientation(
struct drm_connector *connector,
enum drm_panel_orientation panel_orientation)
{
struct drm_device *dev = connector->dev;
struct drm_display_info *info = &connector->display_info;
struct drm_property *prop;
/* Already set? */
if (info->panel_orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return 0;
/* Don't attach the property if the orientation is unknown */
if (panel_orientation == DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return 0;
info->panel_orientation = panel_orientation;
prop = dev->mode_config.panel_orientation_property;
if (!prop) {
prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
"panel orientation",
drm_panel_orientation_enum_list,
ARRAY_SIZE(drm_panel_orientation_enum_list));
if (!prop)
return -ENOMEM;
dev->mode_config.panel_orientation_property = prop;
}
drm_object_attach_property(&connector->base, prop,
info->panel_orientation);
return 0;
}
EXPORT_SYMBOL(drm_connector_set_panel_orientation);
/**
* drm_connector_set_panel_orientation_with_quirk - set the
* connector's panel_orientation after checking for quirks
* @connector: connector for which to init the panel-orientation property.
* @panel_orientation: drm_panel_orientation value to set
* @width: width in pixels of the panel, used for panel quirk detection
* @height: height in pixels of the panel, used for panel quirk detection
*
* Like drm_connector_set_panel_orientation(), but with a check for platform
* specific (e.g. DMI based) quirks overriding the passed in panel_orientation.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_connector_set_panel_orientation_with_quirk(
struct drm_connector *connector,
enum drm_panel_orientation panel_orientation,
int width, int height)
{
int orientation_quirk;
orientation_quirk = drm_get_panel_orientation_quirk(width, height);
if (orientation_quirk != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
panel_orientation = orientation_quirk;
return drm_connector_set_panel_orientation(connector,
panel_orientation);
}
EXPORT_SYMBOL(drm_connector_set_panel_orientation_with_quirk);
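/*
 * Illustrative sketch only, not part of this file: a driver probing a fixed
 * panel mode might apply DMI-based quirks keyed on the native resolution,
 * passing UNKNOWN when the hardware itself reports no orientation.
 * foo_setup_panel_orientation() is a hypothetical name.
 */
static void foo_setup_panel_orientation(struct drm_connector *connector,
					const struct drm_display_mode *fixed_mode)
{
	drm_connector_set_panel_orientation_with_quirk(connector,
						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
						       fixed_mode->hdisplay,
						       fixed_mode->vdisplay);
}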
/**
* drm_connector_set_orientation_from_panel -
* set the connector's panel_orientation from panel's callback.
* @connector: connector for which to init the panel-orientation property.
* @panel: panel that can provide orientation information.
*
* DRM drivers should call this function before drm_dev_register().
* Orientation is obtained from panel's .get_orientation() callback.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_connector_set_orientation_from_panel(
struct drm_connector *connector,
struct drm_panel *panel)
{
enum drm_panel_orientation orientation;
if (panel && panel->funcs && panel->funcs->get_orientation)
orientation = panel->funcs->get_orientation(panel);
else
orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
return drm_connector_set_panel_orientation(connector, orientation);
}
EXPORT_SYMBOL(drm_connector_set_orientation_from_panel);
static const struct drm_prop_enum_list privacy_screen_enum[] = {
{ PRIVACY_SCREEN_DISABLED, "Disabled" },
{ PRIVACY_SCREEN_ENABLED, "Enabled" },
{ PRIVACY_SCREEN_DISABLED_LOCKED, "Disabled-locked" },
{ PRIVACY_SCREEN_ENABLED_LOCKED, "Enabled-locked" },
};
/**
* drm_connector_create_privacy_screen_properties - create the drm connector's
* privacy-screen properties.
* @connector: connector for which to create the privacy-screen properties
*
* This function creates the "privacy-screen sw-state" and "privacy-screen
* hw-state" properties for the connector. They are not attached.
*/
void
drm_connector_create_privacy_screen_properties(struct drm_connector *connector)
{
if (connector->privacy_screen_sw_state_property)
return;
/* Note sw-state only supports the first 2 values of the enum */
connector->privacy_screen_sw_state_property =
drm_property_create_enum(connector->dev, DRM_MODE_PROP_ENUM,
"privacy-screen sw-state",
privacy_screen_enum, 2);
connector->privacy_screen_hw_state_property =
drm_property_create_enum(connector->dev,
DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ENUM,
"privacy-screen hw-state",
privacy_screen_enum,
ARRAY_SIZE(privacy_screen_enum));
}
EXPORT_SYMBOL(drm_connector_create_privacy_screen_properties);
/**
* drm_connector_attach_privacy_screen_properties - attach the drm connector's
* privacy-screen properties.
* @connector: connector on which to attach the privacy-screen properties
*
* This function attaches the "privacy-screen sw-state" and "privacy-screen
* hw-state" properties to the connector. The initial state of both is set
* to "Disabled".
*/
void
drm_connector_attach_privacy_screen_properties(struct drm_connector *connector)
{
if (!connector->privacy_screen_sw_state_property)
return;
drm_object_attach_property(&connector->base,
connector->privacy_screen_sw_state_property,
PRIVACY_SCREEN_DISABLED);
drm_object_attach_property(&connector->base,
connector->privacy_screen_hw_state_property,
PRIVACY_SCREEN_DISABLED);
}
EXPORT_SYMBOL(drm_connector_attach_privacy_screen_properties);
static void drm_connector_update_privacy_screen_properties(
struct drm_connector *connector, bool set_sw_state)
{
enum drm_privacy_screen_status sw_state, hw_state;
drm_privacy_screen_get_state(connector->privacy_screen,
&sw_state, &hw_state);
if (set_sw_state)
connector->state->privacy_screen_sw_state = sw_state;
drm_object_property_set_value(&connector->base,
connector->privacy_screen_hw_state_property, hw_state);
}
static int drm_connector_privacy_screen_notifier(
struct notifier_block *nb, unsigned long action, void *data)
{
struct drm_connector *connector =
container_of(nb, struct drm_connector, privacy_screen_notifier);
struct drm_device *dev = connector->dev;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
drm_connector_update_privacy_screen_properties(connector, true);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
drm_sysfs_connector_property_event(connector,
connector->privacy_screen_sw_state_property);
drm_sysfs_connector_property_event(connector,
connector->privacy_screen_hw_state_property);
return NOTIFY_DONE;
}
/**
* drm_connector_attach_privacy_screen_provider - attach a privacy-screen to
* the connector
* @connector: connector to attach the privacy-screen to
* @priv: drm_privacy_screen to attach
*
* Create and attach the standard privacy-screen properties and register
* a generic notifier for generating sysfs-connector-status-events
* on external changes to the privacy-screen status.
* This function takes ownership of the passed in drm_privacy_screen and will
* call drm_privacy_screen_put() on it when the connector is destroyed.
*/
void drm_connector_attach_privacy_screen_provider(
struct drm_connector *connector, struct drm_privacy_screen *priv)
{
connector->privacy_screen = priv;
connector->privacy_screen_notifier.notifier_call =
drm_connector_privacy_screen_notifier;
drm_connector_create_privacy_screen_properties(connector);
drm_connector_update_privacy_screen_properties(connector, true);
drm_connector_attach_privacy_screen_properties(connector);
}
EXPORT_SYMBOL(drm_connector_attach_privacy_screen_provider);
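/*
 * Illustrative sketch only, not part of this file: during connector creation
 * a driver may look up a registered privacy-screen provider for its device
 * and hand it over to the connector, which then owns the reference.
 * drm_privacy_screen_get() comes from the privacy-screen consumer API;
 * foo_attach_privacy_screen() is a hypothetical name and error handling is
 * reduced to the bare minimum.
 */
static void foo_attach_privacy_screen(struct drm_connector *connector)
{
	struct drm_privacy_screen *privacy_screen;

	privacy_screen = drm_privacy_screen_get(connector->dev->dev, NULL);
	if (IS_ERR(privacy_screen))
		return; /* e.g. -ENODEV: no privacy screen on this device */

	drm_connector_attach_privacy_screen_provider(connector, privacy_screen);
}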
/**
* drm_connector_update_privacy_screen - update connector's privacy-screen sw-state
* @connector_state: connector-state to update the privacy-screen for
*
* This function calls drm_privacy_screen_set_sw_state() on the connector's
* privacy-screen.
*
* If the connector has no privacy-screen, then this is a no-op.
*/
void drm_connector_update_privacy_screen(const struct drm_connector_state *connector_state)
{
struct drm_connector *connector = connector_state->connector;
int ret;
if (!connector->privacy_screen)
return;
ret = drm_privacy_screen_set_sw_state(connector->privacy_screen,
connector_state->privacy_screen_sw_state);
if (ret) {
drm_err(connector->dev, "Error updating privacy-screen sw_state\n");
return;
}
/* The hw_state property value may have changed, update it. */
drm_connector_update_privacy_screen_properties(connector, false);
}
EXPORT_SYMBOL(drm_connector_update_privacy_screen);
int drm_connector_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
{
int ret = -EINVAL;
struct drm_connector *connector = obj_to_connector(obj);
/* Do DPMS ourselves */
if (property == connector->dev->mode_config.dpms_property) {
ret = (*connector->funcs->dpms)(connector, (int)value);
} else if (connector->funcs->set_property)
ret = connector->funcs->set_property(connector, property, value);
if (!ret)
drm_object_property_set_value(&connector->base, property, value);
return ret;
}
int drm_connector_property_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_connector_set_property *conn_set_prop = data;
struct drm_mode_obj_set_property obj_set_prop = {
.value = conn_set_prop->value,
.prop_id = conn_set_prop->prop_id,
.obj_id = conn_set_prop->connector_id,
.obj_type = DRM_MODE_OBJECT_CONNECTOR
};
/* It does all the locking and checking we need */
return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
}
static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector)
{
/* For atomic drivers only state objects are synchronously updated and
* protected by modeset locks, so check those first.
*/
if (connector->state)
return connector->state->best_encoder;
return connector->encoder;
}
static bool
drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
const struct list_head *modes,
const struct drm_file *file_priv)
{
/*
* If user-space hasn't configured the driver to expose the stereo 3D
* modes, don't expose them.
*/
if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
return false;
/*
* If user-space hasn't configured the driver to expose the modes
* with aspect-ratio, don't expose them. However if such a mode
* is unique, let it be exposed, but reset the aspect-ratio flags
* while preparing the list of user-modes.
*/
if (!file_priv->aspect_ratio_allowed) {
const struct drm_display_mode *mode_itr;
list_for_each_entry(mode_itr, modes, head) {
if (mode_itr->expose_to_userspace &&
drm_mode_match(mode_itr, mode,
DRM_MODE_MATCH_TIMINGS |
DRM_MODE_MATCH_CLOCK |
DRM_MODE_MATCH_FLAGS |
DRM_MODE_MATCH_3D_FLAGS))
return false;
}
}
return true;
}
int drm_mode_getconnector(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_connector *out_resp = data;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *mode;
int mode_count = 0;
int encoders_count = 0;
int ret = 0;
int copied = 0;
struct drm_mode_modeinfo u_mode;
struct drm_mode_modeinfo __user *mode_ptr;
uint32_t __user *encoder_ptr;
bool is_current_master;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
connector = drm_connector_lookup(dev, file_priv, out_resp->connector_id);
if (!connector)
return -ENOENT;
encoders_count = hweight32(connector->possible_encoders);
if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
copied = 0;
encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
drm_connector_for_each_possible_encoder(connector, encoder) {
if (put_user(encoder->base.id, encoder_ptr + copied)) {
ret = -EFAULT;
goto out;
}
copied++;
}
}
out_resp->count_encoders = encoders_count;
out_resp->connector_id = connector->base.id;
out_resp->connector_type = connector->connector_type;
out_resp->connector_type_id = connector->connector_type_id;
is_current_master = drm_is_current_master(file_priv);
mutex_lock(&dev->mode_config.mutex);
if (out_resp->count_modes == 0) {
if (is_current_master)
connector->funcs->fill_modes(connector,
dev->mode_config.max_width,
dev->mode_config.max_height);
else
drm_dbg_kms(dev, "User-space requested a forced probe on [CONNECTOR:%d:%s] but is not the DRM master, demoting to read-only probe",
connector->base.id, connector->name);
}
out_resp->mm_width = connector->display_info.width_mm;
out_resp->mm_height = connector->display_info.height_mm;
out_resp->subpixel = connector->display_info.subpixel_order;
out_resp->connection = connector->status;
/* delayed so we get modes regardless of pre-fill_modes state */
list_for_each_entry(mode, &connector->modes, head) {
WARN_ON(mode->expose_to_userspace);
if (drm_mode_expose_to_userspace(mode, &connector->modes,
file_priv)) {
mode->expose_to_userspace = true;
mode_count++;
}
}
/*
* This ioctl is called twice, once to determine how much space is
* needed, and the 2nd time to fill it.
*/
if ((out_resp->count_modes >= mode_count) && mode_count) {
copied = 0;
mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
list_for_each_entry(mode, &connector->modes, head) {
if (!mode->expose_to_userspace)
continue;
/* Clear the tag for the next time around */
mode->expose_to_userspace = false;
drm_mode_convert_to_umode(&u_mode, mode);
/*
* Reset aspect ratio flags of user-mode, if modes with
* aspect-ratio are not supported.
*/
if (!file_priv->aspect_ratio_allowed)
u_mode.flags &= ~DRM_MODE_FLAG_PIC_AR_MASK;
if (copy_to_user(mode_ptr + copied,
&u_mode, sizeof(u_mode))) {
ret = -EFAULT;
/*
* Clear the tag for the rest of
* the modes for the next time around.
*/
list_for_each_entry_continue(mode, &connector->modes, head)
mode->expose_to_userspace = false;
mutex_unlock(&dev->mode_config.mutex);
goto out;
}
copied++;
}
} else {
/* Clear the tag for the next time around */
list_for_each_entry(mode, &connector->modes, head)
mode->expose_to_userspace = false;
}
out_resp->count_modes = mode_count;
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
encoder = drm_connector_get_encoder(connector);
if (encoder)
out_resp->encoder_id = encoder->base.id;
else
out_resp->encoder_id = 0;
/* Only grab properties after probing, to make sure EDID and other
* properties reflect the latest status.
*/
ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
(uint32_t __user *)(unsigned long)(out_resp->props_ptr),
(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
&out_resp->count_props);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
out:
drm_connector_put(connector);
return ret;
}
/**
* drm_connector_find_by_fwnode - Find a connector based on the associated fwnode
* @fwnode: fwnode for which to find the matching drm_connector
*
* This function looks up a drm_connector based on its associated fwnode. When
* a connector is found a reference to the connector is returned. The caller must
* call drm_connector_put() to release this reference when it is done with the
* connector.
*
* Returns: A reference to the found connector or an ERR_PTR().
*/
struct drm_connector *drm_connector_find_by_fwnode(struct fwnode_handle *fwnode)
{
struct drm_connector *connector, *found = ERR_PTR(-ENODEV);
if (!fwnode)
return ERR_PTR(-ENODEV);
mutex_lock(&connector_list_lock);
list_for_each_entry(connector, &connector_list, global_connector_list_entry) {
if (connector->fwnode == fwnode ||
(connector->fwnode && connector->fwnode->secondary == fwnode)) {
drm_connector_get(connector);
found = connector;
break;
}
}
mutex_unlock(&connector_list_lock);
return found;
}
/**
* drm_connector_oob_hotplug_event - Report out-of-band hotplug event to connector
* @connector_fwnode: fwnode_handle to report the event on
*
* On some hardware a hotplug event notification may come from outside the display
* driver / device. An example of this is some USB Type-C setups where the hardware
* muxes the DisplayPort data and aux-lines but does not pass the altmode HPD
* status bit to the GPU's DP HPD pin.
*
* This function can be used to report these out-of-band events after obtaining
* a drm_connector reference through calling drm_connector_find_by_fwnode().
*/
void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode)
{
struct drm_connector *connector;
connector = drm_connector_find_by_fwnode(connector_fwnode);
if (IS_ERR(connector))
return;
if (connector->funcs->oob_hotplug_event)
connector->funcs->oob_hotplug_event(connector);
drm_connector_put(connector);
}
EXPORT_SYMBOL(drm_connector_oob_hotplug_event);
/**
* DOC: Tile group
*
* Tile groups are used to represent tiled monitors with a unique integer
* identifier. Tiled monitors using DisplayID v1.3 have a unique 8-byte handle;
* we store this in a tile group, so we have a common identifier for all tiles
* in a monitor group. The property is called "TILE". Drivers can manage tile
* groups using drm_mode_create_tile_group(), drm_mode_put_tile_group() and
* drm_mode_get_tile_group(). But this is only needed for internal panels where
* the tile group information is exposed through a non-standard way.
*/
static void drm_tile_group_free(struct kref *kref)
{
struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount);
struct drm_device *dev = tg->dev;
mutex_lock(&dev->mode_config.idr_mutex);
idr_remove(&dev->mode_config.tile_idr, tg->id);
mutex_unlock(&dev->mode_config.idr_mutex);
kfree(tg);
}
/**
* drm_mode_put_tile_group - drop a reference to a tile group.
* @dev: DRM device
* @tg: tile group to drop reference to.
*
* Drop a reference to the tile group and free it when the refcount reaches 0.
*/
void drm_mode_put_tile_group(struct drm_device *dev,
struct drm_tile_group *tg)
{
kref_put(&tg->refcount, drm_tile_group_free);
}
EXPORT_SYMBOL(drm_mode_put_tile_group);
/**
* drm_mode_get_tile_group - get a reference to an existing tile group
* @dev: DRM device
* @topology: 8-byte handle, unique per monitor.
*
* Use the unique bytes to get a reference to an existing tile group.
*
* RETURNS:
* tile group or NULL if not found.
*/
struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
const char topology[8])
{
struct drm_tile_group *tg;
int id;
mutex_lock(&dev->mode_config.idr_mutex);
idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) {
if (!memcmp(tg->group_data, topology, 8)) {
if (!kref_get_unless_zero(&tg->refcount))
tg = NULL;
mutex_unlock(&dev->mode_config.idr_mutex);
return tg;
}
}
mutex_unlock(&dev->mode_config.idr_mutex);
return NULL;
}
EXPORT_SYMBOL(drm_mode_get_tile_group);
/**
* drm_mode_create_tile_group - create a tile group from a displayid description
* @dev: DRM device
 * @topology: 8-byte unique identifier of the monitor.
*
* Create a tile group for the unique monitor, and get a unique
* identifier for the tile group.
*
* RETURNS:
* new tile group or NULL.
*/
struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
const char topology[8])
{
struct drm_tile_group *tg;
int ret;
tg = kzalloc(sizeof(*tg), GFP_KERNEL);
if (!tg)
return NULL;
kref_init(&tg->refcount);
memcpy(tg->group_data, topology, 8);
tg->dev = dev;
mutex_lock(&dev->mode_config.idr_mutex);
ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL);
if (ret >= 0) {
tg->id = ret;
} else {
kfree(tg);
tg = NULL;
}
mutex_unlock(&dev->mode_config.idr_mutex);
return tg;
}
EXPORT_SYMBOL(drm_mode_create_tile_group);
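/*
 * Illustrative usage sketch (not part of the original file): the usual
 * get-or-create pattern for a DisplayID tile topology. The 8-byte topology
 * handle would come from the monitor's DisplayID block; foo_* is made up and
 * error handling is simplified.
 */
static struct drm_tile_group *foo_lookup_tile_group(struct drm_device *dev,
						    const char topology[8])
{
	struct drm_tile_group *tg;
	tg = drm_mode_get_tile_group(dev, topology);	/* takes a reference */
	if (!tg)
		tg = drm_mode_create_tile_group(dev, topology);
	/* the caller drops the reference with drm_mode_put_tile_group() */
	return tg;
}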
| linux-master | drivers/gpu/drm/drm_connector.c |
// SPDX-License-Identifier: MIT
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_fbdev_generic.h>
/* @user: 1=userspace, 0=fbcon */
static int drm_fbdev_generic_fb_open(struct fb_info *info, int user)
{
struct drm_fb_helper *fb_helper = info->par;
/* No need to take a ref for fbcon because it unbinds on unregister */
if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
return -ENODEV;
return 0;
}
static int drm_fbdev_generic_fb_release(struct fb_info *info, int user)
{
struct drm_fb_helper *fb_helper = info->par;
if (user)
module_put(fb_helper->dev->driver->fops->owner);
return 0;
}
FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(drm_fbdev_generic,
drm_fb_helper_damage_range,
drm_fb_helper_damage_area);
static void drm_fbdev_generic_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
void *shadow = info->screen_buffer;
if (!fb_helper->dev)
return;
fb_deferred_io_cleanup(info);
drm_fb_helper_fini(fb_helper);
vfree(shadow);
drm_client_framebuffer_delete(fb_helper->buffer);
drm_client_release(&fb_helper->client);
drm_fb_helper_unprepare(fb_helper);
kfree(fb_helper);
}
static const struct fb_ops drm_fbdev_generic_fb_ops = {
.owner = THIS_MODULE,
.fb_open = drm_fbdev_generic_fb_open,
.fb_release = drm_fbdev_generic_fb_release,
FB_DEFAULT_DEFERRED_OPS(drm_fbdev_generic),
DRM_FB_HELPER_DEFAULT_OPS,
.fb_destroy = drm_fbdev_generic_fb_destroy,
};
/*
* This function uses the client API to create a framebuffer backed by a dumb buffer.
*/
static int drm_fbdev_generic_helper_fb_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
struct drm_client_buffer *buffer;
struct fb_info *info;
size_t screen_size;
void *screen_buffer;
u32 format;
int ret;
drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
sizes->surface_width, sizes->surface_height,
sizes->surface_bpp);
format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
buffer = drm_client_framebuffer_create(client, sizes->surface_width,
sizes->surface_height, format);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
fb_helper->buffer = buffer;
fb_helper->fb = buffer->fb;
screen_size = buffer->gem->size;
screen_buffer = vzalloc(screen_size);
if (!screen_buffer) {
ret = -ENOMEM;
goto err_drm_client_framebuffer_delete;
}
info = drm_fb_helper_alloc_info(fb_helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto err_vfree;
}
drm_fb_helper_fill_info(info, fb_helper, sizes);
info->fbops = &drm_fbdev_generic_fb_ops;
/* screen */
info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
info->screen_buffer = screen_buffer;
info->fix.smem_start = page_to_phys(vmalloc_to_page(info->screen_buffer));
info->fix.smem_len = screen_size;
/* deferred I/O */
fb_helper->fbdefio.delay = HZ / 20;
fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
info->fbdefio = &fb_helper->fbdefio;
ret = fb_deferred_io_init(info);
if (ret)
goto err_drm_fb_helper_release_info;
return 0;
err_drm_fb_helper_release_info:
drm_fb_helper_release_info(fb_helper);
err_vfree:
vfree(screen_buffer);
err_drm_client_framebuffer_delete:
fb_helper->fb = NULL;
fb_helper->buffer = NULL;
drm_client_framebuffer_delete(buffer);
return ret;
}
static void drm_fbdev_generic_damage_blit_real(struct drm_fb_helper *fb_helper,
struct drm_clip_rect *clip,
struct iosys_map *dst)
{
struct drm_framebuffer *fb = fb_helper->fb;
size_t offset = clip->y1 * fb->pitches[0];
size_t len = clip->x2 - clip->x1;
unsigned int y;
void *src;
switch (drm_format_info_bpp(fb->format, 0)) {
case 1:
offset += clip->x1 / 8;
len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
break;
case 2:
offset += clip->x1 / 4;
len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
break;
case 4:
offset += clip->x1 / 2;
len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
break;
default:
offset += clip->x1 * fb->format->cpp[0];
len *= fb->format->cpp[0];
break;
}
src = fb_helper->info->screen_buffer + offset;
iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
for (y = clip->y1; y < clip->y2; y++) {
iosys_map_memcpy_to(dst, 0, src, len);
iosys_map_incr(dst, fb->pitches[0]);
src += fb->pitches[0];
}
}
static int drm_fbdev_generic_damage_blit(struct drm_fb_helper *fb_helper,
struct drm_clip_rect *clip)
{
struct drm_client_buffer *buffer = fb_helper->buffer;
struct iosys_map map, dst;
int ret;
/*
* We have to pin the client buffer to its current location while
* flushing the shadow buffer. In the general case, concurrent
* modesetting operations could try to move the buffer and would
* fail. The modeset has to be serialized by acquiring the reservation
* object of the underlying BO here.
*
* For fbdev emulation, we only have to protect against fbdev modeset
* operations. Nothing else will involve the client buffer's BO. So it
* is sufficient to acquire struct drm_fb_helper.lock here.
*/
mutex_lock(&fb_helper->lock);
ret = drm_client_buffer_vmap(buffer, &map);
if (ret)
goto out;
dst = map;
drm_fbdev_generic_damage_blit_real(fb_helper, clip, &dst);
drm_client_buffer_vunmap(buffer);
out:
mutex_unlock(&fb_helper->lock);
return ret;
}
static int drm_fbdev_generic_helper_fb_dirty(struct drm_fb_helper *helper,
struct drm_clip_rect *clip)
{
struct drm_device *dev = helper->dev;
int ret;
/* Call damage handlers only if necessary */
if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
return 0;
ret = drm_fbdev_generic_damage_blit(helper, clip);
if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
return ret;
if (helper->fb->funcs->dirty) {
ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
return ret;
}
return 0;
}
static const struct drm_fb_helper_funcs drm_fbdev_generic_helper_funcs = {
.fb_probe = drm_fbdev_generic_helper_fb_probe,
.fb_dirty = drm_fbdev_generic_helper_fb_dirty,
};
static void drm_fbdev_generic_client_unregister(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
if (fb_helper->info) {
drm_fb_helper_unregister_info(fb_helper);
} else {
drm_client_release(&fb_helper->client);
drm_fb_helper_unprepare(fb_helper);
kfree(fb_helper);
}
}
static int drm_fbdev_generic_client_restore(struct drm_client_dev *client)
{
drm_fb_helper_lastclose(client->dev);
return 0;
}
static int drm_fbdev_generic_client_hotplug(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
struct drm_device *dev = client->dev;
int ret;
if (dev->fb_helper)
return drm_fb_helper_hotplug_event(dev->fb_helper);
ret = drm_fb_helper_init(dev, fb_helper);
if (ret)
goto err_drm_err;
if (!drm_drv_uses_atomic_modeset(dev))
drm_helper_disable_unused_functions(dev);
ret = drm_fb_helper_initial_config(fb_helper);
if (ret)
goto err_drm_fb_helper_fini;
return 0;
err_drm_fb_helper_fini:
drm_fb_helper_fini(fb_helper);
err_drm_err:
drm_err(dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);
return ret;
}
static const struct drm_client_funcs drm_fbdev_generic_client_funcs = {
.owner = THIS_MODULE,
.unregister = drm_fbdev_generic_client_unregister,
.restore = drm_fbdev_generic_client_restore,
.hotplug = drm_fbdev_generic_client_hotplug,
};
/**
* drm_fbdev_generic_setup() - Setup generic fbdev emulation
* @dev: DRM device
* @preferred_bpp: Preferred bits per pixel for the device.
*
 * This function sets up generic fbdev emulation for drivers that support
* dumb buffers with a virtual address and that can be mmap'ed.
* drm_fbdev_generic_setup() shall be called after the DRM driver registered
* the new DRM device with drm_dev_register().
*
* Restore, hotplug events and teardown are all taken care of. Drivers that do
* suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
* Simple drivers might use drm_mode_config_helper_suspend().
*
* In order to provide fixed mmap-able memory ranges, generic fbdev emulation
* uses a shadow buffer in system memory. The implementation blits the shadow
 * fbdev buffer onto the real buffer at regular intervals.
*
* This function is safe to call even when there are no connectors present.
* Setup will be retried on the next hotplug event.
*
* The fbdev is destroyed by drm_dev_unregister().
*/
void drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
{
struct drm_fb_helper *fb_helper;
int ret;
drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
if (!fb_helper)
return;
drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_generic_helper_funcs);
ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_generic_client_funcs);
if (ret) {
drm_err(dev, "Failed to register client: %d\n", ret);
goto err_drm_client_init;
}
drm_client_register(&fb_helper->client);
return;
err_drm_client_init:
drm_fb_helper_unprepare(fb_helper);
kfree(fb_helper);
return;
}
EXPORT_SYMBOL(drm_fbdev_generic_setup);
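/*
 * Illustrative usage sketch (not part of the original file): how a simple
 * driver would typically enable the generic fbdev emulation right after
 * registering its DRM device. foo_* is made up; error handling is minimal.
 */
static int foo_register_and_setup_fbdev(struct drm_device *drm)
{
	int ret;
	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;
	/* prefer 32 bpp; a driver may also pass 0 to use the default */
	drm_fbdev_generic_setup(drm, 32);
	return 0;
}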
| linux-master | drivers/gpu/drm/drm_fbdev_generic.c |
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
/*
* Simple open hash tab implementation.
*
* Authors:
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include <linux/hash.h>
#include <linux/mm.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <drm/drm_print.h>
#include "drm_legacy.h"
int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
unsigned int size = 1 << order;
ht->order = order;
ht->table = NULL;
if (size <= PAGE_SIZE / sizeof(*ht->table))
ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
else
ht->table = vzalloc(array_size(size, sizeof(*ht->table)));
if (!ht->table) {
DRM_ERROR("Out of memory for hash table\n");
return -ENOMEM;
}
return 0;
}
void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
unsigned int hashed_key;
int count = 0;
hashed_key = hash_long(key, ht->order);
DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
h_list = &ht->table[hashed_key];
hlist_for_each_entry(entry, h_list, head)
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
}
static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
unsigned long key)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
unsigned int hashed_key;
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
hlist_for_each_entry(entry, h_list, head) {
if (entry->key == key)
return &entry->head;
if (entry->key > key)
break;
}
return NULL;
}
static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
unsigned long key)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
unsigned int hashed_key;
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
hlist_for_each_entry_rcu(entry, h_list, head) {
if (entry->key == key)
return &entry->head;
if (entry->key > key)
break;
}
return NULL;
}
int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
struct hlist_node *parent;
unsigned int hashed_key;
unsigned long key = item->key;
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
parent = NULL;
hlist_for_each_entry(entry, h_list, head) {
if (entry->key == key)
return -EINVAL;
if (entry->key > key)
break;
parent = &entry->head;
}
if (parent) {
hlist_add_behind_rcu(&item->head, parent);
} else {
hlist_add_head_rcu(&item->head, h_list);
}
return 0;
}
/*
* Just insert an item and return any "bits" bit key that hasn't been
* used before.
*/
int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
unsigned long seed, int bits, int shift,
unsigned long add)
{
int ret;
unsigned long mask = (1UL << bits) - 1;
unsigned long first, unshifted_key;
unshifted_key = hash_long(seed, bits);
first = unshifted_key;
do {
item->key = (unshifted_key << shift) + add;
ret = drm_ht_insert_item(ht, item);
if (ret)
unshifted_key = (unshifted_key + 1) & mask;
	} while (ret && (unshifted_key != first));
if (ret) {
DRM_ERROR("Available key bit space exhausted\n");
return -EINVAL;
}
return 0;
}
int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
struct drm_hash_item **item)
{
struct hlist_node *list;
list = drm_ht_find_key_rcu(ht, key);
if (!list)
return -EINVAL;
*item = hlist_entry(list, struct drm_hash_item, head);
return 0;
}
int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
{
struct hlist_node *list;
list = drm_ht_find_key(ht, key);
if (list) {
hlist_del_init_rcu(list);
return 0;
}
return -EINVAL;
}
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
hlist_del_init_rcu(&item->head);
return 0;
}
void drm_ht_remove(struct drm_open_hash *ht)
{
if (ht->table) {
kvfree(ht->table);
ht->table = NULL;
}
}
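/*
 * Illustrative usage sketch (not part of the original file): embedding a
 * struct drm_hash_item in a private object and exercising the open hash
 * table API above. foo_* names are made up; error handling is simplified.
 */
struct foo_object {
	struct drm_hash_item hash;
	void *data;
};
static int foo_hashtab_example(void)
{
	struct drm_open_hash ht;
	struct foo_object obj = { .hash.key = 0x1234 };
	struct drm_hash_item *found;
	int ret;
	ret = drm_ht_create(&ht, 10);		/* 2^10 buckets */
	if (ret)
		return ret;
	ret = drm_ht_insert_item(&ht, &obj.hash);
	if (!ret && !drm_ht_find_item(&ht, 0x1234, &found)) {
		struct foo_object *o = container_of(found, struct foo_object, hash);
		drm_ht_remove_item(&ht, &o->hash);
	}
	drm_ht_remove(&ht);
	return ret;
}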
| linux-master | drivers/gpu/drm/drm_hashtab.c |
/*
* Copyright © 1997-2003 by The XFree86 Project, Inc.
* Copyright © 2007 Dave Airlie
* Copyright © 2007-2008 Intel Corporation
* Jesse Barnes <[email protected]>
* Copyright 2005-2006 Luc Verhaegen
* Copyright (c) 2001, Andy Ritger [email protected]
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Except as contained in this notice, the name of the copyright holder(s)
* and author(s) shall not be used in advertising or otherwise to promote
* the sale, use or other dealings in this Software without prior written
* authorization from the copyright holder(s) and author(s).
*/
#include <linux/ctype.h>
#include <linux/export.h>
#include <linux/fb.h> /* for KHZ2PICOS() */
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/of.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_edid.h>
#include <drm/drm_modes.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
/**
* drm_mode_debug_printmodeline - print a mode to dmesg
* @mode: mode to print
*
* Describe @mode using DRM_DEBUG.
*/
void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
{
DRM_DEBUG_KMS("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}
EXPORT_SYMBOL(drm_mode_debug_printmodeline);
/**
* drm_mode_create - create a new display mode
* @dev: DRM device
*
 * Create a new, cleared drm_display_mode with kzalloc and return it.
*
* Returns:
* Pointer to new mode on success, NULL on error.
*/
struct drm_display_mode *drm_mode_create(struct drm_device *dev)
{
struct drm_display_mode *nmode;
nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
if (!nmode)
return NULL;
return nmode;
}
EXPORT_SYMBOL(drm_mode_create);
/**
* drm_mode_destroy - remove a mode
* @dev: DRM device
* @mode: mode to remove
*
 * Free the @mode structure itself using kfree().
*/
void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
{
if (!mode)
return;
kfree(mode);
}
EXPORT_SYMBOL(drm_mode_destroy);
/**
* drm_mode_probed_add - add a mode to a connector's probed_mode list
 * @connector: connector the new mode is added to
* @mode: mode data
*
* Add @mode to @connector's probed_mode list for later use. This list should
* then in a second step get filtered and all the modes actually supported by
* the hardware moved to the @connector's modes list.
*/
void drm_mode_probed_add(struct drm_connector *connector,
struct drm_display_mode *mode)
{
WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex));
list_add_tail(&mode->head, &connector->probed_modes);
}
EXPORT_SYMBOL(drm_mode_probed_add);
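/*
 * Illustrative usage sketch (not part of the original file): a typical
 * &drm_connector_helper_funcs.get_modes implementation that duplicates a
 * fixed mode and adds it to the probed list. foo_fixed_mode and
 * foo_connector_get_modes() are made-up names; the mode values are a
 * placeholder panel timing.
 */
static const struct drm_display_mode foo_fixed_mode = {
	DRM_SIMPLE_MODE(800, 480, 154, 86),
};
static int foo_connector_get_modes(struct drm_connector *connector)
{
	struct drm_display_mode *mode;
	mode = drm_mode_duplicate(connector->dev, &foo_fixed_mode);
	if (!mode)
		return 0;
	mode->type |= DRM_MODE_TYPE_PREFERRED;
	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);
	return 1;
}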
enum drm_mode_analog {
DRM_MODE_ANALOG_NTSC, /* 525 lines, 60Hz */
DRM_MODE_ANALOG_PAL, /* 625 lines, 50Hz */
};
/*
* The timings come from:
* - https://web.archive.org/web/20220406232708/http://www.kolumbus.fi/pami1/video/pal_ntsc.html
* - https://web.archive.org/web/20220406124914/http://martin.hinner.info/vga/pal.html
* - https://web.archive.org/web/20220609202433/http://www.batsocks.co.uk/readme/video_timing.htm
*/
#define NTSC_LINE_DURATION_NS 63556U
#define NTSC_LINES_NUMBER 525
#define NTSC_HBLK_DURATION_TYP_NS 10900U
#define NTSC_HBLK_DURATION_MIN_NS (NTSC_HBLK_DURATION_TYP_NS - 200)
#define NTSC_HBLK_DURATION_MAX_NS (NTSC_HBLK_DURATION_TYP_NS + 200)
#define NTSC_HACT_DURATION_TYP_NS (NTSC_LINE_DURATION_NS - NTSC_HBLK_DURATION_TYP_NS)
#define NTSC_HACT_DURATION_MIN_NS (NTSC_LINE_DURATION_NS - NTSC_HBLK_DURATION_MAX_NS)
#define NTSC_HACT_DURATION_MAX_NS (NTSC_LINE_DURATION_NS - NTSC_HBLK_DURATION_MIN_NS)
#define NTSC_HFP_DURATION_TYP_NS 1500
#define NTSC_HFP_DURATION_MIN_NS 1270
#define NTSC_HFP_DURATION_MAX_NS 2220
#define NTSC_HSLEN_DURATION_TYP_NS 4700
#define NTSC_HSLEN_DURATION_MIN_NS (NTSC_HSLEN_DURATION_TYP_NS - 100)
#define NTSC_HSLEN_DURATION_MAX_NS (NTSC_HSLEN_DURATION_TYP_NS + 100)
#define NTSC_HBP_DURATION_TYP_NS 4700
/*
* I couldn't find the actual tolerance for the back porch, so let's
* just reuse the sync length ones.
*/
#define NTSC_HBP_DURATION_MIN_NS (NTSC_HBP_DURATION_TYP_NS - 100)
#define NTSC_HBP_DURATION_MAX_NS (NTSC_HBP_DURATION_TYP_NS + 100)
#define PAL_LINE_DURATION_NS 64000U
#define PAL_LINES_NUMBER 625
#define PAL_HACT_DURATION_TYP_NS 51950U
#define PAL_HACT_DURATION_MIN_NS (PAL_HACT_DURATION_TYP_NS - 100)
#define PAL_HACT_DURATION_MAX_NS (PAL_HACT_DURATION_TYP_NS + 400)
#define PAL_HBLK_DURATION_TYP_NS (PAL_LINE_DURATION_NS - PAL_HACT_DURATION_TYP_NS)
#define PAL_HBLK_DURATION_MIN_NS (PAL_LINE_DURATION_NS - PAL_HACT_DURATION_MAX_NS)
#define PAL_HBLK_DURATION_MAX_NS (PAL_LINE_DURATION_NS - PAL_HACT_DURATION_MIN_NS)
#define PAL_HFP_DURATION_TYP_NS 1650
#define PAL_HFP_DURATION_MIN_NS (PAL_HFP_DURATION_TYP_NS - 100)
#define PAL_HFP_DURATION_MAX_NS (PAL_HFP_DURATION_TYP_NS + 400)
#define PAL_HSLEN_DURATION_TYP_NS 4700
#define PAL_HSLEN_DURATION_MIN_NS (PAL_HSLEN_DURATION_TYP_NS - 200)
#define PAL_HSLEN_DURATION_MAX_NS (PAL_HSLEN_DURATION_TYP_NS + 200)
#define PAL_HBP_DURATION_TYP_NS 5700
#define PAL_HBP_DURATION_MIN_NS (PAL_HBP_DURATION_TYP_NS - 200)
#define PAL_HBP_DURATION_MAX_NS (PAL_HBP_DURATION_TYP_NS + 200)
struct analog_param_field {
unsigned int even, odd;
};
#define PARAM_FIELD(_odd, _even) \
{ .even = _even, .odd = _odd }
struct analog_param_range {
unsigned int min, typ, max;
};
#define PARAM_RANGE(_min, _typ, _max) \
{ .min = _min, .typ = _typ, .max = _max }
struct analog_parameters {
unsigned int num_lines;
unsigned int line_duration_ns;
struct analog_param_range hact_ns;
struct analog_param_range hfp_ns;
struct analog_param_range hslen_ns;
struct analog_param_range hbp_ns;
struct analog_param_range hblk_ns;
unsigned int bt601_hfp;
struct analog_param_field vfp_lines;
struct analog_param_field vslen_lines;
struct analog_param_field vbp_lines;
};
#define TV_MODE_PARAMETER(_mode, _lines, _line_dur, _hact, _hfp, \
_hslen, _hbp, _hblk, _bt601_hfp, _vfp, \
_vslen, _vbp) \
[_mode] = { \
.num_lines = _lines, \
.line_duration_ns = _line_dur, \
.hact_ns = _hact, \
.hfp_ns = _hfp, \
.hslen_ns = _hslen, \
.hbp_ns = _hbp, \
.hblk_ns = _hblk, \
.bt601_hfp = _bt601_hfp, \
.vfp_lines = _vfp, \
.vslen_lines = _vslen, \
.vbp_lines = _vbp, \
}
static const struct analog_parameters tv_modes_parameters[] = {
TV_MODE_PARAMETER(DRM_MODE_ANALOG_NTSC,
NTSC_LINES_NUMBER,
NTSC_LINE_DURATION_NS,
PARAM_RANGE(NTSC_HACT_DURATION_MIN_NS,
NTSC_HACT_DURATION_TYP_NS,
NTSC_HACT_DURATION_MAX_NS),
PARAM_RANGE(NTSC_HFP_DURATION_MIN_NS,
NTSC_HFP_DURATION_TYP_NS,
NTSC_HFP_DURATION_MAX_NS),
PARAM_RANGE(NTSC_HSLEN_DURATION_MIN_NS,
NTSC_HSLEN_DURATION_TYP_NS,
NTSC_HSLEN_DURATION_MAX_NS),
PARAM_RANGE(NTSC_HBP_DURATION_MIN_NS,
NTSC_HBP_DURATION_TYP_NS,
NTSC_HBP_DURATION_MAX_NS),
PARAM_RANGE(NTSC_HBLK_DURATION_MIN_NS,
NTSC_HBLK_DURATION_TYP_NS,
NTSC_HBLK_DURATION_MAX_NS),
16,
PARAM_FIELD(3, 3),
PARAM_FIELD(3, 3),
PARAM_FIELD(16, 17)),
TV_MODE_PARAMETER(DRM_MODE_ANALOG_PAL,
PAL_LINES_NUMBER,
PAL_LINE_DURATION_NS,
PARAM_RANGE(PAL_HACT_DURATION_MIN_NS,
PAL_HACT_DURATION_TYP_NS,
PAL_HACT_DURATION_MAX_NS),
PARAM_RANGE(PAL_HFP_DURATION_MIN_NS,
PAL_HFP_DURATION_TYP_NS,
PAL_HFP_DURATION_MAX_NS),
PARAM_RANGE(PAL_HSLEN_DURATION_MIN_NS,
PAL_HSLEN_DURATION_TYP_NS,
PAL_HSLEN_DURATION_MAX_NS),
PARAM_RANGE(PAL_HBP_DURATION_MIN_NS,
PAL_HBP_DURATION_TYP_NS,
PAL_HBP_DURATION_MAX_NS),
PARAM_RANGE(PAL_HBLK_DURATION_MIN_NS,
PAL_HBLK_DURATION_TYP_NS,
PAL_HBLK_DURATION_MAX_NS),
12,
/*
* The front porch is actually 6 short sync
* pulses for the even field, and 5 for the
 * odd field. Each sync takes half a line, so
* the odd field front porch is shorter by
* half a line.
*
* In progressive, we're supposed to use 6
* pulses, so we're fine there
*/
PARAM_FIELD(3, 2),
/*
* The vsync length is 5 long sync pulses,
* each field taking half a line. We're
* shorter for both fields by half a line.
*
* In progressive, we're supposed to use 5
* pulses, so we're off by half
* a line.
*
* In interlace, we're now off by half a line
* for the even field and one line for the odd
* field.
*/
PARAM_FIELD(3, 3),
/*
* The back porch starts with post-equalizing
* pulses, consisting in 5 short sync pulses
* for the even field, 4 for the odd field. In
* progressive, it's 5 short syncs.
*
* In progressive, we thus have 2.5 lines,
* plus the 0.5 line we were missing
* previously, so we should use 3 lines.
*
* In interlace, the even field is in the
 * exact same case as progressive. For the
* odd field, we should be using 2 lines but
* we're one line short, so we'll make up for
* it here by using 3.
*
* The entire blanking area is supposed to
* take 25 lines, so we also need to account
* for the rest of the blanking area that
* can't be in either the front porch or sync
* period.
*/
PARAM_FIELD(19, 20)),
};
static int fill_analog_mode(struct drm_device *dev,
struct drm_display_mode *mode,
const struct analog_parameters *params,
unsigned long pixel_clock_hz,
unsigned int hactive,
unsigned int vactive,
bool interlace)
{
unsigned long pixel_duration_ns = NSEC_PER_SEC / pixel_clock_hz;
unsigned int htotal, vtotal;
unsigned int max_hact, hact_duration_ns;
unsigned int hblk, hblk_duration_ns;
unsigned int hfp, hfp_duration_ns;
unsigned int hslen, hslen_duration_ns;
unsigned int hbp, hbp_duration_ns;
unsigned int porches, porches_duration_ns;
unsigned int vfp, vfp_min;
unsigned int vbp, vbp_min;
unsigned int vslen;
bool bt601 = false;
int porches_rem;
u64 result;
drm_dbg_kms(dev,
"Generating a %ux%u%c, %u-line mode with a %lu kHz clock\n",
hactive, vactive,
interlace ? 'i' : 'p',
params->num_lines,
pixel_clock_hz / 1000);
max_hact = params->hact_ns.max / pixel_duration_ns;
if (pixel_clock_hz == 13500000 && hactive > max_hact && hactive <= 720) {
drm_dbg_kms(dev, "Trying to generate a BT.601 mode. Disabling checks.\n");
bt601 = true;
}
/*
 * Our pixel duration is going to be rounded down by the division,
* so rounding up is probably going to introduce even more
* deviation.
*/
result = (u64)params->line_duration_ns * pixel_clock_hz;
do_div(result, NSEC_PER_SEC);
htotal = result;
drm_dbg_kms(dev, "Total Horizontal Number of Pixels: %u\n", htotal);
hact_duration_ns = hactive * pixel_duration_ns;
if (!bt601 &&
(hact_duration_ns < params->hact_ns.min ||
hact_duration_ns > params->hact_ns.max)) {
DRM_ERROR("Invalid horizontal active area duration: %uns (min: %u, max %u)\n",
hact_duration_ns, params->hact_ns.min, params->hact_ns.max);
return -EINVAL;
}
hblk = htotal - hactive;
drm_dbg_kms(dev, "Horizontal Blanking Period: %u\n", hblk);
hblk_duration_ns = hblk * pixel_duration_ns;
if (!bt601 &&
(hblk_duration_ns < params->hblk_ns.min ||
hblk_duration_ns > params->hblk_ns.max)) {
DRM_ERROR("Invalid horizontal blanking duration: %uns (min: %u, max %u)\n",
hblk_duration_ns, params->hblk_ns.min, params->hblk_ns.max);
return -EINVAL;
}
hslen = DIV_ROUND_UP(params->hslen_ns.typ, pixel_duration_ns);
drm_dbg_kms(dev, "Horizontal Sync Period: %u\n", hslen);
hslen_duration_ns = hslen * pixel_duration_ns;
if (!bt601 &&
(hslen_duration_ns < params->hslen_ns.min ||
hslen_duration_ns > params->hslen_ns.max)) {
DRM_ERROR("Invalid horizontal sync duration: %uns (min: %u, max %u)\n",
hslen_duration_ns, params->hslen_ns.min, params->hslen_ns.max);
return -EINVAL;
}
porches = hblk - hslen;
drm_dbg_kms(dev, "Remaining horizontal pixels for both porches: %u\n", porches);
porches_duration_ns = porches * pixel_duration_ns;
if (!bt601 &&
(porches_duration_ns > (params->hfp_ns.max + params->hbp_ns.max) ||
porches_duration_ns < (params->hfp_ns.min + params->hbp_ns.min))) {
DRM_ERROR("Invalid horizontal porches duration: %uns\n", porches_duration_ns);
return -EINVAL;
}
if (bt601) {
hfp = params->bt601_hfp;
} else {
unsigned int hfp_min = DIV_ROUND_UP(params->hfp_ns.min,
pixel_duration_ns);
unsigned int hbp_min = DIV_ROUND_UP(params->hbp_ns.min,
pixel_duration_ns);
int porches_rem = porches - hfp_min - hbp_min;
hfp = hfp_min + DIV_ROUND_UP(porches_rem, 2);
}
drm_dbg_kms(dev, "Horizontal Front Porch: %u\n", hfp);
hfp_duration_ns = hfp * pixel_duration_ns;
if (!bt601 &&
(hfp_duration_ns < params->hfp_ns.min ||
hfp_duration_ns > params->hfp_ns.max)) {
DRM_ERROR("Invalid horizontal front porch duration: %uns (min: %u, max %u)\n",
hfp_duration_ns, params->hfp_ns.min, params->hfp_ns.max);
return -EINVAL;
}
hbp = porches - hfp;
drm_dbg_kms(dev, "Horizontal Back Porch: %u\n", hbp);
hbp_duration_ns = hbp * pixel_duration_ns;
if (!bt601 &&
(hbp_duration_ns < params->hbp_ns.min ||
hbp_duration_ns > params->hbp_ns.max)) {
DRM_ERROR("Invalid horizontal back porch duration: %uns (min: %u, max %u)\n",
hbp_duration_ns, params->hbp_ns.min, params->hbp_ns.max);
return -EINVAL;
}
if (htotal != (hactive + hfp + hslen + hbp))
return -EINVAL;
mode->clock = pixel_clock_hz / 1000;
mode->hdisplay = hactive;
mode->hsync_start = mode->hdisplay + hfp;
mode->hsync_end = mode->hsync_start + hslen;
mode->htotal = mode->hsync_end + hbp;
if (interlace) {
vfp_min = params->vfp_lines.even + params->vfp_lines.odd;
vbp_min = params->vbp_lines.even + params->vbp_lines.odd;
vslen = params->vslen_lines.even + params->vslen_lines.odd;
} else {
/*
* By convention, NTSC (aka 525/60) systems start with
* the even field, but PAL (aka 625/50) systems start
* with the odd one.
*
* PAL systems also have asymmetric timings between the
* even and odd field, while NTSC is symmetric.
*
* Moreover, if we want to create a progressive mode for
* PAL, we need to use the odd field timings.
*
* Since odd == even for NTSC, we can just use the odd
* one all the time to simplify the code a bit.
*/
vfp_min = params->vfp_lines.odd;
vbp_min = params->vbp_lines.odd;
vslen = params->vslen_lines.odd;
}
drm_dbg_kms(dev, "Vertical Sync Period: %u\n", vslen);
porches = params->num_lines - vactive - vslen;
drm_dbg_kms(dev, "Remaining vertical pixels for both porches: %u\n", porches);
porches_rem = porches - vfp_min - vbp_min;
vfp = vfp_min + (porches_rem / 2);
drm_dbg_kms(dev, "Vertical Front Porch: %u\n", vfp);
vbp = porches - vfp;
drm_dbg_kms(dev, "Vertical Back Porch: %u\n", vbp);
vtotal = vactive + vfp + vslen + vbp;
if (params->num_lines != vtotal) {
DRM_ERROR("Invalid vertical total: %upx (expected %upx)\n",
vtotal, params->num_lines);
return -EINVAL;
}
mode->vdisplay = vactive;
mode->vsync_start = mode->vdisplay + vfp;
mode->vsync_end = mode->vsync_start + vslen;
mode->vtotal = mode->vsync_end + vbp;
if (mode->vtotal != params->num_lines)
return -EINVAL;
mode->type = DRM_MODE_TYPE_DRIVER;
mode->flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC;
if (interlace)
mode->flags |= DRM_MODE_FLAG_INTERLACE;
drm_mode_set_name(mode);
drm_dbg_kms(dev, "Generated mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
return 0;
}
/**
* drm_analog_tv_mode - create a display mode for an analog TV
* @dev: drm device
* @tv_mode: TV Mode standard to create a mode for. See DRM_MODE_TV_MODE_*.
* @pixel_clock_hz: Pixel Clock Frequency, in Hertz
* @hdisplay: hdisplay size
* @vdisplay: vdisplay size
* @interlace: whether to compute an interlaced mode
*
* This function creates a struct drm_display_mode instance suited for
* an analog TV output, for one of the usual analog TV mode.
*
 * Note that @hdisplay may be larger than the usual PAL and NTSC timing
 * constraints allow; in that case most timing constraints are ignored in
 * order to reach the requested resolution.
*
* Returns:
*
* A pointer to the mode, allocated with drm_mode_create(). Returns NULL
* on error.
*/
struct drm_display_mode *drm_analog_tv_mode(struct drm_device *dev,
enum drm_connector_tv_mode tv_mode,
unsigned long pixel_clock_hz,
unsigned int hdisplay,
unsigned int vdisplay,
bool interlace)
{
struct drm_display_mode *mode;
enum drm_mode_analog analog;
int ret;
switch (tv_mode) {
case DRM_MODE_TV_MODE_NTSC:
fallthrough;
case DRM_MODE_TV_MODE_NTSC_443:
fallthrough;
case DRM_MODE_TV_MODE_NTSC_J:
fallthrough;
case DRM_MODE_TV_MODE_PAL_M:
analog = DRM_MODE_ANALOG_NTSC;
break;
case DRM_MODE_TV_MODE_PAL:
fallthrough;
case DRM_MODE_TV_MODE_PAL_N:
fallthrough;
case DRM_MODE_TV_MODE_SECAM:
analog = DRM_MODE_ANALOG_PAL;
break;
default:
return NULL;
}
mode = drm_mode_create(dev);
if (!mode)
return NULL;
ret = fill_analog_mode(dev, mode,
&tv_modes_parameters[analog],
pixel_clock_hz, hdisplay, vdisplay, interlace);
if (ret)
goto err_free_mode;
return mode;
err_free_mode:
drm_mode_destroy(dev, mode);
return NULL;
}
EXPORT_SYMBOL(drm_analog_tv_mode);
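/*
 * Illustrative usage sketch (not part of the original file): requesting a
 * standard 720x576 interlaced PAL mode with the usual 13.5 MHz BT.601
 * pixel clock. foo_* is a made-up name.
 */
static struct drm_display_mode *foo_make_pal_576i(struct drm_device *dev)
{
	return drm_analog_tv_mode(dev, DRM_MODE_TV_MODE_PAL,
				  13500000, 720, 576, true);
}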
/**
* drm_cvt_mode -create a modeline based on the CVT algorithm
* @dev: drm device
* @hdisplay: hdisplay size
* @vdisplay: vdisplay size
* @vrefresh: vrefresh rate
* @reduced: whether to use reduced blanking
* @interlaced: whether to compute an interlaced mode
* @margins: whether to add margins (borders)
*
* This function is called to generate the modeline based on CVT algorithm
* according to the hdisplay, vdisplay, vrefresh.
* It is based from the VESA(TM) Coordinated Video Timing Generator by
* Graham Loveridge April 9, 2003 available at
* http://www.elo.utfsm.cl/~elo212/docs/CVTd6r1.xls
*
 * It is adapted from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c,
 * translated to use integer arithmetic.
*
* Returns:
* The modeline based on the CVT algorithm stored in a drm_display_mode object.
* The display mode object is allocated with drm_mode_create(). Returns NULL
* when no mode could be allocated.
*/
struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
int vdisplay, int vrefresh,
bool reduced, bool interlaced, bool margins)
{
#define HV_FACTOR 1000
/* 1) top/bottom margin size (% of height) - default: 1.8, */
#define CVT_MARGIN_PERCENTAGE 18
/* 2) character cell horizontal granularity (pixels) - default 8 */
#define CVT_H_GRANULARITY 8
/* 3) Minimum vertical porch (lines) - default 3 */
#define CVT_MIN_V_PORCH 3
/* 4) Minimum number of vertical back porch lines - default 6 */
#define CVT_MIN_V_BPORCH 6
/* Pixel Clock step (kHz) */
#define CVT_CLOCK_STEP 250
struct drm_display_mode *drm_mode;
unsigned int vfieldrate, hperiod;
int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
int interlace;
u64 tmp;
if (!hdisplay || !vdisplay)
return NULL;
/* allocate the drm_display_mode structure. If failure, we will
* return directly
*/
drm_mode = drm_mode_create(dev);
if (!drm_mode)
return NULL;
/* the CVT default refresh rate is 60Hz */
if (!vrefresh)
vrefresh = 60;
	/* the required field refresh rate */
if (interlaced)
vfieldrate = vrefresh * 2;
else
vfieldrate = vrefresh;
/* horizontal pixels */
hdisplay_rnd = hdisplay - (hdisplay % CVT_H_GRANULARITY);
/* determine the left&right borders */
hmargin = 0;
if (margins) {
hmargin = hdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
hmargin -= hmargin % CVT_H_GRANULARITY;
}
/* find the total active pixels */
drm_mode->hdisplay = hdisplay_rnd + 2 * hmargin;
/* find the number of lines per field */
if (interlaced)
vdisplay_rnd = vdisplay / 2;
else
vdisplay_rnd = vdisplay;
/* find the top & bottom borders */
vmargin = 0;
if (margins)
vmargin = vdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
drm_mode->vdisplay = vdisplay + 2 * vmargin;
/* Interlaced */
if (interlaced)
interlace = 1;
else
interlace = 0;
/* Determine VSync Width from aspect ratio */
if (!(vdisplay % 3) && ((vdisplay * 4 / 3) == hdisplay))
vsync = 4;
else if (!(vdisplay % 9) && ((vdisplay * 16 / 9) == hdisplay))
vsync = 5;
else if (!(vdisplay % 10) && ((vdisplay * 16 / 10) == hdisplay))
vsync = 6;
else if (!(vdisplay % 4) && ((vdisplay * 5 / 4) == hdisplay))
vsync = 7;
else if (!(vdisplay % 9) && ((vdisplay * 15 / 9) == hdisplay))
vsync = 7;
else /* custom */
vsync = 10;
if (!reduced) {
/* simplify the GTF calculation */
/* 4) Minimum time of vertical sync + back porch interval (µs)
* default 550.0
*/
int tmp1, tmp2;
#define CVT_MIN_VSYNC_BP 550
/* 3) Nominal HSync width (% of line period) - default 8 */
#define CVT_HSYNC_PERCENTAGE 8
unsigned int hblank_percentage;
int vsyncandback_porch, __maybe_unused vback_porch, hblank;
/* estimated the horizontal period */
tmp1 = HV_FACTOR * 1000000 -
CVT_MIN_VSYNC_BP * HV_FACTOR * vfieldrate;
tmp2 = (vdisplay_rnd + 2 * vmargin + CVT_MIN_V_PORCH) * 2 +
interlace;
hperiod = tmp1 * 2 / (tmp2 * vfieldrate);
tmp1 = CVT_MIN_VSYNC_BP * HV_FACTOR / hperiod + 1;
/* 9. Find number of lines in sync + backporch */
if (tmp1 < (vsync + CVT_MIN_V_PORCH))
vsyncandback_porch = vsync + CVT_MIN_V_PORCH;
else
vsyncandback_porch = tmp1;
/* 10. Find number of lines in back porch */
vback_porch = vsyncandback_porch - vsync;
drm_mode->vtotal = vdisplay_rnd + 2 * vmargin +
vsyncandback_porch + CVT_MIN_V_PORCH;
/* 5) Definition of Horizontal blanking time limitation */
/* Gradient (%/kHz) - default 600 */
#define CVT_M_FACTOR 600
/* Offset (%) - default 40 */
#define CVT_C_FACTOR 40
/* Blanking time scaling factor - default 128 */
#define CVT_K_FACTOR 128
/* Scaling factor weighting - default 20 */
#define CVT_J_FACTOR 20
#define CVT_M_PRIME (CVT_M_FACTOR * CVT_K_FACTOR / 256)
#define CVT_C_PRIME ((CVT_C_FACTOR - CVT_J_FACTOR) * CVT_K_FACTOR / 256 + \
CVT_J_FACTOR)
/* 12. Find ideal blanking duty cycle from formula */
hblank_percentage = CVT_C_PRIME * HV_FACTOR - CVT_M_PRIME *
hperiod / 1000;
/* 13. Blanking time */
if (hblank_percentage < 20 * HV_FACTOR)
hblank_percentage = 20 * HV_FACTOR;
hblank = drm_mode->hdisplay * hblank_percentage /
(100 * HV_FACTOR - hblank_percentage);
hblank -= hblank % (2 * CVT_H_GRANULARITY);
/* 14. find the total pixels per line */
drm_mode->htotal = drm_mode->hdisplay + hblank;
drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2;
drm_mode->hsync_start = drm_mode->hsync_end -
(drm_mode->htotal * CVT_HSYNC_PERCENTAGE) / 100;
drm_mode->hsync_start += CVT_H_GRANULARITY -
drm_mode->hsync_start % CVT_H_GRANULARITY;
/* fill the Vsync values */
drm_mode->vsync_start = drm_mode->vdisplay + CVT_MIN_V_PORCH;
drm_mode->vsync_end = drm_mode->vsync_start + vsync;
} else {
/* Reduced blanking */
/* Minimum vertical blanking interval time (µs)- default 460 */
#define CVT_RB_MIN_VBLANK 460
/* Fixed number of clocks for horizontal sync */
#define CVT_RB_H_SYNC 32
/* Fixed number of clocks for horizontal blanking */
#define CVT_RB_H_BLANK 160
/* Fixed number of lines for vertical front porch - default 3*/
#define CVT_RB_VFPORCH 3
int vbilines;
int tmp1, tmp2;
/* 8. Estimate Horizontal period. */
tmp1 = HV_FACTOR * 1000000 -
CVT_RB_MIN_VBLANK * HV_FACTOR * vfieldrate;
tmp2 = vdisplay_rnd + 2 * vmargin;
hperiod = tmp1 / (tmp2 * vfieldrate);
/* 9. Find number of lines in vertical blanking */
vbilines = CVT_RB_MIN_VBLANK * HV_FACTOR / hperiod + 1;
/* 10. Check if vertical blanking is sufficient */
if (vbilines < (CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH))
vbilines = CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH;
/* 11. Find total number of lines in vertical field */
drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + vbilines;
/* 12. Find total number of pixels in a line */
drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
/* Fill in HSync values */
drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
/* Fill in VSync values */
drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
drm_mode->vsync_end = drm_mode->vsync_start + vsync;
}
/* 15/13. Find pixel clock frequency (kHz for xf86) */
tmp = drm_mode->htotal; /* perform intermediate calcs in u64 */
tmp *= HV_FACTOR * 1000;
do_div(tmp, hperiod);
	tmp -= tmp % CVT_CLOCK_STEP; /* round down to the pixel clock step */
drm_mode->clock = tmp;
/* 18/16. Find actual vertical frame frequency */
/* ignore - just set the mode flag for interlaced */
if (interlaced) {
drm_mode->vtotal *= 2;
drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
}
/* Fill the mode line name */
drm_mode_set_name(drm_mode);
if (reduced)
drm_mode->flags |= (DRM_MODE_FLAG_PHSYNC |
DRM_MODE_FLAG_NVSYNC);
else
drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NHSYNC);
return drm_mode;
}
EXPORT_SYMBOL(drm_cvt_mode);
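/*
 * Illustrative usage sketch (not part of the original file): generating a
 * 1920x1080 mode at 60 Hz with CVT reduced blanking, progressive scan and
 * no margins. foo_* is a made-up name.
 */
static struct drm_display_mode *foo_make_1080p_cvt_rb(struct drm_device *dev)
{
	return drm_cvt_mode(dev, 1920, 1080, 60,
			    true /* reduced */, false /* interlaced */,
			    false /* margins */);
}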
/**
* drm_gtf_mode_complex - create the modeline based on the full GTF algorithm
* @dev: drm device
* @hdisplay: hdisplay size
* @vdisplay: vdisplay size
* @vrefresh: vrefresh rate.
* @interlaced: whether to compute an interlaced mode
* @margins: desired margin (borders) size
* @GTF_M: extended GTF formula parameters
* @GTF_2C: extended GTF formula parameters
* @GTF_K: extended GTF formula parameters
* @GTF_2J: extended GTF formula parameters
*
* GTF feature blocks specify C and J in multiples of 0.5, so we pass them
* in here multiplied by two. For a C of 40, pass in 80.
*
* Returns:
* The modeline based on the full GTF algorithm stored in a drm_display_mode object.
* The display mode object is allocated with drm_mode_create(). Returns NULL
* when no mode could be allocated.
*/
struct drm_display_mode *
drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
int vrefresh, bool interlaced, int margins,
int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
{ /* 1) top/bottom margin size (% of height) - default: 1.8, */
#define GTF_MARGIN_PERCENTAGE 18
/* 2) character cell horizontal granularity (pixels) - default 8 */
#define GTF_CELL_GRAN 8
/* 3) Minimum vertical porch (lines) - default 3 */
#define GTF_MIN_V_PORCH 1
/* width of vsync in lines */
#define V_SYNC_RQD 3
/* width of hsync as % of total line */
#define H_SYNC_PERCENT 8
/* min time of vsync + back porch (microsec) */
#define MIN_VSYNC_PLUS_BP 550
/* C' and M' are part of the Blanking Duty Cycle computation */
#define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
#define GTF_M_PRIME (GTF_K * GTF_M / 256)
struct drm_display_mode *drm_mode;
unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
int top_margin, bottom_margin;
int interlace;
unsigned int hfreq_est;
int vsync_plus_bp, __maybe_unused vback_porch;
unsigned int vtotal_lines, __maybe_unused vfieldrate_est;
unsigned int __maybe_unused hperiod;
unsigned int vfield_rate, __maybe_unused vframe_rate;
int left_margin, right_margin;
unsigned int total_active_pixels, ideal_duty_cycle;
unsigned int hblank, total_pixels, pixel_freq;
int hsync, hfront_porch, vodd_front_porch_lines;
unsigned int tmp1, tmp2;
if (!hdisplay || !vdisplay)
return NULL;
drm_mode = drm_mode_create(dev);
if (!drm_mode)
return NULL;
/* 1. In order to give correct results, the number of horizontal
* pixels requested is first processed to ensure that it is divisible
* by the character size, by rounding it to the nearest character
* cell boundary:
*/
hdisplay_rnd = (hdisplay + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
hdisplay_rnd = hdisplay_rnd * GTF_CELL_GRAN;
/* 2. If interlace is requested, the number of vertical lines assumed
* by the calculation must be halved, as the computation calculates
* the number of vertical lines per field.
*/
if (interlaced)
vdisplay_rnd = vdisplay / 2;
else
vdisplay_rnd = vdisplay;
/* 3. Find the frame rate required: */
if (interlaced)
vfieldrate_rqd = vrefresh * 2;
else
vfieldrate_rqd = vrefresh;
/* 4. Find number of lines in Top margin: */
top_margin = 0;
if (margins)
top_margin = (vdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
1000;
/* 5. Find number of lines in bottom margin: */
bottom_margin = top_margin;
/* 6. If interlace is required, then set variable interlace: */
if (interlaced)
interlace = 1;
else
interlace = 0;
/* 7. Estimate the Horizontal frequency */
{
tmp1 = (1000000 - MIN_VSYNC_PLUS_BP * vfieldrate_rqd) / 500;
tmp2 = (vdisplay_rnd + 2 * top_margin + GTF_MIN_V_PORCH) *
2 + interlace;
hfreq_est = (tmp2 * 1000 * vfieldrate_rqd) / tmp1;
}
/* 8. Find the number of lines in V sync + back porch */
/* [V SYNC+BP] = RINT(([MIN VSYNC+BP] * hfreq_est / 1000000)) */
vsync_plus_bp = MIN_VSYNC_PLUS_BP * hfreq_est / 1000;
vsync_plus_bp = (vsync_plus_bp + 500) / 1000;
/* 9. Find the number of lines in V back porch alone: */
vback_porch = vsync_plus_bp - V_SYNC_RQD;
/* 10. Find the total number of lines in Vertical field period: */
vtotal_lines = vdisplay_rnd + top_margin + bottom_margin +
vsync_plus_bp + GTF_MIN_V_PORCH;
/* 11. Estimate the Vertical field frequency: */
vfieldrate_est = hfreq_est / vtotal_lines;
/* 12. Find the actual horizontal period: */
hperiod = 1000000 / (vfieldrate_rqd * vtotal_lines);
/* 13. Find the actual Vertical field frequency: */
vfield_rate = hfreq_est / vtotal_lines;
/* 14. Find the Vertical frame frequency: */
if (interlaced)
vframe_rate = vfield_rate / 2;
else
vframe_rate = vfield_rate;
/* 15. Find number of pixels in left margin: */
if (margins)
left_margin = (hdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
1000;
else
left_margin = 0;
/* 16.Find number of pixels in right margin: */
right_margin = left_margin;
/* 17.Find total number of active pixels in image and left and right */
total_active_pixels = hdisplay_rnd + left_margin + right_margin;
/* 18.Find the ideal blanking duty cycle from blanking duty cycle */
ideal_duty_cycle = GTF_C_PRIME * 1000 -
(GTF_M_PRIME * 1000000 / hfreq_est);
/* 19.Find the number of pixels in the blanking time to the nearest
* double character cell: */
hblank = total_active_pixels * ideal_duty_cycle /
(100000 - ideal_duty_cycle);
hblank = (hblank + GTF_CELL_GRAN) / (2 * GTF_CELL_GRAN);
hblank = hblank * 2 * GTF_CELL_GRAN;
/* 20.Find total number of pixels: */
total_pixels = total_active_pixels + hblank;
/* 21.Find pixel clock frequency: */
pixel_freq = total_pixels * hfreq_est / 1000;
/* Stage 1 computations are now complete; I should really pass
* the results to another function and do the Stage 2 computations,
* but I only need a few more values so I'll just append the
* computations here for now */
/* 17. Find the number of pixels in the horizontal sync period: */
hsync = H_SYNC_PERCENT * total_pixels / 100;
hsync = (hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
hsync = hsync * GTF_CELL_GRAN;
/* 18. Find the number of pixels in horizontal front porch period */
hfront_porch = hblank / 2 - hsync;
/* 36. Find the number of lines in the odd front porch period: */
	vodd_front_porch_lines = GTF_MIN_V_PORCH;
/* finally, pack the results in the mode struct */
drm_mode->hdisplay = hdisplay_rnd;
drm_mode->hsync_start = hdisplay_rnd + hfront_porch;
drm_mode->hsync_end = drm_mode->hsync_start + hsync;
drm_mode->htotal = total_pixels;
drm_mode->vdisplay = vdisplay_rnd;
drm_mode->vsync_start = vdisplay_rnd + vodd_front_porch_lines;
drm_mode->vsync_end = drm_mode->vsync_start + V_SYNC_RQD;
drm_mode->vtotal = vtotal_lines;
drm_mode->clock = pixel_freq;
if (interlaced) {
drm_mode->vtotal *= 2;
drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
}
drm_mode_set_name(drm_mode);
	/* use |= so the interlace flag set above is preserved */
	if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
		drm_mode->flags |= DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
	else
		drm_mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
return drm_mode;
}
EXPORT_SYMBOL(drm_gtf_mode_complex);
/**
* drm_gtf_mode - create the modeline based on the GTF algorithm
* @dev: drm device
* @hdisplay: hdisplay size
* @vdisplay: vdisplay size
* @vrefresh: vrefresh rate.
* @interlaced: whether to compute an interlaced mode
* @margins: desired margin (borders) size
*
* return the modeline based on GTF algorithm
*
* This function is to create the modeline based on the GTF algorithm.
* Generalized Timing Formula is derived from:
*
* GTF Spreadsheet by Andy Morrish (1/5/97)
* available at https://www.vesa.org
*
 * It is adapted from xserver/hw/xfree86/modes/xf86gtf.c, translated to use
 * integer arithmetic. It also refers to fb_get_mode() in
 * drivers/video/fbmon.c.
*
* Standard GTF parameters::
*
* M = 600
* C = 40
* K = 128
* J = 20
*
* Returns:
* The modeline based on the GTF algorithm stored in a drm_display_mode object.
* The display mode object is allocated with drm_mode_create(). Returns NULL
* when no mode could be allocated.
*/
struct drm_display_mode *
drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
bool interlaced, int margins)
{
return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh,
interlaced, margins,
600, 40 * 2, 128, 20 * 2);
}
EXPORT_SYMBOL(drm_gtf_mode);
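/*
 * Illustrative usage sketch (not part of the original file): generating a
 * 1024x768 mode at 75 Hz with the standard GTF parameters, progressive and
 * without margins. foo_* is a made-up name.
 */
static struct drm_display_mode *foo_make_gtf_mode(struct drm_device *dev)
{
	return drm_gtf_mode(dev, 1024, 768, 75, false, 0);
}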
#ifdef CONFIG_VIDEOMODE_HELPERS
/**
 * drm_display_mode_from_videomode - fill in @dmode using @vm
* @vm: videomode structure to use as source
* @dmode: drm_display_mode structure to use as destination
*
* Fills out @dmode using the display mode specified in @vm.
*/
void drm_display_mode_from_videomode(const struct videomode *vm,
struct drm_display_mode *dmode)
{
dmode->hdisplay = vm->hactive;
dmode->hsync_start = dmode->hdisplay + vm->hfront_porch;
dmode->hsync_end = dmode->hsync_start + vm->hsync_len;
dmode->htotal = dmode->hsync_end + vm->hback_porch;
dmode->vdisplay = vm->vactive;
dmode->vsync_start = dmode->vdisplay + vm->vfront_porch;
dmode->vsync_end = dmode->vsync_start + vm->vsync_len;
dmode->vtotal = dmode->vsync_end + vm->vback_porch;
dmode->clock = vm->pixelclock / 1000;
dmode->flags = 0;
if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH)
dmode->flags |= DRM_MODE_FLAG_PHSYNC;
else if (vm->flags & DISPLAY_FLAGS_HSYNC_LOW)
dmode->flags |= DRM_MODE_FLAG_NHSYNC;
if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH)
dmode->flags |= DRM_MODE_FLAG_PVSYNC;
else if (vm->flags & DISPLAY_FLAGS_VSYNC_LOW)
dmode->flags |= DRM_MODE_FLAG_NVSYNC;
if (vm->flags & DISPLAY_FLAGS_INTERLACED)
dmode->flags |= DRM_MODE_FLAG_INTERLACE;
if (vm->flags & DISPLAY_FLAGS_DOUBLESCAN)
dmode->flags |= DRM_MODE_FLAG_DBLSCAN;
if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
dmode->flags |= DRM_MODE_FLAG_DBLCLK;
drm_mode_set_name(dmode);
}
EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
/**
 * drm_display_mode_to_videomode - fill in @vm using @dmode
* @dmode: drm_display_mode structure to use as source
* @vm: videomode structure to use as destination
*
* Fills out @vm using the display mode specified in @dmode.
*/
void drm_display_mode_to_videomode(const struct drm_display_mode *dmode,
struct videomode *vm)
{
vm->hactive = dmode->hdisplay;
vm->hfront_porch = dmode->hsync_start - dmode->hdisplay;
vm->hsync_len = dmode->hsync_end - dmode->hsync_start;
vm->hback_porch = dmode->htotal - dmode->hsync_end;
vm->vactive = dmode->vdisplay;
vm->vfront_porch = dmode->vsync_start - dmode->vdisplay;
vm->vsync_len = dmode->vsync_end - dmode->vsync_start;
vm->vback_porch = dmode->vtotal - dmode->vsync_end;
vm->pixelclock = dmode->clock * 1000;
vm->flags = 0;
if (dmode->flags & DRM_MODE_FLAG_PHSYNC)
vm->flags |= DISPLAY_FLAGS_HSYNC_HIGH;
else if (dmode->flags & DRM_MODE_FLAG_NHSYNC)
vm->flags |= DISPLAY_FLAGS_HSYNC_LOW;
if (dmode->flags & DRM_MODE_FLAG_PVSYNC)
vm->flags |= DISPLAY_FLAGS_VSYNC_HIGH;
else if (dmode->flags & DRM_MODE_FLAG_NVSYNC)
vm->flags |= DISPLAY_FLAGS_VSYNC_LOW;
if (dmode->flags & DRM_MODE_FLAG_INTERLACE)
vm->flags |= DISPLAY_FLAGS_INTERLACED;
if (dmode->flags & DRM_MODE_FLAG_DBLSCAN)
vm->flags |= DISPLAY_FLAGS_DOUBLESCAN;
if (dmode->flags & DRM_MODE_FLAG_DBLCLK)
vm->flags |= DISPLAY_FLAGS_DOUBLECLK;
}
EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode);
/**
* drm_bus_flags_from_videomode - extract information about pixelclk and
* DE polarity from videomode and store it in a separate variable
* @vm: videomode structure to use
* @bus_flags: information about pixelclk, sync and DE polarity will be stored
* here
*
* Sets DRM_BUS_FLAG_DE_(LOW|HIGH), DRM_BUS_FLAG_PIXDATA_DRIVE_(POS|NEG)EDGE
* and DISPLAY_FLAGS_SYNC_(POS|NEG)EDGE in @bus_flags according to DISPLAY_FLAGS
* found in @vm
*/
void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags)
{
*bus_flags = 0;
if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
*bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
*bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE)
*bus_flags |= DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE;
if (vm->flags & DISPLAY_FLAGS_SYNC_NEGEDGE)
*bus_flags |= DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;
if (vm->flags & DISPLAY_FLAGS_DE_LOW)
*bus_flags |= DRM_BUS_FLAG_DE_LOW;
if (vm->flags & DISPLAY_FLAGS_DE_HIGH)
*bus_flags |= DRM_BUS_FLAG_DE_HIGH;
}
EXPORT_SYMBOL_GPL(drm_bus_flags_from_videomode);
#ifdef CONFIG_OF
/**
* of_get_drm_display_mode - get a drm_display_mode from devicetree
* @np: device_node with the timing specification
* @dmode: will be set to the return value
* @bus_flags: information about pixelclk, sync and DE polarity
* @index: index into the list of display timings in devicetree
*
 * This function is expensive and should only be used if only one mode is to be
 * read from DT. To get multiple modes, start with of_get_display_timings() and
* work with that instead.
*
* Returns:
* 0 on success, a negative errno code when no of videomode node was found.
*/
int of_get_drm_display_mode(struct device_node *np,
struct drm_display_mode *dmode, u32 *bus_flags,
int index)
{
struct videomode vm;
int ret;
ret = of_get_videomode(np, &vm, index);
if (ret)
return ret;
drm_display_mode_from_videomode(&vm, dmode);
if (bus_flags)
drm_bus_flags_from_videomode(&vm, bus_flags);
pr_debug("%pOF: got %dx%d display mode\n",
np, vm.hactive, vm.vactive);
drm_mode_debug_printmodeline(dmode);
return 0;
}
EXPORT_SYMBOL_GPL(of_get_drm_display_mode);
/**
* of_get_drm_panel_display_mode - get a panel-timing drm_display_mode from devicetree
* @np: device_node with the panel-timing specification
* @dmode: will be set to the return value
* @bus_flags: information about pixelclk, sync and DE polarity
*
* The mandatory Device Tree properties width-mm and height-mm
* are read and set on the display mode.
*
* Returns:
* Zero on success, negative error code on failure.
*/
int of_get_drm_panel_display_mode(struct device_node *np,
struct drm_display_mode *dmode, u32 *bus_flags)
{
u32 width_mm = 0, height_mm = 0;
struct display_timing timing;
struct videomode vm;
int ret;
ret = of_get_display_timing(np, "panel-timing", &timing);
if (ret)
return ret;
videomode_from_timing(&timing, &vm);
memset(dmode, 0, sizeof(*dmode));
drm_display_mode_from_videomode(&vm, dmode);
if (bus_flags)
drm_bus_flags_from_videomode(&vm, bus_flags);
ret = of_property_read_u32(np, "width-mm", &width_mm);
if (ret)
return ret;
ret = of_property_read_u32(np, "height-mm", &height_mm);
if (ret)
return ret;
dmode->width_mm = width_mm;
dmode->height_mm = height_mm;
drm_mode_debug_printmodeline(dmode);
return 0;
}
EXPORT_SYMBOL_GPL(of_get_drm_panel_display_mode);
#endif /* CONFIG_OF */
#endif /* CONFIG_VIDEOMODE_HELPERS */
/**
* drm_mode_set_name - set the name on a mode
* @mode: name will be set in this mode
*
* Set the name of @mode to a standard format which is <hdisplay>x<vdisplay>
* with an optional 'i' suffix for interlaced modes.
*/
void drm_mode_set_name(struct drm_display_mode *mode)
{
bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
mode->hdisplay, mode->vdisplay,
interlaced ? "i" : "");
}
EXPORT_SYMBOL(drm_mode_set_name);
/**
* drm_mode_vrefresh - get the vrefresh of a mode
* @mode: mode
*
* Returns:
 * @mode's vrefresh rate in Hz, rounded to the nearest integer. Calculates the
* value first if it is not yet set.
*/
int drm_mode_vrefresh(const struct drm_display_mode *mode)
{
unsigned int num, den;
if (mode->htotal == 0 || mode->vtotal == 0)
return 0;
num = mode->clock;
den = mode->htotal * mode->vtotal;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
num *= 2;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
den *= 2;
if (mode->vscan > 1)
den *= mode->vscan;
return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(num, 1000), den);
}
EXPORT_SYMBOL(drm_mode_vrefresh);
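/*
 * Worked example (not part of the original file): for the common
 * 1920x1080@60 CEA mode, clock = 148500 kHz, htotal = 2200 and
 * vtotal = 1125, so the result is
 * DIV_ROUND_CLOSEST_ULL(148500 * 1000, 2200 * 1125) =
 * DIV_ROUND_CLOSEST_ULL(148500000, 2475000) = 60 Hz.
 */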
/**
* drm_mode_get_hv_timing - Fetches hdisplay/vdisplay for given mode
* @mode: mode to query
* @hdisplay: hdisplay value to fill in
* @vdisplay: vdisplay value to fill in
*
* The vdisplay value will be doubled if the specified mode is a stereo mode of
* the appropriate layout.
*/
void drm_mode_get_hv_timing(const struct drm_display_mode *mode,
int *hdisplay, int *vdisplay)
{
struct drm_display_mode adjusted;
drm_mode_init(&adjusted, mode);
drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY);
*hdisplay = adjusted.crtc_hdisplay;
*vdisplay = adjusted.crtc_vdisplay;
}
EXPORT_SYMBOL(drm_mode_get_hv_timing);
/**
* drm_mode_set_crtcinfo - set CRTC modesetting timing parameters
* @p: mode
* @adjust_flags: a combination of adjustment flags
*
* Setup the CRTC modesetting timing parameters for @p, adjusting if necessary.
*
* - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of
* interlaced modes.
* - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
* buffers containing two eyes (only adjust the timings when needed, eg. for
* "frame packing" or "side by side full").
* - The CRTC_NO_DBLSCAN and CRTC_NO_VSCAN flags request that adjustment *not*
* be performed for doublescan and vscan > 1 modes respectively.
*/
void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
{
if (!p)
return;
p->crtc_clock = p->clock;
p->crtc_hdisplay = p->hdisplay;
p->crtc_hsync_start = p->hsync_start;
p->crtc_hsync_end = p->hsync_end;
p->crtc_htotal = p->htotal;
p->crtc_hskew = p->hskew;
p->crtc_vdisplay = p->vdisplay;
p->crtc_vsync_start = p->vsync_start;
p->crtc_vsync_end = p->vsync_end;
p->crtc_vtotal = p->vtotal;
if (p->flags & DRM_MODE_FLAG_INTERLACE) {
if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
p->crtc_vdisplay /= 2;
p->crtc_vsync_start /= 2;
p->crtc_vsync_end /= 2;
p->crtc_vtotal /= 2;
}
}
if (!(adjust_flags & CRTC_NO_DBLSCAN)) {
if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
p->crtc_vdisplay *= 2;
p->crtc_vsync_start *= 2;
p->crtc_vsync_end *= 2;
p->crtc_vtotal *= 2;
}
}
if (!(adjust_flags & CRTC_NO_VSCAN)) {
if (p->vscan > 1) {
p->crtc_vdisplay *= p->vscan;
p->crtc_vsync_start *= p->vscan;
p->crtc_vsync_end *= p->vscan;
p->crtc_vtotal *= p->vscan;
}
}
if (adjust_flags & CRTC_STEREO_DOUBLE) {
unsigned int layout = p->flags & DRM_MODE_FLAG_3D_MASK;
switch (layout) {
case DRM_MODE_FLAG_3D_FRAME_PACKING:
p->crtc_clock *= 2;
p->crtc_vdisplay += p->crtc_vtotal;
p->crtc_vsync_start += p->crtc_vtotal;
p->crtc_vsync_end += p->crtc_vtotal;
p->crtc_vtotal += p->crtc_vtotal;
break;
}
}
p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
}
EXPORT_SYMBOL(drm_mode_set_crtcinfo);
/**
* drm_mode_copy - copy the mode
* @dst: mode to overwrite
* @src: mode to copy
*
* Copy an existing mode into another mode, preserving the
* list head of the destination mode.
*/
void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
{
struct list_head head = dst->head;
*dst = *src;
dst->head = head;
}
EXPORT_SYMBOL(drm_mode_copy);
/**
* drm_mode_init - initialize the mode from another mode
* @dst: mode to overwrite
* @src: mode to copy
*
* Copy an existing mode into another mode, zeroing the
* list head of the destination mode. Typically used
* to guarantee the list head is not left with stack
* garbage in on-stack modes.
*/
void drm_mode_init(struct drm_display_mode *dst, const struct drm_display_mode *src)
{
memset(dst, 0, sizeof(*dst));
drm_mode_copy(dst, src);
}
EXPORT_SYMBOL(drm_mode_init);
/**
* drm_mode_duplicate - allocate and duplicate an existing mode
* @dev: drm_device to allocate the duplicated mode for
* @mode: mode to duplicate
*
* Just allocate a new mode, copy the existing mode into it, and return
* a pointer to it. Used to create new instances of established modes.
*
* Returns:
* Pointer to duplicated mode on success, NULL on error.
*/
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
const struct drm_display_mode *mode)
{
struct drm_display_mode *nmode;
nmode = drm_mode_create(dev);
if (!nmode)
return NULL;
drm_mode_copy(nmode, mode);
return nmode;
}
EXPORT_SYMBOL(drm_mode_duplicate);
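/*
 * Usage sketch (hypothetical driver ->get_modes() code; "template_mode" is an
 * illustrative driver-provided mode, and the drm_mode_probed_add() call is
 * assumed from the probe helpers, neither is defined in this file):
 *
 *	struct drm_display_mode *mode;
 *
 *	mode = drm_mode_duplicate(connector->dev, &template_mode);
 *	if (!mode)
 *		return 0;
 *	drm_mode_set_name(mode);
 *	mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
 *	drm_mode_probed_add(connector, mode);
 *	return 1;
 */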
static bool drm_mode_match_timings(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2)
{
return mode1->hdisplay == mode2->hdisplay &&
mode1->hsync_start == mode2->hsync_start &&
mode1->hsync_end == mode2->hsync_end &&
mode1->htotal == mode2->htotal &&
mode1->hskew == mode2->hskew &&
mode1->vdisplay == mode2->vdisplay &&
mode1->vsync_start == mode2->vsync_start &&
mode1->vsync_end == mode2->vsync_end &&
mode1->vtotal == mode2->vtotal &&
mode1->vscan == mode2->vscan;
}
static bool drm_mode_match_clock(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2)
{
/*
* do clock check convert to PICOS
* so fb modes get matched the same
*/
if (mode1->clock && mode2->clock)
return KHZ2PICOS(mode1->clock) == KHZ2PICOS(mode2->clock);
else
return mode1->clock == mode2->clock;
}
static bool drm_mode_match_flags(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2)
{
return (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
(mode2->flags & ~DRM_MODE_FLAG_3D_MASK);
}
static bool drm_mode_match_3d_flags(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2)
{
return (mode1->flags & DRM_MODE_FLAG_3D_MASK) ==
(mode2->flags & DRM_MODE_FLAG_3D_MASK);
}
static bool drm_mode_match_aspect_ratio(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2)
{
return mode1->picture_aspect_ratio == mode2->picture_aspect_ratio;
}
/**
* drm_mode_match - test modes for (partial) equality
* @mode1: first mode
* @mode2: second mode
* @match_flags: which parts need to match (DRM_MODE_MATCH_*)
*
* Check to see if @mode1 and @mode2 are equivalent.
*
* Returns:
* True if the modes are (partially) equal, false otherwise.
*/
bool drm_mode_match(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2,
unsigned int match_flags)
{
if (!mode1 && !mode2)
return true;
if (!mode1 || !mode2)
return false;
if (match_flags & DRM_MODE_MATCH_TIMINGS &&
!drm_mode_match_timings(mode1, mode2))
return false;
if (match_flags & DRM_MODE_MATCH_CLOCK &&
!drm_mode_match_clock(mode1, mode2))
return false;
if (match_flags & DRM_MODE_MATCH_FLAGS &&
!drm_mode_match_flags(mode1, mode2))
return false;
if (match_flags & DRM_MODE_MATCH_3D_FLAGS &&
!drm_mode_match_3d_flags(mode1, mode2))
return false;
if (match_flags & DRM_MODE_MATCH_ASPECT_RATIO &&
!drm_mode_match_aspect_ratio(mode1, mode2))
return false;
return true;
}
EXPORT_SYMBOL(drm_mode_match);
/**
* drm_mode_equal - test modes for equality
* @mode1: first mode
* @mode2: second mode
*
* Check to see if @mode1 and @mode2 are equivalent.
*
* Returns:
* True if the modes are equal, false otherwise.
*/
bool drm_mode_equal(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2)
{
return drm_mode_match(mode1, mode2,
DRM_MODE_MATCH_TIMINGS |
DRM_MODE_MATCH_CLOCK |
DRM_MODE_MATCH_FLAGS |
DRM_MODE_MATCH_3D_FLAGS|
DRM_MODE_MATCH_ASPECT_RATIO);
}
EXPORT_SYMBOL(drm_mode_equal);
/**
* drm_mode_equal_no_clocks - test modes for equality
* @mode1: first mode
* @mode2: second mode
*
* Check to see if @mode1 and @mode2 are equivalent, but
* don't check the pixel clocks.
*
* Returns:
* True if the modes are equal, false otherwise.
*/
bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2)
{
return drm_mode_match(mode1, mode2,
DRM_MODE_MATCH_TIMINGS |
DRM_MODE_MATCH_FLAGS |
DRM_MODE_MATCH_3D_FLAGS);
}
EXPORT_SYMBOL(drm_mode_equal_no_clocks);
/**
* drm_mode_equal_no_clocks_no_stereo - test modes for equality
* @mode1: first mode
* @mode2: second mode
*
* Check to see if @mode1 and @mode2 are equivalent, but
* don't check the pixel clocks nor the stereo layout.
*
* Returns:
* True if the modes are equal, false otherwise.
*/
bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2)
{
return drm_mode_match(mode1, mode2,
DRM_MODE_MATCH_TIMINGS |
DRM_MODE_MATCH_FLAGS);
}
EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
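/*
 * Usage sketch: the three drm_mode_equal*() helpers above are convenience
 * wrappers; a caller that only cares about the raw timings could call
 * drm_mode_match() directly (illustrative snippet, "a" and "b" being any
 * two modes):
 *
 *	bool same_timings = drm_mode_match(a, b, DRM_MODE_MATCH_TIMINGS);
 */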
static enum drm_mode_status
drm_mode_validate_basic(const struct drm_display_mode *mode)
{
if (mode->type & ~DRM_MODE_TYPE_ALL)
return MODE_BAD;
if (mode->flags & ~DRM_MODE_FLAG_ALL)
return MODE_BAD;
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) > DRM_MODE_FLAG_3D_MAX)
return MODE_BAD;
if (mode->clock == 0)
return MODE_CLOCK_LOW;
if (mode->hdisplay == 0 ||
mode->hsync_start < mode->hdisplay ||
mode->hsync_end < mode->hsync_start ||
mode->htotal < mode->hsync_end)
return MODE_H_ILLEGAL;
if (mode->vdisplay == 0 ||
mode->vsync_start < mode->vdisplay ||
mode->vsync_end < mode->vsync_start ||
mode->vtotal < mode->vsync_end)
return MODE_V_ILLEGAL;
return MODE_OK;
}
/**
* drm_mode_validate_driver - make sure the mode is somewhat sane
* @dev: drm device
* @mode: mode to check
*
* First do basic validation on the mode, and then allow the driver
* to check for device/driver specific limitations via the optional
* &drm_mode_config_helper_funcs.mode_valid hook.
*
* Returns:
* The mode status
*/
enum drm_mode_status
drm_mode_validate_driver(struct drm_device *dev,
const struct drm_display_mode *mode)
{
enum drm_mode_status status;
status = drm_mode_validate_basic(mode);
if (status != MODE_OK)
return status;
if (dev->mode_config.funcs->mode_valid)
return dev->mode_config.funcs->mode_valid(dev, mode);
else
return MODE_OK;
}
EXPORT_SYMBOL(drm_mode_validate_driver);
/**
* drm_mode_validate_size - make sure modes adhere to size constraints
* @mode: mode to check
* @maxX: maximum width
* @maxY: maximum height
*
* This function is a helper which can be used to validate modes against size
* limitations of the DRM device/connector. If a mode is too big, the
* appropriate validation failure code is returned.
*
* Returns:
* The mode status
*/
enum drm_mode_status
drm_mode_validate_size(const struct drm_display_mode *mode,
int maxX, int maxY)
{
if (maxX > 0 && mode->hdisplay > maxX)
return MODE_VIRTUAL_X;
if (maxY > 0 && mode->vdisplay > maxY)
return MODE_VIRTUAL_Y;
return MODE_OK;
}
EXPORT_SYMBOL(drm_mode_validate_size);
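/*
 * Usage sketch (hypothetical &drm_connector_helper_funcs.mode_valid
 * implementation; the 4096x4096 limits are made up for illustration):
 *
 *	static enum drm_mode_status
 *	foo_connector_mode_valid(struct drm_connector *connector,
 *				 struct drm_display_mode *mode)
 *	{
 *		return drm_mode_validate_size(mode, 4096, 4096);
 *	}
 */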
/**
* drm_mode_validate_ycbcr420 - add 'ycbcr420-only' modes only when allowed
* @mode: mode to check
* @connector: drm connector under action
*
* This function is a helper which can be used to filter out any YCBCR420-only
* modes when the source doesn't support them.
*
* Returns:
* The mode status
*/
enum drm_mode_status
drm_mode_validate_ycbcr420(const struct drm_display_mode *mode,
struct drm_connector *connector)
{
if (!connector->ycbcr_420_allowed &&
drm_mode_is_420_only(&connector->display_info, mode))
return MODE_NO_420;
return MODE_OK;
}
EXPORT_SYMBOL(drm_mode_validate_ycbcr420);
#define MODE_STATUS(status) [MODE_ ## status + 3] = #status
static const char * const drm_mode_status_names[] = {
MODE_STATUS(OK),
MODE_STATUS(HSYNC),
MODE_STATUS(VSYNC),
MODE_STATUS(H_ILLEGAL),
MODE_STATUS(V_ILLEGAL),
MODE_STATUS(BAD_WIDTH),
MODE_STATUS(NOMODE),
MODE_STATUS(NO_INTERLACE),
MODE_STATUS(NO_DBLESCAN),
MODE_STATUS(NO_VSCAN),
MODE_STATUS(MEM),
MODE_STATUS(VIRTUAL_X),
MODE_STATUS(VIRTUAL_Y),
MODE_STATUS(MEM_VIRT),
MODE_STATUS(NOCLOCK),
MODE_STATUS(CLOCK_HIGH),
MODE_STATUS(CLOCK_LOW),
MODE_STATUS(CLOCK_RANGE),
MODE_STATUS(BAD_HVALUE),
MODE_STATUS(BAD_VVALUE),
MODE_STATUS(BAD_VSCAN),
MODE_STATUS(HSYNC_NARROW),
MODE_STATUS(HSYNC_WIDE),
MODE_STATUS(HBLANK_NARROW),
MODE_STATUS(HBLANK_WIDE),
MODE_STATUS(VSYNC_NARROW),
MODE_STATUS(VSYNC_WIDE),
MODE_STATUS(VBLANK_NARROW),
MODE_STATUS(VBLANK_WIDE),
MODE_STATUS(PANEL),
MODE_STATUS(INTERLACE_WIDTH),
MODE_STATUS(ONE_WIDTH),
MODE_STATUS(ONE_HEIGHT),
MODE_STATUS(ONE_SIZE),
MODE_STATUS(NO_REDUCED),
MODE_STATUS(NO_STEREO),
MODE_STATUS(NO_420),
MODE_STATUS(STALE),
MODE_STATUS(BAD),
MODE_STATUS(ERROR),
};
#undef MODE_STATUS
const char *drm_get_mode_status_name(enum drm_mode_status status)
{
int index = status + 3;
if (WARN_ON(index < 0 || index >= ARRAY_SIZE(drm_mode_status_names)))
return "";
return drm_mode_status_names[index];
}
/**
* drm_mode_prune_invalid - remove invalid modes from mode list
* @dev: DRM device
* @mode_list: list of modes to check
* @verbose: be verbose about it
*
* This helper function can be used to prune a display mode list after
* validation has been completed. All modes whose status is not MODE_OK will be
* removed from the list, and if @verbose the status code and mode name are also
* printed to dmesg.
*/
void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose)
{
struct drm_display_mode *mode, *t;
list_for_each_entry_safe(mode, t, mode_list, head) {
if (mode->status != MODE_OK) {
list_del(&mode->head);
if (mode->type & DRM_MODE_TYPE_USERDEF) {
drm_warn(dev, "User-defined mode not supported: "
DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}
if (verbose) {
drm_mode_debug_printmodeline(mode);
DRM_DEBUG_KMS("Not using %s mode: %s\n",
mode->name,
drm_get_mode_status_name(mode->status));
}
drm_mode_destroy(dev, mode);
}
}
}
EXPORT_SYMBOL(drm_mode_prune_invalid);
/**
* drm_mode_compare - compare modes for favorability
* @priv: unused
* @lh_a: list_head for first mode
* @lh_b: list_head for second mode
*
* Compare two modes, given by @lh_a and @lh_b, returning a value indicating
* which is better.
*
* Returns:
* Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
* positive if @lh_b is better than @lh_a.
*/
static int drm_mode_compare(void *priv, const struct list_head *lh_a,
const struct list_head *lh_b)
{
struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
int diff;
diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
if (diff)
return diff;
diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
if (diff)
return diff;
diff = drm_mode_vrefresh(b) - drm_mode_vrefresh(a);
if (diff)
return diff;
diff = b->clock - a->clock;
return diff;
}
/**
* drm_mode_sort - sort mode list
* @mode_list: list of drm_display_mode structures to sort
*
* Sort @mode_list by favorability, moving good modes to the head of the list.
*/
void drm_mode_sort(struct list_head *mode_list)
{
list_sort(NULL, mode_list, drm_mode_compare);
}
EXPORT_SYMBOL(drm_mode_sort);
/**
* drm_connector_list_update - update the mode list for the connector
* @connector: the connector to update
*
* This moves the modes from the @connector probed_modes list
* to the actual mode list. It compares the probed mode against the current
* list and only adds different/new modes.
*
* This is just a helper function; it doesn't validate any modes itself and also
* doesn't prune any invalid modes. Callers need to do that themselves.
*/
void drm_connector_list_update(struct drm_connector *connector)
{
struct drm_display_mode *pmode, *pt;
WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex));
list_for_each_entry_safe(pmode, pt, &connector->probed_modes, head) {
struct drm_display_mode *mode;
bool found_it = false;
/* go through current modes checking for the new probed mode */
list_for_each_entry(mode, &connector->modes, head) {
if (!drm_mode_equal(pmode, mode))
continue;
found_it = true;
/*
* If the old matching mode is stale (ie. left over
* from a previous probe) just replace it outright.
* Otherwise just merge the type bits between all
* equal probed modes.
*
* If two probed modes are considered equal, pick the
* actual timings from the one that's marked as
* preferred (in case the match isn't 100%). If
* multiple or zero preferred modes are present, favor
* the mode added to the probed_modes list first.
*/
if (mode->status == MODE_STALE) {
drm_mode_copy(mode, pmode);
} else if ((mode->type & DRM_MODE_TYPE_PREFERRED) == 0 &&
(pmode->type & DRM_MODE_TYPE_PREFERRED) != 0) {
pmode->type |= mode->type;
drm_mode_copy(mode, pmode);
} else {
mode->type |= pmode->type;
}
list_del(&pmode->head);
drm_mode_destroy(connector->dev, pmode);
break;
}
if (!found_it) {
list_move_tail(&pmode->head, &connector->modes);
}
}
}
EXPORT_SYMBOL(drm_connector_list_update);
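/*
 * Usage sketch of the probe flow the helpers above support (illustrative
 * only; real drivers normally rely on the probe helpers, which perform
 * roughly these steps internally):
 *
 *	const struct drm_connector_helper_funcs *funcs = connector->helper_private;
 *
 *	mutex_lock(&dev->mode_config.mutex);
 *	funcs->get_modes(connector);	(fills connector->probed_modes)
 *	drm_connector_list_update(connector);
 *	list_for_each_entry(mode, &connector->modes, head)
 *		mode->status = drm_mode_validate_driver(dev, mode);
 *	drm_mode_prune_invalid(dev, &connector->modes, true);
 *	drm_mode_sort(&connector->modes);
 *	mutex_unlock(&dev->mode_config.mutex);
 */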
static int drm_mode_parse_cmdline_bpp(const char *str, char **end_ptr,
struct drm_cmdline_mode *mode)
{
unsigned int bpp;
if (str[0] != '-')
return -EINVAL;
str++;
bpp = simple_strtol(str, end_ptr, 10);
if (*end_ptr == str)
return -EINVAL;
mode->bpp = bpp;
mode->bpp_specified = true;
return 0;
}
static int drm_mode_parse_cmdline_refresh(const char *str, char **end_ptr,
struct drm_cmdline_mode *mode)
{
unsigned int refresh;
if (str[0] != '@')
return -EINVAL;
str++;
refresh = simple_strtol(str, end_ptr, 10);
if (*end_ptr == str)
return -EINVAL;
mode->refresh = refresh;
mode->refresh_specified = true;
return 0;
}
static int drm_mode_parse_cmdline_extra(const char *str, int length,
bool freestanding,
const struct drm_connector *connector,
struct drm_cmdline_mode *mode)
{
int i;
for (i = 0; i < length; i++) {
switch (str[i]) {
case 'i':
if (freestanding)
return -EINVAL;
mode->interlace = true;
break;
case 'm':
if (freestanding)
return -EINVAL;
mode->margins = true;
break;
case 'D':
if (mode->force != DRM_FORCE_UNSPECIFIED)
return -EINVAL;
if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
(connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
mode->force = DRM_FORCE_ON;
else
mode->force = DRM_FORCE_ON_DIGITAL;
break;
case 'd':
if (mode->force != DRM_FORCE_UNSPECIFIED)
return -EINVAL;
mode->force = DRM_FORCE_OFF;
break;
case 'e':
if (mode->force != DRM_FORCE_UNSPECIFIED)
return -EINVAL;
mode->force = DRM_FORCE_ON;
break;
default:
return -EINVAL;
}
}
return 0;
}
static int drm_mode_parse_cmdline_res_mode(const char *str, unsigned int length,
bool extras,
const struct drm_connector *connector,
struct drm_cmdline_mode *mode)
{
const char *str_start = str;
bool rb = false, cvt = false;
int xres = 0, yres = 0;
int remaining, i;
char *end_ptr;
xres = simple_strtol(str, &end_ptr, 10);
if (end_ptr == str)
return -EINVAL;
if (end_ptr[0] != 'x')
return -EINVAL;
end_ptr++;
str = end_ptr;
yres = simple_strtol(str, &end_ptr, 10);
if (end_ptr == str)
return -EINVAL;
remaining = length - (end_ptr - str_start);
if (remaining < 0)
return -EINVAL;
for (i = 0; i < remaining; i++) {
switch (end_ptr[i]) {
case 'M':
cvt = true;
break;
case 'R':
rb = true;
break;
default:
/*
* Try to pass that to our extras parsing
* function to handle the case where the
* extras are directly after the resolution
*/
if (extras) {
int ret = drm_mode_parse_cmdline_extra(end_ptr + i,
1,
false,
connector,
mode);
if (ret)
return ret;
} else {
return -EINVAL;
}
}
}
mode->xres = xres;
mode->yres = yres;
mode->cvt = cvt;
mode->rb = rb;
return 0;
}
static int drm_mode_parse_cmdline_int(const char *delim, unsigned int *int_ret)
{
const char *value;
char *endp;
/*
* delim must point to the '=', otherwise it is a syntax error and
* if delim points to the terminating zero, then delim + 1 will point
* past the end of the string.
*/
if (*delim != '=')
return -EINVAL;
value = delim + 1;
*int_ret = simple_strtol(value, &endp, 10);
/* Make sure we have parsed something */
if (endp == value)
return -EINVAL;
return 0;
}
static int drm_mode_parse_panel_orientation(const char *delim,
struct drm_cmdline_mode *mode)
{
const char *value;
if (*delim != '=')
return -EINVAL;
value = delim + 1;
delim = strchr(value, ',');
if (!delim)
delim = value + strlen(value);
if (!strncmp(value, "normal", delim - value))
mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
else if (!strncmp(value, "upside_down", delim - value))
mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
else if (!strncmp(value, "left_side_up", delim - value))
mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
else if (!strncmp(value, "right_side_up", delim - value))
mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
else
return -EINVAL;
return 0;
}
static int drm_mode_parse_tv_mode(const char *delim,
struct drm_cmdline_mode *mode)
{
const char *value;
int ret;
if (*delim != '=')
return -EINVAL;
value = delim + 1;
delim = strchr(value, ',');
if (!delim)
delim = value + strlen(value);
ret = drm_get_tv_mode_from_name(value, delim - value);
if (ret < 0)
return ret;
mode->tv_mode_specified = true;
mode->tv_mode = ret;
return 0;
}
static int drm_mode_parse_cmdline_options(const char *str,
bool freestanding,
const struct drm_connector *connector,
struct drm_cmdline_mode *mode)
{
unsigned int deg, margin, rotation = 0;
const char *delim, *option, *sep;
option = str;
do {
delim = strchr(option, '=');
if (!delim) {
delim = strchr(option, ',');
if (!delim)
delim = option + strlen(option);
}
if (!strncmp(option, "rotate", delim - option)) {
if (drm_mode_parse_cmdline_int(delim, °))
return -EINVAL;
switch (deg) {
case 0:
rotation |= DRM_MODE_ROTATE_0;
break;
case 90:
rotation |= DRM_MODE_ROTATE_90;
break;
case 180:
rotation |= DRM_MODE_ROTATE_180;
break;
case 270:
rotation |= DRM_MODE_ROTATE_270;
break;
default:
return -EINVAL;
}
} else if (!strncmp(option, "reflect_x", delim - option)) {
rotation |= DRM_MODE_REFLECT_X;
} else if (!strncmp(option, "reflect_y", delim - option)) {
rotation |= DRM_MODE_REFLECT_Y;
} else if (!strncmp(option, "margin_right", delim - option)) {
if (drm_mode_parse_cmdline_int(delim, &margin))
return -EINVAL;
mode->tv_margins.right = margin;
} else if (!strncmp(option, "margin_left", delim - option)) {
if (drm_mode_parse_cmdline_int(delim, &margin))
return -EINVAL;
mode->tv_margins.left = margin;
} else if (!strncmp(option, "margin_top", delim - option)) {
if (drm_mode_parse_cmdline_int(delim, &margin))
return -EINVAL;
mode->tv_margins.top = margin;
} else if (!strncmp(option, "margin_bottom", delim - option)) {
if (drm_mode_parse_cmdline_int(delim, &margin))
return -EINVAL;
mode->tv_margins.bottom = margin;
} else if (!strncmp(option, "panel_orientation", delim - option)) {
if (drm_mode_parse_panel_orientation(delim, mode))
return -EINVAL;
} else if (!strncmp(option, "tv_mode", delim - option)) {
if (drm_mode_parse_tv_mode(delim, mode))
return -EINVAL;
} else {
return -EINVAL;
}
sep = strchr(delim, ',');
option = sep + 1;
} while (sep);
if (rotation && freestanding)
return -EINVAL;
if (!(rotation & DRM_MODE_ROTATE_MASK))
rotation |= DRM_MODE_ROTATE_0;
/* Make sure there is exactly one rotation defined */
if (!is_power_of_2(rotation & DRM_MODE_ROTATE_MASK))
return -EINVAL;
mode->rotation_reflection = rotation;
return 0;
}
struct drm_named_mode {
const char *name;
unsigned int pixel_clock_khz;
unsigned int xres;
unsigned int yres;
unsigned int flags;
unsigned int tv_mode;
};
#define NAMED_MODE(_name, _pclk, _x, _y, _flags, _mode) \
{ \
.name = _name, \
.pixel_clock_khz = _pclk, \
.xres = _x, \
.yres = _y, \
.flags = _flags, \
.tv_mode = _mode, \
}
static const struct drm_named_mode drm_named_modes[] = {
NAMED_MODE("NTSC", 13500, 720, 480, DRM_MODE_FLAG_INTERLACE, DRM_MODE_TV_MODE_NTSC),
NAMED_MODE("NTSC-J", 13500, 720, 480, DRM_MODE_FLAG_INTERLACE, DRM_MODE_TV_MODE_NTSC_J),
NAMED_MODE("PAL", 13500, 720, 576, DRM_MODE_FLAG_INTERLACE, DRM_MODE_TV_MODE_PAL),
NAMED_MODE("PAL-M", 13500, 720, 480, DRM_MODE_FLAG_INTERLACE, DRM_MODE_TV_MODE_PAL_M),
};
static int drm_mode_parse_cmdline_named_mode(const char *name,
unsigned int name_end,
struct drm_cmdline_mode *cmdline_mode)
{
unsigned int i;
if (!name_end)
return 0;
/* If the name starts with a digit, it's not a named mode */
if (isdigit(name[0]))
return 0;
/*
* If there's an equal sign in the name, the command-line
* contains only an option and no mode.
*/
if (strnchr(name, name_end, '='))
return 0;
/* The connection status extras can be set without a mode. */
if (name_end == 1 &&
(name[0] == 'd' || name[0] == 'D' || name[0] == 'e'))
return 0;
/*
* We're sure we're a named mode at this point, iterate over the
* list of modes we're aware of.
*/
for (i = 0; i < ARRAY_SIZE(drm_named_modes); i++) {
const struct drm_named_mode *mode = &drm_named_modes[i];
int ret;
ret = str_has_prefix(name, mode->name);
if (ret != name_end)
continue;
strscpy(cmdline_mode->name, mode->name, sizeof(cmdline_mode->name));
cmdline_mode->pixel_clock = mode->pixel_clock_khz;
cmdline_mode->xres = mode->xres;
cmdline_mode->yres = mode->yres;
cmdline_mode->interlace = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
cmdline_mode->tv_mode = mode->tv_mode;
cmdline_mode->tv_mode_specified = true;
cmdline_mode->specified = true;
return 1;
}
return -EINVAL;
}
/**
* drm_mode_parse_command_line_for_connector - parse command line modeline for connector
* @mode_option: optional per connector mode option
* @connector: connector to parse modeline for
* @mode: preallocated drm_cmdline_mode structure to fill out
*
* This parses @mode_option command line modeline for modes and options to
* configure the connector.
*
* This uses the same parameters as the fb modedb.c, except for an extra
* force-enable, force-enable-digital and force-disable bit at the end::
*
* <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
*
* Additional options can be provided following the mode, using a comma to
* separate each option. Valid options can be found in
* Documentation/fb/modedb.rst.
*
* The intermediate drm_cmdline_mode structure is required to store additional
* options from the command line modeline like the force-enable/disable flag.
*
* Returns:
* True if a valid modeline has been parsed, false otherwise.
*/
bool drm_mode_parse_command_line_for_connector(const char *mode_option,
const struct drm_connector *connector,
struct drm_cmdline_mode *mode)
{
const char *name;
bool freestanding = false, parse_extras = false;
unsigned int bpp_off = 0, refresh_off = 0, options_off = 0;
unsigned int mode_end = 0;
const char *bpp_ptr = NULL, *refresh_ptr = NULL, *extra_ptr = NULL;
const char *options_ptr = NULL;
char *bpp_end_ptr = NULL, *refresh_end_ptr = NULL;
int len, ret;
memset(mode, 0, sizeof(*mode));
mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
if (!mode_option)
return false;
name = mode_option;
/* Locate the start of named options */
options_ptr = strchr(name, ',');
if (options_ptr)
options_off = options_ptr - name;
else
options_off = strlen(name);
/* Try to locate the bpp and refresh specifiers, if any */
bpp_ptr = strnchr(name, options_off, '-');
while (bpp_ptr && !isdigit(bpp_ptr[1]))
bpp_ptr = strnchr(bpp_ptr + 1, options_off, '-');
if (bpp_ptr)
bpp_off = bpp_ptr - name;
refresh_ptr = strnchr(name, options_off, '@');
if (refresh_ptr)
refresh_off = refresh_ptr - name;
/* Locate the end of the name / resolution, and parse it */
if (bpp_ptr) {
mode_end = bpp_off;
} else if (refresh_ptr) {
mode_end = refresh_off;
} else if (options_ptr) {
mode_end = options_off;
parse_extras = true;
} else {
mode_end = strlen(name);
parse_extras = true;
}
if (!mode_end)
return false;
ret = drm_mode_parse_cmdline_named_mode(name, mode_end, mode);
if (ret < 0)
return false;
/*
* Having a mode that starts with a letter (and thus is named) and
* an at-sign (used to specify a refresh rate) is disallowed.
*/
if (ret && refresh_ptr)
return false;
/* No named mode? Check for a normal mode argument, e.g. 1024x768 */
if (!mode->specified && isdigit(name[0])) {
ret = drm_mode_parse_cmdline_res_mode(name, mode_end,
parse_extras,
connector,
mode);
if (ret)
return false;
mode->specified = true;
}
/* No mode? Check for freestanding extras and/or options */
if (!mode->specified) {
unsigned int len = strlen(mode_option);
if (bpp_ptr || refresh_ptr)
return false; /* syntax error */
if (len == 1 || (len >= 2 && mode_option[1] == ','))
extra_ptr = mode_option;
else
options_ptr = mode_option - 1;
freestanding = true;
}
if (bpp_ptr) {
ret = drm_mode_parse_cmdline_bpp(bpp_ptr, &bpp_end_ptr, mode);
if (ret)
return false;
mode->bpp_specified = true;
}
if (refresh_ptr) {
ret = drm_mode_parse_cmdline_refresh(refresh_ptr,
&refresh_end_ptr, mode);
if (ret)
return false;
mode->refresh_specified = true;
}
/*
* Locate the end of the bpp / refresh, and parse the extras
* if relevant
*/
if (bpp_ptr && refresh_ptr)
extra_ptr = max(bpp_end_ptr, refresh_end_ptr);
else if (bpp_ptr)
extra_ptr = bpp_end_ptr;
else if (refresh_ptr)
extra_ptr = refresh_end_ptr;
if (extra_ptr) {
if (options_ptr)
len = options_ptr - extra_ptr;
else
len = strlen(extra_ptr);
ret = drm_mode_parse_cmdline_extra(extra_ptr, len, freestanding,
connector, mode);
if (ret)
return false;
}
if (options_ptr) {
ret = drm_mode_parse_cmdline_options(options_ptr + 1,
freestanding,
connector, mode);
if (ret)
return false;
}
return true;
}
EXPORT_SYMBOL(drm_mode_parse_command_line_for_connector);
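/*
 * Parsing sketch (illustrative): for the option string "1920x1080@60e" the
 * parser above fills the drm_cmdline_mode roughly as xres = 1920,
 * yres = 1080, refresh = 60, refresh_specified = true, specified = true and
 * force = DRM_FORCE_ON (from the trailing 'e').
 */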
static struct drm_display_mode *drm_named_mode(struct drm_device *dev,
struct drm_cmdline_mode *cmd)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(drm_named_modes); i++) {
const struct drm_named_mode *named_mode = &drm_named_modes[i];
if (strcmp(cmd->name, named_mode->name))
continue;
if (!cmd->tv_mode_specified)
continue;
return drm_analog_tv_mode(dev,
named_mode->tv_mode,
named_mode->pixel_clock_khz * 1000,
named_mode->xres,
named_mode->yres,
named_mode->flags & DRM_MODE_FLAG_INTERLACE);
}
return NULL;
}
/**
* drm_mode_create_from_cmdline_mode - convert a command line modeline into a DRM display mode
* @dev: DRM device to create the new mode for
* @cmd: input command line modeline
*
* Returns:
* Pointer to converted mode on success, NULL on error.
*/
struct drm_display_mode *
drm_mode_create_from_cmdline_mode(struct drm_device *dev,
struct drm_cmdline_mode *cmd)
{
struct drm_display_mode *mode;
if (cmd->xres == 0 || cmd->yres == 0)
return NULL;
if (strlen(cmd->name))
mode = drm_named_mode(dev, cmd);
else if (cmd->cvt)
mode = drm_cvt_mode(dev,
cmd->xres, cmd->yres,
cmd->refresh_specified ? cmd->refresh : 60,
cmd->rb, cmd->interlace,
cmd->margins);
else
mode = drm_gtf_mode(dev,
cmd->xres, cmd->yres,
cmd->refresh_specified ? cmd->refresh : 60,
cmd->interlace,
cmd->margins);
if (!mode)
return NULL;
mode->type |= DRM_MODE_TYPE_USERDEF;
/* fix up 1368x768: GTF/CVT can't express 1366 width due to alignment */
if (cmd->xres == 1366)
drm_mode_fixup_1366x768(mode);
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
return mode;
}
EXPORT_SYMBOL(drm_mode_create_from_cmdline_mode);
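/*
 * Usage sketch (illustrative; "option" is a hypothetical command line string,
 * not something defined in this file): turning a parsed command line option
 * into an actual display mode:
 *
 *	struct drm_cmdline_mode cmdline = {};
 *	struct drm_display_mode *mode;
 *
 *	if (!drm_mode_parse_command_line_for_connector(option, connector,
 *						       &cmdline))
 *		return NULL;
 *	mode = drm_mode_create_from_cmdline_mode(connector->dev, &cmdline);
 */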
/**
* drm_mode_convert_to_umode - convert a drm_display_mode into a modeinfo
* @out: drm_mode_modeinfo struct to return to the user
* @in: drm_display_mode to use
*
* Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
* the user.
*/
void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
const struct drm_display_mode *in)
{
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
out->hsync_end = in->hsync_end;
out->htotal = in->htotal;
out->hskew = in->hskew;
out->vdisplay = in->vdisplay;
out->vsync_start = in->vsync_start;
out->vsync_end = in->vsync_end;
out->vtotal = in->vtotal;
out->vscan = in->vscan;
out->vrefresh = drm_mode_vrefresh(in);
out->flags = in->flags;
out->type = in->type;
switch (in->picture_aspect_ratio) {
case HDMI_PICTURE_ASPECT_4_3:
out->flags |= DRM_MODE_FLAG_PIC_AR_4_3;
break;
case HDMI_PICTURE_ASPECT_16_9:
out->flags |= DRM_MODE_FLAG_PIC_AR_16_9;
break;
case HDMI_PICTURE_ASPECT_64_27:
out->flags |= DRM_MODE_FLAG_PIC_AR_64_27;
break;
case HDMI_PICTURE_ASPECT_256_135:
out->flags |= DRM_MODE_FLAG_PIC_AR_256_135;
break;
default:
WARN(1, "Invalid aspect ratio (0%x) on mode\n",
in->picture_aspect_ratio);
fallthrough;
case HDMI_PICTURE_ASPECT_NONE:
out->flags |= DRM_MODE_FLAG_PIC_AR_NONE;
break;
}
strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
}
/**
* drm_mode_convert_umode - convert a modeinfo into a drm_display_mode
* @dev: drm device
* @out: drm_display_mode to return to the user
* @in: drm_mode_modeinfo to use
*
* Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
* the caller.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_convert_umode(struct drm_device *dev,
struct drm_display_mode *out,
const struct drm_mode_modeinfo *in)
{
if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
return -ERANGE;
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
out->hsync_end = in->hsync_end;
out->htotal = in->htotal;
out->hskew = in->hskew;
out->vdisplay = in->vdisplay;
out->vsync_start = in->vsync_start;
out->vsync_end = in->vsync_end;
out->vtotal = in->vtotal;
out->vscan = in->vscan;
out->flags = in->flags;
/*
* Old xf86-video-vmware (possibly others too) used to
* leave 'type' uninitialized. Just ignore any bits we
* don't like. It's just a hint after all, and more
* useful for the kernel->userspace direction anyway.
*/
out->type = in->type & DRM_MODE_TYPE_ALL;
strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
/* Clearing picture aspect ratio bits from out flags,
* as the aspect-ratio information is not stored in
* flags for kernel-mode, but in picture_aspect_ratio.
*/
out->flags &= ~DRM_MODE_FLAG_PIC_AR_MASK;
switch (in->flags & DRM_MODE_FLAG_PIC_AR_MASK) {
case DRM_MODE_FLAG_PIC_AR_4_3:
out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
break;
case DRM_MODE_FLAG_PIC_AR_16_9:
out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
break;
case DRM_MODE_FLAG_PIC_AR_64_27:
out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27;
break;
case DRM_MODE_FLAG_PIC_AR_256_135:
out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135;
break;
case DRM_MODE_FLAG_PIC_AR_NONE:
out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
break;
default:
return -EINVAL;
}
out->status = drm_mode_validate_driver(dev, out);
if (out->status != MODE_OK)
return -EINVAL;
drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);
return 0;
}
/**
* drm_mode_is_420_only - if a given videomode can only be supported in YCBCR420
* output format
*
* @display: display under action
* @mode: video mode to be tested.
*
* Returns:
* true if the mode can be supported in YCBCR420 format
* false if not.
*/
bool drm_mode_is_420_only(const struct drm_display_info *display,
const struct drm_display_mode *mode)
{
u8 vic = drm_match_cea_mode(mode);
return test_bit(vic, display->hdmi.y420_vdb_modes);
}
EXPORT_SYMBOL(drm_mode_is_420_only);
/**
* drm_mode_is_420_also - if a given videomode can be supported in YCBCR420
* output format also (along with RGB/YCBCR444/422)
*
* @display: display under action.
* @mode: video mode to be tested.
*
* Returns:
* true if the mode can also be supported in YCBCR420 format
* false if not.
*/
bool drm_mode_is_420_also(const struct drm_display_info *display,
const struct drm_display_mode *mode)
{
u8 vic = drm_match_cea_mode(mode);
return test_bit(vic, display->hdmi.y420_cmdb_modes);
}
EXPORT_SYMBOL(drm_mode_is_420_also);
/**
* drm_mode_is_420 - if a given videomode can be supported in YCBCR420
* output format
*
* @display: display under action.
* @mode: video mode to be tested.
*
* Returns:
* true if the mode can be supported in YCBCR420 format
* false if not.
*/
bool drm_mode_is_420(const struct drm_display_info *display,
const struct drm_display_mode *mode)
{
return drm_mode_is_420_only(display, mode) ||
drm_mode_is_420_also(display, mode);
}
EXPORT_SYMBOL(drm_mode_is_420);
| linux-master | drivers/gpu/drm/drm_modes.c |
// SPDX-License-Identifier: GPL-2.0
/*
* (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
* Author: Brian Starkey <[email protected]>
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*/
#include <linux/dma-fence.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_property.h>
#include <drm/drm_writeback.h>
/**
* DOC: overview
*
* Writeback connectors are used to expose hardware which can write the output
* from a CRTC to a memory buffer. They are used and act similarly to other
* types of connectors, with some important differences:
*
* * Writeback connectors don't provide a way to output visually to the user.
*
* * Writeback connectors are visible to userspace only when the client sets
* DRM_CLIENT_CAP_WRITEBACK_CONNECTORS.
*
* * Writeback connectors don't have EDID.
*
* A framebuffer may only be attached to a writeback connector when the
* connector is attached to a CRTC. The WRITEBACK_FB_ID property which sets the
* framebuffer applies only to a single commit (see below). A framebuffer may
* not be attached while the CRTC is off.
*
* Unlike with planes, when a writeback framebuffer is removed by userspace DRM
* makes no attempt to remove it from active use by the connector. This is
* because no method is provided to abort a writeback operation, and in any
* case making a new commit whilst a writeback is ongoing is undefined (see
* WRITEBACK_OUT_FENCE_PTR below). As soon as the current writeback is finished,
* the framebuffer will automatically no longer be in active use. As it will
* also have already been removed from the framebuffer list, there will be no
* way for any userspace application to retrieve a reference to it in the
* intervening period.
*
* Writeback connectors have some additional properties, which userspace
* can use to query and control them:
*
* "WRITEBACK_FB_ID":
* Write-only object property storing a DRM_MODE_OBJECT_FB: it stores the
* framebuffer to be written by the writeback connector. This property is
* similar to the FB_ID property on planes, but will always read as zero
* and is not preserved across commits.
* Userspace must set this property to an output buffer every time it
* wishes the buffer to get filled.
*
* "WRITEBACK_PIXEL_FORMATS":
* Immutable blob property to store the supported pixel formats table. The
* data is an array of u32 DRM_FORMAT_* fourcc values.
* Userspace can use this blob to find out what pixel formats are supported
* by the connector's writeback engine.
*
* "WRITEBACK_OUT_FENCE_PTR":
* Userspace can use this property to provide a pointer for the kernel to
* fill with a sync_file file descriptor, which will signal once the
* writeback is finished. The value should be the address of a 32-bit
* signed integer, cast to a u64.
* Userspace should wait for this fence to signal before making another
* commit affecting any of the same CRTCs, Planes or Connectors.
* **Failure to do so will result in undefined behaviour.**
* For this reason it is strongly recommended that all userspace
* applications making use of writeback connectors *always* retrieve an
* out-fence for the commit and use it appropriately.
* From userspace, this property will always read as zero.
*/
#define fence_to_wb_connector(x) container_of(x->lock, \
struct drm_writeback_connector, \
fence_lock)
static const char *drm_writeback_fence_get_driver_name(struct dma_fence *fence)
{
struct drm_writeback_connector *wb_connector =
fence_to_wb_connector(fence);
return wb_connector->base.dev->driver->name;
}
static const char *
drm_writeback_fence_get_timeline_name(struct dma_fence *fence)
{
struct drm_writeback_connector *wb_connector =
fence_to_wb_connector(fence);
return wb_connector->timeline_name;
}
static bool drm_writeback_fence_enable_signaling(struct dma_fence *fence)
{
return true;
}
static const struct dma_fence_ops drm_writeback_fence_ops = {
.get_driver_name = drm_writeback_fence_get_driver_name,
.get_timeline_name = drm_writeback_fence_get_timeline_name,
.enable_signaling = drm_writeback_fence_enable_signaling,
};
static int create_writeback_properties(struct drm_device *dev)
{
struct drm_property *prop;
if (!dev->mode_config.writeback_fb_id_property) {
prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
"WRITEBACK_FB_ID",
DRM_MODE_OBJECT_FB);
if (!prop)
return -ENOMEM;
dev->mode_config.writeback_fb_id_property = prop;
}
if (!dev->mode_config.writeback_pixel_formats_property) {
prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
DRM_MODE_PROP_ATOMIC |
DRM_MODE_PROP_IMMUTABLE,
"WRITEBACK_PIXEL_FORMATS", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.writeback_pixel_formats_property = prop;
}
if (!dev->mode_config.writeback_out_fence_ptr_property) {
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"WRITEBACK_OUT_FENCE_PTR", 0,
U64_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.writeback_out_fence_ptr_property = prop;
}
return 0;
}
static const struct drm_encoder_funcs drm_writeback_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
/**
* drm_writeback_connector_init - Initialize a writeback connector and its properties
* @dev: DRM device
* @wb_connector: Writeback connector to initialize
* @con_funcs: Connector funcs vtable
* @enc_helper_funcs: Encoder helper funcs vtable to be used by the internal encoder
* @formats: Array of supported pixel formats for the writeback engine
* @n_formats: Length of the formats array
* @possible_crtcs: possible crtcs for the internal writeback encoder
*
* This function creates the writeback-connector-specific properties if they
* have not been already created, initializes the connector as
* type DRM_MODE_CONNECTOR_WRITEBACK, and correctly initializes the property
* values. It will also create an internal encoder associated with the
* drm_writeback_connector and set it to use the @enc_helper_funcs vtable for
* the encoder helper.
*
* Drivers should always use this function instead of drm_connector_init() to
* set up writeback connectors.
*
* Returns: 0 on success, or a negative error code
*/
int drm_writeback_connector_init(struct drm_device *dev,
struct drm_writeback_connector *wb_connector,
const struct drm_connector_funcs *con_funcs,
const struct drm_encoder_helper_funcs *enc_helper_funcs,
const u32 *formats, int n_formats,
u32 possible_crtcs)
{
int ret = 0;
drm_encoder_helper_add(&wb_connector->encoder, enc_helper_funcs);
wb_connector->encoder.possible_crtcs = possible_crtcs;
ret = drm_encoder_init(dev, &wb_connector->encoder,
&drm_writeback_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
if (ret)
return ret;
ret = drm_writeback_connector_init_with_encoder(dev, wb_connector, &wb_connector->encoder,
con_funcs, formats, n_formats);
if (ret)
drm_encoder_cleanup(&wb_connector->encoder);
return ret;
}
EXPORT_SYMBOL(drm_writeback_connector_init);
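/*
 * Usage sketch (hypothetical driver code; the format list, funcs tables,
 * "foo" device structure and the CRTC mask are illustrative, not defined in
 * this file):
 *
 *	static const u32 foo_wb_formats[] = { DRM_FORMAT_XRGB8888 };
 *
 *	ret = drm_writeback_connector_init(dev, &foo->wb_connector,
 *					   &foo_wb_connector_funcs,
 *					   &foo_wb_encoder_helper_funcs,
 *					   foo_wb_formats,
 *					   ARRAY_SIZE(foo_wb_formats),
 *					   BIT(drm_crtc_index(&foo->crtc)));
 */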
/**
* drm_writeback_connector_init_with_encoder - Initialize a writeback connector with
* a custom encoder
*
* @dev: DRM device
* @wb_connector: Writeback connector to initialize
* @enc: handle to the already initialized drm encoder
* @con_funcs: Connector funcs vtable
* @formats: Array of supported pixel formats for the writeback engine
* @n_formats: Length of the formats array
*
* This function creates the writeback-connector-specific properties if they
* have not been already created, initializes the connector as
* type DRM_MODE_CONNECTOR_WRITEBACK, and correctly initializes the property
* values.
*
* This function assumes that the drm_writeback_connector's encoder has already been
* created and initialized before invoking this function.
*
* In addition, this function also assumes that callers of this API will manage
* assigning the encoder helper functions, possible_crtcs and any other encoder
* specific operation.
*
* Drivers should always use this function instead of drm_connector_init() to
* set up writeback connectors if they want to manage themselves the lifetime of the
* associated encoder.
*
* Returns: 0 on success, or a negative error code
*/
int drm_writeback_connector_init_with_encoder(struct drm_device *dev,
struct drm_writeback_connector *wb_connector, struct drm_encoder *enc,
const struct drm_connector_funcs *con_funcs, const u32 *formats,
int n_formats)
{
struct drm_property_blob *blob;
struct drm_connector *connector = &wb_connector->base;
struct drm_mode_config *config = &dev->mode_config;
int ret = create_writeback_properties(dev);
if (ret != 0)
return ret;
blob = drm_property_create_blob(dev, n_formats * sizeof(*formats),
formats);
if (IS_ERR(blob))
return PTR_ERR(blob);
connector->interlace_allowed = 0;
ret = drm_connector_init(dev, connector, con_funcs,
DRM_MODE_CONNECTOR_WRITEBACK);
if (ret)
goto connector_fail;
ret = drm_connector_attach_encoder(connector, enc);
if (ret)
goto attach_fail;
INIT_LIST_HEAD(&wb_connector->job_queue);
spin_lock_init(&wb_connector->job_lock);
wb_connector->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&wb_connector->fence_lock);
snprintf(wb_connector->timeline_name,
sizeof(wb_connector->timeline_name),
"CONNECTOR:%d-%s", connector->base.id, connector->name);
drm_object_attach_property(&connector->base,
config->writeback_out_fence_ptr_property, 0);
drm_object_attach_property(&connector->base,
config->writeback_fb_id_property, 0);
drm_object_attach_property(&connector->base,
config->writeback_pixel_formats_property,
blob->base.id);
wb_connector->pixel_formats_blob_ptr = blob;
return 0;
attach_fail:
drm_connector_cleanup(connector);
connector_fail:
drm_property_blob_put(blob);
return ret;
}
EXPORT_SYMBOL(drm_writeback_connector_init_with_encoder);
int drm_writeback_set_fb(struct drm_connector_state *conn_state,
struct drm_framebuffer *fb)
{
WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
if (!conn_state->writeback_job) {
conn_state->writeback_job =
kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
if (!conn_state->writeback_job)
return -ENOMEM;
conn_state->writeback_job->connector =
drm_connector_to_writeback(conn_state->connector);
}
drm_framebuffer_assign(&conn_state->writeback_job->fb, fb);
return 0;
}
int drm_writeback_prepare_job(struct drm_writeback_job *job)
{
struct drm_writeback_connector *connector = job->connector;
const struct drm_connector_helper_funcs *funcs =
connector->base.helper_private;
int ret;
if (funcs->prepare_writeback_job) {
ret = funcs->prepare_writeback_job(connector, job);
if (ret < 0)
return ret;
}
job->prepared = true;
return 0;
}
EXPORT_SYMBOL(drm_writeback_prepare_job);
/**
* drm_writeback_queue_job - Queue a writeback job for later signalling
* @wb_connector: The writeback connector to queue a job on
* @conn_state: The connector state containing the job to queue
*
* This function adds the job contained in @conn_state to the job_queue for a
* writeback connector. It takes ownership of the writeback job and sets the
* @conn_state->writeback_job to NULL, and so no access to the job may be
* performed by the caller after this function returns.
*
* Drivers must ensure that for a given writeback connector, jobs are queued in
* exactly the same order as they will be completed by the hardware (and
* signaled via drm_writeback_signal_completion).
*
* For every call to drm_writeback_queue_job() there must be exactly one call to
* drm_writeback_signal_completion()
*
* See also: drm_writeback_signal_completion()
*/
void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
struct drm_connector_state *conn_state)
{
struct drm_writeback_job *job;
unsigned long flags;
job = conn_state->writeback_job;
conn_state->writeback_job = NULL;
spin_lock_irqsave(&wb_connector->job_lock, flags);
list_add_tail(&job->list_entry, &wb_connector->job_queue);
spin_unlock_irqrestore(&wb_connector->job_lock, flags);
}
EXPORT_SYMBOL(drm_writeback_queue_job);
void drm_writeback_cleanup_job(struct drm_writeback_job *job)
{
struct drm_writeback_connector *connector = job->connector;
const struct drm_connector_helper_funcs *funcs =
connector->base.helper_private;
if (job->prepared && funcs->cleanup_writeback_job)
funcs->cleanup_writeback_job(connector, job);
if (job->fb)
drm_framebuffer_put(job->fb);
if (job->out_fence)
dma_fence_put(job->out_fence);
kfree(job);
}
EXPORT_SYMBOL(drm_writeback_cleanup_job);
/*
* @cleanup_work: deferred cleanup of a writeback job
*
* The job cannot be cleaned up directly in drm_writeback_signal_completion,
* because it may be called in interrupt context. Dropping the framebuffer
* reference can sleep, and so the cleanup is deferred to a workqueue.
*/
static void cleanup_work(struct work_struct *work)
{
struct drm_writeback_job *job = container_of(work,
struct drm_writeback_job,
cleanup_work);
drm_writeback_cleanup_job(job);
}
/**
* drm_writeback_signal_completion - Signal the completion of a writeback job
* @wb_connector: The writeback connector whose job is complete
* @status: Status code to set in the writeback out_fence (0 for success)
*
* Drivers should call this to signal the completion of a previously queued
* writeback job. It should be called as soon as possible after the hardware
* has finished writing, and may be called from interrupt context.
* It is the driver's responsibility to ensure that for a given connector, the
* hardware completes writeback jobs in the same order as they are queued.
*
* Unless the driver is holding its own reference to the framebuffer, it must
* not be accessed after calling this function.
*
* See also: drm_writeback_queue_job()
*/
void
drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
int status)
{
unsigned long flags;
struct drm_writeback_job *job;
struct dma_fence *out_fence;
spin_lock_irqsave(&wb_connector->job_lock, flags);
job = list_first_entry_or_null(&wb_connector->job_queue,
struct drm_writeback_job,
list_entry);
if (job)
list_del(&job->list_entry);
spin_unlock_irqrestore(&wb_connector->job_lock, flags);
if (WARN_ON(!job))
return;
out_fence = job->out_fence;
if (out_fence) {
if (status)
dma_fence_set_error(out_fence, status);
dma_fence_signal(out_fence);
dma_fence_put(out_fence);
job->out_fence = NULL;
}
INIT_WORK(&job->cleanup_work, cleanup_work);
queue_work(system_long_wq, &job->cleanup_work);
}
EXPORT_SYMBOL(drm_writeback_signal_completion);
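/*
 * Usage sketch of the queue/complete pairing (illustrative only; where
 * exactly each call lives depends on the driver):
 *
 *	atomic commit / encoder enable path:
 *		drm_writeback_queue_job(wb_connector, connector_state);
 *
 *	writeback-done interrupt handler:
 *		drm_writeback_signal_completion(wb_connector, 0);
 */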
struct dma_fence *
drm_writeback_get_out_fence(struct drm_writeback_connector *wb_connector)
{
struct dma_fence *fence;
if (WARN_ON(wb_connector->base.connector_type !=
DRM_MODE_CONNECTOR_WRITEBACK))
return NULL;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return NULL;
dma_fence_init(fence, &drm_writeback_fence_ops,
&wb_connector->fence_lock, wb_connector->fence_context,
++wb_connector->fence_seqno);
return fence;
}
EXPORT_SYMBOL(drm_writeback_get_out_fence);
| linux-master | drivers/gpu/drm/drm_writeback.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
*/
#include <drm/drm_displayid.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
static const struct displayid_header *
displayid_get_header(const u8 *displayid, int length, int index)
{
const struct displayid_header *base;
if (sizeof(*base) > length - index)
return ERR_PTR(-EINVAL);
base = (const struct displayid_header *)&displayid[index];
return base;
}
static const struct displayid_header *
validate_displayid(const u8 *displayid, int length, int idx)
{
int i, dispid_length;
u8 csum = 0;
const struct displayid_header *base;
base = displayid_get_header(displayid, length, idx);
if (IS_ERR(base))
return base;
DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
base->rev, base->bytes, base->prod_id, base->ext_count);
/* +1 for DispID checksum */
dispid_length = sizeof(*base) + base->bytes + 1;
if (dispid_length > length - idx)
return ERR_PTR(-EINVAL);
for (i = 0; i < dispid_length; i++)
csum += displayid[idx + i];
if (csum) {
DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum);
return ERR_PTR(-EINVAL);
}
return base;
}
static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid,
int *length, int *idx,
int *ext_index)
{
const u8 *displayid = drm_find_edid_extension(drm_edid, DISPLAYID_EXT, ext_index);
const struct displayid_header *base;
if (!displayid)
return NULL;
/* EDID extensions block checksum isn't for us */
*length = EDID_LENGTH - 1;
*idx = 1;
base = validate_displayid(displayid, *length, *idx);
if (IS_ERR(base))
return NULL;
*length = *idx + sizeof(*base) + base->bytes;
return displayid;
}
void displayid_iter_edid_begin(const struct drm_edid *drm_edid,
struct displayid_iter *iter)
{
memset(iter, 0, sizeof(*iter));
iter->drm_edid = drm_edid;
}
static const struct displayid_block *
displayid_iter_block(const struct displayid_iter *iter)
{
const struct displayid_block *block;
if (!iter->section)
return NULL;
block = (const struct displayid_block *)&iter->section[iter->idx];
if (iter->idx + sizeof(*block) <= iter->length &&
iter->idx + sizeof(*block) + block->num_bytes <= iter->length)
return block;
return NULL;
}
const struct displayid_block *
__displayid_iter_next(struct displayid_iter *iter)
{
const struct displayid_block *block;
if (!iter->drm_edid)
return NULL;
if (iter->section) {
/* current block should always be valid */
block = displayid_iter_block(iter);
if (WARN_ON(!block)) {
iter->section = NULL;
iter->drm_edid = NULL;
return NULL;
}
/* next block in section */
iter->idx += sizeof(*block) + block->num_bytes;
block = displayid_iter_block(iter);
if (block)
return block;
}
for (;;) {
/* The first section we encounter is the base section */
bool base_section = !iter->section;
iter->section = drm_find_displayid_extension(iter->drm_edid,
&iter->length,
&iter->idx,
&iter->ext_index);
if (!iter->section) {
iter->drm_edid = NULL;
return NULL;
}
/* Save the structure version and primary use case. */
if (base_section) {
const struct displayid_header *base;
base = displayid_get_header(iter->section, iter->length,
iter->idx);
if (!IS_ERR(base)) {
iter->version = base->rev;
iter->primary_use = base->prod_id;
}
}
iter->idx += sizeof(struct displayid_header);
block = displayid_iter_block(iter);
if (block)
return block;
}
}
void displayid_iter_end(struct displayid_iter *iter)
{
memset(iter, 0, sizeof(*iter));
}
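/*
 * Usage sketch (illustrative; displayid_iter_for_each() is assumed to be the
 * iteration macro from drm_displayid.h wrapping __displayid_iter_next()):
 *
 *	const struct displayid_block *block;
 *	struct displayid_iter iter;
 *
 *	displayid_iter_edid_begin(drm_edid, &iter);
 *	displayid_iter_for_each(block, &iter) {
 *		if (block->tag == DATA_BLOCK_TILED_DISPLAY)
 *			handle_tiled_display_block(block);
 *	}
 *	displayid_iter_end(&iter);
 */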
/* DisplayID Structure Version/Revision from the Base Section. */
u8 displayid_version(const struct displayid_iter *iter)
{
return iter->version;
}
/*
* DisplayID Primary Use Case (2.0+) or Product Type Identifier (1.0-1.3) from
* the Base Section.
*/
u8 displayid_primary_use(const struct displayid_iter *iter)
{
return iter->primary_use;
}
| linux-master | drivers/gpu/drm/drm_displayid.c |
/*
* MIPI DSI Bus
*
* Copyright (C) 2012-2013, Samsung Electronics, Co., Ltd.
* Andrzej Hajda <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <drm/display/drm_dsc.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_print.h>
#include <video/mipi_display.h>
/**
* DOC: dsi helpers
*
* These functions contain some common logic and helpers to deal with MIPI DSI
* peripherals.
*
* Helpers are provided for a number of standard MIPI DSI commands as well as a
* subset of the MIPI DCS command set.
*/
static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
/* attempt OF style match */
if (of_driver_match_device(dev, drv))
return 1;
/* compare DSI device and driver names */
if (!strcmp(dsi->name, drv->name))
return 1;
return 0;
}
static int mipi_dsi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
int err;
err = of_device_uevent_modalias(dev, env);
if (err != -ENODEV)
return err;
add_uevent_var(env, "MODALIAS=%s%s", MIPI_DSI_MODULE_PREFIX,
dsi->name);
return 0;
}
static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
.runtime_suspend = pm_generic_runtime_suspend,
.runtime_resume = pm_generic_runtime_resume,
.suspend = pm_generic_suspend,
.resume = pm_generic_resume,
.freeze = pm_generic_freeze,
.thaw = pm_generic_thaw,
.poweroff = pm_generic_poweroff,
.restore = pm_generic_restore,
};
static struct bus_type mipi_dsi_bus_type = {
.name = "mipi-dsi",
.match = mipi_dsi_device_match,
.uevent = mipi_dsi_uevent,
.pm = &mipi_dsi_device_pm_ops,
};
/**
* of_find_mipi_dsi_device_by_node() - find the MIPI DSI device matching a
* device tree node
* @np: device tree node
*
* Return: A pointer to the MIPI DSI device corresponding to @np or NULL if no
* such device exists (or has not been registered yet).
*/
struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np)
{
struct device *dev;
dev = bus_find_device_by_of_node(&mipi_dsi_bus_type, np);
return dev ? to_mipi_dsi_device(dev) : NULL;
}
EXPORT_SYMBOL(of_find_mipi_dsi_device_by_node);
static void mipi_dsi_dev_release(struct device *dev)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
of_node_put(dev->of_node);
kfree(dsi);
}
static const struct device_type mipi_dsi_device_type = {
.release = mipi_dsi_dev_release,
};
static struct mipi_dsi_device *mipi_dsi_device_alloc(struct mipi_dsi_host *host)
{
struct mipi_dsi_device *dsi;
dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
if (!dsi)
return ERR_PTR(-ENOMEM);
dsi->host = host;
dsi->dev.bus = &mipi_dsi_bus_type;
dsi->dev.parent = host->dev;
dsi->dev.type = &mipi_dsi_device_type;
device_initialize(&dsi->dev);
return dsi;
}
static int mipi_dsi_device_add(struct mipi_dsi_device *dsi)
{
struct mipi_dsi_host *host = dsi->host;
dev_set_name(&dsi->dev, "%s.%d", dev_name(host->dev), dsi->channel);
return device_add(&dsi->dev);
}
#if IS_ENABLED(CONFIG_OF)
static struct mipi_dsi_device *
of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
{
struct mipi_dsi_device_info info = { };
int ret;
u32 reg;
if (of_alias_from_compatible(node, info.type, sizeof(info.type)) < 0) {
drm_err(host, "modalias failure on %pOF\n", node);
return ERR_PTR(-EINVAL);
}
ret = of_property_read_u32(node, "reg", &reg);
if (ret) {
drm_err(host, "device node %pOF has no valid reg property: %d\n",
node, ret);
return ERR_PTR(-EINVAL);
}
info.channel = reg;
info.node = of_node_get(node);
return mipi_dsi_device_register_full(host, &info);
}
#else
static struct mipi_dsi_device *
of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
{
return ERR_PTR(-ENODEV);
}
#endif
/**
* mipi_dsi_device_register_full - create a MIPI DSI device
* @host: DSI host to which this device is connected
* @info: pointer to template containing DSI device information
*
* Create a MIPI DSI device by using the device information provided by
* the mipi_dsi_device_info template.
*
* Returns:
* A pointer to the newly created MIPI DSI device, or a pointer encoded
* with an error.
*/
struct mipi_dsi_device *
mipi_dsi_device_register_full(struct mipi_dsi_host *host,
const struct mipi_dsi_device_info *info)
{
struct mipi_dsi_device *dsi;
int ret;
if (!info) {
drm_err(host, "invalid mipi_dsi_device_info pointer\n");
return ERR_PTR(-EINVAL);
}
if (info->channel > 3) {
drm_err(host, "invalid virtual channel: %u\n", info->channel);
return ERR_PTR(-EINVAL);
}
dsi = mipi_dsi_device_alloc(host);
if (IS_ERR(dsi)) {
drm_err(host, "failed to allocate DSI device %ld\n",
PTR_ERR(dsi));
return dsi;
}
device_set_node(&dsi->dev, of_fwnode_handle(info->node));
dsi->channel = info->channel;
strscpy(dsi->name, info->type, sizeof(dsi->name));
ret = mipi_dsi_device_add(dsi);
if (ret) {
drm_err(host, "failed to add DSI device %d\n", ret);
kfree(dsi);
return ERR_PTR(ret);
}
return dsi;
}
EXPORT_SYMBOL(mipi_dsi_device_register_full);
/**
* mipi_dsi_device_unregister - unregister MIPI DSI device
* @dsi: DSI peripheral device
*/
void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi)
{
device_unregister(&dsi->dev);
}
EXPORT_SYMBOL(mipi_dsi_device_unregister);
static void devm_mipi_dsi_device_unregister(void *arg)
{
struct mipi_dsi_device *dsi = arg;
mipi_dsi_device_unregister(dsi);
}
/**
* devm_mipi_dsi_device_register_full - create a managed MIPI DSI device
* @dev: device to tie the MIPI-DSI device lifetime to
* @host: DSI host to which this device is connected
* @info: pointer to template containing DSI device information
*
* Create a MIPI DSI device by using the device information provided by
* the mipi_dsi_device_info template.
*
* This is the managed version of mipi_dsi_device_register_full() which
* automatically calls mipi_dsi_device_unregister() when @dev is
* unbound.
*
* Returns:
* A pointer to the newly created MIPI DSI device, or a pointer encoded
* with an error.
*/
struct mipi_dsi_device *
devm_mipi_dsi_device_register_full(struct device *dev,
struct mipi_dsi_host *host,
const struct mipi_dsi_device_info *info)
{
struct mipi_dsi_device *dsi;
int ret;
dsi = mipi_dsi_device_register_full(host, info);
if (IS_ERR(dsi))
return dsi;
ret = devm_add_action_or_reset(dev,
devm_mipi_dsi_device_unregister,
dsi);
if (ret)
return ERR_PTR(ret);
return dsi;
}
EXPORT_SYMBOL_GPL(devm_mipi_dsi_device_register_full);
static DEFINE_MUTEX(host_lock);
static LIST_HEAD(host_list);
/**
* of_find_mipi_dsi_host_by_node() - find the MIPI DSI host matching a
* device tree node
* @node: device tree node
*
* Returns:
* A pointer to the MIPI DSI host corresponding to @node or NULL if no
* such device exists (or has not been registered yet).
*/
struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node)
{
struct mipi_dsi_host *host;
mutex_lock(&host_lock);
list_for_each_entry(host, &host_list, list) {
if (host->dev->of_node == node) {
mutex_unlock(&host_lock);
return host;
}
}
mutex_unlock(&host_lock);
return NULL;
}
EXPORT_SYMBOL(of_find_mipi_dsi_host_by_node);
int mipi_dsi_host_register(struct mipi_dsi_host *host)
{
struct device_node *node;
for_each_available_child_of_node(host->dev->of_node, node) {
/* skip nodes without reg property */
if (!of_property_present(node, "reg"))
continue;
of_mipi_dsi_device_add(host, node);
}
mutex_lock(&host_lock);
list_add_tail(&host->list, &host_list);
mutex_unlock(&host_lock);
return 0;
}
EXPORT_SYMBOL(mipi_dsi_host_register);
static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
mipi_dsi_detach(dsi);
mipi_dsi_device_unregister(dsi);
return 0;
}
void mipi_dsi_host_unregister(struct mipi_dsi_host *host)
{
device_for_each_child(host->dev, NULL, mipi_dsi_remove_device_fn);
mutex_lock(&host_lock);
list_del_init(&host->list);
mutex_unlock(&host_lock);
}
EXPORT_SYMBOL(mipi_dsi_host_unregister);
/**
* mipi_dsi_attach - attach a DSI device to its DSI host
* @dsi: DSI peripheral
*/
int mipi_dsi_attach(struct mipi_dsi_device *dsi)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
if (!ops || !ops->attach)
return -ENOSYS;
return ops->attach(dsi->host, dsi);
}
EXPORT_SYMBOL(mipi_dsi_attach);
/**
* mipi_dsi_detach - detach a DSI device from its DSI host
* @dsi: DSI peripheral
*/
int mipi_dsi_detach(struct mipi_dsi_device *dsi)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
if (!ops || !ops->detach)
return -ENOSYS;
return ops->detach(dsi->host, dsi);
}
EXPORT_SYMBOL(mipi_dsi_detach);
static void devm_mipi_dsi_detach(void *arg)
{
struct mipi_dsi_device *dsi = arg;
mipi_dsi_detach(dsi);
}
/**
* devm_mipi_dsi_attach - Attach a MIPI-DSI device to its DSI Host
* @dev: device to tie the MIPI-DSI device attachment lifetime to
* @dsi: DSI peripheral
*
* This is the managed version of mipi_dsi_attach() which automatically
* calls mipi_dsi_detach() when @dev is unbound.
*
* Returns:
* 0 on success, a negative error code on failure.
*/
int devm_mipi_dsi_attach(struct device *dev,
struct mipi_dsi_device *dsi)
{
int ret;
ret = mipi_dsi_attach(dsi);
if (ret)
return ret;
ret = devm_add_action_or_reset(dev, devm_mipi_dsi_detach, dsi);
if (ret)
return ret;
return 0;
}
EXPORT_SYMBOL_GPL(devm_mipi_dsi_attach);
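/*
* Illustrative sketch only: a driver that has obtained its DSI device (for
* example a DRM bridge that registered the device against a separately
* probed host) typically configures the link parameters and then uses the
* managed attach so detach happens automatically on unbind. The lane count,
* format and flags below are hypothetical.
*
*	dsi->lanes = 4;
*	dsi->format = MIPI_DSI_FMT_RGB888;
*	dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
*
*	ret = devm_mipi_dsi_attach(dev, dsi);
*	if (ret)
*		return ret;
*/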
static ssize_t mipi_dsi_device_transfer(struct mipi_dsi_device *dsi,
struct mipi_dsi_msg *msg)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
if (!ops || !ops->transfer)
return -ENOSYS;
if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
msg->flags |= MIPI_DSI_MSG_USE_LPM;
return ops->transfer(dsi->host, msg);
}
/**
* mipi_dsi_packet_format_is_short - check if a packet is of the short format
* @type: MIPI DSI data type of the packet
*
* Return: true if the packet for the given data type is a short packet, false
* otherwise.
*/
bool mipi_dsi_packet_format_is_short(u8 type)
{
switch (type) {
case MIPI_DSI_V_SYNC_START:
case MIPI_DSI_V_SYNC_END:
case MIPI_DSI_H_SYNC_START:
case MIPI_DSI_H_SYNC_END:
case MIPI_DSI_COMPRESSION_MODE:
case MIPI_DSI_END_OF_TRANSMISSION:
case MIPI_DSI_COLOR_MODE_OFF:
case MIPI_DSI_COLOR_MODE_ON:
case MIPI_DSI_SHUTDOWN_PERIPHERAL:
case MIPI_DSI_TURN_ON_PERIPHERAL:
case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
case MIPI_DSI_DCS_SHORT_WRITE:
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
case MIPI_DSI_DCS_READ:
case MIPI_DSI_EXECUTE_QUEUE:
case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
return true;
}
return false;
}
EXPORT_SYMBOL(mipi_dsi_packet_format_is_short);
/**
* mipi_dsi_packet_format_is_long - check if a packet is of the long format
* @type: MIPI DSI data type of the packet
*
* Return: true if the packet for the given data type is a long packet, false
* otherwise.
*/
bool mipi_dsi_packet_format_is_long(u8 type)
{
switch (type) {
case MIPI_DSI_NULL_PACKET:
case MIPI_DSI_BLANKING_PACKET:
case MIPI_DSI_GENERIC_LONG_WRITE:
case MIPI_DSI_DCS_LONG_WRITE:
case MIPI_DSI_PICTURE_PARAMETER_SET:
case MIPI_DSI_COMPRESSED_PIXEL_STREAM:
case MIPI_DSI_LOOSELY_PACKED_PIXEL_STREAM_YCBCR20:
case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR24:
case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16:
case MIPI_DSI_PACKED_PIXEL_STREAM_30:
case MIPI_DSI_PACKED_PIXEL_STREAM_36:
case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12:
case MIPI_DSI_PACKED_PIXEL_STREAM_16:
case MIPI_DSI_PACKED_PIXEL_STREAM_18:
case MIPI_DSI_PIXEL_STREAM_3BYTE_18:
case MIPI_DSI_PACKED_PIXEL_STREAM_24:
return true;
}
return false;
}
EXPORT_SYMBOL(mipi_dsi_packet_format_is_long);
/**
* mipi_dsi_create_packet - create a packet from a message according to the
* DSI protocol
* @packet: pointer to a DSI packet structure
* @msg: message to translate into a packet
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
const struct mipi_dsi_msg *msg)
{
if (!packet || !msg)
return -EINVAL;
/* do some minimum sanity checking */
if (!mipi_dsi_packet_format_is_short(msg->type) &&
!mipi_dsi_packet_format_is_long(msg->type))
return -EINVAL;
if (msg->channel > 3)
return -EINVAL;
memset(packet, 0, sizeof(*packet));
packet->header[0] = ((msg->channel & 0x3) << 6) | (msg->type & 0x3f);
/* TODO: compute ECC if hardware support is not available */
/*
* Long write packets contain the word count in header bytes 1 and 2.
* The payload follows the header and is word count bytes long.
*
* Short write packets encode up to two parameters in header bytes 1
* and 2.
*/
if (mipi_dsi_packet_format_is_long(msg->type)) {
packet->header[1] = (msg->tx_len >> 0) & 0xff;
packet->header[2] = (msg->tx_len >> 8) & 0xff;
packet->payload_length = msg->tx_len;
packet->payload = msg->tx_buf;
} else {
const u8 *tx = msg->tx_buf;
packet->header[1] = (msg->tx_len > 0) ? tx[0] : 0;
packet->header[2] = (msg->tx_len > 1) ? tx[1] : 0;
}
packet->size = sizeof(packet->header) + packet->payload_length;
return 0;
}
EXPORT_SYMBOL(mipi_dsi_create_packet);
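/*
* Worked example (illustrative only): for a message with
* msg->type = MIPI_DSI_DCS_LONG_WRITE, msg->channel = 0 and msg->tx_len = 5,
* mipi_dsi_create_packet() fills header[0] = 0x39 (channel 0 in the top two
* bits, data type 0x39 below), header[1] = 0x05 and header[2] = 0x00 (the
* 16-bit word count, least significant byte first), points packet->payload
* at the caller's buffer and sets packet->size = 4 + 5 = 9 bytes.
*/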
/**
* mipi_dsi_shutdown_peripheral() - sends a Shutdown Peripheral command
* @dsi: DSI peripheral device
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi)
{
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.type = MIPI_DSI_SHUTDOWN_PERIPHERAL,
.tx_buf = (u8 [2]) { 0, 0 },
.tx_len = 2,
};
int ret = mipi_dsi_device_transfer(dsi, &msg);
return (ret < 0) ? ret : 0;
}
EXPORT_SYMBOL(mipi_dsi_shutdown_peripheral);
/**
* mipi_dsi_turn_on_peripheral() - sends a Turn On Peripheral command
* @dsi: DSI peripheral device
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi)
{
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.type = MIPI_DSI_TURN_ON_PERIPHERAL,
.tx_buf = (u8 [2]) { 0, 0 },
.tx_len = 2,
};
int ret = mipi_dsi_device_transfer(dsi, &msg);
return (ret < 0) ? ret : 0;
}
EXPORT_SYMBOL(mipi_dsi_turn_on_peripheral);
/**
* mipi_dsi_set_maximum_return_packet_size() - specify the maximum size of
* the payload in a long packet transmitted from the peripheral back to the
* host processor
* @dsi: DSI peripheral device
* @value: the maximum size of the payload
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
u16 value)
{
u8 tx[2] = { value & 0xff, value >> 8 };
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
.tx_len = sizeof(tx),
.tx_buf = tx,
};
int ret = mipi_dsi_device_transfer(dsi, &msg);
return (ret < 0) ? ret : 0;
}
EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
/**
* mipi_dsi_compression_mode() - enable/disable DSC on the peripheral
* @dsi: DSI peripheral device
* @enable: Whether to enable or disable the DSC
*
* Enable or disable Display Stream Compression on the peripheral using the
* default Picture Parameter Set and VESA DSC 1.1 algorithm.
*
* Return: 0 on success or a negative error code on failure.
*/
ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable)
{
/* Note: Needs updating for non-default PPS or algorithm */
u8 tx[2] = { enable << 0, 0 };
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.type = MIPI_DSI_COMPRESSION_MODE,
.tx_len = sizeof(tx),
.tx_buf = tx,
};
int ret = mipi_dsi_device_transfer(dsi, &msg);
return (ret < 0) ? ret : 0;
}
EXPORT_SYMBOL(mipi_dsi_compression_mode);
/**
* mipi_dsi_picture_parameter_set() - transmit the DSC PPS to the peripheral
* @dsi: DSI peripheral device
* @pps: VESA DSC 1.1 Picture Parameter Set
*
* Transmit the VESA DSC 1.1 Picture Parameter Set to the peripheral.
*
* Return: 0 on success or a negative error code on failure.
*/
ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
const struct drm_dsc_picture_parameter_set *pps)
{
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.type = MIPI_DSI_PICTURE_PARAMETER_SET,
.tx_len = sizeof(*pps),
.tx_buf = pps,
};
int ret = mipi_dsi_device_transfer(dsi, &msg);
return (ret < 0) ? ret : 0;
}
EXPORT_SYMBOL(mipi_dsi_picture_parameter_set);
/**
* mipi_dsi_generic_write() - transmit data using a generic write packet
* @dsi: DSI peripheral device
* @payload: buffer containing the payload
* @size: size of payload buffer
*
* This function will automatically choose the right data type depending on
* the payload length.
*
* Return: The number of bytes transmitted on success or a negative error code
* on failure.
*/
ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
size_t size)
{
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.tx_buf = payload,
.tx_len = size
};
switch (size) {
case 0:
msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM;
break;
case 1:
msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM;
break;
case 2:
msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM;
break;
default:
msg.type = MIPI_DSI_GENERIC_LONG_WRITE;
break;
}
return mipi_dsi_device_transfer(dsi, &msg);
}
EXPORT_SYMBOL(mipi_dsi_generic_write);
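/*
* Illustrative sketch only: callers just size the buffer and the helper
* picks the packet type. The register values below are hypothetical.
*
*	static const u8 seq[] = { 0xb0, 0x04 };
*	ssize_t ret = mipi_dsi_generic_write(dsi, seq, sizeof(seq));
*
* With sizeof(seq) == 2 this goes out as a
* MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM packet; a longer buffer would be
* sent as MIPI_DSI_GENERIC_LONG_WRITE instead.
*/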
/**
* mipi_dsi_generic_read() - receive data using a generic read packet
* @dsi: DSI peripheral device
* @params: buffer containing the request parameters
* @num_params: number of request parameters
* @data: buffer in which to return the received data
* @size: size of receive buffer
*
* This function will automatically choose the right data type depending on
* the number of parameters passed in.
*
* Return: The number of bytes successfully read or a negative error code on
* failure.
*/
ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
size_t num_params, void *data, size_t size)
{
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.tx_len = num_params,
.tx_buf = params,
.rx_len = size,
.rx_buf = data
};
switch (num_params) {
case 0:
msg.type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
break;
case 1:
msg.type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
break;
case 2:
msg.type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
break;
default:
return -EINVAL;
}
return mipi_dsi_device_transfer(dsi, &msg);
}
EXPORT_SYMBOL(mipi_dsi_generic_read);
/**
* mipi_dsi_dcs_write_buffer() - transmit a DCS command with payload
* @dsi: DSI peripheral device
* @data: buffer containing data to be transmitted
* @len: size of transmission buffer
*
* This function will automatically choose the right data type depending on
* the command payload length.
*
* Return: The number of bytes successfully transmitted or a negative error
* code on failure.
*/
ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi,
const void *data, size_t len)
{
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.tx_buf = data,
.tx_len = len
};
switch (len) {
case 0:
return -EINVAL;
case 1:
msg.type = MIPI_DSI_DCS_SHORT_WRITE;
break;
case 2:
msg.type = MIPI_DSI_DCS_SHORT_WRITE_PARAM;
break;
default:
msg.type = MIPI_DSI_DCS_LONG_WRITE;
break;
}
return mipi_dsi_device_transfer(dsi, &msg);
}
EXPORT_SYMBOL(mipi_dsi_dcs_write_buffer);
/**
* mipi_dsi_dcs_write() - send DCS write command
* @dsi: DSI peripheral device
* @cmd: DCS command
* @data: buffer containing the command payload
* @len: command payload length
*
* This function will automatically choose the right data type depending on
* the command payload length.
*
* Return: The number of bytes successfully transmitted or a negative error
* code on failure.
*/
ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd,
const void *data, size_t len)
{
ssize_t err;
size_t size;
u8 stack_tx[8];
u8 *tx;
size = 1 + len;
if (len > ARRAY_SIZE(stack_tx) - 1) {
tx = kmalloc(size, GFP_KERNEL);
if (!tx)
return -ENOMEM;
} else {
tx = stack_tx;
}
/* concatenate the DCS command byte and the payload */
tx[0] = cmd;
if (data)
memcpy(&tx[1], data, len);
err = mipi_dsi_dcs_write_buffer(dsi, tx, size);
if (tx != stack_tx)
kfree(tx);
return err;
}
EXPORT_SYMBOL(mipi_dsi_dcs_write);
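/*
* Illustrative sketch only: a one-parameter DCS write. The helper prepends
* the command byte, so the total length of 2 selects
* MIPI_DSI_DCS_SHORT_WRITE_PARAM. The value written is hypothetical.
*
*	u8 val = 0x24;
*	ssize_t ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
*					 &val, sizeof(val));
*/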
/**
* mipi_dsi_dcs_read() - send DCS read request command
* @dsi: DSI peripheral device
* @cmd: DCS command
* @data: buffer in which to receive data
* @len: size of receive buffer
*
* Return: The number of bytes read or a negative error code on failure.
*/
ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
size_t len)
{
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.type = MIPI_DSI_DCS_READ,
.tx_buf = &cmd,
.tx_len = 1,
.rx_buf = data,
.rx_len = len
};
return mipi_dsi_device_transfer(dsi, &msg);
}
EXPORT_SYMBOL(mipi_dsi_dcs_read);
/**
* mipi_dsi_dcs_nop() - send DCS nop packet
* @dsi: DSI peripheral device
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi)
{
ssize_t err;
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_NOP, NULL, 0);
if (err < 0)
return err;
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_nop);
/**
* mipi_dsi_dcs_soft_reset() - perform a software reset of the display module
* @dsi: DSI peripheral device
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi)
{
ssize_t err;
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SOFT_RESET, NULL, 0);
if (err < 0)
return err;
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_soft_reset);
/**
* mipi_dsi_dcs_get_power_mode() - query the display module's current power
* mode
* @dsi: DSI peripheral device
* @mode: return location for the current power mode
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode)
{
ssize_t err;
err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_POWER_MODE, mode,
sizeof(*mode));
if (err <= 0) {
if (err == 0)
err = -ENODATA;
return err;
}
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_get_power_mode);
/**
* mipi_dsi_dcs_get_pixel_format() - gets the pixel format for the RGB image
* data used by the interface
* @dsi: DSI peripheral device
* @format: return location for the pixel format
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_get_pixel_format(struct mipi_dsi_device *dsi, u8 *format)
{
ssize_t err;
err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_PIXEL_FORMAT, format,
sizeof(*format));
if (err <= 0) {
if (err == 0)
err = -ENODATA;
return err;
}
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_get_pixel_format);
/**
* mipi_dsi_dcs_enter_sleep_mode() - disable all unnecessary blocks inside the
* display module except interface communication
* @dsi: DSI peripheral device
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi)
{
ssize_t err;
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_ENTER_SLEEP_MODE, NULL, 0);
if (err < 0)
return err;
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_enter_sleep_mode);
/**
* mipi_dsi_dcs_exit_sleep_mode() - enable all blocks inside the display
* module
* @dsi: DSI peripheral device
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi)
{
ssize_t err;
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);
if (err < 0)
return err;
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_exit_sleep_mode);
/**
* mipi_dsi_dcs_set_display_off() - stop displaying the image data on the
* display device
* @dsi: DSI peripheral device
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi)
{
ssize_t err;
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_OFF, NULL, 0);
if (err < 0)
return err;
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_set_display_off);
/**
* mipi_dsi_dcs_set_display_on() - start displaying the image data on the
* display device
* @dsi: DSI peripheral device
*
* Return: 0 on success or a negative error code on failure
*/
int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi)
{
ssize_t err;
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_ON, NULL, 0);
if (err < 0)
return err;
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_set_display_on);
/**
* mipi_dsi_dcs_set_column_address() - define the column extent of the frame
* memory accessed by the host processor
* @dsi: DSI peripheral device
* @start: first column of frame memory
* @end: last column of frame memory
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
u16 end)
{
u8 payload[4] = { start >> 8, start & 0xff, end >> 8, end & 0xff };
ssize_t err;
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_COLUMN_ADDRESS, payload,
sizeof(payload));
if (err < 0)
return err;
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_set_column_address);
/**
* mipi_dsi_dcs_set_page_address() - define the page extent of the frame
* memory accessed by the host processor
* @dsi: DSI peripheral device
* @start: first page of frame memory
* @end: last page of frame memory
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
u16 end)
{
u8 payload[4] = { start >> 8, start & 0xff, end >> 8, end & 0xff };
ssize_t err;
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_PAGE_ADDRESS, payload,
sizeof(payload));
if (err < 0)
return err;
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_set_page_address);
/**
* mipi_dsi_dcs_set_tear_off() - turn off the display module's Tearing Effect
* output signal on the TE signal line
* @dsi: DSI peripheral device
*
* Return: 0 on success or a negative error code on failure
*/
int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi)
{
ssize_t err;
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_OFF, NULL, 0);
if (err < 0)
return err;
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_off);
/**
* mipi_dsi_dcs_set_tear_on() - turn on the display module's Tearing Effect
* output signal on the TE signal line.
* @dsi: DSI peripheral device
* @mode: the Tearing Effect Output Line mode
*
* Return: 0 on success or a negative error code on failure
*/
int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
enum mipi_dsi_dcs_tear_mode mode)
{
u8 value = mode;
ssize_t err;
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_ON, &value,
sizeof(value));
if (err < 0)
return err;
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
/**
* mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
* data used by the interface
* @dsi: DSI peripheral device
* @format: pixel format
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format)
{
ssize_t err;
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_PIXEL_FORMAT, &format,
sizeof(format));
if (err < 0)
return err;
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format);
/**
* mipi_dsi_dcs_set_tear_scanline() - set the scanline to use as trigger for
* the Tearing Effect output signal of the display module
* @dsi: DSI peripheral device
* @scanline: scanline to use as trigger
*
* Return: 0 on success or a negative error code on failure
*/
int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline)
{
u8 payload[2] = { scanline >> 8, scanline & 0xff };
ssize_t err;
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_SCANLINE, payload,
sizeof(payload));
if (err < 0)
return err;
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline);
/**
* mipi_dsi_dcs_set_display_brightness() - sets the brightness value of the
* display
* @dsi: DSI peripheral device
* @brightness: brightness value
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi,
u16 brightness)
{
u8 payload[2] = { brightness & 0xff, brightness >> 8 };
ssize_t err;
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
payload, sizeof(payload));
if (err < 0)
return err;
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_set_display_brightness);
/**
* mipi_dsi_dcs_get_display_brightness() - gets the current brightness value
* of the display
* @dsi: DSI peripheral device
* @brightness: brightness value
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
u16 *brightness)
{
ssize_t err;
err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
brightness, sizeof(*brightness));
if (err <= 0) {
if (err == 0)
err = -ENODATA;
return err;
}
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness);
/**
* mipi_dsi_dcs_set_display_brightness_large() - sets the 16-bit brightness value
* of the display
* @dsi: DSI peripheral device
* @brightness: brightness value
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_display_brightness_large(struct mipi_dsi_device *dsi,
u16 brightness)
{
u8 payload[2] = { brightness >> 8, brightness & 0xff };
ssize_t err;
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
payload, sizeof(payload));
if (err < 0)
return err;
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_set_display_brightness_large);
/**
* mipi_dsi_dcs_get_display_brightness_large() - gets the current 16-bit
* brightness value of the display
* @dsi: DSI peripheral device
* @brightness: brightness value
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi,
u16 *brightness)
{
u8 brightness_be[2];
ssize_t err;
err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
brightness_be, sizeof(brightness_be));
if (err <= 0) {
if (err == 0)
err = -ENODATA;
return err;
}
*brightness = (brightness_be[0] << 8) | brightness_be[1];
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness_large);
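/*
* Note: as the implementations above show, the plain *_display_brightness()
* helpers transfer the 16-bit value least significant byte first, whereas
* the *_display_brightness_large() variants use the opposite, big-endian
* byte order. Use the pair that matches the byte order the panel expects.
*/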
static int mipi_dsi_drv_probe(struct device *dev)
{
struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
return drv->probe(dsi);
}
static int mipi_dsi_drv_remove(struct device *dev)
{
struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
drv->remove(dsi);
return 0;
}
static void mipi_dsi_drv_shutdown(struct device *dev)
{
struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
drv->shutdown(dsi);
}
/**
* mipi_dsi_driver_register_full() - register a driver for DSI devices
* @drv: DSI driver structure
* @owner: owner module
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_driver_register_full(struct mipi_dsi_driver *drv,
struct module *owner)
{
drv->driver.bus = &mipi_dsi_bus_type;
drv->driver.owner = owner;
if (drv->probe)
drv->driver.probe = mipi_dsi_drv_probe;
if (drv->remove)
drv->driver.remove = mipi_dsi_drv_remove;
if (drv->shutdown)
drv->driver.shutdown = mipi_dsi_drv_shutdown;
return driver_register(&drv->driver);
}
EXPORT_SYMBOL(mipi_dsi_driver_register_full);
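/*
* Illustrative sketch only: drivers usually register through the
* mipi_dsi_driver_register()/module_mipi_dsi_driver() wrappers, which pass
* THIS_MODULE to this function. The structure contents are hypothetical.
*
*	static struct mipi_dsi_driver example_driver = {
*		.probe = example_probe,
*		.remove = example_remove,
*		.driver = {
*			.name = "example-panel",
*			.of_match_table = example_of_match,
*		},
*	};
*	module_mipi_dsi_driver(example_driver);
*/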
/**
* mipi_dsi_driver_unregister() - unregister a driver for DSI devices
* @drv: DSI driver structure
*/
void mipi_dsi_driver_unregister(struct mipi_dsi_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(mipi_dsi_driver_unregister);
static int __init mipi_dsi_bus_init(void)
{
return bus_register(&mipi_dsi_bus_type);
}
postcore_initcall(mipi_dsi_bus_init);
MODULE_AUTHOR("Andrzej Hajda <[email protected]>");
MODULE_DESCRIPTION("MIPI DSI Bus");
MODULE_LICENSE("GPL and additional rights");
| linux-master | drivers/gpu/drm/drm_mipi_dsi.c |
/*
* Copyright (c) 2016 Intel Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include <linux/export.h>
#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
/**
* DOC: overview
*
* Encoders represent the connecting element between the CRTC (as the overall
* pixel pipeline, represented by &struct drm_crtc) and the connectors (as the
* generic sink entity, represented by &struct drm_connector). An encoder takes
* pixel data from a CRTC and converts it to a format suitable for any attached
* connector. Encoders are objects exposed to userspace, originally to allow
* userspace to infer cloning and connector/CRTC restrictions. Unfortunately
* almost all drivers get this wrong, making the uabi pretty much useless. On
* top of that the exposed restrictions are too simple for today's hardware, and
* the recommended way to infer restrictions is by using the
* DRM_MODE_ATOMIC_TEST_ONLY flag for the atomic IOCTL.
*
* Otherwise encoders aren't used in the uapi at all (any modeset request from
* userspace directly connects a connector with a CRTC), drivers are therefore
* free to use them however they wish. Modeset helper libraries make strong use
* of encoders to facilitate code sharing. But for more complex settings it is
* usually better to move shared code into a separate &drm_bridge. Compared to
* encoders, bridges also have the benefit of being purely an internal
* abstraction since they are not exposed to userspace at all.
*
* Encoders are initialized with drm_encoder_init() and cleaned up using
* drm_encoder_cleanup().
*/
static const struct drm_prop_enum_list drm_encoder_enum_list[] = {
{ DRM_MODE_ENCODER_NONE, "None" },
{ DRM_MODE_ENCODER_DAC, "DAC" },
{ DRM_MODE_ENCODER_TMDS, "TMDS" },
{ DRM_MODE_ENCODER_LVDS, "LVDS" },
{ DRM_MODE_ENCODER_TVDAC, "TV" },
{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
{ DRM_MODE_ENCODER_DSI, "DSI" },
{ DRM_MODE_ENCODER_DPMST, "DP MST" },
{ DRM_MODE_ENCODER_DPI, "DPI" },
};
int drm_encoder_register_all(struct drm_device *dev)
{
struct drm_encoder *encoder;
int ret = 0;
drm_for_each_encoder(encoder, dev) {
if (encoder->funcs && encoder->funcs->late_register)
ret = encoder->funcs->late_register(encoder);
if (ret)
return ret;
}
return 0;
}
void drm_encoder_unregister_all(struct drm_device *dev)
{
struct drm_encoder *encoder;
drm_for_each_encoder(encoder, dev) {
if (encoder->funcs && encoder->funcs->early_unregister)
encoder->funcs->early_unregister(encoder);
}
}
__printf(5, 0)
static int __drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
int encoder_type, const char *name, va_list ap)
{
int ret;
/* encoder index is used with 32bit bitmasks */
if (WARN_ON(dev->mode_config.num_encoder >= 32))
return -EINVAL;
ret = drm_mode_object_add(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
if (ret)
return ret;
encoder->dev = dev;
encoder->encoder_type = encoder_type;
encoder->funcs = funcs;
if (name) {
encoder->name = kvasprintf(GFP_KERNEL, name, ap);
} else {
encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
drm_encoder_enum_list[encoder_type].name,
encoder->base.id);
}
if (!encoder->name) {
ret = -ENOMEM;
goto out_put;
}
INIT_LIST_HEAD(&encoder->bridge_chain);
list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
encoder->index = dev->mode_config.num_encoder++;
out_put:
if (ret)
drm_mode_object_unregister(dev, &encoder->base);
return ret;
}
/**
* drm_encoder_init - Init a preallocated encoder
* @dev: drm device
* @encoder: the encoder to init
* @funcs: callbacks for this encoder
* @encoder_type: user visible type of the encoder
* @name: printf style format string for the encoder name, or NULL for default name
*
* Initializes a preallocated encoder. Encoder should be subclassed as part of
* driver encoder objects. At driver unload time the driver's
* &drm_encoder_funcs.destroy hook should call drm_encoder_cleanup() and kfree()
* the encoder structure. The encoder structure should not be allocated with
* devm_kzalloc().
*
* Note: consider using drmm_encoder_alloc() or drmm_encoder_init()
* instead of drm_encoder_init() to let the DRM managed resource
* infrastructure take care of cleanup and deallocation.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
int encoder_type, const char *name, ...)
{
va_list ap;
int ret;
WARN_ON(!funcs->destroy);
va_start(ap, name);
ret = __drm_encoder_init(dev, encoder, funcs, encoder_type, name, ap);
va_end(ap);
return ret;
}
EXPORT_SYMBOL(drm_encoder_init);
/**
* drm_encoder_cleanup - cleans up an initialised encoder
* @encoder: encoder to cleanup
*
* Cleans up the encoder but doesn't free the object.
*/
void drm_encoder_cleanup(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_bridge *bridge, *next;
/* Note that the encoder_list is considered to be static; should we
* remove the drm_encoder at runtime we would have to decrement all
* the indices on the drm_encoder after us in the encoder_list.
*/
list_for_each_entry_safe(bridge, next, &encoder->bridge_chain,
chain_node)
drm_bridge_detach(bridge);
drm_mode_object_unregister(dev, &encoder->base);
kfree(encoder->name);
list_del(&encoder->head);
dev->mode_config.num_encoder--;
memset(encoder, 0, sizeof(*encoder));
}
EXPORT_SYMBOL(drm_encoder_cleanup);
static void drmm_encoder_alloc_release(struct drm_device *dev, void *ptr)
{
struct drm_encoder *encoder = ptr;
if (WARN_ON(!encoder->dev))
return;
drm_encoder_cleanup(encoder);
}
__printf(5, 0)
static int __drmm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
int encoder_type,
const char *name,
va_list args)
{
int ret;
if (drm_WARN_ON(dev, funcs && funcs->destroy))
return -EINVAL;
ret = __drm_encoder_init(dev, encoder, funcs, encoder_type, name, args);
if (ret)
return ret;
ret = drmm_add_action_or_reset(dev, drmm_encoder_alloc_release, encoder);
if (ret)
return ret;
return 0;
}
void *__drmm_encoder_alloc(struct drm_device *dev, size_t size, size_t offset,
const struct drm_encoder_funcs *funcs,
int encoder_type, const char *name, ...)
{
void *container;
struct drm_encoder *encoder;
va_list ap;
int ret;
container = drmm_kzalloc(dev, size, GFP_KERNEL);
if (!container)
return ERR_PTR(-ENOMEM);
encoder = container + offset;
va_start(ap, name);
ret = __drmm_encoder_init(dev, encoder, funcs, encoder_type, name, ap);
va_end(ap);
if (ret)
return ERR_PTR(ret);
return container;
}
EXPORT_SYMBOL(__drmm_encoder_alloc);
/**
* drmm_encoder_init - Initialize a preallocated encoder
* @dev: drm device
* @encoder: the encoder to init
* @funcs: callbacks for this encoder (optional)
* @encoder_type: user visible type of the encoder
* @name: printf style format string for the encoder name, or NULL for default name
*
* Initializes a preallocated encoder. Encoder should be subclassed as
* part of driver encoder objects. Cleanup is automatically handled
* through registering drm_encoder_cleanup() with drmm_add_action(). The
* encoder structure should be allocated with drmm_kzalloc().
*
* The @drm_encoder_funcs.destroy hook must be NULL.
*
* Returns:
* Zero on success, error code on failure.
*/
int drmm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
int encoder_type, const char *name, ...)
{
va_list ap;
int ret;
va_start(ap, name);
ret = __drmm_encoder_init(dev, encoder, funcs, encoder_type, name, ap);
va_end(ap);
if (ret)
return ret;
return 0;
}
EXPORT_SYMBOL(drmm_encoder_init);
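/*
* Illustrative sketch only: the encoder is usually embedded in a larger,
* driver-private structure and allocated through the managed helper, which
* registers drm_encoder_cleanup() automatically. Names are hypothetical.
*
*	struct example_output {
*		struct drm_encoder encoder;
*	};
*
*	out = drmm_encoder_alloc(drm, struct example_output, encoder,
*				 NULL, DRM_MODE_ENCODER_TMDS, NULL);
*	if (IS_ERR(out))
*		return PTR_ERR(out);
*/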
static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
{
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
bool uses_atomic = false;
struct drm_connector_list_iter conn_iter;
/* For atomic drivers only state objects are synchronously updated and
* protected by modeset locks, so check those first. */
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (!connector->state)
continue;
uses_atomic = true;
if (connector->state->best_encoder != encoder)
continue;
drm_connector_list_iter_end(&conn_iter);
return connector->state->crtc;
}
drm_connector_list_iter_end(&conn_iter);
/* Don't return stale data (e.g. pending async disable). */
if (uses_atomic)
return NULL;
return encoder->crtc;
}
int drm_mode_getencoder(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_encoder *enc_resp = data;
struct drm_encoder *encoder;
struct drm_crtc *crtc;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
encoder = drm_encoder_find(dev, file_priv, enc_resp->encoder_id);
if (!encoder)
return -ENOENT;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
crtc = drm_encoder_get_crtc(encoder);
if (crtc && drm_lease_held(file_priv, crtc->base.id))
enc_resp->crtc_id = crtc->base.id;
else
enc_resp->crtc_id = 0;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
enc_resp->encoder_type = encoder->encoder_type;
enc_resp->encoder_id = encoder->base.id;
enc_resp->possible_crtcs = drm_lease_filter_crtcs(file_priv,
encoder->possible_crtcs);
enc_resp->possible_clones = encoder->possible_clones;
return 0;
}
| linux-master | drivers/gpu/drm/drm_encoder.c |
/*
* Copyright (C) 2014 Red Hat
* Copyright (C) 2014 Intel Corp.
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rob Clark <[email protected]>
* Daniel Vetter <[email protected]>
*/
#include <linux/sync_file.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_blend.h>
#include <drm/drm_bridge.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
#include <drm/drm_writeback.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
void __drm_crtc_commit_free(struct kref *kref)
{
struct drm_crtc_commit *commit =
container_of(kref, struct drm_crtc_commit, ref);
kfree(commit);
}
EXPORT_SYMBOL(__drm_crtc_commit_free);
/**
* drm_crtc_commit_wait - Waits for a commit to complete
* @commit: &drm_crtc_commit to wait for
*
* Waits for a given &drm_crtc_commit to be programmed into the
* hardware and flipped to.
*
* Returns:
*
* 0 on success, a negative error code otherwise.
*/
int drm_crtc_commit_wait(struct drm_crtc_commit *commit)
{
unsigned long timeout = 10 * HZ;
int ret;
if (!commit)
return 0;
ret = wait_for_completion_timeout(&commit->hw_done, timeout);
if (!ret) {
drm_err(commit->crtc->dev, "hw_done timed out\n");
return -ETIMEDOUT;
}
/*
* Currently no support for overwriting flips, hence
* stall for previous one to execute completely.
*/
ret = wait_for_completion_timeout(&commit->flip_done, timeout);
if (!ret) {
drm_err(commit->crtc->dev, "flip_done timed out\n");
return -ETIMEDOUT;
}
return 0;
}
EXPORT_SYMBOL(drm_crtc_commit_wait);
/**
* drm_atomic_state_default_release -
* release memory initialized by drm_atomic_state_init
* @state: atomic state
*
* Free all the memory allocated by drm_atomic_state_init.
* This should only be used by drivers which are still subclassing
* &drm_atomic_state and haven't switched to &drm_private_state yet.
*/
void drm_atomic_state_default_release(struct drm_atomic_state *state)
{
kfree(state->connectors);
kfree(state->crtcs);
kfree(state->planes);
kfree(state->private_objs);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);
/**
* drm_atomic_state_init - init new atomic state
* @dev: DRM device
* @state: atomic state
*
* Default implementation for filling in a new atomic state.
* This should only be used by drivers which are still subclassing
* &drm_atomic_state and haven't switched to &drm_private_state yet.
*/
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
{
kref_init(&state->ref);
/* TODO legacy paths should maybe do a better job about
* setting this appropriately?
*/
state->allow_modeset = true;
state->crtcs = kcalloc(dev->mode_config.num_crtc,
sizeof(*state->crtcs), GFP_KERNEL);
if (!state->crtcs)
goto fail;
state->planes = kcalloc(dev->mode_config.num_total_plane,
sizeof(*state->planes), GFP_KERNEL);
if (!state->planes)
goto fail;
/*
* Because drm_atomic_state can be committed asynchronously we need our
* own reference and cannot rely on the one implied by drm_file in the
* ioctl call.
*/
drm_dev_get(dev);
state->dev = dev;
drm_dbg_atomic(dev, "Allocated atomic state %p\n", state);
return 0;
fail:
drm_atomic_state_default_release(state);
return -ENOMEM;
}
EXPORT_SYMBOL(drm_atomic_state_init);
/**
* drm_atomic_state_alloc - allocate atomic state
* @dev: DRM device
*
* This allocates an empty atomic state to track updates.
*/
struct drm_atomic_state *
drm_atomic_state_alloc(struct drm_device *dev)
{
struct drm_mode_config *config = &dev->mode_config;
if (!config->funcs->atomic_state_alloc) {
struct drm_atomic_state *state;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
if (drm_atomic_state_init(dev, state) < 0) {
kfree(state);
return NULL;
}
return state;
}
return config->funcs->atomic_state_alloc(dev);
}
EXPORT_SYMBOL(drm_atomic_state_alloc);
/**
* drm_atomic_state_default_clear - clear base atomic state
* @state: atomic state
*
* Default implementation for clearing atomic state.
* This should only be used by drivers which are still subclassing
* &drm_atomic_state and haven't switched to &drm_private_state yet.
*/
void drm_atomic_state_default_clear(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_mode_config *config = &dev->mode_config;
int i;
drm_dbg_atomic(dev, "Clearing atomic state %p\n", state);
for (i = 0; i < state->num_connector; i++) {
struct drm_connector *connector = state->connectors[i].ptr;
if (!connector)
continue;
connector->funcs->atomic_destroy_state(connector,
state->connectors[i].state);
state->connectors[i].ptr = NULL;
state->connectors[i].state = NULL;
state->connectors[i].old_state = NULL;
state->connectors[i].new_state = NULL;
drm_connector_put(connector);
}
for (i = 0; i < config->num_crtc; i++) {
struct drm_crtc *crtc = state->crtcs[i].ptr;
if (!crtc)
continue;
crtc->funcs->atomic_destroy_state(crtc,
state->crtcs[i].state);
state->crtcs[i].ptr = NULL;
state->crtcs[i].state = NULL;
state->crtcs[i].old_state = NULL;
state->crtcs[i].new_state = NULL;
if (state->crtcs[i].commit) {
drm_crtc_commit_put(state->crtcs[i].commit);
state->crtcs[i].commit = NULL;
}
}
for (i = 0; i < config->num_total_plane; i++) {
struct drm_plane *plane = state->planes[i].ptr;
if (!plane)
continue;
plane->funcs->atomic_destroy_state(plane,
state->planes[i].state);
state->planes[i].ptr = NULL;
state->planes[i].state = NULL;
state->planes[i].old_state = NULL;
state->planes[i].new_state = NULL;
}
for (i = 0; i < state->num_private_objs; i++) {
struct drm_private_obj *obj = state->private_objs[i].ptr;
obj->funcs->atomic_destroy_state(obj,
state->private_objs[i].state);
state->private_objs[i].ptr = NULL;
state->private_objs[i].state = NULL;
state->private_objs[i].old_state = NULL;
state->private_objs[i].new_state = NULL;
}
state->num_private_objs = 0;
if (state->fake_commit) {
drm_crtc_commit_put(state->fake_commit);
state->fake_commit = NULL;
}
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);
/**
* drm_atomic_state_clear - clear state object
* @state: atomic state
*
* When the w/w mutex algorithm detects a deadlock we need to back off and drop
* all locks. So someone else could sneak in and change the current modeset
* configuration. Which means that all the state assembled in @state is no
* longer an atomic update to the current state, but to some arbitrary earlier
* state. Which could break assumptions the driver's
* &drm_mode_config_funcs.atomic_check likely relies on.
*
* Hence we must clear all cached state and completely start over, using this
* function.
*/
void drm_atomic_state_clear(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_mode_config *config = &dev->mode_config;
if (config->funcs->atomic_state_clear)
config->funcs->atomic_state_clear(state);
else
drm_atomic_state_default_clear(state);
}
EXPORT_SYMBOL(drm_atomic_state_clear);
/**
* __drm_atomic_state_free - free all memory for an atomic state
* @ref: This atomic state to deallocate
*
* This frees all memory associated with an atomic state, including all the
* per-object state for planes, CRTCs and connectors.
*/
void __drm_atomic_state_free(struct kref *ref)
{
struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
struct drm_device *dev = state->dev;
struct drm_mode_config *config = &dev->mode_config;
drm_atomic_state_clear(state);
drm_dbg_atomic(state->dev, "Freeing atomic state %p\n", state);
if (config->funcs->atomic_state_free) {
config->funcs->atomic_state_free(state);
} else {
drm_atomic_state_default_release(state);
kfree(state);
}
drm_dev_put(dev);
}
EXPORT_SYMBOL(__drm_atomic_state_free);
/**
* drm_atomic_get_crtc_state - get CRTC state
* @state: global atomic state object
* @crtc: CRTC to get state object for
*
* This function returns the CRTC state for the given CRTC, allocating it if
* needed. It will also grab the relevant CRTC lock to make sure that the state
* is consistent.
*
* WARNING: Drivers may only add new CRTC states to a @state if
* drm_atomic_state.allow_modeset is set, or if it's a driver-internal commit
* not created by userspace through an IOCTL call.
*
* Returns:
*
* Either the allocated state or the error code encoded into the pointer. When
* the error is EDEADLK then the w/w mutex code has detected a deadlock and the
* entire atomic sequence must be restarted. All other errors are fatal.
*/
struct drm_crtc_state *
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
int ret, index = drm_crtc_index(crtc);
struct drm_crtc_state *crtc_state;
WARN_ON(!state->acquire_ctx);
crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
if (crtc_state)
return crtc_state;
ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
if (ret)
return ERR_PTR(ret);
crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
if (!crtc_state)
return ERR_PTR(-ENOMEM);
state->crtcs[index].state = crtc_state;
state->crtcs[index].old_state = crtc->state;
state->crtcs[index].new_state = crtc_state;
state->crtcs[index].ptr = crtc;
crtc_state->state = state;
drm_dbg_atomic(state->dev, "Added [CRTC:%d:%s] %p state to %p\n",
crtc->base.id, crtc->name, crtc_state, state);
return crtc_state;
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
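/*
* Illustrative sketch only: callers handle the EDEADLK case documented above
* with the usual backoff-and-retry loop (error handling abbreviated, driver
* context hypothetical).
*
*	drm_modeset_acquire_init(&ctx, 0);
*	state = drm_atomic_state_alloc(dev);
*	state->acquire_ctx = &ctx;
* retry:
*	crtc_state = drm_atomic_get_crtc_state(state, crtc);
*	if (IS_ERR(crtc_state) && PTR_ERR(crtc_state) == -EDEADLK) {
*		drm_atomic_state_clear(state);
*		drm_modeset_backoff(&ctx);
*		goto retry;
*	}
*	... build the rest of the update and commit it ...
*	drm_atomic_state_put(state);
*	drm_modeset_acquire_fini(&ctx);
*/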
static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state,
const struct drm_crtc_state *new_crtc_state)
{
struct drm_crtc *crtc = new_crtc_state->crtc;
/* NOTE: we explicitly don't enforce constraints such as primary
* layer covering entire screen, since that is something we want
* to allow (on hw that supports it). For hw that does not, it
* should be checked in driver's crtc->atomic_check() vfunc.
*
* TODO: Add generic modeset state checks once we support those.
*/
if (new_crtc_state->active && !new_crtc_state->enable) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] active without enabled\n",
crtc->base.id, crtc->name);
return -EINVAL;
}
/* The state->enable vs. state->mode_blob checks can be WARN_ON,
* as this is a kernel-internal detail that userspace should never
* be able to trigger.
*/
if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] enabled without mode blob\n",
crtc->base.id, crtc->name);
return -EINVAL;
}
if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
WARN_ON(!new_crtc_state->enable && new_crtc_state->mode_blob)) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] disabled with mode blob\n",
crtc->base.id, crtc->name);
return -EINVAL;
}
/*
* Reject event generation for when a CRTC is off and stays off.
* It wouldn't be hard to implement this, but userspace has a track
* record of happily burning through 100% cpu (or worse, crash) when the
* display pipe is suspended. To avoid all that fun just reject updates
* that ask for events since likely that indicates a bug in the
* compositor's drawing loop. This is consistent with the vblank IOCTL
* and legacy page_flip IOCTL which also reject service on a disabled
* pipe.
*/
if (new_crtc_state->event &&
!new_crtc_state->active && !old_crtc_state->active) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] requesting event but off\n",
crtc->base.id, crtc->name);
return -EINVAL;
}
return 0;
}
static void drm_atomic_crtc_print_state(struct drm_printer *p,
const struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
drm_printf(p, "\tenable=%d\n", state->enable);
drm_printf(p, "\tactive=%d\n", state->active);
drm_printf(p, "\tself_refresh_active=%d\n", state->self_refresh_active);
drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));
if (crtc->funcs->atomic_print_state)
crtc->funcs->atomic_print_state(p, state);
}
static int drm_atomic_connector_check(struct drm_connector *connector,
struct drm_connector_state *state)
{
struct drm_crtc_state *crtc_state;
struct drm_writeback_job *writeback_job = state->writeback_job;
const struct drm_display_info *info = &connector->display_info;
state->max_bpc = info->bpc ? info->bpc : 8;
if (connector->max_bpc_property)
state->max_bpc = min(state->max_bpc, state->max_requested_bpc);
if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
return 0;
if (writeback_job->fb && !state->crtc) {
drm_dbg_atomic(connector->dev,
"[CONNECTOR:%d:%s] framebuffer without CRTC\n",
connector->base.id, connector->name);
return -EINVAL;
}
if (state->crtc)
crtc_state = drm_atomic_get_existing_crtc_state(state->state,
state->crtc);
if (writeback_job->fb && !crtc_state->active) {
drm_dbg_atomic(connector->dev,
"[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
connector->base.id, connector->name,
state->crtc->base.id);
return -EINVAL;
}
if (!writeback_job->fb) {
if (writeback_job->out_fence) {
drm_dbg_atomic(connector->dev,
"[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
connector->base.id, connector->name);
return -EINVAL;
}
drm_writeback_cleanup_job(writeback_job);
state->writeback_job = NULL;
}
return 0;
}
/**
* drm_atomic_get_plane_state - get plane state
* @state: global atomic state object
* @plane: plane to get state object for
*
* This function returns the plane state for the given plane, allocating it if
* needed. It will also grab the relevant plane lock to make sure that the state
* is consistent.
*
* Returns:
*
* Either the allocated state or the error code encoded into the pointer. When
* the error is EDEADLK then the w/w mutex code has detected a deadlock and the
* entire atomic sequence must be restarted. All other errors are fatal.
*/
struct drm_plane_state *
drm_atomic_get_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane)
{
int ret, index = drm_plane_index(plane);
struct drm_plane_state *plane_state;
WARN_ON(!state->acquire_ctx);
/* the legacy pointers should never be set */
WARN_ON(plane->fb);
WARN_ON(plane->old_fb);
WARN_ON(plane->crtc);
plane_state = drm_atomic_get_existing_plane_state(state, plane);
if (plane_state)
return plane_state;
ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
if (ret)
return ERR_PTR(ret);
plane_state = plane->funcs->atomic_duplicate_state(plane);
if (!plane_state)
return ERR_PTR(-ENOMEM);
state->planes[index].state = plane_state;
state->planes[index].ptr = plane;
state->planes[index].old_state = plane->state;
state->planes[index].new_state = plane_state;
plane_state->state = state;
drm_dbg_atomic(plane->dev, "Added [PLANE:%d:%s] %p state to %p\n",
plane->base.id, plane->name, plane_state, state);
if (plane_state->crtc) {
struct drm_crtc_state *crtc_state;
crtc_state = drm_atomic_get_crtc_state(state,
plane_state->crtc);
if (IS_ERR(crtc_state))
return ERR_CAST(crtc_state);
}
return plane_state;
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);
static bool
plane_switching_crtc(const struct drm_plane_state *old_plane_state,
const struct drm_plane_state *new_plane_state)
{
if (!old_plane_state->crtc || !new_plane_state->crtc)
return false;
if (old_plane_state->crtc == new_plane_state->crtc)
return false;
/* This could be refined, but currently there's no helper or driver code
* to implement direct switching of active planes nor userspace to take
* advantage of more direct plane switching without the intermediate
* full OFF state.
*/
return true;
}
/**
* drm_atomic_plane_check - check plane state
* @old_plane_state: old plane state to check
* @new_plane_state: new plane state to check
*
* Provides core sanity checks for plane state.
*
* RETURNS:
* Zero on success, error code on failure
*/
static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
const struct drm_plane_state *new_plane_state)
{
struct drm_plane *plane = new_plane_state->plane;
struct drm_crtc *crtc = new_plane_state->crtc;
const struct drm_framebuffer *fb = new_plane_state->fb;
unsigned int fb_width, fb_height;
struct drm_mode_rect *clips;
uint32_t num_clips;
int ret;
/* either *both* CRTC and FB must be set, or neither */
if (crtc && !fb) {
drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] CRTC set but no FB\n",
plane->base.id, plane->name);
return -EINVAL;
} else if (fb && !crtc) {
drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] FB set but no CRTC\n",
plane->base.id, plane->name);
return -EINVAL;
}
/* if disabled, we don't care about the rest of the state: */
if (!crtc)
return 0;
/* Check whether this plane is usable on this CRTC */
if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
drm_dbg_atomic(plane->dev,
"Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
crtc->base.id, crtc->name,
plane->base.id, plane->name);
return -EINVAL;
}
/* Check whether this plane supports the fb pixel format. */
ret = drm_plane_check_pixel_format(plane, fb->format->format,
fb->modifier);
if (ret) {
drm_dbg_atomic(plane->dev,
"[PLANE:%d:%s] invalid pixel format %p4cc, modifier 0x%llx\n",
plane->base.id, plane->name,
&fb->format->format, fb->modifier);
return ret;
}
/* Give drivers some help against integer overflows */
if (new_plane_state->crtc_w > INT_MAX ||
new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w ||
new_plane_state->crtc_h > INT_MAX ||
new_plane_state->crtc_y > INT_MAX - (int32_t) new_plane_state->crtc_h) {
drm_dbg_atomic(plane->dev,
"[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
plane->base.id, plane->name,
new_plane_state->crtc_w, new_plane_state->crtc_h,
new_plane_state->crtc_x, new_plane_state->crtc_y);
return -ERANGE;
}
fb_width = fb->width << 16;
fb_height = fb->height << 16;
/* Make sure source coordinates are inside the fb. */
if (new_plane_state->src_w > fb_width ||
new_plane_state->src_x > fb_width - new_plane_state->src_w ||
new_plane_state->src_h > fb_height ||
new_plane_state->src_y > fb_height - new_plane_state->src_h) {
drm_dbg_atomic(plane->dev,
"[PLANE:%d:%s] invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
plane->base.id, plane->name,
new_plane_state->src_w >> 16,
((new_plane_state->src_w & 0xffff) * 15625) >> 10,
new_plane_state->src_h >> 16,
((new_plane_state->src_h & 0xffff) * 15625) >> 10,
new_plane_state->src_x >> 16,
((new_plane_state->src_x & 0xffff) * 15625) >> 10,
new_plane_state->src_y >> 16,
((new_plane_state->src_y & 0xffff) * 15625) >> 10,
fb->width, fb->height);
return -ENOSPC;
}
clips = __drm_plane_get_damage_clips(new_plane_state);
num_clips = drm_plane_get_damage_clips_count(new_plane_state);
/* Make sure damage clips are valid and inside the fb. */
while (num_clips > 0) {
if (clips->x1 >= clips->x2 ||
clips->y1 >= clips->y2 ||
clips->x1 < 0 ||
clips->y1 < 0 ||
clips->x2 > fb_width ||
clips->y2 > fb_height) {
drm_dbg_atomic(plane->dev,
"[PLANE:%d:%s] invalid damage clip %d %d %d %d\n",
plane->base.id, plane->name, clips->x1,
clips->y1, clips->x2, clips->y2);
return -EINVAL;
}
clips++;
num_clips--;
}
if (plane_switching_crtc(old_plane_state, new_plane_state)) {
drm_dbg_atomic(plane->dev,
"[PLANE:%d:%s] switching CRTC directly\n",
plane->base.id, plane->name);
return -EINVAL;
}
return 0;
}
static void drm_atomic_plane_print_state(struct drm_printer *p,
const struct drm_plane_state *state)
{
struct drm_plane *plane = state->plane;
struct drm_rect src = drm_plane_state_src(state);
struct drm_rect dest = drm_plane_state_dest(state);
drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
if (state->fb)
drm_framebuffer_print_info(p, 2, state->fb);
drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
drm_printf(p, "\trotation=%x\n", state->rotation);
drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos);
drm_printf(p, "\tcolor-encoding=%s\n",
drm_get_color_encoding_name(state->color_encoding));
drm_printf(p, "\tcolor-range=%s\n",
drm_get_color_range_name(state->color_range));
if (plane->funcs->atomic_print_state)
plane->funcs->atomic_print_state(p, state);
}
/**
* DOC: handling driver private state
*
* Very often the DRM objects exposed to userspace in the atomic modeset api
* (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the
* underlying hardware. Especially for any kind of shared resources (e.g. shared
* clocks, scaler units, bandwidth and fifo limits shared among a group of
* planes or CRTCs, and so on) it makes sense to model these as independent
* objects. Drivers then need to do similar state tracking and commit ordering for
* such private (since not exposed to userspace) objects as the atomic core and
* helpers already provide for connectors, planes and CRTCs.
*
* To make this easier on drivers the atomic core provides some support to track
* driver private state objects using struct &drm_private_obj, with the
* associated state struct &drm_private_state.
*
* Similar to userspace-exposed objects, private state structures can be
* acquired by calling drm_atomic_get_private_obj_state(). This also takes care
* of locking, hence drivers should not have a need to call drm_modeset_lock()
 * directly. Sequencing of the actual hardware state commit is not handled;
 * drivers might need to keep track of struct drm_crtc_commit within their
 * subclassed structure of &drm_private_state as necessary, e.g. similar to
* &drm_plane_state.commit. See also &drm_atomic_state.fake_commit.
*
* All private state structures contained in a &drm_atomic_state update can be
* iterated using for_each_oldnew_private_obj_in_state(),
* for_each_new_private_obj_in_state() and for_each_old_private_obj_in_state().
* Drivers are recommended to wrap these for each type of driver private state
* object they have, filtering on &drm_private_obj.funcs using for_each_if(), at
* least if they want to iterate over all objects of a given type.
*
* An earlier way to handle driver private state was by subclassing struct
* &drm_atomic_state. But since that encourages non-standard ways to implement
 * the check/commit split atomic requires (by using e.g. "check and rollback or
 * commit" instead of "duplicate state, check, then either commit or release the
 * duplicated state"), it is deprecated in favour of using &drm_private_state.
*/
/**
* drm_atomic_private_obj_init - initialize private object
* @dev: DRM device this object will be attached to
* @obj: private object
* @state: initial private object state
* @funcs: pointer to the struct of function pointers that identify the object
* type
*
* Initialize the private object, which can be embedded into any
* driver private object that needs its own atomic state.
*/
void
drm_atomic_private_obj_init(struct drm_device *dev,
struct drm_private_obj *obj,
struct drm_private_state *state,
const struct drm_private_state_funcs *funcs)
{
memset(obj, 0, sizeof(*obj));
drm_modeset_lock_init(&obj->lock);
obj->state = state;
obj->funcs = funcs;
list_add_tail(&obj->head, &dev->mode_config.privobj_list);
state->obj = obj;
}
EXPORT_SYMBOL(drm_atomic_private_obj_init);
/**
* drm_atomic_private_obj_fini - finalize private object
* @obj: private object
*
* Finalize the private object.
*/
void
drm_atomic_private_obj_fini(struct drm_private_obj *obj)
{
list_del(&obj->head);
obj->funcs->atomic_destroy_state(obj, obj->state);
drm_modeset_lock_fini(&obj->lock);
}
EXPORT_SYMBOL(drm_atomic_private_obj_fini);
/**
* drm_atomic_get_private_obj_state - get private object state
* @state: global atomic state
* @obj: private object to get the state for
*
* This function returns the private object state for the given private object,
* allocating the state if needed. It will also grab the relevant private
* object lock to make sure that the state is consistent.
*
* RETURNS:
*
* Either the allocated state or the error code encoded into a pointer.
*/
struct drm_private_state *
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
struct drm_private_obj *obj)
{
int index, num_objs, i, ret;
size_t size;
struct __drm_private_objs_state *arr;
struct drm_private_state *obj_state;
for (i = 0; i < state->num_private_objs; i++)
if (obj == state->private_objs[i].ptr)
return state->private_objs[i].state;
ret = drm_modeset_lock(&obj->lock, state->acquire_ctx);
if (ret)
return ERR_PTR(ret);
num_objs = state->num_private_objs + 1;
size = sizeof(*state->private_objs) * num_objs;
arr = krealloc(state->private_objs, size, GFP_KERNEL);
if (!arr)
return ERR_PTR(-ENOMEM);
state->private_objs = arr;
index = state->num_private_objs;
memset(&state->private_objs[index], 0, sizeof(*state->private_objs));
obj_state = obj->funcs->atomic_duplicate_state(obj);
if (!obj_state)
return ERR_PTR(-ENOMEM);
state->private_objs[index].state = obj_state;
state->private_objs[index].old_state = obj->state;
state->private_objs[index].new_state = obj_state;
state->private_objs[index].ptr = obj;
obj_state->state = state;
state->num_private_objs = num_objs;
drm_dbg_atomic(state->dev,
"Added new private object %p state %p to %p\n",
obj, obj_state, state);
return obj_state;
}
EXPORT_SYMBOL(drm_atomic_get_private_obj_state);
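
/*
 * Illustrative sketch (not part of the original file): the usual pattern for
 * a driver private object holding shared state. All "example_" names are
 * hypothetical. The core rewires the ->state back-pointer after duplication
 * (see drm_atomic_get_private_obj_state() above), so a plain copy of the
 * container is sufficient here.
 */
struct example_shared_state {
	struct drm_private_state base;
	unsigned int allocated_bandwidth;
};

static struct drm_private_state *
example_duplicate_state(struct drm_private_obj *obj)
{
	struct example_shared_state *old =
		container_of(obj->state, struct example_shared_state, base);
	struct example_shared_state *new_state;

	new_state = kmemdup(old, sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	return &new_state->base;
}

static void example_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	kfree(container_of(state, struct example_shared_state, base));
}

static const struct drm_private_state_funcs example_state_funcs = {
	.atomic_duplicate_state = example_duplicate_state,
	.atomic_destroy_state = example_destroy_state,
};

static struct drm_private_obj example_obj;

/* Called once at driver load, e.g. from the driver's mode-config setup. */
static int __maybe_unused example_private_obj_init(struct drm_device *dev)
{
	struct example_shared_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(dev, &example_obj, &state->base,
				    &example_state_funcs);
	return 0;
}

/* Typed accessor used from the driver's atomic_check paths. */
static struct example_shared_state * __maybe_unused
example_get_shared_state(struct drm_atomic_state *state)
{
	struct drm_private_state *priv;

	priv = drm_atomic_get_private_obj_state(state, &example_obj);
	if (IS_ERR(priv))
		return ERR_CAST(priv);

	return container_of(priv, struct example_shared_state, base);
}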
/**
* drm_atomic_get_old_private_obj_state
* @state: global atomic state object
* @obj: private_obj to grab
*
* This function returns the old private object state for the given private_obj,
* or NULL if the private_obj is not part of the global atomic state.
*/
struct drm_private_state *
drm_atomic_get_old_private_obj_state(const struct drm_atomic_state *state,
struct drm_private_obj *obj)
{
int i;
for (i = 0; i < state->num_private_objs; i++)
if (obj == state->private_objs[i].ptr)
return state->private_objs[i].old_state;
return NULL;
}
EXPORT_SYMBOL(drm_atomic_get_old_private_obj_state);
/**
* drm_atomic_get_new_private_obj_state
* @state: global atomic state object
* @obj: private_obj to grab
*
* This function returns the new private object state for the given private_obj,
* or NULL if the private_obj is not part of the global atomic state.
*/
struct drm_private_state *
drm_atomic_get_new_private_obj_state(const struct drm_atomic_state *state,
struct drm_private_obj *obj)
{
int i;
for (i = 0; i < state->num_private_objs; i++)
if (obj == state->private_objs[i].ptr)
return state->private_objs[i].new_state;
return NULL;
}
EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state);
/**
* drm_atomic_get_old_connector_for_encoder - Get old connector for an encoder
* @state: Atomic state
* @encoder: The encoder to fetch the connector state for
*
* This function finds and returns the connector that was connected to @encoder
* as specified by the @state.
*
* If there is no connector in @state which previously had @encoder connected to
* it, this function will return NULL. While this may seem like an invalid use
* case, it is sometimes useful to differentiate commits which had no prior
* connectors attached to @encoder vs ones that did (and to inspect their
* state). This is especially true in enable hooks because the pipeline has
* changed.
*
* Returns: The old connector connected to @encoder, or NULL if the encoder is
* not connected.
*/
struct drm_connector *
drm_atomic_get_old_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_encoder *encoder)
{
struct drm_connector_state *conn_state;
struct drm_connector *connector;
unsigned int i;
for_each_old_connector_in_state(state, connector, conn_state, i) {
if (conn_state->best_encoder == encoder)
return connector;
}
return NULL;
}
EXPORT_SYMBOL(drm_atomic_get_old_connector_for_encoder);
/**
* drm_atomic_get_new_connector_for_encoder - Get new connector for an encoder
* @state: Atomic state
* @encoder: The encoder to fetch the connector state for
*
* This function finds and returns the connector that will be connected to
* @encoder as specified by the @state.
*
* If there is no connector in @state which will have @encoder connected to it,
* this function will return NULL. While this may seem like an invalid use case,
* it is sometimes useful to differentiate commits which have no connectors
* attached to @encoder vs ones that do (and to inspect their state). This is
* especially true in disable hooks because the pipeline will change.
*
* Returns: The new connector connected to @encoder, or NULL if the encoder is
* not connected.
*/
struct drm_connector *
drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_encoder *encoder)
{
struct drm_connector_state *conn_state;
struct drm_connector *connector;
unsigned int i;
for_each_new_connector_in_state(state, connector, conn_state, i) {
if (conn_state->best_encoder == encoder)
return connector;
}
return NULL;
}
EXPORT_SYMBOL(drm_atomic_get_new_connector_for_encoder);
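
/*
 * Illustrative sketch (not part of the original file): an encoder
 * &drm_encoder_helper_funcs.atomic_disable implementation that checks whether
 * a connector remains routed to the encoder after this commit. The function
 * name is hypothetical and assumes the atomic variant of the disable hook.
 */
static void __maybe_unused example_encoder_atomic_disable(struct drm_encoder *encoder,
							   struct drm_atomic_state *state)
{
	struct drm_connector *connector;

	connector = drm_atomic_get_new_connector_for_encoder(state, encoder);
	if (!connector) {
		/* Nothing stays connected: power the encoder down completely. */
		return;
	}

	/* Otherwise only quiesce the link; the connector stays attached. */
}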
/**
* drm_atomic_get_old_crtc_for_encoder - Get old crtc for an encoder
* @state: Atomic state
* @encoder: The encoder to fetch the crtc state for
*
* This function finds and returns the crtc that was connected to @encoder
* as specified by the @state.
*
* Returns: The old crtc connected to @encoder, or NULL if the encoder is
* not connected.
*/
struct drm_crtc *
drm_atomic_get_old_crtc_for_encoder(struct drm_atomic_state *state,
struct drm_encoder *encoder)
{
struct drm_connector *connector;
struct drm_connector_state *conn_state;
connector = drm_atomic_get_old_connector_for_encoder(state, encoder);
if (!connector)
return NULL;
conn_state = drm_atomic_get_old_connector_state(state, connector);
if (!conn_state)
return NULL;
return conn_state->crtc;
}
EXPORT_SYMBOL(drm_atomic_get_old_crtc_for_encoder);
/**
* drm_atomic_get_new_crtc_for_encoder - Get new crtc for an encoder
* @state: Atomic state
* @encoder: The encoder to fetch the crtc state for
*
* This function finds and returns the crtc that will be connected to @encoder
* as specified by the @state.
*
* Returns: The new crtc connected to @encoder, or NULL if the encoder is
* not connected.
*/
struct drm_crtc *
drm_atomic_get_new_crtc_for_encoder(struct drm_atomic_state *state,
struct drm_encoder *encoder)
{
struct drm_connector *connector;
struct drm_connector_state *conn_state;
connector = drm_atomic_get_new_connector_for_encoder(state, encoder);
if (!connector)
return NULL;
conn_state = drm_atomic_get_new_connector_state(state, connector);
if (!conn_state)
return NULL;
return conn_state->crtc;
}
EXPORT_SYMBOL(drm_atomic_get_new_crtc_for_encoder);
/**
* drm_atomic_get_connector_state - get connector state
* @state: global atomic state object
* @connector: connector to get state object for
*
* This function returns the connector state for the given connector,
* allocating it if needed. It will also grab the relevant connector lock to
* make sure that the state is consistent.
*
* Returns:
*
* Either the allocated state or the error code encoded into the pointer. When
* the error is EDEADLK then the w/w mutex code has detected a deadlock and the
* entire atomic sequence must be restarted. All other errors are fatal.
*/
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
struct drm_connector *connector)
{
int ret, index;
struct drm_mode_config *config = &connector->dev->mode_config;
struct drm_connector_state *connector_state;
WARN_ON(!state->acquire_ctx);
ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
if (ret)
return ERR_PTR(ret);
index = drm_connector_index(connector);
if (index >= state->num_connector) {
struct __drm_connnectors_state *c;
int alloc = max(index + 1, config->num_connector);
c = krealloc_array(state->connectors, alloc,
sizeof(*state->connectors), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
state->connectors = c;
memset(&state->connectors[state->num_connector], 0,
sizeof(*state->connectors) * (alloc - state->num_connector));
state->num_connector = alloc;
}
if (state->connectors[index].state)
return state->connectors[index].state;
connector_state = connector->funcs->atomic_duplicate_state(connector);
if (!connector_state)
return ERR_PTR(-ENOMEM);
drm_connector_get(connector);
state->connectors[index].state = connector_state;
state->connectors[index].old_state = connector->state;
state->connectors[index].new_state = connector_state;
state->connectors[index].ptr = connector;
connector_state->state = state;
drm_dbg_atomic(connector->dev, "Added [CONNECTOR:%d:%s] %p state to %p\n",
connector->base.id, connector->name,
connector_state, state);
if (connector_state->crtc) {
struct drm_crtc_state *crtc_state;
crtc_state = drm_atomic_get_crtc_state(state,
connector_state->crtc);
if (IS_ERR(crtc_state))
return ERR_CAST(crtc_state);
}
return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);
static void drm_atomic_connector_print_state(struct drm_printer *p,
const struct drm_connector_state *state)
{
struct drm_connector *connector = state->connector;
drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware);
drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc);
drm_printf(p, "\tcolorspace=%s\n", drm_get_colorspace_name(state->colorspace));
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
if (state->writeback_job && state->writeback_job->fb)
drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id);
if (connector->funcs->atomic_print_state)
connector->funcs->atomic_print_state(p, state);
}
/**
* drm_atomic_get_bridge_state - get bridge state
* @state: global atomic state object
* @bridge: bridge to get state object for
*
* This function returns the bridge state for the given bridge, allocating it
* if needed. It will also grab the relevant bridge lock to make sure that the
* state is consistent.
*
* Returns:
*
* Either the allocated state or the error code encoded into the pointer. When
* the error is EDEADLK then the w/w mutex code has detected a deadlock and the
* entire atomic sequence must be restarted.
*/
struct drm_bridge_state *
drm_atomic_get_bridge_state(struct drm_atomic_state *state,
struct drm_bridge *bridge)
{
struct drm_private_state *obj_state;
obj_state = drm_atomic_get_private_obj_state(state, &bridge->base);
if (IS_ERR(obj_state))
return ERR_CAST(obj_state);
return drm_priv_to_bridge_state(obj_state);
}
EXPORT_SYMBOL(drm_atomic_get_bridge_state);
/**
* drm_atomic_get_old_bridge_state - get old bridge state, if it exists
* @state: global atomic state object
* @bridge: bridge to grab
*
* This function returns the old bridge state for the given bridge, or NULL if
* the bridge is not part of the global atomic state.
*/
struct drm_bridge_state *
drm_atomic_get_old_bridge_state(const struct drm_atomic_state *state,
struct drm_bridge *bridge)
{
struct drm_private_state *obj_state;
obj_state = drm_atomic_get_old_private_obj_state(state, &bridge->base);
if (!obj_state)
return NULL;
return drm_priv_to_bridge_state(obj_state);
}
EXPORT_SYMBOL(drm_atomic_get_old_bridge_state);
/**
* drm_atomic_get_new_bridge_state - get new bridge state, if it exists
* @state: global atomic state object
* @bridge: bridge to grab
*
* This function returns the new bridge state for the given bridge, or NULL if
* the bridge is not part of the global atomic state.
*/
struct drm_bridge_state *
drm_atomic_get_new_bridge_state(const struct drm_atomic_state *state,
struct drm_bridge *bridge)
{
struct drm_private_state *obj_state;
obj_state = drm_atomic_get_new_private_obj_state(state, &bridge->base);
if (!obj_state)
return NULL;
return drm_priv_to_bridge_state(obj_state);
}
EXPORT_SYMBOL(drm_atomic_get_new_bridge_state);
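
/*
 * Illustrative sketch (not part of the original file): a bridge
 * &drm_bridge_funcs.atomic_enable hook fetching its own new state. The bridge
 * name is hypothetical; the global atomic state is assumed to be reachable
 * through the old bridge state's base, as the atomic bridge helpers set it up.
 */
static void __maybe_unused example_bridge_atomic_enable(struct drm_bridge *bridge,
							 struct drm_bridge_state *old_bridge_state)
{
	struct drm_atomic_state *state = old_bridge_state->base.state;
	struct drm_bridge_state *new_bridge_state;

	new_bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
	if (!new_bridge_state)
		return;

	/* Program the bridge according to new_bridge_state here. */
}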
/**
* drm_atomic_add_encoder_bridges - add bridges attached to an encoder
* @state: atomic state
* @encoder: DRM encoder
*
* This function adds all bridges attached to @encoder. This is needed to add
* bridge states to @state and make them available when
* &drm_bridge_funcs.atomic_check(), &drm_bridge_funcs.atomic_pre_enable(),
 * &drm_bridge_funcs.atomic_enable(), &drm_bridge_funcs.atomic_disable() and
 * &drm_bridge_funcs.atomic_post_disable() are called.
*
* Returns:
 * 0 on success, or -EDEADLK or -ENOMEM on failure. When the error is EDEADLK
* then the w/w mutex code has detected a deadlock and the entire atomic
* sequence must be restarted. All other errors are fatal.
*/
int
drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
struct drm_encoder *encoder)
{
struct drm_bridge_state *bridge_state;
struct drm_bridge *bridge;
if (!encoder)
return 0;
drm_dbg_atomic(encoder->dev,
"Adding all bridges for [encoder:%d:%s] to %p\n",
encoder->base.id, encoder->name, state);
drm_for_each_bridge_in_chain(encoder, bridge) {
/* Skip bridges that don't implement the atomic state hooks. */
if (!bridge->funcs->atomic_duplicate_state)
continue;
bridge_state = drm_atomic_get_bridge_state(state, bridge);
if (IS_ERR(bridge_state))
return PTR_ERR(bridge_state);
}
return 0;
}
EXPORT_SYMBOL(drm_atomic_add_encoder_bridges);
/**
* drm_atomic_add_affected_connectors - add connectors for CRTC
* @state: atomic state
* @crtc: DRM CRTC
*
* This function walks the current configuration and adds all connectors
* currently using @crtc to the atomic configuration @state. Note that this
* function must acquire the connection mutex. This can potentially cause
* unneeded serialization if the update is just for the planes on one CRTC. Hence
* drivers and helpers should only call this when really needed (e.g. when a
* full modeset needs to happen due to some change).
*
* Returns:
 * 0 on success, or -EDEADLK or -ENOMEM on failure. When the error is EDEADLK
* then the w/w mutex code has detected a deadlock and the entire atomic
* sequence must be restarted. All other errors are fatal.
*/
int
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
struct drm_mode_config *config = &state->dev->mode_config;
struct drm_connector *connector;
struct drm_connector_state *conn_state;
struct drm_connector_list_iter conn_iter;
struct drm_crtc_state *crtc_state;
int ret;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
if (ret)
return ret;
drm_dbg_atomic(crtc->dev,
"Adding all current connectors for [CRTC:%d:%s] to %p\n",
crtc->base.id, crtc->name, state);
/*
* Changed connectors are already in @state, so only need to look
* at the connector_mask in crtc_state.
*/
drm_connector_list_iter_begin(state->dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (!(crtc_state->connector_mask & drm_connector_mask(connector)))
continue;
conn_state = drm_atomic_get_connector_state(state, connector);
if (IS_ERR(conn_state)) {
drm_connector_list_iter_end(&conn_iter);
return PTR_ERR(conn_state);
}
}
drm_connector_list_iter_end(&conn_iter);
return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
/**
* drm_atomic_add_affected_planes - add planes for CRTC
* @state: atomic state
* @crtc: DRM CRTC
*
* This function walks the current configuration and adds all planes
* currently used by @crtc to the atomic configuration @state. This is useful
 * when an atomic commit also needs to check all currently enabled planes on
* @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
* to avoid special code to force-enable all planes.
*
* Since acquiring a plane state will always also acquire the w/w mutex of the
 * current CRTC for that plane (if there is any), adding all the plane states for
* a CRTC will not reduce parallelism of atomic updates.
*
* Returns:
 * 0 on success, or -EDEADLK or -ENOMEM on failure. When the error is EDEADLK
* then the w/w mutex code has detected a deadlock and the entire atomic
* sequence must be restarted. All other errors are fatal.
*/
int
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
const struct drm_crtc_state *old_crtc_state =
drm_atomic_get_old_crtc_state(state, crtc);
struct drm_plane *plane;
WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
drm_dbg_atomic(crtc->dev,
"Adding all current planes for [CRTC:%d:%s] to %p\n",
crtc->base.id, crtc->name, state);
drm_for_each_plane_mask(plane, state->dev, old_crtc_state->plane_mask) {
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
}
return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_planes);
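
/*
 * Illustrative sketch (not part of the original file): a CRTC
 * &drm_crtc_helper_funcs.atomic_check hook that pulls in all planes whenever a
 * full modeset is needed, so their states get revalidated too. The function
 * name is hypothetical and assumes the atomic_check variant that takes a
 * &drm_atomic_state pointer.
 */
static int __maybe_unused example_crtc_atomic_check(struct drm_crtc *crtc,
						     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state =
		drm_atomic_get_new_crtc_state(state, crtc);

	if (drm_atomic_crtc_needs_modeset(crtc_state))
		return drm_atomic_add_affected_planes(state, crtc);

	return 0;
}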
/**
* drm_atomic_check_only - check whether a given config would work
* @state: atomic configuration to check
*
* Note that this function can return -EDEADLK if the driver needed to acquire
* more locks but encountered a deadlock. The caller must then do the usual w/w
* backoff dance and restart. All other errors are fatal.
*
* Returns:
* 0 on success, negative error code on failure.
*/
int drm_atomic_check_only(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_mode_config *config = &dev->mode_config;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state;
struct drm_plane_state *new_plane_state;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
struct drm_connector *conn;
struct drm_connector_state *conn_state;
unsigned int requested_crtc = 0;
unsigned int affected_crtc = 0;
int i, ret = 0;
drm_dbg_atomic(dev, "checking %p\n", state);
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->enable)
requested_crtc |= drm_crtc_mask(crtc);
}
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
ret = drm_atomic_plane_check(old_plane_state, new_plane_state);
if (ret) {
drm_dbg_atomic(dev, "[PLANE:%d:%s] atomic core check failed\n",
plane->base.id, plane->name);
return ret;
}
}
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
ret = drm_atomic_crtc_check(old_crtc_state, new_crtc_state);
if (ret) {
drm_dbg_atomic(dev, "[CRTC:%d:%s] atomic core check failed\n",
crtc->base.id, crtc->name);
return ret;
}
}
for_each_new_connector_in_state(state, conn, conn_state, i) {
ret = drm_atomic_connector_check(conn, conn_state);
if (ret) {
drm_dbg_atomic(dev, "[CONNECTOR:%d:%s] atomic core check failed\n",
conn->base.id, conn->name);
return ret;
}
}
if (config->funcs->atomic_check) {
ret = config->funcs->atomic_check(state->dev, state);
if (ret) {
drm_dbg_atomic(dev, "atomic driver check for %p failed: %d\n",
state, ret);
return ret;
}
}
if (!state->allow_modeset) {
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
drm_dbg_atomic(dev, "[CRTC:%d:%s] requires full modeset\n",
crtc->base.id, crtc->name);
return -EINVAL;
}
}
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->enable)
affected_crtc |= drm_crtc_mask(crtc);
}
/*
* For commits that allow modesets drivers can add other CRTCs to the
* atomic commit, e.g. when they need to reallocate global resources.
* This can cause spurious EBUSY, which robs compositors of a very
 * effective sanity check for their drawing loop. Therefore, only allow
* drivers to add unrelated CRTC states for modeset commits.
*
* FIXME: Should add affected_crtc mask to the ATOMIC IOCTL as an output
* so compositors know what's going on.
*/
if (affected_crtc != requested_crtc) {
drm_dbg_atomic(dev,
"driver added CRTC to commit: requested 0x%x, affected 0x%0x\n",
requested_crtc, affected_crtc);
WARN(!state->allow_modeset, "adding CRTC not allowed without modesets: requested 0x%x, affected 0x%0x\n",
requested_crtc, affected_crtc);
}
return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);
/**
* drm_atomic_commit - commit configuration atomically
* @state: atomic configuration to check
*
* Note that this function can return -EDEADLK if the driver needed to acquire
* more locks but encountered a deadlock. The caller must then do the usual w/w
* backoff dance and restart. All other errors are fatal.
*
* This function will take its own reference on @state.
* Callers should always release their reference with drm_atomic_state_put().
*
* Returns:
* 0 on success, negative error code on failure.
*/
int drm_atomic_commit(struct drm_atomic_state *state)
{
struct drm_mode_config *config = &state->dev->mode_config;
struct drm_printer p = drm_info_printer(state->dev->dev);
int ret;
if (drm_debug_enabled(DRM_UT_STATE))
drm_atomic_print_new_state(state, &p);
ret = drm_atomic_check_only(state);
if (ret)
return ret;
drm_dbg_atomic(state->dev, "committing %p\n", state);
return config->funcs->atomic_commit(state->dev, state, false);
}
EXPORT_SYMBOL(drm_atomic_commit);
/**
* drm_atomic_nonblocking_commit - atomic nonblocking commit
* @state: atomic configuration to check
*
* Note that this function can return -EDEADLK if the driver needed to acquire
* more locks but encountered a deadlock. The caller must then do the usual w/w
* backoff dance and restart. All other errors are fatal.
*
* This function will take its own reference on @state.
* Callers should always release their reference with drm_atomic_state_put().
*
* Returns:
* 0 on success, negative error code on failure.
*/
int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
{
struct drm_mode_config *config = &state->dev->mode_config;
int ret;
ret = drm_atomic_check_only(state);
if (ret)
return ret;
drm_dbg_atomic(state->dev, "committing %p nonblocking\n", state);
return config->funcs->atomic_commit(state->dev, state, true);
}
EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
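
/*
 * Illustrative sketch (not part of the original file): the usual w/w backoff
 * loop around drm_atomic_commit() for a driver-internal update. The function
 * name is hypothetical, and the state would be populated between retries by
 * driver-specific code.
 */
static int __maybe_unused example_commit_with_backoff(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out_fini;
	}
	state->acquire_ctx = &ctx;

retry:
	/* Build up the desired configuration in @state here. */

	ret = drm_atomic_commit(state);
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);
out_fini:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}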
/* just used from drm-client and atomic-helper: */
int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
struct drm_plane_state *plane_state)
{
int ret;
ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
if (ret != 0)
return ret;
drm_atomic_set_fb_for_plane(plane_state, NULL);
plane_state->crtc_x = 0;
plane_state->crtc_y = 0;
plane_state->crtc_w = 0;
plane_state->crtc_h = 0;
plane_state->src_x = 0;
plane_state->src_y = 0;
plane_state->src_w = 0;
plane_state->src_h = 0;
return 0;
}
EXPORT_SYMBOL(__drm_atomic_helper_disable_plane);
static int update_output_state(struct drm_atomic_state *state,
struct drm_mode_set *set)
{
struct drm_device *dev = set->crtc->dev;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
int ret, i;
ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
state->acquire_ctx);
if (ret)
return ret;
/* First disable all connectors on the target crtc. */
ret = drm_atomic_add_affected_connectors(state, set->crtc);
if (ret)
return ret;
for_each_new_connector_in_state(state, connector, new_conn_state, i) {
if (new_conn_state->crtc == set->crtc) {
ret = drm_atomic_set_crtc_for_connector(new_conn_state,
NULL);
if (ret)
return ret;
/* Make sure legacy setCrtc always re-trains */
new_conn_state->link_status = DRM_LINK_STATUS_GOOD;
}
}
/* Then set all connectors from set->connectors on the target crtc */
for (i = 0; i < set->num_connectors; i++) {
new_conn_state = drm_atomic_get_connector_state(state,
set->connectors[i]);
if (IS_ERR(new_conn_state))
return PTR_ERR(new_conn_state);
ret = drm_atomic_set_crtc_for_connector(new_conn_state,
set->crtc);
if (ret)
return ret;
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
/*
* Don't update ->enable for the CRTC in the set_config request,
* since a mismatch would indicate a bug in the upper layers.
* The actual modeset code later on will catch any
* inconsistencies here.
*/
if (crtc == set->crtc)
continue;
if (!new_crtc_state->connector_mask) {
ret = drm_atomic_set_mode_prop_for_crtc(new_crtc_state,
NULL);
if (ret < 0)
return ret;
new_crtc_state->active = false;
}
}
return 0;
}
/* just used from drm-client and atomic-helper: */
int __drm_atomic_helper_set_config(struct drm_mode_set *set,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state;
struct drm_plane_state *primary_state;
struct drm_crtc *crtc = set->crtc;
int hdisplay, vdisplay;
int ret;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
primary_state = drm_atomic_get_plane_state(state, crtc->primary);
if (IS_ERR(primary_state))
return PTR_ERR(primary_state);
if (!set->mode) {
WARN_ON(set->fb);
WARN_ON(set->num_connectors);
ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
if (ret != 0)
return ret;
crtc_state->active = false;
ret = drm_atomic_set_crtc_for_plane(primary_state, NULL);
if (ret != 0)
return ret;
drm_atomic_set_fb_for_plane(primary_state, NULL);
goto commit;
}
WARN_ON(!set->fb);
WARN_ON(!set->num_connectors);
ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode);
if (ret != 0)
return ret;
crtc_state->active = true;
ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
if (ret != 0)
return ret;
drm_mode_get_hv_timing(set->mode, &hdisplay, &vdisplay);
drm_atomic_set_fb_for_plane(primary_state, set->fb);
primary_state->crtc_x = 0;
primary_state->crtc_y = 0;
primary_state->crtc_w = hdisplay;
primary_state->crtc_h = vdisplay;
primary_state->src_x = set->x << 16;
primary_state->src_y = set->y << 16;
if (drm_rotation_90_or_270(primary_state->rotation)) {
primary_state->src_w = vdisplay << 16;
primary_state->src_h = hdisplay << 16;
} else {
primary_state->src_w = hdisplay << 16;
primary_state->src_h = vdisplay << 16;
}
commit:
ret = update_output_state(state, set);
if (ret)
return ret;
return 0;
}
EXPORT_SYMBOL(__drm_atomic_helper_set_config);
static void drm_atomic_private_obj_print_state(struct drm_printer *p,
const struct drm_private_state *state)
{
struct drm_private_obj *obj = state->obj;
if (obj->funcs->atomic_print_state)
obj->funcs->atomic_print_state(p, state);
}
/**
* drm_atomic_print_new_state - prints drm atomic state
* @state: atomic configuration to check
* @p: drm printer
*
 * This function prints the drm atomic state snapshot using the drm printer
* which is passed to it. This snapshot can be used for debugging purposes.
*
 * Note that this function looks into the new state objects and hence it is not
* safe to be used after the call to drm_atomic_helper_commit_hw_done().
*/
void drm_atomic_print_new_state(const struct drm_atomic_state *state,
struct drm_printer *p)
{
struct drm_plane *plane;
struct drm_plane_state *plane_state;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
struct drm_private_obj *obj;
struct drm_private_state *obj_state;
int i;
if (!p) {
drm_err(state->dev, "invalid drm printer\n");
return;
}
drm_dbg_atomic(state->dev, "checking %p\n", state);
for_each_new_plane_in_state(state, plane, plane_state, i)
drm_atomic_plane_print_state(p, plane_state);
for_each_new_crtc_in_state(state, crtc, crtc_state, i)
drm_atomic_crtc_print_state(p, crtc_state);
for_each_new_connector_in_state(state, connector, connector_state, i)
drm_atomic_connector_print_state(p, connector_state);
for_each_new_private_obj_in_state(state, obj, obj_state, i)
drm_atomic_private_obj_print_state(p, obj_state);
}
EXPORT_SYMBOL(drm_atomic_print_new_state);
static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
bool take_locks)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_plane *plane;
struct drm_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
if (!drm_drv_uses_atomic_modeset(dev))
return;
list_for_each_entry(plane, &config->plane_list, head) {
if (take_locks)
drm_modeset_lock(&plane->mutex, NULL);
drm_atomic_plane_print_state(p, plane->state);
if (take_locks)
drm_modeset_unlock(&plane->mutex);
}
list_for_each_entry(crtc, &config->crtc_list, head) {
if (take_locks)
drm_modeset_lock(&crtc->mutex, NULL);
drm_atomic_crtc_print_state(p, crtc->state);
if (take_locks)
drm_modeset_unlock(&crtc->mutex);
}
drm_connector_list_iter_begin(dev, &conn_iter);
if (take_locks)
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
drm_for_each_connector_iter(connector, &conn_iter)
drm_atomic_connector_print_state(p, connector->state);
if (take_locks)
drm_modeset_unlock(&dev->mode_config.connection_mutex);
drm_connector_list_iter_end(&conn_iter);
}
/**
* drm_state_dump - dump entire device atomic state
* @dev: the drm device
* @p: where to print the state to
*
* Just for debugging. Drivers might want an option to dump state
 * to dmesg in case of error IRQs. (Hint, you probably want to
 * ratelimit this!)
 *
 * The caller must wrap this in drm_modeset_lock_all_ctx() and
 * drm_modeset_drop_locks(). If this is called from an error IRQ handler, it
 * should not be enabled by default - if you are debugging errors you might
 * not care that this is racy, but calling this without all modeset locks held
* is inherently unsafe.
*/
void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
{
__drm_state_dump(dev, p, false);
}
EXPORT_SYMBOL(drm_state_dump);
#ifdef CONFIG_DEBUG_FS
static int drm_state_info(struct seq_file *m, void *data)
{
struct drm_debugfs_entry *entry = m->private;
struct drm_device *dev = entry->dev;
struct drm_printer p = drm_seq_file_printer(m);
__drm_state_dump(dev, &p, true);
return 0;
}
/* any use in debugfs files to dump individual planes/crtc/etc? */
static const struct drm_debugfs_info drm_atomic_debugfs_list[] = {
{"state", drm_state_info, 0},
};
void drm_atomic_debugfs_init(struct drm_minor *minor)
{
drm_debugfs_add_files(minor->dev, drm_atomic_debugfs_list,
ARRAY_SIZE(drm_atomic_debugfs_list));
}
#endif
| linux-master | drivers/gpu/drm/drm_atomic.c |
// SPDX-License-Identifier: MIT
#include <uapi/linux/sched/types.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
#include <drm/drm_crtc.h>
#include "drm_internal.h"
/**
* DOC: vblank works
*
 * Many DRM drivers need to program hardware in a time-sensitive manner, often
 * with a deadline of starting and finishing within a certain region of
* the scanout. Most of the time the safest way to accomplish this is to
* simply do said time-sensitive programming in the driver's IRQ handler,
* which allows drivers to avoid being preempted during these critical
* regions. Or even better, the hardware may even handle applying such
* time-critical programming independently of the CPU.
*
* While there's a decent amount of hardware that's designed so that the CPU
* doesn't need to be concerned with extremely time-sensitive programming,
 * there are a few situations where it can't be helped. Some unforgiving
* hardware may require that certain time-sensitive programming be handled
* completely by the CPU, and said programming may even take too long to
* handle in an IRQ handler. Another such situation would be where the driver
* needs to perform a task that needs to complete within a specific scanout
* period, but might possibly block and thus cannot be handled in an IRQ
* context. Both of these situations can't be solved perfectly in Linux since
* we're not a realtime kernel, and thus the scheduler may cause us to miss
* our deadline if it decides to preempt us. But for some drivers, it's good
* enough if we can lower our chance of being preempted to an absolute
* minimum.
*
* This is where &drm_vblank_work comes in. &drm_vblank_work provides a simple
* generic delayed work implementation which delays work execution until a
* particular vblank has passed, and then executes the work at realtime
* priority. This provides the best possible chance at performing
* time-sensitive hardware programming on time, even when the system is under
* heavy load. &drm_vblank_work also supports rescheduling, so that self
* re-arming work items can be easily implemented.
*/
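
/*
 * Illustrative sketch (not part of the original file): a driver arming a
 * vblank work item so that time-critical programming runs right after a
 * target vblank. Function names are hypothetical; the work item itself would
 * typically live in the driver's CRTC structure.
 */
static void example_vblank_work_fn(struct kthread_work *base)
{
	struct drm_vblank_work *work =
		container_of(base, struct drm_vblank_work, base);

	/* Time-critical programming for this pipe goes here. */
	drm_dbg_core(work->vblank->dev, "vblank work for pipe %u\n",
		     work->vblank->pipe);
}

/* Done once, e.g. at CRTC init time. */
static void __maybe_unused example_vblank_work_setup(struct drm_crtc *crtc,
						      struct drm_vblank_work *work)
{
	drm_vblank_work_init(work, crtc, example_vblank_work_fn);
}

/* Arm the work to run right after the next vblank. */
static void __maybe_unused example_vblank_work_arm(struct drm_crtc *crtc,
						    struct drm_vblank_work *work)
{
	drm_vblank_work_schedule(work, drm_crtc_vblank_count(crtc) + 1, true);
}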
void drm_handle_vblank_works(struct drm_vblank_crtc *vblank)
{
struct drm_vblank_work *work, *next;
u64 count = atomic64_read(&vblank->count);
bool wake = false;
assert_spin_locked(&vblank->dev->event_lock);
list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
if (!drm_vblank_passed(count, work->count))
continue;
list_del_init(&work->node);
drm_vblank_put(vblank->dev, vblank->pipe);
kthread_queue_work(vblank->worker, &work->base);
wake = true;
}
if (wake)
wake_up_all(&vblank->work_wait_queue);
}
/* Handle cancelling any pending vblank work items and drop respective vblank
* references in response to vblank interrupts being disabled.
*/
void drm_vblank_cancel_pending_works(struct drm_vblank_crtc *vblank)
{
struct drm_vblank_work *work, *next;
assert_spin_locked(&vblank->dev->event_lock);
list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
list_del_init(&work->node);
drm_vblank_put(vblank->dev, vblank->pipe);
}
wake_up_all(&vblank->work_wait_queue);
}
/**
* drm_vblank_work_schedule - schedule a vblank work
* @work: vblank work to schedule
* @count: target vblank count
* @nextonmiss: defer until the next vblank if target vblank was missed
*
* Schedule @work for execution once the crtc vblank count reaches @count.
*
* If the crtc vblank count has already reached @count and @nextonmiss is
* %false the work starts to execute immediately.
*
* If the crtc vblank count has already reached @count and @nextonmiss is
* %true the work is deferred until the next vblank (as if @count has been
* specified as crtc vblank count + 1).
*
* If @work is already scheduled, this function will reschedule said work
* using the new @count. This can be used for self-rearming work items.
*
* Returns:
* %1 if @work was successfully (re)scheduled, %0 if it was either already
* scheduled or cancelled, or a negative error code on failure.
*/
int drm_vblank_work_schedule(struct drm_vblank_work *work,
u64 count, bool nextonmiss)
{
struct drm_vblank_crtc *vblank = work->vblank;
struct drm_device *dev = vblank->dev;
u64 cur_vbl;
unsigned long irqflags;
bool passed, inmodeset, rescheduling = false, wake = false;
int ret = 0;
spin_lock_irqsave(&dev->event_lock, irqflags);
if (work->cancelling)
goto out;
spin_lock(&dev->vbl_lock);
inmodeset = vblank->inmodeset;
spin_unlock(&dev->vbl_lock);
if (inmodeset)
goto out;
if (list_empty(&work->node)) {
ret = drm_vblank_get(dev, vblank->pipe);
if (ret < 0)
goto out;
} else if (work->count == count) {
/* Already scheduled w/ same vbl count */
goto out;
} else {
rescheduling = true;
}
work->count = count;
cur_vbl = drm_vblank_count(dev, vblank->pipe);
passed = drm_vblank_passed(cur_vbl, count);
if (passed)
drm_dbg_core(dev,
"crtc %d vblank %llu already passed (current %llu)\n",
vblank->pipe, count, cur_vbl);
if (!nextonmiss && passed) {
drm_vblank_put(dev, vblank->pipe);
ret = kthread_queue_work(vblank->worker, &work->base);
if (rescheduling) {
list_del_init(&work->node);
wake = true;
}
} else {
if (!rescheduling)
list_add_tail(&work->node, &vblank->pending_work);
ret = true;
}
out:
spin_unlock_irqrestore(&dev->event_lock, irqflags);
if (wake)
wake_up_all(&vblank->work_wait_queue);
return ret;
}
EXPORT_SYMBOL(drm_vblank_work_schedule);
/**
* drm_vblank_work_cancel_sync - cancel a vblank work and wait for it to
* finish executing
* @work: vblank work to cancel
*
* Cancel an already scheduled vblank work and wait for its
* execution to finish.
*
* On return, @work is guaranteed to no longer be scheduled or running, even
* if it's self-arming.
*
* Returns:
* %True if the work was cancelled before it started to execute, %false
* otherwise.
*/
bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work)
{
struct drm_vblank_crtc *vblank = work->vblank;
struct drm_device *dev = vblank->dev;
bool ret = false;
spin_lock_irq(&dev->event_lock);
if (!list_empty(&work->node)) {
list_del_init(&work->node);
drm_vblank_put(vblank->dev, vblank->pipe);
ret = true;
}
work->cancelling++;
spin_unlock_irq(&dev->event_lock);
wake_up_all(&vblank->work_wait_queue);
if (kthread_cancel_work_sync(&work->base))
ret = true;
spin_lock_irq(&dev->event_lock);
work->cancelling--;
spin_unlock_irq(&dev->event_lock);
return ret;
}
EXPORT_SYMBOL(drm_vblank_work_cancel_sync);
/**
* drm_vblank_work_flush - wait for a scheduled vblank work to finish
* executing
* @work: vblank work to flush
*
* Wait until @work has finished executing once.
*/
void drm_vblank_work_flush(struct drm_vblank_work *work)
{
struct drm_vblank_crtc *vblank = work->vblank;
struct drm_device *dev = vblank->dev;
spin_lock_irq(&dev->event_lock);
wait_event_lock_irq(vblank->work_wait_queue, list_empty(&work->node),
dev->event_lock);
spin_unlock_irq(&dev->event_lock);
kthread_flush_work(&work->base);
}
EXPORT_SYMBOL(drm_vblank_work_flush);
/**
* drm_vblank_work_init - initialize a vblank work item
* @work: vblank work item
* @crtc: CRTC whose vblank will trigger the work execution
* @func: work function to be executed
*
* Initialize a vblank work item for a specific crtc.
*/
void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
void (*func)(struct kthread_work *work))
{
kthread_init_work(&work->base, func);
INIT_LIST_HEAD(&work->node);
work->vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
}
EXPORT_SYMBOL(drm_vblank_work_init);
int drm_vblank_worker_init(struct drm_vblank_crtc *vblank)
{
struct kthread_worker *worker;
INIT_LIST_HEAD(&vblank->pending_work);
init_waitqueue_head(&vblank->work_wait_queue);
worker = kthread_create_worker(0, "card%d-crtc%d",
vblank->dev->primary->index,
vblank->pipe);
if (IS_ERR(worker))
return PTR_ERR(worker);
vblank->worker = worker;
sched_set_fifo(worker->task);
return 0;
}
| linux-master | drivers/gpu/drm/drm_vblank_work.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* drm gem framebuffer helper functions
*
* Copyright (C) 2017 Noralf Trønnes
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include "drm_internal.h"
MODULE_IMPORT_NS(DMA_BUF);
#define AFBC_HEADER_SIZE 16
#define AFBC_TH_LAYOUT_ALIGNMENT 8
#define AFBC_HDR_ALIGN 64
#define AFBC_SUPERBLOCK_PIXELS 256
#define AFBC_SUPERBLOCK_ALIGNMENT 128
#define AFBC_TH_BODY_START_ALIGNMENT 4096
/**
* DOC: overview
*
* This library provides helpers for drivers that don't subclass
* &drm_framebuffer and use &drm_gem_object for their backing storage.
*
* Drivers without additional needs to validate framebuffers can simply use
* drm_gem_fb_create() and everything is wired up automatically. Other drivers
* can use all parts independently.
*/
/**
* drm_gem_fb_get_obj() - Get GEM object backing the framebuffer
* @fb: Framebuffer
* @plane: Plane index
*
 * No additional reference is taken beyond the one that the &drm_framebuffer
* already holds.
*
* Returns:
* Pointer to &drm_gem_object for the given framebuffer and plane index or NULL
* if it does not exist.
*/
struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
unsigned int plane)
{
struct drm_device *dev = fb->dev;
if (drm_WARN_ON_ONCE(dev, plane >= ARRAY_SIZE(fb->obj)))
return NULL;
else if (drm_WARN_ON_ONCE(dev, !fb->obj[plane]))
return NULL;
return fb->obj[plane];
}
EXPORT_SYMBOL_GPL(drm_gem_fb_get_obj);
static int
drm_gem_fb_init(struct drm_device *dev,
struct drm_framebuffer *fb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **obj, unsigned int num_planes,
const struct drm_framebuffer_funcs *funcs)
{
unsigned int i;
int ret;
drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
for (i = 0; i < num_planes; i++)
fb->obj[i] = obj[i];
ret = drm_framebuffer_init(dev, fb, funcs);
if (ret)
drm_err(dev, "Failed to init framebuffer: %d\n", ret);
return ret;
}
/**
* drm_gem_fb_destroy - Free GEM backed framebuffer
* @fb: Framebuffer
*
* Frees a GEM backed framebuffer with its backing buffer(s) and the structure
* itself. Drivers can use this as their &drm_framebuffer_funcs->destroy
* callback.
*/
void drm_gem_fb_destroy(struct drm_framebuffer *fb)
{
unsigned int i;
for (i = 0; i < fb->format->num_planes; i++)
drm_gem_object_put(fb->obj[i]);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
EXPORT_SYMBOL(drm_gem_fb_destroy);
/**
* drm_gem_fb_create_handle - Create handle for GEM backed framebuffer
* @fb: Framebuffer
* @file: DRM file to register the handle for
* @handle: Pointer to return the created handle
*
* This function creates a handle for the GEM object backing the framebuffer.
* Drivers can use this as their &drm_framebuffer_funcs->create_handle
* callback. The GETFB IOCTL calls into this callback.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
unsigned int *handle)
{
return drm_gem_handle_create(file, fb->obj[0], handle);
}
EXPORT_SYMBOL(drm_gem_fb_create_handle);
/**
* drm_gem_fb_init_with_funcs() - Helper function for implementing
* &drm_mode_config_funcs.fb_create
* callback in cases when the driver
* allocates a subclass of
* struct drm_framebuffer
* @dev: DRM device
* @fb: framebuffer object
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
* @mode_cmd: Metadata from the userspace framebuffer creation request
* @funcs: vtable to be used for the new framebuffer object
*
* This function can be used to set &drm_framebuffer_funcs for drivers that need
* custom framebuffer callbacks. Use drm_gem_fb_create() if you don't need to
* change &drm_framebuffer_funcs. The function does buffer size validation.
 * The buffer size validation covers only the general case, though, so users
 * should make sure the checks are appropriate for their use case, or at least
 * do not conflict with it.
*
* Returns:
* Zero or a negative error code.
*/
int drm_gem_fb_init_with_funcs(struct drm_device *dev,
struct drm_framebuffer *fb,
struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs)
{
const struct drm_format_info *info;
struct drm_gem_object *objs[DRM_FORMAT_MAX_PLANES];
unsigned int i;
int ret;
info = drm_get_format_info(dev, mode_cmd);
if (!info) {
drm_dbg_kms(dev, "Failed to get FB format info\n");
return -EINVAL;
}
if (drm_drv_uses_atomic_modeset(dev) &&
!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
drm_dbg_kms(dev, "Unsupported pixel format %p4cc / modifier 0x%llx\n",
&mode_cmd->pixel_format, mode_cmd->modifier[0]);
return -EINVAL;
}
for (i = 0; i < info->num_planes; i++) {
unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
unsigned int min_size;
objs[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
if (!objs[i]) {
drm_dbg_kms(dev, "Failed to lookup GEM object\n");
ret = -ENOENT;
goto err_gem_object_put;
}
min_size = (height - 1) * mode_cmd->pitches[i]
+ drm_format_info_min_pitch(info, i, width)
+ mode_cmd->offsets[i];
if (objs[i]->size < min_size) {
drm_dbg_kms(dev,
"GEM object size (%zu) smaller than minimum size (%u) for plane %d\n",
objs[i]->size, min_size, i);
drm_gem_object_put(objs[i]);
ret = -EINVAL;
goto err_gem_object_put;
}
}
ret = drm_gem_fb_init(dev, fb, mode_cmd, objs, i, funcs);
if (ret)
goto err_gem_object_put;
return 0;
err_gem_object_put:
while (i > 0) {
--i;
drm_gem_object_put(objs[i]);
}
return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_fb_init_with_funcs);
/**
* drm_gem_fb_create_with_funcs() - Helper function for the
* &drm_mode_config_funcs.fb_create
* callback
* @dev: DRM device
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
* @mode_cmd: Metadata from the userspace framebuffer creation request
* @funcs: vtable to be used for the new framebuffer object
*
* This function can be used to set &drm_framebuffer_funcs for drivers that need
* custom framebuffer callbacks. Use drm_gem_fb_create() if you don't need to
* change &drm_framebuffer_funcs. The function does buffer size validation.
*
* Returns:
* Pointer to a &drm_framebuffer on success or an error pointer on failure.
*/
struct drm_framebuffer *
drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs)
{
struct drm_framebuffer *fb;
int ret;
fb = kzalloc(sizeof(*fb), GFP_KERNEL);
if (!fb)
return ERR_PTR(-ENOMEM);
ret = drm_gem_fb_init_with_funcs(dev, fb, file, mode_cmd, funcs);
if (ret) {
kfree(fb);
return ERR_PTR(ret);
}
return fb;
}
EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_funcs);
static const struct drm_framebuffer_funcs drm_gem_fb_funcs = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
};
/**
* drm_gem_fb_create() - Helper function for the
* &drm_mode_config_funcs.fb_create callback
* @dev: DRM device
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
* @mode_cmd: Metadata from the userspace framebuffer creation request
*
* This function creates a new framebuffer object described by
* &drm_mode_fb_cmd2. This description includes handles for the buffer(s)
* backing the framebuffer.
*
* If your hardware has special alignment or pitch requirements these should be
* checked before calling this function. The function does buffer size
* validation. Use drm_gem_fb_create_with_dirty() if you need framebuffer
* flushing.
*
* Drivers can use this as their &drm_mode_config_funcs.fb_create callback.
* The ADDFB2 IOCTL calls into this callback.
*
* Returns:
* Pointer to a &drm_framebuffer on success or an error pointer on failure.
*/
struct drm_framebuffer *
drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
return drm_gem_fb_create_with_funcs(dev, file, mode_cmd,
&drm_gem_fb_funcs);
}
EXPORT_SYMBOL_GPL(drm_gem_fb_create);
static const struct drm_framebuffer_funcs drm_gem_fb_funcs_dirtyfb = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
.dirty = drm_atomic_helper_dirtyfb,
};
/**
* drm_gem_fb_create_with_dirty() - Helper function for the
* &drm_mode_config_funcs.fb_create callback
* @dev: DRM device
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
* @mode_cmd: Metadata from the userspace framebuffer creation request
*
* This function creates a new framebuffer object described by
* &drm_mode_fb_cmd2. This description includes handles for the buffer(s)
* backing the framebuffer. drm_atomic_helper_dirtyfb() is used for the dirty
* callback giving framebuffer flushing through the atomic machinery. Use
* drm_gem_fb_create() if you don't need the dirty callback.
* The function does buffer size validation.
*
* Drivers should also call drm_plane_enable_fb_damage_clips() on all planes
* to enable userspace to use damage clips also with the ATOMIC IOCTL.
*
* Drivers can use this as their &drm_mode_config_funcs.fb_create callback.
* The ADDFB2 IOCTL calls into this callback.
*
* Returns:
* Pointer to a &drm_framebuffer on success or an error pointer on failure.
*/
struct drm_framebuffer *
drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
return drm_gem_fb_create_with_funcs(dev, file, mode_cmd,
&drm_gem_fb_funcs_dirtyfb);
}
EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty);
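
/*
 * Illustrative sketch (not part of the original file): wiring the GEM
 * framebuffer helper into &drm_mode_config_funcs for a driver built on the
 * atomic helpers. The variable name is hypothetical and the check/commit
 * entries assume drm_atomic_helper.h is available.
 */
static const struct drm_mode_config_funcs example_mode_config_funcs __maybe_unused = {
	.fb_create = drm_gem_fb_create_with_dirty,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};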
/**
* drm_gem_fb_vmap - maps all framebuffer BOs into kernel address space
* @fb: the framebuffer
* @map: returns the mapping's address for each BO
* @data: returns the data address for each BO, can be NULL
*
* This function maps all buffer objects of the given framebuffer into
* kernel address space and stores them in struct iosys_map. If the
* mapping operation fails for one of the BOs, the function unmaps the
* already established mappings automatically.
*
* Callers that want to access a BO's stored data should pass @data.
* The argument returns the addresses of the data stored in each BO. This
* is different from @map if the framebuffer's offsets field is non-zero.
*
 * Both @map and @data must refer to arrays with at least
* fb->format->num_planes elements.
*
* See drm_gem_fb_vunmap() for unmapping.
*
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
struct iosys_map *data)
{
struct drm_gem_object *obj;
unsigned int i;
int ret;
for (i = 0; i < fb->format->num_planes; ++i) {
obj = drm_gem_fb_get_obj(fb, i);
if (!obj) {
ret = -EINVAL;
goto err_drm_gem_vunmap;
}
ret = drm_gem_vmap_unlocked(obj, &map[i]);
if (ret)
goto err_drm_gem_vunmap;
}
if (data) {
for (i = 0; i < fb->format->num_planes; ++i) {
memcpy(&data[i], &map[i], sizeof(data[i]));
if (iosys_map_is_null(&data[i]))
continue;
iosys_map_incr(&data[i], fb->offsets[i]);
}
}
return 0;
err_drm_gem_vunmap:
while (i) {
--i;
obj = drm_gem_fb_get_obj(fb, i);
if (!obj)
continue;
drm_gem_vunmap_unlocked(obj, &map[i]);
}
return ret;
}
EXPORT_SYMBOL(drm_gem_fb_vmap);
/**
* drm_gem_fb_vunmap - unmaps framebuffer BOs from kernel address space
* @fb: the framebuffer
* @map: mapping addresses as returned by drm_gem_fb_vmap()
*
* This function unmaps all buffer objects of the given framebuffer.
*
* See drm_gem_fb_vmap() for more information.
*/
void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map)
{
unsigned int i = fb->format->num_planes;
struct drm_gem_object *obj;
while (i) {
--i;
obj = drm_gem_fb_get_obj(fb, i);
if (!obj)
continue;
if (iosys_map_is_null(&map[i]))
continue;
drm_gem_vunmap_unlocked(obj, &map[i]);
}
}
EXPORT_SYMBOL(drm_gem_fb_vunmap);
static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir,
unsigned int num_planes)
{
struct dma_buf_attachment *import_attach;
struct drm_gem_object *obj;
int ret;
while (num_planes) {
--num_planes;
obj = drm_gem_fb_get_obj(fb, num_planes);
if (!obj)
continue;
import_attach = obj->import_attach;
if (!import_attach)
continue;
ret = dma_buf_end_cpu_access(import_attach->dmabuf, dir);
if (ret)
			drm_err(fb->dev, "dma_buf_end_cpu_access(%u, %d) failed: %d\n",
				num_planes, dir, ret);
}
}
/**
* drm_gem_fb_begin_cpu_access - prepares GEM buffer objects for CPU access
* @fb: the framebuffer
* @dir: access mode
*
* Prepares a framebuffer's GEM buffer objects for CPU access. This function
* must be called before accessing the BO data within the kernel. For imported
* BOs, the function calls dma_buf_begin_cpu_access().
*
* See drm_gem_fb_end_cpu_access() for signalling the end of CPU access.
*
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir)
{
struct dma_buf_attachment *import_attach;
struct drm_gem_object *obj;
unsigned int i;
int ret;
for (i = 0; i < fb->format->num_planes; ++i) {
obj = drm_gem_fb_get_obj(fb, i);
if (!obj) {
ret = -EINVAL;
goto err___drm_gem_fb_end_cpu_access;
}
import_attach = obj->import_attach;
if (!import_attach)
continue;
ret = dma_buf_begin_cpu_access(import_attach->dmabuf, dir);
if (ret)
goto err___drm_gem_fb_end_cpu_access;
}
return 0;
err___drm_gem_fb_end_cpu_access:
__drm_gem_fb_end_cpu_access(fb, dir, i);
return ret;
}
EXPORT_SYMBOL(drm_gem_fb_begin_cpu_access);
/**
* drm_gem_fb_end_cpu_access - signals end of CPU access to GEM buffer objects
* @fb: the framebuffer
* @dir: access mode
*
* Signals the end of CPU access to the given framebuffer's GEM buffer objects. This
* function must be paired with a corresponding call to drm_gem_fb_begin_cpu_access().
* For imported BOs, the function calls dma_buf_end_cpu_access().
*
* See also drm_gem_fb_begin_cpu_access().
*/
void drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir)
{
__drm_gem_fb_end_cpu_access(fb, dir, fb->format->num_planes);
}
EXPORT_SYMBOL(drm_gem_fb_end_cpu_access);
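/*
 * A minimal sketch of the intended pairing (hypothetical driver code that
 * reads back framebuffer contents with the CPU):
 *
 *	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *
 *	... access the BO data, e.g. via drm_gem_fb_vmap() ...
 *
 *	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
 */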
// TODO Drop this function and replace by drm_format_info_bpp() once all
// DRM_FORMAT_* provide proper block info in drivers/gpu/drm/drm_fourcc.c
static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
const struct drm_format_info *info;
info = drm_get_format_info(dev, mode_cmd);
switch (info->format) {
case DRM_FORMAT_YUV420_8BIT:
return 12;
case DRM_FORMAT_YUV420_10BIT:
return 15;
case DRM_FORMAT_VUY101010:
return 30;
default:
return drm_format_info_bpp(info, 0);
}
}
static int drm_gem_afbc_min_size(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_afbc_framebuffer *afbc_fb)
{
__u32 n_blocks, w_alignment, h_alignment, hdr_alignment;
/* remove bpp when all users properly encode cpp in drm_format_info */
__u32 bpp;
switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) {
case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
afbc_fb->block_width = 16;
afbc_fb->block_height = 16;
break;
case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8:
afbc_fb->block_width = 32;
afbc_fb->block_height = 8;
break;
/* no user exists yet - fall through */
case AFBC_FORMAT_MOD_BLOCK_SIZE_64x4:
case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8_64x4:
default:
drm_dbg_kms(dev, "Invalid AFBC_FORMAT_MOD_BLOCK_SIZE: %lld.\n",
mode_cmd->modifier[0]
& AFBC_FORMAT_MOD_BLOCK_SIZE_MASK);
return -EINVAL;
}
/* tiled header afbc */
w_alignment = afbc_fb->block_width;
h_alignment = afbc_fb->block_height;
hdr_alignment = AFBC_HDR_ALIGN;
if (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_TILED) {
w_alignment *= AFBC_TH_LAYOUT_ALIGNMENT;
h_alignment *= AFBC_TH_LAYOUT_ALIGNMENT;
hdr_alignment = AFBC_TH_BODY_START_ALIGNMENT;
}
afbc_fb->aligned_width = ALIGN(mode_cmd->width, w_alignment);
afbc_fb->aligned_height = ALIGN(mode_cmd->height, h_alignment);
afbc_fb->offset = mode_cmd->offsets[0];
bpp = drm_gem_afbc_get_bpp(dev, mode_cmd);
if (!bpp) {
drm_dbg_kms(dev, "Invalid AFBC bpp value: %d\n", bpp);
return -EINVAL;
}
n_blocks = (afbc_fb->aligned_width * afbc_fb->aligned_height)
/ AFBC_SUPERBLOCK_PIXELS;
afbc_fb->afbc_size = ALIGN(n_blocks * AFBC_HEADER_SIZE, hdr_alignment);
afbc_fb->afbc_size += n_blocks * ALIGN(bpp * AFBC_SUPERBLOCK_PIXELS / 8,
AFBC_SUPERBLOCK_ALIGNMENT);
return 0;
}
/**
* drm_gem_fb_afbc_init() - Helper function for drivers using afbc to
* fill and validate all the afbc-specific
* struct drm_afbc_framebuffer members
*
 * @dev: DRM device
 * @mode_cmd: Metadata from the userspace framebuffer creation request
 * @afbc_fb: afbc-specific framebuffer to fill in and validate
*
* This function can be used by drivers which support afbc to complete
* the preparation of struct drm_afbc_framebuffer. It must be called after
* allocating the said struct and calling drm_gem_fb_init_with_funcs().
 * It is the caller's responsibility to put afbc_fb->base.obj objects in case
* the call is unsuccessful.
*
* Returns:
* Zero on success or a negative error value on failure.
*/
int drm_gem_fb_afbc_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_afbc_framebuffer *afbc_fb)
{
const struct drm_format_info *info;
struct drm_gem_object **objs;
int ret;
objs = afbc_fb->base.obj;
info = drm_get_format_info(dev, mode_cmd);
if (!info)
return -EINVAL;
ret = drm_gem_afbc_min_size(dev, mode_cmd, afbc_fb);
if (ret < 0)
return ret;
if (objs[0]->size < afbc_fb->afbc_size)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_fb_afbc_init);
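/*
 * A rough sketch of the call order described in the kerneldoc above, using
 * hypothetical driver names (example_fb_funcs); putting the GEM object
 * references on failure is left to the caller, as noted:
 *
 *	afbc_fb = kzalloc(sizeof(*afbc_fb), GFP_KERNEL);
 *	if (!afbc_fb)
 *		return ERR_PTR(-ENOMEM);
 *
 *	ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, file, mode_cmd,
 *					 &example_fb_funcs);
 *	if (ret)
 *		goto err_free;
 *
 *	ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb);
 *	if (ret)
 *		goto err_put_objects;
 */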
| linux-master | drivers/gpu/drm/drm_gem_framebuffer_helper.c |
// SPDX-License-Identifier: MIT
#include <linux/aperture.h>
#include <linux/platform_device.h>
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
/**
* DOC: overview
*
* A graphics device might be supported by different drivers, but only one
* driver can be active at any given time. Many systems load a generic
 * graphics driver, such as EFI-GOP or VESA, early during the boot process.
* During later boot stages, they replace the generic driver with a dedicated,
* hardware-specific driver. To take over the device the dedicated driver
* first has to remove the generic driver. DRM aperture functions manage
* ownership of DRM framebuffer memory and hand-over between drivers.
*
* DRM drivers should call drm_aperture_remove_conflicting_framebuffers()
* at the top of their probe function. The function removes any generic
* driver that is currently associated with the given framebuffer memory.
 * If the framebuffer is located at PCI BAR 0, the corresponding code could
 * look like the example given below.
*
* .. code-block:: c
*
* static const struct drm_driver example_driver = {
* ...
* };
*
* static int remove_conflicting_framebuffers(struct pci_dev *pdev)
* {
* resource_size_t base, size;
*
* base = pci_resource_start(pdev, 0);
* size = pci_resource_len(pdev, 0);
*
* return drm_aperture_remove_conflicting_framebuffers(base, size,
* &example_driver);
* }
*
* static int probe(struct pci_dev *pdev)
* {
* int ret;
*
* // Remove any generic drivers...
* ret = remove_conflicting_framebuffers(pdev);
* if (ret)
* return ret;
*
* // ... and initialize the hardware.
* ...
*
* drm_dev_register();
*
* return 0;
* }
*
* PCI device drivers should call
* drm_aperture_remove_conflicting_pci_framebuffers() and let it detect the
* framebuffer apertures automatically. Device drivers without knowledge of
* the framebuffer's location shall call drm_aperture_remove_framebuffers(),
 * which removes all drivers for known framebuffers.
*
* Drivers that are susceptible to being removed by other drivers, such as
* generic EFI or VESA drivers, have to register themselves as owners of their
* given framebuffer memory. Ownership of the framebuffer memory is achieved
* by calling devm_aperture_acquire_from_firmware(). On success, the driver
* is the owner of the framebuffer range. The function fails if the
* framebuffer is already owned by another driver. See below for an example.
*
* .. code-block:: c
*
* static int acquire_framebuffers(struct drm_device *dev, struct platform_device *pdev)
* {
 *		struct resource *mem;
 *		resource_size_t base, size;
*
* mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
* if (!mem)
* return -EINVAL;
* base = mem->start;
* size = resource_size(mem);
*
 *		return devm_aperture_acquire_from_firmware(dev, base, size);
* }
*
* static int probe(struct platform_device *pdev)
* {
* struct drm_device *dev;
* int ret;
*
* // ... Initialize the device...
* dev = devm_drm_dev_alloc();
* ...
*
* // ... and acquire ownership of the framebuffer.
* ret = acquire_framebuffers(dev, pdev);
* if (ret)
* return ret;
*
* drm_dev_register(dev, 0);
*
* return 0;
* }
*
* The generic driver is now subject to forced removal by other drivers. This
* only works for platform drivers that support hot unplug.
* When a driver calls drm_aperture_remove_conflicting_framebuffers() et al.
* for the registered framebuffer range, the aperture helpers call
* platform_device_unregister() and the generic driver unloads itself. It
* may not access the device's registers, framebuffer memory, ROM, etc
* afterwards.
*/
/**
* devm_aperture_acquire_from_firmware - Acquires ownership of a firmware framebuffer
* on behalf of a DRM driver.
* @dev: the DRM device to own the framebuffer memory
* @base: the framebuffer's byte offset in physical memory
* @size: the framebuffer size in bytes
*
* Installs the given device as the new owner of the framebuffer. The function
* expects the framebuffer to be provided by a platform device that has been
* set up by firmware. Firmware can be any generic interface, such as EFI,
* VESA, VGA, etc. If the native hardware driver takes over ownership of the
* framebuffer range, the firmware state gets lost. Aperture helpers will then
* unregister the platform device automatically. Acquired apertures are
* released automatically if the underlying device goes away.
*
* The function fails if the framebuffer range, or parts of it, is currently
* owned by another driver. To evict current owners, callers should use
* drm_aperture_remove_conflicting_framebuffers() et al. before calling this
* function. The function also fails if the given device is not a platform
* device.
*
* Returns:
* 0 on success, or a negative errno value otherwise.
*/
int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t base,
resource_size_t size)
{
struct platform_device *pdev;
if (drm_WARN_ON(dev, !dev_is_platform(dev->dev)))
return -EINVAL;
pdev = to_platform_device(dev->dev);
return devm_aperture_acquire_for_platform_device(pdev, base, size);
}
EXPORT_SYMBOL(devm_aperture_acquire_from_firmware);
/**
* drm_aperture_remove_conflicting_framebuffers - remove existing framebuffers in the given range
* @base: the aperture's base address in physical memory
* @size: aperture size in bytes
* @req_driver: requesting DRM driver
*
* This function removes graphics device drivers which use the memory range described by
* @base and @size.
*
* Returns:
* 0 on success, or a negative errno code otherwise
*/
int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size,
const struct drm_driver *req_driver)
{
return aperture_remove_conflicting_devices(base, size, req_driver->name);
}
EXPORT_SYMBOL(drm_aperture_remove_conflicting_framebuffers);
/**
* drm_aperture_remove_conflicting_pci_framebuffers - remove existing framebuffers for PCI devices
* @pdev: PCI device
* @req_driver: requesting DRM driver
*
* This function removes graphics device drivers using the memory range configured
* for any of @pdev's memory bars. The function assumes that a PCI device with
* shadowed ROM drives a primary display and so kicks out vga16fb.
*
* Returns:
* 0 on success, or a negative errno code otherwise
*/
int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
const struct drm_driver *req_driver)
{
return aperture_remove_conflicting_pci_devices(pdev, req_driver->name);
}
EXPORT_SYMBOL(drm_aperture_remove_conflicting_pci_framebuffers);
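/*
 * For the common PCI case described in the overview, the probe sequence
 * typically reduces to the following (hypothetical driver code):
 *
 *	static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 *	{
 *		int ret;
 *
 *		ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev,
 *								       &example_driver);
 *		if (ret)
 *			return ret;
 *
 *		// ... initialize the hardware and register the DRM device ...
 *
 *		return 0;
 *	}
 */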
| linux-master | drivers/gpu/drm/drm_aperture.c |
/*
* Copyright 2003 José Fonseca.
* Copyright 2003 Leif Delgass.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include "drm_internal.h"
#include "drm_legacy.h"
#ifdef CONFIG_DRM_LEGACY
/* List of devices hanging off drivers with stealth attach. */
static LIST_HEAD(legacy_dev_list);
static DEFINE_MUTEX(legacy_dev_list_lock);
#endif
static int drm_get_pci_domain(struct drm_device *dev)
{
#ifndef __alpha__
/* For historical reasons, drm_get_pci_domain() is busticated
* on most archs and has to remain so for userspace interface
* < 1.4, except on alpha which was right from the beginning
*/
if (dev->if_version < 0x10004)
return 0;
#endif /* __alpha__ */
return pci_domain_nr(to_pci_dev(dev->dev)->bus);
}
int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
master->unique = kasprintf(GFP_KERNEL, "pci:%04x:%02x:%02x.%d",
drm_get_pci_domain(dev),
pdev->bus->number,
PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn));
if (!master->unique)
return -ENOMEM;
master->unique_len = strlen(master->unique);
return 0;
}
#ifdef CONFIG_DRM_LEGACY
static int drm_legacy_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
(p->busnum & 0xff) != pdev->bus->number ||
p->devnum != PCI_SLOT(pdev->devfn) || p->funcnum != PCI_FUNC(pdev->devfn))
return -EINVAL;
p->irq = pdev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
p->irq);
return 0;
}
/**
* drm_legacy_irq_by_busid - Get interrupt from bus ID
* @dev: DRM device
* @data: IOCTL parameter pointing to a drm_irq_busid structure
* @file_priv: DRM file private.
*
* Finds the PCI device with the specified bus id and gets its IRQ number.
* This IOCTL is deprecated, and will now return EINVAL for any busid not equal
 * to that of the device this DRM instance is attached to.
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_irq_busid *p = data;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
	/* UMS was only ever supported on PCI devices. */
if (WARN_ON(!dev_is_pci(dev->dev)))
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EOPNOTSUPP;
return drm_legacy_pci_irq_by_busid(dev, p);
}
void drm_legacy_pci_agp_destroy(struct drm_device *dev)
{
if (dev->agp) {
arch_phys_wc_del(dev->agp->agp_mtrr);
drm_legacy_agp_clear(dev);
kfree(dev->agp);
dev->agp = NULL;
}
}
static void drm_legacy_pci_agp_init(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
if (pci_find_capability(to_pci_dev(dev->dev), PCI_CAP_ID_AGP))
dev->agp = drm_legacy_agp_init(dev);
if (dev->agp) {
dev->agp->agp_mtrr = arch_phys_wc_add(
dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size *
1024 * 1024);
}
}
}
static int drm_legacy_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
const struct drm_driver *driver)
{
struct drm_device *dev;
int ret;
DRM_DEBUG("\n");
dev = drm_dev_alloc(driver, &pdev->dev);
if (IS_ERR(dev))
return PTR_ERR(dev);
ret = pci_enable_device(pdev);
if (ret)
goto err_free;
#ifdef __alpha__
dev->hose = pdev->sysdata;
#endif
drm_legacy_pci_agp_init(dev);
ret = drm_dev_register(dev, ent->driver_data);
if (ret)
goto err_agp;
if (drm_core_check_feature(dev, DRIVER_LEGACY)) {
mutex_lock(&legacy_dev_list_lock);
list_add_tail(&dev->legacy_dev_list, &legacy_dev_list);
mutex_unlock(&legacy_dev_list_lock);
}
return 0;
err_agp:
drm_legacy_pci_agp_destroy(dev);
pci_disable_device(pdev);
err_free:
drm_dev_put(dev);
return ret;
}
/**
* drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver
* @driver: DRM device driver
* @pdriver: PCI device driver
*
* This is only used by legacy dri1 drivers and deprecated.
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_legacy_pci_init(const struct drm_driver *driver,
struct pci_driver *pdriver)
{
struct pci_dev *pdev = NULL;
const struct pci_device_id *pid;
int i;
DRM_DEBUG("\n");
if (WARN_ON(!(driver->driver_features & DRIVER_LEGACY)))
return -EINVAL;
/* If not using KMS, fall back to stealth mode manual scanning. */
for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
pid = &pdriver->id_table[i];
/* Loop around setting up a DRM device for each PCI device
* matching our ID and device class. If we had the internal
* function that pci_get_subsys and pci_get_class used, we'd
* be able to just pass pid in instead of doing a two-stage
* thing.
*/
pdev = NULL;
while ((pdev =
pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
pid->subdevice, pdev)) != NULL) {
if ((pdev->class & pid->class_mask) != pid->class)
continue;
/* stealth mode requires a manual probe */
pci_dev_get(pdev);
drm_legacy_get_pci_dev(pdev, pid, driver);
}
}
return 0;
}
EXPORT_SYMBOL(drm_legacy_pci_init);
/**
* drm_legacy_pci_exit - unregister shadow-attach legacy DRM driver
* @driver: DRM device driver
* @pdriver: PCI device driver
*
* Unregister a DRM driver shadow-attached through drm_legacy_pci_init(). This
* is deprecated and only used by dri1 drivers.
*/
void drm_legacy_pci_exit(const struct drm_driver *driver,
struct pci_driver *pdriver)
{
struct drm_device *dev, *tmp;
DRM_DEBUG("\n");
if (!(driver->driver_features & DRIVER_LEGACY)) {
WARN_ON(1);
} else {
mutex_lock(&legacy_dev_list_lock);
list_for_each_entry_safe(dev, tmp, &legacy_dev_list,
legacy_dev_list) {
if (dev->driver == driver) {
list_del(&dev->legacy_dev_list);
drm_put_dev(dev);
}
}
mutex_unlock(&legacy_dev_list_lock);
}
DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_legacy_pci_exit);
#endif
| linux-master | drivers/gpu/drm/drm_pci.c |
// SPDX-License-Identifier: MIT
/*
* Copyright (C) 2019 Google, Inc.
*
* Authors:
* Sean Paul <[email protected]>
*/
#include <linux/average.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_modeset_lock.h>
#include <drm/drm_print.h>
#include <drm/drm_self_refresh_helper.h>
/**
* DOC: overview
*
* This helper library provides an easy way for drivers to leverage the atomic
* framework to implement panel self refresh (SR) support. Drivers are
* responsible for initializing and cleaning up the SR helpers on load/unload
* (see &drm_self_refresh_helper_init/&drm_self_refresh_helper_cleanup).
* The connector is responsible for setting
* &drm_connector_state.self_refresh_aware to true at runtime if it is SR-aware
* (meaning it knows how to initiate self refresh on the panel).
*
* Once a crtc has enabled SR using &drm_self_refresh_helper_init, the
* helpers will monitor activity and call back into the driver to enable/disable
* SR as appropriate. The best way to think about this is that it's a DPMS
* on/off request with &drm_crtc_state.self_refresh_active set in crtc state
* that tells you to disable/enable SR on the panel instead of power-cycling it.
*
* During SR, drivers may choose to fully disable their crtc/encoder/bridge
* hardware (in which case no driver changes are necessary), or they can inspect
* &drm_crtc_state.self_refresh_active if they want to enter low power mode
* without full disable (in case full disable/enable is too slow).
*
* SR will be deactivated if there are any atomic updates affecting the
* pipe that is in SR mode. If a crtc is driving multiple connectors, all
* connectors must be SR aware and all will enter/exit SR mode at the same time.
*
* If the crtc and connector are SR aware, but the panel connected does not
* support it (or is otherwise unable to enter SR), the driver should fail
* atomic_check when &drm_crtc_state.self_refresh_active is true.
*/
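/*
 * A minimal sketch of how a driver might use this helper; all names are
 * placeholders. During CRTC setup:
 *
 *	ret = drm_self_refresh_helper_init(crtc);
 *	if (ret)
 *		return ret;
 *
 * In the CRTC disable path, the driver can then enter self refresh instead of
 * fully powering the panel off when the new state asks for it:
 *
 *	static void example_crtc_atomic_disable(struct drm_crtc *crtc,
 *						struct drm_atomic_state *state)
 *	{
 *		struct drm_crtc_state *new_state =
 *			drm_atomic_get_new_crtc_state(state, crtc);
 *
 *		if (new_state->self_refresh_active) {
 *			example_panel_enter_self_refresh(crtc);
 *			return;
 *		}
 *
 *		example_crtc_full_power_off(crtc);
 *	}
 */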
#define SELF_REFRESH_AVG_SEED_MS 200
DECLARE_EWMA(psr_time, 4, 4)
struct drm_self_refresh_data {
struct drm_crtc *crtc;
struct delayed_work entry_work;
struct mutex avg_mutex;
struct ewma_psr_time entry_avg_ms;
struct ewma_psr_time exit_avg_ms;
};
static void drm_self_refresh_helper_entry_work(struct work_struct *work)
{
struct drm_self_refresh_data *sr_data = container_of(
to_delayed_work(work),
struct drm_self_refresh_data, entry_work);
struct drm_crtc *crtc = sr_data->crtc;
struct drm_device *dev = crtc->dev;
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
struct drm_connector *conn;
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
int i, ret = 0;
drm_modeset_acquire_init(&ctx, 0);
state = drm_atomic_state_alloc(dev);
if (!state) {
ret = -ENOMEM;
goto out_drop_locks;
}
retry:
state->acquire_ctx = &ctx;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
goto out;
}
if (!crtc_state->enable)
goto out;
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret)
goto out;
for_each_new_connector_in_state(state, conn, conn_state, i) {
if (!conn_state->self_refresh_aware)
goto out;
}
crtc_state->active = false;
crtc_state->self_refresh_active = true;
ret = drm_atomic_commit(state);
if (ret)
goto out;
out:
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
ret = drm_modeset_backoff(&ctx);
if (!ret)
goto retry;
}
drm_atomic_state_put(state);
out_drop_locks:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
}
/**
* drm_self_refresh_helper_update_avg_times - Updates a crtc's SR time averages
* @state: the state which has just been applied to hardware
* @commit_time_ms: the amount of time in ms that this commit took to complete
* @new_self_refresh_mask: bitmask of crtc's that have self_refresh_active in
* new state
*
* Called after &drm_mode_config_funcs.atomic_commit_tail, this function will
* update the average entry/exit self refresh times on self refresh transitions.
* These averages will be used when calculating how long to delay before
* entering self refresh mode after activity.
*/
void
drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state,
unsigned int commit_time_ms,
unsigned int new_self_refresh_mask)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int i;
for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
bool new_self_refresh_active = new_self_refresh_mask & BIT(i);
struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;
struct ewma_psr_time *time;
if (old_crtc_state->self_refresh_active ==
new_self_refresh_active)
continue;
if (new_self_refresh_active)
time = &sr_data->entry_avg_ms;
else
time = &sr_data->exit_avg_ms;
mutex_lock(&sr_data->avg_mutex);
ewma_psr_time_add(time, commit_time_ms);
mutex_unlock(&sr_data->avg_mutex);
}
}
EXPORT_SYMBOL(drm_self_refresh_helper_update_avg_times);
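/*
 * For illustration, roughly what a caller is expected to do around the commit
 * tail; this mirrors, in simplified form, what the atomic helper core already
 * does around &drm_mode_config_helper_funcs.atomic_commit_tail, so most
 * drivers never call this themselves:
 *
 *	unsigned int mask = 0;
 *	ktime_t start = ktime_get();
 *
 *	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
 *		if (new_crtc_state->self_refresh_active)
 *			mask |= BIT(i);
 *
 *	... run the commit tail ...
 *
 *	drm_self_refresh_helper_update_avg_times(state,
 *				ktime_ms_delta(ktime_get(), start), mask);
 */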
/**
* drm_self_refresh_helper_alter_state - Alters the atomic state for SR exit
* @state: the state currently being checked
*
* Called at the end of atomic check. This function checks the state for flags
* incompatible with self refresh exit and changes them. This is a bit
* disingenuous since userspace is expecting one thing and we're giving it
* another. However in order to keep self refresh entirely hidden from
* userspace, this is required.
*
* At the end, we queue up the self refresh entry work so we can enter PSR after
* the desired delay.
*/
void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int i;
if (state->async_update || !state->allow_modeset) {
for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
if (crtc_state->self_refresh_active) {
state->async_update = false;
state->allow_modeset = true;
break;
}
}
}
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct drm_self_refresh_data *sr_data;
unsigned int delay;
/* Don't trigger the entry timer when we're already in SR */
if (crtc_state->self_refresh_active)
continue;
sr_data = crtc->self_refresh_data;
if (!sr_data)
continue;
mutex_lock(&sr_data->avg_mutex);
delay = (ewma_psr_time_read(&sr_data->entry_avg_ms) +
ewma_psr_time_read(&sr_data->exit_avg_ms)) * 2;
mutex_unlock(&sr_data->avg_mutex);
mod_delayed_work(system_wq, &sr_data->entry_work,
msecs_to_jiffies(delay));
}
}
EXPORT_SYMBOL(drm_self_refresh_helper_alter_state);
/**
* drm_self_refresh_helper_init - Initializes self refresh helpers for a crtc
* @crtc: the crtc which supports self refresh supported displays
*
* Returns zero if successful or -errno on failure
*/
int drm_self_refresh_helper_init(struct drm_crtc *crtc)
{
struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;
/* Helper is already initialized */
if (WARN_ON(sr_data))
return -EINVAL;
sr_data = kzalloc(sizeof(*sr_data), GFP_KERNEL);
if (!sr_data)
return -ENOMEM;
INIT_DELAYED_WORK(&sr_data->entry_work,
drm_self_refresh_helper_entry_work);
sr_data->crtc = crtc;
mutex_init(&sr_data->avg_mutex);
ewma_psr_time_init(&sr_data->entry_avg_ms);
ewma_psr_time_init(&sr_data->exit_avg_ms);
/*
* Seed the averages so they're non-zero (and sufficiently large
* for even poorly performing panels). As time goes on, this will be
* averaged out and the values will trend to their true value.
*/
ewma_psr_time_add(&sr_data->entry_avg_ms, SELF_REFRESH_AVG_SEED_MS);
ewma_psr_time_add(&sr_data->exit_avg_ms, SELF_REFRESH_AVG_SEED_MS);
crtc->self_refresh_data = sr_data;
return 0;
}
EXPORT_SYMBOL(drm_self_refresh_helper_init);
/**
* drm_self_refresh_helper_cleanup - Cleans up self refresh helpers for a crtc
* @crtc: the crtc to cleanup
*/
void drm_self_refresh_helper_cleanup(struct drm_crtc *crtc)
{
struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;
/* Helper is already uninitialized */
if (!sr_data)
return;
crtc->self_refresh_data = NULL;
cancel_delayed_work_sync(&sr_data->entry_work);
kfree(sr_data);
}
EXPORT_SYMBOL(drm_self_refresh_helper_cleanup);
| linux-master | drivers/gpu/drm/drm_self_refresh_helper.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
drm_edid_load.c: use a built-in EDID data set or load it via the firmware
interface
Copyright (C) 2012 Carsten Emde <[email protected]>
*/
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <drm/drm_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
static char edid_firmware[PATH_MAX];
module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
"from built-in data or /lib/firmware instead. ");
/* Use only for backward compatibility with drm_kms_helper.edid_firmware */
int __drm_set_edid_firmware_path(const char *path)
{
scnprintf(edid_firmware, sizeof(edid_firmware), "%s", path);
return 0;
}
EXPORT_SYMBOL(__drm_set_edid_firmware_path);
/* Use only for backward compatibility with drm_kms_helper.edid_firmware */
int __drm_get_edid_firmware_path(char *buf, size_t bufsize)
{
return scnprintf(buf, bufsize, "%s", edid_firmware);
}
EXPORT_SYMBOL(__drm_get_edid_firmware_path);
#define GENERIC_EDIDS 6
static const char * const generic_edid_name[GENERIC_EDIDS] = {
"edid/800x600.bin",
"edid/1024x768.bin",
"edid/1280x1024.bin",
"edid/1600x1200.bin",
"edid/1680x1050.bin",
"edid/1920x1080.bin",
};
static const u8 generic_edid[GENERIC_EDIDS][128] = {
{
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x05, 0x16, 0x01, 0x03, 0x6d, 0x1b, 0x14, 0x78,
0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
0x20, 0x50, 0x54, 0x01, 0x00, 0x00, 0x45, 0x40,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0xa0, 0x0f,
0x20, 0x00, 0x31, 0x58, 0x1c, 0x20, 0x28, 0x80,
0x14, 0x00, 0x15, 0xd0, 0x10, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
0x3d, 0x24, 0x26, 0x05, 0x00, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x53,
0x56, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0xc2,
},
{
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x05, 0x16, 0x01, 0x03, 0x6d, 0x23, 0x1a, 0x78,
0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
0x20, 0x50, 0x54, 0x00, 0x08, 0x00, 0x61, 0x40,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x64, 0x19,
0x00, 0x40, 0x41, 0x00, 0x26, 0x30, 0x08, 0x90,
0x36, 0x00, 0x63, 0x0a, 0x11, 0x00, 0x00, 0x18,
0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
0x3d, 0x2f, 0x31, 0x07, 0x00, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x58,
0x47, 0x41, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x55,
},
{
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x05, 0x16, 0x01, 0x03, 0x6d, 0x2c, 0x23, 0x78,
0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0x81, 0x80,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x30, 0x2a,
0x00, 0x98, 0x51, 0x00, 0x2a, 0x40, 0x30, 0x70,
0x13, 0x00, 0xbc, 0x63, 0x11, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
0x3d, 0x3e, 0x40, 0x0b, 0x00, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x53,
0x58, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0xa0,
},
{
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x05, 0x16, 0x01, 0x03, 0x6d, 0x37, 0x29, 0x78,
0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xa9, 0x40,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x48, 0x3f,
0x40, 0x30, 0x62, 0xb0, 0x32, 0x40, 0x40, 0xc0,
0x13, 0x00, 0x2b, 0xa0, 0x21, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
0x3d, 0x4a, 0x4c, 0x11, 0x00, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x55,
0x58, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0x9d,
},
{
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x05, 0x16, 0x01, 0x03, 0x6d, 0x2b, 0x1b, 0x78,
0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xb3, 0x00,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x21, 0x39,
0x90, 0x30, 0x62, 0x1a, 0x27, 0x40, 0x68, 0xb0,
0x36, 0x00, 0xb5, 0x11, 0x11, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
0x3d, 0x40, 0x42, 0x0f, 0x00, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x57,
0x53, 0x58, 0x47, 0x41, 0x0a, 0x20, 0x00, 0x26,
},
{
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x05, 0x16, 0x01, 0x03, 0x6d, 0x32, 0x1c, 0x78,
0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xd1, 0xc0,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a,
0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
0x45, 0x00, 0xf4, 0x19, 0x11, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
0x3d, 0x42, 0x44, 0x0f, 0x00, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x46,
0x48, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x05,
},
};
static const struct drm_edid *edid_load(struct drm_connector *connector, const char *name)
{
const struct firmware *fw = NULL;
const u8 *fwdata;
const struct drm_edid *drm_edid;
int fwsize, builtin;
builtin = match_string(generic_edid_name, GENERIC_EDIDS, name);
if (builtin >= 0) {
fwdata = generic_edid[builtin];
fwsize = sizeof(generic_edid[builtin]);
} else {
int err;
err = request_firmware(&fw, name, connector->dev->dev);
if (err) {
drm_err(connector->dev,
"[CONNECTOR:%d:%s] Requesting EDID firmware \"%s\" failed (err=%d)\n",
connector->base.id, connector->name,
name, err);
return ERR_PTR(err);
}
fwdata = fw->data;
fwsize = fw->size;
}
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Loaded %s firmware EDID \"%s\"\n",
connector->base.id, connector->name,
builtin >= 0 ? "built-in" : "external", name);
drm_edid = drm_edid_alloc(fwdata, fwsize);
if (!drm_edid_valid(drm_edid)) {
drm_err(connector->dev, "Invalid firmware EDID \"%s\"\n", name);
drm_edid_free(drm_edid);
drm_edid = ERR_PTR(-EINVAL);
}
release_firmware(fw);
return drm_edid;
}
const struct drm_edid *drm_edid_load_firmware(struct drm_connector *connector)
{
char *edidname, *last, *colon, *fwstr, *edidstr, *fallback = NULL;
const struct drm_edid *drm_edid;
if (edid_firmware[0] == '\0')
return ERR_PTR(-ENOENT);
/*
* If there are multiple edid files specified and separated
* by commas, search through the list looking for one that
* matches the connector.
*
* If there's one or more that doesn't specify a connector, keep
	 * the last one found as a fallback.
*/
fwstr = kstrdup(edid_firmware, GFP_KERNEL);
if (!fwstr)
return ERR_PTR(-ENOMEM);
edidstr = fwstr;
while ((edidname = strsep(&edidstr, ","))) {
colon = strchr(edidname, ':');
if (colon != NULL) {
if (strncmp(connector->name, edidname, colon - edidname))
continue;
edidname = colon + 1;
break;
}
if (*edidname != '\0') /* corner case: multiple ',' */
fallback = edidname;
}
if (!edidname) {
if (!fallback) {
kfree(fwstr);
return ERR_PTR(-ENOENT);
}
edidname = fallback;
}
last = edidname + strlen(edidname) - 1;
if (*last == '\n')
*last = '\0';
drm_edid = edid_load(connector, edidname);
kfree(fwstr);
return drm_edid;
}
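/*
 * For reference, the parsing above accepts a comma-separated list of
 * [<connector>:]<path> entries. A (hypothetical) invocation could look like:
 *
 *	drm.edid_firmware=DP-1:edid/1280x1024.bin,edid/1024x768.bin
 *
 * where the first entry applies only to the connector named DP-1 and the
 * second one, having no connector prefix, is kept as the fallback for all
 * other connectors.
 */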
| linux-master | drivers/gpu/drm/drm_edid_load.c |
/*
* Copyright (c) 2006-2008 Intel Corporation
* Copyright (c) 2007 Dave Airlie <[email protected]>
*
* DRM core CRTC related functions
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*
* Authors:
* Keith Packard
* Eric Anholt <[email protected]>
* Dave Airlie <[email protected]>
* Jesse Barnes <[email protected]>
*/
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <drm/drm_bridge.h>
#include <drm/drm_client.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_sysfs.h>
#include "drm_crtc_helper_internal.h"
/**
* DOC: output probing helper overview
*
* This library provides some helper code for output probing. It provides an
* implementation of the core &drm_connector_funcs.fill_modes interface with
* drm_helper_probe_single_connector_modes().
*
* It also provides support for polling connectors with a work item and for
* generic hotplug interrupt handling where the driver doesn't or cannot keep
* track of a per-connector hpd interrupt.
*
* This helper library can be used independently of the modeset helper library.
* Drivers can also overwrite different parts e.g. use their own hotplug
* handling code to avoid probing unrelated outputs.
*
* The probe helpers share the function table structures with other display
* helper libraries. See &struct drm_connector_helper_funcs for the details.
*/
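/*
 * A minimal sketch of how a driver typically wires these helpers up; the
 * callback and structure names are placeholders:
 *
 *	static const struct drm_connector_helper_funcs example_connector_helper_funcs = {
 *		.get_modes = example_connector_get_modes,
 *		.detect_ctx = example_connector_detect_ctx,
 *	};
 *
 *	static const struct drm_connector_funcs example_connector_funcs = {
 *		.fill_modes = drm_helper_probe_single_connector_modes,
 *		...
 *	};
 *
 *	drm_connector_helper_add(connector, &example_connector_helper_funcs);
 *	drm_kms_helper_poll_init(dev);
 */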
static bool drm_kms_helper_poll = true;
module_param_named(poll, drm_kms_helper_poll, bool, 0600);
static enum drm_mode_status
drm_mode_validate_flag(const struct drm_display_mode *mode,
int flags)
{
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
!(flags & DRM_MODE_FLAG_INTERLACE))
return MODE_NO_INTERLACE;
if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
!(flags & DRM_MODE_FLAG_DBLSCAN))
return MODE_NO_DBLESCAN;
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
!(flags & DRM_MODE_FLAG_3D_MASK))
return MODE_NO_STEREO;
return MODE_OK;
}
static int
drm_mode_validate_pipeline(struct drm_display_mode *mode,
struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
enum drm_mode_status *status)
{
struct drm_device *dev = connector->dev;
struct drm_encoder *encoder;
int ret;
/* Step 1: Validate against connector */
ret = drm_connector_mode_valid(connector, mode, ctx, status);
if (ret || *status != MODE_OK)
return ret;
/* Step 2: Validate against encoders and crtcs */
drm_connector_for_each_possible_encoder(connector, encoder) {
struct drm_bridge *bridge;
struct drm_crtc *crtc;
*status = drm_encoder_mode_valid(encoder, mode);
if (*status != MODE_OK) {
/* No point in continuing for crtc check as this encoder
* will not accept the mode anyway. If all encoders
			 * reject the mode then, at exit, *status will not be
* MODE_OK. */
continue;
}
bridge = drm_bridge_chain_get_first_bridge(encoder);
*status = drm_bridge_chain_mode_valid(bridge,
&connector->display_info,
mode);
if (*status != MODE_OK) {
/* There is also no point in continuing for crtc check
* here. */
continue;
}
drm_for_each_crtc(crtc, dev) {
if (!drm_encoder_crtc_ok(encoder, crtc))
continue;
*status = drm_crtc_mode_valid(crtc, mode);
if (*status == MODE_OK) {
/* If we get to this point there is at least
* one combination of encoder+crtc that works
* for this mode. Lets return now. */
return 0;
}
}
}
return 0;
}
static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
{
struct drm_cmdline_mode *cmdline_mode;
struct drm_display_mode *mode;
cmdline_mode = &connector->cmdline_mode;
if (!cmdline_mode->specified)
return 0;
/* Only add a GTF mode if we find no matching probed modes */
list_for_each_entry(mode, &connector->probed_modes, head) {
if (mode->hdisplay != cmdline_mode->xres ||
mode->vdisplay != cmdline_mode->yres)
continue;
if (cmdline_mode->refresh_specified) {
			/* The probed mode's vrefresh is not set until later */
if (drm_mode_vrefresh(mode) != cmdline_mode->refresh)
continue;
}
/* Mark the matching mode as being preferred by the user */
mode->type |= DRM_MODE_TYPE_USERDEF;
return 0;
}
mode = drm_mode_create_from_cmdline_mode(connector->dev,
cmdline_mode);
if (mode == NULL)
return 0;
drm_mode_probed_add(connector, mode);
return 1;
}
enum drm_mode_status drm_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
if (!crtc_funcs || !crtc_funcs->mode_valid)
return MODE_OK;
return crtc_funcs->mode_valid(crtc, mode);
}
enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
const struct drm_encoder_helper_funcs *encoder_funcs =
encoder->helper_private;
if (!encoder_funcs || !encoder_funcs->mode_valid)
return MODE_OK;
return encoder_funcs->mode_valid(encoder, mode);
}
int
drm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode,
struct drm_modeset_acquire_ctx *ctx,
enum drm_mode_status *status)
{
const struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
int ret = 0;
if (!connector_funcs)
*status = MODE_OK;
else if (connector_funcs->mode_valid_ctx)
ret = connector_funcs->mode_valid_ctx(connector, mode, ctx,
status);
else if (connector_funcs->mode_valid)
*status = connector_funcs->mode_valid(connector, mode);
else
*status = MODE_OK;
return ret;
}
static void drm_kms_helper_disable_hpd(struct drm_device *dev)
{
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
const struct drm_connector_helper_funcs *funcs =
connector->helper_private;
if (funcs && funcs->disable_hpd)
funcs->disable_hpd(connector);
}
drm_connector_list_iter_end(&conn_iter);
}
static bool drm_kms_helper_enable_hpd(struct drm_device *dev)
{
bool poll = false;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
const struct drm_connector_helper_funcs *funcs =
connector->helper_private;
if (funcs && funcs->enable_hpd)
funcs->enable_hpd(connector);
if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT))
poll = true;
}
drm_connector_list_iter_end(&conn_iter);
return poll;
}
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
static void reschedule_output_poll_work(struct drm_device *dev)
{
unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
if (dev->mode_config.delayed_event)
/*
* FIXME:
*
* Use short (1s) delay to handle the initial delayed event.
* This delay should not be needed, but Optimus/nouveau will
* fail in a mysterious way if the delayed event is handled as
* soon as possible like it is done in
* drm_helper_probe_single_connector_modes() in case the poll
* was enabled before.
*/
delay = HZ;
schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
}
/**
* drm_kms_helper_poll_enable - re-enable output polling.
* @dev: drm_device
*
* This function re-enables the output polling work, after it has been
* temporarily disabled using drm_kms_helper_poll_disable(), for example over
* suspend/resume.
*
* Drivers can call this helper from their device resume implementation. It is
* not an error to call this even when output polling isn't enabled.
*
* Note that calls to enable and disable polling must be strictly ordered, which
 * is automatically the case when they're only called from suspend/resume
* callbacks.
*/
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll ||
dev->mode_config.poll_running)
return;
if (drm_kms_helper_enable_hpd(dev) ||
dev->mode_config.delayed_event)
reschedule_output_poll_work(dev);
dev->mode_config.poll_running = true;
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
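/*
 * A short sketch of the suspend/resume pairing mentioned above, with
 * hypothetical driver callbacks:
 *
 *	static int example_suspend(struct device *dev)
 *	{
 *		struct drm_device *drm = dev_get_drvdata(dev);
 *
 *		drm_kms_helper_poll_disable(drm);
 *		// ... suspend the hardware ...
 *		return 0;
 *	}
 *
 *	static int example_resume(struct device *dev)
 *	{
 *		struct drm_device *drm = dev_get_drvdata(dev);
 *
 *		// ... resume the hardware ...
 *		drm_kms_helper_poll_enable(drm);
 *		return 0;
 *	}
 */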
/**
* drm_kms_helper_poll_reschedule - reschedule the output polling work
* @dev: drm_device
*
* This function reschedules the output polling work, after polling for a
* connector has been enabled.
*
* Drivers must call this helper after enabling polling for a connector by
* setting %DRM_CONNECTOR_POLL_CONNECT / %DRM_CONNECTOR_POLL_DISCONNECT flags
 * in drm_connector::polled. Note that disabling polling by clearing these
 * flags for a connector will stop the output polling work automatically if
 * polling is disabled for all other connectors as well.
*
* The function can be called only after polling has been enabled by calling
* drm_kms_helper_poll_init() / drm_kms_helper_poll_enable().
*/
void drm_kms_helper_poll_reschedule(struct drm_device *dev)
{
if (dev->mode_config.poll_running)
reschedule_output_poll_work(dev);
}
EXPORT_SYMBOL(drm_kms_helper_poll_reschedule);
static enum drm_connector_status
drm_helper_probe_detect_ctx(struct drm_connector *connector, bool force)
{
const struct drm_connector_helper_funcs *funcs = connector->helper_private;
struct drm_modeset_acquire_ctx ctx;
int ret;
drm_modeset_acquire_init(&ctx, 0);
retry:
ret = drm_modeset_lock(&connector->dev->mode_config.connection_mutex, &ctx);
if (!ret) {
if (funcs->detect_ctx)
ret = funcs->detect_ctx(connector, &ctx, force);
else if (connector->funcs->detect)
ret = connector->funcs->detect(connector, force);
else
ret = connector_status_connected;
}
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry;
}
if (WARN_ON(ret < 0))
ret = connector_status_unknown;
if (ret != connector->status)
connector->epoch_counter += 1;
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
}
/**
* drm_helper_probe_detect - probe connector status
* @connector: connector to probe
* @ctx: acquire_ctx, or NULL to let this function handle locking.
* @force: Whether destructive probe operations should be performed.
*
* This function calls the detect callbacks of the connector.
* This function returns &drm_connector_status, or
* if @ctx is set, it might also return -EDEADLK.
*/
int
drm_helper_probe_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
const struct drm_connector_helper_funcs *funcs = connector->helper_private;
struct drm_device *dev = connector->dev;
int ret;
if (!ctx)
return drm_helper_probe_detect_ctx(connector, force);
ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
if (ret)
return ret;
if (funcs->detect_ctx)
ret = funcs->detect_ctx(connector, ctx, force);
else if (connector->funcs->detect)
ret = connector->funcs->detect(connector, force);
else
ret = connector_status_connected;
if (ret != connector->status)
connector->epoch_counter += 1;
return ret;
}
EXPORT_SYMBOL(drm_helper_probe_detect);
static int drm_helper_probe_get_modes(struct drm_connector *connector)
{
const struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
int count;
count = connector_funcs->get_modes(connector);
/*
* Fallback for when DDC probe failed in drm_get_edid() and thus skipped
* override/firmware EDID.
*/
if (count == 0 && connector->status == connector_status_connected)
count = drm_edid_override_connector_update(connector);
return count;
}
static int __drm_helper_update_and_validate(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
int mode_flags = 0;
int ret;
drm_connector_list_update(connector);
if (connector->interlace_allowed)
mode_flags |= DRM_MODE_FLAG_INTERLACE;
if (connector->doublescan_allowed)
mode_flags |= DRM_MODE_FLAG_DBLSCAN;
if (connector->stereo_allowed)
mode_flags |= DRM_MODE_FLAG_3D_MASK;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->status != MODE_OK)
continue;
mode->status = drm_mode_validate_driver(dev, mode);
if (mode->status != MODE_OK)
continue;
mode->status = drm_mode_validate_size(mode, maxX, maxY);
if (mode->status != MODE_OK)
continue;
mode->status = drm_mode_validate_flag(mode, mode_flags);
if (mode->status != MODE_OK)
continue;
ret = drm_mode_validate_pipeline(mode, connector, ctx,
&mode->status);
if (ret) {
drm_dbg_kms(dev,
"drm_mode_validate_pipeline failed: %d\n",
ret);
if (drm_WARN_ON_ONCE(dev, ret != -EDEADLK))
mode->status = MODE_ERROR;
else
return -EDEADLK;
}
if (mode->status != MODE_OK)
continue;
mode->status = drm_mode_validate_ycbcr420(mode, connector);
}
return 0;
}
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
* @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* Based on the helper callbacks implemented by @connector in struct
* &drm_connector_helper_funcs try to detect all valid modes. Modes will first
* be added to the connector's probed_modes list, then culled (based on validity
* and the @maxX, @maxY parameters) and put into the normal modes list.
*
* Intended to be used as a generic implementation of the
* &drm_connector_funcs.fill_modes() vfunc for drivers that use the CRTC helpers
* for output mode filtering and detection.
*
* The basic procedure is as follows
*
* 1. All modes currently on the connector's modes list are marked as stale
*
* 2. New modes are added to the connector's probed_modes list with
* drm_mode_probed_add(). New modes start their life with status as OK.
* Modes are added from a single source using the following priority order.
*
* - &drm_connector_helper_funcs.get_modes vfunc
* - if the connector status is connector_status_connected, standard
* VESA DMT modes up to 1024x768 are automatically added
* (drm_add_modes_noedid())
*
* Finally modes specified via the kernel command line (video=...) are
* added in addition to what the earlier probes produced
* (drm_helper_probe_add_cmdline_mode()). These modes are generated
* using the VESA GTF/CVT formulas.
*
* 3. Modes are moved from the probed_modes list to the modes list. Potential
* duplicates are merged together (see drm_connector_list_update()).
* After this step the probed_modes list will be empty again.
*
* 4. Any non-stale mode on the modes list then undergoes validation
*
* - drm_mode_validate_basic() performs basic sanity checks
* - drm_mode_validate_size() filters out modes larger than @maxX and @maxY
* (if specified)
* - drm_mode_validate_flag() checks the modes against basic connector
 *      capabilities (interlace_allowed, doublescan_allowed, stereo_allowed)
* - the optional &drm_connector_helper_funcs.mode_valid or
* &drm_connector_helper_funcs.mode_valid_ctx helpers can perform driver
* and/or sink specific checks
* - the optional &drm_crtc_helper_funcs.mode_valid,
* &drm_bridge_funcs.mode_valid and &drm_encoder_helper_funcs.mode_valid
* helpers can perform driver and/or source specific checks which are also
* enforced by the modeset/atomic helpers
*
* 5. Any mode whose status is not OK is pruned from the connector's modes list,
* accompanied by a debug message indicating the reason for the mode's
* rejection (see drm_mode_prune_invalid()).
*
* Returns:
* The number of modes found on @connector.
*/
int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
int count = 0, ret;
enum drm_connector_status old_status;
struct drm_modeset_acquire_ctx ctx;
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
drm_modeset_acquire_init(&ctx, 0);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
connector->name);
retry:
ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry;
} else
WARN_ON(ret < 0);
/* set all old modes to the stale state */
list_for_each_entry(mode, &connector->modes, head)
mode->status = MODE_STALE;
old_status = connector->status;
if (connector->force) {
if (connector->force == DRM_FORCE_ON ||
connector->force == DRM_FORCE_ON_DIGITAL)
connector->status = connector_status_connected;
else
connector->status = connector_status_disconnected;
if (connector->funcs->force)
connector->funcs->force(connector);
} else {
ret = drm_helper_probe_detect(connector, &ctx, true);
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry;
} else if (WARN(ret < 0, "Invalid return value %i for connector detection\n", ret))
ret = connector_status_unknown;
connector->status = ret;
}
/*
* Normally either the driver's hpd code or the poll loop should
* pick up any changes and fire the hotplug event. But if
* userspace sneaks in a probe, we might miss a change. Hence
* check here, and if anything changed start the hotplug code.
*/
if (old_status != connector->status) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
connector->name,
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->status));
/*
* The hotplug event code might call into the fb
* helpers, and so expects that we do not hold any
* locks. Fire up the poll struct instead, it will
* disable itself again.
*/
dev->mode_config.delayed_event = true;
if (dev->mode_config.poll_enabled)
mod_delayed_work(system_wq,
&dev->mode_config.output_poll_work,
0);
}
/* Re-enable polling in case the global poll config changed. */
drm_kms_helper_poll_enable(dev);
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
connector->base.id, connector->name);
drm_connector_update_edid_property(connector, NULL);
drm_mode_prune_invalid(dev, &connector->modes, false);
goto exit;
}
count = drm_helper_probe_get_modes(connector);
if (count == 0 && (connector->status == connector_status_connected ||
connector->status == connector_status_unknown)) {
count = drm_add_modes_noedid(connector, 1024, 768);
/*
* Section 4.2.2.6 (EDID Corruption Detection) of the DP 1.4a
* Link CTS specifies that 640x480 (the official "failsafe"
* mode) needs to be the default if there's no EDID.
*/
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
drm_set_preferred_mode(connector, 640, 480);
}
count += drm_helper_probe_add_cmdline_mode(connector);
if (count != 0) {
ret = __drm_helper_update_and_validate(connector, maxX, maxY, &ctx);
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry;
}
}
drm_mode_prune_invalid(dev, &connector->modes, true);
/*
* Displayport spec section 5.2.1.2 ("Video Timing Format") says that
* all detachable sinks shall support 640x480 @60Hz as a fail safe
* mode. If all modes were pruned, perhaps because they need more
* lanes or a higher pixel clock than available, at least try to add
* in 640x480.
*/
if (list_empty(&connector->modes) &&
connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
count = drm_add_modes_noedid(connector, 640, 480);
ret = __drm_helper_update_and_validate(connector, maxX, maxY, &ctx);
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry;
}
drm_mode_prune_invalid(dev, &connector->modes, true);
}
exit:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
if (list_empty(&connector->modes))
return 0;
drm_mode_sort(&connector->modes);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
connector->name);
list_for_each_entry(mode, &connector->modes, head) {
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
drm_mode_debug_printmodeline(mode);
}
return count;
}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
/**
* drm_kms_helper_hotplug_event - fire off KMS hotplug events
* @dev: drm_device whose connector state changed
*
* This function fires off the uevent for userspace and also calls the
* output_poll_changed function, which is most commonly used to inform the fbdev
* emulation code and allow it to update the fbcon output configuration.
*
* Drivers should call this from their hotplug handling code when a change is
* detected. Note that this function does not do any output detection of its
* own, like drm_helper_hpd_irq_event() does - this is assumed to be done by the
* driver already.
*
* This function must be called from process context with no mode
* setting locks held.
*
* If only a single connector has changed, consider calling
* drm_kms_helper_connector_hotplug_event() instead.
*/
void drm_kms_helper_hotplug_event(struct drm_device *dev)
{
/* send a uevent + call fbdev */
drm_sysfs_hotplug_event(dev);
if (dev->mode_config.funcs->output_poll_changed)
dev->mode_config.funcs->output_poll_changed(dev);
drm_client_dev_hotplug(dev);
}
EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
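/*
 * A minimal sketch of a (hypothetical) threaded hotplug interrupt handler
 * that has already done its own connector detection and only needs to notify
 * userspace and the fbdev emulation:
 *
 *	static irqreturn_t example_hpd_irq_thread(int irq, void *arg)
 *	{
 *		struct drm_device *dev = arg;
 *
 *		// ... driver-specific detection has already run ...
 *		drm_kms_helper_hotplug_event(dev);
 *
 *		return IRQ_HANDLED;
 *	}
 */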
/**
* drm_kms_helper_connector_hotplug_event - fire off a KMS connector hotplug event
* @connector: drm_connector which has changed
*
* This is the same as drm_kms_helper_hotplug_event(), except it fires a more
* fine-grained uevent for a single connector.
*/
void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
/* send a uevent + call fbdev */
drm_sysfs_connector_hotplug_event(connector);
if (dev->mode_config.funcs->output_poll_changed)
dev->mode_config.funcs->output_poll_changed(dev);
drm_client_dev_hotplug(dev);
}
EXPORT_SYMBOL(drm_kms_helper_connector_hotplug_event);
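/*
 * A minimal usage sketch (hypothetical foo_* names, not taken from this
 * file): a driver's hotplug interrupt thread forwards the event to these
 * helpers after its own detection has run.
 *
 *	static irqreturn_t foo_hpd_irq_thread(int irq, void *arg)
 *	{
 *		struct drm_device *dev = arg;
 *
 *		drm_kms_helper_hotplug_event(dev);
 *
 *		return IRQ_HANDLED;
 *	}
 *
 * If the interrupt identifies a single connector, calling
 * drm_kms_helper_connector_hotplug_event(connector) instead generates the
 * finer-grained uevent.
 */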
static void output_poll_execute(struct work_struct *work)
{
struct delayed_work *delayed_work = to_delayed_work(work);
	struct drm_device *dev = container_of(delayed_work, struct drm_device,
					      mode_config.output_poll_work);
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
enum drm_connector_status old_status;
bool repoll = false, changed;
u64 old_epoch_counter;
if (!dev->mode_config.poll_enabled)
return;
/* Pick up any changes detected by the probe functions. */
changed = dev->mode_config.delayed_event;
dev->mode_config.delayed_event = false;
if (!drm_kms_helper_poll && dev->mode_config.poll_running) {
drm_kms_helper_disable_hpd(dev);
dev->mode_config.poll_running = false;
goto out;
}
if (!mutex_trylock(&dev->mode_config.mutex)) {
repoll = true;
goto out;
}
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
/* Ignore forced connectors. */
if (connector->force)
continue;
/* Ignore HPD capable connectors and connectors where we don't
* want any hotplug detection at all for polling. */
if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
continue;
old_status = connector->status;
/* if we are connected and don't want to poll for disconnect
skip it */
if (old_status == connector_status_connected &&
!(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
continue;
repoll = true;
old_epoch_counter = connector->epoch_counter;
connector->status = drm_helper_probe_detect(connector, NULL, false);
if (old_epoch_counter != connector->epoch_counter) {
const char *old, *new;
/*
* The poll work sets force=false when calling detect so
* that drivers can avoid to do disruptive tests (e.g.
* when load detect cycles could cause flickering on
* other, running displays). This bears the risk that we
* flip-flop between unknown here in the poll work and
* the real state when userspace forces a full detect
* call after receiving a hotplug event due to this
* change.
*
* Hence clamp an unknown detect status to the old
* value.
*/
if (connector->status == connector_status_unknown) {
connector->status = old_status;
continue;
}
old = drm_get_connector_status_name(old_status);
new = drm_get_connector_status_name(connector->status);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
"status updated from %s to %s\n",
connector->base.id,
connector->name,
old, new);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] epoch counter %llu -> %llu\n",
connector->base.id, connector->name,
old_epoch_counter, connector->epoch_counter);
changed = true;
}
}
drm_connector_list_iter_end(&conn_iter);
mutex_unlock(&dev->mode_config.mutex);
out:
if (changed)
drm_kms_helper_hotplug_event(dev);
if (repoll)
schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
}
/**
* drm_kms_helper_is_poll_worker - is %current task an output poll worker?
*
* Determine if %current task is an output poll worker. This can be used
* to select distinct code paths for output polling versus other contexts.
*
* One use case is to avoid a deadlock between the output poll worker and
* the autosuspend worker wherein the latter waits for polling to finish
* upon calling drm_kms_helper_poll_disable(), while the former waits for
* runtime suspend to finish upon calling pm_runtime_get_sync() in a
* connector ->detect hook.
*/
bool drm_kms_helper_is_poll_worker(void)
{
struct work_struct *work = current_work();
return work && work->func == output_poll_execute;
}
EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
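/*
 * A possible detect-hook pattern built on this helper (hypothetical foo_*
 * names, assuming runtime PM with autosuspend): skip waking the device from
 * the poll worker to avoid the deadlock described above.
 *
 *	static enum drm_connector_status
 *	foo_connector_detect(struct drm_connector *connector, bool force)
 *	{
 *		struct device *dev = connector->dev->dev;
 *		enum drm_connector_status status;
 *
 *		if (drm_kms_helper_is_poll_worker()) {
 *			if (pm_runtime_get_if_in_use(dev) <= 0)
 *				return connector->status;
 *		} else {
 *			pm_runtime_get_sync(dev);
 *		}
 *
 *		status = foo_do_detect(connector);
 *
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *		return status;
 *	}
 */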
/**
* drm_kms_helper_poll_disable - disable output polling
* @dev: drm_device
*
* This function disables the output polling work.
*
* Drivers can call this helper from their device suspend implementation. It is
* not an error to call this even when output polling isn't enabled or already
* disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable().
*
* Note that calls to enable and disable polling must be strictly ordered, which
 * is automatically the case when they're only called from suspend/resume
* callbacks.
*/
void drm_kms_helper_poll_disable(struct drm_device *dev)
{
if (dev->mode_config.poll_running)
drm_kms_helper_disable_hpd(dev);
cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
dev->mode_config.poll_running = false;
}
EXPORT_SYMBOL(drm_kms_helper_poll_disable);
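/*
 * A minimal suspend/resume pairing sketch (hypothetical foo_* names): the
 * system PM callbacks are a natural place to keep the enable/disable calls
 * strictly ordered.
 *
 *	static int foo_pm_suspend(struct device *dev)
 *	{
 *		struct drm_device *drm = dev_get_drvdata(dev);
 *
 *		drm_kms_helper_poll_disable(drm);
 *		// ... quiesce the rest of the display hardware ...
 *		return 0;
 *	}
 *
 *	static int foo_pm_resume(struct device *dev)
 *	{
 *		struct drm_device *drm = dev_get_drvdata(dev);
 *
 *		// ... bring the display hardware back up ...
 *		drm_kms_helper_poll_enable(drm);
 *		return 0;
 *	}
 */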
/**
* drm_kms_helper_poll_init - initialize and enable output polling
* @dev: drm_device
*
* This function initializes and then also enables output polling support for
* @dev. Drivers which do not have reliable hotplug support in hardware can use
* this helper infrastructure to regularly poll such connectors for changes in
* their connection state.
*
* Drivers can control which connectors are polled by setting the
* DRM_CONNECTOR_POLL_CONNECT and DRM_CONNECTOR_POLL_DISCONNECT flags. On
 * connectors where probing live outputs can result in visual distortion, drivers
* should not set the DRM_CONNECTOR_POLL_DISCONNECT flag to avoid this.
* Connectors which have no flag or only DRM_CONNECTOR_POLL_HPD set are
* completely ignored by the polling logic.
*
* Note that a connector can be both polled and probed from the hotplug handler,
* in case the hotplug interrupt is known to be unreliable.
*/
void drm_kms_helper_poll_init(struct drm_device *dev)
{
INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
dev->mode_config.poll_enabled = true;
drm_kms_helper_poll_enable(dev);
}
EXPORT_SYMBOL(drm_kms_helper_poll_init);
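/*
 * A minimal opt-in sketch (hypothetical connector/drm variables): a
 * connector without reliable HPD selects polling before the poll work is
 * initialised for the device.
 *
 *	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
 *			    DRM_CONNECTOR_POLL_DISCONNECT;
 *
 *	// once all connectors are set up:
 *	drm_kms_helper_poll_init(drm);
 */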
/**
* drm_kms_helper_poll_fini - disable output polling and clean it up
* @dev: drm_device
*/
void drm_kms_helper_poll_fini(struct drm_device *dev)
{
if (!dev->mode_config.poll_enabled)
return;
drm_kms_helper_poll_disable(dev);
dev->mode_config.poll_enabled = false;
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
static bool check_connector_changed(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
enum drm_connector_status old_status;
u64 old_epoch_counter;
/* Only handle HPD capable connectors. */
drm_WARN_ON(dev, !(connector->polled & DRM_CONNECTOR_POLL_HPD));
drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
old_status = connector->status;
old_epoch_counter = connector->epoch_counter;
connector->status = drm_helper_probe_detect(connector, NULL, false);
if (old_epoch_counter == connector->epoch_counter) {
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Same epoch counter %llu\n",
connector->base.id,
connector->name,
connector->epoch_counter);
return false;
}
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
connector->name,
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->status));
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Changed epoch counter %llu => %llu\n",
connector->base.id,
connector->name,
old_epoch_counter,
connector->epoch_counter);
return true;
}
/**
* drm_connector_helper_hpd_irq_event - hotplug processing
* @connector: drm_connector
*
* Drivers can use this helper function to run a detect cycle on a connector
* which has the DRM_CONNECTOR_POLL_HPD flag set in its &polled member.
*
* This helper function is useful for drivers which can track hotplug
* interrupts for a single connector. Drivers that want to send a
* hotplug event for all connectors or can't track hotplug interrupts
* per connector need to use drm_helper_hpd_irq_event().
*
* This function must be called from process context with no mode
* setting locks held.
*
* Note that a connector can be both polled and probed from the hotplug
* handler, in case the hotplug interrupt is known to be unreliable.
*
* Returns:
* A boolean indicating whether the connector status changed or not
*/
bool drm_connector_helper_hpd_irq_event(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
bool changed;
mutex_lock(&dev->mode_config.mutex);
changed = check_connector_changed(connector);
mutex_unlock(&dev->mode_config.mutex);
if (changed) {
drm_kms_helper_connector_hotplug_event(connector);
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Sent hotplug event\n",
connector->base.id,
connector->name);
}
return changed;
}
EXPORT_SYMBOL(drm_connector_helper_hpd_irq_event);
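/*
 * A minimal per-connector HPD handler sketch (hypothetical foo_* names):
 * the helper runs the detect cycle and only sends the hotplug event when
 * the status actually changed.
 *
 *	static irqreturn_t foo_connector_hpd_irq_thread(int irq, void *arg)
 *	{
 *		struct drm_connector *connector = arg;
 *
 *		drm_connector_helper_hpd_irq_event(connector);
 *
 *		return IRQ_HANDLED;
 *	}
 *
 * A driver that cannot tell which connector fired would call
 * drm_helper_hpd_irq_event(dev) from the same context instead.
 */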
/**
* drm_helper_hpd_irq_event - hotplug processing
* @dev: drm_device
*
* Drivers can use this helper function to run a detect cycle on all connectors
* which have the DRM_CONNECTOR_POLL_HPD flag set in their &polled member. All
* other connectors are ignored, which is useful to avoid reprobing fixed
* panels.
*
* This helper function is useful for drivers which can't or don't track hotplug
* interrupts for each connector.
*
* Drivers which support hotplug interrupts for each connector individually and
* which have a more fine-grained detect logic can use
* drm_connector_helper_hpd_irq_event(). Alternatively, they should bypass this
* code and directly call drm_kms_helper_hotplug_event() in case the connector
* state changed.
*
* This function must be called from process context with no mode
* setting locks held.
*
* Note that a connector can be both polled and probed from the hotplug handler,
* in case the hotplug interrupt is known to be unreliable.
*
* Returns:
* A boolean indicating whether the connector status changed or not
*/
bool drm_helper_hpd_irq_event(struct drm_device *dev)
{
struct drm_connector *connector, *first_changed_connector = NULL;
struct drm_connector_list_iter conn_iter;
int changed = 0;
if (!dev->mode_config.poll_enabled)
return false;
mutex_lock(&dev->mode_config.mutex);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
/* Only handle HPD capable connectors. */
if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
continue;
if (check_connector_changed(connector)) {
if (!first_changed_connector) {
drm_connector_get(connector);
first_changed_connector = connector;
}
changed++;
}
}
drm_connector_list_iter_end(&conn_iter);
mutex_unlock(&dev->mode_config.mutex);
if (changed == 1)
drm_kms_helper_connector_hotplug_event(first_changed_connector);
else if (changed > 0)
drm_kms_helper_hotplug_event(dev);
if (first_changed_connector)
drm_connector_put(first_changed_connector);
return changed;
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
/**
* drm_crtc_helper_mode_valid_fixed - Validates a display mode
* @crtc: the crtc
* @mode: the mode to validate
* @fixed_mode: the display hardware's mode
*
* Returns:
* MODE_OK on success, or another mode-status code otherwise.
*/
enum drm_mode_status drm_crtc_helper_mode_valid_fixed(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
const struct drm_display_mode *fixed_mode)
{
if (mode->hdisplay != fixed_mode->hdisplay && mode->vdisplay != fixed_mode->vdisplay)
return MODE_ONE_SIZE;
else if (mode->hdisplay != fixed_mode->hdisplay)
return MODE_ONE_WIDTH;
else if (mode->vdisplay != fixed_mode->vdisplay)
return MODE_ONE_HEIGHT;
return MODE_OK;
}
EXPORT_SYMBOL(drm_crtc_helper_mode_valid_fixed);
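/*
 * A minimal .mode_valid sketch for fixed-resolution hardware (hypothetical
 * foo_* names and to_foo_device() upcast):
 *
 *	static enum drm_mode_status
 *	foo_crtc_mode_valid(struct drm_crtc *crtc,
 *			    const struct drm_display_mode *mode)
 *	{
 *		struct foo_device *foo = to_foo_device(crtc->dev);
 *
 *		return drm_crtc_helper_mode_valid_fixed(crtc, mode, &foo->mode);
 *	}
 */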
/**
* drm_connector_helper_get_modes_from_ddc - Updates the connector's EDID
* property from the connector's
* DDC channel
* @connector: The connector
*
* Returns:
* The number of detected display modes.
*
* Uses a connector's DDC channel to retrieve EDID data and update the
* connector's EDID property and display modes. Drivers can use this
* function to implement struct &drm_connector_helper_funcs.get_modes
* for connectors with a DDC channel.
*/
int drm_connector_helper_get_modes_from_ddc(struct drm_connector *connector)
{
struct edid *edid;
int count = 0;
if (!connector->ddc)
return 0;
edid = drm_get_edid(connector, connector->ddc);
// clears property if EDID is NULL
drm_connector_update_edid_property(connector, edid);
if (edid) {
count = drm_add_edid_modes(connector, edid);
kfree(edid);
}
return count;
}
EXPORT_SYMBOL(drm_connector_helper_get_modes_from_ddc);
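/*
 * A minimal hookup sketch (hypothetical foo_* name), assuming the connector
 * was initialised with drm_connector_init_with_ddc() so that connector->ddc
 * is set:
 *
 *	static const struct drm_connector_helper_funcs foo_connector_helper_funcs = {
 *		.get_modes = drm_connector_helper_get_modes_from_ddc,
 *	};
 *
 * Drivers preferring the struct drm_edid based path can plug in
 * drm_connector_helper_get_modes() the same way.
 */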
/**
* drm_connector_helper_get_modes_fixed - Duplicates a display mode for a connector
* @connector: the connector
* @fixed_mode: the display hardware's mode
*
 * This function duplicates a display mode for a connector. Drivers for hardware
* that only supports a single fixed mode can use this function in their connector's
* get_modes helper.
*
* Returns:
* The number of created modes.
*/
int drm_connector_helper_get_modes_fixed(struct drm_connector *connector,
const struct drm_display_mode *fixed_mode)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev, fixed_mode);
if (!mode) {
drm_err(dev, "Failed to duplicate mode " DRM_MODE_FMT "\n",
DRM_MODE_ARG(fixed_mode));
return 0;
}
if (mode->name[0] == '\0')
drm_mode_set_name(mode);
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
if (mode->width_mm)
connector->display_info.width_mm = mode->width_mm;
if (mode->height_mm)
connector->display_info.height_mm = mode->height_mm;
return 1;
}
EXPORT_SYMBOL(drm_connector_helper_get_modes_fixed);
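/*
 * A minimal fixed-mode sketch (hypothetical foo_* names and timings):
 *
 *	static const struct drm_display_mode foo_panel_mode = {
 *		DRM_SIMPLE_MODE(800, 480, 154, 86),
 *	};
 *
 *	static int foo_connector_get_modes(struct drm_connector *connector)
 *	{
 *		return drm_connector_helper_get_modes_fixed(connector,
 *							    &foo_panel_mode);
 *	}
 */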
/**
* drm_connector_helper_get_modes - Read EDID and update connector.
* @connector: The connector
*
* Read the EDID using drm_edid_read() (which requires that connector->ddc is
* set), and update the connector using the EDID.
*
* This can be used as the "default" connector helper .get_modes() hook if the
 * driver does not need any special processing. It also sets the example of what
* custom .get_modes() hooks should do regarding EDID read and connector update.
*
* Returns: Number of modes.
*/
int drm_connector_helper_get_modes(struct drm_connector *connector)
{
const struct drm_edid *drm_edid;
int count;
drm_edid = drm_edid_read(connector);
/*
* Unconditionally update the connector. If the EDID was read
* successfully, fill in the connector information derived from the
* EDID. Otherwise, if the EDID is NULL, clear the connector
* information.
*/
drm_edid_connector_update(connector, drm_edid);
count = drm_edid_connector_add_modes(connector);
drm_edid_free(drm_edid);
return count;
}
EXPORT_SYMBOL(drm_connector_helper_get_modes);
/**
 * drm_connector_helper_tv_get_modes - Fills the modes available to a TV connector
* @connector: The connector
*
* Fills the available modes for a TV connector based on the supported
* TV modes, and the default mode expressed by the kernel command line.
*
* This can be used as the default TV connector helper .get_modes() hook
* if the driver does not need any special processing.
*
* Returns:
* The number of modes added to the connector.
*/
int drm_connector_helper_tv_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_property *tv_mode_property =
dev->mode_config.tv_mode_property;
struct drm_cmdline_mode *cmdline = &connector->cmdline_mode;
unsigned int ntsc_modes = BIT(DRM_MODE_TV_MODE_NTSC) |
BIT(DRM_MODE_TV_MODE_NTSC_443) |
BIT(DRM_MODE_TV_MODE_NTSC_J) |
BIT(DRM_MODE_TV_MODE_PAL_M);
unsigned int pal_modes = BIT(DRM_MODE_TV_MODE_PAL) |
BIT(DRM_MODE_TV_MODE_PAL_N) |
BIT(DRM_MODE_TV_MODE_SECAM);
unsigned int tv_modes[2] = { UINT_MAX, UINT_MAX };
unsigned int i, supported_tv_modes = 0;
if (!tv_mode_property)
return 0;
for (i = 0; i < tv_mode_property->num_values; i++)
supported_tv_modes |= BIT(tv_mode_property->values[i]);
if ((supported_tv_modes & ntsc_modes) &&
(supported_tv_modes & pal_modes)) {
uint64_t default_mode;
if (drm_object_property_get_default_value(&connector->base,
tv_mode_property,
&default_mode))
return 0;
if (cmdline->tv_mode_specified)
default_mode = cmdline->tv_mode;
if (BIT(default_mode) & ntsc_modes) {
tv_modes[0] = DRM_MODE_TV_MODE_NTSC;
tv_modes[1] = DRM_MODE_TV_MODE_PAL;
} else {
tv_modes[0] = DRM_MODE_TV_MODE_PAL;
tv_modes[1] = DRM_MODE_TV_MODE_NTSC;
}
} else if (supported_tv_modes & ntsc_modes) {
tv_modes[0] = DRM_MODE_TV_MODE_NTSC;
} else if (supported_tv_modes & pal_modes) {
tv_modes[0] = DRM_MODE_TV_MODE_PAL;
} else {
return 0;
}
for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
struct drm_display_mode *mode;
if (tv_modes[i] == DRM_MODE_TV_MODE_NTSC)
mode = drm_mode_analog_ntsc_480i(dev);
else if (tv_modes[i] == DRM_MODE_TV_MODE_PAL)
mode = drm_mode_analog_pal_576i(dev);
else
break;
if (!mode)
return i;
if (!i)
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
}
return i;
}
EXPORT_SYMBOL(drm_connector_helper_tv_get_modes);
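/*
 * A minimal hookup sketch (hypothetical foo_* name), assuming the driver
 * created the TV mode property first, e.g. with
 * drm_mode_create_tv_properties(dev, BIT(DRM_MODE_TV_MODE_NTSC) |
 * BIT(DRM_MODE_TV_MODE_PAL)):
 *
 *	static const struct drm_connector_helper_funcs foo_tv_connector_helper_funcs = {
 *		.get_modes = drm_connector_helper_tv_get_modes,
 *	};
 */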
| linux-master | drivers/gpu/drm/drm_probe_helper.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019 Laurent Pinchart <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
/**
* DOC: overview
*
* The DRM bridge connector helper object provides a DRM connector
* implementation that wraps a chain of &struct drm_bridge. The connector
* operations are fully implemented based on the operations of the bridges in
* the chain, and don't require any intervention from the display controller
* driver at runtime.
*
* To use the helper, display controller drivers create a bridge connector with
* a call to drm_bridge_connector_init(). This associates the newly created
* connector with the chain of bridges passed to the function and registers it
* with the DRM device. At that point the connector becomes fully usable, no
* further operation is needed.
*
* The DRM bridge connector operations are implemented based on the operations
* provided by the bridges in the chain. Each connector operation is delegated
* to the bridge closest to the connector (at the end of the chain) that
* provides the relevant functionality.
*
* To make use of this helper, all bridges in the chain shall report bridge
* operation flags (&drm_bridge->ops) and bridge output type
* (&drm_bridge->type), as well as the DRM_BRIDGE_ATTACH_NO_CONNECTOR attach
* flag (none of the bridges shall create a DRM connector directly).
*/
/**
* struct drm_bridge_connector - A connector backed by a chain of bridges
*/
struct drm_bridge_connector {
/**
* @base: The base DRM connector
*/
struct drm_connector base;
/**
* @encoder:
*
* The encoder at the start of the bridges chain.
*/
struct drm_encoder *encoder;
/**
* @bridge_edid:
*
* The last bridge in the chain (closest to the connector) that provides
* EDID read support, if any (see &DRM_BRIDGE_OP_EDID).
*/
struct drm_bridge *bridge_edid;
/**
* @bridge_hpd:
*
* The last bridge in the chain (closest to the connector) that provides
* hot-plug detection notification, if any (see &DRM_BRIDGE_OP_HPD).
*/
struct drm_bridge *bridge_hpd;
/**
* @bridge_detect:
*
* The last bridge in the chain (closest to the connector) that provides
* connector detection, if any (see &DRM_BRIDGE_OP_DETECT).
*/
struct drm_bridge *bridge_detect;
/**
* @bridge_modes:
*
* The last bridge in the chain (closest to the connector) that provides
* connector modes detection, if any (see &DRM_BRIDGE_OP_MODES).
*/
struct drm_bridge *bridge_modes;
};
#define to_drm_bridge_connector(x) \
container_of(x, struct drm_bridge_connector, base)
/* -----------------------------------------------------------------------------
* Bridge Connector Hot-Plug Handling
*/
static void drm_bridge_connector_hpd_notify(struct drm_connector *connector,
enum drm_connector_status status)
{
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
/* Notify all bridges in the pipeline of hotplug events. */
drm_for_each_bridge_in_chain(bridge_connector->encoder, bridge) {
if (bridge->funcs->hpd_notify)
bridge->funcs->hpd_notify(bridge, status);
}
}
static void drm_bridge_connector_hpd_cb(void *cb_data,
enum drm_connector_status status)
{
struct drm_bridge_connector *drm_bridge_connector = cb_data;
struct drm_connector *connector = &drm_bridge_connector->base;
struct drm_device *dev = connector->dev;
enum drm_connector_status old_status;
mutex_lock(&dev->mode_config.mutex);
old_status = connector->status;
connector->status = status;
mutex_unlock(&dev->mode_config.mutex);
if (old_status == status)
return;
drm_bridge_connector_hpd_notify(connector, status);
drm_kms_helper_connector_hotplug_event(connector);
}
static void drm_bridge_connector_enable_hpd(struct drm_connector *connector)
{
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
struct drm_bridge *hpd = bridge_connector->bridge_hpd;
if (hpd)
drm_bridge_hpd_enable(hpd, drm_bridge_connector_hpd_cb,
bridge_connector);
}
static void drm_bridge_connector_disable_hpd(struct drm_connector *connector)
{
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
struct drm_bridge *hpd = bridge_connector->bridge_hpd;
if (hpd)
drm_bridge_hpd_disable(hpd);
}
/* -----------------------------------------------------------------------------
* Bridge Connector Functions
*/
static enum drm_connector_status
drm_bridge_connector_detect(struct drm_connector *connector, bool force)
{
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
struct drm_bridge *detect = bridge_connector->bridge_detect;
enum drm_connector_status status;
if (detect) {
status = detect->funcs->detect(detect);
drm_bridge_connector_hpd_notify(connector, status);
} else {
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DPI:
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_DSI:
case DRM_MODE_CONNECTOR_eDP:
status = connector_status_connected;
break;
default:
status = connector_status_unknown;
break;
}
}
return status;
}
static void drm_bridge_connector_destroy(struct drm_connector *connector)
{
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
if (bridge_connector->bridge_hpd) {
struct drm_bridge *hpd = bridge_connector->bridge_hpd;
drm_bridge_hpd_disable(hpd);
}
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(bridge_connector);
}
static void drm_bridge_connector_debugfs_init(struct drm_connector *connector,
struct dentry *root)
{
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
struct drm_encoder *encoder = bridge_connector->encoder;
struct drm_bridge *bridge;
list_for_each_entry(bridge, &encoder->bridge_chain, chain_node) {
if (bridge->funcs->debugfs_init)
bridge->funcs->debugfs_init(bridge, root);
}
}
static const struct drm_connector_funcs drm_bridge_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.detect = drm_bridge_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_bridge_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.debugfs_init = drm_bridge_connector_debugfs_init,
};
/* -----------------------------------------------------------------------------
* Bridge Connector Helper Functions
*/
static int drm_bridge_connector_get_modes_edid(struct drm_connector *connector,
struct drm_bridge *bridge)
{
enum drm_connector_status status;
struct edid *edid;
int n;
status = drm_bridge_connector_detect(connector, false);
if (status != connector_status_connected)
goto no_edid;
edid = bridge->funcs->get_edid(bridge, connector);
if (!drm_edid_is_valid(edid)) {
kfree(edid);
goto no_edid;
}
drm_connector_update_edid_property(connector, edid);
n = drm_add_edid_modes(connector, edid);
kfree(edid);
return n;
no_edid:
drm_connector_update_edid_property(connector, NULL);
return 0;
}
static int drm_bridge_connector_get_modes(struct drm_connector *connector)
{
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
/*
* If display exposes EDID, then we parse that in the normal way to
* build table of supported modes.
*/
bridge = bridge_connector->bridge_edid;
if (bridge)
return drm_bridge_connector_get_modes_edid(connector, bridge);
/*
* Otherwise if the display pipeline reports modes (e.g. with a fixed
* resolution panel or an analog TV output), query it.
*/
bridge = bridge_connector->bridge_modes;
if (bridge)
return bridge->funcs->get_modes(bridge, connector);
/*
* We can't retrieve modes, which can happen for instance for a DVI or
* VGA output with the DDC bus unconnected. The KMS core will add the
* default modes.
*/
return 0;
}
static const struct drm_connector_helper_funcs drm_bridge_connector_helper_funcs = {
.get_modes = drm_bridge_connector_get_modes,
/* No need for .mode_valid(), the bridges are checked by the core. */
.enable_hpd = drm_bridge_connector_enable_hpd,
.disable_hpd = drm_bridge_connector_disable_hpd,
};
/* -----------------------------------------------------------------------------
* Bridge Connector Initialisation
*/
/**
* drm_bridge_connector_init - Initialise a connector for a chain of bridges
* @drm: the DRM device
* @encoder: the encoder where the bridge chain starts
*
* Allocate, initialise and register a &drm_bridge_connector with the @drm
* device. The connector is associated with a chain of bridges that starts at
* the @encoder. All bridges in the chain shall report bridge operation flags
* (&drm_bridge->ops) and bridge output type (&drm_bridge->type), and none of
* them may create a DRM connector directly.
*
* Returns a pointer to the new connector on success, or a negative error
* pointer otherwise.
*/
struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
struct drm_encoder *encoder)
{
struct drm_bridge_connector *bridge_connector;
struct drm_connector *connector;
struct i2c_adapter *ddc = NULL;
struct drm_bridge *bridge, *panel_bridge = NULL;
int connector_type;
int ret;
bridge_connector = kzalloc(sizeof(*bridge_connector), GFP_KERNEL);
if (!bridge_connector)
return ERR_PTR(-ENOMEM);
bridge_connector->encoder = encoder;
/*
* TODO: Handle doublescan_allowed, stereo_allowed and
* ycbcr_420_allowed.
*/
connector = &bridge_connector->base;
connector->interlace_allowed = true;
/*
* Initialise connector status handling. First locate the furthest
* bridges in the pipeline that support HPD and output detection. Then
* initialise the connector polling mode, using HPD if available and
* falling back to polling if supported. If neither HPD nor output
* detection are available, we don't support hotplug detection at all.
*/
connector_type = DRM_MODE_CONNECTOR_Unknown;
drm_for_each_bridge_in_chain(encoder, bridge) {
if (!bridge->interlace_allowed)
connector->interlace_allowed = false;
if (bridge->ops & DRM_BRIDGE_OP_EDID)
bridge_connector->bridge_edid = bridge;
if (bridge->ops & DRM_BRIDGE_OP_HPD)
bridge_connector->bridge_hpd = bridge;
if (bridge->ops & DRM_BRIDGE_OP_DETECT)
bridge_connector->bridge_detect = bridge;
if (bridge->ops & DRM_BRIDGE_OP_MODES)
bridge_connector->bridge_modes = bridge;
if (!drm_bridge_get_next_bridge(bridge))
connector_type = bridge->type;
if (bridge->ddc)
ddc = bridge->ddc;
if (drm_bridge_is_panel(bridge))
panel_bridge = bridge;
}
if (connector_type == DRM_MODE_CONNECTOR_Unknown) {
kfree(bridge_connector);
return ERR_PTR(-EINVAL);
}
ret = drm_connector_init_with_ddc(drm, connector,
&drm_bridge_connector_funcs,
connector_type, ddc);
if (ret) {
kfree(bridge_connector);
return ERR_PTR(ret);
}
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
if (bridge_connector->bridge_hpd)
connector->polled = DRM_CONNECTOR_POLL_HPD;
else if (bridge_connector->bridge_detect)
connector->polled = DRM_CONNECTOR_POLL_CONNECT
| DRM_CONNECTOR_POLL_DISCONNECT;
if (panel_bridge)
drm_panel_bridge_set_orientation(connector, panel_bridge);
return connector;
}
EXPORT_SYMBOL_GPL(drm_bridge_connector_init);
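/*
 * A minimal usage sketch (hypothetical variables, error handling elided): a
 * display controller driver attaches its bridge chain without a connector
 * and then lets this helper create and wire one up.
 *
 *	ret = drm_bridge_attach(encoder, bridge, NULL,
 *				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
 *	if (ret)
 *		return ret;
 *
 *	connector = drm_bridge_connector_init(drm, encoder);
 *	if (IS_ERR(connector))
 *		return PTR_ERR(connector);
 *
 *	ret = drm_connector_attach_encoder(connector, encoder);
 *	if (ret)
 *		return ret;
 */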
| linux-master | drivers/gpu/drm/drm_bridge_connector.c |
/*
* Legacy: Generic DRM Contexts
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Author: Rickard E. (Rik) Faith <[email protected]>
* Author: Gareth Hughes <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "drm_legacy.h"
struct drm_ctx_list {
struct list_head head;
drm_context_t handle;
struct drm_file *tag;
};
/******************************************************************/
/** \name Context bitmap support */
/*@{*/
/*
* Free a handle from the context bitmap.
*
* \param dev DRM device.
* \param ctx_handle context handle.
*
* Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
* in drm_device::ctx_idr, while holding the drm_device::struct_mutex
* lock.
*/
void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->struct_mutex);
idr_remove(&dev->ctx_idr, ctx_handle);
mutex_unlock(&dev->struct_mutex);
}
/*
* Context bitmap allocation.
*
* \param dev DRM device.
* \return (non-negative) context handle on success or a negative number on failure.
*
* Allocate a new idr from drm_device::ctx_idr while holding the
* drm_device::struct_mutex lock.
*/
static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
{
int ret;
mutex_lock(&dev->struct_mutex);
ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0,
GFP_KERNEL);
mutex_unlock(&dev->struct_mutex);
return ret;
}
/*
* Context bitmap initialization.
*
* \param dev DRM device.
*
* Initialise the drm_device::ctx_idr
*/
void drm_legacy_ctxbitmap_init(struct drm_device * dev)
{
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
idr_init(&dev->ctx_idr);
}
/*
* Context bitmap cleanup.
*
* \param dev DRM device.
*
* Free all idr members using drm_ctx_sarea_free helper function
* while holding the drm_device::struct_mutex lock.
*/
void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
{
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->struct_mutex);
idr_destroy(&dev->ctx_idr);
mutex_unlock(&dev->struct_mutex);
}
/**
* drm_legacy_ctxbitmap_flush() - Flush all contexts owned by a file
* @dev: DRM device to operate on
* @file: Open file to flush contexts for
*
* This iterates over all contexts on @dev and drops them if they're owned by
* @file. Note that after this call returns, new contexts might be added if
* the file is still alive.
*/
void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
{
struct drm_ctx_list *pos, *tmp;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->ctxlist_mutex);
list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
if (pos->tag == file &&
pos->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
dev->driver->context_dtor(dev, pos->handle);
drm_legacy_ctxbitmap_free(dev, pos->handle);
list_del(&pos->head);
kfree(pos);
}
}
mutex_unlock(&dev->ctxlist_mutex);
}
/*@}*/
/******************************************************************/
/** \name Per Context SAREA Support */
/*@{*/
/*
* Get per-context SAREA.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx_priv_map structure.
* \return zero on success or a negative number on failure.
*
* Gets the map from drm_device::ctx_idr with the handle specified and
* returns its handle.
*/
int drm_legacy_getsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx_priv_map *request = data;
struct drm_local_map *map;
struct drm_map_list *_entry;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
mutex_lock(&dev->struct_mutex);
map = idr_find(&dev->ctx_idr, request->ctx_id);
if (!map) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
request->handle = NULL;
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == map) {
request->handle =
(void *)(unsigned long)_entry->user_token;
break;
}
}
mutex_unlock(&dev->struct_mutex);
if (request->handle == NULL)
return -EINVAL;
return 0;
}
/*
* Set per-context SAREA.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx_priv_map structure.
* \return zero on success or a negative number on failure.
*
 * Searches the mapping specified in \p arg and updates the entry in
* drm_device::ctx_idr with it.
*/
int drm_legacy_setsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx_priv_map *request = data;
struct drm_local_map *map = NULL;
struct drm_map_list *r_list = NULL;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map
&& r_list->user_token == (unsigned long) request->handle)
goto found;
}
bad:
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
found:
map = r_list->map;
if (!map)
goto bad;
if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
goto bad;
mutex_unlock(&dev->struct_mutex);
return 0;
}
/*@}*/
/******************************************************************/
/** \name The actual DRM context handling routines */
/*@{*/
/*
* Switch context.
*
* \param dev DRM device.
* \param old old context handle.
* \param new new context handle.
* \return zero on success or a negative number on failure.
*
* Attempt to set drm_device::context_flag.
*/
static int drm_context_switch(struct drm_device * dev, int old, int new)
{
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
return -EBUSY;
}
DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
return 0;
}
return 0;
}
/*
* Complete context switch.
*
* \param dev DRM device.
* \param new new context handle.
* \return zero on success or a negative number on failure.
*
* Updates drm_device::last_context and drm_device::last_switch. Verifies the
* hardware lock is held, clears the drm_device::context_flag and wakes up
* drm_device::context_wait.
*/
static int drm_context_switch_complete(struct drm_device *dev,
struct drm_file *file_priv, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
/* If a context switch is ever initiated
when the kernel holds the lock, release
that lock here.
*/
clear_bit(0, &dev->context_flag);
return 0;
}
/*
* Reserve contexts.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx_res structure.
* \return zero on success or a negative number on failure.
*/
int drm_legacy_resctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx_res *res = data;
struct drm_ctx ctx;
int i;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (res->count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
return -EFAULT;
}
}
res->count = DRM_RESERVED_CONTEXTS;
return 0;
}
/*
* Add context.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*
* Get a new handle for the context and copy to userspace.
*/
int drm_legacy_addctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx_list *ctx_entry;
struct drm_ctx *ctx = data;
int tmp_handle;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
tmp_handle = drm_legacy_ctxbitmap_next(dev);
if (tmp_handle == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
tmp_handle = drm_legacy_ctxbitmap_next(dev);
}
DRM_DEBUG("%d\n", tmp_handle);
if (tmp_handle < 0) {
DRM_DEBUG("Not enough free contexts.\n");
/* Should this return -EBUSY instead? */
return tmp_handle;
}
ctx->handle = tmp_handle;
ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
if (!ctx_entry) {
DRM_DEBUG("out of memory\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&ctx_entry->head);
ctx_entry->handle = ctx->handle;
ctx_entry->tag = file_priv;
mutex_lock(&dev->ctxlist_mutex);
list_add(&ctx_entry->head, &dev->ctxlist);
mutex_unlock(&dev->ctxlist_mutex);
return 0;
}
/*
* Get context.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*/
int drm_legacy_getctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
/* This is 0, because we don't handle any context flags */
ctx->flags = 0;
return 0;
}
/*
* Switch context.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*
* Calls context_switch().
*/
int drm_legacy_switchctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
DRM_DEBUG("%d\n", ctx->handle);
return drm_context_switch(dev, dev->last_context, ctx->handle);
}
/*
* New context.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*
* Calls context_switch_complete().
*/
int drm_legacy_newctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
DRM_DEBUG("%d\n", ctx->handle);
drm_context_switch_complete(dev, file_priv, ctx->handle);
return 0;
}
/*
* Remove context.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*
* If not the special kernel context, calls ctxbitmap_free() to free the specified context.
*/
int drm_legacy_rmctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
dev->driver->context_dtor(dev, ctx->handle);
drm_legacy_ctxbitmap_free(dev, ctx->handle);
}
mutex_lock(&dev->ctxlist_mutex);
if (!list_empty(&dev->ctxlist)) {
struct drm_ctx_list *pos, *n;
list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
if (pos->handle == ctx->handle) {
list_del(&pos->head);
kfree(pos);
}
}
}
mutex_unlock(&dev->ctxlist_mutex);
return 0;
}
/*@}*/
| linux-master | drivers/gpu/drm/drm_context.c |
// SPDX-License-Identifier: GPL-2.0 or MIT
/*
* Copyright 2018 Noralf Trønnes
*/
#include <linux/iosys-map.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drm_client.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
/**
* DOC: overview
*
* This library provides support for clients running in the kernel like fbdev and bootsplash.
*
* GEM drivers which provide a GEM based dumb buffer with a virtual address are supported.
*/
static int drm_client_open(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
struct drm_file *file;
file = drm_file_alloc(dev->primary);
if (IS_ERR(file))
return PTR_ERR(file);
mutex_lock(&dev->filelist_mutex);
list_add(&file->lhead, &dev->filelist_internal);
mutex_unlock(&dev->filelist_mutex);
client->file = file;
return 0;
}
static void drm_client_close(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
mutex_lock(&dev->filelist_mutex);
list_del(&client->file->lhead);
mutex_unlock(&dev->filelist_mutex);
drm_file_free(client->file);
}
/**
* drm_client_init - Initialise a DRM client
* @dev: DRM device
* @client: DRM client
* @name: Client name
* @funcs: DRM client functions (optional)
*
* This initialises the client and opens a &drm_file.
* Use drm_client_register() to complete the process.
* The caller needs to hold a reference on @dev before calling this function.
* The client is freed when the &drm_device is unregistered. See drm_client_release().
*
* Returns:
* Zero on success or negative error code on failure.
*/
int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
const char *name, const struct drm_client_funcs *funcs)
{
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET) || !dev->driver->dumb_create)
return -EOPNOTSUPP;
if (funcs && !try_module_get(funcs->owner))
return -ENODEV;
client->dev = dev;
client->name = name;
client->funcs = funcs;
ret = drm_client_modeset_create(client);
if (ret)
goto err_put_module;
ret = drm_client_open(client);
if (ret)
goto err_free;
drm_dev_get(dev);
return 0;
err_free:
drm_client_modeset_free(client);
err_put_module:
if (funcs)
module_put(funcs->owner);
return ret;
}
EXPORT_SYMBOL(drm_client_init);
/**
* drm_client_register - Register client
* @client: DRM client
*
* Add the client to the &drm_device client list to activate its callbacks.
* @client must be initialized by a call to drm_client_init(). After
* drm_client_register() it is no longer permissible to call drm_client_release()
* directly (outside the unregister callback), instead cleanup will happen
* automatically on driver unload.
*
* Registering a client generates a hotplug event that allows the client
* to set up its display from pre-existing outputs. The client must have
 * initialized its state to be able to handle the hotplug event successfully.
*/
void drm_client_register(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
int ret;
mutex_lock(&dev->clientlist_mutex);
list_add(&client->list, &dev->clientlist);
if (client->funcs && client->funcs->hotplug) {
/*
* Perform an initial hotplug event to pick up the
* display configuration for the client. This step
* has to be performed *after* registering the client
* in the list of clients, or a concurrent hotplug
 * event might be lost, leaving the display off.
*
* Hold the clientlist_mutex as for a regular hotplug
* event.
*/
ret = client->funcs->hotplug(client);
if (ret)
drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
}
mutex_unlock(&dev->clientlist_mutex);
}
EXPORT_SYMBOL(drm_client_register);
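/*
 * A minimal in-kernel client sketch (hypothetical foo_* names, callback
 * bodies and error handling trimmed):
 *
 *	static const struct drm_client_funcs foo_client_funcs = {
 *		.owner		= THIS_MODULE,
 *		.unregister	= foo_client_unregister,
 *		.restore	= foo_client_restore,
 *		.hotplug	= foo_client_hotplug,
 *	};
 *
 *	ret = drm_client_init(dev, &foo->client, "foo", &foo_client_funcs);
 *	if (ret)
 *		return ret;
 *
 *	drm_client_register(&foo->client);
 */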
/**
* drm_client_release - Release DRM client resources
* @client: DRM client
*
* Releases resources by closing the &drm_file that was opened by drm_client_init().
* It is called automatically if the &drm_client_funcs.unregister callback is _not_ set.
*
* This function should only be called from the unregister callback. An exception
* is fbdev which cannot free the buffer if userspace has open file descriptors.
*
* Note:
* Clients cannot initiate a release by themselves. This is done to keep the code simple.
* The driver has to be unloaded before the client can be unloaded.
*/
void drm_client_release(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
drm_dbg_kms(dev, "%s\n", client->name);
drm_client_modeset_free(client);
drm_client_close(client);
drm_dev_put(dev);
if (client->funcs)
module_put(client->funcs->owner);
}
EXPORT_SYMBOL(drm_client_release);
void drm_client_dev_unregister(struct drm_device *dev)
{
struct drm_client_dev *client, *tmp;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry_safe(client, tmp, &dev->clientlist, list) {
list_del(&client->list);
if (client->funcs && client->funcs->unregister) {
client->funcs->unregister(client);
} else {
drm_client_release(client);
kfree(client);
}
}
mutex_unlock(&dev->clientlist_mutex);
}
/**
* drm_client_dev_hotplug - Send hotplug event to clients
* @dev: DRM device
*
* This function calls the &drm_client_funcs.hotplug callback on the attached clients.
*
* drm_kms_helper_hotplug_event() calls this function, so drivers that use it
* don't need to call this function themselves.
*/
void drm_client_dev_hotplug(struct drm_device *dev)
{
struct drm_client_dev *client;
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
if (!dev->mode_config.num_connector) {
drm_dbg_kms(dev, "No connectors found, will not send hotplug events!\n");
return;
}
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry(client, &dev->clientlist, list) {
if (!client->funcs || !client->funcs->hotplug)
continue;
if (client->hotplug_failed)
continue;
ret = client->funcs->hotplug(client);
drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
if (ret)
client->hotplug_failed = true;
}
mutex_unlock(&dev->clientlist_mutex);
}
EXPORT_SYMBOL(drm_client_dev_hotplug);
void drm_client_dev_restore(struct drm_device *dev)
{
struct drm_client_dev *client;
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry(client, &dev->clientlist, list) {
if (!client->funcs || !client->funcs->restore)
continue;
ret = client->funcs->restore(client);
drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
if (!ret) /* The first one to return zero gets the privilege to restore */
break;
}
mutex_unlock(&dev->clientlist_mutex);
}
static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
if (buffer->gem) {
drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
drm_gem_object_put(buffer->gem);
}
kfree(buffer);
}
static struct drm_client_buffer *
drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height,
u32 format, u32 *handle)
{
const struct drm_format_info *info = drm_format_info(format);
struct drm_mode_create_dumb dumb_args = { };
struct drm_device *dev = client->dev;
struct drm_client_buffer *buffer;
struct drm_gem_object *obj;
int ret;
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
return ERR_PTR(-ENOMEM);
buffer->client = client;
dumb_args.width = width;
dumb_args.height = height;
dumb_args.bpp = drm_format_info_bpp(info, 0);
ret = drm_mode_create_dumb(dev, &dumb_args, client->file);
if (ret)
goto err_delete;
obj = drm_gem_object_lookup(client->file, dumb_args.handle);
if (!obj) {
ret = -ENOENT;
goto err_delete;
}
buffer->pitch = dumb_args.pitch;
buffer->gem = obj;
*handle = dumb_args.handle;
return buffer;
err_delete:
drm_client_buffer_delete(buffer);
return ERR_PTR(ret);
}
/**
* drm_client_buffer_vmap - Map DRM client buffer into address space
* @buffer: DRM client buffer
* @map_copy: Returns the mapped memory's address
*
* This function maps a client buffer into kernel address space. If the
* buffer is already mapped, it returns the existing mapping's address.
*
* Client buffer mappings are not ref'counted. Each call to
* drm_client_buffer_vmap() should be followed by a call to
* drm_client_buffer_vunmap(); or the client buffer should be mapped
* throughout its lifetime.
*
* The returned address is a copy of the internal value. In contrast to
* other vmap interfaces, you don't need it for the client's vunmap
* function. So you can modify it at will during blit and draw operations.
*
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
int
drm_client_buffer_vmap(struct drm_client_buffer *buffer,
struct iosys_map *map_copy)
{
struct iosys_map *map = &buffer->map;
int ret;
/*
* FIXME: The dependency on GEM here isn't required, we could
* convert the driver handle to a dma-buf instead and use the
* backend-agnostic dma-buf vmap support instead. This would
* require that the handle2fd prime ioctl is reworked to pull the
* fd_install step out of the driver backend hooks, to make that
* final step optional for internal users.
*/
ret = drm_gem_vmap_unlocked(buffer->gem, map);
if (ret)
return ret;
*map_copy = *map;
return 0;
}
EXPORT_SYMBOL(drm_client_buffer_vmap);
/**
* drm_client_buffer_vunmap - Unmap DRM client buffer
* @buffer: DRM client buffer
*
* This function removes a client buffer's memory mapping. Calling this
* function is only required by clients that manage their buffer mappings
* by themselves.
*/
void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
{
struct iosys_map *map = &buffer->map;
drm_gem_vunmap_unlocked(buffer->gem, map);
}
EXPORT_SYMBOL(drm_client_buffer_vunmap);
static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
{
int ret;
if (!buffer->fb)
return;
ret = drm_mode_rmfb(buffer->client->dev, buffer->fb->base.id, buffer->client->file);
if (ret)
drm_err(buffer->client->dev,
"Error removing FB:%u (%d)\n", buffer->fb->base.id, ret);
buffer->fb = NULL;
}
static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
u32 width, u32 height, u32 format,
u32 handle)
{
struct drm_client_dev *client = buffer->client;
struct drm_mode_fb_cmd fb_req = { };
const struct drm_format_info *info;
int ret;
info = drm_format_info(format);
fb_req.bpp = drm_format_info_bpp(info, 0);
fb_req.depth = info->depth;
fb_req.width = width;
fb_req.height = height;
fb_req.handle = handle;
fb_req.pitch = buffer->pitch;
ret = drm_mode_addfb(client->dev, &fb_req, client->file);
if (ret)
return ret;
buffer->fb = drm_framebuffer_lookup(client->dev, buffer->client->file, fb_req.fb_id);
if (WARN_ON(!buffer->fb))
return -ENOENT;
/* drop the reference we picked up in framebuffer lookup */
drm_framebuffer_put(buffer->fb);
strscpy(buffer->fb->comm, client->name, TASK_COMM_LEN);
return 0;
}
/**
* drm_client_framebuffer_create - Create a client framebuffer
* @client: DRM client
* @width: Framebuffer width
* @height: Framebuffer height
* @format: Buffer format
*
* This function creates a &drm_client_buffer which consists of a
* &drm_framebuffer backed by a dumb buffer.
* Call drm_client_framebuffer_delete() to free the buffer.
*
* Returns:
* Pointer to a client buffer or an error pointer on failure.
*/
struct drm_client_buffer *
drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
{
struct drm_client_buffer *buffer;
u32 handle;
int ret;
buffer = drm_client_buffer_create(client, width, height, format,
&handle);
if (IS_ERR(buffer))
return buffer;
ret = drm_client_buffer_addfb(buffer, width, height, format, handle);
/*
* The handle is only needed for creating the framebuffer, destroy it
* again to solve a circular dependency should anybody export the GEM
* object as DMA-buf. The framebuffer and our buffer structure are still
* holding references to the GEM object to prevent its destruction.
*/
drm_mode_destroy_dumb(client->dev, handle, client->file);
if (ret) {
drm_client_buffer_delete(buffer);
return ERR_PTR(ret);
}
return buffer;
}
EXPORT_SYMBOL(drm_client_framebuffer_create);
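/*
 * A minimal buffer lifecycle sketch (hypothetical sizes, error handling
 * trimmed): create a dumb-buffer backed framebuffer, draw through a
 * temporary mapping, flush the result and finally delete the buffer.
 *
 *	struct drm_client_buffer *buffer;
 *	struct iosys_map map;
 *
 *	buffer = drm_client_framebuffer_create(client, 1024, 768,
 *					       DRM_FORMAT_XRGB8888);
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);
 *
 *	if (!drm_client_buffer_vmap(buffer, &map)) {
 *		// ... draw into 'map' ...
 *		drm_client_buffer_vunmap(buffer);
 *		drm_client_framebuffer_flush(buffer, NULL);
 *	}
 *
 *	// when the client is done with the buffer:
 *	drm_client_framebuffer_delete(buffer);
 */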
/**
* drm_client_framebuffer_delete - Delete a client framebuffer
* @buffer: DRM client buffer (can be NULL)
*/
void drm_client_framebuffer_delete(struct drm_client_buffer *buffer)
{
if (!buffer)
return;
drm_client_buffer_rmfb(buffer);
drm_client_buffer_delete(buffer);
}
EXPORT_SYMBOL(drm_client_framebuffer_delete);
/**
* drm_client_framebuffer_flush - Manually flush client framebuffer
* @buffer: DRM client buffer (can be NULL)
* @rect: Damage rectangle (if NULL flushes all)
*
* This calls &drm_framebuffer_funcs->dirty (if present) to flush buffer changes
* for drivers that need it.
*
* Returns:
* Zero on success or negative error code on failure.
*/
int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect)
{
if (!buffer || !buffer->fb || !buffer->fb->funcs->dirty)
return 0;
if (rect) {
struct drm_clip_rect clip = {
.x1 = rect->x1,
.y1 = rect->y1,
.x2 = rect->x2,
.y2 = rect->y2,
};
return buffer->fb->funcs->dirty(buffer->fb, buffer->client->file,
0, 0, &clip, 1);
}
return buffer->fb->funcs->dirty(buffer->fb, buffer->client->file,
0, 0, NULL, 0);
}
EXPORT_SYMBOL(drm_client_framebuffer_flush);
#ifdef CONFIG_DEBUG_FS
static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data)
{
struct drm_debugfs_entry *entry = m->private;
struct drm_device *dev = entry->dev;
struct drm_printer p = drm_seq_file_printer(m);
struct drm_client_dev *client;
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry(client, &dev->clientlist, list)
drm_printf(&p, "%s\n", client->name);
mutex_unlock(&dev->clientlist_mutex);
return 0;
}
static const struct drm_debugfs_info drm_client_debugfs_list[] = {
{ "internal_clients", drm_client_debugfs_internal_clients, 0 },
};
void drm_client_debugfs_init(struct drm_minor *minor)
{
drm_debugfs_add_files(minor->dev, drm_client_debugfs_list,
ARRAY_SIZE(drm_client_debugfs_list));
}
#endif
| linux-master | drivers/gpu/drm/drm_client.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 Noralf Trønnes
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
/**
* DOC: overview
*
* This helper library provides helpers for drivers for simple display
* hardware.
*
* drm_simple_display_pipe_init() initializes a simple display pipeline
* which has only one full-screen scanout buffer feeding one output. The
* pipeline is represented by &struct drm_simple_display_pipe and binds
* together &drm_plane, &drm_crtc and &drm_encoder structures into one fixed
* entity. Some flexibility for code reuse is provided through a separately
* allocated &drm_connector object and supporting optional &drm_bridge
* encoder drivers.
*
* Many drivers require only a very simple encoder that fulfills the minimum
* requirements of the display pipeline and does not add additional
* functionality. The function drm_simple_encoder_init() provides an
* implementation of such an encoder.
*/
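/*
 * A minimal pipeline setup sketch (hypothetical foo_* names): one plane,
 * one CRTC and one encoder are bound together with a single call.
 *
 *	ret = drm_simple_display_pipe_init(drm, &foo->pipe, &foo_pipe_funcs,
 *					   foo_formats, ARRAY_SIZE(foo_formats),
 *					   NULL, &foo->connector);
 *	if (ret)
 *		return ret;
 */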
static const struct drm_encoder_funcs drm_simple_encoder_funcs_cleanup = {
.destroy = drm_encoder_cleanup,
};
/**
* drm_simple_encoder_init - Initialize a preallocated encoder with
* basic functionality.
* @dev: drm device
* @encoder: the encoder to initialize
* @encoder_type: user visible type of the encoder
*
* Initialises a preallocated encoder that has no further functionality.
* Settings for possible CRTC and clones are left to their initial values.
* The encoder will be cleaned up automatically as part of the mode-setting
* cleanup.
*
* The caller of drm_simple_encoder_init() is responsible for freeing
* the encoder's memory after the encoder has been cleaned up. At the
* moment this only works reliably if the encoder data structure is
* stored in the device structure. Free the encoder's memory as part of
* the device release function.
*
* Note: consider using drmm_simple_encoder_alloc() instead of
* drm_simple_encoder_init() to let the DRM managed resource infrastructure
* take care of cleanup and deallocation.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_simple_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
int encoder_type)
{
return drm_encoder_init(dev, encoder,
&drm_simple_encoder_funcs_cleanup,
encoder_type, NULL);
}
EXPORT_SYMBOL(drm_simple_encoder_init);
void *__drmm_simple_encoder_alloc(struct drm_device *dev, size_t size,
size_t offset, int encoder_type)
{
return __drmm_encoder_alloc(dev, size, offset, NULL, encoder_type,
NULL);
}
EXPORT_SYMBOL(__drmm_simple_encoder_alloc);
static enum drm_mode_status
drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct drm_simple_display_pipe *pipe;
pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
if (!pipe->funcs || !pipe->funcs->mode_valid)
/* Anything goes */
return MODE_OK;
return pipe->funcs->mode_valid(pipe, mode);
}
static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
int ret;
if (!crtc_state->enable)
goto out;
ret = drm_atomic_helper_check_crtc_primary_plane(crtc_state);
if (ret)
return ret;
out:
return drm_atomic_add_affected_planes(state, crtc);
}
static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_plane *plane;
struct drm_simple_display_pipe *pipe;
pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
if (!pipe->funcs || !pipe->funcs->enable)
return;
plane = &pipe->plane;
pipe->funcs->enable(pipe, crtc->state, plane->state);
}
static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_simple_display_pipe *pipe;
pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
if (!pipe->funcs || !pipe->funcs->disable)
return;
pipe->funcs->disable(pipe);
}
static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = {
.mode_valid = drm_simple_kms_crtc_mode_valid,
.atomic_check = drm_simple_kms_crtc_check,
.atomic_enable = drm_simple_kms_crtc_enable,
.atomic_disable = drm_simple_kms_crtc_disable,
};
static void drm_simple_kms_crtc_reset(struct drm_crtc *crtc)
{
struct drm_simple_display_pipe *pipe;
pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
if (!pipe->funcs || !pipe->funcs->reset_crtc)
return drm_atomic_helper_crtc_reset(crtc);
return pipe->funcs->reset_crtc(pipe);
}
static struct drm_crtc_state *drm_simple_kms_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct drm_simple_display_pipe *pipe;
pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
if (!pipe->funcs || !pipe->funcs->duplicate_crtc_state)
return drm_atomic_helper_crtc_duplicate_state(crtc);
return pipe->funcs->duplicate_crtc_state(pipe);
}
static void drm_simple_kms_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
struct drm_simple_display_pipe *pipe;
pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
if (!pipe->funcs || !pipe->funcs->destroy_crtc_state)
drm_atomic_helper_crtc_destroy_state(crtc, state);
else
pipe->funcs->destroy_crtc_state(pipe, state);
}
static int drm_simple_kms_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct drm_simple_display_pipe *pipe;
pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
if (!pipe->funcs || !pipe->funcs->enable_vblank)
return 0;
return pipe->funcs->enable_vblank(pipe);
}
static void drm_simple_kms_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct drm_simple_display_pipe *pipe;
pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
if (!pipe->funcs || !pipe->funcs->disable_vblank)
return;
pipe->funcs->disable_vblank(pipe);
}
static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = {
.reset = drm_simple_kms_crtc_reset,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = drm_simple_kms_crtc_duplicate_state,
.atomic_destroy_state = drm_simple_kms_crtc_destroy_state,
.enable_vblank = drm_simple_kms_crtc_enable_vblank,
.disable_vblank = drm_simple_kms_crtc_disable_vblank,
};
static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_simple_display_pipe *pipe;
struct drm_crtc_state *crtc_state;
int ret;
pipe = container_of(plane, struct drm_simple_display_pipe, plane);
crtc_state = drm_atomic_get_new_crtc_state(state,
&pipe->crtc);
ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
false, false);
if (ret)
return ret;
if (!plane_state->visible)
return 0;
if (!pipe->funcs || !pipe->funcs->check)
return 0;
return pipe->funcs->check(pipe, plane_state, crtc_state);
}
static void drm_simple_kms_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_pstate = drm_atomic_get_old_plane_state(state,
plane);
struct drm_simple_display_pipe *pipe;
pipe = container_of(plane, struct drm_simple_display_pipe, plane);
if (!pipe->funcs || !pipe->funcs->update)
return;
pipe->funcs->update(pipe, old_pstate);
}
static int drm_simple_kms_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_simple_display_pipe *pipe;
pipe = container_of(plane, struct drm_simple_display_pipe, plane);
if (!pipe->funcs || !pipe->funcs->prepare_fb) {
if (WARN_ON_ONCE(!drm_core_check_feature(plane->dev, DRIVER_GEM)))
return 0;
WARN_ON_ONCE(pipe->funcs && pipe->funcs->cleanup_fb);
return drm_gem_plane_helper_prepare_fb(plane, state);
}
return pipe->funcs->prepare_fb(pipe, state);
}
static void drm_simple_kms_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_simple_display_pipe *pipe;
pipe = container_of(plane, struct drm_simple_display_pipe, plane);
if (!pipe->funcs || !pipe->funcs->cleanup_fb)
return;
pipe->funcs->cleanup_fb(pipe, state);
}
static int drm_simple_kms_plane_begin_fb_access(struct drm_plane *plane,
struct drm_plane_state *new_plane_state)
{
struct drm_simple_display_pipe *pipe;
pipe = container_of(plane, struct drm_simple_display_pipe, plane);
if (!pipe->funcs || !pipe->funcs->begin_fb_access)
return 0;
return pipe->funcs->begin_fb_access(pipe, new_plane_state);
}
static void drm_simple_kms_plane_end_fb_access(struct drm_plane *plane,
struct drm_plane_state *new_plane_state)
{
struct drm_simple_display_pipe *pipe;
pipe = container_of(plane, struct drm_simple_display_pipe, plane);
if (!pipe->funcs || !pipe->funcs->end_fb_access)
return;
pipe->funcs->end_fb_access(pipe, new_plane_state);
}
static bool drm_simple_kms_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
{
return modifier == DRM_FORMAT_MOD_LINEAR;
}
static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = {
.prepare_fb = drm_simple_kms_plane_prepare_fb,
.cleanup_fb = drm_simple_kms_plane_cleanup_fb,
.begin_fb_access = drm_simple_kms_plane_begin_fb_access,
.end_fb_access = drm_simple_kms_plane_end_fb_access,
.atomic_check = drm_simple_kms_plane_atomic_check,
.atomic_update = drm_simple_kms_plane_atomic_update,
};
static void drm_simple_kms_plane_reset(struct drm_plane *plane)
{
struct drm_simple_display_pipe *pipe;
pipe = container_of(plane, struct drm_simple_display_pipe, plane);
if (!pipe->funcs || !pipe->funcs->reset_plane)
return drm_atomic_helper_plane_reset(plane);
return pipe->funcs->reset_plane(pipe);
}
static struct drm_plane_state *drm_simple_kms_plane_duplicate_state(struct drm_plane *plane)
{
struct drm_simple_display_pipe *pipe;
pipe = container_of(plane, struct drm_simple_display_pipe, plane);
if (!pipe->funcs || !pipe->funcs->duplicate_plane_state)
return drm_atomic_helper_plane_duplicate_state(plane);
return pipe->funcs->duplicate_plane_state(pipe);
}
static void drm_simple_kms_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_simple_display_pipe *pipe;
pipe = container_of(plane, struct drm_simple_display_pipe, plane);
if (!pipe->funcs || !pipe->funcs->destroy_plane_state)
drm_atomic_helper_plane_destroy_state(plane, state);
else
pipe->funcs->destroy_plane_state(pipe, state);
}
static const struct drm_plane_funcs drm_simple_kms_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.reset = drm_simple_kms_plane_reset,
.atomic_duplicate_state = drm_simple_kms_plane_duplicate_state,
.atomic_destroy_state = drm_simple_kms_plane_destroy_state,
.format_mod_supported = drm_simple_kms_format_mod_supported,
};
/**
* drm_simple_display_pipe_attach_bridge - Attach a bridge to the display pipe
* @pipe: simple display pipe object
* @bridge: bridge to attach
*
* Makes it possible to still use the drm_simple_display_pipe helpers when
* a DRM bridge has to be used.
*
* Note that you probably want to initialize the pipe by passing a NULL
* connector to drm_simple_display_pipe_init().
*
* Returns:
* Zero on success, negative error code on failure.
*/
int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe,
struct drm_bridge *bridge)
{
return drm_bridge_attach(&pipe->encoder, bridge, NULL, 0);
}
EXPORT_SYMBOL(drm_simple_display_pipe_attach_bridge);
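/*
 * A short sketch (hypothetical names, error handling abbreviated): the pipe
 * is initialized without a connector and the bridge is attached afterwards.
 *
 * .. code-block:: c
 *
 *	ret = drm_simple_display_pipe_init(dev, &mydev->pipe, &my_pipe_funcs,
 *					   my_formats, ARRAY_SIZE(my_formats),
 *					   NULL, NULL);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_simple_display_pipe_attach_bridge(&mydev->pipe, bridge);
 */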
/**
* drm_simple_display_pipe_init - Initialize a simple display pipeline
* @dev: DRM device
* @pipe: simple display pipe object to initialize
* @funcs: callbacks for the display pipe (optional)
* @formats: array of supported formats (DRM_FORMAT\_\*)
* @format_count: number of elements in @formats
* @format_modifiers: array of formats modifiers
* @connector: connector to attach and register (optional)
*
 * Sets up a display pipeline which consists of a really simple
* plane-crtc-encoder pipe.
*
* If a connector is supplied, the pipe will be coupled with the provided
 * connector. You may supply a NULL connector when using DRM bridges that
* handle connectors themselves (see drm_simple_display_pipe_attach_bridge()).
*
* Teardown of a simple display pipe is all handled automatically by the drm
* core through calling drm_mode_config_cleanup(). Drivers afterwards need to
* release the memory for the structure themselves.
*
* Returns:
* Zero on success, negative error code on failure.
*/
int drm_simple_display_pipe_init(struct drm_device *dev,
struct drm_simple_display_pipe *pipe,
const struct drm_simple_display_pipe_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
const uint64_t *format_modifiers,
struct drm_connector *connector)
{
struct drm_encoder *encoder = &pipe->encoder;
struct drm_plane *plane = &pipe->plane;
struct drm_crtc *crtc = &pipe->crtc;
int ret;
pipe->connector = connector;
pipe->funcs = funcs;
drm_plane_helper_add(plane, &drm_simple_kms_plane_helper_funcs);
ret = drm_universal_plane_init(dev, plane, 0,
&drm_simple_kms_plane_funcs,
formats, format_count,
format_modifiers,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ret;
drm_crtc_helper_add(crtc, &drm_simple_kms_crtc_helper_funcs);
ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
&drm_simple_kms_crtc_funcs, NULL);
if (ret)
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_NONE);
if (ret || !connector)
return ret;
return drm_connector_attach_encoder(connector, encoder);
}
EXPORT_SYMBOL(drm_simple_display_pipe_init);
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/drm_simple_kms_helper.c |
/*
* Copyright (C) 2014 Intel Corporation
*
* DRM universal plane helper functions
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/list.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
#define SUBPIXEL_MASK 0xffff
/**
* DOC: overview
*
* This helper library contains helpers to implement primary plane support on
* top of the normal CRTC configuration interface.
* Since the legacy &drm_mode_config_funcs.set_config interface ties the primary
 * plane together with the CRTC state, this does not allow userspace to disable
 * the primary plane itself. The default primary plane only exposes XRGB8888 and
* ARGB8888 as valid pixel formats for the attached framebuffer.
*
* Drivers are highly recommended to implement proper support for primary
* planes, and newly merged drivers must not rely upon these transitional
* helpers.
*
* The plane helpers share the function table structures with other helpers,
* specifically also the atomic helpers. See &struct drm_plane_helper_funcs for
* the details.
*/
/*
* Returns the connectors currently associated with a CRTC. This function
* should be called twice: once with a NULL connector list to retrieve
* the list size, and once with the properly allocated list to be filled in.
*/
static int get_connectors_for_crtc(struct drm_crtc *crtc,
struct drm_connector **connector_list,
int num_connectors)
{
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
int count = 0;
/*
* Note: Once we change the plane hooks to more fine-grained locking we
* need to grab the connection_mutex here to be able to make these
* checks.
*/
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->encoder && connector->encoder->crtc == crtc) {
if (connector_list != NULL && count < num_connectors)
*(connector_list++) = connector;
count++;
}
}
drm_connector_list_iter_end(&conn_iter);
return count;
}
static int drm_plane_helper_check_update(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_rect *src,
struct drm_rect *dst,
unsigned int rotation,
int min_scale,
int max_scale,
bool can_position,
bool can_update_disabled,
bool *visible)
{
struct drm_plane_state plane_state = {
.plane = plane,
.crtc = crtc,
.fb = fb,
.src_x = src->x1,
.src_y = src->y1,
.src_w = drm_rect_width(src),
.src_h = drm_rect_height(src),
.crtc_x = dst->x1,
.crtc_y = dst->y1,
.crtc_w = drm_rect_width(dst),
.crtc_h = drm_rect_height(dst),
.rotation = rotation,
};
struct drm_crtc_state crtc_state = {
.crtc = crtc,
.enable = crtc->enabled,
.mode = crtc->mode,
};
int ret;
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
min_scale, max_scale,
can_position,
can_update_disabled);
if (ret)
return ret;
*src = plane_state.src;
*dst = plane_state.dst;
*visible = plane_state.visible;
return 0;
}
/**
* drm_plane_helper_update_primary - Helper for updating primary planes
* @plane: plane to update
* @crtc: the plane's new CRTC
* @fb: the plane's new framebuffer
* @crtc_x: x coordinate within CRTC
* @crtc_y: y coordinate within CRTC
* @crtc_w: width coordinate within CRTC
* @crtc_h: height coordinate within CRTC
* @src_x: x coordinate within source
* @src_y: y coordinate within source
* @src_w: width coordinate within source
* @src_h: height coordinate within source
* @ctx: modeset locking context
*
* This helper validates the given parameters and updates the primary plane.
*
* This function is only useful for non-atomic modesetting. Don't use
* it in new drivers.
*
* Returns:
* Zero on success, or an errno code otherwise.
*/
int drm_plane_helper_update_primary(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_mode_set set = {
.crtc = crtc,
.fb = fb,
.mode = &crtc->mode,
.x = src_x >> 16,
.y = src_y >> 16,
};
struct drm_rect src = {
.x1 = src_x,
.y1 = src_y,
.x2 = src_x + src_w,
.y2 = src_y + src_h,
};
struct drm_rect dest = {
.x1 = crtc_x,
.y1 = crtc_y,
.x2 = crtc_x + crtc_w,
.y2 = crtc_y + crtc_h,
};
struct drm_device *dev = plane->dev;
struct drm_connector **connector_list;
int num_connectors, ret;
bool visible;
if (drm_WARN_ON_ONCE(dev, drm_drv_uses_atomic_modeset(dev)))
return -EINVAL;
ret = drm_plane_helper_check_update(plane, crtc, fb,
&src, &dest,
DRM_MODE_ROTATE_0,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
false, false, &visible);
if (ret)
return ret;
if (!visible)
/*
* Primary plane isn't visible. Note that unless a driver
* provides their own disable function, this will just
* wind up returning -EINVAL to userspace.
*/
return plane->funcs->disable_plane(plane, ctx);
/* Find current connectors for CRTC */
num_connectors = get_connectors_for_crtc(crtc, NULL, 0);
BUG_ON(num_connectors == 0);
connector_list = kcalloc(num_connectors, sizeof(*connector_list),
GFP_KERNEL);
if (!connector_list)
return -ENOMEM;
get_connectors_for_crtc(crtc, connector_list, num_connectors);
set.connectors = connector_list;
set.num_connectors = num_connectors;
/*
* We call set_config() directly here rather than using
* drm_mode_set_config_internal. We're reprogramming the same
* connectors that were already in use, so we shouldn't need the extra
* cross-CRTC fb refcounting to accommodate stealing connectors.
* drm_mode_setplane() already handles the basic refcounting for the
* framebuffers involved in this operation.
*/
ret = crtc->funcs->set_config(&set, ctx);
kfree(connector_list);
return ret;
}
EXPORT_SYMBOL(drm_plane_helper_update_primary);
/**
* drm_plane_helper_disable_primary - Helper for disabling primary planes
* @plane: plane to disable
* @ctx: modeset locking context
*
* This helper returns an error when trying to disable the primary
* plane.
*
* This function is only useful for non-atomic modesetting. Don't use
* it in new drivers.
*
* Returns:
* An errno code.
*/
int drm_plane_helper_disable_primary(struct drm_plane *plane,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = plane->dev;
drm_WARN_ON_ONCE(dev, drm_drv_uses_atomic_modeset(dev));
return -EINVAL;
}
EXPORT_SYMBOL(drm_plane_helper_disable_primary);
/**
* drm_plane_helper_destroy() - Helper for primary plane destruction
* @plane: plane to destroy
*
* Provides a default plane destroy handler for primary planes. This handler
* is called during CRTC destruction. We disable the primary plane, remove
* it from the DRM plane list, and deallocate the plane structure.
*/
void drm_plane_helper_destroy(struct drm_plane *plane)
{
drm_plane_cleanup(plane);
kfree(plane);
}
EXPORT_SYMBOL(drm_plane_helper_destroy);
/**
* drm_plane_helper_atomic_check() - Helper to check plane atomic-state
* @plane: plane to check
* @state: atomic state object
*
* Provides a default plane-state check handler for planes whose atomic-state
* scale and positioning are not expected to change since the plane is always
* a fullscreen scanout buffer.
*
* This is often the case for the primary plane of simple framebuffers. See
* also drm_crtc_helper_atomic_check() for the respective CRTC-state check
* helper function.
*
* RETURNS:
* Zero on success, or an errno code otherwise.
*/
int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_crtc *new_crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state = NULL;
if (new_crtc)
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
false, false);
}
EXPORT_SYMBOL(drm_plane_helper_atomic_check);
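/*
 * A minimal sketch of how this check is typically wired up (hypothetical
 * update callback; assumes the plane always scans out a full-screen buffer):
 *
 * .. code-block:: c
 *
 *	static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
 *		.atomic_check = drm_plane_helper_atomic_check,
 *		.atomic_update = my_primary_plane_atomic_update,
 *	};
 */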
| linux-master | drivers/gpu/drm/drm_plane_helper.c |
/*
* Copyright (C) 2013 Red Hat
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/slab.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_print.h>
#include <drm/drm_util.h>
/**
* drm_flip_work_allocate_task - allocate a flip-work task
 * @data: data associated with the task
* @flags: allocator flags
*
* Allocate a drm_flip_task object and attach private data to it.
*/
struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags)
{
struct drm_flip_task *task;
task = kzalloc(sizeof(*task), flags);
if (task)
task->data = data;
return task;
}
EXPORT_SYMBOL(drm_flip_work_allocate_task);
/**
* drm_flip_work_queue_task - queue a specific task
* @work: the flip-work
* @task: the task to handle
*
 * Queues a task that will later be run (passed back to drm_flip_func_t
* func) on a work queue after drm_flip_work_commit() is called.
*/
void drm_flip_work_queue_task(struct drm_flip_work *work,
struct drm_flip_task *task)
{
unsigned long flags;
spin_lock_irqsave(&work->lock, flags);
list_add_tail(&task->node, &work->queued);
spin_unlock_irqrestore(&work->lock, flags);
}
EXPORT_SYMBOL(drm_flip_work_queue_task);
/**
* drm_flip_work_queue - queue work
* @work: the flip-work
* @val: the value to queue
*
 * Queues work that will later be run (passed back to drm_flip_func_t
* func) on a work queue after drm_flip_work_commit() is called.
*/
void drm_flip_work_queue(struct drm_flip_work *work, void *val)
{
struct drm_flip_task *task;
task = drm_flip_work_allocate_task(val,
drm_can_sleep() ? GFP_KERNEL : GFP_ATOMIC);
if (task) {
drm_flip_work_queue_task(work, task);
} else {
DRM_ERROR("%s could not allocate task!\n", work->name);
work->func(work, val);
}
}
EXPORT_SYMBOL(drm_flip_work_queue);
/**
* drm_flip_work_commit - commit queued work
* @work: the flip-work
* @wq: the work-queue to run the queued work on
*
* Trigger work previously queued by drm_flip_work_queue() to run
* on a workqueue. The typical usage would be to queue work (via
* drm_flip_work_queue()) at any point (from vblank irq and/or
* prior), and then from vblank irq commit the queued work.
*/
void drm_flip_work_commit(struct drm_flip_work *work,
struct workqueue_struct *wq)
{
unsigned long flags;
spin_lock_irqsave(&work->lock, flags);
list_splice_tail(&work->queued, &work->commited);
INIT_LIST_HEAD(&work->queued);
spin_unlock_irqrestore(&work->lock, flags);
queue_work(wq, &work->worker);
}
EXPORT_SYMBOL(drm_flip_work_commit);
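/*
 * A condensed usage sketch (hypothetical driver fields and callback): the
 * flip-work is initialized once, old framebuffers are queued when a page
 * flip is requested, and the queued work is committed from the vblank
 * interrupt handler.
 *
 * .. code-block:: c
 *
 *	static void my_unref_worker(struct drm_flip_work *work, void *val)
 *	{
 *		drm_framebuffer_put(val);
 *	}
 *
 *	drm_flip_work_init(&mycrtc->unref_work, "fb unref", my_unref_worker);
 *
 *	// when a page flip latches a new framebuffer:
 *	drm_flip_work_queue(&mycrtc->unref_work, old_fb);
 *
 *	// from the vblank interrupt handler:
 *	drm_flip_work_commit(&mycrtc->unref_work, system_unbound_wq);
 */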
static void flip_worker(struct work_struct *w)
{
struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
struct list_head tasks;
unsigned long flags;
while (1) {
struct drm_flip_task *task, *tmp;
INIT_LIST_HEAD(&tasks);
spin_lock_irqsave(&work->lock, flags);
list_splice_tail(&work->commited, &tasks);
INIT_LIST_HEAD(&work->commited);
spin_unlock_irqrestore(&work->lock, flags);
if (list_empty(&tasks))
break;
list_for_each_entry_safe(task, tmp, &tasks, node) {
work->func(work, task->data);
kfree(task);
}
}
}
/**
* drm_flip_work_init - initialize flip-work
* @work: the flip-work to initialize
* @name: debug name
* @func: the callback work function
*
* Initializes/allocates resources for the flip-work
*/
void drm_flip_work_init(struct drm_flip_work *work,
const char *name, drm_flip_func_t func)
{
work->name = name;
INIT_LIST_HEAD(&work->queued);
INIT_LIST_HEAD(&work->commited);
spin_lock_init(&work->lock);
work->func = func;
INIT_WORK(&work->worker, flip_worker);
}
EXPORT_SYMBOL(drm_flip_work_init);
/**
* drm_flip_work_cleanup - cleans up flip-work
* @work: the flip-work to cleanup
*
* Destroy resources allocated for the flip-work
*/
void drm_flip_work_cleanup(struct drm_flip_work *work)
{
WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited));
}
EXPORT_SYMBOL(drm_flip_work_cleanup);
| linux-master | drivers/gpu/drm/drm_flip_work.c |
/*
* Copyright © 2012 Red Hat
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Dave Airlie <[email protected]>
* Rob Clark <[email protected]>
*
*/
#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include "drm_internal.h"
MODULE_IMPORT_NS(DMA_BUF);
/**
* DOC: overview and lifetime rules
*
* Similar to GEM global names, PRIME file descriptors are also used to share
* buffer objects across processes. They offer additional security: as file
* descriptors must be explicitly sent over UNIX domain sockets to be shared
* between applications, they can't be guessed like the globally unique GEM
* names.
*
* Drivers that support the PRIME API implement the drm_gem_object_funcs.export
* and &drm_driver.gem_prime_import hooks. &dma_buf_ops implementations for
* drivers are all individually exported for drivers which need to overwrite
* or reimplement some of them.
*
* Reference Counting for GEM Drivers
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* On the export the &dma_buf holds a reference to the exported buffer object,
* usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
* IOCTL, when it first calls &drm_gem_object_funcs.export
* and stores the exporting GEM object in the &dma_buf.priv field. This
* reference needs to be released when the final reference to the &dma_buf
* itself is dropped and its &dma_buf_ops.release function is called. For
* GEM-based drivers, the &dma_buf should be exported using
* drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
*
* Thus the chain of references always flows in one direction, avoiding loops:
 * importing GEM object -> dma-buf -> exported GEM bo. Further complications
* are the lookup caches for import and export. These are required to guarantee
* that any given object will always have only one unique userspace handle. This
* is required to allow userspace to detect duplicated imports, since some GEM
* drivers do fail command submissions if a given buffer object is listed more
* than once. These import and export caches in &drm_prime_file_private only
* retain a weak reference, which is cleaned up when the corresponding object is
* released.
*
* Self-importing: If userspace is using PRIME as a replacement for flink then
* it will get a fd->handle request for a GEM object that it created. Drivers
* should detect this situation and return back the underlying object from the
* dma-buf private. For GEM based drivers this is handled in
* drm_gem_prime_import() already.
*/
struct drm_prime_member {
struct dma_buf *dma_buf;
uint32_t handle;
struct rb_node dmabuf_rb;
struct rb_node handle_rb;
};
static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf, uint32_t handle)
{
struct drm_prime_member *member;
struct rb_node **p, *rb;
member = kmalloc(sizeof(*member), GFP_KERNEL);
if (!member)
return -ENOMEM;
get_dma_buf(dma_buf);
member->dma_buf = dma_buf;
member->handle = handle;
rb = NULL;
p = &prime_fpriv->dmabufs.rb_node;
while (*p) {
struct drm_prime_member *pos;
rb = *p;
pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
if (dma_buf > pos->dma_buf)
p = &rb->rb_right;
else
p = &rb->rb_left;
}
rb_link_node(&member->dmabuf_rb, rb, p);
rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);
rb = NULL;
p = &prime_fpriv->handles.rb_node;
while (*p) {
struct drm_prime_member *pos;
rb = *p;
pos = rb_entry(rb, struct drm_prime_member, handle_rb);
if (handle > pos->handle)
p = &rb->rb_right;
else
p = &rb->rb_left;
}
rb_link_node(&member->handle_rb, rb, p);
rb_insert_color(&member->handle_rb, &prime_fpriv->handles);
return 0;
}
static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
uint32_t handle)
{
struct rb_node *rb;
rb = prime_fpriv->handles.rb_node;
while (rb) {
struct drm_prime_member *member;
member = rb_entry(rb, struct drm_prime_member, handle_rb);
if (member->handle == handle)
return member->dma_buf;
else if (member->handle < handle)
rb = rb->rb_right;
else
rb = rb->rb_left;
}
return NULL;
}
static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf,
uint32_t *handle)
{
struct rb_node *rb;
rb = prime_fpriv->dmabufs.rb_node;
while (rb) {
struct drm_prime_member *member;
member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
if (member->dma_buf == dma_buf) {
*handle = member->handle;
return 0;
} else if (member->dma_buf < dma_buf) {
rb = rb->rb_right;
} else {
rb = rb->rb_left;
}
}
return -ENOENT;
}
void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
uint32_t handle)
{
struct rb_node *rb;
mutex_lock(&prime_fpriv->lock);
rb = prime_fpriv->handles.rb_node;
while (rb) {
struct drm_prime_member *member;
member = rb_entry(rb, struct drm_prime_member, handle_rb);
if (member->handle == handle) {
rb_erase(&member->handle_rb, &prime_fpriv->handles);
rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
dma_buf_put(member->dma_buf);
kfree(member);
break;
} else if (member->handle < handle) {
rb = rb->rb_right;
} else {
rb = rb->rb_left;
}
}
mutex_unlock(&prime_fpriv->lock);
}
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
mutex_init(&prime_fpriv->lock);
prime_fpriv->dmabufs = RB_ROOT;
prime_fpriv->handles = RB_ROOT;
}
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
/* by now drm_gem_release should've made sure the list is empty */
WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}
/**
* drm_gem_dmabuf_export - &dma_buf export implementation for GEM
* @dev: parent device for the exported dmabuf
* @exp_info: the export information used by dma_buf_export()
*
* This wraps dma_buf_export() for use by generic GEM drivers that are using
* drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
* a reference to the &drm_device and the exported &drm_gem_object (stored in
* &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
*
* Returns the new dmabuf.
*/
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
struct dma_buf_export_info *exp_info)
{
struct drm_gem_object *obj = exp_info->priv;
struct dma_buf *dma_buf;
dma_buf = dma_buf_export(exp_info);
if (IS_ERR(dma_buf))
return dma_buf;
drm_dev_get(dev);
drm_gem_object_get(obj);
dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;
return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);
/**
* drm_gem_dmabuf_release - &dma_buf release implementation for GEM
* @dma_buf: buffer to be released
*
* Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
* must use this in their &dma_buf_ops structure as the release callback.
* drm_gem_dmabuf_release() should be used in conjunction with
* drm_gem_dmabuf_export().
*/
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
struct drm_gem_object *obj = dma_buf->priv;
struct drm_device *dev = obj->dev;
/* drop the reference on the export fd holds */
drm_gem_object_put(obj);
drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
/*
* drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
* @dev: drm_device to import into
* @file_priv: drm file-private structure
* @prime_fd: fd id of the dma-buf which should be imported
* @handle: pointer to storage for the handle of the imported buffer object
*
 * This is the PRIME import function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object.
* The actual importing of GEM object from the dma-buf is done through the
* &drm_driver.gem_prime_import driver callback.
*
* Returns 0 on success or a negative error code on failure.
*/
static int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv, int prime_fd,
uint32_t *handle)
{
struct dma_buf *dma_buf;
struct drm_gem_object *obj;
int ret;
dma_buf = dma_buf_get(prime_fd);
if (IS_ERR(dma_buf))
return PTR_ERR(dma_buf);
mutex_lock(&file_priv->prime.lock);
ret = drm_prime_lookup_buf_handle(&file_priv->prime,
dma_buf, handle);
if (ret == 0)
goto out_put;
/* never seen this one, need to import */
mutex_lock(&dev->object_name_lock);
if (dev->driver->gem_prime_import)
obj = dev->driver->gem_prime_import(dev, dma_buf);
else
obj = drm_gem_prime_import(dev, dma_buf);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto out_unlock;
}
if (obj->dma_buf) {
WARN_ON(obj->dma_buf != dma_buf);
} else {
obj->dma_buf = dma_buf;
get_dma_buf(dma_buf);
}
/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
ret = drm_gem_handle_create_tail(file_priv, obj, handle);
drm_gem_object_put(obj);
if (ret)
goto out_put;
ret = drm_prime_add_buf_handle(&file_priv->prime,
dma_buf, *handle);
mutex_unlock(&file_priv->prime.lock);
if (ret)
goto fail;
dma_buf_put(dma_buf);
return 0;
fail:
	/* If a driver is already attached, we rely on the free-object path
	 * to detach it, which is sufficient here.
	 */
drm_gem_handle_delete(file_priv, *handle);
dma_buf_put(dma_buf);
return ret;
out_unlock:
mutex_unlock(&dev->object_name_lock);
out_put:
mutex_unlock(&file_priv->prime.lock);
dma_buf_put(dma_buf);
return ret;
}
int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_prime_handle *args = data;
if (dev->driver->prime_fd_to_handle) {
return dev->driver->prime_fd_to_handle(dev, file_priv, args->fd,
&args->handle);
}
return drm_gem_prime_fd_to_handle(dev, file_priv, args->fd, &args->handle);
}
static struct dma_buf *export_and_register_object(struct drm_device *dev,
struct drm_gem_object *obj,
uint32_t flags)
{
struct dma_buf *dmabuf;
/* prevent races with concurrent gem_close. */
if (obj->handle_count == 0) {
dmabuf = ERR_PTR(-ENOENT);
return dmabuf;
}
if (obj->funcs && obj->funcs->export)
dmabuf = obj->funcs->export(obj, flags);
else
dmabuf = drm_gem_prime_export(obj, flags);
if (IS_ERR(dmabuf)) {
/* normally the created dma-buf takes ownership of the ref,
* but if that fails then drop the ref
*/
return dmabuf;
}
/*
* Note that callers do not need to clean up the export cache
* since the check for obj->handle_count guarantees that someone
* will clean it up.
*/
obj->dma_buf = dmabuf;
get_dma_buf(obj->dma_buf);
return dmabuf;
}
/*
* drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
* @dev: dev to export the buffer from
* @file_priv: drm file-private structure
* @handle: buffer handle to export
* @flags: flags like DRM_CLOEXEC
* @prime_fd: pointer to storage for the fd id of the create dma-buf
*
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object.
* The actual exporting from GEM object to a dma-buf is done through the
* &drm_gem_object_funcs.export callback.
*/
static int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle,
uint32_t flags,
int *prime_fd)
{
struct drm_gem_object *obj;
int ret = 0;
struct dma_buf *dmabuf;
mutex_lock(&file_priv->prime.lock);
obj = drm_gem_object_lookup(file_priv, handle);
if (!obj) {
ret = -ENOENT;
goto out_unlock;
}
dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
if (dmabuf) {
get_dma_buf(dmabuf);
goto out_have_handle;
}
mutex_lock(&dev->object_name_lock);
/* re-export the original imported object */
if (obj->import_attach) {
dmabuf = obj->import_attach->dmabuf;
get_dma_buf(dmabuf);
goto out_have_obj;
}
if (obj->dma_buf) {
get_dma_buf(obj->dma_buf);
dmabuf = obj->dma_buf;
goto out_have_obj;
}
dmabuf = export_and_register_object(dev, obj, flags);
if (IS_ERR(dmabuf)) {
/* normally the created dma-buf takes ownership of the ref,
* but if that fails then drop the ref
*/
ret = PTR_ERR(dmabuf);
mutex_unlock(&dev->object_name_lock);
goto out;
}
out_have_obj:
/*
* If we've exported this buffer then cheat and add it to the import list
* so we get the correct handle back. We must do this under the
* protection of dev->object_name_lock to ensure that a racing gem close
* ioctl doesn't miss to remove this buffer handle from the cache.
*/
ret = drm_prime_add_buf_handle(&file_priv->prime,
dmabuf, handle);
mutex_unlock(&dev->object_name_lock);
if (ret)
goto fail_put_dmabuf;
out_have_handle:
ret = dma_buf_fd(dmabuf, flags);
/*
* We must _not_ remove the buffer from the handle cache since the newly
* created dma buf is already linked in the global obj->dma_buf pointer,
* and that is invariant as long as a userspace gem handle exists.
* Closing the handle will clean out the cache anyway, so we don't leak.
*/
if (ret < 0) {
goto fail_put_dmabuf;
} else {
*prime_fd = ret;
ret = 0;
}
goto out;
fail_put_dmabuf:
dma_buf_put(dmabuf);
out:
drm_gem_object_put(obj);
out_unlock:
mutex_unlock(&file_priv->prime.lock);
return ret;
}
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_prime_handle *args = data;
/* check flags are valid */
if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
return -EINVAL;
if (dev->driver->prime_handle_to_fd) {
return dev->driver->prime_handle_to_fd(dev, file_priv,
args->handle, args->flags,
&args->fd);
}
return drm_gem_prime_handle_to_fd(dev, file_priv, args->handle,
args->flags, &args->fd);
}
/**
* DOC: PRIME Helpers
*
* Drivers can implement &drm_gem_object_funcs.export and
* &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
* functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
* implement dma-buf support in terms of some lower-level helpers, which are
* again exported for drivers to use individually:
*
* Exporting buffers
* ~~~~~~~~~~~~~~~~~
*
* Optional pinning of buffers is handled at dma-buf attach and detach time in
* drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
* handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which relies on
* &drm_gem_object_funcs.get_sg_table. If &drm_gem_object_funcs.get_sg_table is
* unimplemented, exports into another device are rejected.
*
* For kernel-internal access there's drm_gem_dmabuf_vmap() and
* drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
* drm_gem_dmabuf_mmap().
*
* Note that these export helpers can only be used if the underlying backing
* storage is fully coherent and either permanently pinned, or it is safe to pin
* it indefinitely.
*
* FIXME: The underlying helper functions are named rather inconsistently.
*
* Importing buffers
* ~~~~~~~~~~~~~~~~~
*
* Importing dma-bufs using drm_gem_prime_import() relies on
* &drm_driver.gem_prime_import_sg_table.
*
* Note that similarly to the export helpers this permanently pins the
* underlying backing storage. Which is ok for scanout, but is not the best
* option for sharing lots of buffers for rendering.
*/
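/*
 * A hedged sketch of how a GEM driver typically hooks up these helpers
 * (hypothetical callback names): the import side is provided through
 * &drm_driver.gem_prime_import_sg_table, while the export side usually
 * only needs a working &drm_gem_object_funcs.get_sg_table implementation,
 * since drm_gem_prime_export() and the &dma_buf_ops defined below are used
 * by default.
 *
 * .. code-block:: c
 *
 *	static const struct drm_driver my_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		.gem_prime_import_sg_table = my_gem_prime_import_sg_table,
 *		.fops = &my_fops,
 *	};
 */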
/**
* drm_gem_map_attach - dma_buf attach implementation for GEM
* @dma_buf: buffer to attach device to
* @attach: buffer attachment data
*
* Calls &drm_gem_object_funcs.pin for device specific handling. This can be
* used as the &dma_buf_ops.attach callback. Must be used together with
* drm_gem_map_detach().
*
* Returns 0 on success, negative error code on failure.
*/
int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dma_buf->priv;
if (!obj->funcs->get_sg_table)
return -ENOSYS;
return drm_gem_pin(obj);
}
EXPORT_SYMBOL(drm_gem_map_attach);
/**
* drm_gem_map_detach - dma_buf detach implementation for GEM
* @dma_buf: buffer to detach from
* @attach: attachment to be detached
*
 * Calls &drm_gem_object_funcs.unpin for device specific handling. Cleans up
* &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
* &dma_buf_ops.detach callback.
*/
void drm_gem_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dma_buf->priv;
drm_gem_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);
/**
* drm_gem_map_dma_buf - map_dma_buf implementation for GEM
* @attach: attachment whose scatterlist is to be returned
* @dir: direction of DMA transfer
*
* Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
* can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
* with drm_gem_unmap_dma_buf().
*
 * Returns: an sg_table containing the scatterlist for the buffer, or an
 * ERR_PTR()-encoded error code on failure. May return -EINTR if it is
 * interrupted by a signal.
*/
struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct sg_table *sgt;
int ret;
if (WARN_ON(dir == DMA_NONE))
return ERR_PTR(-EINVAL);
if (WARN_ON(!obj->funcs->get_sg_table))
return ERR_PTR(-ENOSYS);
sgt = obj->funcs->get_sg_table(obj);
if (IS_ERR(sgt))
return sgt;
ret = dma_map_sgtable(attach->dev, sgt, dir,
DMA_ATTR_SKIP_CPU_SYNC);
if (ret) {
sg_free_table(sgt);
kfree(sgt);
sgt = ERR_PTR(ret);
}
return sgt;
}
EXPORT_SYMBOL(drm_gem_map_dma_buf);
/**
* drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
* @attach: attachment to unmap buffer from
* @sgt: scatterlist info of the buffer to unmap
* @dir: direction of DMA transfer
*
* This can be used as the &dma_buf_ops.unmap_dma_buf callback.
*/
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
if (!sgt)
return;
dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
sg_free_table(sgt);
kfree(sgt);
}
EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
/**
* drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
* @dma_buf: buffer to be mapped
* @map: the virtual address of the buffer
*
* Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
* callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
* The kernel virtual address is returned in map.
*
* Returns 0 on success or a negative errno code otherwise.
*/
int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
return drm_gem_vmap(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
/**
* drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
* @dma_buf: buffer to be unmapped
* @map: the virtual address of the buffer
*
* Releases a kernel virtual mapping. This can be used as the
* &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling.
*/
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
drm_gem_vunmap(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
/**
* drm_gem_prime_mmap - PRIME mmap function for GEM drivers
* @obj: GEM object
* @vma: Virtual address range
*
* This function sets up a userspace mapping for PRIME exported buffers using
* the same codepath that is used for regular GEM buffer mapping on the DRM fd.
* The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
* called to set up the mapping.
*/
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct drm_file *priv;
struct file *fil;
int ret;
/* Add the fake offset */
vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
if (obj->funcs && obj->funcs->mmap) {
vma->vm_ops = obj->funcs->vm_ops;
drm_gem_object_get(obj);
ret = obj->funcs->mmap(obj, vma);
if (ret) {
drm_gem_object_put(obj);
return ret;
}
vma->vm_private_data = obj;
return 0;
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
fil = kzalloc(sizeof(*fil), GFP_KERNEL);
if (!priv || !fil) {
ret = -ENOMEM;
goto out;
}
/* Used by drm_gem_mmap() to lookup the GEM object */
priv->minor = obj->dev->primary;
fil->private_data = priv;
ret = drm_vma_node_allow(&obj->vma_node, priv);
if (ret)
goto out;
ret = obj->dev->driver->fops->mmap(fil, vma);
drm_vma_node_revoke(&obj->vma_node, priv);
out:
kfree(priv);
kfree(fil);
return ret;
}
EXPORT_SYMBOL(drm_gem_prime_mmap);
/**
* drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
* @dma_buf: buffer to be mapped
* @vma: virtual address range
*
* Provides memory mapping for the buffer. This can be used as the
* &dma_buf_ops.mmap callback. It just forwards to drm_gem_prime_mmap().
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
struct drm_gem_object *obj = dma_buf->priv;
return drm_gem_prime_mmap(obj, vma);
}
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
.cache_sgt_mapping = true,
.attach = drm_gem_map_attach,
.detach = drm_gem_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
.unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
};
/**
* drm_prime_pages_to_sg - converts a page array into an sg list
* @dev: DRM device
* @pages: pointer to the array of page pointers to convert
* @nr_pages: length of the page vector
*
 * This helper creates an sg table object from a set of pages. The driver
 * is responsible for mapping the pages into the importer's address space
 * for use with dma_buf itself.
*
* This is useful for implementing &drm_gem_object_funcs.get_sg_table.
*/
struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
struct page **pages, unsigned int nr_pages)
{
struct sg_table *sg;
size_t max_segment = 0;
int err;
sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!sg)
return ERR_PTR(-ENOMEM);
if (dev)
max_segment = dma_max_mapping_size(dev->dev);
if (max_segment == 0)
max_segment = UINT_MAX;
err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
nr_pages << PAGE_SHIFT,
max_segment, GFP_KERNEL);
if (err) {
kfree(sg);
sg = ERR_PTR(err);
}
return sg;
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
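/*
 * A minimal sketch of a &drm_gem_object_funcs.get_sg_table implementation
 * built on top of this helper (hypothetical object type that keeps its
 * backing pages in 'pages' and their count in 'num_pages'):
 *
 * .. code-block:: c
 *
 *	static struct sg_table *my_gem_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct my_gem_object *mobj = to_my_gem_object(obj);
 *
 *		return drm_prime_pages_to_sg(obj->dev, mobj->pages,
 *					     mobj->num_pages);
 *	}
 */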
/**
* drm_prime_get_contiguous_size - returns the contiguous size of the buffer
* @sgt: sg_table describing the buffer to check
*
* This helper calculates the contiguous size in the DMA address space
* of the buffer described by the provided sg_table.
*
* This is useful for implementing
* &drm_gem_object_funcs.gem_prime_import_sg_table.
*/
unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
{
dma_addr_t expected = sg_dma_address(sgt->sgl);
struct scatterlist *sg;
unsigned long size = 0;
int i;
for_each_sgtable_dma_sg(sgt, sg, i) {
unsigned int len = sg_dma_len(sg);
if (!len)
break;
if (sg_dma_address(sg) != expected)
break;
expected += len;
size += len;
}
return size;
}
EXPORT_SYMBOL(drm_prime_get_contiguous_size);
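/*
 * A short sketch of the typical use in a
 * &drm_driver.gem_prime_import_sg_table implementation for hardware that
 * requires DMA-contiguous buffers (names hypothetical):
 *
 * .. code-block:: c
 *
 *	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
 *		return ERR_PTR(-EINVAL);
 */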
/**
* drm_gem_prime_export - helper library implementation of the export callback
* @obj: GEM object to export
* @flags: flags like DRM_CLOEXEC and DRM_RDWR
*
* This is the implementation of the &drm_gem_object_funcs.export functions
* for GEM drivers using the PRIME helpers. It is used as the default for
* drivers that do not set their own.
*/
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
int flags)
{
struct drm_device *dev = obj->dev;
struct dma_buf_export_info exp_info = {
.exp_name = KBUILD_MODNAME, /* white lie for debug */
.owner = dev->driver->fops->owner,
.ops = &drm_gem_prime_dmabuf_ops,
.size = obj->size,
.flags = flags,
.priv = obj,
.resv = obj->resv,
};
return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);
/**
* drm_gem_prime_import_dev - core implementation of the import callback
* @dev: drm_device to import into
* @dma_buf: dma-buf object to import
* @attach_dev: struct device to dma_buf attach
*
* This is the core of drm_gem_prime_import(). It's designed to be called by
* drivers who want to use a different device structure than &drm_device.dev for
* attaching via dma_buf. This function calls
* &drm_driver.gem_prime_import_sg_table internally.
*
* Drivers must arrange to call drm_prime_gem_destroy() from their
* &drm_gem_object_funcs.free hook when using this function.
*/
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
struct dma_buf *dma_buf,
struct device *attach_dev)
{
struct dma_buf_attachment *attach;
struct sg_table *sgt;
struct drm_gem_object *obj;
int ret;
if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
obj = dma_buf->priv;
if (obj->dev == dev) {
/*
* Importing dmabuf exported from our own gem increases
* refcount on gem itself instead of f_count of dmabuf.
*/
drm_gem_object_get(obj);
return obj;
}
}
if (!dev->driver->gem_prime_import_sg_table)
return ERR_PTR(-EINVAL);
attach = dma_buf_attach(dma_buf, attach_dev);
if (IS_ERR(attach))
return ERR_CAST(attach);
get_dma_buf(dma_buf);
sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto fail_detach;
}
obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto fail_unmap;
}
obj->import_attach = attach;
obj->resv = dma_buf->resv;
return obj;
fail_unmap:
dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);
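/*
 * A hedged sketch of a driver-specific import hook that attaches through a
 * dedicated DMA device instead of &drm_device.dev (the 'dma_dev' field is
 * hypothetical):
 *
 * .. code-block:: c
 *
 *	static struct drm_gem_object *
 *	my_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
 *	{
 *		struct my_device *mydev = to_my_device(dev);
 *
 *		return drm_gem_prime_import_dev(dev, dma_buf, mydev->dma_dev);
 *	}
 */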
/**
* drm_gem_prime_import - helper library implementation of the import callback
* @dev: drm_device to import into
* @dma_buf: dma-buf object to import
*
* This is the implementation of the gem_prime_import functions for GEM
* drivers using the PRIME helpers. It is the default for drivers that do
* not set their own &drm_driver.gem_prime_import.
*
* Drivers must arrange to call drm_prime_gem_destroy() from their
* &drm_gem_object_funcs.free hook when using this function.
*/
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
}
EXPORT_SYMBOL(drm_gem_prime_import);
/**
* drm_prime_sg_to_page_array - convert an sg table into a page array
* @sgt: scatter-gather table to convert
* @pages: array of page pointers to store the pages in
* @max_entries: size of the passed-in array
*
* Exports an sg table into an array of pages.
*
* This function is deprecated and strongly discouraged to be used.
* The page array is only useful for page faults and those can corrupt fields
* in the struct page if they are not handled by the exporting driver.
*/
int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt,
struct page **pages,
int max_entries)
{
struct sg_page_iter page_iter;
struct page **p = pages;
for_each_sgtable_page(sgt, &page_iter, 0) {
if (WARN_ON(p - pages >= max_entries))
return -1;
*p++ = sg_page_iter_page(&page_iter);
}
return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_array);
/**
* drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array
* @sgt: scatter-gather table to convert
* @addrs: array to store the dma bus address of each page
* @max_entries: size of both the passed-in arrays
*
* Exports an sg table into an array of addresses.
*
* Drivers should use this in their &drm_driver.gem_prime_import_sg_table
* implementation.
*/
int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
int max_entries)
{
struct sg_dma_page_iter dma_iter;
dma_addr_t *a = addrs;
for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
if (WARN_ON(a - addrs >= max_entries))
return -1;
*a++ = sg_page_iter_dma_address(&dma_iter);
}
return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array);
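/*
 * A brief sketch (hypothetical buffer object with a preallocated
 * 'dma_addrs' array sized for 'num_pages' entries):
 *
 * .. code-block:: c
 *
 *	ret = drm_prime_sg_to_dma_addr_array(sgt, bo->dma_addrs,
 *					     bo->num_pages);
 *	if (ret < 0)
 *		return ERR_PTR(-EINVAL);
 */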
/**
* drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
* @obj: GEM object which was created from a dma-buf
* @sg: the sg-table which was pinned at import time
*
* This is the cleanup functions which GEM drivers need to call when they use
* drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
*/
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
struct dma_buf_attachment *attach;
struct dma_buf *dma_buf;
attach = obj->import_attach;
if (sg)
dma_buf_unmap_attachment_unlocked(attach, sg, DMA_BIDIRECTIONAL);
dma_buf = attach->dmabuf;
dma_buf_detach(attach->dmabuf, attach);
/* remove the reference */
dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
| linux-master | drivers/gpu/drm/drm_prime.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/iosys-map.h>
#include <linux/module.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane.h>
#include <drm/drm_prime.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_tt.h>
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
/**
* DOC: overview
*
* This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM
* buffer object that is backed by video RAM (VRAM). It can be used for
* framebuffer devices with dedicated memory.
*
* The data structure &struct drm_vram_mm and its helpers implement a memory
* manager for simple framebuffer devices with dedicated video memory. GEM
* VRAM buffer objects are either placed in the video memory or remain evicted
* to system memory.
*
* With the GEM interface userspace applications create, manage and destroy
* graphics buffers, such as an on-screen framebuffer. GEM does not provide
* an implementation of these interfaces. It's up to the DRM driver to
* provide an implementation that suits the hardware. If the hardware device
* contains dedicated video memory, the DRM driver can use the VRAM helper
* library. Each active buffer object is stored in video RAM. Active
 * buffers are used for drawing the current frame, typically something like
* the frame's scanout buffer or the cursor image. If there's no more space
* left in VRAM, inactive GEM objects can be moved to system memory.
*
* To initialize the VRAM helper library call drmm_vram_helper_init().
* The function allocates and initializes an instance of &struct drm_vram_mm
* in &struct drm_device.vram_mm . Use &DRM_GEM_VRAM_DRIVER to initialize
* &struct drm_driver and &DRM_VRAM_MM_FILE_OPERATIONS to initialize
* &struct file_operations; as illustrated below.
*
* .. code-block:: c
*
 *	struct file_operations fops = {
 *		.owner = THIS_MODULE,
 *		DRM_VRAM_MM_FILE_OPERATIONS
 *	};
 *	struct drm_driver drv = {
 *		.driver_features = DRM_ ... ,
 *		.fops = &fops,
 *		DRM_GEM_VRAM_DRIVER
 *	};
*
* int init_drm_driver()
* {
* struct drm_device *dev;
* uint64_t vram_base;
* unsigned long vram_size;
* int ret;
*
* // setup device, vram base and size
* // ...
*
* ret = drmm_vram_helper_init(dev, vram_base, vram_size);
* if (ret)
* return ret;
* return 0;
* }
*
* This creates an instance of &struct drm_vram_mm, exports DRM userspace
* interfaces for GEM buffer management and initializes file operations to
* allow for accessing created GEM buffers. With this setup, the DRM driver
* manages an area of video RAM with VRAM MM and provides GEM VRAM objects
* to userspace.
*
* You don't have to clean up the instance of VRAM MM.
* drmm_vram_helper_init() is a managed interface that installs a
* clean-up handler to run during the DRM device's release.
*
 * For drawing or scanout operations, the respective buffer objects must be pinned
* in video RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
* &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
* memory. Call drm_gem_vram_unpin() to release the pinned object afterwards.
*
* A buffer object that is pinned in video RAM has a fixed address within that
* memory region. Call drm_gem_vram_offset() to retrieve this value. Typically
* it's used to program the hardware's scanout engine for framebuffers, set
* the cursor overlay's image for a mouse cursor, or use it as input to the
* hardware's drawing engine.
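 *
 * As a sketch, pinning a scanout buffer and programming its offset might look
 * like this (error handling abridged):
 *
 * .. code-block:: c
 *
 *     ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
 *     if (ret)
 *             return ret;
 *     offset = drm_gem_vram_offset(gbo);
 *     /* program the hardware's scanout engine with the returned offset */
 *     drm_gem_vram_unpin(gbo);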
*
* To access a buffer object's memory from the DRM driver, call
* drm_gem_vram_vmap(). It maps the buffer into kernel address
* space and returns the memory address. Use drm_gem_vram_vunmap() to
* release the mapping.
*/
/*
* Buffer-objects helpers
*/
static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
{
/* We got here via ttm_bo_put(), which means that the
* TTM buffer object in 'bo' has already been cleaned
* up; only release the GEM object.
*/
WARN_ON(gbo->vmap_use_count);
WARN_ON(iosys_map_is_set(&gbo->map));
drm_gem_object_release(&gbo->bo.base);
}
static void drm_gem_vram_destroy(struct drm_gem_vram_object *gbo)
{
drm_gem_vram_cleanup(gbo);
kfree(gbo);
}
static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);
drm_gem_vram_destroy(gbo);
}
static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
unsigned long pl_flag)
{
u32 invariant_flags = 0;
unsigned int i;
unsigned int c = 0;
if (pl_flag & DRM_GEM_VRAM_PL_FLAG_TOPDOWN)
invariant_flags = TTM_PL_FLAG_TOPDOWN;
gbo->placement.placement = gbo->placements;
gbo->placement.busy_placement = gbo->placements;
if (pl_flag & DRM_GEM_VRAM_PL_FLAG_VRAM) {
gbo->placements[c].mem_type = TTM_PL_VRAM;
gbo->placements[c++].flags = invariant_flags;
}
if (pl_flag & DRM_GEM_VRAM_PL_FLAG_SYSTEM || !c) {
gbo->placements[c].mem_type = TTM_PL_SYSTEM;
gbo->placements[c++].flags = invariant_flags;
}
gbo->placement.num_placement = c;
gbo->placement.num_busy_placement = c;
for (i = 0; i < c; ++i) {
gbo->placements[i].fpfn = 0;
gbo->placements[i].lpfn = 0;
}
}
/**
* drm_gem_vram_create() - Creates a VRAM-backed GEM object
* @dev: the DRM device
* @size: the buffer size in bytes
* @pg_align: the buffer's alignment in multiples of the page size
*
* GEM objects are allocated by calling struct drm_driver.gem_create_object,
* if set. Otherwise kzalloc() will be used. Drivers can set their own GEM
* object functions in struct drm_driver.gem_create_object. If no functions
* are set, the new GEM object will use the default functions from GEM VRAM
* helpers.
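 *
 * A typical call is sketched below; the size and alignment values depend on
 * the driver:
 *
 * .. code-block:: c
 *
 *     gbo = drm_gem_vram_create(dev, size, 0);
 *     if (IS_ERR(gbo))
 *             return PTR_ERR(gbo);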
*
* Returns:
* A new instance of &struct drm_gem_vram_object on success, or
* an ERR_PTR()-encoded error code otherwise.
*/
struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
size_t size,
unsigned long pg_align)
{
struct drm_gem_vram_object *gbo;
struct drm_gem_object *gem;
struct drm_vram_mm *vmm = dev->vram_mm;
struct ttm_device *bdev;
int ret;
if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
return ERR_PTR(-EINVAL);
if (dev->driver->gem_create_object) {
gem = dev->driver->gem_create_object(dev, size);
if (IS_ERR(gem))
return ERR_CAST(gem);
gbo = drm_gem_vram_of_gem(gem);
} else {
gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
if (!gbo)
return ERR_PTR(-ENOMEM);
gem = &gbo->bo.base;
}
if (!gem->funcs)
gem->funcs = &drm_gem_vram_object_funcs;
ret = drm_gem_object_init(dev, gem, size);
if (ret) {
kfree(gbo);
return ERR_PTR(ret);
}
bdev = &vmm->bdev;
gbo->bo.bdev = bdev;
drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
/*
	 * A failing ttm_bo_init_validate() will call ttm_buffer_object_destroy()
* to release gbo->bo.base and kfree gbo.
*/
ret = ttm_bo_init_validate(bdev, &gbo->bo, ttm_bo_type_device,
&gbo->placement, pg_align, false, NULL, NULL,
ttm_buffer_object_destroy);
if (ret)
return ERR_PTR(ret);
return gbo;
}
EXPORT_SYMBOL(drm_gem_vram_create);
/**
* drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
* @gbo: the GEM VRAM object
*
* See ttm_bo_put() for more information.
*/
void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
{
ttm_bo_put(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_put);
static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
{
/* Keep TTM behavior for now, remove when drivers are audited */
if (WARN_ON_ONCE(!gbo->bo.resource ||
gbo->bo.resource->mem_type == TTM_PL_SYSTEM))
return 0;
return gbo->bo.resource->start;
}
/**
* drm_gem_vram_offset() - \
Returns a GEM VRAM object's offset in video memory
* @gbo: the GEM VRAM object
*
* This function returns the buffer object's offset in the device's video
* memory. The buffer object has to be pinned to %TTM_PL_VRAM.
*
* Returns:
* The buffer object's offset in video memory on success, or
* a negative errno code otherwise.
*/
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
{
if (WARN_ON_ONCE(!gbo->bo.pin_count))
return (s64)-ENODEV;
return drm_gem_vram_pg_offset(gbo) << PAGE_SHIFT;
}
EXPORT_SYMBOL(drm_gem_vram_offset);
static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
unsigned long pl_flag)
{
struct ttm_operation_ctx ctx = { false, false };
int ret;
if (gbo->bo.pin_count)
goto out;
if (pl_flag)
drm_gem_vram_placement(gbo, pl_flag);
ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
if (ret < 0)
return ret;
out:
ttm_bo_pin(&gbo->bo);
return 0;
}
/**
* drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
* @gbo: the GEM VRAM object
* @pl_flag: a bitmask of possible memory regions
*
* Pinning a buffer object ensures that it is not evicted from
* a memory region. A pinned buffer object has to be unpinned before
* it can be pinned to another region. If the pl_flag argument is 0,
* the buffer is pinned at its current location (video RAM or system
* memory).
*
* Small buffer objects, such as cursor images, can lead to memory
* fragmentation if they are pinned in the middle of video RAM. This
* is especially a problem on devices with only a small amount of
* video RAM. Fragmentation can prevent the primary framebuffer from
* fitting in, even though there's enough memory overall. The modifier
* DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned
* at the high end of the memory region to avoid fragmentation.
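 *
 * As a sketch, a small cursor buffer could therefore be pinned like this:
 *
 * .. code-block:: c
 *
 *     ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
 *                                 DRM_GEM_VRAM_PL_FLAG_TOPDOWN);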
*
* Returns:
* 0 on success, or
* a negative error code otherwise.
*/
int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
int ret;
ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
if (ret)
return ret;
ret = drm_gem_vram_pin_locked(gbo, pl_flag);
ttm_bo_unreserve(&gbo->bo);
return ret;
}
EXPORT_SYMBOL(drm_gem_vram_pin);
static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
ttm_bo_unpin(&gbo->bo);
}
/**
* drm_gem_vram_unpin() - Unpins a GEM VRAM object
* @gbo: the GEM VRAM object
*
* Returns:
* 0 on success, or
* a negative error code otherwise.
*/
int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
{
int ret;
ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
if (ret)
return ret;
drm_gem_vram_unpin_locked(gbo);
ttm_bo_unreserve(&gbo->bo);
return 0;
}
EXPORT_SYMBOL(drm_gem_vram_unpin);
static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
struct iosys_map *map)
{
int ret;
if (gbo->vmap_use_count > 0)
goto out;
/*
* VRAM helpers unmap the BO only on demand. So the previous
	 * page mapping might still be around. Only vmap if there's
* no mapping present.
*/
if (iosys_map_is_null(&gbo->map)) {
ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
if (ret)
return ret;
}
out:
++gbo->vmap_use_count;
*map = gbo->map;
return 0;
}
static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo,
struct iosys_map *map)
{
struct drm_device *dev = gbo->bo.base.dev;
if (drm_WARN_ON_ONCE(dev, !gbo->vmap_use_count))
return;
if (drm_WARN_ON_ONCE(dev, !iosys_map_is_equal(&gbo->map, map)))
return; /* BUG: map not mapped from this BO */
if (--gbo->vmap_use_count > 0)
return;
/*
	 * Frequently mapping and unmapping buffers adds overhead from
* updating the page tables and creates debugging output. Therefore,
* we delay the actual unmap operation until the BO gets evicted
* from memory. See drm_gem_vram_bo_driver_move_notify().
*/
}
/**
* drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
* space
* @gbo: The GEM VRAM object to map
* @map: Returns the kernel virtual address of the VRAM GEM object's backing
* store.
*
* The vmap function pins a GEM VRAM object to its current location, either
* system or video memory, and maps its buffer into kernel address space.
 * As a pinned object cannot be relocated, you should avoid pinning objects
* permanently. Call drm_gem_vram_vunmap() with the returned address to
* unmap and unpin the GEM VRAM object.
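 *
 * The caller has to hold the buffer object's reservation lock. A minimal
 * access sketch, with declarations and locking error handling abridged, might
 * look like this:
 *
 * .. code-block:: c
 *
 *     struct iosys_map map;
 *
 *     dma_resv_lock(gbo->bo.base.resv, NULL);
 *     if (!drm_gem_vram_vmap(gbo, &map)) {
 *             /* access the buffer through map */
 *             drm_gem_vram_vunmap(gbo, &map);
 *     }
 *     dma_resv_unlock(gbo->bo.base.resv);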
*
* Returns:
* 0 on success, or a negative error code otherwise.
*/
int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map)
{
int ret;
dma_resv_assert_held(gbo->bo.base.resv);
ret = drm_gem_vram_pin_locked(gbo, 0);
if (ret)
return ret;
ret = drm_gem_vram_kmap_locked(gbo, map);
if (ret)
goto err_drm_gem_vram_unpin_locked;
return 0;
err_drm_gem_vram_unpin_locked:
drm_gem_vram_unpin_locked(gbo);
return ret;
}
EXPORT_SYMBOL(drm_gem_vram_vmap);
/**
* drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
* @gbo: The GEM VRAM object to unmap
* @map: Kernel virtual address where the VRAM GEM object was mapped
*
* A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See
* the documentation for drm_gem_vram_vmap() for more information.
*/
void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo,
struct iosys_map *map)
{
dma_resv_assert_held(gbo->bo.base.resv);
drm_gem_vram_kunmap_locked(gbo, map);
drm_gem_vram_unpin_locked(gbo);
}
EXPORT_SYMBOL(drm_gem_vram_vunmap);
/**
* drm_gem_vram_fill_create_dumb() - \
Helper for implementing &struct drm_driver.dumb_create
* @file: the DRM file
* @dev: the DRM device
* @pg_align: the buffer's alignment in multiples of the page size
* @pitch_align: the scanline's alignment in powers of 2
* @args: the arguments as provided to \
&struct drm_driver.dumb_create
*
* This helper function fills &struct drm_mode_create_dumb, which is used
* by &struct drm_driver.dumb_create. Implementations of this interface
 * should forward their arguments to this helper, plus the driver-specific
* parameters.
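 *
 * A driver-specific implementation might, as a sketch, look as follows (the
 * page and pitch alignments are hypothetical):
 *
 * .. code-block:: c
 *
 *     static int my_dumb_create(struct drm_file *file, struct drm_device *dev,
 *                               struct drm_mode_create_dumb *args)
 *     {
 *             return drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args);
 *     }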
*
* Returns:
* 0 on success, or
* a negative error code otherwise.
*/
int drm_gem_vram_fill_create_dumb(struct drm_file *file,
struct drm_device *dev,
unsigned long pg_align,
unsigned long pitch_align,
struct drm_mode_create_dumb *args)
{
size_t pitch, size;
struct drm_gem_vram_object *gbo;
int ret;
u32 handle;
pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
if (pitch_align) {
if (WARN_ON_ONCE(!is_power_of_2(pitch_align)))
return -EINVAL;
pitch = ALIGN(pitch, pitch_align);
}
size = pitch * args->height;
size = roundup(size, PAGE_SIZE);
if (!size)
return -EINVAL;
gbo = drm_gem_vram_create(dev, size, pg_align);
if (IS_ERR(gbo))
return PTR_ERR(gbo);
ret = drm_gem_handle_create(file, &gbo->bo.base, &handle);
if (ret)
goto err_drm_gem_object_put;
drm_gem_object_put(&gbo->bo.base);
args->pitch = pitch;
args->size = size;
args->handle = handle;
return 0;
err_drm_gem_object_put:
drm_gem_object_put(&gbo->bo.base);
return ret;
}
EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);
/*
* Helpers for struct ttm_device_funcs
*/
static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
{
return (bo->destroy == ttm_buffer_object_destroy);
}
static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo,
struct ttm_placement *pl)
{
drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
*pl = gbo->placement;
}
static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo)
{
struct ttm_buffer_object *bo = &gbo->bo;
struct drm_device *dev = bo->base.dev;
if (drm_WARN_ON_ONCE(dev, gbo->vmap_use_count))
return;
ttm_bo_vunmap(bo, &gbo->map);
iosys_map_clear(&gbo->map); /* explicitly clear mapping for next vmap call */
}
static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_resource *new_mem)
{
drm_gem_vram_bo_driver_move_notify(gbo);
return ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem);
}
/*
* Helpers for struct drm_gem_object_funcs
*/
/**
* drm_gem_vram_object_free() - \
Implements &struct drm_gem_object_funcs.free
* @gem: GEM object. Refers to &struct drm_gem_vram_object.gem
*/
static void drm_gem_vram_object_free(struct drm_gem_object *gem)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
drm_gem_vram_put(gbo);
}
/*
 * Helpers for dumb buffers
*/
/**
* drm_gem_vram_driver_dumb_create() - \
Implements &struct drm_driver.dumb_create
* @file: the DRM file
* @dev: the DRM device
* @args: the arguments as provided to \
&struct drm_driver.dumb_create
*
 * This function requires the driver to use &struct drm_device.vram_mm for its
* instance of VRAM MM.
*
* Returns:
* 0 on success, or
* a negative error code otherwise.
*/
int drm_gem_vram_driver_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
return -EINVAL;
return drm_gem_vram_fill_create_dumb(file, dev, 0, 0, args);
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);
/*
* Helpers for struct drm_plane_helper_funcs
*/
static void __drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *state,
unsigned int num_planes)
{
struct drm_gem_object *obj;
struct drm_gem_vram_object *gbo;
struct drm_framebuffer *fb = state->fb;
while (num_planes) {
--num_planes;
obj = drm_gem_fb_get_obj(fb, num_planes);
if (!obj)
continue;
gbo = drm_gem_vram_of_gem(obj);
drm_gem_vram_unpin(gbo);
}
}
/**
* drm_gem_vram_plane_helper_prepare_fb() - \
* Implements &struct drm_plane_helper_funcs.prepare_fb
* @plane: a DRM plane
* @new_state: the plane's new state
*
* During plane updates, this function sets the plane's fence and
* pins the GEM VRAM objects of the plane's new framebuffer to VRAM.
* Call drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
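 *
 * As a sketch, both helpers are typically set together in a driver's plane
 * helper functions (the structure name is illustrative):
 *
 * .. code-block:: c
 *
 *     static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
 *             .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
 *             .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
 *             /* plus the driver's atomic_check and atomic_update callbacks */
 *     };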
*
* Returns:
* 0 on success, or
* a negative errno code otherwise.
*/
int
drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct drm_framebuffer *fb = new_state->fb;
struct drm_gem_vram_object *gbo;
struct drm_gem_object *obj;
unsigned int i;
int ret;
if (!fb)
return 0;
for (i = 0; i < fb->format->num_planes; ++i) {
obj = drm_gem_fb_get_obj(fb, i);
if (!obj) {
ret = -EINVAL;
goto err_drm_gem_vram_unpin;
}
gbo = drm_gem_vram_of_gem(obj);
ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret)
goto err_drm_gem_vram_unpin;
}
ret = drm_gem_plane_helper_prepare_fb(plane, new_state);
if (ret)
goto err_drm_gem_vram_unpin;
return 0;
err_drm_gem_vram_unpin:
__drm_gem_vram_plane_helper_cleanup_fb(plane, new_state, i);
return ret;
}
EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb);
/**
* drm_gem_vram_plane_helper_cleanup_fb() - \
* Implements &struct drm_plane_helper_funcs.cleanup_fb
* @plane: a DRM plane
* @old_state: the plane's old state
*
* During plane updates, this function unpins the GEM VRAM
* objects of the plane's old framebuffer from VRAM. Complements
* drm_gem_vram_plane_helper_prepare_fb().
*/
void
drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_framebuffer *fb = old_state->fb;
if (!fb)
return;
__drm_gem_vram_plane_helper_cleanup_fb(plane, old_state, fb->format->num_planes);
}
EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
/*
* Helpers for struct drm_simple_display_pipe_funcs
*/
/**
* drm_gem_vram_simple_display_pipe_prepare_fb() - \
* Implements &struct drm_simple_display_pipe_funcs.prepare_fb
* @pipe: a simple display pipe
* @new_state: the plane's new state
*
* During plane updates, this function pins the GEM VRAM
* objects of the plane's new framebuffer to VRAM. Call
* drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them.
*
* Returns:
* 0 on success, or
* a negative errno code otherwise.
*/
int drm_gem_vram_simple_display_pipe_prepare_fb(
struct drm_simple_display_pipe *pipe,
struct drm_plane_state *new_state)
{
return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state);
}
EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb);
/**
* drm_gem_vram_simple_display_pipe_cleanup_fb() - \
* Implements &struct drm_simple_display_pipe_funcs.cleanup_fb
* @pipe: a simple display pipe
* @old_state: the plane's old state
*
* During plane updates, this function unpins the GEM VRAM
* objects of the plane's old framebuffer from VRAM. Complements
* drm_gem_vram_simple_display_pipe_prepare_fb().
*/
void drm_gem_vram_simple_display_pipe_cleanup_fb(
struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state);
}
EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb);
/*
* PRIME helpers
*/
/**
* drm_gem_vram_object_pin() - \
Implements &struct drm_gem_object_funcs.pin
* @gem: The GEM object to pin
*
* Returns:
* 0 on success, or
* a negative errno code otherwise.
*/
static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
/* Fbdev console emulation is the use case of these PRIME
* helpers. This may involve updating a hardware buffer from
	 * a shadow FB. We pin the buffer to its current location
* (either video RAM or system memory) to prevent it from
* being relocated during the update operation. If you require
* the buffer to be pinned to VRAM, implement a callback that
* sets the flags accordingly.
*/
return drm_gem_vram_pin(gbo, 0);
}
/**
* drm_gem_vram_object_unpin() - \
Implements &struct drm_gem_object_funcs.unpin
* @gem: The GEM object to unpin
*/
static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
drm_gem_vram_unpin(gbo);
}
/**
* drm_gem_vram_object_vmap() -
* Implements &struct drm_gem_object_funcs.vmap
* @gem: The GEM object to map
* @map: Returns the kernel virtual address of the VRAM GEM object's backing
* store.
*
* Returns:
* 0 on success, or a negative error code otherwise.
*/
static int drm_gem_vram_object_vmap(struct drm_gem_object *gem,
struct iosys_map *map)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
return drm_gem_vram_vmap(gbo, map);
}
/**
* drm_gem_vram_object_vunmap() -
* Implements &struct drm_gem_object_funcs.vunmap
* @gem: The GEM object to unmap
* @map: Kernel virtual address where the VRAM GEM object was mapped
*/
static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
struct iosys_map *map)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
drm_gem_vram_vunmap(gbo, map);
}
/*
* GEM object funcs
*/
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
.free = drm_gem_vram_object_free,
.pin = drm_gem_vram_object_pin,
.unpin = drm_gem_vram_object_unpin,
.vmap = drm_gem_vram_object_vmap,
.vunmap = drm_gem_vram_object_vunmap,
.mmap = drm_gem_ttm_mmap,
.print_info = drm_gem_ttm_print_info,
};
/*
* VRAM memory manager
*/
/*
* TTM TT
*/
static void bo_driver_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
ttm_tt_fini(tt);
kfree(tt);
}
/*
* TTM BO device
*/
static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
struct ttm_tt *tt;
int ret;
tt = kzalloc(sizeof(*tt), GFP_KERNEL);
if (!tt)
return NULL;
ret = ttm_tt_init(tt, bo, page_flags, ttm_cached, 0);
if (ret < 0)
goto err_ttm_tt_init;
return tt;
err_ttm_tt_init:
kfree(tt);
return NULL;
}
static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
struct drm_gem_vram_object *gbo;
/* TTM may pass BOs that are not GEM VRAM BOs. */
if (!drm_is_gem_vram(bo))
return;
gbo = drm_gem_vram_of_bo(bo);
drm_gem_vram_bo_driver_evict_flags(gbo, placement);
}
static void bo_driver_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_gem_vram_object *gbo;
/* TTM may pass BOs that are not GEM VRAM BOs. */
if (!drm_is_gem_vram(bo))
return;
gbo = drm_gem_vram_of_bo(bo);
drm_gem_vram_bo_driver_move_notify(gbo);
}
static int bo_driver_move(struct ttm_buffer_object *bo,
bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_resource *new_mem,
struct ttm_place *hop)
{
struct drm_gem_vram_object *gbo;
if (!bo->resource) {
if (new_mem->mem_type != TTM_PL_SYSTEM) {
hop->mem_type = TTM_PL_SYSTEM;
hop->flags = TTM_PL_FLAG_TEMPORARY;
return -EMULTIHOP;
}
ttm_bo_move_null(bo, new_mem);
return 0;
}
gbo = drm_gem_vram_of_bo(bo);
return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem);
}
static int bo_driver_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);
switch (mem->mem_type) {
case TTM_PL_SYSTEM: /* nothing to do */
break;
case TTM_PL_VRAM:
mem->bus.offset = (mem->start << PAGE_SHIFT) + vmm->vram_base;
mem->bus.is_iomem = true;
mem->bus.caching = ttm_write_combined;
break;
default:
return -EINVAL;
}
return 0;
}
static struct ttm_device_funcs bo_driver = {
.ttm_tt_create = bo_driver_ttm_tt_create,
.ttm_tt_destroy = bo_driver_ttm_tt_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = bo_driver_evict_flags,
.move = bo_driver_move,
.delete_mem_notify = bo_driver_delete_mem_notify,
.io_mem_reserve = bo_driver_io_mem_reserve,
};
/*
* struct drm_vram_mm
*/
static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
{
struct drm_debugfs_entry *entry = m->private;
struct drm_vram_mm *vmm = entry->dev->vram_mm;
struct ttm_resource_manager *man = ttm_manager_type(&vmm->bdev, TTM_PL_VRAM);
struct drm_printer p = drm_seq_file_printer(m);
ttm_resource_manager_debug(man, &p);
return 0;
}
static const struct drm_debugfs_info drm_vram_mm_debugfs_list[] = {
{ "vram-mm", drm_vram_mm_debugfs, 0, NULL },
};
/**
* drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
*
* @minor: drm minor device.
*
*/
void drm_vram_mm_debugfs_init(struct drm_minor *minor)
{
drm_debugfs_add_files(minor->dev, drm_vram_mm_debugfs_list,
ARRAY_SIZE(drm_vram_mm_debugfs_list));
}
EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
uint64_t vram_base, size_t vram_size)
{
int ret;
vmm->vram_base = vram_base;
vmm->vram_size = vram_size;
ret = ttm_device_init(&vmm->bdev, &bo_driver, dev->dev,
dev->anon_inode->i_mapping,
dev->vma_offset_manager,
false, true);
if (ret)
return ret;
ret = ttm_range_man_init(&vmm->bdev, TTM_PL_VRAM,
false, vram_size >> PAGE_SHIFT);
if (ret)
return ret;
return 0;
}
static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
{
ttm_range_man_fini(&vmm->bdev, TTM_PL_VRAM);
ttm_device_fini(&vmm->bdev);
}
/*
* Helpers for integration with struct drm_device
*/
static struct drm_vram_mm *drm_vram_helper_alloc_mm(struct drm_device *dev, uint64_t vram_base,
size_t vram_size)
{
int ret;
if (WARN_ON(dev->vram_mm))
return dev->vram_mm;
dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
if (!dev->vram_mm)
return ERR_PTR(-ENOMEM);
ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size);
if (ret)
goto err_kfree;
return dev->vram_mm;
err_kfree:
kfree(dev->vram_mm);
dev->vram_mm = NULL;
return ERR_PTR(ret);
}
static void drm_vram_helper_release_mm(struct drm_device *dev)
{
if (!dev->vram_mm)
return;
drm_vram_mm_cleanup(dev->vram_mm);
kfree(dev->vram_mm);
dev->vram_mm = NULL;
}
static void drm_vram_mm_release(struct drm_device *dev, void *ptr)
{
drm_vram_helper_release_mm(dev);
}
/**
* drmm_vram_helper_init - Initializes a device's instance of
* &struct drm_vram_mm
* @dev: the DRM device
* @vram_base: the base address of the video memory
* @vram_size: the size of the video memory in bytes
*
* Creates a new instance of &struct drm_vram_mm and stores it in
* struct &drm_device.vram_mm. The instance is auto-managed and cleaned
* up as part of device cleanup. Calling this function multiple times
* will generate an error message.
*
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
int drmm_vram_helper_init(struct drm_device *dev, uint64_t vram_base,
size_t vram_size)
{
struct drm_vram_mm *vram_mm;
if (drm_WARN_ON_ONCE(dev, dev->vram_mm))
return 0;
vram_mm = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
if (IS_ERR(vram_mm))
return PTR_ERR(vram_mm);
return drmm_add_action_or_reset(dev, drm_vram_mm_release, NULL);
}
EXPORT_SYMBOL(drmm_vram_helper_init);
/*
* Mode-config helpers
*/
static enum drm_mode_status
drm_vram_helper_mode_valid_internal(struct drm_device *dev,
const struct drm_display_mode *mode,
unsigned long max_bpp)
{
struct drm_vram_mm *vmm = dev->vram_mm;
unsigned long fbsize, fbpages, max_fbpages;
if (WARN_ON(!dev->vram_mm))
return MODE_BAD;
max_fbpages = (vmm->vram_size / 2) >> PAGE_SHIFT;
fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
if (fbpages > max_fbpages)
return MODE_MEM;
return MODE_OK;
}
/**
* drm_vram_helper_mode_valid - Tests if a display mode's
* framebuffer fits into the available video memory.
* @dev: the DRM device
* @mode: the mode to test
*
* This function tests if enough video memory is available for using the
* specified display mode. Atomic modesetting requires importing the
* designated framebuffer into video memory before evicting the active
* one. Hence, any framebuffer may consume at most half of the available
 * VRAM. Display modes that require a larger framebuffer cannot be used,
* even if the CRTC does support them. Each framebuffer is assumed to
* have 32-bit color depth.
*
* Note:
* The function can only test if the display mode is supported in
* general. If there are too many framebuffers pinned to video memory,
* a display mode may still not be usable in practice. The color depth of
 * 32-bit fits all current use cases. A more flexible test can be added
* when necessary.
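 *
 * As a sketch, the helper is usually plugged into the device's mode-config
 * functions; the other callbacks shown here are illustrative:
 *
 * .. code-block:: c
 *
 *     static const struct drm_mode_config_funcs my_mode_config_funcs = {
 *             .fb_create = drm_gem_fb_create,
 *             .mode_valid = drm_vram_helper_mode_valid,
 *             .atomic_check = drm_atomic_helper_check,
 *             .atomic_commit = drm_atomic_helper_commit,
 *     };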
*
* Returns:
* MODE_OK if the display mode is supported, or an error code of type
* enum drm_mode_status otherwise.
*/
enum drm_mode_status
drm_vram_helper_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
return drm_vram_helper_mode_valid_internal(dev, mode, max_bpp);
}
EXPORT_SYMBOL(drm_vram_helper_mode_valid);
MODULE_DESCRIPTION("DRM VRAM memory-management helpers");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/drm_gem_vram_helper.c |
/*
* Copyright (c) 2016 Intel Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
/**
* DOC: aux kms helpers
*
* This helper library contains various one-off functions which don't really fit
* anywhere else in the DRM modeset helper library.
*/
/**
* drm_helper_move_panel_connectors_to_head() - move panels to the front in the
* connector list
* @dev: drm device to operate on
*
* Some userspace presumes that the first connected connector is the main
* display, where it's supposed to display e.g. the login screen. For
* laptops, this should be the main panel. Use this function to sort all
* (eDP/LVDS/DSI) panels to the front of the connector list, instead of
* painstakingly trying to initialize them in the right order.
*/
void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
{
struct drm_connector *connector, *tmp;
struct list_head panel_list;
INIT_LIST_HEAD(&panel_list);
spin_lock_irq(&dev->mode_config.connector_list_lock);
list_for_each_entry_safe(connector, tmp,
&dev->mode_config.connector_list, head) {
if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_DSI)
list_move_tail(&connector->head, &panel_list);
}
list_splice(&panel_list, &dev->mode_config.connector_list);
spin_unlock_irq(&dev->mode_config.connector_list_lock);
}
EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
/**
* drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
* @dev: DRM device
* @fb: drm_framebuffer object to fill out
* @mode_cmd: metadata from the userspace fb creation request
*
 * This helper can be used in a driver's fb_create callback to pre-fill the fb's
* metadata fields.
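 *
 * As a rough sketch, an fb_create implementation might use it like this
 * (object lookup, error handling and the framebuffer funcs are left out or
 * assumed):
 *
 * .. code-block:: c
 *
 *     fb = kzalloc(sizeof(*fb), GFP_KERNEL);
 *     if (!fb)
 *             return ERR_PTR(-ENOMEM);
 *     drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
 *     ret = drm_framebuffer_init(dev, fb, &my_fb_funcs);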
*/
void drm_helper_mode_fill_fb_struct(struct drm_device *dev,
struct drm_framebuffer *fb,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
int i;
fb->dev = dev;
fb->format = drm_get_format_info(dev, mode_cmd);
fb->width = mode_cmd->width;
fb->height = mode_cmd->height;
for (i = 0; i < 4; i++) {
fb->pitches[i] = mode_cmd->pitches[i];
fb->offsets[i] = mode_cmd->offsets[i];
}
fb->modifier = mode_cmd->modifier[0];
fb->flags = mode_cmd->flags;
}
EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
/*
* This is the minimal list of formats that seem to be safe for modeset use
* with all current DRM drivers. Most hardware can actually support more
* formats than this and drivers may specify a more accurate list when
* creating the primary plane.
*/
static const uint32_t safe_modeset_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
};
static const struct drm_plane_funcs primary_plane_funcs = {
DRM_PLANE_NON_ATOMIC_FUNCS,
};
/**
* drm_crtc_init - Legacy CRTC initialization function
* @dev: DRM device
* @crtc: CRTC object to init
* @funcs: callbacks for the new CRTC
*
* Initialize a CRTC object with a default helper-provided primary plane and no
* cursor plane.
*
* Note that we make some assumptions about hardware limitations that may not be
* true for all hardware:
*
* 1. Primary plane cannot be repositioned.
* 2. Primary plane cannot be scaled.
* 3. Primary plane must cover the entire CRTC.
* 4. Subpixel positioning is not supported.
* 5. The primary plane must always be on if the CRTC is enabled.
*
* This is purely a backwards compatibility helper for old drivers. Drivers
* should instead implement their own primary plane. Atomic drivers must do so.
* Drivers with the above hardware restriction can look into using &struct
* drm_simple_display_pipe, which encapsulates the above limitations into a nice
* interface.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
const struct drm_crtc_funcs *funcs)
{
struct drm_plane *primary;
int ret;
	/* possible_crtcs will be filled in later by crtc_init */
primary = __drm_universal_plane_alloc(dev, sizeof(*primary), 0, 0,
&primary_plane_funcs,
safe_modeset_formats,
ARRAY_SIZE(safe_modeset_formats),
NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
if (IS_ERR(primary))
return PTR_ERR(primary);
/*
* Remove the format_default field from drm_plane when dropping
* this helper.
*/
primary->format_default = true;
ret = drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs, NULL);
if (ret)
goto err_drm_plane_cleanup;
return 0;
err_drm_plane_cleanup:
drm_plane_cleanup(primary);
kfree(primary);
return ret;
}
EXPORT_SYMBOL(drm_crtc_init);
/**
* drm_mode_config_helper_suspend - Modeset suspend helper
* @dev: DRM device
*
* This helper function takes care of suspending the modeset side. It disables
* output polling if initialized, suspends fbdev if used and finally calls
* drm_atomic_helper_suspend().
 * If suspending fails, fbdev and polling are re-enabled.
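 *
 * As a sketch, a driver's system-sleep callback might simply forward to this
 * helper (retrieving the DRM device from driver data is an assumption here):
 *
 * .. code-block:: c
 *
 *     static int my_pm_suspend(struct device *dev)
 *     {
 *             return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
 *     }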
*
* Returns:
* Zero on success, negative error code on error.
*
* See also:
* drm_kms_helper_poll_disable() and drm_fb_helper_set_suspend_unlocked().
*/
int drm_mode_config_helper_suspend(struct drm_device *dev)
{
struct drm_atomic_state *state;
if (!dev)
return 0;
drm_kms_helper_poll_disable(dev);
drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1);
state = drm_atomic_helper_suspend(dev);
if (IS_ERR(state)) {
drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
drm_kms_helper_poll_enable(dev);
return PTR_ERR(state);
}
dev->mode_config.suspend_state = state;
return 0;
}
EXPORT_SYMBOL(drm_mode_config_helper_suspend);
/**
* drm_mode_config_helper_resume - Modeset resume helper
* @dev: DRM device
*
* This helper function takes care of resuming the modeset side. It calls
* drm_atomic_helper_resume(), resumes fbdev if used and enables output polling
 * if initialized.
*
* Returns:
* Zero on success, negative error code on error.
*
* See also:
* drm_fb_helper_set_suspend_unlocked() and drm_kms_helper_poll_enable().
*/
int drm_mode_config_helper_resume(struct drm_device *dev)
{
int ret;
if (!dev)
return 0;
if (WARN_ON(!dev->mode_config.suspend_state))
return -EINVAL;
ret = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state);
if (ret)
DRM_ERROR("Failed to resume (%d)\n", ret);
dev->mode_config.suspend_state = NULL;
drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
drm_kms_helper_poll_enable(dev);
return ret;
}
EXPORT_SYMBOL(drm_mode_config_helper_resume);
| linux-master | drivers/gpu/drm/drm_modeset_helper.c |
/*
* Created: Fri Jan 8 09:01:26 1999 by [email protected]
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Author Rickard E. (Rik) Faith <[email protected]>
* Author Gareth Hughes <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/export.h>
#include <linux/nospec.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <drm/drm_auth.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
#include "drm_legacy.h"
/**
* DOC: getunique and setversion story
*
* BEWARE THE DRAGONS! MIND THE TRAPDOORS!
*
* In an attempt to warn anyone else who's trying to figure out what's going
* on here, I'll try to summarize the story. First things first, let's clear up
* the names, because the kernel internals, libdrm and the ioctls are all named
* differently:
*
* - GET_UNIQUE ioctl, implemented by drm_getunique is wrapped up in libdrm
* through the drmGetBusid function.
* - The libdrm drmSetBusid function is backed by the SET_UNIQUE ioctl. All
* that code is nerved in the kernel with drm_invalid_op().
* - The internal set_busid kernel functions and driver callbacks are
 * exclusively used by the SET_VERSION ioctl, because only drm 1.0 (which is
* nerved) allowed userspace to set the busid through the above ioctl.
* - Other ioctls and functions involved are named consistently.
*
* For anyone wondering what's the difference between drm 1.1 and 1.4: Correctly
* handling pci domains in the busid on ppc. Doing this correctly was only
* implemented in libdrm in 2010, hence can't be nerved yet. No one knows what's
* special with drm 1.2 and 1.3.
*
* Now the actual horror story of how device lookup in drm works. At large,
* there's 2 different ways, either by busid, or by device driver name.
*
* Opening by busid is fairly simple:
*
* 1. First call SET_VERSION to make sure pci domains are handled properly. As a
* side-effect this fills out the unique name in the master structure.
* 2. Call GET_UNIQUE to read out the unique name from the master structure,
* which matches the busid thanks to step 1. If it doesn't, proceed to try
* the next device node.
*
* Opening by name is slightly different:
*
* 1. Directly call VERSION to get the version and to match against the driver
* name returned by that ioctl. Note that SET_VERSION is not called, which
* means the unique name for the master node just opening is _not_ filled
 * out. This is despite the fact that current drm device nodes are always bound to
* one device, and can't be runtime assigned like with drm 1.0.
* 2. Match driver name. If it mismatches, proceed to the next device node.
* 3. Call GET_UNIQUE, and check whether the unique name has length zero (by
* checking that the first byte in the string is 0). If that's not the case
* libdrm skips and proceeds to the next device node. Probably this is just
* copypasta from drm 1.0 times where a set unique name meant that the driver
* was in use already, but that's just conjecture.
*
* Long story short: To keep the open by name logic working, GET_UNIQUE must
* _not_ return a unique string when SET_VERSION hasn't been called yet,
* otherwise libdrm breaks. Even when that unique string can't ever change, and
* is totally irrelevant for actually opening the device because runtime
 * assignable device instances were only supported in drm 1.0, which is long dead.
* But the libdrm code in drmOpenByName somehow survived, hence this can't be
* broken.
*/
/*
* Get the bus id.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_unique structure.
* \return zero on success or a negative number on failure.
*
* Copies the bus id from drm_device::unique into user space.
*/
int drm_getunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_unique *u = data;
struct drm_master *master;
mutex_lock(&dev->master_mutex);
master = file_priv->master;
if (u->unique_len >= master->unique_len) {
if (copy_to_user(u->unique, master->unique, master->unique_len)) {
mutex_unlock(&dev->master_mutex);
return -EFAULT;
}
}
u->unique_len = master->unique_len;
mutex_unlock(&dev->master_mutex);
return 0;
}
static void
drm_unset_busid(struct drm_device *dev,
struct drm_master *master)
{
kfree(master->unique);
master->unique = NULL;
master->unique_len = 0;
}
static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
int ret;
if (master->unique != NULL)
drm_unset_busid(dev, master);
if (dev->dev && dev_is_pci(dev->dev)) {
ret = drm_pci_set_busid(dev, master);
if (ret) {
drm_unset_busid(dev, master);
return ret;
}
} else {
WARN_ON(!dev->unique);
master->unique = kstrdup(dev->unique, GFP_KERNEL);
if (master->unique)
master->unique_len = strlen(dev->unique);
}
return 0;
}
/*
* Get client information.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_client structure.
*
* \return zero on success or a negative number on failure.
*
* Searches for the client with the specified index and copies its information
* into userspace
*/
int drm_getclient(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_client *client = data;
/*
* Hollowed-out getclient ioctl to keep some dead old drm tests/tools
	 * not breaking completely. Userspace tools stop enumerating once they
* get -EINVAL, hence this is the return value we need to hand back for
* no clients tracked.
*
* Unfortunately some clients (*cough* libva *cough*) use this in a fun
* attempt to figure out whether they're authenticated or not. Since
	 * that's the only thing they care about, give it to them directly
* instead of walking one giant list.
*/
if (client->idx == 0) {
client->auth = file_priv->authenticated;
client->pid = task_pid_vnr(current);
client->uid = overflowuid;
client->magic = 0;
client->iocs = 0;
return 0;
} else {
return -EINVAL;
}
}
/*
* Get statistics information.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_stats structure.
*
* \return zero on success or a negative number on failure.
*/
static int drm_getstats(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_stats *stats = data;
/* Clear stats to prevent userspace from eating its stack garbage. */
memset(stats, 0, sizeof(*stats));
return 0;
}
/*
* Get device/driver capabilities
*/
static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_get_cap *req = data;
struct drm_crtc *crtc;
req->value = 0;
/* Only some caps make sense with UMS/render-only drivers. */
switch (req->capability) {
case DRM_CAP_TIMESTAMP_MONOTONIC:
req->value = 1;
return 0;
case DRM_CAP_PRIME:
req->value = DRM_PRIME_CAP_IMPORT | DRM_PRIME_CAP_EXPORT;
return 0;
case DRM_CAP_SYNCOBJ:
req->value = drm_core_check_feature(dev, DRIVER_SYNCOBJ);
return 0;
case DRM_CAP_SYNCOBJ_TIMELINE:
req->value = drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE);
return 0;
}
/* Other caps only work with KMS drivers */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
switch (req->capability) {
case DRM_CAP_DUMB_BUFFER:
if (dev->driver->dumb_create)
req->value = 1;
break;
case DRM_CAP_VBLANK_HIGH_CRTC:
req->value = 1;
break;
case DRM_CAP_DUMB_PREFERRED_DEPTH:
req->value = dev->mode_config.preferred_depth;
break;
case DRM_CAP_DUMB_PREFER_SHADOW:
req->value = dev->mode_config.prefer_shadow;
break;
case DRM_CAP_ASYNC_PAGE_FLIP:
req->value = dev->mode_config.async_page_flip;
break;
case DRM_CAP_PAGE_FLIP_TARGET:
req->value = 1;
drm_for_each_crtc(crtc, dev) {
if (!crtc->funcs->page_flip_target)
req->value = 0;
}
break;
case DRM_CAP_CURSOR_WIDTH:
if (dev->mode_config.cursor_width)
req->value = dev->mode_config.cursor_width;
else
req->value = 64;
break;
case DRM_CAP_CURSOR_HEIGHT:
if (dev->mode_config.cursor_height)
req->value = dev->mode_config.cursor_height;
else
req->value = 64;
break;
case DRM_CAP_ADDFB2_MODIFIERS:
req->value = !dev->mode_config.fb_modifiers_not_supported;
break;
case DRM_CAP_CRTC_IN_VBLANK_EVENT:
req->value = 1;
break;
default:
return -EINVAL;
}
return 0;
}
/*
* Set device/driver capabilities
*/
static int
drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_set_client_cap *req = data;
/* No render-only settable capabilities for now */
	/* The caps below only work with KMS drivers */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
switch (req->capability) {
case DRM_CLIENT_CAP_STEREO_3D:
if (req->value > 1)
return -EINVAL;
file_priv->stereo_allowed = req->value;
break;
case DRM_CLIENT_CAP_UNIVERSAL_PLANES:
if (req->value > 1)
return -EINVAL;
file_priv->universal_planes = req->value;
break;
case DRM_CLIENT_CAP_ATOMIC:
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
return -EOPNOTSUPP;
/* The modesetting DDX has a totally broken idea of atomic. */
if (current->comm[0] == 'X' && req->value == 1) {
pr_info("broken atomic modeset userspace detected, disabling atomic\n");
return -EOPNOTSUPP;
}
if (req->value > 2)
return -EINVAL;
file_priv->atomic = req->value;
file_priv->universal_planes = req->value;
/*
* No atomic user-space blows up on aspect ratio mode bits.
*/
file_priv->aspect_ratio_allowed = req->value;
break;
case DRM_CLIENT_CAP_ASPECT_RATIO:
if (req->value > 1)
return -EINVAL;
file_priv->aspect_ratio_allowed = req->value;
break;
case DRM_CLIENT_CAP_WRITEBACK_CONNECTORS:
if (!file_priv->atomic)
return -EINVAL;
if (req->value > 1)
return -EINVAL;
file_priv->writeback_connectors = req->value;
break;
default:
return -EINVAL;
}
return 0;
}
/*
* Setversion ioctl.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_lock structure.
* \return zero on success or negative number on failure.
*
* Sets the requested interface version
*/
static int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_set_version *sv = data;
int if_version, retcode = 0;
mutex_lock(&dev->master_mutex);
if (sv->drm_di_major != -1) {
if (sv->drm_di_major != DRM_IF_MAJOR ||
sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
retcode = -EINVAL;
goto done;
}
if_version = DRM_IF_VERSION(sv->drm_di_major,
sv->drm_di_minor);
dev->if_version = max(if_version, dev->if_version);
if (sv->drm_di_minor >= 1) {
/*
* Version 1.1 includes tying of DRM to specific device
* Version 1.4 has proper PCI domain support
*/
retcode = drm_set_busid(dev, file_priv);
if (retcode)
goto done;
}
}
if (sv->drm_dd_major != -1) {
if (sv->drm_dd_major != dev->driver->major ||
sv->drm_dd_minor < 0 || sv->drm_dd_minor >
dev->driver->minor) {
retcode = -EINVAL;
goto done;
}
}
done:
sv->drm_di_major = DRM_IF_MAJOR;
sv->drm_di_minor = DRM_IF_MINOR;
sv->drm_dd_major = dev->driver->major;
sv->drm_dd_minor = dev->driver->minor;
mutex_unlock(&dev->master_mutex);
return retcode;
}
/**
* drm_noop - DRM no-op ioctl implementation
* @dev: DRM device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: DRM file for the ioctl call
*
* This no-op implementation for drm ioctls is useful for deprecated
* functionality where we can't return a failure code because existing userspace
* checks the result of the ioctl, but doesn't care about the action.
*
* Always returns successfully with 0.
*/
int drm_noop(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_dbg_core(dev, "\n");
return 0;
}
EXPORT_SYMBOL(drm_noop);
/**
* drm_invalid_op - DRM invalid ioctl implementation
* @dev: DRM device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: DRM file for the ioctl call
*
* This no-op implementation for drm ioctls is useful for deprecated
* functionality where we really don't want to allow userspace to call the ioctl
* any more. This is the case for old ums interfaces for drivers that
* transitioned to kms gradually and so kept the old legacy tables around. This
* only applies to radeon and i915 kms drivers, other drivers shouldn't need to
* use this function.
*
* Always fails with a return value of -EINVAL.
*/
int drm_invalid_op(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -EINVAL;
}
EXPORT_SYMBOL(drm_invalid_op);
/*
 * Copy an IOCTL return string to user space
*/
static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
{
size_t len;
/* don't attempt to copy a NULL pointer */
if (WARN_ONCE(!value, "BUG: the value to copy was not set!")) {
*buf_len = 0;
return 0;
}
/* don't overflow userbuf */
len = strlen(value);
if (len > *buf_len)
len = *buf_len;
	/* let userspace know the exact length of the driver value (which could be
* larger than the userspace-supplied buffer) */
*buf_len = strlen(value);
/* finally, try filling in the userbuf */
if (len && buf)
if (copy_to_user(buf, value, len))
return -EFAULT;
return 0;
}
/*
* Get version information
*
* \param inode device inode.
* \param filp file pointer.
* \param cmd command.
* \param arg user argument, pointing to a drm_version structure.
* \return zero on success or negative number on failure.
*
* Fills in the version information in \p arg.
*/
int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_version *version = data;
int err;
version->version_major = dev->driver->major;
version->version_minor = dev->driver->minor;
version->version_patchlevel = dev->driver->patchlevel;
err = drm_copy_field(version->name, &version->name_len,
dev->driver->name);
if (!err)
err = drm_copy_field(version->date, &version->date_len,
dev->driver->date);
if (!err)
err = drm_copy_field(version->desc, &version->desc_len,
dev->driver->desc);
return err;
}
static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
{
/* ROOT_ONLY is only for CAP_SYS_ADMIN */
if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
return -EACCES;
/* AUTH is only for authenticated or render client */
if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
!file_priv->authenticated))
return -EACCES;
/* MASTER is only for master or control clients */
if (unlikely((flags & DRM_MASTER) &&
!drm_is_current_master(file_priv)))
return -EACCES;
/* Render clients must be explicitly allowed */
if (unlikely(!(flags & DRM_RENDER_ALLOW) &&
drm_is_render_client(file_priv)))
return -EACCES;
return 0;
}
#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
[DRM_IOCTL_NR(ioctl)] = { \
.cmd = ioctl, \
.func = _func, \
.flags = _flags, \
.name = #ioctl \
}
#if IS_ENABLED(CONFIG_DRM_LEGACY)
#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, _func, _flags)
#else
#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, drm_invalid_op, _flags)
#endif
/* Ioctl table */
static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_legacy_irq_by_busid,
DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_MASTER),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, 0),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_legacy_irq_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#if IS_ENABLED(CONFIG_AGP)
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_legacy_agp_acquire_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_legacy_agp_release_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_legacy_agp_enable_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_legacy_agp_info_ioctl, DRM_AUTH),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_legacy_agp_alloc_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_legacy_agp_free_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_legacy_agp_bind_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_legacy_agp_unbind_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_legacy_modeset_ctl_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, 0),
DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_connector_property_set_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB2, drm_mode_getfb2_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_CREATE, drm_syncobj_create_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_DESTROY, drm_syncobj_destroy_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, drm_syncobj_handle_to_fd_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TRANSFER, drm_syncobj_transfer_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, drm_syncobj_timeline_wait_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_EVENTFD, drm_syncobj_eventfd_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, drm_syncobj_timeline_signal_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_QUERY, drm_syncobj_query_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_LIST_LESSEES, drm_mode_list_lessees_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GET_LEASE, drm_mode_get_lease_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE(drm_ioctls)
/**
* DOC: driver specific ioctls
*
* First things first, driver private IOCTLs should only be needed for drivers
* supporting rendering. Kernel modesetting is all standardized, and extended
* through properties. There are a few exceptions in some existing drivers,
 * which define IOCTLs for use by the display DRM master, but they all predate
* properties.
*
 * Now if you do have a render driver you always have to support it through
 * driver private IOCTLs. There are a few steps needed to wire all the things
 * up.
*
* First you need to define the structure for your IOCTL in your driver private
* UAPI header in ``include/uapi/drm/my_driver_drm.h``::
*
* struct my_driver_operation {
* u32 some_thing;
* u32 another_thing;
* };
*
* Please make sure that you follow all the best practices from
* ``Documentation/process/botching-up-ioctls.rst``. Note that drm_ioctl()
 * automatically zero-extends structures, hence make sure the structure can be
 * extended by adding new fields at the end, i.e. don't put a variable sized
 * array there.
*
* Then you need to define your IOCTL number, using one of DRM_IO(), DRM_IOR(),
* DRM_IOW() or DRM_IOWR(). It must start with the DRM_IOCTL\_ prefix::
*
 *     #define DRM_IOCTL_MY_DRIVER_OPERATION \
* DRM_IOW(DRM_COMMAND_BASE, struct my_driver_operation)
*
 * DRM driver private IOCTLs must be in the range from DRM_COMMAND_BASE to
* DRM_COMMAND_END. Finally you need an array of &struct drm_ioctl_desc to wire
* up the handlers and set the access rights::
*
* static const struct drm_ioctl_desc my_driver_ioctls[] = {
* DRM_IOCTL_DEF_DRV(MY_DRIVER_OPERATION, my_driver_operation,
* DRM_AUTH|DRM_RENDER_ALLOW),
* };
*
* And then assign this to the &drm_driver.ioctls field in your driver
* structure.
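 *
 * As a rough sketch (the ``my_driver_*`` names are just placeholders, not a
 * real driver), the hookup in the &struct drm_driver instance could look like
 * this::
 *
 *     static const struct drm_driver my_driver_driver = {
 *             .driver_features = DRIVER_GEM | DRIVER_RENDER,
 *             .ioctls = my_driver_ioctls,
 *             .num_ioctls = ARRAY_SIZE(my_driver_ioctls),
 *     };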
*
* See the separate chapter on :ref:`file operations<drm_driver_fops>` for how
* the driver-specific IOCTLs are wired up.
*/
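/**
 * drm_ioctl_kernel - kernel ioctl dispatch implementation
 * @file: file this ioctl is called on
 * @func: ioctl implementation to call
 * @kdata: ioctl data already in kernel space
 * @flags: ioctl flags (DRM_AUTH, DRM_MASTER, DRM_UNLOCKED, ...)
 *
 * This dispatches an ioctl implementation from inside the kernel, skipping the
 * copy_from_user()/copy_to_user() steps done by drm_ioctl(), hence @kdata must
 * already be fully set up. Permissions are still checked with
 * drm_ioctl_permit() and the legacy locking rules are still applied. It is
 * used for example by the 32-bit compat wrappers to call the native
 * implementation on a repacked argument structure.
 *
 * Returns:
 * Zero on success, negative error code on failure.
 */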
long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
u32 flags)
{
struct drm_file *file_priv = file->private_data;
struct drm_device *dev = file_priv->minor->dev;
int retcode;
if (drm_dev_is_unplugged(dev))
return -ENODEV;
retcode = drm_ioctl_permit(flags, file_priv);
if (unlikely(retcode))
return retcode;
/* Enforce sane locking for modern driver ioctls. */
if (likely(!drm_core_check_feature(dev, DRIVER_LEGACY)) ||
(flags & DRM_UNLOCKED))
retcode = func(dev, kdata, file_priv);
else {
mutex_lock(&drm_global_mutex);
retcode = func(dev, kdata, file_priv);
mutex_unlock(&drm_global_mutex);
}
return retcode;
}
EXPORT_SYMBOL(drm_ioctl_kernel);
/**
* drm_ioctl - ioctl callback implementation for DRM drivers
* @filp: file this ioctl is called on
* @cmd: ioctl cmd number
* @arg: user argument
*
* Looks up the ioctl function in the DRM core and the driver dispatch table,
* stored in &drm_driver.ioctls. It checks for necessary permission by calling
* drm_ioctl_permit(), and dispatches to the respective function.
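 *
 * drm_ioctl() is normally not called directly; it is wired up as the
 * &file_operations.unlocked_ioctl callback, with drm_compat_ioctl() as the
 * .compat_ioctl handler. A typical hookup, using ``my_driver_fops`` purely as
 * a placeholder name, looks roughly like this::
 *
 *     static const struct file_operations my_driver_fops = {
 *             .owner = THIS_MODULE,
 *             .open = drm_open,
 *             .release = drm_release,
 *             .unlocked_ioctl = drm_ioctl,
 *             .compat_ioctl = drm_compat_ioctl,
 *     };
 *
 * GEM based drivers can also use the DEFINE_DRM_GEM_FOPS() helper instead.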
*
* Returns:
* Zero on success, negative error code on failure.
*/
long drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev;
const struct drm_ioctl_desc *ioctl = NULL;
drm_ioctl_t *func;
unsigned int nr = DRM_IOCTL_NR(cmd);
int retcode = -EINVAL;
char stack_kdata[128];
char *kdata = NULL;
unsigned int in_size, out_size, drv_size, ksize;
bool is_driver_ioctl;
dev = file_priv->minor->dev;
if (drm_dev_is_unplugged(dev))
return -ENODEV;
if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE)
return -ENOTTY;
is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
if (is_driver_ioctl) {
/* driver ioctl */
unsigned int index = nr - DRM_COMMAND_BASE;
if (index >= dev->driver->num_ioctls)
goto err_i1;
index = array_index_nospec(index, dev->driver->num_ioctls);
ioctl = &dev->driver->ioctls[index];
} else {
/* core ioctl */
if (nr >= DRM_CORE_IOCTL_COUNT)
goto err_i1;
nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
ioctl = &drm_ioctls[nr];
}
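	/*
	 * The kernel buffer must hold the larger of the size declared in the
	 * ioctl definition and the size encoded in the command passed in by
	 * userspace, since the two can differ across UAPI revisions.
	 */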
drv_size = _IOC_SIZE(ioctl->cmd);
out_size = in_size = _IOC_SIZE(cmd);
if ((cmd & ioctl->cmd & IOC_IN) == 0)
in_size = 0;
if ((cmd & ioctl->cmd & IOC_OUT) == 0)
out_size = 0;
ksize = max(max(in_size, out_size), drv_size);
drm_dbg_core(dev, "comm=\"%s\" pid=%d, dev=0x%lx, auth=%d, %s\n",
current->comm, task_pid_nr(current),
(long)old_encode_dev(file_priv->minor->kdev->devt),
file_priv->authenticated, ioctl->name);
/* Do not trust userspace, use our own definition */
func = ioctl->func;
if (unlikely(!func)) {
drm_dbg_core(dev, "no function\n");
retcode = -EINVAL;
goto err_i1;
}
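	/* Small payloads fit into the on-stack buffer and avoid a kmalloc(). */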
if (ksize <= sizeof(stack_kdata)) {
kdata = stack_kdata;
} else {
kdata = kmalloc(ksize, GFP_KERNEL);
if (!kdata) {
retcode = -ENOMEM;
goto err_i1;
}
}
if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
retcode = -EFAULT;
goto err_i1;
}
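	/*
	 * Zero the tail of the buffer so a shorter struct coming from older
	 * userspace is seen by the handler as zero-extended.
	 */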
if (ksize > in_size)
memset(kdata + in_size, 0, ksize - in_size);
retcode = drm_ioctl_kernel(filp, func, kdata, ioctl->flags);
if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
retcode = -EFAULT;
err_i1:
if (!ioctl)
drm_dbg_core(dev,
"invalid ioctl: comm=\"%s\", pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
current->comm, task_pid_nr(current),
(long)old_encode_dev(file_priv->minor->kdev->devt),
file_priv->authenticated, cmd, nr);
if (kdata != stack_kdata)
kfree(kdata);
if (retcode)
drm_dbg_core(dev, "comm=\"%s\", pid=%d, ret=%d\n",
current->comm, task_pid_nr(current), retcode);
return retcode;
}
EXPORT_SYMBOL(drm_ioctl);
/**
* drm_ioctl_flags - Check for core ioctl and return ioctl permission flags
* @nr: ioctl number
* @flags: where to return the ioctl permission flags
*
 * This function is only used by the vmwgfx driver to augment the access checks
 * done by the drm core, and as such is a pretty blatant layering violation.
 * It shouldn't be used by any other drivers.
*
* Returns:
* True if the @nr corresponds to a DRM core ioctl number, false otherwise.
*/
bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
{
if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END)
return false;
if (nr >= DRM_CORE_IOCTL_COUNT)
return false;
nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
*flags = drm_ioctls[nr].flags;
return true;
}
EXPORT_SYMBOL(drm_ioctl_flags);
| linux-master | drivers/gpu/drm/drm_ioctl.c |