Unnamed: 0
int64 0
0
| repo_id
stringlengths 5
186
| file_path
stringlengths 15
223
| content
stringlengths 1
32.8M
⌀ |
---|---|---|---|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/data/imx258.yaml | # SPDX-License-Identifier: CC0-1.0
%YAML 1.1
---
version: 1
algorithms:
- Agc:
- Awb:
- LensShadingCorrection:
x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
sets:
#4208x3120_A_70 - A
- ct: 2856
resolution: 4208x3120
r: [1483, 1423, 1410, 1414, 1417, 1384, 1356, 1348, 1349, 1348, 1393, 1392, 1409, 1444, 1460, 1475, 1568, 1462, 1409, 1398, 1391, 1361, 1343, 1328, 1312, 1316, 1325, 1328, 1372, 1395, 1427, 1410, 1440, 1525, 1441, 1366, 1373, 1364, 1338, 1312, 1287, 1270, 1262, 1267, 1305, 1339, 1380, 1402, 1425, 1424, 1510, 1423, 1376, 1375, 1353, 1309, 1253, 1220, 1201, 1192, 1203, 1243, 1286, 1338, 1375, 1427, 1438, 1499, 1405, 1353, 1354, 1331, 1269, 1207, 1169, 1140, 1137, 1145, 1186, 1246, 1309, 1373, 1399, 1438, 1512, 1391, 1349, 1351, 1306, 1236, 1174, 1121, 1089, 1083, 1098, 1139, 1202, 1276, 1349, 1384, 1428, 1494, 1401, 1337, 1336, 1277, 1211, 1138, 1082, 1057, 1053, 1067, 1110, 1166, 1253, 1331, 1375, 1417, 1485, 1401, 1341, 1316, 1269, 1184, 1115, 1063, 1037, 1029, 1042, 1082, 1144, 1234, 1322, 1368, 1405, 1480, 1387, 1329, 1305, 1257, 1179, 1104, 1049, 1028, 1024, 1037, 1078, 1144, 1231, 1312, 1363, 1404, 1456, 1401, 1341, 1313, 1254, 1177, 1104, 1053, 1041, 1026, 1042, 1082, 1149, 1229, 1322, 1372, 1397, 1457, 1397, 1344, 1312, 1271, 1191, 1122, 1070, 1052, 1044, 1061, 1097, 1166, 1245, 1334, 1382, 1405, 1476, 1400, 1342, 1333, 1293, 1213, 1146, 1099, 1073, 1061, 1081, 1134, 1202, 1273, 1332, 1380, 1411, 1484, 1414, 1350, 1344, 1301, 1251, 1181, 1133, 1109, 1100, 1118, 1164, 1218, 1299, 1338, 1373, 1408, 1459, 1397, 1360, 1342, 1339, 1293, 1231, 1181, 1149, 1155, 1161, 1202, 1256, 1315, 1364, 1383, 1396, 1479, 1382, 1342, 1358, 1346, 1314, 1284, 1231, 1210, 1198, 1224, 1251, 1303, 1338, 1361, 1381, 1394, 1455, 1386, 1338, 1342, 1341, 1326, 1296, 1274, 1254, 1249, 1262, 1280, 1319, 1357, 1367, 1373, 1379, 1462, 1426, 1340, 1356, 1354, 1330, 1344, 1291, 1275, 1255, 1272, 1298, 1333, 1374, 1390, 1393, 1418, 1580, ]
gr: [1274, 1203, 1200, 1184, 1165, 1167, 1155, 1160, 1155, 1158, 1164, 1181, 1196, 1223, 1219, 1220, 1369, 1233, 1172, 1161, 1158, 1146, 1149, 1142, 1129, 1133, 1137, 1144, 1155, 1173, 1189, 1204, 1205, 1268, 1215, 1172, 1148, 1137, 1135, 1124, 1123, 1114, 1110, 1116, 1131, 1149, 1161, 1175, 1191, 1220, 1263, 1185, 1153, 1140, 1137, 1119, 1106, 1094, 1088, 1086, 1099, 1107, 1125, 1152, 1154, 1187, 1209, 1255, 1195, 1141, 1133, 1133, 1112, 1083, 1081, 1066, 1057, 1067, 1088, 1103, 1134, 1154, 1172, 1199, 1255, 1186, 1136, 1127, 1121, 1094, 1077, 1055, 1044, 1040, 1048, 1067, 1086, 1121, 1146, 1155, 1185, 1258, 1177, 1127, 1117, 1104, 1082, 1063, 1044, 1038, 1027, 1036, 1057, 1070, 1101, 1138, 1151, 1177, 1245, 1184, 1116, 1119, 1098, 1070, 1045, 1037, 1030, 1027, 1026, 1045, 1062, 1099, 1132, 1149, 1179, 1238, 1172, 1120, 1113, 1100, 1070, 1042, 1029, 1027, 1029, 1027, 1042, 1066, 1088, 1126, 1149, 1174, 1223, 1162, 1118, 1117, 1093, 1065, 1039, 1030, 1028, 1022, 1028, 1045, 1060, 1101, 1134, 1146, 1165, 1246, 1172, 1116, 1119, 1102, 1075, 1046, 1029, 1032, 1030, 1038, 1049, 1073, 1097, 1132, 1146, 1168, 1231, 1178, 1118, 1123, 1111, 1083, 1062, 1041, 1038, 1033, 1041, 1054, 1074, 1109, 1135, 1144, 1175, 1244, 1193, 1136, 1123, 1118, 1100, 1070, 1045, 1036, 1044, 1047, 1067, 1090, 1116, 1135, 1158, 1174, 1232, 1198, 1142, 1127, 1130, 1107, 1085, 1068, 1060, 1057, 1069, 1079, 1102, 1115, 1124, 1154, 1178, 1241, 1192, 1136, 1125, 1113, 1116, 1096, 1081, 1075, 1075, 1088, 1097, 1116, 1124, 1135, 1155, 1177, 1232, 1183, 1142, 1119, 1113, 1099, 1101, 1088, 1084, 1085, 1089, 1103, 1109, 1122, 1133, 1147, 1175, 1258, 1238, 1162, 1161, 1143, 1124, 1131, 1108, 1111, 1107, 1115, 1116, 1138, 1137, 1150, 1163, 1186, 1381, ]
gb: [1277, 1217, 1179, 1179, 1163, 1158, 1151, 1150, 1149, 1143, 1151, 1172, 1184, 1207, 1216, 1246, 1375, 1242, 1194, 1166, 1151, 1144, 1145, 1135, 1130, 1129, 1132, 1137, 1154, 1166, 1189, 1207, 1210, 1290, 1229, 1177, 1153, 1144, 1140, 1135, 1124, 1110, 1104, 1115, 1126, 1148, 1162, 1171, 1199, 1220, 1268, 1226, 1163, 1152, 1138, 1130, 1111, 1091, 1088, 1086, 1089, 1097, 1126, 1147, 1164, 1187, 1206, 1273, 1212, 1151, 1141, 1132, 1117, 1093, 1075, 1060, 1059, 1062, 1088, 1108, 1133, 1162, 1168, 1204, 1278, 1207, 1141, 1130, 1126, 1095, 1075, 1063, 1046, 1044, 1054, 1069, 1084, 1120, 1153, 1167, 1195, 1269, 1200, 1141, 1126, 1113, 1092, 1063, 1045, 1033, 1036, 1038, 1055, 1080, 1117, 1139, 1165, 1182, 1262, 1195, 1130, 1128, 1115, 1079, 1052, 1041, 1031, 1024, 1028, 1046, 1072, 1110, 1141, 1160, 1175, 1258, 1189, 1136, 1124, 1105, 1077, 1049, 1029, 1021, 1029, 1033, 1040, 1074, 1108, 1143, 1152, 1173, 1237, 1200, 1130, 1126, 1109, 1080, 1050, 1030, 1031, 1027, 1031, 1043, 1069, 1099, 1141, 1152, 1168, 1249, 1203, 1132, 1124, 1113, 1082, 1058, 1032, 1030, 1024, 1033, 1050, 1083, 1109, 1151, 1156, 1178, 1253, 1204, 1130, 1128, 1112, 1088, 1060, 1045, 1030, 1027, 1036, 1058, 1082, 1120, 1145, 1160, 1176, 1246, 1195, 1137, 1123, 1121, 1102, 1072, 1046, 1037, 1037, 1047, 1072, 1090, 1125, 1140, 1158, 1177, 1252, 1209, 1147, 1128, 1125, 1114, 1088, 1063, 1053, 1051, 1058, 1084, 1101, 1128, 1140, 1159, 1176, 1243, 1195, 1138, 1130, 1127, 1113, 1101, 1076, 1071, 1067, 1082, 1087, 1111, 1125, 1140, 1151, 1183, 1235, 1189, 1137, 1126, 1122, 1112, 1104, 1091, 1089, 1081, 1085, 1103, 1112, 1125, 1140, 1157, 1175, 1242, 1234, 1181, 1161, 1150, 1127, 1117, 1101, 1094, 1094, 1102, 1117, 1130, 1138, 1155, 1171, 1192, 1399, ]
b: [1309, 1209, 1169, 1157, 1149, 1136, 1116, 1117, 1126, 1128, 1127, 1141, 1143, 1182, 1196, 1209, 1398, 1231, 1176, 1140, 1123, 1119, 1113, 1111, 1122, 1105, 1117, 1116, 1135, 1130, 1135, 1171, 1169, 1271, 1251, 1154, 1132, 1118, 1104, 1109, 1103, 1094, 1088, 1104, 1093, 1120, 1130, 1135, 1151, 1180, 1267, 1219, 1136, 1111, 1125, 1106, 1107, 1082, 1074, 1077, 1074, 1101, 1112, 1117, 1136, 1139, 1173, 1256, 1205, 1125, 1108, 1118, 1110, 1091, 1081, 1065, 1068, 1065, 1086, 1087, 1105, 1123, 1119, 1156, 1249, 1195, 1106, 1112, 1101, 1085, 1068, 1064, 1053, 1043, 1048, 1068, 1073, 1095, 1117, 1118, 1123, 1251, 1193, 1101, 1091, 1097, 1081, 1052, 1043, 1045, 1041, 1045, 1052, 1065, 1100, 1112, 1112, 1123, 1200, 1180, 1096, 1103, 1083, 1069, 1053, 1045, 1035, 1034, 1035, 1045, 1062, 1087, 1108, 1113, 1113, 1228, 1176, 1093, 1095, 1080, 1062, 1055, 1035, 1033, 1028, 1037, 1039, 1064, 1080, 1115, 1121, 1120, 1202, 1174, 1086, 1087, 1078, 1064, 1049, 1037, 1027, 1022, 1031, 1045, 1058, 1087, 1113, 1108, 1113, 1207, 1200, 1095, 1102, 1092, 1072, 1052, 1043, 1033, 1024, 1033, 1043, 1069, 1095, 1112, 1128, 1123, 1220, 1215, 1101, 1091, 1096, 1080, 1059, 1051, 1040, 1031, 1040, 1064, 1064, 1095, 1111, 1112, 1141, 1222, 1198, 1119, 1108, 1097, 1080, 1059, 1050, 1043, 1034, 1043, 1063, 1073, 1100, 1107, 1114, 1131, 1212, 1197, 1136, 1094, 1109, 1096, 1078, 1054, 1052, 1051, 1060, 1063, 1078, 1101, 1109, 1116, 1142, 1256, 1212, 1112, 1098, 1097, 1094, 1084, 1074, 1061, 1051, 1057, 1064, 1080, 1089, 1102, 1115, 1136, 1227, 1185, 1118, 1081, 1059, 1072, 1068, 1057, 1049, 1048, 1054, 1066, 1058, 1067, 1096, 1109, 1143, 1223, 1291, 1173, 1131, 1113, 1087, 1077, 1090, 1081, 1090, 1086, 1090, 1092, 1103, 1144, 1149, 1216, 1387, ]
#4208x3120_D50_70 - D50
- ct: 5003
resolution: 4208x3120
r: [1240, 1212, 1218, 1191, 1191, 1171, 1136, 1144, 1113, 1148, 1182, 1166, 1210, 1211, 1213, 1240, 1336, 1236, 1193, 1176, 1158, 1147, 1126, 1107, 1122, 1107, 1107, 1110, 1146, 1176, 1194, 1195, 1219, 1259, 1210, 1157, 1156, 1153, 1123, 1115, 1094, 1074, 1078, 1081, 1098, 1130, 1163, 1170, 1179, 1220, 1284, 1228, 1146, 1159, 1132, 1101, 1074, 1059, 1053, 1044, 1060, 1072, 1102, 1131, 1156, 1186, 1227, 1272, 1219, 1176, 1150, 1124, 1091, 1043, 1036, 1025, 1025, 1031, 1042, 1076, 1095, 1155, 1188, 1209, 1296, 1206, 1161, 1128, 1101, 1065, 1032, 1019, 1018, 1027, 1018, 1034, 1057, 1102, 1139, 1161, 1211, 1274, 1184, 1133, 1119, 1097, 1042, 1018, 1020, 1027, 1034, 1030, 1032, 1042, 1075, 1119, 1164, 1199, 1270, 1205, 1124, 1114, 1086, 1033, 1015, 1023, 1039, 1039, 1033, 1026, 1041, 1074, 1111, 1142, 1206, 1278, 1193, 1118, 1098, 1084, 1023, 1003, 1016, 1047, 1059, 1038, 1025, 1046, 1063, 1124, 1148, 1190, 1238, 1191, 1124, 1107, 1069, 1027, 1009, 1012, 1036, 1045, 1036, 1020, 1024, 1058, 1118, 1158, 1183, 1262, 1213, 1121, 1112, 1076, 1030, 1012, 1003, 1019, 1028, 1013, 1020, 1036, 1078, 1123, 1155, 1176, 1228, 1221, 1135, 1117, 1105, 1055, 1020, 1005, 1007, 1007, 1004, 1017, 1048, 1088, 1131, 1169, 1183, 1280, 1209, 1141, 1125, 1105, 1074, 1025, 1012, 1008, 1000, 1011, 1024, 1050, 1113, 1128, 1154, 1199, 1290, 1217, 1142, 1134, 1120, 1101, 1054, 1028, 1014, 1006, 1017, 1040, 1078, 1105, 1136, 1164, 1188, 1250, 1195, 1130, 1148, 1120, 1108, 1083, 1053, 1041, 1032, 1061, 1067, 1097, 1127, 1136, 1152, 1181, 1227, 1166, 1145, 1140, 1141, 1119, 1092, 1075, 1072, 1052, 1065, 1089, 1107, 1147, 1154, 1158, 1183, 1230, 1136, 1147, 1150, 1168, 1139, 1113, 1098, 1055, 1048, 1072, 1079, 1129, 1147, 1173, 1188, 1181, 1283, ]
gr: [1246, 1183, 1160, 1143, 1145, 1138, 1113, 1111, 1117, 1116, 1132, 1145, 1167, 1167, 1196, 1197, 1335, 1205, 1152, 1123, 1122, 1123, 1103, 1107, 1102, 1097, 1102, 1099, 1128, 1141, 1157, 1152, 1184, 1242, 1204, 1141, 1112, 1106, 1102, 1093, 1096, 1085, 1076, 1085, 1094, 1107, 1123, 1146, 1162, 1178, 1218, 1169, 1130, 1114, 1100, 1096, 1083, 1072, 1059, 1065, 1070, 1087, 1096, 1116, 1134, 1155, 1174, 1238, 1159, 1126, 1105, 1102, 1083, 1062, 1060, 1049, 1047, 1054, 1063, 1084, 1111, 1131, 1140, 1164, 1243, 1167, 1114, 1105, 1088, 1067, 1047, 1034, 1034, 1028, 1042, 1042, 1059, 1096, 1114, 1135, 1170, 1200, 1156, 1101, 1098, 1089, 1068, 1048, 1027, 1034, 1029, 1032, 1047, 1043, 1088, 1111, 1130, 1160, 1201, 1143, 1100, 1086, 1087, 1051, 1034, 1029, 1028, 1030, 1019, 1033, 1044, 1087, 1109, 1124, 1155, 1211, 1148, 1098, 1088, 1077, 1058, 1037, 1026, 1025, 1034, 1033, 1031, 1054, 1074, 1107, 1134, 1159, 1211, 1150, 1090, 1084, 1074, 1056, 1029, 1020, 1028, 1025, 1027, 1031, 1044, 1080, 1109, 1126, 1152, 1208, 1131, 1101, 1088, 1073, 1048, 1035, 1030, 1026, 1024, 1034, 1038, 1053, 1083, 1104, 1124, 1160, 1206, 1147, 1103, 1082, 1082, 1060, 1035, 1026, 1023, 1018, 1031, 1044, 1058, 1096, 1114, 1128, 1153, 1208, 1170, 1112, 1098, 1088, 1070, 1049, 1027, 1027, 1023, 1031, 1046, 1071, 1085, 1106, 1129, 1150, 1228, 1164, 1111, 1101, 1089, 1078, 1058, 1040, 1030, 1032, 1037, 1060, 1073, 1102, 1097, 1125, 1156, 1223, 1181, 1115, 1097, 1093, 1083, 1072, 1056, 1047, 1041, 1057, 1071, 1079, 1081, 1102, 1124, 1141, 1195, 1170, 1109, 1091, 1089, 1061, 1074, 1049, 1054, 1052, 1057, 1067, 1076, 1097, 1106, 1121, 1141, 1211, 1173, 1129, 1108, 1099, 1093, 1092, 1076, 1063, 1057, 1065, 1090, 1107, 1117, 1140, 1123, 1175, 1343, ]
gb: [1238, 1183, 1160, 1160, 1134, 1134, 1124, 1108, 1131, 1127, 1124, 1145, 1172, 1188, 1201, 1217, 1349, 1216, 1160, 1128, 1120, 1117, 1110, 1108, 1105, 1102, 1111, 1114, 1125, 1144, 1160, 1162, 1192, 1260, 1212, 1141, 1127, 1118, 1101, 1104, 1103, 1086, 1077, 1086, 1105, 1116, 1126, 1147, 1167, 1191, 1242, 1191, 1130, 1126, 1103, 1093, 1082, 1074, 1070, 1064, 1064, 1079, 1099, 1113, 1132, 1156, 1185, 1247, 1175, 1117, 1114, 1109, 1081, 1067, 1061, 1047, 1044, 1051, 1066, 1083, 1108, 1134, 1141, 1180, 1248, 1187, 1108, 1106, 1095, 1076, 1052, 1044, 1036, 1034, 1042, 1052, 1070, 1105, 1124, 1140, 1161, 1228, 1171, 1091, 1095, 1088, 1069, 1041, 1035, 1034, 1034, 1037, 1048, 1062, 1090, 1120, 1129, 1165, 1223, 1158, 1108, 1093, 1080, 1052, 1030, 1034, 1027, 1030, 1028, 1034, 1054, 1083, 1112, 1133, 1141, 1208, 1158, 1099, 1091, 1075, 1047, 1031, 1017, 1021, 1035, 1027, 1033, 1054, 1088, 1110, 1120, 1146, 1211, 1171, 1099, 1093, 1079, 1056, 1029, 1021, 1030, 1025, 1031, 1037, 1047, 1077, 1116, 1122, 1132, 1203, 1179, 1093, 1087, 1076, 1053, 1038, 1028, 1024, 1024, 1024, 1040, 1058, 1082, 1108, 1114, 1144, 1198, 1167, 1091, 1091, 1087, 1059, 1047, 1029, 1016, 1021, 1036, 1045, 1066, 1093, 1113, 1116, 1144, 1205, 1159, 1113, 1099, 1091, 1069, 1047, 1029, 1029, 1024, 1037, 1054, 1072, 1088, 1109, 1125, 1150, 1200, 1186, 1114, 1097, 1098, 1087, 1065, 1035, 1033, 1043, 1042, 1054, 1076, 1089, 1111, 1126, 1130, 1214, 1153, 1106, 1100, 1090, 1086, 1082, 1057, 1059, 1053, 1059, 1066, 1077, 1088, 1113, 1117, 1144, 1203, 1147, 1107, 1110, 1090, 1088, 1072, 1070, 1060, 1062, 1058, 1074, 1087, 1096, 1109, 1126, 1150, 1216, 1170, 1145, 1128, 1108, 1088, 1110, 1085, 1070, 1064, 1078, 1077, 1101, 1107, 1136, 1148, 1163, 1345, ]
b: [1252, 1185, 1146, 1139, 1147, 1130, 1114, 1111, 1122, 1111, 1121, 1123, 1144, 1150, 1171, 1167, 1303, 1187, 1152, 1125, 1101, 1104, 1096, 1101, 1099, 1093, 1096, 1098, 1103, 1118, 1141, 1160, 1156, 1226, 1222, 1125, 1112, 1118, 1104, 1094, 1083, 1073, 1073, 1094, 1099, 1103, 1114, 1133, 1146, 1174, 1212, 1162, 1123, 1104, 1110, 1100, 1081, 1066, 1065, 1057, 1053, 1072, 1094, 1107, 1117, 1136, 1162, 1226, 1197, 1124, 1088, 1092, 1084, 1066, 1055, 1051, 1044, 1049, 1061, 1081, 1096, 1102, 1134, 1143, 1234, 1171, 1110, 1099, 1075, 1070, 1051, 1052, 1030, 1030, 1035, 1055, 1071, 1092, 1100, 1113, 1128, 1214, 1174, 1099, 1080, 1069, 1054, 1047, 1032, 1031, 1027, 1034, 1042, 1061, 1086, 1091, 1113, 1139, 1222, 1156, 1088, 1089, 1072, 1051, 1036, 1032, 1026, 1030, 1024, 1040, 1047, 1074, 1091, 1109, 1131, 1198, 1158, 1090, 1079, 1071, 1047, 1038, 1031, 1028, 1027, 1028, 1029, 1046, 1068, 1087, 1105, 1122, 1196, 1173, 1098, 1080, 1060, 1040, 1036, 1022, 1019, 1022, 1029, 1029, 1045, 1077, 1094, 1103, 1109, 1189, 1170, 1096, 1070, 1063, 1048, 1033, 1026, 1023, 1016, 1021, 1037, 1053, 1068, 1098, 1107, 1128, 1195, 1166, 1099, 1086, 1066, 1061, 1040, 1022, 1022, 1028, 1027, 1041, 1057, 1086, 1094, 1103, 1124, 1188, 1202, 1113, 1081, 1083, 1071, 1040, 1025, 1024, 1025, 1019, 1055, 1055, 1081, 1099, 1112, 1128, 1202, 1171, 1108, 1083, 1084, 1078, 1051, 1043, 1020, 1037, 1037, 1049, 1072, 1069, 1100, 1107, 1115, 1176, 1180, 1106, 1094, 1077, 1068, 1053, 1050, 1035, 1041, 1038, 1062, 1068, 1068, 1084, 1098, 1125, 1184, 1164, 1104, 1077, 1057, 1064, 1049, 1039, 1041, 1036, 1041, 1042, 1058, 1064, 1087, 1099, 1111, 1173, 1209, 1137, 1099, 1083, 1076, 1072, 1077, 1065, 1066, 1065, 1061, 1081, 1096, 1135, 1126, 1150, 1333, ]
#4208x3120_D65_70 - D65
- ct: 6504
resolution: 4208x3120
r: [1359, 1336, 1313, 1273, 1274, 1250, 1250, 1218, 1222, 1223, 1240, 1266, 1308, 1327, 1333, 1336, 1456, 1359, 1286, 1256, 1249, 1235, 1235, 1216, 1219, 1187, 1205, 1216, 1240, 1267, 1277, 1303, 1311, 1420, 1326, 1254, 1250, 1239, 1212, 1207, 1191, 1181, 1176, 1181, 1187, 1226, 1241, 1281, 1295, 1326, 1391, 1304, 1253, 1234, 1234, 1209, 1174, 1156, 1147, 1131, 1139, 1168, 1196, 1227, 1265, 1282, 1293, 1385, 1302, 1242, 1224, 1216, 1171, 1140, 1112, 1098, 1087, 1098, 1124, 1177, 1206, 1245, 1266, 1310, 1389, 1327, 1227, 1231, 1195, 1156, 1116, 1094, 1070, 1067, 1073, 1101, 1151, 1190, 1223, 1251, 1281, 1402, 1285, 1229, 1203, 1184, 1135, 1093, 1063, 1047, 1041, 1050, 1083, 1119, 1176, 1211, 1248, 1288, 1388, 1269, 1210, 1215, 1173, 1118, 1078, 1046, 1028, 1025, 1037, 1059, 1103, 1170, 1213, 1230, 1268, 1355, 1295, 1208, 1203, 1171, 1124, 1070, 1041, 1024, 1027, 1030, 1057, 1094, 1168, 1206, 1252, 1270, 1364, 1293, 1196, 1187, 1156, 1110, 1075, 1039, 1022, 1022, 1028, 1065, 1096, 1166, 1213, 1245, 1273, 1349, 1291, 1213, 1203, 1162, 1131, 1079, 1053, 1038, 1029, 1044, 1080, 1119, 1176, 1225, 1243, 1271, 1354, 1284, 1222, 1202, 1186, 1136, 1097, 1063, 1054, 1041, 1054, 1083, 1131, 1186, 1232, 1256, 1276, 1360, 1290, 1237, 1210, 1207, 1166, 1116, 1076, 1066, 1070, 1080, 1109, 1152, 1188, 1230, 1240, 1293, 1341, 1304, 1231, 1229, 1210, 1177, 1153, 1128, 1097, 1105, 1108, 1140, 1170, 1213, 1224, 1260, 1282, 1357, 1299, 1237, 1218, 1218, 1202, 1171, 1144, 1135, 1131, 1143, 1161, 1189, 1221, 1233, 1261, 1271, 1346, 1262, 1216, 1229, 1218, 1191, 1187, 1162, 1161, 1148, 1153, 1180, 1201, 1220, 1234, 1251, 1250, 1352, 1294, 1234, 1242, 1240, 1246, 1200, 1178, 1172, 1137, 1154, 1187, 1214, 1252, 1251, 1247, 1296, 1456, ]
gr: [1240, 1187, 1158, 1152, 1144, 1129, 1130, 1118, 1115, 1113, 1119, 1141, 1156, 1172, 1180, 1199, 1330, 1223, 1153, 1127, 1123, 1115, 1104, 1104, 1095, 1100, 1107, 1110, 1121, 1137, 1156, 1169, 1179, 1261, 1205, 1138, 1122, 1108, 1101, 1104, 1098, 1088, 1083, 1090, 1106, 1119, 1125, 1144, 1163, 1186, 1236, 1170, 1122, 1112, 1101, 1091, 1089, 1076, 1068, 1061, 1072, 1084, 1101, 1118, 1134, 1156, 1179, 1243, 1162, 1120, 1105, 1105, 1088, 1067, 1061, 1050, 1050, 1057, 1070, 1088, 1112, 1127, 1145, 1166, 1232, 1163, 1108, 1111, 1099, 1079, 1054, 1046, 1041, 1030, 1040, 1053, 1074, 1098, 1120, 1140, 1170, 1226, 1158, 1105, 1094, 1099, 1064, 1048, 1034, 1036, 1028, 1029, 1049, 1055, 1089, 1116, 1135, 1166, 1218, 1142, 1107, 1094, 1092, 1061, 1041, 1030, 1024, 1025, 1028, 1036, 1053, 1087, 1110, 1128, 1153, 1223, 1142, 1098, 1092, 1084, 1056, 1036, 1025, 1024, 1027, 1024, 1038, 1055, 1082, 1108, 1132, 1153, 1203, 1155, 1098, 1094, 1080, 1056, 1034, 1023, 1025, 1022, 1025, 1036, 1053, 1078, 1112, 1126, 1144, 1212, 1163, 1096, 1092, 1083, 1059, 1039, 1027, 1023, 1028, 1026, 1044, 1056, 1091, 1114, 1130, 1149, 1204, 1152, 1103, 1090, 1089, 1065, 1045, 1031, 1028, 1025, 1035, 1048, 1064, 1092, 1116, 1131, 1157, 1203, 1162, 1100, 1098, 1093, 1076, 1049, 1033, 1030, 1030, 1040, 1050, 1067, 1094, 1103, 1127, 1154, 1221, 1162, 1112, 1099, 1095, 1079, 1064, 1042, 1033, 1034, 1048, 1061, 1077, 1091, 1108, 1126, 1148, 1213, 1154, 1112, 1106, 1095, 1081, 1065, 1056, 1052, 1050, 1059, 1071, 1082, 1091, 1102, 1129, 1149, 1211, 1157, 1106, 1092, 1081, 1066, 1072, 1064, 1048, 1056, 1061, 1066, 1076, 1091, 1107, 1122, 1145, 1207, 1204, 1127, 1117, 1106, 1098, 1081, 1073, 1068, 1062, 1068, 1081, 1107, 1102, 1127, 1148, 1170, 1353, ]
gb: [1240, 1177, 1157, 1143, 1129, 1130, 1118, 1112, 1123, 1123, 1123, 1137, 1159, 1181, 1197, 1206, 1354, 1217, 1153, 1130, 1124, 1109, 1114, 1105, 1108, 1116, 1110, 1114, 1131, 1145, 1145, 1163, 1183, 1249, 1197, 1134, 1124, 1107, 1115, 1104, 1100, 1085, 1091, 1097, 1102, 1110, 1133, 1145, 1155, 1190, 1227, 1191, 1125, 1107, 1105, 1093, 1084, 1072, 1066, 1071, 1072, 1081, 1106, 1124, 1129, 1153, 1178, 1238, 1193, 1108, 1104, 1098, 1085, 1072, 1059, 1052, 1048, 1059, 1075, 1089, 1105, 1126, 1146, 1162, 1233, 1166, 1098, 1099, 1091, 1078, 1053, 1043, 1036, 1035, 1045, 1058, 1070, 1100, 1113, 1128, 1156, 1230, 1173, 1100, 1087, 1087, 1064, 1046, 1037, 1031, 1031, 1034, 1047, 1063, 1092, 1107, 1112, 1153, 1228, 1169, 1089, 1089, 1079, 1057, 1043, 1030, 1030, 1027, 1027, 1035, 1057, 1087, 1111, 1125, 1136, 1218, 1166, 1097, 1087, 1079, 1056, 1035, 1022, 1021, 1027, 1022, 1035, 1053, 1083, 1109, 1118, 1138, 1198, 1151, 1100, 1087, 1077, 1057, 1034, 1023, 1024, 1027, 1025, 1036, 1051, 1083, 1109, 1116, 1129, 1215, 1159, 1096, 1091, 1079, 1053, 1037, 1026, 1021, 1020, 1020, 1039, 1063, 1086, 1113, 1116, 1134, 1214, 1158, 1096, 1091, 1087, 1065, 1043, 1034, 1025, 1020, 1028, 1046, 1059, 1088, 1109, 1119, 1130, 1202, 1168, 1101, 1091, 1084, 1074, 1050, 1029, 1028, 1026, 1035, 1055, 1072, 1099, 1105, 1121, 1138, 1204, 1160, 1104, 1093, 1094, 1079, 1067, 1043, 1036, 1036, 1048, 1057, 1081, 1089, 1107, 1118, 1140, 1222, 1158, 1101, 1096, 1090, 1082, 1076, 1059, 1052, 1053, 1063, 1071, 1086, 1094, 1103, 1119, 1134, 1206, 1150, 1105, 1098, 1093, 1082, 1077, 1067, 1063, 1065, 1069, 1081, 1081, 1088, 1108, 1123, 1138, 1211, 1198, 1133, 1114, 1117, 1097, 1093, 1076, 1073, 1067, 1077, 1076, 1089, 1101, 1119, 1154, 1163, 1346, ]
b: [1241, 1188, 1165, 1151, 1131, 1127, 1134, 1115, 1122, 1127, 1131, 1136, 1154, 1165, 1173, 1161, 1319, 1210, 1153, 1138, 1120, 1111, 1114, 1118, 1124, 1108, 1118, 1121, 1123, 1132, 1151, 1161, 1150, 1244, 1224, 1149, 1118, 1108, 1107, 1107, 1103, 1098, 1091, 1103, 1103, 1121, 1124, 1135, 1167, 1177, 1224, 1195, 1130, 1099, 1108, 1101, 1083, 1081, 1078, 1074, 1084, 1086, 1097, 1115, 1128, 1145, 1181, 1211, 1191, 1111, 1109, 1098, 1087, 1081, 1071, 1059, 1053, 1064, 1078, 1091, 1109, 1127, 1139, 1167, 1226, 1192, 1111, 1097, 1098, 1072, 1064, 1050, 1042, 1040, 1046, 1053, 1077, 1099, 1113, 1130, 1152, 1215, 1179, 1106, 1093, 1084, 1070, 1055, 1039, 1037, 1034, 1033, 1046, 1067, 1088, 1112, 1120, 1150, 1220, 1178, 1092, 1097, 1085, 1066, 1049, 1033, 1032, 1026, 1028, 1038, 1058, 1081, 1112, 1120, 1137, 1208, 1170, 1103, 1096, 1082, 1063, 1038, 1035, 1025, 1026, 1027, 1035, 1060, 1075, 1109, 1122, 1133, 1214, 1175, 1095, 1097, 1074, 1061, 1039, 1029, 1028, 1022, 1025, 1033, 1049, 1083, 1107, 1117, 1125, 1212, 1179, 1097, 1091, 1076, 1062, 1045, 1030, 1031, 1027, 1031, 1039, 1055, 1082, 1109, 1114, 1144, 1204, 1178, 1102, 1080, 1087, 1060, 1052, 1027, 1028, 1025, 1028, 1043, 1067, 1093, 1113, 1121, 1123, 1189, 1191, 1117, 1100, 1092, 1079, 1058, 1037, 1037, 1020, 1037, 1058, 1065, 1092, 1101, 1115, 1140, 1194, 1173, 1120, 1096, 1085, 1085, 1065, 1048, 1039, 1036, 1046, 1053, 1076, 1096, 1099, 1114, 1140, 1195, 1180, 1105, 1090, 1079, 1073, 1066, 1056, 1049, 1043, 1057, 1061, 1077, 1081, 1090, 1115, 1131, 1180, 1154, 1095, 1084, 1061, 1055, 1056, 1045, 1043, 1039, 1041, 1051, 1067, 1077, 1092, 1108, 1122, 1197, 1210, 1139, 1117, 1112, 1088, 1097, 1084, 1073, 1074, 1065, 1079, 1091, 1103, 1131, 1144, 1154, 1356, ]
#4208x3120_D75_70 - D75
- ct: 7504
resolution: 4208x3120
r: [2718, 2443, 2251, 2101, 1949, 1828, 1725, 1659, 1637, 1656, 1692, 1787, 1913, 2038, 2175, 2358, 2612, 2566, 2301, 2129, 1946, 1798, 1654, 1562, 1501, 1474, 1484, 1541, 1628, 1753, 1900, 2056, 2216, 2458, 2439, 2204, 2002, 1839, 1664, 1534, 1419, 1372, 1340, 1357, 1403, 1489, 1621, 1784, 1950, 2114, 2358, 2344, 2108, 1932, 1723, 1559, 1413, 1321, 1258, 1239, 1246, 1293, 1388, 1512, 1675, 1846, 2036, 2269, 2294, 2047, 1842, 1635, 1464, 1328, 1231, 1178, 1144, 1167, 1208, 1298, 1419, 1582, 1769, 1962, 2198, 2234, 1977, 1769, 1556, 1393, 1262, 1164, 1108, 1086, 1096, 1146, 1232, 1350, 1513, 1700, 1913, 2137, 2206, 1942, 1733, 1515, 1345, 1216, 1120, 1066, 1045, 1060, 1099, 1182, 1316, 1462, 1656, 1868, 2131, 2182, 1922, 1685, 1495, 1315, 1188, 1092, 1045, 1025, 1037, 1080, 1160, 1283, 1442, 1624, 1853, 2102, 2193, 1910, 1702, 1477, 1310, 1179, 1087, 1034, 1024, 1029, 1069, 1163, 1278, 1441, 1624, 1846, 2081, 2191, 1936, 1698, 1495, 1325, 1192, 1100, 1052, 1033, 1042, 1082, 1166, 1291, 1448, 1634, 1852, 2118, 2209, 1957, 1732, 1534, 1357, 1223, 1125, 1078, 1062, 1066, 1113, 1204, 1324, 1486, 1665, 1895, 2127, 2267, 2018, 1789, 1577, 1407, 1280, 1181, 1124, 1105, 1113, 1166, 1252, 1388, 1539, 1724, 1936, 2180, 2319, 2074, 1867, 1659, 1491, 1354, 1248, 1192, 1175, 1191, 1236, 1333, 1441, 1618, 1798, 2005, 2249, 2399, 2148, 1955, 1752, 1578, 1442, 1351, 1293, 1272, 1286, 1334, 1418, 1547, 1709, 1872, 2085, 2297, 2497, 2217, 2069, 1857, 1694, 1560, 1458, 1403, 1384, 1400, 1443, 1537, 1670, 1815, 1991, 2157, 2412, 2594, 2341, 2147, 2004, 1827, 1693, 1600, 1537, 1521, 1524, 1576, 1665, 1788, 1941, 2083, 2257, 2529, 2745, 2483, 2315, 2146, 2006, 1868, 1779, 1701, 1679, 1704, 1744, 1845, 1954, 2087, 2219, 2407, 2701, ]
gr: [2344, 2089, 1940, 1831, 1739, 1672, 1602, 1564, 1546, 1553, 1585, 1636, 1713, 1798, 1899, 2031, 2234, 2182, 1973, 1842, 1732, 1637, 1548, 1485, 1448, 1422, 1438, 1466, 1527, 1594, 1695, 1784, 1902, 2122, 2082, 1884, 1773, 1653, 1549, 1465, 1398, 1351, 1329, 1338, 1376, 1435, 1516, 1611, 1725, 1828, 2008, 1997, 1821, 1706, 1585, 1480, 1382, 1319, 1261, 1244, 1253, 1291, 1352, 1439, 1540, 1647, 1772, 1932, 1947, 1773, 1655, 1522, 1409, 1310, 1239, 1184, 1161, 1174, 1213, 1284, 1368, 1480, 1601, 1717, 1882, 1904, 1739, 1605, 1470, 1360, 1257, 1173, 1124, 1094, 1111, 1149, 1221, 1320, 1433, 1550, 1678, 1844, 1878, 1711, 1571, 1443, 1317, 1213, 1126, 1077, 1057, 1066, 1105, 1180, 1279, 1400, 1515, 1652, 1819, 1862, 1687, 1556, 1420, 1299, 1183, 1102, 1048, 1029, 1041, 1081, 1155, 1258, 1374, 1495, 1634, 1800, 1856, 1692, 1556, 1415, 1289, 1176, 1095, 1044, 1024, 1033, 1073, 1145, 1247, 1370, 1492, 1626, 1800, 1869, 1697, 1555, 1419, 1303, 1190, 1104, 1054, 1040, 1045, 1085, 1154, 1260, 1373, 1511, 1632, 1804, 1887, 1717, 1571, 1440, 1323, 1216, 1128, 1077, 1066, 1069, 1109, 1182, 1284, 1398, 1520, 1656, 1831, 1910, 1751, 1607, 1480, 1360, 1261, 1173, 1123, 1100, 1114, 1154, 1226, 1326, 1444, 1555, 1689, 1856, 1962, 1793, 1656, 1522, 1416, 1315, 1237, 1180, 1166, 1176, 1214, 1288, 1375, 1486, 1603, 1722, 1910, 2020, 1845, 1710, 1586, 1477, 1387, 1307, 1266, 1241, 1257, 1292, 1347, 1446, 1548, 1657, 1785, 1964, 2118, 1888, 1794, 1658, 1552, 1462, 1394, 1349, 1332, 1342, 1378, 1436, 1525, 1617, 1736, 1848, 2048, 2195, 1989, 1855, 1742, 1633, 1555, 1487, 1437, 1427, 1429, 1471, 1521, 1603, 1699, 1804, 1921, 2149, 2334, 2103, 1971, 1863, 1757, 1666, 1598, 1565, 1537, 1554, 1579, 1640, 1716, 1810, 1923, 2044, 2308, ]
gb: [2383, 2122, 1974, 1866, 1767, 1684, 1620, 1581, 1559, 1575, 1592, 1654, 1726, 1816, 1917, 2071, 2294, 2242, 2002, 1872, 1752, 1650, 1564, 1499, 1455, 1438, 1442, 1485, 1537, 1614, 1715, 1814, 1935, 2155, 2114, 1929, 1797, 1674, 1568, 1477, 1406, 1358, 1340, 1348, 1386, 1447, 1534, 1631, 1754, 1861, 2057, 2044, 1859, 1737, 1606, 1493, 1396, 1322, 1270, 1247, 1259, 1305, 1370, 1455, 1566, 1679, 1808, 1979, 1981, 1812, 1674, 1549, 1424, 1325, 1246, 1191, 1168, 1179, 1222, 1294, 1383, 1498, 1623, 1748, 1932, 1939, 1777, 1626, 1500, 1376, 1265, 1179, 1128, 1104, 1119, 1160, 1235, 1331, 1447, 1577, 1708, 1885, 1922, 1735, 1602, 1464, 1333, 1226, 1134, 1083, 1061, 1071, 1113, 1191, 1296, 1412, 1543, 1677, 1849, 1885, 1723, 1574, 1437, 1310, 1191, 1105, 1055, 1035, 1048, 1088, 1164, 1272, 1388, 1516, 1660, 1847, 1891, 1714, 1568, 1431, 1300, 1185, 1099, 1047, 1024, 1038, 1075, 1155, 1259, 1386, 1512, 1649, 1832, 1901, 1722, 1575, 1434, 1309, 1196, 1109, 1054, 1041, 1047, 1087, 1162, 1267, 1385, 1526, 1650, 1833, 1912, 1740, 1588, 1456, 1329, 1220, 1133, 1080, 1065, 1072, 1113, 1189, 1289, 1410, 1538, 1672, 1862, 1949, 1767, 1632, 1487, 1367, 1261, 1175, 1123, 1100, 1114, 1158, 1224, 1331, 1450, 1571, 1705, 1880, 1990, 1811, 1670, 1531, 1420, 1315, 1227, 1180, 1158, 1172, 1212, 1285, 1375, 1490, 1611, 1744, 1925, 2033, 1864, 1715, 1588, 1477, 1377, 1307, 1253, 1232, 1248, 1285, 1344, 1439, 1545, 1661, 1797, 1971, 2126, 1898, 1798, 1658, 1548, 1449, 1381, 1338, 1315, 1329, 1366, 1428, 1512, 1617, 1730, 1853, 2058, 2203, 1998, 1856, 1734, 1624, 1539, 1467, 1424, 1409, 1409, 1448, 1505, 1584, 1689, 1796, 1923, 2148, 2342, 2110, 1959, 1848, 1740, 1635, 1572, 1533, 1519, 1527, 1561, 1610, 1693, 1786, 1900, 2039, 2306, ]
b: [2199, 1976, 1828, 1725, 1640, 1549, 1510, 1473, 1457, 1462, 1485, 1529, 1603, 1690, 1796, 1922, 2111, 2048, 1861, 1735, 1618, 1532, 1462, 1400, 1360, 1346, 1355, 1384, 1433, 1501, 1589, 1680, 1793, 1982, 1975, 1801, 1672, 1564, 1465, 1387, 1326, 1294, 1272, 1284, 1310, 1363, 1440, 1518, 1627, 1730, 1888, 1903, 1736, 1617, 1500, 1405, 1325, 1260, 1219, 1198, 1208, 1239, 1296, 1365, 1465, 1557, 1664, 1833, 1837, 1684, 1556, 1449, 1345, 1261, 1200, 1151, 1132, 1137, 1175, 1238, 1307, 1402, 1517, 1627, 1775, 1806, 1650, 1518, 1407, 1306, 1216, 1144, 1099, 1078, 1092, 1120, 1185, 1270, 1360, 1472, 1596, 1740, 1778, 1621, 1499, 1381, 1270, 1180, 1110, 1066, 1046, 1057, 1087, 1150, 1236, 1335, 1447, 1560, 1703, 1764, 1612, 1479, 1367, 1255, 1158, 1089, 1045, 1031, 1038, 1071, 1128, 1218, 1312, 1430, 1544, 1702, 1773, 1604, 1480, 1359, 1252, 1148, 1082, 1041, 1024, 1036, 1061, 1124, 1210, 1314, 1432, 1542, 1693, 1782, 1617, 1485, 1366, 1253, 1162, 1092, 1046, 1038, 1043, 1068, 1130, 1215, 1322, 1431, 1549, 1700, 1786, 1634, 1499, 1378, 1276, 1184, 1108, 1067, 1060, 1062, 1094, 1153, 1235, 1346, 1450, 1556, 1722, 1813, 1667, 1535, 1411, 1306, 1220, 1148, 1103, 1089, 1091, 1132, 1189, 1277, 1372, 1474, 1593, 1740, 1852, 1712, 1569, 1449, 1354, 1263, 1195, 1156, 1137, 1149, 1180, 1239, 1319, 1413, 1516, 1627, 1798, 1910, 1741, 1617, 1509, 1403, 1324, 1267, 1221, 1205, 1213, 1244, 1296, 1377, 1459, 1565, 1679, 1826, 1984, 1788, 1696, 1556, 1473, 1386, 1333, 1296, 1280, 1282, 1316, 1361, 1442, 1519, 1624, 1732, 1905, 2059, 1881, 1746, 1642, 1533, 1467, 1400, 1370, 1354, 1357, 1389, 1438, 1500, 1587, 1688, 1800, 1995, 2190, 1971, 1845, 1743, 1643, 1562, 1515, 1468, 1453, 1454, 1501, 1532, 1608, 1692, 1782, 1904, 2117, ]
#4208x3120_F11_TL84_70 - F11_TL84
- ct: 4000
resolution: 4208x3120
r: [1286, 1278, 1265, 1240, 1240, 1217, 1199, 1205, 1185, 1191, 1213, 1243, 1251, 1276, 1282, 1297, 1358, 1273, 1227, 1225, 1219, 1199, 1190, 1164, 1151, 1137, 1151, 1174, 1213, 1238, 1237, 1261, 1274, 1331, 1273, 1220, 1214, 1199, 1174, 1154, 1126, 1115, 1105, 1106, 1132, 1183, 1215, 1238, 1260, 1277, 1310, 1254, 1204, 1204, 1193, 1151, 1097, 1081, 1066, 1057, 1066, 1094, 1133, 1183, 1228, 1240, 1275, 1341, 1239, 1196, 1193, 1167, 1112, 1071, 1046, 1035, 1034, 1045, 1056, 1097, 1153, 1210, 1232, 1257, 1313, 1240, 1187, 1195, 1142, 1080, 1048, 1031, 1023, 1025, 1026, 1034, 1065, 1115, 1186, 1223, 1254, 1322, 1241, 1178, 1166, 1121, 1060, 1031, 1014, 1029, 1039, 1026, 1032, 1057, 1101, 1162, 1210, 1247, 1295, 1224, 1178, 1157, 1104, 1049, 1021, 1015, 1036, 1044, 1036, 1024, 1049, 1097, 1144, 1206, 1235, 1312, 1215, 1170, 1153, 1098, 1046, 1020, 1017, 1043, 1046, 1036, 1028, 1039, 1086, 1144, 1202, 1234, 1280, 1224, 1178, 1148, 1093, 1049, 1010, 1011, 1032, 1038, 1030, 1024, 1042, 1094, 1153, 1213, 1231, 1294, 1237, 1185, 1157, 1104, 1050, 1017, 1005, 1029, 1030, 1022, 1027, 1048, 1098, 1172, 1213, 1243, 1300, 1244, 1173, 1168, 1122, 1073, 1021, 1011, 1004, 1007, 1015, 1029, 1062, 1115, 1176, 1219, 1227, 1304, 1243, 1192, 1182, 1148, 1093, 1048, 1014, 1004, 1007, 1019, 1039, 1068, 1132, 1187, 1214, 1237, 1290, 1233, 1197, 1186, 1170, 1130, 1068, 1043, 1021, 1024, 1035, 1063, 1100, 1148, 1200, 1218, 1239, 1280, 1225, 1193, 1182, 1178, 1152, 1113, 1082, 1057, 1055, 1069, 1098, 1133, 1184, 1199, 1214, 1224, 1291, 1224, 1180, 1184, 1176, 1165, 1145, 1105, 1093, 1081, 1091, 1128, 1167, 1185, 1197, 1202, 1207, 1268, 1216, 1185, 1208, 1194, 1182, 1156, 1131, 1104, 1097, 1110, 1150, 1176, 1214, 1220, 1219, 1234, 1375, ]
gr: [1267, 1211, 1186, 1180, 1181, 1169, 1162, 1152, 1144, 1152, 1159, 1184, 1192, 1196, 1221, 1236, 1372, 1236, 1175, 1159, 1149, 1143, 1142, 1134, 1123, 1120, 1130, 1134, 1154, 1170, 1190, 1202, 1212, 1256, 1214, 1170, 1139, 1139, 1125, 1116, 1120, 1100, 1097, 1106, 1111, 1131, 1160, 1173, 1191, 1203, 1266, 1206, 1150, 1137, 1128, 1111, 1095, 1087, 1073, 1069, 1077, 1097, 1116, 1137, 1160, 1182, 1204, 1252, 1187, 1142, 1137, 1122, 1098, 1068, 1065, 1046, 1052, 1054, 1069, 1093, 1121, 1147, 1174, 1200, 1253, 1176, 1136, 1125, 1111, 1080, 1061, 1044, 1042, 1032, 1041, 1055, 1072, 1106, 1139, 1157, 1186, 1246, 1182, 1120, 1109, 1092, 1067, 1042, 1037, 1033, 1028, 1031, 1043, 1058, 1094, 1130, 1156, 1179, 1240, 1162, 1120, 1110, 1088, 1054, 1032, 1030, 1027, 1027, 1025, 1035, 1050, 1091, 1121, 1149, 1186, 1226, 1152, 1122, 1108, 1092, 1054, 1031, 1024, 1026, 1029, 1021, 1037, 1055, 1085, 1113, 1144, 1178, 1217, 1168, 1113, 1102, 1084, 1053, 1032, 1025, 1024, 1027, 1027, 1032, 1048, 1083, 1123, 1142, 1168, 1226, 1163, 1116, 1111, 1086, 1060, 1033, 1023, 1023, 1025, 1028, 1035, 1062, 1090, 1124, 1140, 1164, 1216, 1179, 1124, 1107, 1100, 1072, 1043, 1024, 1024, 1020, 1029, 1044, 1067, 1106, 1128, 1143, 1163, 1219, 1179, 1127, 1117, 1105, 1086, 1053, 1034, 1029, 1029, 1034, 1054, 1076, 1102, 1125, 1157, 1179, 1231, 1165, 1137, 1120, 1112, 1100, 1069, 1051, 1038, 1038, 1052, 1068, 1097, 1109, 1132, 1146, 1166, 1233, 1187, 1128, 1122, 1111, 1107, 1083, 1073, 1057, 1060, 1076, 1083, 1105, 1114, 1134, 1139, 1170, 1243, 1174, 1126, 1115, 1111, 1097, 1093, 1072, 1073, 1067, 1077, 1095, 1104, 1120, 1139, 1135, 1169, 1256, 1232, 1141, 1148, 1125, 1122, 1123, 1104, 1096, 1093, 1094, 1117, 1137, 1146, 1153, 1158, 1160, 1389, ]
gb: [1264, 1211, 1190, 1175, 1162, 1153, 1144, 1142, 1132, 1132, 1149, 1168, 1193, 1211, 1221, 1230, 1377, 1240, 1176, 1162, 1152, 1140, 1139, 1131, 1120, 1120, 1122, 1142, 1155, 1163, 1191, 1203, 1210, 1274, 1240, 1171, 1153, 1142, 1131, 1118, 1104, 1091, 1099, 1099, 1111, 1133, 1156, 1172, 1192, 1213, 1273, 1222, 1157, 1140, 1134, 1117, 1092, 1075, 1069, 1067, 1080, 1091, 1115, 1136, 1167, 1180, 1211, 1272, 1226, 1153, 1134, 1124, 1102, 1079, 1063, 1048, 1050, 1055, 1072, 1097, 1123, 1158, 1180, 1201, 1273, 1199, 1142, 1131, 1117, 1088, 1059, 1042, 1035, 1034, 1037, 1057, 1078, 1116, 1145, 1161, 1193, 1256, 1211, 1141, 1116, 1106, 1074, 1049, 1035, 1031, 1033, 1033, 1045, 1073, 1104, 1136, 1153, 1188, 1250, 1196, 1128, 1114, 1100, 1060, 1039, 1030, 1034, 1032, 1030, 1030, 1057, 1094, 1125, 1155, 1169, 1257, 1204, 1126, 1114, 1100, 1063, 1037, 1022, 1024, 1032, 1034, 1036, 1060, 1094, 1125, 1148, 1172, 1242, 1188, 1123, 1116, 1093, 1060, 1035, 1025, 1024, 1027, 1027, 1034, 1057, 1090, 1134, 1146, 1172, 1239, 1192, 1122, 1119, 1095, 1069, 1040, 1021, 1026, 1016, 1030, 1038, 1065, 1094, 1136, 1148, 1173, 1244, 1202, 1132, 1117, 1104, 1068, 1043, 1034, 1020, 1019, 1025, 1042, 1072, 1102, 1136, 1152, 1167, 1237, 1191, 1136, 1120, 1108, 1087, 1053, 1034, 1025, 1020, 1032, 1050, 1073, 1110, 1130, 1148, 1182, 1238, 1201, 1133, 1117, 1120, 1100, 1071, 1049, 1038, 1032, 1048, 1064, 1090, 1117, 1134, 1152, 1170, 1237, 1188, 1128, 1128, 1115, 1106, 1090, 1067, 1058, 1058, 1066, 1082, 1107, 1115, 1135, 1148, 1171, 1250, 1187, 1138, 1126, 1119, 1108, 1095, 1078, 1075, 1066, 1079, 1090, 1099, 1121, 1143, 1149, 1165, 1237, 1229, 1158, 1157, 1139, 1119, 1118, 1101, 1078, 1084, 1091, 1103, 1125, 1130, 1149, 1173, 1184, 1398, ]
b: [1291, 1208, 1168, 1145, 1132, 1140, 1122, 1134, 1138, 1129, 1131, 1140, 1161, 1197, 1196, 1179, 1329, 1235, 1176, 1150, 1125, 1118, 1113, 1115, 1113, 1108, 1113, 1115, 1131, 1136, 1149, 1181, 1176, 1255, 1237, 1147, 1129, 1116, 1119, 1106, 1104, 1091, 1086, 1099, 1104, 1119, 1137, 1134, 1164, 1179, 1231, 1204, 1137, 1111, 1113, 1103, 1096, 1079, 1070, 1070, 1074, 1090, 1104, 1120, 1126, 1149, 1183, 1234, 1208, 1123, 1112, 1118, 1097, 1075, 1066, 1055, 1051, 1059, 1066, 1090, 1114, 1127, 1135, 1157, 1226, 1197, 1110, 1109, 1095, 1083, 1055, 1047, 1044, 1040, 1044, 1051, 1063, 1095, 1112, 1132, 1148, 1232, 1198, 1107, 1098, 1081, 1063, 1051, 1043, 1036, 1033, 1033, 1043, 1061, 1082, 1109, 1116, 1144, 1209, 1161, 1095, 1096, 1091, 1054, 1042, 1039, 1035, 1035, 1022, 1042, 1053, 1080, 1107, 1122, 1132, 1216, 1169, 1097, 1094, 1081, 1048, 1041, 1024, 1034, 1034, 1031, 1034, 1058, 1074, 1105, 1124, 1124, 1218, 1188, 1095, 1092, 1079, 1054, 1042, 1032, 1035, 1022, 1025, 1035, 1053, 1080, 1107, 1118, 1132, 1228, 1181, 1093, 1094, 1077, 1059, 1043, 1030, 1030, 1023, 1033, 1036, 1058, 1090, 1109, 1111, 1135, 1209, 1191, 1105, 1096, 1087, 1060, 1044, 1034, 1034, 1020, 1034, 1037, 1063, 1087, 1112, 1123, 1138, 1226, 1203, 1118, 1090, 1097, 1081, 1052, 1041, 1027, 1030, 1034, 1048, 1067, 1093, 1110, 1121, 1142, 1220, 1210, 1127, 1102, 1091, 1087, 1061, 1052, 1024, 1044, 1041, 1056, 1076, 1091, 1113, 1125, 1152, 1216, 1194, 1107, 1106, 1077, 1085, 1074, 1060, 1048, 1041, 1048, 1060, 1082, 1085, 1085, 1125, 1132, 1218, 1190, 1112, 1074, 1071, 1066, 1067, 1050, 1045, 1045, 1045, 1061, 1075, 1070, 1088, 1106, 1128, 1222, 1234, 1145, 1131, 1120, 1099, 1095, 1079, 1078, 1073, 1078, 1083, 1086, 1108, 1125, 1141, 1156, 1386, ]
#4208x3120_F2_CWF_70 - F2_CWF
- ct: 4230
resolution: 4208x3120
r: [1140, 1119, 1106, 1105, 1086, 1079, 1072, 1070, 1070, 1079, 1084, 1102, 1114, 1131, 1157, 1152, 1232, 1131, 1103, 1088, 1084, 1071, 1074, 1077, 1066, 1064, 1063, 1080, 1094, 1101, 1112, 1113, 1134, 1194, 1143, 1073, 1077, 1078, 1069, 1067, 1058, 1060, 1046, 1048, 1067, 1085, 1095, 1101, 1127, 1144, 1169, 1132, 1072, 1074, 1078, 1055, 1045, 1037, 1033, 1039, 1036, 1045, 1068, 1085, 1098, 1122, 1115, 1183, 1106, 1064, 1069, 1068, 1049, 1026, 1030, 1019, 1025, 1026, 1038, 1051, 1070, 1100, 1102, 1120, 1174, 1103, 1043, 1052, 1055, 1024, 1023, 1017, 1019, 1025, 1024, 1032, 1037, 1063, 1085, 1094, 1110, 1195, 1095, 1047, 1062, 1041, 1025, 1017, 1011, 1031, 1027, 1023, 1023, 1030, 1050, 1071, 1084, 1110, 1190, 1073, 1034, 1056, 1042, 1015, 1010, 1016, 1032, 1027, 1024, 1024, 1036, 1039, 1074, 1087, 1109, 1168, 1079, 1042, 1055, 1032, 1019, 1007, 1013, 1026, 1027, 1026, 1021, 1032, 1044, 1082, 1093, 1098, 1158, 1091, 1046, 1053, 1028, 1020, 1007, 1011, 1026, 1022, 1019, 1021, 1020, 1045, 1071, 1084, 1096, 1159, 1114, 1047, 1047, 1030, 1017, 997, 1008, 1016, 1019, 1021, 1016, 1028, 1053, 1080, 1094, 1103, 1157, 1088, 1049, 1052, 1040, 1024, 1003, 1001, 1004, 1010, 1006, 1019, 1037, 1057, 1085, 1084, 1099, 1161, 1106, 1057, 1063, 1056, 1032, 1010, 993, 998, 999, 1006, 1016, 1031, 1052, 1071, 1089, 1106, 1174, 1112, 1055, 1054, 1062, 1043, 1022, 1002, 1004, 1008, 1007, 1015, 1045, 1064, 1085, 1087, 1097, 1157, 1102, 1059, 1064, 1059, 1054, 1035, 1018, 1002, 1005, 1012, 1035, 1052, 1057, 1068, 1071, 1098, 1156, 1098, 1045, 1044, 1042, 1046, 1041, 1024, 1009, 1004, 1017, 1035, 1062, 1062, 1064, 1064, 1088, 1140, 1088, 1043, 1070, 1066, 1041, 1047, 1026, 1014, 1009, 1022, 1032, 1060, 1073, 1077, 1087, 1107, 1237, ]
gr: [1219, 1156, 1145, 1130, 1128, 1112, 1116, 1104, 1112, 1106, 1118, 1128, 1154, 1165, 1161, 1170, 1306, 1183, 1124, 1113, 1099, 1100, 1099, 1091, 1084, 1095, 1090, 1099, 1116, 1126, 1140, 1142, 1158, 1213, 1174, 1112, 1103, 1094, 1084, 1087, 1090, 1075, 1075, 1077, 1088, 1101, 1119, 1133, 1149, 1162, 1193, 1149, 1106, 1091, 1086, 1076, 1071, 1066, 1057, 1064, 1064, 1074, 1082, 1109, 1117, 1140, 1151, 1204, 1155, 1094, 1089, 1088, 1075, 1059, 1052, 1046, 1043, 1048, 1061, 1074, 1101, 1113, 1123, 1154, 1198, 1137, 1093, 1082, 1078, 1059, 1048, 1041, 1033, 1030, 1038, 1048, 1059, 1078, 1109, 1116, 1143, 1198, 1119, 1082, 1074, 1071, 1051, 1040, 1036, 1032, 1031, 1031, 1042, 1047, 1077, 1097, 1112, 1133, 1185, 1126, 1082, 1077, 1058, 1039, 1029, 1025, 1024, 1024, 1022, 1033, 1044, 1068, 1095, 1099, 1131, 1187, 1123, 1078, 1071, 1060, 1043, 1028, 1025, 1027, 1027, 1021, 1033, 1045, 1066, 1087, 1105, 1121, 1173, 1121, 1070, 1067, 1058, 1039, 1024, 1020, 1024, 1024, 1022, 1030, 1043, 1064, 1093, 1099, 1121, 1182, 1112, 1076, 1072, 1065, 1044, 1029, 1021, 1023, 1021, 1026, 1032, 1047, 1066, 1091, 1105, 1131, 1180, 1132, 1076, 1066, 1067, 1052, 1031, 1021, 1021, 1020, 1028, 1039, 1044, 1076, 1098, 1107, 1127, 1179, 1124, 1087, 1076, 1076, 1064, 1036, 1018, 1018, 1020, 1028, 1041, 1056, 1085, 1086, 1106, 1128, 1187, 1126, 1099, 1082, 1072, 1065, 1043, 1031, 1024, 1029, 1034, 1052, 1065, 1074, 1094, 1111, 1127, 1181, 1128, 1086, 1076, 1073, 1072, 1058, 1050, 1046, 1039, 1048, 1059, 1074, 1070, 1096, 1112, 1124, 1174, 1140, 1078, 1077, 1067, 1057, 1055, 1043, 1040, 1042, 1042, 1054, 1069, 1075, 1088, 1099, 1112, 1189, 1182, 1099, 1096, 1093, 1082, 1080, 1072, 1055, 1059, 1061, 1076, 1095, 1090, 1112, 1113, 1140, 1321, ]
gb: [1236, 1163, 1136, 1120, 1113, 1111, 1109, 1101, 1104, 1099, 1102, 1140, 1141, 1158, 1170, 1194, 1332, 1195, 1138, 1114, 1109, 1097, 1098, 1092, 1089, 1085, 1089, 1098, 1117, 1125, 1141, 1155, 1156, 1232, 1186, 1125, 1108, 1095, 1099, 1081, 1078, 1075, 1073, 1073, 1083, 1097, 1118, 1128, 1148, 1166, 1218, 1171, 1107, 1099, 1091, 1086, 1069, 1059, 1051, 1049, 1064, 1071, 1088, 1110, 1118, 1137, 1162, 1225, 1171, 1099, 1092, 1085, 1069, 1057, 1051, 1041, 1036, 1050, 1055, 1077, 1092, 1118, 1133, 1151, 1227, 1158, 1099, 1090, 1086, 1061, 1043, 1039, 1028, 1036, 1039, 1048, 1060, 1091, 1110, 1117, 1147, 1216, 1152, 1086, 1082, 1073, 1054, 1040, 1026, 1028, 1029, 1032, 1040, 1051, 1076, 1104, 1115, 1139, 1222, 1141, 1088, 1078, 1073, 1048, 1034, 1026, 1025, 1025, 1022, 1033, 1051, 1077, 1104, 1115, 1129, 1202, 1154, 1081, 1080, 1069, 1050, 1029, 1023, 1022, 1029, 1027, 1031, 1050, 1070, 1098, 1107, 1127, 1188, 1146, 1090, 1078, 1065, 1044, 1029, 1015, 1022, 1024, 1025, 1035, 1053, 1071, 1104, 1102, 1136, 1207, 1152, 1083, 1078, 1073, 1042, 1027, 1024, 1024, 1016, 1024, 1037, 1056, 1076, 1106, 1111, 1130, 1197, 1146, 1086, 1076, 1074, 1046, 1031, 1023, 1018, 1021, 1026, 1043, 1051, 1081, 1102, 1111, 1126, 1191, 1134, 1090, 1084, 1079, 1067, 1038, 1019, 1018, 1021, 1033, 1041, 1055, 1081, 1099, 1107, 1131, 1199, 1147, 1091, 1082, 1083, 1072, 1050, 1031, 1024, 1027, 1032, 1053, 1063, 1082, 1099, 1107, 1130, 1191, 1139, 1087, 1078, 1077, 1073, 1058, 1048, 1037, 1037, 1046, 1062, 1073, 1079, 1099, 1099, 1130, 1177, 1147, 1082, 1087, 1074, 1061, 1062, 1052, 1042, 1036, 1045, 1063, 1068, 1079, 1094, 1103, 1120, 1189, 1176, 1105, 1102, 1092, 1081, 1073, 1064, 1053, 1053, 1066, 1067, 1084, 1087, 1103, 1134, 1146, 1336, ]
b: [1203, 1195, 1154, 1123, 1104, 1106, 1116, 1099, 1099, 1099, 1102, 1106, 1123, 1155, 1149, 1168, 1283, 1196, 1141, 1119, 1102, 1098, 1088, 1088, 1095, 1086, 1095, 1097, 1101, 1117, 1121, 1156, 1135, 1209, 1211, 1127, 1102, 1082, 1089, 1088, 1072, 1075, 1083, 1083, 1085, 1106, 1107, 1120, 1142, 1149, 1224, 1163, 1121, 1087, 1078, 1085, 1077, 1062, 1065, 1056, 1057, 1082, 1093, 1094, 1096, 1111, 1147, 1193, 1179, 1105, 1083, 1088, 1070, 1074, 1060, 1048, 1055, 1044, 1068, 1082, 1091, 1097, 1102, 1141, 1209, 1178, 1091, 1076, 1077, 1063, 1060, 1043, 1043, 1035, 1046, 1059, 1064, 1084, 1103, 1107, 1125, 1196, 1156, 1088, 1068, 1070, 1057, 1043, 1046, 1041, 1038, 1038, 1046, 1059, 1073, 1083, 1086, 1111, 1178, 1146, 1067, 1083, 1068, 1044, 1042, 1033, 1044, 1033, 1026, 1037, 1045, 1067, 1089, 1092, 1108, 1203, 1148, 1082, 1072, 1066, 1050, 1044, 1035, 1035, 1031, 1028, 1035, 1055, 1069, 1082, 1094, 1101, 1188, 1163, 1067, 1074, 1056, 1040, 1034, 1037, 1026, 1022, 1033, 1037, 1049, 1067, 1084, 1092, 1103, 1185, 1156, 1074, 1073, 1066, 1042, 1036, 1028, 1031, 1030, 1034, 1042, 1051, 1073, 1091, 1090, 1102, 1196, 1172, 1086, 1071, 1077, 1055, 1041, 1036, 1025, 1024, 1028, 1032, 1053, 1076, 1094, 1089, 1101, 1178, 1179, 1095, 1079, 1075, 1070, 1043, 1026, 1022, 1022, 1029, 1045, 1054, 1078, 1075, 1092, 1120, 1179, 1193, 1091, 1074, 1061, 1064, 1056, 1043, 1034, 1026, 1027, 1039, 1060, 1081, 1070, 1078, 1115, 1205, 1172, 1096, 1069, 1060, 1071, 1055, 1044, 1035, 1027, 1043, 1048, 1063, 1054, 1065, 1083, 1122, 1186, 1158, 1088, 1060, 1043, 1037, 1037, 1031, 1033, 1025, 1029, 1035, 1041, 1041, 1060, 1084, 1114, 1202, 1217, 1122, 1101, 1079, 1058, 1061, 1049, 1056, 1051, 1036, 1062, 1061, 1076, 1094, 1116, 1139, 1331, ]
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/data/uncalibrated.yaml | # SPDX-License-Identifier: CC0-1.0
%YAML 1.1
---
version: 1
algorithms:
- Agc:
- Awb:
...
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/ccm.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Ideas On Board
*
* RkISP1 Color Correction Matrix control algorithm
*/
#include "ccm.h"
#include <algorithm>
#include <chrono>
#include <cmath>
#include <tuple>
#include <vector>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
#include <libcamera/control_ids.h>
#include <libcamera/ipa/core_ipa_interface.h>
#include "libcamera/internal/yaml_parser.h"
#include "../utils.h"
#include "libipa/matrix_interpolator.h"
/**
* \file ccm.h
*/
namespace libcamera {
namespace ipa::rkisp1::algorithms {
/**
* \class Ccm
* \brief A color correction matrix algorithm
*/
LOG_DEFINE_CATEGORY(RkISP1Ccm)
/**
* \copydoc libcamera::ipa::Algorithm::init
*/
int Ccm::init([[maybe_unused]] IPAContext &context, const YamlObject &tuningData)
{
	/*
	 * Load the colour-temperature-indexed CCM table from the tuning
	 * data. A missing or malformed table is not fatal: the interpolator
	 * is reset and falls back to the unit matrix.
	 */
	int ret = ccm_.readYaml(tuningData["ccms"], "ct", "ccm");
	if (ret < 0) {
		LOG(RkISP1Ccm, Warning)
			<< "Failed to parse 'ccm' "
			<< "parameter from tuning file; falling back to unit matrix";
		ccm_.reset();
	}

	/* The per-channel offsets are optional as well, defaulting to zero. */
	ret = offsets_.readYaml(tuningData["ccms"], "ct", "offsets");
	if (ret < 0) {
		LOG(RkISP1Ccm, Warning)
			<< "Failed to parse 'offsets' "
			<< "parameter from tuning file; falling back to zero offsets";
		/*
		 * MatrixInterpolator::reset() resets to identity matrices
		 * while here we need zero matrices so we need to construct it
		 * ourselves.
		 */
		Matrix<int16_t, 3, 1> m({ 0, 0, 0 });
		std::map<unsigned int, Matrix<int16_t, 3, 1>> matrices = { { 0, m } };
		offsets_ = MatrixInterpolator<int16_t, 3, 1>(matrices);
	}

	return 0;
}
/*
 * Program the ISP cross-talk (CTK) block with the given colour correction
 * matrix and per-channel offsets, and flag the module for update.
 */
void Ccm::setParameters(rkisp1_params_cfg *params,
			const Matrix<float, 3, 3> &matrix,
			const Matrix<int16_t, 3, 1> &offsets)
{
	struct rkisp1_cif_isp_ctk_config &config = params->others.ctk_config;

	/*
	 * 4 bit integer and 7 bit fractional, ranging from -8 (0x400) to
	 * +7.992 (0x3ff)
	 */
	for (unsigned int i = 0; i < 3; i++) {
		for (unsigned int j = 0; j < 3; j++)
			config.coeff[i][j] =
				utils::floatingToFixedPoint<4, 7, uint16_t, double>(matrix[i][j]);
	}

	/* Offsets are masked to the 12-bit register width. */
	for (unsigned int i = 0; i < 3; i++)
		config.ct_offset[i] = offsets[i][0] & 0xfff;

	LOG(RkISP1Ccm, Debug) << "Setting matrix " << matrix;
	LOG(RkISP1Ccm, Debug) << "Setting offsets " << offsets;

	/* Enable the CTK module and mark both enable and config as updated. */
	params->module_en_update |= RKISP1_CIF_ISP_MODULE_CTK;
	params->module_ens |= RKISP1_CIF_ISP_MODULE_CTK;
	params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_CTK;
}
/**
* \copydoc libcamera::ipa::Algorithm::prepare
*/
void Ccm::prepare(IPAContext &context, const uint32_t frame,
		  IPAFrameContext &frameContext,
		  rkisp1_params_cfg *params)
{
	/* Use the colour temperature estimated by the AWB algorithm. */
	uint32_t ct = context.activeState.awb.temperatureK;

	/*
	 * \todo The colour temperature will likely be noisy, add filtering to
	 * avoid updating the CCM matrix all the time.
	 */
	if (frame > 0 && ct == ct_)
		return;

	/* Cache the temperature so unchanged values skip reprogramming. */
	ct_ = ct;

	/* Interpolate the matrix and offsets for this colour temperature. */
	Matrix<float, 3, 3> ccm = ccm_.get(ct);
	Matrix<int16_t, 3, 1> offsets = offsets_.get(ct);

	/* Record the matrix in the frame context for metadata reporting. */
	frameContext.ccm.ccm = ccm;

	setParameters(params, ccm, offsets);
}
/**
* \copydoc libcamera::ipa::Algorithm::process
*/
void Ccm::process([[maybe_unused]] IPAContext &context,
		  [[maybe_unused]] const uint32_t frame,
		  IPAFrameContext &frameContext,
		  [[maybe_unused]] const rkisp1_stat_buffer *stats,
		  ControlList &metadata)
{
	/*
	 * Report the CCM applied to this frame through metadata, flattening
	 * the 3x3 matrix in row-major order as expected by the
	 * ColourCorrectionMatrix control.
	 *
	 * The original code wrote m[i] instead of m[i * 3 + j], which
	 * overwrote the same element for every column and left m[3..8]
	 * uninitialized.
	 */
	float m[9];
	for (unsigned int i = 0; i < 3; i++) {
		for (unsigned int j = 0; j < 3; j++)
			m[i * 3 + j] = frameContext.ccm.ccm[i][j];
	}
	metadata.set(controls::ColourCorrectionMatrix, m);
}
REGISTER_IPA_ALGORITHM(Ccm, "Ccm")
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/gsl.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021-2022, Ideas On Board
*
* RkISP1 Gamma Sensor Linearization control
*/
#include "gsl.h"
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
#include "libcamera/internal/yaml_parser.h"
#include "linux/rkisp1-config.h"
/**
* \file gsl.h
*/
namespace libcamera {
namespace ipa::rkisp1::algorithms {
/**
* \class GammaSensorLinearization
* \brief RkISP1 Gamma Sensor Linearization control
*
* This algorithm linearizes the sensor output to compensate the sensor
* non-linearities by applying piecewise linear functions to the red, green and
* blue channels.
*
* The curves are specified in the tuning data and defined using 17 points.
*
* - The X coordinates are expressed using 16 intervals, with the first point
* at X coordinate 0. Each interval is expressed as a 2-bit value DX (from
* GAMMA_DX_1 to GAMMA_DX_16), stored in the RKISP1_CIF_ISP_GAMMA_DX_LO and
* RKISP1_CIF_ISP_GAMMA_DX_HI registers. The real interval is equal to
* \f$2^{dx+4}\f$. X coordinates are shared between the red, green and blue
* curves.
*
* - The Y coordinates are specified as 17 values separately for the
* red, green and blue channels, with a 12-bit resolution. Each value must be
* in the [-2048, 2047] range compared to the previous value.
*/
LOG_DEFINE_CATEGORY(RkISP1Gsl)
static constexpr unsigned int kDegammaXIntervals = 16;
/* Default-construct; the curve data is populated by init(). */
GammaSensorLinearization::GammaSensorLinearization()
{
}
/**
* \copydoc libcamera::ipa::Algorithm::init
*/
int GammaSensorLinearization::init([[maybe_unused]] IPAContext &context,
				   const YamlObject &tuningData)
{
	/* The tuning data must provide exactly 16 X-axis intervals. */
	std::vector<uint16_t> xIntervals =
		tuningData["x-intervals"].getList<uint16_t>().value_or(std::vector<uint16_t>{});
	if (xIntervals.size() != kDegammaXIntervals) {
		LOG(RkISP1Gsl, Error)
			<< "Invalid 'x' coordinates: expected "
			<< kDegammaXIntervals << " elements, got "
			<< xIntervals.size();
		return -EINVAL;
	}

	/* Compute gammaDx_ intervals from xIntervals values */
	/*
	 * Each 32-bit register packs 8 intervals in 4-bit slots; only the
	 * low 3 bits of each interval value are used.
	 */
	gammaDx_[0] = 0;
	gammaDx_[1] = 0;
	for (unsigned int i = 0; i < kDegammaXIntervals; ++i)
		gammaDx_[i / 8] |= (xIntervals[i] & 0x07) << ((i % 8) * 4);

	/* The 'y' entry must be a dictionary of per-channel curves. */
	const YamlObject &yObject = tuningData["y"];
	if (!yObject.isDictionary()) {
		LOG(RkISP1Gsl, Error)
			<< "Issue while parsing 'y' in tuning file: "
			<< "entry must be a dictionary";
		return -EINVAL;
	}

	curveYr_ = yObject["red"].getList<uint16_t>().value_or(std::vector<uint16_t>{});
	if (curveYr_.size() != RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE) {
		LOG(RkISP1Gsl, Error)
			<< "Invalid 'y:red' coordinates: expected "
			<< RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE
			<< " elements, got " << curveYr_.size();
		return -EINVAL;
	}

	curveYg_ = yObject["green"].getList<uint16_t>().value_or(std::vector<uint16_t>{});
	if (curveYg_.size() != RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE) {
		LOG(RkISP1Gsl, Error)
			<< "Invalid 'y:green' coordinates: expected "
			<< RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE
			<< " elements, got " << curveYg_.size();
		return -EINVAL;
	}

	curveYb_ = yObject["blue"].getList<uint16_t>().value_or(std::vector<uint16_t>{});
	if (curveYb_.size() != RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE) {
		LOG(RkISP1Gsl, Error)
			<< "Invalid 'y:blue' coordinates: expected "
			<< RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE
			<< " elements, got " << curveYb_.size();
		return -EINVAL;
	}

	return 0;
}
/**
* \copydoc libcamera::ipa::Algorithm::prepare
*/
void GammaSensorLinearization::prepare([[maybe_unused]] IPAContext &context,
				       const uint32_t frame,
				       [[maybe_unused]] IPAFrameContext &frameContext,
				       rkisp1_params_cfg *params)
{
	/* The degamma curves are static, program them on the first frame only. */
	if (frame > 0)
		return;

	params->others.sdg_config.xa_pnts.gamma_dx0 = gammaDx_[0];
	params->others.sdg_config.xa_pnts.gamma_dx1 = gammaDx_[1];

	/* Copy the per-channel Y coordinates into the SDG configuration. */
	std::copy(curveYr_.begin(), curveYr_.end(),
		  params->others.sdg_config.curve_r.gamma_y);
	std::copy(curveYg_.begin(), curveYg_.end(),
		  params->others.sdg_config.curve_g.gamma_y);
	std::copy(curveYb_.begin(), curveYb_.end(),
		  params->others.sdg_config.curve_b.gamma_y);

	/* Enable the SDG module and flag both enable and config updates. */
	params->module_en_update |= RKISP1_CIF_ISP_MODULE_SDG;
	params->module_ens |= RKISP1_CIF_ISP_MODULE_SDG;
	params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_SDG;
}
REGISTER_IPA_ALGORITHM(GammaSensorLinearization, "GammaSensorLinearization")
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/dpcc.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021-2022, Ideas On Board
*
* RkISP1 Defect Pixel Cluster Correction control
*/
#pragma once
#include "algorithm.h"
namespace libcamera {
namespace ipa::rkisp1::algorithms {
/*
 * Algorithm configuring the RkISP1 Defect Pixel Cluster Correction (DPCC)
 * block from the tuning data.
 */
class DefectPixelClusterCorrection : public Algorithm
{
public:
	DefectPixelClusterCorrection();
	~DefectPixelClusterCorrection() = default;

	int init(IPAContext &context, const YamlObject &tuningData) override;
	void prepare(IPAContext &context, const uint32_t frame,
		     IPAFrameContext &frameContext,
		     rkisp1_params_cfg *params) override;

private:
	/* Cached DPCC hardware configuration; see init() for its source. */
	rkisp1_cif_isp_dpcc_config config_;
};
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/awb.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021-2022, Ideas On Board
*
* AWB control algorithm
*/
#include "awb.h"
#include <algorithm>
#include <cmath>
#include <iomanip>
#include <libcamera/base/log.h>
#include <libcamera/control_ids.h>
#include <libcamera/ipa/core_ipa_interface.h>
/**
* \file awb.h
*/
namespace libcamera {
namespace ipa::rkisp1::algorithms {
/**
* \class Awb
* \brief A Grey world white balance correction algorithm
*/
LOG_DEFINE_CATEGORY(RkISP1Awb)
/* Minimum mean value below which AWB can't operate. */
constexpr double kMeanMinThreshold = 2.0;
/* Default to YCbCr-based AWB statistics (rgbMode_ selects RGB in prepare()). */
Awb::Awb()
	: rgbMode_(false)
{
}
/**
* \copydoc libcamera::ipa::Algorithm::configure
*/
/**
 * \copydoc libcamera::ipa::Algorithm::configure
 */
int Awb::configure(IPAContext &context,
		   const IPACameraSensorInfo &configInfo)
{
	/* Start from neutral gains in both manual and automatic modes. */
	auto &gains = context.activeState.awb.gains;
	gains.manual.red = 1.0;
	gains.manual.blue = 1.0;
	gains.manual.green = 1.0;
	gains.automatic.red = 1.0;
	gains.automatic.blue = 1.0;
	gains.automatic.green = 1.0;

	context.activeState.awb.autoEnabled = true;

	/*
	 * Define the measurement window for AWB as a centered rectangle
	 * covering 3/4 of the image width and height.
	 */
	const auto &size = configInfo.outputSize;
	auto &window = context.configuration.awb.measureWindow;
	window.h_offs = size.width / 8;
	window.v_offs = size.height / 8;
	window.h_size = 3 * size.width / 4;
	window.v_size = 3 * size.height / 4;

	context.configuration.awb.enabled = true;

	return 0;
}
/**
* \copydoc libcamera::ipa::Algorithm::queueRequest
*/
void Awb::queueRequest(IPAContext &context,
		       [[maybe_unused]] const uint32_t frame,
		       IPAFrameContext &frameContext,
		       const ControlList &controls)
{
	auto &awb = context.activeState.awb;

	/* Toggle automatic white balance when the control changes. */
	const auto &awbEnable = controls.get(controls::AwbEnable);
	if (awbEnable && *awbEnable != awb.autoEnabled) {
		awb.autoEnabled = *awbEnable;

		LOG(RkISP1Awb, Debug)
			<< (*awbEnable ? "Enabling" : "Disabling") << " AWB";
	}

	/* Manual colour gains only take effect when AWB is disabled. */
	const auto &colourGains = controls.get(controls::ColourGains);
	if (colourGains && !awb.autoEnabled) {
		awb.gains.manual.red = (*colourGains)[0];
		awb.gains.manual.blue = (*colourGains)[1];

		LOG(RkISP1Awb, Debug)
			<< "Set colour gains to red: " << awb.gains.manual.red
			<< ", blue: " << awb.gains.manual.blue;
	}

	frameContext.awb.autoEnabled = awb.autoEnabled;

	/*
	 * In manual mode latch the gains into the frame context now; only
	 * red and blue are controllable, green is fixed to 1.0.
	 */
	if (!awb.autoEnabled) {
		frameContext.awb.gains.red = awb.gains.manual.red;
		frameContext.awb.gains.green = 1.0;
		frameContext.awb.gains.blue = awb.gains.manual.blue;
	}
}
/**
* \copydoc libcamera::ipa::Algorithm::prepare
*/
void Awb::prepare(IPAContext &context, const uint32_t frame,
		  IPAFrameContext &frameContext, rkisp1_params_cfg *params)
{
	/*
	 * This is the latest time we can read the active state. This is the
	 * most up-to-date automatic values we can read.
	 */
	if (frameContext.awb.autoEnabled) {
		frameContext.awb.gains.red = context.activeState.awb.gains.automatic.red;
		frameContext.awb.gains.green = context.activeState.awb.gains.automatic.green;
		frameContext.awb.gains.blue = context.activeState.awb.gains.automatic.blue;
	}

	/* Program the gains as Q2.8 fixed-point values (256 == 1.0). */
	params->others.awb_gain_config.gain_green_b = 256 * frameContext.awb.gains.green;
	params->others.awb_gain_config.gain_blue = 256 * frameContext.awb.gains.blue;
	params->others.awb_gain_config.gain_red = 256 * frameContext.awb.gains.red;
	params->others.awb_gain_config.gain_green_r = 256 * frameContext.awb.gains.green;

	/* Update the gains. */
	params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_AWB_GAIN;

	/* If we have already set the AWB measurement parameters, return. */
	if (frame > 0)
		return;

	rkisp1_cif_isp_awb_meas_config &awb_config = params->meas.awb_meas_config;

	/* Configure the measure window for AWB. */
	awb_config.awb_wnd = context.configuration.awb.measureWindow;

	/* Number of frames to use to estimate the means (0 means 1 frame). */
	awb_config.frames = 0;

	/* Select RGB or YCbCr means measurement. */
	if (rgbMode_) {
		awb_config.awb_mode = RKISP1_CIF_ISP_AWB_MODE_RGB;

		/*
		 * For RGB-based measurements, pixels are selected with maximum
		 * red, green and blue thresholds that are set in the
		 * awb_ref_cr, awb_min_y and awb_ref_cb respectively. The other
		 * values are not used, set them to 0.
		 */
		awb_config.awb_ref_cr = 250;
		awb_config.min_y = 250;
		awb_config.awb_ref_cb = 250;

		awb_config.max_y = 0;
		awb_config.min_c = 0;
		awb_config.max_csum = 0;
	} else {
		awb_config.awb_mode = RKISP1_CIF_ISP_AWB_MODE_YCBCR;

		/* Set the reference Cr and Cb (AWB target) to white. */
		awb_config.awb_ref_cb = 128;
		awb_config.awb_ref_cr = 128;

		/*
		 * Filter out pixels based on luminance and chrominance values.
		 * The acceptable luma values are specified as a [16, 250]
		 * range, while the acceptable chroma values are specified with
		 * a minimum of 16 and a maximum Cb+Cr sum of 250.
		 */
		awb_config.min_y = 16;
		awb_config.max_y = 250;
		awb_config.min_c = 16;
		awb_config.max_csum = 250;
	}

	/* Enable the AWB gains. */
	params->module_en_update |= RKISP1_CIF_ISP_MODULE_AWB_GAIN;
	params->module_ens |= RKISP1_CIF_ISP_MODULE_AWB_GAIN;

	/* Update the AWB measurement parameters and enable the AWB module. */
	params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_AWB;
	params->module_en_update |= RKISP1_CIF_ISP_MODULE_AWB;
	params->module_ens |= RKISP1_CIF_ISP_MODULE_AWB;
}
/*
 * Estimate the correlated colour temperature from the RGB means using
 * McCamy's cubic approximation on the CIE 1931 chromaticity coordinates.
 */
uint32_t Awb::estimateCCT(double red, double green, double blue)
{
	/* Convert the RGB means to CIE XYZ tristimulus values. */
	const double X = -0.14282 * red + 1.54924 * green + -0.95641 * blue;
	const double Y = -0.32466 * red + 1.57837 * green + -0.73191 * blue;
	const double Z = -0.68202 * red + 0.77073 * green + 0.56332 * blue;

	/* Normalized chromaticity coordinates. */
	const double sum = X + Y + Z;
	const double x = X / sum;
	const double y = Y / sum;

	/* McCamy's formula: CCT = 449 n^3 + 3525 n^2 + 6823.3 n + 5520.33 */
	const double n = (x - 0.3320) / (0.1858 - y);
	return 449 * n * n * n + 3525 * n * n + 6823.3 * n + 5520.33;
}
/**
* \copydoc libcamera::ipa::Algorithm::process
*/
void Awb::process(IPAContext &context,
		  [[maybe_unused]] const uint32_t frame,
		  IPAFrameContext &frameContext,
		  const rkisp1_stat_buffer *stats,
		  ControlList &metadata)
{
	const rkisp1_cif_isp_stat *params = &stats->params;
	const rkisp1_cif_isp_awb_stat *awb = &params->awb;
	IPAActiveState &activeState = context.activeState;
	double greenMean;
	double redMean;
	double blueMean;

	/* Read the channel means in the mode selected in prepare(). */
	if (rgbMode_) {
		greenMean = awb->awb_mean[0].mean_y_or_g;
		redMean = awb->awb_mean[0].mean_cr_or_r;
		blueMean = awb->awb_mean[0].mean_cb_or_b;
	} else {
		/* Get the YCbCr mean values */
		double yMean = awb->awb_mean[0].mean_y_or_g;
		double cbMean = awb->awb_mean[0].mean_cb_or_b;
		double crMean = awb->awb_mean[0].mean_cr_or_r;

		/*
		 * Convert from YCbCr to RGB.
		 * The hardware uses the following formulas:
		 * Y = 16 + 0.2500 R + 0.5000 G + 0.1094 B
		 * Cb = 128 - 0.1406 R - 0.2969 G + 0.4375 B
		 * Cr = 128 + 0.4375 R - 0.3750 G - 0.0625 B
		 *
		 * The inverse matrix is thus:
		 * [[1,1636, -0,0623, 1,6008]
		 * [1,1636, -0,4045, -0,7949]
		 * [1,1636, 1,9912, -0,0250]]
		 */
		yMean -= 16;
		cbMean -= 128;
		crMean -= 128;
		redMean = 1.1636 * yMean - 0.0623 * cbMean + 1.6008 * crMean;
		greenMean = 1.1636 * yMean - 0.4045 * cbMean - 0.7949 * crMean;
		blueMean = 1.1636 * yMean + 1.9912 * cbMean - 0.0250 * crMean;

		/*
		 * Due to hardware rounding errors in the YCbCr means, the
		 * calculated RGB means may be negative. This would lead to
		 * negative gains, messing up calculation. Prevent this by
		 * clamping the means to positive values.
		 */
		redMean = std::max(redMean, 0.0);
		greenMean = std::max(greenMean, 0.0);
		blueMean = std::max(blueMean, 0.0);
	}

	/*
	 * The ISP computes the AWB means after applying the colour gains,
	 * divide by the gains that were used to get the raw means from the
	 * sensor.
	 */
	redMean /= frameContext.awb.gains.red;
	greenMean /= frameContext.awb.gains.green;
	blueMean /= frameContext.awb.gains.blue;

	/*
	 * If the means are too small we don't have enough information to
	 * meaningfully calculate gains. Freeze the algorithm in that case.
	 */
	if (redMean < kMeanMinThreshold && greenMean < kMeanMinThreshold &&
	    blueMean < kMeanMinThreshold) {
		frameContext.awb.temperatureK = activeState.awb.temperatureK;
		return;
	}

	activeState.awb.temperatureK = estimateCCT(redMean, greenMean, blueMean);

	/*
	 * Estimate the red and blue gains to apply in a grey world. The green
	 * gain is hardcoded to 1.0. Avoid divisions by zero by clamping the
	 * divisor to a minimum value of 1.0.
	 */
	double redGain = greenMean / std::max(redMean, 1.0);
	double blueGain = greenMean / std::max(blueMean, 1.0);

	/*
	 * Clamp the gain values to the hardware, which expresses gains as Q2.8
	 * unsigned integer values. Set the minimum just above zero to avoid
	 * divisions by zero when computing the raw means in subsequent
	 * iterations.
	 */
	redGain = std::clamp(redGain, 1.0 / 256, 1023.0 / 256);
	blueGain = std::clamp(blueGain, 1.0 / 256, 1023.0 / 256);

	/* Filter the values to avoid oscillations. */
	double speed = 0.2;
	redGain = speed * redGain + (1 - speed) * activeState.awb.gains.automatic.red;
	blueGain = speed * blueGain + (1 - speed) * activeState.awb.gains.automatic.blue;

	activeState.awb.gains.automatic.red = redGain;
	activeState.awb.gains.automatic.blue = blueGain;
	activeState.awb.gains.automatic.green = 1.0;

	frameContext.awb.temperatureK = activeState.awb.temperatureK;

	/* Report the gains and temperature applied to this frame. */
	metadata.set(controls::AwbEnable, frameContext.awb.autoEnabled);
	metadata.set(controls::ColourGains, {
			static_cast<float>(frameContext.awb.gains.red),
			static_cast<float>(frameContext.awb.gains.blue)
		});
	metadata.set(controls::ColourTemperature, frameContext.awb.temperatureK);

	LOG(RkISP1Awb, Debug) << std::showpoint
		<< "Means [" << redMean << ", " << greenMean << ", " << blueMean
		<< "], gains [" << activeState.awb.gains.automatic.red << ", "
		<< activeState.awb.gains.automatic.green << ", "
		<< activeState.awb.gains.automatic.blue << "], temp "
		<< frameContext.awb.temperatureK << "K";
}
REGISTER_IPA_ALGORITHM(Awb, "Awb")
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/goc.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Ideas On Board
*
* RkISP1 Gamma out control
*/
#pragma once
#include "algorithm.h"
namespace libcamera {
namespace ipa::rkisp1::algorithms {
/*
 * Algorithm driving the RkISP1 gamma out (GOC) block with a configurable
 * gamma value.
 */
class GammaOutCorrection : public Algorithm
{
public:
	GammaOutCorrection() = default;
	~GammaOutCorrection() = default;

	int init(IPAContext &context, const YamlObject &tuningData) override;
	int configure(IPAContext &context,
		      const IPACameraSensorInfo &configInfo) override;
	void queueRequest(IPAContext &context,
			  const uint32_t frame,
			  IPAFrameContext &frameContext,
			  const ControlList &controls) override;
	void prepare(IPAContext &context, const uint32_t frame,
		     IPAFrameContext &frameContext,
		     rkisp1_params_cfg *params) override;
	void process(IPAContext &context, const uint32_t frame,
		     IPAFrameContext &frameContext,
		     const rkisp1_stat_buffer *stats,
		     ControlList &metadata) override;

private:
	/* Fallback gamma value; see init() for its source. */
	float defaultGamma_;
};
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/gsl.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021-2022, Ideas On Board
*
* RkISP1 Gamma Sensor Linearization control
*/
#pragma once
#include "algorithm.h"
namespace libcamera {
namespace ipa::rkisp1::algorithms {
/*
 * Algorithm applying piecewise-linear degamma curves to linearize the
 * sensor output (SDG block).
 */
class GammaSensorLinearization : public Algorithm
{
public:
	GammaSensorLinearization();
	~GammaSensorLinearization() = default;

	int init(IPAContext &context, const YamlObject &tuningData) override;
	void prepare(IPAContext &context, const uint32_t frame,
		     IPAFrameContext &frameContext,
		     rkisp1_params_cfg *params) override;

private:
	/* Packed X-axis interval registers (8 intervals of 4 bits each). */
	uint32_t gammaDx_[2];
	/* Per-channel degamma curve Y coordinates. */
	std::vector<uint16_t> curveYr_;
	std::vector<uint16_t> curveYg_;
	std::vector<uint16_t> curveYb_;
};
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/blc.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021-2022, Ideas On Board
*
* RkISP1 Black Level Correction control
*/
#include "blc.h"
#include <libcamera/base/log.h>
#include "libcamera/internal/yaml_parser.h"
/**
* \file blc.h
*/
namespace libcamera {
namespace ipa::rkisp1::algorithms {
/**
* \class BlackLevelCorrection
* \brief RkISP1 Black Level Correction control
*
* The pixels output by the camera normally include a black level, because
* sensors do not always report a signal level of '0' for black. Pixels at or
* below this level should be considered black. To achieve that, the RkISP BLC
* algorithm subtracts a configurable offset from all pixels.
*
* The black level can be measured at runtime from an optical dark region of the
* camera sensor, or measured during the camera tuning process. The first option
* isn't currently supported.
*/
LOG_DEFINE_CATEGORY(RkISP1Blc)
/* Tuning parameters are only valid once init() has run. */
BlackLevelCorrection::BlackLevelCorrection()
	: tuningParameters_(false)
{
}
/**
* \copydoc libcamera::ipa::Algorithm::init
*/
/**
 * \copydoc libcamera::ipa::Algorithm::init
 */
int BlackLevelCorrection::init([[maybe_unused]] IPAContext &context,
			       const YamlObject &tuningData)
{
	/*
	 * Read the per-channel black levels from the tuning data, using 256
	 * as the default for any missing entry.
	 */
	static constexpr int16_t kDefaultBlackLevel = 256;

	blackLevelRed_ = tuningData["R"].get<int16_t>(kDefaultBlackLevel);
	blackLevelGreenR_ = tuningData["Gr"].get<int16_t>(kDefaultBlackLevel);
	blackLevelGreenB_ = tuningData["Gb"].get<int16_t>(kDefaultBlackLevel);
	blackLevelBlue_ = tuningData["B"].get<int16_t>(kDefaultBlackLevel);

	tuningParameters_ = true;

	LOG(RkISP1Blc, Debug)
		<< "Black levels: red " << blackLevelRed_
		<< ", green (red) " << blackLevelGreenR_
		<< ", green (blue) " << blackLevelGreenB_
		<< ", blue " << blackLevelBlue_;

	return 0;
}
/**
* \copydoc libcamera::ipa::Algorithm::prepare
*/
void BlackLevelCorrection::prepare([[maybe_unused]] IPAContext &context,
				   const uint32_t frame,
				   [[maybe_unused]] IPAFrameContext &frameContext,
				   rkisp1_params_cfg *params)
{
	/* Black levels are static, program them on the first frame only. */
	if (frame > 0)
		return;

	/* Nothing to program if init() did not load tuning parameters. */
	if (!tuningParameters_)
		return;

	/* Use the fixed levels from the tuning data, not auto measurement. */
	params->others.bls_config.enable_auto = 0;
	params->others.bls_config.fixed_val.r = blackLevelRed_;
	params->others.bls_config.fixed_val.gr = blackLevelGreenR_;
	params->others.bls_config.fixed_val.gb = blackLevelGreenB_;
	params->others.bls_config.fixed_val.b = blackLevelBlue_;

	/* Enable the BLS module and flag both enable and config updates. */
	params->module_en_update |= RKISP1_CIF_ISP_MODULE_BLS;
	params->module_ens |= RKISP1_CIF_ISP_MODULE_BLS;
	params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_BLS;
}
REGISTER_IPA_ALGORITHM(BlackLevelCorrection, "BlackLevelCorrection")
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/blc.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021-2022, Ideas On Board
*
* RkISP1 Black Level Correction control
*/
#pragma once
#include "algorithm.h"
namespace libcamera {
namespace ipa::rkisp1::algorithms {
/*
 * Algorithm programming fixed per-channel black levels into the RkISP1 BLS
 * block.
 */
class BlackLevelCorrection : public Algorithm
{
public:
	BlackLevelCorrection();
	~BlackLevelCorrection() = default;

	int init(IPAContext &context, const YamlObject &tuningData) override;
	void prepare(IPAContext &context, const uint32_t frame,
		     IPAFrameContext &frameContext,
		     rkisp1_params_cfg *params) override;

private:
	/* True once init() has loaded black levels from the tuning data. */
	bool tuningParameters_;
	/* Per-channel black levels, in the sensor Bayer order. */
	int16_t blackLevelRed_;
	int16_t blackLevelGreenR_;
	int16_t blackLevelGreenB_;
	int16_t blackLevelBlue_;
};
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/cproc.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021-2022, Ideas On Board
*
* RkISP1 Color Processing control
*/
#pragma once
#include <sys/types.h>
#include "algorithm.h"
namespace libcamera {
namespace ipa::rkisp1::algorithms {
/*
 * Algorithm driving the RkISP1 colour processing block (brightness,
 * contrast, saturation controls).
 */
class ColorProcessing : public Algorithm
{
public:
	ColorProcessing() = default;
	~ColorProcessing() = default;

	int init(IPAContext &context, const YamlObject &tuningData) override;
	int configure(IPAContext &context,
		      const IPACameraSensorInfo &configInfo) override;
	void queueRequest(IPAContext &context, const uint32_t frame,
			  IPAFrameContext &frameContext,
			  const ControlList &controls) override;
	void prepare(IPAContext &context, const uint32_t frame,
		     IPAFrameContext &frameContext,
		     rkisp1_params_cfg *params) override;
};
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/ccm.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Ideas On Board
*
* RkISP1 Color Correction Matrix control algorithm
*/
#pragma once
#include <linux/rkisp1-config.h>
#include "libipa/matrix.h"
#include "libipa/matrix_interpolator.h"
#include "algorithm.h"
namespace libcamera {
namespace ipa::rkisp1::algorithms {
/*
 * Color correction matrix algorithm. Programs a 3x3 matrix and per-channel
 * offsets, interpolated from per-colour-temperature tuning data.
 */
class Ccm : public Algorithm
{
public:
	Ccm() {}
	~Ccm() = default;

	int init(IPAContext &context, const YamlObject &tuningData) override;
	void prepare(IPAContext &context, const uint32_t frame,
		     IPAFrameContext &frameContext,
		     rkisp1_params_cfg *params) override;
	void process(IPAContext &context, const uint32_t frame,
		     IPAFrameContext &frameContext,
		     const rkisp1_stat_buffer *stats,
		     ControlList &metadata) override;

private:
	void parseYaml(const YamlObject &tuningData);
	/* Write \a matrix and \a offsets into the CTK config of \a params. */
	void setParameters(rkisp1_params_cfg *params,
			   const Matrix<float, 3, 3> &matrix,
			   const Matrix<int16_t, 3, 1> &offsets);

	/* Presumably the colour temperature last used for interpolation —
	 * confirm against ccm.cpp. */
	unsigned int ct_;
	/* Matrix and offsets interpolators, keyed by colour temperature. */
	MatrixInterpolator<float, 3, 3> ccm_;
	MatrixInterpolator<int16_t, 3, 1> offsets_;
};
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/algorithm.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021, Ideas On Board
*
* RkISP1 control algorithm interface
*/
#pragma once
#include <libipa/algorithm.h>
#include "module.h"
namespace libcamera {
namespace ipa::rkisp1 {
/*
 * Base class for all RkISP1 IPA algorithms, specializing the generic
 * libipa Algorithm interface for the rkisp1 Module.
 */
class Algorithm : public libcamera::ipa::Algorithm<Module>
{
public:
	Algorithm()
		: disabled_(false), supportsRaw_(false)
	{
	}

	/* True when the algorithm shall not be run. */
	bool disabled_;
	/* True when the algorithm also operates in raw (no-ISP) capture;
	 * only Agc sets this in the visible code. */
	bool supportsRaw_;
};
} /* namespace ipa::rkisp1 */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/awb.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021-2022, Ideas On Board
*
* AWB control algorithm
*/
#pragma once
#include "algorithm.h"
namespace libcamera {
namespace ipa::rkisp1::algorithms {
/*
 * Auto white balance algorithm. Computes colour gains from the ISP AWB
 * statistics and reports the estimated colour temperature.
 */
class Awb : public Algorithm
{
public:
	Awb();
	~Awb() = default;

	int configure(IPAContext &context, const IPACameraSensorInfo &configInfo) override;
	void queueRequest(IPAContext &context, const uint32_t frame,
			  IPAFrameContext &frameContext,
			  const ControlList &controls) override;
	void prepare(IPAContext &context, const uint32_t frame,
		     IPAFrameContext &frameContext,
		     rkisp1_params_cfg *params) override;
	void process(IPAContext &context, const uint32_t frame,
		     IPAFrameContext &frameContext,
		     const rkisp1_stat_buffer *stats,
		     ControlList &metadata) override;

private:
	/* Estimate the correlated colour temperature from channel means. */
	uint32_t estimateCCT(double red, double green, double blue);

	/* Presumably selects RGB vs YCbCr measurement mode — see awb.cpp. */
	bool rgbMode_;
};
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/goc.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Ideas On Board
*
* RkISP1 Gamma out control
*/
#include "goc.h"
#include <cmath>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
#include <libcamera/control_ids.h>
#include "libcamera/internal/yaml_parser.h"
#include "linux/rkisp1-config.h"
/**
* \file goc.h
*/
namespace libcamera {
namespace ipa::rkisp1::algorithms {
/**
* \class GammaOutCorrection
* \brief RkISP1 Gamma out correction
*
* This algorithm implements the gamma out curve for the RkISP1. It defaults to
* a gamma value of 2.2.
*
* As gamma is internally represented as a piecewise linear function with only
* 17 knots, the difference between gamma=2.2 and sRGB gamma is minimal.
* Therefore sRGB gamma was not implemented as special case.
*
* Useful links:
* - https://www.cambridgeincolour.com/tutorials/gamma-correction.htm
* - https://en.wikipedia.org/wiki/SRGB
*/
LOG_DEFINE_CATEGORY(RkISP1Gamma)
const float kDefaultGamma = 2.2f;
/**
* \copydoc libcamera::ipa::Algorithm::init
*/
int GammaOutCorrection::init(IPAContext &context, const YamlObject &tuningData)
{
	/* Only the V10 gamma-out table layout is supported. */
	const auto samples = context.hw->numGammaOutSamples;
	if (samples != RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10) {
		LOG(RkISP1Gamma, Error)
			<< "Gamma is not implemented for RkISP1 V12";
		return -EINVAL;
	}

	/* Fall back to kDefaultGamma when the tuning file has no entry. */
	defaultGamma_ = tuningData["gamma"].get<double>(kDefaultGamma);

	/* Expose the Gamma control, defaulting to the tuned value. */
	context.ctrlMap[&controls::Gamma] = ControlInfo(0.1f, 10.0f, defaultGamma_);

	return 0;
}
/**
* \copydoc libcamera::ipa::Algorithm::configure
*/
int GammaOutCorrection::configure(IPAContext &context,
				  [[maybe_unused]] const IPACameraSensorInfo &configInfo)
{
	/* Start each session from the tuned default gamma. */
	context.activeState.goc.gamma = defaultGamma_;
	return 0;
}
/**
* \copydoc libcamera::ipa::Algorithm::queueRequest
*/
void GammaOutCorrection::queueRequest(IPAContext &context, const uint32_t frame,
				      IPAFrameContext &frameContext,
				      const ControlList &controls)
{
	/* A new Gamma control updates the active state and flags a reprogram. */
	const auto &gamma = controls.get(controls::Gamma);
	if (gamma) {
		LOG(RkISP1Gamma, Debug) << "Set gamma to " << *gamma;
		context.activeState.goc.gamma = *gamma;
		frameContext.goc.update = true;
	}

	/* The very first frame always programs the gamma curve. */
	if (frame == 0)
		frameContext.goc.update = true;

	frameContext.goc.gamma = context.activeState.goc.gamma;
}
/**
* \copydoc libcamera::ipa::Algorithm::prepare
*/
void GammaOutCorrection::prepare(IPAContext &context,
				 [[maybe_unused]] const uint32_t frame,
				 IPAFrameContext &frameContext,
				 rkisp1_params_cfg *params)
{
	ASSERT(context.hw->numGammaOutSamples ==
	       RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10);
	/*
	 * The logarithmic segments as specified in the reference.
	 * Plus an additional 0 to make the loop easier
	 */
	static constexpr std::array<unsigned int, RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10> segments = {
		64, 64, 64, 64, 128, 128, 128, 128, 256,
		256, 256, 512, 512, 512, 512, 512, 0
	};
	__u16 *gamma_y = params->others.goc_config.gamma_y;

	/* Only reprogram the curve when flagged by queueRequest(). */
	if (!frameContext.goc.update)
		return;

	/*
	 * Evaluate y = (x / 4096) ^ (1 / gamma) at each knot; x advances by
	 * the segment sizes above (summing to 4096) and y is scaled to the
	 * 10-bit output range [0, 1023].
	 */
	unsigned x = 0;
	for (const auto [i, size] : utils::enumerate(segments)) {
		gamma_y[i] = std::pow(x / 4096.0, 1.0 / frameContext.goc.gamma) * 1023.0;
		x += size;
	}

	params->others.goc_config.mode = RKISP1_CIF_ISP_GOC_MODE_LOGARITHMIC;
	params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_GOC;
	params->module_en_update |= RKISP1_CIF_ISP_MODULE_GOC;
	params->module_ens |= RKISP1_CIF_ISP_MODULE_GOC;
}
/**
* \copydoc libcamera::ipa::Algorithm::process
*/
void GammaOutCorrection::process([[maybe_unused]] IPAContext &context,
				 [[maybe_unused]] const uint32_t frame,
				 IPAFrameContext &frameContext,
				 [[maybe_unused]] const rkisp1_stat_buffer *stats,
				 ControlList &metadata)
{
	/* Report the gamma value that was applied to this frame. */
	metadata.set(controls::Gamma, frameContext.goc.gamma);
}
REGISTER_IPA_ALGORITHM(GammaOutCorrection, "GammaOutCorrection")
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/lsc.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021-2022, Ideas On Board
*
* RkISP1 Lens Shading Correction control
*/
#pragma once
#include <map>
#include "algorithm.h"
namespace libcamera {
namespace ipa::rkisp1::algorithms {
/*
 * Lens shading correction algorithm. Programs per-channel gain tables,
 * selected and interpolated from per-colour-temperature tuning sets.
 */
class LensShadingCorrection : public Algorithm
{
public:
	LensShadingCorrection();
	~LensShadingCorrection() = default;

	int init(IPAContext &context, const YamlObject &tuningData) override;
	int configure(IPAContext &context, const IPACameraSensorInfo &configInfo) override;
	void prepare(IPAContext &context, const uint32_t frame,
		     IPAFrameContext &frameContext,
		     rkisp1_params_cfg *params) override;

private:
	/* One tuning set: gain tables for each Bayer channel at a given CT. */
	struct Components {
		uint32_t ct;
		std::vector<uint16_t> r;
		std::vector<uint16_t> gr;
		std::vector<uint16_t> gb;
		std::vector<uint16_t> b;
	};

	void setParameters(rkisp1_params_cfg *params);
	/* Copy a single set's tables verbatim into \a config. */
	void copyTable(rkisp1_cif_isp_lsc_config &config, const Components &set0);
	/* Interpolate between two sets for a CT that falls between them. */
	void interpolateTable(rkisp1_cif_isp_lsc_config &config,
			      const Components &set0, const Components &set1,
			      const uint32_t ct);

	/* Tuning sets indexed by colour temperature. */
	std::map<uint32_t, Components> sets_;
	/* Relative sector sizes from the tuning file (x-size / y-size). */
	std::vector<double> xSize_;
	std::vector<double> ySize_;
	uint16_t xGrad_[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
	uint16_t yGrad_[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
	uint16_t xSizes_[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
	uint16_t ySizes_[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];

	/* Last colour temperature used, before and after adjustment —
	 * presumably to skip redundant reprogramming; see lsc.cpp. */
	struct {
		uint32_t original;
		uint32_t adjusted;
	} lastCt_;
};
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/agc.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021-2022, Ideas On Board
*
* RkISP1 AGC/AEC mean-based control algorithm
*/
#pragma once
#include <linux/rkisp1-config.h>
#include <libcamera/base/span.h>
#include <libcamera/base/utils.h>
#include <libcamera/geometry.h>
#include "libipa/agc_mean_luminance.h"
#include "libipa/histogram.h"
#include "algorithm.h"
namespace libcamera {
namespace ipa::rkisp1::algorithms {
/*
 * Mean-luminance AGC/AEC algorithm. Configures the ISP exposure and
 * histogram measurement units, and derives new sensor exposure and gain
 * values from the resulting statistics via AgcMeanLuminance.
 */
class Agc : public Algorithm, public AgcMeanLuminance
{
public:
	Agc();
	~Agc() = default;

	int init(IPAContext &context, const YamlObject &tuningData) override;
	int configure(IPAContext &context, const IPACameraSensorInfo &configInfo) override;
	void queueRequest(IPAContext &context,
			  const uint32_t frame,
			  IPAFrameContext &frameContext,
			  const ControlList &controls) override;
	void prepare(IPAContext &context, const uint32_t frame,
		     IPAFrameContext &frameContext,
		     rkisp1_params_cfg *params) override;
	void process(IPAContext &context, const uint32_t frame,
		     IPAFrameContext &frameContext,
		     const rkisp1_stat_buffer *stats,
		     ControlList &metadata) override;

private:
	/* Load per-mode histogram weight tables from the tuning data. */
	int parseMeteringModes(IPAContext &context, const YamlObject &tuningData);
	/* Compute the pixel-skip factor keeping histogram bins in range. */
	uint8_t computeHistogramPredivider(const Size &size,
					   enum rkisp1_cif_isp_histogram_mode mode);

	void fillMetadata(IPAContext &context, IPAFrameContext &frameContext,
			  ControlList &metadata);
	double estimateLuminance(double gain) const override;

	/* AE cell means of the frame currently being processed. */
	Span<const uint8_t> expMeans_;

	/* Histogram weight tables, keyed by AeMeteringMode value. */
	std::map<int32_t, std::vector<uint8_t>> meteringModes_;
};
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/agc.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021-2022, Ideas On Board
*
* AGC/AEC mean-based control algorithm
*/
#include "agc.h"
#include <algorithm>
#include <chrono>
#include <cmath>
#include <tuple>
#include <vector>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
#include <libcamera/control_ids.h>
#include <libcamera/ipa/core_ipa_interface.h>
#include "libcamera/internal/yaml_parser.h"
#include "libipa/histogram.h"
/**
* \file agc.h
*/
namespace libcamera {
using namespace std::literals::chrono_literals;
namespace ipa::rkisp1::algorithms {
/**
* \class Agc
* \brief A mean-based auto-exposure algorithm
*/
LOG_DEFINE_CATEGORY(RkISP1Agc)
/*
 * \brief Parse the AeMeteringMode dictionary from the tuning data
 * \param[in] context The shared IPA context
 * \param[in] tuningData The "AeMeteringMode" YamlObject (may be absent)
 *
 * Populate meteringModes_ with one histogram weight table per recognized
 * metering mode. Entries with unknown names or a weight count that doesn't
 * match the hardware are skipped with a warning. When nothing valid is
 * found, fall back to an all-ones matrix metering table. The resulting set
 * of modes is registered as the AeMeteringMode control range.
 *
 * \return 0 unconditionally
 */
int Agc::parseMeteringModes(IPAContext &context, const YamlObject &tuningData)
{
	if (!tuningData.isDictionary())
		LOG(RkISP1Agc, Warning)
			<< "'AeMeteringMode' parameter not found in tuning file";

	for (const auto &[key, value] : tuningData.asDict()) {
		if (controls::AeMeteringModeNameValueMap.find(key) ==
		    controls::AeMeteringModeNameValueMap.end()) {
			LOG(RkISP1Agc, Warning)
				<< "Skipping unknown metering mode '" << key << "'";
			continue;
		}

		std::vector<uint8_t> weights =
			value.getList<uint8_t>().value_or(std::vector<uint8_t>{});
		/* The table must provide exactly one weight per histogram cell. */
		if (weights.size() != context.hw->numHistogramWeights) {
			/* Fixed: the message was missing the space before the key. */
			LOG(RkISP1Agc, Warning)
				<< "Failed to read metering mode '" << key << "'";
			continue;
		}

		meteringModes_[controls::AeMeteringModeNameValueMap.at(key)] = weights;
	}

	if (meteringModes_.empty()) {
		LOG(RkISP1Agc, Warning)
			<< "No metering modes read from tuning file; defaulting to matrix";
		int32_t meteringModeId = controls::AeMeteringModeNameValueMap.at("MeteringMatrix");
		std::vector<uint8_t> weights(context.hw->numHistogramWeights, 1);

		meteringModes_[meteringModeId] = weights;
	}

	/* Advertise the supported modes through the AeMeteringMode control. */
	std::vector<ControlValue> meteringModes;
	std::vector<int> meteringModeKeys = utils::map_keys(meteringModes_);
	std::transform(meteringModeKeys.begin(), meteringModeKeys.end(),
		       std::back_inserter(meteringModes),
		       [](int x) { return ControlValue(x); });
	context.ctrlMap[&controls::AeMeteringMode] = ControlInfo(meteringModes);

	return 0;
}
uint8_t Agc::computeHistogramPredivider(const Size &size,
					enum rkisp1_cif_isp_histogram_mode mode)
{
	/*
	 * Each histogram bin counter is 16 bits wide. In the worst case every
	 * pixel of the measurement window lands in one bin, times 3 in RGB
	 * combined mode, so `wraps` is how many times a counter could wrap
	 * around. The predivider skips pixels in both x and y, hence the
	 * square root, rounded up so the skipped area covers at least `wraps`
	 * pixels. Dividing into exactly 65536 is fine per the hardware
	 * documentation and reproduces the documented predivider values when
	 * (width / predivider) * (height / predivider) * 3 == 65536.
	 *
	 * \todo Take into account weights. That is, if the weights are low
	 * enough we can potentially reduce the predivider to increase
	 * precision. This needs some investigation however, as this hardware
	 * behavior is undocumented and is only an educated guess.
	 */
	const int channels =
		mode == RKISP1_CIF_ISP_HISTOGRAM_MODE_RGB_COMBINED ? 3 : 1;
	const double wraps = size.width * size.height * channels / 65536.0;
	const auto step = static_cast<uint8_t>(std::ceil(std::sqrt(wraps)));

	return std::clamp<uint8_t>(step, 3, 127);
}
Agc::Agc()
{
	/* AGC keeps running when the camera is configured for raw capture. */
	supportsRaw_ = true;
}
/**
* \brief Initialise the AGC algorithm from tuning files
* \param[in] context The shared IPA context
* \param[in] tuningData The YamlObject containing Agc tuning data
*
* This function calls the base class' tuningData parsers to discover which
* control values are supported.
*
* \return 0 on success or errors from the base class
*/
int Agc::init(IPAContext &context, const YamlObject &tuningData)
{
	/* Let the base class discover constraint and exposure modes. */
	int ret = parseTuningData(tuningData);
	if (ret)
		return ret;

	/* Load the per-mode histogram weight tables. */
	ret = parseMeteringModes(context, tuningData["AeMeteringMode"]);
	if (ret)
		return ret;

	/* Register AeEnable plus the controls discovered by the base class. */
	context.ctrlMap[&controls::AeEnable] = ControlInfo(false, true);
	context.ctrlMap.merge(controls());

	return 0;
}
/**
* \brief Configure the AGC given a configInfo
* \param[in] context The shared IPA context
* \param[in] configInfo The IPA configuration data
*
* \return 0
*/
int Agc::configure(IPAContext &context, const IPACameraSensorInfo &configInfo)
{
	/*
	 * Configure the default exposure and gain: minimum analogue gain and
	 * a 10 ms exposure, converted to a number of sensor lines.
	 */
	context.activeState.agc.automatic.gain = context.configuration.sensor.minAnalogueGain;
	context.activeState.agc.automatic.exposure =
		10ms / context.configuration.sensor.lineDuration;
	context.activeState.agc.manual.gain = context.activeState.agc.automatic.gain;
	context.activeState.agc.manual.exposure = context.activeState.agc.automatic.exposure;
	/* Automatic exposure is disabled when capturing raw frames. */
	context.activeState.agc.autoEnabled = !context.configuration.raw;

	/* Default to the first constraint, exposure and metering modes. */
	context.activeState.agc.constraintMode =
		static_cast<controls::AeConstraintModeEnum>(constraintModes().begin()->first);
	context.activeState.agc.exposureMode =
		static_cast<controls::AeExposureModeEnum>(exposureModeHelpers().begin()->first);
	context.activeState.agc.meteringMode =
		static_cast<controls::AeMeteringModeEnum>(meteringModes_.begin()->first);

	/*
	 * \todo This should probably come from FrameDurationLimits instead,
	 * except it's computed in the IPA and not here so we'd have to
	 * recompute it.
	 */
	context.activeState.agc.maxFrameDuration = context.configuration.sensor.maxShutterSpeed;

	/*
	 * Define the measurement window for AGC as a centered rectangle
	 * covering 3/4 of the image width and height.
	 */
	context.configuration.agc.measureWindow.h_offs = configInfo.outputSize.width / 8;
	context.configuration.agc.measureWindow.v_offs = configInfo.outputSize.height / 8;
	context.configuration.agc.measureWindow.h_size = 3 * configInfo.outputSize.width / 4;
	context.configuration.agc.measureWindow.v_size = 3 * configInfo.outputSize.height / 4;

	/* Bound the base class computations by the sensor capabilities. */
	setLimits(context.configuration.sensor.minShutterSpeed,
		  context.configuration.sensor.maxShutterSpeed,
		  context.configuration.sensor.minAnalogueGain,
		  context.configuration.sensor.maxAnalogueGain);

	resetFrameCount();

	return 0;
}
/**
* \copydoc libcamera::ipa::Algorithm::queueRequest
*/
void Agc::queueRequest(IPAContext &context,
		       [[maybe_unused]] const uint32_t frame,
		       IPAFrameContext &frameContext,
		       const ControlList &controls)
{
	auto &agc = context.activeState.agc;

	/* AeEnable is only honoured when not capturing raw frames. */
	if (!context.configuration.raw) {
		const auto &agcEnable = controls.get(controls::AeEnable);
		if (agcEnable && *agcEnable != agc.autoEnabled) {
			agc.autoEnabled = *agcEnable;

			LOG(RkISP1Agc, Debug)
				<< (agc.autoEnabled ? "Enabling" : "Disabling")
				<< " AGC";
		}
	}

	/* Manual exposure (in µs) is converted to a number of sensor lines. */
	const auto &exposure = controls.get(controls::ExposureTime);
	if (exposure && !agc.autoEnabled) {
		agc.manual.exposure = *exposure * 1.0us
				    / context.configuration.sensor.lineDuration;

		LOG(RkISP1Agc, Debug)
			<< "Set exposure to " << agc.manual.exposure;
	}

	const auto &gain = controls.get(controls::AnalogueGain);
	if (gain && !agc.autoEnabled) {
		agc.manual.gain = *gain;

		LOG(RkISP1Agc, Debug) << "Set gain to " << agc.manual.gain;
	}

	/* In manual mode the frame uses the manual exposure and gain. */
	frameContext.agc.autoEnabled = agc.autoEnabled;

	if (!frameContext.agc.autoEnabled) {
		frameContext.agc.exposure = agc.manual.exposure;
		frameContext.agc.gain = agc.manual.gain;
	}

	/* A metering mode change flags prepare() to reprogram the weights. */
	const auto &meteringMode = controls.get(controls::AeMeteringMode);
	if (meteringMode) {
		frameContext.agc.updateMetering = agc.meteringMode != *meteringMode;
		agc.meteringMode =
			static_cast<controls::AeMeteringModeEnum>(*meteringMode);
	}
	frameContext.agc.meteringMode = agc.meteringMode;

	const auto &exposureMode = controls.get(controls::AeExposureMode);
	if (exposureMode)
		agc.exposureMode =
			static_cast<controls::AeExposureModeEnum>(*exposureMode);
	frameContext.agc.exposureMode = agc.exposureMode;

	const auto &constraintMode = controls.get(controls::AeConstraintMode);
	if (constraintMode)
		agc.constraintMode =
			static_cast<controls::AeConstraintModeEnum>(*constraintMode);
	frameContext.agc.constraintMode = agc.constraintMode;

	/* Only the upper frame duration limit is used, as milliseconds. */
	const auto &frameDurationLimits = controls.get(controls::FrameDurationLimits);
	if (frameDurationLimits) {
		utils::Duration maxFrameDuration =
			std::chrono::milliseconds((*frameDurationLimits).back());
		agc.maxFrameDuration = maxFrameDuration;
	}
	frameContext.agc.maxFrameDuration = agc.maxFrameDuration;
}
/**
* \copydoc libcamera::ipa::Algorithm::prepare
*/
void Agc::prepare(IPAContext &context, const uint32_t frame,
		  IPAFrameContext &frameContext, rkisp1_params_cfg *params)
{
	/* In automatic mode, apply the values computed by process(). */
	if (frameContext.agc.autoEnabled) {
		frameContext.agc.exposure = context.activeState.agc.automatic.exposure;
		frameContext.agc.gain = context.activeState.agc.automatic.gain;
	}

	/*
	 * The measurement units only need programming on the first frame, or
	 * when the metering mode changed.
	 */
	if (frame > 0 && !frameContext.agc.updateMetering)
		return;

	/* Configure the measurement window. */
	params->meas.aec_config.meas_window = context.configuration.agc.measureWindow;
	/* Use a continuous method for measure. */
	params->meas.aec_config.autostop = RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0;
	/* Estimate Y as (R + G + B) x (85/256). */
	params->meas.aec_config.mode = RKISP1_CIF_ISP_EXP_MEASURING_MODE_1;

	params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_AEC;
	params->module_ens |= RKISP1_CIF_ISP_MODULE_AEC;
	params->module_en_update |= RKISP1_CIF_ISP_MODULE_AEC;

	/* Configure histogram. */
	params->meas.hst_config.meas_window = context.configuration.agc.measureWindow;
	/* Produce the luminance histogram. */
	params->meas.hst_config.mode = RKISP1_CIF_ISP_HISTOGRAM_MODE_Y_HISTOGRAM;

	/* Set an average weighted histogram. */
	Span<uint8_t> weights{
		params->meas.hst_config.hist_weight,
		context.hw->numHistogramWeights
	};
	/* Copy the weight table of the active metering mode. */
	std::vector<uint8_t> &modeWeights = meteringModes_.at(frameContext.agc.meteringMode);
	std::copy(modeWeights.begin(), modeWeights.end(), weights.begin());

	/* Size the predivider so the 16-bit bin counters cannot overflow. */
	struct rkisp1_cif_isp_window window = params->meas.hst_config.meas_window;
	Size windowSize = { window.h_size, window.v_size };
	params->meas.hst_config.histogram_predivider =
		computeHistogramPredivider(windowSize,
					   static_cast<rkisp1_cif_isp_histogram_mode>(params->meas.hst_config.mode));

	/* Update the configuration for histogram. */
	params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_HST;
	/* Enable the histogram measure unit. */
	params->module_ens |= RKISP1_CIF_ISP_MODULE_HST;
	params->module_en_update |= RKISP1_CIF_ISP_MODULE_HST;
}
/* Report the exposure-related metadata for the frame. */
void Agc::fillMetadata(IPAContext &context, IPAFrameContext &frameContext,
		       ControlList &metadata)
{
	const auto &sensor = context.configuration.sensor;

	/* Convert the exposure from sensor lines back to a duration. */
	utils::Duration exposureTime = sensor.lineDuration
				     * frameContext.sensor.exposure;
	metadata.set(controls::AnalogueGain, frameContext.sensor.gain);
	metadata.set(controls::ExposureTime, exposureTime.get<std::micro>());
	metadata.set(controls::AeEnable, frameContext.agc.autoEnabled);

	/* \todo Use VBlank value calculated from each frame exposure. */
	uint32_t vTotal = sensor.size.height + sensor.defVBlank;
	utils::Duration frameDuration = sensor.lineDuration * vTotal;
	metadata.set(controls::FrameDuration, frameDuration.get<std::micro>());

	metadata.set(controls::AeMeteringMode, frameContext.agc.meteringMode);
	metadata.set(controls::AeExposureMode, frameContext.agc.exposureMode);
	metadata.set(controls::AeConstraintMode, frameContext.agc.constraintMode);
}
/**
* \brief Estimate the relative luminance of the frame with a given gain
* \param[in] gain The gain to apply to the frame
*
* This function estimates the average relative luminance of the frame that
* would be output by the sensor if an additional \a gain was applied.
*
* The estimation is based on the AE statistics for the current frame. Y
* averages for all cells are first multiplied by the gain, and then saturated
* to approximate the sensor behaviour at high brightness values. The
* approximation is quite rough, as it doesn't take into account non-linearities
* when approaching saturation. In this case, saturating after the conversion to
* YUV doesn't take into account the fact that the R, G and B components
* contribute differently to the relative luminance.
*
* The values are normalized to the [0.0, 1.0] range, where 1.0 corresponds to a
* theoretical perfect reflector of 100% reference white.
*
* More detailed information can be found in:
* https://en.wikipedia.org/wiki/Relative_luminance
*
* \return The relative luminance
*/
double Agc::estimateLuminance(double gain) const
{
	/*
	 * Guard against a division by zero below: with no AE statistics
	 * available there is nothing to estimate, so report black instead of
	 * returning NaN.
	 */
	if (expMeans_.empty())
		return 0.0;

	double ySum = 0.0;

	/* Sum the averages, saturated to 255. */
	for (uint8_t expMean : expMeans_)
		ySum += std::min(expMean * gain, 255.0);

	/* \todo Weight with the AWB gains */

	/* Normalize to [0.0, 1.0], with 255 as the reference white. */
	return ySum / expMeans_.size() / 255;
}
/**
* \brief Process RkISP1 statistics, and run AGC operations
* \param[in] context The shared IPA context
* \param[in] frame The frame context sequence number
* \param[in] frameContext The current frame context
* \param[in] stats The RKISP1 statistics and ISP results
* \param[out] metadata Metadata for the frame, to be filled by the algorithm
*
* Identify the current image brightness, and use that to estimate the optimal
* new exposure and gain for the scene.
*/
void Agc::process(IPAContext &context, [[maybe_unused]] const uint32_t frame,
		  IPAFrameContext &frameContext, const rkisp1_stat_buffer *stats,
		  ControlList &metadata)
{
	/* Without statistics only the metadata can be reported. */
	if (!stats) {
		fillMetadata(context, frameContext, metadata);
		return;
	}

	/*
	 * \todo Verify that the exposure and gain applied by the sensor for
	 * this frame match what has been requested. This isn't a hard
	 * requirement for stability of the AGC (the guarantee we need in
	 * automatic mode is a perfect match between the frame and the values
	 * we receive), but is important in manual mode.
	 */
	const rkisp1_cif_isp_stat *params = &stats->params;
	ASSERT(stats->meas_type & RKISP1_CIF_ISP_STAT_AUTOEXP);

	/* The lower 4 bits are fractional and meant to be discarded. */
	Histogram hist({ params->hist.hist_bins, context.hw->numHistogramBins },
		       [](uint32_t x) { return x >> 4; });
	/* Publish the AE cell means for estimateLuminance(). */
	expMeans_ = { params->ae.exp_mean, context.hw->numAeCells };

	/*
	 * Clamp the frame duration requested for this frame to the sensor
	 * limits, and refresh the base class limits accordingly.
	 */
	utils::Duration maxShutterSpeed =
		std::clamp(frameContext.agc.maxFrameDuration,
			   context.configuration.sensor.minShutterSpeed,
			   context.configuration.sensor.maxShutterSpeed);
	setLimits(context.configuration.sensor.minShutterSpeed,
		  maxShutterSpeed,
		  context.configuration.sensor.minAnalogueGain,
		  context.configuration.sensor.maxAnalogueGain);

	/*
	 * The Agc algorithm needs to know the effective exposure value that was
	 * applied to the sensor when the statistics were collected.
	 */
	utils::Duration exposureTime = context.configuration.sensor.lineDuration
				       * frameContext.sensor.exposure;
	double analogueGain = frameContext.sensor.gain;
	utils::Duration effectiveExposureValue = exposureTime * analogueGain;

	utils::Duration shutterTime;
	double aGain, dGain;
	std::tie(shutterTime, aGain, dGain) =
		calculateNewEv(frameContext.agc.constraintMode,
			       frameContext.agc.exposureMode,
			       hist, effectiveExposureValue);

	LOG(RkISP1Agc, Debug)
		<< "Divided up shutter, analogue gain and digital gain are "
		<< shutterTime << ", " << aGain << " and " << dGain;

	IPAActiveState &activeState = context.activeState;
	/* Update the estimated exposure and gain. */
	activeState.agc.automatic.exposure = shutterTime / context.configuration.sensor.lineDuration;
	activeState.agc.automatic.gain = aGain;

	fillMetadata(context, frameContext, metadata);
	/* The span refers to this frame's statistics only; drop it. */
	expMeans_ = {};
}
REGISTER_IPA_ALGORITHM(Agc, "Agc")
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/dpf.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021-2022, Ideas On Board
*
* RkISP1 Denoise Pre-Filter control
*/
#pragma once
#include <sys/types.h>
#include "algorithm.h"
namespace libcamera {
namespace ipa::rkisp1::algorithms {
/*
 * Denoise pre-filter algorithm. Loads the domain and range filter
 * configuration from the tuning file and enables or disables the filter
 * according to the NoiseReductionMode control.
 */
class Dpf : public Algorithm
{
public:
	Dpf();
	~Dpf() = default;

	int init(IPAContext &context, const YamlObject &tuningData) override;
	void queueRequest(IPAContext &context, const uint32_t frame,
			  IPAFrameContext &frameContext,
			  const ControlList &controls) override;
	void prepare(IPAContext &context, const uint32_t frame,
		     IPAFrameContext &frameContext,
		     rkisp1_params_cfg *params) override;

private:
	/* Static DPF configuration parsed from the tuning file. */
	struct rkisp1_cif_isp_dpf_config config_;
	/* Per-channel filter strength parsed from the tuning file. */
	struct rkisp1_cif_isp_dpf_strength_config strengthConfig_;
};
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/dpf.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021-2022, Ideas On Board
*
* RkISP1 Denoise Pre-Filter control
*/
#include "dpf.h"
#include <cmath>
#include <libcamera/base/log.h>
#include <libcamera/control_ids.h>
#include "linux/rkisp1-config.h"
/**
* \file dpf.h
*/
namespace libcamera {
namespace ipa::rkisp1::algorithms {
/**
* \class Dpf
* \brief RkISP1 Denoise Pre-Filter control
*
* The denoise pre-filter algorithm is a bilateral filter which combines a
* range filter and a domain filter. The denoise pre-filter is applied before
* demosaicing.
*/
LOG_DEFINE_CATEGORY(RkISP1Dpf)
Dpf::Dpf()
: config_({}), strengthConfig_({})
{
}
/**
* \copydoc libcamera::ipa::Algorithm::init
*/
/*
 * \copydoc libcamera::ipa::Algorithm::init
 *
 * Parse the 'DomainFilter', 'NoiseLevelFunction' and 'FilterStrength'
 * sections of the tuning data into the static DPF configuration.
 */
int Dpf::init([[maybe_unused]] IPAContext &context,
	      const YamlObject &tuningData)
{
	std::vector<uint8_t> values;

	/*
	 * The domain kernel is configured with a 9x9 kernel for the green
	 * pixels, and a 13x9 or 9x9 kernel for red and blue pixels.
	 */
	const YamlObject &dFObject = tuningData["DomainFilter"];

	/*
	 * For the green component, we have the 9x9 kernel specified
	 * as 6 coefficients:
	 * Y
	 * ^
	 * 4 | 6 5 4 5 6
	 * 3 |  5 3 3 5
	 * 2 | 5 3 2 3 5
	 * 1 |  3 1 1 3
	 * 0 - 4 2 0 2 4
	 * -1 |  3 1 1 3
	 * -2 | 5 3 2 3 5
	 * -3 |  5 3 3 5
	 * -4 | 6 5 4 5 6
	 *   +---------|--------> X
	 *     -4....-1 0 1 2 3 4
	 */
	values = dFObject["g"].getList<uint8_t>().value_or(std::vector<uint8_t>{});
	if (values.size() != RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS) {
		LOG(RkISP1Dpf, Error)
			<< "Invalid 'DomainFilter:g': expected "
			<< RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS
			<< " elements, got " << values.size();
		return -EINVAL;
	}

	std::copy_n(values.begin(), values.size(),
		    std::begin(config_.g_flt.spatial_coeff));

	config_.g_flt.gr_enable = true;
	config_.g_flt.gb_enable = true;

	/*
	 * For the red and blue components, we have the 13x9 kernel specified
	 * as 6 coefficients:
	 *
	 * Y
	 * ^
	 * 4 | 6 5 4 3 4 5 6
	 *   |
	 * 2 | 5 4 2 1 2 4 5
	 *   |
	 * 0 - 5 3 1 0 1 3 5
	 *   |
	 * -2 | 5 4 2 1 2 4 5
	 *   |
	 * -4 | 6 5 4 3 4 5 6
	 *   +-------------|------------> X
	 *     -6  -4  -2  0  2  4  6
	 *
	 * For a 9x9 kernel, columns -6 and 6 are dropped, so coefficient
	 * number 6 is not used.
	 */
	values = dFObject["rb"].getList<uint8_t>().value_or(std::vector<uint8_t>{});
	if (values.size() != RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS &&
	    values.size() != RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS - 1) {
		LOG(RkISP1Dpf, Error)
			<< "Invalid 'DomainFilter:rb': expected "
			<< RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS - 1
			<< " or " << RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS
			<< " elements, got " << values.size();
		return -EINVAL;
	}

	/* The kernel size is inferred from the number of coefficients. */
	config_.rb_flt.fltsize = values.size() == RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS
			       ? RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_13x9
			       : RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_9x9;

	std::copy_n(values.begin(), values.size(),
		    std::begin(config_.rb_flt.spatial_coeff));

	config_.rb_flt.r_enable = true;
	config_.rb_flt.b_enable = true;

	/*
	 * The range kernel is configured with a noise level lookup table (NLL)
	 * which stores a piecewise linear function that characterizes the
	 * sensor noise profile as a noise level function curve (NLF).
	 */
	const YamlObject &rFObject = tuningData["NoiseLevelFunction"];

	std::vector<uint16_t> nllValues;
	nllValues = rFObject["coeff"].getList<uint16_t>().value_or(std::vector<uint16_t>{});
	if (nllValues.size() != RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS) {
		/*
		 * Fixed: the error messages below used to say 'RangeFilter',
		 * which doesn't match the 'NoiseLevelFunction' tuning key
		 * actually parsed above.
		 */
		LOG(RkISP1Dpf, Error)
			<< "Invalid 'NoiseLevelFunction:coeff': expected "
			<< RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS
			<< " elements, got " << nllValues.size();
		return -EINVAL;
	}

	std::copy_n(nllValues.begin(), nllValues.size(),
		    std::begin(config_.nll.coeff));

	std::string scaleMode = rFObject["scale-mode"].get<std::string>("");
	if (scaleMode == "linear") {
		config_.nll.scale_mode = RKISP1_CIF_ISP_NLL_SCALE_LINEAR;
	} else if (scaleMode == "logarithmic") {
		config_.nll.scale_mode = RKISP1_CIF_ISP_NLL_SCALE_LOGARITHMIC;
	} else {
		LOG(RkISP1Dpf, Error)
			<< "Invalid 'NoiseLevelFunction:scale-mode': expected "
			<< "'linear' or 'logarithmic' value, got "
			<< scaleMode;
		return -EINVAL;
	}

	/* Per-channel strength, defaulting to 64 when not specified. */
	const YamlObject &fSObject = tuningData["FilterStrength"];

	strengthConfig_.r = fSObject["r"].get<uint16_t>(64);
	strengthConfig_.g = fSObject["g"].get<uint16_t>(64);
	strengthConfig_.b = fSObject["b"].get<uint16_t>(64);

	return 0;
}
/**
* \copydoc libcamera::ipa::Algorithm::queueRequest
*/
void Dpf::queueRequest(IPAContext &context,
		       [[maybe_unused]] const uint32_t frame,
		       IPAFrameContext &frameContext,
		       const ControlList &controls)
{
	auto &dpf = context.activeState.dpf;
	bool update = false;

	/*
	 * Map NoiseReductionMode onto the single DPF enable bit: Off disables
	 * the filter, every other supported mode enables it. `update` is only
	 * set when the enable state actually changes.
	 */
	const auto &denoise = controls.get(controls::draft::NoiseReductionMode);
	if (denoise) {
		LOG(RkISP1Dpf, Debug) << "Set denoise to " << *denoise;

		switch (*denoise) {
		case controls::draft::NoiseReductionModeOff:
			if (dpf.denoise) {
				dpf.denoise = false;
				update = true;
			}
			break;
		case controls::draft::NoiseReductionModeMinimal:
		case controls::draft::NoiseReductionModeHighQuality:
		case controls::draft::NoiseReductionModeFast:
			if (!dpf.denoise) {
				dpf.denoise = true;
				update = true;
			}
			break;
		default:
			LOG(RkISP1Dpf, Error)
				<< "Unsupported denoise value "
				<< *denoise;
			break;
		}
	}

	frameContext.dpf.denoise = dpf.denoise;
	frameContext.dpf.update = update;
}
/**
* \copydoc libcamera::ipa::Algorithm::prepare
*/
void Dpf::prepare(IPAContext &context, const uint32_t frame,
		  IPAFrameContext &frameContext, rkisp1_params_cfg *params)
{
	/* The static filter configuration is only programmed once. */
	if (frame == 0) {
		params->others.dpf_config = config_;
		params->others.dpf_strength_config = strengthConfig_;

		const auto &awb = context.configuration.awb;
		const auto &lsc = context.configuration.lsc;
		auto &mode = params->others.dpf_config.gain.mode;

		/*
		 * The DPF needs to take into account the total amount of
		 * digital gain, which comes from the AWB and LSC modules. The
		 * DPF hardware can be programmed with a digital gain value
		 * manually, but can also use the gains supplied by the AWB and
		 * LSC modules automatically when they are enabled. Use that
		 * mode of operation as it simplifies control of the DPF.
		 */
		if (awb.enabled && lsc.enabled)
			mode = RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_LSC_GAINS;
		else if (awb.enabled)
			mode = RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_GAINS;
		else if (lsc.enabled)
			mode = RKISP1_CIF_ISP_DPF_GAIN_USAGE_LSC_GAINS;
		else
			mode = RKISP1_CIF_ISP_DPF_GAIN_USAGE_DISABLED;

		params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_DPF |
					     RKISP1_CIF_ISP_MODULE_DPF_STRENGTH;
	}

	/* Toggle the enable bit only when queueRequest() flagged a change. */
	if (frameContext.dpf.update) {
		params->module_en_update |= RKISP1_CIF_ISP_MODULE_DPF;
		if (frameContext.dpf.denoise)
			params->module_ens |= RKISP1_CIF_ISP_MODULE_DPF;
	}
}
REGISTER_IPA_ALGORITHM(Dpf, "Dpf")
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/cproc.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021-2022, Ideas On Board
*
* RkISP1 Color Processing control
*/
#include "cproc.h"
#include <algorithm>
#include <cmath>
#include <libcamera/base/log.h>
#include <libcamera/control_ids.h>
/**
* \file cproc.h
*/
namespace libcamera {
namespace ipa::rkisp1::algorithms {
/**
* \class ColorProcessing
* \brief RkISP1 Color Processing control
*
* The ColorProcessing algorithm is responsible for applying brightness,
* contrast and saturation corrections. The values are directly provided
* through requests by the corresponding controls.
*/
LOG_DEFINE_CATEGORY(RkISP1CProc)
namespace {

/* Default control values applied until a request overrides them. */
constexpr float kDefaultBrightness = 0.0f;
constexpr float kDefaultContrast = 1.0f;
constexpr float kDefaultSaturation = 1.0f;

/* Convert a [-1.0, 1.0[ brightness to the signed 8-bit register range. */
int convertBrightness(const float v)
{
	const long scaled = std::lround(v * 128);
	return std::clamp<int>(scaled, -128, 127);
}

/* Convert a [0.0, 2.0[ contrast/saturation to the unsigned 8-bit range. */
int convertContrastOrSaturation(const float v)
{
	const long scaled = std::lround(v * 128);
	return std::clamp<int>(scaled, 0, 255);
}

} /* namespace */
/**
* \copydoc libcamera::ipa::Algorithm::init
*/
int ColorProcessing::init(IPAContext &context,
			  [[maybe_unused]] const YamlObject &tuningData)
{
	auto &cmap = context.ctrlMap;
	/*
	 * Register the controls handled by this algorithm. The upper bounds
	 * (0.993 and 1.993, i.e. just below 1.0 and 2.0) keep the values
	 * produced by the convert helpers within the 8-bit register ranges.
	 */
	cmap[&controls::Brightness] = ControlInfo(-1.0f, 0.993f, kDefaultBrightness);
	cmap[&controls::Contrast] = ControlInfo(0.0f, 1.993f, kDefaultContrast);
	cmap[&controls::Saturation] = ControlInfo(0.0f, 1.993f, kDefaultSaturation);
	return 0;
}
/**
* \copydoc libcamera::ipa::Algorithm::configure
*/
int ColorProcessing::configure(IPAContext &context,
			       [[maybe_unused]] const IPACameraSensorInfo &configInfo)
{
	auto &cproc = context.activeState.cproc;
	/* Reset the active state to the default control values. */
	cproc.brightness = convertBrightness(kDefaultBrightness);
	cproc.contrast = convertContrastOrSaturation(kDefaultContrast);
	cproc.saturation = convertContrastOrSaturation(kDefaultSaturation);
	return 0;
}
/**
* \copydoc libcamera::ipa::Algorithm::queueRequest
*/
void ColorProcessing::queueRequest(IPAContext &context,
				   const uint32_t frame,
				   IPAFrameContext &frameContext,
				   const ControlList &controls)
{
	auto &cproc = context.activeState.cproc;
	bool update = false;

	/* The complete configuration must be programmed on the first frame. */
	if (frame == 0)
		update = true;

	/*
	 * Store a converted control value in the active state, flagging the
	 * frame for reprogramming when the value actually changes.
	 */
	auto applyValue = [&update](auto &state, int value) {
		if (state != value) {
			state = value;
			update = true;
		}
	};

	const auto &brightness = controls.get(controls::Brightness);
	if (brightness) {
		int value = convertBrightness(*brightness);
		applyValue(cproc.brightness, value);
		LOG(RkISP1CProc, Debug) << "Set brightness to " << value;
	}

	const auto &contrast = controls.get(controls::Contrast);
	if (contrast) {
		int value = convertContrastOrSaturation(*contrast);
		applyValue(cproc.contrast, value);
		LOG(RkISP1CProc, Debug) << "Set contrast to " << value;
	}

	/* Bind by reference for consistency with the other two controls. */
	const auto &saturation = controls.get(controls::Saturation);
	if (saturation) {
		int value = convertContrastOrSaturation(*saturation);
		applyValue(cproc.saturation, value);
		LOG(RkISP1CProc, Debug) << "Set saturation to " << value;
	}

	frameContext.cproc.brightness = cproc.brightness;
	frameContext.cproc.contrast = cproc.contrast;
	frameContext.cproc.saturation = cproc.saturation;
	frameContext.cproc.update = update;
}
/**
* \copydoc libcamera::ipa::Algorithm::prepare
*/
void ColorProcessing::prepare([[maybe_unused]] IPAContext &context,
			      [[maybe_unused]] const uint32_t frame,
			      IPAFrameContext &frameContext,
			      rkisp1_params_cfg *params)
{
	/* Check if the algorithm configuration has been updated. */
	if (!frameContext.cproc.update)
		return;
	/* Program the color processing block and (re-)enable it. */
	params->others.cproc_config.brightness = frameContext.cproc.brightness;
	params->others.cproc_config.contrast = frameContext.cproc.contrast;
	params->others.cproc_config.sat = frameContext.cproc.saturation;
	params->module_en_update |= RKISP1_CIF_ISP_MODULE_CPROC;
	params->module_ens |= RKISP1_CIF_ISP_MODULE_CPROC;
	params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_CPROC;
}
REGISTER_IPA_ALGORITHM(ColorProcessing, "ColorProcessing")
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/lsc.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021-2022, Ideas On Board
*
* RkISP1 Lens Shading Correction control
*/
#include "lsc.h"
#include <algorithm>
#include <cmath>
#include <numeric>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
#include "libcamera/internal/yaml_parser.h"
#include "linux/rkisp1-config.h"
/**
* \file lsc.h
*/
namespace libcamera {
namespace ipa::rkisp1::algorithms {
/**
* \class LensShadingCorrection
* \brief RkISP1 Lens Shading Correction control
*
* Due to the optical characteristics of the lens, the light intensity received
* by the sensor is not uniform.
*
* The Lens Shading Correction algorithm applies multipliers to all pixels
* to compensate for the lens shading effect. The coefficients are
* specified in a downscaled table in the YAML tuning file.
*/
LOG_DEFINE_CATEGORY(RkISP1Lsc)
/* Parse and validate a table of relative LSC sector sizes. */
static std::vector<double> parseSizes(const YamlObject &tuningData,
				      const char *prop)
{
	std::vector<double> sectorSizes =
		tuningData[prop].getList<double>().value_or(std::vector<double>{});
	if (sectorSizes.size() != RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE) {
		LOG(RkISP1Lsc, Error)
			<< "Invalid '" << prop << "' values: expected "
			<< RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE
			<< " elements, got " << sectorSizes.size();
		return {};
	}

	/*
	 * The sum of all elements must be 0.5 to satisfy hardware constraints.
	 * Validate it here, allowing a 1% tolerance as rounding errors may
	 * prevent an exact match (further adjustments will be performed in
	 * LensShadingCorrection::prepare()).
	 */
	const double total = std::accumulate(sectorSizes.begin(),
					     sectorSizes.end(), 0.0);
	if (total < 0.495 || total > 0.505) {
		LOG(RkISP1Lsc, Error)
			<< "Invalid '" << prop << "' values: sum of the elements"
			<< " should be 0.5, got " << total;
		return {};
	}

	return sectorSizes;
}
/* Parse one per-channel LSC data table (one sample per grid point). */
static std::vector<uint16_t> parseTable(const YamlObject &tuningData,
					const char *prop)
{
	constexpr unsigned int kLscNumSamples =
		RKISP1_CIF_ISP_LSC_SAMPLES_MAX * RKISP1_CIF_ISP_LSC_SAMPLES_MAX;

	std::vector<uint16_t> samples =
		tuningData[prop].getList<uint16_t>().value_or(std::vector<uint16_t>{});
	if (samples.size() != kLscNumSamples) {
		LOG(RkISP1Lsc, Error)
			<< "Invalid '" << prop << "' values: expected "
			<< kLscNumSamples
			<< " elements, got " << samples.size();
		return {};
	}

	return samples;
}
/*
 * Initialize the cached colour temperature record to zero so the skip check
 * in prepare() cannot match a realistic colour temperature on the first call.
 */
LensShadingCorrection::LensShadingCorrection()
	: lastCt_({ 0, 0 })
{
}
/**
* \copydoc libcamera::ipa::Algorithm::init
*/
int LensShadingCorrection::init([[maybe_unused]] IPAContext &context,
				const YamlObject &tuningData)
{
	/* The sector geometry is shared by all colour temperature sets. */
	xSize_ = parseSizes(tuningData, "x-size");
	ySize_ = parseSizes(tuningData, "y-size");
	if (xSize_.empty() || ySize_.empty())
		return -EINVAL;
	/* Get all defined sets to apply. */
	const YamlObject &yamlSets = tuningData["sets"];
	if (!yamlSets.isList()) {
		LOG(RkISP1Lsc, Error)
			<< "'sets' parameter not found in tuning file";
		return -EINVAL;
	}
	/*
	 * Load one table set per colour temperature, keyed by 'ct'. Duplicate
	 * keys are rejected as the interpolation in prepare() needs unique
	 * colour temperatures.
	 */
	const auto &sets = yamlSets.asList();
	for (const auto &yamlSet : sets) {
		uint32_t ct = yamlSet["ct"].get<uint32_t>(0);
		if (sets_.count(ct)) {
			LOG(RkISP1Lsc, Error)
				<< "Multiple sets found for color temperature "
				<< ct;
			return -EINVAL;
		}
		Components &set = sets_[ct];
		set.ct = ct;
		set.r = parseTable(yamlSet, "r");
		set.gr = parseTable(yamlSet, "gr");
		set.gb = parseTable(yamlSet, "gb");
		set.b = parseTable(yamlSet, "b");
		/* All four Bayer channels must be fully specified. */
		if (set.r.empty() || set.gr.empty() ||
		    set.gb.empty() || set.b.empty()) {
			LOG(RkISP1Lsc, Error)
				<< "Set for color temperature " << ct
				<< " is missing tables";
			return -EINVAL;
		}
	}
	if (sets_.empty()) {
		LOG(RkISP1Lsc, Error) << "Failed to load any sets";
		return -EINVAL;
	}
	return 0;
}
/**
* \copydoc libcamera::ipa::Algorithm::configure
*/
int LensShadingCorrection::configure(IPAContext &context,
				     [[maybe_unused]] const IPACameraSensorInfo &configInfo)
{
	const Size &size = context.configuration.sensor.size;
	Size totalSize{};
	/*
	 * Convert the relative sector sizes from the tuning file into pixel
	 * counts and hardware gradients for the configured sensor resolution.
	 */
	for (unsigned int i = 0; i < RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE; ++i) {
		xSizes_[i] = xSize_[i] * size.width;
		ySizes_[i] = ySize_[i] * size.height;
		/*
		 * To prevent unexpected behavior of the ISP, the sum of x_size_tbl and
		 * y_size_tbl items shall be equal to respectively size.width/2 and
		 * size.height/2. Enforce it by computing the last tables value to avoid
		 * rounding-induced errors.
		 */
		if (i == RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE - 1) {
			xSizes_[i] = size.width / 2 - totalSize.width;
			ySizes_[i] = size.height / 2 - totalSize.height;
		}
		totalSize.width += xSizes_[i];
		totalSize.height += ySizes_[i];
		/*
		 * NOTE(review): if the xSizes_/ySizes_ elements are integral
		 * (declared in lsc.h, not visible here), 32768 / xSizes_[i]
		 * truncates before std::round() is applied — presumably
		 * matching the hardware gradient encoding; confirm against
		 * the member declarations.
		 */
		xGrad_[i] = std::round(32768 / xSizes_[i]);
		yGrad_[i] = std::round(32768 / ySizes_[i]);
	}
	/* Let other algorithms (e.g. DPF gain usage) know LSC is active. */
	context.configuration.lsc.enabled = true;
	return 0;
}
/*
 * Program the sector geometry (sizes and gradients) computed in configure()
 * and enable the LSC module. The per-channel data tables are filled
 * separately by copyTable() or interpolateTable().
 */
void LensShadingCorrection::setParameters(rkisp1_params_cfg *params)
{
	struct rkisp1_cif_isp_lsc_config &config = params->others.lsc_config;
	memcpy(config.x_grad_tbl, xGrad_, sizeof(config.x_grad_tbl));
	memcpy(config.y_grad_tbl, yGrad_, sizeof(config.y_grad_tbl));
	memcpy(config.x_size_tbl, xSizes_, sizeof(config.x_size_tbl));
	memcpy(config.y_size_tbl, ySizes_, sizeof(config.y_size_tbl));
	params->module_en_update |= RKISP1_CIF_ISP_MODULE_LSC;
	params->module_ens |= RKISP1_CIF_ISP_MODULE_LSC;
	params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_LSC;
}
/* Fill the four per-channel LSC data tables from a single tuning set. */
void LensShadingCorrection::copyTable(rkisp1_cif_isp_lsc_config &config,
				      const Components &set)
{
	std::copy(set.r.begin(), set.r.end(), &config.r_data_tbl[0][0]);
	std::copy(set.gr.begin(), set.gr.end(), &config.gr_data_tbl[0][0]);
	std::copy(set.gb.begin(), set.gb.end(), &config.gb_data_tbl[0][0]);
	std::copy(set.b.begin(), set.b.end(), &config.b_data_tbl[0][0]);
}
/*
* Interpolate LSC parameters based on color temperature value.
*/
void LensShadingCorrection::interpolateTable(rkisp1_cif_isp_lsc_config &config,
					     const Components &set0,
					     const Components &set1,
					     const uint32_t ct)
{
	/*
	 * Linear interpolation weights: coeff0 + coeff1 == 1, with coeff1
	 * growing as ct approaches set1.ct. prepare() only calls this with
	 * set0.ct < ct < set1.ct, so the divisor is non-zero.
	 */
	double coeff0 = (set1.ct - ct) / static_cast<double>(set1.ct - set0.ct);
	double coeff1 = (ct - set0.ct) / static_cast<double>(set1.ct - set0.ct);
	for (unsigned int i = 0; i < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; ++i) {
		for (unsigned int j = 0; j < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; ++j) {
			/* Tables are stored flat, row-major. */
			unsigned int sample = i * RKISP1_CIF_ISP_LSC_SAMPLES_MAX + j;
			config.r_data_tbl[i][j] =
				set0.r[sample] * coeff0 +
				set1.r[sample] * coeff1;
			config.gr_data_tbl[i][j] =
				set0.gr[sample] * coeff0 +
				set1.gr[sample] * coeff1;
			config.gb_data_tbl[i][j] =
				set0.gb[sample] * coeff0 +
				set1.gb[sample] * coeff1;
			config.b_data_tbl[i][j] =
				set0.b[sample] * coeff0 +
				set1.b[sample] * coeff1;
		}
	}
}
/**
* \copydoc libcamera::ipa::Algorithm::prepare
*/
void LensShadingCorrection::prepare(IPAContext &context,
				    const uint32_t frame,
				    [[maybe_unused]] IPAFrameContext &frameContext,
				    rkisp1_params_cfg *params)
{
	struct rkisp1_cif_isp_lsc_config &config = params->others.lsc_config;
	/*
	 * If there is only one set, the configuration has already been done
	 * for first frame.
	 */
	if (sets_.size() == 1 && frame > 0)
		return;
	/*
	 * If there is only one set, pick it. We can ignore lastCt_, as it will
	 * never be relevant.
	 */
	if (sets_.size() == 1) {
		setParameters(params);
		copyTable(config, sets_.cbegin()->second);
		return;
	}
	/* Clamp the AWB estimate to the range covered by the tuning sets. */
	uint32_t ct = context.activeState.awb.temperatureK;
	ct = std::clamp(ct, sets_.cbegin()->first, sets_.crbegin()->first);
	/*
	 * If the original is the same, then it means the same adjustment would
	 * be made. If the adjusted is the same, then it means that it's the
	 * same as what was actually applied. Thus in these cases we can skip
	 * reprogramming the LSC.
	 *
	 * original == adjusted can only happen if an interpolation
	 * happened, or if original has an exact entry in sets_. This means
	 * that if original != adjusted, then original was adjusted to
	 * the nearest available entry in sets_, resulting in adjusted.
	 * Clearly, any ct value that is in between original and adjusted
	 * will be adjusted to the same adjusted value, so we can skip
	 * reprogramming the LSC table.
	 *
	 * We also skip updating the original value, as the last one had a
	 * larger bound and thus a larger range of ct values that will be
	 * adjusted to the same adjusted.
	 */
	if ((lastCt_.original <= ct && ct <= lastCt_.adjusted) ||
	    (lastCt_.adjusted <= ct && ct <= lastCt_.original))
		return;
	setParameters(params);
	/*
	 * The color temperature matches exactly one of the available LSC tables.
	 */
	if (sets_.count(ct)) {
		copyTable(config, sets_[ct]);
		lastCt_ = { ct, ct };
		return;
	}
	/* No shortcuts left; we need to round or interpolate */
	/*
	 * ct is strictly between two set keys here: upper_bound() cannot
	 * return end() (ct was clamped) nor begin() (an exact match was
	 * handled above).
	 */
	auto iter = sets_.upper_bound(ct);
	const Components &set1 = iter->second;
	const Components &set0 = (--iter)->second;
	uint32_t ct0 = set0.ct;
	uint32_t ct1 = set1.ct;
	uint32_t diff0 = ct - ct0;
	uint32_t diff1 = ct1 - ct;
	/*
	 * Snap to the nearest table when ct is within 10% of the gap between
	 * the two neighbouring sets, to avoid needless interpolation.
	 */
	static constexpr double kThreshold = 0.1;
	float threshold = kThreshold * (ct1 - ct0);
	if (diff0 < threshold || diff1 < threshold) {
		const Components &set = diff0 < diff1 ? set0 : set1;
		LOG(RkISP1Lsc, Debug) << "using LSC table for " << set.ct;
		copyTable(config, set);
		lastCt_ = { ct, set.ct };
		return;
	}
	/*
	 * ct is not within 10% of the difference between the neighbouring
	 * color temperatures, so we need to interpolate.
	 */
	LOG(RkISP1Lsc, Debug)
		<< "ct is " << ct << ", interpolating between "
		<< ct0 << " and " << ct1;
	interpolateTable(config, set0, set1, ct);
	lastCt_ = { ct, ct };
}
REGISTER_IPA_ALGORITHM(LensShadingCorrection, "LensShadingCorrection")
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/filter.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021-2022, Ideas On Board
*
* RkISP1 Filter control
*/
#pragma once
#include <sys/types.h>
#include "algorithm.h"
namespace libcamera {
namespace ipa::rkisp1::algorithms {
class Filter : public Algorithm
{
public:
	Filter() = default;
	~Filter() = default;
	/* Record the requested sharpness and denoise levels for the frame. */
	void queueRequest(IPAContext &context, const uint32_t frame,
			  IPAFrameContext &frameContext,
			  const ControlList &controls) override;
	/* Program the ISP filter registers when the levels have changed. */
	void prepare(IPAContext &context, const uint32_t frame,
		     IPAFrameContext &frameContext,
		     rkisp1_params_cfg *params) override;
};
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/filter.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021-2022, Ideas On Board
*
* RkISP1 Filter control
*/
#include "filter.h"
#include <cmath>
#include <libcamera/base/log.h>
#include <libcamera/control_ids.h>
/**
* \file filter.h
*/
namespace libcamera {
namespace ipa::rkisp1::algorithms {
/**
* \class Filter
* \brief RkISP1 Filter control
*
* Denoise and Sharpness filters will be applied by RkISP1 during the
* demosaicing step. The denoise filter is responsible for removing noise from
* the image, while the sharpness filter will enhance its acutance.
*
* \todo In current version the denoise and sharpness control is based on user
* controls. In a future version it should be controlled automatically by the
* algorithm.
*/
LOG_DEFINE_CATEGORY(RkISP1Filter)
/*
 * Default register values for the luminance weight function and the filter
 * mode — presumably taken from the vendor reference settings; see the
 * rkisp1-config.h UAPI for the bitfield layout (TODO confirm).
 */
static constexpr uint32_t kFiltLumWeightDefault = 0x00022040;
static constexpr uint32_t kFiltModeDefault = 0x000004f2;
/**
* \copydoc libcamera::ipa::Algorithm::queueRequest
*/
void Filter::queueRequest(IPAContext &context,
			  [[maybe_unused]] const uint32_t frame,
			  IPAFrameContext &frameContext,
			  const ControlList &controls)
{
	auto &filter = context.activeState.filter;
	bool changed = false;

	const auto &sharpness = controls.get(controls::Sharpness);
	if (sharpness) {
		unsigned int level = std::round(std::clamp(*sharpness, 0.0f, 10.0f));
		if (filter.sharpness != level) {
			filter.sharpness = level;
			changed = true;
		}
		LOG(RkISP1Filter, Debug) << "Set sharpness to " << *sharpness;
	}

	const auto &denoise = controls.get(controls::draft::NoiseReductionMode);
	if (denoise) {
		LOG(RkISP1Filter, Debug) << "Set denoise to " << *denoise;

		/* Record a new denoise strength if it differs from the current one. */
		auto setDenoise = [&](uint8_t level) {
			if (filter.denoise != level) {
				filter.denoise = level;
				changed = true;
			}
		};

		switch (*denoise) {
		case controls::draft::NoiseReductionModeOff:
			setDenoise(0);
			break;
		case controls::draft::NoiseReductionModeMinimal:
			setDenoise(1);
			break;
		case controls::draft::NoiseReductionModeHighQuality:
		case controls::draft::NoiseReductionModeFast:
			setDenoise(3);
			break;
		default:
			LOG(RkISP1Filter, Error)
				<< "Unsupported denoise value "
				<< *denoise;
			break;
		}
	}

	frameContext.filter.denoise = filter.denoise;
	frameContext.filter.sharpness = filter.sharpness;
	frameContext.filter.update = changed;
}
/**
* \copydoc libcamera::ipa::Algorithm::prepare
*/
void Filter::prepare([[maybe_unused]] IPAContext &context,
		     [[maybe_unused]] const uint32_t frame,
		     IPAFrameContext &frameContext, rkisp1_params_cfg *params)
{
	/* Check if the algorithm configuration has been updated. */
	if (!frameContext.filter.update)
		return;
	/*
	 * Register lookup tables, indexed by the sharpness level (fac_*) and
	 * the denoise level (thresh_*, stage1, chroma modes), both 0-10.
	 */
	static constexpr uint16_t filt_fac_sh0[] = {
		0x04, 0x07, 0x0a, 0x0c, 0x10, 0x14, 0x1a, 0x1e, 0x24, 0x2a, 0x30
	};
	static constexpr uint16_t filt_fac_sh1[] = {
		0x04, 0x08, 0x0c, 0x10, 0x16, 0x1b, 0x20, 0x26, 0x2c, 0x30, 0x3f
	};
	static constexpr uint16_t filt_fac_mid[] = {
		0x04, 0x06, 0x08, 0x0a, 0x0c, 0x10, 0x13, 0x17, 0x1d, 0x22, 0x28
	};
	static constexpr uint16_t filt_fac_bl0[] = {
		0x02, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x10, 0x15, 0x1a, 0x24
	};
	static constexpr uint16_t filt_fac_bl1[] = {
		0x00, 0x00, 0x00, 0x02, 0x04, 0x04, 0x06, 0x08, 0x0d, 0x14, 0x20
	};
	static constexpr uint16_t filt_thresh_sh0[] = {
		0, 18, 26, 36, 41, 75, 90, 120, 170, 250, 1023
	};
	static constexpr uint16_t filt_thresh_sh1[] = {
		0, 33, 44, 51, 67, 100, 120, 150, 200, 300, 1023
	};
	static constexpr uint16_t filt_thresh_bl0[] = {
		0, 8, 13, 23, 26, 50, 60, 80, 140, 180, 1023
	};
	static constexpr uint16_t filt_thresh_bl1[] = {
		0, 2, 5, 10, 15, 20, 26, 51, 100, 150, 1023
	};
	static constexpr uint16_t stage1_select[] = {
		6, 6, 4, 4, 3, 3, 2, 2, 2, 1, 0
	};
	static constexpr uint16_t filt_chr_v_mode[] = {
		1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
	};
	static constexpr uint16_t filt_chr_h_mode[] = {
		0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
	};
	uint8_t denoise = frameContext.filter.denoise;
	uint8_t sharpness = frameContext.filter.sharpness;
	auto &flt_config = params->others.flt_config;
	flt_config.fac_sh0 = filt_fac_sh0[sharpness];
	flt_config.fac_sh1 = filt_fac_sh1[sharpness];
	flt_config.fac_mid = filt_fac_mid[sharpness];
	flt_config.fac_bl0 = filt_fac_bl0[sharpness];
	flt_config.fac_bl1 = filt_fac_bl1[sharpness];
	flt_config.lum_weight = kFiltLumWeightDefault;
	flt_config.mode = kFiltModeDefault;
	flt_config.thresh_sh0 = filt_thresh_sh0[denoise];
	flt_config.thresh_sh1 = filt_thresh_sh1[denoise];
	flt_config.thresh_bl0 = filt_thresh_bl0[denoise];
	flt_config.thresh_bl1 = filt_thresh_bl1[denoise];
	flt_config.grn_stage1 = stage1_select[denoise];
	flt_config.chr_v_mode = filt_chr_v_mode[denoise];
	flt_config.chr_h_mode = filt_chr_h_mode[denoise];
	/*
	 * Combined high denoising and high sharpening requires some
	 * adjustments to the configuration of the filters. A first stage
	 * filter with a lower strength must be selected, and the blur factors
	 * must be decreased.
	 *
	 * NOTE(review): queueRequest() currently only produces denoise levels
	 * 0, 1 and 3, so the branches below for levels 8-10 appear to be
	 * unreachable future-proofing — confirm before relying on them.
	 */
	if (denoise == 9) {
		if (sharpness > 3)
			flt_config.grn_stage1 = 2;
	} else if (denoise == 10) {
		if (sharpness > 5)
			flt_config.grn_stage1 = 2;
		else if (sharpness > 3)
			flt_config.grn_stage1 = 1;
	}
	if (denoise > 7) {
		if (sharpness > 7) {
			flt_config.fac_bl0 /= 2;
			flt_config.fac_bl1 /= 4;
		} else if (sharpness > 4) {
			flt_config.fac_bl0 = flt_config.fac_bl0 * 3 / 4;
			flt_config.fac_bl1 /= 2;
		}
	}
	params->module_en_update |= RKISP1_CIF_ISP_MODULE_FLT;
	params->module_ens |= RKISP1_CIF_ISP_MODULE_FLT;
	params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_FLT;
}
REGISTER_IPA_ALGORITHM(Filter, "Filter")
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/rkisp1 | repos/libcamera/src/ipa/rkisp1/algorithms/dpcc.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021-2022, Ideas On Board
*
* RkISP1 Defect Pixel Cluster Correction control
*/
#include "dpcc.h"
#include <libcamera/base/log.h>
#include "libcamera/internal/yaml_parser.h"
#include "linux/rkisp1-config.h"
/**
* \file dpcc.h
*/
namespace libcamera {
namespace ipa::rkisp1::algorithms {
/**
* \class DefectPixelClusterCorrection
* \brief RkISP1 Defect Pixel Cluster Correction control
*
* Depending of the sensor quality, some pixels can be defective and then
* appear significantly brighter or darker than the other pixels.
*
* The Defect Pixel Cluster Correction algorithms is responsible to minimize
* the impact of the pixels. This can be done with algorithms applied at run
* time (on-the-fly method) or with a table of defective pixels. Only the first
* method is supported for the moment.
*/
LOG_DEFINE_CATEGORY(RkISP1Dpcc)
/*
 * Zero-initialize the whole hardware configuration; init() then only ORs in
 * the bits described by the tuning file.
 */
DefectPixelClusterCorrection::DefectPixelClusterCorrection()
	: config_({})
{
}
/**
* \copydoc libcamera::ipa::Algorithm::init
*/
int DefectPixelClusterCorrection::init([[maybe_unused]] IPAContext &context,
				       const YamlObject &tuningData)
{
	/* Run the on-the-fly stage and include the corrected pixels in both
	 * the green and red/blue outputs. */
	config_.mode = RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE;
	config_.output_mode = RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_G_CENTER
			    | RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_RB_CENTER;
	/* Use the fixed defect pixel set only when the tuning file asks for it. */
	config_.set_use = tuningData["fixed-set"].get<bool>(false)
			? RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_FIX_SET : 0;
	/* Get all defined sets to apply (up to 3). */
	const YamlObject &setsObject = tuningData["sets"];
	if (!setsObject.isList()) {
		LOG(RkISP1Dpcc, Error)
			<< "'sets' parameter not found in tuning file";
		return -EINVAL;
	}
	if (setsObject.size() > RKISP1_CIF_ISP_DPCC_METHODS_MAX) {
		LOG(RkISP1Dpcc, Error)
			<< "'sets' size in tuning file (" << setsObject.size()
			<< ") exceeds the maximum hardware capacity (3)";
		return -EINVAL;
	}
	for (std::size_t i = 0; i < setsObject.size(); ++i) {
		/*
		 * The method registers are OR'ed into below; they start from
		 * zero thanks to the zero-initialization of config_ in the
		 * constructor.
		 */
		struct rkisp1_cif_isp_dpcc_methods_config &method = config_.methods[i];
		const YamlObject &set = setsObject[i];
		uint16_t value;
		/* Enable set if described in YAML tuning file. */
		config_.set_use |= 1 << i;
		/* PG Method */
		const YamlObject &pgObject = set["pg-factor"];
		if (pgObject.contains("green")) {
			method.method |=
				RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_GREEN_ENABLE;
			value = pgObject["green"].get<uint16_t>(0);
			method.pg_fac |= RKISP1_CIF_ISP_DPCC_PG_FAC_G(value);
		}
		if (pgObject.contains("red-blue")) {
			method.method |=
				RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_RED_BLUE_ENABLE;
			value = pgObject["red-blue"].get<uint16_t>(0);
			method.pg_fac |= RKISP1_CIF_ISP_DPCC_PG_FAC_RB(value);
		}
		/* RO Method */
		const YamlObject &roObject = set["ro-limits"];
		if (roObject.contains("green")) {
			method.method |=
				RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_GREEN_ENABLE;
			value = roObject["green"].get<uint16_t>(0);
			config_.ro_limits |=
				RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_G(i, value);
		}
		if (roObject.contains("red-blue")) {
			method.method |=
				RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_RED_BLUE_ENABLE;
			value = roObject["red-blue"].get<uint16_t>(0);
			config_.ro_limits |=
				RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_RB(i, value);
		}
		/* RG Method */
		const YamlObject &rgObject = set["rg-factor"];
		method.rg_fac = 0;
		if (rgObject.contains("green")) {
			method.method |=
				RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_GREEN_ENABLE;
			value = rgObject["green"].get<uint16_t>(0);
			method.rg_fac |= RKISP1_CIF_ISP_DPCC_RG_FAC_G(value);
		}
		if (rgObject.contains("red-blue")) {
			method.method |=
				RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_RED_BLUE_ENABLE;
			value = rgObject["red-blue"].get<uint16_t>(0);
			method.rg_fac |= RKISP1_CIF_ISP_DPCC_RG_FAC_RB(value);
		}
		/* RND Method */
		const YamlObject &rndOffsetsObject = set["rnd-offsets"];
		if (rndOffsetsObject.contains("green")) {
			method.method |=
				RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_GREEN_ENABLE;
			value = rndOffsetsObject["green"].get<uint16_t>(0);
			config_.rnd_offs |=
				RKISP1_CIF_ISP_DPCC_RND_OFFS_n_G(i, value);
		}
		if (rndOffsetsObject.contains("red-blue")) {
			method.method |=
				RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_RED_BLUE_ENABLE;
			value = rndOffsetsObject["red-blue"].get<uint16_t>(0);
			config_.rnd_offs |=
				RKISP1_CIF_ISP_DPCC_RND_OFFS_n_RB(i, value);
		}
		const YamlObject &rndThresholdObject = set["rnd-threshold"];
		method.rnd_thresh = 0;
		if (rndThresholdObject.contains("green")) {
			method.method |=
				RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_GREEN_ENABLE;
			value = rndThresholdObject["green"].get<uint16_t>(0);
			method.rnd_thresh |=
				RKISP1_CIF_ISP_DPCC_RND_THRESH_G(value);
		}
		if (rndThresholdObject.contains("red-blue")) {
			method.method |=
				RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_RED_BLUE_ENABLE;
			value = rndThresholdObject["red-blue"].get<uint16_t>(0);
			method.rnd_thresh |=
				RKISP1_CIF_ISP_DPCC_RND_THRESH_RB(value);
		}
		/* LC Method */
		const YamlObject &lcThresholdObject = set["line-threshold"];
		method.line_thresh = 0;
		if (lcThresholdObject.contains("green")) {
			method.method |=
				RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_GREEN_ENABLE;
			value = lcThresholdObject["green"].get<uint16_t>(0);
			method.line_thresh |=
				RKISP1_CIF_ISP_DPCC_LINE_THRESH_G(value);
		}
		if (lcThresholdObject.contains("red-blue")) {
			method.method |=
				RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_RED_BLUE_ENABLE;
			value = lcThresholdObject["red-blue"].get<uint16_t>(0);
			method.line_thresh |=
				RKISP1_CIF_ISP_DPCC_LINE_THRESH_RB(value);
		}
		const YamlObject &lcTMadFactorObject = set["line-mad-factor"];
		method.line_mad_fac = 0;
		if (lcTMadFactorObject.contains("green")) {
			method.method |=
				RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_GREEN_ENABLE;
			value = lcTMadFactorObject["green"].get<uint16_t>(0);
			method.line_mad_fac |=
				RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_G(value);
		}
		if (lcTMadFactorObject.contains("red-blue")) {
			method.method |=
				RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_RED_BLUE_ENABLE;
			value = lcTMadFactorObject["red-blue"].get<uint16_t>(0);
			method.line_mad_fac |=
				RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_RB(value);
		}
	}
	return 0;
}
/**
* \copydoc libcamera::ipa::Algorithm::prepare
*/
void DefectPixelClusterCorrection::prepare([[maybe_unused]] IPAContext &context,
					   const uint32_t frame,
					   [[maybe_unused]] IPAFrameContext &frameContext,
					   rkisp1_params_cfg *params)
{
	/* The DPCC configuration is fully static: program it once only. */
	if (frame > 0)
		return;

	params->others.dpcc_config = config_;

	params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_DPCC;
	params->module_en_update |= RKISP1_CIF_ISP_MODULE_DPCC;
	params->module_ens |= RKISP1_CIF_ISP_MODULE_DPCC;
}
REGISTER_IPA_ALGORITHM(DefectPixelClusterCorrection, "DefectPixelClusterCorrection")
} /* namespace ipa::rkisp1::algorithms */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa | repos/libcamera/src/ipa/simple/black_level.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Red Hat Inc.
*
* black level handling
*/
#pragma once
#include <array>
#include <stdint.h>
#include "libcamera/internal/software_isp/swisp_stats.h"
namespace libcamera {
class BlackLevel
{
public:
	BlackLevel();
	/* Return the current black level estimate as an 8-bit value. */
	uint8_t get() const;
	/* Refresh the estimate from a luminance histogram. */
	void update(SwIspStats::Histogram &yHistogram);
private:
	/* Latest estimate — presumably only meaningful once blackLevelSet_
	 * is true; confirm in black_level.cpp. */
	uint8_t blackLevel_;
	/* Whether an estimate has been computed yet. */
	bool blackLevelSet_;
};
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa | repos/libcamera/src/ipa/simple/soft_simple.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2023, Linaro Ltd
*
* Simple Software Image Processing Algorithm module
*/
#include <cmath>
#include <numeric>
#include <stdint.h>
#include <sys/mman.h>
#include <linux/v4l2-controls.h>
#include <libcamera/base/file.h>
#include <libcamera/base/log.h>
#include <libcamera/base/shared_fd.h>
#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
#include <libcamera/ipa/ipa_interface.h>
#include <libcamera/ipa/ipa_module_info.h>
#include <libcamera/ipa/soft_ipa_interface.h>
#include "libcamera/internal/software_isp/debayer_params.h"
#include "libcamera/internal/software_isp/swisp_stats.h"
#include "libcamera/internal/yaml_parser.h"
#include "libipa/camera_sensor_helper.h"
#include "black_level.h"
namespace libcamera {
LOG_DEFINE_CATEGORY(IPASoft)
namespace ipa::soft {
/*
 * The number of bins to use for the optimal exposure calculations.
 */
static constexpr unsigned int kExposureBinsCount = 5;
/*
 * The exposure is optimal when the mean sample value of the histogram is
 * in the middle of the range (i.e. 2.5 for 5 bins).
 */
static constexpr float kExposureOptimal = kExposureBinsCount / 2.0;
/*
 * The below value implements the hysteresis for the exposure adjustment.
 * It is small enough to have the exposure close to the optimal, and is big
 * enough to prevent the exposure from wobbling around the optimal value.
 */
static constexpr float kExposureSatisfactory = 0.2;
class IPASoftSimple : public ipa::soft::IPASoftInterface
{
public:
	IPASoftSimple()
		: params_(nullptr), stats_(nullptr), blackLevel_(BlackLevel()),
		  ignoreUpdates_(0)
	{
	}
	~IPASoftSimple();
	int init(const IPASettings &settings,
		 const SharedFD &fdStats,
		 const SharedFD &fdParams,
		 const ControlInfoMap &sensorInfoMap) override;
	int configure(const ControlInfoMap &sensorInfoMap) override;
	int start() override;
	void stop() override;
	void processStats(const ControlList &sensorControls) override;
private:
	void updateExposure(double exposureMSV);
	/* Shared-memory regions mapped in init(), unmapped in the destructor. */
	DebayerParams *params_;
	SwIspStats *stats_;
	/* Optional helper translating sensor gain codes to real gains. */
	std::unique_ptr<CameraSensorHelper> camHelper_;
	ControlInfoMap sensorInfoMap_;
	BlackLevel blackLevel_;
	static constexpr unsigned int kGammaLookupSize = 1024;
	std::array<uint8_t, kGammaLookupSize> gammaTable_;
	/* -1 marks the gamma table as not yet built for any black level. */
	int lastBlackLevel_ = -1;
	/* Sensor limits captured in configure(); units are the driver's. */
	int32_t exposureMin_, exposureMax_;
	int32_t exposure_;
	double againMin_, againMax_, againMinStep_;
	double again_;
	/* Presumably the number of stats updates to skip after applying new
	 * sensor controls — confirm in processStats(). */
	unsigned int ignoreUpdates_;
};
IPASoftSimple::~IPASoftSimple()
{
	/* Undo the mmap()s performed in init(); the order is irrelevant. */
	if (params_)
		munmap(params_, sizeof(DebayerParams));
	if (stats_)
		munmap(stats_, sizeof(SwIspStats));
}
int IPASoftSimple::init(const IPASettings &settings,
			const SharedFD &fdStats,
			const SharedFD &fdParams,
			const ControlInfoMap &sensorInfoMap)
{
	/* The helper is optional: gains fall back to a linear model without it. */
	camHelper_ = CameraSensorHelperFactoryBase::create(settings.sensorModel);
	if (!camHelper_) {
		LOG(IPASoft, Warning)
			<< "Failed to create camera sensor helper for "
			<< settings.sensorModel;
	}
	/* Load the tuning data file */
	File file(settings.configurationFile);
	if (!file.open(File::OpenModeFlag::ReadOnly)) {
		int ret = file.error();
		LOG(IPASoft, Error)
			<< "Failed to open configuration file "
			<< settings.configurationFile << ": " << strerror(-ret);
		return ret;
	}
	std::unique_ptr<libcamera::YamlObject> data = YamlParser::parse(file);
	if (!data)
		return -EINVAL;
	/* \todo Use the IPA configuration file for real. */
	unsigned int version = (*data)["version"].get<uint32_t>(0);
	LOG(IPASoft, Debug) << "Tuning file version " << version;
	params_ = nullptr;
	stats_ = nullptr;
	if (!fdStats.isValid()) {
		LOG(IPASoft, Error) << "Invalid Statistics handle";
		return -ENODEV;
	}
	if (!fdParams.isValid()) {
		LOG(IPASoft, Error) << "Invalid Parameters handle";
		return -ENODEV;
	}
	/* Map the parameters region, written by the IPA for the debayerer. */
	{
		void *mem = mmap(nullptr, sizeof(DebayerParams), PROT_WRITE,
				 MAP_SHARED, fdParams.get(), 0);
		if (mem == MAP_FAILED) {
			LOG(IPASoft, Error) << "Unable to map Parameters";
			return -errno;
		}
		params_ = static_cast<DebayerParams *>(mem);
	}
	/* Map the statistics region, read-only from the IPA's side. */
	{
		void *mem = mmap(nullptr, sizeof(SwIspStats), PROT_READ,
				 MAP_SHARED, fdStats.get(), 0);
		if (mem == MAP_FAILED) {
			LOG(IPASoft, Error) << "Unable to map Statistics";
			return -errno;
		}
		stats_ = static_cast<SwIspStats *>(mem);
	}
	/*
	 * Check if the sensor driver supports the controls required by the
	 * Soft IPA.
	 * Don't save the min and max control values yet, as e.g. the limits
	 * for V4L2_CID_EXPOSURE depend on the configured sensor resolution.
	 */
	if (sensorInfoMap.find(V4L2_CID_EXPOSURE) == sensorInfoMap.end()) {
		LOG(IPASoft, Error) << "Don't have exposure control";
		return -EINVAL;
	}
	if (sensorInfoMap.find(V4L2_CID_ANALOGUE_GAIN) == sensorInfoMap.end()) {
		LOG(IPASoft, Error) << "Don't have gain control";
		return -EINVAL;
	}
	return 0;
}
/**
 * \brief Configure the IPA for the current sensor mode
 * \param[in] sensorInfoMap Sensor controls, with limits valid for this mode
 *
 * Caches the exposure and analogue gain limits used by the AGC loop. Gain
 * limits are converted to real gain values when a sensor helper is available;
 * otherwise raw gain codes are used directly.
 *
 * \return 0 on success
 */
int IPASoftSimple::configure(const ControlInfoMap &sensorInfoMap)
{
	sensorInfoMap_ = sensorInfoMap;
	const ControlInfo &exposureInfo = sensorInfoMap_.find(V4L2_CID_EXPOSURE)->second;
	const ControlInfo &gainInfo = sensorInfoMap_.find(V4L2_CID_ANALOGUE_GAIN)->second;
	exposureMin_ = exposureInfo.min().get<int32_t>();
	exposureMax_ = exposureInfo.max().get<int32_t>();
	if (!exposureMin_) {
		LOG(IPASoft, Warning) << "Minimum exposure is zero, that can't be linear";
		exposureMin_ = 1;
	}
	int32_t againMin = gainInfo.min().get<int32_t>();
	int32_t againMax = gainInfo.max().get<int32_t>();
	if (camHelper_) {
		againMin_ = camHelper_->gain(againMin);
		againMax_ = camHelper_->gain(againMax);
		againMinStep_ = (againMax_ - againMin_) / 100.0;
	} else {
		/*
		 * The camera sensor gain (g) is usually not equal to the value written
		 * into the gain register (x). But the way how the AGC algorithm changes
		 * the gain value to make the total exposure closer to the optimum
		 * assumes that g(x) is not too far from linear function. If the minimal
		 * gain is 0, the g(x) is likely to be far from the linear, like
		 * g(x) = a / (b * x + c). To avoid unexpected changes to the gain by
		 * the AGC algorithm (abrupt near one edge, and very small near the
		 * other) we limit the range of the gain values used.
		 */
		/*
		 * Always initialise both limits; previously againMin_ was left
		 * unset (stale/uninitialised) whenever againMin was non-zero.
		 */
		againMin_ = againMin;
		againMax_ = againMax;
		if (!againMin) {
			LOG(IPASoft, Warning)
				<< "Minimum gain is zero, that can't be linear";
			againMin_ = std::min(100, againMin / 2 + againMax / 2);
		}
		againMinStep_ = 1.0;
	}
	LOG(IPASoft, Info) << "Exposure " << exposureMin_ << "-" << exposureMax_
			   << ", gain " << againMin_ << "-" << againMax_
			   << " (" << againMinStep_ << ")";
	return 0;
}
/* Start the IPA; nothing to do for the soft IPA. */
int IPASoftSimple::start()
{
	return 0;
}
/* Stop the IPA; nothing to do for the soft IPA. */
void IPASoftSimple::stop()
{
}
/**
 * \brief Process a frame's statistics and update ISP/sensor parameters
 * \param[in] sensorControls Exposure and gain values applied to this frame
 *
 * Runs the per-frame loop: updates the black level estimate, computes AWB
 * gains from the colour channel sums, regenerates the gamma/gain lookup
 * tables in params_, then (unless recent sensor updates are still settling)
 * computes the Mean Sample Value from the luminance histogram and adjusts
 * exposure and analogue gain.
 */
void IPASoftSimple::processStats(const ControlList &sensorControls)
{
	SwIspStats::Histogram histogram = stats_->yHistogram;
	/*
	 * The black level is refined only while sensor updates are settling,
	 * i.e. on frames whose exposure/gain are known to match the controls.
	 */
	if (ignoreUpdates_ > 0)
		blackLevel_.update(histogram);
	const uint8_t blackLevel = blackLevel_.get();
	/*
	 * Black level must be subtracted to get the correct AWB ratios, they
	 * would be off if they were computed from the whole brightness range
	 * rather than from the sensor range.
	 */
	/*
	 * NOTE(review): the accumulation is performed in int because of the
	 * literal 0 initial value; fine for typical frame sizes (< 2^31
	 * pixels) but worth switching to uint64_t{0} for safety.
	 */
	const uint64_t nPixels = std::accumulate(
		histogram.begin(), histogram.end(), 0);
	const uint64_t offset = blackLevel * nPixels;
	/* Each channel covers a fraction of the Bayer pattern: G half, R/B quarter. */
	const uint64_t sumR = stats_->sumR_ - offset / 4;
	const uint64_t sumG = stats_->sumG_ - offset / 2;
	const uint64_t sumB = stats_->sumB_ - offset / 4;
	/*
	 * Calculate red and blue gains for AWB.
	 * Clamp max gain at 4.0, this also avoids 0 division.
	 * Gain: 128 = 0.5, 256 = 1.0, 512 = 2.0, etc.
	 */
	const unsigned int gainR = sumR <= sumG / 4 ? 1024 : 256 * sumG / sumR;
	const unsigned int gainB = sumB <= sumG / 4 ? 1024 : 256 * sumG / sumB;
	/* Green gain and gamma values are fixed */
	constexpr unsigned int gainG = 256;
	/* Update the gamma table if needed */
	if (blackLevel != lastBlackLevel_) {
		constexpr float gamma = 0.5;
		/* Entries below the black level map to 0. */
		const unsigned int blackIndex = blackLevel * kGammaLookupSize / 256;
		std::fill(gammaTable_.begin(), gammaTable_.begin() + blackIndex, 0);
		const float divisor = kGammaLookupSize - blackIndex - 1.0;
		for (unsigned int i = blackIndex; i < kGammaLookupSize; i++)
			gammaTable_[i] = UINT8_MAX *
					 std::pow((i - blackIndex) / divisor, gamma);
		lastBlackLevel_ = blackLevel;
	}
	/* Compose per-channel LUTs: apply the AWB gain, then the gamma table. */
	for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++) {
		constexpr unsigned int div =
			DebayerParams::kRGBLookupSize * 256 / kGammaLookupSize;
		unsigned int idx;
		/* Apply gamma after gain! */
		idx = std::min({ i * gainR / div, (kGammaLookupSize - 1) });
		params_->red[i] = gammaTable_[idx];
		idx = std::min({ i * gainG / div, (kGammaLookupSize - 1) });
		params_->green[i] = gammaTable_[idx];
		idx = std::min({ i * gainB / div, (kGammaLookupSize - 1) });
		params_->blue[i] = gammaTable_[idx];
	}
	setIspParams.emit();
	/* \todo Switch to the libipa/algorithm.h API someday. */
	/*
	 * AE / AGC, use 2 frames delay to make sure that the exposure and
	 * the gain set have applied to the camera sensor.
	 * \todo This could be handled better with DelayedControls.
	 */
	if (ignoreUpdates_ > 0) {
		--ignoreUpdates_;
		return;
	}
	/*
	 * Calculate Mean Sample Value (MSV) according to formula from:
	 * https://www.araa.asn.au/acra/acra2007/papers/paper84final.pdf
	 */
	const unsigned int blackLevelHistIdx =
		blackLevel / (256 / SwIspStats::kYHistogramSize);
	const unsigned int histogramSize =
		SwIspStats::kYHistogramSize - blackLevelHistIdx;
	/*
	 * NOTE(review): if the black level were high enough that
	 * histogramSize < kExposureBinsCount, yHistValsPerBin would be 0 and
	 * the division below would trap; likewise denom could be 0 for an
	 * empty histogram. Presumably unreachable in practice — confirm.
	 */
	const unsigned int yHistValsPerBin = histogramSize / kExposureBinsCount;
	const unsigned int yHistValsPerBinMod =
		histogramSize / (histogramSize % kExposureBinsCount + 1);
	int exposureBins[kExposureBinsCount] = {};
	unsigned int denom = 0;
	unsigned int num = 0;
	/* Distribute the (remaining) histogram entries over the MSV bins. */
	for (unsigned int i = 0; i < histogramSize; i++) {
		unsigned int idx = (i - (i / yHistValsPerBinMod)) / yHistValsPerBin;
		exposureBins[idx] += stats_->yHistogram[blackLevelHistIdx + i];
	}
	for (unsigned int i = 0; i < kExposureBinsCount; i++) {
		LOG(IPASoft, Debug) << i << ": " << exposureBins[i];
		denom += exposureBins[i];
		num += exposureBins[i] * (i + 1);
	}
	float exposureMSV = static_cast<float>(num) / denom;
	/* Sanity check */
	if (!sensorControls.contains(V4L2_CID_EXPOSURE) ||
	    !sensorControls.contains(V4L2_CID_ANALOGUE_GAIN)) {
		LOG(IPASoft, Error) << "Control(s) missing";
		return;
	}
	exposure_ = sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
	int32_t again = sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>();
	again_ = camHelper_ ? camHelper_->gain(again) : again;
	updateExposure(exposureMSV);
	ControlList ctrls(sensorInfoMap_);
	ctrls.set(V4L2_CID_EXPOSURE, exposure_);
	ctrls.set(V4L2_CID_ANALOGUE_GAIN,
		  static_cast<int32_t>(camHelper_ ? camHelper_->gainCode(again_) : again_));
	/* Skip AGC updates for the next 2 frames while these controls settle. */
	ignoreUpdates_ = 2;
	setSensorControls.emit(ctrls);
	LOG(IPASoft, Debug) << "exposureMSV " << exposureMSV
			    << " exp " << exposure_ << " again " << again_
			    << " gain R/B " << gainR << "/" << gainB
			    << " black level " << static_cast<unsigned int>(blackLevel);
}
/**
 * \brief Step exposure and analogue gain towards the optimal MSV
 * \param[in] exposureMSV Mean Sample Value computed from the histogram
 *
 * Moves exposure_ and again_ up or down by roughly 10% per call, preferring
 * exposure changes and only touching gain once exposure has reached its
 * limit. Both values are clamped to the sensor limits cached in configure().
 */
void IPASoftSimple::updateExposure(double exposureMSV)
{
	/*
	 * kExpDenominator of 10 gives ~10% increment/decrement;
	 * kExpDenominator of 5 - about ~20%
	 */
	static constexpr uint8_t kExpDenominator = 10;
	static constexpr uint8_t kExpNumeratorUp = kExpDenominator + 1;
	static constexpr uint8_t kExpNumeratorDown = kExpDenominator - 1;
	double next;
	/* Image too dark: raise exposure first, then gain once exposure maxes out. */
	if (exposureMSV < kExposureOptimal - kExposureSatisfactory) {
		next = exposure_ * kExpNumeratorUp / kExpDenominator;
		/* Guarantee forward progress when the 10% step rounds to zero. */
		if (next - exposure_ < 1)
			exposure_ += 1;
		else
			exposure_ = next;
		if (exposure_ >= exposureMax_) {
			next = again_ * kExpNumeratorUp / kExpDenominator;
			if (next - again_ < againMinStep_)
				again_ += againMinStep_;
			else
				again_ = next;
		}
	}
	/* Image too bright: reduce gain first, then exposure. */
	if (exposureMSV > kExposureOptimal + kExposureSatisfactory) {
		if (exposure_ == exposureMax_ && again_ > againMin_) {
			next = again_ * kExpNumeratorDown / kExpDenominator;
			if (again_ - next < againMinStep_)
				again_ -= againMinStep_;
			else
				again_ = next;
		} else {
			next = exposure_ * kExpNumeratorDown / kExpDenominator;
			if (exposure_ - next < 1)
				exposure_ -= 1;
			else
				exposure_ = next;
		}
	}
	/* Never exceed the sensor limits obtained in configure(). */
	exposure_ = std::clamp(exposure_, exposureMin_, exposureMax_);
	again_ = std::clamp(again_, againMin_, againMax_);
}
} /* namespace ipa::soft */
/*
* External IPA module interface
*/
extern "C" {
/* Module descriptor looked up by the IPA framework when loading this module. */
const struct IPAModuleInfo ipaModuleInfo = {
	IPA_MODULE_API_VERSION,
	0,
	"simple",
	"simple",
};
/* Entry point: instantiate the soft IPA. Ownership passes to the caller. */
IPAInterface *ipaCreate()
{
	return new ipa::soft::IPASoftSimple();
}
} /* extern "C" */
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa | repos/libcamera/src/ipa/simple/black_level.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Red Hat Inc.
*
* black level handling
*/
#include "black_level.h"
#include <numeric>
#include <libcamera/base/log.h>
namespace libcamera {
LOG_DEFINE_CATEGORY(IPASoftBL)
/**
* \class BlackLevel
* \brief Object providing black point level for software ISP
*
* Black level can be provided in hardware tuning files or, if no tuning file is
* available for the given hardware, guessed automatically, with less accuracy.
* As tuning files are not yet implemented for software ISP, BlackLevel
* currently provides only guessed black levels.
*
* This class serves for tracking black level as a property of the underlying
* hardware, not as means of enhancing a particular scene or image.
*
* The class is supposed to be instantiated for the given camera stream.
* The black level can be retrieved using BlackLevel::get() method. It is
* initially 0 and may change when updated using BlackLevel::update() method.
*/
/*
 * Start with the maximum possible level (255) so the first update() scans the
 * entire histogram; blackLevelSet_ stays false until a level is measured, and
 * get() reports 0 in the meantime.
 */
BlackLevel::BlackLevel()
	: blackLevel_(255), blackLevelSet_(false)
{
}
/**
* \brief Return the current black level
*
* \return The black level, in the range from 0 (minimum) to 255 (maximum).
* If the black level couldn't be determined yet, return 0.
*/
uint8_t BlackLevel::get() const
{
	/* Report 0 until a level has actually been measured. */
	if (!blackLevelSet_)
		return 0;
	return blackLevel_;
}
/**
* \brief Update black level from the provided histogram
* \param[in] yHistogram The histogram to be used for updating black level
*
* The black level is property of the given hardware, not image. It is updated
* only if it has not been yet set or if it is lower than the lowest value seen
* so far.
*/
void BlackLevel::update(SwIspStats::Histogram &yHistogram)
{
	/*
	 * The constant is selected to be "good enough", not overly conservative or
	 * aggressive. There is no magic about the given value.
	 *
	 * Named as a local constant (kIgnoredPercentage); the previous
	 * trailing-underscore name wrongly suggested a data member.
	 */
	constexpr float kIgnoredPercentage = 0.02;
	/* Accumulate unsigned to match the destination and avoid signed overflow. */
	const unsigned int total =
		std::accumulate(begin(yHistogram), end(yHistogram), 0U);
	const unsigned int pixelThreshold = kIgnoredPercentage * total;
	const unsigned int histogramRatio = 256 / SwIspStats::kYHistogramSize;
	const unsigned int currentBlackIdx = blackLevel_ / histogramRatio;
	/*
	 * Walk the histogram from dark to bright; the first bin at which the
	 * cumulative count crosses the threshold becomes the new black level.
	 * Only bins below the current level are considered, so the level can
	 * only ever decrease.
	 */
	for (unsigned int i = 0, seen = 0;
	     i < currentBlackIdx && i < SwIspStats::kYHistogramSize;
	     i++) {
		seen += yHistogram[i];
		if (seen >= pixelThreshold) {
			blackLevel_ = i * histogramRatio;
			blackLevelSet_ = true;
			LOG(IPASoftBL, Debug)
				<< "Auto-set black level: "
				<< i << "/" << SwIspStats::kYHistogramSize
				<< " (" << 100 * (seen - yHistogram[i]) / total << "% below, "
				<< 100 * seen / total << "% at or below)";
			break;
		}
	}
}
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/simple | repos/libcamera/src/ipa/simple/data/uncalibrated.yaml | # SPDX-License-Identifier: CC0-1.0
%YAML 1.1
---
version: 1
...
|
0 | repos/libcamera/src/ipa | repos/libcamera/src/ipa/vimc/vimc.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Vimc Image Processing Algorithm module
*/
#include <libcamera/ipa/vimc_ipa_interface.h>
#include <fcntl.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <iostream>
#include <libcamera/base/file.h>
#include <libcamera/base/log.h>
#include <libcamera/ipa/ipa_interface.h>
#include <libcamera/ipa/ipa_module_info.h>
#include "libcamera/internal/mapped_framebuffer.h"
namespace libcamera {
LOG_DEFINE_CATEGORY(IPAVimc)
/*
 * Test IPA for the vimc virtual pipeline. It implements the IPAVimcInterface
 * operations and reports each one through an optional trace FIFO so tests can
 * observe the call sequence.
 */
class IPAVimc : public ipa::vimc::IPAVimcInterface
{
public:
	IPAVimc();
	~IPAVimc();
	int init(const IPASettings &settings,
		 const ipa::vimc::IPAOperationCode code,
		 const Flags<ipa::vimc::TestFlag> inFlags,
		 Flags<ipa::vimc::TestFlag> *outFlags) override;
	int start() override;
	void stop() override;
	int configure(const IPACameraSensorInfo &sensorInfo,
		      const std::map<unsigned int, IPAStream> &streamConfig,
		      const std::map<unsigned int, ControlInfoMap> &entityControls) override;
	void mapBuffers(const std::vector<IPABuffer> &buffers) override;
	void unmapBuffers(const std::vector<unsigned int> &ids) override;
	void queueRequest(uint32_t frame, const ControlList &controls) override;
	void fillParamsBuffer(uint32_t frame, uint32_t bufferId) override;
private:
	void initTrace();
	void trace(enum ipa::vimc::IPAOperationCode operation);
	/* Write end of the test FIFO, or -1 when tracing is disabled. */
	int fd_;
	/* Parameter buffers mapped by mapBuffers(), keyed by buffer ID. */
	std::map<unsigned int, MappedFrameBuffer> buffers_;
};
/* Open the trace FIFO (if present) as soon as the IPA is created. */
IPAVimc::IPAVimc()
	: fd_(-1)
{
	initTrace();
}
IPAVimc::~IPAVimc()
{
	/* Close the trace FIFO if it was opened. */
	if (fd_ != -1)
		::close(fd_);
}
/*
 * Initialise the test IPA: trace the call, exercise the flag plumbing used by
 * the IPA interface tests, and verify the configuration file can be opened.
 * Returns 0 on success, -EINVAL if the configuration file is unreadable.
 */
int IPAVimc::init(const IPASettings &settings,
		  const ipa::vimc::IPAOperationCode code,
		  const Flags<ipa::vimc::TestFlag> inFlags,
		  Flags<ipa::vimc::TestFlag> *outFlags)
{
	trace(ipa::vimc::IPAOperationInit);
	LOG(IPAVimc, Debug)
		<< "initializing vimc IPA with configuration file "
		<< settings.configurationFile;
	LOG(IPAVimc, Debug) << "Got opcode " << code;
	LOG(IPAVimc, Debug)
		<< "Flag 2 was "
		<< (inFlags & ipa::vimc::TestFlag::Flag2 ? "" : "not ")
		<< "set";
	/* Report Flag1 back so the caller can verify flag round-tripping. */
	*outFlags |= ipa::vimc::TestFlag::Flag1;
	File conf(settings.configurationFile);
	if (!conf.open(File::OpenModeFlag::ReadOnly)) {
		LOG(IPAVimc, Error) << "Failed to open configuration file";
		return -EINVAL;
	}
	return 0;
}
/* Trace the start operation; no further work is needed for the test IPA. */
int IPAVimc::start()
{
	trace(ipa::vimc::IPAOperationStart);
	LOG(IPAVimc, Debug) << "start vimc IPA!";
	return 0;
}
/* Trace the stop operation. */
void IPAVimc::stop()
{
	trace(ipa::vimc::IPAOperationStop);
	LOG(IPAVimc, Debug) << "stop vimc IPA!";
}
/* No configuration is needed; the arguments are intentionally unused. */
int IPAVimc::configure([[maybe_unused]] const IPACameraSensorInfo &sensorInfo,
		       [[maybe_unused]] const std::map<unsigned int, IPAStream> &streamConfig,
		       [[maybe_unused]] const std::map<unsigned int, ControlInfoMap> &entityControls)
{
	LOG(IPAVimc, Debug) << "configure()";
	return 0;
}
/*
 * Map the given parameter buffers into the IPA address space and store the
 * mappings by buffer ID.
 *
 * NOTE(review): fb is a per-iteration temporary passed by pointer to the
 * MappedFrameBuffer constructor; this presumably performs the mapping during
 * construction and does not retain the pointer — confirm against
 * MappedFrameBuffer's documented lifetime requirements.
 */
void IPAVimc::mapBuffers(const std::vector<IPABuffer> &buffers)
{
	for (const IPABuffer &buffer : buffers) {
		const FrameBuffer fb(buffer.planes);
		buffers_.emplace(std::piecewise_construct,
				 std::forward_as_tuple(buffer.id),
				 std::forward_as_tuple(&fb, MappedFrameBuffer::MapFlag::Read));
	}
}
/* Drop the mapping for every listed buffer ID; unknown IDs are ignored. */
void IPAVimc::unmapBuffers(const std::vector<unsigned int> &ids)
{
	for (unsigned int id : ids)
		buffers_.erase(id);
}
/* Per-request controls are not used by the test IPA. */
void IPAVimc::queueRequest([[maybe_unused]] uint32_t frame,
			   [[maybe_unused]] const ControlList &controls)
{
}
/*
 * "Fill" the parameters buffer: the test IPA only checks the buffer was
 * previously mapped and signals completion with empty flags.
 */
void IPAVimc::fillParamsBuffer([[maybe_unused]] uint32_t frame, uint32_t bufferId)
{
	auto it = buffers_.find(bufferId);
	if (it == buffers_.end()) {
		LOG(IPAVimc, Error) << "Could not find parameter buffer";
		return;
	}
	Flags<ipa::vimc::TestFlag> flags;
	paramsBufferReady.emit(bufferId, flags);
}
/*
 * Open the test FIFO for writing if it exists. Absence of the FIFO is not an
 * error: it simply means no test harness is listening and tracing stays off.
 */
void IPAVimc::initTrace()
{
	struct stat fifoStat;
	int ret = stat(ipa::vimc::VimcIPAFIFOPath.c_str(), &fifoStat);
	if (ret)
		return;
	ret = ::open(ipa::vimc::VimcIPAFIFOPath.c_str(), O_WRONLY | O_CLOEXEC);
	if (ret < 0) {
		ret = errno;
		LOG(IPAVimc, Error) << "Failed to open vimc IPA test FIFO: "
				    << strerror(ret);
		return;
	}
	fd_ = ret;
}
/* Report an operation code to the test FIFO; a no-op when tracing is off. */
void IPAVimc::trace(enum ipa::vimc::IPAOperationCode operation)
{
	if (fd_ < 0)
		return;
	int ret = ::write(fd_, &operation, sizeof(operation));
	if (ret < 0) {
		ret = errno;
		LOG(IPAVimc, Error) << "Failed to write to vimc IPA test FIFO: "
				    << strerror(ret);
	}
}
/*
* External IPA module interface
*/
extern "C" {
/* Module descriptor looked up by the IPA framework when loading this module. */
const struct IPAModuleInfo ipaModuleInfo = {
	IPA_MODULE_API_VERSION,
	0,
	"vimc",
	"vimc",
};
/* Entry point: instantiate the vimc test IPA. Ownership passes to the caller. */
IPAInterface *ipaCreate()
{
	return new IPAVimc();
}
}
} /* namespace libcamera */
|
0 | repos/libcamera/src/ipa/vimc | repos/libcamera/src/ipa/vimc/data/vimc.conf | # SPDX-License-Identifier: LGPL-2.1-or-later
#
# Dummy configuration file for the vimc IPA.
|
0 | repos/libcamera/src/ipa | repos/libcamera/src/ipa/rpi/README.md | .. SPDX-License-Identifier: BSD-2-Clause
# _libcamera_ for the Raspberry Pi
Raspberry Pi provides a fully featured pipeline handler and control algorithms
(IPAs, or "Image Processing Algorithms") to work with _libcamera_. Support is
included for all existing Raspberry Pi camera modules.
_libcamera_ for the Raspberry Pi allows users to:
1. Use their existing Raspberry Pi cameras.
1. Change the tuning of the image processing for their Raspberry Pi cameras.
1. Alter or amend the control algorithms (such as AGC/AEC, AWB or any others)
that control the sensor and ISP.
1. Implement their own custom control algorithms.
1. Supply new tunings and/or algorithms for completely new sensors.
## How to install and run _libcamera_ on the Raspberry Pi
Please follow the instructions [here](https://www.raspberrypi.com/documentation/accessories/camera.html).
## Documentation
Full documentation for the _Raspberry Pi Camera Algorithm and Tuning Guide_ can
be found [here](https://datasheets.raspberrypi.com/camera/raspberry-pi-camera-guide.pdf).
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/cam_helper/cam_helper.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* helper class providing camera information
*/
#pragma once
#include <memory>
#include <string>
#include <utility>
#include <libcamera/base/span.h>
#include <libcamera/base/utils.h>
#include "controller/camera_mode.h"
#include "controller/controller.h"
#include "controller/metadata.h"
#include "md_parser.h"
#include "libcamera/internal/v4l2_videodevice.h"
namespace RPiController {
/*
* The CamHelper class provides a number of facilities that anyone trying
* to drive a camera will need to know, but which are not provided by the
* standard driver framework. Specifically, it provides:
*
* A "CameraMode" structure to describe extra information about the chosen
* mode of the driver. For example, how it is cropped from the full sensor
* area, how it is scaled, whether pixels are averaged compared to the full
* resolution.
*
* The ability to convert between number of lines of exposure and actual
* exposure time, and to convert between the sensor's gain codes and actual
* gains.
*
* A function to return the number of frames of delay between updating exposure,
* analogue gain and vblanking, and for the changes to take effect. For many
* sensors these take the values 2, 1 and 2 respectively, but sensors that are
* different will need to over-ride the default function provided.
*
* A function to query if the sensor outputs embedded data that can be parsed.
*
* A function to return the sensitivity of a given camera mode.
*
* A parser to parse the embedded data buffers provided by some sensors (for
* example, the imx219 does; the ov5647 doesn't). This allows us to know for
* sure the exposure and gain of the frame we're looking at. CamHelper
* provides functions for converting analogue gains to and from the sensor's
* native gain codes.
*
* Finally, a set of functions that determine how to handle the vagaries of
* different camera modules on start-up or when switching modes. Some
* modules may produce one or more frames that are not yet correctly exposed,
* or where the metadata may be suspect. We have the following functions:
* HideFramesStartup(): Tell the pipeline handler not to return this many
* frames at start-up. This can also be used to hide initial frames
* while the AGC and other algorithms are sorting themselves out.
* HideFramesModeSwitch(): Tell the pipeline handler not to return this
* many frames after a mode switch (other than start-up). Some sensors
 * may produce invalid frames after a mode switch; others may not.
* MistrustFramesStartup(): At start-up a sensor may return frames for
* which we should not run any control algorithms (for example, metadata
* may be invalid).
* MistrustFramesModeSwitch(): The number of frames, after a mode switch
* (other than start-up), for which control algorithms should not run
* (for example, metadata may be unreliable).
*/
class CamHelper
{
public:
	/* Look up and instantiate the helper registered for camName. */
	static CamHelper *create(std::string const &camName);
	CamHelper(std::unique_ptr<MdParser> parser, unsigned int frameIntegrationDiff);
	virtual ~CamHelper();
	void setCameraMode(const CameraMode &mode);
	/* Parse a frame's (embedded) data buffer and fill the metadata. */
	virtual void prepare(libcamera::Span<const uint8_t> buffer,
			     Metadata &metadata);
	virtual void process(StatisticsPtr &stats, Metadata &metadata);
	/* Conversions between exposure durations and line counts. */
	virtual uint32_t exposureLines(const libcamera::utils::Duration exposure,
				       const libcamera::utils::Duration lineLength) const;
	virtual libcamera::utils::Duration exposure(uint32_t exposureLines,
						    const libcamera::utils::Duration lineLength) const;
	virtual std::pair<uint32_t, uint32_t> getBlanking(libcamera::utils::Duration &exposure,
							  libcamera::utils::Duration minFrameDuration,
							  libcamera::utils::Duration maxFrameDuration) const;
	libcamera::utils::Duration hblankToLineLength(uint32_t hblank) const;
	uint32_t lineLengthToHblank(const libcamera::utils::Duration &duration) const;
	libcamera::utils::Duration lineLengthPckToDuration(uint32_t lineLengthPck) const;
	/* Sensor-specific conversions between real gain and register codes. */
	virtual uint32_t gainCode(double gain) const = 0;
	virtual double gain(uint32_t gainCode) const = 0;
	/* Frames of delay before control updates take effect on the sensor. */
	virtual void getDelays(int &exposureDelay, int &gainDelay,
			       int &vblankDelay, int &hblankDelay) const;
	virtual bool sensorEmbeddedDataPresent() const;
	virtual double getModeSensitivity(const CameraMode &mode) const;
	/* Start-up / mode-switch frame handling, see the comment block above. */
	virtual unsigned int hideFramesStartup() const;
	virtual unsigned int hideFramesModeSwitch() const;
	virtual unsigned int mistrustFramesStartup() const;
	virtual unsigned int mistrustFramesModeSwitch() const;
protected:
	void parseEmbeddedData(libcamera::Span<const uint8_t> buffer,
			       Metadata &metadata);
	virtual void populateMetadata(const MdParser::RegisterMap &registers,
				      Metadata &metadata) const;
	/* Embedded-data parser, may be null for sensors without embedded data. */
	std::unique_ptr<MdParser> parser_;
	CameraMode mode_;
private:
	/*
	 * Smallest difference between the frame length and integration time,
	 * in units of lines.
	 */
	unsigned int frameIntegrationDiff_;
};
/*
* This is for registering camera helpers with the system, so that the
* CamHelper::Create function picks them up automatically.
*/
/* Factory function type returning a newly allocated helper. */
typedef CamHelper *(*CamHelperCreateFunc)();
/* Constructing a static instance registers the helper under camName. */
struct RegisterCamHelper
{
	RegisterCamHelper(char const *camName,
			  CamHelperCreateFunc createFunc);
};
} /* namespace RPiController */
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/cam_helper/cam_helper_ov64a40.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2021, Raspberry Pi Ltd
* Copyright (C) 2023, Ideas on Board Oy.
*
* camera information for ov64a40 sensor
*/
#include <assert.h>
#include "cam_helper.h"
using namespace RPiController;
/* Camera helper for the OmniVision OV64A40 sensor. */
class CamHelperOv64a40 : public CamHelper
{
public:
	CamHelperOv64a40();
	uint32_t gainCode(double gain) const override;
	double gain(uint32_t gainCode) const override;
	void getDelays(int &exposureDelay, int &gainDelay,
		       int &vblankDelay, int &hblankDelay) const override;
	double getModeSensitivity(const CameraMode &mode) const override;
private:
	/*
	 * Smallest difference between the frame length and integration time,
	 * in units of lines.
	 */
	static constexpr int frameIntegrationDiff = 32;
};
/* No metadata parser: this sensor's embedded data is not parsed here. */
CamHelperOv64a40::CamHelperOv64a40()
	: CamHelper({}, frameIntegrationDiff)
{
}
/* The gain register holds the analogue gain in steps of 1/128. */
uint32_t CamHelperOv64a40::gainCode(double gain) const
{
	const double code = gain * 128.0;
	return static_cast<uint32_t>(code);
}
/* Inverse of gainCode(): convert a 1/128-step code back to a real gain. */
double CamHelperOv64a40::gain(uint32_t gainCode) const
{
	return gainCode / 128.0;
}
/* All controls take effect two frames after being written. */
void CamHelperOv64a40::getDelays(int &exposureDelay, int &gainDelay,
				 int &vblankDelay, int &hblankDelay) const
{
	/* The driver appears to behave as follows: */
	exposureDelay = 2;
	gainDelay = 2;
	vblankDelay = 2;
	hblankDelay = 2;
}
/*
 * Report the relative sensitivity of the mode: binned modes gather more
 * light per output pixel, scaled further by the horizontal scaling factor.
 */
double CamHelperOv64a40::getModeSensitivity(const CameraMode &mode) const
{
	if (mode.binX < 2)
		return 1.0;
	if (mode.scaleX >= 4)
		return 4.0;
	if (mode.scaleX >= 2)
		return 2.0;
	return 1.0;
}
/* Factory hooked into the helper registry below. */
static CamHelper *create()
{
	return new CamHelperOv64a40();
}
/* Register the helper under the sensor's kernel name. */
static RegisterCamHelper reg("ov64a40", &create);
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/cam_helper/cam_helper_imx519.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Based on cam_helper_imx477.cpp
* Copyright (C) 2020, Raspberry Pi Ltd
*
* camera helper for imx519 sensor
* Copyright (C) 2021, Arducam Technology co., Ltd.
*/
#include <assert.h>
#include <cmath>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <libcamera/base/log.h>
#include "cam_helper.h"
#include "md_parser.h"
using namespace RPiController;
using namespace libcamera;
using libcamera::utils::Duration;
namespace libcamera {
LOG_DECLARE_CATEGORY(IPARPI)
}
/*
* We care about two gain registers and a pair of exposure registers. Their
* I2C addresses from the Sony IMX519 datasheet:
*/
constexpr uint32_t expHiReg = 0x0202;
constexpr uint32_t expLoReg = 0x0203;
constexpr uint32_t gainHiReg = 0x0204;
constexpr uint32_t gainLoReg = 0x0205;
constexpr uint32_t frameLengthHiReg = 0x0340;
constexpr uint32_t frameLengthLoReg = 0x0341;
constexpr uint32_t lineLengthHiReg = 0x0342;
constexpr uint32_t lineLengthLoReg = 0x0343;
/* Registers extracted from the embedded data stream by the SMIA parser. */
constexpr std::initializer_list<uint32_t> registerList =
	{ expHiReg, expLoReg, gainHiReg, gainLoReg, frameLengthHiReg, frameLengthLoReg,
	  lineLengthHiReg, lineLengthLoReg };
/* Camera helper for the Sony IMX519 sensor, with long exposure support. */
class CamHelperImx519 : public CamHelper
{
public:
	CamHelperImx519();
	uint32_t gainCode(double gain) const override;
	double gain(uint32_t gainCode) const override;
	void prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata) override;
	std::pair<uint32_t, uint32_t> getBlanking(Duration &exposure, Duration minFrameDuration,
						  Duration maxFrameDuration) const override;
	void getDelays(int &exposureDelay, int &gainDelay,
		       int &vblankDelay, int &hblankDelay) const override;
	bool sensorEmbeddedDataPresent() const override;
private:
	/*
	 * Smallest difference between the frame length and integration time,
	 * in units of lines.
	 */
	static constexpr int frameIntegrationDiff = 32;
	/* Maximum frame length allowable for long exposure calculations. */
	static constexpr int frameLengthMax = 0xffdc;
	/* Largest long exposure scale factor given as a left shift on the frame length. */
	static constexpr int longExposureShiftMax = 7;
	void populateMetadata(const MdParser::RegisterMap &registers,
			      Metadata &metadata) const override;
};
/* The IMX519 provides embedded data, parsed with the SMIA metadata parser. */
CamHelperImx519::CamHelperImx519()
	: CamHelper(std::make_unique<MdParserSmia>(registerList), frameIntegrationDiff)
{
}
/* Sensor gain model: gain = 1024 / (1024 - code). */
uint32_t CamHelperImx519::gainCode(double gain) const
{
	const double code = 1024 - 1024 / gain;
	return static_cast<uint32_t>(code);
}
/*
 * Inverse of gainCode().
 * NOTE(review): a code of 1024 would divide by zero — presumably the driver
 * never reports it; confirm against the driver's gain range.
 */
double CamHelperImx519::gain(uint32_t gainCode) const
{
	const double denominator = 1024 - gainCode;
	return 1024.0 / denominator;
}
/*
 * Parse the embedded data buffer, then reconcile it with the DeviceStatus
 * previously filled in from DelayedControls when long exposure mode is in use.
 */
void CamHelperImx519::prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata)
{
	MdParser::RegisterMap registers;
	DeviceStatus deviceStatus;
	/* Metadata::get() returns non-zero when the key is absent. */
	if (metadata.get("device.status", deviceStatus)) {
		LOG(IPARPI, Error) << "DeviceStatus not found from DelayedControls";
		return;
	}
	parseEmbeddedData(buffer, metadata);
	/*
	 * The DeviceStatus struct is first populated with values obtained from
	 * DelayedControls. If this reports frame length is > frameLengthMax,
	 * it means we are using a long exposure mode. Since the long exposure
	 * scale factor is not returned back through embedded data, we must rely
	 * on the existing exposure lines and frame length values returned by
	 * DelayedControls.
	 *
	 * Otherwise, all values are updated with what is reported in the
	 * embedded data.
	 */
	if (deviceStatus.frameLength > frameLengthMax) {
		DeviceStatus parsedDeviceStatus;
		metadata.get("device.status", parsedDeviceStatus);
		parsedDeviceStatus.shutterSpeed = deviceStatus.shutterSpeed;
		parsedDeviceStatus.frameLength = deviceStatus.frameLength;
		metadata.set("device.status", parsedDeviceStatus);
		LOG(IPARPI, Debug) << "Metadata updated for long exposure: "
				   << parsedDeviceStatus;
	}
}
/*
 * Compute vertical and horizontal blanking for the requested exposure and
 * frame duration limits, engaging the sensor's long exposure scale factor
 * (a power-of-two shift on the frame length) when the frame length would
 * exceed the register limit. exposure may be adjusted to what is achievable.
 * Returns { vblank, hblank } in lines / pixels.
 */
std::pair<uint32_t, uint32_t> CamHelperImx519::getBlanking(Duration &exposure,
							   Duration minFrameDuration,
							   Duration maxFrameDuration) const
{
	uint32_t frameLength, exposureLines;
	unsigned int shift = 0;
	auto [vblank, hblank] = CamHelper::getBlanking(exposure, minFrameDuration,
						       maxFrameDuration);
	frameLength = mode_.height + vblank;
	Duration lineLength = hblankToLineLength(hblank);
	/*
	 * Check if the frame length calculated needs to be setup for long
	 * exposure mode. This will require us to use a long exposure scale
	 * factor provided by a shift operation in the sensor.
	 */
	while (frameLength > frameLengthMax) {
		if (++shift > longExposureShiftMax) {
			/* Clamp at the maximum achievable long exposure. */
			shift = longExposureShiftMax;
			frameLength = frameLengthMax;
			break;
		}
		frameLength >>= 1;
	}
	if (shift) {
		/* Account for any rounding in the scaled frame length value. */
		frameLength <<= shift;
		exposureLines = CamHelperImx519::exposureLines(exposure, lineLength);
		exposureLines = std::min(exposureLines, frameLength - frameIntegrationDiff);
		exposure = CamHelperImx519::exposure(exposureLines, lineLength);
	}
	return { frameLength - mode_.height, hblank };
}
/* Exposure/gain apply after 2 frames; blanking changes after 3. */
void CamHelperImx519::getDelays(int &exposureDelay, int &gainDelay,
				int &vblankDelay, int &hblankDelay) const
{
	exposureDelay = 2;
	gainDelay = 2;
	vblankDelay = 3;
	hblankDelay = 3;
}
/* The IMX519 produces a parsable embedded data stream. */
bool CamHelperImx519::sensorEmbeddedDataPresent() const
{
	return true;
}
/*
 * Assemble a DeviceStatus from the 16-bit big-endian register pairs parsed
 * out of the embedded data, and store it in the frame metadata.
 */
void CamHelperImx519::populateMetadata(const MdParser::RegisterMap &registers,
				       Metadata &metadata) const
{
	DeviceStatus deviceStatus;
	deviceStatus.lineLength = lineLengthPckToDuration(registers.at(lineLengthHiReg) * 256 +
							  registers.at(lineLengthLoReg));
	deviceStatus.shutterSpeed = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg),
					     deviceStatus.lineLength);
	deviceStatus.analogueGain = gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg));
	deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
	metadata.set("device.status", deviceStatus);
}
/* Factory hooked into the helper registry below. */
static CamHelper *create()
{
	return new CamHelperImx519();
}
/* Register the helper under the sensor's kernel name. */
static RegisterCamHelper reg("imx519", &create);
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/cam_helper/cam_helper_ov5647.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* camera information for ov5647 sensor
*/
#include <assert.h>
#include "cam_helper.h"
using namespace RPiController;
/* Camera helper for the OmniVision OV5647 sensor (no embedded data). */
class CamHelperOv5647 : public CamHelper
{
public:
	CamHelperOv5647();
	uint32_t gainCode(double gain) const override;
	double gain(uint32_t gainCode) const override;
	void getDelays(int &exposureDelay, int &gainDelay,
		       int &vblankDelay, int &hblankDelay) const override;
	unsigned int hideFramesStartup() const override;
	unsigned int hideFramesModeSwitch() const override;
	unsigned int mistrustFramesStartup() const override;
	unsigned int mistrustFramesModeSwitch() const override;
private:
	/*
	 * Smallest difference between the frame length and integration time,
	 * in units of lines.
	 */
	static constexpr int frameIntegrationDiff = 4;
};
/*
* OV5647 doesn't output metadata, so we have to use the "unicam parser" which
* works by counting frames.
*/
/* No metadata parser is passed: see the comment above about counting frames. */
CamHelperOv5647::CamHelperOv5647()
	: CamHelper({}, frameIntegrationDiff)
{
}
/* The gain register holds the analogue gain in steps of 1/16. */
uint32_t CamHelperOv5647::gainCode(double gain) const
{
	const double code = gain * 16.0;
	return static_cast<uint32_t>(code);
}
/* Inverse of gainCode(): convert a 1/16-step code back to a real gain. */
double CamHelperOv5647::gain(uint32_t gainCode) const
{
	return gainCode / 16.0;
}
/* All controls take effect two frames after being written. */
void CamHelperOv5647::getDelays(int &exposureDelay, int &gainDelay,
				int &vblankDelay, int &hblankDelay) const
{
	/*
	 * We run this sensor in a mode where the gain delay is bumped up to
	 * 2. It seems to be the only way to make the delays "predictable".
	 */
	exposureDelay = 2;
	gainDelay = 2;
	vblankDelay = 2;
	hblankDelay = 2;
}
/* Number of initial frames the pipeline handler should not deliver. */
unsigned int CamHelperOv5647::hideFramesStartup() const
{
	/*
	 * On startup, we get a couple of under-exposed frames which
	 * we don't want shown.
	 */
	return 2;
}
/* Number of frames to hide after a mode switch. */
unsigned int CamHelperOv5647::hideFramesModeSwitch() const
{
	/*
	 * After a mode switch, we get a couple of under-exposed frames which
	 * we don't want shown.
	 */
	return 2;
}
/* Number of initial frames whose statistics the algorithms must ignore. */
unsigned int CamHelperOv5647::mistrustFramesStartup() const
{
	/*
	 * First couple of frames are under-exposed and are no good for control
	 * algos.
	 */
	return 2;
}
/* Number of post-mode-switch frames whose statistics must be ignored. */
unsigned int CamHelperOv5647::mistrustFramesModeSwitch() const
{
	/*
	 * First couple of frames are under-exposed even after a simple
	 * mode switch, and are no good for control algos.
	 */
	return 2;
}
/* Factory hooked into the helper registry below. */
static CamHelper *create()
{
	return new CamHelperOv5647();
}
/* Register the helper under the sensor's kernel name. */
static RegisterCamHelper reg("ov5647", &create);
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/cam_helper/cam_helper_imx219.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* camera helper for imx219 sensor
*/
#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
/*
* We have observed that the imx219 embedded data stream randomly returns junk
* register values. Do not rely on embedded data until this has been resolved.
*/
#define ENABLE_EMBEDDED_DATA 0
#include "cam_helper.h"
#if ENABLE_EMBEDDED_DATA
#include "md_parser.h"
#endif
using namespace RPiController;
/*
 * We care about one gain register and a pair of exposure registers. Their I2C
 * addresses from the Sony IMX219 datasheet:
 */
constexpr uint32_t gainReg = 0x157;
constexpr uint32_t expHiReg = 0x15a;
constexpr uint32_t expLoReg = 0x15b;
constexpr uint32_t frameLengthHiReg = 0x160;
constexpr uint32_t frameLengthLoReg = 0x161;
constexpr uint32_t lineLengthHiReg = 0x162;
constexpr uint32_t lineLengthLoReg = 0x163;
/* Registers the embedded-data parser must locate (unused while parsing is disabled). */
constexpr std::initializer_list<uint32_t> registerList [[maybe_unused]]
	= { expHiReg, expLoReg, gainReg, frameLengthHiReg, frameLengthLoReg,
	    lineLengthHiReg, lineLengthLoReg };

/* Sensor-specific conversions and policies for the Sony IMX219. */
class CamHelperImx219 : public CamHelper
{
public:
	CamHelperImx219();
	uint32_t gainCode(double gain) const override;
	double gain(uint32_t gainCode) const override;
	unsigned int mistrustFramesModeSwitch() const override;
	bool sensorEmbeddedDataPresent() const override;

private:
	/*
	 * Smallest difference between the frame length and integration time,
	 * in units of lines.
	 */
	static constexpr int frameIntegrationDiff = 4;

	void populateMetadata(const MdParser::RegisterMap &registers,
			      Metadata &metadata) const override;
};

CamHelperImx219::CamHelperImx219()
/* Embedded data is unreliable on this sensor, so the parser is normally disabled. */
#if ENABLE_EMBEDDED_DATA
	: CamHelper(std::make_unique<MdParserSmia>(registerList), frameIntegrationDiff)
#else
	: CamHelper({}, frameIntegrationDiff)
#endif
{
}
uint32_t CamHelperImx219::gainCode(double gain) const
{
	/*
	 * IMX219 analogue gain register: gain = 256 / (256 - code), hence
	 * code = 256 - 256 / gain. Use a named cast instead of the old
	 * C-style cast, matching the other helpers in this directory.
	 */
	return static_cast<uint32_t>(256 - 256 / gain);
}
double CamHelperImx219::gain(uint32_t gainCode) const
{
	/* Inverse of gainCode(): gain = 256 / (256 - code). */
	double denominator = 256 - gainCode;
	return 256.0 / denominator;
}
/* Number of post-mode-switch frames whose statistics must be ignored. */
unsigned int CamHelperImx219::mistrustFramesModeSwitch() const
{
	/*
	 * For reasons unknown, we do occasionally get a bogus metadata frame
	 * at a mode switch (though not at start-up). Possibly warrants some
	 * investigation, though not a big deal.
	 */
	return 1;
}

/* Embedded data support follows the compile-time switch above. */
bool CamHelperImx219::sensorEmbeddedDataPresent() const
{
	return ENABLE_EMBEDDED_DATA;
}

/* Converts raw register values parsed from embedded data into a DeviceStatus. */
void CamHelperImx219::populateMetadata(const MdParser::RegisterMap &registers,
				       Metadata &metadata) const
{
	DeviceStatus deviceStatus;

	/* lineLength must be computed first: shutterSpeed depends on it. */
	deviceStatus.lineLength = lineLengthPckToDuration(registers.at(lineLengthHiReg) * 256 +
							  registers.at(lineLengthLoReg));
	deviceStatus.shutterSpeed = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg),
					     deviceStatus.lineLength);
	deviceStatus.analogueGain = gain(registers.at(gainReg));
	deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);

	metadata.set("device.status", deviceStatus);
}

/* Factory hooked into the helper registry below. */
static CamHelper *create()
{
	return new CamHelperImx219();
}

/* Registers this helper under the sensor name at static-init time. */
static RegisterCamHelper reg("imx219", &create);
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/cam_helper/md_parser_smia.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019-2021, Raspberry Pi Ltd
*
* SMIA specification based embedded data parser
*/
#include <libcamera/base/log.h>
#include "md_parser.h"
using namespace RPiController;
using namespace libcamera;
/*
 * This function goes through the embedded data to find the offsets (not
 * values!), in the data block, where the values of the given registers can
 * subsequently be found.
 *
 * Embedded data tag bytes, from Sony IMX219 datasheet but general to all SMIA
 * sensors, I think.
 */
constexpr unsigned int LineStart = 0x0a;	/* marks the start of an embedded data line */
constexpr unsigned int LineEndTag = 0x07;	/* marks the end of an embedded data line */
constexpr unsigned int RegHiBits = 0xaa;	/* next byte is the high 8 bits of the register address */
constexpr unsigned int RegLowBits = 0xa5;	/* next byte is the low 8 bits of the register address */
constexpr unsigned int RegValue = 0x5a;		/* next byte is a register value */
constexpr unsigned int RegSkip = 0x55;		/* dummy byte / "register skipped" */

MdParserSmia::MdParserSmia(std::initializer_list<uint32_t> registerList)
{
	/* Prime the offset map with the registers we have to locate. */
	for (auto r : registerList)
		offsets_[r] = {};
}
/*
 * Extract the requested register values from an embedded data buffer.
 * Offsets are cached between frames; a full re-scan happens only after
 * reset() or when a register went missing on the previous frame.
 */
MdParser::Status MdParserSmia::parse(libcamera::Span<const uint8_t> buffer,
				     RegisterMap &registers)
{
	if (reset_) {
		/*
		 * Search again through the metadata for all the registers
		 * requested.
		 */
		ASSERT(bitsPerPixel_);

		for (const auto &kv : offsets_)
			offsets_[kv.first] = {};

		ParseStatus ret = findRegs(buffer);
		/*
		 * > 0 means "worked partially but parse again next time",
		 * < 0 means "hard error".
		 *
		 * In either case, we retry parsing on the next frame.
		 */
		if (ret != ParseOk)
			return ERROR;

		reset_ = false;
	}

	/* Populate the register values requested. */
	registers.clear();
	for (const auto &[reg, offset] : offsets_) {
		if (!offset) {
			/* Layout must have changed: re-scan next frame. */
			reset_ = true;
			return NOTFOUND;
		}
		registers[reg] = buffer[offset.value()];
	}

	return OK;
}
/*
 * Walk the SMIA embedded data stream and record, for every register we care
 * about, the buffer offset at which its value byte lives. Returns ParseOk
 * once all registers have been located, otherwise one of the ParseStatus
 * error codes.
 */
MdParserSmia::ParseStatus MdParserSmia::findRegs(libcamera::Span<const uint8_t> buffer)
{
	ASSERT(offsets_.size());

	if (buffer[0] != LineStart)
		return NoLineStart;

	unsigned int currentOffset = 1; /* after the LineStart */
	unsigned int currentLineStart = 0, currentLine = 0;
	unsigned int regNum = 0, regsDone = 0;

	while (1) {
		int tag = buffer[currentOffset++];

		/* Non-dummy bytes come in even-sized blocks: skip can only ever follow tag */
		/* The dummy-byte cadence depends on the raw packing (10/12/14 bpp). */
		while ((bitsPerPixel_ == 10 &&
			(currentOffset + 1 - currentLineStart) % 5 == 0) ||
		       (bitsPerPixel_ == 12 &&
			(currentOffset + 1 - currentLineStart) % 3 == 0) ||
		       (bitsPerPixel_ == 14 &&
			(currentOffset - currentLineStart) % 7 >= 4)) {
			if (buffer[currentOffset++] != RegSkip)
				return BadDummy;
		}

		int dataByte = buffer[currentOffset++];

		if (tag == LineEndTag) {
			if (dataByte != LineEndTag)
				return BadLineEnd;

			/* Give up if we've consumed all the lines we were told exist. */
			if (numLines_ && ++currentLine == numLines_)
				return MissingRegs;

			if (lineLengthBytes_) {
				/* Fixed line length: jump straight to the next line. */
				currentOffset = currentLineStart + lineLengthBytes_;

				/* Require whole line to be in the buffer (if buffer size set). */
				if (buffer.size() &&
				    currentOffset + lineLengthBytes_ > buffer.size())
					return MissingRegs;

				if (buffer[currentOffset] != LineStart)
					return NoLineStart;
			} else {
				/* allow a zero line length to mean "hunt for the next line" */
				while (currentOffset < buffer.size() &&
				       buffer[currentOffset] != LineStart)
					currentOffset++;

				if (currentOffset == buffer.size())
					return NoLineStart;
			}

			/* inc currentOffset to after LineStart */
			currentLineStart = currentOffset++;
		} else {
			/* Tag/data pairs update the running register address or deliver a value. */
			if (tag == RegHiBits)
				regNum = (regNum & 0xff) | (dataByte << 8);
			else if (tag == RegLowBits)
				regNum = (regNum & 0xff00) | dataByte;
			else if (tag == RegSkip)
				regNum++;
			else if (tag == RegValue) {
				auto reg = offsets_.find(regNum);

				if (reg != offsets_.end()) {
					/* Remember where the value byte sits, not the value itself. */
					offsets_[regNum] = currentOffset - 1;

					if (++regsDone == offsets_.size())
						return ParseOk;
				}
				regNum++;
			} else
				return IllegalTag;
		}
	}
}
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/cam_helper/cam_helper_ov9281.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2021, Raspberry Pi Ltd
*
* camera information for ov9281 sensor
*/
#include <assert.h>
#include "cam_helper.h"
using namespace RPiController;
/* Sensor-specific conversions for the OmniVision OV9281. */
class CamHelperOv9281 : public CamHelper
{
public:
	CamHelperOv9281();
	uint32_t gainCode(double gain) const override;
	double gain(uint32_t gainCode) const override;
	void getDelays(int &exposureDelay, int &gainDelay,
		       int &vblankDelay, int &hblankDelay) const override;

private:
	/*
	 * Smallest difference between the frame length and integration time,
	 * in units of lines.
	 */
	static constexpr int frameIntegrationDiff = 4;
};

/*
 * OV9281 doesn't output metadata, so we have to use the "unicam parser" which
 * works by counting frames.
 */
CamHelperOv9281::CamHelperOv9281()
	: CamHelper({}, frameIntegrationDiff)
{
}
uint32_t CamHelperOv9281::gainCode(double gain) const
{
	/* The OV9281 gain register counts the gain in 1/16th steps. */
	double code = gain * 16.0;
	return static_cast<uint32_t>(code);
}
double CamHelperOv9281::gain(uint32_t gainCode) const
{
	/* Inverse of gainCode(): the register holds gain in 1/16th steps. */
	double code = static_cast<double>(gainCode);
	return code / 16.0;
}
void CamHelperOv9281::getDelays(int &exposureDelay, int &gainDelay,
				int &vblankDelay, int &hblankDelay) const
{
	/* The driver appears to apply every control 2 frames later. */
	constexpr int frames = 2;

	exposureDelay = frames;
	gainDelay = frames;
	vblankDelay = frames;
	hblankDelay = frames;
}
/* Factory hooked into the helper registry below. */
static CamHelper *create()
{
	return new CamHelperOv9281();
}

/* Registers this helper under the sensor name at static-init time. */
static RegisterCamHelper reg("ov9281", &create);
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/cam_helper/cam_helper_imx290.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2021, Raspberry Pi Ltd
*
* camera helper for imx290 sensor
*/
#include <math.h>
#include "cam_helper.h"
using namespace RPiController;
/* Sensor-specific conversions and policies for the Sony IMX290. */
class CamHelperImx290 : public CamHelper
{
public:
	CamHelperImx290();
	uint32_t gainCode(double gain) const override;
	double gain(uint32_t gainCode) const override;
	void getDelays(int &exposureDelay, int &gainDelay,
		       int &vblankDelay, int &hblankDelay) const override;
	unsigned int hideFramesStartup() const override;
	unsigned int hideFramesModeSwitch() const override;

private:
	/*
	 * Smallest difference between the frame length and integration time,
	 * in units of lines.
	 */
	static constexpr int frameIntegrationDiff = 2;
};

/* No embedded-data parser for this sensor. */
CamHelperImx290::CamHelperImx290()
	: CamHelper({}, frameIntegrationDiff)
{
}
uint32_t CamHelperImx290::gainCode(double gain) const
{
	/* The register counts in 0.3dB steps: 66.6667 codes per decade. */
	int code = 66.6667 * log10(gain);

	/* Clamp into the valid register range [0, 0xf0]. */
	if (code < 0)
		code = 0;
	else if (code > 0xf0)
		code = 0xf0;
	return code;
}
double CamHelperImx290::gain(uint32_t gainCode) const
{
	/* Inverse of gainCode(): each code step is 0.015 decades (0.3dB). */
	double exponent = 0.015 * gainCode;
	return pow(10, exponent);
}
void CamHelperImx290::getDelays(int &exposureDelay, int &gainDelay,
				int &vblankDelay, int &hblankDelay) const
{
	/* All controls take effect 2 frames after they are written. */
	constexpr int frames = 2;

	exposureDelay = frames;
	gainDelay = frames;
	vblankDelay = frames;
	hblankDelay = frames;
}
/* Number of frames to hide from the application at stream start. */
unsigned int CamHelperImx290::hideFramesStartup() const
{
	/* On startup, we seem to get 1 bad frame. */
	return 1;
}

/* Number of frames to hide after a sensor mode switch. */
unsigned int CamHelperImx290::hideFramesModeSwitch() const
{
	/* After a mode switch, we seem to get 1 bad frame. */
	return 1;
}

/* Factory hooked into the helper registry below. */
static CamHelper *create()
{
	return new CamHelperImx290();
}

/* Registers this helper under the sensor name at static-init time. */
static RegisterCamHelper reg("imx290", &create);
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/cam_helper/cam_helper_imx708.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2022, Raspberry Pi Ltd
*
* camera helper for imx708 sensor
*/
#include <cmath>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <libcamera/base/log.h>
#include "controller/pdaf_data.h"
#include "cam_helper.h"
#include "md_parser.h"
using namespace RPiController;
using namespace libcamera;
using libcamera::utils::Duration;
using namespace std::literals::chrono_literals;
namespace libcamera {
LOG_DECLARE_CATEGORY(IPARPI)
}
/*
 * We care about two gain registers and a pair of exposure registers. Their
 * I2C addresses from the Sony imx708 datasheet:
 */
constexpr uint32_t expHiReg = 0x0202;
constexpr uint32_t expLoReg = 0x0203;
constexpr uint32_t gainHiReg = 0x0204;
constexpr uint32_t gainLoReg = 0x0205;
constexpr uint32_t frameLengthHiReg = 0x0340;
constexpr uint32_t frameLengthLoReg = 0x0341;
constexpr uint32_t lineLengthHiReg = 0x0342;
constexpr uint32_t lineLengthLoReg = 0x0343;
constexpr uint32_t temperatureReg = 0x013a;
/* Registers the embedded-data parser must locate on every frame. */
constexpr std::initializer_list<uint32_t> registerList =
	{ expHiReg, expLoReg, gainHiReg, gainLoReg, lineLengthHiReg,
	  lineLengthLoReg, frameLengthHiReg, frameLengthLoReg, temperatureReg };

/* Sensor-specific conversions, embedded-data and PDAF/AE-HIST handling for the IMX708. */
class CamHelperImx708 : public CamHelper
{
public:
	CamHelperImx708();
	uint32_t gainCode(double gain) const override;
	double gain(uint32_t gain_code) const override;
	void prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata) override;
	void process(StatisticsPtr &stats, Metadata &metadata) override;
	std::pair<uint32_t, uint32_t> getBlanking(Duration &exposure, Duration minFrameDuration,
						  Duration maxFrameDuration) const override;
	void getDelays(int &exposureDelay, int &gainDelay,
		       int &vblankDelay, int &hblankDelay) const override;
	bool sensorEmbeddedDataPresent() const override;
	double getModeSensitivity(const CameraMode &mode) const override;
	unsigned int hideFramesModeSwitch() const override;
	unsigned int hideFramesStartup() const override;

private:
	/*
	 * Smallest difference between the frame length and integration time,
	 * in units of lines.
	 */
	static constexpr int frameIntegrationDiff = 22;
	/* Maximum frame length allowable for long exposure calculations. */
	static constexpr int frameLengthMax = 0xffdc;
	/* Largest long exposure scale factor given as a left shift on the frame length. */
	static constexpr int longExposureShiftMax = 7;

	/* Size of the PDAF statistics grid delivered in embedded data. */
	static constexpr int pdafStatsRows = 12;
	static constexpr int pdafStatsCols = 16;

	void populateMetadata(const MdParser::RegisterMap &registers,
			      Metadata &metadata) const override;

	static bool parsePdafData(const uint8_t *ptr, size_t len, unsigned bpp,
				  PdafRegions &pdaf);

	bool parseAEHist(const uint8_t *ptr, size_t len, unsigned bpp);
	void putAGCStatistics(StatisticsPtr stats);

	/* Sensor AE histogram state, refreshed in prepare() when present. */
	Histogram aeHistLinear_;
	uint32_t aeHistAverage_;
	bool aeHistValid_;
};

CamHelperImx708::CamHelperImx708()
	: CamHelper(std::make_unique<MdParserSmia>(registerList), frameIntegrationDiff),
	  aeHistLinear_{}, aeHistAverage_(0), aeHistValid_(false)
{
}
uint32_t CamHelperImx708::gainCode(double gain) const
{
	/* Analogue gain register encoding: code = 1024 - 1024 / gain. */
	double code = 1024 - 1024 / gain;
	return static_cast<uint32_t>(code);
}
double CamHelperImx708::gain(uint32_t gain_code) const
{
	/* Inverse of gainCode(): gain = 1024 / (1024 - code). */
	double denominator = 1024 - gain_code;
	return 1024.0 / denominator;
}
/*
 * Parse the embedded data buffer: register values, the PDAF grid and the
 * sensor AE histogram, then reconcile the result with the DeviceStatus that
 * DelayedControls predicted for this frame.
 */
void CamHelperImx708::prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata)
{
	MdParser::RegisterMap registers;
	DeviceStatus deviceStatus;

	LOG(IPARPI, Debug) << "Embedded buffer size: " << buffer.size();

	if (metadata.get("device.status", deviceStatus)) {
		LOG(IPARPI, Error) << "DeviceStatus not found from DelayedControls";
		return;
	}

	parseEmbeddedData(buffer, metadata);

	/*
	 * Parse PDAF data, which we expect to occupy the third scanline
	 * of embedded data. As PDAF is quite sensor-specific, it's parsed here.
	 */
	size_t bytesPerLine = (mode_.width * mode_.bitdepth) >> 3;

	if (buffer.size() > 2 * bytesPerLine) {
		PdafRegions pdaf;
		if (parsePdafData(&buffer[2 * bytesPerLine],
				  buffer.size() - 2 * bytesPerLine,
				  mode_.bitdepth, pdaf))
			metadata.set("pdaf.regions", pdaf);
	}

	/* Parse AE-HIST data where present */
	if (buffer.size() > 3 * bytesPerLine) {
		aeHistValid_ = parseAEHist(&buffer[3 * bytesPerLine],
					   buffer.size() - 3 * bytesPerLine,
					   mode_.bitdepth);
	}

	/*
	 * The DeviceStatus struct is first populated with values obtained from
	 * DelayedControls. If this reports frame length is > frameLengthMax,
	 * it means we are using a long exposure mode. Since the long exposure
	 * scale factor is not returned back through embedded data, we must rely
	 * on the existing exposure lines and frame length values returned by
	 * DelayedControls.
	 *
	 * Otherwise, all values are updated with what is reported in the
	 * embedded data.
	 */
	if (deviceStatus.frameLength > frameLengthMax) {
		DeviceStatus parsedDeviceStatus;

		metadata.get("device.status", parsedDeviceStatus);
		parsedDeviceStatus.shutterSpeed = deviceStatus.shutterSpeed;
		parsedDeviceStatus.frameLength = deviceStatus.frameLength;
		metadata.set("device.status", parsedDeviceStatus);

		LOG(IPARPI, Debug) << "Metadata updated for long exposure: "
				   << parsedDeviceStatus;
	}
}
void CamHelperImx708::process(StatisticsPtr &stats, [[maybe_unused]] Metadata &metadata)
{
	/* Only feed the sensor's AE statistics to AGC once we have parsed them. */
	if (!aeHistValid_)
		return;

	putAGCStatistics(stats);
}
/*
 * Compute vertical/horizontal blanking for the requested exposure, switching
 * into the sensor's long exposure mode (a power-of-two frame length scale
 * factor) when the frame length would exceed the register maximum.
 */
std::pair<uint32_t, uint32_t> CamHelperImx708::getBlanking(Duration &exposure,
							   Duration minFrameDuration,
							   Duration maxFrameDuration) const
{
	uint32_t frameLength, exposureLines;
	unsigned int shift = 0;

	auto [vblank, hblank] = CamHelper::getBlanking(exposure, minFrameDuration,
						       maxFrameDuration);

	frameLength = mode_.height + vblank;
	Duration lineLength = hblankToLineLength(hblank);

	/*
	 * Check if the frame length calculated needs to be setup for long
	 * exposure mode. This will require us to use a long exposure scale
	 * factor provided by a shift operation in the sensor.
	 */
	while (frameLength > frameLengthMax) {
		if (++shift > longExposureShiftMax) {
			/* Cap at the maximum scale factor the sensor supports. */
			shift = longExposureShiftMax;
			frameLength = frameLengthMax;
			break;
		}
		frameLength >>= 1;
	}

	if (shift) {
		/* Account for any rounding in the scaled frame length value. */
		frameLength <<= shift;
		exposureLines = CamHelper::exposureLines(exposure, lineLength);
		exposureLines = std::min(exposureLines, frameLength - frameIntegrationDiff);
		exposure = CamHelper::exposure(exposureLines, lineLength);
	}

	return { frameLength - mode_.height, hblank };
}
void CamHelperImx708::getDelays(int &exposureDelay, int &gainDelay,
				int &vblankDelay, int &hblankDelay) const
{
	/* Exposure and gain apply 2 frames after writing; blanking takes 3. */
	exposureDelay = 2;
	gainDelay = 2;
	vblankDelay = 3;
	hblankDelay = 3;
}
/* The IMX708 always supplies an embedded data stream. */
bool CamHelperImx708::sensorEmbeddedDataPresent() const
{
	return true;
}
double CamHelperImx708::getModeSensitivity(const CameraMode &mode) const
{
	/* In binned modes (width <= 2304), sensitivity increases by a factor of 2. */
	if (mode.width > 2304)
		return 1.0;

	return 2.0;
}
unsigned int CamHelperImx708::hideFramesModeSwitch() const
{
	/*
	 * We need to drop the first startup frame in HDR mode.
	 * Unfortunately the only way to currently determine if the sensor is
	 * in the HDR mode is to match the resolution and framerate - the HDR
	 * mode only runs up to 30fps.
	 */
	bool hdrMode = mode_.width == 2304 && mode_.height == 1296 &&
		       mode_.minFrameDuration > 1.0s / 32;

	return hdrMode ? 1 : 0;
}
/* Startup follows the same drop policy as a mode switch (HDR first frame). */
unsigned int CamHelperImx708::hideFramesStartup() const
{
	return hideFramesModeSwitch();
}
/* Converts raw register values parsed from embedded data into a DeviceStatus. */
void CamHelperImx708::populateMetadata(const MdParser::RegisterMap &registers,
				       Metadata &metadata) const
{
	DeviceStatus deviceStatus;

	/* lineLength must be computed first: shutterSpeed depends on it. */
	deviceStatus.lineLength = lineLengthPckToDuration(registers.at(lineLengthHiReg) * 256 +
							  registers.at(lineLengthLoReg));
	deviceStatus.shutterSpeed = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg),
					     deviceStatus.lineLength);
	deviceStatus.analogueGain = gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg));
	deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
	/* Temperature register is a signed byte, clamped to the documented range. */
	deviceStatus.sensorTemperature = std::clamp<int8_t>(registers.at(temperatureReg), -20, 80);

	metadata.set("device.status", deviceStatus);
}
/*
 * Decode the sensor's packed PDAF block into a pdafStatsCols x pdafStatsRows
 * grid of confidence/phase values. Returns false if the data doesn't look
 * like a supported PDAF layout.
 */
bool CamHelperImx708::parsePdafData(const uint8_t *ptr, size_t len,
				    unsigned bpp, PdafRegions &pdaf)
{
	size_t step = bpp >> 1; /* bytes per PDAF grid entry */

	/* Sanity-check the header bytes and that a full grid is present. */
	if (bpp < 10 || bpp > 14 || len < 194 * step || ptr[0] != 0 || ptr[1] >= 0x40) {
		LOG(IPARPI, Error) << "PDAF data in unsupported format";
		return false;
	}

	pdaf.init({ pdafStatsCols, pdafStatsRows });

	ptr += 2 * step;
	for (unsigned i = 0; i < pdafStatsRows; ++i) {
		for (unsigned j = 0; j < pdafStatsCols; ++j) {
			/* 11-bit confidence and signed phase, packed across 3 bytes. */
			unsigned c = (ptr[0] << 3) | (ptr[1] >> 5);
			int p = (((ptr[1] & 0x0F) - (ptr[1] & 0x10)) << 6) | (ptr[2] >> 2);
			PdafData pdafData;
			pdafData.conf = c;
			/* Zero confidence means no valid phase measurement. */
			pdafData.phase = c ? p : 0;
			pdaf.set(libcamera::Point(j, i), { pdafData, 1, 0 });
			ptr += step;
		}
	}

	return true;
}
/*
 * Decode the sensor's AE histogram block: a 128-bin linear histogram plus the
 * first bins of a log histogram used to refine the average. On success,
 * stores the histogram and average into aeHistLinear_/aeHistAverage_ and
 * returns true.
 */
bool CamHelperImx708::parseAEHist(const uint8_t *ptr, size_t len, unsigned bpp)
{
	static constexpr unsigned int PipelineBits = Statistics::NormalisationFactorPow2;

	uint64_t count = 0, sum = 0;
	size_t step = bpp >> 1; /* bytes per histogram bin */
	uint32_t hist[128];

	if (len < 144 * step)
		return false;

	/*
	 * Read the 128 bin linear histogram, which by default covers
	 * the full range of the HDR shortest exposure (small values are
	 * expected to dominate, so pixel-value resolution will be poor).
	 */
	for (unsigned i = 0; i < 128; ++i) {
		/* Each bin is a 22-bit count terminated by a 0x55 marker byte. */
		if (ptr[3] != 0x55)
			return false;
		uint32_t c = (ptr[0] << 14) + (ptr[1] << 6) + (ptr[2] >> 2);
		hist[i] = c >> 2; /* pixels to quads */
		if (i != 0) {
			count += c;
			sum += c *
			       (i * (1u << (PipelineBits - 7)) +
				(1u << (PipelineBits - 8)));
		}
		ptr += step;
	}

	/*
	 * Now use the first 9 bins of the log histogram (these should be
	 * subdivisions of the smallest linear bin), to get a more accurate
	 * average value. Don't assume that AEHIST1_AVERAGE is present.
	 */
	for (unsigned i = 0; i < 9; ++i) {
		if (ptr[3] != 0x55)
			return false;
		uint32_t c = (ptr[0] << 14) + (ptr[1] << 6) + (ptr[2] >> 2);
		count += c;
		sum += c *
		       ((3u << PipelineBits) >> (17 - i));
		ptr += step;
	}

	/* Cross-check the two histograms to detect corrupted data. */
	if ((unsigned)((ptr[0] << 12) + (ptr[1] << 4) + (ptr[2] >> 4)) !=
	    hist[1]) {
		LOG(IPARPI, Error) << "Lin/Log histogram mismatch";
		return false;
	}

	aeHistLinear_ = Histogram(hist, 128);
	aeHistAverage_ = count ? (sum / count) : 0;

	return count != 0;
}
void CamHelperImx708::putAGCStatistics(StatisticsPtr stats)
{
	/*
	 * For HDR mode, copy sensor's AE/AGC statistics over ISP's, so the
	 * AGC algorithm sees a linear response to exposure and gain changes.
	 *
	 * Histogram: Just copy the "raw" histogram over the tone-mapped one,
	 * although they have different distributions (raw values are lower).
	 * Tuning should either ignore it, or constrain for highlights only.
	 *
	 * Average: Overwrite all regional averages with a global raw average,
	 * scaled by a fiddle-factor so that a conventional (non-HDR) y_target
	 * of e.g. 0.17 will map to a suitable level for HDR.
	 */
	stats->yHist = aeHistLinear_;

	constexpr unsigned int HdrHeadroomFactor = 4;
	uint64_t v = HdrHeadroomFactor * aeHistAverage_;
	for (auto &region : stats->agcRegions) {
		/* Same scaled average for all channels in every region. */
		region.val.rSum = region.val.gSum = region.val.bSum = region.counted * v;
	}
}
/* Factory hooked into the helper registry below. */
static CamHelper *create()
{
	return new CamHelperImx708();
}

/* One helper serves all IMX708 camera module variants. */
static RegisterCamHelper reg("imx708", &create);
static RegisterCamHelper regWide("imx708_wide", &create);
static RegisterCamHelper regNoIr("imx708_noir", &create);
static RegisterCamHelper regWideNoIr("imx708_wide_noir", &create);
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/cam_helper/cam_helper_imx296.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
* Camera helper for IMX296 sensor
*/
#include <algorithm>
#include <cmath>
#include <stddef.h>
#include "cam_helper.h"
using namespace RPiController;
using libcamera::utils::Duration;
using namespace std::literals::chrono_literals;
/* Sensor-specific conversions for the Sony IMX296 (global shutter). */
class CamHelperImx296 : public CamHelper
{
public:
	CamHelperImx296();
	uint32_t gainCode(double gain) const override;
	double gain(uint32_t gainCode) const override;
	uint32_t exposureLines(const Duration exposure, const Duration lineLength) const override;
	Duration exposure(uint32_t exposureLines, const Duration lineLength) const override;
	void getDelays(int &exposureDelay, int &gainDelay,
		       int &vblankDelay, int &hblankDelay) const override;

private:
	static constexpr uint32_t minExposureLines = 1;
	/* Gain register limit: 23.9dB in 0.1dB steps. */
	static constexpr uint32_t maxGainCode = 239;
	/* 550 pixel clocks per line at 37.125MHz. */
	static constexpr Duration timePerLine = 550.0 / 37.125e6 * 1.0s;

	/*
	 * Smallest difference between the frame length and integration time,
	 * in units of lines.
	 */
	static constexpr int frameIntegrationDiff = 4;
};

/* No embedded-data parser for this sensor. */
CamHelperImx296::CamHelperImx296()
	: CamHelper(nullptr, frameIntegrationDiff)
{
}
uint32_t CamHelperImx296::gainCode(double gain) const
{
	/* The register counts in 0.1dB steps, capped at maxGainCode (23.9dB). */
	uint32_t code = 20 * std::log10(gain) * 10;

	if (code > maxGainCode)
		code = maxGainCode;
	return code;
}
double CamHelperImx296::gain(uint32_t gainCode) const
{
	/* Inverse of gainCode(): each code step is 0.1dB. */
	const double exponent = gainCode / 200.0;
	return std::pow(10.0, exponent);
}
uint32_t CamHelperImx296::exposureLines(const Duration exposure,
					[[maybe_unused]] const Duration lineLength) const
{
	/* A fixed 14.26us offset applies on top of the per-line time. */
	uint32_t lines = (exposure - 14.26us) / timePerLine;
	return std::max(minExposureLines, lines);
}
Duration CamHelperImx296::exposure(uint32_t exposureLines,
				   [[maybe_unused]] const Duration lineLength) const
{
	/* Inverse of exposureLines(), re-adding the fixed 14.26us offset. */
	uint32_t lines = std::max(minExposureLines, exposureLines);
	return lines * timePerLine + 14.26us;
}
void CamHelperImx296::getDelays(int &exposureDelay, int &gainDelay,
				int &vblankDelay, int &hblankDelay) const
{
	/* All controls take effect 2 frames after they are written. */
	constexpr int frames = 2;

	exposureDelay = frames;
	gainDelay = frames;
	vblankDelay = frames;
	hblankDelay = frames;
}
/* Factory hooked into the helper registry below. */
static CamHelper *create()
{
	return new CamHelperImx296();
}

/* Registers this helper under the sensor name at static-init time. */
static RegisterCamHelper reg("imx296", &create);
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/cam_helper/md_parser.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* image sensor metadata parser interface
*/
#pragma once
#include <initializer_list>
#include <map>
#include <optional>
#include <stdint.h>
#include <libcamera/base/span.h>
/*
* Camera metadata parser class. Usage as shown below.
*
* Setup:
*
* Usually the metadata parser will be made as part of the CamHelper class so
* application code doesn't have to worry which kind to instantiate. But for
* the sake of example let's suppose we're parsing imx219 metadata.
*
* MdParser *parser = new MdParserSmia({ expHiReg, expLoReg, gainReg });
* parser->SetBitsPerPixel(bpp);
* parser->SetLineLengthBytes(pitch);
* parser->SetNumLines(2);
*
* Note 1: if you don't know how many lines there are, the size of the input
* buffer is used as a limit instead.
*
* Note 2: if you don't know the line length, you can leave the line length unset
* (or set to zero) and the parser will hunt for the line start instead.
*
* Then on every frame:
*
* RegisterMap registers;
* if (parser->Parse(buffer, registers) != MdParser::OK)
* much badness;
* Metadata metadata;
* CamHelper::PopulateMetadata(registers, metadata);
*
* (Note that the CamHelper class converts to/from exposure lines and time,
* and gain_code / actual gain.)
*
* If you suspect your embedded data may have changed its layout, change any line
* lengths, number of lines, bits per pixel etc. that are different, and
* then:
*
* parser->Reset();
*
* before calling Parse again.
*/
namespace RPiController {
/* Abstract base class from which other metadata parsers are derived. */
class MdParser
{
public:
	/* Maps register address -> parsed register value. */
	using RegisterMap = std::map<uint32_t, uint32_t>;

	/*
	 * Parser status codes:
	 * OK - success
	 * NOTFOUND - value such as exposure or gain was not found
	 * ERROR - all other errors
	 */
	enum Status {
		OK = 0,
		NOTFOUND = 1,
		ERROR = 2
	};

	MdParser()
		: reset_(true), bitsPerPixel_(0), numLines_(0), lineLengthBytes_(0)
	{
	}

	virtual ~MdParser() = default;

	/* Force a full re-scan of the buffer layout on the next parse(). */
	void reset()
	{
		reset_ = true;
	}

	/* Raw bit depth of the embedded data lines (10/12/14). */
	void setBitsPerPixel(int bpp)
	{
		bitsPerPixel_ = bpp;
	}

	/* Number of embedded data lines; 0 means "use the buffer size as a limit". */
	void setNumLines(unsigned int numLines)
	{
		numLines_ = numLines;
	}

	/* Line pitch in bytes; 0 means "hunt for each line start". */
	void setLineLengthBytes(unsigned int numBytes)
	{
		lineLengthBytes_ = numBytes;
	}

	virtual Status parse(libcamera::Span<const uint8_t> buffer,
			     RegisterMap &registers) = 0;

protected:
	bool reset_;			/* re-scan required before next parse */
	int bitsPerPixel_;
	unsigned int numLines_;
	unsigned int lineLengthBytes_;
};
/*
 * This isn't a full implementation of a metadata parser for SMIA sensors,
 * however, it does provide the findRegs function which will prove useful and
 * make it easier to implement parsers for other SMIA-like sensors (see
 * md_parser_imx219.cpp for an example).
 */
class MdParserSmia final : public MdParser
{
public:
	MdParserSmia(std::initializer_list<uint32_t> registerList);

	MdParser::Status parse(libcamera::Span<const uint8_t> buffer,
			       RegisterMap &registers) override;

private:
	/* Maps register address to offset in the buffer. */
	using OffsetMap = std::map<uint32_t, std::optional<uint32_t>>;

	/*
	 * Note that error codes > 0 are regarded as non-fatal; codes < 0
	 * indicate a bad data buffer. Status codes are:
	 * ParseOk - found all registers, much happiness
	 * MissingRegs - some registers found; should this be a hard error?
	 * The remaining codes are all hard errors.
	 */
	enum ParseStatus {
		ParseOk = 0,
		MissingRegs = 1,
		NoLineStart = -1,
		IllegalTag = -2,
		BadDummy = -3,
		BadLineEnd = -4,
		BadPadding = -5
	};

	/* Scans the buffer and fills offsets_ with the location of each register. */
	ParseStatus findRegs(libcamera::Span<const uint8_t> buffer);

	OffsetMap offsets_;
};
} /* namespace RPi */
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/cam_helper/cam_helper_imx477.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
* camera helper for imx477 sensor
*/
#include <algorithm>
#include <assert.h>
#include <cmath>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <libcamera/base/log.h>
#include "cam_helper.h"
#include "md_parser.h"
using namespace RPiController;
using namespace libcamera;
using libcamera::utils::Duration;
namespace libcamera {
LOG_DECLARE_CATEGORY(IPARPI)
}
/*
 * We care about two gain registers and a pair of exposure registers. Their
 * I2C addresses from the Sony IMX477 datasheet:
 */
constexpr uint32_t expHiReg = 0x0202;
constexpr uint32_t expLoReg = 0x0203;
constexpr uint32_t gainHiReg = 0x0204;
constexpr uint32_t gainLoReg = 0x0205;
constexpr uint32_t frameLengthHiReg = 0x0340;
constexpr uint32_t frameLengthLoReg = 0x0341;
constexpr uint32_t lineLengthHiReg = 0x0342;
constexpr uint32_t lineLengthLoReg = 0x0343;
constexpr uint32_t temperatureReg = 0x013a;
/* Registers the embedded-data parser must locate on every frame. */
constexpr std::initializer_list<uint32_t> registerList =
	{ expHiReg, expLoReg, gainHiReg, gainLoReg, frameLengthHiReg, frameLengthLoReg,
	  lineLengthHiReg, lineLengthLoReg, temperatureReg };

/* Sensor-specific conversions, embedded-data and long-exposure handling for the IMX477. */
class CamHelperImx477 : public CamHelper
{
public:
	CamHelperImx477();
	uint32_t gainCode(double gain) const override;
	double gain(uint32_t gainCode) const override;
	void prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata) override;
	std::pair<uint32_t, uint32_t> getBlanking(Duration &exposure, Duration minFrameDuration,
						  Duration maxFrameDuration) const override;
	void getDelays(int &exposureDelay, int &gainDelay,
		       int &vblankDelay, int &hblankDelay) const override;
	bool sensorEmbeddedDataPresent() const override;

private:
	/*
	 * Smallest difference between the frame length and integration time,
	 * in units of lines.
	 */
	static constexpr int frameIntegrationDiff = 22;
	/* Maximum frame length allowable for long exposure calculations. */
	static constexpr int frameLengthMax = 0xffdc;
	/* Largest long exposure scale factor given as a left shift on the frame length. */
	static constexpr int longExposureShiftMax = 7;

	void populateMetadata(const MdParser::RegisterMap &registers,
			      Metadata &metadata) const override;
};

CamHelperImx477::CamHelperImx477()
	: CamHelper(std::make_unique<MdParserSmia>(registerList), frameIntegrationDiff)
{
}
uint32_t CamHelperImx477::gainCode(double gain) const
{
	/* Analogue gain register encoding: code = 1024 - 1024 / gain. */
	double code = 1024 - 1024 / gain;
	return static_cast<uint32_t>(code);
}
double CamHelperImx477::gain(uint32_t gainCode) const
{
	/* Inverse of gainCode(): gain = 1024 / (1024 - code). */
	double denominator = 1024 - gainCode;
	return 1024.0 / denominator;
}
/*
 * Parse the embedded data buffer, then reconcile the result with the
 * DeviceStatus predicted by DelayedControls (which wins in long exposure
 * mode, since the scale factor isn't visible in embedded data).
 */
void CamHelperImx477::prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata)
{
	MdParser::RegisterMap registers;
	DeviceStatus deviceStatus;

	if (metadata.get("device.status", deviceStatus)) {
		LOG(IPARPI, Error) << "DeviceStatus not found from DelayedControls";
		return;
	}

	parseEmbeddedData(buffer, metadata);

	/*
	 * The DeviceStatus struct is first populated with values obtained from
	 * DelayedControls. If this reports frame length is > frameLengthMax,
	 * it means we are using a long exposure mode. Since the long exposure
	 * scale factor is not returned back through embedded data, we must rely
	 * on the existing exposure lines and frame length values returned by
	 * DelayedControls.
	 *
	 * Otherwise, all values are updated with what is reported in the
	 * embedded data.
	 */
	if (deviceStatus.frameLength > frameLengthMax) {
		DeviceStatus parsedDeviceStatus;

		metadata.get("device.status", parsedDeviceStatus);
		parsedDeviceStatus.shutterSpeed = deviceStatus.shutterSpeed;
		parsedDeviceStatus.frameLength = deviceStatus.frameLength;
		metadata.set("device.status", parsedDeviceStatus);

		LOG(IPARPI, Debug) << "Metadata updated for long exposure: "
				   << parsedDeviceStatus;
	}
}
/*
 * Compute vertical/horizontal blanking for the requested exposure, switching
 * into the sensor's long exposure mode (a power-of-two frame length scale
 * factor) when the frame length would exceed the register maximum.
 */
std::pair<uint32_t, uint32_t> CamHelperImx477::getBlanking(Duration &exposure,
							   Duration minFrameDuration,
							   Duration maxFrameDuration) const
{
	uint32_t frameLength, exposureLines;
	unsigned int shift = 0;

	auto [vblank, hblank] = CamHelper::getBlanking(exposure, minFrameDuration,
						       maxFrameDuration);

	frameLength = mode_.height + vblank;
	Duration lineLength = hblankToLineLength(hblank);

	/*
	 * Check if the frame length calculated needs to be setup for long
	 * exposure mode. This will require us to use a long exposure scale
	 * factor provided by a shift operation in the sensor.
	 */
	while (frameLength > frameLengthMax) {
		if (++shift > longExposureShiftMax) {
			/* Cap at the maximum scale factor the sensor supports. */
			shift = longExposureShiftMax;
			frameLength = frameLengthMax;
			break;
		}
		frameLength >>= 1;
	}

	if (shift) {
		/* Account for any rounding in the scaled frame length value. */
		frameLength <<= shift;
		exposureLines = CamHelperImx477::exposureLines(exposure, lineLength);
		exposureLines = std::min(exposureLines, frameLength - frameIntegrationDiff);
		exposure = CamHelperImx477::exposure(exposureLines, lineLength);
	}

	return { frameLength - mode_.height, hblank };
}
/*
 * Number of frames of delay between writing a control to the sensor and it
 * taking effect: exposure and gain apply two frames later, blanking three.
 */
void CamHelperImx477::getDelays(int &exposureDelay, int &gainDelay,
				int &vblankDelay, int &hblankDelay) const
{
	exposureDelay = 2;
	gainDelay = 2;
	vblankDelay = 3;
	hblankDelay = 3;
}
/* The imx477 supplies embedded metadata with its frames. */
bool CamHelperImx477::sensorEmbeddedDataPresent() const
{
	return true;
}
void CamHelperImx477::populateMetadata(const MdParser::RegisterMap ®isters,
Metadata &metadata) const
{
DeviceStatus deviceStatus;
deviceStatus.lineLength = lineLengthPckToDuration(registers.at(lineLengthHiReg) * 256 +
registers.at(lineLengthLoReg));
deviceStatus.shutterSpeed = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg),
deviceStatus.lineLength);
deviceStatus.analogueGain = gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg));
deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
deviceStatus.sensorTemperature = std::clamp<int8_t>(registers.at(temperatureReg), -20, 80);
metadata.set("device.status", deviceStatus);
}
/* Factory function handed to the registration machinery below. */
static CamHelper *create()
{
	return new CamHelperImx477();
}
/* Register this helper against the "imx477" sensor name. */
static RegisterCamHelper reg("imx477", &create);
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/cam_helper/cam_helper.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* helper information for different sensors
*/
#include <linux/videodev2.h>
#include <limits>
#include <map>
#include <string.h>
#include "libcamera/internal/v4l2_videodevice.h"
#include "cam_helper.h"
#include "md_parser.h"
using namespace RPiController;
using namespace libcamera;
using libcamera::utils::Duration;
using namespace std::literals::chrono_literals;
namespace libcamera {
LOG_DECLARE_CATEGORY(IPARPI)
}
namespace {
/*
 * Map of sensor name to helper creation function. Wrapped in a function so
 * the map is constructed before first use regardless of static
 * initialisation order across translation units.
 */
std::map<std::string, CamHelperCreateFunc> &camHelpers()
{
	static std::map<std::string, CamHelperCreateFunc> helpers;
	return helpers;
}
} /* namespace */
/*
 * Instantiate the CamHelper whose registered name appears within camName,
 * or return nullptr when no registered helper matches.
 */
CamHelper *CamHelper::create(std::string const &camName)
{
	/*
	 * Helpers register themselves through static RegisterCamHelper
	 * initialisers; pick the first whose name is a substring of the
	 * camera name we were given.
	 */
	for (const auto &[name, createFunc] : camHelpers()) {
		if (camName.find(name) != std::string::npos)
			return createFunc();
	}
	return nullptr;
}
/*
 * \param parser Optional embedded-data parser for this sensor (may be null).
 * \param frameIntegrationDiff Number of lines by which the frame length must
 * exceed the exposure lines.
 */
CamHelper::CamHelper(std::unique_ptr<MdParser> parser, unsigned int frameIntegrationDiff)
	: parser_(std::move(parser)), frameIntegrationDiff_(frameIntegrationDiff)
{
}
CamHelper::~CamHelper()
{
}
/* Parse any embedded data in the buffer and update the frame metadata. */
void CamHelper::prepare(Span<const uint8_t> buffer,
			Metadata &metadata)
{
	parseEmbeddedData(buffer, metadata);
}
/* Default implementation: nothing to do with the frame statistics. */
void CamHelper::process([[maybe_unused]] StatisticsPtr &stats,
			[[maybe_unused]] Metadata &metadata)
{
}
/* Convert an exposure duration into a whole number of sensor lines. */
uint32_t CamHelper::exposureLines(const Duration exposure, const Duration lineLength) const
{
	return exposure / lineLength;
}
/* Convert a number of sensor lines back into an exposure duration. */
Duration CamHelper::exposure(uint32_t exposureLines, const Duration lineLength) const
{
	return exposureLines * lineLength;
}
/*
 * Compute the vertical and horizontal blanking that satisfy the requested
 * exposure and the [min, max] frame duration limits, stretching the line
 * length when the frame would otherwise exceed the mode's maximum length.
 *
 * \param[in,out] exposure Requested exposure; clipped to what the resulting
 * frame length allows and written back.
 * \return A pair of { vblank, hblank } values.
 */
std::pair<uint32_t, uint32_t> CamHelper::getBlanking(Duration &exposure,
						     Duration minFrameDuration,
						     Duration maxFrameDuration) const
{
	uint32_t frameLengthMin, frameLengthMax, vblank, hblank;
	Duration lineLength = mode_.minLineLength;
	/*
	 * minFrameDuration and maxFrameDuration are clamped by the caller
	 * based on the limits for the active sensor mode.
	 *
	 * frameLengthMax gets calculated on the smallest line length as we do
	 * not want to extend that unless absolutely necessary.
	 */
	frameLengthMin = minFrameDuration / mode_.minLineLength;
	frameLengthMax = maxFrameDuration / mode_.minLineLength;
	/*
	 * Watch out for (exposureLines + frameIntegrationDiff_) overflowing a
	 * uint32_t in the std::clamp() below when the exposure time is
	 * extremely (extremely!) long - as happens when the IPA calculates the
	 * maximum possible exposure time.
	 */
	uint32_t exposureLines = std::min(CamHelper::exposureLines(exposure, lineLength),
					  std::numeric_limits<uint32_t>::max() - frameIntegrationDiff_);
	uint32_t frameLengthLines = std::clamp(exposureLines + frameIntegrationDiff_,
					       frameLengthMin, frameLengthMax);
	/*
	 * If our frame length lines is above the maximum allowed, see if we can
	 * extend the line length to accommodate the requested frame length.
	 */
	if (frameLengthLines > mode_.maxFrameLength) {
		Duration lineLengthAdjusted = lineLength * frameLengthLines / mode_.maxFrameLength;
		lineLength = std::min(mode_.maxLineLength, lineLengthAdjusted);
		frameLengthLines = mode_.maxFrameLength;
	}
	hblank = lineLengthToHblank(lineLength);
	vblank = frameLengthLines - mode_.height;
	/*
	 * Limit the exposure to the maximum frame duration requested, and
	 * re-calculate if it has been clipped.
	 */
	exposureLines = std::min(frameLengthLines - frameIntegrationDiff_,
				 CamHelper::exposureLines(exposure, lineLength));
	exposure = CamHelper::exposure(exposureLines, lineLength);
	return { vblank, hblank };
}
/* Convert horizontal blanking in pixels to a line length duration. */
Duration CamHelper::hblankToLineLength(uint32_t hblank) const
{
	return (mode_.width + hblank) * (1.0s / mode_.pixelRate);
}
/* Convert a line length duration back to horizontal blanking in pixels. */
uint32_t CamHelper::lineLengthToHblank(const Duration &lineLength) const
{
	return (lineLength * mode_.pixelRate / 1.0s) - mode_.width;
}
/* Convert a line length expressed in pixel clocks to a duration. */
Duration CamHelper::lineLengthPckToDuration(uint32_t lineLengthPck) const
{
	return lineLengthPck * (1.0s / mode_.pixelRate);
}
/* Record the new camera mode and reconfigure the metadata parser for it. */
void CamHelper::setCameraMode(const CameraMode &mode)
{
	mode_ = mode;
	if (parser_) {
		parser_->reset();
		parser_->setBitsPerPixel(mode.bitdepth);
		parser_->setLineLengthBytes(0); /* We use SetBufferSize. */
	}
}
/*
 * Number of frames of delay between writing a control to the sensor and it
 * taking effect.
 */
void CamHelper::getDelays(int &exposureDelay, int &gainDelay,
			  int &vblankDelay, int &hblankDelay) const
{
	/*
	 * These values are correct for many sensors. Other sensors will
	 * need to over-ride this function.
	 */
	exposureDelay = 2;
	gainDelay = 1;
	vblankDelay = 2;
	hblankDelay = 2;
}
/* Whether this sensor delivers embedded metadata with its frames. */
bool CamHelper::sensorEmbeddedDataPresent() const
{
	return false;
}
double CamHelper::getModeSensitivity([[maybe_unused]] const CameraMode &mode) const
{
	/*
	 * Most sensors have the same sensitivity in every mode, but this
	 * function can be overridden for those that do not. Note that it is
	 * called before mode_ is set, so it must return the sensitivity
	 * of the mode that is passed in.
	 */
	return 1.0;
}
unsigned int CamHelper::hideFramesStartup() const
{
	/*
	 * The number of frames when a camera first starts that shouldn't be
	 * displayed as they are invalid in some way.
	 */
	return 0;
}
unsigned int CamHelper::hideFramesModeSwitch() const
{
	/* After a mode switch, many sensors return valid frames immediately. */
	return 0;
}
unsigned int CamHelper::mistrustFramesStartup() const
{
	/* Many sensors return a single bad frame on start-up. */
	return 1;
}
unsigned int CamHelper::mistrustFramesModeSwitch() const
{
	/* Many sensors return valid metadata immediately. */
	return 0;
}
/*
 * Parse the sensor's embedded data buffer and fold the values it reports
 * into the frame's "device.status" metadata. Does nothing if the buffer is
 * empty, and leaves the metadata untouched if parsing fails.
 */
void CamHelper::parseEmbeddedData(Span<const uint8_t> buffer,
				  Metadata &metadata)
{
	MdParser::RegisterMap registers;
	Metadata parsedMetadata;
	if (buffer.empty())
		return;
	if (parser_->parse(buffer, registers) != MdParser::Status::OK) {
		LOG(IPARPI, Error) << "Embedded data buffer parsing failed";
		return;
	}
	/* Let the sensor-specific helper convert raw registers to metadata. */
	populateMetadata(registers, parsedMetadata);
	metadata.merge(parsedMetadata);
	/*
	 * Overwrite the exposure/gain, line/frame length and sensor temperature values
	 * in the existing DeviceStatus with values from the parsed embedded buffer.
	 * Fetch it first in case any other fields were set meaningfully.
	 */
	DeviceStatus deviceStatus, parsedDeviceStatus;
	if (metadata.get("device.status", deviceStatus) ||
	    parsedMetadata.get("device.status", parsedDeviceStatus)) {
		LOG(IPARPI, Error) << "DeviceStatus not found";
		return;
	}
	deviceStatus.shutterSpeed = parsedDeviceStatus.shutterSpeed;
	deviceStatus.analogueGain = parsedDeviceStatus.analogueGain;
	deviceStatus.frameLength = parsedDeviceStatus.frameLength;
	deviceStatus.lineLength = parsedDeviceStatus.lineLength;
	/* Only overwrite the temperature if the embedded data reported one. */
	if (parsedDeviceStatus.sensorTemperature)
		deviceStatus.sensorTemperature = parsedDeviceStatus.sensorTemperature;
	LOG(IPARPI, Debug) << "Metadata updated - " << deviceStatus;
	metadata.set("device.status", deviceStatus);
}
/* Default implementation: no sensor-specific registers to convert. */
void CamHelper::populateMetadata([[maybe_unused]] const MdParser::RegisterMap &registers,
				 [[maybe_unused]] Metadata &metadata) const
{
}
/*
 * Record the creation function in the global helper map under the given
 * sensor name, so that CamHelper::create() can find it later.
 */
RegisterCamHelper::RegisterCamHelper(char const *camName,
				     CamHelperCreateFunc createFunc)
{
	camHelpers().insert_or_assign(std::string(camName), createFunc);
}
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/black_level_status.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* black level control algorithm status
*/
#pragma once
/* The "black level" algorithm stores the black levels to use. */
struct BlackLevelStatus {
	uint16_t blackLevelR; /* out of 16 bits */
	uint16_t blackLevelG; /* green level, also out of 16 bits */
	uint16_t blackLevelB; /* blue level, also out of 16 bits */
};
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/lux_status.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* Lux control algorithm status
*/
#pragma once
/*
* The "lux" algorithm looks at the (AGC) histogram statistics of the frame and
* estimates the current lux level of the scene. It does this by a simple ratio
* calculation comparing to a reference image that was taken in known conditions
* with known statistics and a properly measured lux level. There is a slight
* problem with aperture, in that it may be variable without the system knowing
* or being aware of it. In this case an external application may set a
* "current_aperture" value if it wishes, which would be used in place of the
* (presumably meaningless) value in the image metadata.
*/
struct LuxStatus {
	double lux; /* estimated lux level of the scene */
	double aperture; /* aperture value used for the estimate */
};
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/sharpen_status.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* Sharpen control algorithm status
*/
#pragma once
/* The "sharpen" algorithm stores the strength to use. */
struct SharpenStatus {
	/* controls the smallest level of detail (or noise!) that sharpening will pick up */
	double threshold;
	/* the rate at which the sharpening response ramps once above the threshold */
	double strength;
	/* upper limit of the allowed sharpening response */
	double limit;
	/* the sharpening strength requested by the user or application */
	double userStrength;
};
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/ccm_algorithm.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* CCM (colour correction matrix) control algorithm interface
*/
#pragma once
#include "algorithm.h"
namespace RPiController {
class CcmAlgorithm : public Algorithm
{
public:
	CcmAlgorithm(Controller *controller) : Algorithm(controller) {}
	/* A CCM algorithm must provide the following: */
	/* Set the colour saturation strength to apply. */
	virtual void setSaturation(double saturation) = 0;
};
} /* namespace RPiController */
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/agc_algorithm.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* AGC/AEC control algorithm interface
*/
#pragma once
#include <vector>
#include <libcamera/base/utils.h>
#include "algorithm.h"
namespace RPiController {
class AgcAlgorithm : public Algorithm
{
public:
	AgcAlgorithm(Controller *controller) : Algorithm(controller) {}
	/* An AGC algorithm must provide the following: */
	/* Number of frames the algorithm needs to converge. */
	virtual unsigned int getConvergenceFrames() const = 0;
	/* Metering weights currently in use. */
	virtual std::vector<double> const &getWeights() const = 0;
	/* Exposure compensation for the given AGC channel. */
	virtual void setEv(unsigned int channel, double ev) = 0;
	virtual void setFlickerPeriod(libcamera::utils::Duration flickerPeriod) = 0;
	/* Fix the shutter/gain for a channel, overriding the auto loop. */
	virtual void setFixedShutter(unsigned int channel,
				     libcamera::utils::Duration fixedShutter) = 0;
	virtual void setMaxShutter(libcamera::utils::Duration maxShutter) = 0;
	virtual void setFixedAnalogueGain(unsigned int channel, double fixedAnalogueGain) = 0;
	/* Select named operating modes from the tuning configuration. */
	virtual void setMeteringMode(std::string const &meteringModeName) = 0;
	virtual void setExposureMode(std::string const &exposureModeName) = 0;
	virtual void setConstraintMode(std::string const &contraintModeName) = 0;
	virtual void enableAuto() = 0;
	virtual void disableAuto() = 0;
	virtual void setActiveChannels(const std::vector<unsigned int> &activeChannels) = 0;
};
} /* namespace RPiController */
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/hdr_algorithm.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2023, Raspberry Pi Ltd
*
* HDR control algorithm interface
*/
#pragma once
#include <vector>
#include "algorithm.h"
namespace RPiController {
class HdrAlgorithm : public Algorithm
{
public:
	HdrAlgorithm(Controller *controller)
		: Algorithm(controller) {}
	/* An HDR algorithm must provide the following: */
	/* Select the named HDR mode; returns non-zero on failure. */
	virtual int setMode(std::string const &modeName) = 0;
	/* AGC channels used by the currently selected mode. */
	virtual std::vector<unsigned int> getChannels() const = 0;
};
} /* namespace RPiController */
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/black_level_algorithm.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2023, Raspberry Pi Ltd
*
* black level control algorithm interface
*/
#pragma once
#include "algorithm.h"
namespace RPiController {
class BlackLevelAlgorithm : public Algorithm
{
public:
	BlackLevelAlgorithm(Controller *controller)
		: Algorithm(controller) {}
	/* A black level algorithm must provide the following: */
	/* Report the initial per-channel black levels (16-bit scale). */
	virtual void initialValues(uint16_t &blackLevelR, uint16_t &blackLevelG,
				   uint16_t &blackLevelB) = 0;
};
} /* namespace RPiController */
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/stitch_status.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2023 Raspberry Pi Ltd
*
* stitch control algorithm status
*/
#pragma once
/*
* Parameters for the stitch block.
*/
struct StitchStatus {
	uint16_t thresholdLo;
	uint8_t diffPower;
	/* NOTE(review): field semantics defined by the stitch HW block — see producer. */
	double motionThreshold;
};
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/agc_status.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* AGC/AEC control algorithm status
*/
#pragma once
#include <string>
#include <libcamera/base/utils.h>
#include "hdr_status.h"
/*
* The AGC algorithm process method should post an AgcStatus into the image
* metadata under the tag "agc.status".
* The AGC algorithm prepare method should post an AgcPrepareStatus instead
* under "agc.prepare_status".
*/
/*
* Note: total_exposure_value will be reported as zero until the algorithm has
* seen statistics and calculated meaningful values. The contents should be
* ignored until then.
*/
struct AgcStatus {
	libcamera::utils::Duration totalExposureValue; /* value for all exposure and gain for this image */
	libcamera::utils::Duration targetExposureValue; /* (unfiltered) target total exposure AGC is aiming for */
	libcamera::utils::Duration shutterTime;
	double analogueGain;
	/* Names of the modes currently in use, as per the tuning file. */
	std::string exposureMode;
	std::string constraintMode;
	std::string meteringMode;
	double ev;
	libcamera::utils::Duration flickerPeriod;
	int floatingRegionEnable;
	/* Fixed values requested by the application, when set. */
	libcamera::utils::Duration fixedShutter;
	double fixedAnalogueGain;
	/* AGC channel this status belongs to. */
	unsigned int channel;
	HdrStatus hdr;
};
struct AgcPrepareStatus {
	double digitalGain;
	int locked; /* non-zero once AGC reports itself converged */
};
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/af_status.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2022, Raspberry Pi Ltd
*
* AF control algorithm status
*/
#pragma once
#include <optional>
/*
* The AF algorithm should post the following structure into the image's
* "af.status" metadata. lensSetting should control the lens.
*/
/* Overall state of the autofocus algorithm. */
enum class AfState {
	Idle = 0,
	Scanning,
	Focused,
	Failed
};
/* Whether the algorithm is running, in the process of pausing, or paused. */
enum class AfPauseState {
	Running = 0,
	Pausing,
	Paused
};
struct AfStatus {
	/* state for reporting */
	AfState state;
	AfPauseState pauseState;
	/* lensSetting should be sent to the lens driver, when valid */
	std::optional<int> lensSetting;
};
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/histogram.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* histogram calculation interface
*/
#pragma once
#include <stdint.h>
#include <vector>
#include <cassert>
/*
* A simple histogram class, for use in particular to find "quantiles" and
* averages between "quantiles".
*/
namespace RPiController {
class Histogram
{
public:
	/* Construct an empty histogram with zero bins. */
	Histogram()
	{
		cumulative_.push_back(0);
	}
	/* Build the cumulative table from num raw bin counts. */
	template<typename T> Histogram(T *histogram, int num)
	{
		assert(num);
		cumulative_.reserve(num + 1);
		cumulative_.push_back(0);
		for (int i = 0; i < num; i++)
			cumulative_.push_back(cumulative_.back() +
					      histogram[i]);
	}
	/* Number of bins in the histogram. */
	uint32_t bins() const { return cumulative_.size() - 1; }
	/* Total count across all bins. */
	uint64_t total() const { return cumulative_[cumulative_.size() - 1]; }
	/* Cumulative frequency up to a (fractional) point in a bin. */
	uint64_t cumulativeFreq(double bin) const;
	/* Return the mean value between two (fractional) bins. */
	double interBinMean(double binLo, double binHi) const;
	/*
	 * Return the (fractional) bin of the point q (0 <= q <= 1) through the
	 * histogram. Optionally provide limits to help.
	 */
	double quantile(double q, int first = -1, int last = -1) const;
	/* Return the average histogram bin value between the two quantiles. */
	double interQuantileMean(double qLo, double qHi) const;
private:
	/* cumulative_[i] holds the count of all bins strictly below bin i. */
	std::vector<uint64_t> cumulative_;
};
} /* namespace RPiController */
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/controller.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* ISP controller
*/
#include <assert.h>
#include <libcamera/base/file.h>
#include <libcamera/base/log.h>
#include "libcamera/internal/yaml_parser.h"
#include "algorithm.h"
#include "controller.h"
using namespace RPiController;
using namespace libcamera;
using namespace std::literals::chrono_literals;
LOG_DEFINE_CATEGORY(RPiController)
/*
 * Hardware descriptions for each supported target, keyed by the "target"
 * name from the tuning file. See Controller::HardwareConfig for the fields.
 */
static const std::map<std::string, Controller::HardwareConfig> HardwareConfigMap = {
	{
		"bcm2835",
		{
			/*
			 * There are only ever 15 AGC regions computed by the firmware
			 * due to zoning, but the HW defines AGC_REGIONS == 16!
			 */
			.agcRegions = { 15 , 1 },
			.agcZoneWeights = { 15 , 1 },
			.awbRegions = { 16, 12 },
			.cacRegions = { 0, 0 },
			.focusRegions = { 4, 3 },
			.numHistogramBins = 128,
			.numGammaPoints = 33,
			.pipelineWidth = 13,
			.statsInline = false,
			.minPixelProcessingTime = 0s,
		}
	},
	{
		"pisp",
		{
			.agcRegions = { 0, 0 },
			.agcZoneWeights = { 15, 15 },
			.awbRegions = { 32, 32 },
			.cacRegions = { 8, 8 },
			.focusRegions = { 8, 8 },
			.numHistogramBins = 1024,
			.numGammaPoints = 64,
			.pipelineWidth = 16,
			.statsInline = true,
			/*
			 * The constraint below is on the rate of pixels going
			 * from CSI2 peripheral to ISP-FE (400Mpix/s, plus tiny
			 * overheads per scanline, for which 380Mpix/s is a
			 * conservative bound).
			 *
			 * There is a 64kbit data FIFO before the bottleneck,
			 * which means that in all reasonable cases the
			 * constraint applies at a timescale >= 1 scanline, so
			 * adding horizontal blanking can prevent loss.
			 *
			 * If the backlog were to grow beyond 64kbit during a
			 * single scanline, there could still be loss. This
			 * could happen using 4 lanes at 1.5Gbps at 10bpp with
			 * frames wider than ~16,000 pixels.
			 */
			.minPixelProcessingTime = 1.0us / 380,
		}
	},
};
/* switchMode() must run before prepare()/process(); track that here. */
Controller::Controller()
	: switchModeCalled_(false)
{
}
Controller::~Controller() {}
/*
 * Load a tuning file and instantiate the algorithms it lists.
 *
 * Version 1.x files carry the algorithms as top-level keys; version 2.x
 * files carry them in an ordered "algorithms" list. Returns zero on
 * success, or a negative error code.
 */
int Controller::read(char const *filename)
{
	File file(filename);
	if (!file.open(File::OpenModeFlag::ReadOnly)) {
		LOG(RPiController, Warning)
			<< "Failed to open tuning file '" << filename << "'";
		return -EINVAL;
	}
	std::unique_ptr<YamlObject> root = YamlParser::parse(file);
	if (!root)
		return -EINVAL;
	double version = (*root)["version"].get<double>(1.0);
	/* A missing "target" key defaults to "bcm2835". */
	target_ = (*root)["target"].get<std::string>("bcm2835");
	if (version < 2.0) {
		LOG(RPiController, Warning)
			<< "This format of the tuning file will be deprecated soon!"
			<< " Please use the convert_tuning.py utility to update to version 2.0.";
		for (auto const &[key, value] : root->asDict()) {
			int ret = createAlgorithm(key, value);
			if (ret)
				return ret;
		}
	} else if (version < 3.0) {
		if (!root->contains("algorithms")) {
			LOG(RPiController, Error)
				<< "Tuning file " << filename
				<< " does not have an \"algorithms\" list!";
			return -EINVAL;
		}
		for (auto const &rootAlgo : (*root)["algorithms"].asList())
			for (auto const &[key, value] : rootAlgo.asDict()) {
				int ret = createAlgorithm(key, value);
				if (ret)
					return ret;
			}
	} else {
		LOG(RPiController, Error)
			<< "Unrecognised version " << version
			<< " for the tuning file " << filename;
		return -EINVAL;
	}
	return 0;
}
/*
 * Create the named algorithm, configure it from the given tuning parameters
 * and append it to the list of algorithms to run.
 *
 * Unrecognised algorithm names are skipped with a warning, so newer tuning
 * files remain usable. Returns zero on success, or the error code returned
 * by the algorithm's read() method.
 */
int Controller::createAlgorithm(const std::string &name, const YamlObject &params)
{
	auto it = getAlgorithms().find(name);
	if (it == getAlgorithms().end()) {
		LOG(RPiController, Warning)
			<< "No algorithm found for \"" << name << "\"";
		return 0;
	}
	/*
	 * Take ownership immediately so the algorithm is not leaked if its
	 * read() method fails below.
	 */
	AlgorithmPtr algo((*it->second)(this));
	int ret = algo->read(params);
	if (ret)
		return ret;
	algorithms_.push_back(std::move(algo));
	return 0;
}
/* Give each algorithm the chance to perform one-time initialisation. */
void Controller::initialise()
{
	for (auto &algo : algorithms_)
		algo->initialise();
}
/* Inform all algorithms of a camera mode change; must precede prepare(). */
void Controller::switchMode(CameraMode const &cameraMode, Metadata *metadata)
{
	for (auto &algo : algorithms_)
		algo->switchMode(cameraMode, metadata);
	switchModeCalled_ = true;
}
/* Run every algorithm's prepare phase on the frame's metadata. */
void Controller::prepare(Metadata *imageMetadata)
{
	assert(switchModeCalled_);
	for (auto &algo : algorithms_)
		algo->prepare(imageMetadata);
}
/* Feed frame statistics through every algorithm's process phase. */
void Controller::process(StatisticsPtr stats, Metadata *imageMetadata)
{
	assert(switchModeCalled_);
	for (auto &algo : algorithms_)
		algo->process(stats, imageMetadata);
}
/* Metadata shared between all algorithms, persisting across frames. */
Metadata &Controller::getGlobalMetadata()
{
	return globalMetadata_;
}
/*
 * Look up an algorithm by name (case-insensitively); returns nullptr when
 * no algorithm matches.
 */
Algorithm *Controller::getAlgorithm(std::string const &name) const
{
	/*
	 * The passed name must be the entire algorithm name, or must match the
	 * last part of it with a period (.) just before.
	 */
	size_t nameLen = name.length();
	for (auto &algo : algorithms_) {
		char const *algoName = algo->name();
		size_t algoNameLen = strlen(algoName);
		if (algoNameLen >= nameLen &&
		    strcasecmp(name.c_str(),
			       algoName + algoNameLen - nameLen) == 0 &&
		    (nameLen == algoNameLen ||
		     algoName[algoNameLen - nameLen - 1] == '.'))
			return algo.get();
	}
	return nullptr;
}
/* Name of the hardware target read from the tuning file. */
const std::string &Controller::getTarget() const
{
	return target_;
}
/* Hardware description for the current target. */
const Controller::HardwareConfig &Controller::getHardwareConfig() const
{
	auto cfg = HardwareConfigMap.find(getTarget());
	/*
	 * This really should not happen, the IPA ought to validate the target
	 * on initialisation.
	 */
	ASSERT(cfg != HardwareConfigMap.end());
	return cfg->second;
}
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/denoise_status.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019-2021, Raspberry Pi Ltd
*
* Denoise control algorithm status
*/
#pragma once
/* This stores the parameters required for Denoise. */
struct DenoiseStatus {
	double noiseConstant;
	double noiseSlope;
	double strength;
	unsigned int mode;
};
/* Parameters for the SDN block (two-stage noise profile). */
struct SdnStatus {
	double noiseConstant;
	double noiseSlope;
	double noiseConstant2;
	double noiseSlope2;
	double strength;
};
/* Parameters for the CDN block. */
struct CdnStatus {
	double strength;
	double threshold;
};
/* Parameters for the TDN block. */
struct TdnStatus {
	double noiseConstant;
	double noiseSlope;
	double threshold;
};
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/histogram.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* histogram calculations
*/
#include <math.h>
#include <stdio.h>
#include "histogram.h"
using namespace RPiController;
/*
 * Cumulative frequency of the histogram up to the given (fractional) bin,
 * interpolating linearly within the bin. Values outside the histogram are
 * clamped to 0 or the total count.
 */
uint64_t Histogram::cumulativeFreq(double bin) const
{
	if (bin <= 0)
		return 0;
	if (bin >= bins())
		return total();
	int whole = (int)bin;
	double frac = bin - whole;
	return cumulative_[whole] +
	       frac * (cumulative_[whole + 1] - cumulative_[whole]);
}
/*
 * Return the (fractional) bin at which the cumulative frequency reaches the
 * proportion q (0 <= q <= 1) of the total. first/last optionally restrict
 * the bins searched; -1 means unrestricted.
 */
double Histogram::quantile(double q, int first, int last) const
{
	if (first == -1)
		first = 0;
	if (last == -1)
		last = cumulative_.size() - 2;
	assert(first <= last);
	uint64_t items = q * total();
	while (first < last) /* binary search to find the right bin */
	{
		int middle = (first + last) / 2;
		if (cumulative_[middle + 1] > items)
			last = middle; /* between first and middle */
		else
			first = middle + 1; /* after middle */
	}
	assert(items >= cumulative_[first] && items <= cumulative_[last + 1]);
	/* Interpolate linearly within the bin that was found. */
	double frac = cumulative_[first + 1] == cumulative_[first] ? 0
		      : (double)(items - cumulative_[first]) /
			  (cumulative_[first + 1] - cumulative_[first]);
	return first + frac;
}
/*
 * Frequency-weighted mean bin value over the (fractional) interval
 * [binLo, binHi], offset by 0.5 so bin mid-points are averaged.
 */
double Histogram::interBinMean(double binLo, double binHi) const
{
	assert(binHi >= binLo);
	double sumBinFreq = 0, cumulFreq = 0;
	/* Walk the interval one bin at a time, weighting by bin coverage. */
	for (double binNext = floor(binLo) + 1.0; binNext <= ceil(binHi);
	     binLo = binNext, binNext += 1.0) {
		int bin = floor(binLo);
		double freq = (cumulative_[bin + 1] - cumulative_[bin]) *
			      (std::min(binNext, binHi) - binLo);
		sumBinFreq += bin * freq;
		cumulFreq += freq;
	}
	if (cumulFreq == 0) {
		/* interval had zero width or contained no weight? */
		return binHi;
	}
	/* add 0.5 to give an average for bin mid-points */
	return sumBinFreq / cumulFreq + 0.5;
}
/* Mean bin value between the two quantiles qLo and qHi. */
double Histogram::interQuantileMean(double qLo, double qHi) const
{
	assert(qHi >= qLo);
	double pLo = quantile(qLo);
	/* Reuse pLo as the lower search bound for the second quantile. */
	double pHi = quantile(qHi, (int)pLo);
	return interBinMean(pLo, pHi);
}
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/dpc_status.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* DPC (defective pixel correction) control algorithm status
*/
#pragma once
/* The "DPC" algorithm sets defective pixel correction strength. */
struct DpcStatus {
	/* Correction strength to program into the DPC block. */
	int strength; /* 0 = "off", 1 = "normal", 2 = "strong" */
};
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/alsc_status.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* ALSC (auto lens shading correction) control algorithm status
*/
#pragma once
#include <vector>
/*
* The ALSC algorithm should post the following structure into the image's
* "alsc.status" metadata.
*/
struct AlscStatus {
	/* Per-channel shading tables (presumably rows x cols entries — confirm with producer). */
	std::vector<double> r;
	std::vector<double> g;
	std::vector<double> b;
	unsigned int rows;
	unsigned int cols;
};
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/noise_status.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* Noise control algorithm status
*/
#pragma once
/* The "noise" algorithm stores an estimate of the noise profile for this image. */
struct NoiseStatus {
	/* Noise profile parameters: constant and slope terms. */
	double noiseConstant;
	double noiseSlope;
};
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/region_stats.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2022, Raspberry Pi Ltd
*
* Raspberry Pi region based statistics container
*/
#pragma once
#include <array>
#include <stdint.h>
#include <vector>
#include <libcamera/geometry.h>
namespace RPiController {
/*
 * Container for per-region statistics: a 2D grid of regions, optionally
 * followed by a number of "floating" regions stored at the end of the same
 * array.
 */
template<typename T>
class RegionStats
{
public:
	struct Region {
		T val;
		uint32_t counted;
		uint32_t uncounted;
	};
	RegionStats()
		: size_({}), numFloating_(0), default_({})
	{
	}
	/* Initialise a 2D grid of regions plus numFloating extra regions. */
	void init(const libcamera::Size &size, unsigned int numFloating = 0)
	{
		size_ = size;
		numFloating_ = numFloating;
		regions_.clear();
		regions_.resize(size_.width * size_.height + numFloating_);
	}
	/* Initialise a flat array of num regions with no floating regions. */
	void init(unsigned int num)
	{
		size_ = libcamera::Size(num, 1);
		numFloating_ = 0;
		regions_.clear();
		regions_.resize(num);
	}
	/* Number of grid regions, excluding any floating regions. */
	unsigned int numRegions() const
	{
		return size_.width * size_.height;
	}
	unsigned int numFloatingRegions() const
	{
		return numFloating_;
	}
	libcamera::Size size() const
	{
		return size_;
	}
	/* All accessors silently ignore (or default) out-of-range indices. */
	void set(unsigned int index, const Region &region)
	{
		if (index >= numRegions())
			return;
		set_(index, region);
	}
	void set(const libcamera::Point &pos, const Region &region)
	{
		set(pos.y * size_.width + pos.x, region);
	}
	void setFloating(unsigned int index, const Region &region)
	{
		if (index >= numFloatingRegions())
			return;
		/*
		 * Bypass set(): its grid bounds check would always reject
		 * numRegions() + index and silently make this a no-op.
		 * Mirrors getFloating(), which calls get_() directly.
		 */
		set_(numRegions() + index, region);
	}
	const Region &get(unsigned int index) const
	{
		if (index >= numRegions())
			return default_;
		return get_(index);
	}
	const Region &get(const libcamera::Point &pos) const
	{
		return get(pos.y * size_.width + pos.x);
	}
	const Region &getFloating(unsigned int index) const
	{
		if (index >= numFloatingRegions())
			return default_;
		return get_(numRegions() + index);
	}
	typename std::vector<Region>::iterator begin() { return regions_.begin(); }
	typename std::vector<Region>::iterator end() { return regions_.end(); }
	typename std::vector<Region>::const_iterator begin() const { return regions_.begin(); }
	typename std::vector<Region>::const_iterator end() const { return regions_.end(); }
private:
	/* Unchecked accessors; callers must have validated the index. */
	void set_(unsigned int index, const Region &region)
	{
		regions_[index] = region;
	}
	const Region &get_(unsigned int index) const
	{
		return regions_[index];
	}
	libcamera::Size size_;
	unsigned int numFloating_;
	std::vector<Region> regions_;
	Region default_;
};
} /* namespace RPiController */
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/algorithm.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* ISP control algorithm interface
*/
#pragma once
/*
* All algorithms should be derived from this class and made available to the
* Controller.
*/
#include <string>
#include <memory>
#include <map>
#include "libcamera/internal/yaml_parser.h"
#include "controller.h"
namespace RPiController {
/* This defines the basic interface for all control algorithms. */
class Algorithm
{
public:
	Algorithm(Controller *controller)
		: controller_(controller)
	{
	}
	virtual ~Algorithm() = default;
	/* Name identifying this algorithm, used for lookup and logging. */
	virtual char const *name() const = 0;
	/* Configure the algorithm from its tuning file parameters. */
	virtual int read(const libcamera::YamlObject &params);
	/* One-time initialisation before any frames are processed. */
	virtual void initialise();
	/* Notification that the camera mode has changed. */
	virtual void switchMode(CameraMode const &cameraMode, Metadata *metadata);
	/* Per-frame phase run with the image metadata before statistics arrive. */
	virtual void prepare(Metadata *imageMetadata);
	/* Per-frame phase run with the completed frame's statistics. */
	virtual void process(StatisticsPtr &stats, Metadata *imageMetadata);
	/* Metadata shared between all algorithms owned by the Controller. */
	Metadata &getGlobalMetadata() const
	{
		return controller_->getGlobalMetadata();
	}
	const std::string &getTarget() const
	{
		return controller_->getTarget();
	}
	const Controller::HardwareConfig &getHardwareConfig() const
	{
		return controller_->getHardwareConfig();
	}
private:
	Controller *controller_;
};
/*
* This code is for automatic registration of Front End algorithms with the
* system.
*/
typedef Algorithm *(*AlgoCreateFunc)(Controller *controller);
/* Static instances of this struct register an algorithm under a name. */
struct RegisterAlgorithm {
	RegisterAlgorithm(char const *name, AlgoCreateFunc createFunc);
};
/* Global map of all registered algorithm creation functions. */
std::map<std::string, AlgoCreateFunc> const &getAlgorithms();
} /* namespace RPiController */
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/controller.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* ISP controller interface
*/
#pragma once
/*
* The Controller is simply a container for a collecting together a number of
* "control algorithms" (such as AWB etc.) and for running them all in a
* convenient manner.
*/
#include <vector>
#include <string>
#include <libcamera/base/utils.h>
#include "libcamera/internal/yaml_parser.h"
#include "camera_mode.h"
#include "device_status.h"
#include "metadata.h"
#include "statistics.h"
namespace RPiController {
class Algorithm;
typedef std::unique_ptr<Algorithm> AlgorithmPtr;
/*
* The Controller holds a pointer to some global_metadata, which is how
* different controllers and control algorithms within them can exchange
* information. The Prepare function returns a pointer to metadata for this
* specific image, and which should be passed on to the Process function.
*/
class Controller
{
public:
	/* Per-target description of the ISP statistics/pipeline geometry. */
	struct HardwareConfig {
		libcamera::Size agcRegions;
		libcamera::Size agcZoneWeights;
		libcamera::Size awbRegions;
		libcamera::Size cacRegions;
		libcamera::Size focusRegions;
		unsigned int numHistogramBins;
		unsigned int numGammaPoints;
		unsigned int pipelineWidth;
		bool statsInline;
		libcamera::utils::Duration minPixelProcessingTime;
	};
	Controller();
	~Controller();
	/* Load a tuning file and instantiate the algorithms it lists. */
	int read(char const *filename);
	void initialise();
	/* Must be called at least once before prepare()/process(). */
	void switchMode(CameraMode const &cameraMode, Metadata *metadata);
	void prepare(Metadata *imageMetadata);
	void process(StatisticsPtr stats, Metadata *imageMetadata);
	Metadata &getGlobalMetadata();
	/* Case-insensitive lookup by (optionally dot-suffixed) name. */
	Algorithm *getAlgorithm(std::string const &name) const;
	const std::string &getTarget() const;
	const HardwareConfig &getHardwareConfig() const;
protected:
	int createAlgorithm(const std::string &name, const libcamera::YamlObject &params);
	Metadata globalMetadata_;
	std::vector<AlgorithmPtr> algorithms_;
	/* Guards against prepare()/process() running before switchMode(). */
	bool switchModeCalled_;
private:
	std::string target_;
};
} /* namespace RPiController */
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/device_status.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019-2021, Raspberry Pi Ltd
*
* device (image sensor) status
*/
#pragma once
#include <iostream>
#include <optional>
#include <libcamera/base/utils.h>
/*
* Definition of "device metadata" which stores things like shutter time and
* analogue gain that downstream control algorithms will want to know.
*/
struct DeviceStatus {
	DeviceStatus()
		: shutterSpeed(std::chrono::seconds(0)), frameLength(0),
		  lineLength(std::chrono::seconds(0)), analogueGain(0.0)
	{
	}

	friend std::ostream &operator<<(std::ostream &out, const DeviceStatus &d);

	/* time shutter is open */
	libcamera::utils::Duration shutterSpeed;
	/* frame length given in number of lines */
	uint32_t frameLength;
	/* line length for the current frame */
	libcamera::utils::Duration lineLength;
	/* analogue gain applied by the sensor */
	double analogueGain;
	/* 1.0/distance-in-metres */
	std::optional<double> lensPosition;
	/* 1/f so that brightness quadruples when this doubles */
	std::optional<double> aperture;
	/* proportional to brightness with 0 = no flash, 1 = maximum flash */
	std::optional<double> flashIntensity;
	/* Sensor reported temperature value (in degrees) */
	std::optional<double> sensorTemperature;
};
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/contrast_status.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* contrast (gamma) control algorithm status
*/
#pragma once
#include "libipa/pwl.h"
/*
* The "contrast" algorithm creates a gamma curve, optionally doing a little bit
* of contrast stretching based on the AGC histogram.
*/
struct ContrastStatus {
	/* Final gamma curve to be programmed into the pipeline. */
	libcamera::ipa::Pwl gammaCurve;
	/* Manual brightness offset that was applied when building the curve. */
	double brightness;
	/* Manual contrast multiplier that was applied when building the curve. */
	double contrast;
};
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/sharpen_algorithm.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
* sharpness control algorithm interface
*/
#pragma once
#include "algorithm.h"
namespace RPiController {
/* Abstract interface implemented by all sharpness control algorithms. */
class SharpenAlgorithm : public Algorithm
{
public:
	SharpenAlgorithm(Controller *controller) : Algorithm(controller) {}
	/* A sharpness control algorithm must provide the following: */
	virtual void setStrength(double strength) = 0;
};
} /* namespace RPiController */
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/denoise_algorithm.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2021, Raspberry Pi Ltd
*
* Denoise control algorithm interface
*/
#pragma once
#include <string>
#include "algorithm.h"
namespace RPiController {
/* Selectable denoise operating modes. */
enum class DenoiseMode { Off, ColourOff, ColourFast, ColourHighQuality };

/* Abstract interface implemented by all denoise control algorithms. */
class DenoiseAlgorithm : public Algorithm
{
public:
	DenoiseAlgorithm(Controller *controller) : Algorithm(controller) {}
	/* A Denoise algorithm must provide the following: */
	virtual void setMode(DenoiseMode mode) = 0;
	/* Some platforms may not be able to define this, so supply a default. */
	virtual void setConfig([[maybe_unused]] std::string const &name) {}
};
} /* namespace RPiController */
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/statistics.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2022, Raspberry Pi Ltd
*
* Raspberry Pi generic statistics structure
*/
#pragma once
#include <memory>
#include <stdint.h>
#include <vector>
#include "histogram.h"
#include "region_stats.h"
namespace RPiController {
/* Accumulated R/G/B/Y pixel sums for one statistics region. */
struct RgbySums {
	RgbySums(uint64_t _rSum = 0, uint64_t _gSum = 0, uint64_t _bSum = 0, uint64_t _ySum = 0)
		: rSum(_rSum), gSum(_gSum), bSum(_bSum), ySum(_ySum)
	{
	}
	uint64_t rSum;
	uint64_t gSum;
	uint64_t bSum;
	uint64_t ySum;
};
using RgbyRegions = RegionStats<RgbySums>;
using FocusRegions = RegionStats<uint64_t>;
struct Statistics {
	/*
	 * All region based statistics are normalised to 16-bits, giving a
	 * maximum value of (1 << NormalisationFactorPow2) - 1.
	 */
	static constexpr unsigned int NormalisationFactorPow2 = 16;

	/*
	 * Positioning of the AGC statistics gathering in the pipeline:
	 * Pre-WB correction or post-WB correction.
	 * Assume this is post-LSC.
	 */
	enum class AgcStatsPos { PreWb, PostWb };
	const AgcStatsPos agcStatsPos;

	/*
	 * Positioning of the AWB/ALSC statistics gathering in the pipeline:
	 * Pre-LSC or post-LSC.
	 */
	enum class ColourStatsPos { PreLsc, PostLsc };
	const ColourStatsPos colourStatsPos;

	/* The stats positions are const, so must be supplied at construction. */
	Statistics(AgcStatsPos a, ColourStatsPos c)
		: agcStatsPos(a), colourStatsPos(c)
	{
	}

	/* Histogram statistics. Not all histograms may be populated! */
	Histogram rHist;
	Histogram gHist;
	Histogram bHist;
	Histogram yHist;

	/* Row sums for flicker avoidance. */
	std::vector<RgbySums> rowSums;

	/* Region based colour sums. */
	RgbyRegions agcRegions;
	RgbyRegions awbRegions;

	/* Region based focus FoM. */
	FocusRegions focusRegions;
};
using StatisticsPtr = std::shared_ptr<Statistics>;
} /* namespace RPiController */
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/cac_status.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2023 Raspberry Pi Ltd
*
* CAC (Chromatic Abberation Correction) algorithm status
*/
#pragma once

#include <vector>
/*
 * The CAC algorithm produces four lookup tables: correction offsets for the
 * red and blue planes, each in x and y.
 *
 * Note: this header previously relied on a transitive include for
 * std::vector; it now includes <vector> itself so it is self-contained.
 */
struct CacStatus {
	std::vector<double> lutRx;
	std::vector<double> lutRy;
	std::vector<double> lutBx;
	std::vector<double> lutBy;
};
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/awb_algorithm.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* AWB control algorithm interface
*/
#pragma once
#include "algorithm.h"
namespace RPiController {
/* Abstract interface implemented by all AWB control algorithms. */
class AwbAlgorithm : public Algorithm
{
public:
	AwbAlgorithm(Controller *controller) : Algorithm(controller) {}
	/* An AWB algorithm must provide the following: */
	/* Number of frames the algorithm needs before its outputs settle. */
	virtual unsigned int getConvergenceFrames() const = 0;
	/* Provide initial red/blue gain values via the out-parameters. */
	virtual void initialValues(double &gainR, double &gainB) = 0;
	virtual void setMode(std::string const &modeName) = 0;
	virtual void setManualGains(double manualR, double manualB) = 0;
	virtual void enableAuto() = 0;
	virtual void disableAuto() = 0;
};
} /* namespace RPiController */
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/ccm_status.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* CCM (colour correction matrix) control algorithm status
*/
#pragma once
/* The "ccm" algorithm generates an appropriate colour matrix. */
struct CcmStatus {
	/* 3x3 colour correction matrix stored as 9 values (presumably row-major — confirm against consumer). */
	double matrix[9];
	/* Saturation factor applied alongside the matrix. */
	double saturation;
};
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/camera_mode.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019-2020, Raspberry Pi Ltd
*
* description of a particular operating mode of a sensor
*/
#pragma once
#include <libcamera/transform.h>
#include <libcamera/base/utils.h>
/*
* Description of a "camera mode", holding enough information for control
* algorithms to adapt their behaviour to the different modes of the camera,
* including binning, scaling, cropping etc.
*/
struct CameraMode {
	/* bit depth of the raw camera output */
	uint32_t bitdepth;
	/* size in pixels of frames in this mode */
	uint16_t width;
	uint16_t height;
	/* size of full resolution uncropped frame ("sensor frame") */
	uint16_t sensorWidth;
	uint16_t sensorHeight;
	/* binning factor (1 = no binning, 2 = 2-pixel binning etc.) */
	uint8_t binX;
	uint8_t binY;
	/* location of top left pixel in the sensor frame */
	uint16_t cropX;
	uint16_t cropY;
	/* scaling factor (so if uncropped, width*scaleX is sensorWidth) */
	double scaleX;
	double scaleY;
	/* scaling of the noise compared to the native sensor mode */
	double noiseFactor;
	/* minimum and maximum line time and frame durations */
	libcamera::utils::Duration minLineLength;
	libcamera::utils::Duration maxLineLength;
	libcamera::utils::Duration minFrameDuration;
	libcamera::utils::Duration maxFrameDuration;
	/* any camera transform *not* reflected already in the camera tuning */
	libcamera::Transform transform;
	/* minimum and maximum frame lengths in units of lines */
	uint32_t minFrameLength;
	uint32_t maxFrameLength;
	/* sensitivity of this mode */
	double sensitivity;
	/* pixel clock rate (presumably pixels per second — confirm against sensor driver) */
	uint64_t pixelRate;
	/* Mode specific shutter speed limits */
	libcamera::utils::Duration minShutter;
	libcamera::utils::Duration maxShutter;
	/* Mode specific analogue gain limits */
	double minAnalogueGain;
	double maxAnalogueGain;
};
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/tonemap_status.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2022 Raspberry Pi Ltd
*
* Tonemap control algorithm status
*/
#pragma once
#include <libipa/pwl.h>
/* Parameters and curve posted by the tonemap control algorithm. */
struct TonemapStatus {
	/* NOTE(review): uint16_t relies on a transitive include of <stdint.h> via pwl.h — confirm. */
	uint16_t detailConstant;
	double detailSlope;
	double iirStrength;
	double strength;
	/* The tonemapping curve itself. */
	libcamera::ipa::Pwl tonemap;
};
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/device_status.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2021, Raspberry Pi Ltd
*
* device (image sensor) status
*/
#include "device_status.h"
using namespace libcamera; /* for the Duration operator<< overload */
/*
 * Stream a DeviceStatus in a human-readable single-line form. Mandatory
 * fields are always printed; the std::optional fields are appended only
 * when they hold a value.
 */
std::ostream &operator<<(std::ostream &out, const DeviceStatus &d)
{
	out << "Exposure: " << d.shutterSpeed
	    << " Frame length: " << d.frameLength
	    << " Line length: " << d.lineLength
	    << " Gain: " << d.analogueGain;

	/* Helper to print an optional field only when it is populated. */
	auto emit = [&out](const char *label, const std::optional<double> &field) {
		if (field)
			out << label << *field;
	};
	emit(" Aperture: ", d.aperture);
	emit(" Lens: ", d.lensPosition);
	emit(" Flash: ", d.flashIntensity);
	emit(" Temperature: ", d.sensorTemperature);

	return out;
}
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/algorithm.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* ISP control algorithms
*/
#include "algorithm.h"
using namespace RPiController;
/*
 * Default implementations of the Algorithm hooks: reading parameters
 * succeeds with nothing to do, and every processing stage is a no-op, so
 * derived algorithms override only the stages they need.
 */
int Algorithm::read([[maybe_unused]] const libcamera::YamlObject &params)
{
	return 0;
}

void Algorithm::initialise()
{
}

void Algorithm::switchMode([[maybe_unused]] CameraMode const &cameraMode,
			   [[maybe_unused]] Metadata *metadata)
{
}

void Algorithm::prepare([[maybe_unused]] Metadata *imageMetadata)
{
}

void Algorithm::process([[maybe_unused]] StatisticsPtr &stats,
			[[maybe_unused]] Metadata *imageMetadata)
{
}
/* For registering algorithms with the system: */
namespace {

/*
 * The registry is a function-local static (Meyers singleton) so it is
 * guaranteed to be constructed before the first RegisterAlgorithm static
 * object uses it, regardless of static initialisation order.
 */
std::map<std::string, AlgoCreateFunc> &algorithms()
{
	static std::map<std::string, AlgoCreateFunc> algorithms;
	return algorithms;
}

} /* namespace */

/* Read-only view of the registered name -> factory map. */
std::map<std::string, AlgoCreateFunc> const &RPiController::getAlgorithms()
{
	return algorithms();
}

/* Constructing one of these (normally as a file-scope static) registers a factory. */
RegisterAlgorithm::RegisterAlgorithm(char const *name,
				     AlgoCreateFunc createFunc)
{
	algorithms()[std::string(name)] = createFunc;
}
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/af_algorithm.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2022, Raspberry Pi Ltd
*
* af_algorithm.hpp - auto focus algorithm interface
*/
#pragma once
#include <optional>
#include <libcamera/base/span.h>
#include "algorithm.h"
namespace RPiController {
class AfAlgorithm : public Algorithm
{
public:
	AfAlgorithm(Controller *controller)
		: Algorithm(controller) {}

	/*
	 * An autofocus algorithm should provide the following calls.
	 *
	 * Where a ControlList combines a change of AfMode with other AF
	 * controls, setMode() should be called first, to ensure the
	 * algorithm will be in the correct state to handle controls.
	 *
	 * setLensPosition() returns true if the mode was AfModeManual and
	 * the lens position has changed, otherwise returns false. When it
	 * returns true, hwpos should be sent immediately to the lens driver.
	 *
	 * getMode() is provided mainly for validating controls.
	 * getLensPosition() is provided for populating DeviceStatus.
	 */

	/* Focus distance ranges the algorithm can be restricted to. */
	enum AfRange { AfRangeNormal = 0,
		       AfRangeMacro,
		       AfRangeFull,
		       AfRangeMax };

	/* Scan/convergence speed settings. */
	enum AfSpeed { AfSpeedNormal = 0,
		       AfSpeedFast,
		       AfSpeedMax };

	enum AfMode { AfModeManual = 0,
		      AfModeAuto,
		      AfModeContinuous };

	enum AfPause { AfPauseImmediate = 0,
		       AfPauseDeferred,
		       AfPauseResume };

	/* The setters below are optional; default implementations do nothing. */
	virtual void setRange([[maybe_unused]] AfRange range)
	{
	}
	virtual void setSpeed([[maybe_unused]] AfSpeed speed)
	{
	}
	virtual void setMetering([[maybe_unused]] bool use_windows)
	{
	}
	virtual void setWindows([[maybe_unused]] libcamera::Span<libcamera::Rectangle const> const &wins)
	{
	}
	virtual void setMode(AfMode mode) = 0;
	virtual AfMode getMode() const = 0;
	virtual bool setLensPosition(double dioptres, int32_t *hwpos) = 0;
	virtual std::optional<double> getLensPosition() const = 0;
	virtual void triggerScan() = 0;
	virtual void cancelScan() = 0;
	virtual void pause(AfPause pause) = 0;
};
} // namespace RPiController
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/geq_status.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* GEQ (green equalisation) control algorithm status
*/
#pragma once

#include <stdint.h>
/* The "GEQ" algorithm calculates the green equalisation thresholds */
/*
 * Green equalisation threshold parameters.
 *
 * Note: this header previously relied on a transitive include for
 * uint16_t; it now includes <stdint.h> itself so it is self-contained.
 */
struct GeqStatus {
	/* Constant part of the green equalisation threshold. */
	uint16_t offset;
	/* Threshold slope — presumably as a function of pixel level; confirm against consumer. */
	double slope;
};
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/metadata.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019-2021, Raspberry Pi Ltd
*
* general metadata class
*/
#pragma once
/* A simple class for carrying arbitrary metadata, for example about an image. */
#include <any>
#include <map>
#include <mutex>
#include <string>
#include <libcamera/base/thread_annotations.h>
namespace RPiController {
/*
 * Thread-safe key/value store mapping string tags to arbitrary values
 * (std::any). All public accessors take the internal mutex; the *Locked
 * variants are for callers already holding it via lock()/unlock().
 */
class LIBCAMERA_TSA_CAPABILITY("mutex") Metadata
{
public:
	Metadata() = default;

	Metadata(Metadata const &other)
	{
		std::scoped_lock otherLock(other.mutex_);
		data_ = other.data_;
	}

	Metadata(Metadata &&other)
	{
		std::scoped_lock otherLock(other.mutex_);
		data_ = std::move(other.data_);
		other.data_.clear();
	}

	/* Insert or overwrite the value stored under "tag". */
	template<typename T>
	void set(std::string const &tag, T const &value)
	{
		std::scoped_lock lock(mutex_);
		data_[tag] = value;
	}

	/*
	 * Copy the value stored under "tag" into "value". Returns 0 on
	 * success, -1 if the tag is absent. Throws std::bad_any_cast if T
	 * does not match the stored type.
	 */
	template<typename T>
	int get(std::string const &tag, T &value) const
	{
		std::scoped_lock lock(mutex_);
		auto it = data_.find(tag);
		if (it == data_.end())
			return -1;
		value = std::any_cast<T>(it->second);
		return 0;
	}

	void clear()
	{
		std::scoped_lock lock(mutex_);
		data_.clear();
	}

	Metadata &operator=(Metadata const &other)
	{
		/* scoped_lock acquires both mutexes deadlock-free. */
		std::scoped_lock lock(mutex_, other.mutex_);
		data_ = other.data_;
		return *this;
	}

	Metadata &operator=(Metadata &&other)
	{
		std::scoped_lock lock(mutex_, other.mutex_);
		data_ = std::move(other.data_);
		other.data_.clear();
		return *this;
	}

	/* Move entries from "other" into this map; keys already present here win. */
	void merge(Metadata &other)
	{
		std::scoped_lock lock(mutex_, other.mutex_);
		data_.merge(other.data_);
	}

	void mergeCopy(const Metadata &other)
	{
		std::scoped_lock lock(mutex_, other.mutex_);
		/*
		 * If the metadata key exists, ignore this item and copy only
		 * unique key/value pairs.
		 */
		data_.insert(other.data_.begin(), other.data_.end());
	}

	/*
	 * Pointer to the stored value, or nullptr when the tag is absent or
	 * the stored type is not T. Caller must hold the lock.
	 */
	template<typename T>
	T *getLocked(std::string const &tag)
	{
		/*
		 * This allows in-place access to the Metadata contents,
		 * for which you should be holding the lock.
		 */
		auto it = data_.find(tag);
		if (it == data_.end())
			return nullptr;
		return std::any_cast<T>(&it->second);
	}

	template<typename T>
	void setLocked(std::string const &tag, T const &value)
	{
		/* Use this only if you're holding the lock yourself. */
		data_[tag] = value;
	}

	/*
	 * Note: use of (lowercase) lock and unlock means you can create scoped
	 * locks with the standard lock classes.
	 * e.g. std::lock_guard<RPiController::Metadata> lock(metadata)
	 */
	void lock() LIBCAMERA_TSA_ACQUIRE() { mutex_.lock(); }
	auto try_lock() LIBCAMERA_TSA_ACQUIRE() { return mutex_.try_lock(); }
	void unlock() LIBCAMERA_TSA_RELEASE() { mutex_.unlock(); }

private:
	mutable std::mutex mutex_;
	std::map<std::string, std::any> data_;
};
} /* namespace RPiController */
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/contrast_algorithm.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* contrast (gamma) control algorithm interface
*/
#pragma once
#include "algorithm.h"
namespace RPiController {
/* Abstract interface implemented by all contrast (gamma) control algorithms. */
class ContrastAlgorithm : public Algorithm
{
public:
	ContrastAlgorithm(Controller *controller) : Algorithm(controller) {}
	/* A contrast algorithm must provide the following: */
	virtual void setBrightness(double brightness) = 0;
	virtual void setContrast(double contrast) = 0;
	/* Enable/disable adaptive contrast enhancement; restoreCe() reverts to the tuned default. */
	virtual void enableCe(bool enable) = 0;
	virtual void restoreCe() = 0;
};
} /* namespace RPiController */
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/pdaf_data.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2022, Raspberry Pi Ltd
*
* PDAF Metadata
*/
#pragma once
#include <stdint.h>
#include "region_stats.h"
namespace RPiController {
/* One phase-detect AF sample, as gathered per statistics region. */
struct PdafData {
	/* Confidence, in arbitrary units */
	uint16_t conf;
	/* Phase error, in s16 Q4 format (S.11.4) */
	int16_t phase;
};
using PdafRegions = RegionStats<PdafData>;
} /* namespace RPiController */
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/saturation_status.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2022 Raspberry Pi Ltd
*
* Saturation control algorithm status
*/
#pragma once

#include <stdint.h>
/*
 * Per-channel shift values posted by the saturation control algorithm.
 *
 * Note: this header previously relied on a transitive include for
 * uint8_t; it now includes <stdint.h> itself so it is self-contained.
 */
struct SaturationStatus {
	uint8_t shiftR;
	uint8_t shiftG;
	uint8_t shiftB;
};
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/awb_status.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* AWB control algorithm status
*/
#pragma once
/*
* The AWB algorithm places its results into both the image and global metadata,
* under the tag "awb.status".
*/
struct AwbStatus {
	/* Fixed-size buffer holding the AWB mode name. */
	char mode[32];
	/* Estimated colour temperature in Kelvin. */
	double temperatureK;
	/* Channel gains chosen by the algorithm. */
	double gainR;
	double gainG;
	double gainB;
};
|
0 | repos/libcamera/src/ipa/rpi | repos/libcamera/src/ipa/rpi/controller/hdr_status.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2023 Raspberry Pi Ltd
*
* HDR control algorithm status
*/
#pragma once
#include <string>
/*
* The HDR algorithm process method should post an HdrStatus into the image
* metadata under the tag "hdr.status".
*/
struct HdrStatus {
	/* Name of the HDR mode in operation. */
	std::string mode;
	/* Name of the HDR channel this frame belongs to. */
	std::string channel;
};
|
0 | repos/libcamera/src/ipa/rpi/controller | repos/libcamera/src/ipa/rpi/controller/rpi/contrast.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* contrast (gamma) control algorithm
*/
#include <stdint.h>
#include <libcamera/base/log.h>
#include "../contrast_status.h"
#include "../histogram.h"
#include "contrast.h"
using namespace RPiController;
using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiContrast)
/*
* This is a very simple control algorithm which simply retrieves the results of
* AGC and AWB via their "status" metadata, and applies digital gain to the
* colour channels in accordance with those instructions. We take care never to
* apply less than unity gains, as that would cause fully saturated pixels to go
* off-white.
*/
#define NAME "rpi.contrast"
/* Default to no brightness offset and unity contrast (i.e. no adjustment). */
Contrast::Contrast(Controller *controller)
	: ContrastAlgorithm(controller), brightness_(0.0), contrast_(1.0)
{
}

/* Return the algorithm's registered name ("rpi.contrast"). */
char const *Contrast::name() const
{
	return NAME;
}
/*
 * Read tuning parameters, substituting defaults for any that are absent.
 * The gamma curve is mandatory: return -EINVAL if it is missing/empty.
 */
int Contrast::read(const libcamera::YamlObject &params)
{
	// enable adaptive enhancement by default
	config_.ceEnable = params["ce_enable"].get<int>(1);
	ceEnable_ = config_.ceEnable;
	// the point near the bottom of the histogram to move
	config_.loHistogram = params["lo_histogram"].get<double>(0.01);
	// where in the range to try and move it to
	config_.loLevel = params["lo_level"].get<double>(0.015);
	// but don't move by more than this
	config_.loMax = params["lo_max"].get<double>(500);
	// equivalent values for the top of the histogram...
	config_.hiHistogram = params["hi_histogram"].get<double>(0.95);
	config_.hiLevel = params["hi_level"].get<double>(0.95);
	config_.hiMax = params["hi_max"].get<double>(2000);
	config_.gammaCurve = params["gamma_curve"].get<ipa::Pwl>(ipa::Pwl{});
	return config_.gammaCurve.empty() ? -EINVAL : 0;
}
/* Runtime controls; the new values take effect on the next process() call. */
void Contrast::setBrightness(double brightness)
{
	brightness_ = brightness;
}

void Contrast::setContrast(double contrast)
{
	contrast_ = contrast;
}

void Contrast::enableCe(bool enable)
{
	ceEnable_ = enable;
}

/* Revert contrast enhancement to the tuning-file default. */
void Contrast::restoreCe()
{
	ceEnable_ = config_.ceEnable;
}
void Contrast::initialise()
{
	/*
	 * Fill in some default values as Prepare will run before Process gets
	 * called.
	 */
	status_.brightness = brightness_;
	status_.contrast = contrast_;
	status_.gammaCurve = config_.gammaCurve;
}
/* Publish the most recently computed gamma curve for this frame. */
void Contrast::prepare(Metadata *imageMetadata)
{
	imageMetadata->set("contrast.status", status_);
}
/*
 * Build a piecewise-linear "enhancement" curve in the 16-bit domain that
 * pulls a low histogram quantile down towards loLevel and a high quantile
 * up towards hiLevel (each limited by loMax/hiMax), while pinning the
 * median in place to limit the apparent global brightness shift.
 */
ipa::Pwl computeStretchCurve(Histogram const &histogram,
			     ContrastConfig const &config)
{
	ipa::Pwl enhance;
	enhance.append(0, 0);
	/*
	 * If the start of the histogram is rather empty, try to pull it down a
	 * bit.
	 */
	double histLo = histogram.quantile(config.loHistogram) *
			(65536 / histogram.bins());
	double levelLo = config.loLevel * 65536;
	LOG(RPiContrast, Debug)
		<< "Move histogram point " << histLo << " to " << levelLo;
	/* Clamp so we never move further than loMax, and stay in range. */
	histLo = std::max(levelLo,
			  std::min(65535.0, std::min(histLo, levelLo + config.loMax)));
	LOG(RPiContrast, Debug)
		<< "Final values " << histLo << " -> " << levelLo;
	enhance.append(histLo, levelLo);
	/*
	 * Keep the mid-point (median) in the same place, though, to limit the
	 * apparent amount of global brightness shift.
	 */
	double mid = histogram.quantile(0.5) * (65536 / histogram.bins());
	enhance.append(mid, mid);
	/*
	 * If the top to the histogram is empty, try to pull the pixel values
	 * there up.
	 */
	double histHi = histogram.quantile(config.hiHistogram) *
			(65536 / histogram.bins());
	double levelHi = config.hiLevel * 65536;
	LOG(RPiContrast, Debug)
		<< "Move histogram point " << histHi << " to " << levelHi;
	/* Clamp so we never move further than hiMax, and stay in range. */
	histHi = std::min(levelHi,
			  std::max(0.0, std::max(histHi, levelHi - config.hiMax)));
	LOG(RPiContrast, Debug)
		<< "Final values " << histHi << " -> " << levelHi;
	enhance.append(histHi, levelHi);
	enhance.append(65535, 65535);
	return enhance;
}
/*
 * Produce a new gamma curve from an existing one by scaling each output
 * value about the 16-bit mid-point (32768) by "contrast", then shifting by
 * "brightness", clamping the result into the valid [0, 65535] range.
 */
ipa::Pwl applyManualContrast(ipa::Pwl const &gammaCurve, double brightness,
			     double contrast)
{
	ipa::Pwl adjusted;
	LOG(RPiContrast, Debug)
		<< "Manual brightness " << brightness << " contrast " << contrast;
	gammaCurve.map([&adjusted, brightness, contrast](double x, double y) {
		double v = (y - 32768) * contrast + 32768 + brightness;
		if (v < 0.0)
			v = 0.0;
		else if (v > 65535.0)
			v = 65535.0;
		adjusted.append(x, v);
	});
	return adjusted;
}
/*
 * Recompute the gamma curve for this frame from the luminance histogram
 * (when contrast enhancement is enabled) and the manual brightness/contrast
 * settings, storing the result for the next prepare() call to publish.
 */
void Contrast::process(StatisticsPtr &stats,
		       [[maybe_unused]] Metadata *imageMetadata)
{
	Histogram &histogram = stats->yHist;
	/*
	 * We look at the histogram and adjust the gamma curve in the following
	 * ways: 1. Adjust the gamma curve so as to pull the start of the
	 * histogram down, and possibly push the end up.
	 */
	ipa::Pwl gammaCurve = config_.gammaCurve;
	if (ceEnable_) {
		if (config_.loMax != 0 || config_.hiMax != 0)
			gammaCurve = computeStretchCurve(histogram, config_).compose(gammaCurve);
		/*
		 * We could apply other adjustments (e.g. partial equalisation)
		 * based on the histogram...?
		 */
	}
	/*
	 * 2. Finally apply any manually selected brightness/contrast
	 * adjustment.
	 */
	if (brightness_ != 0 || contrast_ != 1.0)
		gammaCurve = applyManualContrast(gammaCurve, brightness_, contrast_);
	/*
	 * And fill in the status for output. Use more points towards the bottom
	 * of the curve.
	 */
	status_.brightness = brightness_;
	status_.contrast = contrast_;
	status_.gammaCurve = std::move(gammaCurve);
}
/* Register algorithm with the system. */
/* Factory function used by the registry to instantiate this algorithm. */
static Algorithm *create(Controller *controller)
{
	return (Algorithm *)new Contrast(controller);
}
/* File-scope static whose constructor performs the registration. */
static RegisterAlgorithm reg(NAME, &create);
|
0 | repos/libcamera/src/ipa/rpi/controller | repos/libcamera/src/ipa/rpi/controller/rpi/af.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2022-2023, Raspberry Pi Ltd
*
* Autofocus control algorithm
*/
#include "af.h"
#include <iomanip>
#include <math.h>
#include <stdlib.h>
#include <libcamera/base/log.h>
#include <libcamera/control_ids.h>
using namespace RPiController;
using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiAf)
#define NAME "rpi.af"
/*
* Default values for parameters. All may be overridden in the tuning file.
* Many of these values are sensor- or module-dependent; the defaults here
* assume IMX708 in a Raspberry Pi V3 camera with the standard lens.
*
* Here all focus values are in dioptres (1/m). They are converted to hardware
* units when written to status.lensSetting or returned from setLensPosition().
*
* Gain and delay values are relative to the update rate, since much (not all)
* of the delay is in the sensor and (for CDAF) ISP, not the lens mechanism;
* but note that algorithms are updated at no more than 30 Hz.
*/
/* Default tuning values (IMX708 module assumptions); all overridable via the tuning file. */
Af::RangeDependentParams::RangeDependentParams()
	: focusMin(0.0),
	  focusMax(12.0),
	  focusDefault(1.0)
{
}

Af::SpeedDependentParams::SpeedDependentParams()
	: stepCoarse(1.0),
	  stepFine(0.25),
	  contrastRatio(0.75),
	  pdafGain(-0.02),
	  pdafSquelch(0.125),
	  maxSlew(2.0),
	  pdafFrames(20),
	  dropoutFrames(6),
	  stepFrames(4)
{
}

Af::CfgParams::CfgParams()
	: confEpsilon(8),
	  confThresh(16),
	  confClip(512),
	  skipFrames(5),
	  map()
{
}
/*
 * Fetch a single named scalar from the tuning data into "dest", leaving
 * "dest" unchanged (and logging a warning) when the key is missing.
 */
template<typename T>
static void readNumber(T &dest, const libcamera::YamlObject &params, char const *name)
{
	auto value = params[name].get<T>();
	if (value)
		dest = *value;
	else
		LOG(RPiAf, Warning) << "Missing parameter \"" << name << "\"";
}
/* Parse the per-range focus limits and default position. */
void Af::RangeDependentParams::read(const libcamera::YamlObject &params)
{
	readNumber<double>(focusMin, params, "min");
	readNumber<double>(focusMax, params, "max");
	readNumber<double>(focusDefault, params, "default");
}

/* Parse the per-speed scan/convergence parameters. */
void Af::SpeedDependentParams::read(const libcamera::YamlObject &params)
{
	readNumber<double>(stepCoarse, params, "step_coarse");
	readNumber<double>(stepFine, params, "step_fine");
	readNumber<double>(contrastRatio, params, "contrast_ratio");
	readNumber<double>(pdafGain, params, "pdaf_gain");
	readNumber<double>(pdafSquelch, params, "pdaf_squelch");
	readNumber<double>(maxSlew, params, "max_slew");
	readNumber<uint32_t>(pdafFrames, params, "pdaf_frames");
	readNumber<uint32_t>(dropoutFrames, params, "dropout_frames");
	readNumber<uint32_t>(stepFrames, params, "step_frames");
}
/*
 * Parse AF tuning parameters. "macro" defaults from "normal", and "full"
 * defaults to the union of the two (min of mins, max of maxes) before any
 * explicit "full" section overrides it. "fast" speed defaults from
 * "normal". Missing sections only produce warnings.
 */
int Af::CfgParams::read(const libcamera::YamlObject &params)
{
	if (params.contains("ranges")) {
		auto &rr = params["ranges"];

		if (rr.contains("normal"))
			ranges[AfRangeNormal].read(rr["normal"]);
		else
			LOG(RPiAf, Warning) << "Missing range \"normal\"";

		ranges[AfRangeMacro] = ranges[AfRangeNormal];
		if (rr.contains("macro"))
			ranges[AfRangeMacro].read(rr["macro"]);

		ranges[AfRangeFull].focusMin = std::min(ranges[AfRangeNormal].focusMin,
							ranges[AfRangeMacro].focusMin);
		ranges[AfRangeFull].focusMax = std::max(ranges[AfRangeNormal].focusMax,
							ranges[AfRangeMacro].focusMax);
		ranges[AfRangeFull].focusDefault = ranges[AfRangeNormal].focusDefault;
		if (rr.contains("full"))
			ranges[AfRangeFull].read(rr["full"]);
	} else
		LOG(RPiAf, Warning) << "No ranges defined";

	if (params.contains("speeds")) {
		auto &ss = params["speeds"];

		if (ss.contains("normal"))
			speeds[AfSpeedNormal].read(ss["normal"]);
		else
			LOG(RPiAf, Warning) << "Missing speed \"normal\"";

		speeds[AfSpeedFast] = speeds[AfSpeedNormal];
		if (ss.contains("fast"))
			speeds[AfSpeedFast].read(ss["fast"]);
	} else
		LOG(RPiAf, Warning) << "No speeds defined";

	readNumber<uint32_t>(confEpsilon, params, "conf_epsilon");
	readNumber<uint32_t>(confThresh, params, "conf_thresh");
	readNumber<uint32_t>(confClip, params, "conf_clip");
	readNumber<uint32_t>(skipFrames, params, "skip_frames");

	if (params.contains("map"))
		map = params["map"].get<ipa::Pwl>(ipa::Pwl{});
	else
		LOG(RPiAf, Warning) << "No map defined";

	return 0;
}
/* Fill in any defaults that can only be provided after read() has run. */
void Af::CfgParams::initialise()
{
	if (map.empty()) {
		/* Default mapping from dioptres to hardware setting */
		static constexpr double DefaultMapX0 = 0.0;
		static constexpr double DefaultMapY0 = 445.0;
		static constexpr double DefaultMapX1 = 15.0;
		static constexpr double DefaultMapY1 = 925.0;

		map.append(DefaultMapX0, DefaultMapY0);
		map.append(DefaultMapX1, DefaultMapY1);
	}
}
/* Af Algorithm class */

/* Maximum number of user AF windows supported when merging weights. */
static constexpr unsigned MaxWindows = 10;

/* Start in manual mode with an idle scan state and everything zeroed. */
Af::Af(Controller *controller)
	: AfAlgorithm(controller),
	  cfg_(),
	  range_(AfRangeNormal),
	  speed_(AfSpeedNormal),
	  mode_(AfAlgorithm::AfModeManual),
	  pauseFlag_(false),
	  statsRegion_(0, 0, 0, 0),
	  windows_(),
	  useWindows_(false),
	  phaseWeights_(),
	  contrastWeights_(),
	  scanState_(ScanState::Idle),
	  initted_(false),
	  ftarget_(-1.0),
	  fsmooth_(-1.0),
	  prevContrast_(0.0),
	  skipCount_(0),
	  stepCount_(0),
	  dropCount_(0),
	  scanMaxContrast_(0.0),
	  scanMinContrast_(1.0e9),
	  scanData_(),
	  reportState_(AfState::Idle)
{
	/*
	 * Reserve space for data, to reduce memory fragmentation. It's too early
	 * to query the size of the PDAF (from camera) and Contrast (from ISP)
	 * statistics, but these are plausible upper bounds.
	 */
	phaseWeights_.w.reserve(16 * 12);
	contrastWeights_.w.reserve(getHardwareConfig().focusRegions.width *
				   getHardwareConfig().focusRegions.height);
	scanData_.reserve(32);
}
Af::~Af()
{
}

/* Return the algorithm's registered name ("rpi.af"). */
char const *Af::name() const
{
	return NAME;
}

/* Tuning parsing and late initialisation are delegated to CfgParams. */
int Af::read(const libcamera::YamlObject &params)
{
	return cfg_.read(params);
}

void Af::initialise()
{
	cfg_.initialise();
}
/*
 * Adopt a new camera mode: recompute the statistics region from the mode's
 * crop/scale, invalidate cached region weights, restart any in-flight scan
 * and skip the first few (unsettled) frames.
 *
 * Fix: the explicit "(void)metadata;" cast was redundant — the parameter
 * is already annotated [[maybe_unused]].
 */
void Af::switchMode(CameraMode const &cameraMode, [[maybe_unused]] Metadata *metadata)
{
	/* Assume that PDAF and Focus stats grids cover the visible area */
	statsRegion_.x = (int)cameraMode.cropX;
	statsRegion_.y = (int)cameraMode.cropY;
	statsRegion_.width = (unsigned)(cameraMode.width * cameraMode.scaleX);
	statsRegion_.height = (unsigned)(cameraMode.height * cameraMode.scaleY);
	LOG(RPiAf, Debug) << "switchMode: statsRegion: "
			  << statsRegion_.x << ','
			  << statsRegion_.y << ','
			  << statsRegion_.width << ','
			  << statsRegion_.height;
	/* Cached weights are tied to the old grid geometry; force a recompute. */
	invalidateWeights();

	if (scanState_ >= ScanState::Coarse && scanState_ < ScanState::Settle) {
		/*
		 * If a scan was in progress, re-start it, as CDAF statistics
		 * may have changed. Though if the application is just about
		 * to take a still picture, this will not help...
		 */
		startProgrammedScan();
	}
	skipCount_ = cfg_.skipFrames;
}
/*
 * Populate a rows x cols grid of per-region weights from the user-supplied
 * AF windows (weighted by overlap area), or fall back to a default central
 * window when no user window overlaps the statistics region.
 */
void Af::computeWeights(RegionWeights *wgts, unsigned rows, unsigned cols)
{
	wgts->rows = rows;
	wgts->cols = cols;
	wgts->sum = 0;
	wgts->w.resize(rows * cols);
	std::fill(wgts->w.begin(), wgts->w.end(), 0);

	if (rows > 0 && cols > 0 && useWindows_ &&
	    statsRegion_.height >= rows && statsRegion_.width >= cols) {
		/*
		 * Here we just merge all of the given windows, weighted by area.
		 * \todo Perhaps a better approach might be to find the phase in each
		 * window and choose either the closest or the highest-confidence one?
		 * Ensure weights sum to less than (1<<16). 46080 is a "round number"
		 * below 65536, for better rounding when window size is a simple
		 * fraction of image dimensions.
		 */
		const unsigned maxCellWeight = 46080u / (MaxWindows * rows * cols);
		const unsigned cellH = statsRegion_.height / rows;
		const unsigned cellW = statsRegion_.width / cols;
		const unsigned cellA = cellH * cellW;

		for (auto &w : windows_) {
			for (unsigned r = 0; r < rows; ++r) {
				/* Vertical overlap of window w with this row of cells. */
				int y0 = std::max(statsRegion_.y + (int)(cellH * r), w.y);
				int y1 = std::min(statsRegion_.y + (int)(cellH * (r + 1)),
						  w.y + (int)(w.height));
				if (y0 >= y1)
					continue;
				y1 -= y0;
				for (unsigned c = 0; c < cols; ++c) {
					/* Horizontal overlap of window w with this cell. */
					int x0 = std::max(statsRegion_.x + (int)(cellW * c), w.x);
					int x1 = std::min(statsRegion_.x + (int)(cellW * (c + 1)),
							  w.x + (int)(w.width));
					if (x0 >= x1)
						continue;
					unsigned a = y1 * (x1 - x0);
					/* Scale overlap area to a weight, rounding up. */
					a = (maxCellWeight * a + cellA - 1) / cellA;
					wgts->w[r * cols + c] += a;
					wgts->sum += a;
				}
			}
		}
	}

	if (wgts->sum == 0) {
		/* Default AF window is the middle 1/2 width of the middle 1/3 height */
		for (unsigned r = rows / 3; r < rows - rows / 3; ++r) {
			for (unsigned c = cols / 4; c < cols - cols / 4; ++c) {
				wgts->w[r * cols + c] = 1;
				wgts->sum += 1;
			}
		}
	}
}
/* Zero the weight sums so the next getPhase()/getContrast() recomputes them. */
void Af::invalidateWeights()
{
	phaseWeights_.sum = 0;
	contrastWeights_.sum = 0;
}
/*
 * Compute a weighted average phase error and overall confidence across the
 * PDAF grid, excluding regions below the confidence threshold. Returns true
 * only when the accumulated confidence reaches the total region weight.
 */
bool Af::getPhase(PdafRegions const &regions, double &phase, double &conf)
{
	libcamera::Size size = regions.size();
	if (size.height != phaseWeights_.rows || size.width != phaseWeights_.cols ||
	    phaseWeights_.sum == 0) {
		LOG(RPiAf, Debug) << "Recompute Phase weights " << size.width << 'x' << size.height;
		computeWeights(&phaseWeights_, size.height, size.width);
	}

	uint32_t sumWc = 0;
	int64_t sumWcp = 0;
	for (unsigned i = 0; i < regions.numRegions(); ++i) {
		unsigned w = phaseWeights_.w[i];
		if (w) {
			const PdafData &data = regions.get(i).val;
			unsigned c = data.conf;
			if (c >= cfg_.confThresh) {
				if (c > cfg_.confClip)
					c = cfg_.confClip;
				/*
				 * Confidence is reduced once before the confidence sum and
				 * a second time before the phase sum — presumably to
				 * down-weight marginal samples in the phase average; confirm.
				 */
				c -= (cfg_.confThresh >> 2);
				sumWc += w * c;
				c -= (cfg_.confThresh >> 2);
				sumWcp += (int64_t)(w * c) * (int64_t)data.phase;
			}
		}
	}

	if (0 < phaseWeights_.sum && phaseWeights_.sum <= sumWc) {
		phase = (double)sumWcp / (double)sumWc;
		conf = (double)sumWc / (double)phaseWeights_.sum;
		return true;
	} else {
		phase = 0.0;
		conf = 0.0;
		return false;
	}
}
/*
 * Return the weighted average of the CDAF focus (contrast) statistics,
 * regenerating the cached weights first if the grid size changed or the
 * weights were invalidated.
 */
double Af::getContrast(const FocusRegions &focusStats)
{
	libcamera::Size size = focusStats.size();
	if (size.height != contrastWeights_.rows ||
	    size.width != contrastWeights_.cols || contrastWeights_.sum == 0) {
		LOG(RPiAf, Debug) << "Recompute Contrast weights "
				  << size.width << 'x' << size.height;
		computeWeights(&contrastWeights_, size.height, size.width);
	}

	/* With no usable weights the average is defined to be zero. */
	if (contrastWeights_.sum == 0)
		return 0.0;

	uint64_t weighted = 0;
	unsigned numRegions = focusStats.numRegions();
	for (unsigned i = 0; i < numRegions; ++i)
		weighted += contrastWeights_.w[i] * focusStats.get(i).val;

	return (double)weighted / (double)contrastWeights_.sum;
}
/*
 * One iteration of closed-loop PDAF control: convert the measured phase
 * error into a lens movement, damped according to mode and confidence,
 * then slew-rate limited. Updates ftarget_ and reportState_.
 */
void Af::doPDAF(double phase, double conf)
{
	/* Apply loop gain */
	phase *= cfg_.speeds[speed_].pdafGain;

	if (mode_ == AfModeContinuous) {
		/*
		 * PDAF in Continuous mode. Scale down lens movement when
		 * delta is small or confidence is low, to suppress wobble.
		 */
		phase *= conf / (conf + cfg_.confEpsilon);
		if (std::abs(phase) < cfg_.speeds[speed_].pdafSquelch) {
			/* Cubic-like squelch: attenuate small deltas by (phase/squelch)^2. */
			double a = phase / cfg_.speeds[speed_].pdafSquelch;
			phase *= a * a;
		}
	} else {
		/*
		 * PDAF in triggered-auto mode. Allow early termination when
		 * phase delta is small; scale down lens movements towards
		 * the end of the sequence, to ensure a stable image.
		 */
		if (stepCount_ >= cfg_.speeds[speed_].stepFrames) {
			if (std::abs(phase) < cfg_.speeds[speed_].pdafSquelch)
				stepCount_ = cfg_.speeds[speed_].stepFrames;
		} else
			/*
			 * NOTE(review): if stepCount_ and stepFrames are both
			 * integers this division truncates to 0 whenever
			 * stepCount_ < stepFrames, zeroing the movement rather
			 * than ramping it down — confirm against the member
			 * types whether a floating-point ratio was intended.
			 */
			phase *= stepCount_ / cfg_.speeds[speed_].stepFrames;
	}

	/* Apply slew rate limit. Report failure if out of bounds. */
	if (phase < -cfg_.speeds[speed_].maxSlew) {
		phase = -cfg_.speeds[speed_].maxSlew;
		reportState_ = (ftarget_ <= cfg_.ranges[range_].focusMin) ? AfState::Failed
									  : AfState::Scanning;
	} else if (phase > cfg_.speeds[speed_].maxSlew) {
		phase = cfg_.speeds[speed_].maxSlew;
		reportState_ = (ftarget_ >= cfg_.ranges[range_].focusMax) ? AfState::Failed
									  : AfState::Scanning;
	} else
		reportState_ = AfState::Focused;

	ftarget_ = fsmooth_ + phase;
}
/*
 * Attempt to terminate a scan early by interpolating/extrapolating the
 * zero-phase lens position from the latest scan sample and the current
 * phase measurement. Returns true (after updating ftarget_) on success.
 */
bool Af::earlyTerminationByPhase(double phase)
{
	if (!scanData_.empty() && scanData_.back().conf >= cfg_.confEpsilon) {
		const double prevFocus = scanData_.back().focus;
		const double prevPhase = scanData_.back().phase;

		/*
		 * The gradient must be finite with the expected sign, and the
		 * interpolation parameter well-conditioned, before the
		 * estimate is trusted.
		 */
		if ((ftarget_ - prevFocus) * (phase - prevPhase) > 0.0) {
			double param = phase / (phase - prevPhase);
			if (param >= -3.0 && param <= 3.5) {
				ftarget_ += param * (prevFocus - ftarget_);
				LOG(RPiAf, Debug) << "ETBP: param=" << param;
				return true;
			}
		}
	}

	return false;
}
/*
 * Refine the lens position of the best scan sample i using its two
 * neighbours' contrast values (quadratic-style peak interpolation).
 * Falls back to the sample's own focus when i is at either end.
 */
double Af::findPeak(unsigned i) const
{
	double f = scanData_[i].focus;

	if (i > 0 && i + 1 < scanData_.size()) {
		const double lo = scanData_[i].contrast - scanData_[i - 1].contrast;
		const double hi = scanData_[i].contrast - scanData_[i + 1].contrast;

		/* Shift towards whichever neighbour has the smaller contrast drop. */
		if (0.0 <= lo && lo < hi) {
			double param = 0.3125 * (1.0 - lo / hi) * (1.6 - lo / hi);
			f += param * (scanData_[i - 1].focus - f);
		} else if (0.0 <= hi && hi < lo) {
			double param = 0.3125 * (1.0 - hi / lo) * (1.6 - hi / lo);
			f += param * (scanData_[i + 1].focus - f);
		}
	}

	LOG(RPiAf, Debug) << "FindPeak: " << f;
	return f;
}
/*
 * One step of the CDAF scan state machine: record the current sample,
 * then either advance the lens to the next scan position or transition
 * from Coarse to Fine (or Fine to Settle) when a termination condition
 * is met.
 */
void Af::doScan(double contrast, double phase, double conf)
{
	/* Record lens position, contrast and phase values for the current scan */
	if (scanData_.empty() || contrast > scanMaxContrast_) {
		scanMaxContrast_ = contrast;
		scanMaxIndex_ = scanData_.size();
	}
	if (contrast < scanMinContrast_)
		scanMinContrast_ = contrast;
	scanData_.emplace_back(ScanRecord{ ftarget_, contrast, phase, conf });

	if (scanState_ == ScanState::Coarse) {
		if (ftarget_ >= cfg_.ranges[range_].focusMax ||
		    contrast < cfg_.speeds[speed_].contrastRatio * scanMaxContrast_) {
			/*
			 * Finished coarse scan, or termination based on contrast.
			 * Jump to just after max contrast and start fine scan.
			 */
			ftarget_ = std::min(ftarget_, findPeak(scanMaxIndex_) +
						      2.0 * cfg_.speeds[speed_].stepFine);
			scanState_ = ScanState::Fine;
			scanData_.clear();
		} else
			ftarget_ += cfg_.speeds[speed_].stepCoarse;
	} else { /* ScanState::Fine */
		if (ftarget_ <= cfg_.ranges[range_].focusMin || scanData_.size() >= 5 ||
		    contrast < cfg_.speeds[speed_].contrastRatio * scanMaxContrast_) {
			/*
			 * Finished fine scan, or termination based on contrast.
			 * Use quadratic peak-finding to find best contrast position.
			 */
			ftarget_ = findPeak(scanMaxIndex_);
			scanState_ = ScanState::Settle;
		} else
			ftarget_ -= cfg_.speeds[speed_].stepFine;
	}

	/* No settling delay needed if the lens isn't actually moving. */
	stepCount_ = (ftarget_ == fsmooth_) ? 0 : cfg_.speeds[speed_].stepFrames;
}
/*
 * Top-level per-frame AF step: dispatch between PDAF closed-loop control
 * and the CDAF scan sequence, handling startup frame skipping, PDAF
 * dropout, early termination and end-of-scan settling.
 */
void Af::doAF(double contrast, double phase, double conf)
{
	/* Skip frames at startup and after sensor mode change */
	if (skipCount_ > 0) {
		LOG(RPiAf, Debug) << "SKIP";
		skipCount_--;
		return;
	}

	if (scanState_ == ScanState::Pdaf) {
		/*
		 * Use PDAF closed-loop control whenever available, in both CAF
		 * mode and (for a limited number of iterations) when triggered.
		 * If PDAF fails (due to poor contrast, noise or large defocus),
		 * fall back to a CDAF-based scan. To avoid "nuisance" scans,
		 * scan only after a number of frames with low PDAF confidence.
		 */
		if (conf > (dropCount_ ? 1.0 : 0.25) * cfg_.confEpsilon) {
			doPDAF(phase, conf);
			if (stepCount_ > 0)
				stepCount_--;
			else if (mode_ != AfModeContinuous)
				scanState_ = ScanState::Idle;
			dropCount_ = 0;
		} else if (++dropCount_ == cfg_.speeds[speed_].dropoutFrames)
			startProgrammedScan();
	} else if (scanState_ >= ScanState::Coarse && fsmooth_ == ftarget_) {
		/*
		 * Scanning sequence. This means PDAF has become unavailable.
		 * Allow a delay between steps for CDAF FoM statistics to be
		 * updated, and a "settling time" at the end of the sequence.
		 * [A coarse or fine scan can be abandoned if two PDAF samples
		 * allow direct interpolation of the zero-phase lens position.]
		 */
		if (stepCount_ > 0)
			stepCount_--;
		else if (scanState_ == ScanState::Settle) {
			/* Judge success by comparing the settled contrast to the scan peak. */
			if (prevContrast_ >= cfg_.speeds[speed_].contrastRatio * scanMaxContrast_ &&
			    scanMinContrast_ <= cfg_.speeds[speed_].contrastRatio * scanMaxContrast_)
				reportState_ = AfState::Focused;
			else
				reportState_ = AfState::Failed;
			/* Return to PDAF monitoring in CAF mode (if PDAF is enabled), else idle. */
			if (mode_ == AfModeContinuous && !pauseFlag_ &&
			    cfg_.speeds[speed_].dropoutFrames > 0)
				scanState_ = ScanState::Pdaf;
			else
				scanState_ = ScanState::Idle;
			scanData_.clear();
		} else if (conf >= cfg_.confEpsilon && earlyTerminationByPhase(phase)) {
			scanState_ = ScanState::Settle;
			stepCount_ = (mode_ == AfModeContinuous) ? 0
								 : cfg_.speeds[speed_].stepFrames;
		} else
			doScan(contrast, phase, conf);
	}
}
/*
 * Move the smoothed lens position fsmooth_ towards ftarget_. The target
 * is clamped to the active range while an AF sequence is running; once a
 * known position exists the move is slew-rate limited, otherwise we jump
 * straight there and request a frame-skip delay.
 */
void Af::updateLensPosition()
{
	if (scanState_ >= ScanState::Pdaf) {
		ftarget_ = std::clamp(ftarget_,
				      cfg_.ranges[range_].focusMin,
				      cfg_.ranges[range_].focusMax);
	}

	if (!initted_) {
		/* from an unknown position: go straight to target, but add delay */
		fsmooth_ = ftarget_;
		initted_ = true;
		skipCount_ = cfg_.skipFrames;
		return;
	}

	/* from a known lens position: apply slew rate limit */
	const double slew = cfg_.speeds[speed_].maxSlew;
	fsmooth_ = std::clamp(ftarget_, fsmooth_ - slew, fsmooth_ + slew);
}
/*
 * Begin an AF activity: prefer the PDAF closed loop when the tuning
 * enables it for this mode, otherwise fall back to a programmed CDAF
 * scan.
 */
void Af::startAF()
{
	/* Use PDAF if the tuning file allows it; else CDAF. */
	if (cfg_.speeds[speed_].dropoutFrames > 0 &&
	    (mode_ == AfModeContinuous || cfg_.speeds[speed_].pdafFrames > 0)) {
		if (!initted_) {
			/* Seed the lens from the configured default position. */
			ftarget_ = cfg_.ranges[range_].focusDefault;
			updateLensPosition();
		}
		/* Triggered mode limits how many PDAF iterations are allowed. */
		stepCount_ = (mode_ == AfModeContinuous) ? 0 : cfg_.speeds[speed_].pdafFrames;
		scanState_ = ScanState::Pdaf;
		scanData_.clear();
		dropCount_ = 0;
		reportState_ = AfState::Scanning;
	} else
		startProgrammedScan();
}
/*
 * Begin a CDAF scan: reset the per-scan bookkeeping and move the lens to
 * the near end of the active range before entering the Coarse state.
 */
void Af::startProgrammedScan()
{
	scanData_.clear();
	scanMaxContrast_ = 0.0;
	scanMinContrast_ = 1.0e9;
	scanMaxIndex_ = 0;
	/* The lens move must happen before the state changes to Coarse. */
	ftarget_ = cfg_.ranges[range_].focusMin;
	updateLensPosition();
	scanState_ = ScanState::Coarse;
	stepCount_ = cfg_.speeds[speed_].stepFrames;
	reportState_ = AfState::Scanning;
}
/* Abandon any AF activity in progress and report the idle state. */
void Af::goIdle()
{
	scanData_.clear();
	scanState_ = ScanState::Idle;
	reportState_ = AfState::Idle;
}
/*
* PDAF phase data are available in prepare(), but CDAF statistics are not
* available until process(). We are gambling on the availability of PDAF.
* To expedite feedback control using PDAF, issue the V4L2 lens control from
* prepare(). Conversely, during scans, we must allow an extra frame delay
* between steps, to retrieve CDAF statistics from the previous process()
* so we can terminate the scan early without having to change our minds.
*/
/*
 * Per-frame entry point (ISP prepare stage): run the AF core using PDAF
 * data from the frame's embedded metadata plus the CDAF contrast cached
 * by the previous process() call, then publish AfStatus (state, pause
 * state and the new lens setting) into the image metadata.
 */
void Af::prepare(Metadata *imageMetadata)
{
	/* Initialize for triggered scan or start of CAF mode */
	if (scanState_ == ScanState::Trigger)
		startAF();

	if (initted_) {
		/* Get PDAF from the embedded metadata, and run AF algorithm core */
		PdafRegions regions;
		double phase = 0.0, conf = 0.0;
		double oldFt = ftarget_;
		double oldFs = fsmooth_;
		ScanState oldSs = scanState_;
		uint32_t oldSt = stepCount_;
		if (imageMetadata->get("pdaf.regions", regions) == 0)
			getPhase(regions, phase, conf);
		doAF(prevContrast_, phase, conf);
		updateLensPosition();
		LOG(RPiAf, Debug) << std::fixed << std::setprecision(2)
				  << static_cast<unsigned int>(reportState_)
				  << " sst" << static_cast<unsigned int>(oldSs)
				  << "->" << static_cast<unsigned int>(scanState_)
				  << " stp" << oldSt << "->" << stepCount_
				  << " ft" << oldFt << "->" << ftarget_
				  << " fs" << oldFs << "->" << fsmooth_
				  << " cont=" << (int)prevContrast_
				  << " phase=" << (int)phase << " conf=" << (int)conf;
	}

	/* Report status and produce new lens setting */
	AfStatus status;
	if (pauseFlag_)
		status.pauseState = (scanState_ == ScanState::Idle) ? AfPauseState::Paused
								    : AfPauseState::Pausing;
	else
		status.pauseState = AfPauseState::Running;

	if (mode_ == AfModeAuto && scanState_ != ScanState::Idle)
		status.state = AfState::Scanning;
	else
		status.state = reportState_;

	/* No lens setting can be reported before the lens position is known. */
	status.lensSetting = initted_ ? std::optional<int>(cfg_.map.eval(fsmooth_))
				      : std::nullopt;
	imageMetadata->set("af.status", status);
}
/*
 * Per-frame statistics stage: cache the CDAF focus measure so the next
 * prepare() call can use it. The [[maybe_unused]] attribute already
 * suppresses the unused-parameter warning, so the redundant (void) cast
 * has been removed.
 */
void Af::process(StatisticsPtr &stats, [[maybe_unused]] Metadata *imageMetadata)
{
	prevContrast_ = getContrast(stats->focusRegions);
}
/* Controls */
/* Select the focus range; out-of-range values are ignored rather than
 * risking an out-of-bounds index into cfg_.ranges. */
void Af::setRange(AfRange r)
{
	LOG(RPiAf, Debug) << "setRange: " << (unsigned)r;
	if (r >= AfAlgorithm::AfRangeMax)
		return;
	range_ = r;
}
/* Select the AF speed; invalid values are ignored. */
void Af::setSpeed(AfSpeed s)
{
	LOG(RPiAf, Debug) << "setSpeed: " << (unsigned)s;
	if (s >= AfAlgorithm::AfSpeedMax)
		return;

	/*
	 * If we are mid-PDAF and the new speed allows more PDAF frames,
	 * extend the current countdown by the difference.
	 */
	if (scanState_ == ScanState::Pdaf &&
	    cfg_.speeds[s].pdafFrames > cfg_.speeds[speed_].pdafFrames)
		stepCount_ += cfg_.speeds[s].pdafFrames - cfg_.speeds[speed_].pdafFrames;

	speed_ = s;
}
/* Enable/disable windowed metering; the cached weights depend on it. */
void Af::setMetering(bool mode)
{
	if (useWindows_ == mode)
		return;
	useWindows_ = mode;
	invalidateWeights();
}
/*
 * Replace the metering windows, keeping at most MaxWindows of the
 * supplied rectangles. Cached weights are only invalidated when window
 * metering is actually in use.
 */
void Af::setWindows(libcamera::Span<libcamera::Rectangle const> const &wins)
{
	windows_.clear();
	for (libcamera::Rectangle const &w : wins) {
		LOG(RPiAf, Debug) << "Window: "
				  << w.x << ", "
				  << w.y << ", "
				  << w.width << ", "
				  << w.height;
		windows_.push_back(w);
		if (windows_.size() >= MaxWindows)
			break;
	}

	if (useWindows_)
		invalidateWeights();
}
/*
 * Set the lens position (manual mode only). Returns true when this
 * changes the target; if hwpos is non-null, the corresponding hardware
 * lens setting is always written back.
 */
bool Af::setLensPosition(double dioptres, int *hwpos)
{
	bool changed = false;

	if (mode_ == AfModeManual) {
		LOG(RPiAf, Debug) << "setLensPosition: " << dioptres;
		ftarget_ = cfg_.map.domain().clamp(dioptres);
		/* No change is reported only when already settled at this target. */
		changed = !initted_ || fsmooth_ != ftarget_;
		updateLensPosition();
	}

	if (hwpos)
		*hwpos = cfg_.map.eval(fsmooth_);

	return changed;
}
/* Report the current (smoothed) lens position, if one is known. */
std::optional<double> Af::getLensPosition() const
{
	/*
	 * \todo We ought to perform some precise timing here to determine
	 * the current lens position.
	 */
	if (!initted_)
		return std::nullopt;
	return fsmooth_;
}
/* Cancel a triggered scan; only meaningful in auto mode. */
void Af::cancelScan()
{
	LOG(RPiAf, Debug) << "cancelScan";
	if (mode_ != AfModeAuto)
		return;
	goIdle();
}
/* Request a scan; only honoured in auto mode when currently idle. */
void Af::triggerScan()
{
	LOG(RPiAf, Debug) << "triggerScan";
	if (mode_ != AfModeAuto || scanState_ != ScanState::Idle)
		return;
	scanState_ = ScanState::Trigger;
}
/*
 * Change the AF mode. Entering continuous mode starts scanning
 * immediately; other transitions abandon activity unless a scan is
 * already underway in auto mode.
 */
void Af::setMode(AfAlgorithm::AfMode mode)
{
	LOG(RPiAf, Debug) << "setMode: " << (unsigned)mode;
	if (mode_ == mode)
		return;

	mode_ = mode;
	pauseFlag_ = false;
	if (mode == AfModeContinuous)
		scanState_ = ScanState::Trigger;
	else if (mode != AfModeAuto || scanState_ < ScanState::Coarse)
		goIdle();
}
/* Return the currently configured AF mode. */
AfAlgorithm::AfMode Af::getMode() const
{
	return mode_;
}
/*
 * Pause or resume continuous AF. Resuming restarts CAF unless a scan is
 * already in progress; an immediate pause abandons any scan underway.
 */
void Af::pause(AfAlgorithm::AfPause pause)
{
	LOG(RPiAf, Debug) << "pause: " << (unsigned)pause;
	if (mode_ != AfModeContinuous)
		return;

	const bool resume = (pause == AfPauseResume);
	if (resume && pauseFlag_) {
		pauseFlag_ = false;
		if (scanState_ < ScanState::Coarse)
			scanState_ = ScanState::Trigger;
	} else if (!resume && !pauseFlag_) {
		pauseFlag_ = true;
		if (pause == AfPauseImmediate || scanState_ < ScanState::Coarse)
			goIdle();
	}
}
/* Register algorithm with the system. */
static Algorithm *create(Controller *controller)
{
	/*
	 * Af derives from Algorithm, so the implicit upcast is sufficient;
	 * the previous C-style cast was unnecessary.
	 */
	return new Af(controller);
}
static RegisterAlgorithm reg(NAME, &create);
|
0 | repos/libcamera/src/ipa/rpi/controller | repos/libcamera/src/ipa/rpi/controller/rpi/alsc.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* ALSC (auto lens shading correction) control algorithm
*/
#include <algorithm>
#include <functional>
#include <math.h>
#include <numeric>
#include <vector>

#include <libcamera/base/log.h>
#include <libcamera/base/span.h>

#include "../awb_status.h"
#include "alsc.h"
/* Raspberry Pi ALSC (Auto Lens Shading Correction) algorithm. */
using namespace RPiController;
using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiAlsc)
#define NAME "rpi.alsc"
static const double InsufficientData = -1.0;
/*
 * Construct the ALSC algorithm and spawn its worker thread, which sleeps
 * until restartAsync() signals a new calculation (or the destructor
 * requests an abort).
 */
Alsc::Alsc(Controller *controller)
	: Algorithm(controller)
{
	asyncAbort_ = asyncStart_ = asyncStarted_ = asyncFinished_ = false;
	/* A lambda is clearer than std::bind for a no-argument member call. */
	asyncThread_ = std::thread([this]() { asyncFunc(); });
}
/*
 * Shut down the worker thread: set the abort flag under the lock, wake
 * the thread, and join it before the members it uses are destroyed.
 */
Alsc::~Alsc()
{
	{
		std::lock_guard<std::mutex> lock(mutex_);
		asyncAbort_ = true;
	}
	asyncSignal_.notify_one();
	asyncThread_.join();
}
/* Return the algorithm's registered name ("rpi.alsc"). */
char const *Alsc::name() const
{
	return NAME;
}
/*
 * Synthesise a luminance LUT from the "corner_strength" and "asymmetry"
 * tuning parameters, modelling radial (cos^4-style) vignetting.
 * Returns 0 on success or -EINVAL for invalid parameters.
 */
static int generateLut(Array2D<double> &lut, const libcamera::YamlObject &params)
{
	/* These must be signed ints for the co-ordinate calculations below. */
	int X = lut.dimensions().width, Y = lut.dimensions().height;
	double cstrength = params["corner_strength"].get<double>(2.0);
	if (cstrength <= 1.0) {
		LOG(RPiAlsc, Error) << "corner_strength must be > 1.0";
		return -EINVAL;
	}

	double asymmetry = params["asymmetry"].get<double>(1.0);
	if (asymmetry < 0) {
		LOG(RPiAlsc, Error) << "asymmetry must be >= 0";
		return -EINVAL;
	}

	double f1 = cstrength - 1, f2 = 1 + sqrt(cstrength);
	/*
	 * Normalisation radius squared. NOTE(review): X * Y / 4 is integer
	 * arithmetic and truncates when X * Y is not a multiple of 4 —
	 * presumably acceptable for typical table sizes; confirm.
	 */
	double R2 = X * Y / 4 * (1 + asymmetry * asymmetry);
	int num = 0;
	for (int y = 0; y < Y; y++) {
		for (int x = 0; x < X; x++) {
			/* Distance of the cell centre from the table centre. */
			double dy = y - Y / 2 + 0.5,
			       dx = (x - X / 2 + 0.5) * asymmetry;
			double r2 = (dx * dx + dy * dy) / R2;
			lut[num++] =
				(f1 * r2 + f2) * (f1 * r2 + f2) /
				(f2 * f2); /* this reproduces the cos^4 rule */
		}
	}
	return 0;
}
/*
 * Fill the LUT from a YAML list of doubles. The list must contain
 * exactly one value per table cell; returns -EINVAL otherwise or if any
 * entry fails to parse as a double.
 */
static int readLut(Array2D<double> &lut, const libcamera::YamlObject &params)
{
	if (params.size() != lut.size()) {
		LOG(RPiAlsc, Error) << "Invalid number of entries in LSC table";
		return -EINVAL;
	}

	auto out = lut.begin();
	for (const auto &p : params.asList()) {
		auto value = p.get<double>();
		if (!value)
			return -EINVAL;
		*out++ = *value;
	}

	return 0;
}
/*
 * Parse a list of per-colour-temperature calibration tables (key `name`)
 * from the tuning file into `calibrations`. Entries must appear in
 * strictly increasing ct order and each table must have exactly
 * size.width * size.height values. Returns 0 on success or -EINVAL.
 */
static int readCalibrations(std::vector<AlscCalibration> &calibrations,
			    const libcamera::YamlObject &params,
			    std::string const &name, const Size &size)
{
	if (params.contains(name)) {
		double lastCt = 0;
		for (const auto &p : params[name].asList()) {
			auto value = p["ct"].get<double>();
			if (!value)
				return -EINVAL;
			double ct = *value;
			/* Enforce strictly increasing ct so interpolation works. */
			if (ct <= lastCt) {
				LOG(RPiAlsc, Error)
					<< "Entries in " << name << " must be in increasing ct order";
				return -EINVAL;
			}
			AlscCalibration calibration;
			calibration.ct = lastCt = ct;

			const libcamera::YamlObject &table = p["table"];
			if (table.size() != size.width * size.height) {
				LOG(RPiAlsc, Error)
					<< "Incorrect number of values for ct "
					<< ct << " in " << name;
				return -EINVAL;
			}

			int num = 0;
			calibration.table.resize(size);
			for (const auto &elem : table.asList()) {
				value = elem.get<double>();
				if (!value)
					return -EINVAL;
				calibration.table[num++] = *value;
			}

			calibrations.push_back(std::move(calibration));
			LOG(RPiAlsc, Debug)
				<< "Read " << name << " calibration for ct " << ct;
		}
	}
	return 0;
}
/*
 * Read the ALSC tuning parameters: filter/startup timings, solver
 * constants, the luminance LUT (generated, listed, or unity) and the
 * per-channel colour-temperature calibrations. Returns 0 on success.
 */
int Alsc::read(const libcamera::YamlObject &params)
{
	config_.tableSize = getHardwareConfig().awbRegions;
	config_.framePeriod = params["frame_period"].get<uint16_t>(12);
	config_.startupFrames = params["startup_frames"].get<uint16_t>(10);
	config_.speed = params["speed"].get<double>(0.05);
	double sigma = params["sigma"].get<double>(0.01);
	/* Per-channel sigmas default to the shared "sigma" value. */
	config_.sigmaCr = params["sigma_Cr"].get<double>(sigma);
	config_.sigmaCb = params["sigma_Cb"].get<double>(sigma);
	config_.minCount = params["min_count"].get<double>(10.0);
	config_.minG = params["min_G"].get<uint16_t>(50);
	config_.omega = params["omega"].get<double>(1.3);
	/* Default iteration count scales with the table dimensions. */
	config_.nIter = params["n_iter"].get<uint32_t>(config_.tableSize.width + config_.tableSize.height);
	config_.luminanceStrength =
		params["luminance_strength"].get<double>(1.0);

	config_.luminanceLut.resize(config_.tableSize, 1.0);
	int ret = 0;

	/* The luminance LUT may be generated, listed explicitly, or left as unity. */
	if (params.contains("corner_strength"))
		ret = generateLut(config_.luminanceLut, params);
	else if (params.contains("luminance_lut"))
		ret = readLut(config_.luminanceLut, params["luminance_lut"]);
	else
		LOG(RPiAlsc, Warning)
			<< "no luminance table - assume unity everywhere";
	if (ret)
		return ret;

	ret = readCalibrations(config_.calibrationsCr, params, "calibrations_Cr",
			       config_.tableSize);
	if (ret)
		return ret;
	ret = readCalibrations(config_.calibrationsCb, params, "calibrations_Cb",
			       config_.tableSize);
	if (ret)
		return ret;

	config_.defaultCt = params["default_ct"].get<double>(4500.0);
	config_.threshold = params["threshold"].get<double>(1e-3);
	config_.lambdaBound = params["lambda_bound"].get<double>(0.05);

	return 0;
}
/* Forward declarations of the file-local helpers used by the Alsc methods. */
static double getCt(Metadata *metadata, double defaultCt);
static void getCalTable(double ct, std::vector<AlscCalibration> const &calibrations,
			Array2D<double> &calTable);
static void resampleCalTable(const Array2D<double> &calTableIn, CameraMode const &cameraMode,
			     Array2D<double> &calTableOut);
static void compensateLambdasForCal(const Array2D<double> &calTable,
				    const Array2D<double> &oldLambdas,
				    Array2D<double> &newLambdas);
static void addLuminanceToTables(std::array<Array2D<double>, 3> &results,
				 const Array2D<double> &lambdaR, double lambdaG,
				 const Array2D<double> &lambdaB,
				 const Array2D<double> &luminanceLut,
				 double luminanceStrength);
/*
 * Reset the runtime state and pre-size every working table to the
 * configured dimensions, so the hot path never allocates.
 */
void Alsc::initialise()
{
	frameCount2_ = frameCount_ = framePhase_ = 0;
	firstTime_ = true;
	ct_ = config_.defaultCt;

	const size_t XY = config_.tableSize.width * config_.tableSize.height;

	for (auto &r : syncResults_)
		r.resize(config_.tableSize);
	for (auto &r : prevSyncResults_)
		r.resize(config_.tableSize);
	for (auto &r : asyncResults_)
		r.resize(config_.tableSize);

	luminanceTable_.resize(config_.tableSize);
	asyncLambdaR_.resize(config_.tableSize);
	asyncLambdaB_.resize(config_.tableSize);
	/* The lambdas are initialised in the SwitchMode. */
	lambdaR_.resize(config_.tableSize);
	lambdaB_.resize(config_.tableSize);

	/* Temporaries for the computations, but sensible to allocate this up-front! */
	for (auto &c : tmpC_)
		c.resize(config_.tableSize);
	for (auto &m : tmpM_)
		m.resize(XY);
}
/*
 * Block until the asynchronous calculation (if one is in flight) has
 * finished, then clear the started/finished flags. (The "Aysnc" typo is
 * part of the method name and must be preserved for callers.)
 */
void Alsc::waitForAysncThread()
{
	if (asyncStarted_) {
		asyncStarted_ = false;
		std::unique_lock<std::mutex> lock(mutex_);
		syncSignal_.wait(lock, [&] {
			return asyncFinished_;
		});
		asyncFinished_ = false;
	}
}
/*
 * Return true when two camera modes are different enough (sensor crop or
 * user transform) that the previously computed tables should be reset.
 */
static bool compareModes(CameraMode const &cm0, CameraMode const &cm1)
{
	/*
	 * Return true if the modes crop from the sensor significantly differently,
	 * or if the user transform has changed.
	 */
	if (cm0.transform != cm1.transform)
		return true;
	/*
	 * NOTE(review): the fabs() results below are truncated to int on
	 * assignment, and leftDiff/topDiff use abs() on what may be
	 * floating-point crop values — presumably only integer-pixel
	 * precision matters here, but confirm against CameraMode's types.
	 */
	int leftDiff = abs(cm0.cropX - cm1.cropX);
	int topDiff = abs(cm0.cropY - cm1.cropY);
	int rightDiff = fabs(cm0.cropX + cm0.scaleX * cm0.width -
			     cm1.cropX - cm1.scaleX * cm1.width);
	int bottomDiff = fabs(cm0.cropY + cm0.scaleY * cm0.height -
			      cm1.cropY - cm1.scaleY * cm1.height);
	/*
	 * These thresholds are a rather arbitrary amount chosen to trigger
	 * when carrying on with the previously calculated tables might be
	 * worse than regenerating them (but without the adaptive algorithm).
	 */
	int thresholdX = cm0.sensorWidth >> 4;
	int thresholdY = cm0.sensorHeight >> 4;
	return leftDiff > thresholdX || rightDiff > thresholdX ||
	       topDiff > thresholdY || bottomDiff > thresholdY;
}
/*
 * Handle a camera mode change: resample the luminance table for the new
 * mode and, if the crop/transform changed significantly (or this is the
 * first mode), rebuild the output tables from the stored calibrations at
 * the last known colour temperature.
 */
void Alsc::switchMode(CameraMode const &cameraMode,
		      [[maybe_unused]] Metadata *metadata)
{
	/*
	 * We're going to start over with the tables if there's any "significant"
	 * change.
	 */
	bool resetTables = firstTime_ || compareModes(cameraMode_, cameraMode);

	/* Believe the colour temperature from the AWB, if there is one. */
	ct_ = getCt(metadata, ct_);

	/* Ensure the other thread isn't running while we do this. */
	waitForAysncThread();

	cameraMode_ = cameraMode;

	/*
	 * We must resample the luminance table like we do the others, but it's
	 * fixed so we can simply do it up front here.
	 */
	resampleCalTable(config_.luminanceLut, cameraMode_, luminanceTable_);

	if (resetTables) {
		/*
		 * Upon every "table reset", arrange for something sensible to be
		 * generated. Construct the tables for the previous recorded colour
		 * temperature. In order to start over from scratch we initialise
		 * the lambdas, but the rest of this code then echoes the code in
		 * doAlsc, without the adaptive algorithm.
		 */
		std::fill(lambdaR_.begin(), lambdaR_.end(), 1.0);
		std::fill(lambdaB_.begin(), lambdaB_.end(), 1.0);
		Array2D<double> &calTableR = tmpC_[0], &calTableB = tmpC_[1], &calTableTmp = tmpC_[2];
		getCalTable(ct_, config_.calibrationsCr, calTableTmp);
		resampleCalTable(calTableTmp, cameraMode_, calTableR);
		getCalTable(ct_, config_.calibrationsCb, calTableTmp);
		resampleCalTable(calTableTmp, cameraMode_, calTableB);
		compensateLambdasForCal(calTableR, lambdaR_, asyncLambdaR_);
		compensateLambdasForCal(calTableB, lambdaB_, asyncLambdaB_);
		addLuminanceToTables(syncResults_, asyncLambdaR_, 1.0, asyncLambdaB_,
				     luminanceTable_, config_.luminanceStrength);
		prevSyncResults_ = syncResults_;
		framePhase_ = config_.framePeriod; /* run the algo again asap */
		firstTime_ = false;
	}
}
/* Copy the worker thread's tables and mark the async job as consumed. */
void Alsc::fetchAsyncResults()
{
	LOG(RPiAlsc, Debug) << "Fetch ALSC results";
	syncResults_ = asyncResults_;
	asyncStarted_ = false;
	asyncFinished_ = false;
}
/*
 * Return the colour temperature reported by AWB in the metadata, or
 * defaultCt when no AWB status is present.
 */
double getCt(Metadata *metadata, double defaultCt)
{
	AwbStatus awbStatus;
	awbStatus.temperatureK = defaultCt; /* in case nothing found */
	const bool found = metadata->get("awb.status", awbStatus) == 0;
	if (found)
		LOG(RPiAlsc, Debug) << "AWB results found, using "
				    << awbStatus.temperatureK;
	else
		LOG(RPiAlsc, Debug) << "no AWB results found, using "
				    << awbStatus.temperatureK;
	return awbStatus.temperatureK;
}
/*
 * Copy the AWB region statistics into `regions`, dividing out the LSC
 * correction the pipeline has already applied (taken from the previous
 * synchronous results) when the stats were collected post-LSC.
 */
static void copyStats(RgbyRegions &regions, StatisticsPtr &stats,
		      std::array<Array2D<double>, 3> &prevSyncResults)
{
	if (!regions.numRegions())
		regions.init(stats->awbRegions.size());

	/* Per-channel gain tables last programmed into the pipeline. */
	const std::vector<double> &rTable = prevSyncResults[0].data();
	const std::vector<double> &gTable = prevSyncResults[1].data();
	const std::vector<double> &bTable = prevSyncResults[2].data();
	for (unsigned int i = 0; i < stats->awbRegions.numRegions(); i++) {
		auto r = stats->awbRegions.get(i);
		/* Undo the applied gains so the solver sees uncorrected data. */
		if (stats->colourStatsPos == Statistics::ColourStatsPos::PostLsc) {
			r.val.rSum = static_cast<uint64_t>(r.val.rSum / rTable[i]);
			r.val.gSum = static_cast<uint64_t>(r.val.gSum / gTable[i]);
			r.val.bSum = static_cast<uint64_t>(r.val.bSum / bTable[i]);
		}
		regions.set(i, r);
	}
}
/*
 * Snapshot the inputs (colour temperature and de-corrected statistics)
 * and wake the worker thread to run a new ALSC calculation.
 */
void Alsc::restartAsync(StatisticsPtr &stats, Metadata *imageMetadata)
{
	LOG(RPiAlsc, Debug) << "Starting ALSC calculation";
	/*
	 * Get the current colour temperature. It's all we need from the
	 * metadata. Default to the last CT value (which could be the default).
	 */
	ct_ = getCt(imageMetadata, ct_);
	/*
	 * We have to copy the statistics here, dividing out our best guess of
	 * the LSC table that the pipeline applied to them which we get from
	 * prevSyncResults_.
	 */
	copyStats(statistics_, stats, prevSyncResults_);
	framePhase_ = 0;
	/*
	 * NOTE(review): asyncStarted_ is written without holding mutex_,
	 * while prepare() reads it under the lock — this appears to rely on
	 * the caller serialising prepare()/process(); confirm.
	 */
	asyncStarted_ = true;
	{
		std::lock_guard<std::mutex> lock(mutex_);
		asyncStart_ = true;
	}
	asyncSignal_.notify_one();
}
/*
 * Per-frame prepare stage: collect any finished async results, IIR-filter
 * them into the tables actually programmed (prevSyncResults_), and publish
 * the AlscStatus into both the image and global metadata.
 */
void Alsc::prepare(Metadata *imageMetadata)
{
	/*
	 * Count frames since we started, and since we last poked the async
	 * thread.
	 */
	if (frameCount_ < (int)config_.startupFrames)
		frameCount_++;
	/* During startup, adopt new results immediately (speed 1.0). */
	double speed = frameCount_ < (int)config_.startupFrames
			       ? 1.0
			       : config_.speed;
	LOG(RPiAlsc, Debug)
		<< "frame count " << frameCount_ << " speed " << speed;
	{
		std::unique_lock<std::mutex> lock(mutex_);
		if (asyncStarted_ && asyncFinished_)
			fetchAsyncResults();
	}

	/* Apply IIR filter to results and program into the pipeline. */
	for (unsigned int j = 0; j < syncResults_.size(); j++) {
		for (unsigned int i = 0; i < syncResults_[j].size(); i++)
			prevSyncResults_[j][i] = speed * syncResults_[j][i] + (1.0 - speed) * prevSyncResults_[j][i];
	}

	/* Put output values into status metadata. */
	AlscStatus status;
	status.r = prevSyncResults_[0].data();
	status.g = prevSyncResults_[1].data();
	status.b = prevSyncResults_[2].data();
	imageMetadata->set("alsc.status", status);
	/*
	 * Put the results in the global metadata as well. This will be used by
	 * AWB to factor in the colour shading correction.
	 */
	getGlobalMetadata().set("alsc.status", status);
}
/*
 * Per-frame statistics stage: advance the frame counters and kick off a
 * new asynchronous calculation when due (every framePeriod frames, or on
 * every frame during startup) and the worker is free.
 */
void Alsc::process(StatisticsPtr &stats, Metadata *imageMetadata)
{
	if (framePhase_ < (int)config_.framePeriod)
		framePhase_++;
	if (frameCount2_ < (int)config_.startupFrames)
		frameCount2_++;
	LOG(RPiAlsc, Debug) << "frame_phase " << framePhase_;

	bool due = framePhase_ >= (int)config_.framePeriod ||
		   frameCount2_ < (int)config_.startupFrames;
	if (due && !asyncStarted_)
		restartAsync(stats, imageMetadata);
}
/*
 * Worker thread main loop: sleep until a new job (or abort) is signalled,
 * run the calculation outside the lock, then flag completion and wake any
 * waiter.
 */
void Alsc::asyncFunc()
{
	while (true) {
		{
			std::unique_lock<std::mutex> lock(mutex_);
			/* Wait for either a start request or an abort. */
			asyncSignal_.wait(lock, [&] {
				return asyncStart_ || asyncAbort_;
			});
			asyncStart_ = false;
			if (asyncAbort_)
				break;
		}
		/* Heavy computation runs without holding the mutex. */
		doAlsc();
		{
			std::lock_guard<std::mutex> lock(mutex_);
			asyncFinished_ = true;
		}
		syncSignal_.notify_one();
	}
}
/*
 * Produce the calibration table for colour temperature ct: unity when no
 * calibrations exist, the nearest endpoint when ct is out of range, and
 * a linear interpolation between the bracketing tables otherwise.
 */
void getCalTable(double ct, std::vector<AlscCalibration> const &calibrations,
		 Array2D<double> &calTable)
{
	if (calibrations.empty()) {
		std::fill(calTable.begin(), calTable.end(), 1.0);
		LOG(RPiAlsc, Debug) << "no calibrations found";
	} else if (ct <= calibrations.front().ct) {
		calTable = calibrations.front().table;
		LOG(RPiAlsc, Debug) << "using calibration for "
				    << calibrations.front().ct;
	} else if (ct >= calibrations.back().ct) {
		calTable = calibrations.back().table;
		LOG(RPiAlsc, Debug) << "using calibration for "
				    << calibrations.back().ct;
	} else {
		/* Find the pair of calibrations bracketing ct (list is sorted). */
		int idx = 0;
		while (ct > calibrations[idx + 1].ct)
			idx++;
		double ct0 = calibrations[idx].ct, ct1 = calibrations[idx + 1].ct;
		LOG(RPiAlsc, Debug)
			<< "ct is " << ct << ", interpolating between "
			<< ct0 << " and " << ct1;
		for (unsigned int i = 0; i < calTable.size(); i++)
			calTable[i] =
				(calibrations[idx].table[i] * (ct1 - ct) +
				 calibrations[idx + 1].table[i] * (ct - ct0)) /
				(ct1 - ct0);
	}
}
/*
 * Bilinearly resample a calibration table onto the crop/scale (and
 * flips) of the given camera mode. The output table has the same
 * dimensions as the input but its sampling grid follows the mode's
 * sensor window.
 */
void resampleCalTable(const Array2D<double> &calTableIn,
		      CameraMode const &cameraMode,
		      Array2D<double> &calTableOut)
{
	int X = calTableIn.dimensions().width;
	int Y = calTableIn.dimensions().height;

	/*
	 * Precalculate and cache the x sampling locations and phases to save
	 * recomputing them on every row. std::vector is used instead of
	 * variable-length arrays, which are a non-standard extension in C++.
	 */
	std::vector<int> xLo(X), xHi(X);
	std::vector<double> xf(X);
	double scaleX = cameraMode.sensorWidth /
			(cameraMode.width * cameraMode.scaleX);
	double xOff = cameraMode.cropX / (double)cameraMode.sensorWidth;
	double x = .5 / scaleX + xOff * X - .5;
	double xInc = 1 / scaleX;
	for (int i = 0; i < X; i++, x += xInc) {
		xLo[i] = floor(x);
		xf[i] = x - xLo[i];
		xHi[i] = std::min(xLo[i] + 1, X - 1);
		xLo[i] = std::max(xLo[i], 0);
		/* A horizontal flip mirrors the sample columns. */
		if (!!(cameraMode.transform & libcamera::Transform::HFlip)) {
			xLo[i] = X - 1 - xLo[i];
			xHi[i] = X - 1 - xHi[i];
		}
	}

	/* Now march over the output table generating the new values. */
	double scaleY = cameraMode.sensorHeight /
			(cameraMode.height * cameraMode.scaleY);
	double yOff = cameraMode.cropY / (double)cameraMode.sensorHeight;
	double y = .5 / scaleY + yOff * Y - .5;
	double yInc = 1 / scaleY;
	for (int j = 0; j < Y; j++, y += yInc) {
		int yLo = floor(y);
		double yf = y - yLo;
		int yHi = std::min(yLo + 1, Y - 1);
		yLo = std::max(yLo, 0);
		/* A vertical flip mirrors the sample rows. */
		if (!!(cameraMode.transform & libcamera::Transform::VFlip)) {
			yLo = Y - 1 - yLo;
			yHi = Y - 1 - yHi;
		}
		double const *rowAbove = calTableIn.ptr() + X * yLo;
		double const *rowBelow = calTableIn.ptr() + X * yHi;
		double *out = calTableOut.ptr() + X * j;
		for (int i = 0; i < X; i++) {
			double above = rowAbove[xLo[i]] * (1 - xf[i]) +
				       rowAbove[xHi[i]] * xf[i];
			double below = rowBelow[xLo[i]] * (1 - xf[i]) +
				       rowBelow[xHi[i]] * xf[i];
			*(out++) = above * (1 - yf) + below * yf;
		}
	}
}
/* Calculate chrominance statistics (R/G and B/G) for each region. */
static void calculateCrCb(const RgbyRegions &awbRegion, Array2D<double> &cr,
			  Array2D<double> &cb, uint32_t minCount, uint16_t minG)
{
	for (unsigned int i = 0; i < cr.size(); i++) {
		auto s = awbRegion.get(i);

		/*
		 * Do not return unreliable, or zero, colour ratio statistics.
		 * The counted <= minCount test also short-circuits before the
		 * divisions, guarding against counted == 0.
		 */
		if (s.counted <= minCount || s.val.gSum / s.counted <= minG ||
		    s.val.rSum / s.counted <= minG || s.val.bSum / s.counted <= minG) {
			cr[i] = cb[i] = InsufficientData;
			continue;
		}

		cr[i] = s.val.rSum / (double)s.val.gSum;
		cb[i] = s.val.bSum / (double)s.val.gSum;
	}
}
/* Scale each usable colour statistic by the calibration table entry,
 * leaving InsufficientData sentinels untouched. */
static void applyCalTable(const Array2D<double> &calTable, Array2D<double> &C)
{
	for (unsigned int i = 0; i < C.size(); i++) {
		if (C[i] == InsufficientData)
			continue;
		C[i] *= calTable[i];
	}
}
/*
 * Fold the calibration table into the lambdas, then renormalise so the
 * smallest new lambda is exactly 1.
 */
void compensateLambdasForCal(const Array2D<double> &calTable,
			     const Array2D<double> &oldLambdas,
			     Array2D<double> &newLambdas)
{
	double minNewLambda = std::numeric_limits<double>::max();
	const unsigned int count = newLambdas.size();
	for (unsigned int i = 0; i < count; i++) {
		newLambdas[i] = oldLambdas[i] * calTable[i];
		minNewLambda = std::min(minNewLambda, newLambdas[i]);
	}
	for (unsigned int i = 0; i < count; i++)
		newLambdas[i] /= minNewLambda;
}
/* Debug helper: dump the reciprocal of each table entry as a YAML-style list. */
[[maybe_unused]] static void printCalTable(const Array2D<double> &C)
{
	const Size &size = C.dimensions();
	printf("table: [\n");
	for (unsigned int j = 0; j < size.height; j++) {
		for (unsigned int i = 0; i < size.width; i++) {
			printf("%5.3f", 1.0 / C[j * size.width + i]);
			/* No trailing comma after the very last entry. */
			bool last = (i == size.width - 1) && (j == size.height - 1);
			if (!last)
				printf(",");
		}
		printf("\n");
	}
	printf("]\n");
}
/*
 * Compute weight out of 1.0 which reflects how similar we wish to make the
 * colours of these two regions.
 */
static double computeWeight(double Ci, double Cj, double sigma)
{
	/* Regions with no usable statistics get zero weight. */
	if (Ci == InsufficientData || Cj == InsufficientData)
		return 0;

	/* Gaussian fall-off with the colour difference, scaled by sigma. */
	const double d = (Ci - Cj) / sigma;
	return exp(-d * d / 2);
}
/* Compute all weights. */
static void computeW(const Array2D<double> &C, double sigma,
		     SparseArray<double> &W)
{
	size_t XY = C.size();
	size_t X = C.dimensions().width;

	/*
	 * For each region, weight against its four neighbours in the grid;
	 * edge cells get zero weight towards missing neighbours.
	 */
	for (unsigned int i = 0; i < XY; i++) {
		/* Start with neighbour above and go clockwise. */
		W[i][0] = i >= X ? computeWeight(C[i], C[i - X], sigma) : 0;
		W[i][1] = i % X < X - 1 ? computeWeight(C[i], C[i + 1], sigma) : 0;
		W[i][2] = i < XY - X ? computeWeight(C[i], C[i + X], sigma) : 0;
		W[i][3] = i % X ? computeWeight(C[i], C[i - 1], sigma) : 0;
	}
}
/* Compute M, the large but sparse matrix such that M * lambdas = 0. */
static void constructM(const Array2D<double> &C,
		       const SparseArray<double> &W,
		       SparseArray<double> &M)
{
	size_t XY = C.size();
	size_t X = C.dimensions().width;

	/* Small regularisation term keeping the system well-conditioned. */
	double epsilon = 0.001;
	for (unsigned int i = 0; i < XY; i++) {
		/*
		 * Note how, if C[i] == INSUFFICIENT_DATA, the weights will all
		 * be zero so the equation is still set up correctly.
		 */
		int m = !!(i >= X) + !!(i % X < X - 1) + !!(i < XY - X) +
			!!(i % X); /* total number of neighbours */
		/* we'll divide the diagonal out straight away */
		double diagonal = (epsilon + W[i][0] + W[i][1] + W[i][2] + W[i][3]) * C[i];
		M[i][0] = i >= X ? (W[i][0] * C[i - X] + epsilon / m * C[i]) / diagonal : 0;
		M[i][1] = i % X < X - 1 ? (W[i][1] * C[i + 1] + epsilon / m * C[i]) / diagonal : 0;
		M[i][2] = i < XY - X ? (W[i][2] * C[i + X] + epsilon / m * C[i]) / diagonal : 0;
		M[i][3] = i % X ? (W[i][3] * C[i - 1] + epsilon / m * C[i]) / diagonal : 0;
	}
}
/*
 * In the compute_lambda_ functions, note that the matrix coefficients for the
 * left/right neighbours are zero down the left/right edges, so we don't need
 * need to test the i value to exclude them.
 */
static double computeLambdaBottom(int i, const SparseArray<double> &M,
				  Array2D<double> &lambda)
{
	/* Bottom row: no neighbour above, so the M[i][0] term is omitted. */
	const int width = lambda.dimensions().width;
	return M[i][1] * lambda[i + 1] + M[i][2] * lambda[i + width] +
	       M[i][3] * lambda[i - 1];
}
static double computeLambdaBottomStart(int i, const SparseArray<double> &M,
				       Array2D<double> &lambda)
{
	/* Bottom-left corner: only the right and above neighbours exist. */
	const int width = lambda.dimensions().width;
	return M[i][1] * lambda[i + 1] +
	       M[i][2] * lambda[i + width];
}
static double computeLambdaInterior(int i, const SparseArray<double> &M,
				    Array2D<double> &lambda)
{
	/* Interior cell: all four neighbours contribute. */
	const int width = lambda.dimensions().width;
	return M[i][0] * lambda[i - width] +
	       M[i][1] * lambda[i + 1] +
	       M[i][2] * lambda[i + width] +
	       M[i][3] * lambda[i - 1];
}
static double computeLambdaTop(int i, const SparseArray<double> &M,
			       Array2D<double> &lambda)
{
	/* Top row: there is no neighbour above, so M[i][2] is not used. */
	const int width = lambda.dimensions().width;
	return M[i][0] * lambda[i - width] +
	       M[i][1] * lambda[i + 1] +
	       M[i][3] * lambda[i - 1];
}
static double computeLambdaTopEnd(int i, const SparseArray<double> &M,
				  Array2D<double> &lambda)
{
	/* Top-right corner: only the below and left neighbours exist. */
	const int width = lambda.dimensions().width;
	return M[i][0] * lambda[i - width] +
	       M[i][3] * lambda[i - 1];
}
/* Gauss-Seidel iteration with over-relaxation. */
static double gaussSeidel2Sor(const SparseArray<double> &M, double omega,
			      Array2D<double> &lambda, double lambdaBound)
{
	int XY = lambda.size();
	int X = lambda.dimensions().width;
	/* Solutions are clamped to stay within +/- lambdaBound of 1. */
	const double min = 1 - lambdaBound, max = 1 + lambdaBound;
	/* Keep the previous solution so we can over-relax and measure change. */
	Array2D<double> oldLambda = lambda;
	int i;
	/* Forward sweep: bottom-left corner, bottom row, interior, top row. */
	lambda[0] = computeLambdaBottomStart(0, M, lambda);
	lambda[0] = std::clamp(lambda[0], min, max);
	for (i = 1; i < X; i++) {
		lambda[i] = computeLambdaBottom(i, M, lambda);
		lambda[i] = std::clamp(lambda[i], min, max);
	}
	for (; i < XY - X; i++) {
		lambda[i] = computeLambdaInterior(i, M, lambda);
		lambda[i] = std::clamp(lambda[i], min, max);
	}
	for (; i < XY - 1; i++) {
		lambda[i] = computeLambdaTop(i, M, lambda);
		lambda[i] = std::clamp(lambda[i], min, max);
	}
	lambda[i] = computeLambdaTopEnd(i, M, lambda);
	lambda[i] = std::clamp(lambda[i], min, max);
	/*
	 * Also solve the system from bottom to top, to help spread the updates
	 * better.
	 */
	lambda[i] = computeLambdaTopEnd(i, M, lambda);
	lambda[i] = std::clamp(lambda[i], min, max);
	for (i = XY - 2; i >= XY - X; i--) {
		lambda[i] = computeLambdaTop(i, M, lambda);
		lambda[i] = std::clamp(lambda[i], min, max);
	}
	for (; i >= X; i--) {
		lambda[i] = computeLambdaInterior(i, M, lambda);
		lambda[i] = std::clamp(lambda[i], min, max);
	}
	for (; i >= 1; i--) {
		lambda[i] = computeLambdaBottom(i, M, lambda);
		lambda[i] = std::clamp(lambda[i], min, max);
	}
	lambda[0] = computeLambdaBottomStart(0, M, lambda);
	lambda[0] = std::clamp(lambda[0], min, max);
	/*
	 * Apply over-relaxation (blend towards the new solution by omega) and
	 * return the largest signed per-element change for convergence tests.
	 */
	double maxDiff = 0;
	for (i = 0; i < XY; i++) {
		lambda[i] = oldLambda[i] + (lambda[i] - oldLambda[i]) * omega;
		if (fabs(lambda[i] - oldLambda[i]) > fabs(maxDiff))
			maxDiff = lambda[i] - oldLambda[i];
	}
	return maxDiff;
}
/* Normalise the values so that the smallest value is 1. */
static void normalise(Array2D<double> &results)
{
	double minval = *std::min_element(results.begin(), results.end());
	/*
	 * Scale every element in place. The previous implementation passed a
	 * by-value lambda to std::for_each and discarded its return value, so
	 * the array was never actually modified.
	 */
	std::for_each(results.begin(), results.end(),
		      [minval](double &val) { val /= minval; });
}
/* Rescale the values so that the average value is 1. */
static void reaverage(Array2D<double> &data)
{
	double sum = std::accumulate(data.begin(), data.end(), 0.0);
	double ratio = 1 / (sum / data.size());
	/*
	 * Scale every element in place. The previous implementation passed a
	 * by-value lambda to std::for_each and discarded its return value, so
	 * the array was never actually modified.
	 */
	std::for_each(data.begin(), data.end(),
		      [ratio](double &val) { val *= ratio; });
}
/*
 * Iterate the Gauss-Seidel/SOR solver until the largest per-element update
 * falls below the threshold, or nIter iterations have run. The final lambdas
 * are rescaled so their average is 1.
 */
static void runMatrixIterations(const Array2D<double> &C,
				Array2D<double> &lambda,
				const SparseArray<double> &W,
				SparseArray<double> &M, double omega,
				unsigned int nIter, double threshold, double lambdaBound)
{
	constructM(C, W, M);
	double lastMaxDiff = std::numeric_limits<double>::max();
	for (unsigned int i = 0; i < nIter; i++) {
		double maxDiff = fabs(gaussSeidel2Sor(M, omega, lambda, lambdaBound));
		if (maxDiff < threshold) {
			LOG(RPiAlsc, Debug)
				<< "Stop after " << i + 1 << " iterations";
			break;
		}
		/*
		 * this happens very occasionally (so make a note), though
		 * doesn't seem to matter
		 */
		if (maxDiff > lastMaxDiff)
			LOG(RPiAlsc, Debug)
				<< "Iteration " << i << ": maxDiff gone up "
				<< lastMaxDiff << " to " << maxDiff;
		lastMaxDiff = maxDiff;
	}
	/* We're going to normalise the lambdas so the total average is 1. */
	reaverage(lambda);
}
/* Fold the luminance LUT (scaled by the given strength) into an R/B table. */
static void addLuminanceRb(Array2D<double> &result, const Array2D<double> &lambda,
			   const Array2D<double> &luminanceLut,
			   double luminanceStrength)
{
	const unsigned int count = result.size();
	for (unsigned int i = 0; i < count; i++) {
		double lumGain = (luminanceLut[i] - 1) * luminanceStrength + 1;
		result[i] = lambda[i] * lumGain;
	}
}
/* As addLuminanceRb, but with a single scalar lambda for the G channel. */
static void addLuminanceG(Array2D<double> &result, double lambda,
			  const Array2D<double> &luminanceLut,
			  double luminanceStrength)
{
	const unsigned int count = result.size();
	for (unsigned int i = 0; i < count; i++) {
		double lumGain = (luminanceLut[i] - 1) * luminanceStrength + 1;
		result[i] = lambda * lumGain;
	}
}
/* Fold luminance into each of the R/G/B tables, then normalise every table. */
void addLuminanceToTables(std::array<Array2D<double>, 3> &results,
			  const Array2D<double> &lambdaR,
			  double lambdaG, const Array2D<double> &lambdaB,
			  const Array2D<double> &luminanceLut,
			  double luminanceStrength)
{
	addLuminanceRb(results[0], lambdaR, luminanceLut, luminanceStrength);
	addLuminanceG(results[1], lambdaG, luminanceLut, luminanceStrength);
	addLuminanceRb(results[2], lambdaB, luminanceLut, luminanceStrength);

	for (auto &table : results)
		normalise(table);
}
/*
 * Main body of the adaptive lens shading calculation, run on the async
 * thread: gather colour statistics, apply the static calibrations, solve for
 * the adaptive lambdas and fold in calibration and luminance corrections.
 */
void Alsc::doAlsc()
{
	/* Scratch buffers; tmpC_/tmpM_ are members so allocations are reused. */
	Array2D<double> &cr = tmpC_[0], &cb = tmpC_[1], &calTableR = tmpC_[2],
			&calTableB = tmpC_[3], &calTableTmp = tmpC_[4];
	SparseArray<double> &wr = tmpM_[0], &wb = tmpM_[1], &M = tmpM_[2];
	/*
	 * Calculate our R/B ("Cr"/"Cb") colour statistics, and assess which are
	 * usable.
	 */
	calculateCrCb(statistics_, cr, cb, config_.minCount, config_.minG);
	/*
	 * Fetch the new calibrations (if any) for this CT. Resample them in
	 * case the camera mode is not full-frame.
	 */
	getCalTable(ct_, config_.calibrationsCr, calTableTmp);
	resampleCalTable(calTableTmp, cameraMode_, calTableR);
	getCalTable(ct_, config_.calibrationsCb, calTableTmp);
	resampleCalTable(calTableTmp, cameraMode_, calTableB);
	/*
	 * You could print out the cal tables for this image here, if you're
	 * tuning the algorithm...
	 * Apply any calibration to the statistics, so the adaptive algorithm
	 * makes only the extra adjustments.
	 */
	applyCalTable(calTableR, cr);
	applyCalTable(calTableB, cb);
	/* Compute weights between zones. */
	computeW(cr, config_.sigmaCr, wr);
	computeW(cb, config_.sigmaCb, wb);
	/* Run Gauss-Seidel iterations over the resulting matrix, for R and B. */
	runMatrixIterations(cr, lambdaR_, wr, M, config_.omega, config_.nIter,
			    config_.threshold, config_.lambdaBound);
	runMatrixIterations(cb, lambdaB_, wb, M, config_.omega, config_.nIter,
			    config_.threshold, config_.lambdaBound);
	/*
	 * Fold the calibrated gains into our final lambda values. (Note that on
	 * the next run, we re-start with the lambda values that don't have the
	 * calibration gains included.)
	 */
	compensateLambdasForCal(calTableR, lambdaR_, asyncLambdaR_);
	compensateLambdasForCal(calTableB, lambdaB_, asyncLambdaB_);
	/* Fold in the luminance table at the appropriate strength. */
	addLuminanceToTables(asyncResults_, asyncLambdaR_, 1.0,
			     asyncLambdaB_, luminanceTable_,
			     config_.luminanceStrength);
}
/* Register algorithm with the system. */
static Algorithm *create(Controller *controller)
{
	/* The controller owns and deletes the algorithm instance. */
	return (Algorithm *)new Alsc(controller);
}
static RegisterAlgorithm reg(NAME, &create);
|
0 | repos/libcamera/src/ipa/rpi/controller | repos/libcamera/src/ipa/rpi/controller/rpi/lux.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* Lux control algorithm
*/
#pragma once
#include <mutex>
#include <libcamera/base/utils.h>
#include "../lux_status.h"
#include "../algorithm.h"
/* This is our implementation of the "lux control algorithm". */
namespace RPiController {
class Lux : public Algorithm
{
public:
	Lux(Controller *controller);
	char const *name() const override;
	int read(const libcamera::YamlObject &params) override;
	void prepare(Metadata *imageMetadata) override;
	void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
	/* Inform the algorithm of the lens aperture currently in use. */
	void setCurrentAperture(double aperture);

private:
	/*
	 * These values define the conditions of the reference image, against
	 * which we compare the new image.
	 */
	libcamera::utils::Duration referenceShutterSpeed_;
	double referenceGain_;
	double referenceAperture_; /* units of 1/f */
	double referenceY_; /* out of 65536 */
	double referenceLux_;
	double currentAperture_;
	/* Most recent lux estimate; guarded by mutex_ (prepare/process run on different threads). */
	LuxStatus status_;
	std::mutex mutex_;
};
} /* namespace RPiController */
|
0 | repos/libcamera/src/ipa/rpi/controller | repos/libcamera/src/ipa/rpi/controller/rpi/ccm.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* CCM (colour correction matrix) control algorithm
*/
#include <libcamera/base/log.h>
#include "../awb_status.h"
#include "../ccm_status.h"
#include "../lux_status.h"
#include "../metadata.h"
#include "ccm.h"
using namespace RPiController;
using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiCcm)
/*
* This algorithm selects a CCM (Colour Correction Matrix) according to the
* colour temperature estimated by AWB (interpolating between known matricies as
* necessary). Additionally the amount of colour saturation can be controlled
* both according to the current estimated lux level and according to a
* saturation setting that is exposed to applications.
*/
#define NAME "rpi.ccm"
/* Default constructor: the all-zeroes matrix. */
Matrix::Matrix()
{
	for (int j = 0; j < 3; j++)
		for (int i = 0; i < 3; i++)
			m[j][i] = 0.0;
}
/* Construct from nine coefficients given in row-major order. */
Matrix::Matrix(double m0, double m1, double m2, double m3, double m4, double m5,
	       double m6, double m7, double m8)
{
	m[0][0] = m0;
	m[0][1] = m1;
	m[0][2] = m2;
	m[1][0] = m3;
	m[1][1] = m4;
	m[1][2] = m5;
	m[2][0] = m6;
	m[2][1] = m7;
	m[2][2] = m8;
}
/*
 * Fill the 3x3 matrix from a YAML list of exactly nine numbers (row-major).
 * Returns 0 on success, -EINVAL on a malformed list.
 */
int Matrix::read(const libcamera::YamlObject &params)
{
	/* Treat the 3x3 storage as a flat array of nine doubles. */
	double *ptr = (double *)m;

	if (params.size() != 9) {
		LOG(RPiCcm, Error) << "Wrong number of values in CCM";
		return -EINVAL;
	}

	for (const auto &param : params.asList()) {
		auto value = param.get<double>();
		if (!value)
			return -EINVAL;
		*ptr++ = *value;
	}

	return 0;
}
/* Saturation defaults to 1.0 (no adjustment) until set by the application. */
Ccm::Ccm(Controller *controller)
	: CcmAlgorithm(controller), saturation_(1.0) {}

char const *Ccm::name() const
{
	return NAME;
}
/*
 * Parse the tuning file: an optional lux -> saturation Pwl and a list of
 * colour-temperature/CCM pairs, which must be in increasing CT order.
 * Returns 0 on success or a negative errno.
 */
int Ccm::read(const libcamera::YamlObject &params)
{
	int ret;

	if (params.contains("saturation")) {
		config_.saturation = params["saturation"].get<ipa::Pwl>(ipa::Pwl{});
		if (config_.saturation.empty())
			return -EINVAL;
	}

	for (auto &p : params["ccms"].asList()) {
		auto value = p["ct"].get<double>();
		if (!value)
			return -EINVAL;

		CtCcm ctCcm;
		ctCcm.ct = *value;
		ret = ctCcm.ccm.read(p["ccm"]);
		if (ret)
			return ret;

		/* Interpolation in calculateCcm() relies on this ordering. */
		if (!config_.ccms.empty() && ctCcm.ct <= config_.ccms.back().ct) {
			LOG(RPiCcm, Error)
				<< "CCM not in increasing colour temperature order";
			return -EINVAL;
		}

		config_.ccms.push_back(std::move(ctCcm));
	}

	if (config_.ccms.empty()) {
		LOG(RPiCcm, Error) << "No CCMs specified";
		return -EINVAL;
	}

	return 0;
}
/* Set the application-controlled saturation factor (applied in prepare()). */
void Ccm::setSaturation(double saturation)
{
	saturation_ = saturation;
}

/* Nothing to initialise; configuration happens entirely in read(). */
void Ccm::initialise()
{
}
/*
 * Copy the tagged entry out of already-locked metadata. Returns false when
 * the tag is absent, leaving "value" untouched.
 */
template<typename T>
static bool getLocked(Metadata *metadata, std::string const &tag, T &value)
{
	T *entry = metadata->getLocked<T>(tag);
	if (!entry)
		return false;

	value = *entry;
	return true;
}
/*
 * Select the CCM for the given colour temperature, linearly interpolating
 * between the two bracketing calibrated matrices. Outside the calibrated
 * range the nearest endpoint matrix is returned unchanged.
 */
Matrix calculateCcm(std::vector<CtCcm> const &ccms, double ct)
{
	if (ct <= ccms.front().ct)
		return ccms.front().ccm;
	if (ct >= ccms.back().ct)
		return ccms.back().ccm;

	/* Find the first entry at or above ct; interpolate with its predecessor. */
	unsigned int idx = 0;
	while (ct > ccms[idx].ct)
		idx++;

	double lambda = (ct - ccms[idx - 1].ct) /
			(ccms[idx].ct - ccms[idx - 1].ct);
	return lambda * ccms[idx].ccm + (1.0 - lambda) * ccms[idx - 1].ccm;
}
/*
 * Apply a saturation factor to a CCM by converting to a YCbCr-like space,
 * scaling the chroma components, and converting back to RGB.
 */
Matrix applySaturation(Matrix const &ccm, double saturation)
{
	/* RGB -> YCbCr-style transform and its approximate inverse. */
	Matrix RGB2Y(0.299, 0.587, 0.114, -0.169, -0.331, 0.500, 0.500, -0.419,
		     -0.081);
	Matrix Y2RGB(1.000, 0.000, 1.402, 1.000, -0.345, -0.714, 1.000, 1.771,
		     0.000);
	/* Scale only the two chroma channels; luma is left untouched. */
	Matrix S(1, 0, 0, 0, saturation, 0, 0, 0, saturation);
	return Y2RGB * S * RGB2Y * ccm;
}
/*
 * Per-frame processing: pick the CCM for the AWB-estimated colour
 * temperature, apply lux-dependent and application saturation, and publish
 * the result as "ccm.status" for the hardware to program.
 */
void Ccm::prepare(Metadata *imageMetadata)
{
	bool awbOk = false, luxOk = false;
	struct AwbStatus awb = {};
	awb.temperatureK = 4000; /* in case no metadata */
	struct LuxStatus lux = {};
	lux.lux = 400; /* in case no metadata */
	{
		/* grab mutex just once to get everything */
		std::lock_guard<Metadata> lock(*imageMetadata);
		awbOk = getLocked(imageMetadata, "awb.status", awb);
		luxOk = getLocked(imageMetadata, "lux.status", lux);
	}
	if (!awbOk)
		LOG(RPiCcm, Warning) << "no colour temperature found";
	if (!luxOk)
		LOG(RPiCcm, Warning) << "no lux value found";
	Matrix ccm = calculateCcm(config_.ccms, awb.temperatureK);
	double saturation = saturation_;
	struct CcmStatus ccmStatus;
	/* Report the application saturation, before the lux-based scaling. */
	ccmStatus.saturation = saturation;
	if (!config_.saturation.empty())
		saturation *= config_.saturation.eval(
			config_.saturation.domain().clamp(lux.lux));
	ccm = applySaturation(ccm, saturation);
	/* Clamp coefficients to the hardware's representable range. */
	for (int j = 0; j < 3; j++)
		for (int i = 0; i < 3; i++)
			ccmStatus.matrix[j * 3 + i] =
				std::max(-8.0, std::min(7.9999, ccm.m[j][i]));
	LOG(RPiCcm, Debug)
		<< "colour temperature " << awb.temperatureK << "K";
	LOG(RPiCcm, Debug)
		<< "CCM: " << ccmStatus.matrix[0] << " " << ccmStatus.matrix[1]
		<< " " << ccmStatus.matrix[2] << "     "
		<< ccmStatus.matrix[3] << " " << ccmStatus.matrix[4]
		<< " " << ccmStatus.matrix[5] << "     "
		<< ccmStatus.matrix[6] << " " << ccmStatus.matrix[7]
		<< " " << ccmStatus.matrix[8];
	imageMetadata->set("ccm.status", ccmStatus);
}
/* Register algorithm with the system. */
static Algorithm *create(Controller *controller)
{
	/* The controller owns and deletes the algorithm instance. */
	return (Algorithm *)new Ccm(controller);
	/* (Removed a stray empty statement that followed the return.) */
}
static RegisterAlgorithm reg(NAME, &create);
|
0 | repos/libcamera/src/ipa/rpi/controller | repos/libcamera/src/ipa/rpi/controller/rpi/cac.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2023, Raspberry Pi Ltd
*
* cac.hpp - CAC control algorithm
*/
#pragma once
#include "algorithm.h"
#include "cac_status.h"
namespace RPiController {
/* Tuning data for chromatic aberration correction. */
struct CacConfig {
	bool enabled;
	/* Per-channel displacement LUTs: red x/y and blue x/y. */
	std::vector<double> lutRx;
	std::vector<double> lutRy;
	std::vector<double> lutBx;
	std::vector<double> lutBy;
};
class Cac : public Algorithm
{
public:
	Cac(Controller *controller = NULL);
	char const *name() const override;
	int read(const libcamera::YamlObject &params) override;
	void prepare(Metadata *imageMetadata) override;

private:
	CacConfig config_;
	/* Precomputed status published to image metadata each frame. */
	CacStatus cacStatus_;
};
} // namespace RPiController
|
0 | repos/libcamera/src/ipa/rpi/controller | repos/libcamera/src/ipa/rpi/controller/rpi/denoise.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2022 Raspberry Pi Ltd
*
* Denoise (spatial, colour, temporal) control algorithm
*/
#include "denoise.h"
#include <libcamera/base/log.h>
#include "denoise_status.h"
#include "noise_status.h"
using namespace RPiController;
using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiDenoise)
// Calculate settings for the denoise blocks using the noise profile in
// the image metadata.
#define NAME "rpi.denoise"
/*
 * Parse one denoise configuration. Each of the sdn/cdn/tdn sub-blocks is
 * enabled simply by being present in the tuning file. Always returns 0.
 */
int DenoiseConfig::read(const libcamera::YamlObject &params)
{
	sdnEnable = params.contains("sdn");
	if (sdnEnable) {
		auto &sdnParams = params["sdn"];
		sdnDeviation = sdnParams["deviation"].get<double>(3.2);
		sdnStrength = sdnParams["strength"].get<double>(0.25);
		/* deviation2 and the no-TDN variants default to the base values. */
		sdnDeviation2 = sdnParams["deviation2"].get<double>(sdnDeviation);
		sdnDeviationNoTdn = sdnParams["deviation_no_tdn"].get<double>(sdnDeviation);
		sdnStrengthNoTdn = sdnParams["strength_no_tdn"].get<double>(sdnStrength);
		sdnTdnBackoff = sdnParams["backoff"].get<double>(0.75);
	}

	cdnEnable = params.contains("cdn");
	if (cdnEnable) {
		auto &cdnParams = params["cdn"];
		cdnDeviation = cdnParams["deviation"].get<double>(120);
		cdnStrength = cdnParams["strength"].get<double>(0.2);
	}

	tdnEnable = params.contains("tdn");
	if (tdnEnable) {
		auto &tdnParams = params["tdn"];
		tdnDeviation = tdnParams["deviation"].get<double>(0.5);
		tdnThreshold = tdnParams["threshold"].get<double>(0.75);
	} else if (sdnEnable) {
		/*
		 * If SDN is enabled but TDN isn't, overwrite all the SDN settings
		 * with the "no TDN" versions. This makes it easier to enable or
		 * disable TDN in the tuning file without editing all the other
		 * parameters.
		 */
		sdnDeviation = sdnDeviation2 = sdnDeviationNoTdn;
		sdnStrength = sdnStrengthNoTdn;
	}

	return 0;
}
/* Denoise starts in full colour high-quality mode until told otherwise. */
Denoise::Denoise(Controller *controller)
	: DenoiseAlgorithm(controller), mode_(DenoiseMode::ColourHighQuality)
{
}

char const *Denoise::name() const
{
	return NAME;
}
/*
 * Read the tuning. Two layouts are supported: a flat block (treated as the
 * single "normal" config) or a dictionary of named configs, which must then
 * include a "normal" entry.
 */
int Denoise::read(const libcamera::YamlObject &params)
{
	if (!params.contains("normal")) {
		/* Legacy flat layout: the whole block is the "normal" config. */
		configs_["normal"].read(params);
		currentConfig_ = &configs_["normal"];

		return 0;
	}

	for (const auto &[key, value] : params.asDict()) {
		if (configs_[key].read(value)) {
			LOG(RPiDenoise, Error) << "Failed to read denoise config " << key;
			return -EINVAL;
		}
	}

	auto it = configs_.find("normal");
	if (it == configs_.end()) {
		LOG(RPiDenoise, Error) << "No normal denoise settings found";
		return -EINVAL;
	}
	currentConfig_ = &it->second;

	return 0;
}
/* Nothing to initialise; configuration happens entirely in read(). */
void Denoise::initialise()
{
}

void Denoise::switchMode([[maybe_unused]] CameraMode const &cameraMode,
			 [[maybe_unused]] Metadata *metadata)
{
	/* A mode switch effectively resets temporal denoise and it has to start over. */
	currentSdnDeviation_ = currentConfig_->sdnDeviationNoTdn;
	currentSdnStrength_ = currentConfig_->sdnStrengthNoTdn;
	currentSdnDeviation2_ = currentConfig_->sdnDeviationNoTdn;
}
/*
 * Per-frame processing: scale the sensor noise profile by the configured
 * deviations and publish SDN/TDN/CDN status entries for the hardware,
 * ramping the SDN parameters towards their steady-state values as TDN
 * settles in.
 */
void Denoise::prepare(Metadata *imageMetadata)
{
	struct NoiseStatus noiseStatus = {};
	noiseStatus.noiseSlope = 3.0; // in case no metadata
	if (imageMetadata->get("noise.status", noiseStatus) != 0)
		LOG(RPiDenoise, Warning) << "no noise profile found";

	LOG(RPiDenoise, Debug)
		<< "Noise profile: constant " << noiseStatus.noiseConstant
		<< " slope " << noiseStatus.noiseSlope;

	if (mode_ == DenoiseMode::Off)
		return;

	if (currentConfig_->sdnEnable) {
		struct SdnStatus sdn;
		sdn.noiseConstant = noiseStatus.noiseConstant * currentSdnDeviation_;
		sdn.noiseSlope = noiseStatus.noiseSlope * currentSdnDeviation_;
		/*
		 * Both "2" parameters must use the ramped currentSdnDeviation2_;
		 * previously noiseConstant2 used the static config value while
		 * noiseSlope2 used the ramped one, which was inconsistent with
		 * the backoff state machine below.
		 */
		sdn.noiseConstant2 = noiseStatus.noiseConstant * currentSdnDeviation2_;
		sdn.noiseSlope2 = noiseStatus.noiseSlope * currentSdnDeviation2_;
		sdn.strength = currentSdnStrength_;
		imageMetadata->set("sdn.status", sdn);
		LOG(RPiDenoise, Debug)
			<< "const " << sdn.noiseConstant
			<< " slope " << sdn.noiseSlope
			<< " str " << sdn.strength
			<< " const2 " << sdn.noiseConstant2
			<< " slope2 " << sdn.noiseSlope2;

		/* For the next frame, we back off the SDN parameters as TDN ramps up. */
		double f = currentConfig_->sdnTdnBackoff;
		currentSdnDeviation_ = f * currentSdnDeviation_ + (1 - f) * currentConfig_->sdnDeviation;
		currentSdnStrength_ = f * currentSdnStrength_ + (1 - f) * currentConfig_->sdnStrength;
		currentSdnDeviation2_ = f * currentSdnDeviation2_ + (1 - f) * currentConfig_->sdnDeviation2;
	}

	if (currentConfig_->tdnEnable) {
		struct TdnStatus tdn;
		tdn.noiseConstant = noiseStatus.noiseConstant * currentConfig_->tdnDeviation;
		tdn.noiseSlope = noiseStatus.noiseSlope * currentConfig_->tdnDeviation;
		tdn.threshold = currentConfig_->tdnThreshold;
		imageMetadata->set("tdn.status", tdn);
		LOG(RPiDenoise, Debug)
			<< "programmed tdn threshold " << tdn.threshold
			<< " constant " << tdn.noiseConstant
			<< " slope " << tdn.noiseSlope;
	}

	if (currentConfig_->cdnEnable && mode_ != DenoiseMode::ColourOff) {
		struct CdnStatus cdn;
		cdn.threshold = currentConfig_->cdnDeviation * noiseStatus.noiseSlope + noiseStatus.noiseConstant;
		cdn.strength = currentConfig_->cdnStrength;
		imageMetadata->set("cdn.status", cdn);
		LOG(RPiDenoise, Debug)
			<< "programmed cdn threshold " << cdn.threshold
			<< " strength " << cdn.strength;
	}
}
void Denoise::setMode(DenoiseMode mode)
{
	// We only distinguish between off and all other modes.
	mode_ = mode;
}

// Switch to the named config; if absent, keep the current one.
void Denoise::setConfig(std::string const &name)
{
	auto it = configs_.find(name);
	if (it == configs_.end()) {
		/*
		 * Some platforms may have no need for different denoise settings, so we only issue
		 * a warning if there clearly are several configurations.
		 */
		if (configs_.size() > 1)
			LOG(RPiDenoise, Warning) << "No denoise config found for " << name;
		else
			LOG(RPiDenoise, Debug) << "No denoise config found for " << name;
	} else
		currentConfig_ = &it->second;
}
// Register algorithm with the system.
static Algorithm *Create(Controller *controller)
{
	// The controller owns and deletes the algorithm instance.
	return (Algorithm *)new Denoise(controller);
}
static RegisterAlgorithm reg(NAME, &Create);
|
0 | repos/libcamera/src/ipa/rpi/controller | repos/libcamera/src/ipa/rpi/controller/rpi/awb.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* AWB control algorithm
*/
#include <assert.h>
#include <functional>
#include <libcamera/base/log.h>
#include "../lux_status.h"
#include "alsc_status.h"
#include "awb.h"
using namespace RPiController;
using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiAwb)
#define NAME "rpi.awb"
/*
* todo - the locking in this algorithm needs some tidying up as has been done
* elsewhere (ALSC and AGC).
*/
int AwbMode::read(const libcamera::YamlObject ¶ms)
{
auto value = params["lo"].get<double>();
if (!value)
return -EINVAL;
ctLo = *value;
value = params["hi"].get<double>();
if (!value)
return -EINVAL;
ctHi = *value;
return 0;
}
/*
 * Read one AWB prior: a lux level and the associated prior log-likelihood
 * Pwl. Returns 0 on success, -EINVAL on missing/invalid fields.
 */
int AwbPrior::read(const libcamera::YamlObject &params)
{
	auto value = params["lux"].get<double>();
	if (!value)
		return -EINVAL;
	lux = *value;

	prior = params["prior"].get<ipa::Pwl>(ipa::Pwl{});
	return prior.empty() ? -EINVAL : 0;
}
/*
 * Parse the CT curve: a flat list of (ct, r, b) triples, appended to the
 * ctR and ctB Pwls. Requires at least two triples and a multiple of three
 * values. Returns 0 on success, -EINVAL otherwise.
 */
static int readCtCurve(ipa::Pwl &ctR, ipa::Pwl &ctB, const libcamera::YamlObject &params)
{
	if (params.size() % 3) {
		LOG(RPiAwb, Error) << "AwbConfig: incomplete CT curve entry";
		return -EINVAL;
	}

	if (params.size() < 6) {
		LOG(RPiAwb, Error) << "AwbConfig: insufficient points in CT curve";
		return -EINVAL;
	}

	const auto &list = params.asList();

	/* The iterator is advanced twice inside the body to consume r and b. */
	for (auto it = list.begin(); it != list.end(); it++) {
		auto value = it->get<double>();
		if (!value)
			return -EINVAL;
		double ct = *value;

		/* CT values must be monotonically increasing for the Pwl. */
		assert(it == list.begin() || ct != ctR.domain().end);

		value = (++it)->get<double>();
		if (!value)
			return -EINVAL;
		ctR.append(ct, *value);

		value = (++it)->get<double>();
		if (!value)
			return -EINVAL;
		ctB.append(ct, *value);
	}

	return 0;
}
int AwbConfig::read(const libcamera::YamlObject ¶ms)
{
int ret;
bayes = params["bayes"].get<int>(1);
framePeriod = params["frame_period"].get<uint16_t>(10);
startupFrames = params["startup_frames"].get<uint16_t>(10);
convergenceFrames = params["convergence_frames"].get<unsigned int>(3);
speed = params["speed"].get<double>(0.05);
if (params.contains("ct_curve")) {
ret = readCtCurve(ctR, ctB, params["ct_curve"]);
if (ret)
return ret;
/* We will want the inverse functions of these too. */
ctRInverse = ctR.inverse().first;
ctBInverse = ctB.inverse().first;
}
if (params.contains("priors")) {
for (const auto &p : params["priors"].asList()) {
AwbPrior prior;
ret = prior.read(p);
if (ret)
return ret;
if (!priors.empty() && prior.lux <= priors.back().lux) {
LOG(RPiAwb, Error) << "AwbConfig: Prior must be ordered in increasing lux value";
return -EINVAL;
}
priors.push_back(prior);
}
if (priors.empty()) {
LOG(RPiAwb, Error) << "AwbConfig: no AWB priors configured";
return ret;
}
}
if (params.contains("modes")) {
for (const auto &[key, value] : params["modes"].asDict()) {
ret = modes[key].read(value);
if (ret)
return ret;
if (defaultMode == nullptr)
defaultMode = &modes[key];
}
if (defaultMode == nullptr) {
LOG(RPiAwb, Error) << "AwbConfig: no AWB modes configured";
return -EINVAL;
}
}
minPixels = params["min_pixels"].get<double>(16.0);
minG = params["min_G"].get<uint16_t>(32);
minRegions = params["min_regions"].get<uint32_t>(10);
deltaLimit = params["delta_limit"].get<double>(0.2);
coarseStep = params["coarse_step"].get<double>(0.2);
transversePos = params["transverse_pos"].get<double>(0.01);
transverseNeg = params["transverse_neg"].get<double>(0.01);
if (transversePos <= 0 || transverseNeg <= 0) {
LOG(RPiAwb, Error) << "AwbConfig: transverse_pos/neg must be > 0";
return -EINVAL;
}
sensitivityR = params["sensitivity_r"].get<double>(1.0);
sensitivityB = params["sensitivity_b"].get<double>(1.0);
if (bayes) {
if (ctR.empty() || ctB.empty() || priors.empty() ||
defaultMode == nullptr) {
LOG(RPiAwb, Warning)
<< "Bayesian AWB mis-configured - switch to Grey method";
bayes = false;
}
}
fast = params[fast].get<int>(bayes); /* default to fast for Bayesian, otherwise slow */
whitepointR = params["whitepoint_r"].get<double>(0.0);
whitepointB = params["whitepoint_b"].get<double>(0.0);
if (bayes == false)
sensitivityR = sensitivityB = 1.0; /* nor do sensitivities make any sense */
return 0;
}
/* Construction spawns the asynchronous worker thread used by restartAsync(). */
Awb::Awb(Controller *controller)
	: AwbAlgorithm(controller)
{
	asyncAbort_ = asyncStart_ = asyncStarted_ = asyncFinished_ = false;
	mode_ = nullptr;
	/* Zero manual gains mean "auto" (see isAutoEnabled()). */
	manualR_ = manualB_ = 0.0;
	asyncThread_ = std::thread(std::bind(&Awb::asyncFunc, this));
}

Awb::~Awb()
{
	/* Wake the worker with the abort flag set, then join it. */
	{
		std::lock_guard<std::mutex> lock(mutex_);
		asyncAbort_ = true;
	}
	asyncSignal_.notify_one();
	asyncThread_.join();
}

char const *Awb::name() const
{
	return NAME;
}
int Awb::read(const libcamera::YamlObject &params)
{
	return config_.read(params);
}

/* Seed the filtered results with plausible values before any statistics arrive. */
void Awb::initialise()
{
	frameCount_ = framePhase_ = 0;
	/*
	 * Put something sane into the status that we are filtering towards,
	 * just in case the first few frames don't have anything meaningful in
	 * them.
	 */
	if (!config_.ctR.empty() && !config_.ctB.empty()) {
		syncResults_.temperatureK = config_.ctR.domain().clamp(4000);
		syncResults_.gainR = 1.0 / config_.ctR.eval(syncResults_.temperatureK);
		syncResults_.gainG = 1.0;
		syncResults_.gainB = 1.0 / config_.ctB.eval(syncResults_.temperatureK);
	} else {
		/* random values just to stop the world blowing up */
		syncResults_.temperatureK = 4500;
		syncResults_.gainR = syncResults_.gainG = syncResults_.gainB = 1.0;
	}
	prevSyncResults_ = syncResults_;
	asyncResults_ = syncResults_;
}

/* Report the gains the algorithm would currently apply. */
void Awb::initialValues(double &gainR, double &gainB)
{
	gainR = syncResults_.gainR;
	gainB = syncResults_.gainB;
}
void Awb::disableAuto()
{
	/* Freeze the most recent values, and treat them as manual gains */
	manualR_ = syncResults_.gainR = prevSyncResults_.gainR;
	manualB_ = syncResults_.gainB = prevSyncResults_.gainB;
	syncResults_.gainG = prevSyncResults_.gainG;
	syncResults_.temperatureK = prevSyncResults_.temperatureK;
}

/* Zero manual gains put the algorithm back into automatic mode. */
void Awb::enableAuto()
{
	manualR_ = 0.0;
	manualB_ = 0.0;
}
/*
 * Number of frames to drop while AWB converges; zero when manual gains are
 * in force, since there is nothing to converge.
 */
unsigned int Awb::getConvergenceFrames() const
{
	return isAutoEnabled() ? config_.convergenceFrames : 0;
}
/* Record the requested mode name; it is resolved in restartAsync(). */
void Awb::setMode(std::string const &modeName)
{
	modeName_ = modeName;
}
void Awb::setManualGains(double manualR, double manualB)
{
	/* If any of these are 0.0, we switch back to auto. */
	manualR_ = manualR;
	manualB_ = manualB;
	/*
	 * If not in auto mode, set these values into the syncResults which
	 * means that Prepare() will adopt them immediately.
	 */
	if (!isAutoEnabled()) {
		syncResults_.gainR = prevSyncResults_.gainR = manualR_;
		syncResults_.gainG = prevSyncResults_.gainG = 1.0;
		syncResults_.gainB = prevSyncResults_.gainB = manualB_;
		if (config_.bayes) {
			/* Also estimate the best corresponding colour temperature from the curves. */
			double ctR = config_.ctRInverse.eval(config_.ctRInverse.domain().clamp(1 / manualR_));
			double ctB = config_.ctBInverse.eval(config_.ctBInverse.domain().clamp(1 / manualB_));
			prevSyncResults_.temperatureK = (ctR + ctB) / 2;
			syncResults_.temperatureK = prevSyncResults_.temperatureK;
		}
	}
}
void Awb::switchMode([[maybe_unused]] CameraMode const &cameraMode,
		     Metadata *metadata)
{
	/* Let other algorithms know the current white balance values. */
	metadata->set("awb.status", prevSyncResults_);
}

/* Auto mode is active unless both manual gains have been set non-zero. */
bool Awb::isAutoEnabled() const
{
	return manualR_ == 0.0 || manualB_ == 0.0;
}

/* Called with mutex_ held: adopt the async thread's results and re-arm it. */
void Awb::fetchAsyncResults()
{
	LOG(RPiAwb, Debug) << "Fetch AWB results";
	asyncFinished_ = false;
	asyncStarted_ = false;
	/*
	 * It's possible manual gains could be set even while the async
	 * thread was running, so only copy the results if still in auto mode.
	 */
	if (isAutoEnabled())
		syncResults_ = asyncResults_;
}
/* Hand the latest statistics to the async thread and wake it up. */
void Awb::restartAsync(StatisticsPtr &stats, double lux)
{
	LOG(RPiAwb, Debug) << "Starting AWB calculation";
	/* this makes a new reference which belongs to the asynchronous thread */
	statistics_ = stats;
	/* store the mode as it could technically change */
	auto m = config_.modes.find(modeName_);
	mode_ = m != config_.modes.end()
			? &m->second
			: (mode_ == nullptr ? config_.defaultMode : mode_);
	lux_ = lux;
	framePhase_ = 0;
	asyncStarted_ = true;
	/* Copy the mode name into the fixed-size status field, NUL-terminated. */
	size_t len = modeName_.copy(asyncResults_.mode,
				    sizeof(asyncResults_.mode) - 1);
	asyncResults_.mode[len] = '\0';
	{
		std::lock_guard<std::mutex> lock(mutex_);
		asyncStart_ = true;
	}
	asyncSignal_.notify_one();
}
/*
 * Per-frame processing: pick up any finished async results, IIR-filter the
 * gains towards them and publish "awb.status". During startup the filter
 * speed is forced to 1 so the gains snap straight to the computed values.
 */
void Awb::prepare(Metadata *imageMetadata)
{
	if (frameCount_ < (int)config_.startupFrames)
		frameCount_++;
	double speed = frameCount_ < (int)config_.startupFrames
			       ? 1.0
			       : config_.speed;
	LOG(RPiAwb, Debug)
		<< "frame_count " << frameCount_ << " speed " << speed;
	{
		std::unique_lock<std::mutex> lock(mutex_);
		if (asyncStarted_ && asyncFinished_)
			fetchAsyncResults();
	}
	/* Finally apply IIR filter to results and put into metadata. */
	memcpy(prevSyncResults_.mode, syncResults_.mode,
	       sizeof(prevSyncResults_.mode));
	prevSyncResults_.temperatureK = speed * syncResults_.temperatureK +
					(1.0 - speed) * prevSyncResults_.temperatureK;
	prevSyncResults_.gainR = speed * syncResults_.gainR +
				 (1.0 - speed) * prevSyncResults_.gainR;
	prevSyncResults_.gainG = speed * syncResults_.gainG +
				 (1.0 - speed) * prevSyncResults_.gainG;
	prevSyncResults_.gainB = speed * syncResults_.gainB +
				 (1.0 - speed) * prevSyncResults_.gainB;
	imageMetadata->set("awb.status", prevSyncResults_);
	LOG(RPiAwb, Debug)
		<< "Using AWB gains r " << prevSyncResults_.gainR << " g "
		<< prevSyncResults_.gainG << " b "
		<< prevSyncResults_.gainB;
}
/*
 * Per-frame statistics handling: kick off a new async AWB calculation every
 * framePeriod frames (or continuously during startup) while in auto mode.
 */
void Awb::process(StatisticsPtr &stats, Metadata *imageMetadata)
{
	/* Count frames since we last poked the async thread. */
	if (framePhase_ < (int)config_.framePeriod)
		framePhase_++;
	LOG(RPiAwb, Debug) << "frame_phase " << framePhase_;
	/* We do not restart the async thread if we're not in auto mode. */
	if (isAutoEnabled() &&
	    (framePhase_ >= (int)config_.framePeriod ||
	     frameCount_ < (int)config_.startupFrames)) {
		/* Update any settings and any image metadata that we need. */
		struct LuxStatus luxStatus = {};
		luxStatus.lux = 400; /* in case no metadata */
		if (imageMetadata->get("lux.status", luxStatus) != 0)
			LOG(RPiAwb, Debug) << "No lux metadata found";
		LOG(RPiAwb, Debug) << "Awb lux value is " << luxStatus.lux;
		if (asyncStarted_ == false)
			restartAsync(stats, luxStatus.lux);
	}
}
/*
 * Worker thread main loop: wait to be started (or aborted), run one AWB
 * calculation, flag completion, repeat until asked to abort.
 */
void Awb::asyncFunc()
{
	while (true) {
		{
			std::unique_lock<std::mutex> lock(mutex_);
			asyncSignal_.wait(lock, [&] {
				return asyncStart_ || asyncAbort_;
			});
			asyncStart_ = false;
			if (asyncAbort_)
				break;
		}
		doAwb();
		{
			std::lock_guard<std::mutex> lock(mutex_);
			asyncFinished_ = true;
		}
		syncSignal_.notify_one();
	}
}
/*
 * Convert the hardware AWB regions into per-zone average RGB values,
 * discarding zones with too few counted pixels or too little green, and
 * undoing the ALSC shading correction when the statistics were gathered
 * before it was applied.
 */
static void generateStats(std::vector<Awb::RGB> &zones,
			  StatisticsPtr &stats, double minPixels,
			  double minG, Metadata &globalMetadata)
{
	std::scoped_lock<RPiController::Metadata> l(globalMetadata);

	for (unsigned int i = 0; i < stats->awbRegions.numRegions(); i++) {
		Awb::RGB zone;
		auto &region = stats->awbRegions.get(i);
		if (region.counted >= minPixels) {
			zone.G = region.val.gSum / region.counted;
			if (zone.G < minG)
				continue;
			zone.R = region.val.rSum / region.counted;
			zone.B = region.val.bSum / region.counted;
			/* Factor in the ALSC applied colour shading correction if required. */
			const AlscStatus *alscStatus = globalMetadata.getLocked<AlscStatus>("alsc.status");
			if (stats->colourStatsPos == Statistics::ColourStatsPos::PreLsc && alscStatus) {
				zone.R *= alscStatus->r[i];
				zone.G *= alscStatus->g[i];
				zone.B *= alscStatus->b[i];
			}
			zones.push_back(zone);
		}
	}
}
/* Rebuild zones_ from the latest statistics and normalise to the reference sensor. */
void Awb::prepareStats()
{
	zones_.clear();
	/*
	 * LSC has already been applied to the stats in this pipeline, so stop
	 * any LSC compensation. We also ignore config_.fast in this version.
	 */
	generateStats(zones_, statistics_, config_.minPixels,
		      config_.minG, getGlobalMetadata());
	/*
	 * apply sensitivities, so values appear to come from our "canonical"
	 * sensor.
	 */
	for (auto &zone : zones_) {
		zone.R *= config_.sensitivityR;
		zone.B *= config_.sensitivityB;
	}
}
double Awb::computeDelta2Sum(double gainR, double gainB)
{
/*
* Compute the sum of the squared colour error (non-greyness) as it
* appears in the log likelihood equation.
*/
double delta2Sum = 0;
for (auto &z : zones_) {
double deltaR = gainR * z.R - 1 - config_.whitepointR;
double deltaB = gainB * z.B - 1 - config_.whitepointB;
double delta2 = deltaR * deltaR + deltaB * deltaB;
/* LOG(RPiAwb, Debug) << "deltaR " << deltaR << " deltaB " << deltaB << " delta2 " << delta2; */
delta2 = std::min(delta2, config_.deltaLimit);
delta2Sum += delta2;
}
return delta2Sum;
}
ipa::Pwl Awb::interpolatePrior()
{
	/*
	 * Interpolate the prior log likelihood function for our current lux
	 * value.
	 */
	if (lux_ <= config_.priors.front().lux)
		return config_.priors.front().prior;
	else if (lux_ >= config_.priors.back().lux)
		return config_.priors.back().prior;
	else {
		int idx = 0;
		/* find which two we lie between */
		while (config_.priors[idx + 1].lux < lux_)
			idx++;
		double lux0 = config_.priors[idx].lux,
		       lux1 = config_.priors[idx + 1].lux;
		/* Linearly blend the two bracketing priors, pointwise in y. */
		return ipa::Pwl::combine(config_.priors[idx].prior,
					 config_.priors[idx + 1].prior,
					 [&](double /*x*/, double y0, double y1) {
						 return y0 + (y1 - y0) *
							(lux_ - lux0) / (lux1 - lux0);
					 });
	}
}
/*
 * Given 3 points on a curve, find the extremum of the function in that
 * interval by fitting a quadratic through them. The result is clamped to
 * [a.x(), c.x()]; if the points are (nearly) collinear, the endpoint with
 * the smaller y value is returned instead.
 */
static double interpolateQuadatric(ipa::Pwl::Point const &a, ipa::Pwl::Point const &b,
				   ipa::Pwl::Point const &c)
{
	const double eps = 1e-3;
	ipa::Pwl::Point ca = c - a, ba = b - a;
	double denominator = 2 * (ba.y() * ca.x() - ca.y() * ba.x());
	/*
	 * Use fabs(), not abs(): the plain abs() can resolve to the integer
	 * overload, truncating the double and misclassifying curvatures with
	 * magnitude below 1.0 as degenerate.
	 */
	if (fabs(denominator) > eps) {
		double numerator = ba.y() * ca.x() * ca.x() - ca.y() * ba.x() * ba.x();
		double result = numerator / denominator + a.x();
		return std::max(a.x(), std::min(c.x(), result));
	}
	/* has degenerated to straight line segment */
	return a.y() < c.y() - eps ? a.x() : (c.y() < a.y() - eps ? c.x() : b.x());
}
/*
 * Walk the colour temperature curve from ctLo to ctHi, evaluating the total
 * log likelihood (colour error minus prior) at each step, and return the CT
 * with the best (lowest) value, refined by quadratic interpolation.
 */
double Awb::coarseSearch(ipa::Pwl const &prior)
{
	points_.clear(); /* assume doesn't deallocate memory */
	size_t bestPoint = 0;
	double t = mode_->ctLo;
	int spanR = 0, spanB = 0;
	/* Step down the CT curve evaluating log likelihood. */
	while (true) {
		double r = config_.ctR.eval(t, &spanR);
		double b = config_.ctB.eval(t, &spanB);
		/* The curves store r and b; the gains are their reciprocals. */
		double gainR = 1 / r, gainB = 1 / b;
		double delta2Sum = computeDelta2Sum(gainR, gainB);
		double priorLogLikelihood = prior.eval(prior.domain().clamp(t));
		double finalLogLikelihood = delta2Sum - priorLogLikelihood;
		LOG(RPiAwb, Debug)
			<< "t: " << t << " gain R " << gainR << " gain B "
			<< gainB << " delta2_sum " << delta2Sum
			<< " prior " << priorLogLikelihood << " final "
			<< finalLogLikelihood;
		points_.push_back(ipa::Pwl::Point({ t, finalLogLikelihood }));
		/* Lower log likelihood is better. */
		if (points_.back().y() < points_[bestPoint].y())
			bestPoint = points_.size() - 1;
		if (t == mode_->ctHi)
			break;
		/* for even steps along the r/b curve scale them by the current t */
		t = std::min(t + t / 10 * config_.coarseStep, mode_->ctHi);
	}
	t = points_[bestPoint].x();
	LOG(RPiAwb, Debug) << "Coarse search found CT " << t;
	/*
	 * We have the best point of the search, but refine it with a quadratic
	 * interpolation around its neighbours.
	 */
	if (points_.size() > 2) {
		/* Clamp bestPoint so both neighbours exist. */
		unsigned long bp = std::min(bestPoint, points_.size() - 2);
		bestPoint = std::max(1UL, bp);
		t = interpolateQuadatric(points_[bestPoint - 1],
					 points_[bestPoint],
					 points_[bestPoint + 1]);
		LOG(RPiAwb, Debug)
			<< "After quadratic refinement, coarse search has CT "
			<< t;
	}
	return t;
}
/*
 * Refine the coarse search result: step along the CT curve around @t and, at
 * each step, also probe transversely *off* the curve (in r/b space) to allow
 * for illuminants with more or less green light. On return @t, @r and @b
 * hold the best colour temperature and r/b values found.
 */
void Awb::fineSearch(double &t, double &r, double &b, ipa::Pwl const &prior)
{
	int spanR = -1, spanB = -1;
	/* Prime the span hints so subsequent eval() calls search locally. */
	config_.ctR.eval(t, &spanR);
	config_.ctB.eval(t, &spanB);
	/* Fine steps are a tenth of the coarse step at this temperature. */
	double step = t / 10 * config_.coarseStep * 0.1;
	int nsteps = 5;
	/* Local gradient of the r/b curve across the search interval. */
	double rDiff = config_.ctR.eval(t + nsteps * step, &spanR) -
		       config_.ctR.eval(t - nsteps * step, &spanR);
	double bDiff = config_.ctB.eval(t + nsteps * step, &spanB) -
		       config_.ctB.eval(t - nsteps * step, &spanB);
	ipa::Pwl::Point transverse({ bDiff, -rDiff });
	/* A (nearly) zero gradient gives no usable transverse direction. */
	if (transverse.length2() < 1e-6)
		return;
	/*
	 * unit vector orthogonal to the b vs. r function (pointing outwards
	 * with r and b increasing)
	 */
	transverse = transverse / transverse.length();
	double bestLogLikelihood = 0, bestT = 0, bestR = 0, bestB = 0;
	double transverseRange = config_.transverseNeg + config_.transversePos;
	const int maxNumDeltas = 12;
	/* a transverse step approximately every 0.01 r/b units */
	int numDeltas = floor(transverseRange * 100 + 0.5) + 1;
	numDeltas = numDeltas < 3 ? 3 : (numDeltas > maxNumDeltas ? maxNumDeltas : numDeltas);
	/*
	 * Step down CT curve. March a bit further if the transverse range is
	 * large.
	 */
	nsteps += numDeltas;
	for (int i = -nsteps; i <= nsteps; i++) {
		double tTest = t + i * step;
		double priorLogLikelihood =
			prior.eval(prior.domain().clamp(tTest));
		double rCurve = config_.ctR.eval(tTest, &spanR);
		double bCurve = config_.ctB.eval(tTest, &spanB);
		/* x will be distance off the curve, y the log likelihood there */
		ipa::Pwl::Point points[maxNumDeltas];
		int bestPoint = 0;
		/* Take some measurements transversely *off* the CT curve. */
		for (int j = 0; j < numDeltas; j++) {
			points[j][0] = -config_.transverseNeg +
				       (transverseRange * j) / (numDeltas - 1);
			ipa::Pwl::Point rbTest = ipa::Pwl::Point({ rCurve, bCurve }) +
						 transverse * points[j].x();
			double rTest = rbTest.x(), bTest = rbTest.y();
			double gainR = 1 / rTest, gainB = 1 / bTest;
			double delta2Sum = computeDelta2Sum(gainR, gainB);
			points[j][1] = delta2Sum - priorLogLikelihood;
			LOG(RPiAwb, Debug)
				<< "At t " << tTest << " r " << rTest << " b "
				<< bTest << ": " << points[j].y();
			if (points[j].y() < points[bestPoint].y())
				bestPoint = j;
		}
		/*
		 * We have NUM_DELTAS points transversely across the CT curve,
		 * now let's do a quadratic interpolation for the best result.
		 */
		bestPoint = std::max(1, std::min(bestPoint, numDeltas - 2));
		ipa::Pwl::Point rbTest = ipa::Pwl::Point({ rCurve, bCurve }) +
					 transverse * interpolateQuadatric(points[bestPoint - 1],
									   points[bestPoint],
									   points[bestPoint + 1]);
		double rTest = rbTest.x(), bTest = rbTest.y();
		double gainR = 1 / rTest, gainB = 1 / bTest;
		double delta2Sum = computeDelta2Sum(gainR, gainB);
		double finalLogLikelihood = delta2Sum - priorLogLikelihood;
		LOG(RPiAwb, Debug)
			<< "Finally "
			<< tTest << " r " << rTest << " b " << bTest << ": "
			<< finalLogLikelihood
			<< (finalLogLikelihood < bestLogLikelihood ? " BEST" : "");
		/* bestT == 0 means "nothing recorded yet". */
		if (bestT == 0 || finalLogLikelihood < bestLogLikelihood)
			bestLogLikelihood = finalLogLikelihood,
			bestT = tTest, bestR = rTest, bestB = bTest;
	}
	t = bestT, r = bestR, b = bestB;
	LOG(RPiAwb, Debug)
		<< "Fine search found t " << t << " r " << r << " b " << b;
}
/*
 * Bayesian AWB: combine the measured colour errors with a lux-dependent
 * prior, run the coarse and fine searches, and store the resulting colour
 * temperature and gains in asyncResults_.
 */
void Awb::awbBayes()
{
	/*
	 * May as well divide out G to save computeDelta2Sum from doing it over
	 * and over. (The +1 presumably guards against zero G - note that it
	 * slightly biases very dark zones.)
	 */
	for (auto &z : zones_)
		z.R = z.R / (z.G + 1), z.B = z.B / (z.G + 1);
	/*
	 * Get the current prior, and scale according to how many zones are
	 * valid... not entirely sure about this.
	 */
	ipa::Pwl prior = interpolatePrior();
	prior *= zones_.size() / (double)(statistics_->awbRegions.numRegions());
	prior.map([](double x, double y) {
		LOG(RPiAwb, Debug) << "(" << x << "," << y << ")";
	});
	double t = coarseSearch(prior);
	double r = config_.ctR.eval(t);
	double b = config_.ctB.eval(t);
	LOG(RPiAwb, Debug)
		<< "After coarse search: r " << r << " b " << b << " (gains r "
		<< 1 / r << " b " << 1 / b << ")";
	/*
	 * Not entirely sure how to handle the fine search yet. Mostly the
	 * estimated CT is already good enough, but the fine search allows us to
	 * wander transverely off the CT curve. Under some illuminants, where
	 * there may be more or less green light, this may prove beneficial,
	 * though I probably need more real datasets before deciding exactly how
	 * this should be controlled and tuned.
	 */
	fineSearch(t, r, b, prior);
	LOG(RPiAwb, Debug)
		<< "After fine search: r " << r << " b " << b << " (gains r "
		<< 1 / r << " b " << 1 / b << ")";
	/*
	 * Write results out for the main thread to pick up. Remember to adjust
	 * the gains from the ones that the "canonical sensor" would require to
	 * the ones needed by *this* sensor.
	 */
	asyncResults_.temperatureK = t;
	asyncResults_.gainR = 1.0 / r * config_.sensitivityR;
	asyncResults_.gainG = 1.0;
	asyncResults_.gainB = 1.0 / b * config_.sensitivityB;
}
void Awb::awbGrey()
{
	LOG(RPiAwb, Debug) << "Grey world AWB";
	/*
	 * Make a separate list of the derivatives for each of red and blue, so
	 * that we can sort them to exclude the extreme gains. We could
	 * consider some variations, such as normalising all the zones first, or
	 * doing an L2 average etc.
	 *
	 * Note that derivsR is a reference to zones_, so the first sort below
	 * reorders zones_ in place; derivsB is an independent copy.
	 */
	std::vector<RGB> &derivsR(zones_);
	std::vector<RGB> derivsB(derivsR);
	/* Sort by G/R ratio without dividing (a.G/a.R < b.G/b.R). */
	std::sort(derivsR.begin(), derivsR.end(),
		  [](RGB const &a, RGB const &b) {
			  return a.G * b.R < b.G * a.R;
		  });
	std::sort(derivsB.begin(), derivsB.end(),
		  [](RGB const &a, RGB const &b) {
			  return a.G * b.B < b.G * a.B;
		  });
	/* Average the middle half of the values. */
	int discard = derivsR.size() / 4;
	RGB sumR(0, 0, 0), sumB(0, 0, 0);
	for (auto ri = derivsR.begin() + discard,
		  bi = derivsB.begin() + discard;
	     ri != derivsR.end() - discard; ri++, bi++)
		sumR += *ri, sumB += *bi;
	/* The +1 presumably guards against division by zero - TODO confirm. */
	double gainR = sumR.G / (sumR.R + 1),
	       gainB = sumB.G / (sumB.B + 1);
	asyncResults_.temperatureK = 4500; /* don't know what it is */
	asyncResults_.gainR = gainR;
	asyncResults_.gainG = 1.0;
	asyncResults_.gainB = gainB;
}
/*
 * Top-level AWB calculation, run on the asynchronous thread: build the zone
 * list and, if enough zones are valid, run either the Bayesian or the grey
 * world algorithm to fill in asyncResults_.
 */
void Awb::doAwb()
{
	prepareStats();
	LOG(RPiAwb, Debug) << "Valid zones: " << zones_.size();
	if (zones_.size() > config_.minRegions) {
		if (config_.bayes)
			awbBayes();
		else
			awbGrey();
		LOG(RPiAwb, Debug)
			<< "CT found is "
			<< asyncResults_.temperatureK
			<< " with gains r " << asyncResults_.gainR
			<< " and b " << asyncResults_.gainB;
	}
	/*
	 * we're done with these; we may as well relinquish our hold on the
	 * pointer.
	 */
	statistics_.reset();
}
/* Register algorithm with the system. */
static Algorithm *create(Controller *controller)
{
	/* Awb derives from Algorithm, so the upcast is implicit: no cast needed. */
	return new Awb(controller);
}
static RegisterAlgorithm reg(NAME, &create);
|
0 | repos/libcamera/src/ipa/rpi/controller | repos/libcamera/src/ipa/rpi/controller/rpi/noise.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* Noise control algorithm
*/
#pragma once
#include "../algorithm.h"
#include "../noise_status.h"
/* This is our implementation of the "noise algorithm". */
namespace RPiController {
/* Reports the sensor's noise profile, scaled for the current camera mode. */
class Noise : public Algorithm
{
public:
	Noise(Controller *controller);
	char const *name() const override;
	void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
	int read(const libcamera::YamlObject &params) override;
	void prepare(Metadata *imageMetadata) override;
private:
	/* the noise profile for analogue gain of 1.0 */
	double referenceConstant_;
	double referenceSlope_;
	/* per-mode scaling applied to the reference profile */
	double modeFactor_;
};
} /* namespace RPiController */
|
0 | repos/libcamera/src/ipa/rpi/controller | repos/libcamera/src/ipa/rpi/controller/rpi/af.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2022-2023, Raspberry Pi Ltd
*
* Autofocus control algorithm
*/
#pragma once
#include "../af_algorithm.h"
#include "../af_status.h"
#include "../pdaf_data.h"
#include "libipa/pwl.h"
/*
* This algorithm implements a hybrid of CDAF and PDAF, favouring PDAF.
*
* Whenever PDAF is available, it is used in a continuous feedback loop.
* When triggered in auto mode, we simply enable AF for a limited number
* of frames (it may terminate early if the delta becomes small enough).
*
* When PDAF confidence is low (due e.g. to low contrast or extreme defocus)
* or PDAF data are absent, fall back to CDAF with a programmed scan pattern.
* A coarse and fine scan are performed, using ISP's CDAF focus FoM to
* estimate the lens position with peak contrast. This is slower due to
* extra latency in the ISP, and requires a settling time between steps.
*
* Some hysteresis is applied to the switch between PDAF and CDAF, to avoid
* "nuisance" scans. During each interval where PDAF is not working, only
* ONE scan will be performed; CAF cannot track objects using CDAF alone.
*
*/
namespace RPiController {
class Af : public AfAlgorithm
{
public:
	Af(Controller *controller = NULL);
	~Af();
	char const *name() const override;
	int read(const libcamera::YamlObject &params) override;
	void initialise() override;
	/* IPA calls */
	void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
	void prepare(Metadata *imageMetadata) override;
	void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
	/* controls */
	void setRange(AfRange range) override;
	void setSpeed(AfSpeed speed) override;
	void setMetering(bool use_windows) override;
	void setWindows(libcamera::Span<libcamera::Rectangle const> const &wins) override;
	void setMode(AfMode mode) override;
	AfMode getMode() const override;
	bool setLensPosition(double dioptres, int32_t *hwpos) override;
	std::optional<double> getLensPosition() const override;
	void triggerScan() override;
	void cancelScan() override;
	void pause(AfPause pause) override;
private:
	/* State machine for the focus search; see file comment for overview. */
	enum class ScanState {
		Idle = 0,
		Trigger,
		Pdaf,
		Coarse,
		Fine,
		Settle
	};
	/* Focus limits and default for one AfRange setting. */
	struct RangeDependentParams {
		double focusMin;       /* lower (far) limit in dioptres */
		double focusMax;       /* upper (near) limit in dioptres */
		double focusDefault;	       /* default setting ("hyperfocal") */
		RangeDependentParams();
		void read(const libcamera::YamlObject &params);
	};
	/* Scan/feedback tuning for one AfSpeed setting. */
	struct SpeedDependentParams {
		double stepCoarse;	       /* used for scans */
		double stepFine;	       /* used for scans */
		double contrastRatio;	       /* used for scan termination and reporting */
		double pdafGain;	       /* coefficient for PDAF feedback loop */
		double pdafSquelch;	       /* PDAF stability parameter (device-specific) */
		double maxSlew;		       /* limit for lens movement per frame */
		uint32_t pdafFrames;	       /* number of iterations when triggered */
		uint32_t dropoutFrames;	       /* number of non-PDAF frames to switch to CDAF */
		uint32_t stepFrames;	       /* frames to skip in between steps of a scan */
		SpeedDependentParams();
		void read(const libcamera::YamlObject &params);
	};
	/* Full tuning-file configuration for the algorithm. */
	struct CfgParams {
		RangeDependentParams ranges[AfRangeMax];
		SpeedDependentParams speeds[AfSpeedMax];
		uint32_t confEpsilon;	       /* PDAF hysteresis threshold (sensor-specific) */
		uint32_t confThresh;	       /* PDAF confidence cell min (sensor-specific) */
		uint32_t confClip;	       /* PDAF confidence cell max (sensor-specific) */
		uint32_t skipFrames;	       /* frames to skip at start or modeswitch */
		libcamera::ipa::Pwl map;       /* converts dioptres -> lens driver position */
		CfgParams();
		int read(const libcamera::YamlObject &params);
		void initialise();
	};
	/* One measurement recorded during a CDAF scan. */
	struct ScanRecord {
		double focus;
		double contrast;
		double phase;
		double conf;
	};
	/* Per-region weights used to combine PDAF/contrast statistics. */
	struct RegionWeights {
		unsigned rows;
		unsigned cols;
		uint32_t sum;
		std::vector<uint16_t> w;
		RegionWeights()
			: rows(0), cols(0), sum(0), w() {}
	};
	void computeWeights(RegionWeights *wgts, unsigned rows, unsigned cols);
	void invalidateWeights();
	bool getPhase(PdafRegions const &regions, double &phase, double &conf);
	double getContrast(const FocusRegions &focusStats);
	void doPDAF(double phase, double conf);
	bool earlyTerminationByPhase(double phase);
	double findPeak(unsigned index) const;
	void doScan(double contrast, double phase, double conf);
	void doAF(double contrast, double phase, double conf);
	void updateLensPosition();
	void startAF();
	void startProgrammedScan();
	void goIdle();
	/* Configuration and settings */
	CfgParams cfg_;
	AfRange range_;
	AfSpeed speed_;
	AfMode mode_;
	bool pauseFlag_;
	libcamera::Rectangle statsRegion_;
	std::vector<libcamera::Rectangle> windows_;
	bool useWindows_;
	RegionWeights phaseWeights_;
	RegionWeights contrastWeights_;
	/* Working state. */
	ScanState scanState_;
	bool initted_;
	double ftarget_, fsmooth_;
	double prevContrast_;
	unsigned skipCount_, stepCount_, dropCount_;
	unsigned scanMaxIndex_;
	double scanMaxContrast_, scanMinContrast_;
	std::vector<ScanRecord> scanData_;
	AfState reportState_;
};
} // namespace RPiController
|
0 | repos/libcamera/src/ipa/rpi/controller | repos/libcamera/src/ipa/rpi/controller/rpi/cac.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2023 Raspberry Pi Ltd
*
* Chromatic Aberration Correction algorithm
*/
#include "cac.h"
#include <libcamera/base/log.h>
#include "cac_status.h"
using namespace RPiController;
using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiCac)
#define NAME "rpi.cac"
/* Construct the CAC algorithm; no state beyond the base class is needed. */
Cac::Cac(Controller *controller)
	: Algorithm(controller)
{
}
/* Return the registered name of this algorithm ("rpi.cac"). */
char const *Cac::name() const
{
	return NAME;
}
static bool arrayToSet(const libcamera::YamlObject ¶ms, std::vector<double> &inputArray, const Size &size)
{
int num = 0;
int max_num = (size.width + 1) * (size.height + 1);
inputArray.resize(max_num);
for (const auto &p : params.asList()) {
if (num == max_num)
return false;
inputArray[num++] = p.get<double>(0);
}
return num == max_num;
}
/*
 * Scale every LUT entry of inputArray by strengthFactor into outputArray.
 *
 * outputArray is resized to match inputArray, so the function is safe even
 * when the caller has not pre-sized it (the original relied on the caller
 * having copied the table beforehand).
 */
static void setStrength(std::vector<double> &inputArray, std::vector<double> &outputArray,
			double strengthFactor)
{
	outputArray.resize(inputArray.size());
	int num = 0;
	for (const auto &p : inputArray)
		outputArray[num++] = p * strengthFactor;
}
/*
 * Load the four chromatic aberration displacement LUTs (red/blue x/y) from
 * the tuning file and pre-apply the optional "strength" factor. CAC is
 * enabled only if all four tables are present; a malformed table is an
 * error. Returns 0 on success or -EINVAL on a bad table.
 */
int Cac::read(const libcamera::YamlObject &params)
{
	config_.enabled = params.contains("lut_rx") && params.contains("lut_ry") &&
			  params.contains("lut_bx") && params.contains("lut_by");
	if (!config_.enabled)
		return 0;
	/* LUT dimensions are fixed by the hardware's CAC grid size. */
	const Size &size = getHardwareConfig().cacRegions;
	if (!arrayToSet(params["lut_rx"], config_.lutRx, size)) {
		LOG(RPiCac, Error) << "Bad CAC lut_rx table";
		return -EINVAL;
	}
	if (!arrayToSet(params["lut_ry"], config_.lutRy, size)) {
		LOG(RPiCac, Error) << "Bad CAC lut_ry table";
		return -EINVAL;
	}
	if (!arrayToSet(params["lut_bx"], config_.lutBx, size)) {
		LOG(RPiCac, Error) << "Bad CAC lut_bx table";
		return -EINVAL;
	}
	if (!arrayToSet(params["lut_by"], config_.lutBy, size)) {
		LOG(RPiCac, Error) << "Bad CAC lut_by table";
		return -EINVAL;
	}
	/* Strength defaults to 1 (tables used as-is). */
	double strength = params["strength"].get<double>(1);
	/* Copy the tables into the status (sizing them), then scale in place. */
	cacStatus_.lutRx = config_.lutRx;
	cacStatus_.lutRy = config_.lutRy;
	cacStatus_.lutBx = config_.lutBx;
	cacStatus_.lutBy = config_.lutBy;
	setStrength(config_.lutRx, cacStatus_.lutRx, strength);
	setStrength(config_.lutBx, cacStatus_.lutBx, strength);
	setStrength(config_.lutRy, cacStatus_.lutRy, strength);
	setStrength(config_.lutBy, cacStatus_.lutBy, strength);
	return 0;
}
/* Publish the precomputed CAC status for this frame, if CAC is enabled. */
void Cac::prepare(Metadata *imageMetadata)
{
	if (config_.enabled)
		imageMetadata->set("cac.status", cacStatus_);
}
// Register algorithm with the system.
static Algorithm *Create(Controller *controller)
{
	// Cac derives from Algorithm, so the upcast is implicit: no cast needed.
	return new Cac(controller);
}
static RegisterAlgorithm reg(NAME, &Create);
|
0 | repos/libcamera/src/ipa/rpi/controller | repos/libcamera/src/ipa/rpi/controller/rpi/sdn.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019-2021, Raspberry Pi Ltd
*
* SDN (spatial denoise) control algorithm
*/
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
#include "../denoise_status.h"
#include "../noise_status.h"
#include "sdn.h"
using namespace RPiController;
using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiSdn)
/*
* Calculate settings for the spatial denoise block using the noise profile in
* the image metadata.
*/
#define NAME "rpi.sdn"
/* Construct the SDN algorithm with colour denoise initially off. */
Sdn::Sdn(Controller *controller)
	: DenoiseAlgorithm(controller), mode_(DenoiseMode::ColourOff)
{
}
/* Return the registered name of this algorithm ("rpi.sdn"). */
char const *Sdn::name() const
{
	return NAME;
}
/*
 * Read the (legacy) SDN tuning parameters. "deviation" scales the noise
 * profile into a denoise threshold; "strength" sets how aggressively it is
 * applied. Both have defaults, so this never fails.
 */
int Sdn::read(const libcamera::YamlObject &params)
{
	LOG(RPiSdn, Warning)
		<< "Using legacy SDN tuning - please consider moving SDN inside rpi.denoise";
	deviation_ = params["deviation"].get<double>(3.2);
	strength_ = params["strength"].get<double>(0.75);
	return 0;
}
/* Nothing to initialise: all state is set in read() and the constructor. */
void Sdn::initialise()
{
}
/*
 * Compute this frame's spatial denoise settings by scaling the noise
 * profile (from image metadata) by the tuned deviation, and publish them as
 * "denoise.status".
 */
void Sdn::prepare(Metadata *imageMetadata)
{
	struct NoiseStatus noiseStatus = {};
	noiseStatus.noiseSlope = 3.0; /* in case no metadata */
	if (imageMetadata->get("noise.status", noiseStatus) != 0)
		LOG(RPiSdn, Warning) << "no noise profile found";
	LOG(RPiSdn, Debug)
		<< "Noise profile: constant " << noiseStatus.noiseConstant
		<< " slope " << noiseStatus.noiseSlope;
	struct DenoiseStatus status;
	status.noiseConstant = noiseStatus.noiseConstant * deviation_;
	status.noiseSlope = noiseStatus.noiseSlope * deviation_;
	status.strength = strength_;
	status.mode = utils::to_underlying(mode_);
	imageMetadata->set("denoise.status", status);
	LOG(RPiSdn, Debug)
		<< "programmed constant " << status.noiseConstant
		<< " slope " << status.noiseSlope
		<< " strength " << status.strength;
}
void Sdn::setMode(DenoiseMode mode)
{
	/* We only distinguish between off and all other modes. */
	mode_ = mode;
}
/* Register algorithm with the system. */
static Algorithm *create(Controller *controller)
{
	/* Sdn derives from Algorithm, so the upcast is implicit: no cast needed. */
	return new Sdn(controller);
}
static RegisterAlgorithm reg(NAME, &create);
|
0 | repos/libcamera/src/ipa/rpi/controller | repos/libcamera/src/ipa/rpi/controller/rpi/denoise.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2022, Raspberry Pi Ltd
*
* denoise.hpp - Denoise (spatial, colour, temporal) control algorithm
*/
#pragma once
#include <map>
#include <string>
#include "algorithm.h"
#include "denoise_algorithm.h"
namespace RPiController {
// Algorithm to calculate correct denoise settings.
/* One named denoise configuration, read from the tuning file. */
struct DenoiseConfig {
	double sdnDeviation;       // spatial denoise threshold scaling
	double sdnStrength;
	double sdnDeviation2;
	double sdnDeviationNoTdn;  // SDN settings used when TDN is disabled
	double sdnStrengthNoTdn;
	double sdnTdnBackoff;      // per-frame SDN attenuation while TDN runs
	double cdnDeviation;       // colour denoise parameters
	double cdnStrength;
	double tdnDeviation;       // temporal denoise parameters
	double tdnThreshold;
	bool tdnEnable;
	bool sdnEnable;
	bool cdnEnable;
	int read(const libcamera::YamlObject &params);
};
/* Combined spatial/colour/temporal denoise control algorithm. */
class Denoise : public DenoiseAlgorithm
{
public:
	Denoise(Controller *controller);
	char const *name() const override;
	int read(const libcamera::YamlObject &params) override;
	void initialise() override;
	void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
	void prepare(Metadata *imageMetadata) override;
	void setMode(DenoiseMode mode) override;
	void setConfig(std::string const &name) override;
private:
	/* All named configurations from the tuning file, keyed by name. */
	std::map<std::string, DenoiseConfig> configs_;
	DenoiseConfig *currentConfig_;
	DenoiseMode mode_;
	/* SDN parameters attenuate over time if TDN is running. */
	double currentSdnDeviation_;
	double currentSdnStrength_;
	double currentSdnDeviation2_;
};
} // namespace RPiController
|
0 | repos/libcamera/src/ipa/rpi/controller | repos/libcamera/src/ipa/rpi/controller/rpi/geq.cpp | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* GEQ (green equalisation) control algorithm
*/
#include <libcamera/base/log.h>
#include "../device_status.h"
#include "../lux_status.h"
#include "geq.h"
using namespace RPiController;
using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiGeq)
/*
* We use the lux status so that we can apply stronger settings in darkness (if
* necessary).
*/
#define NAME "rpi.geq"
/* Construct the green equalisation algorithm. */
Geq::Geq(Controller *controller)
	: Algorithm(controller)
{
}
/* Return the registered name of this algorithm ("rpi.geq"). */
char const *Geq::name() const
{
	return NAME;
}
/*
 * Read the GEQ tuning: a base "offset"/"slope" pair and an optional
 * lux-dependent "strength" curve. The slope must lie in [0, 1) or -EINVAL
 * is returned.
 */
int Geq::read(const libcamera::YamlObject &params)
{
	config_.offset = params["offset"].get<uint16_t>(0);
	config_.slope = params["slope"].get<double>(0.0);
	if (config_.slope < 0.0 || config_.slope >= 1.0) {
		LOG(RPiGeq, Error) << "Bad slope value";
		return -EINVAL;
	}
	if (params.contains("strength")) {
		config_.strength = params["strength"].get<ipa::Pwl>(ipa::Pwl{});
		/* An empty Pwl here means the table failed to parse. */
		if (config_.strength.empty())
			return -EINVAL;
	}
	return 0;
}
/*
 * Compute this frame's GEQ offset and slope by scaling the tuned values by
 * the lux-dependent strength curve and the analogue gain, then publish them
 * as "geq.status".
 */
void Geq::prepare(Metadata *imageMetadata)
{
	LuxStatus luxStatus = {};
	luxStatus.lux = 400; /* default if no lux metadata is available */
	if (imageMetadata->get("lux.status", luxStatus))
		LOG(RPiGeq, Warning) << "no lux data found";
	DeviceStatus deviceStatus;
	deviceStatus.analogueGain = 1.0; /* in case not found */
	if (imageMetadata->get("device.status", deviceStatus))
		LOG(RPiGeq, Warning)
			<< "no device metadata - use analogue gain of 1x";
	GeqStatus geqStatus = {};
	/* Without a strength curve, only the analogue gain scales the settings. */
	double strength = config_.strength.empty()
				  ? 1.0
				  : config_.strength.eval(config_.strength.domain().clamp(luxStatus.lux));
	strength *= deviceStatus.analogueGain;
	double offset = config_.offset * strength;
	double slope = config_.slope * strength;
	/* Clamp to the ranges the hardware accepts. */
	geqStatus.offset = std::min(65535.0, std::max(0.0, offset));
	geqStatus.slope = std::min(.99999, std::max(0.0, slope));
	LOG(RPiGeq, Debug)
		<< "offset " << geqStatus.offset << " slope "
		<< geqStatus.slope << " (analogue gain "
		<< deviceStatus.analogueGain << " lux "
		<< luxStatus.lux << ")";
	imageMetadata->set("geq.status", geqStatus);
}
/* Register algorithm with the system. */
static Algorithm *create(Controller *controller)
{
	/* Geq derives from Algorithm, so the upcast is implicit: no cast needed. */
	return new Geq(controller);
}
static RegisterAlgorithm reg(NAME, &create);
|
0 | repos/libcamera/src/ipa/rpi/controller | repos/libcamera/src/ipa/rpi/controller/rpi/sdn.h | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
* SDN (spatial denoise) control algorithm
*/
#pragma once
#include "../algorithm.h"
#include "../denoise_algorithm.h"
namespace RPiController {
/* Algorithm to calculate correct spatial denoise (SDN) settings. */
/* Algorithm to calculate correct spatial denoise (SDN) settings. */
class Sdn : public DenoiseAlgorithm
{
public:
	Sdn(Controller *controller = NULL);
	char const *name() const override;
	int read(const libcamera::YamlObject &params) override;
	void initialise() override;
	void prepare(Metadata *imageMetadata) override;
	void setMode(DenoiseMode mode) override;
private:
	/* Tuning: noise-profile scaling and denoise strength. */
	double deviation_;
	double strength_;
	DenoiseMode mode_;
};
} /* namespace RPiController */
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.