//Released under the MIT License - https://opensource.org/licenses/MIT
//
//Copyright (c) 2019 AIT Austrian Institute of Technology GmbH
//
//Permission is hereby granted, free of charge, to any person obtaining
//a copy of this software and associated documentation files (the "Software"),
//to deal in the Software without restriction, including without limitation
//the rights to use, copy, modify, merge, publish, distribute, sublicense,
//and/or sell copies of the Software, and to permit persons to whom the
//Software is furnished to do so, subject to the following conditions:
//
//The above copyright notice and this permission notice shall be included
//in all copies or substantial portions of the Software.
//
//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
//EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
//MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
//IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
//DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
//OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
//USE OR OTHER DEALINGS IN THE SOFTWARE.
//
//Author: Josef Maier (josefjohann-dot-maier-at-gmail-dot-at)
#include "generateSequence.h"
#include "opencv2/imgproc/imgproc.hpp"
//#include <opencv2/imgcodecs.hpp>
#include <opencv2/core/eigen.hpp>
#include <array>
#include <chrono>
#include "pcl/filters/frustum_culling.h"
#include "pcl/common/transforms.h"
#include "pcl/filters/voxel_grid_occlusion_estimation.h"
#include <pcl/common/common.h>
#include <boost/thread/thread.hpp>
//#include <pcl/io/pcd_io.h>
#include <pcl/visualization/pcl_visualizer.h>
#include "nanoflann_utils.h"
#include <nanoflann.hpp>
#include "polygon_helper.h"
#include "side_funcs.h"
#include "io_data.h"
using namespace std;
using namespace cv;
/* --------------------------- Defines --------------------------- */
/* --------------------- Function prototypes --------------------- */
/* -------------------------- Functions -------------------------- */
genStereoSequ::genStereoSequ(cv::Size imgSize_,
cv::Mat K1_,
cv::Mat K2_,
std::vector<cv::Mat> R_,
std::vector<cv::Mat> t_,
StereoSequParameters &pars_,
bool filter_occluded_points_,
uint32_t verbose_,
const std::string &writeIntermRes_path_) :
verbose(verbose_),
writeIntermRes_path(writeIntermRes_path_),
filter_occluded_points(filter_occluded_points_),
imgSize(imgSize_),
K1(K1_),
K2(K2_),
pars(pars_),
R(std::move(R_)),
t(std::move(t_)) {
CV_Assert((K1.rows == 3) && (K2.rows == 3) && (K1.cols == 3) && (K2.cols == 3) && (K1.type() == CV_64FC1) &&
(K2.type() == CV_64FC1));
CV_Assert((imgSize.area() > 0) && (R.size() == t.size()) && !R.empty());
chrono::high_resolution_clock::time_point t1, t2;
t1 = chrono::high_resolution_clock::now();
long int seed = randSeed(rand_gen);
randSeed(rand2, seed);
//Generate a mask with the minimal distance between keypoints and a mask for marking used areas in the first stereo image
genMasks();
//Calculate inverse of camera matrices
K1i = K1.inv();
K2i = K2.inv();
//Calculate distorted camera matrices (only used for output)
calcDistortedIntrinsics();
//Number of stereo configurations
nrStereoConfs = R.size();
//Construct the camera path
constructCamPath();
//Calculate the thresholds for the depths near, mid, and far for every camera configuration
if (!getDepthRanges()) {
throw SequenceException("Depth ranges are negative!\n");
}
//Used inlier ratios
genInlierRatios();
//Initialize region ROIs and masks
genRegMasks();
//Number of correspondences per image and correspondences per image region
initNrCorrespondences();
//Depths per image region
adaptDepthsPerRegion();
//Check if the given ranges of connected depth areas per image region are correct and initialize them for every definition of depths per image region
checkDepthAreas();
//Calculate the area in pixels for every depth and region
calcPixAreaPerDepth();
//Reset variables for the moving objects & Initialize variable for storing static 3D world coordinates
resetInitVars();
//Calculate the initial number, size, and positions of moving objects in the image
getNrSizePosMovObj();
//Get the relative movement direction (compared to the camera movement) for every moving object
checkMovObjDirection();
//Calculate the execution time
t2 = chrono::high_resolution_clock::now();
tus_to_init = chrono::duration_cast<chrono::microseconds>(t2 - t1).count();
}
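/* Minimal usage sketch (illustration only, not part of the original code; the camera parameters, image size and the
 * contents of StereoSequParameters below are assumptions for demonstration):
 *
 *   cv::Mat K = (cv::Mat_<double>(3, 3) << 800.0, 0.0, 320.0,
 *                                          0.0, 800.0, 240.0,
 *                                          0.0, 0.0, 1.0);
 *   std::vector<cv::Mat> Rs{cv::Mat::eye(3, 3, CV_64FC1)};                  //1 stereo configuration
 *   std::vector<cv::Mat> ts{(cv::Mat_<double>(3, 1) << -0.5, 0.0, 0.0)};    //baseline of 0.5 along -x
 *   StereoSequParameters pars;                                              //assumed to be filled by the caller
 *   genStereoSequ seq(cv::Size(640, 480), K, K, Rs, ts, pars, true, 0, ""); //no verbosity, no intermediate output
 */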
void genStereoSequ::resetInitVars(){
//Reset variables for the moving objects
combMovObjLabelsAll = Mat::zeros(imgSize, CV_8UC1);
movObjMask2All = Mat::zeros(imgSize, CV_8UC1);
movObjMaskFromLast = Mat::zeros(imgSize, CV_8UC1);
movObjMaskFromLast2 = Mat::zeros(imgSize, CV_8UC1);
movObjHasArea = std::vector<std::vector<bool>>(3, std::vector<bool>(3, false));
actCorrsOnMovObj = 0;
actCorrsOnMovObjFromLast = 0;
movObj3DPtsWorldAllFrames.clear();
//Initialize variable for storing static 3D world coordinates
staticWorld3DPts.reset(new pcl::PointCloud<pcl::PointXYZ>());
}
void genStereoSequ::genMasks() {
//Generate a mask with the minimal distance between keypoints
int sqrSi = 2 * max((int) ceil(pars.minKeypDist), 1) + 1;
csurr = Mat::ones(sqrSi, sqrSi, CV_8UC1);
//Generate a mask for marking used areas in the first stereo image
corrsIMG = Mat::zeros(imgSize.height + sqrSi - 1, imgSize.width + sqrSi - 1, CV_8UC1);
//Calculate the average area a keypoint occupies if it is masked by csurr, taking into account the probabilities of
//different mask overlaps
calcAvgMaskingArea();
}
void genStereoSequ::calcAvgMaskingArea(){
int csurrArea = csurr.rows * csurr.cols;
avgMaskingArea = 0;
double tmp = 0;
/*avg. A = Sum(i^2)/Sum(i) with i = [1, 2, ..., d^2], where d is the edge length of the mask used for reserving
* the minimal distance between keypoints. i^2 is used because a bigger free area has a higher probability
* (proportional to its area) of being hit by the random generator when a new keypoint is generated: e.g. a
* non-masked area of 1 pixel (all other areas already covered by masks) is only half as likely to be hit
* as a non-masked area of 2 pixels, and a non-masked area of 3 pixels is more likely still.
*/
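/* Worked example (illustration only): for pars.minKeypDist = 1, sqrSi = 3 and thus csurrArea = d^2 = 9, so
 * avgMaskingArea = (1^2 + 2^2 + ... + 9^2) / (1 + 2 + ... + 9) = 285 / 45 = approx. 6.33 pixels per keypoint.
 */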
for(int i = 1; i <= csurrArea; i++){
avgMaskingArea += (double)(i * i);
tmp += (double)i;
}
avgMaskingArea /= tmp;
}
//Distorts the intrinsics for every new stereo configuration
void genStereoSequ::calcDistortedIntrinsics(){
if(nearZero(pars.distortCamMat.second)){
for(size_t i = 0; i < R.size(); ++i){
K1_distorted.emplace_back(K1.clone());
K2_distorted.emplace_back(K2.clone());
}
return;
}
for(size_t i = 0; i < R.size(); ++i){
Mat Kdist1 = K1.clone();
calcDisortedK(Kdist1);
K1_distorted.emplace_back(Kdist1.clone());
Mat Kdist2 = K2.clone();
calcDisortedK(Kdist2);
K2_distorted.emplace_back(Kdist2.clone());
}
}
void genStereoSequ::calcDisortedK(cv::Mat &Kd){
double f_distort = getRandDoubleValRng(pars.distortCamMat.first, pars.distortCamMat.second)
* pow(-1.0, (double)(rand2() % 2));
double c_distort = getRandDoubleValRng(pars.distortCamMat.first, pars.distortCamMat.second);
double c__dir_distort = getRandDoubleValRng(0, 2.0 * M_PI);
f_distort *= Kd.at<double>(0,0);
f_distort += Kd.at<double>(0,0);
Kd.at<double>(0,0) = f_distort;
Kd.at<double>(1,1) = f_distort;
double cx = Kd.at<double>(0,2);
double cy = Kd.at<double>(1,2);
c_distort *= sqrt(cx * cx + cy * cy);
cx += c_distort * cos(c__dir_distort);
cy += c_distort * sin(c__dir_distort);
Kd.at<double>(0,2) = cx;
Kd.at<double>(1,2) = cy;
}
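/* Numeric illustration (assumed values, not part of the original code): with pars.distortCamMat = (0.01, 0.05),
 * f_distort is drawn from [0.01, 0.05] with a random sign, so a focal length of 800 becomes 800 * (1 +/- r),
 * i.e. somewhere in [760, 840]. The principal point (cx, cy) is shifted by c_distort * sqrt(cx^2 + cy^2)
 * (here up to 5% of its distance from the origin) in the uniformly random direction c__dir_distort.
 */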
//This function can only be called after the medium depth for the current frame has been calculated
void genStereoSequ::getImgIntersection(std::vector<cv::Point> &img1Poly,
std::vector<cv::Point> &img2Poly,
const cv::Mat &R_use,
const cv::Mat &t_use,
const double &depth_use,
bool visualize) {
CV_Assert(depth_use > 0);
//Project the corners of img1 on a plane at medium depth in 3D
vector<Point2f> imgCorners1(4);
double negXY[2] = {0, 0};
for (int y = 0; y < 2; ++y) {
for (int x = 0; x < 2; ++x) {
Point3d pt((double) (x * (imgSize.width - 1)), (double) (y * (imgSize.height - 1)), 1.0);
Mat ptm = Mat(pt, false).reshape(1, 3);
ptm = K1i * ptm;
ptm *= depth_use / ptm.at<double>(2); //3D position at medium depth
ptm /= ptm.at<double>(2); //Projection into a plane at medium depth
imgCorners1[y * 2 + x] = Point2f((float) pt.x, (float) pt.y);
if (negXY[0] > pt.x) {
negXY[0] = pt.x;
}
if (negXY[1] > pt.y) {
negXY[1] = pt.y;
}
}
}
//Project the corners of img2 on a plane at medium depth in 3D
vector<Point2f> imgCorners2(4);
Mat A = R_use.t() * K2i;
Mat a3 = A.row(2);//Get the 3rd row
double b3 = R_use.col(2).dot(t_use); // R(Col3) * t
for (int y = 0; y < 2; ++y) {
for (int x = 0; x < 2; ++x) {
Point3d pt((double) (x * (imgSize.width - 1)), (double) (y * (imgSize.height - 1)), 1.0);
Mat ptm = Mat(pt, false).reshape(1, 3);
//Get scale factor for image coordinate in image 2 to get 3D coordinates at medium depth in the coordinate system of camera 1
/* From s * x2 = K2 * (R * X + t) with x2...img coordinate in second stereo img, s...scale factor, and X...3D coordinate in coordinate system of camera 1
* ->we know x2 = (x2_1, x2_2, 1)^T and the distance X_3 of X = (X_1, X_2, X_3)^T
* Leads to s * R^T * K2^-1 * x2 - R^T * t = X
* Substituting A = R^T * K2^-1 and b = R^T * t leads to s * A * x2 = X + b
* As we only need row 3 of the equation: a3 = A_(3,:)...row 3 of A and b3 = b_3...last (3rd) entry of vector b with b3 = R_(:,3) * t
* Thus we have s * (a3 * x2) = X_3 + b3 which leads to s = (X_3 + b3) / (a3 * x2)
*/
double a3x2 = a3.dot(ptm.t());//Dot product
double s = (depth_use + b3) / a3x2;
ptm = R_use.t() * K2i * ptm;
ptm *= s; //Scale it to get the correct distance
ptm -= R_use.t() * t_use;
//Check if we have the correct distance
/*if(!nearZero(ptm.at<double>(2) - depth_use)){
cout << "Check formulas" << endl;
}*/
ptm /= ptm.at<double>(2); //Projection into a plane at medium depth
imgCorners2[y * 2 + x] = Point2f((float) pt.x, (float) pt.y);
CV_Assert(nearZero(pt.x - ptm.at<double>(0)) && nearZero(pt.y - ptm.at<double>(1)));
if (negXY[0] > pt.x) {
negXY[0] = pt.x;
}
if (negXY[1] > pt.y) {
negXY[1] = pt.y;
}
}
}
//Transform into positive domain
for (int y = 0; y < 2; ++y) {
for (int x = 0; x < 2; ++x) {
imgCorners1[y * 2 + x].x -= (float) negXY[0];
imgCorners1[y * 2 + x].y -= (float) negXY[1];
imgCorners2[y * 2 + x].x -= (float) negXY[0];
imgCorners2[y * 2 + x].y -= (float) negXY[1];
}
}
//Get the correct order of the points
vector<Point2f> imgCorners1ch, imgCorners2ch;
convexHull(imgCorners1, imgCorners1ch);
convexHull(imgCorners2, imgCorners2ch);
CV_Assert(imgCorners2ch.size() == 4);
//Convert into double
vector<Point2d> imgCorners1chd(4), imgCorners2chd(4);
for (int k = 0; k < 4; ++k) {
imgCorners1chd[k] = Point2d((double)imgCorners1ch[k].x, (double)imgCorners1ch[k].y);
imgCorners2chd[k] = Point2d((double)imgCorners2ch[k].x, (double)imgCorners2ch[k].y);
}
//Get the polygon intersection
polygon_contour c1={0, nullptr, nullptr}, c2={0, nullptr, nullptr}, res={0, nullptr, nullptr};
add_contour_from_array((double*)&imgCorners1chd[0].x, (int)imgCorners1chd.size(), &c1);
add_contour_from_array((double*)&imgCorners2chd[0].x, (int)imgCorners2chd.size(), &c2);
calc_polygon_clipping(POLYGON_OP_INTERSECT, &c1, &c2, &res);
CV_Assert(res.num_contours == 1);
std::vector<cv::Point2d> midZPoly;
for(int v = 0; v < res.contour[0].num_vertices; ++v) {
midZPoly.emplace_back(cv::Point2d(res.contour[0].vertex[v].x,res.contour[0].vertex[v].y));
}
/*for(int c = 0; c < res.num_contours; ++c) {
std::vector<cv::Point2f> pntsRes;
for(int v = 0; v < res.contour[c].num_vertices; ++v) {
pntsRes.push_back(cv::Point2d(res.contour[c].vertex[v].x,res.contour[c].vertex[v].y));
}
}*/
free_polygon(&c1);
free_polygon(&c2);
free_polygon(&res);
//Get rotated rectangles
/*cv::RotatedRect r1, r2, r3;
r1 = cv::minAreaRect(imgCorners1);
r2 = cv::minAreaRect(imgCorners2);
//Get intersections
std::vector<cv::Point2f> midZPoly;
int ti = cv::rotatedRectangleIntersection(r1, r2, midZPoly);
if (ti == INTERSECT_NONE) {
throw SequenceException("No intersection of stereo images at medium depth!");
}*/
//Transform the points back to their initial coordinate system
for (auto& i : midZPoly) {
i.x += negXY[0];
i.y += negXY[1];
}
//Backproject the found intersections to the first image
projectPolyIntersect(img1Poly, midZPoly, cv::Mat::eye(3, 3, CV_64FC1), cv::Mat::zeros(3, 1, CV_64FC1), K1, depth_use);
//Backproject the found intersections to the second image
projectPolyIntersect(img2Poly, midZPoly, R_use, t_use, K2, depth_use);
//Draw intersection area
if (visualize && (verbose & SHOW_STEREO_INTERSECTION)) {
vector<vector<Point>> intersectCont1(1), intersectCont2(1);
intersectCont1[0] = img1Poly;
intersectCont2[0] = img2Poly;
Mat wimg1 = Mat::zeros(imgSize, CV_8UC3);
Mat wimg2 = Mat::zeros(imgSize, CV_8UC3);
drawContours(wimg1, intersectCont1, 0, Scalar(0, 255, 0), FILLED);
drawContours(wimg2, intersectCont2, 0, Scalar(0, 255, 0), FILLED);
if(!writeIntermediateImg(wimg1, "stereo_intersection_img1") || !writeIntermediateImg(wimg2, "stereo_intersection_img2")) {
namedWindow("Stereo intersection image 1", WINDOW_AUTOSIZE);
namedWindow("Stereo intersection image 2", WINDOW_AUTOSIZE);
imshow("Stereo intersection image 1", wimg1);
imshow("Stereo intersection image 2", wimg2);
waitKey(0);
destroyWindow("Stereo intersection image 1");
destroyWindow("Stereo intersection image 2");
}
}
}
void genStereoSequ::projectPolyIntersect(std::vector<cv::Point> &imgPoly,
const std::vector<cv::Point2d> &midZPoly,
const cv::Mat &R_use,
const cv::Mat &t_use,
const cv::Mat &K_use,
const double &depth_use) const{
std::vector<cv::Point> img1Poly1;
for (auto& i : midZPoly) {
Mat ptm = (Mat_<double>(3, 1) << i.x, i.y, 1.0);
ptm *= depth_use;
ptm = K_use * (R_use * ptm + t_use);
ptm /= ptm.at<double>(2);
img1Poly1.emplace_back(Point((int) round(ptm.at<double>(0)), (int) round(ptm.at<double>(1))));
if (img1Poly1.back().x > (imgSize.width - 1)) {
img1Poly1.back().x = imgSize.width - 1;
}
if (img1Poly1.back().x < 0) {
img1Poly1.back().x = 0;
}
if (img1Poly1.back().y > (imgSize.height - 1)) {
img1Poly1.back().y = imgSize.height - 1;
}
if (img1Poly1.back().y < 0) {
img1Poly1.back().y = 0;
}
}
//Get the correct order of the intersections
vector<int> intSecIdx;
convexHull(img1Poly1, intSecIdx);
imgPoly.resize(intSecIdx.size());
for (size_t j = 0; j < intSecIdx.size(); ++j) {
imgPoly[j] = img1Poly1[intSecIdx[j]];
}
}
//Writes an image of an intermediate result to disk
bool genStereoSequ::writeIntermediateImg(const cv::Mat &img, const std::string &filename){
if(writeIntermRes_path.empty()){
return false;
}
if(!checkPathExists(writeIntermRes_path)){
return false;
}
if(img.empty()){
return false;
}
Mat tmp;
if(!((img.type() == CV_8UC1) || (img.type() == CV_8UC3))){
// cout << "Data for storing into image is not 8bit" << endl;
int channels = img.channels();
if(img.type() == CV_16UC1){
img.convertTo(tmp, CV_8UC1, 1./256.);
}else if(img.type() == CV_16UC3){
img.convertTo(tmp, CV_8UC3, 1./256.);
}else if ((img.type() == CV_64F) || (img.type() == CV_32F)){
double minv, maxv;
if(channels == 1) {
cv::minMaxLoc(img, &minv, &maxv);
img.copyTo(tmp);
tmp.convertTo(tmp, CV_64FC1);
tmp -= minv;
tmp.convertTo(tmp, CV_8UC1, 255./(maxv - minv));
}else if (channels == 3){
vector<Mat> imgChannels;//Avoid shadowing the channel count above
cv::split(img, imgChannels);
for(auto &ch: imgChannels){
//Normalize every channel to [0, 255] before merging
cv::minMaxLoc(ch, &minv, &maxv);
ch.convertTo(ch, CV_64FC1);
ch -= minv;
ch.convertTo(ch, CV_8UC1, 255./(maxv - minv));
}
cv::merge(imgChannels, tmp);
}else{
return false;
}
}else{
double minv, maxv;
if(channels == 1){
cv::minMaxLoc(img, &minv, &maxv);
img.copyTo(tmp);
tmp.convertTo(tmp, CV_64FC1);
tmp -= minv;
tmp.convertTo(tmp, CV_8UC1, 255./(maxv - minv));
}else if (channels == 3){
vector<Mat> imgChannels;//Avoid shadowing the channel count above
cv::split(img, imgChannels);
for(auto &ch: imgChannels){
//Normalize every channel to [0, 255] before merging
cv::minMaxLoc(ch, &minv, &maxv);
ch.convertTo(ch, CV_64FC1);
ch -= minv;
ch.convertTo(ch, CV_8UC1, 255./(maxv - minv));
}
cv::merge(imgChannels, tmp);
}else{
return false;
}
}
}else{
tmp = img;
}
string filen = filename + "_frame_" + std::to_string(actFrameCnt);
string resPath = concatPath(writeIntermRes_path, filen + ".png");
int cnt = 0;
while(checkFileExists(resPath)){
resPath = concatPath(writeIntermRes_path, filen + "_" + std::to_string(cnt) + ".png");
cnt++;
}
cv::imwrite(resPath, tmp);
return true;
}
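/* Illustration (assumed example values): with writeIntermRes_path = "/tmp/interm", filename = "stereo_intersection_img1"
 * and actFrameCnt = 5, the image is stored as "/tmp/interm/stereo_intersection_img1_frame_5.png"; if that file already
 * exists, "_0", "_1", ... is appended until a free name is found. Non-8-bit inputs are normalized to CV_8U beforehand.
 */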
//Get the fraction of intersection between the stereo images for every image region (3x3)
void genStereoSequ::getInterSecFracRegions(cv::Mat &fracUseableTPperRegion_,
const cv::Mat &R_use,
const cv::Mat &t_use,
const double &depth_use,
cv::InputArray mask,
cv::OutputArray imgUsableMask) {
//Check overlap of the stereo images
std::vector<cv::Point> img1Poly, img2Poly;
getImgIntersection(img1Poly, img2Poly, R_use, t_use, depth_use, mask.empty());
//Create a mask for overlapping views
vector<vector<Point>> intersectCont(1);
intersectCont[0] = img1Poly;
Mat wimg = Mat::zeros(imgSize, CV_8UC1);
drawContours(wimg, intersectCont, 0, Scalar(255), FILLED);
if (!mask.empty()) {
wimg &= mask.getMat();
}
if (imgUsableMask.needed()) {
if (imgUsableMask.empty()) {
imgUsableMask.create(imgSize, CV_8UC1);
}
Mat outmat_tmp = imgUsableMask.getMat();
wimg.copyTo(outmat_tmp);
}
fracUseableTPperRegion_ = Mat(3, 3, CV_64FC1);
for (int y = 0; y < 3; ++y) {
for (int x = 0; x < 3; ++x) {
if (mask.empty()) {
fracUseableTPperRegion_.at<double>(y, x) =
(double) countNonZero(wimg(regROIs[y][x])) / (double) regROIs[y][x].area();
} else {
Mat regMask = mask.getMat()(regROIs[y][x]);
int cntNZMat = countNonZero(regMask);
if(cntNZMat == 0){
fracUseableTPperRegion_.at<double>(y, x) = 0;
}
else {
fracUseableTPperRegion_.at<double>(y, x) =
(double) countNonZero(wimg(regROIs[y][x])) /
(double) cntNZMat;
}
}
}
}
}
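/* Illustration (assumed numbers): if, for the centre region regROIs[1][1] of e.g. 213x160 = 34080 pixels, 25560 pixels
 * lie inside the drawn stereo-overlap polygon, then fracUseableTPperRegion_(1,1) = 25560 / 34080 = 0.75. If a mask is
 * given, the denominator is the number of non-zero mask pixels inside that region instead of the full region area.
 */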
//Get the fraction of intersection between the stereo images for every image region (3x3) and for various depths (combined, smallest area)
void genStereoSequ::getInterSecFracRegions(cv::Mat &fracUseableTPperRegion_,
const cv::Mat &R_use,
const cv::Mat &t_use,
const std::vector<double> &depth_use,
cv::InputArray mask,
cv::OutputArray imgUsableMask){
//Check overlap of the stereo images
Mat combDepths = Mat::ones(imgSize, CV_8UC1) * 255;
for(auto &d: depth_use) {
std::vector<cv::Point> img1Poly, img2Poly;
getImgIntersection(img1Poly, img2Poly, R_use, t_use, d, false);
//Create a mask for overlapping views
vector<vector<Point>> intersectCont(1);
intersectCont[0] = img1Poly;
Mat wimg = Mat::zeros(imgSize, CV_8UC1);
drawContours(wimg, intersectCont, 0, Scalar(255), FILLED);
if (mask.empty() && (verbose & SHOW_STEREO_INTERSECTION)) {
vector<vector<Point>> intersectCont2(1);
intersectCont2[0] = img2Poly;
Mat wimg2 = Mat::zeros(imgSize, CV_8UC1);
drawContours(wimg2, intersectCont2, 0, Scalar(255), FILLED);
int nrIntSecPix1 = cv::countNonZero(wimg);
int nrIntSecPix2 = cv::countNonZero(wimg2);
int imgA = imgSize.area();
auto viewOverlap = static_cast<double>(nrIntSecPix1 + nrIntSecPix2) / static_cast<double>(2 * imgA);
cout << "View overlap at depth " << d << ": " << viewOverlap << endl;
}
combDepths &= wimg;
}
//Draw intersection area
if(mask.empty() && (verbose & SHOW_STEREO_INTERSECTION)){
int nrIntSecPix = cv::countNonZero(combDepths);
auto viewOverlap = static_cast<double>(nrIntSecPix) / static_cast<double>(imgSize.width * imgSize.height);
cout << "Average view overlap: " << viewOverlap << endl;
if(!writeIntermediateImg(combDepths, "stereo_intersection_combDepths")) {
namedWindow("Stereo intersection", WINDOW_AUTOSIZE);
imshow("Stereo intersection", combDepths);
waitKey(0);
destroyWindow("Stereo intersection");
}
}
if (!mask.empty()) {
combDepths &= mask.getMat();
}
if (imgUsableMask.needed()) {
if (imgUsableMask.empty()) {
imgUsableMask.create(imgSize, CV_8UC1);
}
Mat outmat_tmp = imgUsableMask.getMat();
combDepths.copyTo(outmat_tmp);
}
fracUseableTPperRegion_ = Mat(3, 3, CV_64FC1);
for (int y = 0; y < 3; ++y) {
for (int x = 0; x < 3; ++x) {
if (mask.empty()) {
fracUseableTPperRegion_.at<double>(y, x) =
(double) countNonZero(combDepths(regROIs[y][x])) / (double) regROIs[y][x].area();
} else {
Mat regMask = mask.getMat()(regROIs[y][x]);
int cntNZMat = countNonZero(regMask);
if(cntNZMat == 0){
fracUseableTPperRegion_.at<double>(y, x) = 0;
}
else {
fracUseableTPperRegion_.at<double>(y, x) =
(double) countNonZero(combDepths(regROIs[y][x])) /
(double) cntNZMat;
}
}
}
}
}
//Get the number of correspondences per image and the correspondences per image region
//Check if there are too many correspondences per region, as every correspondence needs a minimum distance to its neighbor. If yes, the minimum distance and/or the number of correspondences are adapted.
void genStereoSequ::initNrCorrespondences() {
//Number of correspondences per image
genNrCorrsImg();
//Correspondences per image regions
bool res = initFracCorrImgReg();
while (!res) {
genNrCorrsImg();
res = initFracCorrImgReg();
}
}
//Initialize fraction of correspondences per image region and calculate the absolute number of TP/TN correspondences per image region
bool genStereoSequ::initFracCorrImgReg() {
if ((pars.corrsPerRegRepRate == 0) && pars.corrsPerRegion.empty()) {
for (size_t i = 0; i < totalNrFrames; i++) {
Mat newCorrsPerRegion(3, 3, CV_64FC1);
double sumNewCorrsPerRegion = 0;
while(nearZero(sumNewCorrsPerRegion)) {
cv::randu(newCorrsPerRegion, Scalar(0), Scalar(1.0));
sumNewCorrsPerRegion = sum(newCorrsPerRegion)[0];
}
newCorrsPerRegion /= sumNewCorrsPerRegion;
pars.corrsPerRegion.push_back(newCorrsPerRegion.clone());
}
pars.corrsPerRegRepRate = 1;
} else if (pars.corrsPerRegRepRate == 0) {
pars.corrsPerRegRepRate = std::max<size_t>(totalNrFrames / pars.corrsPerRegion.size(), 1);
} else if (pars.corrsPerRegion.empty()) {
//Randomly initialize the fractions
size_t nrMats = std::max<size_t>(totalNrFrames / pars.corrsPerRegRepRate, 1);
for (size_t i = 0; i < nrMats; i++) {
Mat newCorrsPerRegion(3, 3, CV_64FC1);
double sumNewCorrsPerRegion = 0;
while(nearZero(sumNewCorrsPerRegion)) {
cv::randu(newCorrsPerRegion, Scalar(0), Scalar(1.0));
sumNewCorrsPerRegion = sum(newCorrsPerRegion)[0];
}
newCorrsPerRegion /= sumNewCorrsPerRegion;
pars.corrsPerRegion.push_back(newCorrsPerRegion.clone());
}
}
for (auto&& k : pars.corrsPerRegion) {
double regSum = sum(k)[0];
if (!nearZero(regSum) && !nearZero(regSum - 1.0))
k /= regSum;
else if (nearZero(regSum)) {
k = Mat::ones(3, 3, CV_64FC1) / 9.0;
}
}
//Generate absolute number of correspondences per image region and frame
nrTruePosRegs.clear();
nrCorrsRegs.clear();
nrTrueNegRegs.clear();
nrTruePosRegs.reserve(totalNrFrames);
nrCorrsRegs.reserve(totalNrFrames);
nrTrueNegRegs.reserve(totalNrFrames);
size_t cnt = 0, stereoIdx = 0;
cv::Mat fracUseableTPperRegion_tmp, fracUseableTPperRegionNeg, fUTPpRNSum1, stereoImgsOverlapMask_tmp;
for (size_t i = 0; i < totalNrFrames; i++) {
//Get intersection of stereo images at medium distance plane for every image region in camera 1
if ((i % pars.nFramesPerCamConf) == 0) {
if (i > 0) {
stereoIdx++;
}
vector<double> combDepths(3);
combDepths[0] = depthMid[stereoIdx];
combDepths[1] = depthFar[stereoIdx] + (maxFarDistMultiplier - 1.0) * depthFar[stereoIdx] * pars.corrsPerDepth.far;
combDepths[2] = depthNear[stereoIdx] + max((1.0 - pars.corrsPerDepth.near), 0.2) * (depthMid[stereoIdx] - depthNear[stereoIdx]);
if (verbose & SHOW_STEREO_INTERSECTION) {
cout << "Stereo Cam " << stereoIdx << ":" << endl;
}
getInterSecFracRegions(fracUseableTPperRegion_tmp,
R[stereoIdx],
t[stereoIdx],
combDepths,
cv::noArray(),
stereoImgsOverlapMask_tmp);
stereoImgsOverlapMask.push_back(stereoImgsOverlapMask_tmp.clone());
fracUseableTPperRegion.push_back(fracUseableTPperRegion_tmp.clone());
fracUseableTPperRegionNeg = Mat::ones(3, 3, CV_64FC1) - fracUseableTPperRegion_tmp;
double sumFracUseableTPperRegionNeg = cv::sum(fracUseableTPperRegionNeg)[0];
if(nearZero(sumFracUseableTPperRegionNeg))
sumFracUseableTPperRegionNeg = 1.0;
fUTPpRNSum1 = fracUseableTPperRegionNeg / sumFracUseableTPperRegionNeg;
}
cv::Mat corrsTooMuch;
double activeRegions = 9.0;
Mat corrsRemain = Mat::ones(3, 3, CV_32SC1);
//Get number of correspondences per region
Mat newCorrsPerRegion;
Mat negsReg(3, 3, CV_64FC1);
bool recalcCorrsPerRegion = false;
do {
newCorrsPerRegion = pars.corrsPerRegion[cnt] * (double)nrCorrs[i];
newCorrsPerRegion.convertTo(newCorrsPerRegion, CV_32SC1, 1.0, 0.5);//Corresponds to round
int32_t chkSize = (int32_t)sum(newCorrsPerRegion)[0] - (int32_t) nrCorrs[i];
if (chkSize > 0) {
do {
int pos = (int)(rand2() % 9);
if (newCorrsPerRegion.at<int32_t>(pos) > 0) {
newCorrsPerRegion.at<int32_t>(pos)--;
chkSize--;
} /*else
{
cout << "Zero corrs in region " << pos << "of frame " << i << endl;
}*/
} while (chkSize > 0);
} else if (chkSize < 0) {
do {
int pos = (int)(rand2() % 9);
if (!nearZero(pars.corrsPerRegion[cnt].at<double>(pos))) {
newCorrsPerRegion.at<int32_t>(pos)++;
chkSize++;
}
} while (chkSize < 0);
}
//Check if the overall number of correspondences is still correct
if(verbose & PRINT_WARNING_MESSAGES) {
int corrsDiff = (int)sum(newCorrsPerRegion)[0] - (int)nrCorrs[i];
if (((double) abs(corrsDiff) / (double) nrCorrs[i]) > 0.01) {
cout << "The distribution of correspondences over the regions did not work!" << endl;
}
}
Mat usedTNonNoOverlapRat;
Mat newCorrsPerRegiond;
int cnt1 = 0;
const int cnt1Max = 3;
while ((cv::sum(corrsRemain)[0] > 0) && (cnt1 < cnt1Max)) {
//Check if there are too many correspondences per region as every correspondence needs a minimum distance to its neighbor
double minCorr, maxCorr;
cv::minMaxLoc(newCorrsPerRegion, &minCorr, &maxCorr);
if(nearZero(activeRegions)) activeRegions = 0.1;
double regA = activeRegions * (double) imgSize.area() / (9.0 * 9.0);
double areaCorrs = maxCorr * avgMaskingArea *
enlargeKPDist;//Multiply by 1.15 to take gaps into account that are a result of randomness
if (areaCorrs > regA) {
if(verbose & PRINT_WARNING_MESSAGES) {
cout << "There are too many keypoints per region when demanding a minimum keypoint distance of "
<< pars.minKeypDist << ". Changing it!" << endl;
}
double mKPdist = floor((sqrt(regA / (enlargeKPDist * maxCorr)) - 1.0) / 2.0) - DBL_EPSILON;
if (mKPdist <= 1.414214) {
pars.minKeypDist = 1.0 - DBL_EPSILON;
genMasks();
//Get max # of correspondences
double maxFC = (double) *std::max_element(nrCorrs.begin(), nrCorrs.end());
//Get the largest portion of correspondences within a single region
vector<double> cMaxV(pars.corrsPerRegion.size());
for (size_t k = 0; k < pars.corrsPerRegion.size(); k++) {
cv::minMaxLoc(pars.corrsPerRegion[k], &minCorr, &maxCorr);
cMaxV[k] = maxCorr;
}
maxCorr = *std::max_element(cMaxV.begin(), cMaxV.end());
maxCorr *= maxFC;
//# KPs reduction factor
double reduF = regA / (enlargeKPDist * avgMaskingArea * maxCorr);
if(reduF < 1.0) {
if (verbose & PRINT_WARNING_MESSAGES) {
cout
<< "Changed the minimum keypoint distance to 1.0. There are still too many keypoints. Changing the number of keypoints!"
<< endl;
}
//Get worst inlier ratio
double minILR = *std::min_element(inlRat.begin(), inlRat.end());
//Calc max true positives
auto maxTPNew = (size_t) floor(maxCorr * reduF * minILR);
if (verbose & PRINT_WARNING_MESSAGES) {
cout << "Changing max. true positives to " << maxTPNew << endl;
}
if ((pars.truePosRange.second - pars.truePosRange.first) == 0) {
pars.truePosRange.first = pars.truePosRange.second = maxTPNew;
} else {
if (pars.truePosRange.first >= maxTPNew) {
pars.truePosRange.first = maxTPNew / 2;
pars.truePosRange.second = maxTPNew;
} else {
pars.truePosRange.second = maxTPNew;
}
}
nrTruePosRegs.clear();
nrCorrsRegs.clear();
nrTrueNegRegs.clear();
return false;
}else{
if(verbose & PRINT_WARNING_MESSAGES) {
cout << "Changed the minimum keypoint distance to " << pars.minKeypDist << endl;
}
}
} else {
if(verbose & PRINT_WARNING_MESSAGES) {
cout << "Changed the minimum keypoint distance to " << mKPdist << endl;
}
pars.minKeypDist = mKPdist;
genMasks();
}
}
//Get number of true negatives per region
if (cnt1 == 0) {
int maxit = 5;
maxCorr = 1.0;
while ((maxCorr > 0.5) && (maxit > 0)) {
negsReg = Mat(3, 3, CV_64FC1);
double sumNegsReg = 0;
while(nearZero(sumNegsReg)) {
cv::randu(negsReg, Scalar(0), Scalar(1.0));
sumNegsReg = sum(negsReg)[0];
}
negsReg /= sumNegsReg;
negsReg = 0.33 * negsReg + negsReg.mul(fUTPpRNSum1);
negsReg /= sum(negsReg)[0];
cv::minMaxLoc(negsReg, &minCorr, &maxCorr);
maxit--;
}
if (maxit == 0) {
minCorr = maxCorr / 16.0;
for (int y = 0; y < 3; ++y) {
for (int x = 0; x < 3; ++x) {
if (nearZero(negsReg.at<double>(y, x) - maxCorr)) {
negsReg.at<double>(y, x) /= 2.0;
} else {
negsReg.at<double>(y, x) += minCorr;
}
}
}
negsReg /= sum(negsReg)[0];
}
newCorrsPerRegion.convertTo(newCorrsPerRegiond, CV_64FC1);
corrsTooMuch = fracUseableTPperRegionNeg.mul(newCorrsPerRegiond);
corrsTooMuch.convertTo(corrsTooMuch, CV_32SC1, 1.0, 0.5);//Corresponds to round
} else {
negsReg.convertTo(negsReg, CV_64FC1);
negsReg = 0.66 * negsReg + 0.33 * negsReg.mul(fUTPpRNSum1);
double sumNegsReg = sum(negsReg)[0];
if(!nearZero(sumNegsReg))
negsReg /= sumNegsReg;
cv::minMaxLoc(negsReg, &minCorr, &maxCorr);
if (maxCorr > 0.5) {
minCorr = maxCorr / 16.0;
for (int y = 0; y < 3; ++y) {
for (int x = 0; x < 3; ++x) {
if (nearZero(negsReg.at<double>(y, x) - maxCorr)) {
negsReg.at<double>(y, x) /= 2.0;
} else {
negsReg.at<double>(y, x) += minCorr;
}
}
}
negsReg /= sum(negsReg)[0];
}
}
negsReg = negsReg.mul(newCorrsPerRegiond);//Max number of true negatives per region
double sumNegsReg = sum(negsReg)[0];
if(!nearZero(sumNegsReg))
negsReg *= (double) nrTrueNeg[i] / sumNegsReg;
negsReg.convertTo(negsReg, CV_32SC1, 1.0, 0.5);//Corresponds to round
for (size_t j = 0; j < 9; j++) {
while (negsReg.at<int32_t>(j) > newCorrsPerRegion.at<int32_t>(j))
negsReg.at<int32_t>(j)--;
}
chkSize = (int32_t) sum(negsReg)[0] - (int32_t) nrTrueNeg[i];
if (chkSize > 0) {
do {
int pos = (int)(rand2() % 9);
if (negsReg.at<int32_t>(pos) > 0) {
negsReg.at<int32_t>(pos)--;
chkSize--;
} /*else
{
cout << "Zero neg corrs in region " << pos << "of frame " << i << endl;
}*/
} while (chkSize > 0);
} else if (chkSize < 0) {
do {
int pos = (int)(rand2() % 9);
if (negsReg.at<int32_t>(pos) < newCorrsPerRegion.at<int32_t>(pos)) {
negsReg.at<int32_t>(pos)++;
chkSize++;
}
} while (chkSize < 0);
}
//Check if there are still TP outside the intersection of the 2 stereo camera views
corrsRemain = corrsTooMuch - negsReg;
usedTNonNoOverlapRat = Mat::ones(3, 3, CV_64FC1);
double areaPerKP = avgMaskingArea * enlargeKPDist;
for (int y = 0; y < 3; ++y) {
for (int x = 0; x < 3; ++x) {
if (corrsRemain.at<int32_t>(y, x) > 0) {
usedTNonNoOverlapRat.at<double>(y, x) =
(double) negsReg.at<int32_t>(y, x) / (double) corrsTooMuch.at<int32_t>(y, x);
double neededArea =
(newCorrsPerRegiond.at<double>(y, x) - (double) negsReg.at<int32_t>(y, x)) *
areaPerKP;
double usableArea =
fracUseableTPperRegion_tmp.at<double>(y, x) * (double) regROIs[y][x].area();
if (neededArea > usableArea) {
double diffArea = neededArea - usableArea;
corrsRemain.at<int32_t>(y, x) = (int32_t) round(diffArea / areaPerKP);
} else {
corrsRemain.at<int32_t>(y, x) = 0;
}
} else {
corrsRemain.at<int32_t>(y, x) = 0;
}
}
}
activeRegions = cv::sum(usedTNonNoOverlapRat)[0];
cnt1++;
}
if ((cv::sum(corrsRemain)[0] > 0) && (cnt1 >= cnt1Max)) {
//Adapt number of correspondences per region
pars.corrsPerRegion[cnt] = pars.corrsPerRegion[cnt].mul(usedTNonNoOverlapRat);
double sumCorrsPerRegion = cv::sum(pars.corrsPerRegion[cnt])[0];
if(nearZero(sumCorrsPerRegion)){
double sumNewCorrsPerRegion = 0;
while(nearZero(sumNewCorrsPerRegion)) {
cv::randu(pars.corrsPerRegion[cnt], Scalar(0), Scalar(1.0));
sumNewCorrsPerRegion = sum(pars.corrsPerRegion[cnt])[0];
}
sumCorrsPerRegion = sumNewCorrsPerRegion;//Normalize the re-randomized fractions below
}
pars.corrsPerRegion[cnt] /= sumCorrsPerRegion;
recalcCorrsPerRegion = true;
cnt1 = 0;
corrsRemain = Mat::ones(3, 3, CV_32SC1);
activeRegions = 9.0;
} else {
recalcCorrsPerRegion = false;
}
} while (recalcCorrsPerRegion);
nrCorrsRegs.push_back(newCorrsPerRegion.clone());
nrTrueNegRegs.push_back(negsReg.clone());
//Check if the overall number of correspondences is still correct
if(verbose & PRINT_WARNING_MESSAGES) {
int corrsDiff = (int)sum(newCorrsPerRegion)[0] - (int)nrCorrs[i];
if (((double) abs(corrsDiff) / (double) nrCorrs[i]) > 0.01) {
cout << "The distribution of correspondences over the regions did not work!" << endl;
}
}
//Get number of true positives per region
newCorrsPerRegion = newCorrsPerRegion - negsReg;
nrTruePosRegs.push_back(newCorrsPerRegion.clone());
//Check the overall inlier ratio
if(verbose & PRINT_WARNING_MESSAGES) {
int32_t allRegTP = (int32_t)sum(nrTruePosRegs.back())[0];
int32_t allRegCorrs = (int32_t)sum(nrCorrsRegs.back())[0];
double thisInlRatDiff = (double)allRegTP / (double)allRegCorrs - inlRat[i];
double testVal = min((double)allRegCorrs / 100.0, 1.0) * thisInlRatDiff / 100.0;
if (!nearZero(testVal)) {
cout << "Initial inlier ratio differs from given values (0 - 1.0) by "
<< thisInlRatDiff << endl;
}
}
//Check if the fraction of correspondences per region must be changed
if ((((i + 1) % (pars.corrsPerRegRepRate)) == 0)) {
cnt++;
if (cnt >= pars.corrsPerRegion.size()) {
cnt = 0;
}
}
}
return true;
}
//Generate number of correspondences
void genStereoSequ::genNrCorrsImg() {
nrCorrs.resize(totalNrFrames);
nrTrueNeg.resize(totalNrFrames);
if ((pars.truePosRange.second - pars.truePosRange.first) == 0) {
if (pars.truePosRange.first == 0) {
throw SequenceException(
"Number of true positives specified 0 for all frames - nothing can be generated!\n");
}
nrTruePos.resize(totalNrFrames, pars.truePosRange.first);
for (size_t i = 0; i < totalNrFrames; i++) {
nrCorrs[i] = (size_t) round((double) pars.truePosRange.first / inlRat[i]);
nrTrueNeg[i] = nrCorrs[i] - pars.truePosRange.first;
}
if (nearZero(pars.inlRatRange.first - pars.inlRatRange.second)) {
fixedNrCorrs = true;
}
} else {
size_t initTruePos = std::max((size_t) round(
getRandDoubleValRng((double) pars.truePosRange.first, (double) pars.truePosRange.second)), (size_t) 1);
if (nearZero(pars.truePosChanges)) {
nrTruePos.resize(totalNrFrames, initTruePos);
for (size_t i = 0; i < totalNrFrames; i++) {
nrCorrs[i] = (size_t) round((double) initTruePos / inlRat[i]);
nrTrueNeg[i] = nrCorrs[i] - initTruePos;
}
} else if (nearZero(pars.truePosChanges - 100.0)) {
nrTruePos.resize(totalNrFrames);
std::uniform_int_distribution<size_t> distribution(pars.truePosRange.first, pars.truePosRange.second);
for (size_t i = 0; i < totalNrFrames; i++) {
nrTruePos[i] = distribution(rand_gen);
nrCorrs[i] = (size_t) round((double) nrTruePos[i] / inlRat[i]);
nrTrueNeg[i] = nrCorrs[i] - nrTruePos[i];
}
} else {
nrTruePos.resize(totalNrFrames);
nrTruePos[0] = initTruePos;
nrCorrs[0] = (size_t) round((double) nrTruePos[0] / inlRat[0]);
nrTrueNeg[0] = nrCorrs[0] - nrTruePos[0];
for (size_t i = 1; i < totalNrFrames; i++) {
auto rangeVal = (size_t) round(pars.truePosChanges * (double) nrTruePos[i - 1]);
size_t maxTruePos = nrTruePos[i - 1] + rangeVal;
maxTruePos = maxTruePos > pars.truePosRange.second ? pars.truePosRange.second : maxTruePos;
int64_t minTruePos_ = (int64_t) nrTruePos[i - 1] - (int64_t) rangeVal;
size_t minTruePos = minTruePos_ < 0 ? 0 : ((size_t) minTruePos_);
minTruePos = minTruePos < pars.truePosRange.first ? pars.truePosRange.first : minTruePos;
std::uniform_int_distribution<size_t> distribution(minTruePos, maxTruePos);
nrTruePos[i] = distribution(rand_gen);
nrCorrs[i] = (size_t) round((double) nrTruePos[i] / inlRat[i]);
nrTrueNeg[i] = nrCorrs[i] - nrTruePos[i];
}
}
}
}
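/* Worked example (illustration only): for a fixed number of true positives of 100 (truePosRange = [100, 100]) and an
 * inlier ratio inlRat[i] = 0.6, nrCorrs[i] = round(100 / 0.6) = 167 and nrTrueNeg[i] = 167 - 100 = 67 for every frame i.
 */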
//Generate the inlier ratio for every frame
void genStereoSequ::genInlierRatios() {
if (nearZero(pars.inlRatRange.first - pars.inlRatRange.second)) {
inlRat.resize(totalNrFrames, max(pars.inlRatRange.first, 0.01));
} else {
double initInlRat = getRandDoubleValRng(pars.inlRatRange.first, pars.inlRatRange.second, rand_gen);
initInlRat = max(initInlRat, 0.01);
if (nearZero(pars.inlRatChanges)) {
inlRat.resize(totalNrFrames, initInlRat);
} else if (nearZero(pars.inlRatChanges - 100.0)) {
inlRat.resize(totalNrFrames);
std::uniform_real_distribution<double> distribution(pars.inlRatRange.first, pars.inlRatRange.second);
for (size_t i = 0; i < totalNrFrames; i++) {
inlRat[i] = max(distribution(rand_gen), 0.01);
}
} else {
inlRat.resize(totalNrFrames);
inlRat[0] = initInlRat;
for (size_t i = 1; i < totalNrFrames; i++) {
double maxInlrat = inlRat[i - 1] + pars.inlRatChanges * inlRat[i - 1];
maxInlrat = maxInlrat > pars.inlRatRange.second ? pars.inlRatRange.second : maxInlrat;
double minInlrat = inlRat[i - 1] - pars.inlRatChanges * inlRat[i - 1];
minInlrat = minInlrat < pars.inlRatRange.first ? pars.inlRatRange.first : minInlrat;
inlRat[i] = max(getRandDoubleValRng(minInlrat, maxInlrat), 0.01);
}
}
}
}
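/* Illustration (assumed parameters): with inlRatRange = [0.4, 0.9], inlRatChanges = 0.2 and inlRat[i-1] = 0.5, the next
 * ratio is drawn from [0.5 - 0.2 * 0.5, 0.5 + 0.2 * 0.5] = [0.4, 0.6] (clipped to inlRatRange and to a minimum of 0.01).
 */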
/* Constructs an absolute camera path including the position and rotation of the stereo rig (left/lower camera centre)
*/
void genStereoSequ::constructCamPath() {
//Calculate the absolute velocity of the cameras
absCamVelocity = 0;
for (auto& i : t) {
absCamVelocity += norm(i);
}
absCamVelocity /= (double) t.size();
absCamVelocity *= pars.relCamVelocity;//in baselines from frame to frame
//Calculate total number of frames
// totalNrFrames = pars.nFramesPerCamConf * t.size();
if((pars.nTotalNrFrames >= max(pars.nFramesPerCamConf * (t.size() - 1) + 1, pars.nFramesPerCamConf))
&& (pars.nTotalNrFrames <= pars.nFramesPerCamConf * t.size())) {
totalNrFrames = pars.nTotalNrFrames;
}else{
if(pars.nTotalNrFrames <= pars.nFramesPerCamConf * t.size()) {
cout << "The provided number of total frames would be too small to use all provided stereo configurations."
<< endl;
}else{
cout << "The provided number of total frames is too large. The sequence of different "
"stereo configurations would be repeated." << endl;
}
totalNrFrames = pars.nFramesPerCamConf * t.size();
cout << "Changing number of frames from " << pars.nTotalNrFrames << " to " << totalNrFrames << endl;
}
//Number of track elements
size_t nrTracks = pars.camTrack.size();
absCamCoordinates = vector<Poses>(totalNrFrames);
Mat R0;
if (pars.R.empty())
R0 = Mat::eye(3, 3, CV_64FC1);
else
R0 = pars.R;
Mat t1 = Mat::zeros(3, 1, CV_64FC1);
if (nrTracks == 1) {
double camTrackNorm = norm(pars.camTrack[0]);
Mat R1;
if(!nearZero(camTrackNorm)) {
pars.camTrack[0] /= camTrackNorm;
R1 = R0 * getTrackRot(pars.camTrack[0]);
}
else{
R1 = R0;
}
Mat t_piece = absCamVelocity * pars.camTrack[0];
absCamCoordinates[0] = Poses(R1.clone(), t1.clone());
for (size_t i = 1; i < totalNrFrames; i++) {
t1 += t_piece;
absCamCoordinates[i] = Poses(R1.clone(), t1.clone());
}
} else {
//Get differential vectors of the path and the overall path length
vector<Mat> diffTrack = vector<Mat>(nrTracks - 1);
vector<double> tdiffNorms = vector<double>(nrTracks - 1);
double trackNormSum = 0;//norm(pars.camTrack[0]);
// diffTrack[0] = pars.camTrack[0].clone();// / trackNormSum;
// tdiffNorms[0] = trackNormSum;
for (size_t i = 0; i < nrTracks - 1; i++) {
Mat tdiff = pars.camTrack[i + 1] - pars.camTrack[i];
double tdiffnorm = norm(tdiff);
trackNormSum += tdiffnorm;
diffTrack[i] = tdiff.clone();// / tdiffnorm;
tdiffNorms[i] = tdiffnorm;
}
//Calculate a new scaling for the path based on the original path length, total number of frames and camera velocity
if(nearZero(trackNormSum)){
throw SequenceException("Provided a track without movement!");
}
double trackScale = (double) (totalNrFrames - 1) * absCamVelocity / trackNormSum;
//Rescale track diffs
for (size_t i = 0; i < nrTracks - 1; i++) {
diffTrack[i] *= trackScale;
tdiffNorms[i] *= trackScale;
}
//Get camera positions
Mat R_track = getTrackRot(diffTrack[0]);
Mat R_track_old = R_track.clone();
Mat R1 = R0 * R_track;
pars.camTrack[0].copyTo(t1);
t1 *= trackScale;
absCamCoordinates[0] = Poses(R1.clone(), t1.clone());
double actDiffLength = 0;
size_t actTrackNr = 0, lastTrackNr = 0;
for (size_t i = 1; i < totalNrFrames; i++) {
bool firstAdd = true;
Mat multTracks = Mat::zeros(3, 1, CV_64FC1);
double usedLength = 0;
while ((actDiffLength < (absCamVelocity - DBL_EPSILON)) && (actTrackNr < (nrTracks - 1))) {
if (firstAdd) {
if(!nearZero(tdiffNorms[lastTrackNr]))
multTracks += actDiffLength * diffTrack[lastTrackNr] / tdiffNorms[lastTrackNr];
usedLength = actDiffLength;
firstAdd = false;
} else {
multTracks += diffTrack[lastTrackNr];
usedLength += tdiffNorms[lastTrackNr];
}
lastTrackNr = actTrackNr;
actDiffLength += tdiffNorms[actTrackNr++];
}
if(!nearZero(tdiffNorms[lastTrackNr]))
multTracks += (absCamVelocity - usedLength) * diffTrack[lastTrackNr] / tdiffNorms[lastTrackNr];
R_track = getTrackRot(diffTrack[lastTrackNr], R_track_old);
R_track_old = R_track.clone();
R1 = R0 * R_track;
t1 += multTracks;
absCamCoordinates[i] = Poses(R1.clone(), t1.clone());
actDiffLength -= absCamVelocity;
}
}
if (verbose & SHOW_INIT_CAM_PATH)
visualizeCamPath();
}
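/* Worked example (illustration only, assumed values): with two stereo configurations whose baselines have norms 0.5 and
 * 0.7 and pars.relCamVelocity = 2.0, absCamVelocity = 0.6 * 2.0 = 1.2 length units per frame. For a multi-segment track
 * of original length trackNormSum = 10 and totalNrFrames = 21, the track is rescaled by
 * trackScale = (21 - 1) * 1.2 / 10 = 2.4 so that the camera travels exactly absCamVelocity between consecutive frames.
 */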
/*Calculates a rotation for every differential vector of a track segment to ensure that the camera always looks in the direction of the track segment.
* If the track segment equals the x-axis, the camera faces in the positive x-direction (if the initial rotation equals the identity).
* The y-axis always points down as the up vector is defined as [0,-1,0]. Thus, there is no roll in the camera rotation.
*/
cv::Mat genStereoSequ::getTrackRot(const cv::Mat tdiff, cv::InputArray R_old) {
CV_Assert((tdiff.rows == 3) && (tdiff.cols == 1) && (tdiff.type() == CV_64FC1));
Mat R_C2W = Mat::eye(3, 3, CV_64FC1);
if (nearZero(cv::norm(tdiff)))
return R_C2W;
Mat tdiff_ = tdiff.clone();
tdiff_ /= norm(tdiff_);
Mat Rold;
if (!R_old.empty())
Rold = R_old.getMat();
//Define up-vector as global -y axis
Mat world_up = (Mat_<double>(3, 1) << 0, -1, 0);
// Mat world_up = (Mat_<double>(3, 1) << 0, 1, 0);
world_up /= norm(world_up);
if (nearZero(cv::sum(abs(tdiff_ - world_up))[0])) {
R_C2W = (Mat_<double>(3, 3) << 1.0, 0, 0,
0, 0, -1.0,
0, 1.0, 0);
if (!Rold.empty()) {
Mat Rr;
if (roundR(Rold, Rr, R_C2W)) {
R_C2W = Rr;
}
}
} else if (nearZero(cv::sum(abs(tdiff_ + world_up))[0])) {
R_C2W = (Mat_<double>(3, 3) << 1.0, 0, 0,
0, 0, 1.0,
0, -1.0, 0);
if (!Rold.empty()) {
Mat Rr;
if (roundR(Rold, Rr, R_C2W)) {
R_C2W = Rr;
}
}
} else {
//Get local axis that is perpendicular to up-vector and look-at vector (new x axis) -> to prevent roll
Mat xa = tdiff_.cross(world_up);
xa /= norm(xa);
//Get local axis that is perpendicular to new x-axis and look-at vector (new y axis)
Mat ya = tdiff_.cross(xa);
ya /= norm(ya);
//Build the rotation matrix (camera to world: x_world = R_C2W * x_local + t) by stacking the normalized axis vectors as columns of R=[x,y,z]
xa.copyTo(R_C2W.col(0));
ya.copyTo(R_C2W.col(1));
tdiff_.copyTo(R_C2W.col(2));
}
return R_C2W;//return rotation from camera to world
}
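/* Worked example (illustration only): for a track direction tdiff = (0, 0, 1)^T and the up-vector (0, -1, 0)^T, the new
 * camera x-axis is xa = tdiff x up = (1, 0, 0)^T and the new y-axis is ya = tdiff x xa = (0, 1, 0)^T, so
 * R_C2W = [xa ya tdiff] = I: the camera looks along +z with its y-axis pointing down (no roll), as described above.
 */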
void genStereoSequ::visualizeCamPath() {
boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer(new pcl::visualization::PCLVisualizer("Camera path"));
initPCLViewerCoordinateSystems(viewer);
for (auto &i : absCamCoordinates) {
addVisualizeCamCenter(viewer, i.R, i.t);
}
viewer->initCameraParameters();
startPCLViewer(viewer);
}
//Calculate the thresholds for the depths near, mid, and far for every camera configuration
bool genStereoSequ::getDepthRanges() {
depthFar = vector<double>(nrStereoConfs);
depthMid = vector<double>(nrStereoConfs);
depthNear = vector<double>(nrStereoConfs);
for (size_t i = 0; i < nrStereoConfs; i++) {
Mat x1, x2;
if (abs(t[i].at<double>(0)) > abs(t[i].at<double>(1))) {
if (t[i].at<double>(0) < t[i].at<double>(1)) {
x1 = (Mat_<double>(3, 1) << (double) imgSize.width, (double) imgSize.height / 2.0, 1.0);
x2 = (Mat_<double>(3, 1) << 0, (double) imgSize.height / 2.0, 1.0);
} else {
x2 = (Mat_<double>(3, 1) << (double) imgSize.width, (double) imgSize.height / 2.0, 1.0);
x1 = (Mat_<double>(3, 1) << 0, (double) imgSize.height / 2.0, 1.0);
}
} else {
if (t[i].at<double>(1) < t[i].at<double>(0)) {
x1 = (Mat_<double>(3, 1) << (double) imgSize.width / 2.0, (double) imgSize.height, 1.0);
x2 = (Mat_<double>(3, 1) << (double) imgSize.width / 2.0, 0, 1.0);
} else {
x2 = (Mat_<double>(3, 1) << (double) imgSize.width / 2.0, (double) imgSize.height, 1.0);
x1 = (Mat_<double>(3, 1) << (double) imgSize.width / 2.0, 0, 1.0);
}
}
double bl = norm(t[i]);
depthFar[i] = sqrt(K1.at<double>(0, 0) * bl * bl /
0.15);//0.15 corresponds to the approximate typical correspondence accuracy in pixels
//Calculate min distance for 3D points visible in both images
Mat b1 = getLineCam1(K1, x1);
Mat a2, b2;
getLineCam2(R[i], t[i], K2, x2, a2, b2);
depthNear[i] = getLineIntersect(b1, a2, b2);
depthNear[i] = depthNear[i] > 0 ? depthNear[i] : 0;
depthMid[i] = (depthFar[i] - depthNear[i]) / 2.0;
if (depthMid[i] < 0) {
return false;
}
}
return true;
}
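/* Numeric illustration (assumed values): for a focal length K1(0,0) = 800 pixels, a baseline length bl = 0.5 and the
 * assumed correspondence accuracy of 0.15 pixels, depthFar = sqrt(800 * 0.5 * 0.5 / 0.15) = sqrt(1333.3) = approx. 36.5
 * length units. depthNear is the minimum distance visible in both views and depthMid = (depthFar - depthNear) / 2.
 */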
/* As the user can specify portions of different depths (near, mid, far) globally for the whole image and also for regions within the image,
these fractions typically do not match. As a result, the depth range fractions per region must be adapted to match the overall fractions of the
whole image. Moreover, the fraction of correspondences per region has an impact on the effective depth portions and must be considered when
adapting the fractions in the image regions.
*/
void genStereoSequ::adaptDepthsPerRegion() {
if (pars.depthsPerRegion.empty()) {
pars.depthsPerRegion = std::vector<std::vector<depthPortion>>(3, std::vector<depthPortion>(3));
std::uniform_real_distribution<double> distribution(0, 1.0);
for (size_t i = 0; i < 3; i++) {
for (size_t j = 0; j < 3; j++) {
pars.depthsPerRegion[i][j] = depthPortion(distribution(rand_gen), distribution(rand_gen),
distribution(rand_gen));
}
}
} else {
//Check if the sum of fractions is 1.0
for (size_t i = 0; i < 3; i++) {
for (size_t j = 0; j < 3; j++) {
pars.depthsPerRegion[i][j].sumTo1();
}
}
}
pars.corrsPerDepth.sumTo1();
depthsPerRegion = std::vector<std::vector<std::vector<depthPortion>>>(pars.corrsPerRegion.size(),
pars.depthsPerRegion);
//Correct the portion of depths per region so that they meet the global depth range requirement per image
for (size_t k = 0; k < pars.corrsPerRegion.size(); k++) {
//Adapt the fractions of near depths of every region to match the global requirement of the near depth fraction
updDepthReg(true, depthsPerRegion[k], pars.corrsPerRegion[k]);
//Update the mid and far depth fractions of each region according to the new near depth fractions
for (size_t i = 0; i < 3; i++) {
for (size_t j = 0; j < 3; j++) {
double splitrem = 1.0 - depthsPerRegion[k][i][j].near;
if (!nearZero(splitrem)) {
if (!nearZero(depthsPerRegion[k][i][j].mid) && !nearZero(depthsPerRegion[k][i][j].far)) {
double fmsum = depthsPerRegion[k][i][j].mid + depthsPerRegion[k][i][j].far;
depthsPerRegion[k][i][j].mid = splitrem * depthsPerRegion[k][i][j].mid / fmsum;
depthsPerRegion[k][i][j].far = splitrem * depthsPerRegion[k][i][j].far / fmsum;
} else if (nearZero(depthsPerRegion[k][i][j].mid) && nearZero(depthsPerRegion[k][i][j].far)) {
depthsPerRegion[k][i][j].mid = splitrem / 2.0;
depthsPerRegion[k][i][j].far = splitrem / 2.0;
} else if (nearZero(depthsPerRegion[k][i][j].mid)) {
depthsPerRegion[k][i][j].far = splitrem;
} else {
depthsPerRegion[k][i][j].mid = splitrem;
}
} else {
depthsPerRegion[k][i][j].mid = 0;
depthsPerRegion[k][i][j].far = 0;
}
}
}
//Adapt the fractions of far depths of every region to match the global requirement of the far depth fraction
updDepthReg(false, depthsPerRegion[k], pars.corrsPerRegion[k]);
//Update the mid depth fractions of each region according to the new near & far depth fractions
for (size_t i = 0; i < 3; i++) {
for (size_t j = 0; j < 3; j++) {
depthsPerRegion[k][i][j].mid = 1.0 - (depthsPerRegion[k][i][j].near + depthsPerRegion[k][i][j].far);
}
}
#if 1
//Now, the sum of mid depth regions should correspond to the global requirement
if(verbose & PRINT_WARNING_MESSAGES) {
double portSum = 0;
for (size_t i = 0; i < 3; i++) {
for (size_t j = 0; j < 3; j++) {
portSum += depthsPerRegion[k][i][j].mid * pars.corrsPerRegion[k].at<double>(i, j);
}
}
double c1 = pars.corrsPerDepth.mid - portSum;
if (!nearZero(c1 / 10.0)) {
cout << "Adaption of depth fractions in regions failed!" << endl;
}
}
#endif
}
}
//Only adapt the fraction of near or far depths per region to the global requirement
void genStereoSequ::updDepthReg(bool isNear, std::vector<std::vector<depthPortion>> &depthPerRegion, cv::Mat &cpr) {
//If isNear=false, it is assumed that the fractions of near depths are already fixed
std::vector<std::vector<double>> oneDepthPerRegion(3, std::vector<double>(3));
std::vector<std::vector<double>> oneDepthPerRegionMaxVal(3, std::vector<double>(3, 1.0));
if (isNear) {
for (size_t i = 0; i < 3; i++) {
for (size_t j = 0; j < 3; j++) {
oneDepthPerRegion[i][j] = depthPerRegion[i][j].near;
}
}
} else {
for (size_t i = 0; i < 3; i++) {
for (size_t j = 0; j < 3; j++) {
oneDepthPerRegion[i][j] = depthPerRegion[i][j].far;
oneDepthPerRegionMaxVal[i][j] = 1.0 - depthPerRegion[i][j].near;
}
}
}
double portSum = 0, c1 = 1.0, dsum = 0, dsum1 = 0;
size_t cnt = 0;
//Mat cpr = pars.corrsPerRegion[k];
while (!nearZero(c1)) {
cnt++;
portSum = 0;
dsum = 0;
dsum1 = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
portSum += oneDepthPerRegion[i][j] * cpr.at<double>(i, j);
dsum += oneDepthPerRegion[i][j];
dsum1 += 1.0 - oneDepthPerRegion[i][j];
}
}
if (isNear)
c1 = pars.corrsPerDepth.near - portSum;
else
c1 = pars.corrsPerDepth.far - portSum;
bool breakit = false;
if (!nearZero(c1)) {
double c12 = 0, c1sum = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
double newval;
if (cnt < 3) {
CV_Assert(!nearZero(dsum));
newval = oneDepthPerRegion[i][j] + c1 * cpr.at<double>(i, j) * oneDepthPerRegion[i][j] / dsum;
} else {
CV_Assert(!nearZero(dsum) && !nearZero(dsum1));
c12 = c1 * cpr.at<double>(i, j) *
(0.75 * oneDepthPerRegion[i][j] / dsum + 0.25 * (1.0 - oneDepthPerRegion[i][j]) / dsum1);
double c1diff = c1 - (c1sum + c12);
if (((c1 > 0) && (c1diff < 0)) ||
((c1 < 0) && (c1diff > 0))) {
c12 = c1 - c1sum;
}
newval = oneDepthPerRegion[i][j] + c12;
}
if (newval > oneDepthPerRegionMaxVal[i][j]) {
c1sum += oneDepthPerRegionMaxVal[i][j] - oneDepthPerRegion[i][j];
oneDepthPerRegion[i][j] = oneDepthPerRegionMaxVal[i][j];
} else if (newval < 0) {
c1sum -= oneDepthPerRegion[i][j];
oneDepthPerRegion[i][j] = 0;
} else {
c1sum += newval - oneDepthPerRegion[i][j];
oneDepthPerRegion[i][j] = newval;
}
if (nearZero(c1sum - c1)) {
breakit = true;
break;
}
}
if (breakit) break;
}
if (breakit) break;
}
}
if (isNear) {
for (size_t i = 0; i < 3; i++) {
for (size_t j = 0; j < 3; j++) {
depthPerRegion[i][j].near = oneDepthPerRegion[i][j];
}
}
} else {
for (size_t i = 0; i < 3; i++) {
for (size_t j = 0; j < 3; j++) {
depthPerRegion[i][j].far = oneDepthPerRegion[i][j];
}
}
}
}
//Check if the given ranges of connected depth areas per image region are correct and initialize them for every definition of depths per image region
void genStereoSequ::checkDepthAreas() {
//
//Below: 9 is the nr of regions, minDArea is the min area and 2*sqrt(minDArea) is the gap between areas;
//size_t maxElems = imgSize.area() / (9 * ((size_t)minDArea + 2 * (size_t)sqrt(minDArea)));
//Below: 9 is the nr of regions; 4 * (minDArea + sqrt(minDArea)) + 1 corresponds to the area using the side length 2*sqrt(minDArea)+1
size_t maxElems = (size_t) std::max(imgSize.area() / (9 * (int) (4 * (minDArea + sqrt(minDArea)) + 1)), 1);
if (pars.nrDepthAreasPReg.empty()) {
pars.nrDepthAreasPReg = std::vector<std::vector<std::pair<size_t, size_t>>>(3,
std::vector<std::pair<size_t, size_t>>(
3));
std::uniform_int_distribution<size_t> distribution(1, maxElems + 1);
for (size_t i = 0; i < 3; i++) {
for (size_t j = 0; j < 3; j++) {
size_t tmp = distribution(rand_gen);
tmp = tmp < 2 ? 2 : tmp;
size_t tmp1 = distribution(rand_gen) % tmp;
tmp1 = tmp1 == 0 ? 1 : tmp1;
pars.nrDepthAreasPReg[i][j] = make_pair(tmp1, tmp);
}
}
} else {
for (size_t i = 0; i < 3; i++) {
for (size_t j = 0; j < 3; j++) {
if (pars.nrDepthAreasPReg[i][j].first == 0) {
pars.nrDepthAreasPReg[i][j].first = 1;
} else if (pars.nrDepthAreasPReg[i][j].first > (maxElems - 1)) {
pars.nrDepthAreasPReg[i][j].first = maxElems - 1;
}
if (pars.nrDepthAreasPReg[i][j].second == 0) {
pars.nrDepthAreasPReg[i][j].second = 1;
} else if (pars.nrDepthAreasPReg[i][j].second > maxElems) {
pars.nrDepthAreasPReg[i][j].second = maxElems;
}
if (pars.nrDepthAreasPReg[i][j].second < pars.nrDepthAreasPReg[i][j].first) {
size_t tmp = pars.nrDepthAreasPReg[i][j].first;
pars.nrDepthAreasPReg[i][j].first = pars.nrDepthAreasPReg[i][j].second;
pars.nrDepthAreasPReg[i][j].second = tmp;
}
}
}
}
//Initialize the numbers for every region and depth definition
nrDepthAreasPRegNear = std::vector<cv::Mat>(depthsPerRegion.size());
nrDepthAreasPRegMid = std::vector<cv::Mat>(depthsPerRegion.size());
nrDepthAreasPRegFar = std::vector<cv::Mat>(depthsPerRegion.size());
for (size_t i = 0; i < depthsPerRegion.size(); i++) {
nrDepthAreasPRegNear[i] = Mat::ones(3, 3, CV_32SC1);
nrDepthAreasPRegMid[i] = Mat::ones(3, 3, CV_32SC1);
nrDepthAreasPRegFar[i] = Mat::ones(3, 3, CV_32SC1);
}
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
if (pars.nrDepthAreasPReg[y][x].second < 4) {
for (size_t i = 0; i < depthsPerRegion.size(); i++) {
if (!nearZero(depthsPerRegion[i][y][x].near) &&
!nearZero(depthsPerRegion[i][y][x].mid) &&
!nearZero(depthsPerRegion[i][y][x].far)) {
continue;//1 remains in every element
} else {
int cnt = (int) pars.nrDepthAreasPReg[y][x].second;
int tmp = -10;
nrDepthAreasPRegNear[i].at<int32_t>(y, x) = 0;
nrDepthAreasPRegMid[i].at<int32_t>(y, x) = 0;
nrDepthAreasPRegFar[i].at<int32_t>(y, x) = 0;
bool lockdistr[3] = {true, true, true};
while (cnt > 0) {
if (!nearZero(depthsPerRegion[i][y][x].near) && lockdistr[0]) {
cnt--;
nrDepthAreasPRegNear[i].at<int32_t>(y, x)++;
}
if (!nearZero(depthsPerRegion[i][y][x].mid) && lockdistr[1]) {
cnt--;
nrDepthAreasPRegMid[i].at<int32_t>(y, x)++;
}
if (!nearZero(depthsPerRegion[i][y][x].far) && lockdistr[2]) {
cnt--;
nrDepthAreasPRegFar[i].at<int32_t>(y, x)++;
}
if ((cnt > 0) && (tmp == -10)) {
if ((pars.nrDepthAreasPReg[y][x].second - pars.nrDepthAreasPReg[y][x].first) != 0) {
tmp = cnt - (int) pars.nrDepthAreasPReg[y][x].second;
tmp += (int)(pars.nrDepthAreasPReg[y][x].first + (rand2() %
(pars.nrDepthAreasPReg[y][x].second -
pars.nrDepthAreasPReg[y][x].first +
1)));
cnt = tmp;
}
if (cnt > 0) {
if (!(!nearZero(depthsPerRegion[i][y][x].near) &&
((depthsPerRegion[i][y][x].near > depthsPerRegion[i][y][x].mid) ||
(depthsPerRegion[i][y][x].near > depthsPerRegion[i][y][x].far)))) {
lockdistr[0] = false;
}
if (!(!nearZero(depthsPerRegion[i][y][x].mid) &&
((depthsPerRegion[i][y][x].mid > depthsPerRegion[i][y][x].near) ||
(depthsPerRegion[i][y][x].mid > depthsPerRegion[i][y][x].far)))) {
lockdistr[1] = false;
}
if (!(!nearZero(depthsPerRegion[i][y][x].far) &&
((depthsPerRegion[i][y][x].far > depthsPerRegion[i][y][x].near) ||
(depthsPerRegion[i][y][x].far > depthsPerRegion[i][y][x].mid)))) {
lockdistr[2] = false;
}
}
}
}
}
}
} else {
for (size_t i = 0; i < depthsPerRegion.size(); i++) {
int nra = (int)pars.nrDepthAreasPReg[y][x].first + ((int)(rand2() % INT_MAX) % ((int)pars.nrDepthAreasPReg[y][x].second -
(int)pars.nrDepthAreasPReg[y][x].first +
1));
int32_t maxAPReg[3];
double maxAPRegd[3];
maxAPRegd[0] = depthsPerRegion[i][y][x].near * (double) nra;
maxAPRegd[1] = depthsPerRegion[i][y][x].mid * (double) nra;
maxAPRegd[2] = depthsPerRegion[i][y][x].far * (double) nra;
maxAPReg[0] = (int32_t) round(maxAPRegd[0]);
maxAPReg[1] = (int32_t) round(maxAPRegd[1]);
maxAPReg[2] = (int32_t) round(maxAPRegd[2]);
int32_t diffap = (int32_t) nra - (maxAPReg[0] + maxAPReg[1] + maxAPReg[2]);
if (diffap != 0) {
maxAPRegd[0] -= (double) maxAPReg[0];
maxAPRegd[1] -= (double) maxAPReg[1];
maxAPRegd[2] -= (double) maxAPReg[2];
if (diffap < 0) {
int cnt = 0;
std::ptrdiff_t pdiff = min_element(maxAPRegd, maxAPRegd + 3) - maxAPRegd;
while ((diffap < 0) && (cnt < 3)) {
if (maxAPReg[pdiff] > 1) {
maxAPReg[pdiff]--;
diffap++;
}
if (diffap < 0) {
if ((maxAPReg[(pdiff + 1) % 3] > 1) &&
(maxAPRegd[(pdiff + 1) % 3] <= maxAPRegd[(pdiff + 2) % 3])) {
maxAPReg[(pdiff + 1) % 3]--;
diffap++;
} else if ((maxAPReg[(pdiff + 2) % 3] > 1) &&
(maxAPRegd[(pdiff + 2) % 3] < maxAPRegd[(pdiff + 1) % 3])) {
maxAPReg[(pdiff + 2) % 3]--;
diffap++;
}
}
cnt++;
}
} else {
std::ptrdiff_t pdiff = max_element(maxAPRegd, maxAPRegd + 3) - maxAPRegd;
while (diffap > 0) {
maxAPReg[pdiff]++;
diffap--;
if (diffap > 0) {
if (maxAPRegd[(pdiff + 1) % 3] >= maxAPRegd[(pdiff + 2) % 3]) {
maxAPReg[(pdiff + 1) % 3]++;
diffap--;
} else {
maxAPReg[(pdiff + 2) % 3]++;
diffap--;
}
}
}
}
}
nrDepthAreasPRegNear[i].at<int32_t>(y, x) = maxAPReg[0];
nrDepthAreasPRegMid[i].at<int32_t>(y, x) = maxAPReg[1];
nrDepthAreasPRegFar[i].at<int32_t>(y, x) = maxAPReg[2];
if (!nearZero(depthsPerRegion[i][y][x].near) && (maxAPReg[0] == 0)) {
nrDepthAreasPRegNear[i].at<int32_t>(y, x)++;
}
if (!nearZero(depthsPerRegion[i][y][x].mid) && (maxAPReg[1] == 0)) {
nrDepthAreasPRegMid[i].at<int32_t>(y, x)++;
}
if (!nearZero(depthsPerRegion[i][y][x].far) && (maxAPReg[2] == 0)) {
nrDepthAreasPRegFar[i].at<int32_t>(y, x)++;
}
}
}
}
}
}
//Calculate the area in pixels for every depth and region
void genStereoSequ::calcPixAreaPerDepth() {
// int32_t regArea = (int32_t) imgSize.area() / 9;
areaPRegNear.resize(depthsPerRegion.size());
areaPRegMid.resize(depthsPerRegion.size());
areaPRegFar.resize(depthsPerRegion.size());
for (size_t i = 0; i < depthsPerRegion.size(); i++) {
areaPRegNear[i] = Mat::zeros(3, 3, CV_32SC1);
areaPRegMid[i] = Mat::zeros(3, 3, CV_32SC1);
areaPRegFar[i] = Mat::zeros(3, 3, CV_32SC1);
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
int32_t tmp[3] = {0, 0, 0};
int regArea = regROIs[y][x].area();
tmp[0] = (int32_t) round(depthsPerRegion[i][y][x].near * (double) regArea);
if ((tmp[0] != 0) && (tmp[0] < minDArea))
tmp[0] = minDArea;
tmp[1] = (int32_t) round(depthsPerRegion[i][y][x].mid * (double) regArea);
if ((tmp[1] != 0) && (tmp[1] < minDArea))
tmp[1] = minDArea;
tmp[2] = (int32_t) round(depthsPerRegion[i][y][x].far * (double) regArea);
if ((tmp[2] != 0) && (tmp[2] < minDArea))
tmp[2] = minDArea;
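//If rounding caused a mismatch, let the largest of the three areas absorb the difference so that near + mid + far exactly equals the region area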
if ((tmp[0] + tmp[1] + tmp[2]) != regArea) {
std::ptrdiff_t pdiff = max_element(tmp, tmp + 3) - tmp;
tmp[pdiff] = regArea - tmp[(pdiff + 1) % 3] - tmp[(pdiff + 2) % 3];
}
areaPRegNear[i].at<int32_t>(y, x) = tmp[0];
areaPRegMid[i].at<int32_t>(y, x) = tmp[1];
areaPRegFar[i].at<int32_t>(y, x) = tmp[2];
}
}
}
}
/*Backproject 3D points (generated in one or more previous frames) that might be visible from the
current stereo rig position onto both stereo image planes and check whether they are visible or
produce outliers in the first or second stereo image.
*/
void genStereoSequ::backProject3D() {
if (!actCorrsImg2TNFromLast.empty())
actCorrsImg2TNFromLast.release();
if (!actCorrsImg2TNFromLast_Idx.empty())
actCorrsImg2TNFromLast_Idx.clear();
if (!actCorrsImg1TNFromLast.empty())
actCorrsImg1TNFromLast.release();
if (!actCorrsImg1TNFromLast_Idx.empty())
actCorrsImg1TNFromLast_Idx.clear();
if (!actCorrsImg1TPFromLast.empty())
actCorrsImg1TPFromLast.release();
if (!actCorrsImg2TPFromLast.empty())
actCorrsImg2TPFromLast.release();
if (!actCorrsImg12TPFromLast_Idx.empty())
actCorrsImg12TPFromLast_Idx.clear();
if (actImgPointCloudFromLast.empty())
return;
struct imgWH {
double width;
double height;
double maxDist;
} dimgWH = {0,0,0};
dimgWH.width = (double) (imgSize.width - 1);
dimgWH.height = (double) (imgSize.height - 1);
dimgWH.maxDist = maxFarDistMultiplier * actDepthFar;
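//Points closer than the near threshold or farther than maxFarDistMultiplier times the far threshold are discarded below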
std::vector<cv::Point3d> actImgPointCloudFromLast_tmp;
vector<int> actCorrsImg12TPFromLast_IdxWorld_tmp;
size_t idx1 = 0;
for (auto i = 0; i < (int)actImgPointCloudFromLast.size(); ++i) {
cv::Point3d &pt = actImgPointCloudFromLast[i];
if ((pt.z < actDepthNear) ||
(pt.z > dimgWH.maxDist)) {
continue;
}
Mat X = Mat(pt, false).reshape(1, 3);
Mat x1 = K1 * X;
if(nearZero(x1.at<double>(2))) continue;
x1 /= x1.at<double>(2);
bool outOfR[2] = {false, false};
//Check if the point is within the area of a moving object
Point x1r = Point((int) round(x1.at<double>(0)), (int) round(x1.at<double>(1)));
if ((x1r.x < 0) || (x1r.x > dimgWH.width) || (x1r.y < 0) || (x1r.y > dimgWH.height)) {
outOfR[0] = true;
} else if (combMovObjLabelsAll.at<unsigned char>(x1r.y, x1r.x) > 0)
continue;
if ((x1.at<double>(0) < 0) || (x1.at<double>(0) > dimgWH.width) ||
(x1.at<double>(1) < 0) || (x1.at<double>(1) > dimgWH.height))//Not visible in first image
{
outOfR[0] = true;
}
Mat x2 = K2 * (actR * X + actT);
if(nearZero(x2.at<double>(2))) continue;
x2 /= x2.at<double>(2);
if ((x2.at<double>(0) < 0) || (x2.at<double>(0) > dimgWH.width) ||
(x2.at<double>(1) < 0) || (x2.at<double>(1) > dimgWH.height))//Not visible in second image
{
outOfR[1] = true;
}
//Check if the point is within the area of a moving object in the second image
Point x2r = Point((int) round(x2.at<double>(0)), (int) round(x2.at<double>(1)));
if ((x2r.x < 0) || (x2r.x > dimgWH.width) || (x2r.y < 0) || (x2r.y > dimgWH.height)) {
outOfR[1] = true;
} else if (movObjMask2All.at<unsigned char>(x2r.y, x2r.x) > 0)
outOfR[1] = true;
if (outOfR[0] && outOfR[1]) {
continue;
} else if (outOfR[0]) {
actCorrsImg2TNFromLast.push_back(x2.t());
actCorrsImg2TNFromLast_Idx.push_back(idx1);
} else if (outOfR[1]) {
actCorrsImg1TNFromLast.push_back(x1.t());
actCorrsImg1TNFromLast_Idx.push_back(idx1);
} else {
actCorrsImg1TPFromLast.push_back(x1.t());
actCorrsImg2TPFromLast.push_back(x2.t());
actCorrsImg12TPFromLast_Idx.push_back(idx1);
}
actImgPointCloudFromLast_tmp.push_back(pt);
actCorrsImg12TPFromLast_IdxWorld_tmp.push_back(actCorrsImg12TPFromLast_IdxWorld[i]);
idx1++;
}
actImgPointCloudFromLast = actImgPointCloudFromLast_tmp;
actCorrsImg12TPFromLast_IdxWorld = actCorrsImg12TPFromLast_IdxWorld_tmp;
if (!actCorrsImg1TNFromLast.empty())
actCorrsImg1TNFromLast = actCorrsImg1TNFromLast.t();
if (!actCorrsImg2TNFromLast.empty())
actCorrsImg2TNFromLast = actCorrsImg2TNFromLast.t();
if (!actCorrsImg1TPFromLast.empty()) {
actCorrsImg1TPFromLast = actCorrsImg1TPFromLast.t();
actCorrsImg2TPFromLast = actCorrsImg2TPFromLast.t();
}
}
//Generate seeds for growing depth areas and include the seeds obtained by backprojecting the 3D points of previous frames
void genStereoSequ::checkDepthSeeds() {
seedsNear = std::vector<std::vector<std::vector<cv::Point3_<int32_t>>>>(3,
std::vector<std::vector<cv::Point3_<int32_t>>>(
3));
seedsMid = std::vector<std::vector<std::vector<cv::Point3_<int32_t>>>>(3,
std::vector<std::vector<cv::Point3_<int32_t>>>(
3));
seedsFar = std::vector<std::vector<std::vector<cv::Point3_<int32_t>>>>(3,
std::vector<std::vector<cv::Point3_<int32_t>>>(
3));
//Generate a mask for marking used areas in the first stereo image
corrsIMG = Mat::zeros(imgSize.height + csurr.cols - 1, imgSize.width + csurr.rows - 1, CV_8UC1);
int posadd1 = max((int) ceil(pars.minKeypDist), (int) sqrt(minDArea));
int sqrSi1 = 2 * posadd1;
cv::Mat filtInitPts = Mat::zeros(imgSize.height + sqrSi1, imgSize.width + sqrSi1, CV_8UC1);
sqrSi1++;
Mat csurr1 = Mat::ones(sqrSi1, sqrSi1, CV_8UC1);
//int maxSum1 = sqrSi1 * sqrSi1;
cv::Size regSi = Size(imgSize.width / 3, imgSize.height / 3);
if (!actCorrsImg1TPFromLast.empty())//Take seeding positions from backprojected coordinates
{
std::vector<cv::Point3_<int32_t>> seedsNear_tmp, seedsNear_tmp1;
std::vector<cv::Point3_<int32_t>> seedsMid_tmp, seedsMid_tmp1;
std::vector<cv::Point3_<int32_t>> seedsFar_tmp, seedsFar_tmp1;
//Identify depth categories
for (size_t i = 0; i < actCorrsImg12TPFromLast_Idx.size(); i++) {
if (actImgPointCloudFromLast[actCorrsImg12TPFromLast_Idx[i]].z >= actDepthFar) {
seedsFar_tmp.emplace_back(cv::Point3_<int32_t>((int32_t) round(actCorrsImg1TPFromLast.at<double>(0, (int)i)),
(int32_t) round(actCorrsImg1TPFromLast.at<double>(1, (int)i)),
(int32_t) i));
} else if (actImgPointCloudFromLast[actCorrsImg12TPFromLast_Idx[i]].z >= actDepthMid) {
seedsMid_tmp.emplace_back(cv::Point3_<int32_t>((int32_t) round(actCorrsImg1TPFromLast.at<double>(0, (int)i)),
(int32_t) round(actCorrsImg1TPFromLast.at<double>(1, (int)i)),
(int32_t) i));
} else {
seedsNear_tmp.emplace_back(cv::Point3_<int32_t>((int32_t) round(actCorrsImg1TPFromLast.at<double>(0, (int)i)),
(int32_t) round(actCorrsImg1TPFromLast.at<double>(1, (int)i)),
(int32_t) i));
}
}
//Check if the seeds are too near to each other
int posadd = max((int) ceil(pars.minKeypDist), 1);
int sqrSi = 2 * posadd;
//cv::Mat filtInitPts = Mat::zeros(imgSize.width + sqrSi, imgSize.height + sqrSi, CV_8UC1);
sqrSi++;//sqrSi = 2 * (int)floor(pars.minKeypDist) + 1;
//csurr = Mat::ones(sqrSi, sqrSi, CV_8UC1);
//int maxSum = sqrSi * sqrSi;
int sqrSiDiff2 = (sqrSi1 - sqrSi) / 2;
int hlp2 = sqrSi + sqrSiDiff2;
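//sqrSiDiff2 centers the smaller keypoint-distance patch (csurr, side length sqrSi) within the padding used for the larger seed patch (csurr1, side length sqrSi1) inside filtInitPts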
vector<size_t> delListCorrs, delList3D;
if (!seedsNear_tmp.empty()) {
for (auto& i : seedsNear_tmp) {
Mat s_tmp = filtInitPts(Range(i.y + sqrSiDiff2, i.y + hlp2),
Range(i.x + sqrSiDiff2, i.x + hlp2));
if (s_tmp.at<unsigned char>(posadd, posadd) > 0) {
delListCorrs.push_back((size_t) i.z);
delList3D.push_back(actCorrsImg12TPFromLast_Idx[delListCorrs.back()]);
continue;
}
// csurr.copyTo(s_tmp);
s_tmp += csurr;
seedsNear_tmp1.push_back(i);
}
}
if (!seedsMid_tmp.empty()) {
for (auto& i : seedsMid_tmp) {
Mat s_tmp = filtInitPts(Range(i.y + sqrSiDiff2, i.y + hlp2),
Range(i.x + sqrSiDiff2, i.x + hlp2));
if (s_tmp.at<unsigned char>(posadd, posadd) > 0) {
delListCorrs.push_back((size_t) i.z);
delList3D.push_back(actCorrsImg12TPFromLast_Idx[delListCorrs.back()]);
continue;
}
// csurr.copyTo(s_tmp);
s_tmp += csurr;
seedsMid_tmp1.push_back(i);
}
}
if (!seedsFar_tmp.empty()) {
for (auto& i : seedsFar_tmp) {
Mat s_tmp = filtInitPts(Range(i.y + sqrSiDiff2, i.y + hlp2),
Range(i.x + sqrSiDiff2, i.x + hlp2));
if (s_tmp.at<unsigned char>(posadd, posadd) > 0) {
delListCorrs.push_back((size_t) i.z);
delList3D.push_back(actCorrsImg12TPFromLast_Idx[delListCorrs.back()]);
continue;
}
// csurr.copyTo(s_tmp);
s_tmp += csurr;
seedsFar_tmp1.push_back(i);
}
}
filtInitPts(Rect(sqrSiDiff2, sqrSiDiff2, imgSize.width + 2 * posadd, imgSize.height + 2 * posadd)).copyTo(
corrsIMG);
//Check if seedsNear only holds near distances
/*for (int j = 0; j < seedsNear_tmp1.size(); ++j) {
if(actImgPointCloudFromLast[actCorrsImg12TPFromLast_Idx[seedsNear_tmp1[j].z]].z >= actDepthMid){
cout << "Wrong distance!" << endl;
}
}
//Check if seedsMid only holds mid distances
for (int j = 0; j < seedsMid_tmp1.size(); ++j) {
if((actImgPointCloudFromLast[actCorrsImg12TPFromLast_Idx[seedsMid_tmp1[j].z]].z < actDepthMid) ||
(actImgPointCloudFromLast[actCorrsImg12TPFromLast_Idx[seedsMid_tmp1[j].z]].z >= actDepthFar)){
cout << "Wrong distance!" << endl;
}
}
//Check if seedsFar only holds far distances
for (int j = 0; j < seedsFar_tmp1.size(); ++j) {
if(actImgPointCloudFromLast[actCorrsImg12TPFromLast_Idx[seedsFar_tmp1[j].z]].z < actDepthFar){
cout << "Wrong distance!" << endl;
}
}*/
//Delete correspondences and 3D points that were too near to each other in the image
if (!delListCorrs.empty()) {
// if(!checkCorr3DConsistency()){
// throw SequenceException("Correspondences are not projections of 3D points!");
// }
/*std::vector<cv::Point3d> actImgPointCloudFromLast_tmp;
cv::Mat actCorrsImg1TPFromLast_tmp, actCorrsImg2TPFromLast_tmp;*/
sort(delList3D.begin(), delList3D.end(),
[](size_t first, size_t second) { return first < second; });//Ascending order
if (!actCorrsImg1TNFromLast_Idx.empty())//Adapt the indices for TN (single keypoints without a match)
{
adaptIndicesNoDel(actCorrsImg1TNFromLast_Idx, delList3D);
}
if (!actCorrsImg2TNFromLast_Idx.empty())//Adapt the indices for TN (single keypoints without a match)
{
adaptIndicesNoDel(actCorrsImg2TNFromLast_Idx, delList3D);
}
adaptIndicesNoDel(actCorrsImg12TPFromLast_Idx, delList3D);
deleteVecEntriesbyIdx(actImgPointCloudFromLast, delList3D);
deleteVecEntriesbyIdx(actCorrsImg12TPFromLast_IdxWorld, delList3D);
sort(delListCorrs.begin(), delListCorrs.end(), [](size_t first, size_t second) { return first < second; });
if (!seedsNear_tmp1.empty())
adaptIndicesCVPtNoDel(seedsNear_tmp1, delListCorrs);
if (!seedsMid_tmp1.empty())
adaptIndicesCVPtNoDel(seedsMid_tmp1, delListCorrs);
if (!seedsFar_tmp1.empty())
adaptIndicesCVPtNoDel(seedsFar_tmp1, delListCorrs);
deleteVecEntriesbyIdx(actCorrsImg12TPFromLast_Idx, delListCorrs);
deleteMatEntriesByIdx(actCorrsImg1TPFromLast, delListCorrs, false);
deleteMatEntriesByIdx(actCorrsImg2TPFromLast, delListCorrs, false);
// if(!checkCorr3DConsistency()){
// throw SequenceException("Correspondences are not projections of 3D points!");
// }
}
//Add the seeds to their regions
for (auto& i : seedsNear_tmp1) {
int32_t ix = i.x / regSi.width;
ix = (ix > 2) ? 2 : ix;
int32_t iy = i.y / regSi.height;
iy = (iy > 2) ? 2 : iy;
seedsNear[iy][ix].push_back(i);
}
//Add the seeds to their regions
for (auto& i : seedsMid_tmp1) {
int32_t ix = i.x / regSi.width;
ix = (ix > 2) ? 2 : ix;
int32_t iy = i.y / regSi.height;
iy = (iy > 2) ? 2 : iy;
seedsMid[iy][ix].push_back(i);
}
//Add the seeds to their regions
for (auto& i : seedsFar_tmp1) {
int32_t ix = i.x / regSi.width;
ix = (ix > 2) ? 2 : ix;
int32_t iy = i.y / regSi.height;
iy = (iy > 2) ? 2 : iy;
seedsFar[iy][ix].push_back(i);
}
}
//Check if seedsNear only holds near distances
for (int l = 0; l < 3; ++l) {
for (int i = 0; i < 3; ++i) {
for (auto& j : seedsNear[l][i]) {
if (actImgPointCloudFromLast[actCorrsImg12TPFromLast_Idx[j.z]].z >= actDepthMid) {
throw SequenceException(
"Seeding depth of backprojected static 3D points in the category near is out of range!");
}
}
}
}
//Check if seedsMid only holds mid distances
for (int l = 0; l < 3; ++l) {
for (int i = 0; i < 3; ++i) {
for (auto& j : seedsMid[l][i]) {
if ((actImgPointCloudFromLast[actCorrsImg12TPFromLast_Idx[j.z]].z < actDepthMid) ||
(actImgPointCloudFromLast[actCorrsImg12TPFromLast_Idx[j.z]].z >= actDepthFar)) {
throw SequenceException(
"Seeding depth of backprojected static 3D points in the category mid is out of range!");
}
}
}
}
//Check if seedsFar only holds far distances
for (int l = 0; l < 3; ++l) {
for (int i = 0; i < 3; ++i) {
for (auto& j : seedsFar[l][i]) {
if (actImgPointCloudFromLast[actCorrsImg12TPFromLast_Idx[j.z]].z < actDepthFar) {
throw SequenceException(
"Seeding depth of backprojected static 3D points in the category far is out of range!");
}
}
}
}
//Generate new seeds
Point3_<int32_t> pt;
pt.z = -1;
for (int32_t y = 0; y < 3; y++) {
int32_t mmy[2];
mmy[0] = y * regSi.height;
mmy[1] = mmy[0] + regSi.height - 1;
std::uniform_int_distribution<int32_t> distributionY(mmy[0], mmy[1]);
for (int32_t x = 0; x < 3; x++) {
int32_t mmx[2];
mmx[0] = x * regSi.width;
mmx[1] = mmx[0] + regSi.width - 1;
std::uniform_int_distribution<int32_t> distributionX(mmx[0], mmx[1]);
int32_t diffNr = nrDepthAreasPRegNear[actCorrsPRIdx].at<int32_t>(y, x) - (int32_t) seedsNear[y][x].size();
uint32_t it_cnt = 0;
const uint32_t max_it_cnt = 500000;
while ((diffNr > 0) && (it_cnt < max_it_cnt))//Generate seeds for near depth areas
{
pt.x = distributionX(rand_gen);
pt.y = distributionY(rand_gen);
Mat s_tmp = filtInitPts(Range(pt.y, pt.y + sqrSi1), Range(pt.x, pt.x + sqrSi1));
if (s_tmp.at<unsigned char>(posadd1, posadd1) > 0) {
it_cnt++;
continue;
} else {
csurr1.copyTo(s_tmp);
seedsNear[y][x].push_back(pt);
diffNr--;
}
}
if (diffNr > 0){
nrDepthAreasPRegNear[actCorrsPRIdx].at<int32_t>(y, x) -= diffNr;
cout << "Unable to reach desired number of near depth areas in region ("
<< y << ", " << x << "). " << diffNr << " areas cannot be assigned." << endl;
}
it_cnt = 0;
diffNr = nrDepthAreasPRegMid[actCorrsPRIdx].at<int32_t>(y, x) - (int32_t) seedsMid[y][x].size();
while ((diffNr > 0) && (it_cnt < max_it_cnt))//Generate seeds for mid depth areas
{
pt.x = distributionX(rand_gen);
pt.y = distributionY(rand_gen);
Mat s_tmp = filtInitPts(Range(pt.y, pt.y + sqrSi1), Range(pt.x, pt.x + sqrSi1));
if (s_tmp.at<unsigned char>(posadd1, posadd1) > 0) {
it_cnt++;
continue;
} else {
csurr1.copyTo(s_tmp);
seedsMid[y][x].push_back(pt);
diffNr--;
}
}
if (diffNr > 0){
nrDepthAreasPRegMid[actCorrsPRIdx].at<int32_t>(y, x) -= diffNr;
cout << "Unable to reach desired number of mid depth areas in region ("
<< y << ", " << x << "). " << diffNr << " areas cannot be assigned." << endl;
}
it_cnt = 0;
diffNr = nrDepthAreasPRegFar[actCorrsPRIdx].at<int32_t>(y, x) - (int32_t) seedsFar[y][x].size();
while ((diffNr > 0) && (it_cnt < max_it_cnt))//Generate seeds for far depth areas
{
pt.x = distributionX(rand_gen);
pt.y = distributionY(rand_gen);
Mat s_tmp = filtInitPts(Range(pt.y, pt.y + sqrSi1), Range(pt.x, pt.x + sqrSi1));
if (s_tmp.at<unsigned char>(posadd1, posadd1) > 0) {
it_cnt++;
continue;
} else {
csurr1.copyTo(s_tmp);
seedsFar[y][x].push_back(pt);
diffNr--;
}
}
if (diffNr > 0){
nrDepthAreasPRegFar[actCorrsPRIdx].at<int32_t>(y, x) -= diffNr;
cout << "Unable to reach desired number of far depth areas in region ("
<< y << ", " << x << "). " << diffNr << " areas cannot be assigned." << endl;
}
}
}
//Check if seedsNear only holds near distances
/*for (int l = 0; l < 3; ++l) {
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < seedsNear[l][i].size(); ++j) {
if(seedsNear[l][i][j].z >= 0) {
if (actImgPointCloudFromLast[actCorrsImg12TPFromLast_Idx[seedsNear[l][i][j].z]].z >= actDepthMid) {
cout << "Wrong distance!" << endl;
}
}
}
}
}
//Check if seedsMid only holds mid distances
for (int l = 0; l < 3; ++l) {
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < seedsMid[l][i].size(); ++j) {
if(seedsMid[l][i][j].z >= 0) {
if ((actImgPointCloudFromLast[actCorrsImg12TPFromLast_Idx[seedsMid[l][i][j].z]].z < actDepthMid) ||
(actImgPointCloudFromLast[actCorrsImg12TPFromLast_Idx[seedsMid[l][i][j].z]].z >= actDepthFar)) {
cout << "Wrong distance!" << endl;
}
}
}
}
}
//Check if seedsFar only holds far distances
for (int l = 0; l < 3; ++l) {
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < seedsFar[l][i].size(); ++j) {
if(seedsFar[l][i][j].z >= 0) {
if (actImgPointCloudFromLast[actCorrsImg12TPFromLast_Idx[seedsFar[l][i][j].z]].z < actDepthFar) {
cout << "Wrong distance!" << endl;
}
}
}
}
}*/
//Get distances between neighboring seeds
SeedCloud sc = {nullptr, nullptr, nullptr};
sc.seedsNear = &seedsNear;
sc.seedsMid = &seedsMid;
sc.seedsFar = &seedsFar;
// construct a kd-tree index:
typedef nanoflann::KDTreeSingleIndexAdaptor<
nanoflann::L2_Simple_Adaptor<int32_t, SeedCloud>,
SeedCloud,
2 /* dim */
> my_kd_tree_t;
my_kd_tree_t index(2 /*dim*/, sc, nanoflann::KDTreeSingleIndexAdaptorParams(10 /* max leaf */));
index.buildIndex();
size_t num_results_in = 2, num_results_out = 0;
std::vector<size_t> ret_index(num_results_in);
std::vector<int32_t> out_dist_sqr(num_results_in);
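//For every seed, query its two nearest neighbours over all seeds (any depth category): result 0 is the seed itself, result 1 the closest other seed. If fewer results are returned (only one seed exists), the distance to the farthest region border is used instead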
seedsNearNNDist = std::vector<std::vector<std::vector<int32_t>>>(3, std::vector<std::vector<int32_t>>(3));
for (int32_t j = 0; j < 3; ++j) {
for (int32_t i = 0; i < 3; ++i) {
seedsNearNNDist[j][i] = std::vector<int32_t>(seedsNear[j][i].size());
for (size_t k = 0; k < seedsNear[j][i].size(); ++k) {
num_results_out = index.knnSearch(&seedsNear[j][i][k].x, num_results_in, &ret_index[0],
&out_dist_sqr[0]);
if (num_results_out == num_results_in) {
seedsNearNNDist[j][i][k] = (int32_t)floor(sqrt(out_dist_sqr[1]));
} else {
int32_t negdistx = seedsNear[j][i][k].x - i * regSi.width;
int32_t posdistx = (i + 1) * regSi.width - seedsNear[j][i][k].x;
int32_t negdisty = seedsNear[j][i][k].y - j * regSi.height;
int32_t posdisty = (j + 1) * regSi.height - seedsNear[j][i][k].y;
seedsNearNNDist[j][i][k] = max(negdistx, max(posdistx, max(negdisty, posdisty)));
}
}
}
}
seedsMidNNDist = std::vector<std::vector<std::vector<int32_t>>>(3, std::vector<std::vector<int32_t>>(3));
for (int32_t j = 0; j < 3; ++j) {
for (int32_t i = 0; i < 3; ++i) {
seedsMidNNDist[j][i] = std::vector<int32_t>(seedsMid[j][i].size());
for (size_t k = 0; k < seedsMid[j][i].size(); ++k) {
num_results_out = index.knnSearch(&seedsMid[j][i][k].x, num_results_in, &ret_index[0],
&out_dist_sqr[0]);
if (num_results_out == num_results_in) {
seedsMidNNDist[j][i][k] = (int32_t)floor(sqrt(out_dist_sqr[1]));
} else {
int32_t negdistx = seedsMid[j][i][k].x - i * regSi.width;
int32_t posdistx = (i + 1) * regSi.width - seedsMid[j][i][k].x;
int32_t negdisty = seedsMid[j][i][k].y - j * regSi.height;
int32_t posdisty = (j + 1) * regSi.height - seedsMid[j][i][k].y;
seedsMidNNDist[j][i][k] = max(negdistx, max(posdistx, max(negdisty, posdisty)));
}
}
}
}
seedsFarNNDist = std::vector<std::vector<std::vector<int32_t>>>(3, std::vector<std::vector<int32_t>>(3));
for (int32_t j = 0; j < 3; ++j) {
for (int32_t i = 0; i < 3; ++i) {
seedsFarNNDist[j][i] = std::vector<int32_t>(seedsFar[j][i].size());
for (size_t k = 0; k < seedsFar[j][i].size(); ++k) {
num_results_out = index.knnSearch(&seedsFar[j][i][k].x, num_results_in, &ret_index[0],
&out_dist_sqr[0]);
if (num_results_out == num_results_in) {
seedsFarNNDist[j][i][k] = (int32_t)floor(sqrt(out_dist_sqr[1]));
} else {
int32_t negdistx = seedsFar[j][i][k].x - i * regSi.width;
int32_t posdistx = (i + 1) * regSi.width - seedsFar[j][i][k].x;
int32_t negdisty = seedsFar[j][i][k].y - j * regSi.height;
int32_t posdisty = (j + 1) * regSi.height - seedsFar[j][i][k].y;
seedsFarNNDist[j][i][k] = max(negdistx, max(posdistx, max(negdisty, posdisty)));
}
}
}
}
}
//Wrapper around adaptIndicesNoDel for seed vectors of type cv::Point3_<int32_t> (the index is stored in the z component)
void genStereoSequ::adaptIndicesCVPtNoDel(std::vector<cv::Point3_<int32_t>> &seedVec,
std::vector<size_t> &delListSortedAsc) {
std::vector<size_t> seedVecIdx;
seedVecIdx.reserve(seedVec.size());
for (auto &sV : seedVec) {
seedVecIdx.push_back((size_t) sV.z);
}
adaptIndicesNoDel(seedVecIdx, delListSortedAsc);
for (size_t i = 0; i < seedVecIdx.size(); i++) {
seedVec[i].z = (int32_t)seedVecIdx[i];
}
}
//Adapt the indices of a non-contiguous index vector after a part of the target data was deleted (none of the data points the remaining indices refer to were deleted).
void genStereoSequ::adaptIndicesNoDel(std::vector<size_t> &idxVec, std::vector<size_t> &delListSortedAsc) {
std::vector<pair<size_t, size_t>> idxVec_tmp(idxVec.size());
for (size_t i = 0; i < idxVec.size(); i++) {
idxVec_tmp[i] = make_pair(idxVec[i], i);
}
sort(idxVec_tmp.begin(), idxVec_tmp.end(),
[](pair<size_t, size_t> first, pair<size_t, size_t> second) { return first.first < second.first; });
size_t idx = 0;
size_t maxIdx = delListSortedAsc.size() - 1;
for (auto& i : idxVec_tmp) {
if (idx <= maxIdx) {
if (i.first < delListSortedAsc[idx]) {
i.first -= idx;
} else {
while ((idx <= maxIdx) && (i.first > delListSortedAsc[idx])) {
idx++;
}
i.first -= idx;
}
} else {
i.first -= idx;
}
}
sort(idxVec_tmp.begin(), idxVec_tmp.end(),
[](pair<size_t, size_t> first, pair<size_t, size_t> second) { return first.second < second.second; });
for (size_t i = 0; i < idxVec_tmp.size(); i++) {
idxVec[i] = idxVec_tmp[i].first;
}
}
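//Delete nrToDel backprojected true positive correspondences (given by their seeds in seedsFromLast) together with their 3D points, remove their footprint from the keypoint-distance mask corrsIMG, and adapt all affected index vectors. Returns the number of actually deleted correspondences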
int genStereoSequ::deleteBackProjTPByDepth(std::vector<cv::Point_<int32_t>> &seedsFromLast,
int32_t nrToDel){
std::vector<size_t> delListCorrs, delList3D;
int actDelNr = deletedepthCatsByNr(seedsFromLast, nrToDel, actCorrsImg1TPFromLast, delListCorrs);
int32_t kSi = csurr.rows;
delList3D.reserve(delListCorrs.size());
for(auto &i: delListCorrs){
delList3D.push_back(actCorrsImg12TPFromLast_Idx[i]);
Point pt = Point((int)round(actCorrsImg1TPFromLast.at<double>(0,i)),
(int)round(actCorrsImg1TPFromLast.at<double>(1,i)));
Mat s_tmp = corrsIMG(Rect(pt, Size(kSi, kSi)));
s_tmp -= csurr;
}
if (!delListCorrs.empty()) {
// if(!checkCorr3DConsistency()){
// throw SequenceException("Correspondences are not projections of 3D points!");
// }
sort(delList3D.begin(), delList3D.end(),
[](size_t first, size_t second) { return first < second; });//Ascending order
if (!actCorrsImg1TNFromLast_Idx.empty())//Adapt the indices for TN (single keypoints without a match)
{
adaptIndicesNoDel(actCorrsImg1TNFromLast_Idx, delList3D);
}
if (!actCorrsImg2TNFromLast_Idx.empty())//Adapt the indices for TN (single keypoints without a match)
{
adaptIndicesNoDel(actCorrsImg2TNFromLast_Idx, delList3D);
}
adaptIndicesNoDel(actCorrsImg12TPFromLast_Idx, delList3D);
deleteVecEntriesbyIdx(actImgPointCloudFromLast, delList3D);
deleteVecEntriesbyIdx(actCorrsImg12TPFromLast_IdxWorld, delList3D);
// sort(delListCorrs.begin(), delListCorrs.end(), [](size_t first, size_t second) { return first < second; });
deleteVecEntriesbyIdx(actCorrsImg12TPFromLast_Idx, delListCorrs);
deleteMatEntriesByIdx(actCorrsImg1TPFromLast, delListCorrs, false);
deleteMatEntriesByIdx(actCorrsImg2TPFromLast, delListCorrs, false);
// if(!checkCorr3DConsistency()){
// throw SequenceException("Correspondences are not projections of 3D points!");
// }
}
return actDelNr;
}
//Initialize region ROIs and masks
void genStereoSequ::genRegMasks() {
//Construct valid areas for every region
regmasks = vector<vector<Mat>>(3, vector<Mat>(3));
regmasksROIs = vector<vector<cv::Rect>>(3, vector<cv::Rect>(3));
regROIs = vector<vector<cv::Rect>>(3, vector<cv::Rect>(3));
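//regROIs holds the exact 3x3 tiling of the image, regmasksROIs the same tiles enlarged (by overSi) towards neighbouring regions, and regmasks a full-size mask per region that is non-zero inside the enlarged ROI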
Size imgSi13 = Size(imgSize.width / 3, imgSize.height / 3);
Mat validRect = Mat::ones(imgSize, CV_8UC1);
const float overSi = 1.25f;//Allows the expansion of created areas beyond their region by a given percentage
for (int y = 0; y < 3; y++) {
cv::Point2i pl1, pr1, pl2, pr2;
pl1.y = y * imgSi13.height;
pl2.y = pl1.y;
if (y < 2) {
pr1.y = pl1.y + (int) (overSi * (float) imgSi13.height);
pr2.y = pl2.y + imgSi13.height;
} else {
pr1.y = imgSize.height;
pr2.y = imgSize.height;
}
if (y > 0) {
pl1.y -= (int) ((overSi - 1.f) * (float) imgSi13.height);
}
for (int x = 0; x < 3; x++) {
pl1.x = x * imgSi13.width;
pl2.x = pl1.x;
if (x < 2) {
pr1.x = pl1.x + (int) (overSi * (float) imgSi13.width);
pr2.x = pl2.x + imgSi13.width;
} else {
pr1.x = imgSize.width;
pr2.x = imgSize.width;
}
if (x > 0) {
pl1.x -= (int) ((overSi - 1.f) * (float) imgSi13.width);
}
Rect vROI = Rect(pl1, pr1);
regmasksROIs[y][x] = vROI;
regmasks[y][x] = Mat::zeros(imgSize, CV_8UC1);
validRect(vROI).copyTo(regmasks[y][x](vROI));
Rect vROIo = Rect(pl2, pr2);
regROIs[y][x] = vROIo;
}
}
}
//Generates a depth map with the size of the image where each pixel value corresponds to the depth
void genStereoSequ::genDepthMaps() {
int minSi = (int) sqrt(minDArea);
int maskEnlarge = 0;
for (size_t y = 0; y < 3; y++) {
for (size_t x = 0; x < 3; x++) {
int32_t tmp;
if (!seedsNearNNDist[y][x].empty()) {
tmp = *std::max_element(seedsNearNNDist[y][x].begin(), seedsNearNNDist[y][x].end());
if (tmp > maskEnlarge) {
maskEnlarge = tmp;
}
}
if (!seedsMidNNDist[y][x].empty()) {
tmp = *std::max_element(seedsMidNNDist[y][x].begin(), seedsMidNNDist[y][x].end());
if (tmp > maskEnlarge) {
maskEnlarge = tmp;
}
}
if (!seedsFarNNDist[y][x].empty()) {
tmp = *std::max_element(seedsFarNNDist[y][x].begin(), seedsFarNNDist[y][x].end());
if (tmp > maskEnlarge) {
maskEnlarge = tmp;
}
}
}
}
cv::Mat noGenMaskB = Mat::zeros(imgSize.height + 2 * maskEnlarge, imgSize.width + 2 * maskEnlarge, CV_8UC1);
Mat noGenMaskB2 = noGenMaskB.clone();
cv::Mat noGenMask = noGenMaskB(Range(maskEnlarge, imgSize.height + maskEnlarge),
Range(maskEnlarge, imgSize.width + maskEnlarge));
Mat noGenMask2 = noGenMaskB2(Range(maskEnlarge, imgSize.height + maskEnlarge),
Range(maskEnlarge, imgSize.width + maskEnlarge));
//Get an ordering of the different depth area sizes for every region
cv::Mat beginDepth = cv::Mat(3, 3, CV_32SC3);
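//Each element of beginDepth stores the depth classes (0 = near, 1 = mid, 2 = far) of one region ordered by ascending target area: [0] smallest, [1] second smallest, [2] largest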
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
int32_t maxAPReg[3];
maxAPReg[0] = areaPRegNear[actCorrsPRIdx].at<int32_t>(y, x);
maxAPReg[1] = areaPRegMid[actCorrsPRIdx].at<int32_t>(y, x);
maxAPReg[2] = areaPRegFar[actCorrsPRIdx].at<int32_t>(y, x);
std::ptrdiff_t pdiff = min_element(maxAPReg, maxAPReg + 3) - maxAPReg;
beginDepth.at<cv::Vec<int32_t, 3>>(y, x)[0] = (int32_t)pdiff;
if (maxAPReg[(pdiff + 1) % 3] < maxAPReg[(pdiff + 2) % 3]) {
beginDepth.at<cv::Vec<int32_t, 3>>(y, x)[1] = ((int32_t)pdiff + 1) % 3;
beginDepth.at<cv::Vec<int32_t, 3>>(y, x)[2] = ((int32_t)pdiff + 2) % 3;
} else {
beginDepth.at<cv::Vec<int32_t, 3>>(y, x)[2] = ((int32_t)pdiff + 1) % 3;
beginDepth.at<cv::Vec<int32_t, 3>>(y, x)[1] = ((int32_t)pdiff + 2) % 3;
}
}
}
//Get the average area for every seed position
Mat meanNearA = Mat::zeros(3, 3, CV_32SC2);
Mat meanMidA = Mat::zeros(3, 3, CV_32SC2);
Mat meanFarA = Mat::zeros(3, 3, CV_32SC2);
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
int32_t checkArea = 0;
if (!seedsNear[y][x].empty()) {
meanNearA.at<cv::Vec<int32_t, 2>>(y, x)[0] =
areaPRegNear[actCorrsPRIdx].at<int32_t>(y, x) / (int32_t) seedsNear[y][x].size();
meanNearA.at<cv::Vec<int32_t, 2>>(y, x)[1] = (int32_t) sqrt(
(double) meanNearA.at<cv::Vec<int32_t, 2>>(y, x)[0] / M_PI);
}
checkArea += areaPRegNear[actCorrsPRIdx].at<int32_t>(y, x);
if (!seedsMid[y][x].empty()) {
meanMidA.at<cv::Vec<int32_t, 2>>(y, x)[0] =
areaPRegMid[actCorrsPRIdx].at<int32_t>(y, x) / (int32_t) seedsMid[y][x].size();
meanMidA.at<cv::Vec<int32_t, 2>>(y, x)[1] = (int32_t) sqrt(
(double) meanMidA.at<cv::Vec<int32_t, 2>>(y, x)[0] / M_PI);
}
checkArea += areaPRegMid[actCorrsPRIdx].at<int32_t>(y, x);
if (!seedsFar[y][x].empty()) {
meanFarA.at<cv::Vec<int32_t, 2>>(y, x)[0] =
areaPRegFar[actCorrsPRIdx].at<int32_t>(y, x) / (int32_t) seedsFar[y][x].size();
meanFarA.at<cv::Vec<int32_t, 2>>(y, x)[1] = (int32_t) sqrt(
(double) meanFarA.at<cv::Vec<int32_t, 2>>(y, x)[0] / M_PI);
}
checkArea += areaPRegFar[actCorrsPRIdx].at<int32_t>(y, x);
if(verbose & PRINT_WARNING_MESSAGES) {
if (checkArea != regROIs[y][x].area()) {
cout << "Sum of static depth areas (" << checkArea << ") does not correspond to area of region ("
<< regROIs[y][x].area() << ")!" << endl;
}
}
}
}
//Reserve a little bit of space for depth areas generated later on (as they are larger)
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
for (int i = 2; i >= 1; i--) {
switch (beginDepth.at<cv::Vec<int32_t, 3>>(y, x)[i]) {
case 0:
for (size_t j = 0; j < seedsNear[y][x].size(); j++) {
cv::Point3_<int32_t> pt = seedsNear[y][x][j];
Mat part, rmask;
int32_t nnd = seedsNearNNDist[y][x][j];
int32_t useRad = max(min(max((nnd - 1) / 2, meanNearA.at<cv::Vec<int32_t, 2>>(y, x)[1]),
nnd - 2),1);
int32_t offset = maskEnlarge - useRad;
int32_t offset2 = maskEnlarge + useRad;
getRandMask(rmask, meanNearA.at<cv::Vec<int32_t, 2>>(y, x)[0], useRad, minSi);
if (i == 2) {
part = noGenMaskB2(Range(pt.y + offset, pt.y + offset2),
Range(pt.x + offset, pt.x + offset2));
} else {
part = noGenMaskB(Range(pt.y + offset, pt.y + offset2),
Range(pt.x + offset, pt.x + offset2));
}
part |= rmask;
}
break;
case 1:
for (size_t j = 0; j < seedsMid[y][x].size(); j++) {
cv::Point3_<int32_t> pt = seedsMid[y][x][j];
Mat part, rmask;
int32_t nnd = seedsMidNNDist[y][x][j];
int32_t useRad = max(min(max((nnd - 1) / 2, meanMidA.at<cv::Vec<int32_t, 2>>(y, x)[1]),
nnd - 2),1);
int32_t offset = maskEnlarge - useRad;
int32_t offset2 = maskEnlarge + useRad;
getRandMask(rmask, meanMidA.at<cv::Vec<int32_t, 2>>(y, x)[0], useRad, minSi);
if (i == 2) {
part = noGenMaskB2(Range(pt.y + offset, pt.y + offset2),
Range(pt.x + offset, pt.x + offset2));
} else {
part = noGenMaskB(Range(pt.y + offset, pt.y + offset2),
Range(pt.x + offset, pt.x + offset2));
}
part |= rmask;
}
break;
case 2:
for (size_t j = 0; j < seedsFar[y][x].size(); j++) {
cv::Point3_<int32_t> pt = seedsFar[y][x][j];
Mat part, rmask;
int32_t nnd = seedsFarNNDist[y][x][j];
int32_t useRad = max(min(max((nnd - 1) / 2, meanFarA.at<cv::Vec<int32_t, 2>>(y, x)[1]),
nnd - 2),1);
int32_t offset = maskEnlarge - useRad;
int32_t offset2 = maskEnlarge + useRad;
getRandMask(rmask, meanFarA.at<cv::Vec<int32_t, 2>>(y, x)[0], useRad, minSi);
if (i == 2) {
part = noGenMaskB2(Range(pt.y + offset, pt.y + offset2),
Range(pt.x + offset, pt.x + offset2));
} else {
part = noGenMaskB(Range(pt.y + offset, pt.y + offset2),
Range(pt.x + offset, pt.x + offset2));
}
part |= rmask;
}
break;
default:
break;
}
}
}
}
noGenMaskB |= noGenMaskB2;
//Show the masks
if (verbose & SHOW_BUILD_PROC_STATIC_OBJ) {
if(!writeIntermediateImg(noGenMask, "static_obj_mask_largest_2_depths") ||
!writeIntermediateImg(noGenMask2, "static_obj_mask_largest_depth")) {
namedWindow("Mask for largest 2 depths", WINDOW_AUTOSIZE);
imshow("Mask for largest 2 depths", noGenMask);
namedWindow("Mask for largest depth", WINDOW_AUTOSIZE);
imshow("Mask for largest depth", noGenMask2);
waitKey(0);
destroyWindow("Mask for largest 2 depths");
destroyWindow("Mask for largest depth");
}
}
//Show the region masks
/*namedWindow("Region mask", WINDOW_AUTOSIZE);
for (int y = 0; y < 3; ++y) {
for (int x = 0; x < 3; ++x) {
imshow("Region mask", (regmasks[y][x] > 0));
waitKey(0);
}
}
destroyWindow("Region mask");*/
//Create first layer of depth areas
std::vector<std::vector<std::vector<cv::Point_<int32_t>>>> actPosSeedsNear(3,
std::vector<std::vector<cv::Point_<int32_t>>>(
3));
std::vector<std::vector<std::vector<cv::Point_<int32_t>>>> actPosSeedsMid(3,
std::vector<std::vector<cv::Point_<int32_t>>>(
3));
std::vector<std::vector<std::vector<cv::Point_<int32_t>>>> actPosSeedsFar(3,
std::vector<std::vector<cv::Point_<int32_t>>>(
3));
std::vector<std::vector<std::vector<size_t>>> nrIterPerSeedNear(3, std::vector<std::vector<size_t>>(3));
std::vector<std::vector<std::vector<size_t>>> nrIterPerSeedMid(3, std::vector<std::vector<size_t>>(3));
std::vector<std::vector<std::vector<size_t>>> nrIterPerSeedFar(3, std::vector<std::vector<size_t>>(3));
std::vector<std::vector<int32_t>> actAreaNear(3, vector<int32_t>(3, 0));
std::vector<std::vector<int32_t>> actAreaMid(3, vector<int32_t>(3, 0));
std::vector<std::vector<int32_t>> actAreaFar(3, vector<int32_t>(3, 0));
std::vector<std::vector<unsigned char>> dilateOpNear(3, vector<unsigned char>(3, 0));
std::vector<std::vector<unsigned char>> dilateOpMid(3, vector<unsigned char>(3, 0));
std::vector<std::vector<unsigned char>> dilateOpFar(3, vector<unsigned char>(3, 0));
depthAreaMap = Mat::zeros(imgSize, CV_8UC1);
Mat actUsedAreaNear = Mat::zeros(imgSize, CV_8UC1);
Mat actUsedAreaMid = Mat::zeros(imgSize, CV_8UC1);
Mat actUsedAreaFar = Mat::zeros(imgSize, CV_8UC1);
Mat neighborRegMask = Mat::zeros(imgSize, CV_8UC1);
//Init actual positions
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
if (!seedsNear[y][x].empty() && (beginDepth.at<cv::Vec<int32_t, 3>>(y, x)[2] != 0)) {
actPosSeedsNear[y][x].resize(seedsNear[y][x].size());
nrIterPerSeedNear[y][x].resize(seedsNear[y][x].size(), 0);
for (size_t i = 0; i < seedsNear[y][x].size(); i++) {
int ix = seedsNear[y][x][i].x;
int iy = seedsNear[y][x][i].y;
actPosSeedsNear[y][x][i].x = ix;
actPosSeedsNear[y][x][i].y = iy;
depthAreaMap.at<unsigned char>(iy, ix) = 1;
actUsedAreaNear.at<unsigned char>(iy, ix) = 1;
neighborRegMask.at<unsigned char>(iy, ix) = (unsigned char) (y * 3 + x + 1);
actAreaNear[y][x]++;
}
}
if (!seedsMid[y][x].empty() && (beginDepth.at<cv::Vec<int32_t, 3>>(y, x)[2] != 1)) {
actPosSeedsMid[y][x].resize(seedsMid[y][x].size());
nrIterPerSeedMid[y][x].resize(seedsMid[y][x].size(), 0);
for (size_t i = 0; i < seedsMid[y][x].size(); i++) {
int ix = seedsMid[y][x][i].x;
int iy = seedsMid[y][x][i].y;
actPosSeedsMid[y][x][i].x = ix;
actPosSeedsMid[y][x][i].y = iy;
depthAreaMap.at<unsigned char>(iy, ix) = 2;
actUsedAreaMid.at<unsigned char>(iy, ix) = 1;
neighborRegMask.at<unsigned char>(iy, ix) = (unsigned char) (y * 3 + x + 1);
actAreaMid[y][x]++;
}
}
if (!seedsFar[y][x].empty() && (beginDepth.at<cv::Vec<int32_t, 3>>(y, x)[2] != 2)) {
actPosSeedsFar[y][x].resize(seedsFar[y][x].size());
nrIterPerSeedFar[y][x].resize(seedsFar[y][x].size(), 0);
for (size_t i = 0; i < seedsFar[y][x].size(); i++) {
int ix = seedsFar[y][x][i].x;
int iy = seedsFar[y][x][i].y;
actPosSeedsFar[y][x][i].x = ix;
actPosSeedsFar[y][x][i].y = iy;
depthAreaMap.at<unsigned char>(iy, ix) = 3;
actUsedAreaFar.at<unsigned char>(iy, ix) = 1;
neighborRegMask.at<unsigned char>(iy, ix) = (unsigned char) (y * 3 + x + 1);
actAreaFar[y][x]++;
}
}
}
}
//Create depth areas beginning with the smallest areas (near, mid, or far) per region
//Also create depth areas for the second smallest areas
Size imgSiM1 = Size(imgSize.width - 1, imgSize.height - 1);
size_t visualizeMask = 0;
for (int j = 0; j < 2; j++) {
if (j > 0) {
noGenMask = noGenMask2;
}
bool areasNFinish[3][3] = {{true, true, true}, {true, true, true}, {true, true, true}};
while (areasNFinish[0][0] || areasNFinish[0][1] || areasNFinish[0][2] ||
areasNFinish[1][0] || areasNFinish[1][1] || areasNFinish[1][2] ||
areasNFinish[2][0] || areasNFinish[2][1] || areasNFinish[2][2]) {
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
if (!areasNFinish[y][x]) continue;
switch (beginDepth.at<cv::Vec<int32_t, 3>>(y, x)[j]) {
case 0:
if (!actPosSeedsNear[y][x].empty()) {
for (size_t i = 0; i < actPosSeedsNear[y][x].size(); i++) {
if (areasNFinish[y][x]) {
/*Mat beforeAdding = actUsedAreaNear(regmasksROIs[y][x]) & (neighborRegMask(regmasksROIs[y][x]) == (unsigned char) (y * 3 + x));
int32_t Asv = actAreaNear[y][x];*/
areasNFinish[y][x] = addAdditionalDepth(1,
depthAreaMap,
actUsedAreaNear,
noGenMask,
regmasks[y][x],
actPosSeedsNear[y][x][i],
actPosSeedsNear[y][x][i],
actAreaNear[y][x],
areaPRegNear[actCorrsPRIdx].at<int32_t>(
y,
x),
imgSiM1,
cv::Point_<int32_t>(
seedsNear[y][x][i].x,
seedsNear[y][x][i].y),
regmasksROIs[y][x],
nrIterPerSeedNear[y][x][i],
dilateOpNear[y][x],
neighborRegMask,
(unsigned char) (y * 3 + x + 1));
/*Mat afterAdding = actUsedAreaNear(regmasksROIs[y][x]) & (neighborRegMask(regmasksROIs[y][x]) == (unsigned char) (y * 3 + x));
int realAreaBeforeDil = cv::countNonZero(afterAdding);
if(realAreaBeforeDil != actAreaNear[y][x])
{
cout << "Area difference: " << realAreaBeforeDil - actAreaNear[y][x] << endl;
cout << "Area diff between last and actual values: " << actAreaNear[y][x] - Asv << endl;
Mat addingDiff = afterAdding ^ beforeAdding;
namedWindow("Before", WINDOW_AUTOSIZE);
namedWindow("After", WINDOW_AUTOSIZE);
namedWindow("Diff", WINDOW_AUTOSIZE);
namedWindow("Mask", WINDOW_AUTOSIZE);
namedWindow("All Regions", WINDOW_AUTOSIZE);
namedWindow("Neighbours", WINDOW_AUTOSIZE);
imshow("Before", (beforeAdding > 0));
imshow("After", (afterAdding > 0));
imshow("Diff", (addingDiff > 0));
imshow("Mask", noGenMask(regmasksROIs[y][x]));
Mat colorMapImg;
unsigned char clmul = 255 / 3;
// Apply the colormap:
applyColorMap(depthAreaMap(regmasksROIs[y][x]) * clmul, colorMapImg, cv::COLORMAP_RAINBOW);
imshow("All Regions", colorMapImg);
clmul = 255 / 9;
applyColorMap(neighborRegMask(regmasksROIs[y][x]) * clmul, colorMapImg, cv::COLORMAP_RAINBOW);
imshow("Neighbours", colorMapImg);
waitKey(0);
destroyWindow("Before");
destroyWindow("After");
destroyWindow("Diff");
destroyWindow("Mask");
destroyWindow("All Regions");
destroyWindow("Neighbours");
}*/
} else {
break;
}
}
} else
areasNFinish[y][x] = false;
break;
case 1:
if (!actPosSeedsMid[y][x].empty()) {
for (size_t i = 0; i < actPosSeedsMid[y][x].size(); i++) {
if (areasNFinish[y][x]) {
/*Mat beforeAdding = actUsedAreaMid(regmasksROIs[y][x]) & (neighborRegMask(regmasksROIs[y][x]) == (unsigned char) (y * 3 + x));
int32_t Asv = actAreaMid[y][x];*/
areasNFinish[y][x] = addAdditionalDepth(2,
depthAreaMap,
actUsedAreaMid,
noGenMask,
regmasks[y][x],
actPosSeedsMid[y][x][i],
actPosSeedsMid[y][x][i],
actAreaMid[y][x],
areaPRegMid[actCorrsPRIdx].at<int32_t>(
y,
x),
imgSiM1,
cv::Point_<int32_t>(seedsMid[y][x][i].x,
seedsMid[y][x][i].y),
regmasksROIs[y][x],
nrIterPerSeedMid[y][x][i],
dilateOpMid[y][x],
neighborRegMask,
(unsigned char) (y * 3 + x + 1));
/*Mat afterAdding = actUsedAreaMid(regmasksROIs[y][x]) & (neighborRegMask(regmasksROIs[y][x]) == (unsigned char) (y * 3 + x));
int realAreaBeforeDil = cv::countNonZero(afterAdding);
if(realAreaBeforeDil != actAreaMid[y][x])
{
cout << "Area difference: " << realAreaBeforeDil - actAreaMid[y][x] << endl;
cout << "Area diff between last and actual values: " << actAreaMid[y][x] - Asv << endl;
Mat addingDiff = afterAdding ^ beforeAdding;
namedWindow("Before", WINDOW_AUTOSIZE);
namedWindow("After", WINDOW_AUTOSIZE);
namedWindow("Diff", WINDOW_AUTOSIZE);
namedWindow("Mask", WINDOW_AUTOSIZE);
namedWindow("All Regions", WINDOW_AUTOSIZE);
namedWindow("Neighbours", WINDOW_AUTOSIZE);
imshow("Before", (beforeAdding > 0));
imshow("After", (afterAdding > 0));
imshow("Diff", (addingDiff > 0));
imshow("Mask", noGenMask(regmasksROIs[y][x]));
Mat colorMapImg;
unsigned char clmul = 255 / 3;
// Apply the colormap:
applyColorMap(depthAreaMap(regmasksROIs[y][x]) * clmul, colorMapImg, cv::COLORMAP_RAINBOW);
imshow("All Regions", colorMapImg);
clmul = 255 / 9;
applyColorMap(neighborRegMask(regmasksROIs[y][x]) * clmul, colorMapImg, cv::COLORMAP_RAINBOW);
imshow("Neighbours", colorMapImg);
waitKey(0);
destroyWindow("Before");
destroyWindow("After");
destroyWindow("Diff");
destroyWindow("Mask");
destroyWindow("All Regions");
destroyWindow("Neighbours");
}*/
} else {
break;
}
}
} else
areasNFinish[y][x] = false;
break;
case 2:
if (!actPosSeedsFar[y][x].empty()) {
for (size_t i = 0; i < actPosSeedsFar[y][x].size(); i++) {
if (areasNFinish[y][x]) {
/*Mat beforeAdding = actUsedAreaFar(regmasksROIs[y][x]) & (neighborRegMask(regmasksROIs[y][x]) == (unsigned char) (y * 3 + x));
int32_t Asv = actAreaFar[y][x];*/
areasNFinish[y][x] = addAdditionalDepth(3,
depthAreaMap,
actUsedAreaFar,
noGenMask,
regmasks[y][x],
actPosSeedsFar[y][x][i],
actPosSeedsFar[y][x][i],
actAreaFar[y][x],
areaPRegFar[actCorrsPRIdx].at<int32_t>(
y,
x),
imgSiM1,
cv::Point_<int32_t>(seedsFar[y][x][i].x,
seedsFar[y][x][i].y),
regmasksROIs[y][x],
nrIterPerSeedFar[y][x][i],
dilateOpFar[y][x],
neighborRegMask,
(unsigned char) (y * 3 + x + 1));
/*Mat afterAdding = actUsedAreaFar(regmasksROIs[y][x]) & (neighborRegMask(regmasksROIs[y][x]) == (unsigned char) (y * 3 + x));
int realAreaBeforeDil = cv::countNonZero(afterAdding);
if(realAreaBeforeDil != actAreaFar[y][x])
{
cout << "Area difference: " << realAreaBeforeDil - actAreaFar[y][x] << endl;
cout << "Area diff between last and actual values: " << actAreaFar[y][x] - Asv << endl;
Mat addingDiff = afterAdding ^ beforeAdding;
namedWindow("Before", WINDOW_AUTOSIZE);
namedWindow("After", WINDOW_AUTOSIZE);
namedWindow("Diff", WINDOW_AUTOSIZE);
namedWindow("Mask", WINDOW_AUTOSIZE);
namedWindow("All Regions", WINDOW_AUTOSIZE);
namedWindow("Neighbours", WINDOW_AUTOSIZE);
imshow("Before", (beforeAdding > 0));
imshow("After", (afterAdding > 0));
imshow("Diff", (addingDiff > 0));
imshow("Mask", noGenMask(regmasksROIs[y][x]));
Mat colorMapImg;
unsigned char clmul = 255 / 3;
// Apply the colormap:
applyColorMap(depthAreaMap(regmasksROIs[y][x]) * clmul, colorMapImg, cv::COLORMAP_RAINBOW);
imshow("All Regions", colorMapImg);
clmul = 255 / 9;
applyColorMap(neighborRegMask(regmasksROIs[y][x]) * clmul, colorMapImg, cv::COLORMAP_RAINBOW);
imshow("Neighbours", colorMapImg);
waitKey(0);
destroyWindow("Before");
destroyWindow("After");
destroyWindow("Diff");
destroyWindow("Mask");
destroyWindow("All Regions");
destroyWindow("Neighbours");
}*/
} else {
break;
}
}
} else
areasNFinish[y][x] = false;
break;
default:
break;
}
}
}
if (verbose & SHOW_BUILD_PROC_STATIC_OBJ) {
if (visualizeMask % 200 == 0) {
Mat colorMapImg;
unsigned char clmul = 255 / 3;
// Apply the colormap:
applyColorMap(depthAreaMap * clmul, colorMapImg, cv::COLORMAP_RAINBOW);
if(!writeIntermediateImg(colorMapImg, "static_obj_depth_areas_creation_process_step_" + std::to_string(visualizeMask))){
namedWindow("Static object depth areas creation process", WINDOW_AUTOSIZE);
imshow("Static object depth areas creation process", colorMapImg);
waitKey(0);
destroyWindow("Static object depth areas creation process");
}
}
visualizeMask++;
}
}
}
//Show the intermediate result
if (verbose & SHOW_BUILD_PROC_STATIC_OBJ) {
unsigned char clmul = 255 / 3;
// Apply the colormap:
Mat colorMapImg;
applyColorMap(depthAreaMap * clmul, colorMapImg, cv::COLORMAP_RAINBOW);
if(!writeIntermediateImg(colorMapImg, "static_obj_depth_areas_after_filling_2_depths_per_region")){
namedWindow("Static object depth areas after filling 2 depths per region", WINDOW_AUTOSIZE);
imshow("Static object depth areas after filling 2 depths per region", colorMapImg);
waitKey(0);
destroyWindow("Static object depth areas after filling 2 depths per region");
}
}
/*//Show the mask
{
namedWindow("Mask for largest depth", WINDOW_AUTOSIZE);
imshow("Mask for largest depth", noGenMask2);
waitKey(0);
destroyWindow("Mask for largest depth");
}*/
//Fill the remaining areas:
//Generate the (largest) depth area per region independently of the (possibly different) largest depth areas of the other regions
Mat maskNear = Mat::zeros(imgSize, CV_8UC1);
Mat maskMid = Mat::zeros(imgSize, CV_8UC1);
Mat maskFar = Mat::zeros(imgSize, CV_8UC1);
int32_t fillAreas[3] = {0, 0, 0};
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
switch (beginDepth.at<cv::Vec<int32_t, 3>>(y, x)[2]) {
case 0:
maskNear(regROIs[y][x]) |= noGenMask2(regROIs[y][x]);
fillAreas[0] += areaPRegNear[actCorrsPRIdx].at<int32_t>(y, x);
break;
case 1:
maskMid(regROIs[y][x]) |= noGenMask2(regROIs[y][x]);
fillAreas[1] += areaPRegMid[actCorrsPRIdx].at<int32_t>(y, x);
break;
case 2:
maskFar(regROIs[y][x]) |= noGenMask2(regROIs[y][x]);
fillAreas[2] += areaPRegFar[actCorrsPRIdx].at<int32_t>(y, x);
break;
default:
break;
}
}
}
int32_t actualAreas[3] = {0, 0, 0};
actualAreas[0] = cv::countNonZero(maskNear);
actualAreas[1] = cv::countNonZero(maskMid);
actualAreas[2] = cv::countNonZero(maskFar);
fillRemainingAreas(maskNear, depthAreaMap, fillAreas[0], actualAreas[0]);
fillRemainingAreas(maskMid, depthAreaMap, fillAreas[1], actualAreas[1]);
fillRemainingAreas(maskFar, depthAreaMap, fillAreas[2], actualAreas[2]);
//Show the masks
/*{
Mat colorMapImg;
unsigned char clmul = 255 / 3;
// Apply the colormap:
Mat completeDepthMap = (maskNear & Mat::ones(imgSize, CV_8UC1)) * clmul;
completeDepthMap |= (maskMid & Mat::ones(imgSize, CV_8UC1)) * (clmul * 2);
completeDepthMap |= maskFar;
applyColorMap(completeDepthMap, colorMapImg, cv::COLORMAP_RAINBOW);
namedWindow("Static object depth areas", WINDOW_AUTOSIZE);
imshow("Static object depth areas", colorMapImg);
waitKey(0);
destroyWindow("Static object depth areas");
}*/
//Get overlaps of filled areas (3 different) and remove them
Mat overlap3 = maskNear & maskMid & maskFar;
int nr_overlap3 = cv::countNonZero(overlap3);
if (nr_overlap3) {
Mat overlap3sv = overlap3.clone();
int overlapDel = nr_overlap3 / 3;
//Remove small mid and far areas (only near areas remain in overlap areas)
removeNrFilledPixels(cv::Size(3, 3), imgSize, overlap3, overlapDel);
Mat changeMask = ((overlap3 ^ overlap3sv) == 0);
maskMid &= changeMask;
maskFar &= changeMask;
overlap3sv = overlap3.clone();
//Remove small near and far areas (only mid areas remain in overlap areas)
removeNrFilledPixels(cv::Size(3, 3), imgSize, overlap3, overlapDel);
changeMask = ((overlap3 ^ overlap3sv) == 0);
maskNear &= changeMask;
maskFar &= changeMask;
//Remove small near and mid areas (only far areas remain in overlap areas)
changeMask = (overlap3 == 0);
maskNear &= changeMask;
maskMid &= changeMask;
}
//Get overlaps of filled areas (2 different) and remove them
delOverlaps2(maskNear, maskMid);
delOverlaps2(maskNear, maskFar);
delOverlaps2(maskFar, maskMid);
//Show the masks
if (verbose & SHOW_BUILD_PROC_STATIC_OBJ) {
Mat colorMapImg;
unsigned char clmul = 255 / 3;
// Apply the colormap:
Mat completeDepthMap = (maskNear & Mat::ones(imgSize, CV_8UC1)) * clmul;
completeDepthMap |= (maskMid & Mat::ones(imgSize, CV_8UC1)) * (clmul * 2);
completeDepthMap |= maskFar;
applyColorMap(completeDepthMap, colorMapImg, cv::COLORMAP_RAINBOW);
if(!writeIntermediateImg(colorMapImg, "largest_static_object_depth_areas_before_final_dilation")){
namedWindow("Largest static object depth areas before final dilation", WINDOW_AUTOSIZE);
imshow("Largest static object depth areas before final dilation", colorMapImg);
waitKey(0);
destroyWindow("Largest static object depth areas before final dilation");
}
}
//Try to fill the remaining gaps using dilation
const int maxCnt = 20;
int cnt = 0;
bool nFinished[3] = {true, true, true};
actualAreas[0] = cv::countNonZero(maskNear);
actualAreas[1] = cv::countNonZero(maskMid);
actualAreas[2] = cv::countNonZero(maskFar);
if (actualAreas[0] >= fillAreas[0]) {
nFinished[0] = false;
}
if (actualAreas[1] >= fillAreas[1]) {
nFinished[1] = false;
}
if (actualAreas[2] >= fillAreas[2]) {
nFinished[2] = false;
}
while ((nFinished[0] || nFinished[1] || nFinished[2]) && (cnt < maxCnt)) {
if (nFinished[0]) {
if (!fillRemainingAreas(maskNear, depthAreaMap, fillAreas[0], actualAreas[0], maskMid, maskFar)) {
nFinished[0] = false;
}
if (actualAreas[0] >= fillAreas[0]) {
nFinished[0] = false;
}
}
if (nFinished[1]) {
if (!fillRemainingAreas(maskMid, depthAreaMap, fillAreas[1], actualAreas[1], maskNear, maskFar)) {
nFinished[1] = false;
}
if (actualAreas[1] >= fillAreas[1]) {
nFinished[1] = false;
}
}
if (nFinished[2]) {
if (!fillRemainingAreas(maskFar, depthAreaMap, fillAreas[2], actualAreas[2], maskNear, maskMid)) {
nFinished[2] = false;
}
if (actualAreas[2] >= fillAreas[2]) {
nFinished[2] = false;
}
}
cnt++;
}
if (actualAreas[0] < fillAreas[0]) {
nFinished[0] = true;
}
if (actualAreas[1] < fillAreas[1]) {
nFinished[1] = true;
}
if (actualAreas[2] < fillAreas[2]) {
nFinished[2] = true;
}
//Show the masks
if (verbose & SHOW_BUILD_PROC_STATIC_OBJ) {
Mat colorMapImg;
unsigned char clmul = 255 / 3;
// Apply the colormap:
Mat completeDepthMap = (maskNear & Mat::ones(imgSize, CV_8UC1)) * clmul;
completeDepthMap |= (maskMid & Mat::ones(imgSize, CV_8UC1)) * (clmul * 2);
completeDepthMap |= maskFar;
applyColorMap(completeDepthMap, colorMapImg, cv::COLORMAP_RAINBOW);
if(!writeIntermediateImg(colorMapImg, "static_object_depth_largest_areas")){
namedWindow("Static object depth areas (largest areas)", WINDOW_AUTOSIZE);
imshow("Static object depth areas (largest areas)", colorMapImg);
waitKey(0);
destroyWindow("Static object depth areas (largest areas)");
}
}
//Combine created masks
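//depthAreaMap encodes the depth class of every pixel: 1 = near, 2 = mid, 3 = far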
Mat maskNMF1s = maskNear & Mat::ones(imgSize, actUsedAreaNear.type());
actUsedAreaNear |= maskNMF1s;
depthAreaMap |= maskNMF1s;
maskNMF1s = maskMid & Mat::ones(imgSize, actUsedAreaNear.type());
actUsedAreaMid |= maskNMF1s;
maskNMF1s *= 2;
depthAreaMap |= maskNMF1s;
maskNMF1s = maskFar & Mat::ones(imgSize, actUsedAreaNear.type());
actUsedAreaFar |= maskNMF1s;
maskNMF1s *= 3;
depthAreaMap |= maskNMF1s;
//Show the result
if (verbose & SHOW_BUILD_PROC_STATIC_OBJ) {
unsigned char clmul = 255 / 3;
// Apply the colormap:
Mat colorMapImg;
applyColorMap(depthAreaMap * clmul, colorMapImg, cv::COLORMAP_RAINBOW);
if(!writeIntermediateImg(colorMapImg, "static_object_depth_areas_before_glob_area_fill")){
namedWindow("Static object depth areas before glob area fill", WINDOW_AUTOSIZE);
imshow("Static object depth areas before glob area fill", colorMapImg);
waitKey(0);
destroyWindow("Static object depth areas before glob area fill");
}
}
//Fill the remaining areas
// if(nFinished[0] || nFinished[1] || nFinished[2]) {
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
Mat fillMask =
(depthAreaMap(regROIs[y][x]) == 0) &
Mat::ones(regROIs[y][x].height, regROIs[y][x].width, CV_8UC1);
switch (beginDepth.at<cv::Vec<int32_t, 3>>(y, x)[2]) {
case 0:
actUsedAreaNear(regROIs[y][x]) |= fillMask;
depthAreaMap(regROIs[y][x]) |= fillMask;
break;
case 1:
actUsedAreaMid(regROIs[y][x]) |= fillMask;
fillMask *= 2;
depthAreaMap(regROIs[y][x]) |= fillMask;
break;
case 2:
actUsedAreaFar(regROIs[y][x]) |= fillMask;
fillMask *= 3;
depthAreaMap(regROIs[y][x]) |= fillMask;
break;
default:
break;
}
}
}
// }
//Show the result
if (verbose & (SHOW_BUILD_PROC_STATIC_OBJ | SHOW_STATIC_OBJ_DISTANCES | SHOW_STATIC_OBJ_3D_PTS)) {
unsigned char clmul = 255 / 3;
// Apply the colormap:
Mat colorMapImg;
applyColorMap(depthAreaMap * clmul, colorMapImg, cv::COLORMAP_RAINBOW);
if(!writeIntermediateImg(colorMapImg, "static_object_depth_areas")){
namedWindow("Static object depth areas", WINDOW_AUTOSIZE);
imshow("Static object depth areas", colorMapImg);
waitKey(0);
if ((verbose & SHOW_STATIC_OBJ_DISTANCES) &&
!(verbose & (SHOW_STATIC_OBJ_DISTANCES | SHOW_STATIC_OBJ_3D_PTS))) {
destroyWindow("Static object depth areas");
}
}
}
//Get final depth values for each depth region
Mat depthMapNear, depthMapMid, depthMapFar;
getDepthMaps(depthMapNear, actUsedAreaNear, actDepthNear, actDepthMid, seedsNear, 0);
getDepthMaps(depthMapMid, actUsedAreaMid, actDepthMid, actDepthFar, seedsMid, 1);
getDepthMaps(depthMapFar, actUsedAreaFar, actDepthFar, maxFarDistMultiplier * actDepthFar, seedsFar, 2);
//Combine the 3 depth maps to a single depth map
depthMap = depthMapNear + depthMapMid + depthMapFar;
//Visualize the depth values
if (verbose & SHOW_STATIC_OBJ_DISTANCES) {
Mat normalizedDepth;
cv::normalize(depthMapNear, normalizedDepth, 0.1, 1.0, cv::NORM_MINMAX, -1, depthMapNear > 0);
bool wtd = !writeIntermediateImg(normalizedDepth, "normalized_static_object_depth_near");
if(wtd){
namedWindow("Normalized Static Obj Depth Near", WINDOW_AUTOSIZE);
imshow("Normalized Static Obj Depth Near", normalizedDepth);
}
normalizedDepth.release();
cv::normalize(depthMapMid, normalizedDepth, 0.1, 1.0, cv::NORM_MINMAX, -1, depthMapMid > 0);
wtd |= !writeIntermediateImg(normalizedDepth, "normalized_static_object_depth_mid");
if(wtd){
namedWindow("Normalized Static Obj Depth Mid", WINDOW_AUTOSIZE);
imshow("Normalized Static Obj Depth Mid", normalizedDepth);
}
normalizedDepth.release();
cv::normalize(depthMapFar, normalizedDepth, 0.1, 1.0, cv::NORM_MINMAX, -1, depthMapFar > 0);
wtd |= !writeIntermediateImg(normalizedDepth, "normalized_static_object_depth_far");
if(wtd){
namedWindow("Normalized Static Obj Depth Far", WINDOW_AUTOSIZE);
imshow("Normalized Static Obj Depth Far", normalizedDepth);
}
// Mat normalizedDepth;//, labelMask = cv::Mat::zeros(imgSize, CV_8UC1);
//labelMask |= actUsedAreaNear | actUsedAreaMid | actUsedAreaFar;
normalizedDepth.release();
cv::normalize(depthMap, normalizedDepth, 0.1, 1.0, cv::NORM_MINMAX);//, -1, labelMask);
Mat normalizedDepthColor;
normalizedDepth.convertTo(normalizedDepthColor, CV_8UC1, 255.0);
applyColorMap(normalizedDepthColor, normalizedDepthColor, cv::COLORMAP_RAINBOW);
wtd |= !writeIntermediateImg(normalizedDepthColor, "normalized_static_object_depth_full");
if(wtd){
namedWindow("Normalized Static Obj Depth", WINDOW_AUTOSIZE);
imshow("Normalized Static Obj Depth", normalizedDepthColor);
}
normalizedDepth.release();
Mat labelMask = (depthMapFar == 0);
cv::normalize(depthMap, normalizedDepth, 0.1, 1.0, cv::NORM_MINMAX, -1, labelMask);
normalizedDepthColor.release();
normalizedDepth.convertTo(normalizedDepthColor, CV_8UC1, 255.0);
applyColorMap(normalizedDepthColor, normalizedDepthColor, cv::COLORMAP_RAINBOW);
wtd |= !writeIntermediateImg(normalizedDepthColor, "normalized_static_object_depth_near_mid");
if(wtd){
namedWindow("Normalized Static Obj Depth Near and Mid", WINDOW_AUTOSIZE);
imshow("Normalized Static Obj Depth Near and Mid", normalizedDepthColor);
}
//Check for 0 values
Mat check0 = (depthMap <= 0);
int check0val = cv::countNonZero(check0);
if (check0val) {
bool wtd2 = !writeIntermediateImg(check0, "zero_or_lower_obj_depth_positions");
if(wtd2){
namedWindow("Zero or lower Obj Depth", WINDOW_AUTOSIZE);
imshow("Zero or lower Obj Depth", check0);
}
Mat checkB0 = (depthMap < 0);
check0val = cv::countNonZero(checkB0);
if (check0val) {
if(!writeIntermediateImg(checkB0, "below_zero_obj_depth_positions")){
namedWindow("Below zero Obj Depth", WINDOW_AUTOSIZE);
imshow("Below zero Obj Depth", checkB0);
waitKey(0);
destroyWindow("Below zero Obj Depth");
}
}
if(wtd2){
waitKey(0);
destroyWindow("Zero or lower Obj Depth");
}
// throw SequenceException("Static depth value of zero or below zero found!");
}
if(wtd){
waitKey(0);
destroyWindow("Normalized Static Obj Depth Near");
destroyWindow("Normalized Static Obj Depth Mid");
destroyWindow("Normalized Static Obj Depth Far");
destroyWindow("Normalized Static Obj Depth");
destroyWindow("Normalized Static Obj Depth Near and Mid");
}
}
if (!(verbose & SHOW_STATIC_OBJ_3D_PTS)) {
destroyAllWindows();
}
}
//Get overlaps of filled areas (2 different) and remove them
void genStereoSequ::delOverlaps2(cv::Mat &depthArea1, cv::Mat &depthArea2) {
Mat overlap2 = depthArea1 & depthArea2;
int nr_overlap2 = cv::countNonZero(overlap2);
if (nr_overlap2) {
Mat overlap2sv = overlap2.clone();
int overlapDel = nr_overlap2 / 2;
removeNrFilledPixels(cv::Size(3, 3), imgSize, overlap2, overlapDel);
Mat changeMask = ((overlap2 ^ overlap2sv) == 0);
depthArea1 &= changeMask;
depthArea2 &= (overlap2 == 0);
}
}
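/*Grow the binary mask 'depthArea' by dilating it with randomly sized and shaped structuring elements until 'areaToFill' pixels are covered
* depthArea ... Mask that is grown in place
* usedAreas ... Already used pixels; they are never added to depthArea
* areaToFill ... Target number of non-zero pixels in depthArea
* actualArea ... Current (and updated) number of non-zero pixels in depthArea
* otherDepthA1/2 ... Optional masks of the other depth classes that must not be overwritten; if given, only one successful dilation step is performed
* Returns false if no further growth was possible within the iteration limit*/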
bool genStereoSequ::fillRemainingAreas(cv::Mat &depthArea,
const cv::Mat &usedAreas,
int32_t areaToFill,
int32_t &actualArea,
cv::InputArray otherDepthA1,
cv::InputArray otherDepthA2) {
Mat mask;
bool only1It = false;
if (otherDepthA1.empty() || otherDepthA2.empty()) {
mask = (usedAreas == 0);
} else {
Mat otherDepthA1m = otherDepthA1.getMat();
Mat otherDepthA2m = otherDepthA2.getMat();
mask = (usedAreas == 0) & (otherDepthA1m == 0) & (otherDepthA2m == 0);
only1It = true;
}
int strElmSi = 5, cnt = 0, maxCnt = 50, strElmSiAdd = 0, strElmSiDir[2] = {0, 0};
int32_t siAfterDil = actualArea;
while (((!only1It && (siAfterDil < areaToFill)) || (only1It && ((siAfterDil - actualArea) == 0))) &&
(cnt < maxCnt)) {
cnt++;
Mat element;
int elSel = (int)(rand2() % 3);
strElmSiAdd = (rand2() % INT_MAX) % strElmSi;
strElmSiDir[0] = (int)(rand2() % 2);
strElmSiDir[1] = (int)(rand2() % 2);
switch (elSel) {
case 0:
element = cv::getStructuringElement(MORPH_ELLIPSE, Size(strElmSi + strElmSiDir[0] * strElmSiAdd,
strElmSi + strElmSiDir[1] * strElmSiAdd));
break;
case 1:
element = cv::getStructuringElement(MORPH_RECT, Size(strElmSi + strElmSiDir[0] * strElmSiAdd,
strElmSi + strElmSiDir[1] * strElmSiAdd));
break;
case 2:
element = cv::getStructuringElement(MORPH_CROSS, Size(strElmSi + strElmSiDir[0] * strElmSiAdd,
strElmSi + strElmSiDir[1] * strElmSiAdd));
break;
default:
element = cv::getStructuringElement(MORPH_ELLIPSE, Size(strElmSi, strElmSi));
break;
}
strElmSi += 2;
Mat depthAreaDilate;
dilate(depthArea, depthAreaDilate, element);
depthAreaDilate &= mask;
siAfterDil = (int32_t) cv::countNonZero(depthAreaDilate);
if (siAfterDil >= areaToFill) {
if (siAfterDil > areaToFill) {
int32_t diff = siAfterDil - areaToFill;
depthAreaDilate ^= depthArea;
removeNrFilledPixels(element.size(), imgSize, depthAreaDilate, diff);
}
depthArea |= depthAreaDilate;
actualArea = areaToFill;
return true;
} else if (((siAfterDil - actualArea) == 0) && (cnt > 10)) {
return false;
} else if (siAfterDil > actualArea) {
depthAreaDilate.copyTo(depthArea);
actualArea = siAfterDil;
if (only1It)
return true;
}
}
if (cnt >= maxCnt) {
return false;
}
return true;
}
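/* Removes 'nrToDel' non-zero pixels from a binary mask by zeroing out blocks of size 'delElementSi' (and, for the last
 * partially affected block, single pixels) until the requested number of pixels is deleted
 * delElementSi In: Size of the blocks that are zeroed out at once
 * matSize In: Size of the target matrix
 * targetMat In/Out: Binary mask from which the pixels are removed
 * nrToDel In: Number of non-zero pixels to remove */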
void genStereoSequ::removeNrFilledPixels(cv::Size delElementSi, cv::Size matSize, cv::Mat &targetMat, int32_t nrToDel) {
cv::Size delSiEnd(matSize.width - delElementSi.width, matSize.height - delElementSi.height);
cv::Rect delPos(0, 0, delElementSi.width, delElementSi.height);
Mat delMask, delZeroMask = cv::Mat::zeros(delElementSi, targetMat.type());
int32_t diff = nrToDel;
for (int y = 0; y < delSiEnd.height; y += delElementSi.height) {
for (int x = 0; x < delSiEnd.width; x += delElementSi.width) {
delPos.x = x;
delPos.y = y;
delMask = targetMat(delPos);
int nonZeros = cv::countNonZero(delMask);
if (nonZeros > 0) {
if (diff >= nonZeros) {
diff -= nonZeros;
delZeroMask.copyTo(delMask);
} else if (diff > 0) {
for (int y1 = 0; y1 < delElementSi.height; y1++) {
for (int x1 = 0; x1 < delElementSi.width; x1++) {
if (delMask.at<unsigned char>(y1, x1)) {
delMask.at<unsigned char>(y1, x1) = 0;
diff--;
if (diff <= 0)
break;
}
}
if (diff <= 0)
break;
}
}
if (diff <= 0)
break;
}
}
if (diff <= 0)
break;
}
}
/*Create a random binary mask with a given size
* mask ... Output random mask with the size (2 * useRad) x (2 * useRad)
* area ... Approximate area of 'ones' (255 for 8bit) in the mask
* useRad ... Radius that should be used to fill a random circle mask
* midR ... Circle radius in the middle of the mask that should be filled with 'ones'
* Returns the number of 'ones' in the mask*/
int32_t genStereoSequ::getRandMask(cv::Mat &mask, int32_t area, int32_t useRad, int32_t midR) {
int32_t usedist = 2 * useRad;
int32_t area2 = min((int32_t) floor((double) (useRad * useRad) * M_PI), area);
int32_t kSize = useRad / 3;
kSize -= (kSize + 1) % 2;
kSize = max(kSize, 3);
if(usedist <= 3){
mask = 255 * cv::Mat::ones(usedist, usedist, CV_8UC1);
return usedist * usedist;
}
Mat mask_t = cv::Mat::zeros(usedist, usedist, CV_64FC1);
/*Mat minVals = Mat::zeros(usedist, usedist, CV_64FC1);
Mat maxVals = Mat::ones(usedist, usedist, CV_64FC1) * 255.0;*/
Mat mask2, mask3;
double mi, ma, mr;
int actA = 0;
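    //Generate a random blob by thresholding a blurred random image from both sides; the result is restricted to a circle of radius useRad and a filled circle of radius midR guarantees a non-empty center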
do {
do {
randu(mask_t, cv::Scalar(0), cv::Scalar(255.0));
cv::GaussianBlur(mask_t, mask_t, Size(kSize, kSize), 0);
cv::minMaxLoc(mask_t, &mi, &ma);
mr = ma - mi;
} while (mr < 6.01);
double mv = getRandDoubleValRng(mi + 1.0, ma - 1.0);
double mrr = 0;
do {
mrr = (double) ((int)(rand2() % INT_MAX) % (int) floor((mr - 2.0) / 2.0));
} while (nearZero(mrr));
ma = mv + mrr;
mi = mv - mrr;
Mat mask_ti;
mask_t.convertTo(mask, CV_8UC1);
cv::threshold(mask, mask_ti, mi, 255.0, cv::THRESH_BINARY);
cv::threshold(mask, mask, ma, 255.0, cv::THRESH_BINARY_INV);
mask_ti.convertTo(mask2, CV_8UC1);
/*namedWindow("rand mask thresh bin", WINDOW_AUTOSIZE);
imshow("rand mask thresh bin", mask2);
waitKey(0);
namedWindow("rand mask thresh inv bin", WINDOW_AUTOSIZE);
imshow("rand mask thresh inv bin", mask);
waitKey(0);
destroyWindow("rand mask thresh bin");
destroyWindow("rand mask thresh inv bin");*/
mask &= mask2;
/*namedWindow("rand mask comb thresh bin", WINDOW_AUTOSIZE);
imshow("rand mask comb thresh bin", mask);*/
mask2 = Mat::zeros(usedist, usedist, CV_8UC1);
cv::circle(mask2, Point(useRad - 1, useRad - 1), useRad, cv::Scalar(255), -1);
mask &= mask2;
mask3 = Mat::zeros(usedist, usedist, CV_8UC1);
cv::circle(mask3, Point(useRad - 1, useRad - 1), midR, cv::Scalar(255), -1);
mask |= mask3;
/*namedWindow("rand mask with circles", WINDOW_AUTOSIZE);
imshow("rand mask with circles", mask);
waitKey(0);
destroyWindow("rand mask comb thresh bin");
destroyWindow("rand mask with circles");*/
actA = cv::countNonZero(mask);
} while (actA < 9);
mask.copyTo(mask3);
Mat element = cv::getStructuringElement(MORPH_CROSS, Size(3, 3));
int maxcnt = 50;
int minA = max(area2 / 2, 9);
while ((actA < minA) && (maxcnt > 0)) {
dilate(mask, mask, element);
mask &= mask2;
actA = cv::countNonZero(mask);
maxcnt--;
}
if (maxcnt < 50) {
/*namedWindow("rand mask dilate", WINDOW_AUTOSIZE);
imshow("rand mask dilate", mask);
waitKey(0);
destroyWindow("rand mask dilate");*/
return actA;
}
maxcnt = 50;
minA = max(2 * area2 / 3, 9);
while ((actA > minA) && (maxcnt > 0)) {
erode(mask, mask, element);
actA = cv::countNonZero(mask);
maxcnt--;
}
if (actA == 0) {
mask3.copyTo(mask);
actA = cv::countNonZero(mask);
}
/*namedWindow("rand mask erode", WINDOW_AUTOSIZE);
imshow("rand mask erode", mask);
waitKey(0);
destroyWindow("rand mask erode");*/
return actA;
}
//Generate depth values (for every pixel) for the given areas of depth regions taking into account the depth values from backprojected 3D points
void genStereoSequ::getDepthMaps(cv::OutputArray dout, cv::Mat &din, double dmin, double dmax,
std::vector<std::vector<std::vector<cv::Point3_<int32_t>>>> &initSeeds, int dNr) {
std::vector<cv::Point3_<int32_t>> initSeedInArea;
switch (dNr) {
case 0:
seedsNearFromLast = std::vector<std::vector<std::vector<cv::Point_<int32_t>>>>(3,
std::vector<std::vector<cv::Point_<int32_t>>>(
3));
break;
case 1:
seedsMidFromLast = std::vector<std::vector<std::vector<cv::Point_<int32_t>>>>(3,
std::vector<std::vector<cv::Point_<int32_t>>>(
3));
break;
case 2:
seedsFarFromLast = std::vector<std::vector<std::vector<cv::Point_<int32_t>>>>(3,
std::vector<std::vector<cv::Point_<int32_t>>>(
3));
break;
default:
break;
}
    //Check if there are depth seeds available that were already backprojected from 3D
for (size_t y = 0; y < 3; y++) {
for (size_t x = 0; x < 3; x++) {
for (size_t i = 0; i < initSeeds[y][x].size(); i++) {
if (initSeeds[y][x][i].z >= 0) {
initSeedInArea.push_back(initSeeds[y][x][i]);
switch (dNr) {
case 0:
seedsNearFromLast[y][x].emplace_back(
cv::Point_<int32_t>(initSeedInArea.back().x, initSeedInArea.back().y));
break;
case 1:
seedsMidFromLast[y][x].emplace_back(
cv::Point_<int32_t>(initSeedInArea.back().x, initSeedInArea.back().y));
break;
case 2:
seedsFarFromLast[y][x].emplace_back(
cv::Point_<int32_t>(initSeedInArea.back().x, initSeedInArea.back().y));
break;
default:
break;
}
}
}
}
}
getDepthVals(dout, din, dmin, dmax, initSeedInArea);
}
//Generate depth values (for every pixel) for the given areas of depth regions
void genStereoSequ::getDepthVals(cv::OutputArray dout, const cv::Mat &din, double dmin, double dmax,
std::vector<cv::Point3_<int32_t>> &initSeedInArea) {
Mat actUsedAreaLabel;
Mat actUsedAreaStats;
Mat actUsedAreaCentroids;
int nrLabels;
vector<std::vector<double>> funcPars;
uint16_t nL = 0;
//Get connected areas
nrLabels = connectedComponentsWithStats(din, actUsedAreaLabel, actUsedAreaStats, actUsedAreaCentroids, 8, CV_16U);
nL = nrLabels;//(uint16_t) (nrLabels + 1);
getRandDepthFuncPars(funcPars, (size_t) nL);
//cv::ConnectedComponentsTypes::CC_STAT_HEIGHT;
//Visualize the depth values
if (verbose & SHOW_STATIC_OBJ_DISTANCES) {
Mat colorMapImg;
Mat mask = (din > 0);
buildColorMapHSV2RGB(actUsedAreaLabel, colorMapImg, nrLabels, mask);
if(!writeIntermediateImg(colorMapImg, "Static_Obj_Connected_Components")){
namedWindow("Static Obj Connected Components", WINDOW_AUTOSIZE);
imshow("Static Obj Connected Components", colorMapImg);
waitKey(0);
destroyWindow("Static Obj Connected Components");
}
}
//dout.release();
dout.create(imgSize, CV_64FC1);
Mat dout_ = dout.getMat();
dout_.setTo(Scalar(0));
// dout = Mat::zeros(imgSize, CV_64FC1);
vector<cv::Point> singlePixelAreas;
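    //Evaluate an individual random depth function for every connected component within its bounding box and scale the result to a random sub-range of [dmin, dmax]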
for (uint16_t i = 0; i < nL; i++) {
Rect labelBB = Rect(actUsedAreaStats.at<int32_t>(i, cv::ConnectedComponentsTypes::CC_STAT_LEFT),
actUsedAreaStats.at<int32_t>(i, cv::ConnectedComponentsTypes::CC_STAT_TOP),
actUsedAreaStats.at<int32_t>(i, cv::ConnectedComponentsTypes::CC_STAT_WIDTH),
actUsedAreaStats.at<int32_t>(i, cv::ConnectedComponentsTypes::CC_STAT_HEIGHT));
if (labelBB.area() == 1) {
singlePixelAreas.emplace_back(Point(labelBB.x, labelBB.y));
continue;
}
Mat laMat = actUsedAreaLabel(labelBB);
Mat doutSlice = dout_(labelBB);
Mat dinSlice = din(labelBB);
double dmin_tmp = getRandDoubleValRng(dmin, dmin + 0.6 * (dmax - dmin));
double dmax_tmp = getRandDoubleValRng(dmin_tmp + 0.1 * (dmax - dmin), dmax);
double drange = dmax_tmp - dmin_tmp;
double rXr = getRandDoubleValRng(1.5, 3.0);
double rYr = getRandDoubleValRng(1.5, 3.0);
double h2 = (double) actUsedAreaStats.at<int32_t>(i, cv::ConnectedComponentsTypes::CC_STAT_HEIGHT);
h2 *= h2;
double w2 = (double) actUsedAreaStats.at<int32_t>(i, cv::ConnectedComponentsTypes::CC_STAT_WIDTH);
w2 *= w2;
double scale = sqrt(h2 + w2) / 2.0;
double rXrSc = rXr / scale;
double rYrSc = rYr / scale;
double cx = actUsedAreaCentroids.at<double>(i, 0) -
(double) actUsedAreaStats.at<int32_t>(i, cv::ConnectedComponentsTypes::CC_STAT_LEFT);
double cy = actUsedAreaCentroids.at<double>(i, 1) -
(double) actUsedAreaStats.at<int32_t>(i, cv::ConnectedComponentsTypes::CC_STAT_TOP);
//If an initial seed was backprojected from 3D to this component, the depth range of the current component must be similar
if (!initSeedInArea.empty()) {
int32_t minX = labelBB.x;
int32_t maxX = minX + labelBB.width;
int32_t minY = labelBB.y;
int32_t maxY = minY + labelBB.height;
vector<double> initDepths;
for (auto& j : initSeedInArea) {
if ((j.x >= minX) && (j.x < maxX) &&
(j.y >= minY) && (j.y < maxY)) {
if (actUsedAreaLabel.at<uint16_t>(j.y, j.x) == i) {
initDepths.push_back(
actImgPointCloudFromLast[actCorrsImg12TPFromLast_Idx[j.z]].z);
}
}
}
if (!initDepths.empty()) {
if (initDepths.size() == 1) {
double tmp = getRandDoubleValRng(0.05, 0.5);
dmin_tmp = initDepths[0] - tmp * (dmax - dmin);
dmax_tmp = initDepths[0] + tmp * (dmax - dmin);
} else {
auto minMaxD = std::minmax_element(initDepths.begin(), initDepths.end());
double range1 = *minMaxD.second - *minMaxD.first;
if (range1 < 0.05 * (dmax - dmin)) {
double dmid_tmp = *minMaxD.first + range1 / 2.0;
double tmp = getRandDoubleValRng(0.05, 0.5);
dmin_tmp = dmid_tmp - tmp * (dmax - dmin);
dmax_tmp = dmid_tmp + tmp * (dmax - dmin);
} else {
dmin_tmp = *minMaxD.first - range1 / 2.0;
dmax_tmp = *minMaxD.second + range1 / 2.0;
}
}
dmin_tmp = std::max(dmin_tmp, dmin);
dmax_tmp = std::min(dmax_tmp, dmax);
drange = dmax_tmp - dmin_tmp;
CV_Assert(drange > 0);
}
}
double minVal = DBL_MAX, maxVal = -DBL_MAX;
int32_t lareaCnt = 0, lareaNCnt = 2 * labelBB.width;
for (int y = 0; y < labelBB.height; y++) {
for (int x = 0; x < labelBB.width; x++) {
if (laMat.at<uint16_t>(y, x) == i) {
if (dinSlice.at<unsigned char>(y, x) == 0) {
lareaNCnt--;
if ((lareaCnt == 0) && (lareaNCnt < 0)) {
lareaCnt = -1;
y = labelBB.height;
break;
}
continue;
}
lareaCnt++;
double val = getDepthFuncVal(funcPars[i], ((double) x - cx) * rXrSc, ((double) y - cy) * rYrSc);
doutSlice.at<double>(y, x) = val;
if (val > maxVal)
maxVal = val;
if (val < minVal)
minVal = val;
}
}
}
if (lareaCnt > 0) {
double ra = maxVal - minVal;
if(nearZero(ra)) ra = 1.0;
scale = drange / ra;
for (int y = 0; y < labelBB.height; y++) {
for (int x = 0; x < labelBB.width; x++) {
if (laMat.at<uint16_t>(y, x) == i) {
double val = doutSlice.at<double>(y, x);
val -= minVal;
val *= scale;
val += dmin_tmp;
doutSlice.at<double>(y, x) = val;
}
}
}
}
}
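    //Assign a depth value to every single-pixel component by averaging the non-zero depth values in its neighborhood (or use the mid depth if no neighbor holds a depth value)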
if (!singlePixelAreas.empty()) {
Mat dout_big;
const int extBord = 2;
const int mSi = extBord * 2 + 1;
Size mSi_ = Size(mSi, mSi);
cv::copyMakeBorder(dout_, dout_big, extBord, extBord, extBord, extBord, BORDER_CONSTANT, Scalar(0));
for (auto& i : singlePixelAreas) {
Mat doutBigSlice = dout_big(Rect(i, mSi_));
double dmsum = 0;
int nrN0 = 0;
for (int y = 0; y < mSi; ++y) {
for (int x = 0; x < mSi; ++x) {
if (!nearZero(doutBigSlice.at<double>(y, x))) {
nrN0++;
}
}
}
if (nrN0 == 0) {
dmsum = actDepthMid;
} else {
dmsum = sum(doutBigSlice)[0];
dmsum /= (double) nrN0;
}
dout_.at<double>(i) = dmsum;
}
}
/* Mat normalizedDepth;
cv::normalize(dout, normalizedDepth, 0.1, 1.0, cv::NORM_MINMAX, -1, dout > 0);
namedWindow("Normalized Static Obj Depth One Depth", WINDOW_AUTOSIZE);
imshow("Normalized Static Obj Depth One Depth", normalizedDepth);
waitKey(0);
destroyWindow("Normalized Static Obj Depth One Depth");*/
}
/*Calculates a depth value using the function
z = p1 * (p2 - x)^2 * e^(-x^2 - (y - p3)^2) - 10 * (x / p4 - x^p5 - y^p6) * e^(-x^2 - y^2) - p7 / 3 * e^(-(x + 1)^2 - y^2)
*/
inline double genStereoSequ::getDepthFuncVal(std::vector<double> &pars1, double x, double y) {
if(nearZero(pars1[3])) pars1[3] = 1.0;
double tmp = pars1[1] - x;
tmp *= tmp;
double z = pars1[0] * tmp;
tmp = y - pars1[2];
tmp *= -tmp;
tmp -= x * x;
z *= exp(tmp);
/*double tmp1[4];
tmp1[0] = x / pars1[3];
tmp1[1] = std::pow(x, pars1[4]);
tmp1[2] = std::pow(y, pars1[5]);
tmp1[3] = exp(-x * x - y * y);
z -= 10.0 * (tmp1[0] - tmp1[1] - tmp1[2]) * tmp1[3];*/
z -= 10.0 * (x / pars1[3] - std::pow(x, pars1[4]) - std::pow(y, pars1[5])) * exp(-x * x - y * y);
tmp = x + 1.0;
tmp *= -tmp;
z -= pars1[6] / 3.0 * exp(tmp - y * y);
return z;
}
/*Calculate random parameters for the function generating depth values
There are 7 random parameters p:
z = p1 * (p2 - x)^2 * e^(-x^2 - (y - p3)^2) - 10 * (x / p4 - x^p5 - y^p6) * e^(-x^2 - y^2) - p7 / 3 * e^(-(x + 1)^2 - y^2)
*/
void genStereoSequ::getRandDepthFuncPars(std::vector<std::vector<double>> &pars1, size_t n_pars) {
pars1 = std::vector<std::vector<double>>(n_pars, std::vector<double>(7, 0));
//p1:
std::uniform_real_distribution<double> distribution(0, 10.0);
for (size_t i = 0; i < n_pars; i++) {
pars1[i][0] = distribution(rand_gen);
}
//p2:
distribution = std::uniform_real_distribution<double>(0, 2.0);
for (size_t i = 0; i < n_pars; i++) {
pars1[i][1] = distribution(rand_gen);
}
//p3:
distribution = std::uniform_real_distribution<double>(0, 4.0);
for (size_t i = 0; i < n_pars; i++) {
pars1[i][2] = distribution(rand_gen);
}
//p4:
distribution = std::uniform_real_distribution<double>(0.5, 5.0);
for (size_t i = 0; i < n_pars; i++) {
pars1[i][3] = distribution(rand_gen);
}
//p5 & p6:
distribution = std::uniform_real_distribution<double>(2.0, 7.0);
for (size_t i = 0; i < n_pars; i++) {
pars1[i][4] = round(distribution(rand_gen));
pars1[i][5] = round(distribution(rand_gen));
}
//p7:
distribution = std::uniform_real_distribution<double>(1.0, 40.0);
for (size_t i = 0; i < n_pars; i++) {
pars1[i][6] = distribution(rand_gen);
}
}
/* Adds a few random depth pixels near a given position (no actual depth value, but a part of a mask indicating the depth range (near, mid, far))
unsigned char pixVal In: Value assigned to the random pixel positions
cv::Mat &imgD In/Out: Image holding all depth ranges where the new random depth pixels should be added
cv::Mat &imgSD In/Out: Image holding only one specific depth range where the new random depth pixels should be added
cv::Mat &mask In: Mask for imgD and imgSD (marks backprojected moving objects (with a 1))
cv::Point_<int32_t> &startpos In: Start position (excluding this single location) from where to start adding new depth pixels
cv::Point_<int32_t> &endpos Out: End position where the last depth pixel was set
int32_t &addArea In/Out: Adds the number of newly inserted pixels to the given number
int32_t &maxAreaReg In: Maximum number of specific depth pixels per image region (9x9)
cv::Size &siM1 In: Image size -1
cv::Point_<int32_t> &initSeed In: Initial position of the seed
cv::Rect &vROI In: ROI where it is actually allowed to add new pixels
size_t &nrAdds In/Out: Number of times this function was called for this depth area (including preceding calls to this function)
*/
bool genStereoSequ::addAdditionalDepth(unsigned char pixVal,
cv::Mat &imgD,
cv::Mat &imgSD,
const cv::Mat &mask,
const cv::Mat ®Mask,
cv::Point_<int32_t> &startpos,
cv::Point_<int32_t> &endpos,
int32_t &addArea,
int32_t &maxAreaReg,
cv::Size &siM1,
cv::Point_<int32_t> initSeed,
cv::Rect &vROI,
size_t &nrAdds,
unsigned char &usedDilate,
cv::InputOutputArray neighborRegMask,
unsigned char regIdx) {
const size_t max_iter = 10000;
const size_t midDilateCnt = 300;
Mat neighborRegMask_;
if (!neighborRegMask.empty()) {
neighborRegMask_ = neighborRegMask.getMat();
}
//get possible directions for expanding (max. 8 possibilities) by checking the masks
vector<int32_t> directions;
if ((nrAdds <= max_iter) && !usedDilate && ((nrAdds % midDilateCnt != 0) || (nrAdds < midDilateCnt))) {
if (!neighborRegMask_.empty()) {
directions = getPossibleDirections(startpos, mask, regMask, imgD, siM1, imgSD, true, neighborRegMask, regIdx);
}
else{
directions = getPossibleDirections(startpos, mask, regMask, imgD, siM1, imgSD, true);
}
}
if (directions.empty() || (nrAdds > max_iter) || usedDilate || ((nrAdds % midDilateCnt == 0) && (nrAdds >=
            midDilateCnt)))//Dilate the label if no direction was found or there were already too many iterations
{
int strElmSi = 3, cnt = 0, maxCnt = 10, strElmSiAdd = 0, strElmSiDir[2] = {0, 0};
int32_t siAfterDil = addArea;
while (((siAfterDil - addArea) == 0) && (cnt < maxCnt)) {
cnt++;
Mat element;
int elSel = (int)(rand2() % 3);
strElmSiAdd = (int)(rand2() % INT_MAX) % strElmSi;
strElmSiDir[0] = (int)(rand2() % 2);
strElmSiDir[1] = (int)(rand2() % 2);
switch (elSel) {
case 0:
element = cv::getStructuringElement(MORPH_ELLIPSE, Size(strElmSi + strElmSiDir[0] * strElmSiAdd,
strElmSi + strElmSiDir[1] * strElmSiAdd));
break;
case 1:
element = cv::getStructuringElement(MORPH_RECT, Size(strElmSi + strElmSiDir[0] * strElmSiAdd,
strElmSi + strElmSiDir[1] * strElmSiAdd));
break;
case 2:
element = cv::getStructuringElement(MORPH_CROSS, Size(strElmSi + strElmSiDir[0] * strElmSiAdd,
strElmSi + strElmSiDir[1] * strElmSiAdd));
break;
default:
element = cv::getStructuringElement(MORPH_ELLIPSE, Size(strElmSi, strElmSi));
break;
}
strElmSi += 2;
Mat imgSDdilate;
Mat neighborRegMaskROI;
Mat newImgSDROI;
if (!neighborRegMask_.empty()) {
newImgSDROI = imgSD(vROI) & (neighborRegMask_(vROI) == regIdx);
dilate(newImgSDROI, imgSDdilate, element);
/*namedWindow("specific objLabels without neighbors", WINDOW_AUTOSIZE);
imshow("specific objLabels without neighbors", (newImgSDROI > 0));
namedWindow("specific objLabels with neighbors", WINDOW_AUTOSIZE);
imshow("specific objLabels with neighbors", (imgSD(vROI) > 0));*/
imgSDdilate &= (mask(vROI) == 0) & ((imgD(vROI) == 0) | newImgSDROI);
neighborRegMaskROI = ((imgSDdilate > 0) & Mat::ones(vROI.size(), CV_8UC1)) * regIdx;
/*namedWindow("specific objLabels without neighbors dilated and mask", WINDOW_AUTOSIZE);
imshow("specific objLabels without neighbors dilated and mask", (imgSDdilate > 0));*/
siAfterDil = (int32_t) cv::countNonZero(imgSDdilate);
// imgSDdilate |= imgSD(vROI);//Removed later on
/*namedWindow("specific objLabels with neighbors dilated and mask", WINDOW_AUTOSIZE);
imshow("specific objLabels with neighbors dilated and mask", (imgSDdilate > 0));
waitKey(0);
destroyWindow("specific objLabels without neighbors");
destroyWindow("specific objLabels with neighbors");
destroyWindow("specific objLabels without neighbors dilated and mask");
destroyWindow("specific objLabels with neighbors dilated and mask");*/
} else {
dilate(imgSD(vROI), imgSDdilate, element);
imgSDdilate &= (mask(vROI) == 0) & ((imgD(vROI) == 0) | (imgSD(vROI) > 0));
siAfterDil = (int32_t) cv::countNonZero(imgSDdilate);
}
/*static size_t visualizeMask = 0;
if (visualizeMask % 50 == 0) {
Mat colorMapImg;
// Apply the colormap:
applyColorMap(imgD * 20, colorMapImg, cv::COLORMAP_RAINBOW);
namedWindow("combined ObjLabels1", WINDOW_AUTOSIZE);
imshow("combined ObjLabels1", colorMapImg);
Mat dilImgTh;
cv::threshold(imgSDdilate, dilImgTh, 0, 255, 0);
namedWindow("Dilated1", WINDOW_AUTOSIZE);
imshow("Dilated1", dilImgTh);
Mat onlyDil = (imgSDdilate ^ imgSD(vROI)) * 20 + imgSD(vROI);
applyColorMap(onlyDil, colorMapImg, cv::COLORMAP_HOT);
namedWindow("Dilated", WINDOW_AUTOSIZE);
imshow("Dilated", onlyDil);
waitKey(0);
destroyWindow("combined ObjLabels1");
destroyWindow("Dilated");
destroyWindow("Dilated1");
}
visualizeMask++;*/
if (siAfterDil >= maxAreaReg) {
if (siAfterDil > maxAreaReg) {
int32_t diff = siAfterDil - maxAreaReg;
if (!neighborRegMask_.empty()) {
imgSDdilate ^= newImgSDROI;
}else{
imgSDdilate ^= imgSD(vROI);
}
removeNrFilledPixels(element.size(), vROI.size(), imgSDdilate, diff);
}
if (!neighborRegMask_.empty()) {
neighborRegMaskROI = ((imgSDdilate > 0) & Mat::ones(vROI.size(), CV_8UC1)) * regIdx;
neighborRegMask_(vROI) |= neighborRegMaskROI;
}
imgSD(vROI) |= imgSDdilate;
imgSDdilate *= pixVal;
imgD(vROI) |= imgSDdilate;
addArea = maxAreaReg;
nrAdds++;
usedDilate = 1;
return false;
} else if ((siAfterDil - addArea) > 0) {
if (!neighborRegMask_.empty()) {
neighborRegMask_(vROI) |= neighborRegMaskROI;
imgSDdilate |= imgSD(vROI);
}
imgSDdilate.copyTo(imgSD(vROI));
imgD(vROI) &= (imgSDdilate == 0);
imgSDdilate *= pixVal;
imgD(vROI) |= imgSDdilate;
if ((directions.empty() && ((nrAdds % midDilateCnt != 0) || (nrAdds < midDilateCnt))) ||
(nrAdds > max_iter)) {
usedDilate = 1;
}
nrAdds++;
addArea = siAfterDil;
return true;
} else if ((siAfterDil - addArea) < 0) {
throw SequenceException(
"Generated depth area is smaller after dilation (and using a mask afterwards) than before!");
}
}
if (cnt >= maxCnt) {
return false;
}
} else {
//Get a random direction where to add a pixel
int diri = (int)(rand2() % directions.size());
endpos = startpos;
nextPosition(endpos, directions[diri]);
//Set the pixel
/*if (imgD.at<unsigned char>(endpos) != 0) {
cout << "Found" << endl;
}*/
imgD.at<unsigned char>(endpos) = pixVal;
/*if (imgSD.at<unsigned char>(endpos) != 0) {
cout << "Found" << endl;
}*/
imgSD.at<unsigned char>(endpos) = 1;
if (!neighborRegMask_.empty()) {
neighborRegMask_.at<unsigned char>(endpos) = regIdx;
}
addArea++;
nrAdds++;
if (addArea >= maxAreaReg) {
return false;
}
        //Add additional pixels in the local neighbourhood (other possible directions) of the pixel just added
//and prevent adding new pixels in similar directions compared to the added one
vector<int32_t> extension;
if (!neighborRegMask_.empty()) {
extension = getPossibleDirections(endpos, mask, regMask, imgD, siM1, imgSD, false, neighborRegMask, regIdx);
}else{
extension = getPossibleDirections(endpos, mask, regMask, imgD, siM1, imgSD, false);
}
if (extension.size() >
            1)//Check if we can add additional pixels without blocking the way for the next iteration
{
//Prevent adding additional pixels to the main direction and its immediate neighbor directions
int32_t noExt[3];
noExt[0] = (directions[diri] + 1) % 8;
noExt[1] = directions[diri];
noExt[2] = (directions[diri] + 7) % 8;
for (auto itr = extension.rbegin(); itr != extension.rend(); itr++) {
if ((*itr == noExt[0]) ||
(*itr == noExt[1]) ||
(*itr == noExt[2])) {
extension.erase(std::next(itr).base());
}
}
if (extension.size() > 1) {
//Choose a random number of additional pixels to add (based on possible directions of expansion)
int addsi = (int)(rand2() % (extension.size() + 1));
if (addsi) {
if ((addsi + addArea) > maxAreaReg) {
addsi = maxAreaReg - addArea;
}
const int beginExt = (int)(rand2() % extension.size());
for (int i = 0; i < addsi; i++) {
cv::Point_<int32_t> singleExt = endpos;
const int pos = (beginExt + i) % (int) extension.size();
nextPosition(singleExt, extension[pos]);
//Set the pixel
/*if (imgD.at<unsigned char>(singleExt) != 0) {
cout << "Found" << endl;
}*/
imgD.at<unsigned char>(singleExt) = pixVal;
/*if (imgSD.at<unsigned char>(singleExt) != 0) {
cout << "Found" << endl;
}*/
imgSD.at<unsigned char>(singleExt) = 1;
if (!neighborRegMask_.empty()) {
neighborRegMask_.at<unsigned char>(singleExt) = regIdx;
}
addArea++;
}
}
if (addArea >= maxAreaReg) {
return false;
}
}
}
}
return true;
}
//Get valid directions to expand the depth area given a start position
std::vector<int32_t>
genStereoSequ::getPossibleDirections(cv::Point_<int32_t> &startpos,
const cv::Mat &mask,
const cv::Mat ®Mask,
const cv::Mat &imgD,
const cv::Size &siM1,
const cv::Mat &imgSD,
bool escArea,
cv::InputArray neighborRegMask,
unsigned char regIdx) {
static int maxFixDirChange = 8;
int fixDirChange = 0;
Mat directions;
unsigned char atBorderX = 0, atBorderY = 0;
Mat directions_dist;
vector<int32_t> dirs;
int32_t fixedDir = 0;
bool dirFixed = false;
bool inOwnArea = false;
Mat neighborRegMaskLoc;
if (!neighborRegMask.empty()) {
neighborRegMaskLoc = neighborRegMask.getMat();
}
do {
directions = Mat::ones(3, 3, CV_8UC1);
directions.at<unsigned char>(1,1) = 0;
atBorderX = 0;
atBorderY = 0;
if (startpos.x <= 0) {
directions.col(0) = Mat::zeros(3, 1, CV_8UC1);
atBorderX = 0x1;
}
if (startpos.x >= siM1.width) {
directions.col(2) = Mat::zeros(3, 1, CV_8UC1);
atBorderX = 0x2;
}
if (startpos.y <= 0) {
directions.row(0) = Mat::zeros(1, 3, CV_8UC1);
atBorderY = 0x1;
}
if (startpos.y >= siM1.height) {
directions.row(2) = Mat::zeros(1, 3, CV_8UC1);
atBorderY = 0x2;
}
Range irx, iry, drx, dry;
if (atBorderX) {
const unsigned char atBorderXn = ~atBorderX;
const unsigned char v1 = (atBorderXn &
(unsigned char)0x1);//results in 0x0 (for atBorderX=0x1) or 0x1 (for atBorderX=0x2)
const unsigned char v2 = (atBorderXn & (unsigned char)0x2) + ((atBorderX & (unsigned char)0x2)
>> 1);//results in 0x2 (for atBorderX=0x1) or 0x1 (for atBorderX=0x2)
irx = Range(startpos.x - (int32_t) v1, startpos.x + (int32_t) v2);
drx = Range((int32_t) (~v1 & 0x1), 1 + (int32_t) v2);
if (atBorderY) {
const unsigned char atBorderYn = ~atBorderY;
const unsigned char v3 = (atBorderYn & 0x1);
const unsigned char v4 = (atBorderYn & 0x2) + ((atBorderY & 0x2) >> 1);
iry = Range(startpos.y - (int32_t) v3, startpos.y + (int32_t) v4);
dry = Range((int32_t) (~v3 & 0x1), 1 + (int32_t) v4);
} else {
iry = Range(startpos.y - 1, startpos.y + 2);
dry = Range::all();
}
} else if (atBorderY) {
unsigned char atBorderYn = ~atBorderY;
const unsigned char v3 = (atBorderYn & 0x1);
const unsigned char v4 = (atBorderYn & 0x2) + ((atBorderY & 0x2) >> 1);
iry = Range(startpos.y - (int32_t) v3, startpos.y + (int32_t) v4);
irx = Range(startpos.x - 1, startpos.x + 2);
drx = Range::all();
dry = Range((int32_t) (~v3 & 0x1), 1 + (int32_t) v4);
} else {
irx = Range(startpos.x - 1, startpos.x + 2);
iry = Range(startpos.y - 1, startpos.y + 2);
drx = Range::all();
dry = Range::all();
}
directions.copyTo(directions_dist);
if(neighborRegMaskLoc.empty()) {
directions(dry, drx) &= (imgD(iry, irx) == 0) & (mask(iry, irx) == 0) & (regMask(iry, irx) > 0);
}
else{
directions(dry, drx) &= (imgD(iry, irx) == 0) &
(mask(iry, irx) == 0) &
(regMask(iry, irx) > 0) &
(neighborRegMaskLoc(iry, irx) == 0);
}
if ((sum(directions)[0] == 0) && escArea) {
if(neighborRegMaskLoc.empty()) {
directions_dist(dry, drx) &=
((imgD(iry, irx) == 0) | imgSD(iry, irx)) & (mask(iry, irx) == 0) & (regMask(iry, irx) > 0);
}
else{
directions_dist(dry, drx) &=
((imgD(iry, irx) == 0) | imgSD(iry, irx)) &
(mask(iry, irx) == 0) &
(regMask(iry, irx) > 0) &
((neighborRegMaskLoc(iry, irx) == 0) | (neighborRegMaskLoc(iry, irx) == regIdx));
}
if (sum(directions_dist)[0] != 0) {
if (!dirFixed) {
directions_dist.copyTo(directions);
inOwnArea = true;
} else {
cv::Point_<int32_t> localPos = cv::Point_<int32_t>(1, 1);
nextPosition(localPos, fixedDir);
if (directions_dist.at<unsigned char>(localPos) == 0) {
if (fixDirChange > maxFixDirChange) {
inOwnArea = false;
dirFixed = false;
directions = Mat::zeros(3, 3, CV_8UC1);
} else {
inOwnArea = true;
dirFixed = false;
directions_dist.copyTo(directions);
}
fixDirChange++;
}
}
} else {
inOwnArea = false;
dirFixed = false;
directions = Mat::zeros(3, 3, CV_8UC1);
}
} else {
dirFixed = false;
inOwnArea = false;
}
if (!dirFixed) {
dirs.clear();
for (int32_t i = 0; i < 9; i++) {
if (directions.at<bool>(i)) {
switch (i) {
case 0:
dirs.push_back(0);
break;
case 1:
dirs.push_back(1);
break;
case 2:
dirs.push_back(2);
break;
case 3:
dirs.push_back(7);
break;
case 5:
dirs.push_back(3);
break;
case 6:
dirs.push_back(6);
break;
case 7:
dirs.push_back(5);
break;
case 8:
dirs.push_back(4);
break;
default:
break;
}
}
}
}
if (inOwnArea && !dirs.empty()) {
if (!dirFixed) {
if (dirs.size() == 1) {
fixedDir = dirs[0];
} else {
//Get a random direction where to go next
fixedDir = dirs[rand2() % dirs.size()];
}
dirFixed = true;
}
nextPosition(startpos, fixedDir);
}
} while (inOwnArea);
return dirs;
}
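//Move the given position by one pixel in the given direction (0 = left up, 1 = up, ..., proceeding clockwise to 7 = left)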
void genStereoSequ::nextPosition(cv::Point_<int32_t> &position, int32_t direction) {
switch (direction) {
case 0://direction left up
position.x--;
position.y--;
break;
case 1://direction up
position.y--;
break;
case 2://direction right up
position.x++;
position.y--;
break;
case 3://direction right
position.x++;
break;
case 4://direction right down
position.x++;
position.y++;
break;
case 5://direction down
position.y++;
break;
case 6://direction left down
position.x--;
position.y++;
break;
case 7://direction left
position.x--;
break;
default:
break;
}
}
//Generates correspondences and 3D points in the camera coordinate system (including false matches) from static scene elements
void genStereoSequ::getKeypoints() {
int32_t kSi = csurr.rows;
int32_t posadd = (kSi - 1) / 2;
//Mark used areas (by correspondences, TN, and moving objects) in the second image
Mat cImg2 = Mat::zeros(imgSize.height + kSi - 1, imgSize.width + kSi - 1, CV_8UC1);
vector<size_t> delListCorrs, delList3D;
for (int i = 0; i < actCorrsImg2TPFromLast.cols; i++) {
Point_<int32_t> pt((int32_t) round(actCorrsImg2TPFromLast.at<double>(0, i)),
(int32_t) round(actCorrsImg2TPFromLast.at<double>(1, i)));
Mat s_tmp = cImg2(Rect(pt, Size(kSi, kSi)));
if (s_tmp.at<unsigned char>(posadd, posadd) > 0) {
delListCorrs.push_back((size_t)i);
delList3D.push_back(actCorrsImg12TPFromLast_Idx[i]);
continue;
}
s_tmp.at<unsigned char>(posadd, posadd) = 1;
}
    //Delete correspondences and 3D points that were too close to each other in the 2nd image
int TPfromLastRedu = 0;
if (!delListCorrs.empty()) {
// if(!checkCorr3DConsistency()){
// throw SequenceException("Correspondences are not projections of 3D points!");
// }
TPfromLastRedu = (int)delListCorrs.size();
sort(delList3D.begin(), delList3D.end(),
[](size_t first, size_t second) { return first < second; });//Ascending order
if (!actCorrsImg1TNFromLast_Idx.empty())//Adapt the indices for TN (single keypoints without a match)
{
adaptIndicesNoDel(actCorrsImg1TNFromLast_Idx, delList3D);
}
if (!actCorrsImg2TNFromLast_Idx.empty())//Adapt the indices for TN (single keypoints without a match)
{
adaptIndicesNoDel(actCorrsImg2TNFromLast_Idx, delList3D);
}
adaptIndicesNoDel(actCorrsImg12TPFromLast_Idx, delList3D);
deleteVecEntriesbyIdx(actImgPointCloudFromLast, delList3D);
deleteVecEntriesbyIdx(actCorrsImg12TPFromLast_IdxWorld, delList3D);
sort(delListCorrs.begin(), delListCorrs.end(), [](size_t first, size_t second) { return first < second; });
vector<size_t> delListCorrs_tmp = delListCorrs;
TPfromLastRedu -= deletedepthCatsByIdx(seedsNearFromLast, delListCorrs_tmp, actCorrsImg1TPFromLast);
if(TPfromLastRedu > 0){
TPfromLastRedu -= deletedepthCatsByIdx(seedsMidFromLast, delListCorrs_tmp, actCorrsImg1TPFromLast);
}
if(TPfromLastRedu > 0){
TPfromLastRedu -= deletedepthCatsByIdx(seedsFarFromLast, delListCorrs_tmp, actCorrsImg1TPFromLast);
}
if(TPfromLastRedu > 0){
cout << "Keypoints from last frames which should be deleted were not found!" << endl;
}
deleteVecEntriesbyIdx(actCorrsImg12TPFromLast_Idx, delListCorrs);
deleteMatEntriesByIdx(actCorrsImg1TPFromLast, delListCorrs, false);
deleteMatEntriesByIdx(actCorrsImg2TPFromLast, delListCorrs, false);
// if(!checkCorr3DConsistency()){
// throw SequenceException("Correspondences are not projections of 3D points!");
// }
}
int nrBPTN2 = actCorrsImg2TNFromLast.cols;
int nrBPTN2cnt = 0;
vector<size_t> delListTNlast;
for (int i = 0; i < nrBPTN2; i++) {
Point_<int32_t> pt((int32_t) round(actCorrsImg2TNFromLast.at<double>(0, i)),
(int32_t) round(actCorrsImg2TNFromLast.at<double>(1, i)));
Mat s_tmp = cImg2(Rect(pt, Size(kSi, kSi)));
if (s_tmp.at<unsigned char>(posadd, posadd) > 0) {
delListTNlast.push_back((size_t)i);
nrBPTN2--;
continue;
}
s_tmp.at<unsigned char>(posadd, posadd) = 1;
}
cImg2(Rect(Point(posadd, posadd), imgSize)) |= movObjMask2All;
if (!delListTNlast.empty()) {
sort(delListTNlast.begin(), delListTNlast.end(), [](size_t first, size_t second) { return first < second; });
deleteVecEntriesbyIdx(actCorrsImg2TNFromLast_Idx, delListTNlast);
deleteMatEntriesByIdx(actCorrsImg2TNFromLast, delListTNlast, false);
}
//Get regions of backprojected TN in first image and mark their positions; add true negatives from backprojection to the new outlier data
vector<vector<vector<Point2d>>> x1pTN(3, vector<vector<Point2d>>(3));
Size rSl(imgSize.width / 3, imgSize.height / 3);
delListTNlast.clear();
for (int i = 0; i < actCorrsImg1TNFromLast.cols; i++) {
Point_<int32_t> pt((int32_t) round(actCorrsImg1TNFromLast.at<double>(0, i)),
(int32_t) round(actCorrsImg1TNFromLast.at<double>(1, i)));
Mat s_tmp = corrsIMG(Rect(pt, Size(kSi, kSi)));
if (s_tmp.at<unsigned char>(posadd, posadd) > 0) {
delListTNlast.push_back((size_t)i);
continue;
}
s_tmp += csurr;
// csurr.copyTo(s_tmp);
int yreg_idx = pt.y / rSl.height;
yreg_idx = (yreg_idx > 2) ? 2 : yreg_idx;
int xreg_idx = pt.x / rSl.width;
xreg_idx = (xreg_idx > 2) ? 2 : xreg_idx;
if(pt.y < regROIs[yreg_idx][xreg_idx].y){
yreg_idx--;
}
else if(pt.y >= (regROIs[yreg_idx][xreg_idx].y + regROIs[yreg_idx][xreg_idx].height)){
yreg_idx++;
}
if(pt.x < regROIs[yreg_idx][xreg_idx].x){
xreg_idx--;
}
else if(pt.x >= (regROIs[yreg_idx][xreg_idx].x + regROIs[yreg_idx][xreg_idx].width)){
xreg_idx++;
}
if((int)x1pTN[yreg_idx][xreg_idx].size() >= nrTrueNegRegs[actFrameCnt].at<int32_t>(yreg_idx, xreg_idx)){
delListTNlast.push_back(i);
s_tmp -= csurr;
continue;
}
x1pTN[yreg_idx][xreg_idx].push_back(Point2d(actCorrsImg1TNFromLast.at<double>(0, i), actCorrsImg1TNFromLast.at<double>(1, i)));
}
if (!delListTNlast.empty()) {
sort(delListTNlast.begin(), delListTNlast.end(), [](size_t first, size_t second) { return first < second; });
deleteVecEntriesbyIdx(actCorrsImg1TNFromLast_Idx, delListTNlast);
deleteMatEntriesByIdx(actCorrsImg1TNFromLast, delListTNlast, false);
}
//For visualization
int dispit = 0;
const int dispit_interval = 50;
vector<vector<vector<Point_<int32_t>>>> corrsAllD(3, vector<vector<Point_<int32_t>>>(3));
vector<vector<vector<Point2d>>> corrsAllD2(3, vector<vector<Point2d>>(3));
Point_<int32_t> pt;
Point2d pt2;
Point3d pCam;
vector<vector<vector<Point3d>>> p3DTPnew(3, vector<vector<Point3d>>(3));
vector<vector<vector<Point2d>>> x1TN(3, vector<vector<Point2d>>(3));
vector<vector<vector<Point2d>>> x2TN(3, vector<vector<Point2d>>(3));
vector<vector<vector<double>>> x2TNdistCorr(3, vector<vector<double>>(3));
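    //Generate new TP and TN correspondences for each of the 9 image regions (regions completely covered by moving objects are skipped)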
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
if (movObjHasArea[y][x])
continue;
auto nrNear = (int32_t) floor(
depthsPerRegion[actCorrsPRIdx][y][x].near *
(double) nrTruePosRegs[actFrameCnt].at<int32_t>(y, x));
auto nrFar = (int32_t) floor(
depthsPerRegion[actCorrsPRIdx][y][x].far *
(double) nrTruePosRegs[actFrameCnt].at<int32_t>(y, x));
int32_t nrMid = nrTruePosRegs[actFrameCnt].at<int32_t>(y, x) - nrNear - nrFar;
CV_Assert(nrMid >= 0);
int32_t nrTN = nrTrueNegRegs[actFrameCnt].at<int32_t>(y, x) - (int32_t) x1pTN[y][x].size();
if(nrTN < 0){
std::vector<size_t> delListCorrsTN;
nrTN += deletedepthCatsByNr(x1pTN[y][x], -nrTN, actCorrsImg1TNFromLast, delListCorrsTN);
if(!delListCorrsTN.empty()){
deleteVecEntriesbyIdx(actCorrsImg1TNFromLast_Idx, delListCorrsTN);
deleteMatEntriesByIdx(actCorrsImg1TNFromLast, delListCorrsTN, false);
}
}
int32_t maxSelect = max(3 * nrTruePosRegs[actFrameCnt].at<int32_t>(y, x), 1000);
int32_t maxSelect2 = 50;
int32_t maxSelect3 = 50;
int32_t maxSelect4 = 50;
std::uniform_int_distribution<int32_t> distributionX(regROIs[y][x].x,
regROIs[y][x].x + regROIs[y][x].width - 1);
std::uniform_int_distribution<int32_t> distributionY(regROIs[y][x].y,
regROIs[y][x].y + regROIs[y][x].height - 1);
vector<Point_<int32_t>> corrsNearR, corrsMidR, corrsFarR;
vector<Point2d> corrsNearR2, corrsMidR2, corrsFarR2;
//vector<Point3d> p3DTPnewR, p3DTNnewR;
vector<Point3d> p3DTPnewRNear, p3DTPnewRMid, p3DTPnewRFar;
//vector<Point2d> x1TNR;
corrsNearR.reserve((size_t)nrNear);
corrsMidR.reserve((size_t)nrMid);
corrsFarR.reserve((size_t)nrFar);
p3DTPnew[y][x].reserve((size_t)(nrNear + nrMid + nrFar));
corrsAllD[y][x].reserve((size_t)(nrNear + nrMid + nrFar));
p3DTPnewRNear.reserve((size_t)nrNear);
            p3DTPnewRMid.reserve((size_t)nrMid);
p3DTPnewRFar.reserve((size_t)nrFar);
x1TN[y][x].reserve((size_t)nrTrueNegRegs[actFrameCnt].at<int32_t>(y, x));
x2TN[y][x].reserve((size_t)nrTrueNegRegs[actFrameCnt].at<int32_t>(y, x));
x2TNdistCorr[y][x].reserve((size_t)nrTrueNegRegs[actFrameCnt].at<int32_t>(y, x));
            //Check for backprojected correspondences
nrNear -= (int32_t) seedsNearFromLast[y][x].size();
nrFar -= (int32_t) seedsFarFromLast[y][x].size();
nrMid -= (int32_t) seedsMidFromLast[y][x].size();
if (nrNear < 0)
nrFar += nrNear;
if (nrFar < 0)
nrMid += nrFar;
            //Delete backprojected correspondences if there are too many of them
if(nrMid < 0){
int nrToDel = -1 * (int)nrMid;
nrToDel -= deleteBackProjTPByDepth(seedsMidFromLast[y][x], nrToDel);
nrMid = 0;
if(nrToDel > 0){
nrToDel -= deleteBackProjTPByDepth(seedsFarFromLast[y][x], nrToDel);
nrFar = 0;
if(nrToDel > 0){
nrToDel -= deleteBackProjTPByDepth(seedsNearFromLast[y][x], nrToDel);
nrNear = 0;
}
}
CV_Assert(nrToDel == 0);
}
if(nrNear < 0) nrNear = 0;
if(nrFar < 0) nrFar = 0;
int32_t nrNMF = nrNear + nrMid + nrFar;
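            //Randomly select positions within the region and classify them by their depth (near, mid, far) until the
            // requested numbers of TP are reached or too many attempts failed; positions not visible in the second image are used as TN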
while (((nrNear > 0) || (nrFar > 0) || (nrMid > 0)) && (maxSelect2 > 0) && (maxSelect3 > 0) &&
(maxSelect4 > 0) && (nrNMF > 0)) {
pt.x = distributionX(rand_gen);
pt.y = distributionY(rand_gen);
if (depthAreaMap.at<unsigned char>(pt) == 1) {
maxSelect--;
if ((nrNear <= 0) && (maxSelect >= 0)) continue;
                    //Check if the coordinate is too close to an existing keypoint
Mat s_tmp = corrsIMG(Rect(pt, Size(kSi, kSi)));
if ((s_tmp.at<unsigned char>(posadd, posadd) > 0) ||
(combMovObjLabelsAll.at<unsigned char>(pt) > 0)) {
maxSelect++;
maxSelect2--;
continue;
}
maxSelect2 = 50;
//Check if it is also an inlier in the right image
bool isInl = checkLKPInlier(pt, pt2, pCam, depthMap);
if (isInl) {
Mat s_tmp1 = cImg2(Rect((int) round(pt2.x), (int) round(pt2.y), kSi, kSi));
if (s_tmp1.at<unsigned char>(posadd, posadd) > 0) {
maxSelect++;
maxSelect4--;
continue;
}
s_tmp1.at<unsigned char>(posadd,
posadd) = 1;//The minimum distance between keypoints in the second image is fixed to 1 for new correspondences
maxSelect4 = 50;
}
s_tmp += csurr;
if (!isInl) {
if (nrTN > 0) {
x1TN[y][x].push_back(Point2d((double) pt.x, (double) pt.y));
nrTN--;
} else {
maxSelect++;
maxSelect3--;
s_tmp -= csurr;
}
continue;
}
maxSelect3 = 50;
nrNear--;
nrNMF--;
corrsNearR.push_back(pt);
corrsNearR2.push_back(pt2);
p3DTPnewRNear.push_back(pCam);
} else if (depthAreaMap.at<unsigned char>(pt) == 2) {
maxSelect--;
if ((nrMid <= 0) && (maxSelect >= 0)) continue;
                    //Check if the coordinate is too close to an existing keypoint
Mat s_tmp = corrsIMG(Rect(pt, Size(kSi, kSi)));
if ((s_tmp.at<unsigned char>(posadd, posadd) > 0) ||
(combMovObjLabelsAll.at<unsigned char>(pt) > 0)) {
maxSelect++;
maxSelect2--;
continue;
}
maxSelect2 = 50;
//Check if it is also an inlier in the right image
bool isInl = checkLKPInlier(pt, pt2, pCam, depthMap);
if (isInl) {
Mat s_tmp1 = cImg2(Rect((int) round(pt2.x), (int) round(pt2.y), kSi, kSi));
if (s_tmp1.at<unsigned char>(posadd, posadd) > 0) {
maxSelect++;
maxSelect4--;
continue;
}
s_tmp1.at<unsigned char>(posadd,
posadd) = 1;//The minimum distance between keypoints in the second image is fixed to 1 for new correspondences
maxSelect4 = 50;
}
s_tmp += csurr;
if (!isInl) {
if (nrTN > 0) {
x1TN[y][x].push_back(Point2d((double) pt.x, (double) pt.y));
nrTN--;
} else {
maxSelect++;
maxSelect3--;
s_tmp -= csurr;
}
continue;
}
maxSelect3 = 50;
nrMid--;
nrNMF--;
corrsMidR.push_back(pt);
corrsMidR2.push_back(pt2);
p3DTPnewRMid.push_back(pCam);
} else if (depthAreaMap.at<unsigned char>(pt) == 3) {
maxSelect--;
if ((nrFar <= 0) && (maxSelect >= 0)) continue;
                    //Check if the coordinate is too close to an existing keypoint
Mat s_tmp = corrsIMG(Rect(pt, Size(kSi, kSi)));
if ((s_tmp.at<unsigned char>(posadd, posadd) > 0) ||
(combMovObjLabelsAll.at<unsigned char>(pt) > 0)) {
maxSelect++;
maxSelect2--;
continue;
}
maxSelect2 = 50;
//Check if it is also an inlier in the right image
bool isInl = checkLKPInlier(pt, pt2, pCam, depthMap);
if (isInl) {
Mat s_tmp1 = cImg2(Rect((int) round(pt2.x), (int) round(pt2.y), kSi, kSi));
if (s_tmp1.at<unsigned char>(posadd, posadd) > 0) {
maxSelect++;
maxSelect4--;
continue;
}
s_tmp1.at<unsigned char>(posadd,
posadd) = 1;//The minimum distance between keypoints in the second image is fixed to 1 for new correspondences
maxSelect4 = 50;
}
s_tmp += csurr;
if (!isInl) {
if (nrTN > 0) {
x1TN[y][x].push_back(Point2d((double) pt.x, (double) pt.y));
nrTN--;
} else {
maxSelect++;
maxSelect3--;
s_tmp -= csurr;
}
continue;
}
maxSelect3 = 50;
nrFar--;
nrNMF--;
corrsFarR.push_back(pt);
corrsFarR2.push_back(pt2);
p3DTPnewRFar.push_back(pCam);
} else {
cout << "Depth area not defined! This should not happen!" << endl;
}
//Visualize the masks
if (verbose & SHOW_STATIC_OBJ_CORRS_GEN) {
if (dispit % dispit_interval == 0) {
if(!writeIntermediateImg((corrsIMG > 0), "Static_Corrs_mask_img1_step_" + std::to_string(dispit)) ||
!writeIntermediateImg((cImg2 > 0), "Static_Corrs_mask_img2_step_" + std::to_string(dispit))){
namedWindow("Static Corrs mask img1", WINDOW_AUTOSIZE);
imshow("Static Corrs mask img1", (corrsIMG > 0));
namedWindow("Static Corrs mask img2", WINDOW_AUTOSIZE);
imshow("Static Corrs mask img2", (cImg2 > 0));
waitKey(0);
destroyWindow("Static Corrs mask img1");
destroyWindow("Static Corrs mask img2");
}
}
dispit++;
}
}
size_t corrsNotVisible = x1TN[y][x].size();
size_t foundTPCorrs = corrsNearR.size() + corrsMidR.size() + corrsFarR.size();
//Copy 3D points and correspondences
if (!p3DTPnewRNear.empty()) {
//std::copy(p3DTPnewRNear.begin(), p3DTPnewRNear.end(), p3DTPnew[y][x].end());
p3DTPnew[y][x].insert(p3DTPnew[y][x].end(), p3DTPnewRNear.begin(), p3DTPnewRNear.end());
}
if (!p3DTPnewRMid.empty()) {
//std::copy(p3DTPnewRMid.begin(), p3DTPnewRMid.end(), p3DTPnew[y][x].end());
p3DTPnew[y][x].insert(p3DTPnew[y][x].end(), p3DTPnewRMid.begin(), p3DTPnewRMid.end());
}
if (!p3DTPnewRFar.empty()) {
//std::copy(p3DTPnewRFar.begin(), p3DTPnewRFar.end(), p3DTPnew[y][x].end());
p3DTPnew[y][x].insert(p3DTPnew[y][x].end(), p3DTPnewRFar.begin(), p3DTPnewRFar.end());
}
if (!corrsNearR.empty()) {
//std::copy(corrsNearR.begin(), corrsNearR.end(), corrsAllD[y][x].end());
corrsAllD[y][x].insert(corrsAllD[y][x].end(), corrsNearR.begin(), corrsNearR.end());
}
if (!corrsMidR.empty()) {
//std::copy(corrsMidR.begin(), corrsMidR.end(), corrsAllD[y][x].end());
corrsAllD[y][x].insert(corrsAllD[y][x].end(), corrsMidR.begin(), corrsMidR.end());
}
if (!corrsFarR.empty()) {
//std::copy(corrsFarR.begin(), corrsFarR.end(), corrsAllD[y][x].end());
corrsAllD[y][x].insert(corrsAllD[y][x].end(), corrsFarR.begin(), corrsFarR.end());
}
if (!corrsNearR2.empty()) {
//std::copy(corrsNearR2.begin(), corrsNearR2.end(), corrsAllD2[y][x].end());
corrsAllD2[y][x].insert(corrsAllD2[y][x].end(), corrsNearR2.begin(), corrsNearR2.end());
}
if (!corrsMidR2.empty()) {
//std::copy(corrsMidR2.begin(), corrsMidR2.end(), corrsAllD2[y][x].end());
corrsAllD2[y][x].insert(corrsAllD2[y][x].end(), corrsMidR2.begin(), corrsMidR2.end());
}
if (!corrsFarR2.empty()) {
//std::copy(corrsFarR2.begin(), corrsFarR2.end(), corrsAllD2[y][x].end());
corrsAllD2[y][x].insert(corrsAllD2[y][x].end(), corrsFarR2.begin(), corrsFarR2.end());
}
//Add backprojected TN to the found ones
if(!x1pTN[y][x].empty()){
x1TN[y][x].insert(x1TN[y][x].end(), x1pTN[y][x].begin(), x1pTN[y][x].end());
}
//Generate mask for visualization before adding keypoints
Mat dispMask;
if ((verbose & SHOW_STATIC_OBJ_CORRS_GEN) && !x1TN[y][x].empty()) {
dispMask = (cImg2 > 0);
}
            //For the true negatives already generated in image 1, select corresponding true negatives in image 2
size_t selTN2 = 0;
if (nrBPTN2cnt < nrBPTN2)//First take backprojected TN from the second image
{
for (size_t i = 0; i < x1TN[y][x].size(); i++) {
pt2.x = actCorrsImg2TNFromLast.at<double>(0, nrBPTN2cnt);
pt2.y = actCorrsImg2TNFromLast.at<double>(1, nrBPTN2cnt);
x2TN[y][x].push_back(pt2);
x2TNdistCorr[y][x].push_back(fakeDistTNCorrespondences);
nrBPTN2cnt++;
selTN2++;
if (nrBPTN2cnt >= nrBPTN2)
break;
}
}
std::uniform_int_distribution<int32_t> distributionX2(0, imgSize.width - 1);
std::uniform_int_distribution<int32_t> distributionY2(0, imgSize.height - 1);
if (selTN2 < x1TN[y][x].size())//Select the rest randomly
{
for (size_t i = selTN2; i < x1TN[y][x].size(); i++) {
int max_try = 10;
while (max_try > 0) {
pt.x = distributionX2(rand_gen);
pt.y = distributionY2(rand_gen);
Mat s_tmp = cImg2(Rect(pt, Size(kSi, kSi)));
if (s_tmp.at<unsigned char>(posadd, posadd) > 0) {
max_try--;
continue;
}
// csurr.copyTo(s_tmp);
s_tmp.at<unsigned char>(posadd, posadd) = 1;
x2TN[y][x].push_back(Point2d((double) pt.x, (double) pt.y));
x2TNdistCorr[y][x].push_back(fakeDistTNCorrespondences);
break;
}
}
while (x1TN[y][x].size() > x2TN[y][x].size()) {
Mat s_tmp = corrsIMG(Rect(Point_<int32_t>((int32_t) round(x1TN[y][x].back().x),
(int32_t) round(x1TN[y][x].back().y)), Size(kSi, kSi)));
s_tmp -= csurr;
x1TN[y][x].pop_back();
nrTN++;
}
}
//Visualize the mask afterwards
if ((verbose & SHOW_STATIC_OBJ_CORRS_GEN) && !x1TN[y][x].empty()) {
if (!x2TN[y][x].empty()) {
Mat dispMask2 = (cImg2 > 0);
vector<Mat> channels;
Mat b = Mat::zeros(dispMask2.size(), CV_8UC1);
channels.push_back(b);
channels.push_back(dispMask);
channels.push_back(dispMask2);
Mat img3c;
merge(channels, img3c);
if(!writeIntermediateImg(img3c, "Static_rand_TN_corrs_mask_img2")){
namedWindow("Static rand TN Corrs mask img2", WINDOW_AUTOSIZE);
imshow("Static rand TN Corrs mask img2", img3c);
waitKey(0);
destroyWindow("Static rand TN Corrs mask img2");
}
}
}
//Generate random TN in image 1
if ((nrTN > 0) && (nrBPTN2cnt < nrBPTN2))//Take backprojected TN from the second image if available
{
//Generate mask for visualization before adding keypoints
if (verbose & SHOW_STATIC_OBJ_CORRS_GEN) {
dispMask = (corrsIMG > 0);
}
int32_t nrTN_tmp = nrTN;
for (int32_t i = 0; i < nrTN_tmp; i++) {
int max_try = 10;
while (max_try > 0) {
pt.x = distributionX(rand_gen);
pt.y = distributionY(rand_gen);
Mat s_tmp = corrsIMG(Rect(pt, Size(kSi, kSi)));
if ((s_tmp.at<unsigned char>(posadd, posadd) > 0) ||
(combMovObjLabelsAll.at<unsigned char>(pt) > 0)) {
max_try--;
continue;
}
csurr.copyTo(s_tmp);
x1TN[y][x].push_back(Point2d((double) pt.x, (double) pt.y));
pt2.x = actCorrsImg2TNFromLast.at<double>(0, nrBPTN2cnt);
pt2.y = actCorrsImg2TNFromLast.at<double>(1, nrBPTN2cnt);
nrBPTN2cnt++;
x2TN[y][x].push_back(pt2);
x2TNdistCorr[y][x].push_back(fakeDistTNCorrespondences);
nrTN--;
break;
}
if (nrBPTN2cnt >= nrBPTN2)
break;
}
//Visualize the mask afterwards
if (verbose & SHOW_STATIC_OBJ_CORRS_GEN) {
Mat dispMask2 = (cImg2 > 0);
vector<Mat> channels;
Mat b = Mat::zeros(dispMask2.size(), CV_8UC1);
channels.push_back(b);
channels.push_back(dispMask);
channels.push_back(dispMask2);
Mat img3c;
merge(channels, img3c);
if(!writeIntermediateImg(img3c, "Static_rand_TN_corrs_mask_img1")){
namedWindow("Static rand TN Corrs mask img1", WINDOW_AUTOSIZE);
imshow("Static rand TN Corrs mask img1", img3c);
waitKey(0);
destroyWindow("Static rand TN Corrs mask img1");
}
}
}
//Get the rest of TN correspondences
if (nrTN > 0) {
std::vector<Point2d> x1TN_tmp, x2TN_tmp;
std::vector<double> x2TNdistCorr_tmp;
Mat maskImg1;
copyMakeBorder(combMovObjLabelsAll, maskImg1, posadd, posadd, posadd, posadd, BORDER_CONSTANT,
Scalar(0));
maskImg1 |= corrsIMG;
//Generate mask for visualization before adding keypoints
Mat dispMaskImg2;
Mat dispMaskImg1;
if (verbose & SHOW_STATIC_OBJ_CORRS_GEN) {
dispMaskImg2 = (cImg2 > 0);
dispMaskImg1 = (maskImg1 > 0);
}
nrTN = genTrueNegCorrs(nrTN, distributionX, distributionY, distributionX2, distributionY2, x1TN_tmp,
x2TN_tmp, x2TNdistCorr_tmp, maskImg1, cImg2, depthMap);
//Visualize the mask afterwards
if (verbose & SHOW_STATIC_OBJ_CORRS_GEN) {
Mat dispMask2Img2 = (cImg2 > 0);
Mat dispMask2Img1 = (maskImg1 > 0);
vector<Mat> channels, channels1;
Mat b = Mat::zeros(dispMask2Img2.size(), CV_8UC1);
channels.push_back(b);
channels.push_back(dispMaskImg2);
channels.push_back(dispMask2Img2);
channels1.push_back(b);
channels1.push_back(dispMaskImg1);
channels1.push_back(dispMask2Img1);
Mat img3c, img3c1;
merge(channels, img3c);
merge(channels1, img3c1);
if(!writeIntermediateImg(img3c1, "Static_rand_img1_rand_img2_TN_corrs_mask_img1") ||
!writeIntermediateImg(img3c, "Static_rand_img1_rand_img2_TN_corrs_mask_img2")){
namedWindow("Static rand img1 rand img2 TN Corrs mask img1", WINDOW_AUTOSIZE);
imshow("Static rand img1 rand img2 TN Corrs mask img1", img3c1);
namedWindow("Static rand img1 rand img2 TN Corrs mask img2", WINDOW_AUTOSIZE);
imshow("Static rand img1 rand img2 TN Corrs mask img2", img3c);
waitKey(0);
destroyWindow("Static rand img1 rand img2 TN Corrs mask img1");
destroyWindow("Static rand img1 rand img2 TN Corrs mask img2");
}
}
if (!x1TN_tmp.empty()) {
corrsIMG(Rect(Point(posadd, posadd), imgSize)) |=
maskImg1(Rect(Point(posadd, posadd), imgSize)) & (combMovObjLabelsAll == 0);
//copy(x1TN_tmp.begin(), x1TN_tmp.end(), x1TN[y][x].end());
x1TN[y][x].insert(x1TN[y][x].end(), x1TN_tmp.begin(), x1TN_tmp.end());
//copy(x2TN_tmp.begin(), x2TN_tmp.end(), x2TN[y][x].end());
x2TN[y][x].insert(x2TN[y][x].end(), x2TN_tmp.begin(), x2TN_tmp.end());
//copy(x2TNdistCorr_tmp.begin(), x2TNdistCorr_tmp.end(), x2TNdistCorr[y][x].end());
x2TNdistCorr[y][x].insert(x2TNdistCorr[y][x].end(), x2TNdistCorr_tmp.begin(),
x2TNdistCorr_tmp.end());
}
}
//Adapt the number of TP and TN in the next region based on the remaining number of TP and TN of the current region
adaptNRCorrespondences(nrNMF, nrTN, corrsNotVisible, foundTPCorrs, x, 0, y);
}
}
//Store correspondences
actImgPointCloud.clear();
distTNtoReal.clear();
size_t nrTPCorrs = 0, nrTNCorrs = 0;
vector<vector<size_t>> nrTPperR(3, vector<size_t>(3, 0)), nrTNperR(3, vector<size_t>(3,
0));//For checking against given values
for (size_t y = 0; y < 3; y++) {
for (size_t x = 0; x < 3; x++) {
nrTPperR[y][x] = corrsAllD[y][x].size();
nrTPCorrs += nrTPperR[y][x];
nrTNperR[y][x] = x1TN[y][x].size();
nrTNCorrs += nrTNperR[y][x];
}
}
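    //Allocate matrices of homogeneous image coordinates (3 x number of correspondences) for the new TP and TN correspondences in both images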
actCorrsImg1TP = Mat::ones(3, (int)nrTPCorrs, CV_64FC1);
actCorrsImg2TP = Mat::ones(3, (int)nrTPCorrs, CV_64FC1);
actCorrsImg1TN = Mat::ones(3, (int)nrTNCorrs, CV_64FC1);
actCorrsImg2TN = Mat::ones(3, (int)nrTNCorrs, CV_64FC1);
int cnt = 0, cnt2 = 0;
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
if (!p3DTPnew[y][x].empty()) {
//std::copy(p3DTPnew[y][x].begin(), p3DTPnew[y][x].end(), actImgPointCloud.end());
actImgPointCloud.insert(actImgPointCloud.end(), p3DTPnew[y][x].begin(), p3DTPnew[y][x].end());
}
if (!x2TNdistCorr[y][x].empty()) {
//std::copy(x2TNdistCorr[y][x].begin(), x2TNdistCorr[y][x].end(), distTNtoReal.end());
distTNtoReal.insert(distTNtoReal.end(), x2TNdistCorr[y][x].begin(), x2TNdistCorr[y][x].end());
}
for (size_t i = 0; i < corrsAllD[y][x].size(); i++) {
actCorrsImg1TP.at<double>(0, cnt) = (double) corrsAllD[y][x][i].x;
actCorrsImg1TP.at<double>(1, cnt) = (double) corrsAllD[y][x][i].y;
actCorrsImg2TP.at<double>(0, cnt) = corrsAllD2[y][x][i].x;
actCorrsImg2TP.at<double>(1, cnt) = corrsAllD2[y][x][i].y;
cnt++;
}
for (size_t i = 0; i < x1TN[y][x].size(); i++) {
actCorrsImg1TN.at<double>(0, cnt2) = x1TN[y][x][i].x;
actCorrsImg1TN.at<double>(1, cnt2) = x1TN[y][x][i].y;
actCorrsImg2TN.at<double>(0, cnt2) = x2TN[y][x][i].x;
actCorrsImg2TN.at<double>(1, cnt2) = x2TN[y][x][i].y;
cnt2++;
}
}
}
//Check number of static TP and TN per region and the overall inlier ratio
if(verbose & PRINT_WARNING_MESSAGES) {
size_t nrCorrsR = 0, nrCorrsRGiven = 0;
size_t nrTPCorrsAll = nrTPCorrs;
for (int y = 0; y < 3; ++y) {
for (int x = 0; x < 3; ++x) {
nrTPCorrsAll += seedsNearFromLast[y][x].size();
nrTPCorrsAll += seedsMidFromLast[y][x].size();
nrTPCorrsAll += seedsFarFromLast[y][x].size();
}
}
nrCorrsR = nrTPCorrsAll + nrTNCorrs;
nrCorrsRGiven = (size_t) floor(sum(nrTruePosRegs[actFrameCnt])[0] + sum(nrTrueNegRegs[actFrameCnt])[0]);
if (nrCorrsR != nrCorrsRGiven) {
double chRate = (double) nrCorrsR / (double) nrCorrsRGiven;
if ((chRate < 0.90) || (chRate > 1.10)) {
cout << "Number of correspondences on static objects is " << 100.0 * (chRate - 1.0)
<< "% different to given values!" << endl;
cout << "Actual #: " << nrCorrsR << " Given #: " << nrCorrsRGiven << endl;
Mat baproTN = Mat::zeros(3, 3, CV_32SC1);
for (int k = 0; k < 3; ++k) {
for (int k1 = 0; k1 < 3; ++k1) {
size_t allnrTPperR = nrTPperR[k][k1] +
seedsNearFromLast[k][k1].size() +
seedsMidFromLast[k][k1].size() +
seedsFarFromLast[k][k1].size();
if ((int32_t) allnrTPperR != nrTruePosRegs[actFrameCnt].at<int32_t>(k, k1)) {
cout << "# of TP for static region (x, y): (" <<
k1 <<
", " << k
<< ") differs by "
<< (int32_t) allnrTPperR - nrTruePosRegs[actFrameCnt].at<int32_t>(k, k1)
<<
" correspondences (Actual #: " << allnrTPperR
<< " Given #: " << nrTruePosRegs[actFrameCnt].at<int32_t>(k, k1) << ")"
<< endl;
}
if ((int32_t) nrTNperR[k][k1] != nrTrueNegRegs[actFrameCnt].at<int32_t>(k, k1)) {
cout << "# of TN for static region (x, y): (" <<
k1 <<
", " << k
<< ") differs by "
<< (int32_t) nrTNperR[k][k1] - nrTrueNegRegs[actFrameCnt].at<int32_t>(k, k1)
<<
" correspondences (Actual #: " << nrTNperR[k][k1]
<< " Given #: " << nrTrueNegRegs[actFrameCnt].at<int32_t>(k, k1) << ")"
<< endl;
}
}
}
}
}
double inlRatDiffSR = (double) nrTPCorrsAll / ((double) nrCorrsR + DBL_EPSILON) - inlRat[actFrameCnt];
double testVal = min((double) nrCorrsR / 100.0, 1.0) * inlRatDiffSR / 300.0;
if (!nearZero(testVal)) {
cout << "Inlier ratio of static correspondences differs from global inlier ratio (0 - 1.0) by "
<< inlRatDiffSR << endl;
}
}
}
//Reduce the number of TP and TN correspondences of the next moving objects/image regions for which correspondences
// are generated, based on the number of TP and TN that could not be generated for the current
// moving object/image region because of too little space (minimum distance between keypoints)
void genStereoSequ::adaptNRCorrespondences(int32_t nrTP,
int32_t nrTN,
size_t corrsNotVisible,
size_t foundTPCorrs,
int idx_x,
int32_t nr_movObj,
int y) {
int idx_xy, maxCnt;
vector<int32_t *> ptrTP, ptrTN;
if (nr_movObj == 0) {
idx_xy = 3 * y + idx_x;
maxCnt = 9;
for (int y_ = 0; y_ < 3; ++y_) {
for (int x_ = 0; x_ < 3; ++x_) {
ptrTP.push_back(&nrTruePosRegs[actFrameCnt].at<int32_t>(y_, x_));
ptrTN.push_back(&nrTrueNegRegs[actFrameCnt].at<int32_t>(y_, x_));
}
}
} else {
idx_xy = idx_x;
maxCnt = nr_movObj;
for (auto it = actTPPerMovObj.begin(); it != actTPPerMovObj.end(); it++) {
ptrTP.push_back(&(*it));
}
for (auto it = actTNPerMovObj.begin(); it != actTNPerMovObj.end(); it++) {
ptrTN.push_back(&(*it));
}
}
if (((nrTP <= 0) && (nrTN <= 0)) || (idx_xy >= (maxCnt - 1))) {
return;
}
if (nrTP < 0) {
nrTP = 0;
}
if (nrTN < 0) {
nrTN = 0;
}
if((*ptrTP[idx_xy] + *ptrTN[idx_xy]) == 0)
return;
double reductionFactor = (double) (*ptrTP[idx_xy] - nrTP + *ptrTN[idx_xy] - nrTN) /
(double) (*ptrTP[idx_xy] + *ptrTN[idx_xy]);
    //Incorporate the fraction of features that are not visible in cam2
if ((corrsNotVisible + foundTPCorrs) > 0) {
reductionFactor *= (double) (corrsNotVisible + foundTPCorrs) / ((double) foundTPCorrs + 0.001);
}
reductionFactor = reductionFactor > 1.0 ? 1.0 : reductionFactor;
reductionFactor = reductionFactor < 0.33 ? 1.0 : reductionFactor;
for (int j = idx_xy + 1; j < maxCnt; ++j) {
*ptrTP[j] = (int32_t) (round((double) (*ptrTP[j]) * reductionFactor));
*ptrTN[j] = (int32_t) round((double) (*ptrTN[j]) * reductionFactor);
}
//Change the number of TP and TN to correct the overall inlier ratio of moving objects / image regions
// (as the desired inlier ratio of the current object/region is not reached)
int32_t next_corrs = *ptrTN[idx_xy + 1] + *ptrTP[idx_xy + 1];
int rest = nrTP + nrTN;
if (rest > next_corrs) {
if ((double) next_corrs / (double) rest > 0.5) {
*ptrTP[idx_xy + 1] = nrTP;
*ptrTN[idx_xy + 1] = nrTN;
} else {
for (int j = idx_xy + 2; j < maxCnt; ++j) {
next_corrs = *ptrTN[j] + *ptrTP[j];
if (rest > next_corrs) {
if ((double) next_corrs / (double) rest > 0.5) {
*ptrTP[j] = nrTP;
*ptrTN[j] = nrTN;
break;
} else {
continue;
}
} else {
reductionFactor = 1.0 - (double) rest / (double) next_corrs;
*ptrTP[j] = (int32_t) round((double) (*ptrTP[j]) * reductionFactor);
*ptrTN[j] = (int32_t) round((double) (*ptrTN[j]) * reductionFactor);
*ptrTP[j] += nrTP;
*ptrTN[j] += nrTN;
break;
}
}
}
} else if(next_corrs > 0){
reductionFactor = 1.0 - (double) rest / (double) next_corrs;
*ptrTP[idx_xy + 1] = (int32_t) round((double) (*ptrTP[idx_xy + 1]) * reductionFactor);
*ptrTN[idx_xy + 1] = (int32_t) round((double) (*ptrTN[idx_xy + 1]) * reductionFactor);
*ptrTP[idx_xy + 1] += nrTP;
*ptrTN[idx_xy + 1] += nrTN;
}
}
/*Generates a number of nrTN true negative correspondences with a given x- & y- distribution (including the range) in both
images. True negative correspondences are only created in areas where the values around the selected locations in the masks
for images 1 and 2 (img1Mask and img2Mask) are zero (indicating that there is no neighboring correspondence nearby).
nrTN In: Number of true negative correspondences to create
distributionX In: x-distribution and value range in the first image
distributionY In: y-distribution and value range in the first image
distributionX2 In: x-distribution and value range in the second image
distributionY2 In: y-distribution and value range in the second image
x1TN Out: True negative keypoints in the first image
x2TN Out: True negative keypoints in the second image
x2TNdistCorr	Out: Distance of a TN keypoint in the second image to its true positive location. If the value equals fakeDistTNCorrespondences, the TN was generated completely at random.
img1Mask In/Out: Mask marking not usable regions / areas around already selected correspondences in camera 1
img2Mask In/Out: Mask marking not usable regions / areas around already selected correspondences in camera 2
Return value: Number of true negatives that could not be selected due to area restrictions.
*/
int32_t genStereoSequ::genTrueNegCorrs(int32_t nrTN,
std::uniform_int_distribution<int32_t> &distributionX,
std::uniform_int_distribution<int32_t> &distributionY,
std::uniform_int_distribution<int32_t> &distributionX2,
std::uniform_int_distribution<int32_t> &distributionY2,
std::vector<cv::Point2d> &x1TN,
std::vector<cv::Point2d> &x2TN,
std::vector<double> &x2TNdistCorr,
cv::Mat &img1Mask,
cv::Mat &img2Mask,
cv::Mat &usedDepthMap)/*,
cv::InputArray labelMask)*/
{
int32_t kSi = csurr.rows;
int32_t posadd = (kSi - 1) / 2;
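    //Standard deviations for displacing the reprojected location in the second image to create a TN:
    // at least 15 pixels (derived from the given TN error distribution) and at least 1/48 of the
    // corresponding image dimension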
double minDistr = abs(pars.TNErrDistr.first) + pars.TNErrDistr.second;
minDistr = nearZero(minDistr - 10.0) ? 15.0:max((3.5 * minDistr), 15.0);
std::normal_distribution<double> distributionNX2(0, max(static_cast<double>(imgSize.width) / 48., minDistr));
std::normal_distribution<double> distributionNY2(0, max(static_cast<double>(imgSize.height) / 48., minDistr));
int maxSelect2 = 75;
int maxSelect3 = max(3 * nrTN, 500);
Point pt;
Point2d pt2;
Point3d pCam;
/*Mat optLabelMask;//Optional mask for moving objects to select only TN on moving objects in the first image
if(labelMask.empty())
{
optLabelMask = Mat::ones(imgSize, CV_8UC1);
} else
{
optLabelMask = labelMask.getMat();
}*/
while ((nrTN > 0) && (maxSelect2 > 0) && (maxSelect3 > 0)) {
pt.x = distributionX(rand_gen);
pt.y = distributionY(rand_gen);
Mat s_tmp = img1Mask(Rect(pt, Size(kSi, kSi)));
if ((s_tmp.at<unsigned char>(posadd, posadd) > 0))// || (optLabelMask.at<unsigned char>(pt) == 0))
{
maxSelect2--;
continue;
}
maxSelect2 = 75;
s_tmp += csurr;
x1TN.emplace_back(Point2d((double) pt.x, (double) pt.y));
int max_try = 10;
double perfDist = fakeDistTNCorrespondences;
if (!checkLKPInlier(pt, pt2, pCam,
usedDepthMap))//Take a random corresponding point in the second image if the reprojection is not visible to get a TN
{
while (max_try > 0) {
pt.x = distributionX2(rand_gen);
pt.y = distributionY2(rand_gen);
Mat s_tmp1 = img2Mask(Rect(pt, Size(kSi, kSi)));
if (s_tmp1.at<unsigned char>(posadd, posadd) > 0) {
max_try--;
continue;
}
//s_tmp1 += csurr;
s_tmp1.at<unsigned char>(posadd, posadd)++;
break;
}
pt2 = Point2d((double) pt.x, (double) pt.y);
} else//Distort the reprojection in the second image to get a TN
{
Point2d ptd;
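            //Generate a random offset whose magnitude is at least about 3 pixels in at least one direction
            // (components near zero are resampled and pushed away from zero) so that the shifted point no
            // longer corresponds to the true reprojection; retry if the shifted point leaves the image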
while (max_try > 0) {
int maxAtBorder = 10;
do {
do {
do {
ptd.x = distributionNX2(rand_gen);
}while(nearZero(ptd.x, 1.0));
ptd.x += 0.75 * ptd.x / abs(ptd.x);
ptd.x *= 1.5;
do {
ptd.y = distributionNY2(rand_gen);
}while(nearZero(ptd.y, 1.0));
ptd.y += 0.75 * ptd.y / abs(ptd.y);
ptd.y *= 1.5;
} while ((abs(ptd.x) < 3.0) && (abs(ptd.y) < 3.0));
pt2 += ptd;
maxAtBorder--;
} while (((pt2.x < 0) || (pt2.x > (double) (imgSize.width - 1)) ||
(pt2.y < 0) || (pt2.y > (double) (imgSize.height - 1))) && (maxAtBorder > 0));
if (maxAtBorder <= 0) {
max_try = 0;
break;
}
Mat s_tmp1 = img2Mask(Rect((int) round(pt2.x), (int) round(pt2.y), kSi, kSi));
if (s_tmp1.at<unsigned char>(posadd, posadd) > 0) {
max_try--;
continue;
}
//s_tmp1 += csurr;
s_tmp1.at<unsigned char>(posadd, posadd)++;
perfDist = norm(ptd);
break;
}
}
if (max_try <= 0) {
maxSelect3--;
x1TN.pop_back();
s_tmp -= csurr;
continue;
}
x2TN.push_back(pt2);
x2TNdistCorr.push_back(perfDist);
nrTN--;
}
return nrTN;
}
//Check if the given point in the first camera is also visible in the second camera
//Calculates the 3D-point in the camera coordinate system and the corresponding point in the second image
bool
genStereoSequ::checkLKPInlier(cv::Point_<int32_t> pt, cv::Point2d &pt2, cv::Point3d &pCam, cv::Mat &usedDepthMap) {
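    //Backproject the image point to 3D using the stored depth and reproject it into the second camera:
    // pCam = s * K1^-1 * [u, v, 1]^T with the scale s chosen such that the z-component equals the depth value,
    // x2 ~ K2 * (R * pCam + t); the point counts as visible if x2 (after dehomogenization) lies inside image 2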
Mat x = (Mat_<double>(3, 1) << (double) pt.x, (double) pt.y, 1.0);
double depth = usedDepthMap.at<double>(pt);
if (depth < 0) {
throw SequenceException("Found negative depth value!");
}
x = K1i * x;
if(nearZero(x.at<double>(2))) return false;
x *= depth / x.at<double>(2);
pCam = Point3d(x);
Mat x2 = K2 * (actR * x + actT);
if(nearZero(x2.at<double>(2))) return false;
x2 /= x2.at<double>(2);
pt2 = Point2d(x2.rowRange(0, 2));
if ((pt2.x < 0) || (pt2.x > (double) (imgSize.width - 1)) ||
(pt2.y < 0) || (pt2.y > (double) (imgSize.height - 1))) {
return false;
}
return true;
}
//Calculate the initial number, size, and positions of moving objects in the image
void genStereoSequ::getNrSizePosMovObj() {
//size_t nrMovObjs;//Number of moving objects in the scene
//cv::InputArray startPosMovObjs;//Possible starting positions of moving objects in the image (must be 3x3 boolean (CV_8UC1))
//std::pair<double, double> relAreaRangeMovObjs;//Relative area range of moving objects. Area range relative to the image area at the beginning.
if (pars.nrMovObjs == 0) {
return;
}
if (pars.startPosMovObjs.empty() || (cv::sum(pars.startPosMovObjs)[0] == 0)) {
startPosMovObjs = Mat::zeros(3, 3, CV_8UC1);
while (nearZero(cv::sum(startPosMovObjs)[0])) {
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
startPosMovObjs.at<unsigned char>(y, x) = (unsigned char) (rand2() % 2);
}
}
}
} else {
startPosMovObjs = pars.startPosMovObjs;
}
    //Check if the input parameters are valid and adapt them if they are not
int nrStartA = 0;
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
if (startPosMovObjs.at<unsigned char>(y, x)) {
nrStartA++;
}
}
}
//Calculate the minimal usable image area
int imgArea = imgSize.area();
for (auto& j : stereoImgsOverlapMask) {
int overlapArea = cv::countNonZero(j);
if(overlapArea < imgArea){
imgArea = overlapArea;
}
}
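    //Maximum number of moving objects per seeded region such that all requested objects fit into the enabled
    // start regions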
maxOPerReg = (int) ceil((float) pars.nrMovObjs / (float) nrStartA);
int area23 = 2 * imgArea / 3;//The moving objects should not be larger than that
minOArea = max((int) round(pars.relAreaRangeMovObjs.first * (double) imgArea), 3);
maxOArea = max((int) round(pars.relAreaRangeMovObjs.second * (double) imgArea), minOArea + 1);
    //The maximum image area covered by moving objects should not exceed 2/3 of the image
if (minOArea * (int) pars.nrMovObjs > area23) {
adaptMinNrMovObjsAndNrMovObjs((size_t) (area23 / minOArea));
maxOArea = minOArea;
minOArea = minOArea / 2;
}
    //If more than 2 seeds for moving objects are within one of the 9 image regions, all moving objects in a region should cover no more than 2/3 of the region
    //This helps to reduce the probability that, during the generation of the moving objects (beginning at the seed positions), one object blocks the generation of another
    //For less than 3 objects per region, there shouldn't be a problem as they can grow outside an image region and the probability of blocking a different moving object is not that high
if (maxOPerReg > 2) {
int areaPerReg23 = area23 / 9;
if (maxOPerReg * minOArea > areaPerReg23) {
if (minOArea > areaPerReg23) {
maxOArea = areaPerReg23;
minOArea = maxOArea / 2;
maxOPerReg = 1;
} else {
maxOPerReg = areaPerReg23 / minOArea;
maxOArea = minOArea;
minOArea = minOArea / 2;
}
adaptMinNrMovObjsAndNrMovObjs((size_t) (maxOPerReg * nrStartA));
}
} else {
maxOPerReg = 2;
}
//Get the number of moving object seeds per region
Mat useableAreas = (fracUseableTPperRegion[0] > actFracUseableTPperRegionTH) & (startPosMovObjs > 0);
int maxMovObj = countNonZero(useableAreas) * maxOPerReg;
int nrMovObjs_tmp = (int) pars.nrMovObjs;
if(nrMovObjs_tmp > maxMovObj){
nrMovObjs_tmp = maxMovObj;
}
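    //Randomly distribute the moving object seeds over the enabled start regions: in every pass, each region
    // that is still usable (enough usable TP area and below the per-region maximum) receives another seed
    // with a probability of 0.5 until all objects are assigned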
Mat nrPerReg = Mat::zeros(3, 3, CV_8UC1);
while (nrMovObjs_tmp > 0) {
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
if (startPosMovObjs.at<unsigned char>(y, x) &&
(maxOPerReg > (int) nrPerReg.at<unsigned char>(y, x)) &&
(fracUseableTPperRegion[0].at<double>(y,x) > actFracUseableTPperRegionTH)) {
int addit = (int)(rand2() % 2);
if (addit) {
nrPerReg.at<unsigned char>(y, x)++;
nrMovObjs_tmp--;
if (nrMovObjs_tmp == 0)
break;
}
}
}
if (nrMovObjs_tmp == 0)
break;
}
}
//Get the area for each moving object
int maxObjsArea = min(area23, maxOArea * (int) pars.nrMovObjs);
maxOArea = maxObjsArea / (int) pars.nrMovObjs;
std::uniform_int_distribution<int32_t> distribution((int32_t) minOArea, (int32_t) maxOArea);
movObjAreas = vector<vector<vector<int32_t>>>(3, vector<vector<int32_t>>(3));
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
int nr_tmp = (int) nrPerReg.at<unsigned char>(y, x);
for (int i = 0; i < nr_tmp; i++) {
movObjAreas[y][x].push_back(distribution(rand_gen));
}
}
}
//Get seed positions
std::vector<std::vector<std::pair<bool,cv::Rect>>> validRects;
getValidImgRegBorders(stereoImgsOverlapMask[0], validRects);
minODist = imgSize.height / (3 * (maxOPerReg + 1));
movObjSeeds = vector<vector<vector<cv::Point_<int32_t>>>>(3, vector<vector<cv::Point_<int32_t>>>(3));
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
int nr_tmp = (int) nrPerReg.at<unsigned char>(y, x);
if (nr_tmp > 0) {
if(!validRects[y][x].first){
nrPerReg.at<unsigned char>(y, x) = 0;
movObjAreas[y][x].clear();
continue;
}
rand_gen = std::default_random_engine(
rand2());//Prevent getting the same starting positions for equal ranges
std::uniform_int_distribution<int> distributionX(validRects[y][x].second.x,
validRects[y][x].second.x + validRects[y][x].second.width - 1);
std::uniform_int_distribution<int> distributionY(validRects[y][x].second.y,
validRects[y][x].second.y + validRects[y][x].second.height - 1);
cv::Point_<int32_t> seedPt(distributionX(rand_gen), distributionY(rand_gen));
while(!checkPointValidity(stereoImgsOverlapMask[0], seedPt)){
seedPt = cv::Point_<int32_t>(distributionX(rand_gen), distributionY(rand_gen));
}
movObjSeeds[y][x].push_back(seedPt);
nr_tmp--;
if (nr_tmp > 0) {
vector<int> xposes, yposes;
xposes.push_back(movObjSeeds[y][x].back().x);
yposes.push_back(movObjSeeds[y][x].back().y);
while (nr_tmp > 0) {
vector<double> xInterVals, yInterVals;
vector<double> xWeights, yWeights;
buildDistributionRanges(xposes, yposes, x, y, xInterVals, xWeights, yInterVals, yWeights, &validRects);
//Create piecewise uniform distribution and get a random seed
piecewise_constant_distribution<double> distrPieceX(xInterVals.begin(), xInterVals.end(),
xWeights.begin());
piecewise_constant_distribution<double> distrPieceY(yInterVals.begin(), yInterVals.end(),
yWeights.begin());
seedPt = cv::Point_<int32_t>(cv::Point_<int32_t>((int32_t) floor(distrPieceX(rand_gen)),
(int32_t) floor(distrPieceY(rand_gen))));
int max_trys = 50;
while(!checkPointValidity(stereoImgsOverlapMask[0], seedPt) && (max_trys > 0)){
seedPt = cv::Point_<int32_t>(cv::Point_<int32_t>((int32_t) floor(distrPieceX(rand_gen)),
(int32_t) floor(distrPieceY(rand_gen))));
max_trys--;
}
if(max_trys <= 0){
for (int i = 0; i < nr_tmp; ++i) {
nrPerReg.at<unsigned char>(y, x)--;
movObjAreas[y][x].pop_back();
}
break;
}
movObjSeeds[y][x].push_back(seedPt);
xposes.push_back(movObjSeeds[y][x].back().x);
yposes.push_back(movObjSeeds[y][x].back().y);
nr_tmp--;
}
}
}
}
}
}
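//For every region of the 3x3 grid, get the bounding rectangle (in full image coordinates) of the area within
// the given mask that is usable for placing moving object seeds; regions without a usable area are marked as
// invalid (first element of the pair is false)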
void genStereoSequ::getValidImgRegBorders(const cv::Mat &mask, std::vector<std::vector<std::pair<bool,cv::Rect>>> &validRects){
validRects = std::vector<std::vector<std::pair<bool,cv::Rect>>>(3, std::vector<std::pair<bool,cv::Rect>>(3));
for (int y = 0; y < 3; ++y) {
for (int x = 0; x < 3; ++x) {
Rect validRect;
bool usable = getValidRegBorders(mask(regROIs[y][x]), validRect);
if(usable){
validRect.x += regROIs[y][x].x;
validRect.y += regROIs[y][x].y;
validRects[y][x] = make_pair(true, validRect);
}
else{
validRects[y][x] = make_pair(false, cv::Rect());
}
}
}
}
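//Reduce the total number of moving objects to pars_nrMovObjsNew and scale the minimum number of moving objects
// accordingly, keeping the ratio between minimum and total number approximately constant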
void genStereoSequ::adaptMinNrMovObjsAndNrMovObjs(size_t pars_nrMovObjsNew) {
if(pars.nrMovObjs == 0)
return;
float ratMinActMovObj = (float) pars.minNrMovObjs / (float) pars.nrMovObjs;
pars.minNrMovObjs = (size_t) round(ratMinActMovObj * (float) pars_nrMovObjsNew);
pars.minNrMovObjs = (pars.minNrMovObjs > pars_nrMovObjsNew) ? pars_nrMovObjsNew : pars.minNrMovObjs;
pars.nrMovObjs = pars_nrMovObjsNew;
}
//Build ranges and weights for a piecewise_constant_distribution based on values calculated before
void genStereoSequ::buildDistributionRanges(std::vector<int> &xposes,
std::vector<int> &yposes,
int &x,
int &y,
std::vector<double> &xInterVals,
std::vector<double> &xWeights,
std::vector<double> &yInterVals,
std::vector<double> &yWeights,
std::vector<std::vector<std::pair<bool,cv::Rect>>> *validRects) {
sort(xposes.begin(), xposes.end());
sort(yposes.begin(), yposes.end());
Rect validRect = regROIs[y][x];
if(validRects != nullptr){
if(!validRects->at((size_t)y)[x].first){
throw SequenceException("Area is not valid. Cannot build a valid distribution.");
}
validRect = validRects->at((size_t)y)[x].second;
}
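    //The interval borders and weights built below describe a piecewise constant distribution: ranges with
    // weight 1.0 are selectable for a new seed, ranges with weight 0 are exclusion zones of +-minODist around
    // already placed seeds.
    //Illustrative example (made-up values): with interval borders {10, 40, 60, 90} and weights {1, 0, 1},
    // std::piecewise_constant_distribution draws uniformly from [10, 40) or [60, 90), but never from [40, 60).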
//Get possible selection ranges for x-values
int start = max(xposes[0] - minODist, validRect.x);
int maxEnd = validRect.x + validRect.width - 1;
int xyend = min(xposes[0] + minODist, maxEnd);
if (start == validRect.x) {
xInterVals.push_back((double) start);
xInterVals.push_back((double) (xposes[0] + minODist));
xWeights.push_back(0);
} else {
xInterVals.push_back((double) validRect.x);
xInterVals.push_back((double) start);
xWeights.push_back(1.0);
if (xyend != maxEnd) {
xInterVals.push_back((double) xyend);
xWeights.push_back(0);
}
}
if (xyend != maxEnd) {
for (size_t i = 1; i < xposes.size(); i++) {
start = max(xposes[i] - minODist, (int) floor(xInterVals.back()));
if (start != (int) floor(xInterVals.back())) {
xInterVals.push_back((double) (xposes[i] - minODist));
xWeights.push_back(1.0);
}
xyend = min(xposes[i] + minODist, maxEnd);
if (xyend != maxEnd) {
xInterVals.push_back((double) xyend);
xWeights.push_back(0);
}
}
}
if (xyend != maxEnd) {
xInterVals.push_back((double) maxEnd);
xWeights.push_back(1.0);
}
//Check if we are able to select a new seed position
double wsum = 0;
for (auto &i: xWeights) {
wsum += i;
}
if (nearZero(wsum)) {
xWeights.clear();
xInterVals.clear();
if(xposes.size() > 1) {
vector<int> xposesAndEnds;
xposesAndEnds.push_back(validRect.x);
xposesAndEnds.insert(xposesAndEnds.end(), xposes.begin(), xposes.end());
xposesAndEnds.push_back(maxEnd);
vector<int> xIntervalDiffs(xposesAndEnds.size() - 1);
for (int i = 1; i < (int)xposesAndEnds.size(); ++i) {
xIntervalDiffs[i-1] = xposesAndEnds[i] - xposesAndEnds[i - 1];
}
int maxdiff = (int)std::distance(xIntervalDiffs.begin(),
std::max_element(xIntervalDiffs.begin(), xIntervalDiffs.end()));
int thisDist = xposesAndEnds[maxdiff + 1] - xposesAndEnds[maxdiff];
if(thisDist >= minODist) {
xInterVals.push_back((double) (xposesAndEnds[maxdiff] + minODist / 2));
xInterVals.push_back((double) (xposesAndEnds[maxdiff + 1] - minODist / 2));
}
else if(thisDist >= 3){
thisDist /= 3;
xInterVals.push_back((double) (xposesAndEnds[maxdiff] + thisDist));
xInterVals.push_back((double) (xposesAndEnds[maxdiff + 1] - thisDist));
}
else{
throw SequenceException("Cannot select a distribution range as the border values are too near to each other!");
}
}
else{
int xIntervalDiffs[2], idx = 0;
xIntervalDiffs[0] = xposes[0] - validRect.x;
xIntervalDiffs[1] = maxEnd - xposes[0];
if(xIntervalDiffs[1] > xIntervalDiffs[0]) idx = 1;
if(xIntervalDiffs[idx] >= 2){
int thisDist = xIntervalDiffs[idx] / 2;
if(idx){
xInterVals.push_back((double) (xposes[0] + thisDist));
xInterVals.push_back((double) maxEnd);
}else{
xInterVals.push_back((double) validRect.x);
xInterVals.push_back((double) (xposes[0] - thisDist));
}
}
else{
throw SequenceException("Cannot select a distribution range as the border values are too near to each other!");
}
}
xWeights.push_back(1.0);
}
//Get possible selection ranges for y-values
start = max(yposes[0] - minODist, validRect.y);
maxEnd = validRect.y + validRect.height - 1;
xyend = min(yposes[0] + minODist, maxEnd);
if (start == validRect.y) {
yInterVals.push_back((double) start);
yInterVals.push_back((double) (yposes[0] + minODist));
yWeights.push_back(0);
} else {
yInterVals.push_back((double) validRect.y);
yInterVals.push_back((double) start);
yWeights.push_back(1.0);
if (xyend != maxEnd) {
yInterVals.push_back((double) xyend);
yWeights.push_back(0);
}
}
if (xyend != maxEnd) {
for (size_t i = 1; i < yposes.size(); i++) {
start = max(yposes[i] - minODist, (int) floor(yInterVals.back()));
if (start != (int) floor(yInterVals.back())) {
yInterVals.push_back((double) (yposes[i] - minODist));
yWeights.push_back(1.0);
}
xyend = min(yposes[i] + minODist, maxEnd);
if (xyend != maxEnd) {
yInterVals.push_back((double) xyend);
yWeights.push_back(0);
}
}
}
if (xyend != maxEnd) {
yInterVals.push_back((double) maxEnd);
yWeights.push_back(1.0);
}
//Check if we are able to select a new seed position
wsum = 0;
for (auto& i: yWeights) {
wsum += i;
}
if (nearZero(wsum)) {
yWeights.clear();
yInterVals.clear();
if(yposes.size() > 1) {
vector<int> yposesAndEnds;
yposesAndEnds.push_back(validRect.y);
yposesAndEnds.insert(yposesAndEnds.end(), yposes.begin(), yposes.end());
yposesAndEnds.push_back(maxEnd);
vector<int> yIntervalDiffs(yposesAndEnds.size() - 1);
for (int i = 1; i < (int)yposesAndEnds.size(); ++i) {
yIntervalDiffs[i-1] = yposesAndEnds[i] - yposesAndEnds[i - 1];
}
int maxdiff = (int)std::distance(yIntervalDiffs.begin(),
std::max_element(yIntervalDiffs.begin(), yIntervalDiffs.end()));
int thisDist = yposesAndEnds[maxdiff + 1] - yposesAndEnds[maxdiff];
if(thisDist >= minODist) {
yInterVals.push_back((double) (yposesAndEnds[maxdiff] + minODist / 2));
yInterVals.push_back((double) (yposesAndEnds[maxdiff + 1] - minODist / 2));
}
else if(thisDist >= 3){
thisDist /= 3;
yInterVals.push_back((double) (yposesAndEnds[maxdiff] + thisDist));
yInterVals.push_back((double) (yposesAndEnds[maxdiff + 1] - thisDist));
}
else{
throw SequenceException("Cannot select a distribution range as the border values are too near to each other!");
}
}
else{
int yIntervalDiffs[2], idx = 0;
yIntervalDiffs[0] = yposes[0] - validRect.y;
yIntervalDiffs[1] = maxEnd - yposes[0];
if(yIntervalDiffs[1] > yIntervalDiffs[0]) idx = 1;
if(yIntervalDiffs[idx] >= 2){
int thisDist = yIntervalDiffs[idx] / 2;
if(idx){
yInterVals.push_back((double) (yposes[0] + thisDist));
yInterVals.push_back((double) maxEnd);
}else{
yInterVals.push_back((double) validRect.y);
yInterVals.push_back((double) (yposes[0] - thisDist));
}
}
else{
throw SequenceException("Cannot select a distribution range as the border values are too near to each other!");
}
}
yWeights.push_back(1.0);
}
}
void genStereoSequ::adaptNrBPMovObjCorrs(int32_t remSize){
//Remove backprojected moving object correspondences based on the ratio of the number of correspondences
CV_Assert(actCorrsOnMovObjFromLast > 0);
size_t nr_bp_movObj = movObjMaskFromLastLargeAdd.size();
if(remSize > (actCorrsOnMovObjFromLast - (int32_t)nr_bp_movObj * 2)){
remSize = actCorrsOnMovObjFromLast - (int32_t)nr_bp_movObj * 2;
}
vector<int> nr_reduce(nr_bp_movObj);
int sumRem = 0;
for (size_t i = 0; i < nr_bp_movObj; ++i) {
nr_reduce[i] = (int)round((double)remSize *
(double)(movObjCorrsImg1TPFromLast[i].cols + movObjCorrsImg1TNFromLast[i].cols) /
(double)actCorrsOnMovObjFromLast);
sumRem += nr_reduce[i];
}
while(sumRem < remSize){
for (size_t i = 0; i < nr_bp_movObj; ++i){
if((movObjCorrsImg1TPFromLast[i].cols + movObjCorrsImg1TNFromLast[i].cols - nr_reduce[i]) > 2){
nr_reduce[i]++;
sumRem++;
if(sumRem == remSize) break;
}
}
}
while(sumRem > remSize){
for (size_t i = 0; i < nr_bp_movObj; ++i){
if(nr_reduce[i] > 0){
nr_reduce[i]--;
sumRem--;
if(sumRem == remSize) break;
}
}
}
actCorrsOnMovObjFromLast -= remSize;
//Calculate number of TP and TN to remove for every moving object
vector<pair<int,int>> nr_reduceTPTN(nr_bp_movObj);
for (size_t i = 0; i < nr_bp_movObj; ++i){
if(nr_reduce[i] > 0){
CV_Assert((movObjCorrsImg1TPFromLast[i].cols + movObjCorrsImg1TNFromLast[i].cols) > 0);
double inlrat_tmp = (double)movObjCorrsImg1TPFromLast[i].cols /
(double)(movObjCorrsImg1TPFromLast[i].cols + movObjCorrsImg1TNFromLast[i].cols);
int rdTP = (int)round(inlrat_tmp * (double)nr_reduce[i]);
while(rdTP > movObjCorrsImg1TPFromLast[i].cols)
rdTP--;
int rdTN = nr_reduce[i] - rdTP;
nr_reduceTPTN[i] = make_pair(rdTP,rdTN);
}else{
nr_reduceTPTN[i] = make_pair(0,0);
}
}
//Remove the correspondences
cv::Size masi = Size(csurr.cols, csurr.rows);
for (size_t i = 0; i < nr_bp_movObj; ++i){
if(nr_reduce[i] > 0){
//Delete TP
int rd_tmp = 0;
int idx = 0;
Point pt;
if(nr_reduceTPTN[i].first > 0) {
rd_tmp = nr_reduceTPTN[i].first;
idx = movObjCorrsImg1TPFromLast[i].cols - 1;
while (rd_tmp > 0) {
pt = Point((int) round(movObjCorrsImg1TPFromLast[i].at<double>(0, idx)),
(int) round(movObjCorrsImg1TPFromLast[i].at<double>(1, idx)));
Mat s_tmp = movObjMaskFromLastLargeAdd[i](Rect(pt, masi));
s_tmp -= csurr;
pt = Point((int) round(movObjCorrsImg2TPFromLast[i].at<double>(0, idx)),
(int) round(movObjCorrsImg2TPFromLast[i].at<double>(1, idx)));
movObjMaskFromLast2.at<unsigned char>(pt) = 0;
idx--;
rd_tmp--;
}
movObjCorrsImg1TPFromLast[i] = movObjCorrsImg1TPFromLast[i].colRange(0,
movObjCorrsImg1TPFromLast[i].cols -
nr_reduceTPTN[i].first);
movObjCorrsImg2TPFromLast[i] = movObjCorrsImg2TPFromLast[i].colRange(0,
movObjCorrsImg2TPFromLast[i].cols -
nr_reduceTPTN[i].first);
movObjCorrsImg12TPFromLast_Idx[i].erase(movObjCorrsImg12TPFromLast_Idx[i].end() - nr_reduceTPTN[i].first,
movObjCorrsImg12TPFromLast_Idx[i].end());
}
//Delete TN
if(nr_reduceTPTN[i].second > 0) {
rd_tmp = nr_reduceTPTN[i].second;
idx = movObjCorrsImg1TNFromLast[i].cols - 1;
while (rd_tmp > 0) {
pt = Point((int) round(movObjCorrsImg1TNFromLast[i].at<double>(0, idx)),
(int) round(movObjCorrsImg1TNFromLast[i].at<double>(1, idx)));
Mat s_tmp = movObjMaskFromLastLargeAdd[i](Rect(pt, masi));
s_tmp -= csurr;
pt = Point((int) round(movObjCorrsImg2TNFromLast[i].at<double>(0, idx)),
(int) round(movObjCorrsImg2TNFromLast[i].at<double>(1, idx)));
movObjMaskFromLast2.at<unsigned char>(pt) = 0;
idx--;
rd_tmp--;
}
movObjDistTNtoReal[i].erase(movObjDistTNtoReal[i].end() - nr_reduceTPTN[i].second, movObjDistTNtoReal[i].end());
movObjCorrsImg1TNFromLast[i] = movObjCorrsImg1TNFromLast[i].colRange(0,
movObjCorrsImg1TNFromLast[i].cols -
nr_reduceTPTN[i].second);
movObjCorrsImg2TNFromLast[i] = movObjCorrsImg2TNFromLast[i].colRange(0,
movObjCorrsImg2TNFromLast[i].cols -
nr_reduceTPTN[i].second);
}
}
}
}
//Generates labels of moving objects within the image and calculates the percentage of overlap for each region
//Moreover, the number of static correspondences per region is adapted and the number of correspondences on the moving objects is calculated
//mask is used to exclude areas from generating labels and must have the same size as the image; mask holds the areas from backprojected moving objects
//seeds must hold the seeding positions for generating the labels
//areas must hold the desired area for every label
//corrsOnMovObjLF must hold the number of correspondences on moving objects that were backprojected (thus the objects were created one or more frames beforehand)
// from 3D (including TN calculated using the inlier ratio).
void
genStereoSequ::generateMovObjLabels(const cv::Mat &mask,
std::vector<cv::Point_<int32_t>> &seeds,
std::vector<int32_t> &areas,
int32_t corrsOnMovObjLF,
cv::InputArray validImgMask) {
CV_Assert(seeds.size() == areas.size());
CV_Assert(corrsOnMovObjLF == actCorrsOnMovObjFromLast);
size_t nr_movObj = areas.size();
if (nr_movObj == 0)
actCorrsOnMovObj = corrsOnMovObjLF;
else
actCorrsOnMovObj = (int32_t) round(pars.CorrMovObjPort * (double) nrCorrs[actFrameCnt]);
if (actCorrsOnMovObj <= corrsOnMovObjLF) {
nr_movObj = 0;
seeds.clear();
areas.clear();
actTruePosOnMovObj = 0;
actTrueNegOnMovObj = 0;
//Adapt # of correspondences on backprojected moving objects
if(actCorrsOnMovObj < corrsOnMovObjLF){
int32_t remSize = corrsOnMovObjLF - actCorrsOnMovObj;
//Remove them based on the ratio of the number of correspondences
adaptNrBPMovObjCorrs(remSize);
}
}
movObjLabels.clear();
if (nr_movObj) {
//movObjLabels.resize(nr_movObj, cv::Mat::zeros(imgSize, CV_8UC1));
for (size_t i = 0; i < nr_movObj; i++) {
movObjLabels.push_back(cv::Mat::zeros(imgSize, CV_8UC1));
}
}
combMovObjLabels = cv::Mat::zeros(imgSize, CV_8UC1);
//Set seeding positions in mov. obj. label images
for (size_t i = 0; i < nr_movObj; i++) {
movObjLabels[i].at<unsigned char>(seeds[i]) = 1;
combMovObjLabels.at<unsigned char>(seeds[i]) = (unsigned char) (i + 1);
}
Size siM1(imgSize.width - 1, imgSize.height - 1);
Mat maskValid = mask;
if(!validImgMask.empty()){
maskValid = mask | (validImgMask.getMat() == 0);
}
#define MOV_OBJ_ONLY_IN_REGIONS 0
#if MOV_OBJ_ONLY_IN_REGIONS
vector<cv::Point_<int32_t>> objRegionIndices(nr_movObj);
for (size_t i = 0; i < nr_movObj; i++)
{
objRegionIndices[i].x = seeds[i].x / (imgSize.width / 3);
objRegionIndices[i].y = seeds[i].y / (imgSize.height / 3);
}
#else
Rect imgArea = Rect(Point(0, 0), imgSize);//Is also useless as it covers the whole image
Mat regMask = cv::Mat::ones(imgSize,
CV_8UC1);//is currently not really used (should mark the areas where moving objects can grow)
#endif
std::vector<cv::Point_<int32_t>> startposes = seeds;
vector<int32_t> actArea(nr_movObj, 1);
vector<size_t> nrIterations(nr_movObj, 0);
vector<unsigned char> dilateOps(nr_movObj, 0);
vector<bool> objNFinished(nr_movObj, true);
int remainObj = (int) nr_movObj;
//Generate labels
size_t visualizeMask = 0;
while (remainObj > 0) {
for (size_t i = 0; i < nr_movObj; i++) {
if (objNFinished[i]) {
// Mat beforeAdding = movObjLabels[i].clone();
// int32_t Asv = actArea[i];
if (!addAdditionalDepth((unsigned char) (i + convhullPtsObj.size() + 1),
combMovObjLabels,
movObjLabels[i],
maskValid,
#if MOV_OBJ_ONLY_IN_REGIONS
regmasks[objRegionIndices[i].y][objRegionIndices[i].x],
#else
regMask,
#endif
startposes[i],
startposes[i],
actArea[i],
areas[i],
siM1,
seeds[i],
#if MOV_OBJ_ONLY_IN_REGIONS
regmasksROIs[objRegionIndices[i].y][objRegionIndices[i].x],
#else
imgArea,
#endif
nrIterations[i],
dilateOps[i])) {
objNFinished[i] = false;
remainObj--;
}
/*Mat afterAdding = movObjLabels[i].clone();;
int realAreaBeforeDil = cv::countNonZero(afterAdding);
if(realAreaBeforeDil != actArea[i])
{
cout << "Area difference: " << realAreaBeforeDil - actArea[i] << endl;
cout << "Area diff between last and actual values: " << actArea[i] - Asv << endl;
Mat addingDiff = afterAdding ^ beforeAdding;
namedWindow("Before", WINDOW_AUTOSIZE);
namedWindow("After", WINDOW_AUTOSIZE);
namedWindow("Diff", WINDOW_AUTOSIZE);
imshow("Before", (beforeAdding > 0));
imshow("After", (afterAdding > 0));
imshow("Diff", (addingDiff > 0));
waitKey(0);
destroyWindow("Before");
destroyWindow("After");
destroyWindow("Diff");
}*/
}
/*Mat dilImgTh4;
cv::threshold( movObjLabels[i], dilImgTh4, 0, 255,0 );
namedWindow( "Dilated4", WINDOW_AUTOSIZE );
imshow("Dilated4", dilImgTh4);
waitKey(0);
destroyWindow("Dilated4");*/
}
if (verbose & SHOW_BUILD_PROC_MOV_OBJ) {
if (visualizeMask % 200 == 0) {
Mat colorMapImg;
unsigned char clmul = (unsigned char)255 / (unsigned char)nr_movObj;
// Apply the colormap:
applyColorMap(combMovObjLabels * clmul, colorMapImg, cv::COLORMAP_RAINBOW);
if(!writeIntermediateImg(colorMapImg, "moving_object_labels_build_step_" + std::to_string(visualizeMask))){
namedWindow("combined ObjLabels", WINDOW_AUTOSIZE);
imshow("combined ObjLabels", colorMapImg);
waitKey(0);
destroyWindow("combined ObjLabels");
}
}
visualizeMask++;
}
}
//Finally visualize the labels
if ((nr_movObj > 0) && (verbose & (SHOW_BUILD_PROC_MOV_OBJ | SHOW_MOV_OBJ_3D_PTS))) {
        //Generate colormap for moving objects (every object has a different color)
Mat colors = Mat((int)nr_movObj, 1, CV_8UC1);
unsigned char addc = nr_movObj > 255 ? (unsigned char)255 : (unsigned char) nr_movObj;
addc = addc < (unsigned char)2 ? (unsigned char)255 : ((unsigned char)255 / (addc - (unsigned char)1));
colors.at<unsigned char>(0) = 0;
for (int k = 1; k < (int)nr_movObj; ++k) {
colors.at<unsigned char>(k) = colors.at<unsigned char>(k - 1) + addc;
}
Mat colormap_img;
applyColorMap(colors, colormap_img, COLORMAP_PARULA);
Mat labelImgRGB = Mat::zeros(imgSize, CV_8UC3);
for (size_t i = 0; i < nr_movObj; i++) {
for (int r = 0; r < imgSize.height; r++) {
for (int c = 0; c < imgSize.width; c++) {
if (movObjLabels[i].at<unsigned char>(r, c) != 0) {
labelImgRGB.at<cv::Vec3b>(r, c) = colormap_img.at<cv::Vec3b>(i);
}
}
}
}
if(!writeIntermediateImg(labelImgRGB, "moving_object_labels_build_step_" + std::to_string(visualizeMask))){
namedWindow("Moving object labels for point cloud comparison", WINDOW_AUTOSIZE);
imshow("Moving object labels for point cloud comparison", labelImgRGB);
waitKey(0);
}
}
//Get bounding rectangles for the areas
if (nr_movObj > 0) {
movObjLabelsROIs.resize(nr_movObj);
for (size_t i = 0; i < nr_movObj; i++) {
Mat mask_tmp = movObjLabels[i].clone();
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
size_t dilTries = 0;
cv::findContours(mask_tmp, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
while ((contours.size() > 1) &&
(dilTries <
5))//Prevent the detection of multiple objects if connections between parts are too small
{
Mat element = cv::getStructuringElement(MORPH_ELLIPSE, Size(3, 3));
dilate(mask_tmp, mask_tmp, element);
contours.clear();
hierarchy.clear();
cv::findContours(mask_tmp, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
dilTries++;
}
if (dilTries >= 5) {
vector<vector<Point> > contours_new(1);
for (auto cit = contours.rbegin();
cit != contours.rend(); cit++) {
contours_new[0].insert(contours_new[0].end(), cit->begin(), cit->end());
}
contours = contours_new;
}
movObjLabelsROIs[i] = cv::boundingRect(contours[0]);
}
}
//Get overlap of regions and the portion of correspondences that is covered by the moving objects
vector<vector<double>> movObjOverlap(3, vector<double>(3, 0));
movObjHasArea = vector<vector<bool>>(3, vector<bool>(3, false));
vector<vector<int32_t>> movObjCorrsFromStatic(3, vector<int32_t>(3, 0));
vector<vector<int32_t>> movObjCorrsFromStaticInv(3, vector<int32_t>(3, 0));
int32_t absNrCorrsFromStatic = 0;
Mat statCorrsPRegNew = Mat::zeros(3, 3, CV_32SC1);
double oldMovObjAreaImgRat = 0;
if ((nr_movObj == 0) && (actCorrsOnMovObj > 0)) {
oldMovObjAreaImgRat = (double) cv::countNonZero(mask) / (double) imgSize.area();
}
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
movObjOverlap[y][x] = (double) (cv::countNonZero(combMovObjLabels(regROIs[y][x])) +
cv::countNonZero(mask(regROIs[y][x]))) /
(double) (regROIs[y][x].area());
CV_Assert(movObjOverlap[y][x] >= 0);
if (movObjOverlap[y][x] > 0.95) {
movObjHasArea[y][x] = true;
movObjCorrsFromStatic[y][x] = nrCorrsRegs[actFrameCnt].at<int32_t>(y, x);
movObjCorrsFromStaticInv[y][x] = 0;
absNrCorrsFromStatic += movObjCorrsFromStatic[y][x];
} else if (nearZero(movObjOverlap[y][x])) {
statCorrsPRegNew.at<int32_t>(y, x) = nrCorrsRegs[actFrameCnt].at<int32_t>(y, x);
movObjCorrsFromStatic[y][x] = 0;
movObjCorrsFromStaticInv[y][x] = nrCorrsRegs[actFrameCnt].at<int32_t>(y, x);
} else {
movObjCorrsFromStatic[y][x] = (int32_t) round(
(double) nrCorrsRegs[actFrameCnt].at<int32_t>(y, x) * movObjOverlap[y][x]);
if ((nr_movObj == 0) && (actCorrsOnMovObj > 0)) {
int32_t maxFromOld = corrsOnMovObjLF;
if(!nearZero(100.0 * oldMovObjAreaImgRat)) {
maxFromOld = (int32_t) round(
(double) corrsOnMovObjLF * movObjOverlap[y][x] / oldMovObjAreaImgRat);
}
movObjCorrsFromStatic[y][x] =
movObjCorrsFromStatic[y][x] > maxFromOld ? maxFromOld : movObjCorrsFromStatic[y][x];
}
movObjCorrsFromStaticInv[y][x] =
nrCorrsRegs[actFrameCnt].at<int32_t>(y, x) - movObjCorrsFromStatic[y][x];
absNrCorrsFromStatic += movObjCorrsFromStatic[y][x];
statCorrsPRegNew.at<int32_t>(y, x) = movObjCorrsFromStaticInv[y][x];
}
}
}
//Check if there are too many correspondences on the moving objects
int32_t maxCorrs = 0;
int32_t areassum = 0;
if (nr_movObj > 0) {
for (auto& i : actArea) {
areassum += i;
}
CV_Assert(areassum > 0);
        //reduce the initial area by reducing the radius of a circle with corresponding area by 1: area_new = area - 2*sqrt(pi)*sqrt(area) + pi
// maxCorrs = max((int32_t)(((double)(areassum)-3.545 * sqrt((double)(areassum)+3.15)) / (1.5 * pars.minKeypDist * pars.minKeypDist)), 1);
//reduce the initial area by reducing the radius of a circle with corresponding area by reduceRadius
double reduceRadius = pars.minKeypDist < 3.0 ? 3.0 : pars.minKeypDist;
double tmp = max(sqrt((double) (areassum) / M_PI) - reduceRadius, pars.minKeypDist);
maxCorrs = max((int32_t) ((tmp * tmp * M_PI) / (enlargeKPDist * avgMaskingArea)), 1);
if ((actCorrsOnMovObj - corrsOnMovObjLF) > maxCorrs) {
actCorrsOnMovObj = maxCorrs + corrsOnMovObjLF;
}
//Check, if the areas of moving objects are valid
if(verbose & PRINT_WARNING_MESSAGES) {
int32_t initAsum = 0;
for (auto& i : areas) {
initAsum += i;
}
if (initAsum != areassum) {
double areaChange = (double) areassum / (double) initAsum;
if ((areaChange < 0.90) || (areaChange > 1.10)) {
cout << "Areas of moving objects are more than 5% different compared to given values." << endl;
for (size_t i = 0; i < areas.size(); i++) {
areaChange = (double) actArea[i] / (double) areas[i];
if (!nearZero(areaChange - 1.0)) {
cout << "Area " << i << " with seed position (x, y): (" << seeds[i].x << ", " << seeds[i].y
<<
") differs by " << 100.0 * (areaChange - 1.0) << "% or " << actArea[i] - areas[i]
<< " pixels."
<< endl;
}
}
}
}
}
}
if (nr_movObj > 0) {
double areaFracStaticCorrs = (double) absNrCorrsFromStatic /
(double) nrCorrs[actFrameCnt];//Fraction of correspondences which the moving objects should take because of their area
//double r_CorrMovObjPort = round(pars.CorrMovObjPort * 100.0) / 100.0;//Fraction of correspondences the user specified for the moving objects
double r_areaFracStaticCorrs = round(areaFracStaticCorrs * 100.0) / 100.0;
double r_effectiveFracMovObj = round((double) actCorrsOnMovObj / (double) nrCorrs[actFrameCnt] * 100.0) /
                                       100.0;//Effective, unchangeable fraction of correspondences on moving objects
if (r_effectiveFracMovObj >
r_areaFracStaticCorrs)//Remove additional static correspondences and add them to the moving objects
{
int32_t remStat = actCorrsOnMovObj - absNrCorrsFromStatic;
distributeStatObjCorrsOnMovObj(remStat,
absNrCorrsFromStatic,
movObjCorrsFromStaticInv,
statCorrsPRegNew);
} else if (r_effectiveFracMovObj <
r_areaFracStaticCorrs)//Distribute a part of the correspondences from moving objects over the static elements not covered by moving objects
{
int32_t remMov = absNrCorrsFromStatic - actCorrsOnMovObj;
cv::Mat mask_tmp = (combMovObjLabels == 0) | (mask == 0);
distributeMovObjCorrsOnStatObj(remMov,
absNrCorrsFromStatic,
mask_tmp,
movObjCorrsFromStaticInv,
movObjOverlap,
statCorrsPRegNew);
}
}
//Set new number of static correspondences
adaptStatNrCorrsReg(statCorrsPRegNew);
//Check the number of overall correspondences
if(verbose & PRINT_WARNING_MESSAGES){
int nrCorrs1 = (int)sum(nrCorrsRegs[actFrameCnt])[0] + (int)actCorrsOnMovObj;
if(nrCorrs1 != (int)nrCorrs[actFrameCnt]){
cout << "Total number of correspondencs differs after partitioning them between moving and static objects by " <<
nrCorrs1 - (int)nrCorrs[actFrameCnt] << endl;
}
}
//Calculate number of correspondences on newly created moving objects
if (nr_movObj > 0) {
actCorrsOnMovObj -= corrsOnMovObjLF;
if(actCorrsOnMovObj == 0){
actTruePosOnMovObj = 0;
actTrueNegOnMovObj = 0;
actTPPerMovObj.clear();
actTNPerMovObj.clear();
movObjLabels.clear();
combMovObjLabels = cv::Mat::zeros(imgSize, CV_8UC1);
movObjLabelsROIs.clear();
nr_movObj = 0;
}
else {
actTruePosOnMovObj = (int32_t) round(inlRat[actFrameCnt] * (double) actCorrsOnMovObj);
actTrueNegOnMovObj = actCorrsOnMovObj - actTruePosOnMovObj;
actTPPerMovObj.resize(nr_movObj, 0);
actTNPerMovObj.resize(nr_movObj, 0);
}
if (nr_movObj > 1) {
            //Sort the areas and begin with the smallest one, as rounding the number of TP and TN for every area can leave a larger remainder of correspondences that must be taken by the last (and thus largest) area.
vector<pair<size_t, int32_t>> actAreaIdx(nr_movObj);
for (size_t i = 0; i < nr_movObj; i++) {
actAreaIdx[i] = make_pair(i, actArea[i]);
}
sort(actAreaIdx.begin(), actAreaIdx.end(),
[](pair<size_t, int32_t> first, pair<size_t, int32_t> second) {
return first.second < second.second;
});
int32_t sumTP = 0, sumTN = 0;
for (size_t i = 0; i < nr_movObj - 1; i++) {
auto actTP = (int32_t) round(
(double) actTruePosOnMovObj * (double) actAreaIdx[i].second / (double) areassum);
auto actTN = (int32_t) round(
(double) actTrueNegOnMovObj * (double) actAreaIdx[i].second / (double) areassum);
actTPPerMovObj[actAreaIdx[i].first] = actTP;
actTNPerMovObj[actAreaIdx[i].first] = actTN;
sumTP += actTP;
sumTN += actTN;
}
int32_t restTP = actTruePosOnMovObj - sumTP;
int32_t restTN = actTrueNegOnMovObj - sumTN;
bool isValid = true;
if (restTP <= 0) {
int idx = 0;
while ((restTP <= 0) && (idx < (int)nr_movObj - 1)) {
if (actTPPerMovObj[actAreaIdx[idx].first] > 1) {
actTPPerMovObj[actAreaIdx[idx].first]--;
restTP++;
} else {
idx++;
}
}
if (restTP <= 0) {
seeds.erase(seeds.begin() + actAreaIdx[nr_movObj - 1].first);
areas.erase(areas.begin() + actAreaIdx[nr_movObj - 1].first);
combMovObjLabels = cv::Mat::zeros(imgSize, CV_8UC1);
for (int j = 0; j < (int)nr_movObj - 1; ++j) {
unsigned char pixVal;
if(actAreaIdx[nr_movObj - 1].first > actAreaIdx[j].first){
pixVal = (unsigned char) (actAreaIdx[j].first + convhullPtsObj.size() + 1);
}
else{
pixVal = (unsigned char) (actAreaIdx[j].first + convhullPtsObj.size());
}
combMovObjLabels |= movObjLabels[actAreaIdx[j].first] * pixVal;
}
movObjLabels.erase(movObjLabels.begin() + actAreaIdx[nr_movObj - 1].first);
actTPPerMovObj.erase(actTPPerMovObj.begin() + actAreaIdx[nr_movObj - 1].first);
actTNPerMovObj.erase(actTNPerMovObj.begin() + actAreaIdx[nr_movObj - 1].first);
movObjLabelsROIs.erase(movObjLabelsROIs.begin() + actAreaIdx[nr_movObj - 1].first);
actArea.erase(actArea.begin() + actAreaIdx[nr_movObj - 1].first);
for (int j = 0; j < (int)nr_movObj - 1; ++j){
if(actAreaIdx[nr_movObj - 1].first < actAreaIdx[j].first){
actAreaIdx[j].first--;
}
}
nr_movObj--;
vector<size_t> delList;
for (size_t i = 0; i < nr_movObj; ++i) {
if(actTPPerMovObj[i] <= 0){
delList.push_back(i);
}else if((actTPPerMovObj[i] == 1) && ((restTP < 0))){
restTP++;
delList.push_back(i);
}
}
if(!delList.empty()){
combMovObjLabels = cv::Mat::zeros(imgSize, CV_8UC1);
unsigned char delCnt = 0;
for (unsigned char j = 0; j < (unsigned char)nr_movObj; ++j) {
                        if(((size_t)delCnt < delList.size()) && (j == (unsigned char)delList[delCnt])){
delCnt++;
continue;
}
unsigned char pixVal = j - delCnt + (unsigned char) (convhullPtsObj.size() + 1);
combMovObjLabels |= movObjLabels[actAreaIdx[j].first] * pixVal;
}
for(int i = (int)delList.size() - 1; i >= 0; i--){
seeds.erase(seeds.begin() + delList[i]);
areas.erase(areas.begin() + delList[i]);
movObjLabels.erase(movObjLabels.begin() + delList[i]);
actTPPerMovObj.erase(actTPPerMovObj.begin() + delList[i]);
actTNPerMovObj.erase(actTNPerMovObj.begin() + delList[i]);
movObjLabelsROIs.erase(movObjLabelsROIs.begin() + delList[i]);
actArea.erase(actArea.begin() + delList[i]);
nr_movObj--;
}
}
if(nr_movObj == 0){
actCorrsOnMovObj = 0;
actTruePosOnMovObj = 0;
actTrueNegOnMovObj = 0;
actTPPerMovObj.clear();
actTNPerMovObj.clear();
movObjLabels.clear();
combMovObjLabels = cv::Mat::zeros(imgSize, CV_8UC1);
movObjLabelsROIs.clear();
isValid = false;
}
else if(nr_movObj == 1){
actTPPerMovObj[0] = actTruePosOnMovObj;
actTNPerMovObj[0] = actTrueNegOnMovObj;
isValid = false;
}
else{
areassum = 0;
for (auto& i : actArea) {
areassum += i;
}
actAreaIdx = vector<pair<size_t, int32_t>>(nr_movObj);
for (size_t i = 0; i < nr_movObj; i++) {
actAreaIdx[i] = make_pair(i, actArea[i]);
}
sort(actAreaIdx.begin(), actAreaIdx.end(),
[](pair<size_t, int32_t> first, pair<size_t, int32_t> second) {
return first.second < second.second;
});
sumTN = 0;
for (size_t i = 0; i < nr_movObj - 1; i++) {
auto actTN = (int32_t) round(
(double) actTrueNegOnMovObj * (double) actAreaIdx[i].second / (double) areassum);
actTNPerMovObj[actAreaIdx[i].first] = actTN;
sumTN += actTN;
}
restTN = actTrueNegOnMovObj - sumTN;
}
} else {
actTPPerMovObj[actAreaIdx[nr_movObj - 1].first] = restTP;
}
} else {
actTPPerMovObj[actAreaIdx[nr_movObj - 1].first] = restTP;
}
if (isValid) {
if (restTN < 0) {
actTNPerMovObj[actAreaIdx[nr_movObj - 1].first] = 0;
int idx = 0;
while ((restTN < 0) && (idx < (int)nr_movObj - 1)) {
if (actTNPerMovObj[actAreaIdx[idx].first] > 0) {
actTNPerMovObj[actAreaIdx[idx].first]--;
restTN++;
} else {
idx++;
}
}
if (restTN < 0) {
throw SequenceException("Found a negative number of TN for a moving object!");
}
} else {
actTNPerMovObj[actAreaIdx[nr_movObj - 1].first] = restTN;
}
}
} else if (nr_movObj > 0){
actTPPerMovObj[0] = actTruePosOnMovObj;
actTNPerMovObj[0] = actTrueNegOnMovObj;
}
} else {
actCorrsOnMovObj = 0;
actTruePosOnMovObj = 0;
actTrueNegOnMovObj = 0;
actTPPerMovObj.clear();
actTNPerMovObj.clear();
movObjLabels.clear();
combMovObjLabels = cv::Mat::zeros(imgSize, CV_8UC1);
movObjLabelsROIs.clear();
}
//Combine existing and new labels of moving objects
combMovObjLabelsAll = combMovObjLabels | movObjMaskFromLast;
//Check the inlier ratio, TP, TN, and number of correspondences
if((nr_movObj > 0) && (verbose & PRINT_WARNING_MESSAGES)) {
int32_t sumTPMO = 0, sumTNMO = 0, sumCorrsMO = 0;
for (auto& i:actTPPerMovObj) {
sumTPMO += i;
}
for (auto& i:actTNPerMovObj) {
sumTNMO += i;
}
sumCorrsMO = sumTPMO + sumTNMO;
if (sumCorrsMO != actCorrsOnMovObj) {
cout << "Sum of number of correspondences on moving objects is different to given number." << endl;
if (sumTPMO != actTruePosOnMovObj) {
cout << "Sum of number of TP correspondences on moving objects is different to given number. Sum: " <<
sumTPMO << " Given: " << actTruePosOnMovObj << endl;
}
if (sumTNMO != actTrueNegOnMovObj) {
cout << "Sum of number of TN correspondences on moving objects is different to given number. Sum: " <<
sumTNMO << " Given: " << actTrueNegOnMovObj << endl;
}
}
double inlRatDiffMO = (double) sumTPMO / (double) sumCorrsMO - inlRat[actFrameCnt];
double testVal = min((double) sumCorrsMO / 100.0, 1.0) * inlRatDiffMO / 300.0;
if (!nearZero(testVal)) {
cout << "Inlier ratio of moving object correspondences differs from global inlier ratio (0 - 1.0) by "
<< inlRatDiffMO << endl;
}
//Check the overall inlier ratio
double tps = (double) sum(nrTruePosRegs[actFrameCnt])[0] + sumTPMO;
double nrCorrs1 = (double) sum(nrCorrsRegs[actFrameCnt])[0] + sumCorrsMO;
double inlRatDiffSR = tps / (nrCorrs1 + DBL_EPSILON) - inlRat[actFrameCnt];
testVal = min(nrCorrs1 / 100.0, 1.0) * inlRatDiffSR / 300.0;
if (!nearZero(testVal)) {
cout << "Inlier ratio of combined static and moving correspondences after changing it because of moving objects differs "
"from global inlier ratio (0 - 1.0) by "
<< inlRatDiffSR << ". THIS SHOULD NOT HAPPEN!" << endl;
}
}
}
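//Set the new number of static correspondences (TP and TN) per image region according to statCorrsPRegNew:
// the TN are distributed first following the previous relative TN distribution over the regions (high TN counts
// indicate regions where the stereo views do not intersect) and are capped by the new per-region totals;
// remaining TN are spread over regions with free capacity; the TP per region follow as the new total minus TN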
void genStereoSequ::adaptStatNrCorrsReg(const cv::Mat &statCorrsPRegNew){
CV_Assert((statCorrsPRegNew.cols == 3) && (statCorrsPRegNew.rows == 3) && (statCorrsPRegNew.type() == CV_32SC1));
//Calculate the TN per region first as this is an indicator for regions where the camera views of the 2 stereo
// cameras are not intersecting
Mat TNdistr, TNdistr1;
nrTrueNegRegs[actFrameCnt].convertTo(TNdistr, CV_64FC1);
Mat areaFull = Mat::zeros(3, 3, CV_8UC1);
double sumTN = sum(TNdistr)[0];
if(!nearZero(sumTN)) {
TNdistr /= sumTN;
double allRegCorrsNew = (double) sum(statCorrsPRegNew)[0];
double nrTPallReg = allRegCorrsNew * inlRat[actFrameCnt];
double nrTNallReg = allRegCorrsNew - nrTPallReg;
TNdistr1 = TNdistr * nrTNallReg;
nrTrueNegRegs[actFrameCnt].release();
TNdistr1.convertTo(nrTrueNegRegs[actFrameCnt], CV_32SC1, 1.0, 0.5);//Corresponds to round
nrTrueNegRegs[actFrameCnt].copyTo(TNdistr1);
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
if (nrTrueNegRegs[actFrameCnt].at<int32_t>(y, x) > statCorrsPRegNew.at<int32_t>(y, x)) {
nrTrueNegRegs[actFrameCnt].at<int32_t>(y, x) = statCorrsPRegNew.at<int32_t>(y, x);
areaFull.at<bool>(y, x) = true;
}
}
}
}
auto remStatrem = (int32_t)round(sum(TNdistr1)[0] - sum(nrTrueNegRegs[actFrameCnt])[0]);
if(remStatrem > 0) {
int32_t remStat = remStatrem;
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
if (!areaFull.at<bool>(y, x) && (remStatrem > 0)) {
auto val = (int32_t) round(
TNdistr.at<double>(y, x) * (double) remStat);
int32_t newval = nrTrueNegRegs[actFrameCnt].at<int32_t>(y, x) + val;
if (newval > statCorrsPRegNew.at<int32_t>(y, x)) {
int32_t diff = newval - statCorrsPRegNew.at<int32_t>(y, x);
val -= diff;
newval -= diff;
remStatrem -= val;
if (remStatrem < 0) {
val += remStatrem;
newval = nrTrueNegRegs[actFrameCnt].at<int32_t>(y, x) + val;
remStatrem = 0;
}
nrTrueNegRegs[actFrameCnt].at<int32_t>(y, x) = newval;
} else {
remStatrem -= val;
if (remStatrem < 0) {
nrTrueNegRegs[actFrameCnt].at<int32_t>(y, x) = newval + remStatrem;
remStatrem = 0;
} else {
nrTrueNegRegs[actFrameCnt].at<int32_t>(y, x) = newval;
}
}
}
}
}
if (remStatrem > 0) {
vector<pair<int, int32_t>> availableCorrs(9);
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
const int idx = y * 3 + x;
availableCorrs[idx] = make_pair(idx, statCorrsPRegNew.at<int32_t>(y, x) -
nrTrueNegRegs[actFrameCnt].at<int32_t>(y, x));
}
}
sort(availableCorrs.begin(), availableCorrs.end(),
[](pair<int, int32_t> first, pair<int, int32_t> second) {
return first.second > second.second;
});
int maxIt = remStatrem;
while ((remStatrem > 0) && (maxIt > 0)) {
for (int i = 0; i < 9; i++) {
if (availableCorrs[i].second > 0) {
int y = availableCorrs[i].first / 3;
int x = availableCorrs[i].first - y * 3;
nrTrueNegRegs[actFrameCnt].at<int32_t>(y, x)++;
remStatrem--;
availableCorrs[i].second--;
if (remStatrem == 0) {
break;
}
}
}
maxIt--;
}
}
}
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
if(verbose & PRINT_WARNING_MESSAGES) {
if ((nrCorrsRegs[actFrameCnt].at<int32_t>(y, x) == 0) && (statCorrsPRegNew.at<int32_t>(y, x) != 0)) {
cout << "Distributed correspondences on areas that should not hold correspondences!" << endl;
}
}
nrTruePosRegs[actFrameCnt].at<int32_t>(y, x) = statCorrsPRegNew.at<int32_t>(y, x) -
nrTrueNegRegs[actFrameCnt].at<int32_t>(y, x);
nrCorrsRegs[actFrameCnt].at<int32_t>(y, x) = statCorrsPRegNew.at<int32_t>(y, x);
}
}
//Check the inlier ratio
if(verbose & PRINT_WARNING_MESSAGES) {
double tps = (double) sum(nrTruePosRegs[actFrameCnt])[0];
double nrCorrs1 = (double) sum(nrCorrsRegs[actFrameCnt])[0];
double inlRatDiffSR = tps / (nrCorrs1 + DBL_EPSILON) - inlRat[actFrameCnt];
double testVal = min(nrCorrs1 / 100.0, 1.0) * inlRatDiffSR / 300.0;
if (!nearZero(testVal)) {
cout << "Inlier ratio of static correspondences after changing the number of correspondences per region"
" because of moving objects differs "
"from global inlier ratio (0 - 1.0) by "
<< inlRatDiffSR << endl;
}
}
}
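//Adapt the per-region number of static correspondences based on the correspondences of backprojected moving
// objects (objects created in earlier frames): surplus backprojected correspondences above the user-specified
// fraction are removed first; afterwards, the overlap of each region with the backprojected object mask
// determines how many static correspondences it keeps, and the difference is redistributed between the static
// regions and the backprojected objects. No new moving objects are generated here, so the corresponding
// variables are cleared at the end.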
void genStereoSequ::adaptNrStaticCorrsBasedOnMovCorrs(const cv::Mat &mask){
if(actCorrsOnMovObjFromLast <= 0){
return;
}
//Remove correspondences from backprojected moving objects if there are too many of them
auto maxNrOldMovCorrs = (int32_t) round(pars.CorrMovObjPort * (double) nrCorrs[actFrameCnt]);
if(actCorrsOnMovObjFromLast > maxNrOldMovCorrs){
int32_t remSize = actCorrsOnMovObjFromLast - maxNrOldMovCorrs;
//Remove them based on the ratio of the number of correspondences
adaptNrBPMovObjCorrs(remSize);
}
//Get overlap of regions and the portion of correspondences that is covered by the moving objects
vector<vector<double>> movObjOverlap(3, vector<double>(3, 0));
movObjHasArea = vector<vector<bool>>(3, vector<bool>(3, false));
vector<vector<int32_t>> movObjCorrsFromStatic(3, vector<int32_t>(3, 0));
vector<vector<int32_t>> movObjCorrsFromStaticInv(3, vector<int32_t>(3, 0));
int32_t absNrCorrsFromStatic = 0;
Mat statCorrsPRegNew = Mat::zeros(3, 3, CV_32SC1);
double oldMovObjAreaImgRat = (double) cv::countNonZero(mask) / (double) imgSize.area();
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
movObjOverlap[y][x] = (double) (cv::countNonZero(mask(regROIs[y][x]))) /
(double) (regROIs[y][x].area());
CV_Assert(movObjOverlap[y][x] >= 0);
if (movObjOverlap[y][x] > 0.9) {
movObjHasArea[y][x] = true;
movObjCorrsFromStatic[y][x] = nrCorrsRegs[actFrameCnt].at<int32_t>(y, x);
movObjCorrsFromStaticInv[y][x] = 0;
absNrCorrsFromStatic += movObjCorrsFromStatic[y][x];
} else if (nearZero(movObjOverlap[y][x])) {
statCorrsPRegNew.at<int32_t>(y, x) = nrCorrsRegs[actFrameCnt].at<int32_t>(y, x);
movObjCorrsFromStatic[y][x] = 0;
movObjCorrsFromStaticInv[y][x] = nrCorrsRegs[actFrameCnt].at<int32_t>(y, x);
} else {
movObjCorrsFromStatic[y][x] = (int32_t) round(
(double) nrCorrsRegs[actFrameCnt].at<int32_t>(y, x) * movObjOverlap[y][x]);
int32_t maxFromOld = actCorrsOnMovObjFromLast;
if(!nearZero(100.0 * oldMovObjAreaImgRat)) {
maxFromOld = (int32_t) round(
(double) actCorrsOnMovObjFromLast * movObjOverlap[y][x] / oldMovObjAreaImgRat);
}
movObjCorrsFromStatic[y][x] =
movObjCorrsFromStatic[y][x] > maxFromOld ? maxFromOld : movObjCorrsFromStatic[y][x];
movObjCorrsFromStaticInv[y][x] =
nrCorrsRegs[actFrameCnt].at<int32_t>(y, x) - movObjCorrsFromStatic[y][x];
absNrCorrsFromStatic += movObjCorrsFromStatic[y][x];
statCorrsPRegNew.at<int32_t>(y, x) = movObjCorrsFromStaticInv[y][x];
}
}
}
//Distribute the remaining # of correspondences on the regions
int corrDiff = absNrCorrsFromStatic - actCorrsOnMovObjFromLast;
if (corrDiff < 0)//Remove additional static correspondences and add them to the moving objects
{
int32_t remStat = actCorrsOnMovObjFromLast - absNrCorrsFromStatic;
distributeStatObjCorrsOnMovObj(remStat,
absNrCorrsFromStatic,
movObjCorrsFromStaticInv,
statCorrsPRegNew);
} else if (corrDiff > 0)//Distribute a part of the correspondences from moving objects over the static elements not covered by moving objects
{
int32_t remMov = absNrCorrsFromStatic - actCorrsOnMovObjFromLast;
cv::Mat mask_tmp = (mask == 0);
distributeMovObjCorrsOnStatObj(remMov,
absNrCorrsFromStatic,
mask_tmp,
movObjCorrsFromStaticInv,
movObjOverlap,
statCorrsPRegNew);
}
if(verbose & PRINT_WARNING_MESSAGES) {
int32_t nrCorrsDiff = (int32_t)sum(statCorrsPRegNew)[0] + actCorrsOnMovObjFromLast - (int32_t)nrCorrs[actFrameCnt];
if (nrCorrsDiff != 0) {
cout << "Number of total correspondences differs by " << nrCorrsDiff << endl;
}
}
//Set new number of static correspondences
adaptStatNrCorrsReg(statCorrsPRegNew);
actCorrsOnMovObj = 0;
actTruePosOnMovObj = 0;
actTrueNegOnMovObj = 0;
actTPPerMovObj.clear();
actTNPerMovObj.clear();
movObjLabels.clear();
combMovObjLabels = cv::Mat::zeros(imgSize, CV_8UC1);
movObjLabelsROIs.clear();
}
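//Distribute remMov correspondences that cannot be kept on moving objects over the static parts of the regions:
// each region receives a share proportional to its current static correspondence count, capped by an estimate
// of how many keypoints fit into its free (not covered by moving objects and within the stereo overlap) area
// given the minimum keypoint distance; any remainder is assigned greedily to the regions with the largest
// remaining capacity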
void genStereoSequ::distributeMovObjCorrsOnStatObj(int32_t remMov,
int32_t absNrCorrsFromStatic,
const cv::Mat &movObjMask,
std::vector<std::vector<int32_t>> movObjCorrsFromStaticInv,
std::vector<std::vector<double>> movObjOverlap,
cv::Mat &statCorrsPRegNew){
CV_Assert(remMov >= 0);
int32_t actStatCorrs = (int32_t)nrCorrs[actFrameCnt] - absNrCorrsFromStatic;
CV_Assert(actStatCorrs >= 0);
int32_t remMovrem = remMov;
vector<vector<int32_t>> cmaxreg(3, vector<int32_t>(3, 0));
cv::Mat fracUseableTPperRegion_tmp;
vector<double> combDepths(3);
combDepths[0] = actDepthMid;
combDepths[1] = actDepthFar + (maxFarDistMultiplier - 1.0) * actDepthFar / 2.0;
combDepths[2] = actDepthNear + (actDepthMid - actDepthNear) / 2.0;
getInterSecFracRegions(fracUseableTPperRegion_tmp,
actR,
actT,
combDepths,
movObjMask);
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
if (!movObjHasArea[y][x] && (remMovrem > 0)) {
int32_t val = movObjCorrsFromStaticInv[y][x];
if(actStatCorrs != 0) {
val = (int32_t) round(
(double) movObjCorrsFromStaticInv[y][x] / (double) actStatCorrs * (double) remMov);
}
int32_t newval = movObjCorrsFromStaticInv[y][x] + val;
//Get the maximum # of correspondences per area using the minimum distance between keypoints
if (nearZero(actFracUseableTPperRegion.at<double>(y, x))) {
cmaxreg[y][x] = nrCorrsRegs[actFrameCnt].at<int32_t>(y, x);
} else if (!nearZero(actFracUseableTPperRegion.at<double>(y, x) - 1.0)) {
cmaxreg[y][x] = (int32_t) (
(double) ((regROIs[y][x].width - 1) * (regROIs[y][x].height - 1)) *
(1.0 - movObjOverlap[y][x]) * fracUseableTPperRegion_tmp.at<double>(y, x) /
(enlargeKPDist * avgMaskingArea));
} else {
cmaxreg[y][x] = (int32_t) (
(double) ((regROIs[y][x].width - 1) * (regROIs[y][x].height - 1)) *
(1.0 - movObjOverlap[y][x]) /
(enlargeKPDist * avgMaskingArea));
}
if (newval <= cmaxreg[y][x]) {
remMovrem -= val;
if (remMovrem < 0) {
val += remMovrem;
newval = movObjCorrsFromStaticInv[y][x] + val;
remMovrem = 0;
}
statCorrsPRegNew.at<int32_t>(y, x) = newval;
cmaxreg[y][x] -= newval;
} else {
if (movObjCorrsFromStaticInv[y][x] < cmaxreg[y][x]) {
statCorrsPRegNew.at<int32_t>(y, x) = cmaxreg[y][x];
remMovrem -= cmaxreg[y][x] - movObjCorrsFromStaticInv[y][x];
if (remMovrem < 0) {
statCorrsPRegNew.at<int32_t>(y, x) += remMovrem;
remMovrem = 0;
}
cmaxreg[y][x] -= statCorrsPRegNew.at<int32_t>(y, x);
} else {
cmaxreg[y][x] = 0;
}
}
}
}
}
if (remMovrem > 0) {
vector<pair<int, int32_t>> movObjCorrsFromStaticInv_tmp(9);
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
const int idx = y * 3 + x;
movObjCorrsFromStaticInv_tmp[idx] = make_pair(idx, cmaxreg[y][x]);
}
}
sort(movObjCorrsFromStaticInv_tmp.begin(), movObjCorrsFromStaticInv_tmp.end(),
[](pair<int, int32_t> first, pair<int, int32_t> second) {
return first.second > second.second;
});
int maxIt = remMovrem;
while ((remMovrem > 0) && (maxIt > 0)) {
for (int i = 0; i < 9; i++) {
int y = movObjCorrsFromStaticInv_tmp[i].first / 3;
int x = movObjCorrsFromStaticInv_tmp[i].first - y * 3;
if ((movObjCorrsFromStaticInv_tmp[i].second > 0) && (statCorrsPRegNew.at<int32_t>(y, x) > 0)) {
statCorrsPRegNew.at<int32_t>(y, x)++;
remMovrem--;
movObjCorrsFromStaticInv_tmp[i].second--;
if (remMovrem == 0) {
break;
}
}
}
maxIt--;
}
}
}
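//Remove remStat correspondences from the static parts of the regions (proportional to their static
// correspondence counts, never below zero) so that they can be generated on moving objects instead; any
// remainder is removed greedily from the regions holding the most static correspondences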
void genStereoSequ::distributeStatObjCorrsOnMovObj(int32_t remStat,
int32_t absNrCorrsFromStatic,
std::vector<std::vector<int32_t>> movObjCorrsFromStaticInv,
cv::Mat &statCorrsPRegNew){
CV_Assert(remStat >= 0);
int32_t actStatCorrs = (int32_t)nrCorrs[actFrameCnt] - absNrCorrsFromStatic;
CV_Assert(actStatCorrs >= 0);
if(actStatCorrs == 0)
return;
int32_t remStatrem = remStat;
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
if (!movObjHasArea[y][x] && (remStatrem > 0)) {
auto val = (int32_t) round(
(double) movObjCorrsFromStaticInv[y][x] / (double) actStatCorrs * (double) remStat);
int32_t newval = movObjCorrsFromStaticInv[y][x] - val;
if (newval > 0) {
remStatrem -= val;
if (remStatrem < 0) {
val += remStatrem;
newval = movObjCorrsFromStaticInv[y][x] - val;
remStatrem = 0;
}
statCorrsPRegNew.at<int32_t>(y, x) = newval;
} else {
remStatrem -= val + newval;
if (remStatrem < 0) {
statCorrsPRegNew.at<int32_t>(y, x) = -remStatrem;
remStatrem = 0;
} else {
statCorrsPRegNew.at<int32_t>(y, x) = 0;
}
}
}
}
}
if (remStatrem > 0) {
vector<pair<int, int32_t>> movObjCorrsFromStaticInv_tmp(9);
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
const int idx = y * 3 + x;
movObjCorrsFromStaticInv_tmp[idx] = make_pair(idx, statCorrsPRegNew.at<int32_t>(y, x));
}
}
sort(movObjCorrsFromStaticInv_tmp.begin(), movObjCorrsFromStaticInv_tmp.end(),
[](pair<int, int32_t> first, pair<int, int32_t> second) {
return first.second > second.second;
});
int maxIt = remStatrem;
while ((remStatrem > 0) && (maxIt > 0)) {
for (int i = 0; i < 9; i++) {
if (movObjCorrsFromStaticInv_tmp[i].second > 0) {
int y = movObjCorrsFromStaticInv_tmp[i].first / 3;
int x = movObjCorrsFromStaticInv_tmp[i].first - y * 3;
statCorrsPRegNew.at<int32_t>(y, x)--;
remStatrem--;
movObjCorrsFromStaticInv_tmp[i].second--;
if (remStatrem == 0) {
break;
}
}
}
maxIt--;
}
}
}
//Assign a depth category to each new object label and calculate all depth values for each label
void genStereoSequ::genNewDepthMovObj() {
if (pars.nrMovObjs == 0)
return;
if(movObjLabels.empty()) {
movObjDepthClassNew.clear();
return;
}
//Get the depth classes that should be used for the newly generated moving objects
if (pars.movObjDepth.empty()) {
pars.movObjDepth.push_back(depthClass::MID);
}
if (pars.movObjDepth.size() == pars.nrMovObjs)//Take for every moving object its corresponding depth
{
if (!movObjDepthClass.empty() && (movObjDepthClass.size() < pars.nrMovObjs)) {
vector<bool> usedDepths(pars.movObjDepth.size(), false);
for (size_t i = 0; i < pars.movObjDepth.size(); i++) {
for (size_t j = 0; j < movObjDepthClass.size(); j++) {
if ((pars.movObjDepth[i] == movObjDepthClass[j]) && !usedDepths[i]) {
usedDepths[i] = true;
break;
}
}
}
movObjDepthClassNew.resize(movObjLabels.size());
for (size_t i = 0; i < movObjLabels.size(); i++) {
for (size_t j = 0; j < usedDepths.size(); j++) {
if (!usedDepths[j]) {
usedDepths[j] = true;
movObjDepthClassNew[i] = pars.movObjDepth[j];
break;
}
}
}
} else if (movObjDepthClass.empty()) {
movObjDepthClassNew.clear();
//copy(pars.movObjDepth.begin(), pars.movObjDepth.begin() + movObjLabels.size(), movObjDepthClassNew.begin());
movObjDepthClassNew.insert(movObjDepthClassNew.end(), pars.movObjDepth.begin(),
pars.movObjDepth.begin() + movObjLabels.size());
} else {
cout << "No new moving objects! This should not happen!" << endl;
return;
}
} else if ((pars.movObjDepth.size() == 1))//Always take this depth class
{
movObjDepthClassNew.clear();
movObjDepthClassNew.resize(movObjLabels.size(), pars.movObjDepth[0]);
} else if ((pars.movObjDepth.size() < pars.nrMovObjs) && (pars.movObjDepth.size() > 1) &&
(pars.movObjDepth.size() < 4))//Randomly choose a depth for every single object
{
movObjDepthClassNew.clear();
movObjDepthClassNew.resize(movObjLabels.size());
for (size_t i = 0; i < movObjLabels.size(); i++) {
int rval = (int)(rand2() % pars.movObjDepth.size());
movObjDepthClassNew[i] = pars.movObjDepth[rval];
}
} else//Use the given distribution of depth classes based on the number of given depth classes
{
movObjDepthClassNew.clear();
movObjDepthClassNew.resize(movObjLabels.size());
std::array<double, 3> depthDist = {{0, 0, 0}};
for (auto& i : pars.movObjDepth) {
switch (i) {
case depthClass::NEAR:
depthDist[0]++;
break;
case depthClass::MID:
depthDist[1]++;
break;
case depthClass::FAR:
depthDist[2]++;
break;
default:
break;
}
}
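//Sample a depth class for every new moving object according to the relative frequencies of the classes in pars.movObjDepth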
std::discrete_distribution<int> distribution(depthDist.begin(), depthDist.end());
for (size_t i = 0; i < movObjLabels.size(); i++) {
int usedDepthClass = distribution(rand_gen);
switch (usedDepthClass) {
case 0:
movObjDepthClassNew[i] = depthClass::NEAR;
break;
case 1:
movObjDepthClassNew[i] = depthClass::MID;
break;
case 2:
movObjDepthClassNew[i] = depthClass::FAR;
break;
default:
break;
}
}
}
//Get random parameters for the depth function
std::vector<std::vector<double>> depthFuncPars;
getRandDepthFuncPars(depthFuncPars, movObjDepthClassNew.size());
//Get depth values for every pixel position inside the new labels
combMovObjDepths = Mat::zeros(imgSize, CV_64FC1);
for (size_t i = 0; i < movObjDepthClassNew.size(); i++) {
double dmin = actDepthNear, dmax = actDepthMid;
switch (movObjDepthClassNew[i]) {
case depthClass::NEAR:
dmin = actDepthNear;
dmax = actDepthMid;
break;
case depthClass::MID:
dmin = actDepthMid;
dmax = actDepthFar;
break;
case depthClass::FAR:
dmin = actDepthFar;
dmax = maxFarDistMultiplier * actDepthFar;
break;
default:
break;
}
double dmin_tmp = getRandDoubleValRng(dmin, dmin + 0.6 * (dmax - dmin));
double dmax_tmp = getRandDoubleValRng(dmin_tmp + 0.1 * (dmax - dmin), dmax);
double drange = dmax_tmp - dmin_tmp;
double rXr = getRandDoubleValRng(1.5, 3.0);
double rYr = getRandDoubleValRng(1.5, 3.0);
auto h2 = (double) movObjLabelsROIs[i].height;
h2 *= h2;
auto w2 = (double) movObjLabelsROIs[i].width;
w2 *= w2;
double scale = sqrt(h2 + w2) / 2.0;
double rXrSc = rXr / scale;
double rYrSc = rYr / scale;
double cx = (double) movObjLabelsROIs[i].width / 2.0;
double cy = (double) movObjLabelsROIs[i].height / 2.0;
double minVal = DBL_MAX, maxVal = -DBL_MAX;
Mat objArea = movObjLabels[i](movObjLabelsROIs[i]);
Mat objAreaDepths = combMovObjDepths(movObjLabelsROIs[i]);
for (int y = 0; y < movObjLabelsROIs[i].height; y++) {
for (int x = 0; x < movObjLabelsROIs[i].width; x++) {
if (objArea.at<unsigned char>(y, x) != 0) {
double val = getDepthFuncVal(depthFuncPars[i], ((double) x - cx) * rXrSc,
((double) y - cy) * rYrSc);
objAreaDepths.at<double>(y, x) = val;
if (val > maxVal)
maxVal = val;
if (val < minVal)
minVal = val;
}
}
}
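//Shift and scale the raw depth-function values of this object into the randomly chosen range [dmin_tmp, dmax_tmp]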
double ra = maxVal - minVal;
if(nearZero(ra))
ra = 1.0;
scale = drange / ra;
for (int y = 0; y < movObjLabelsROIs[i].height; y++) {
for (int x = 0; x < movObjLabelsROIs[i].width; x++) {
if (objArea.at<unsigned char>(y, x) != 0) {
double val = objAreaDepths.at<double>(y, x);
val -= minVal;
val *= scale;
val += dmin_tmp;
objAreaDepths.at<double>(y, x) = val;
}
}
}
}
//Visualize the depth values
if (verbose & SHOW_MOV_OBJ_DISTANCES) {
Mat normalizedDepth, labelMask = cv::Mat::zeros(imgSize, CV_8UC1);
for (auto& dl : movObjLabels) {
labelMask |= (dl != 0);
}
cv::normalize(combMovObjDepths, normalizedDepth, 0.1, 1.0, cv::NORM_MINMAX, -1, labelMask);
if(!writeIntermediateImg(normalizedDepth, "normalized_moving_obj_depth")){
namedWindow("Normalized Moving Obj Depth", WINDOW_AUTOSIZE);
imshow("Normalized Moving Obj Depth", normalizedDepth);
waitKey(0);
destroyWindow("Normalized Moving Obj Depth");
}
}
}
void genStereoSequ::clearNewMovObjVars() {
movObjCorrsImg1TP.clear();
movObjCorrsImg2TP.clear();
movObjCorrsImg1TN.clear();
movObjCorrsImg2TN.clear();
movObj3DPtsCamNew.clear();
movObjDistTNtoRealNew.clear();
actCorrsOnMovObj_IdxWorld.clear();
}
//Generate correspondences and TN for newly generated moving objects
void genStereoSequ::getMovObjCorrs() {
size_t nr_movObj = actTPPerMovObj.size();
if(nr_movObj == 0) {
clearNewMovObjVars();
movObjMaskFromLast2.copyTo(movObjMask2All);
return;
}
int32_t kSi = csurr.rows;
int32_t posadd = (kSi - 1) / 2;
Point_<int32_t> pt;
Point2d pt2;
Point3d pCam;
Mat corrsSet = Mat::zeros(imgSize.height + kSi - 1, imgSize.width + kSi - 1, CV_8UC1);
cv::copyMakeBorder(movObjMaskFromLast2, movObjMask2All, posadd, posadd, posadd, posadd, BORDER_CONSTANT,
Scalar(0));//movObjMask2All must be reduced to image size at the end
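//corrsSet marks occupied keypoint positions (including their csurr neighborhood) in the first image; both masks carry a border of posadd pixels so the kSi x kSi neighborhood around any image position can be accessed without bounds checks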
//int maxIt = 20;
//For visualization
int dispit = 0;
const int dispit_interval = 50;
//Generate TP correspondences
clearNewMovObjVars();
movObjCorrsImg1TP.resize(nr_movObj);
movObjCorrsImg2TP.resize(nr_movObj);
movObjCorrsImg1TN.resize(nr_movObj);
movObjCorrsImg2TN.resize(nr_movObj);
movObj3DPtsCamNew.resize(nr_movObj);
movObjDistTNtoRealNew.resize(nr_movObj);
actCorrsOnMovObj_IdxWorld.resize(nr_movObj);
for (int i = 0; i < (int)nr_movObj; i++) {
std::uniform_int_distribution<int32_t> distributionX(movObjLabelsROIs[i].x,
movObjLabelsROIs[i].x + movObjLabelsROIs[i].width - 1);
std::uniform_int_distribution<int32_t> distributionY(movObjLabelsROIs[i].y,
movObjLabelsROIs[i].y + movObjLabelsROIs[i].height -
1);
//int cnt1 = 0;
int nrTN = actTNPerMovObj[i];
int nrTP = actTPPerMovObj[i];
vector<Point2d> x1TN, x2TN;
vector<Point2d> x1TP, x2TP;
int32_t maxSelect = 50;
int32_t maxSelect2 = 50;
int32_t maxSelect3 = 50;
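//Randomly select TP positions inside the object label; maxSelect limits retries for occupied positions in the first image, maxSelect2 for occupied positions in the second image, and maxSelect3 for candidates that are no inlier and not needed as TN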
while ((nrTP > 0) && (maxSelect > 0) && (maxSelect2 > 0) && (maxSelect3 > 0)) {
pt.x = distributionX(rand_gen);
pt.y = distributionY(rand_gen);
Mat s_tmp = corrsSet(Rect(pt, Size(kSi, kSi)));
if ((movObjLabels[i].at<unsigned char>(pt) == 0) || (s_tmp.at<unsigned char>(posadd, posadd) > 0)) {
maxSelect--;
continue;
}
maxSelect = 50;
//Check if it is also an inlier in the right image
bool isInl = checkLKPInlier(pt, pt2, pCam, combMovObjDepths);
if (isInl) {
Mat s_tmp1 = movObjMask2All(Rect((int) round(pt2.x), (int) round(pt2.y), kSi, kSi));
if (s_tmp1.at<unsigned char>(posadd, posadd) > 0) {
maxSelect2--;
continue;
}
s_tmp1.at<unsigned char>(posadd,
posadd) = 1;//The minimum distance between keypoints in the second image is fixed to approx. 1 for new correspondences
maxSelect2 = 50;
}
s_tmp += csurr;
if (!isInl) {
if (nrTN > 0) {
x1TN.emplace_back(Point2d((double) pt.x, (double) pt.y));
nrTN--;
} else {
maxSelect3--;
s_tmp -= csurr;
}
continue;
}
maxSelect3 = 50;
nrTP--;
x1TP.emplace_back(Point2d((double) pt.x, (double) pt.y));
x2TP.push_back(pt2);
movObj3DPtsCamNew[i].push_back(pCam);
//Visualize the masks
if (verbose & SHOW_MOV_OBJ_CORRS_GEN) {
if (dispit % dispit_interval == 0) {
if(!writeIntermediateImg((corrsSet > 0), "moving_obj_corrs_mask_img1_step_" + std::to_string(dispit)) ||
!writeIntermediateImg((movObjMask2All > 0), "moving_obj_corrs_mask_img2_step_" + std::to_string(dispit))){
namedWindow("Move Corrs mask img1", WINDOW_AUTOSIZE);
imshow("Move Corrs mask img1", (corrsSet > 0));
namedWindow("Move Corrs mask img2", WINDOW_AUTOSIZE);
imshow("Move Corrs mask img2", (movObjMask2All > 0));
waitKey(0);
destroyWindow("Move Corrs mask img1");
destroyWindow("Move Corrs mask img2");
}
}
dispit++;
}
}
actCorrsOnMovObj_IdxWorld[i].resize(movObj3DPtsCamNew[i].size());
std::iota(actCorrsOnMovObj_IdxWorld[i].begin(), actCorrsOnMovObj_IdxWorld[i].end(), 0);
//If there are still correspondences missing, try to use them in the next object
/*if ((nrTP > 0) && (i < (nr_movObj - 1)))
{
actTPPerMovObj[i + 1] += nrTP;
}*/
std::uniform_int_distribution<int32_t> distributionX2(0, imgSize.width - 1);
std::uniform_int_distribution<int32_t> distributionY2(0, imgSize.height - 1);
//Find TN keypoints in the second image for already found TN in the first image
size_t corrsNotVisible = x1TN.size();
if (!x1TN.empty()) {
//Generate mask for visualization before adding keypoints
Mat dispMask;
if (verbose & SHOW_MOV_OBJ_CORRS_GEN) {
dispMask = (movObjMask2All > 0);
}
for (size_t j = 0; j < corrsNotVisible; j++) {
int max_try = 10;
while (max_try > 0) {
pt.x = distributionX2(rand_gen);
pt.y = distributionY2(rand_gen);
Mat s_tmp = movObjMask2All(Rect(pt, Size(kSi, kSi)));
if (s_tmp.at<unsigned char>(posadd, posadd) > 0) {
max_try--;
continue;
}
//csurr.copyTo(s_tmp);
s_tmp.at<unsigned char>(posadd, posadd) = 1;
x2TN.emplace_back(Point2d((double) pt.x, (double) pt.y));
movObjDistTNtoRealNew[i].push_back(fakeDistTNCorrespondences);
break;
}
}
while (x1TN.size() > x2TN.size()) {
Mat s_tmp = corrsSet(Rect(Point_<int32_t>((int32_t) round(x1TN.back().x),
(int32_t) round(x1TN.back().y)), Size(kSi, kSi)));
s_tmp -= csurr;
x1TN.pop_back();
nrTN++;
}
//Visualize the mask afterwards
if (verbose & SHOW_MOV_OBJ_CORRS_GEN) {
Mat dispMask2 = (movObjMask2All > 0);
vector<Mat> channels;
Mat b = Mat::zeros(dispMask2.size(), CV_8UC1);
channels.push_back(b);
channels.push_back(dispMask);
channels.push_back(dispMask2);
Mat img3c;
merge(channels, img3c);
if(!writeIntermediateImg(img3c, "moving_obj_TN_corrs_mask_img2")){
namedWindow("Move TN Corrs mask img2", WINDOW_AUTOSIZE);
imshow("Move TN Corrs mask img2", img3c);
waitKey(0);
destroyWindow("Move TN Corrs mask img2");
}
}
}
//Get the rest of TN correspondences
if (nrTN > 0) {
std::vector<Point2d> x1TN_tmp, x2TN_tmp;
std::vector<double> x2TNdistCorr_tmp;
Mat maskImg1;
copyMakeBorder(movObjLabels[i], maskImg1, posadd, posadd, posadd, posadd, BORDER_CONSTANT, Scalar(0));
maskImg1 = (maskImg1 == 0) | corrsSet;
//Generate mask for visualization before adding keypoints
Mat dispMaskImg2;
Mat dispMaskImg1;
if (verbose & SHOW_MOV_OBJ_CORRS_GEN) {
dispMaskImg2 = (movObjMask2All > 0);
dispMaskImg1 = (maskImg1 > 0);
}
//Generate a depth map for generating TN based on the depth of the actual moving object
double dmin = actDepthNear, dmax = actDepthMid;
switch (movObjDepthClassNew[i]) {
case depthClass::NEAR:
dmin = actDepthNear;
dmax = actDepthMid;
break;
case depthClass::MID:
dmin = actDepthMid;
dmax = actDepthFar;
break;
case depthClass::FAR:
dmin = actDepthFar;
dmax = maxFarDistMultiplier * actDepthFar;
break;
default:
break;
}
Mat randDepth(imgSize, CV_64FC1);
randu(randDepth, Scalar(dmin), Scalar(dmax + 0.001));
nrTN = genTrueNegCorrs(nrTN, distributionX, distributionY, distributionX2, distributionY2, x1TN_tmp,
x2TN_tmp, x2TNdistCorr_tmp, maskImg1, movObjMask2All,
randDepth);//, movObjLabels[i]);
//Visualize the mask afterwards
if (verbose & SHOW_MOV_OBJ_CORRS_GEN) {
Mat dispMask2Img2 = (movObjMask2All > 0);
Mat dispMask2Img1 = (maskImg1 > 0);
vector<Mat> channels, channels1;
Mat b = Mat::zeros(dispMask2Img2.size(), CV_8UC1);
channels.push_back(b);
channels.push_back(dispMaskImg2);
channels.push_back(dispMask2Img2);
channels1.push_back(b);
channels1.push_back(dispMaskImg1);
channels1.push_back(dispMask2Img1);
Mat img3c, img3c1;
merge(channels, img3c);
merge(channels1, img3c1);
if(!writeIntermediateImg(img3c1, "moving_obj_rand_TN_corrs_mask_img1") ||
!writeIntermediateImg(img3c, "moving_obj_rand_TN_corrs_mask_img2")){
namedWindow("Move rand TN Corrs mask img1", WINDOW_AUTOSIZE);
imshow("Move rand TN Corrs mask img1", img3c1);
namedWindow("Move rand TN Corrs mask img2", WINDOW_AUTOSIZE);
imshow("Move rand TN Corrs mask img2", img3c);
waitKey(0);
destroyWindow("Move rand TN Corrs mask img1");
destroyWindow("Move rand TN Corrs mask img2");
}
}
if (!x1TN_tmp.empty()) {
corrsSet(Rect(Point(posadd, posadd), imgSize)) |= (maskImg1(Rect(Point(posadd, posadd), imgSize)) &
(movObjLabels[i] > 0));
//copy(x1TN_tmp.begin(), x1TN_tmp.end(), x1TN.end());
x1TN.insert(x1TN.end(), x1TN_tmp.begin(), x1TN_tmp.end());
//copy(x2TN_tmp.begin(), x2TN_tmp.end(), x2TN.end());
x2TN.insert(x2TN.end(), x2TN_tmp.begin(), x2TN_tmp.end());
//copy(x2TNdistCorr_tmp.begin(), x2TNdistCorr_tmp.end(), movObjDistTNtoRealNew[i].end());
movObjDistTNtoRealNew[i].insert(movObjDistTNtoRealNew[i].end(), x2TNdistCorr_tmp.begin(),
x2TNdistCorr_tmp.end());
}
}
//Adapt the number of TP and TN in the next objects based on the remaining number of TP and TN of the current object
adaptNRCorrespondences(nrTP, nrTN, corrsNotVisible, x1TP.size(), i, (int32_t)nr_movObj);
//Store correspondences
if (!x1TP.empty()) {
movObjCorrsImg1TP[i] = Mat::ones(3, (int) x1TP.size(), CV_64FC1);
movObjCorrsImg2TP[i] = Mat::ones(3, (int) x1TP.size(), CV_64FC1);
movObjCorrsImg1TP[i].rowRange(0, 2) = Mat(x1TP).reshape(1).t();
movObjCorrsImg2TP[i].rowRange(0, 2) = Mat(x2TP).reshape(1).t();
}
if (!x1TN.empty()) {
movObjCorrsImg1TN[i] = Mat::ones(3, (int) x1TN.size(), CV_64FC1);
movObjCorrsImg1TN[i].rowRange(0, 2) = Mat(x1TN).reshape(1).t();
}
if (!x2TN.empty()) {
movObjCorrsImg2TN[i] = Mat::ones(3, (int) x2TN.size(), CV_64FC1);
movObjCorrsImg2TN[i].rowRange(0, 2) = Mat(x2TN).reshape(1).t();
}
}
movObjMask2All = movObjMask2All(Rect(Point(posadd, posadd), imgSize));
//Check number of TP and TN per moving object and the overall inlier ratio
if(verbose & PRINT_WARNING_MESSAGES) {
int nrCorrsMO = 0, nrTPMO = 0, nrTNMO = 0;
vector<int> nrTPperMO(nr_movObj, 0), nrTNperMO(nr_movObj, 0);
for (size_t k = 0; k < nr_movObj; ++k) {
nrTPperMO[k] = movObjCorrsImg1TP[k].cols;
nrTPMO += nrTPperMO[k];
nrTNperMO[k] = movObjCorrsImg1TN[k].cols;
nrTNMO += nrTNperMO[k];
}
nrCorrsMO = nrTPMO + nrTNMO;
if (nrCorrsMO != actCorrsOnMovObj) {
double chRate = (double) nrCorrsMO / (double) actCorrsOnMovObj;
if ((chRate < 0.90) || (chRate > 1.10)) {
cout << "Number of correspondences on moving objects is " << 100.0 * (chRate - 1.0)
<< "% different to given values!" << endl;
cout << "Actual #: " << nrCorrsMO << " Given #: " << actCorrsOnMovObj << endl;
for (size_t k = 0; k < nr_movObj; ++k) {
if ((int32_t) nrTPperMO[k] != actTPPerMovObj[k]) {
cout << "# of TP for moving object " << k << " at position (x, y): (" <<
movObjLabelsROIs[k].x + movObjLabelsROIs[k].width / 2 <<
", " << movObjLabelsROIs[k].y +
movObjLabelsROIs[k].height / 2
<< ") differs by "
<< (int32_t) nrTPperMO[k] - actTPPerMovObj[k]
<<
" correspondences (Actual #: " << nrTPperMO[k]
<< " Given #: " << actTPPerMovObj[k] << ")"
<< endl;
}
if ((int32_t) nrTNperMO[k] != actTNPerMovObj[k]) {
cout << "# of TN for moving object " << k << " at position (x, y): (" <<
movObjLabelsROIs[k].x + movObjLabelsROIs[k].width / 2 <<
", " << movObjLabelsROIs[k].y +
movObjLabelsROIs[k].height / 2
<< ") differs by "
<< (int32_t) nrTNperMO[k] - actTNPerMovObj[k]
<<
" correspondences (Actual #: " << nrTNperMO[k]
<< " Given #: " << actTNPerMovObj[k] << ")"
<< endl;
}
}
}
}
double inlRatDiffMO = (double) nrTPMO / (double) nrCorrsMO - inlRat[actFrameCnt];
double testVal = min((double)nrCorrsMO / 100.0, 1.0) * inlRatDiffMO / 300.0;
if (!nearZero(testVal)) {
cout << "Inlier ratio of moving object correspondences differs from global inlier ratio (0 - 1.0) by "
<< inlRatDiffMO << endl;
}
}
//Remove empty moving object point clouds
vector<int> delList;
for (int l = 0; l < (int)movObj3DPtsCamNew.size(); ++l) {
if(movObj3DPtsCamNew[l].empty()){
delList.push_back(l);
}
}
if(!delList.empty()){
for (int i = (int)delList.size() - 1; i >= 0; --i) {
movObj3DPtsCamNew.erase(movObj3DPtsCamNew.begin() + delList[i]);
movObjDepthClassNew.erase(movObjDepthClassNew.begin() + delList[i]);
actCorrsOnMovObj_IdxWorld.erase(actCorrsOnMovObj_IdxWorld.begin() + delList[i]);
}
}
/*for (auto itr = movObj3DPtsCamNew.rbegin();
itr != movObj3DPtsCamNew.rend(); itr++) {
if (itr->empty()) {
movObj3DPtsCamNew.erase(std::next(itr).base());
}
}*/
}
//Generate (backproject) correspondences from existing moving objects and generate hulls of the objects in the image
//Moreover, as many true negatives as needed by the inlier ratio are generated
void genStereoSequ::backProjectMovObj() {
convhullPtsObj.clear();
actTNPerMovObjFromLast.clear();
movObjLabelsFromLast.clear();
movObjCorrsImg1TPFromLast.clear();
movObjCorrsImg2TPFromLast.clear();
movObjCorrsImg12TPFromLast_Idx.clear();
movObjCorrsImg1TNFromLast.clear();
movObjCorrsImg2TNFromLast.clear();
movObjDistTNtoReal.clear();
movObjMaskFromLast = Mat::zeros(imgSize, CV_8UC1);
movObjMaskFromLast2 = Mat::zeros(imgSize, CV_8UC1);
actCorrsOnMovObjFromLast = 0;
actTruePosOnMovObjFromLast = 0;
actTrueNegOnMovObjFromLast = 0;
if (pars.nrMovObjs == 0) {
return;
}
vector<size_t> delList;
size_t actNrMovObj = movObj3DPtsCam.size();
if (movObj3DPtsCam.empty())
return;
movObjCorrsImg1TPFromLast.resize(actNrMovObj);
movObjCorrsImg2TPFromLast.resize(actNrMovObj);
movObjCorrsImg12TPFromLast_Idx.resize(actNrMovObj);
movObjCorrsImg1TNFromLast.resize(actNrMovObj);
movObjCorrsImg2TNFromLast.resize(actNrMovObj);
struct imgWH {
double width;
double height;
double maxDist;
} dimgWH = {0, 0, 0};
dimgWH.width = (double) (imgSize.width - 1);
dimgWH.height = (double) (imgSize.height - 1);
dimgWH.maxDist = maxFarDistMultiplier * actDepthFar;
int sqrSi = csurr.rows;
int posadd = (sqrSi - 1) / 2;
std::vector<Mat> movObjMaskFromLastLarge(actNrMovObj);
std::vector<Mat> movObjMaskFromLastLarge2(actNrMovObj);
movObjMaskFromLastLargeAdd = std::vector<Mat>(actNrMovObj);
std::vector<std::vector<cv::Point>> movObjPt1(actNrMovObj), movObjPt2(actNrMovObj);
//Get correspondences (TN + TP) of backprojected moving objects
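//Each 3D point is projected into both stereo images: points valid in both images (and not too close to existing correspondences) become TP, points valid in only one image become TN candidates of that image, and points invalid in both images are skipped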
for (size_t i = 0; i < actNrMovObj; i++) {
movObjMaskFromLastLarge[i] = Mat::zeros(imgSize.height + sqrSi - 1, imgSize.width + sqrSi - 1, CV_8UC1);
movObjMaskFromLastLarge2[i] = Mat::zeros(imgSize.height + sqrSi - 1, imgSize.width + sqrSi - 1, CV_8UC1);
size_t oor = 0;
size_t i2 = 0;
for (const auto& pt : movObj3DPtsCam[i]) {
i2++;
if ((pt.z < actDepthNear) ||
(pt.z > dimgWH.maxDist)) {
oor++;
continue;
}
Mat X = Mat(pt, false).reshape(1, 3);
Mat x1 = K1 * X;
if(nearZero(x1.at<double>(2))){
oor++;
continue;
}
x1 /= x1.at<double>(2);
bool outOfR[2] = {false, false};
if ((x1.at<double>(0) < 0) || (x1.at<double>(0) > dimgWH.width) ||
(x1.at<double>(1) < 0) || (x1.at<double>(1) > dimgWH.height))//Not visible in first image
{
outOfR[0] = true;
}
Mat x2 = K2 * (actR * X + actT);
if(nearZero(x2.at<double>(2))){
oor++;
continue;
}
x2 /= x2.at<double>(2);
if ((x2.at<double>(0) < 0) || (x2.at<double>(0) > dimgWH.width) ||
(x2.at<double>(1) < 0) || (x2.at<double>(1) > dimgWH.height))//Not visible in second image
{
outOfR[1] = true;
}
if (outOfR[0] || outOfR[1])
oor++;
Point ptr1 = Point((int) round(x1.at<double>(0)), (int) round(x1.at<double>(1)));
Point ptr2 = Point((int) round(x2.at<double>(0)), (int) round(x2.at<double>(1)));
//Check if the point is too near to another correspondence of a moving object
if (!outOfR[0] && movObjMaskFromLast.at<unsigned char>(ptr1) > 0)
outOfR[0] = true;
//Check if the point is too near to another correspondence of a moving object in the second image
if (!outOfR[1] && movObjMaskFromLast2.at<unsigned char>(ptr2) > 0)
outOfR[1] = true;
//Check if the point is too near to another correspondence of this moving object
if (!outOfR[0]) {
Mat s_tmp = movObjMaskFromLastLarge[i](Rect(ptr1, Size(sqrSi, sqrSi)));
if (s_tmp.at<unsigned char>(posadd, posadd) > 0)
outOfR[0] = true;
else {
// csurr.copyTo(s_tmp);
s_tmp += csurr;
}
}
//Check if the point is too near to another correspondence of this moving object in the second image
if (!outOfR[1]) {
Mat s_tmp = movObjMaskFromLastLarge2[i](Rect(ptr2, Size(sqrSi, sqrSi)));
if (s_tmp.at<unsigned char>(posadd, posadd) > 0)
outOfR[1] = true;
else {
// csurr.copyTo(s_tmp);
s_tmp.at<unsigned char>(posadd, posadd) = 1;
}
}
if (outOfR[0] && outOfR[1]) {
continue;
} else if (outOfR[0]) {
if (movObjCorrsImg2TNFromLast[i].empty()) {
movObjCorrsImg2TNFromLast[i] = x2.t();
} else {
movObjCorrsImg2TNFromLast[i].push_back(x2.t());
}
movObjPt2[i].push_back(ptr2);
} else if (outOfR[1]) {
if (movObjCorrsImg1TNFromLast[i].empty()) {
movObjCorrsImg1TNFromLast[i] = x1.t();
} else {
movObjCorrsImg1TNFromLast[i].push_back(x1.t());
}
movObjPt1[i].push_back(ptr1);
} else {
if (movObjCorrsImg1TPFromLast[i].empty()) {
movObjCorrsImg1TPFromLast[i] = x1.t();
movObjCorrsImg2TPFromLast[i] = x2.t();
} else {
movObjCorrsImg1TPFromLast[i].push_back(x1.t());
movObjCorrsImg2TPFromLast[i].push_back(x2.t());
}
movObjCorrsImg12TPFromLast_Idx[i].push_back(i2 - 1);
movObjPt1[i].push_back(ptr1);
movObjPt2[i].push_back(ptr2);
}
}
movObjMaskFromLastLarge[i].copyTo(movObjMaskFromLastLargeAdd[i]);
movObjMaskFromLastLarge[i] = (movObjMaskFromLastLarge[i] > 0) & Mat::ones(imgSize.height + sqrSi - 1,
imgSize.width + sqrSi - 1, CV_8UC1);
//Check if the portion of usable 3D points of this moving object is below a user specified threshold. If yes, delete it.
double actGoodPortion = 0;
if (!movObj3DPtsCam[i].empty()) {
actGoodPortion = (double) (movObj3DPtsCam[i].size() - oor) / (double) movObj3DPtsCam[i].size();
if(movObjCorrsImg12TPFromLast_Idx[i].empty()){
actGoodPortion = 0;
}
}
if ((actGoodPortion < pars.minMovObjCorrPortion) || nearZero(actGoodPortion)) {
delList.push_back(i);
} else {
if (!movObjCorrsImg1TNFromLast[i].empty())
movObjCorrsImg1TNFromLast[i] = movObjCorrsImg1TNFromLast[i].t();
if (!movObjCorrsImg2TNFromLast[i].empty())
movObjCorrsImg2TNFromLast[i] = movObjCorrsImg2TNFromLast[i].t();
if (!movObjCorrsImg1TPFromLast[i].empty()) {
movObjCorrsImg1TPFromLast[i] = movObjCorrsImg1TPFromLast[i].t();
movObjCorrsImg2TPFromLast[i] = movObjCorrsImg2TPFromLast[i].t();
}
Mat dispMask1, dispMask2;
if (verbose & SHOW_BACKPROJECT_MOV_OBJ_CORRS) {
dispMask1 = (movObjMaskFromLast > 0);
dispMask2 = (movObjMaskFromLast2 > 0);
}
movObjMaskFromLast |= movObjMaskFromLastLarge[i](Rect(Point(posadd, posadd), imgSize));
movObjMaskFromLast2 |= movObjMaskFromLastLarge2[i](Rect(Point(posadd, posadd), imgSize));
if (verbose & SHOW_BACKPROJECT_MOV_OBJ_CORRS) {
Mat dispMask12 = (movObjMaskFromLast > 0);
Mat dispMask22 = (movObjMaskFromLast2 > 0);
vector<Mat> channels;
Mat b = Mat::zeros(dispMask12.size(), CV_8UC1);
channels.push_back(b);
channels.push_back(dispMask1);
channels.push_back(dispMask12);
Mat img3c;
merge(channels, img3c);
if(!writeIntermediateImg(img3c, "backprojected_moving_obj_mask_of_TP_and_TN_corrs_img1")){
namedWindow("Backprojected moving objects mask of TP and TN image 1", WINDOW_AUTOSIZE);
imshow("Backprojected moving objects mask of TP and TN image 1", img3c);
}
channels.clear();
channels.push_back(b);
channels.push_back(dispMask2);
channels.push_back(dispMask22);
merge(channels, img3c);
if(!writeIntermediateImg(img3c, "backprojected_moving_obj_mask_of_TP_and_TN_corrs_img2")){
namedWindow("Backprojected moving objects mask of TP and TN image 2", WINDOW_AUTOSIZE);
imshow("Backprojected moving objects mask of TP and TN image 2", img3c);
waitKey(0);
destroyWindow("Backprojected moving objects mask of TP and TN image 1");
destroyWindow("Backprojected moving objects mask of TP and TN image 2");
}
}
}
}
if (!delList.empty()) {
for (int i = (int) delList.size() - 1; i >= 0; i--) {
movObjCorrsImg1TNFromLast.erase(movObjCorrsImg1TNFromLast.begin() + delList[i]);
movObjCorrsImg2TNFromLast.erase(movObjCorrsImg2TNFromLast.begin() + delList[i]);
movObjCorrsImg1TPFromLast.erase(movObjCorrsImg1TPFromLast.begin() + delList[i]);
movObjCorrsImg2TPFromLast.erase(movObjCorrsImg2TPFromLast.begin() + delList[i]);
movObjCorrsImg12TPFromLast_Idx.erase(movObjCorrsImg12TPFromLast_Idx.begin() + delList[i]);
movObjMaskFromLast &= (movObjMaskFromLastLarge[delList[i]](Rect(Point(posadd, posadd), imgSize)) == 0);
movObjMaskFromLast2 &= (movObjMaskFromLastLarge2[delList[i]](Rect(Point(posadd, posadd), imgSize)) == 0);
movObjMaskFromLastLarge.erase(movObjMaskFromLastLarge.begin() + delList[i]);
movObjMaskFromLastLarge2.erase(movObjMaskFromLastLarge2.begin() + delList[i]);
movObjMaskFromLastLargeAdd.erase(movObjMaskFromLastLargeAdd.begin() + delList[i]);
movObjPt1.erase(movObjPt1.begin() + delList[i]);
movObjPt2.erase(movObjPt2.begin() + delList[i]);
movObj3DPtsCam.erase(movObj3DPtsCam.begin() + delList[i]);
movObj3DPtsWorld.erase(movObj3DPtsWorld.begin() + delList[i]);
movObjFrameEmerge.erase(movObjFrameEmerge.begin() + delList[i]);
movObjImg12TP_InitIdxWorld.erase(movObjImg12TP_InitIdxWorld.begin() + delList[i]);
actCorrsOnMovObjFromLast_IdxWorld.erase(actCorrsOnMovObjFromLast_IdxWorld.begin() + delList[i]);
movObjWorldMovement.erase(movObjWorldMovement.begin() + delList[i]);
movObjDepthClass.erase(movObjDepthClass.begin() + delList[i]);
}
actNrMovObj = movObj3DPtsCam.size();
if (actNrMovObj == 0)
return;
}
movObjDistTNtoReal.resize(actNrMovObj);
//Remove TN if we have too many of them based on the used inlier ratio
for (size_t i = 0; i < actNrMovObj; ++i) {
auto maxTNthisMO =
(int) round((double) movObjCorrsImg1TPFromLast[i].cols * (1.0 / inlRat[actFrameCnt] - 1.0));
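//With nTP true positives and inlier ratio r, at most nTP * (1/r - 1) TN are allowed so that nTP / (nTP + nTN) does not drop below r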
//Remove TN in the first image
if(maxTNthisMO < movObjCorrsImg1TNFromLast[i].cols){
Point pt;
for (int j = maxTNthisMO; j < movObjCorrsImg1TNFromLast[i].cols; ++j) {
pt = Point((int)round(movObjCorrsImg1TNFromLast[i].at<double>(0,j)),
(int)round(movObjCorrsImg1TNFromLast[i].at<double>(1,j)));
Mat s_tmp = movObjMaskFromLastLargeAdd[i](Rect(pt, Size(sqrSi, sqrSi)));
s_tmp -= csurr;
}
movObjMaskFromLast &= (movObjMaskFromLastLarge[i](Rect(Point(posadd, posadd), imgSize)) == 0);
movObjMaskFromLastLarge[i] = (movObjMaskFromLastLargeAdd[i] > 0) & Mat::ones(imgSize.height + sqrSi - 1,
imgSize.width + sqrSi - 1, CV_8UC1);
movObjMaskFromLast |= movObjMaskFromLastLarge[i](Rect(Point(posadd, posadd), imgSize));
movObjCorrsImg1TNFromLast[i] = movObjCorrsImg1TNFromLast[i].colRange(0, maxTNthisMO);
}
//Remove TN in the second image
if(maxTNthisMO < movObjCorrsImg2TNFromLast[i].cols){
Point pt;
for (int j = maxTNthisMO; j < movObjCorrsImg2TNFromLast[i].cols; ++j) {
pt = Point((int)round(movObjCorrsImg2TNFromLast[i].at<double>(0,j)),
(int)round(movObjCorrsImg2TNFromLast[i].at<double>(1,j)));
movObjMaskFromLast2.at<unsigned char>(pt) = 0;
movObjMaskFromLastLarge2[i].at<unsigned char>(pt.y + posadd, pt.x + posadd) = 0;
}
movObjCorrsImg2TNFromLast[i] = movObjCorrsImg2TNFromLast[i].colRange(0, maxTNthisMO);
}
}
//Generate hulls of the objects in the image, a mask for every moving object, and a global label mask
movObjLabelsFromLast.resize(actNrMovObj);
convhullPtsObj.resize(actNrMovObj);
Mat movObjMaskFromLastOld = movObjMaskFromLast.clone();
movObjMaskFromLast = Mat::zeros(imgSize, CV_8UC1);
//vector<double> actAreaMovObj(actNrMovObj, 0);
delList.clear();
for (size_t i = 0; i < actNrMovObj; i++) {
Mat movObjMaskFromLastLargePiece = movObjMaskFromLastLarge[i](Rect(Point(posadd, posadd), imgSize));
genMovObjHulls(movObjMaskFromLastLargePiece, movObjPt1[i], movObjLabelsFromLast[i]);
if (i > 0) {
movObjLabelsFromLast[i] &= (movObjMaskFromLast == 0);
int movObjLabelSizePix = countNonZero(movObjLabelsFromLast[i]);
if(movObjLabelSizePix == 0){
delList.push_back(i);
}
}
Mat dispMask;
if (verbose & SHOW_BACKPROJECT_MOV_OBJ_CORRS) {
dispMask = (movObjMaskFromLast > 0);
}
movObjMaskFromLast |= movObjLabelsFromLast[i];
if (verbose & SHOW_BACKPROJECT_MOV_OBJ_CORRS) {
Mat dispMask2 = (movObjMaskFromLast > 0);
vector<Mat> channels;
Mat b = Mat::zeros(dispMask2.size(), CV_8UC1);
channels.push_back(b);
channels.push_back(dispMask);
channels.push_back(dispMask2);
Mat img3c;
merge(channels, img3c);
if(!writeIntermediateImg(img3c, "backprojected_moving_obj_hulls")){
namedWindow("Backprojected moving object hulls", WINDOW_AUTOSIZE);
imshow("Backprojected moving object hulls", img3c);
waitKey(0);
destroyWindow("Backprojected moving object hulls");
}
}
}
if(!delList.empty()){
for (int i = (int) delList.size() - 1; i >= 0; i--) {
movObjCorrsImg1TNFromLast.erase(movObjCorrsImg1TNFromLast.begin() + delList[i]);
movObjCorrsImg2TNFromLast.erase(movObjCorrsImg2TNFromLast.begin() + delList[i]);
movObjCorrsImg1TPFromLast.erase(movObjCorrsImg1TPFromLast.begin() + delList[i]);
movObjCorrsImg2TPFromLast.erase(movObjCorrsImg2TPFromLast.begin() + delList[i]);
movObjCorrsImg12TPFromLast_Idx.erase(movObjCorrsImg12TPFromLast_Idx.begin() + delList[i]);
movObjMaskFromLastOld &= (movObjMaskFromLastLarge[delList[i]](Rect(Point(posadd, posadd), imgSize)) == 0);
movObjMaskFromLast2 &= (movObjMaskFromLastLarge2[delList[i]](Rect(Point(posadd, posadd), imgSize)) == 0);
movObjMaskFromLastLarge.erase(movObjMaskFromLastLarge.begin() + delList[i]);
movObjMaskFromLastLarge2.erase(movObjMaskFromLastLarge2.begin() + delList[i]);
movObjMaskFromLastLargeAdd.erase(movObjMaskFromLastLargeAdd.begin() + delList[i]);
movObjPt1.erase(movObjPt1.begin() + delList[i]);
movObjPt2.erase(movObjPt2.begin() + delList[i]);
movObj3DPtsCam.erase(movObj3DPtsCam.begin() + delList[i]);
movObj3DPtsWorld.erase(movObj3DPtsWorld.begin() + delList[i]);
movObjFrameEmerge.erase(movObjFrameEmerge.begin() + delList[i]);
movObjImg12TP_InitIdxWorld.erase(movObjImg12TP_InitIdxWorld.begin() + delList[i]);
actCorrsOnMovObjFromLast_IdxWorld.erase(actCorrsOnMovObjFromLast_IdxWorld.begin() + delList[i]);
movObjWorldMovement.erase(movObjWorldMovement.begin() + delList[i]);
movObjDepthClass.erase(movObjDepthClass.begin() + delList[i]);
movObjDistTNtoReal.erase(movObjDistTNtoReal.begin() + delList[i]);
movObjLabelsFromLast.erase(movObjLabelsFromLast.begin() + delList[i]);
convhullPtsObj.erase(convhullPtsObj.begin() + delList[i]);
}
actNrMovObj = movObj3DPtsCam.size();
if (actNrMovObj == 0)
return;
}
//Enlarge the object areas if they are too small
Mat element = cv::getStructuringElement(MORPH_ELLIPSE, Size(5, 5));
const int maxCnt = 40;
for (size_t i = 0; i < actNrMovObj; i++) {
int areaMO = cv::countNonZero(movObjLabelsFromLast[i]);
if(areaMO == 0){
if(verbose & SHOW_IMGS_AT_ERROR) {
if(!writeIntermediateImg(movObjMaskFromLast, "error_zero_backprojected_moving_obj_are_whole_mask_-_obj_nr_" + std::to_string(i)) ||
!writeIntermediateImg(movObjMaskFromLastLarge[i], "error_zero_backprojected_moving_obj_are_marked_keypoints_-_obj_nr_" + std::to_string(i))){
namedWindow("Error - Backprojected moving object area zero - whole mask", WINDOW_AUTOSIZE);
imshow("Error - Backprojected moving object area zero - whole mask", movObjMaskFromLast);
namedWindow("Error - Backprojected moving object area zero - marked keypoints", WINDOW_AUTOSIZE);
imshow("Error - Backprojected moving object area zero - marked keypoints", movObjMaskFromLastLarge[i]);
waitKey(0);
destroyWindow("Error - Backprojected moving object area zero - whole mask");
destroyWindow("Error - Backprojected moving object area zero - marked keypoints");
}
}
throw SequenceException("Label area of backprojected moving object is zero!");
}
int cnt = 0;
while ((areaMO < minOArea) && (cnt < maxCnt)) {
Mat imgSDdilate;
dilate(movObjLabelsFromLast[i], imgSDdilate, element);
imgSDdilate &= ((movObjMaskFromLast == 0) | movObjLabelsFromLast[i]);
int areaMO2 = cv::countNonZero(imgSDdilate);
if (areaMO2 > areaMO) {
areaMO = areaMO2;
imgSDdilate.copyTo(movObjLabelsFromLast[i]);
if (verbose & SHOW_BACKPROJECT_MOV_OBJ_CORRS) {
if ((cnt % 4) == 0) {
if(!writeIntermediateImg(movObjLabelsFromLast[i] > 0,
"backprojected_moving_obj_nr" + std::to_string(i) + "_hull_enlargement_step_" + std::to_string(cnt))){
namedWindow("Backprojected moving object hull enlargement", WINDOW_AUTOSIZE);
imshow("Backprojected moving object hull enlargement", movObjLabelsFromLast[i] > 0);
waitKey(0);
destroyWindow("Backprojected moving object hull enlargement");
}
}
}
} else {
break;
}
cnt++;
}
if (cnt > 0) {
Mat dispMask;
if (verbose & SHOW_BACKPROJECT_MOV_OBJ_CORRS) {
dispMask = (movObjMaskFromLast > 0);
}
movObjMaskFromLast |= movObjLabelsFromLast[i];
if (verbose & SHOW_BACKPROJECT_MOV_OBJ_CORRS) {
Mat dispMask2 = (movObjMaskFromLast > 0);
vector<Mat> channels;
Mat b = Mat::zeros(dispMask2.size(), CV_8UC1);
channels.push_back(b);
channels.push_back(dispMask);
channels.push_back(dispMask2);
Mat img3c;
merge(channels, img3c);
if(!writeIntermediateImg(img3c, "backprojected_dilated_moving_obj_hulls")){
namedWindow("Backprojected dilated moving object hulls", WINDOW_AUTOSIZE);
imshow("Backprojected dilated moving object hulls", img3c);
waitKey(0);
destroyWindow("Backprojected dilated moving object hulls");
}
}
}
}
//Determine how many TN keypoints found in one image still lack a TN partner in the other image (missingCImg2: partners missing in image 2, missingCImg1: partners missing in image 1)
vector<int> missingCImg2(actNrMovObj, 0);
vector<int> missingCImg1(actNrMovObj, 0);
for (size_t i = 0; i < actNrMovObj; i++) {
if (!movObjCorrsImg1TNFromLast[i].empty() && !movObjCorrsImg2TNFromLast[i].empty()) {
if (movObjCorrsImg1TNFromLast[i].cols > movObjCorrsImg2TNFromLast[i].cols) {
missingCImg2[i] = movObjCorrsImg1TNFromLast[i].cols - movObjCorrsImg2TNFromLast[i].cols;
movObjDistTNtoReal[i] = vector<double>((size_t)movObjCorrsImg1TNFromLast[i].cols, fakeDistTNCorrespondences);
} else if (movObjCorrsImg1TNFromLast[i].cols < movObjCorrsImg2TNFromLast[i].cols) {
missingCImg1[i] = movObjCorrsImg2TNFromLast[i].cols - movObjCorrsImg1TNFromLast[i].cols;
movObjDistTNtoReal[i] = vector<double>((size_t)movObjCorrsImg2TNFromLast[i].cols, fakeDistTNCorrespondences);
} else {
movObjDistTNtoReal[i] = vector<double>((size_t)movObjCorrsImg1TNFromLast[i].cols, fakeDistTNCorrespondences);
}
} else if (!movObjCorrsImg1TNFromLast[i].empty()) {
missingCImg2[i] = movObjCorrsImg1TNFromLast[i].cols;
movObjDistTNtoReal[i] = vector<double>((size_t)movObjCorrsImg1TNFromLast[i].cols, fakeDistTNCorrespondences);
} else if (!movObjCorrsImg2TNFromLast[i].empty()) {
missingCImg1[i] = movObjCorrsImg2TNFromLast[i].cols;
movObjDistTNtoReal[i] = vector<double>((size_t)movObjCorrsImg2TNFromLast[i].cols, fakeDistTNCorrespondences);
}
}
//Get ROIs of moving objects by calculating the simplified contour (non-convex) of every object
vector<Rect> objROIs(actNrMovObj);
for (size_t i = 0; i < actNrMovObj; i++) {
vector<Point> hull;
genHullFromMask(movObjLabelsFromLast[i], hull);
objROIs[i] = boundingRect(hull);
}
//Get missing TN correspondences for found keypoints
std::uniform_int_distribution<int32_t> distributionX2(0, imgSize.width - 1);
std::uniform_int_distribution<int32_t> distributionY2(0, imgSize.height - 1);
for (size_t i = 0; i < actNrMovObj; i++) {
if (missingCImg2[i] > 0) {
//Enlarge mask
Mat movObjMaskFromLast2Border(movObjMaskFromLastLarge2[i].size(), movObjMaskFromLastLarge2[i].type());
cv::copyMakeBorder(movObjMaskFromLast2, movObjMaskFromLast2Border, posadd, posadd, posadd, posadd,
BORDER_CONSTANT, cv::Scalar(0));
Mat elemnew = Mat::ones(missingCImg2[i], 3, CV_64FC1);
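//elemnew holds the newly generated TN keypoints of the second image as homogeneous rows (x, y, 1)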
int cnt1 = 0;
for (int j = 0; j < missingCImg2[i]; j++) {
int cnt = 0;
while (cnt < maxCnt) {
Point_<int32_t> pt = Point_<int32_t>(distributionX2(rand_gen), distributionY2(rand_gen));
Mat s_tmp = movObjMaskFromLast2Border(Rect(pt, Size(sqrSi, sqrSi)));
if (s_tmp.at<unsigned char>(posadd, posadd) == 0) {
// csurr.copyTo(s_tmp);
s_tmp.at<unsigned char>(posadd, posadd) = 1;
elemnew.at<double>(j, 0) = (double) pt.x;
elemnew.at<double>(j, 1) = (double) pt.y;
break;
}
cnt++;
}
if (cnt == maxCnt)
break;
cnt1++;
}
if (cnt1 > 0) {
Mat dispMask;
if (verbose & SHOW_BACKPROJECT_MOV_OBJ_CORRS) {
dispMask = (movObjMaskFromLast2 > 0);
}
movObjMaskFromLast2 |= movObjMaskFromLast2Border(Rect(Point(posadd, posadd), imgSize));
if (verbose & SHOW_BACKPROJECT_MOV_OBJ_CORRS) {
Mat dispMask2 = (movObjMaskFromLast2 > 0);
vector<Mat> channels;
Mat b = Mat::zeros(dispMask2.size(), CV_8UC1);
channels.push_back(b);
channels.push_back(dispMask);
channels.push_back(dispMask2);
Mat img3c;
merge(channels, img3c);
if(!writeIntermediateImg(img3c, "random_TN_in_img2_for_backprojected_moving_obj_TN_of_img1")){
namedWindow("Random TN in img2 for backprojected moving object TN of img1", WINDOW_AUTOSIZE);
imshow("Random TN in img2 for backprojected moving object TN of img1", img3c);
waitKey(0);
destroyWindow("Random TN in img2 for backprojected moving object TN of img1");
}
}
if (movObjCorrsImg2TNFromLast[i].empty()){
elemnew.rowRange(0, cnt1).copyTo(movObjCorrsImg2TNFromLast[i]);
}else {
movObjCorrsImg2TNFromLast[i] = movObjCorrsImg2TNFromLast[i].t();
movObjCorrsImg2TNFromLast[i].push_back(elemnew.rowRange(0, cnt1));
}
movObjCorrsImg2TNFromLast[i] = movObjCorrsImg2TNFromLast[i].t();
missingCImg2[i] -= cnt1;
// movObjDistTNtoReal[i] = vector<double>(cnt1, fakeDistTNCorrespondences);
}
if (missingCImg2[i] > 0) {
movObjDistTNtoReal[i].erase(movObjDistTNtoReal[i].begin() + movObjCorrsImg1TNFromLast[i].cols -
missingCImg2[i], movObjDistTNtoReal[i].end());
movObjCorrsImg1TNFromLast[i] = movObjCorrsImg1TNFromLast[i].colRange(0,
movObjCorrsImg1TNFromLast[i].cols -
missingCImg2[i]);
}
} else if (missingCImg1[i] > 0) {
Mat elemnew = Mat::ones(missingCImg1[i], 3, CV_64FC1);
int cnt1 = 0;
std::uniform_int_distribution<int32_t> distributionX(objROIs[i].x, objROIs[i].x + objROIs[i].width - 1);
std::uniform_int_distribution<int32_t> distributionY(objROIs[i].y,
objROIs[i].y + objROIs[i].height - 1);
//Enlarge mask
Mat movObjMaskFromLastBorder(movObjMaskFromLastLarge[i].size(), movObjMaskFromLastLarge[i].type());
cv::copyMakeBorder(movObjMaskFromLastOld, movObjMaskFromLastBorder, posadd, posadd, posadd, posadd,
BORDER_CONSTANT, cv::Scalar(0));
for (int j = 0; j < missingCImg1[i]; j++) {
int cnt = 0;
while (cnt < maxCnt) {
Point_<int32_t> pt = Point_<int32_t>(distributionX(rand_gen), distributionY(rand_gen));
Mat s_tmp = movObjMaskFromLastBorder(Rect(pt, Size(sqrSi, sqrSi)));
if ((s_tmp.at<unsigned char>(posadd, posadd) == 0) &&
(movObjLabelsFromLast[i].at<unsigned char>(pt) > 0)) {
csurr.copyTo(s_tmp);
Mat s_tmpAdd = movObjMaskFromLastLargeAdd[i](Rect(pt, Size(sqrSi, sqrSi)));
s_tmpAdd += csurr;
elemnew.at<double>(j, 0) = (double) pt.x;
elemnew.at<double>(j, 1) = (double) pt.y;
break;
}
cnt++;
}
if (cnt == maxCnt)
break;
cnt1++;
}
if (cnt1 > 0) {
Mat dispMask;
if (verbose & SHOW_BACKPROJECT_MOV_OBJ_CORRS) {
dispMask = (movObjMaskFromLastOld > 0);
}
movObjMaskFromLastOld |= movObjMaskFromLastBorder(Rect(Point(posadd, posadd), imgSize));
if (verbose & SHOW_BACKPROJECT_MOV_OBJ_CORRS) {
Mat dispMask2 = (movObjMaskFromLastOld > 0);
vector<Mat> channels;
Mat b = Mat::zeros(dispMask2.size(), CV_8UC1);
channels.push_back(b);
channels.push_back(dispMask);
channels.push_back(dispMask2);
Mat img3c;
merge(channels, img3c);
if(!writeIntermediateImg(img3c, "random_TN_in_img1_for_backprojected_moving_obj_TN_of_img2")){
namedWindow("Random TN in img1 for backprojected moving object TN of img2", WINDOW_AUTOSIZE);
imshow("Random TN in img1 for backprojected moving object TN of img2", img3c);
waitKey(0);
destroyWindow("Random TN in img1 for backprojected moving object TN of img2");
}
}
// movObjLabelsFromLast[i] |= movObjMaskFromLastBorder(Rect(Point(posadd, posadd), imgSize));
if(movObjCorrsImg1TNFromLast[i].empty()){
elemnew.rowRange(0, cnt1).copyTo(movObjCorrsImg1TNFromLast[i]);
}else {
movObjCorrsImg1TNFromLast[i] = movObjCorrsImg1TNFromLast[i].t();
movObjCorrsImg1TNFromLast[i].push_back(elemnew.rowRange(0, cnt1));
}
movObjCorrsImg1TNFromLast[i] = movObjCorrsImg1TNFromLast[i].t();
missingCImg1[i] -= cnt1;
// movObjDistTNtoReal[i] = vector<double>(cnt1, fakeDistTNCorrespondences);
}
if (missingCImg1[i] > 0) {
movObjDistTNtoReal[i].erase(movObjDistTNtoReal[i].begin() + movObjCorrsImg2TNFromLast[i].cols -
missingCImg1[i], movObjDistTNtoReal[i].end());
movObjCorrsImg2TNFromLast[i] = movObjCorrsImg2TNFromLast[i].colRange(0,
movObjCorrsImg2TNFromLast[i].cols -
missingCImg1[i]);
}
}
}
//Additionally add TN using the inlier ratio
for (size_t i = 0; i < actNrMovObj; i++) {
if (missingCImg2[i] == 0)//Skip adding more TN if placing TN in the second image already failed earlier
{
/*missingCImg2[i] =
(int) round((double) movObjCorrsImg1TPFromLast[i].cols * (1.0 - inlRat[actFrameCnt])) -
movObjCorrsImg1TNFromLast[i].cols;*/
missingCImg2[i] =
(int) round((double) movObjCorrsImg1TPFromLast[i].cols * (1.0 / inlRat[actFrameCnt] - 1.0)) -
movObjCorrsImg1TNFromLast[i].cols;
if(missingCImg2[i] < 0){
cout << "There are still too many TN (+" << -1*missingCImg2[i] << ") on the backprojected moving object #" << i << " in image 1!" << endl;
}
} else {
missingCImg2[i] = -1;
}
}
element = cv::getStructuringElement(MORPH_RECT, Size(sqrSi, sqrSi));
//Enlarge mask for image 2
Mat movObjMaskFromLast2Border(movObjMaskFromLastLarge2[0].size(), movObjMaskFromLastLarge2[0].type());
cv::copyMakeBorder(movObjMaskFromLast2, movObjMaskFromLast2Border, posadd, posadd, posadd, posadd,
BORDER_CONSTANT,
cv::Scalar(0));
//Enlarge mask for image 1
Mat movObjMaskFromLastBorder(movObjMaskFromLastLarge[0].size(), movObjMaskFromLastLarge[0].type());
cv::copyMakeBorder(movObjMaskFromLastOld, movObjMaskFromLastBorder, posadd, posadd, posadd, posadd,
BORDER_CONSTANT,
cv::Scalar(0));
for (size_t i = 0; i < actNrMovObj; i++) {
//Generate a depth map for generating TN based on the depth of the back-projected 3D points
double minDepth = DBL_MAX, maxDepth = -DBL_MAX;//-DBL_MAX instead of DBL_MIN: DBL_MIN is the smallest positive double, not the most negative value
for (size_t j = 0; j < movObjCorrsImg12TPFromLast_Idx[i].size(); j++) {
double sDepth = movObj3DPtsCam[i][movObjCorrsImg12TPFromLast_Idx[i][j]].z;
if (sDepth < minDepth)
minDepth = sDepth;
if (sDepth > maxDepth)
maxDepth = sDepth;
}
Mat randDepth(imgSize, CV_64FC1);
randu(randDepth, Scalar(minDepth), Scalar(maxDepth + 0.001));
std::uniform_int_distribution<int32_t> distributionX(objROIs[i].x, objROIs[i].x + objROIs[i].width - 1);
std::uniform_int_distribution<int32_t> distributionY(objROIs[i].y, objROIs[i].y + objROIs[i].height - 1);
int areaMO = cv::countNonZero(movObjLabelsFromLast[i]);
int cnt2 = 0;
while (((areaMO < maxOArea) || (cnt2 == 0)) && (missingCImg2[i] > 0) &&
(cnt2 < maxCnt))//If not all elements could be selected, try to enlarge the area
{
//Generate label mask for image 1
Mat movObjLabelsFromLastN;
cv::copyMakeBorder(movObjLabelsFromLast[i], movObjLabelsFromLastN, posadd, posadd, posadd, posadd,
BORDER_CONSTANT, cv::Scalar(0));
movObjLabelsFromLastN = (movObjLabelsFromLastN == 0);
movObjLabelsFromLastN |= movObjMaskFromLastBorder;
Mat dispMask1, dispMask2;
if (verbose & SHOW_BACKPROJECT_MOV_OBJ_CORRS) {
dispMask1 = (movObjLabelsFromLastN > 0);
dispMask2 = (movObjMaskFromLast2Border > 0);
}
std::vector<cv::Point2d> x1TN;
std::vector<cv::Point2d> x2TN;
int32_t remainingTN = genTrueNegCorrs(missingCImg2[i],
distributionX,
distributionY,
distributionX2,
distributionY2,
x1TN,
x2TN,
movObjDistTNtoReal[i],
movObjLabelsFromLastN,
movObjMaskFromLast2Border,
randDepth);
cnt2++;
if (remainingTN != missingCImg2[i]) {
if (verbose & SHOW_BACKPROJECT_MOV_OBJ_CORRS) {
Mat dispMask12 = (movObjLabelsFromLastN > 0);
Mat dispMask22 = (movObjMaskFromLast2Border > 0);
vector<Mat> channels;
Mat b = Mat::zeros(dispMask12.size(), CV_8UC1);
channels.push_back(b);
channels.push_back(dispMask1);
channels.push_back(dispMask12);
Mat img3c;
merge(channels, img3c);
bool wii = !writeIntermediateImg(img3c, "random_TN_in_img1_for_backprojected_moving_obj");
if(wii){
namedWindow("Random TN in img1 for backprojected moving object", WINDOW_AUTOSIZE);
imshow("Random TN in img1 for backprojected moving object", img3c);
}
channels.clear();
channels.push_back(b);
channels.push_back(dispMask2);
channels.push_back(dispMask22);
merge(channels, img3c);
wii |= !writeIntermediateImg(img3c, "random_TN_in_img2_for_backprojected_moving_obj");
if(wii){
namedWindow("Random TN in img2 for backprojected moving object", WINDOW_AUTOSIZE);
imshow("Random TN in img2 for backprojected moving object", img3c);
waitKey(0);
destroyWindow("Random TN in img1 for backprojected moving object");
destroyWindow("Random TN in img2 for backprojected moving object");
}
}
movObjLabelsFromLastN(Rect(Point(posadd, posadd), imgSize)) &= movObjLabelsFromLast[i];
movObjMaskFromLastBorder(Rect(Point(posadd, posadd), imgSize)) |= movObjLabelsFromLastN(
Rect(Point(posadd, posadd), imgSize));
auto nelem = (int32_t) x1TN.size();
Mat elemnew = Mat::ones(nelem, 3, CV_64FC1);
Mat elemnew2 = Mat::ones(nelem, 3, CV_64FC1);
for (int32_t j = 0; j < nelem; j++) {
elemnew.at<double>(j, 0) = x1TN[j].x;
elemnew.at<double>(j, 1) = x1TN[j].y;
elemnew2.at<double>(j, 0) = x2TN[j].x;
elemnew2.at<double>(j, 1) = x2TN[j].y;
}
if (movObjCorrsImg1TNFromLast[i].empty()){
elemnew.copyTo(movObjCorrsImg1TNFromLast[i]);
}else {
movObjCorrsImg1TNFromLast[i] = movObjCorrsImg1TNFromLast[i].t();
movObjCorrsImg1TNFromLast[i].push_back(elemnew);
}
movObjCorrsImg1TNFromLast[i] = movObjCorrsImg1TNFromLast[i].t();
if (movObjCorrsImg2TNFromLast[i].empty()){
elemnew2.copyTo(movObjCorrsImg2TNFromLast[i]);
}else {
movObjCorrsImg2TNFromLast[i] = movObjCorrsImg2TNFromLast[i].t();
movObjCorrsImg2TNFromLast[i].push_back(elemnew2);
}
movObjCorrsImg2TNFromLast[i] = movObjCorrsImg2TNFromLast[i].t();
}
if (remainingTN > 0) {
//Perform dilation
Mat imgSDdilate;
dilate(movObjLabelsFromLast[i], imgSDdilate, element);
imgSDdilate &= ((movObjMaskFromLast == 0) | movObjLabelsFromLast[i]);
int areaMO2 = cv::countNonZero(imgSDdilate);
if (areaMO2 > areaMO) {
areaMO = areaMO2;
if (areaMO < maxOArea) {
imgSDdilate.copyTo(movObjLabelsFromLast[i]);
objROIs[i] = Rect(max(objROIs[i].x - posadd, 0), max(objROIs[i].y - posadd, 0),
objROIs[i].width + 2 * posadd, objROIs[i].height + 2 * posadd);
objROIs[i] = Rect(objROIs[i].x, objROIs[i].y,
(objROIs[i].x + objROIs[i].width) > imgSize.width ? (imgSize.width -
objROIs[i].x)
: objROIs[i].width,
(objROIs[i].y + objROIs[i].height) > imgSize.height ? (imgSize.height -
objROIs[i].y)
: objROIs[i].height);
distributionX = std::uniform_int_distribution<int32_t>(objROIs[i].x,
objROIs[i].x + objROIs[i].width - 1);
distributionY = std::uniform_int_distribution<int32_t>(objROIs[i].y,
objROIs[i].y + objROIs[i].height - 1);
} else {
break;
}
} else {
break;
}
}
missingCImg2[i] = remainingTN;
}
}
movObjMaskFromLastBorder(Rect(Point(posadd, posadd), imgSize)).copyTo(movObjMaskFromLastOld);
movObjMaskFromLast2 |= movObjMaskFromLast2Border(Rect(Point(posadd, posadd), imgSize));
movObjMaskFromLast = Mat::zeros(imgSize, CV_8UC1);
actTNPerMovObjFromLast.resize(actNrMovObj);
for (size_t i = 0; i < actNrMovObj; i++) {
//Generate a final non-convex hull or contour for every moving object
genHullFromMask(movObjLabelsFromLast[i], convhullPtsObj[i]);
movObjMaskFromLast |= (unsigned char) (i + 1) * movObjLabelsFromLast[i];
//actAreaMovObj[i] = contourArea(convhullPtsObj[i]);
actTNPerMovObjFromLast[i] = movObjCorrsImg1TNFromLast[i].cols;
actTruePosOnMovObjFromLast += movObjCorrsImg1TPFromLast[i].cols;
actTrueNegOnMovObjFromLast += actTNPerMovObjFromLast[i];
}
actCorrsOnMovObjFromLast = actTrueNegOnMovObjFromLast + actTruePosOnMovObjFromLast;
movObj3DPtsWorldAllFrames.insert(movObj3DPtsWorldAllFrames.end(), movObj3DPtsWorld.begin(), movObj3DPtsWorld.end());
//Finally visualize the labels
if (verbose & SHOW_BACKPROJECT_MOV_OBJ_CORRS) {
//Generate colormap for moving objects (every object has a different color)
Mat colors = Mat((int)actNrMovObj, 1, CV_8UC1);
unsigned char addc = actNrMovObj > 255 ? (unsigned char)255 : (unsigned char) actNrMovObj;
addc = addc < (unsigned char)2 ? (unsigned char)255 : ((unsigned char)255 / (addc - (unsigned char)1));
colors.at<unsigned char>(0) = 0;
for (int k = 1; k < (int)actNrMovObj; ++k) {
colors.at<unsigned char>(k) = colors.at<unsigned char>(k - 1) + addc;
}
Mat colormap_img;
applyColorMap(colors, colormap_img, COLORMAP_PARULA);
Mat labelImgRGB = Mat::zeros(imgSize, CV_8UC3);
for (size_t i = 0; i < actNrMovObj; i++) {
for (int r = 0; r < imgSize.height; r++) {
for (int c = 0; c < imgSize.width; c++) {
if (movObjLabelsFromLast[i].at<unsigned char>(r, c) != 0) {
labelImgRGB.at<cv::Vec3b>(r, c) = colormap_img.at<cv::Vec3b>(i);
}
}
}
}
if(!writeIntermediateImg(labelImgRGB, "final_backprojected_moving_obj_labels")){
namedWindow("Backprojected final moving object labels", WINDOW_AUTOSIZE);
imshow("Backprojected final moving object labels", labelImgRGB);
waitKey(0);
destroyWindow("Backprojected final moving object labels");
}
}
}
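//Generate a single outer (possibly non-convex) contour from a mask; if the mask consists of several disjoint areas, the smaller areas are merged into the biggest one, one at a time, starting with the area whose center of mass is closest to the biggest area's center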
void genStereoSequ::genHullFromMask(const cv::Mat &mask, std::vector<cv::Point> &finalHull) {
if (verbose & SHOW_BACKPROJECT_MOV_OBJ_CORRS) {
if(!writeIntermediateImg(mask > 0, "original_backprojected_moving_obj_mask")){
namedWindow("Original backprojected moving object mask", WINDOW_AUTOSIZE);
imshow("Original backprojected moving object mask", mask > 0);
waitKey(0);
}
}
//Get the contour of the mask
vector<vector<Point>> contours;
Mat finalMcopy = mask.clone();
findContours(finalMcopy, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
//Calculate a single outer contour for multiple sub-objects while avoiding a convex hull
int contSize = (int) contours.size();
if (contSize > 1) {
//Get biggest element (which will be the main element)
double biggestArea = 0;
int idx = 0;
for (int i = 0; i < contSize; ++i) {
double actArea = contourArea(contours[i]);
if (actArea > biggestArea) {
biggestArea = actArea;
idx = i;
}
}
//Calculate the center of mass of every element to get the distances between elements
Point2f bigAreaCenter;
vector<Point> bigAreaContour = contours[idx];
cv::Moments bigAreaMoments = moments(bigAreaContour, true);
bigAreaCenter = Point2f((float)(bigAreaMoments.m10 / bigAreaMoments.m00),
(float)(bigAreaMoments.m01 / bigAreaMoments.m00));
vector<pair<int, Point2f>> areaCenters;
for (int i = 0; i < contSize; ++i) {
if (i == idx) continue;
cv::Moments areaMoment = moments(contours[i], true);
areaCenters.emplace_back(
make_pair(i, Point2f((float)(areaMoment.m10 / areaMoment.m00),
(float)(areaMoment.m01 / areaMoment.m00))));
}
//Iteratively merge the element nearest to the biggest area's center into the biggest area
for (int i = 0; i < contSize - 1; ++i) {
float minDist = FLT_MAX;
pair<int, int> minDistIdx;
for (int j = 0; j < (int)areaCenters.size(); ++j) {
Point2f ptdiff = bigAreaCenter - areaCenters[j].second;
float dist = sqrt(ptdiff.x * ptdiff.x + ptdiff.y * ptdiff.y);
if (dist < minDist) {
minDist = dist;
minDistIdx = make_pair(areaCenters[j].first, j);
}
}
//Combine nearest element and biggest element
int maxBOSi = (int) bigAreaContour.size();
vector<int> hullIdxs;
vector<Point> hullPts1, hullPts2;
vector<Point> comb2Areas = bigAreaContour;
comb2Areas.insert(comb2Areas.end(), contours[minDistIdx.first].begin(), contours[minDistIdx.first].end());
convexHull(comb2Areas, hullIdxs);
//Check which of the two areas each convex hull point belongs to
int hullIdxInsert[2][2] = {{-1, -1},
{-1, -1}};
bool advIdx[2][2] = {{false, false},
{false, false}};
bool branchChk[4] = {false, false, false, false};
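//hullIdxInsert stores, for both areas, the contour indices at the two transitions along the convex hull where it switches between points of the big area and points of the smaller area; advIdx marks on which side of a transition the stored index lies. These transition indices are later used to split both contours into candidate parts that are stitched into one combined contour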
for (int k = 0; k < (int)hullIdxs.size(); ++k) {
if (hullIdxs[k] < maxBOSi) {
branchChk[0] = true;
if (branchChk[0] && branchChk[1]) {
branchChk[3] = true;
if (hullIdxInsert[0][0] < 0) {
hullIdxInsert[0][0] = hullIdxs[k];
}
if (hullIdxInsert[1][0] < 0) {
hullIdxInsert[1][0] = hullIdxs[k - 1] - maxBOSi;
advIdx[1][0] = true;
}
}
if (branchChk[2]) {
if (hullIdxInsert[0][1] < 0) {
hullIdxInsert[0][1] = hullIdxs[k];
}
if ((hullIdxInsert[1][1] < 0) && (!hullPts2.empty())) {
hullIdxInsert[1][1] = hullIdxs[k - 1] - maxBOSi;
advIdx[1][1] = true;
}
}
hullPts1.push_back(comb2Areas[hullIdxs[k]]);
} else {
branchChk[1] = true;
if (branchChk[0] && branchChk[1]) {
branchChk[2] = true;
if (hullIdxInsert[0][0] < 0) {
hullIdxInsert[0][0] = hullIdxs[k - 1];
advIdx[0][0] = true;
}
if (hullIdxInsert[1][0] < 0) {
hullIdxInsert[1][0] = hullIdxs[k] - maxBOSi;
}
}
if (branchChk[3]) {
if (hullIdxInsert[0][1] < 0) {
hullIdxInsert[0][1] = hullIdxs[k - 1];
advIdx[0][1] = true;
}
if (hullIdxInsert[1][1] < 0) {
hullIdxInsert[1][1] = hullIdxs[k] - maxBOSi;
}
}
hullPts2.push_back(comb2Areas[hullIdxs[k]]);
}
}
if (!hullPts2.empty()) {
/*if(verbose & SHOW_BACKPROJECT_MOV_OBJ_CORRS) {
Mat maskcontours = Mat::zeros(imgSize, CV_8UC3);
vector<vector<Point>> tmp(1);
tmp[0] = bigAreaContour;
drawContours(maskcontours, tmp, 0, Scalar(0, 255, 0));
namedWindow("New big area", WINDOW_AUTOSIZE);
imshow("New big area", maskcontours);
waitKey(0);
destroyWindow("New big area");
}*/
if (hullIdxInsert[0][1] < 0) {
if (advIdx[0][0]) {
hullIdxInsert[0][1] = hullIdxs[0];
hullIdxInsert[1][1] = hullIdxs.back() - maxBOSi;
advIdx[1][1] = true;
} else {
hullIdxInsert[1][1] = hullIdxs[0] - maxBOSi;
hullIdxInsert[0][1] = hullIdxs.back();
advIdx[0][1] = true;
}
}
CV_Assert((advIdx[0][0] ^ advIdx[1][0]) && (advIdx[0][1] ^ advIdx[1][1]) &&
(advIdx[0][0] ^ advIdx[0][1]));
//Extract for each area both possible contour elements
vector<Point> bigAreaContourNew1, bigAreaContourNew2, bigAreaContourNew12, bigAreaContourNew22;
if (advIdx[0][0]) {
if (hullIdxInsert[0][1] > hullIdxInsert[0][0]) {
getFirstPartContourNeg(bigAreaContourNew1, bigAreaContour, hullIdxInsert[0][1],
hullIdxInsert[0][0]);
getSecPartContourNeg(bigAreaContourNew12, bigAreaContour, hullIdxInsert[0][1],
hullIdxInsert[0][0]);
} else {
getFirstPartContourPos(bigAreaContourNew1, bigAreaContour, hullIdxInsert[0][1],
hullIdxInsert[0][0]);
getSecPartContourPos(bigAreaContourNew12, bigAreaContour, hullIdxInsert[0][1],
hullIdxInsert[0][0]);
}
if (hullIdxInsert[1][0] > hullIdxInsert[1][1]) {
getFirstPartContourNeg(bigAreaContourNew2, contours[minDistIdx.first], hullIdxInsert[1][0],
hullIdxInsert[1][1]);
getSecPartContourNeg(bigAreaContourNew22, contours[minDistIdx.first], hullIdxInsert[1][0],
hullIdxInsert[1][1]);
} else {
getFirstPartContourPos(bigAreaContourNew2, contours[minDistIdx.first], hullIdxInsert[1][0],
hullIdxInsert[1][1]);
getSecPartContourPos(bigAreaContourNew22, contours[minDistIdx.first], hullIdxInsert[1][0],
hullIdxInsert[1][1]);
}
} else {
if (hullIdxInsert[0][0] > hullIdxInsert[0][1]) {
getFirstPartContourNeg(bigAreaContourNew1, bigAreaContour, hullIdxInsert[0][0],
hullIdxInsert[0][1]);
getSecPartContourNeg(bigAreaContourNew12, bigAreaContour, hullIdxInsert[0][0],
hullIdxInsert[0][1]);
} else {
getFirstPartContourPos(bigAreaContourNew1, bigAreaContour, hullIdxInsert[0][0],
hullIdxInsert[0][1]);
getSecPartContourPos(bigAreaContourNew12, bigAreaContour, hullIdxInsert[0][0],
hullIdxInsert[0][1]);
}
if (hullIdxInsert[1][1] > hullIdxInsert[1][0]) {
getFirstPartContourNeg(bigAreaContourNew2, contours[minDistIdx.first], hullIdxInsert[1][1],
hullIdxInsert[1][0]);
getSecPartContourNeg(bigAreaContourNew22, contours[minDistIdx.first], hullIdxInsert[1][1],
hullIdxInsert[1][0]);
} else {
getFirstPartContourPos(bigAreaContourNew2, contours[minDistIdx.first], hullIdxInsert[1][1],
hullIdxInsert[1][0]);
getSecPartContourPos(bigAreaContourNew22, contours[minDistIdx.first], hullIdxInsert[1][1],
hullIdxInsert[1][0]);
}
}
//Select the contour parts of the two separate areas that together enclose the largest area and/or share points with the convex hull
if ((hullPts1.size() <= 2) || (hullPts2.size() <= 2)) {
double areas[4] = {0, 0, 0, 0};
vector<Point> testCont[4];
testCont[0] = bigAreaContourNew1;
testCont[0].insert(testCont[0].end(), bigAreaContourNew2.begin(), bigAreaContourNew2.end());
areas[0] = cv::contourArea(testCont[0]);
testCont[1] = bigAreaContourNew1;
testCont[1].insert(testCont[1].end(), bigAreaContourNew22.begin(), bigAreaContourNew22.end());
areas[1] = cv::contourArea(testCont[1]);
testCont[2] = bigAreaContourNew12;
testCont[2].insert(testCont[2].end(), bigAreaContourNew2.begin(), bigAreaContourNew2.end());
areas[2] = cv::contourArea(testCont[2]);
testCont[3] = bigAreaContourNew12;
testCont[3].insert(testCont[3].end(), bigAreaContourNew22.begin(), bigAreaContourNew22.end());
areas[3] = cv::contourArea(testCont[3]);
std::ptrdiff_t pdiff = max_element(areas, areas + 4) - areas;
bool selfintersection = true;
bool hullUsedasOutPt = false;
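//Check the chosen combined contour for self-intersections by rasterizing its edges and looking for pixels crossed by more than one line segment; if it self-intersects, fall back to the combination with the next-largest area or, ultimately, to the convex hull points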
while (selfintersection) {
Rect conRe;
conRe = cv::boundingRect(testCont[pdiff]);
Point conReSP = Point(conRe.x, conRe.y);
Mat testMat = Mat::zeros(conRe.height, conRe.width, CV_8UC1);
for (int j = 1; j < (int) testCont[pdiff].size(); ++j) {
Mat testMat1 = Mat::zeros(conRe.height, conRe.width, CV_8UC1);
cv::line(testMat1, testCont[pdiff][j - 1] - conReSP, testCont[pdiff][j] - conReSP,
Scalar(1));
testMat1.at<unsigned char>(testCont[pdiff][j] - conReSP) = 0;
testMat += testMat1;
}
Mat testMat1 = Mat::zeros(conRe.height, conRe.width, CV_8UC1);
cv::line(testMat1, testCont[pdiff].back() - conReSP, testCont[pdiff][0] - conReSP, Scalar(1));
testMat1.at<unsigned char>(testCont[pdiff][0] - conReSP) = 0;
testMat += testMat1;
/*namedWindow("Line intersections", WINDOW_AUTOSIZE);
imshow("Line intersections", testMat > 0);
namedWindow("Line intersections1", WINDOW_AUTOSIZE);
imshow("Line intersections1", testMat > 1);
waitKey(0);
destroyWindow("Line intersections");
destroyWindow("Line intersections1");*/
bool foundIntSec = false;
for (int k = 0; k < conRe.height; ++k) {
for (int l = 0; l < conRe.width; ++l) {
if (testMat.at<unsigned char>(k, l) > 1) {
foundIntSec = true;
break;
}
}
if (foundIntSec) break;
}
if (foundIntSec) {
areas[pdiff] = 0;
pdiff = max_element(areas, areas + 4) - areas;
if(areas[pdiff] == 0){
if((hullPts1.size() <= 2) && (hullPts2.size() > 2) && !hullUsedasOutPt){
testCont[1] = hullPts1;
testCont[1].insert(testCont[1].end(), bigAreaContourNew22.begin(), bigAreaContourNew22.end());
areas[1] = cv::contourArea(testCont[1]);
testCont[3] = hullPts1;
testCont[3].insert(testCont[3].end(), bigAreaContourNew22.begin(), bigAreaContourNew22.end());
areas[3] = cv::contourArea(testCont[3]);
pdiff = max_element(areas, areas + 4) - areas;
}
else if((hullPts1.size() > 2) && (hullPts2.size() <= 2) && !hullUsedasOutPt){
testCont[0] = bigAreaContourNew1;
testCont[0].insert(testCont[0].end(), hullPts2.begin(), hullPts2.end());
areas[0] = cv::contourArea(testCont[0]);
testCont[2] = bigAreaContourNew12;
testCont[2].insert(testCont[2].end(), hullPts2.begin(), hullPts2.end());
areas[2] = cv::contourArea(testCont[2]);
pdiff = max_element(areas, areas + 4) - areas;
}
else{
testCont[0].clear();
testCont[0].resize(hullIdxs.size());
for (size_t j = 0; j < hullIdxs.size(); ++j) {
testCont[0][j] = comb2Areas[hullIdxs[j]];
}
pdiff = 0;
selfintersection = false;
}
hullUsedasOutPt = true;
}
} else {
selfintersection = false;
}
}
bigAreaContour = testCont[pdiff];
} else {
bool selCont[4] = {false, false, false, false};
int equCnt[2] = {0, 0};
for (auto& j : hullPts1) {
for (auto& k : bigAreaContourNew1) {
if ((abs(j.x - k.x) +
abs(j.y - k.y)) == 0) {
equCnt[0]++;
if (equCnt[0] > 2) {
selCont[0] = true;
break;
}
}
}
if (selCont[0]) break;
for (auto& k : bigAreaContourNew12) {
if ((abs(j.x - k.x) +
abs(j.y - k.y)) == 0) {
equCnt[1]++;
if (equCnt[1] > 2) {
selCont[1] = true;
break;
}
}
}
if (selCont[1]) break;
}
equCnt[0] = 0;
equCnt[1] = 0;
for (auto& j : hullPts2) {
for (auto& k : bigAreaContourNew2) {
if ((abs(j.x - k.x) +
abs(j.y - k.y)) == 0) {
equCnt[0]++;
if (equCnt[0] > 2) {
selCont[2] = true;
break;
}
}
}
if (selCont[2]) break;
for (auto& k : bigAreaContourNew22) {
if ((abs(j.x - k.x) +
abs(j.y - k.y)) == 0) {
equCnt[1]++;
if (equCnt[1] > 2) {
selCont[3] = true;
break;
}
}
}
if (selCont[3]) break;
}
if (selCont[0] && selCont[2]) {
bigAreaContour = bigAreaContourNew1;
bigAreaContour.insert(bigAreaContour.end(), bigAreaContourNew2.begin(),
bigAreaContourNew2.end());
} else if (selCont[0] && selCont[3]) {
bigAreaContour = bigAreaContourNew1;
bigAreaContour.insert(bigAreaContour.end(), bigAreaContourNew22.begin(),
bigAreaContourNew22.end());
} else if (selCont[1] && selCont[2]) {
bigAreaContour = bigAreaContourNew12;
bigAreaContour.insert(bigAreaContour.end(), bigAreaContourNew2.begin(),
bigAreaContourNew2.end());
} else {
bigAreaContour = bigAreaContourNew12;
bigAreaContour.insert(bigAreaContour.end(), bigAreaContourNew22.begin(),
bigAreaContourNew22.end());
}
}
//Calculate the new center of the big area
bigAreaMoments = moments(bigAreaContour, true);
bigAreaCenter = Point2f((float)(bigAreaMoments.m10 / bigAreaMoments.m00),
(float)(bigAreaMoments.m01 / bigAreaMoments.m00));
}
//Delete the used area center
areaCenters.erase(areaCenters.begin() + minDistIdx.second);
}
contours.clear();
contours.resize(1);
contours[0] = bigAreaContour;
}else if(contSize == 0){
if(verbose & SHOW_IMGS_AT_ERROR) {
if(!writeIntermediateImg(mask > 0, "error_no_contour_found")){
namedWindow("Error - No contour found", WINDOW_AUTOSIZE);
imshow("Error - No contour found", mask > 0);
waitKey(0);
destroyWindow("Error - No contour found");
}
}
throw SequenceException("No contour found for backprojected moving object!");
}
//Simplify the contour
double epsilon = 0.005 * cv::arcLength(contours[0], true);//0.5% of the overall contour length
if (epsilon > 6.5) {
epsilon = 6.5;
} else if (epsilon < 2.0) {
epsilon = 2.0;
}
approxPolyDP(contours[0], finalHull, epsilon, true);
if (verbose & SHOW_BACKPROJECT_MOV_OBJ_CORRS) {
Mat maskcontours = Mat::zeros(imgSize, CV_8UC3);
drawContours(maskcontours, contours, 0, Scalar(0, 255, 0));
vector<vector<Point>> tmp(1);
tmp[0] = finalHull;
drawContours(maskcontours, tmp, 0, Scalar(0, 0, 255));
if(!writeIntermediateImg(maskcontours > 0, "approximated_and_original_backprojected_moving_object_mask_contour")){
namedWindow("Approximated and original backprojected moving object mask contour", WINDOW_AUTOSIZE);
imshow("Approximated and original backprojected moving object mask contour", maskcontours > 0);
waitKey(0);
destroyWindow("Original backprojected moving object mask");
destroyWindow("Approximated and original backprojected moving object mask contour");
}
}
}
void genStereoSequ::genMovObjHulls(const cv::Mat &corrMask, std::vector<cv::Point> &kps, cv::Mat &finalMask,
std::vector<cv::Point> *hullPts) {
int sqrSi = csurr.rows;
//Get the convex hull of the keypoints
vector<vector<Point>> hull(1);
convexHull(kps, hull[0]);
if (hullPts) {
*hullPts = hull[0];
}
//Get bounding box
Rect hullBB = boundingRect(hull[0]);
hullBB = Rect(max(hullBB.x - sqrSi, 0), max(hullBB.y - sqrSi, 0), hullBB.width + 2 * sqrSi,
hullBB.height + 2 * sqrSi);
hullBB = Rect(hullBB.x, hullBB.y,
(hullBB.x + hullBB.width) > imgSize.width ? (imgSize.width - hullBB.x) : hullBB.width,
(hullBB.y + hullBB.height) > imgSize.height ? (imgSize.height - hullBB.y) : hullBB.height);
//Invert the mask
Mat ncm = (corrMask(hullBB) == 0);
/*namedWindow("Inverted keypoint mask", WINDOW_AUTOSIZE);
imshow("Inverted keypoint mask", ncm);*/
//Draw the filled convex hull with enlarged borders
Mat hullMat1 = Mat::zeros(imgSize, CV_8UC1);
//with filled contour:
drawContours(hullMat1, hull, -1, Scalar(255), FILLED);
/*namedWindow("Convex hull filled", WINDOW_AUTOSIZE);
imshow("Convex hull filled", hullMat1);*/
//enlarge borders:
drawContours(hullMat1, hull, -1, Scalar(255), sqrSi);
hullMat1 = hullMat1(hullBB);
/*namedWindow("Convex hull filled enlarged", WINDOW_AUTOSIZE);
imshow("Convex hull filled enlarged", hullMat1);*/
//Combine convex hull and inverted mask
Mat icm = ncm & hullMat1;
/*namedWindow("Convex hull combined", WINDOW_AUTOSIZE);
imshow("Convex hull combined", icm);*/
//Apply distance transform algorithm
Mat distImg;
distanceTransform(icm, distImg, DIST_L2, DIST_MASK_PRECISE, CV_32FC1);
/*Mat distTransNorm;
normalize(distImg, distTransNorm, 0, 1.0, NORM_MINMAX);
namedWindow("Distance Transform", WINDOW_AUTOSIZE);
imshow("Distance Transform", distTransNorm);
waitKey(0);
destroyWindow("Distance Transform");*/
/*destroyWindow("Convex hull combined");
destroyWindow("Convex hull filled enlarged");
destroyWindow("Convex hull filled");*/
//Get the largest distance from white pixels to black pixels
double minVal, maxVal;
minMaxLoc(distImg, &minVal, &maxVal);
//Calculate the kernel size for closing
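//The kernel must be large enough to bridge the largest gap inside the hull, hence twice the maximum distance plus one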
int kSize = (int) ceil(maxVal) * 2 + 1;
//Get structuring element
Mat element = getStructuringElement(MORPH_RECT, Size(kSize, kSize));
//Perform closing to generate the final mask
morphologyEx(corrMask, finalMask, MORPH_CLOSE, element);
/*namedWindow("Final mask for given points", WINDOW_AUTOSIZE);
imshow("Final mask for given points", finalMask > 0);
waitKey(0);
destroyWindow("Final mask for given points");*/
// destroyWindow("Inverted keypoint mask");
}
//Calculate the seeding position and area for every new moving object
//This function should not be called for the first frame
bool genStereoSequ::getSeedsAreasMovObj() {
//Get number of possible seeds per region
Mat nrPerRegMax = Mat::zeros(3, 3, CV_8UC1);
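//Threshold below which a region is considered (nearly) free: two thirds of the smaller of the minimal
//object area and the remaining headroom (maxOArea - minOArea)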
int minOArea23 = 2 * min(minOArea, maxOArea - minOArea) / 3;
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
if (startPosMovObjs.at<unsigned char>(y, x) == 0)
continue;
if(actFracUseableTPperRegion.at<double>(y,x) <= actFracUseableTPperRegionTH)
continue;
int moarea = countNonZero(movObjMaskFromLast(regROIs[y][x]) | (actStereoImgsOverlapMask(regROIs[y][x]) == 0));
if (moarea < minOArea23) {
nrPerRegMax.at<unsigned char>(y, x) = (unsigned char) maxOPerReg;
} else if ((moarea <= maxOArea) && (maxOPerReg > 1)) {
nrPerRegMax.at<unsigned char>(y, x) = (unsigned char) (maxOPerReg - 1);
} else if (moarea <= (maxOPerReg - 1) * maxOArea) {
Mat actUsedAreaLabel;
// int nrLabels = connectedComponents(movObjMaskFromLast(regROIs[y][x]), actUsedAreaLabel, 4, CV_16U);
Mat actUsedAreaStats;
Mat actUsedAreaCentroids;
Mat usedPartMask = movObjMaskFromLast(regROIs[y][x]);
Mat markSameObj = Mat::zeros(regROIs[y][x].height, regROIs[y][x].width, CV_8UC1);
int nrLabels = connectedComponentsWithStats(usedPartMask, actUsedAreaLabel, actUsedAreaStats,
actUsedAreaCentroids, 8, CV_16U);
int nrFndObj = nrLabels;
for (int i = 0; i < nrLabels; i++) {
Rect labelBB = Rect(actUsedAreaStats.at<int32_t>(i, cv::ConnectedComponentsTypes::CC_STAT_LEFT),
actUsedAreaStats.at<int32_t>(i, cv::ConnectedComponentsTypes::CC_STAT_TOP),
actUsedAreaStats.at<int32_t>(i, cv::ConnectedComponentsTypes::CC_STAT_WIDTH),
actUsedAreaStats.at<int32_t>(i, cv::ConnectedComponentsTypes::CC_STAT_HEIGHT));
Mat labelPart = actUsedAreaLabel(labelBB);
Mat usedPartofPartMask = usedPartMask(labelBB);
Point checkPos = Point(0, 0);
bool noMovObj = false, validPT = false;
for (int y1 = 0; y1 < labelBB.height; ++y1) {
for (int x1 = 0; x1 < labelBB.width; ++x1) {
if (labelPart.at<uint16_t>(y1, x1) == i) {
if (usedPartofPartMask.at<unsigned char>(y1, x1) == 0) {
noMovObj = true;
nrFndObj--;
break;
}
checkPos = Point(x1, y1);//cv::Point takes (x, y)
validPT = true;
break;
}
}
if (noMovObj || validPT) break;
}
if (!noMovObj && validPT) {
if (markSameObj.at<unsigned char>(checkPos.y + labelBB.y, checkPos.x + labelBB.x) != 0) {
nrFndObj--;
} else {
unsigned char m_obj_nr = usedPartofPartMask.at<unsigned char>(checkPos);
markSameObj |= (usedPartMask == m_obj_nr);
}
}
}
if (nrFndObj < maxOPerReg)
nrPerRegMax.at<unsigned char>(y, x) = (unsigned char) (maxOPerReg - nrFndObj);
}
}
}
if (cv::sum(nrPerRegMax)[0] == 0) return false;
//Get the number of moving object seeds per region
int nrMovObjs_tmp = (int) pars.nrMovObjs - (int) movObj3DPtsWorld.size();
Mat nrPerReg = Mat::zeros(3, 3, CV_8UC1);
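//Randomly distribute the remaining number of new moving objects over the admissible regions: repeatedly
//flip a coin for every region that still has capacity until all objects are assigned or no capacity is left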
while (nrMovObjs_tmp > 0) {
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
if (nrPerRegMax.at<unsigned char>(y, x) > 0) {
int addit = (int)(rand2() % 2);
if (addit) {
nrPerReg.at<unsigned char>(y, x)++;
nrPerRegMax.at<unsigned char>(y, x)--;
nrMovObjs_tmp--;
if (nrMovObjs_tmp == 0)
break;
}
}
}
if (nrMovObjs_tmp == 0)
break;
}
if (cv::sum(nrPerRegMax)[0] == 0) break;
}
//Get the area for each moving object
std::uniform_int_distribution<int32_t> distribution((int32_t) minOArea, (int32_t) maxOArea);
movObjAreas.clear();
movObjAreas = vector<vector<vector<int32_t>>>(3, vector<vector<int32_t>>(3));
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
int nr_tmp = (int) nrPerReg.at<unsigned char>(y, x);
for (int i = 0; i < nr_tmp; i++) {
movObjAreas[y][x].push_back(distribution(rand_gen));
}
}
}
//Get seed positions
std::vector<std::vector<std::pair<bool,cv::Rect>>> validRects;
getValidImgRegBorders(actStereoImgsOverlapMask, validRects);
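//Minimum distance between seeds within a region, derived from the region height and the maximum number of objects per region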
minODist = imgSize.height / (3 * (maxOPerReg + 1));
movObjSeeds.clear();
movObjSeeds = vector<vector<vector<cv::Point_<int32_t>>>>(3, vector<vector<cv::Point_<int32_t>>>(3));
int maxIt = 20;
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
int nr_tmp = (int) nrPerReg.at<unsigned char>(y, x);
if (nr_tmp > 0) {
if(!validRects[y][x].first){
nrPerReg.at<unsigned char>(y, x) = 0;
movObjAreas[y][x].clear();
continue;
}
rand_gen = std::default_random_engine(
rand2());//Prevent getting the same starting positions for equal ranges
std::uniform_int_distribution<int> distributionX(validRects[y][x].second.x,
validRects[y][x].second.x + validRects[y][x].second.width - 1);
std::uniform_int_distribution<int> distributionY(validRects[y][x].second.y,
validRects[y][x].second.y + validRects[y][x].second.height - 1);
cv::Point_<int32_t> pt;
int cnt = 0;
while (cnt < maxIt) {
pt = cv::Point_<int32_t>(distributionX(rand_gen), distributionY(rand_gen));
if ((movObjMaskFromLast.at<unsigned char>(pt) == 0) && (actStereoImgsOverlapMask.at<unsigned char>(pt) > 0)) {
break;
}
cnt++;
}
if (cnt == maxIt) {
movObjAreas[y][x].clear();
nrPerReg.at<unsigned char>(y, x) = 0;
break;
}
movObjSeeds[y][x].push_back(pt);
nr_tmp--;
if (nr_tmp > 0) {
vector<int> xposes, yposes;
xposes.push_back(movObjSeeds[y][x].back().x);
yposes.push_back(movObjSeeds[y][x].back().y);
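//Build interval borders and weights around the already chosen seed positions so that additional seeds
//are preferably drawn away from the positions used so far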
while (nr_tmp > 0) {
vector<double> xInterVals, yInterVals;
vector<double> xWeights, yWeights;
buildDistributionRanges(xposes, yposes, x, y, xInterVals, xWeights, yInterVals, yWeights, &validRects);
//Create piecewise uniform distribution and get a random seed
piecewise_constant_distribution<double> distrPieceX(xInterVals.begin(), xInterVals.end(),
xWeights.begin());
piecewise_constant_distribution<double> distrPieceY(yInterVals.begin(), yInterVals.end(),
yWeights.begin());
cnt = 0;
while (cnt < maxIt) {
pt = cv::Point_<int32_t>((int32_t) floor(distrPieceX(rand_gen)),
(int32_t) floor(distrPieceY(rand_gen)));
if ((movObjMaskFromLast.at<unsigned char>(pt) == 0) && (actStereoImgsOverlapMask.at<unsigned char>(pt) > 0)) {
break;
}
cnt++;
}
if (cnt == maxIt) {
for (size_t i = 0; i < (movObjAreas[y][x].size() - movObjSeeds[y][x].size()); i++) {
movObjAreas[y][x].pop_back();
}
nrPerReg.at<unsigned char>(y, x) = (unsigned char) movObjSeeds[y][x].size();
break;
}
movObjSeeds[y][x].push_back(pt);
xposes.push_back(movObjSeeds[y][x].back().x);
yposes.push_back(movObjSeeds[y][x].back().y);
nr_tmp--;
}
}
}
}
}
return true;
}
//Extracts the areas and seeding positions for new moving objects from the region structure
bool genStereoSequ::getSeedAreaListFromReg(std::vector<cv::Point_<int32_t>> &seeds, std::vector<int32_t> &areas) {
seeds.clear();
areas.clear();
seeds.reserve(pars.nrMovObjs);
areas.reserve(pars.nrMovObjs);
for (int y = 0; y < 3; y++) {
for (int x = 0; x < 3; x++) {
if (!movObjAreas[y][x].empty()) {
//copy(movObjAreas[y][x].begin(), movObjAreas[y][x].end(), areas.end());
areas.insert(areas.end(), movObjAreas[y][x].begin(), movObjAreas[y][x].end());
}
if (!movObjSeeds[y][x].empty()) {
//copy(movObjSeeds[y][x].begin(), movObjSeeds[y][x].end(), seeds.end());
seeds.insert(seeds.end(), movObjSeeds[y][x].begin(), movObjSeeds[y][x].end());
}
}
}
CV_Assert(seeds.size() == areas.size());
if (seeds.empty())
return false;
return true;
}
//Check if it is necessary to calculate new moving objects
//This function should not be called during the first frame
//The function backProjectMovObj() must be called before
bool genStereoSequ::getNewMovObjs() {
if (pars.minNrMovObjs == 0) {
clearNewMovObjVars();
return false;
}
if (movObj3DPtsWorld.size() >= pars.minNrMovObjs) {
clearNewMovObjVars();
return false;
}
return true;
}
//Combines correspondences from static and moving objects
void genStereoSequ::combineCorrespondences() {
//Get number of TP correspondences
finalNrTPStatCorrs = actCorrsImg1TP.cols;
finalNrTPStatCorrsFromLast = actCorrsImg1TPFromLast.cols;
finalNrTPMovCorrs = 0;
finalNrTPMovCorrsFromLast = 0;
combNrCorrsTP = finalNrTPStatCorrsFromLast + finalNrTPStatCorrs;
for (auto& i : movObjCorrsImg1TPFromLast) {
if (!i.empty()) {
combNrCorrsTP += i.cols;
finalNrTPMovCorrsFromLast += i.cols;
}
}
for (auto& i : movObjCorrsImg1TP) {
if (!i.empty()) {
combNrCorrsTP += i.cols;
finalNrTPMovCorrs += i.cols;
}
}
//Get number of TN correspondences
combNrCorrsTN = actCorrsImg1TN.cols;
for (auto& i : movObjCorrsImg1TNFromLast) {
if (!i.empty()) {
combNrCorrsTN += i.cols;
}
}
for (auto& i : movObjCorrsImg1TN) {
if (!i.empty()) {
combNrCorrsTN += i.cols;
}
}
//Update number of correspondences
nrCorrs[actFrameCnt] = combNrCorrsTP + combNrCorrsTN;
combCorrsImg1TP.release();
combCorrsImg2TP.release();
if (combNrCorrsTP) {
combCorrsImg1TP = Mat(3, combNrCorrsTP, CV_64FC1);
combCorrsImg2TP = Mat(3, combNrCorrsTP, CV_64FC1);
}
comb3DPts.clear();
comb3DPts.reserve((size_t)combNrCorrsTP);
//Copy all TP keypoints of first image
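//Column layout of combCorrsImg1TP/combCorrsImg2TP: [static TP from last frame | new static TP |
// moving-object TP from last frame | new moving-object TP]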
int actColNr = 0;
int actColNr2 = actCorrsImg1TPFromLast.cols;
if (actColNr2 != actColNr) {
actCorrsImg1TPFromLast.copyTo(combCorrsImg1TP.colRange(actColNr, actColNr2));
}
actColNr = actColNr2;
actColNr2 = actColNr + actCorrsImg1TP.cols;
if (actColNr2 != actColNr) {
actCorrsImg1TP.copyTo(combCorrsImg1TP.colRange(actColNr, actColNr2));
}
for (auto& i : movObjCorrsImg1TPFromLast) {
actColNr = actColNr2;
actColNr2 = actColNr + i.cols;
if (actColNr2 != actColNr) {
i.copyTo(combCorrsImg1TP.colRange(actColNr, actColNr2));
}
}
for (auto& i : movObjCorrsImg1TP) {
actColNr = actColNr2;
actColNr2 = actColNr + i.cols;
if (actColNr2 != actColNr) {
i.copyTo(combCorrsImg1TP.colRange(actColNr, actColNr2));
}
}
//Copy all 3D points
for (auto& i : actCorrsImg12TPFromLast_Idx) {
comb3DPts.push_back(actImgPointCloudFromLast[i]);
}
if (!actImgPointCloud.empty()) {
comb3DPts.insert(comb3DPts.end(), actImgPointCloud.begin(), actImgPointCloud.end());
}
for (size_t i = 0; i < movObjCorrsImg12TPFromLast_Idx.size(); i++) {
for (auto& j : movObjCorrsImg12TPFromLast_Idx[i]) {
comb3DPts.push_back(movObj3DPtsCam[i][j]);
}
}
for (auto& i : movObj3DPtsCamNew) {
//copy(i.begin(), i.end(), comb3DPts.end());
if (!i.empty()) {
comb3DPts.insert(comb3DPts.end(), i.begin(), i.end());
}
}
CV_Assert(combCorrsImg1TP.cols == (int) comb3DPts.size());
//Copy all TP keypoints of second image
actColNr = 0;
actColNr2 = actCorrsImg2TPFromLast.cols;
if (actColNr2 != actColNr) {
actCorrsImg2TPFromLast.copyTo(combCorrsImg2TP.colRange(actColNr, actColNr2));
}
actColNr = actColNr2;
actColNr2 = actColNr + actCorrsImg2TP.cols;
if (actColNr2 != actColNr) {
actCorrsImg2TP.copyTo(combCorrsImg2TP.colRange(actColNr, actColNr2));
}
for (auto& i : movObjCorrsImg2TPFromLast) {
actColNr = actColNr2;
actColNr2 = actColNr + i.cols;
if (actColNr2 != actColNr) {
i.copyTo(combCorrsImg2TP.colRange(actColNr, actColNr2));
}
}
for (auto& i : movObjCorrsImg2TP) {
actColNr = actColNr2;
actColNr2 = actColNr + i.cols;
if (actColNr2 != actColNr) {
i.copyTo(combCorrsImg2TP.colRange(actColNr, actColNr2));
}
}
combCorrsImg1TN.release();
combCorrsImg2TN.release();
if (combNrCorrsTN) {
combCorrsImg1TN = Mat(3, combNrCorrsTN, CV_64FC1);
combCorrsImg2TN = Mat(3, combNrCorrsTN, CV_64FC1);
}
//Copy all TN keypoints of first image
finalNrTNStatCorrs = actCorrsImg1TN.cols;
finalNrTNMovCorrs = 0;
actColNr = 0;
actColNr2 = finalNrTNStatCorrs;
if (actColNr2 != actColNr) {
actCorrsImg1TN.copyTo(combCorrsImg1TN.colRange(actColNr, actColNr2));
}
for (auto& i : movObjCorrsImg1TNFromLast) {
actColNr = actColNr2;
actColNr2 = actColNr + i.cols;
if (actColNr2 != actColNr) {
i.copyTo(combCorrsImg1TN.colRange(actColNr, actColNr2));
finalNrTNMovCorrs += i.cols;
}
}
for (auto& i : movObjCorrsImg1TN) {
actColNr = actColNr2;
actColNr2 = actColNr + i.cols;
if (actColNr2 != actColNr) {
i.copyTo(combCorrsImg1TN.colRange(actColNr, actColNr2));
finalNrTNMovCorrs += i.cols;
}
}
//Copy all TN keypoints of second image
actColNr = 0;
actColNr2 = actCorrsImg2TN.cols;
if (actColNr2 != actColNr) {
actCorrsImg2TN.copyTo(combCorrsImg2TN.colRange(actColNr, actColNr2));
}
for (auto& i : movObjCorrsImg2TNFromLast) {
actColNr = actColNr2;
actColNr2 = actColNr + i.cols;
if (actColNr2 != actColNr) {
i.copyTo(combCorrsImg2TN.colRange(actColNr, actColNr2));
}
}
for (auto& i : movObjCorrsImg2TN) {
actColNr = actColNr2;
actColNr2 = actColNr + i.cols;
if (actColNr2 != actColNr) {
i.copyTo(combCorrsImg2TN.colRange(actColNr, actColNr2));
}
}
//Copy distances of TN locations to their real matching position
combDistTNtoReal.clear();
combDistTNtoReal.reserve((size_t)combNrCorrsTN);
if (!distTNtoReal.empty()) {
combDistTNtoReal.insert(combDistTNtoReal.end(), distTNtoReal.begin(), distTNtoReal.end());
}
for (auto& i : movObjDistTNtoReal) {
//copy(i.begin(), i.end(), combDistTNtoReal.end());
if (!i.empty()) {
combDistTNtoReal.insert(combDistTNtoReal.end(), i.begin(), i.end());
}
}
for (auto& i : movObjDistTNtoRealNew) {
//copy(i.begin(), i.end(), combDistTNtoReal.end());
if (!i.empty()) {
combDistTNtoReal.insert(combDistTNtoReal.end(), i.begin(), i.end());
}
}
CV_Assert((size_t) combCorrsImg1TN.cols == combDistTNtoReal.size());
if(verbose & SHOW_COMBINED_CORRESPONDENCES){
visualizeAllCorrespondences();
}
//Check global inlier ratio of backprojected and new static and moving objects
if(verbose & PRINT_WARNING_MESSAGES) {
double inlRatDiffSR = (double) combNrCorrsTP / (double) (combNrCorrsTP + combNrCorrsTN) - inlRat[actFrameCnt];
double testVal = min((double) (combNrCorrsTP + combNrCorrsTN) / 100.0, 1.0) * inlRatDiffSR / 300.0;
if (!nearZero(testVal)) {
cout
<< "Inlier ratio of combined static and moving correspondences differs from global inlier ratio (0 - 1.0) by "
<< inlRatDiffSR << endl;
}
}
}
void genStereoSequ::combineWorldCoordinateIndices(){
combCorrsImg12TP_IdxWorld.clear();
combCorrsImg12TP_IdxWorld2.clear();
//Copy all 3D world coordinate indices
for (auto& i : actCorrsImg12TPFromLast_Idx) {
combCorrsImg12TP_IdxWorld.push_back(static_cast<int64_t>(actCorrsImg12TPFromLast_IdxWorld[i]));
}
if (!actImgPointCloud.empty()) {
CV_Assert(!actCorrsImg12TP_IdxWorld.empty());
combCorrsImg12TP_IdxWorld.reserve(combCorrsImg12TP_IdxWorld.size() + actCorrsImg12TP_IdxWorld.size());
for(auto& i : actCorrsImg12TP_IdxWorld){
combCorrsImg12TP_IdxWorld.push_back(static_cast<int64_t>(i));
}
}
combCorrsImg12TPContMovObj_IdxWorld = combCorrsImg12TP_IdxWorld;
combCorrsImg12TP_IdxWorld2 = combCorrsImg12TP_IdxWorld;
int64_t idxMOAllFrames = static_cast<int64_t>(movObj3DPtsWorldAllFrames.size()) -
static_cast<int64_t>(movObjCorrsImg12TPFromLast_Idx.size()) -
static_cast<int64_t>(actCorrsOnMovObj_IdxWorld.size());
nrMovingObjPerFrame.push_back(movObjCorrsImg12TPFromLast_Idx.size() + actCorrsOnMovObj_IdxWorld.size());
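//Indices of moving-object correspondences are stored as negative packed values:
//combCorrsImg12TP_IdxWorld and combCorrsImg12TPContMovObj_IdxWorld hold -((pointIdx << 32) | objectNr), with the
//1-based object number (counted per frame or continuously over all frames, respectively) in the lower 32 bits and
//the point index within the object in the upper bits. combCorrsImg12TP_IdxWorld2 packs, in the same negated form,
//the 1-based initial point index (bits 32+), the frame of emergence (bits 8-31) and an object identifier (bits 0-7).
//Decoding sketch (hypothetical helper, assuming the packing above):
//  int64_t v = -combCorrsImg12TP_IdxWorld[k];
//  auto objectNr = static_cast<int32_t>(v & 0xFFFFFFFF); //1-based moving object number
//  auto pointIdx = static_cast<int32_t>(v >> 32);        //index of the 3D point within the object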
for (size_t i = 0; i < movObjCorrsImg12TPFromLast_Idx.size(); i++) {
combCorrsImg12TP_IdxWorld.reserve(combCorrsImg12TP_IdxWorld.size() + movObjCorrsImg12TPFromLast_Idx[i].size());
combCorrsImg12TP_IdxWorld2.reserve(combCorrsImg12TP_IdxWorld2.size() + movObjCorrsImg12TPFromLast_Idx[i].size());
combCorrsImg12TPContMovObj_IdxWorld.reserve(combCorrsImg12TPContMovObj_IdxWorld.size() +
movObjCorrsImg12TPFromLast_Idx[i].size());
const auto idxMO = static_cast<int64_t>(i + 1);
const int64_t idxMOAll = idxMO + idxMOAllFrames;
const auto idx_FrEmerge = static_cast<int64_t>(get<0>(movObjFrameEmerge[i])) << 8;
const auto init_idx = static_cast<int64_t>(get<1>(movObjFrameEmerge[i]) + 1);
for (auto& j : movObjCorrsImg12TPFromLast_Idx[i]) {
auto idxPts = static_cast<int64_t>(actCorrsOnMovObjFromLast_IdxWorld[i][j]);
auto idx_emerge = static_cast<int64_t>(movObjImg12TP_InitIdxWorld[i][idxPts]);
CV_Assert(idxPts < static_cast<int64_t>(INT32_MAX));
idxPts = idxPts << 32;
idx_emerge = (idx_emerge + 1) << 32;
int64_t idx = -1 * (idxMO | idxPts);
combCorrsImg12TP_IdxWorld.push_back(idx);
idx = -1 * (idxMOAll | idxPts);
combCorrsImg12TPContMovObj_IdxWorld.push_back(idx);
const auto idx_id = -1 * (idx_emerge | idx_FrEmerge | init_idx);
combCorrsImg12TP_IdxWorld2.push_back(idx_id);
}
}
const auto movObjFromLastSi = static_cast<int64_t>(movObjCorrsImg12TPFromLast_Idx.size());
idxMOAllFrames += movObjFromLastSi;
for (size_t i = 0; i < actCorrsOnMovObj_IdxWorld.size(); i++) {
if (!actCorrsOnMovObj_IdxWorld[i].empty()) {
combCorrsImg12TP_IdxWorld.reserve(combCorrsImg12TP_IdxWorld.size() + actCorrsOnMovObj_IdxWorld[i].size());
combCorrsImg12TP_IdxWorld2.reserve(combCorrsImg12TP_IdxWorld2.size() + actCorrsOnMovObj_IdxWorld[i].size());
combCorrsImg12TPContMovObj_IdxWorld.reserve(combCorrsImg12TPContMovObj_IdxWorld.size() +
actCorrsOnMovObj_IdxWorld[i].size());
const auto idxMO = static_cast<int64_t>(i + 1);
const auto idx2 = static_cast<size_t>(movObjFromLastSi) + i;
const int64_t idxMOAll = idxMO + idxMOAllFrames;
get<1>(movObjFrameEmerge[idx2]) = idx2;//i;
const auto idx_frCnt = static_cast<int64_t>(actFrameCnt) << 8;
for (auto& j : actCorrsOnMovObj_IdxWorld[i]) {
auto idxPts = static_cast<int64_t>(j);
auto idx_emerge = static_cast<int64_t>(movObjImg12TP_InitIdxWorld[idx2][j]);
CV_Assert(idxPts < static_cast<int64_t>(INT32_MAX));
idxPts = idxPts << 32;
idx_emerge = (idx_emerge + 1) << 32;
int64_t idx = -1 * (idxMO | idxPts);
combCorrsImg12TP_IdxWorld.push_back(idx);
idx = -1 * (idxMOAll | idxPts);
combCorrsImg12TPContMovObj_IdxWorld.push_back(idx);
const auto idx_id = -1 * (idx_emerge | idx_frCnt | (idxMO + movObjFromLastSi));
combCorrsImg12TP_IdxWorld2.push_back(idx_id);
}
}
}
}
void genStereoSequ::visualizeAllCorrespondences(){
Mat allCorrs = cv::Mat::zeros(imgSize, CV_8UC3);
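//Color coding (BGR): green = static TP, blue = moving-object TP, half intensity = TN;
//the red channel marks locations where correspondences overlap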
for (int i = 0; i < actCorrsImg1TPFromLast.cols; ++i) {
int x = (int)round(actCorrsImg1TPFromLast.at<double>(0,i));
int y = (int)round(actCorrsImg1TPFromLast.at<double>(1,i));
allCorrs.at<cv::Vec3b>(y,x)[1] = 255;
}
int cnt_overlaps = 0;
for (int i = 0; i < actCorrsImg1TP.cols; ++i) {
int x = (int)round(actCorrsImg1TP.at<double>(0,i));
int y = (int)round(actCorrsImg1TP.at<double>(1,i));
if((allCorrs.at<cv::Vec3b>(y,x)[0] == 0) && (allCorrs.at<cv::Vec3b>(y,x)[1] == 0) && (allCorrs.at<cv::Vec3b>(y,x)[2] == 0)){
allCorrs.at<cv::Vec3b>(y,x)[1] = 255;
}
else{
allCorrs.at<cv::Vec3b>(y,x)[1] = 0;
allCorrs.at<cv::Vec3b>(y,x)[2] = 255;
cnt_overlaps++;
}
}
for (auto &j : movObjCorrsImg1TPFromLast) {
for (int i = 0; i < j.cols; ++i) {
int x = (int)round(j.at<double>(0,i));
int y = (int)round(j.at<double>(1,i));
if((allCorrs.at<cv::Vec3b>(y,x)[0] == 0) && (allCorrs.at<cv::Vec3b>(y,x)[1] == 0) && (allCorrs.at<cv::Vec3b>(y,x)[2] == 0)){
allCorrs.at<cv::Vec3b>(y,x)[0] = 255;
}
else{
allCorrs.at<cv::Vec3b>(y,x)[2] = 255;
cnt_overlaps++;
}
}
}
for (auto &j : movObjCorrsImg1TP) {
for (int i = 0; i < j.cols; ++i) {
int x = (int)round(j.at<double>(0,i));
int y = (int)round(j.at<double>(1,i));
if((allCorrs.at<cv::Vec3b>(y,x)[0] == 0) && (allCorrs.at<cv::Vec3b>(y,x)[1] == 0) && (allCorrs.at<cv::Vec3b>(y,x)[2] == 0)){
allCorrs.at<cv::Vec3b>(y,x)[0] = 255;
}
else{
allCorrs.at<cv::Vec3b>(y,x)[2] = 255;
cnt_overlaps++;
}
}
}
for (int i = 0; i < actCorrsImg1TN.cols; ++i) {
int x = (int)round(actCorrsImg1TN.at<double>(0,i));
int y = (int)round(actCorrsImg1TN.at<double>(1,i));
if((allCorrs.at<cv::Vec3b>(y,x)[0] == 0) && (allCorrs.at<cv::Vec3b>(y,x)[1] == 0) && (allCorrs.at<cv::Vec3b>(y,x)[2] == 0)){
allCorrs.at<cv::Vec3b>(y,x)[1] = 127;
}
else{
allCorrs.at<cv::Vec3b>(y,x)[1] = 0;
allCorrs.at<cv::Vec3b>(y,x)[2] = 127;
cnt_overlaps++;
}
}
for (auto &j : movObjCorrsImg1TNFromLast) {
for (int i = 0; i < j.cols; ++i) {
int x = (int)round(j.at<double>(0,i));
int y = (int)round(j.at<double>(1,i));
if((allCorrs.at<cv::Vec3b>(y,x)[0] == 0) && (allCorrs.at<cv::Vec3b>(y,x)[1] == 0) && (allCorrs.at<cv::Vec3b>(y,x)[2] == 0)){
allCorrs.at<cv::Vec3b>(y,x)[0] = 127;
}
else{
allCorrs.at<cv::Vec3b>(y,x)[2] = 127;
cnt_overlaps++;
}
}
}
for (auto &j : movObjCorrsImg1TN) {
for (int i = 0; i < j.cols; ++i) {
int x = (int)round(j.at<double>(0,i));
int y = (int)round(j.at<double>(1,i));
if((allCorrs.at<cv::Vec3b>(y,x)[0] == 0) && (allCorrs.at<cv::Vec3b>(y,x)[1] == 0) && (allCorrs.at<cv::Vec3b>(y,x)[2] == 0)){
allCorrs.at<cv::Vec3b>(y,x)[0] = 127;
}
else{
allCorrs.at<cv::Vec3b>(y,x)[2] = 127;
cnt_overlaps++;
}
}
}
if(cnt_overlaps > 0){
cout << "Found " << cnt_overlaps << " overlapping correspondences!" << endl;
}
if(!writeIntermediateImg(allCorrs, "combined_correspondences_in_image_1")) {
namedWindow("Combined correspondences in image 1", WINDOW_AUTOSIZE);
imshow("Combined correspondences in image 1", allCorrs);
waitKey(0);
destroyWindow("Combined correspondences in image 1");
}
}
//Get the parameters and indices for the actual frame. This function must be called before simulating a new stereo frame
void genStereoSequ::updateFrameParameters() {
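//Switch to the next stereo configuration (R, t, distorted camera matrices, depth ranges) every pars.nFramesPerCamConf frames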
if (((actFrameCnt % (pars.nFramesPerCamConf)) == 0) && (actFrameCnt > 0)) {
actStereoCIdx++;
}
actR = R[actStereoCIdx];
actT = t[actStereoCIdx];
actKd1 = K1_distorted[actStereoCIdx];
actKd2 = K2_distorted[actStereoCIdx];
actDepthNear = depthNear[actStereoCIdx];
actDepthMid = depthMid[actStereoCIdx];
actDepthFar = depthFar[actStereoCIdx];
actFracUseableTPperRegion = fracUseableTPperRegion[actStereoCIdx];
actStereoImgsOverlapMask = stereoImgsOverlapMask[actStereoCIdx];
if (((actFrameCnt % (pars.corrsPerRegRepRate)) == 0) && (actFrameCnt > 0)) {
actCorrsPRIdx++;
if (actCorrsPRIdx >= pars.corrsPerRegion.size()) {
actCorrsPRIdx = 0;
}
}
}
//Insert newly generated 3D points into the world coordinate point cloud
void genStereoSequ::transPtsToWorld() {
if (actImgPointCloud.empty()) {
return;
}
size_t nrPts = actImgPointCloud.size();
size_t nrOldPts = staticWorld3DPts->size();
actCorrsImg12TP_IdxWorld.clear();
actCorrsImg12TP_IdxWorld.resize(nrPts);
std::iota(actCorrsImg12TP_IdxWorld.begin(), actCorrsImg12TP_IdxWorld.end(), nrOldPts);
staticWorld3DPts->reserve(nrOldPts + nrPts);
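//Transform each new point from the current camera into world coordinates: X_world = R * X_cam + t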
for (size_t i = 0; i < nrPts; i++) {
Mat ptm = absCamCoordinates[actFrameCnt].R * Mat(actImgPointCloud[i]).reshape(1) +
absCamCoordinates[actFrameCnt].t;
staticWorld3DPts->push_back(
pcl::PointXYZ((float) ptm.at<double>(0), (float) ptm.at<double>(1), (float) ptm.at<double>(2)));
}
if (verbose & SHOW_STATIC_OBJ_3D_PTS) {
visualizeStaticObjPtCloud();
}
destroyAllWindows();
if ((verbose & SHOW_MOV_OBJ_3D_PTS) && (verbose & SHOW_STATIC_OBJ_3D_PTS)) {
visualizeMovingAndStaticObjPtCloud();
}
}
//Transform newly generated 3D points of new moving objects into world coordinates
void genStereoSequ::transMovObjPtsToWorld() {
if (movObj3DPtsCamNew.empty()) {
return;
}
size_t nrNewObjs = movObj3DPtsCamNew.size();
size_t nrOldObjs = movObj3DPtsWorld.size();
size_t nrSaveObjs = movObj3DPtsWorldAllFrames.size();
movObj3DPtsWorld.resize(nrOldObjs + nrNewObjs);
movObjFrameEmerge.resize(nrOldObjs + nrNewObjs);
movObjImg12TP_InitIdxWorld.resize(nrOldObjs + nrNewObjs);
movObjWorldMovement.resize(nrOldObjs + nrNewObjs);
movObj3DPtsWorldAllFrames.resize(nrSaveObjs + nrNewObjs);
//Add new depth classes to existing ones
movObjDepthClass.insert(movObjDepthClass.end(), movObjDepthClassNew.begin(), movObjDepthClassNew.end());
/* WRONG:
* Mat trans_c2w;//Translation vector for transferring 3D points from camera to world coordinates
Mat RC2W = absCamCoordinates[actFrameCnt].R.t();
trans_c2w = -1.0 * RC2W * absCamCoordinates[actFrameCnt].t;//Get the C2W-translation from the position of the camera centre in world coordinates*/
for (size_t i = 0; i < nrNewObjs; i++) {
size_t idx = nrOldObjs + i;
movObj3DPtsWorld[idx].reserve(movObj3DPtsCamNew[i].size());
movObjImg12TP_InitIdxWorld[idx].resize(movObj3DPtsCamNew[i].size());
std::iota (movObjImg12TP_InitIdxWorld[idx].begin(), movObjImg12TP_InitIdxWorld[idx].end(), 0);
for (size_t j = 0; j < movObj3DPtsCamNew[i].size(); j++) {
Mat pt3 = Mat(movObj3DPtsCamNew[i][j]).reshape(1);
//X_world = R * X_cam + t
Mat ptm = absCamCoordinates[actFrameCnt].R * pt3 + absCamCoordinates[actFrameCnt].t;
// Mat ptm = RC2W * pt3 + trans_c2w;
movObj3DPtsWorld[idx].push_back(
pcl::PointXYZ((float) ptm.at<double>(0), (float) ptm.at<double>(1), (float) ptm.at<double>(2)));
}
movObj3DPtsWorldAllFrames[nrSaveObjs + i] = movObj3DPtsWorld[idx];
double velocity = 0;
if (nearZero(pars.relMovObjVelRange.first - pars.relMovObjVelRange.second)) {
velocity = absCamVelocity * pars.relMovObjVelRange.first;
} else {
double relV = getRandDoubleValRng(pars.relMovObjVelRange.first, pars.relMovObjVelRange.second);
velocity = absCamVelocity * relV;
}
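//The object's world movement per frame is the normalized sum of the camera's motion direction (towards
//the next frame) and the global moving-object direction, scaled by the object's absolute velocity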
Mat tdiff;
if ((actFrameCnt + 1) < totalNrFrames) {
//Get direction of camera from actual to next frame
tdiff = absCamCoordinates[actFrameCnt + 1].t - absCamCoordinates[actFrameCnt].t;
} else {
//Get direction of camera from last to actual frame
tdiff = absCamCoordinates[actFrameCnt].t - absCamCoordinates[actFrameCnt - 1].t;
}
double tnorm = norm(tdiff);
if(nearZero(tnorm))
tdiff = Mat::zeros(3,1,CV_64FC1);
else
tdiff /= tnorm;
//Add the movement direction of the moving object
tdiff += movObjDir;
tnorm = norm(tdiff);
if(nearZero(tnorm))
tdiff = Mat::zeros(3,1,CV_64FC1);
else
tdiff /= tnorm;
tdiff *= velocity;
movObjWorldMovement[idx] = tdiff.clone();
movObjFrameEmerge[idx] = make_tuple(actFrameCnt, 0, tdiff.clone());
}
if (verbose & SHOW_MOV_OBJ_3D_PTS) {
visualizeMovObjPtCloud();
}
destroyAllWindows();
}
void genStereoSequ::visualizeMovObjPtCloud() {
if (movObj3DPtsWorld.empty())
return;
boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer(
new pcl::visualization::PCLVisualizer("Moving objects"));
Eigen::Affine3f m = initPCLViewerCoordinateSystems(viewer, absCamCoordinates[actFrameCnt].R,
absCamCoordinates[actFrameCnt].t);
//Generate colormap for moving objects (every object has a different color)
Mat colormap_img;
getNColors(colormap_img, movObj3DPtsWorld.size(), COLORMAP_PARULA);
pcl::PointCloud<pcl::PointXYZRGB>::Ptr basic_cloud_ptr(new pcl::PointCloud<pcl::PointXYZRGB>);
int idx = 0;
for (auto &i : movObj3DPtsWorld) {
for (auto &j : i) {
pcl::PointXYZRGB point;
point.x = j.x;
point.y = j.y;
point.z = j.z;
point.b = colormap_img.at<cv::Vec3b>(idx)[0];
point.g = colormap_img.at<cv::Vec3b>(idx)[1];
point.r = colormap_img.at<cv::Vec3b>(idx)[2];
basic_cloud_ptr->push_back(point);
}
idx++;
}
viewer->addPointCloud<pcl::PointXYZRGB>(basic_cloud_ptr, "moving objects cloud");
viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1,
"moving objects cloud");
setPCLViewerCamPars(viewer, m.matrix(), K1);
startPCLViewer(viewer);
}
void genStereoSequ::visualizeStaticObjPtCloud() {
if (staticWorld3DPts->empty())
return;
boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer(
new pcl::visualization::PCLVisualizer("Static Objects"));
Eigen::Affine3f m = initPCLViewerCoordinateSystems(viewer, absCamCoordinates[actFrameCnt].R,
absCamCoordinates[actFrameCnt].t);
viewer->addPointCloud<pcl::PointXYZ>(staticWorld3DPts, "static objects cloud");
viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1,
"static objects cloud");
setPCLViewerCamPars(viewer, m.matrix(), K1);
startPCLViewer(viewer);
}
void genStereoSequ::visualizeMovingAndStaticObjPtCloud() {
if (staticWorld3DPts->empty() || movObj3DPtsWorld.empty())
return;
boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer(
new pcl::visualization::PCLVisualizer("Moving and static objects"));
Eigen::Affine3f m = initPCLViewerCoordinateSystems(viewer, absCamCoordinates[actFrameCnt].R,
absCamCoordinates[actFrameCnt].t);
pcl::PointCloud<pcl::PointXYZRGB>::Ptr basic_cloud_ptr(new pcl::PointCloud<pcl::PointXYZRGB>);
//Generate colormap for moving objects (every object has a different color)
Mat colormap_img;
getNColors(colormap_img, movObj3DPtsWorld.size(), COLORMAP_AUTUMN);
int idx = 0;
for (auto &i : movObj3DPtsWorld) {
for (auto &j : i) {
pcl::PointXYZRGB point;
point.x = j.x;
point.y = j.y;
point.z = j.z;
point.b = colormap_img.at<cv::Vec3b>(idx)[0];
point.g = colormap_img.at<cv::Vec3b>(idx)[1];
point.r = colormap_img.at<cv::Vec3b>(idx)[2];
basic_cloud_ptr->push_back(point);
}
idx++;
}
for (const auto &i : *(staticWorld3DPts.get())) {
pcl::PointXYZRGB point;
point.x = i.x;
point.y = i.y;
point.z = i.z;
point.b = 0;
point.g = 255;
point.r = 0;
basic_cloud_ptr->push_back(point);
}
viewer->addPointCloud<pcl::PointXYZRGB>(basic_cloud_ptr, "static and moving objects cloud");
viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1,
"static and moving objects cloud");
setPCLViewerCamPars(viewer, m.matrix(), K1);
startPCLViewer(viewer);
}
void genStereoSequ::visualizeMovObjMovement(std::vector<pcl::PointXYZ> &cloudCentroids_old,
std::vector<pcl::PointXYZ> &cloudCentroids_new,
std::vector<float> &cloudExtensions) {
if (cloudCentroids_old.empty() || cloudCentroids_new.empty())
return;
CV_Assert(cloudCentroids_old.size() == cloudCentroids_new.size());
boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer(
new pcl::visualization::PCLVisualizer("Moving object movement"));
Eigen::Affine3f m = initPCLViewerCoordinateSystems(viewer, absCamCoordinates[actFrameCnt].R,
absCamCoordinates[actFrameCnt].t);
//Generate colormap for moving objects (every object has a different color)
Mat colormap_img;
getNColors(colormap_img, cloudCentroids_old.size() * 2, COLORMAP_PARULA);
colormap_img.convertTo(colormap_img, CV_64FC3);
colormap_img /= 255.0;
for (int i = 0; i < (int)cloudCentroids_old.size(); ++i) {
int i2 = 2 * i;
int i21 = i2 + 1;
viewer->addSphere(cloudCentroids_old[i],
(double) cloudExtensions[i],
colormap_img.at<cv::Vec3d>(i2)[2],
colormap_img.at<cv::Vec3d>(i2)[1],
colormap_img.at<cv::Vec3d>(i2)[0],
"sphere_old" + std::to_string(i));
viewer->addSphere(cloudCentroids_new[i],
(double) cloudExtensions[i],
colormap_img.at<cv::Vec3d>(i21)[2],
colormap_img.at<cv::Vec3d>(i21)[1],
colormap_img.at<cv::Vec3d>(i21)[0],
"sphere_new" + std::to_string(i));
viewer->addArrow(cloudCentroids_new[i],
cloudCentroids_old[i],
colormap_img.at<cv::Vec3d>(i2)[2],
colormap_img.at<cv::Vec3d>(i2)[1],
colormap_img.at<cv::Vec3d>(i2)[0],
false,
"arrow" + std::to_string(i));
}
//Add last camera center
if (actFrameCnt > 0) {
addVisualizeCamCenter(viewer, absCamCoordinates[actFrameCnt - 1].R, absCamCoordinates[actFrameCnt - 1].t);
pcl::PointXYZ c_old, c_new;
c_old.x = (float) absCamCoordinates[actFrameCnt - 1].t.at<double>(0);
c_old.y = (float) absCamCoordinates[actFrameCnt - 1].t.at<double>(1);
c_old.z = (float) absCamCoordinates[actFrameCnt - 1].t.at<double>(2);
c_new.x = (float) absCamCoordinates[actFrameCnt].t.at<double>(0);
c_new.y = (float) absCamCoordinates[actFrameCnt].t.at<double>(1);
c_new.z = (float) absCamCoordinates[actFrameCnt].t.at<double>(2);
viewer->addArrow(c_new,
c_old,
1.0,
1.0,
1.0,
false,
"arrow_cams");
}
setPCLViewerCamPars(viewer, m.matrix(), K1);
startPCLViewer(viewer);
}
//Get the relative movement direction (compared to the camera movement) for every moving object
void genStereoSequ::checkMovObjDirection() {
if (pars.movObjDir.empty()) {
Mat newMovObjDir(3, 1, CV_64FC1);
cv::randu(newMovObjDir, Scalar(0), Scalar(1.0));
double newMovObjDirNorm = norm(newMovObjDir);
if(nearZero(newMovObjDirNorm))
newMovObjDir = Mat::zeros(3,1,CV_64FC1);
else
newMovObjDir /= newMovObjDirNorm;
newMovObjDir.copyTo(movObjDir);
} else {
movObjDir = pars.movObjDir;
double movObjDirNorm = norm(movObjDir);
if(nearZero(movObjDirNorm))
movObjDir = Mat::zeros(3,1,CV_64FC1);
else
movObjDir /= movObjDirNorm;
}
}
//Updates the actual position of moving objects and their corresponding 3D points according to their moving direction and velocity.
void genStereoSequ::updateMovObjPositions() {
if (movObj3DPtsWorld.empty()) {
return;
}
vector<pcl::PointXYZ> mocentroids;
if (verbose & SHOW_MOV_OBJ_MOVEMENT) {
getCloudCentroids(movObj3DPtsWorld, mocentroids);
}
for (size_t i = 0; i < movObj3DPtsWorld.size(); i++) {
Eigen::Affine3f obj_transform = Eigen::Affine3f::Identity();
obj_transform.translation() << (float) movObjWorldMovement[i].at<double>(0),
(float) movObjWorldMovement[i].at<double>(1),
(float) movObjWorldMovement[i].at<double>(2);
pcl::transformPointCloud(movObj3DPtsWorld[i], movObj3DPtsWorld[i], obj_transform);
}
if (verbose & SHOW_MOV_OBJ_MOVEMENT) {
vector<pcl::PointXYZ> mocentroids2;
vector<float> cloudExtensions;
getCloudCentroids(movObj3DPtsWorld, mocentroids2);
getMeanCloudStandardDevs(movObj3DPtsWorld, cloudExtensions, mocentroids2);
visualizeMovObjMovement(mocentroids, mocentroids2, cloudExtensions);
}
}
//Get 3D points of moving objects that are visible in the camera and transform them from the world coordinate system into the camera coordinate system
void genStereoSequ::getMovObjPtsCam() {
if (movObj3DPtsWorld.empty()) {
return;
}
size_t movObjSize = movObj3DPtsWorld.size();
vector<int> delList;
movObj3DPtsCam.clear();
movObj3DPtsCam.resize(movObjSize);
actCorrsOnMovObjFromLast_IdxWorld.clear();
actCorrsOnMovObjFromLast_IdxWorld.resize(movObjSize);
vector<pcl::PointCloud<pcl::PointXYZ>::Ptr> filteredOccludedPts(movObjSize);
vector<pcl::PointCloud<pcl::PointXYZ>::Ptr> filteredOccludedCamPts(movObjSize);
vector<vector<int>> filteredOccludedCamPts_idx(movObjSize);
vector<pair<float, int>> meanDists(movObjSize, make_pair(0, -1));
Eigen::Affine3f obj_transform = Eigen::Affine3f::Identity();
Eigen::Vector3d te;
Eigen::Matrix3d Re;
cv::cv2eigen(absCamCoordinates[actFrameCnt].R.t(), Re);
cv::cv2eigen(absCamCoordinates[actFrameCnt].t, te);
te = -1.0 * Re * te;
obj_transform.matrix().block<3, 3>(0, 0) = Re.cast<float>();
obj_transform.matrix().block<3, 1>(0, 3) = te.cast<float>();
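//obj_transform now holds the world-to-camera transformation: X_cam = R^T * (X_world - t)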
for (int i = 0; i < (int)movObjSize; i++) {
pcl::PointCloud<pcl::PointXYZ>::Ptr ptr_movObj3DPtsWorld(movObj3DPtsWorld[i].makeShared());
pcl::PointCloud<pcl::PointXYZ>::Ptr camFilteredPts(new pcl::PointCloud<pcl::PointXYZ>());
vector<int> camFilteredPts_idx;
//Check if the moving object is visible in the camera
bool success = getVisibleCamPointCloud(ptr_movObj3DPtsWorld, &camFilteredPts_idx);
if (!success) {
delList.push_back(i);
continue;
}
camFilteredPts->reserve(camFilteredPts_idx.size());
for(auto& j : camFilteredPts_idx){
camFilteredPts->push_back(movObj3DPtsWorld[i][j]);
}
//Check if, due to the changed camera position, some 3D points are occluded by others of the same moving object
success = filterNotVisiblePts(camFilteredPts, actCorrsOnMovObjFromLast_IdxWorld[i]);
if (!success) {
actCorrsOnMovObjFromLast_IdxWorld[i].clear();
success = filterNotVisiblePts(camFilteredPts, actCorrsOnMovObjFromLast_IdxWorld[i], true);
if (!success) {
if (actCorrsOnMovObjFromLast_IdxWorld[i].size() < 5) {
delList.push_back(i);
continue;
}
}
}
filteredOccludedPts[i].reset(new pcl::PointCloud<pcl::PointXYZ>());
for(auto& j : actCorrsOnMovObjFromLast_IdxWorld[i]){
filteredOccludedPts[i]->push_back(camFilteredPts->at((size_t)j));
j = camFilteredPts_idx[j];
}
//Convert 3D points from world into camera coordinates
filteredOccludedCamPts[i].reset(new pcl::PointCloud<pcl::PointXYZ>());
pcl::transformPointCloud(*filteredOccludedPts[i].get(), *filteredOccludedCamPts[i].get(), obj_transform);
//Get the mean distance of every moving object to the camera
if (movObjSize > 1) {
pcl::PointXYZ mocentroid;
getCloudCentroid(*filteredOccludedCamPts[i].get(), mocentroid);
meanDists[i] = make_pair(mocentroid.z, i);
}
}
if ((movObjSize - delList.size()) > 1) {
//Check, if there are overlaps between moving objects and filter 3D points that would be behind another moving object
sort(meanDists.begin(), meanDists.end(), [](std::pair<float, int> first, std::pair<float, int> second) {
return first.first < second.first;
});
int sqrSi = csurr.rows;
int posadd = (sqrSi - 1) / 2;
Mat globMOmask = Mat::zeros(imgSize, CV_8UC1);
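//Process the moving objects from nearest to farthest; back-projected points that fall into the image area
//already claimed by a nearer object (globMOmask) are discarded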
for (size_t i = 0; i < movObjSize; i++) {
int idx = meanDists[i].second;
if (idx < 0)
continue;
// bool kpOutofR = false;
vector<int> keyPDelList;
Mat locMOmask = Mat::zeros(imgSize.height + sqrSi - 1, imgSize.width + sqrSi - 1, CV_8UC1);
std::vector<cv::Point> keypointsMO(filteredOccludedCamPts[idx]->size());
for (int j = 0; j < (int)filteredOccludedCamPts[idx]->size(); ++j) {
Mat pt = K1 * (Mat_<double>(3, 1) << (double) (*filteredOccludedCamPts[idx])[j].x,
(double) (*filteredOccludedCamPts[idx])[j].y,
(double) (*filteredOccludedCamPts[idx])[j].z);
if(nearZero(pt.at<double>(2))){
keyPDelList.push_back(j);
continue;
}
pt /= pt.at<double>(2);
keypointsMO[j].x = (int) round(pt.at<double>(0));
keypointsMO[j].y = (int) round(pt.at<double>(1));
if ((keypointsMO[j].x < 0) || (keypointsMO[j].y < 0) ||
(keypointsMO[j].x >= imgSize.width) ||
(keypointsMO[j].y >= imgSize.height)) {
// kpOutofR = true;
keyPDelList.push_back(j);
continue;
}
Mat s_tmp = locMOmask(Rect(keypointsMO[j], Size(sqrSi, sqrSi)));
csurr.copyTo(s_tmp);
}
if (!keyPDelList.empty()) {
for (int j = (int) keyPDelList.size() - 1; j >= 0; j--) {
keypointsMO.erase(keypointsMO.begin() + keyPDelList[j]);
filteredOccludedCamPts[idx]->erase(filteredOccludedCamPts[idx]->begin() + keyPDelList[j]);
actCorrsOnMovObjFromLast_IdxWorld[idx].erase(actCorrsOnMovObjFromLast_IdxWorld[idx].begin() + keyPDelList[j]);
}
if (filteredOccludedCamPts[idx]->empty()) {
delList.push_back(idx);
continue;
}
}
if (verbose & SHOW_BACKPROJECT_OCCLUSIONS_MOV_OBJ) {
if(!writeIntermediateImg(locMOmask > 0, "backprojected_moving_object_keypoints")){
namedWindow("Backprojected moving object keypoints", WINDOW_AUTOSIZE);
imshow("Backprojected moving object keypoints", locMOmask > 0);
waitKey(0);
destroyWindow("Backprojected moving object keypoints");
}
}
Mat resMOmask;
std::vector<vector<cv::Point>> hullPts(1);
genMovObjHulls(locMOmask, keypointsMO, resMOmask, &hullPts[0]);
/*namedWindow("Backprojected moving object area using convex hull", WINDOW_AUTOSIZE);
imshow("Backprojected moving object area using convex hull", resMOmask > 0);*/
locMOmask = (resMOmask(Rect(Point(posadd, posadd), imgSize)) > 0);
if (keypointsMO.size() > 2) {
Mat hullMat = Mat::zeros(imgSize, CV_8UC1);
drawContours(hullMat, hullPts, -1, Scalar(255), FILLED);
locMOmask &= hullMat;
}
if (verbose & SHOW_BACKPROJECT_OCCLUSIONS_MOV_OBJ) {
if(!writeIntermediateImg(locMOmask, "backprojected_moving_object_area_final")){
namedWindow("Backprojected moving object area final", WINDOW_AUTOSIZE);
imshow("Backprojected moving object area final", locMOmask);
}
}
Mat overlaps = globMOmask & locMOmask;
if (verbose & SHOW_BACKPROJECT_OCCLUSIONS_MOV_OBJ) {
if(!writeIntermediateImg(overlaps, "overlap_with_other_moving_objects")){
namedWindow("Overlap with other moving objects", WINDOW_AUTOSIZE);
imshow("Overlap with other moving objects", overlaps);
waitKey(0);
destroyWindow("Overlap with other moving objects");
destroyWindow("Backprojected moving object area final");
}
}
// destroyWindow("Backprojected moving object area using convex hull");
if (cv::countNonZero(overlaps) > 0) {
/*if(kpOutofR)
{
throw SequenceException("Backprojected image coordinate of moving object is out of range.");
}*/
vector<int> filteredOccludedPts_idx_tmp;
filteredOccludedPts_idx_tmp.reserve(actCorrsOnMovObjFromLast_IdxWorld[idx].size());
for (int j = 0; j < (int)keypointsMO.size(); ++j) {
if (overlaps.at<unsigned char>(keypointsMO[j]) == 0) {
movObj3DPtsCam[idx].push_back(cv::Point3d((double) (*filteredOccludedCamPts[idx])[j].x,
(double) (*filteredOccludedCamPts[idx])[j].y,
(double) (*filteredOccludedCamPts[idx])[j].z));
filteredOccludedPts_idx_tmp.push_back(actCorrsOnMovObjFromLast_IdxWorld[idx][j]);
}
}
if (movObj3DPtsCam[idx].empty()) {
delList.push_back(idx);
}
actCorrsOnMovObjFromLast_IdxWorld[idx] = filteredOccludedPts_idx_tmp;
} else {
for (int j = 0; j < (int)filteredOccludedCamPts[idx]->size(); ++j) {
movObj3DPtsCam[idx].push_back(cv::Point3d((double) (*filteredOccludedCamPts[idx])[j].x,
(double) (*filteredOccludedCamPts[idx])[j].y,
(double) (*filteredOccludedCamPts[idx])[j].z));
}
}
globMOmask |= locMOmask;
if (verbose & SHOW_BACKPROJECT_OCCLUSIONS_MOV_OBJ) {
if(!writeIntermediateImg(globMOmask, "global_backprojected_moving_objects_mask")){
namedWindow("Global backprojected moving objects mask", WINDOW_AUTOSIZE);
imshow("Global backprojected moving objects mask", globMOmask);
waitKey(0);
destroyWindow("Global backprojected moving objects mask");
}
}
}
} else if (delList.empty()) {
for (int j = 0; j < (int)filteredOccludedCamPts[0]->size(); ++j) {
movObj3DPtsCam[0].push_back(cv::Point3d((double) (*filteredOccludedCamPts[0])[j].x,
(double) (*filteredOccludedCamPts[0])[j].y,
(double) (*filteredOccludedCamPts[0])[j].z));
}
}
sort(delList.begin(), delList.end());
if (!delList.empty()) {
for (int i = (int) delList.size() - 1; i >= 0; i--) {
movObj3DPtsCam.erase(movObj3DPtsCam.begin() + delList[i]);
movObj3DPtsWorld.erase(movObj3DPtsWorld.begin() +
delList[i]);//at this time, also moving objects that are only occluded are deleted
movObjFrameEmerge.erase(movObjFrameEmerge.begin() + delList[i]);
movObjImg12TP_InitIdxWorld.erase(movObjImg12TP_InitIdxWorld.begin() + delList[i]);
movObjDepthClass.erase(movObjDepthClass.begin() + delList[i]);
movObjWorldMovement.erase(movObjWorldMovement.begin() + delList[i]);
actCorrsOnMovObjFromLast_IdxWorld.erase(actCorrsOnMovObjFromLast_IdxWorld.begin() + delList[i]);
}
}
}
void genStereoSequ::getCamPtsFromWorld() {
if (staticWorld3DPts->empty()) {
return;
}
actImgPointCloudFromLast.clear();
actCorrsImg12TPFromLast_IdxWorld.clear();
// pcl::PointCloud<pcl::PointXYZ>::Ptr ptr_actImgPointCloudFromLast(staticWorld3DPts.makeShared());
//Filtering of occluded points when back-projecting existing 3D world coordinates to the image plane
//can be disabled, as it is very time-consuming
if(filter_occluded_points) {
vector<pcl::PointCloud<pcl::PointXYZ>::Ptr> camFilteredPtsParts;
vector<vector<int>> camFilteredPtsParts_idx;
const int partsx = 5;
const int partsy = 5;
const int partsxy = partsx * partsy;
// bool success = getVisibleCamPointCloudSlices(staticWorld3DPts, camFilteredPtsParts, partsy, partsx, 0, 0);
bool success = getVisibleCamPointCloudSlicesAndDepths(staticWorld3DPts,
camFilteredPtsParts,
camFilteredPtsParts_idx,
partsy,
partsx);
if (!success) {
return;
}
pcl::PointCloud<pcl::PointXYZ>::Ptr occludedPtsAll(new pcl::PointCloud<pcl::PointXYZ>());
success = false;
for (int i = 0; i < partsxy; i++) {
if (camFilteredPtsParts[i].get() == nullptr) continue;
if (camFilteredPtsParts[i]->empty()) continue;
vector<int> filteredOccludedPts_idx;
pcl::PointCloud<pcl::PointXYZ>::Ptr occludedPts(new pcl::PointCloud<pcl::PointXYZ>());
bool success1 = filterNotVisiblePts(camFilteredPtsParts[i], filteredOccludedPts_idx, false, false,
occludedPts);
if (!success1) {
filteredOccludedPts_idx.clear();
occludedPts->clear();
success |= filterNotVisiblePts(camFilteredPtsParts[i], filteredOccludedPts_idx, true, false,
occludedPts);
if (!filteredOccludedPts_idx.empty()) {
actCorrsImg12TPFromLast_IdxWorld.reserve(
actCorrsImg12TPFromLast_IdxWorld.size() + filteredOccludedPts_idx.size());
for (auto &j : filteredOccludedPts_idx) {
actCorrsImg12TPFromLast_IdxWorld.push_back(camFilteredPtsParts_idx[i][j]);
}
}
if (!occludedPts->empty()) {
occludedPtsAll->insert(occludedPtsAll->end(), occludedPts->begin(), occludedPts->end());
}
} else {
success = true;
actCorrsImg12TPFromLast_IdxWorld.reserve(
actCorrsImg12TPFromLast_IdxWorld.size() + filteredOccludedPts_idx.size());
for (auto &j : filteredOccludedPts_idx) {
actCorrsImg12TPFromLast_IdxWorld.push_back(camFilteredPtsParts_idx[i][j]);
}
if (!occludedPts->empty()) {
occludedPtsAll->insert(occludedPtsAll->end(), occludedPts->begin(), occludedPts->end());
}
}
}
if (!actCorrsImg12TPFromLast_IdxWorld.empty() || !occludedPtsAll->empty()) {
if (verbose & SHOW_BACKPROJECT_OCCLUSIONS_STAT_OBJ) {
pcl::PointCloud<pcl::PointXYZ>::Ptr filteredOccludedPtsAll(new pcl::PointCloud<pcl::PointXYZ>());
filteredOccludedPtsAll->reserve(actCorrsImg12TPFromLast_IdxWorld.size());
for (auto &i : actCorrsImg12TPFromLast_IdxWorld) {
filteredOccludedPtsAll->push_back(staticWorld3DPts->at((size_t) i));
}
visualizeOcclusions(filteredOccludedPtsAll, occludedPtsAll, 1.0);
}
}
if (!success) {
if (actCorrsImg12TPFromLast_IdxWorld.empty()) {
return;
}
}
}else {
bool success = getVisibleCamPointCloud(staticWorld3DPts, &actCorrsImg12TPFromLast_IdxWorld);
if (!success) {
return;
}
}
for (auto& j : actCorrsImg12TPFromLast_IdxWorld) {
cv::Point3d pt = Point3d((double) staticWorld3DPts->at((size_t)j).x, (double) staticWorld3DPts->at((size_t)j).y,
(double) staticWorld3DPts->at((size_t)j).z);
Mat ptm = Mat(pt, false).reshape(1, 3);
ptm = absCamCoordinates[actFrameCnt].R.t() * (ptm -
absCamCoordinates[actFrameCnt].t);//physical memory of pt and ptm must be the same
actImgPointCloudFromLast.emplace_back(pt);
}
}
//Calculates the actual camera pose in a different camera coordinate system (X forward, Y up, Z right) to be able to use the PCL filter FrustumCulling
void genStereoSequ::getActEigenCamPose() {
Eigen::Affine3f cam_pose;
cam_pose.setIdentity();
Eigen::Vector3d te;
Eigen::Matrix3d Re;
cv::cv2eigen(absCamCoordinates[actFrameCnt].R, Re);
cv::cv2eigen(absCamCoordinates[actFrameCnt].t, te);
cam_pose.matrix().block<3, 3>(0, 0) = Re.cast<float>();
cam_pose.matrix().block<3, 1>(0, 3) = te.cast<float>();
Eigen::Matrix4f pose_orig = cam_pose.matrix();
Eigen::Matrix4f cam2robot;
cam2robot
<< 0, 0, 1.f, 0,//To convert from the traditional camera coordinate system (X right, Y down, Z forward) to (X is forward, Y is up, and Z is right)
0, -1.f, 0, 0,
1.f, 0, 0, 0,
0, 0, 0, 1.f;
//X_w = R_c2w * R_z^T * R_y^T * (R_y * R_z * X_c); cam2robot = R_z^T * R_y^T; X_w is in the normal (z forward, y down, x right) coordinate system
//For the conversion from normal X_n to the other X_o (x forward, y up, z right) coordinate system: X_o = R_y * R_z * X_n (R_z rotation followed by R_y rotation)
//R_y is a -90deg and R_z a 180deg rotation
actCamPose = pose_orig * cam2robot;
Eigen::Vector4d quat;
MatToQuat(Re, quat);
quatNormalise(quat);
actCamRot = Eigen::Quaternionf(quat.cast<float>());
}
//Split the visible point cloud (through the camera) into a few slices to be able to use smaller leaf sizes in the
// pcl::VoxelGridOcclusionEstimation function as the number of usable voxels is bounded by a 32bit index
//Moreover, split each slice into a near and a far part and use for each depth region a different voxel size based on the
//mean point cloud slice distance (bigger voxels for 3D points more distant to the camera)
bool genStereoSequ::getVisibleCamPointCloudSlicesAndDepths(pcl::PointCloud<pcl::PointXYZ>::Ptr cloudIn,
std::vector<pcl::PointCloud<pcl::PointXYZ>::Ptr> &cloudOut,
std::vector<std::vector<int>> &cloudOut_idx,
int fovDevideVertical,
int fovDevideHorizontal) {
if (cloudIn->empty()) return false;
if(!getVisibleCamPointCloudSlicesAndDepths(cloudIn, cloudOut_idx, fovDevideVertical, fovDevideHorizontal)){
return false;
}
cloudOut.resize(cloudOut_idx.size());
for(size_t i = 0; i < cloudOut_idx.size(); i++){
if(!cloudOut_idx[i].empty()){
cloudOut[i].reset(new pcl::PointCloud<pcl::PointXYZ>());
cloudOut[i]->reserve(cloudOut_idx[i].size());
for(auto& j : cloudOut_idx[i]){
cloudOut[i]->push_back(cloudIn->at(j));
}
}
}
return true;
}
//Split the visible point cloud (through the camera) into a few slices to be able to use smaller leaf sizes in the
// pcl::VoxelGridOcclusionEstimation function as the number of usable voxels is bounded by a 32bit index
//Moreover, split each slice into a near and a far part and use for each depth region a different voxel size based on the
//mean point cloud slice distance (bigger voxels for 3D points more distant to the camera)
bool genStereoSequ::getVisibleCamPointCloudSlicesAndDepths(pcl::PointCloud<pcl::PointXYZ>::Ptr cloudIn,
std::vector<std::vector<int>> &cloudOut,
int fovDevideVertical,
int fovDevideHorizontal) {
if (cloudIn->empty()) return false;
//Get point cloud slices with 3D point depths in the camera coordinate system from near to mid
int partsxy = fovDevideVertical * fovDevideHorizontal;
std::vector<std::vector<int>> cloudsNear_idx;
bool success1 = getVisibleCamPointCloudSlices(cloudIn,
cloudsNear_idx,
fovDevideVertical,
fovDevideHorizontal,
(float) actDepthNear,
(float) actDepthMid);
vector<pcl::PointCloud<pcl::PointXYZ>::Ptr> cloudsNear(cloudsNear_idx.size());
for(size_t i = 0; i < cloudsNear_idx.size(); i++){
cloudsNear[i].reset(new pcl::PointCloud<pcl::PointXYZ>());
cloudsNear[i]->reserve(cloudsNear_idx[i].size());
for(auto& j : cloudsNear_idx[i]){
cloudsNear[i]->push_back(cloudIn->at((size_t)j));
}
}
//Get point cloud slices with 3D point depths in the camera coordinate system from mid to far
std::vector<std::vector<int>> cloudsFar_idx;
bool success2 = getVisibleCamPointCloudSlices(cloudIn,
cloudsFar_idx,
fovDevideVertical,
fovDevideHorizontal,
(float) actDepthMid,
(float) (maxFarDistMultiplier * actDepthFar));
//Check for duplicates at the borders
if (success1 && success2) {
vector<pair<size_t, size_t>> delList;
for (size_t i = 0; i < cloudsNear_idx.size(); ++i) {
for (size_t j = 0; j < cloudsNear_idx[i].size(); ++j) {
for (size_t l = 0; l < cloudsFar_idx[i].size(); ++l) {
float dist = abs(cloudIn->at(cloudsNear_idx[i][j]).x - cloudIn->at(cloudsFar_idx[i][l]).x) +
abs(cloudIn->at(cloudsNear_idx[i][j]).y - cloudIn->at(cloudsFar_idx[i][l]).y) +
abs(cloudIn->at(cloudsNear_idx[i][j]).z - cloudIn->at(cloudsFar_idx[i][l]).z);
if (nearZero((double) dist)) {
delList.emplace_back(make_pair(i, l));
}
}
}
}
if (!delList.empty()) {
for (int i = (int) delList.size() - 1; i >= 0; i--) {
cloudsFar_idx[delList[i].first].erase(cloudsFar_idx[delList[i].first].begin() + delList[i].second);
}
}
} else if (!success1 && !success2) {
return false;
}
vector<pcl::PointCloud<pcl::PointXYZ>::Ptr> cloudsFar(cloudsFar_idx.size());
for(size_t i = 0; i < cloudsFar_idx.size(); i++){
cloudsFar[i].reset(new pcl::PointCloud<pcl::PointXYZ>());
cloudsFar[i]->reserve(cloudsFar_idx[i].size());
for(auto& j : cloudsFar_idx[i]){
cloudsFar[i]->push_back(cloudIn->at((size_t)j));
}
}
//Filter near occluded 3D points with a smaller voxel size
vector<vector<int>> filteredOccludedPtsNear((size_t)partsxy);
if (success1) {
success1 = false;
for (int i = 0; i < partsxy; i++) {
if (cloudsNear_idx[i].empty()) continue;
bool success3 = filterNotVisiblePts(cloudsNear[i], filteredOccludedPtsNear[i], false, false);
if (!success3) {
vector<int> filteredOccludedPtsNear2;
success3 = filterNotVisiblePts(cloudsNear[i], filteredOccludedPtsNear2, true, false);
if (success3) {
filteredOccludedPtsNear[i] = filteredOccludedPtsNear2;
success1 = true;
}
} else {
success1 = true;
}
}
}
//Filter far occluded 3D points with a bigger voxel size
vector<vector<int>> filteredOccludedPtsFar((size_t)partsxy);
if (success2) {
success2 = false;
for (int i = 0; i < partsxy; i++) {
if (cloudsFar_idx[i].empty()) continue;
bool success3 = filterNotVisiblePts(cloudsFar[i], filteredOccludedPtsFar[i], false, false);
if (!success3) {
vector<int> filteredOccludedPtsFar2;
success3 = filterNotVisiblePts(cloudsFar[i], filteredOccludedPtsFar2, true, false);
if (success3) {
filteredOccludedPtsFar[i] = filteredOccludedPtsFar2;
success2 = true;
}
} else {
success2 = true;
}
}
}
//Combine near and far filtered point clouds
success1 = false;
cloudOut = std::vector<std::vector<int>>(partsxy);
for(int i = 0; i < partsxy; i++){
for(auto& j : filteredOccludedPtsNear[i]){
cloudOut[i].push_back(cloudsNear_idx[i][j]);
}
}
for (int i = 0; i < partsxy; i++) {
if (!filteredOccludedPtsFar[i].empty()) {
cloudOut[i].reserve(cloudOut[i].size() + filteredOccludedPtsFar[i].size());
for(auto& j : filteredOccludedPtsFar[i]){
cloudOut[i].push_back(cloudsFar_idx[i][j]);
}
}
if (!cloudOut[i].empty()) success1 = true;
}
return success1;
}
bool genStereoSequ::getVisibleCamPointCloudSlices(pcl::PointCloud<pcl::PointXYZ>::Ptr cloudIn,
std::vector<pcl::PointCloud<pcl::PointXYZ>::Ptr> &cloudOut,
int fovDevideVertical,
int fovDevideHorizontal,
float minDistance,
float maxDistance) {
if (cloudIn->empty()) return false;
pcl::PointCloud<pcl::PointXYZ>::Ptr camFilteredPts(new pcl::PointCloud<pcl::PointXYZ>());
bool success = getVisibleCamPointCloud(cloudIn, camFilteredPts, 0, 0, 0, 0, minDistance, maxDistance);
if (!success) {
return false;
}
cloudOut = vector<pcl::PointCloud<pcl::PointXYZ>::Ptr>((size_t)(fovDevideVertical * fovDevideHorizontal));
// int checkSizePtCld = 0;
success = false;
for (int y = 0; y < fovDevideVertical; ++y) {
for (int x = 0; x < fovDevideHorizontal; ++x) {
cloudOut[y * fovDevideHorizontal + x].reset(new pcl::PointCloud<pcl::PointXYZ>());
success |= getVisibleCamPointCloud(camFilteredPts,
cloudOut[y * fovDevideHorizontal + x],
fovDevideVertical,
fovDevideHorizontal,
y,
x,
minDistance,
maxDistance);
// checkSizePtCld += (int)cloudOut[y * fovDevideHorizontal + x]->size();
}
}
//Remove duplicates
vector<pair<size_t, size_t>> delList;
for (size_t i = 0; i < cloudOut.size(); ++i) {
for (size_t j = 0; j < cloudOut[i]->size(); ++j) {
for (size_t k = i; k < cloudOut.size(); ++k) {
for (size_t l = (k == i) ? (j + 1) : 0; l < cloudOut[k]->size(); ++l) {
float dist = abs(cloudOut[i]->at(j).x - cloudOut[k]->at(l).x) +
abs(cloudOut[i]->at(j).y - cloudOut[k]->at(l).y) +
abs(cloudOut[i]->at(j).z - cloudOut[k]->at(l).z);
if (nearZero((double) dist)) {
delList.emplace_back(make_pair(i, j));
}
}
}
}
}
if (!delList.empty()) {
for (int i = (int) delList.size() - 1; i >= 0; i--) {
cloudOut[delList[i].first]->erase(cloudOut[delList[i].first]->begin() + delList[i].second);
// checkSizePtCld--;
}
}
//Typically, the total number of 3D points returned over all slices is smaller by 1 than for the whole region (reason unclear)
/*if(checkSizePtCld != camFilteredPts->size()){
cout << "Not working" << endl;
}*/
return success;
}
bool genStereoSequ::getVisibleCamPointCloudSlices(pcl::PointCloud<pcl::PointXYZ>::Ptr cloudIn,
std::vector<std::vector<int>> &cloudOut,
int fovDevideVertical,
int fovDevideHorizontal,
float minDistance,
float maxDistance) {
if (cloudIn->empty()) return false;
std::vector<int> camFilteredPts_idx;
bool success = getVisibleCamPointCloud(cloudIn, &camFilteredPts_idx, 0, 0, 0, 0, minDistance, maxDistance);
if (!success) {
return false;
}
pcl::PointCloud<pcl::PointXYZ>::Ptr camFilteredPts(new pcl::PointCloud<pcl::PointXYZ>());
camFilteredPts->reserve(camFilteredPts_idx.size());
for(auto& i : camFilteredPts_idx){
camFilteredPts->push_back(cloudIn->at((size_t)i));
}
cloudOut = vector<std::vector<int>>((size_t)(fovDevideVertical * fovDevideHorizontal));
// int checkSizePtCld = 0;
success = false;
for (int y = 0; y < fovDevideVertical; ++y) {
for (int x = 0; x < fovDevideHorizontal; ++x) {
success |= getVisibleCamPointCloud(camFilteredPts,
&cloudOut[y * fovDevideHorizontal + x],
fovDevideVertical,
fovDevideHorizontal,
y,
x,
minDistance,
maxDistance);
// checkSizePtCld += (int)cloudOut[y * fovDevideHorizontal + x]->size();
}
}
//Remove duplicates
vector<pair<size_t, size_t>> delList;
for (size_t i = 0; i < cloudOut.size(); ++i) {
for (size_t j = 0; j < cloudOut[i].size(); ++j) {
for (size_t k = i; k < cloudOut.size(); ++k) {
for (size_t l = (k == i) ? (j + 1) : 0; l < cloudOut[k].size(); ++l) {
float dist = abs(camFilteredPts->at(cloudOut[i][j]).x - camFilteredPts->at(cloudOut[k][l]).x) +
abs(camFilteredPts->at(cloudOut[i][j]).y - camFilteredPts->at(cloudOut[k][l]).y) +
abs(camFilteredPts->at(cloudOut[i][j]).z - camFilteredPts->at(cloudOut[k][l]).z);
if (nearZero((double) dist)) {
delList.emplace_back(make_pair(i, j));
}
}
}
}
}
if (!delList.empty()) {
for (int i = (int) delList.size() - 1; i >= 0; i--) {
cloudOut[delList[i].first].erase(cloudOut[delList[i].first].begin() + delList[i].second);
// checkSizePtCld--;
}
}
for(auto&& i : cloudOut){
for(auto&& j : i){
j = camFilteredPts_idx[j];
}
}
//Typically, the total number of 3D points returned over all slices is smaller by 1 than for the whole region (reason unclear)
/*if(checkSizePtCld != camFilteredPts->size()){
cout << "Not working" << endl;
}*/
return success;
}
//Get the part of a point cloud that is visible in the camera.
// The function supports slicing of the truncated pyramid (frustum of the pyramid) given the number of slices in vertical
// and horizontal direction and their 0-based indices (this function must be called for every slice separately). Before
// using slicing, the function should be called without slicing and the result of this call should be provided to the
// function with slicing
//The output variable can only be of type pcl::PointCloud<pcl::PointXYZ>::Ptr or *std::vector<int>
template<typename T>
bool genStereoSequ::getVisibleCamPointCloud(pcl::PointCloud<pcl::PointXYZ>::Ptr cloudIn,
T cloudOut,
int fovDevideVertical,
int fovDevideHorizontal,
int returnDevPartNrVer,
int returnDevPartNrHor,
float minDistance,
float maxDistance) {
if (fovDevideVertical || fovDevideHorizontal || !nearZero((double) minDistance) ||
!nearZero((double) maxDistance)) {
CV_Assert((fovDevideVertical == 0) || ((returnDevPartNrVer >= 0) && (returnDevPartNrVer < fovDevideVertical)));
CV_Assert((fovDevideHorizontal == 0) ||
((returnDevPartNrHor >= 0) && (returnDevPartNrHor < fovDevideHorizontal)));
CV_Assert(nearZero((double) minDistance) || ((minDistance >= (float) actDepthNear) &&
(((minDistance < maxDistance) || nearZero((double) maxDistance)) &&
(minDistance < (float) (maxFarDistMultiplier * actDepthFar)))));
CV_Assert(nearZero((double) maxDistance) || ((maxDistance <= (float) (maxFarDistMultiplier * actDepthFar)) &&
((minDistance < maxDistance) &&
(maxDistance > (float) actDepthNear))));
}
pcl::FrustumCulling<pcl::PointXYZ> fc;
fc.setInputCloud(cloudIn);
float verFOV = 2.f * std::atan((float) imgSize.height / (2.f * (float) K1.at<double>(1, 1)));
float horFOV = 2.f * std::atan((float) imgSize.width / (2.f * (float) K1.at<double>(0, 0)));
if (fovDevideVertical) {
verFOV /= (float) fovDevideVertical;
}
if (fovDevideHorizontal) {
horFOV /= (float) fovDevideHorizontal;
}
fc.setVerticalFOV(180.f * verFOV / (float) M_PI);
fc.setHorizontalFOV(180.f * horFOV / (float) M_PI);
float mimaDistance[2];
if (nearZero((double) minDistance)) {
mimaDistance[0] = (float) actDepthNear;
// fc.setNearPlaneDistance((float) actDepthNear);
} else {
mimaDistance[0] = minDistance;
// fc.setNearPlaneDistance(minDistance);
}
if (nearZero((double) maxDistance)) {
mimaDistance[1] = (float) (maxFarDistMultiplier * actDepthFar);
// fc.setFarPlaneDistance((float) (maxFarDistMultiplier * actDepthFar));
} else {
mimaDistance[1] = maxDistance;
// fc.setFarPlaneDistance(maxDistance);
}
if (fovDevideVertical || fovDevideHorizontal) {
Eigen::Matrix4f rotVer, rotHor, rotBoth;
bool bothAngNotZero = false;
double resalpha = 0;
if (fovDevideVertical) {
float angV = verFOV * ((float) returnDevPartNrVer - (float) fovDevideVertical / 2.f + 0.5f);
if (!nearZero((double) angV)) {
bothAngNotZero = true;
resalpha = (double) angV;
}
/*rotVer
<< 1.f, 0, 0, 0,
0, cos(angV), -sin(angV), 0,
0, sin(angV), cos(angV), 0,
0, 0, 0, 1.f;*/
rotVer
<< cos(angV), -sin(angV), 0, 0,
sin(angV), cos(angV), 0, 0,
0, 0, 1.f, 0,
0, 0, 0, 1.f;
rotVer.transposeInPlace();
} else {
rotVer.setIdentity();
}
if (fovDevideHorizontal) {
float angH = horFOV * ((float) returnDevPartNrHor - (float) fovDevideHorizontal / 2.f + 0.5f);
if (nearZero((double) angH) && bothAngNotZero) {
bothAngNotZero = false;
} else if (!nearZero((double) angH)) {
resalpha = (double) angH;
}
rotHor
<< cos(angH), 0, sin(angH), 0,
0, 1.f, 0, 0,
-sin(angH), 0, cos(angH), 0,
0, 0, 0, 1.f;
rotHor.transposeInPlace();
} else {
rotHor.setIdentity();
}
rotBoth = actCamPose * rotHor * rotVer;
fc.setCameraPose(rotBoth);
if (bothAngNotZero) {
resalpha = rotDiff(actCamPose, rotBoth);
}
if (!nearZero(resalpha)) {
resalpha = resalpha < 0 ? -resalpha : resalpha;
while(resalpha > 2 * M_PI){
resalpha -= 2 * M_PI;
}
if (!nearZero(resalpha)) {
double tmp = cos(resalpha);
tmp *= tmp * tmp;
if(!nearZero(tmp)) {
//Adapt the minimum and maximum distance, as the given distances (which define 2 planes) appear
// under a different angle compared to the original planes without slicing
mimaDistance[0] = (float)(((double)mimaDistance[0] * cos(2.0 * resalpha)) / tmp);
mimaDistance[1] = mimaDistance[1] / (float)tmp;
}
}
}
} else {
fc.setCameraPose(actCamPose);
}
fc.setNearPlaneDistance(mimaDistance[0]);
fc.setFarPlaneDistance(mimaDistance[1]);
fc.filter(*cloudOut);
if (cloudOut->empty())
return false;
return true;
}
//Filters occluded 3D points based on a voxel size corresponding to 1 pixel (when projected to the image plane) at near_depth + (medium depth - near_depth) / 2
//Returns false if more than 33% are occluded
bool genStereoSequ::filterNotVisiblePts(pcl::PointCloud<pcl::PointXYZ>::Ptr cloudIn,
pcl::PointCloud<pcl::PointXYZ>::Ptr cloudOut,
bool useNearLeafSize,
bool visRes,
pcl::PointCloud<pcl::PointXYZ>::Ptr cloudOccluded) {
if (cloudIn->empty())
return false;
vector<int> cloudOut_idx;
bool success = filterNotVisiblePts(cloudIn, cloudOut_idx, useNearLeafSize, visRes, cloudOccluded);
if(!cloudOut_idx.empty()){
cloudOut->reserve(cloudOut_idx.size());
for(auto& i : cloudOut_idx){
cloudOut->push_back(cloudIn->at((size_t)i));
}
}
return success;
}
//Filters occluded 3D points based on a voxel size corresponding to 1 pixel (when projected to the image plane) at near_depth + (medium depth - near_depth) / 2
//Returns false if more than 33% are occluded
bool genStereoSequ::filterNotVisiblePts(const pcl::PointCloud<pcl::PointXYZ>::Ptr& cloudIn,
std::vector<int> &cloudOut,
bool useNearLeafSize,
bool visRes,
const pcl::PointCloud<pcl::PointXYZ>::Ptr& cloudOccluded) {
if (cloudIn->empty())
return false;
cloudIn->sensor_origin_ = Eigen::Vector4f((float) absCamCoordinates[actFrameCnt].t.at<double>(0),
(float) absCamCoordinates[actFrameCnt].t.at<double>(1),
(float) absCamCoordinates[actFrameCnt].t.at<double>(2), 1.f);
cloudIn->sensor_orientation_ = actCamRot;
pcl::VoxelGridOcclusionEstimation<pcl::PointXYZ> voxelFilter;
voxelFilter.setInputCloud(cloudIn);
float leaf_size;
pcl::PointXYZ cloudCentroid;
getCloudCentroid(*cloudIn.get(), cloudCentroid);
double usedZ;
if (useNearLeafSize) {
pcl::PointXYZ cloudDim;
getCloudDimensionStdDev(*cloudIn.get(), cloudDim, cloudCentroid);
double x[2], y[2], z[2];
x[0] = (double) (cloudCentroid.x + cloudDim.x);
x[1] = (double) (cloudCentroid.x - cloudDim.x);
y[0] = (double) (cloudCentroid.y + cloudDim.y);
y[1] = (double) (cloudCentroid.y - cloudDim.y);
z[0] = (double) (cloudCentroid.z + cloudDim.z);
z[1] = (double) (cloudCentroid.z - cloudDim.z);
double minZ = DBL_MAX;
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 2; ++j) {
for (int k = 0; k < 2; ++k) {
Mat Xw = (Mat_<double>(3, 1) << x[i], y[j], z[k]);
Mat Xc = absCamCoordinates[actFrameCnt].R.t() * (Xw - absCamCoordinates[actFrameCnt].t);
double ptz = Xc.at<double>(2);
if ((ptz < minZ) && (ptz > actDepthNear)) {
minZ = ptz;
}
}
}
}
if (minZ > maxFarDistMultiplier * actDepthFar) {
Mat Xw = (Mat_<double>(3, 1)
<< (double) cloudCentroid.x, (double) cloudCentroid.y, (double) cloudCentroid.z);
Mat Xc = absCamCoordinates[actFrameCnt].R.t() * (Xw - absCamCoordinates[actFrameCnt].t);
minZ = Xc.at<double>(2);
}
usedZ = minZ;
leaf_size = (float) minZ;
} else {
Mat Xw = (Mat_<double>(3, 1) << (double) cloudCentroid.x, (double) cloudCentroid.y, (double) cloudCentroid.z);
Mat Xc = absCamCoordinates[actFrameCnt].R.t() * (Xw - absCamCoordinates[actFrameCnt].t);
usedZ = Xc.at<double>(2);
leaf_size = (float) usedZ;
}
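// A voxel with side length Z / fx (fx = K1(0,0)) projects to roughly one pixel at depth Z in the pinhole model,
// so dividing the selected depth by the focal length below yields a leaf size of about 1 pixel when projected
// into the image plane.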
if(nearZero(usedZ))
usedZ = 1.0;
leaf_size /= (float) K1.at<double>(0, 0);
if(nearZero(leaf_size))
leaf_size = 0.1;
//Check if leaf size is too small for PCL (as there is a limitation within PCL)
Eigen::Vector4f min_p, max_p;
pcl::getMinMax3D(*cloudIn.get(), min_p, max_p);
float d1, d2, d3;
d1 = abs(max_p[0] - min_p[0]);
d2 = abs(max_p[1] - min_p[1]);
d3 = abs(max_p[2] - min_p[2]);
int64_t dx = static_cast<int64_t>(d1 / leaf_size) + 1;
int64_t dy = static_cast<int64_t>(d2 / leaf_size) + 1;
int64_t dz = static_cast<int64_t>(d3 / leaf_size) + 1;
auto maxIdxSize = static_cast<int64_t>(std::numeric_limits<int32_t>::max()) - 1;
if ((dx * dy * dz) > maxIdxSize) {
double kSi = (double) max(((csurr.rows - 1) / 2), 1);
kSi = kSi > 3.0 ? 3.0 : kSi;
leaf_size = (float) (kSi * usedZ / K1.at<double>(0, 0));
if(nearZero(leaf_size))
leaf_size = 0.1;
dx = static_cast<int64_t>(d1 / leaf_size) + 1;
dy = static_cast<int64_t>(d2 / leaf_size) + 1;
dz = static_cast<int64_t>(d3 / leaf_size) + 1;
while (((dx * dy * dz) > maxIdxSize) && ((kSi + DBL_EPSILON) < csurr.rows)) {
kSi++;
leaf_size = (float) (kSi * usedZ / K1.at<double>(0, 0));
dx = static_cast<int64_t>(d1 / leaf_size) + 1;
dy = static_cast<int64_t>(d2 / leaf_size) + 1;
dz = static_cast<int64_t>(d3 / leaf_size) + 1;
}
if ((dx * dy * dz) > maxIdxSize) {
double lNew = cbrt(ceil(100.0 * (double) d1 * (double) d2 * (double) d3 / (double) maxIdxSize) / 100.0);
const double maxlsenlarge = 1.2;
if (lNew > maxlsenlarge * (double) leaf_size) {
//Go on without filtering
// *cloudOut.get() = *cloudIn.get();
cloudOut.resize(cloudIn->size());
std::iota(cloudOut.begin(), cloudOut.end(), 0);
return true;
} else {
leaf_size = (float)lNew;
if(nearZero(leaf_size))
leaf_size = 0.1;
dx = static_cast<int64_t>(d1 / leaf_size) + 1;
dy = static_cast<int64_t>(d2 / leaf_size) + 1;
dz = static_cast<int64_t>(d3 / leaf_size) + 1;
double lsenlarge = 1.05;
while (((dx * dy * dz) > maxIdxSize) && (lsenlarge < maxlsenlarge)) {
leaf_size *= lsenlarge;
dx = static_cast<int64_t>(d1 / leaf_size) + 1;
dy = static_cast<int64_t>(d2 / leaf_size) + 1;
dz = static_cast<int64_t>(d3 / leaf_size) + 1;
lsenlarge *= 1.05;
}
if ((dx * dy * dz) > maxIdxSize) {
//Go on without filtering
// *cloudOut.get() = *cloudIn.get();
cloudOut.resize(cloudIn->size());
std::iota(cloudOut.begin(), cloudOut.end(), 0);
return true;
}
}
}
}
voxelFilter.setLeafSize(leaf_size, leaf_size,
leaf_size);//1 pixel (when projected to the image plane) at near_depth + (medium depth - near_depth) / 2
try {
voxelFilter.initializeVoxelGrid();
}catch (exception &e){
std::cerr << "Exception during filtering background: " << e.what() << endl;
std::cerr << "Skipping filtering step." << endl;
//Go on without filtering
// *cloudOut.get() = *cloudIn.get();
cloudOut.resize(cloudIn->size());
std::iota(cloudOut.begin(), cloudOut.end(), 0);
return true;
}
pcl::PointCloud<pcl::PointXYZ>::Ptr cloudOccluded_;//(new pcl::PointCloud<pcl::PointXYZ>);
if (cloudOccluded.get() != nullptr) {
cloudOccluded_ = cloudOccluded;
} else {
cloudOccluded_.reset(new pcl::PointCloud<pcl::PointXYZ>);
}
for (int i = 0; i < (int)cloudIn->size(); i++) {
Eigen::Vector3i grid_coordinates = voxelFilter.getGridCoordinates(cloudIn->points[i].x,
cloudIn->points[i].y,
cloudIn->points[i].z);
int grid_state;
int ret = voxelFilter.occlusionEstimation(grid_state, grid_coordinates);
if ((ret == 0) && (grid_state == 0)) {
cloudOut.push_back(i);
} else if ((ret == 0) && (verbose & SHOW_BACKPROJECT_OCCLUSIONS_MOV_OBJ)) {
cloudOccluded_->push_back(cloudIn->points[i]);
}
}
if (visRes && (verbose & SHOW_BACKPROJECT_OCCLUSIONS_MOV_OBJ)) {
pcl::PointCloud<pcl::PointXYZ>::Ptr cloudOut_pts(new pcl::PointCloud<pcl::PointXYZ>);
if(!cloudOut.empty()){
cloudOut_pts->reserve(cloudOut.size());
for(auto& i : cloudOut){
cloudOut_pts->push_back(cloudIn->at((size_t)i));
}
}
visualizeOcclusions(cloudOut_pts, cloudOccluded_, (double) leaf_size);
}
float fracOcc = (float) (cloudOut.size()) / (float) (cloudIn->size());
if (fracOcc < 0.67)
return false;
return true;
}
void genStereoSequ::visualizeOcclusions(pcl::PointCloud<pcl::PointXYZ>::Ptr cloudVisible,
pcl::PointCloud<pcl::PointXYZ>::Ptr cloudOccluded,
double ptSize) {
if (cloudVisible->empty() && cloudOccluded->empty())
return;
boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer(
new pcl::visualization::PCLVisualizer("Occlusions within an object"));
Eigen::Affine3f m = initPCLViewerCoordinateSystems(viewer, absCamCoordinates[actFrameCnt].R,
absCamCoordinates[actFrameCnt].t);
pcl::PointCloud<pcl::PointXYZRGB>::Ptr basic_cloud_ptr(new pcl::PointCloud<pcl::PointXYZRGB>);
for (auto &i : *cloudVisible.get()) {
pcl::PointXYZRGB point;
point.x = i.x;
point.y = i.y;
point.z = i.z;
point.b = 0;
point.g = 255;
point.r = 0;
basic_cloud_ptr->push_back(point);
}
for (auto &i : *cloudOccluded.get()) {
pcl::PointXYZRGB point;
point.x = i.x;
point.y = i.y;
point.z = i.z;
point.b = 0;
point.g = 0;
point.r = 255;
basic_cloud_ptr->push_back(point);
}
viewer->addPointCloud<pcl::PointXYZRGB>(basic_cloud_ptr, "visible and occluded points");
viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, ptSize,
"visible and occluded points");
setPCLViewerCamPars(viewer, m.matrix(), K1);
startPCLViewer(viewer);
}
//Check if 2D correspondence is projection of 3D point
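// The 2D point x (given in homogeneous pixel coordinates) is back-projected with the inverse camera matrix Ki and
// scaled to the depth of X, i.e. x1 = (X_z / (Ki * x)_z) * Ki * x; the returned error is the Euclidean distance
// between X and x1.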
double genStereoSequ::project3DError(const cv::Mat &x, const cv::Mat &X, const cv::Mat &Ki){
cv::Mat x1 = Ki * x;
if(nearZero(x1.at<double>(2))) return DBL_MAX;
x1 *= X.at<double>(2) / x1.at<double>(2);
Mat diff = X - x1;
return cv::norm(diff);
}
//Check if the 2D correspondences of both stereo cameras are projections of the corresponding 3D point
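// The first camera compares against X directly, the second camera against X transformed into its coordinate frame
// (actR * X + actT); the correspondence is accepted if the summed reprojection error of both views is negligible.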
bool genStereoSequ::checkCorrespondenceConsisty(const cv::Mat &x1, const cv::Mat &x2, const cv::Mat &X){
double err1 = project3DError(x1, X, K1i);
double err2 = project3DError(x2, actR * X + actT, K2i);
double err = err1 + err2;
bool test = nearZero(err / 10.0);
return test;
}
//Set camera matrices from outside to be able to use function checkCorrespondenceConsisty
void genStereoSequ::setCamMats(const cv::Mat &K1_, const cv::Mat &K2_){
K1 = K1_.clone();
K2 = K2_.clone();
K1i = K1.inv();
K2i = K2.inv();
}
//Check if the 3D points are consistent with their image projections
bool genStereoSequ::checkCorr3DConsistency(){
CV_Assert((actCorrsImg1TPFromLast.cols == actCorrsImg2TPFromLast.cols) && ((int)actCorrsImg12TPFromLast_Idx.size() == actCorrsImg1TPFromLast.cols));
for (int i = 0; i < actCorrsImg1TPFromLast.cols; ++i) {
Mat x1 = actCorrsImg1TPFromLast.col(i);
Mat x2 = actCorrsImg2TPFromLast.col(i);
Mat X = Mat(actImgPointCloudFromLast[actCorrsImg12TPFromLast_Idx[i]], true).reshape(1);
const bool check1 = checkCorrespondenceConsisty(x1, x2, X);
if (!check1){
return false;
}
}
return true;
}
//Perform the whole procedure of generating correspondences, new static, and dynamic 3D elements
void genStereoSequ::getNewCorrs() {
updateFrameParameters();
//Get pose of first camera in camera coordinates using a different coordinate system where X is forward, Y is up, and Z is right
getActEigenCamPose();
if (pars.nrMovObjs > 0) {
cv::Mat movObjMask;
int32_t corrsOnMovObjLF = 0;
bool calcNewMovObj = true;
if (actFrameCnt == 0) {
movObjMask = Mat::zeros(imgSize, CV_8UC1);
} else {
// Update the 3D world coordinates of movObj3DPtsWorld based on direction and velocity
updateMovObjPositions();
//Calculate movObj3DPtsCam from movObj3DPtsWorld: Get 3D-points of moving objects that are visible in the camera and transform them from the world coordinate system into camera coordinate system
getMovObjPtsCam();
//Generate maps (masks) of moving objects by backprojection from 3D for the first and second stereo camera: movObjMaskFromLast, movObjMaskFromLast2; create convex hulls: convhullPtsObj; and
//Check if some moving objects should be deleted
backProjectMovObj();
movObjMask = movObjMaskFromLast;
corrsOnMovObjLF = actCorrsOnMovObjFromLast;
//Generate seeds and areas for new moving objects
calcNewMovObj = getNewMovObjs();
if (calcNewMovObj) {
calcNewMovObj = getSeedsAreasMovObj();
}
}
if (calcNewMovObj) {
std::vector<cv::Point_<int32_t>> seeds;
std::vector<int32_t> areas;
if (getSeedAreaListFromReg(seeds, areas)) {
//Generate new moving objects and adapt the number of static correspondences per region
generateMovObjLabels(movObjMask, seeds, areas, corrsOnMovObjLF, actStereoImgsOverlapMask);
//Assign a depth category to each new moving object label and calculate all depth values for each label
genNewDepthMovObj();
//Generate correspondences and 3D points for new moving objects
getMovObjCorrs();
//Insert new 3D points (from moving objects) into world coordinate system
transMovObjPtsToWorld();
}
} else {
//Adapt the number of static correspondences based on the backprojected moving objects
adaptNrStaticCorrsBasedOnMovCorrs(movObjMask);
//Set the global mask for moving objects
combMovObjLabelsAll = movObjMaskFromLast;
movObjMask2All = movObjMaskFromLast2;
}
}
//Get 3D points of static elements and store them to actImgPointCloudFromLast
getCamPtsFromWorld();
//Backproject static 3D points
backProject3D();
//Generate seeds for generating depth areas and include the seeds found by backprojection of the 3D points of the last frames
checkDepthSeeds();
//Generate depth areas for the current image and static elements
genDepthMaps();
//Generates correspondences and 3D points in the camera coordinate system (including false matches) from static scene elements
getKeypoints();
//Combine correspondences of static and moving objects
combineCorrespondences();
//Insert new 3D coordinates into the world coordinate system
transPtsToWorld();
//Combine indices to the 3D world coordinates of all correspondences (from last, new generated, moving objects)
combineWorldCoordinateIndices();
}
//Start calculating the whole sequence
bool genStereoSequ::startCalc_internal() {
static unsigned char init = 1;
chrono::high_resolution_clock::time_point t1, t2;
t1 = chrono::high_resolution_clock::now();
if(init > 0) {
actFrameCnt = 0;
actCorrsPRIdx = 0;
actStereoCIdx = 0;
timePerFrame.clear();
if(init == 2){
resetInitVars();
}
init = 0;
}
if (actFrameCnt < totalNrFrames) {
getNewCorrs();
actFrameCnt++;
}else{
init = 2;
return false;
}
t2 = chrono::high_resolution_clock::now();
timePerFrame.emplace_back(chrono::duration_cast<chrono::microseconds>(t2 - t1).count());
return true;
}
void genStereoSequ::startCalc(){
if(!pars.parsAreValid){
cout << "Provide parameters for generating a sequence!" << endl;
return;
}
while(startCalc_internal());
}
|
The convex hull of a set $s$ is a subset of the span of $s$.
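In brief: every element of the convex hull is a convex combination $\sum_i t_i x_i$ with $x_i \in s$, $t_i \ge 0$ and $\sum_i t_i = 1$, and such a combination is in particular a linear combination of elements of $s$, hence lies in the span of $s$.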
|
import tactic
import data.rel
import data.vector
import data.nat.basic
import ruby.defs
open rel vector nat
variables {α β γ δ ε φ ψ : Type}
/- Basic lemmas -/
--@[simp]
lemma rel_seq_assoc (r : rel α β) (s : rel β γ) (t : rel γ δ) :
r ;; (s ;; t) = (r ;; s) ;; t := (comp_assoc r s t).symm
@[simp]
lemma seq_par_dist (r : rel α β) (s : rel β γ) (t : rel δ ε) (u : rel ε φ) :
[r ;; s, t ;; u] = [r, t] ;; [s, u] :=
begin
ext ⟨a,d⟩ ⟨c,f⟩,
split,
{ rintro ⟨⟨b,rab,sbc⟩,⟨e,tde,uef⟩⟩,
exact ⟨⟨b,e⟩,⟨⟨rab,tde⟩,⟨sbc,uef⟩⟩⟩, },
{ rintro ⟨⟨b,e⟩,⟨⟨rab,tde⟩,⟨sbc,uef⟩⟩⟩,
exact ⟨⟨b,⟨rab,sbc⟩⟩,⟨e,⟨tde,uef⟩⟩⟩, }
end
@[simp]
lemma conv_seq (r : rel α β) (s : rel β γ) : (r ;; s)† = s† ;; r† := inv_comp r s
@[simp]
lemma conv_par (r : rel α β) (s : rel γ δ) : [r,s]† = [r†,s†] :=
begin
ext ⟨b,d⟩ ⟨a,c⟩,
split,
{ rintro ⟨rab,scd⟩,
exact ⟨(inv_def r a b).mpr rab,(inv_def s c d).mpr scd⟩, },
{ rintro ⟨h1,h2⟩,
simp at *,
exact ⟨h1,h2⟩, }
end
@[simp]
lemma conv_conv (r : rel α β) : r†† = r := inv_inv r
@[simp]
lemma conv_id : (@idd α)† = @idd α := inv_id
@[simp]
lemma seq_id_left (r : rel α β) : idd ;; r = r := comp_left_id r
@[simp]
lemma seq_id_right (r : rel α β) : r ;; idd = r := comp_right_id r
@[simp]
lemma par_id : [idd, idd] = @idd (α × β) :=
begin
ext ⟨a,b⟩ ⟨x,y⟩,
split,
rintro ⟨hax,hby⟩,
simp * at *,
intro h,
exact ⟨congr_arg prod.fst h,congr_arg prod.snd h⟩,
end
lemma from_conv {r s : rel α β} : r† = s† ↔ r=s :=
begin
split,
{ intro h,
ext x y,
unfold inv flip at h,
have w := congr_fun (congr_fun h y) x, dsimp at w,
simpa using w, },
{ intro h,
rw h, }
end
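-- Note: `from_conv` can also be read off from `conv_conv`: if r† = s†,
-- then r = r†† = s†† = s.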
|
#ifndef _NAMASTER_H_
#define _NAMASTER_H_
#ifndef NO_DOXY
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <math.h>
#include <time.h>
#include <complex.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_linalg.h>
#include <gsl/gsl_blas.h>
#include <gsl/gsl_eigen.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_spline.h>
#include <gsl/gsl_integration.h>
#include <fftw3.h>
#endif //NO_DOXY
#define NMT_MAX(a,b) (((a)>(b)) ? (a) : (b)) // maximum
#define NMT_MIN(a,b) (((a)<(b)) ? (a) : (b)) // minimum
#ifdef _SPREC
typedef float flouble;
typedef float complex fcomplex;
#else //_SPREC
typedef double flouble;
typedef double complex fcomplex;
#endif //_SPREC
/*! \mainpage NaMaster C API
*
* Welcome to the documentation of NaMaster's C API. Navigate through the tabs above to learn more
* about the different functionality implemented in the code.
*
* \section general_notes General notes
* - Most users will prefer to use the python wrapper "pymaster", which mostly calls the
C-based functions.
* - NaMaster uses a "row-major" order to define the ordering of power spectra into vectors.
E.g. the cross-correlation of two spin-2 fields 'a' and 'b' would give rise to 4 power
spectra: Ea-Eb, Ea-Bb, Ba-Eb and Ba-Bb. These are stored into 1-dimensional arrays using
exactly that order. For the case of a spin-0 - spin-2 correlation, the ordering is
[T-E, T-B], where T is the spin-0 field and (E,B) are the harmonic components of the
spin-2 field.
* - The abbreviation MCM will often be used instead of "mode-coupling matrix".
* - SHT will sometimes be used for "Spherical Harmonic Transform". In the context of flat-sky
fields, this should be understood as a standard Fast Fourier Transform (FFT) (with
appropriate trigonometric factors if dealing with spin-2 fields).
* - FWHM will sometimes be used for "Full-width at half-max".
*
* \section more_info More info
*
* Please refer to the README and LICENSE files for further information on installation,
* credits and licensing. Do not hesitate to contact the authors (preferably via github
* issues on https://github.com/LSSTDESC/NaMaster) if you encounter any problems using
* the code.
*/
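/* Illustrative note on the ordering convention above: for the spin-2 x spin-2 case, the four output power
 * spectra follow the row-major convention
 *   cl_out[0] -> Ea-Eb,  cl_out[1] -> Ea-Bb,  cl_out[2] -> Ba-Eb,  cl_out[3] -> Ba-Bb,
 * while a spin-0 (T) x spin-2 (E,B) correlation yields the two spectra ordered as [T-E, T-B].
 */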
/**
* @brief Flat-sky bandpowers.
*
* This structure defines bandpowers for flat-sky power spectra.
* These are currently defined only by band edges (assumed
* flat weights within band).
*/
typedef struct {
int n_bands; //!< Number of bandpowers stored
flouble *ell_0_list; //!< Lower edge of each bandpower
flouble *ell_f_list; //!< Upper edge of each bandpower
} nmt_binning_scheme_flat;
/**
* @brief nmt_binning_scheme_flat constructor for constant bandpowers
*
* nmt_binning_scheme_flat constructor for bandpowers with
* constant width \p nlb, from ell = 2 to ell = \p lmax.
* @param nlb Constant band width
* @param lmax Maximum multipole
* @return Allocated binning structure.
*/
nmt_binning_scheme_flat *nmt_bins_flat_constant(int nlb,flouble lmax);
/**
* @brief nmt_binning_scheme_flat generic constructor.
*
* @param nell Number of bandpowers
* @param l0 Lower edge of all bandpowers (should be allocated to nell elements).
* @param lf Upper edge of all bandpowers (should be allocated to nell elements).
* @return Allocated binning structure.
*/
nmt_binning_scheme_flat *nmt_bins_flat_create(int nell,flouble *l0,flouble *lf);
/**
* @brief nmt_binning_scheme_flat destructor
*/
void nmt_bins_flat_free(nmt_binning_scheme_flat *bin);
/**
* @brief Returns average of input power spectrum into bandpowers.
*
* @param bin nmt_binning_scheme_flat structure defining the bandpowers.
* @param nl Number of elements in the input power spectra.
* @param larr Array containing the \p nl multipoles at which the input power
* spectrum is defined.
* @param cls_in Array of \p ncls input power spectra.
* @param cls_out Array of \p ncls averaged output power spectra.
* Should be allocated to the number of bandpowers defined by \p bin.
* @param ncls Number of input/output power spectra.
*/
void nmt_bin_cls_flat(nmt_binning_scheme_flat *bin,int nl,flouble *larr,flouble **cls_in,
flouble **cls_out,int ncls);
/**
* @brief Returns binned power spectra interpolated into a given set of multipoles.
*
* Nearest-neighbours interpolation is used.
* @param bin nmt_binning_scheme_flat structure defining the bandpowers.
* @param cls_in Array of \p ncls input power spectra. Must have the same number of
* elements as bandpowers defined by \p bin.
* @param nl Number of elements in the output power spectra.
* @param larr Array containing the \p nl multipoles at which the output power
* spectra are requested.
* @param cls_out Array of \p ncls interpolated output power spectra.
* @param ncls Number of input/output power spectra.
*/
void nmt_unbin_cls_flat(nmt_binning_scheme_flat *bin,flouble **cls_in,
int nl,flouble *larr,flouble **cls_out,int ncls);
/**
* @brief Returns effective multipoles.
*
* Returns the mid point of each bandpower defined in \p bin.
* @param bin nmt_binning_scheme_flat structure defining the bandpowers.
* @param larr Output array containing mid-points of the bandpowers.
* Should be preallocated to the correct number of bandpowers.
*/
void nmt_ell_eff_flat(nmt_binning_scheme_flat *bin,flouble *larr);
/**
* @brief Fast bin-searching routine for flat-sky bandpowers
*
* Returns the bandpower index in which a given ell falls. The function is designed
* to be fast if a good guess for the bandpower index is supplied. A typical use would
* be to iterate over ell values and pass, as a guess index, the index found in the
* previous iteration.
* @param bin nmt_binning_scheme_flat structure defining the bandpowers.
* @param l Multipole for which you want the bandpower index.
* @param il Guessed bandpower index.
* @return Bandpower index.
*/
int nmt_bins_flat_search_fast(nmt_binning_scheme_flat *bin,flouble l,int il);
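/* Hypothetical usage sketch (all names other than NaMaster's are illustrative): iterate over multipoles and
 * reuse the index found in the previous iteration as the guess, as described above.
 *
 *   int ib = 0;
 *   for (int i = 0; i < nl; i++) {
 *     ib = nmt_bins_flat_search_fast(bin, larr[i], ib);
 *     // ... use ib for this multipole ...
 *   }
 */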
/**
* @brief Full-sky bandpowers.
*
* This structure defines bandpowers for full-sky power spectra.
* Although a given multipole ell can only contribute to one bandpower,
* the distribution of ells per bandpower and their relative weights
* is left completely free.
*/
typedef struct {
int n_bands; //!< Number of bandpowers.
int *nell_list; //!< Number of multipoles belonging to each bandpower.
int **ell_list; //!< List of multipoles in each bandpower.
flouble **w_list; //!< List of weights associated to each multipole in \p ell_list.
flouble **f_ell; //!< Multiplicative ell factor
int ell_max; //!< Maximum multipole included.
} nmt_binning_scheme;
/**
* @brief nmt_binning_scheme constructor for constant bandpowers.
*
* nmt_binning_scheme constructor for bandpowers with constant
* width \p nlb, from ell = 2 to ell = \p lmax.
* @param nlb Constant band width
* @param lmax Maximum multipole
* @param is_l2 If not zero, will assume l*(l+1)/2pi weighting
* @return Allocated binning structure.
*/
nmt_binning_scheme *nmt_bins_constant(int nlb,int lmax,int is_l2);
/**
* @brief nmt_binning_scheme generic constructor.
*
* @param nell Number of elements in all subsequent arrays.
* @param bpws Array of bandpower indices.
* @param ells Array of multipole values. This function collects all multipoles
* into their associated bandpowers.
* @param weights Array of weights associated to each multipole. Weights are
* normalized to 1 within each bandpower.
* @param f_ell Array of ell-dependent prefactors (e.g. l*(l+1)/2pi is a typical choice).
*              Pass NULL if you don't want any prefactor.
* @param lmax Maximum multipole to consider.
* @return Allocated binning structure.
*/
nmt_binning_scheme *nmt_bins_create(int nell,int *bpws,int *ells,flouble *weights,
flouble *f_ell,int lmax);
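/* Hypothetical usage sketch (all names other than NaMaster's are illustrative): build bandpowers of constant
 * width 10 from ell = 2 to ell = lmax by hand, similar in spirit to nmt_bins_constant(10, lmax, 0). Since
 * weights are normalized to 1 within each bandpower, uniform weights are sufficient here.
 *
 *   int lmax = 2000, nlb = 10, nell = lmax - 1;          // multipoles 2..lmax
 *   int *bpws = malloc(nell * sizeof(int));
 *   int *ells = malloc(nell * sizeof(int));
 *   flouble *w = malloc(nell * sizeof(flouble));
 *   for (int i = 0; i < nell; i++) {
 *     ells[i] = 2 + i;
 *     bpws[i] = i / nlb;
 *     w[i] = 1.0;
 *   }
 *   nmt_binning_scheme *bin = nmt_bins_create(nell, bpws, ells, w, NULL, lmax);
 */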
/**
* @brief nmt_binning_scheme constructor from file
*
* Builds a nmt_binning_scheme structure from an ASCII file.
* @param fname Path to file containing information to build bandpowers.
* The file should contain three columns, corresponding to:
* bandpower index, multipole and weight (in this order).
* See definition of nmt_bins_create().
* @param lmax Maximum multipole to be considered.
* @return Allocated binning structure.
*/
nmt_binning_scheme *nmt_bins_read(char *fname,int lmax);
/**
* @brief nmt_binning_scheme destructor
*/
void nmt_bins_free(nmt_binning_scheme *bin);
/**
* @brief Returns average of input power spectrum into bandpowers.
*
* @param bin nmt_binning_scheme structure defining the bandpowers.
* @param cls_in Array of \p ncls input power spectra. They should be
* defined in all ells that go into any bandpower defined by \p bin.
* @param cls_out Array of \p ncls averaged output power spectra.
* Should be allocated to the number of bandpowers defined by \p bin.
* @param ncls Number of input/output power spectra.
*/
void nmt_bin_cls(nmt_binning_scheme *bin,flouble **cls_in,flouble **cls_out,int ncls);
/**
* @brief Returns binned power spectra interpolated into output multipoles.
*
* Top-hat interpolation is used (i.e. a given ell is associated with the binned power
* spectrum value at the bandpower that ell corresponds to).
* @param bin nmt_binning_scheme structure defining the bandpowers.
* @param cls_in Array of \p ncls input power spectra. Must have the same number of
* elements as bandpowers defined by \p bin.
* @param cls_out Array of \p ncls interpolated output power spectra.
* @param ncls Number of input/output power spectra.
*/
void nmt_unbin_cls(nmt_binning_scheme *bin,flouble **cls_in,flouble **cls_out,int ncls);
/**
* @brief Returns effective multipoles.
*
* Return the weighted average multipole values within each bandpower defined by \p bin.
* @param bin nmt_binning_scheme structure defining the bandpowers.
* @param larr Output array containing the effective multipole in each bandpower.
* Should be preallocated to the correct number of bandpowers.
*/
void nmt_ell_eff(nmt_binning_scheme *bin,flouble *larr);
/**
* @brief Flat-sky Fourier-space function
*
* Unlike multipoles in harmonic space, in the case of full-sky operations,
* wavenumbers k in Fourier space for flat-sky fields are in general continuous
* variables. This structure helps define functions of these continuous variables.
*/
typedef struct {
int is_const; //!< If >0, this function is just a constant
flouble x0; //!< Lower edge of spline interpolation
flouble xf; //!< Upper edge of spline interpolation
flouble y0; //!< Function will take this value for x < \p x0
flouble yf; //!< Function will take this value for x > \p xf
gsl_spline *spl; //!< GSL spline interpolator.
} nmt_k_function;
/**
* @brief nmt_k_function creator.
*
* @param nk Number of elements in input arrays.
* @param karr k-values at which the input function is sampled.
* @param farr Function values at k = \p karr.
* @param y0 Constant function value below interpolation range.
* @param yf Constant function value above interpolation range.
* @param is_const If non-zero, will create a constant function.
* In this case all previous arguments other than \p y0 are ignored
* and the function will take this value for all k.
*/
nmt_k_function *nmt_k_function_alloc(int nk,flouble *karr,flouble *farr,
flouble y0,flouble yf,int is_const);
/**
* @brief nmt_k_function destructor
*/
void nmt_k_function_free(nmt_k_function *f);
/**
* @brief nmt_k_function evaluator.
*
* Returns value of function at \p k.
* @param f nmt_k_function to evaluate.
* @param k Value of k for which you want f(k).
* @param intacc GSL interpolation accelerator. If you don't want any, just pass a NULL pointer.
*/
flouble nmt_k_function_eval(nmt_k_function *f,flouble k,gsl_interp_accel *intacc);
/**
* @brief Flat-sky information.
*
* This structure contains all the information defining a given rectangular flat-sky patch.
* The structure also contains information about the optimal way of sampling the Fourier
* version of this patch into rings of |k|.
*/
typedef struct {
int nx; //!< Number of grid points in the x dimension
int ny; //!< Number of grid points in the y dimension
long npix; //!< Total number of pixels (given by \p nx * \p ny)
flouble lx; //!< Length of the x dimension (in steradians)
flouble ly; //!< Length of the y dimension (in steradians)
flouble pixsize; //!< Pixel area (given by \p lx * \p ly / ( \p nx * \p ny))
int n_ell; //!< Number of |k|-values for Fourier-space sampling.
flouble dell; //!< Width of the Fourier-space rings. This is found as min(2 π / \p lx,2 π / \p ly).
flouble i_dell; //!< 1 / \p dell
flouble *ell_min; //!< Array of \p n_ell values containing the lower edges of each of the |k| rings.
// int *n_cells;
} nmt_flatsky_info;
/**
* @brief nmt_flatsky_info constructor
*
* Builds nmt_flatsky_info from patch dimensions.
* @param nx Number of grid points in the x dimension
* @param ny Number of grid points in the y dimension
* @param lx Length of the x dimension (in steradians)
* @param ly Length of the y dimension (in steradians)
* @return Allocated nmt_flatsky_info structure.
*/
nmt_flatsky_info *nmt_flatsky_info_alloc(int nx,int ny,flouble lx,flouble ly);
/**
* @brief nmt_flatsky_info destructor.
*/
void nmt_flatsky_info_free(nmt_flatsky_info *fs);
/**
* @brief Flat-sky field
*
* This structure contains all the information defining a spin-s flat-sky field.
* This includes field values, masking, purification and contamination.
*/
typedef struct {
nmt_flatsky_info *fs; //!< Structure defining patch geometry.
long npix; //!< Number of pixels in all maps (also contained in \p fs).
int pure_e; //!< >0 if E-modes have been purified.
int pure_b; //!< >0 if B-modes have been purified.
flouble *mask; //!< Field's mask (an array of \p npix values).
fcomplex **a_mask; //!< Fourier transform of the mask. Only computed if E or B are purified.
int spin; //!< field's spin (>=0).
int nmaps; //!< Number of maps in the field (2 for spin-2, 1 for spin-0).
flouble **maps; //!< Observed field values. When initialized, these maps are already multiplied by the mask, contaminant deprojected and purified if requested.
fcomplex **alms; //!< Fourier transforms of the maps.
int ntemp; //!< Number of contaminant templates
flouble ***temp; //!< Contaminant template maps (mask-multiplied but NOT purified).
fcomplex ***a_temp; //!< Fourier transforms of template maps (mask-multiplied AND purified if requested).
gsl_matrix *matrix_M; //!< Inverse contaminant covariance matrix (see scientific documentation or companion paper).
nmt_k_function *beam; //!< Function defining a circularly-symmetric beam function. Power spectra will be beam-deconvolved.
int lite; //!< lightweight field (no maps, temp, a_temp or a_mask)
int mask_only; //!< this field only contains a mask, and beam. No alms, maps or anything else.
} nmt_field_flat;
/**
* @brief nmt_field_flat destructor
*/
void nmt_field_flat_free(nmt_field_flat *fl);
/**
* @brief nmt_field_flat constructor
*
* Builds an nmt_field_flat structure from input maps and patch parameters.
* @param nx Number of grid points in the x dimension.
* @param ny Number of grid points in the y dimension.
* @param lx Length of the x dimension (in steradians).
* @param ly Length of the y dimension (in steradians).
* @param mask Field's mask (an array of \p nx * \p ny values).
* @param spin Field's spin.
* @param maps Observed field values BEFORE multiplying by the mask
(this is irrelevant for binary masks).
* @param ntemp Number of contaminant templates affecting this field.
* @param temp Contaminant template maps (again, NOT multiplied by the mask).
* @param nl_beam Number of multipole values defining this field's beam.
* @param l_beam Multipole values at which this field's beam is defined.
* @param beam Beam values at ell = \p l_beam. Pass a NULL pointer if you don't
want any beam (\p nl_beam and \p l_beam will be ignored).
* @param pure_e Set to >0 if you want purified E-modes.
* @param pure_b Set to >0 if you want purified B-modes.
* @param tol_pinv Contaminant deprojection requires the inversion of the template
covariance matrix. This could be ill-defined if some templates are linearly
related. In this case we use a pseudo-inverse that accounts for this
possibility in a consistent way. Effectively this is a singular-value
decomposition. All eigenvalues that are smaller than \p tol_pinv times the largest
eigenvalue will be discarded.
* @param masked_input if not 0, input maps and templates have already been masked.
This is not advisable if using purification.
* @param is_lite if not 0, only the map alms and the mask will be stored. You can then
use this field to compute the standard pseudo-C_ell with deprojection and purification,
but you won't be able to compute the deprojection bias or examine any maps.
* @param mask_only if not 0, this field will only store a mask and a beam. You will
be able to use it to compute the PCL and covariance mode coupling matrices, but that's
it (no actual power spectra, deprojection biases etc.).
*/
nmt_field_flat *nmt_field_flat_alloc(int nx,int ny,flouble lx,flouble ly,
flouble *mask,int spin,flouble **maps,int ntemp,flouble ***temp,
int nl_beam,flouble *l_beam,flouble *beam,
int pure_e,int pure_b,double tol_pinv,int masked_input,
int is_lite,int mask_only);
/**
* @brief Gaussian realizations of flat-sky fields
*
* Generates a Gaussian realization of an arbitrary list of possibly-correlated
* fields with different spins.
* @param nx Number of grid points in the x dimension.
* @param ny Number of grid points in the y dimension.
* @param lx Length of the x dimension (in steradians).
* @param ly Length of the y dimension (in steradians).
* @param nfields Number of fields to generate.
* @param spin_arr Array (size \p nfields) containing the spins of the fields to be generated.
* @param nl_beam Number of multipoles at which the field beams are defined.
* @param l_beam Array of multipoles at which the field beams are defined.
* @param beam_fields Array of beams (one per field).
* @param nl_cell Number of multipole values at which the input power spectra are provided.
* @param l_cell Array of multipole values at which the input power spectra are provided.
* @param cell_fields Array of input power spectra. Shape should be [\p n_cls][\p nl_cell],
where \p n_cls is the number of power spectra needed to define all the fields.
This should be \p n_cls = n_maps * (n_maps + 1) / 2, where n_maps is the total
number of maps required (1 for each spin-0 field, 2 for each spin-2 field). Power
spectra must be provided only for the upper-triangular part in row-major order
(e.g. if n_maps is 3, there will be 6 power spectra ordered as [1-1,1-2,1-3,2-2,2-3,3-3]).
* @param seed Seed for this particular realization.
* @return Gaussian realization.
*/
flouble **nmt_synfast_flat(int nx,int ny,flouble lx,flouble ly,int nfields,int *spin_arr,
int nl_beam,flouble *l_beam,flouble **beam_fields,
int nl_cell,flouble *l_cell,flouble **cell_fields,
int seed);
/**
* @brief E- or B-mode purifies a given pair of flat-sky (Q,U) maps.
*
* This function is mostly used internally by NaMaster, and its standalone use is discouraged.
* @param fl nmt_field_flat containing information about what should be purified.
* @param mask Sky mask (should be appropriately apodized - see scientific documentation).
* @param walm0 Fourier transform of the mask.
* @param maps_in Maps to be purified (should NOT be mask-multiplied).
* @param maps_out Output purified maps.
* @param alms Fourier transform of the output purified maps.
*/
void nmt_purify_flat(nmt_field_flat *fl,flouble *mask,fcomplex **walm0,
flouble **maps_in,flouble **maps_out,fcomplex **alms);
/**
* @brief Curved-sky information.
*
* This structure contains all the information defining a given full-sky patch.
* It describes either a HEALPix grid (in which case is_healpix!=0) or a CAR
* patch (for is_healpix==0). If the latter, then the CAR pixelization must
* conform to the Clenshaw-Curtis sampling. In this case the colatitude theta
* must be sampled at N points going from 0 to pi (including both), separated
* by an interval Dtheta = pi/(N-1). Not all iso-latitude rings must be stored
* in the patch (i.e. ny is not necessarily equal to N). See the documentation for
* nmt_curvedsky_info_alloc for further information on the constraints that
* some of the members of this structure must fulfill.
*/
typedef struct {
int is_healpix; //!< is this HEALPix pixelization?
long n_eq; //!< equivalent of nside, number of pixels in the equatorial ring
int lmax_sht; //!< Maximum multipole to compute spherical harmonic transform
int nx_short; //!< Number of grid points in the x dimension before completing the circle
int nx; //!< Number of grid points in the phi dimension
int ny; //!< Number of grid points in the theta dimension
long npix; //!< Total number of pixels (given by \p nx * \p ny)
flouble Delta_theta; //!< pixel size in theta direction
flouble Delta_phi; //!< pixel size in phi direction
flouble phi0; //!< Longitude of the first pixel
flouble theta0; //!< Colatitude of the last ring
} nmt_curvedsky_info;
/**
* @brief Makes a copy of a nmt_curvedsky_info structure
*
* @param cs_in input structure to be copied.
* @return copy of input nmt_curvedsky_info structure.
*/
nmt_curvedsky_info *nmt_curvedsky_info_copy(nmt_curvedsky_info *cs_in);
/**
* @brief nmt_curvedsky_info creator
*
* If generating a Clenshaw-Curtis grid, then Dtheta and Dphi must be (close to)
* exact divisor of pi and 2pi respectively. Likewise, theta0 must be an integer
* multiple of Dtheta, and the number of pixels in the theta direction must be
* such that the map actually fits on the sphere (i.e. theta0-(ny-1)*Dtheta >=0).
* @param is_healpix is this HEALPix pixelization.
* @param nside if is_healpix, this should be the HEALPix Nside parameter.
* @param lmax_sht maximum multipole up to which spherical harmonic transforms will be computed.
* @param nx0 number of pixels in the phi direction.
* @param ny0 number of pixels in the theta direction.
* @param Dtheta pixel size in the theta direction. In radians. Must be positive.
* @param Dphi pixel size in the phi direction. In radians, must be positive.
* @param theta0 colatitude of the last ring in the map. In radians.
* @param phi0 minimum azimuth covered by the map. In radians.
* @return nmt_curvedsky_info struct.
*/
nmt_curvedsky_info *nmt_curvedsky_info_alloc(int is_healpix,long nside,
int lmax_sht,
int nx0,int ny0,flouble Dtheta,flouble Dphi,
flouble phi0,flouble theta0);
/**
* @brief Compare two nmt_curvedsky_info structs.
*
* @return true (!=0) if both structs are equivalent, and false (0) if they aren't.
*/
int nmt_diff_curvedsky_info(nmt_curvedsky_info *c1, nmt_curvedsky_info *c2);
/**
* @brief Extend CAR map to cover the full circle.
*
* CAR maps only cover a particular part of the sky, but the SHT routines need as
* input maps that are complete in the azimuth direction. This routine takes in
* a raw CAR map with its corresponding nmt_curvedsky_info and returns the
* phi-complete map (with zeros in all pixels outside the original map).
* If the input map is in HEALPix, this routine just returns a copy of it.
* @param cs curved sky geometry info.
* @param map_in input incomplete map.
* @return phi-complete map.
*/
flouble *nmt_extend_CAR_map(nmt_curvedsky_info *cs,flouble *map_in);
/**
* @brief Full-sky field
*
* This structure contains all the information defining a spin-s full-sky field.
* This includes field values, masking, purification and contamination.
*/
typedef struct {
nmt_curvedsky_info *cs; //!< pixelization parameters
long npix; //!< Number of pixels in all maps
long nalms; //!< Number of complex harmonic coefficients
int lmax; //!< Maximum multipole used
int pure_e; //!< >0 if E-modes have been purified
int pure_b; //!< >0 if B-modes have been purified
flouble *mask; //!< Field's mask (an array of \p npix values).
fcomplex **a_mask; //!< Spherical transform of the mask. Only computed if E or B are purified.
int spin; //!< field's spin (>=0).
int nmaps; //!< Number of maps in the field (2 for spin-2, 1 for spin-0).
flouble **maps; //!< Observed field values. When initialized, these maps are already multiplied by the mask, contaminant-deprojected and purified if requested.
fcomplex **alms; //!< Spherical harmonic transforms of the maps.
int ntemp; //!< Number of contaminant templates
flouble ***temp; //!< Contaminant template maps (mask-multiplied but NOT purified).
fcomplex ***a_temp; //!< Spherical harmonic transforms of template maps (mask-multiplied AND purified if requested).
gsl_matrix *matrix_M; //!< Inverse contaminant covariance matrix (see scientific documentation or companion paper).
flouble *beam; //!< Field's beam (defined on all multipoles up to \p lmax).
int lite; //!< lightweight field (no maps, temp, a_temp or a_mask)
int mask_only; //!< this field only contains a mask, and beam. No alms, maps or anything else.
} nmt_field;
/**
* @brief nmt_field destructor.
*/
void nmt_field_free(nmt_field *fl);
/**
* @brief nmt_field constructor
*
* Builds an nmt_field structure from input maps and resolution parameters.
* @param cs curved sky geometry info.
* @param mask Field's mask.
* @param spin Field's spin.
* @param maps Observed field values BEFORE multiplying by the mask
(this is irrelevant for binary masks).
* @param ntemp Number of contaminant templates affecting this field.
* @param temp Contaminant template maps (again, NOT multiplied by the mask).
* @param beam Harmonic coefficients of the beam (defined for all multipoles up to
* the maximum multipole sampled by the map). Pass a NULL pointer if you don't want any beam.
* @param pure_e Set to >0 if you want purified E-modes.
* @param pure_b Set to >0 if you want purified B-modes.
* @param n_iter_mask_purify E/B purification requires a number of harmonic-space
operations on an appropriately apodized mask. This parameter sets the
number of iterations requested to compute the spherical harmonic transform
of the field's mask. Higher values will produce more accurate results (at
the cost of computational time).
* @param tol_pinv Contaminant deprojection requires the inversion of the template
covariance matrix. This could be ill-defined if some templates are linearly
related. In this case we use a pseudo-inverse that accounts for this
possibility in a consistent way. Effectively this is a singular-value
decomposition. All eigenvalues that are smaller than \p tol_pinv times the largest
eigenvalue will be discarded.
* @param niter number of iterations when computing alms (for all transforms other than the mask's).
* @param masked_input if not 0, input maps and templates have already been masked.
This is not advisable if using purification.
* @param is_lite if not 0, only the map alms and the mask will be stored. You can then
use this field to compute the standard pseudo-C_ell with deprojection and purification,
but you won't be able to compute the deprojection bias or examine any maps.
* @param mask_only if not 0, this field will only store a mask and a beam. You will
be able to use it to compute the PCL and covariance mode coupling matrices, but that's
it (no actual power spectra, deprojection biases etc.).
*/
nmt_field *nmt_field_alloc_sph(nmt_curvedsky_info *cs,flouble *mask,int spin,flouble **maps,
int ntemp,flouble ***temp,flouble *beam,
int pure_e,int pure_b,int n_iter_mask_purify,double tol_pinv,
int niter,int masked_input,int is_lite,int mask_only);
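/* Hypothetical usage sketch (all variable names and parameter values are illustrative, not prescriptive):
 * allocate a plain spin-0 field with no contaminant templates, no beam and no purification.
 *
 *   flouble *map = ...;   // npix field values
 *   flouble *mask = ...;  // npix mask values
 *   nmt_field *f = nmt_field_alloc_sph(cs, mask, 0, &map,
 *                                      0, NULL,     // no contaminant templates
 *                                      NULL,        // no beam
 *                                      0, 0,        // no E/B purification
 *                                      3, 1e-10,    // n_iter_mask_purify, tol_pinv
 *                                      3,           // niter for alm computation
 *                                      0, 0, 0);    // masked_input, is_lite, mask_only
 */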
/**
* @brief nmt_field constructor from file.
*
* Builds an nmt_field structure from data written in files.
* @param is_healpix is the map stored in healpix format?
* @param fname_mask Path to FITS file containing the field's mask (single HEALPix map).
* @param spin Field's spin.
* @param fname_maps Path to FITS file containing the field's observed maps
(1(2) maps if \p spin=0(!=0)).
* @param fname_temp Path to FITS file containing the field's contaminant templates.
If \p spin > 0, the file should contain an even number
of files. Each consecutive pair of maps will be interpreted as the Q and U
components of a given contaminant. Pass "none" if you don't want any contaminants.
* @param fname_beam Path to ASCII file containing the field's beam. The file should
contain two columns: l (multipole) and b_l (beam SHT at that multipole).
Pass "none if you don't want a beam.
* @param pure_e >0 if you want E-mode purification.
* @param pure_b >0 if you want B-mode purification.
* @param n_iter_mask_purify E/B purification requires a number of harmonic-space
operations on an appropriately apodized mask. This parameter sets the
number of iterations requested to compute the spherical harmonic transform
of the field's mask. Higher values will produce more accurate results (at
the cost of computational time).
* @param tol_pinv Contaminant deprojection requires the inversion of the template
covariance matrix. This could be ill-defined if some templates are linearly
related. In this case we use a pseudo-inverse that accounts for this
possibility in a consistent way. Effectively this is a singular-value
decomposition. All eigenvalues that are smaller than \p tol_pinv times the largest
eigenvalue will be discarded.
* @param niter number of iterations when computing alms (other than the mask's).
*/
nmt_field *nmt_field_read(int is_healpix,char *fname_mask,char *fname_maps,char *fname_temp,
char *fname_beam,int spin,int pure_e,int pure_b,
int n_iter_mask_purify,double tol_pinv,int niter);
/**
* @brief Gaussian realizations of full-sky fields
*
* Generates a Gaussian realization of an arbitrary list of possibly-correlated fields with different spins.
* @param cs curved sky geometry info.
* @param lmax Maximum multipole used.
* @param nfields Number of fields to generate.
* @param spin_arr Array (size \p nfields) containing the spins of the fields to be generated.
* @param beam_fields Array of beams (one per field). Must be defined at all ell <= \p lmax.
* @param cells Array of input power spectra (defined at all ell <= \p lmax). Shape
should be [\p n_cls][\p lmax+1], where \p n_cls is the number of power spectra
needed to define all the fields. This should be \p n_cls = n_maps * (n_maps + 1) / 2,
where n_maps is the total number of maps required (1 for each spin-0 field, 2 for
each spin-2 field). Power spectra must be provided only for the upper-triangular part
in row-major order (e.g. if n_maps is 3, there will be 6 power spectra ordered as
[1-1,1-2,1-3,2-2,2-3,3-3]).
* @param seed Seed for this particular realization.
* @return Gaussian realization.
*/
flouble **nmt_synfast_sph(nmt_curvedsky_info *cs,int nfields,int *spin_arr,int lmax,
flouble **cells,flouble **beam_fields,int seed);
/**
* @brief E- or B-mode purifies a given pair of full-sky (Q,U) maps.
*
* This function is mostly used internally by NaMaster, and its standalone use is discouraged.
* @param fl nmt_field containing information about what should be purified.
* @param mask Sky mask (should be appropriately apodized - see scientific documentation).
* @param walm0 Spherical harmonic transform of the mask.
* @param maps_in Maps to be purified (should NOT be mask-multiplied).
* @param maps_out Output purified maps.
* @param alms Spherical harmonic transform of the output purified maps.
* @param niter number of iterations when computing alms.
*/
void nmt_purify(nmt_field *fl,flouble *mask,fcomplex **walm0,
flouble **maps_in,flouble **maps_out,fcomplex **alms,int niter);
/**
* @brief Apodize full-sky mask.
*
 * Produces an apodized version of a full-sky mask for a number of apodization schemes.
* @param nside HEALPix resolution parameter.
* @param mask_in Input mask to be apodized.
* @param mask_out Output apodized mask.
* @param aposize Apodization scale (in degrees).
* @param apotype String defining the apodization procedure. Three values allowed: 'C1', 'C2' and 'Smooth'. These correspond to:
* - \p apotype = "C1". All pixels are multiplied by a factor \f$f\f$, given by:
*\f[
* f=\left\{
* \begin{array}{cc}
* x-\sin(2\pi x)/(2\pi) & x<1\\
* 1 & {\rm otherwise}
* \end{array}
* \right.,
* \f]
where \f$x=\sqrt{(1-\cos\theta)/(1-\cos(\theta_*))}\f$, \f$\theta_*\f$ is the
apodization scale and \f$\theta\f$ is the angular separation between a pixel and
the nearest masked pixel (i.e. where the mask takes a zero value).
* - \p apotype = "C2". The same as the C1 case, but the function in this case is:
*\f[
* f=\left\{
* \begin{array}{cc}
* \frac{1}{2}\left[1-\cos(\pi x)\right] & x<1\\
* 1 & {\rm otherwise}
* \end{array}
* \right.,
* \f]
* - \p apotype = "Smooth". This apodization is carried out in three steps:
* -# All pixels within a disc of radius \f$2.5\theta_*\f$ of a masked pixel are masked.
 *    -# The resulting map is smoothed with a Gaussian window function with standard
deviation \f$\sigma=\theta_*\f$.
* -# One final pass is made through all pixels to ensure that all originally masked
* pixels are still masked after the smoothing operation.
*/
void nmt_apodize_mask(long nside,flouble *mask_in,flouble *mask_out,flouble aposize,char *apotype);
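/* Usage sketch (illustrative values only): apodize a HEALPix mask on a
 * 1-degree scale with the "C1" scheme. This assumes mask_out has been
 * allocated by the caller to the same number of pixels as mask_in.
 *
 *   nmt_apodize_mask(nside, mask_in, mask_out, 1.0, "C1");
 */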
/**
* @brief Apodize flat-sky mask.
*
 * Produces an apodized version of a flat-sky mask for a number of apodization schemes.
* @param nx Number of grid points in the x dimension
* @param ny Number of grid points in the y dimension
* @param lx Length of the x dimension (in steradians)
* @param ly Length of the y dimension (in steradians)
* @param mask_in Input mask to be apodized.
* @param mask_out Output apodized mask.
* @param aposize Apodization scale (in degrees).
* @param apotype String defining the apodization procedure. See definitions of nmt_apodize_mask().
*/
void nmt_apodize_mask_flat(int nx,int ny,flouble lx,flouble ly,
flouble *mask_in,flouble *mask_out,flouble aposize,char *apotype);
/**
* @brief Flat-sky mode-coupling matrix.
*
* Structure containing information about the mode-coupling matrix (MCM) for flat-sky pseudo-CLs.
*/
typedef struct {
  int ncls; //!< Number of power spectra (1, 2 or 4 depending on the spins of the fields being correlated).
  flouble ellcut_x[2]; //!< Range of ells in the x direction to be masked in Fourier space
  flouble ellcut_y[2]; //!< Range of ells in the y direction to be masked in Fourier space
int pe1; //!< Is the E-mode component of the first field purified?
int pe2; //!< Is the E-mode component of the second field purified?
int pb1; //!< Is the B-mode component of the first field purified?
int pb2; //!< Is the B-mode component of the second field purified?
nmt_flatsky_info *fs; //!< Contains information about rectangular flat-sky patch.
int is_teb; //!< Does it hold all MCM elements to compute all of spin0-spin0, 0-2 and 2-2 correlations?
int *n_cells; //!< Number of unmasked Fourier-space grid points contributing to a given bandpower
flouble **coupling_matrix_unbinned; //!< Unbinned MCM
flouble **coupling_matrix_binned; //!< Binned MCM
nmt_binning_scheme_flat *bin; //!< Bandpowers defining the binning
flouble lmax; //!< Maximum k-mode used
gsl_matrix *coupling_matrix_binned_gsl; //!< GSL version of MCM (prepared for inversion)
gsl_permutation *coupling_matrix_perm; //!< Complements \p coupling_matrix_binned_gsl for inversion.
} nmt_workspace_flat;
/**
* @brief nmt_workspace_flat destructor
*/
void nmt_workspace_flat_free(nmt_workspace_flat *w);
/**
* @brief Computes mode-coupling matrix.
*
* Computes MCM for a given pair of flat-sky fields.
* @param fl1 nmt_field_flat structure defining the first field to correlate.
* @param fl2 nmt_field_flat structure defining the second field to correlate.
* @param bin nmt_binning_scheme_flat defining the power spectrum bandpowers.
* @param lmn_x Lower end of the range of multipoles in the x direction that should be masked.
* @param lmx_x Upper end of the range of multipoles in the x direction that should be masked.
          if \p lmx_x < \p lmn_x, no Fourier-space masking is performed.
* @param lmn_y Same as \p lmn_x for the y direction.
* @param lmx_y Same as \p lmx_x for the y direction.
* @param is_teb if !=0, all mode-coupling matrices (0-0,0-2,2-2) will be computed at the same time.
*/
nmt_workspace_flat *nmt_compute_coupling_matrix_flat(nmt_field_flat *fl1,nmt_field_flat *fl2,
nmt_binning_scheme_flat *bin,
flouble lmn_x,flouble lmx_x,
flouble lmn_y,flouble lmx_y,int is_teb);
/**
* @brief Computes deprojection bias.
*
* Computes contaminant deprojection bias for a pair of fields.
* See notes about power spectrum ordering in the main page of this documentation.
* @param fl1 nmt_field_flat structure defining the first field to correlate.
* @param fl2 nmt_field_flat structure defining the second field to correlate.
* @param bin nmt_binning_scheme_flat defining the power spectrum bandpowers.
* @param lmn_x Lower end of the range of multipoles in the x direction that should be masked.
* @param lmx_x Upper end of the range of multipoles in the x direction that should be masked.
          if \p lmx_x < \p lmn_x, no Fourier-space masking is performed.
* @param lmn_y Same as \p lmn_x for the y direction.
* @param lmx_y Same as \p lmx_x for the y direction.
* @param nl_prop Number of multipoles over which the proposed power spectrum is defined.
* @param l_prop Array of multipoles over which the proposed power spectrum is defined.
* @param cl_proposal Proposed power spectrum. Should have shape [ncls][\p nl_prop], where
\p ncls is the appropriate number of power spectra given the spins of the input
fields (e.g. \p ncls = 2*2 = 4 if both fields have spin=2).
 * @param cl_bias Output deprojection bias. Should be allocated to shape [ncls][nbpw],
where \p ncls is defined above and \p nbpw is the number of bandpowers
defined by \p bin.
*/
void nmt_compute_deprojection_bias_flat(nmt_field_flat *fl1,nmt_field_flat *fl2,
nmt_binning_scheme_flat *bin,
flouble lmn_x,flouble lmx_x,flouble lmn_y,flouble lmx_y,
int nl_prop,flouble *l_prop,flouble **cl_proposal,
flouble **cl_bias);
/**
* @brief Mode-couples an input power spectrum
*
 * This function applies the mode coupling induced by the pseudo-CL estimator to a given
* input power spectrum. This function should be used in conjunction with nmt_decouple_cl_l_flat()
* to compute the theory prediction of the pseudo-CL estimator. See the scientific documentation
* or the companion paper for further details on how this is done in particular for the flat-sky
* approximation.
* See notes about power spectrum ordering in the main page of this documentation.
* @param w nmt_workspace_flat structure containing the mode-coupling matrix
* @param nl Number of multipoles on which the input power spectrum is defined.
* @param larr Array of multipoles on which the input power spectrum is defined.
* @param cl_in Array of input power spectra. Should have shape [ncls][nl], where ncls is the
appropriate number of power spectra given the fields being correlated (e.g. ncls=4=2*2
          for two spin-2 fields).
* @param cl_out Array of output power spectra. Should have shape [ncls][nbpw], where ncls is
defined above and nbpw is the number of bandpowers used to define \p w.
*/
void nmt_couple_cl_l_flat_fast(nmt_workspace_flat *w,int nl,flouble *larr,flouble **cl_in,
flouble **cl_out);
/**
* @brief Mode-couples an input power spectrum
*
* Faster (but less accurate) version of nmt_couple_cl_l_flat_fast().
* @param w nmt_workspace_flat structure containing the mode-coupling matrix
* @param nl Number of multipoles on which the input power spectrum is defined.
* @param larr Array of multipoles on which the input power spectrum is defined.
* @param cl_in Array of input power spectra. Should have shape [ncls][nl], where ncls is the
appropriate number of power spectra given the fields being correlated (e.g. ncls=4=2*2
          for two spin-2 fields).
* @param cl_out Array of output power spectra. Should have shape [ncls][nbpw], where ncls is
defined above and nbpw is the number of bandpowers used to define \p w.
*/
void nmt_couple_cl_l_flat_quick(nmt_workspace_flat *w,int nl,flouble *larr,flouble **cl_in,
flouble **cl_out);
/**
* @brief Inverts mode-coupling matrix
*
* Multiplies coupled power spectra by inverse mode-coupling matrix.
* See notes about power spectrum ordering in the main page of this documentation.
* @param w nmt_workspace_flat containing the mode-coupling matrix.
* @param cl_in Input coupled power spectra. Should have shape [ncls][nbpw], where
\p ncls is the appropriate number of power spectra given the fields used
to define \p w (e.g. 4=2*2 for two spin-2 fields) and \p nbpw is the number
of bandpowers used when defining \p w.
* @param cl_noise_in Noise bias (same shape as \p cl_in).
* @param cl_bias Deprojection bias (same shape as \p cl_in, see nmt_compute_deprojection_bias_flat()).
* @param cl_out Mode-decoupled power spectrum (same shape as \p cl_in).
*/
void nmt_decouple_cl_l_flat(nmt_workspace_flat *w,flouble **cl_in,flouble **cl_noise_in,
flouble **cl_bias,flouble **cl_out);
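/* Sketch of the flat-sky theory prediction described above (hypothetical
 * variable names): couple the input theory spectrum through the MCM, then
 * decouple it together with the noise and deprojection biases.
 *
 *   nmt_couple_cl_l_flat_fast(w, nl, larr, cl_theory, cl_coupled);
 *   nmt_decouple_cl_l_flat(w, cl_coupled, cl_noise, cl_bias, cl_binned);
 */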
/**
* @brief Coupled pseudo-CL
*
* Computes the pseudo-CL power spectrum of two fields without accounting for the mode-coupling
* matrix.
* See notes about power spectrum ordering in the main page of this documentation.
* @param fl1 nmt_field_flat structure defining the first field to correlate.
* @param fl2 nmt_field_flat structure defining the second field to correlate.
* @param bin nmt_binning_scheme_flat defining the power spectrum bandpowers.
* @param lmn_x Lower end of the range of multipoles in the x direction that should be masked.
* @param lmx_x Upper end of the range of multipoles in the x direction that should be masked.
          if \p lmx_x < \p lmn_x, no Fourier-space masking is performed.
* @param lmn_y Same as \p lmn_x for the y direction.
* @param lmx_y Same as \p lmx_x for the y direction.
 * @param cl_out Output power spectrum. Should be allocated to shape [ncls][nbpw], where
\p ncls is the appropriate number of power spectra (e.g. 4=2*2 for two spin-2
fields), and \p nbpw is the number of bandpowers defined by \p bin.
*/
void nmt_compute_coupled_cell_flat(nmt_field_flat *fl1,nmt_field_flat *fl2,
nmt_binning_scheme_flat *bin,flouble **cl_out,
flouble lmn_x,flouble lmx_x,flouble lmn_y,flouble lmx_y);
/**
 * @brief Computes pseudo-CL spectrum.
*
* Wrapper function containing all the steps to compute a power spectrum. For performance
 * reasons, the blind use of this function is discouraged in favour of a smarter combination of
* nmt_workspace_flat structures and nmt_compute_coupled_cell_flat().
* See notes about power spectrum ordering in the main page of this documentation.
* @param fl1 nmt_field_flat structure defining the first field to correlate.
* @param fl2 nmt_field_flat structure defining the second field to correlate.
* @param bin nmt_binning_scheme_flat defining the power spectrum bandpowers.
* @param lmn_x Lower end of the range of multipoles in the x direction that should be masked.
* @param lmx_x Upper end of the range of multipoles in the x direction that should be masked.
          if \p lmx_x < \p lmn_x, no Fourier-space masking is performed.
* @param lmn_y Same as \p lmn_x for the y direction.
* @param lmx_y Same as \p lmx_x for the y direction.
* @param w0 nmt_workspace_flat structure containing the mode-coupling matrix. If NULL, a new
computation of the MCM will be carried out and stored in the output nmt_workspace_flat.
Otherwise, \p w0 will be used and returned by this function.
* @param nl_prop Number of multipoles over which the proposed power spectrum is defined.
* @param l_prop Array of multipoles over which the proposed power spectrum is defined.
* @param cl_prop Proposed power spectrum. Should have shape [ncls][\p nl_prop], where
\p ncls is the appropriate number of power spectra given the spins of the input
fields (e.g. \p ncls = 2*2 = 4 if both fields have spin=2).
* @param cl_noise Noise bias. Should have shape [ncls][nbpw], where \p ncls is
* defined above and \p nbpw is the number of bandpowers defined by \p bin.
 * @param cl_out Output power spectrum. Should be allocated to shape [ncls][nbpw],
where \p ncls is defined above and \p nbpw is the number of bandpowers defined
by \p bin.
* @return Newly allocated nmt_workspace_flat structure containing the mode-coupling matrix
if \p w0 is NULL (will return \p w0 otherwise).
*/
nmt_workspace_flat *nmt_compute_power_spectra_flat(nmt_field_flat *fl1,nmt_field_flat *fl2,
nmt_binning_scheme_flat *bin,
flouble lmn_x,flouble lmx_x,
flouble lmn_y,flouble lmx_y,
nmt_workspace_flat *w0,flouble **cl_noise,
int nl_prop,flouble *l_prop,flouble **cl_prop,
flouble **cl_out);
/**
* @brief Full-sky mode-coupling matrix.
*
* Structure containing information about the mode-coupling matrix (MCM) for full-sky pseudo-CLs.
*/
typedef struct {
int lmax; //!< Maximum multipole used
int lmax_fields; //!< Resolution of fields being correlated.
int lmax_mask; //!< Mask resolution
int is_teb; //!< Does it hold all MCM elements to compute all of spin0-spin0, 0-2 and 2-2 correlations?
  int ncls; //!< Number of power spectra (1, 2 or 4 depending on the spins of the fields being correlated).
nmt_curvedsky_info *cs; //!< curved sky geometry information.
flouble *beam_prod; //!< Product of field beams.
flouble *pcl_masks; //!< Pseudo-CL of the masks.
flouble **coupling_matrix_unbinned; //!< Unbinned mode-coupling matrix
nmt_binning_scheme *bin; //!< Bandpowers defining the binning
gsl_matrix *coupling_matrix_binned; //!< GSL version of MCM (prepared for inversion)
  gsl_permutation *coupling_matrix_perm; //!< Complements \p coupling_matrix_binned for inversion.
} nmt_workspace;
typedef struct {
int lmax;
int lmax_mask;
int npcl;
int s1;
int s2;
int has_00;
flouble ***xi_00;
int has_0s;
flouble ****xi_0s;
int has_ss;
flouble ****xi_pp;
flouble ****xi_mm;
int pure_e1;
int pure_e2;
int pure_b1;
int pure_b2;
int pure_any;
int npure_0s;
int npure_ss;
} nmt_master_calculator;
nmt_master_calculator *nmt_compute_master_coefficients(int lmax, int lmax_mask,
int npcl, flouble **pcl_masks,
int s1, int s2,
int pure_e1, int pure_b1,
int pure_e2, int pure_b2,
int do_teb, int l_toeplitz,
int l_exact, int dl_band);
void nmt_master_calculator_free(nmt_master_calculator *c);
/**
* @brief Computes mode-coupling matrix.
*
* Computes MCM for a given pair of full-sky fields.
* @param fl1 nmt_field structure defining the first field to correlate.
* @param fl2 nmt_field structure defining the second field to correlate.
* @param bin nmt_binning_scheme defining the power spectrum bandpowers.
* @param is_teb if !=0, all mode-coupling matrices (0-0,0-2,2-2) will be computed at the same time.
* @param niter number of iterations when computing alms.
* @param lmax_mask maximum multipole to which the masks should be resolved. If smaller than the maximum multipole of fl1/fl2, it will be set to that.
* @return Newly allocated nmt_workspace structure containing the mode-coupling matrix.
*/
nmt_workspace *nmt_compute_coupling_matrix(nmt_field *fl1,nmt_field *fl2,nmt_binning_scheme *bin,
int is_teb,int niter,int lmax_mask,
int l_toeplitz,int l_exact,int dl_band);
/**
 * @brief Updates the mode-coupling matrix with a new one.
*
* The new matrix must be provided as a single 1D array of size n_rows\f$^2\f$.
* Here n_rows=n_cls * n_ell is the size of the flattened power spectra, where n_cls is the number
 * of power spectra (1, 2 or 4 for spin0-0, spin0-2 and spin2-2 correlations) and n_ell=lmax+1
 * (by default lmax=3*nside-1 for HEALPix, and pi/dx for CAR, where dx is the minimum angular
 * pixel size). The ordering of the power spectra should be such that the
 * l-th element of the i-th power spectrum is stored with index l * n_cls + i.
* @param w nmt_workspace to be updated.
* @param n_rows size of the flattened power spectra.
* @param new_matrix new mode-coupling matrix (flattened).
*/
void nmt_update_coupling_matrix(nmt_workspace *w,int n_rows,double *new_matrix);
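/* Indexing sketch for the flattened ordering described above: with n_cls
 * power spectra and n_ell multipoles, the (l, i) element of the flattened
 * spectrum vector sits at index l * n_cls + i. Assuming a row-major
 * flattening of the matrix itself (an assumption made only for this
 * illustration), the MCM entry coupling (l1, i1) to (l2, i2) would be
 * new_matrix[(l1 * n_cls + i1) * n_rows + (l2 * n_cls + i2)]. */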
/**
* @brief Updates the binning scheme associated to this workspace.
*
* Also rebins the MCM and re-inverts it.
* @param w nmt_workspace to be updated.
* @param bin new nmt_binning_scheme.
*/
void nmt_workspace_update_binning(nmt_workspace *w,
nmt_binning_scheme *bin);
/**
* @brief Updates the beams associated to this workspace.
*
* Also recomputes the binned MCM and its inverse
* @param w workspace.
* @param nl1 Number of elements of b1.
* @param b1 First field's beam (harmonic space). One element per multipole.
 * @param nl2 Number of elements of b2.
* @param b2 Second field's beam (harmonic space). One element per multipole.
*/
void nmt_workspace_update_beams(nmt_workspace *w,
int nl1,double *b1,
int nl2,double *b2);
/**
* @brief nmt_workspace destructor
*/
void nmt_workspace_free(nmt_workspace *w);
/**
* @brief Computes deprojection bias.
*
* Computes contaminant deprojection bias for a pair of fields.
* See notes about power spectrum ordering in the main page of this documentation.
* @param fl1 nmt_field structure defining the first field to correlate.
* @param fl2 nmt_field structure defining the second field to correlate.
* @param cl_proposal Proposed power spectrum. Should have shape [ncls][lmax+1], where
\p ncls is the appropriate number of power spectra given the spins of the input
fields (e.g. \p ncls = 2*2 = 4 if both fields have spin=2).
 * @param cl_bias Output deprojection bias. Should be allocated to shape [ncls][lmax+1],
where \p ncls is defined above.
* @param niter number of iterations when computing alms.
*/
void nmt_compute_deprojection_bias(nmt_field *fl1,nmt_field *fl2,
flouble **cl_proposal,flouble **cl_bias,int niter);
/**
* @brief Noise bias from uncorrelated noise map
*
 * Computes deprojection bias due to a source of uncorrelated noise given an input noise variance map.
* See companion paper for more details.
* @param fl1 nmt_field structure defining the properties of the field for which this noise bias
applies.
* @param map_var Noise variance map (should contain per-pixel noise variance).
 * @param cl_bias Output noise bias. Should be allocated to shape [ncls][lmax+1],
where \p ncls is the appropriate number of power spectra given the spins of the input
fields (e.g. \p ncls = 2*2 = 4 if both fields have spin=2).
* @param niter number of iterations when computing alms.
*/
void nmt_compute_uncorr_noise_deprojection_bias(nmt_field *fl1,flouble *map_var,flouble **cl_bias,
int niter);
/**
* @brief Mode-couples an input power spectrum
*
 * This function applies the mode coupling induced by the pseudo-CL estimator to a given
* input power spectrum. This function should be used in conjunction with nmt_decouple_cl_l()
* to compute the theory prediction of the pseudo-CL estimator.
* See notes about power spectrum ordering in the main page of this documentation.
* @param w nmt_workspace structure containing the mode-coupling matrix
* @param cl_in Array of input power spectra. Should have shape [ncls][lmax+1], where ncls
is the appropriate number of power spectra given the fields being correlated
(e.g. ncls=4=2*2 for two spin-2 fields).
* @param cl_out Array of output power spectra. Should have shape [ncls][lmax+1], where
ncls is defined above.
*/
void nmt_couple_cl_l(nmt_workspace *w,flouble **cl_in,flouble **cl_out);
/**
* @brief Inverts mode-coupling matrix
*
* Multiplies coupled power spectra by inverse mode-coupling matrix.
* See notes about power spectrum ordering in the main page of this documentation.
* @param w nmt_workspace containing the mode-coupling matrix.
* @param cl_in Input coupled power spectra. Should have shape [ncls][lmax+1], where
\p ncls is the appropriate number of power spectra given the fields used
to define \p w (e.g. 4=2*2 for two spin-2 fields).
* @param cl_noise_in Noise bias (same shape as \p cl_in).
* @param cl_bias Deprojection bias (same shape as \p cl_in, see nmt_compute_deprojection_bias()).
* @param cl_out Mode-decoupled power spectrum. Should have shape [ncls][nbpw], where
ncls is defined above and nbpw is the number of bandpowers used to define \p w.
*/
void nmt_decouple_cl_l(nmt_workspace *w,flouble **cl_in,flouble **cl_noise_in,
flouble **cl_bias,flouble **cl_out);
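/* Theory-prediction sketch for the full-sky estimator (hypothetical names):
 * couple the unbinned theory spectrum with nmt_couple_cl_l(), then decouple
 * it; cl_zero stands for arrays of zeros if no noise or deprojection bias
 * is needed.
 *
 *   nmt_couple_cl_l(w, cl_theory, cl_theory_coupled);
 *   nmt_decouple_cl_l(w, cl_theory_coupled, cl_zero, cl_zero, cl_theory_binned);
 */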
/**
* @brief Returns the bandpower window functions for this workspace.
*
* This function returns, in a flattened array, the bandpower window functions associated with the
* mode-coupling matrix stored in this workspace. The effect of the PCL estimator on the unbinned
* theory prediction can be fully accounted for by convolving it with these window functions.
* @param w nmt_workspace containing the mode-coupling matrix.
* @param bpw_win_out output 1D array allocated to the right size (n_cls * n_bpw * n_cls * (lmax+1)).
*/
void nmt_compute_bandpower_windows(nmt_workspace *w,double *bpw_win_out);
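/* Indexing sketch (layout assumed for illustration, in the order of the
 * dimensions listed above): the weight with which multipole l of spectrum
 * i2 enters bandpower b of spectrum i1 would be
 * bpw_win_out[((i1 * n_bpw + b) * n_cls + i2) * (lmax + 1) + l]. */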
/**
* @brief Coupled pseudo-CL
*
* Computes the pseudo-CL power spectrum of two fields without accounting for the mode-coupling
* matrix. This is essentially equivalent to running HEALPix's 'anafast' on the purified and
* contaminant-deprojected input fields.
* See notes about power spectrum ordering in the main page of this documentation.
* @param fl1 nmt_field structure defining the first field to correlate.
* @param fl2 nmt_field structure defining the second field to correlate.
 * @param cl_out Output power spectrum. Should be allocated to shape [ncls][lmax+1], where
\p ncls is the appropriate number of power spectra (e.g. 4=2*2 for two spin-2 fields).
*/
void nmt_compute_coupled_cell(nmt_field *fl1,nmt_field *fl2,flouble **cl_out);
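/* Typical data pipeline sketch (hypothetical names), combining the two
 * functions above with a precomputed workspace w:
 *
 *   nmt_compute_coupled_cell(f1, f2, cl_coupled);
 *   nmt_decouple_cl_l(w, cl_coupled, cl_noise, cl_bias, cl_decoupled);
 */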
/**
 * @brief Computes pseudo-CL spectrum.
*
* Wrapper function containing all the steps to compute a power spectrum. For performance
 * reasons, the blind use of this function is discouraged in favour of a smarter combination of
* nmt_workspace structures and nmt_compute_coupled_cell().
* See notes about power spectrum ordering in the main page of this documentation.
* @param fl1 nmt_field structure defining the first field to correlate.
* @param fl2 nmt_field structure defining the second field to correlate.
* @param bin nmt_binning_scheme defining the power spectrum bandpowers.
* @param w0 nmt_workspace structure containing the mode-coupling matrix. If NULL, a new
computation of the MCM will be carried out and stored in the output nmt_workspace.
Otherwise, \p w0 will be used and returned by this function.
* @param cl_proposal Proposed power spectrum. Should have shape [ncls][lmax+1], where
\p ncls is the appropriate number of power spectra given the spins of the input
fields (e.g. \p ncls = 2*2 = 4 if both fields have spin=2).
 * @param cl_noise Noise bias (same shape as \p cl_proposal).
 * @param cl_out Output power spectrum. Should be allocated to shape [ncls][nbpw],
where \p ncls is defined above and \p nbpw is the number of bandpowers defined
by \p bin.
* @param niter number of iterations when computing alms.
* @param lmax_mask maximum multipole to which the masks should be resolved. If smaller than the maximum multipole of fl1/fl2, it will be set to that.
* @return Newly allocated nmt_workspace structure containing the mode-coupling matrix
if \p w0 is NULL (will return \p w0 otherwise).
*/
nmt_workspace *nmt_compute_power_spectra(nmt_field *fl1,nmt_field *fl2,
nmt_binning_scheme *bin,nmt_workspace *w0,
flouble **cl_noise,flouble **cl_proposal,flouble **cl_out,
int niter,int lmax_mask,int l_toeplitz,
int l_exact,int dl_band);
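/* Usage sketch (hypothetical names): the first call builds and returns the
 * workspace; later calls reuse it for fields sharing the same masks.
 * niter=3 and the Toeplitz-approximation parameters (l_toeplitz, l_exact,
 * dl_band) are placeholders, not recommended values.
 *
 *   nmt_workspace *w = nmt_compute_power_spectra(f1, f2, bin, NULL,
 *                                                cl_noise, cl_prop, cl_out,
 *                                                3, lmax_mask, l_toeplitz,
 *                                                l_exact, dl_band);
 *   nmt_compute_power_spectra(f3, f4, bin, w, cl_noise2, cl_prop2, cl_out2,
 *                             3, lmax_mask, l_toeplitz, l_exact, dl_band);
 */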
/**
* @brief Flat-sky Gaussian covariance matrix
*
* Structure containing the information necessary to compute Gaussian covariance matrices
* for the pseudo-CL spectra of two flat-sky spin-0 fields.
*
*/
typedef struct {
nmt_binning_scheme_flat *bin; //!< Bandpowers defining the binning
flouble **xi00_1122; //!< First (a1b1-a2b2), 00, mode coupling matrix (see scientific documentation)
flouble **xi00_1221; //!< Second (a1b2-a2b1), 00, mode coupling matrix (see scientific documentation)
flouble **xi02_1122; //!< First (a1b1-a2b2), 02, mode coupling matrix (see scientific documentation)
flouble **xi02_1221; //!< Second (a1b2-a2b1), 02, mode coupling matrix (see scientific documentation)
flouble **xi22p_1122; //!< First (a1b1-a2b2), 22p, mode coupling matrix (see scientific documentation)
flouble **xi22p_1221; //!< Second (a1b2-a2b1), 22p, mode coupling matrix (see scientific documentation)
flouble **xi22m_1122; //!< First (a1b1-a2b2), 22m, mode coupling matrix (see scientific documentation)
flouble **xi22m_1221; //!< Second (a1b2-a2b1), 22m, mode coupling matrix (see scientific documentation)
} nmt_covar_workspace_flat;
/**
* @brief nmt_covar_workspace_flat destructor.
*/
void nmt_covar_workspace_flat_free(nmt_covar_workspace_flat *cw);
/**
* @brief nmt_covar_workspace_flat constructor
*
 * Builds an nmt_covar_workspace_flat structure from two pairs of nmt_field_flat structures, corresponding
 * to the two sets of power spectra for which the covariance is required.
 * @param fla1 nmt_field_flat for the first field going into the first (a-th) power spectrum.
 * @param fla2 nmt_field_flat for the second field going into the first (a-th) power spectrum.
 * @param flb1 nmt_field_flat for the first field going into the second (b-th) power spectrum.
 * @param flb2 nmt_field_flat for the second field going into the second (b-th) power spectrum.
* @param ba nmt_binning_scheme_flat used for the first power spectrum.
* @param bb nmt_binning_scheme_flat used for the second power spectrum.
*/
nmt_covar_workspace_flat *nmt_covar_workspace_flat_init(nmt_field_flat *fla1,nmt_field_flat *fla2,
nmt_binning_scheme_flat *ba,
nmt_field_flat *flb1,nmt_field_flat *flb2,
nmt_binning_scheme_flat *bb);
/**
* @brief Compute flat-sky Gaussian covariance matrix
*
* Computes the covariance matrix for two sets of power spectra given input predicted spectra
* and two nmt_covar_workspace_flat structures.
* @param cw nmt_covar_workspace_flat structure containing the information necessary to compute the
covariance matrix.
* @param spin_a field a spin.
* @param spin_b field b spin.
* @param spin_c field c spin.
* @param spin_d field d spin.
* @param wa nmt_workspace_flat structure containing the mode-coupling matrix for the first power spectra (between fields a and b).
* @param wb nmt_workspace_flat structure containing the mode-coupling matrix for the second power spectra (between fields c and d).
* @param nl Number of multipoles in which input power spectra are computed.
* @param larr Array of multipoles in which input power spectra are computed.
* @param clac Cross-power spectra between field 1 in the first set and field 1 in the second set (ac)
* @param clad Cross-power spectra between field 1 in the first set and field 2 in the second set (ad)
* @param clbc Cross-power spectra between field 2 in the first set and field 1 in the second set (bc)
* @param clbd Cross-power spectra between field 2 in the first set and field 2 in the second set (bd)
* @param covar_out flattened covariance matrix. Should be allocated to shape [ncls_1 * nbpw_1 * ncls_2 * nbpw_2],
where nbpw_X and ncls_X are the number of bandpowers and different power spectra in the X-th set of fields.
*/
void nmt_compute_gaussian_covariance_flat(nmt_covar_workspace_flat *cw,
int spin_a,int spin_b,int spin_c,int spin_d,
nmt_workspace_flat *wa,nmt_workspace_flat *wb,
int nl,flouble *larr,
flouble **clac,flouble **clad,
flouble **clbc,flouble **clbd,flouble *covar_out);
/**
* @brief Full-sky Gaussian covariance matrix
*
* Structure containing the information necessary to compute Gaussian covariance matrices
* for the pseudo-CL spectra of two full-sky spin-0 fields.
*/
typedef struct {
int lmax; //!< Maximum multipole for the first set of power spectra
flouble **xi00_1122; //!< First (a1b1-a2b2), 00, mode coupling matrix (see scientific documentation)
flouble **xi00_1221; //!< Second (a1b2-a2b1), 00, mode coupling matrix (see scientific documentation)
flouble **xi02_1122; //!< First (a1b1-a2b2), 02, mode coupling matrix (see scientific documentation)
flouble **xi02_1221; //!< Second (a1b2-a2b1), 02, mode coupling matrix (see scientific documentation)
flouble **xi22p_1122; //!< First (a1b1-a2b2), 22+, mode coupling matrix (see scientific documentation)
flouble **xi22p_1221; //!< Second (a1b2-a2b1), 22+, mode coupling matrix (see scientific documentation)
flouble **xi22m_1122; //!< First (a1b1-a2b2), 22-, mode coupling matrix (see scientific documentation)
flouble **xi22m_1221; //!< Second (a1b2-a2b1), 22-, mode coupling matrix (see scientific documentation)
} nmt_covar_workspace;
/**
* @brief nmt_covar_workspace destructor.
*/
void nmt_covar_workspace_free(nmt_covar_workspace *cw);
/**
* @brief nmt_covar_workspace constructor
*
* Builds an nmt_covar_workspace structure from two pairs of nmt_field structures, corresponding
* to the two sets of power spectra for which the covariance is required.
* @param fla1 nmt_field for the first field going into the first (a-th) power spectrum.
* @param fla2 nmt_field for the second field going into the first (a-th) power spectrum.
* @param flb1 nmt_field for the first field going into the second (b-th) power spectrum.
* @param flb2 nmt_field for the second field going into the second (b-th) power spectrum.
* @param lmax maximum multipole up to which the coupling coefficients will be calculated.
* @param niter number of iterations when computing alms.
*/
nmt_covar_workspace *nmt_covar_workspace_init(nmt_field *fla1,nmt_field *fla2,
nmt_field *flb1,nmt_field *flb2,
int lmax,int niter,
int l_toeplitz,int l_exact,int dl_band);
/**
* @brief Compute full-sky Gaussian covariance matrix
*
* Computes the covariance matrix for two sets of power spectra given input predicted spectra
* and a nmt_covar_workspace structure.
* @param cw nmt_covar_workspace structure containing the information necessary to compute the
covariance matrix.
* @param spin_a field a spin.
* @param spin_b field b spin.
* @param spin_c field c spin.
* @param spin_d field d spin.
* @param wa nmt_workspace structure containing the mode-coupling matrix for the first power spectra.
* @param wb nmt_workspace structure containing the mode-coupling matrix for the second power spectra.
* @param clac Cross-power spectra between field 1 in the first set and field 1 in the second set (ac)
All power spectra should be defined for all ell < lmax.
* @param clad Cross-power spectra between field 1 in the first set and field 2 in the second set (ad)
* @param clbc Cross-power spectra between field 2 in the first set and field 1 in the second set (bc)
* @param clbd Cross-power spectra between field 2 in the first set and field 2 in the second set (bd)
* @param covar_out flattened covariance matrix. Should be allocated to shape [ncls_1 * nbpw_1 * ncls_2 * nbpw_2],
where nbpw_X and ncls_X are the number of bandpowers and different power spectra in the X-th set of fields.
*/
void nmt_compute_gaussian_covariance(nmt_covar_workspace *cw,
int spin_a,int spin_b,int spin_c,int spin_d,
nmt_workspace *wa,nmt_workspace *wb,
flouble **clac,flouble **clad,
flouble **clbc,flouble **clbd,
flouble *covar_out);
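/* Covariance sketch (hypothetical names): build the coupling coefficients
 * once, then evaluate the Gaussian covariance for a given set of
 * cross-spectra. The spins (0 here), niter=3 and the Toeplitz parameters
 * are placeholders for the example.
 *
 *   nmt_covar_workspace *cw = nmt_covar_workspace_init(fa1, fa2, fb1, fb2,
 *                                                      lmax, 3, l_toeplitz,
 *                                                      l_exact, dl_band);
 *   nmt_compute_gaussian_covariance(cw, 0, 0, 0, 0, wa, wb,
 *                                   clac, clad, clbc, clbd, covar);
 */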
/**
* @brief Compute full-sky Gaussian covariance matrix
*
* Computes the covariance matrix for two sets of power spectra given input predicted spectra
* and a nmt_covar_workspace structure. Calculation done for the mode-coupled pseudo-Cls.
* @param cw nmt_covar_workspace structure containing the information necessary to compute the
covariance matrix.
* @param spin_a field a spin.
* @param spin_b field b spin.
* @param spin_c field c spin.
* @param spin_d field d spin.
* @param wa nmt_workspace structure containing the mode-coupling matrix for the first power spectra.
* @param wb nmt_workspace structure containing the mode-coupling matrix for the second power spectra.
* @param clac Cross-power spectra between field 1 in the first set and field 1 in the second set (ac)
All power spectra should be defined for all ell < lmax.
* @param clad Cross-power spectra between field 1 in the first set and field 2 in the second set (ad)
* @param clbc Cross-power spectra between field 2 in the first set and field 1 in the second set (bc)
* @param clbd Cross-power spectra between field 2 in the first set and field 2 in the second set (bd)
* @param covar_out flattened covariance matrix. Should be allocated to shape [ncls_1 * nbpw_1 * ncls_2 * nbpw_2],
where nbpw_X and ncls_X are the number of bandpowers and different power spectra in the X-th set of fields.
*/
void nmt_compute_gaussian_covariance_coupled(nmt_covar_workspace *cw,
int spin_a,int spin_b,int spin_c,int spin_d,
nmt_workspace *wa,nmt_workspace *wb,
flouble **clac,flouble **clad,
flouble **clbc,flouble **clbd,
flouble *covar_out);
/**
* @brief Saves nmt_workspace structure to file
*
* The output file uses the FITS standard. In combination with nmt_workspace_read_fits(),
* this can be used to save the information contained in a given workspace and reuse it for
* future power spectrum computations. The same workspace can be used on any pair of fields
* with the same masks.
* @param w nmt_workspace to be saved.
* @param fname Path to output file.
*/
void nmt_workspace_write_fits(nmt_workspace *w,char *fname);
/**
* @brief Builds nmt_workspace structure from file
*
* The input file uses the FITS standard. In combination with nmt_workspace_write_fits(),
* this can be used to save the information contained in a given workspace and reuse it for
* future power spectrum computations. The same workspace can be used on any pair of fields
* with the same masks.
* @param fname Path to input file.
*/
nmt_workspace *nmt_workspace_read_fits(char *fname);
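/* Persistence sketch (illustrative file name): save a workspace once and
 * reuse it in later runs on any pair of fields sharing the same masks.
 *
 *   nmt_workspace_write_fits(w, "wsp.fits");
 *   ...
 *   nmt_workspace *w2 = nmt_workspace_read_fits("wsp.fits");
 */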
/**
* @brief Builds nmt_workspace_flat structure from file
*
* The input file uses the FITS standard. In combination with nmt_workspace_flat_write_fits(),
* this can be used to save the information contained in a given workspace and reuse it for
* future power spectrum computations. The same workspace can be used on any pair of fields
* with the same masks.
* @param fname Path to input file.
*/
nmt_workspace_flat *nmt_workspace_flat_read_fits(char *fname);
/**
* @brief Saves nmt_workspace_flat structure to file
*
* The output file uses the FITS standard. In combination with nmt_workspace_flat_read_fits(),
* this can be used to save the information contained in a given workspace and reuse it for
* future power spectrum computations. The same workspace can be used on any pair of fields
* with the same masks.
* @param w nmt_workspace_flat to be saved.
* @param fname Path to output file.
*/
void nmt_workspace_flat_write_fits(nmt_workspace_flat *w,char *fname);
/**
* @brief Saves nmt_covar_workspace structure to file
*
* The output file uses the FITS standard. In combination with nmt_covar_workspace_read_fits(),
* this can be used to save the information contained in a given workspace and reuse it for
* future covariance matrix computations. The same workspace can be used on any pair of power spectra
* between fields with the same masks.
* @param cw nmt_covar_workspace to be saved.
* @param fname Path to output file.
*/
void nmt_covar_workspace_write_fits(nmt_covar_workspace *cw,char *fname);
/**
* @brief Builds nmt_covar_workspace structure from file
*
* The input file uses the FITS standard. In combination with nmt_covar_workspace_write_fits(),
* this can be used to save the information contained in a given workspace and reuse it for
* future covariance matrix computations. The same workspace can be used on any pair of power spectra
* between fields with the same masks.
* @param fname Path to input file.
*/
nmt_covar_workspace *nmt_covar_workspace_read_fits(char *fname);
/**
* @brief Saves nmt_covar_workspace_flat structure to file
*
* The output file uses the FITS standard. In combination with nmt_covar_workspace_flat_read_fits(),
* this can be used to save the information contained in a given workspace and reuse it for
* future covariance matrix computations. The same workspace can be used on any pair of power spectra
* between fields with the same masks.
* @param cw nmt_covar_workspace_flat to be saved.
* @param fname Path to output file.
*/
void nmt_covar_workspace_flat_write_fits(nmt_covar_workspace_flat *cw,char *fname);
/**
* @brief Builds nmt_covar_workspace_flat structure from file
*
* The input file uses the FITS standard. In combination with nmt_covar_workspace_flat_write_fits(),
* this can be used to save the information contained in a given workspace and reuse it for
* future covariance matrix computations. The same workspace can be used on any pair of power spectra
* between fields with the same masks.
* @param fname Path to input file.
*/
nmt_covar_workspace_flat *nmt_covar_workspace_flat_read_fits(char *fname);
#endif //_NAMASTER_H_
|
/-
Copyright (c) 2021 Andrew Yang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Andrew Yang
-/
import ring_theory.ring_hom_properties
/-!
# The meta properties of finite ring homomorphisms.
-/
namespace ring_hom
open_locale tensor_product
open tensor_product algebra.tensor_product
lemma finite_stable_under_composition :
stable_under_composition @finite :=
by { introv R hf hg, exactI hg.comp hf }
lemma finite_respects_iso :
respects_iso @finite :=
begin
apply finite_stable_under_composition.respects_iso,
introsI,
exact finite.of_surjective _ e.to_equiv.surjective,
end
lemma finite_stable_under_base_change :
stable_under_base_change @finite :=
begin
refine stable_under_base_change.mk _ finite_respects_iso _,
classical,
introv h,
resetI,
replace h : module.finite R T := by { convert h, ext, rw algebra.smul_def, refl },
suffices : module.finite S (S ⊗[R] T),
{ change module.finite _ _, convert this, ext, rw algebra.smul_def, refl },
exactI infer_instance
end
end ring_hom
|
using Interp1d
using Random
@testset "duplicated exception test" begin
x = [0.0, 0.0, 3.0/2.0]
y = [2.0, 1.0, 3.0]
for mode in INTERP_MODE_LIST
@test_throws ArgumentError interp(x, y, mode);
end
end
|
module Text.Markup.Edda.CommonMark
import public Text.Markup.Edda.Model
import public Text.Markup.Edda.Walk
import public Text.Markup.Edda.Query
import public Text.Markup.Edda.Reader.CommonMark
import public Text.Markup.Edda.Writer.CommonMark
|
/-
Copyright (c) 2017 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Leonardo de Moura
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.data.nat.default
import Mathlib.Lean3Lib.init.data.fin.basic
namespace Mathlib
namespace fin
protected def succ {n : ℕ} : fin n → fin (Nat.succ n) := sorry
def of_nat {n : ℕ} (a : ℕ) : fin (Nat.succ n) := { val := a % Nat.succ n, property := sorry }
protected def add {n : ℕ} : fin n → fin n → fin n := sorry
protected def mul {n : ℕ} : fin n → fin n → fin n := sorry
protected def sub {n : ℕ} : fin n → fin n → fin n := sorry
protected def mod {n : ℕ} : fin n → fin n → fin n := sorry
protected def div {n : ℕ} : fin n → fin n → fin n := sorry
protected instance has_zero {n : ℕ} : HasZero (fin (Nat.succ n)) :=
{ zero := { val := 0, property := nat.succ_pos n } }
protected instance has_one {n : ℕ} : HasOne (fin (Nat.succ n)) := { one := of_nat 1 }
protected instance has_add {n : ℕ} : Add (fin n) := { add := fin.add }
protected instance has_sub {n : ℕ} : Sub (fin n) := { sub := fin.sub }
protected instance has_mul {n : ℕ} : Mul (fin n) := { mul := fin.mul }
protected instance has_mod {n : ℕ} : Mod (fin n) := { mod := fin.mod }
protected instance has_div {n : ℕ} : Div (fin n) := { div := fin.div }
theorem of_nat_zero {n : ℕ} : of_nat 0 = 0 := rfl
theorem add_def {n : ℕ} (a : fin n) (b : fin n) :
subtype.val (a + b) = (subtype.val a + subtype.val b) % n :=
sorry
theorem mul_def {n : ℕ} (a : fin n) (b : fin n) :
subtype.val (a * b) = subtype.val a * subtype.val b % n :=
sorry
theorem sub_def {n : ℕ} (a : fin n) (b : fin n) :
subtype.val (a - b) = subtype.val a - subtype.val b :=
sorry
theorem mod_def {n : ℕ} (a : fin n) (b : fin n) :
subtype.val (a % b) = subtype.val a % subtype.val b :=
sorry
theorem div_def {n : ℕ} (a : fin n) (b : fin n) :
subtype.val (a / b) = subtype.val a / subtype.val b :=
sorry
theorem lt_def {n : ℕ} (a : fin n) (b : fin n) : a < b = (subtype.val a < subtype.val b) := sorry
theorem le_def {n : ℕ} (a : fin n) (b : fin n) : a ≤ b = (subtype.val a ≤ subtype.val b) := sorry
theorem val_zero {n : ℕ} : subtype.val 0 = 0 := rfl
def pred {n : ℕ} (i : fin (Nat.succ n)) : i ≠ 0 → fin n := sorry
end Mathlib
|
[STATEMENT]
lemma restrict_map_disj:
"S \<inter> T = {} \<Longrightarrow> h |` S \<bottom> h |` T"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. S \<inter> T = {} \<Longrightarrow> h |` S \<bottom> h |` T
[PROOF STEP]
by (auto simp: map_disj_def restrict_map_def dom_def)
|
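# Benford's-law check on the leading digits of the first 1000 Fibonacci
# numbers: compares the observed leading-digit frequencies against the
# expected log10(1 + 1/d) distribution and reports the deviation in percent.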
pbenford <- function(d){
return(log10(1+(1/d)))
}
get_lead_digit <- function(number){
return(as.numeric(substr(number,1,1)))
}
fib_iter <- function(n){
first <- 1
second <- 0
for(i in 1:n){
sum <- first + second
first <- second
second <- sum
}
return(sum)
}
fib_sequence <- mapply(fib_iter,c(1:1000))
lead_digits <- mapply(get_lead_digit,fib_sequence)
observed_frequencies <- table(lead_digits)/1000
expected_frequencies <- mapply(pbenford,c(1:9))
data <- data.frame(observed_frequencies,expected_frequencies)
colnames(data) <- c("digit","obs.frequency","exp.frequency")
dev_percentage <- abs((data$obs.frequency-data$exp.frequency)*100)
data <- data.frame(data,dev_percentage)
print(data)
|
/-
Copyright (c) 2018 Johan Commelin. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johan Commelin
Nonnegative real numbers.
-/
import data.real.nnreal analysis.real analysis.topology.infinite_sum
noncomputable theory
open set topological_space
namespace nnreal
local notation ` ℝ≥0 ` := nnreal
instance : metric_space ℝ≥0 := by unfold nnreal; apply_instance
instance : topological_space ℝ≥0 := infer_instance
instance : topological_semiring ℝ≥0 :=
{ continuous_mul :=
continuous_subtype_mk _
(continuous_mul (continuous.comp continuous_fst continuous_subtype_val)
(continuous.comp continuous_snd continuous_subtype_val)),
continuous_add :=
continuous_subtype_mk _
(continuous_add (continuous.comp continuous_fst continuous_subtype_val)
(continuous.comp continuous_snd continuous_subtype_val)) }
instance : orderable_topology ℝ≥0 :=
⟨ le_antisymm
begin
apply induced_le_iff_le_coinduced.2,
rw [orderable_topology.topology_eq_generate_intervals ℝ],
apply generate_from_le,
assume s hs,
rcases hs with ⟨a, rfl | rfl⟩,
{ show topological_space.generate_open _ {b : ℝ≥0 | a < b },
by_cases ha : 0 ≤ a,
{ exact topological_space.generate_open.basic _ ⟨⟨a, ha⟩, or.inl rfl⟩ },
{ have : a < 0, from lt_of_not_ge ha,
have : {b : ℝ≥0 | a < b } = set.univ,
from (set.eq_univ_iff_forall.2 $ assume b, lt_of_lt_of_le this b.2),
rw [this],
exact topological_space.generate_open.univ _ } },
{ show (topological_space.generate_from _).is_open {b : ℝ≥0 | a > b },
by_cases ha : 0 ≤ a,
{ exact topological_space.generate_open.basic _ ⟨⟨a, ha⟩, or.inr rfl⟩ },
{ have : {b : ℝ≥0 | a > b } = ∅,
from (set.eq_empty_iff_forall_not_mem.2 $ assume b hb, ha $
show 0 ≤ a, from le_trans b.2 (le_of_lt hb)),
rw [this],
apply @is_open_empty } },
end
(generate_from_le $ assume s hs,
match s, hs with
| _, ⟨⟨a, ha⟩, or.inl rfl⟩ := ⟨{b : ℝ | a < b}, is_open_lt' a, rfl⟩
| _, ⟨⟨a, ha⟩, or.inr rfl⟩ := ⟨{b : ℝ | b < a}, is_open_gt' a, set.ext $ assume b, iff.refl _⟩
end) ⟩
section coe
variable {α : Type*}
open filter
lemma continuous_of_real : continuous nnreal.of_real :=
continuous_subtype_mk _ $ continuous_max continuous_id continuous_const
lemma continuous_coe : continuous (coe : nnreal → ℝ) :=
continuous_subtype_val
lemma tendsto_coe {f : filter α} {m : α → nnreal} :
∀{x : nnreal}, tendsto (λa, (m a : ℝ)) f (nhds (x : ℝ)) ↔ tendsto m f (nhds x)
| ⟨r, hr⟩ := by rw [nhds_subtype_eq_comap, tendsto_comap_iff]; refl
lemma tendsto_of_real {f : filter α} {m : α → ℝ} {x : ℝ} (h : tendsto m f (nhds x)):
tendsto (λa, nnreal.of_real (m a)) f (nhds (nnreal.of_real x)) :=
h.comp (continuous_iff_tendsto.1 continuous_of_real _)
lemma tendsto_sub {f : filter α} {m n : α → nnreal} {r p : nnreal}
(hm : tendsto m f (nhds r)) (hn : tendsto n f (nhds p)) :
tendsto (λa, m a - n a) f (nhds (r - p)) :=
tendsto_of_real $ tendsto_sub (tendsto_coe.2 hm) (tendsto_coe.2 hn)
lemma is_sum_coe {f : α → nnreal} {r : nnreal} : is_sum (λa, (f a : ℝ)) (r : ℝ) ↔ is_sum f r :=
by simp [is_sum, sum_coe.symm, tendsto_coe]
lemma has_sum_coe {f : α → nnreal} : has_sum (λa, (f a : ℝ)) ↔ has_sum f :=
begin
simp [has_sum],
split,
exact assume ⟨a, ha⟩, ⟨⟨a, is_sum_le (λa, (f a).2) is_sum_zero ha⟩, is_sum_coe.1 ha⟩,
exact assume ⟨a, ha⟩, ⟨a.1, is_sum_coe.2 ha⟩
end
lemma tsum_coe {f : α → nnreal} (hf : has_sum f) : (∑a, (f a : ℝ)) = ↑(∑a, f a) :=
tsum_eq_is_sum $ is_sum_coe.2 $ is_sum_tsum $ hf
end coe
end nnreal
|
it's still winter, darnit.
I know, I haven't updated this spot in weeks! It's been busy around here. I think I can officially say that February is my busiest month. Seriously, everyone wants me to sew something or alter something for them all at the same time! I'm not complaining! I'm really grateful I have a lot of work, but I admit there was a moment a few weeks ago when I broke down because I was feeling totally overwhelmed. But I actually think that the break-down helped, because I started thinking about how un-productive I had been. I'm feeling a lot better about my schedule these days and I'm putting together a budget. (Did I mention I finished my taxes and I got a REFUND?!!!!! This never happens. NEVER.) PLUS, I am starting some new clothing patterns and hope to have some made for spring/summer. I've got about a trillion ideas up my sleeve with absolutely no time for any of them. This seems to happen a lot... About my clothing "line" or whatever you want to call it, I'm basically just going to make clothing I would wear. So it might not all coordinate, but we'll see. Maybe it will! I'm getting lots of inspiration from past collections of jackson, johnston & roe (i am jealous they came up with that name and I didn't...I love it!). I am not crazy about their current collection, except I love those button-crotch shirts (or whatever they call them). I have a 1940's pattern for a similar shirt and it sounds like a super nifty idea!
Okay, times up, I have to get to work. Have a great Wednesday! Hopefully I will post again before spring! Also, I have a cinnamon roll recipe I've been meaning to share... One of these days I'll get around to that too.
|
= = = Slayer = = =
|
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Product name: redemption, a FLOSS RDP proxy
* Copyright (C) Wallix 2010-2013
* Author(s): Christophe Grosjean, Raphael Zhou, Jonathan Poelen, Meng Tan
*/
#pragma once
#include <iostream>
#include <boost/timer.hpp>
class display_timer
{
boost::timer timer;
public:
display_timer()
{}
~display_timer()
{
const double elapsed = this->timer.elapsed();
std::ios::fmtflags old_flags = std::cout.setf(std::istream::fixed, std::istream::floatfield);
std::streamsize old_prec = std::cout.precision(2);
std::cout << elapsed << " s" << std::endl;
std::cout.flags(old_flags);
std::cout.precision(old_prec);
}
};
template<typename Test>
class basic_benchmark
{
const char * name_test;
Test test;
public:
explicit basic_benchmark(const char * name, const Test & t = Test())
: name_test(name)
, test(t)
{}
bool operator()(unsigned n, const char * s) const
{
std::cout << this->name_test << ":\t";
if (!this->test.check_pre_condition(s)) {
std::cout << "none\n";
return false;
}
display_timer timer;
for (unsigned i = 0; i < n; ++i) {
this->test.exec(s);
}
return true;
}
};
template<typename Test>
bool test(const basic_benchmark<Test> & bench, unsigned n, const char * s)
{
return bench(n, s);
}
struct basic_test
{
bool check_pre_condition(const char *) const
{ return true; }
void exec(const char *) const
{}
};
|
-- |
-- Module : Main
-- Description : Entry point module
-- Copyright : (c) Jonatan H Sundqvist, 2015
-- License : MIT
-- Maintainer : [email protected]
-- Stability : experimental
-- Portability : POSIX
-- TODO | - JSON key bindings and config
-- - Sharing scores and recordings
-- - Buying premade assets, scores, sound fonts, etc. (user content)
-- - Audio!
-- - 3D audio (move listener around)
-- - Serialisation
-- - IO Lenses (?)
-- - Better way of dealing with state, sharing IO resources
-- - Restrict access to AppState, 'fenced off' IORefs
--------------------------------------------------------------------------------------------------------------------------------------------
-- GHC pragmas
--------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------
-- API
--------------------------------------------------------------------------------------------------------------------------------------------
module Main where
--------------------------------------------------------------------------------------------------------------------------------------------
-- We'll need these
--------------------------------------------------------------------------------------------------------------------------------------------
import Control.Monad (forM_, when, forever, liftM)
import Control.Concurrent (threadDelay, forkIO)
import Control.Concurrent.MVar
import Text.Printf
import Data.IORef
import Data.Complex
import Data.StateVar
import Data.Functor
import qualified Data.Set as S
import qualified Data.Map as M
import qualified Graphics.Rendering.Cairo as Cairo
import Graphics.UI.Gtk as Gtk
import Graphics.UI.Gtk (AttrOp(..), on)
import Sound.OpenAL
-- import qualified Sound.ALUT as Alut
-- Internal module imports
import BattleHack.Types
import BattleHack.Lenses
import qualified BattleHack.Render as Render
import qualified BattleHack.Events as Events
import qualified BattleHack.Audio as Audio
import qualified BattleHack.Window as Window
--------------------------------------------------------------------------------------------------------------------------------------------
-- Entry point
--------------------------------------------------------------------------------------------------------------------------------------------
-- |
main :: IO ()
main = do
-- Create and configure window
(window, canvas) <- Window.create winsize
-- Audio
-- TODO: Utility function for accessing range indeces
-- TODO: Don't hard-code range
-- TODO: Check for errors
-- Just (context, device) <- Audio.setup -- TODO: Return context as well (probably a good idea) (✓)
-- [source] <- genObjectNames 1
-- claviature' <- Audio.makebuffersFromIndeces (zipWith const [0..] $ replicate 24 False)
-- App state
stateref <- newIORef (initalstate 24 origin' keysize') -- claviature')
--
-- mnotes <- newMVar . (-->piano.keys) <$> readIORef stateref
mnotes <- readIORef stateref >>= (newMVar . (-->piano.keys))
-- mnotes <- return [True, True, False, True] >>= newMVar -- (newMVar . (-->piano.keys))
-- forkIO $ Audio.stream (1.0/30.0) source mnotes
let audiofps = 5
forkIO $ Audio.stream (1.0/audiofps) mnotes
forkIO $ forever $ do
putStrLn "New write iteration"
playing <- (-->piano.keys) <$> readIORef stateref
putMVar mnotes playing
threadDelay . ceiling $ (1.0/audiofps) * 10^6
printf "Next batch coming up!\n"
printf "These notes: %s.\n\n\n\n" (show playing)
-- Register event handlers
Window.bindevents window canvas stateref
-- Animation
timeoutAdd (Events.onanimate canvas stateref) (1000 `div` fps)
-- Enter main loop
mainGUI
where
fps = 30 --
origin'@(ox:+oy) = 20:+20 --
keysize'@(sx:+sy) = (4:+13) * 40 --
winsize = (sx*7:+sy) + 2*origin' --
-- | Initial application state
-- TODO: Piano range
initalstate :: Int -> Vector -> Vector -> AppState
initalstate nkeys origin' keysize' = AppState { _piano = PianoSettings { _origin = origin',
_keysize = keysize',
_indent = 0.26,
_mid = 0.62,
_active = Nothing,
_keys = replicate nkeys False },
_animation = AnimationData { _frame = 0,
_fps = 30 },
_inputstate = InputState { _mouse=0:+0, _keyboard=S.empty },
_bindings = M.fromList [("Escape", Cairo.liftIO mainQuit)] }
-- | Just a little hello world snippet to make sure everything is set up properly.
goodbyeWorld :: IO ()
goodbyeWorld = do
putStrLn "Hello world!"
putStrLn "Counting down to launch..."
forM_ [10,9..0] ((>> threadDelay (10^6)) . print)
putStrLn "Launching. What have I done!?"
|
import numpy as np
from skimage.measure import compare_psnr as PSNR
from skimage.measure import compare_ssim as SSIM
from skimage.measure import compare_mse as MSE
from .kit import norm, getPointsFromHeatmap
def get_metric(s):
return {
'ssim': cal_ssim,
'psnr': cal_psnr,
'mse': cal_mse,
'mre': cal_mre,
'std': cal_std,
}[s]
def prepare(x):
if np.iscomplexobj(x):
x = np.abs(x)
return norm(x)
def cal_mse(x, y):
'''
result changes
if x,y are not normd to (0,1) or normd to different range
'''
x = prepare(x)
y = prepare(y)
return MSE(x, y)
def cal_ssim(x, y):
'''
result changes if x,y are not normd to (0,1)
won't change if normd to different range
'''
x = prepare(x)
y = prepare(y)
return SSIM(x, y, data_range=x.max() - x.min())
def cal_psnr(x, y):
'''
result rarely changes if x,y are not normd to (0,1)
won't change if normd to different range
'''
x = prepare(x)
y = prepare(y)
return PSNR(x, y, data_range=x.max() - x.min())
def cal_mre(x, y):
''' cal mean distance of the two heatmap's center
x: numpy.ndarray heatmap channel x imgshape
y: numpy.ndarray heatmap channel x imgshape
'''
# assert x.shape == y.shape
# assert x.ndim >= 3
p1 = getPointsFromHeatmap(x)
p2 = getPointsFromHeatmap(y)
li = [sum((i-j)**2 for i, j in zip(point, gt_point)) **
0.5 for point, gt_point in zip(p1, p2)]
return np.mean(li)
def cal_std(x, y):
''' cal std distance of the two heatmap's center
x: numpy.ndarray heatmap channel x imgshape
y: numpy.ndarray heatmap channel x imgshape
'''
# assert x.shape == y.shape
# assert x.ndim >= 3
p1 = getPointsFromHeatmap(x)
p2 = getPointsFromHeatmap(y)
li = [sum((i-j)**2 for i, j in zip(point, gt_point)) **
0.5 for point, gt_point in zip(p1, p2)]
return np.std(li)
|
*** Please note that this is a work in progress and I am constantly updating it between births and life!
Do you see inaccuracies on this chart? Has your organization updated its requirements? Please feel free to message Kate Dewey at [email protected] with verifiable information (either a link to a public page, or information directly from a trainer or staffer at the organization) and we will be happy to update this.
|
[STATEMENT]
lemma eq_nb: "tmbound0 t \<Longrightarrow> bound0 (eq t)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. tmbound0 t \<Longrightarrow> bound0 (eq t)
[PROOF STEP]
apply (simp add: eq_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. tmbound0 t \<Longrightarrow> bound0 (case t of CP (C c) \<Rightarrow> if c = 0\<^sub>N then T else F | CP _ \<Rightarrow> Eq t | _ \<Rightarrow> Eq t)
[PROOF STEP]
apply (cases t)
[PROOF STATE]
proof (prove)
goal (7 subgoals):
1. \<And>x1. \<lbrakk>tmbound0 t; t = CP x1\<rbrakk> \<Longrightarrow> bound0 (case t of CP (C c) \<Rightarrow> if c = 0\<^sub>N then T else F | CP _ \<Rightarrow> Eq t | _ \<Rightarrow> Eq t)
2. \<And>x2. \<lbrakk>tmbound0 t; t = tm.Bound x2\<rbrakk> \<Longrightarrow> bound0 (case t of CP (C c) \<Rightarrow> if c = 0\<^sub>N then T else F | CP _ \<Rightarrow> Eq t | _ \<Rightarrow> Eq t)
3. \<And>x31 x32. \<lbrakk>tmbound0 t; t = tm.Add x31 x32\<rbrakk> \<Longrightarrow> bound0 (case t of CP (C c) \<Rightarrow> if c = 0\<^sub>N then T else F | CP _ \<Rightarrow> Eq t | _ \<Rightarrow> Eq t)
4. \<And>x41 x42. \<lbrakk>tmbound0 t; t = tm.Mul x41 x42\<rbrakk> \<Longrightarrow> bound0 (case t of CP (C c) \<Rightarrow> if c = 0\<^sub>N then T else F | CP _ \<Rightarrow> Eq t | _ \<Rightarrow> Eq t)
5. \<And>x5. \<lbrakk>tmbound0 t; t = tm.Neg x5\<rbrakk> \<Longrightarrow> bound0 (case t of CP (C c) \<Rightarrow> if c = 0\<^sub>N then T else F | CP _ \<Rightarrow> Eq t | _ \<Rightarrow> Eq t)
6. \<And>x61 x62. \<lbrakk>tmbound0 t; t = tm.Sub x61 x62\<rbrakk> \<Longrightarrow> bound0 (case t of CP (C c) \<Rightarrow> if c = 0\<^sub>N then T else F | CP _ \<Rightarrow> Eq t | _ \<Rightarrow> Eq t)
7. \<And>x71 x72 x73. \<lbrakk>tmbound0 t; t = CNP x71 x72 x73\<rbrakk> \<Longrightarrow> bound0 (case t of CP (C c) \<Rightarrow> if c = 0\<^sub>N then T else F | CP _ \<Rightarrow> Eq t | _ \<Rightarrow> Eq t)
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x1. t = CP x1 \<Longrightarrow> bound0 (case x1 of C c \<Rightarrow> if c = 0\<^sub>N then T else F | _ \<Rightarrow> Eq (CP x1))
[PROOF STEP]
apply (rename_tac poly, case_tac poly)
[PROOF STATE]
proof (prove)
goal (8 subgoals):
1. \<And>poly x1. \<lbrakk>t = CP poly; poly = C x1\<rbrakk> \<Longrightarrow> bound0 (case poly of C c \<Rightarrow> if c = 0\<^sub>N then T else F | _ \<Rightarrow> Eq (CP poly))
2. \<And>poly x2. \<lbrakk>t = CP poly; poly = poly.Bound x2\<rbrakk> \<Longrightarrow> bound0 (case poly of C c \<Rightarrow> if c = 0\<^sub>N then T else F | _ \<Rightarrow> Eq (CP poly))
3. \<And>poly x31 x32. \<lbrakk>t = CP poly; poly = poly.Add x31 x32\<rbrakk> \<Longrightarrow> bound0 (case poly of C c \<Rightarrow> if c = 0\<^sub>N then T else F | _ \<Rightarrow> Eq (CP poly))
4. \<And>poly x41 x42. \<lbrakk>t = CP poly; poly = poly.Sub x41 x42\<rbrakk> \<Longrightarrow> bound0 (case poly of C c \<Rightarrow> if c = 0\<^sub>N then T else F | _ \<Rightarrow> Eq (CP poly))
5. \<And>poly x51 x52. \<lbrakk>t = CP poly; poly = poly.Mul x51 x52\<rbrakk> \<Longrightarrow> bound0 (case poly of C c \<Rightarrow> if c = 0\<^sub>N then T else F | _ \<Rightarrow> Eq (CP poly))
6. \<And>poly x6. \<lbrakk>t = CP poly; poly = poly.Neg x6\<rbrakk> \<Longrightarrow> bound0 (case poly of C c \<Rightarrow> if c = 0\<^sub>N then T else F | _ \<Rightarrow> Eq (CP poly))
7. \<And>poly x71 x72. \<lbrakk>t = CP poly; poly = Pw x71 x72\<rbrakk> \<Longrightarrow> bound0 (case poly of C c \<Rightarrow> if c = 0\<^sub>N then T else F | _ \<Rightarrow> Eq (CP poly))
8. \<And>poly x81 x82 x83. \<lbrakk>t = CP poly; poly = CN x81 x82 x83\<rbrakk> \<Longrightarrow> bound0 (case poly of C c \<Rightarrow> if c = 0\<^sub>N then T else F | _ \<Rightarrow> Eq (CP poly))
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
SUBROUTINE POLY_PAD ( kpad, kx, ky, ng, nc, npts, iret )
C************************************************************************
C* POLY_PAD								*
C* *
C* This subroutine adds extra padding to warning grids. *
C* *
C* POLY_PAD ( KPAD, KX, KY, NG, NC, NPTS, IRET ) *
C* *
C* Input parameters: *
C* KPAD INTEGER Padding type *
C* 0 = no padding *
C* 1 = positive x-dir *
C* 2 = positive y-dir *
C* 3 = negative x-dir *
C* 4 = negative y-dir *
C* KX INTEGER Grid point along x-dir *
C* KY INTEGER Grid point along y-dir *
C* *
C* Input and output parameter: *
C* NPTS INTEGER Number of points *
C* *
C* Output parameters: *
C* IRET INTEGER Return code *
C* 0 = normal return *
C* -8 = incorrect padding index *
C** *
C* Log: *
C* T. Lee/SAIC 03/08 *
C************************************************************************
INCLUDE 'gpolyg.cmn'
INCLUDE 'ERROR.PRM'
C-----------------------------------------------------------------------
iret = 0
C
C* Add padding if it is not on the boundaries.
C
IF ( kpad .eq. 0 ) THEN
npts = npts + 1
polygi ( npts, ng, nc ) = kx
polygj ( npts, ng, nc ) = ky
RETURN
ELSE IF ( kpad .eq. 1 ) THEN
IF ( kx .ge. igxd ) RETURN
npts = npts + 1
polygi ( npts, ng, nc ) = kx + 1
polygj ( npts, ng, nc ) = ky
ELSE IF ( kpad .eq. 2 ) THEN
IF ( ky .ge. igyd ) RETURN
npts = npts + 1
polygi ( npts, ng, nc ) = kx
polygj ( npts, ng, nc ) = ky + 1
ELSE IF ( kpad .eq. 3 ) THEN
IF ( kx .le. 1 ) RETURN
npts = npts + 1
polygi ( npts, ng, nc ) = kx - 1
polygj ( npts, ng, nc ) = ky
ELSE IF ( kpad .eq. 4 ) THEN
IF ( ky .le. 1 ) RETURN
npts = npts + 1
polygi ( npts, ng, nc ) = kx
polygj ( npts, ng, nc ) = ky - 1
ELSE
iret = -8
END IF
C*
RETURN
END
|
The inverse function tends to zero at infinity.
|
lemma measure_count_space: "measure (count_space A) X = (if X \<subseteq> A then of_nat (card X) else 0)"
|
program calc_stat
implicit none
character*128 :: fname,buf,camadir,outdir,assim_out
character*8 :: yyyymmdd
character*3 :: numch
integer*4 :: lon_cent,lat_cent
integer,parameter :: latpx=720,lonpx=1440
integer,dimension(lonpx,latpx) :: nextX,nextY,ocean
real,dimension(lonpx,latpx) :: AI,RMSE,NRMSEasm,NRMSEopn,rRMSE,VEasm,VEopn,NSA,NSC,NSE,PDRI,PTRI,ENSPR,PBIASasm,PBIASopn,KGEasm,KGEopn ! ensemble spread
real,allocatable :: opn(:,:,:,:),asm(:,:,:,:),org(:,:,:)
real,allocatable :: opn_mean(:),asm_mean(:),org_mean(:),err_mask(:)
integer :: ios,N,m,i,day
integer,allocatable :: days(:)
real :: dis_mean
real :: Qtp, Qap, Qcp
integer :: Ttp,Tap,Tcp,Tp0,Tp1
real,allocatable :: opn_max(:),opn_min(:),asm_max(:),asm_min(:)
real :: KGE
call getarg(1,buf)
read(buf,*) N ! number of days in year 366/365
write(*,*)N
call getarg(2,buf)
read(buf,*) m !number of ensembles
call getarg(3,buf)
read(buf,"(A)") outdir
write(*,*) outdir
call getarg(4,buf)
read(buf,"(A)") camadir
write(*,*) camadir
! read next grid information
! read nextX and nextY
fname=trim(adjustl(camadir))//"/map/glb_15min/nextxy.bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*latpx*lonpx,status="old",iostat=ios)
if(ios==0)then
read(34,rec=1) nextX
read(34,rec=2) nextY
else
write(*,*) "no file nextXY at:",fname
end if
close(34)
!-----
write(*,*)"ocean"
ocean=(nextx==-9999)*(-1)
!-----
! read names of days in a particular year
allocate(days(N))
fname="year_day.txt"
open(34,file=fname,form="formatted",iostat=ios)
if(ios==0)then
read(34,*) days
else
write(*,*) "no days",fname
end if
close(34)
!--
allocate(org(N,lonpx,latpx),opn(N,m,lonpx,latpx),asm(N,m,lonpx,latpx))
org=0.0
opn=0.0
asm=0.0
write(*,*)"read rivout"
do day=1,N
write(yyyymmdd,'(i8.0)') days(day)
write(*,*) yyyymmdd !day,, days(day)
        ! true river discharge
fname=trim(adjustl(outdir))//"/assim_out/rivout/true/rivout"//yyyymmdd//".bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*lonpx*latpx,status="old",iostat=ios)
if(ios==0)then
read(34,rec=1) org(day,:,:)
else
write(*,*) "no true discharge",fname
end if
close(34)
do i=1,m
write(numch,'(i3.3)') i
            ! corrupted river discharge
fname=trim(adjustl(outdir))//"/assim_out/rivout/open/rivout"//yyyymmdd//"_"//numch//".bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*lonpx*latpx,status="old",iostat=ios)
if(ios==0)then
read(34,rec=1) opn(day,i,:,:)
else
write(*,*) "no corrupted discharge",fname
end if
close(34)
! assimilated river discharge
fname=trim(adjustl(outdir))//"/assim_out/rivout/assim/rivout"//yyyymmdd//"_"//numch//".bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*lonpx*latpx,status="old",iostat=ios)
if(ios==0)then
read(34,rec=1) asm(day,i,:,:)
else
write(*,*) "no assimilated discharge",fname
end if
close(34)
end do
end do
!--
allocate(org_mean(N),opn_mean(N),asm_mean(N),err_mask(N))
allocate(opn_max(N),opn_min(N),asm_max(N),asm_min(N))
do lon_cent = 1,lonpx
do lat_cent = 1, latpx
!remove ocean
if (ocean(lon_cent,lat_cent)==1) then
cycle
!continue
end if
! AI calculation
org_mean=org(:,lon_cent,lat_cent)
opn_mean=sum(opn(:,:,lon_cent,lat_cent),dim=2)/real(m)
asm_mean=sum(asm(:,:,lon_cent,lat_cent),dim=2)/real(m)
!write(*,*) shape(asm_mean) , N
err_mask=((abs(org_mean-asm_mean)/(org_mean+1.0e-20))>0.1)*(-1)
!write(*,*)sum(err_mask)
!write(*,*)sum(org_mean)/real(N)
AI(lon_cent,lat_cent)= sum((1.0 -(abs(org_mean-asm_mean)/abs(org_mean-opn_mean+1.0e-20))),mask=err_mask==0.0)/(sum(err_mask)+1.0e-20)
! RMSE
RMSE(lon_cent,lat_cent)=sqrt((1/real(N))*sum((asm_mean-org_mean)**2))
! rRMSE
rRMSE(lon_cent,lat_cent)=sqrt((1/real(N))*sum(((asm_mean-org_mean)/(org_mean+1.0e-20))**2))
! NRMSE
dis_mean=sum(org_mean)/(real(N))
NRMSEasm(lon_cent,lat_cent)=sqrt((1/real(N))*sum((asm_mean-org_mean)**2))/(dis_mean+1.0e-20)
NRMSEopn(lon_cent,lat_cent)=sqrt((1/real(N))*sum((opn_mean-org_mean)**2))/(dis_mean+1.0e-20)
! VEasm
VEasm(lon_cent,lat_cent)=1.0 - (sum(asm_mean-org_mean)/(sum(org_mean)+1.0e-20))
! VEopn
VEopn(lon_cent,lat_cent)=1.0 - (sum(opn_mean-org_mean)/(sum(org_mean)+1.0e-20))
! NSA
NSA(lon_cent,lat_cent)=1.0 - (sum((asm_mean-org_mean)**2)/(sum((dis_mean-org_mean)**2)+1.0e-20))
! NSC
NSC(lon_cent,lat_cent)=1.0 - (sum((opn_mean-org_mean)**2)/(sum((dis_mean-org_mean)**2)+1.0e-20))
! NSE
NSE(lon_cent,lat_cent)=((NSA(lon_cent,lat_cent)-NSC(lon_cent,lat_cent))/(1-NSC(lon_cent,lat_cent)))
if (NSE(lon_cent,lat_cent) == 0.0) then
if ( NSA(lon_cent,lat_cent)/= NSC(lon_cent,lat_cent) ) then
write(*,*)NSA(lon_cent,lat_cent),NSC(lon_cent,lat_cent),(NSA(lon_cent,lat_cent)-NSC(lon_cent,lat_cent)),1-NSC(lon_cent,lat_cent)
end if
end if
! PDRI & PTRI
! peak discharge and peak timing
Qtp=maxval(org_mean)
Ttp=maxloc(org_mean,dim=1)
! peak discharge and timing of assimilated and corrupted
        Tp0=max(Ttp-15,1) ! arrays are 1-based; index 0 would be out of bounds
        Tp1=min(Ttp+15,N)
        Qap=maxval(asm_mean(Tp0:Tp1))
        Tap=Tp0+maxloc(asm_mean(Tp0:Tp1),dim=1)-1 ! shift slice-relative index to absolute day
        Qcp=maxval(opn_mean(Tp0:Tp1))
        Tcp=Tp0+maxloc(opn_mean(Tp0:Tp1),dim=1)-1 ! shift slice-relative index to absolute day
! PDRI
PDRI(lon_cent,lat_cent)=1.0 - (abs(Qtp-Qap)/(abs(Qtp-Qcp)+1.0e-20))
if (Qtp==Qcp) then
PDRI(lon_cent,lat_cent)=0.0
end if
! PTRI
PTRI(lon_cent,lat_cent)=1.0 - (real(abs(Ttp-Tap))/(real(abs(Ttp-Tcp))+1.0e-20))
if (Ttp==Tcp) then
PTRI(lon_cent,lat_cent)=0.0
end if
! Ensemble Spread
opn_max=maxval(opn(:,:,lon_cent,lat_cent),dim=2)
opn_min=minval(opn(:,:,lon_cent,lat_cent),dim=2)
asm_max=maxval(asm(:,:,lon_cent,lat_cent),dim=2)
asm_min=minval(asm(:,:,lon_cent,lat_cent),dim=2)
!
ENSPR(lon_cent,lat_cent)=sum(1.0 -((asm_max-asm_min)/(opn_max-opn_min+1.0e-20)))/(real(N))
!
PBIASasm(lon_cent,lat_cent)=100.0*(sum(asm_mean-org_mean)/(sum(org_mean)+1.0e-20))
PBIASopn(lon_cent,lat_cent)=100.0*(sum(opn_mean-org_mean)/(sum(org_mean)+1.0e-20))
! KGE
KGEasm(lon_cent,lat_cent)=KGE(asm_mean,org_mean,N)
KGEopn(lon_cent,lat_cent)=KGE(opn_mean,org_mean,N)
!write(*,*)lon_cent,lat_cent,AI(lon_cent,lat_cent),NSA(lon_cent,lat_cent),NSC(lon_cent,lat_cent)!((NSA(lon_cent,lat_cent)-NSC(lon_cent,lat_cent))/(1-NSC(lon_cent,lat_cent))),KGEasm(lon_cent,lat_cent)
!,rRMSE(lon_cent,lat_cent),NRMSEasm(lon_cent,lat_cent),VE(lon_cent,lat_cent),NSE(lon_cent,lat_cent),,PBIASasm(lon_cent,lat_cent)
end do
end do
deallocate(org,opn,asm)
deallocate(org_mean,opn_mean,asm_mean,err_mask)
deallocate(days)
deallocate(opn_max,opn_min,asm_max,asm_min)
! remove AI<0.0
!AI=AI*((AI>0.0)*(1.0))
! NSE
!NSE= ((NSA-NSC)/(1.0-NSC))
! remove NSE<0.0
!NSE=NSE*((NSE>0.0)*(1.0))
! Assimilation Index
fname=trim(adjustl(outdir))//"/assim_out/stat/annualmeanAI.bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*lonpx*latpx,status="replace",iostat=ios)
if(ios==0)then
write(34,rec=1) AI
else
write(*,*) "no AI",fname
end if
close(34)
! Ensemble Spread
fname=trim(adjustl(outdir))//"/assim_out/stat/annualmeanENSPR.bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*lonpx*latpx,status="replace",iostat=ios)
if(ios==0)then
write(34,rec=1)ENSPR
else
write(*,*) "no Ensemble Spread",fname
end if
close(34)
! RMSE
fname=trim(adjustl(outdir))//"/assim_out/stat/RMSEasm.bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*lonpx*latpx,status="replace",iostat=ios)
if(ios==0)then
write(34,rec=1) RMSE
else
write(*,*) "no RMSE",fname
end if
close(34)
! rRMSE
fname=trim(adjustl(outdir))//"/assim_out/stat/rRMSEasm.bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*lonpx*latpx,status="replace",iostat=ios)
if(ios==0)then
write(34,rec=1) rRMSE
else
write(*,*) "no rRMSE",fname
end if
close(34)
! NRMSE
fname=trim(adjustl(outdir))//"/assim_out/stat/NRMSEasm.bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*lonpx*latpx,status="replace",iostat=ios)
if(ios==0)then
write(34,rec=1) NRMSEasm
else
write(*,*) "no NRMSEasm",fname
end if
close(34)
fname=trim(adjustl(outdir))//"/assim_out/stat/NRMSEopn.bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*lonpx*latpx,status="replace",iostat=ios)
if(ios==0)then
write(34,rec=1) NRMSEopn
else
write(*,*) "no NRMSEopn",fname
end if
close(34)
! VEasm
fname=trim(adjustl(outdir))//"/assim_out/stat/VEasm.bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*lonpx*latpx,status="replace",iostat=ios)
if(ios==0)then
write(34,rec=1) VEasm
else
write(*,*) "no VE assimilated",fname
end if
close(34)
! VEopn
fname=trim(adjustl(outdir))//"/assim_out/stat/VEopn.bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*lonpx*latpx,status="replace",iostat=ios)
if(ios==0)then
write(34,rec=1) VEopn
else
write(*,*) "no VE corrupted",fname
end if
close(34)
! NSEasm
fname=trim(adjustl(outdir))//"/assim_out/stat/NSEasm.bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*lonpx*latpx,status="replace",iostat=ios)
if(ios==0)then
write(34,rec=1) NSA
else
write(*,*) "no NSA",fname
end if
close(34)
! NSEopn
fname=trim(adjustl(outdir))//"/assim_out/stat/NSEopn.bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*lonpx*latpx,status="replace",iostat=ios)
if(ios==0)then
write(34,rec=1) NSC
else
write(*,*) "no NSC",fname
end if
close(34)
! NSEAI
fname=trim(adjustl(outdir))//"/assim_out/stat/NSEAI.bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*lonpx*latpx,status="replace",iostat=ios)
if(ios==0)then
write(34,rec=1) NSE
else
write(*,*) "no NSE",fname
end if
close(34)
! PDRI
fname=trim(adjustl(outdir))//"/assim_out/stat/annualPDRI.bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*lonpx*latpx,status="replace",iostat=ios)
if(ios==0)then
write(34,rec=1) PDRI
else
write(*,*) "no PDRI",fname
end if
close(34)
! PTRI
fname=trim(adjustl(outdir))//"/assim_out/stat/annualPTRI.bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*lonpx*latpx,status="replace",iostat=ios)
if(ios==0)then
write(34,rec=1) PTRI
else
write(*,*) "no PDRI",fname
end if
close(34)
! pBias assimilated
fname=trim(adjustl(outdir))//"/assim_out/stat/pBIASasm.bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*lonpx*latpx,status="replace",iostat=ios)
if(ios==0)then
write(34,rec=1) PBIASasm
else
write(*,*) "no pBias assimilated",fname
end if
close(34)
! pBias corrupted
fname=trim(adjustl(outdir))//"/assim_out/stat/pBIASopn.bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*lonpx*latpx,status="replace",iostat=ios)
if(ios==0)then
write(34,rec=1) PBIASopn
else
write(*,*) "no pBias corrupted",fname
end if
close(34)
! KGE corrupted
fname=trim(adjustl(outdir))//"/assim_out/stat/KGEopn.bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*lonpx*latpx,status="replace",iostat=ios)
if(ios==0)then
write(34,rec=1) KGEopn
else
write(*,*) "no KGE corrupted",fname
end if
close(34)
! KGE assimilated
fname=trim(adjustl(outdir))//"/assim_out/stat/KGEasm.bin"
open(34,file=fname,form="unformatted",access="direct",recl=4*lonpx*latpx,status="replace",iostat=ios)
if(ios==0)then
write(34,rec=1) KGEasm
else
write(*,*) "no KGE simulated",fname
end if
close(34)
end program calc_stat
!**********************
function std(list,N)
implicit none
!--
integer :: N
real,dimension(N) :: list
!--
integer :: i
real :: var,std,mean
!--
var=0.0d0
mean=sum(list)/(real(N)+1.0e-20)
!--
do i=1, N
var=var+(list(i)-mean)**2
end do
!--
std=sqrt((var)/(real(N)+1.0e-20))
!--
return
end function
!************************
function cov(X,Y,N)
implicit none
!--
integer :: N
real,dimension(N) :: X,Y
real :: cov
!--
real :: X_mean,Y_mean
real :: C
integer :: i
!--
X_mean=sum(X)/(real(N)+1.0e-20)
Y_mean=sum(Y)/(real(N)+1.0e-20)
!--
C=0.0d0
!--
do i=1,N
C=C+(X(i)-X_mean)*(Y(i)-Y_mean)
end do
cov=C/((N-1)+1.0e-20)
!--
return
!--
end function cov
!************************
function KGE(sim,obs,N)
implicit none
!--
integer :: N
real,dimension(N) :: sim,obs
real :: KGE
!--
real :: sim_mean,obs_mean,sim_std,obs_std
real :: std,cov
real :: CC,BR,RV
!--
sim_mean=sum(sim)/(real(N)+1.0e-20)
obs_mean=sum(obs)/(real(N)+1.0e-20)
!--
sim_std=std(sim,N)
obs_std=std(obs,N)
!--
CC=cov(sim,obs,N)/((sim_std*obs_std)+1.0e-20)
BR=sim_mean/(obs_mean+1.0e-20)
RV=((sim_std/sim_mean)/((obs_std/obs_mean)+1.0e-20))
!--
KGE=1-sqrt((CC-1)**2+(BR-1)**2+(RV-1)**2)
return
!--
end function KGE
!*************************
|
#define BOOST_TEST_DYN_LINK
#include <boost/test/unit_test.hpp>
#include "model/game/gameparameters.h"
#include <iostream>
#include <string>
BOOST_AUTO_TEST_SUITE( gameproperties_suite )
BOOST_AUTO_TEST_CASE(nodeCount)
{
GameParameters p;
BOOST_CHECK(p.setGridSideSize(10));
BOOST_CHECK(p.setRatio_NodeCountVsGridSpotCount(0.5));
BOOST_CHECK_EQUAL(p.nodeCount(),50);
BOOST_CHECK(p.setRatio_NodeCountVsGridSpotCount(1));
BOOST_CHECK_EQUAL(p.nodeCount(),100);
BOOST_CHECK(p.setGridSideSize(5));
BOOST_CHECK(p.setRatio_NodeCountVsGridSpotCount(0.5));
BOOST_CHECK_EQUAL(p.nodeCount(),13);
BOOST_CHECK(p.setGridSideSize(150) == false);
BOOST_CHECK(p.setRatio_NodeCountVsGridSpotCount(2) == false);
BOOST_CHECK(p.setRatio_NodeCountVsGridSpotCount(0) == false);
BOOST_CHECK_EQUAL(p.nodeCount(),13);
}
BOOST_AUTO_TEST_CASE(edgeTargetCount)
{
GameParameters p;
BOOST_CHECK(p.setGridSideSize(10));
BOOST_CHECK(p.setRatio_EdgeCountVsGridSpotCount(2));
BOOST_CHECK_EQUAL(p.edgeTargetCount(),200);
BOOST_CHECK(p.setRatio_EdgeCountVsGridSpotCount(0.5));
BOOST_CHECK_EQUAL(p.edgeTargetCount(),50);
BOOST_CHECK(p.setGridSideSize(5));
BOOST_CHECK(p.setRatio_EdgeCountVsGridSpotCount(0.5));
BOOST_CHECK_EQUAL(p.edgeTargetCount(),13);
BOOST_CHECK(p.setRatio_EdgeCountVsGridSpotCount(0) == false);
BOOST_CHECK_EQUAL(p.edgeTargetCount(),13);
}
BOOST_AUTO_TEST_CASE(itemCount)
{
GameParameters p;
p.setGridSideSize(10);
p.setRatio_NodeCountVsGridSpotCount(2);
BOOST_CHECK(p.setRatio_ItemCountVsNodeCount(2));
BOOST_CHECK_EQUAL(p.itemCount(),100);
BOOST_CHECK(p.setRatio_ItemCountVsNodeCount(0.5));
BOOST_CHECK_EQUAL(p.itemCount(),25);
BOOST_CHECK(p.setRatio_ItemCountVsNodeCount(0) == false);
BOOST_CHECK_EQUAL(p.itemCount(),25);
}
BOOST_AUTO_TEST_SUITE_END()
|
State Before: x y : ℝ
⊢ mk 1 = 1 State After: x y : ℝ
⊢ mk 1 = { cauchy := 1 } Tactic: rw [← ofCauchy_one] State Before: x y : ℝ
⊢ mk 1 = { cauchy := 1 } State After: no goals Tactic: rfl
|
\chapter{The in-medium similarity renormalization group}\label{ch:imsrg}
The in-medium similarity renormalization group
is a modern \abinitio{} many-body method
that extends the renormalization group approach of decoupling energy scales
to many-body calculations.
It is a non-perturbative many-body expansion method,
in the same vein as coupled cluster,
but it operates on the operators rather than the wave function.
It is remarkably flexible,
with favorable scaling with system size
and the ability to target many different observables,
and its invention and development has contributed strongly
to the rapid expansion of \abinitio{} theoretical calculations of medium-mass nuclei.
In this chapter, we introduce the IMSRG formalism.
We then discuss the topics of truncation scheme, generator selection,
and approaches to solving the flow equations.
\section{Basic formalism}
As a reminder from Section~\ref{sec:srg},
the idea behind the SRG is
the construction of a continuous unitary transformation of the Hamiltonian
\begin{equation}
H(s) = U(s) H U^{\dagger}(s)\,,
\end{equation}
which can be obtained by solving the flow equation
\begin{equation}\label{eq:srg_flow_equation_imsrg}
\frac{d H(s)}{ds} = [\eta(s), H(s)]\,,
\end{equation}
with the anti-Hermitian generator $\eta(s)$.
The solution of this flow equation with vacuum normal-ordered operators,
the free-space SRG,
is appealing in that the evolved operators are not system specific
and can be generally used for many-body calculations of nuclei and nuclear matter.
However, the free-space SRG evolution can only be done consistently
in the two- and three-body spaces for nuclear systems,
leaving out induced higher-body operators.
The idea behind the IMSRG is to solve Eq.~\eqref{eq:srg_flow_equation_imsrg} in-medium,
that is, to normal order with respect to a reference state before solving the flow equations.
Starting from a Hamiltonian with a one-, two- and three-body part
\begin{equation}
H = \onebodyop{H} + \twobodyop{H} + \threebodyop{H}\,,
\end{equation}
our normal-ordered Hamiltonian matrix elements are given by
\begin{align}
E & \equiv \hnozero = \sum_i \onebodyop{H}_{ii} + \frac{1}{2}\sum_{ij} \twobodyop{H}_{ijij} + \frac{1}{6} \sum_{ijk} \threebodyop{H}_{ijkijk}\,, \\
f_{pq} & \equiv \hnoone_{pq} = \onebodyop{H}_{pq} + \sum_i \twobodyop{H}_{piqi} + \frac{1}{2} \sum_{ij} \threebodyop{H}_{pijqij} \,, \\
\Gamma_{pqrs} & \equiv \hnotwo_{pqrs} = \twobodyop{H}_{pqrs} + \sum_i \threebodyop{H}_{pqirsi} \,, \\
W_{pqrstu} & \equiv \hnothree_{pqrstu} = \threebodyop{H}_{pqrstu} \,,
\end{align}
where we have introduced the conventional names
for zero-, one-, two-, and three-body normal-ordered parts of the Hamiltonian,
$E$, $f$, $\Gamma$, and $W$,
used in the literature.
As a reminder,
indices $p$, $q$, $r$, \ldots\ run over all single-particle states,
indices $i$, $j$, $k$, \ldots\ run over holes,
single-particle states occupied in the reference state,
and indices $a$, $b$, $c$, \ldots\ run over particles,
single-particle states unoccupied in the reference state.
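To make these normal-ordering relations concrete, the following NumPy sketch
(an illustration only; the dense $m$-scheme arrays, their names, and the occupation
vector are assumptions, and storing a full six-index $\threebodyop{H}$ is feasible
only for very small bases) evaluates $E$, $f$, $\Gamma$, and $W$ directly from the
expressions above.
\begin{verbatim}
import numpy as np

def normal_order(H1, H2, H3, n):
    """Normal-order a Hamiltonian with respect to a single reference state.

    H1: (N, N), H2: (N,)*4, H3: (N,)*6 antisymmetrized matrix elements
    n : (N,) occupation numbers (1 for hole states, 0 for particle states)
    Returns the zero- through three-body normal-ordered parts E, f, Gamma, W.
    """
    E = (np.einsum('i,ii->', n, H1)
         + 0.5 * np.einsum('i,j,ijij->', n, n, H2)
         + np.einsum('i,j,k,ijkijk->', n, n, n, H3) / 6.0)
    f = (H1
         + np.einsum('i,piqi->pq', n, H2)
         + 0.5 * np.einsum('i,j,pijqij->pq', n, n, H3))
    Gamma = H2 + np.einsum('i,pqirsi->pqrs', n, H3)
    W = H3                      # the residual three-body part is unchanged
    return E, f, Gamma, W
\end{verbatim}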
In general, the generator $\eta(s)$ has one- through $A$-body normal-ordered parts
\begin{equation}
\eta(s) = \sum_{i=1}^A \eta^{(i)}(s)\,.
\end{equation}
For now, we leave $\eta(s)$ unspecified beyond its required anti-Hermiticity,
which causes it to not have a zero-body part.
We discuss the choice of generator in Section~\ref{sec:imsrg_generator}.
Concretely, our initial normal-ordered Hamiltonian is
\begin{equation}
\begin{split}
H = &\,E
+ \sum_{pq} f_{pq} \noref{\crea{p} \annih{q}} \\
&+ \frac{1}{{(2!)}^2} \sum_{pqrs} \Gamma_{pqrs} \noref{\crea{p} \crea{q} \annih{s} \annih{r}}\\
&+ \frac{1}{{(3!)}^2} \sum_{pqrstu} W_{pqrstu} \noref{\crea{p} \crea{q} \crea{r} \annih{u} \annih{t} \annih{s}}\,,
\end{split}
\end{equation}
and our generator is
\begin{equation}
\begin{split}
\eta(s) = &
\sum_{pq} \eta^{(1)}_{pq}(s) \noref{\crea{p} \annih{q}} \\
&+ \frac{1}{{(2!)}^2} \sum_{pqrs} \eta^{(2)}_{pqrs}(s) \noref{\crea{p} \crea{q} \annih{s} \annih{r}}\\
&+ \frac{1}{{(3!)}^2} \sum_{pqrstu} \eta^{(3)}_{pqrstu}(s) \noref{\crea{p} \crea{q} \crea{r} \annih{u} \annih{t} \annih{s}}\\
&+ \ldots \,.
\end{split}
\end{equation}
The evaluation of the right-hand side of Eq.~\eqref{eq:srg_flow_equation_imsrg} then reduces to
the evaluation of commutators of normal-ordered products,
\begin{equation}
\left[\noref{\crea{p_1} \ldots \crea{p_M} \annih{q_M} \ldots \annih{q_1}},
\noref{\crea{r_1} \ldots \crea{r_N} \annih{s_N} \ldots \annih{s_1}}\right]\,,
\end{equation}
which can be simplified into a sum of normal-ordered operators
using the generalized Wick's theorem [see Eq.~\eqref{eq:gen_wicks_theorem}].
The commutator of a $K$-body operator $A^{(K)}$ and an $L$-body operator $B^{(L)}$
will in general have contributions of $|K-L|$-body operators through $K+L-1$-body operators,
\begin{equation}
[A^{(K)}, B^{(L)}] = \sum_{M=|K-L|}^{K+L-1}C^{(M)}\,.
\end{equation}
The right-hand side can then be broken up into zero- through $A$-body parts.
We then identify the zero-body part of the right-hand side with $dE/ds$,
the one-body part with $df_{pq}/ds$, and so on.
This makes it obvious that
even if the initial Hamiltonian and generator contain only up to three-body operators
the IMSRG evolution induces higher-body operators,
all the way up to $A$-body operators after a couple integration steps,
resulting in coupled flow equations for the zero- through $A$-body parts
of the Hamiltonian.
Note that one must also ensure the antisymmetry of the right-hand sides of the two-,
three-, and higher-body flow equations
so the matrix elements remain antisymmetric.
The discussion here has been focused on the Hamiltonian,
but the IMSRG can also be used to evolve other operators,
\begin{equation}
\frac{dO}{ds} = \comm{\eta(s)}{O(s)}\,,
\end{equation}
where $O$ has also been normal ordered with respect to our reference state $\refgnd$.
Since the generator $\eta(s)$ must be the same for the evolution of $H$ and $O$,
and the unitary transformation cannot be reconstructed from the evolved form of $H$ alone,
$H$ and $O$ must naively be evolved simultaneously.
In Section~\ref{sec:imsrg_magnus}, we discuss an alternative approach to solving the IMSRG flow equations
that allows for the construction of the unitary transformation,
resulting in easy evolution of other operators along with the Hamiltonian.
\section{Truncation schemes}
As is the case with the free-space SRG,
it is not feasible to do the full $A$-body evolution,
and the flow equations must be truncated at some $B$-body level.
However, the situation is not quite the same as with the free-space SRG.\@
Because the initial normal ordering shifts information about higher-body operators
into lower-body normal-ordered operators
and the continuous normal ordering absorbs information about induced higher-body operators
into lower-body normal-ordered operators,
the truncated IMSRG flow equations still approximately evolve higher-body operators
(in the free-space sense)
using only the reduced $B$-body flow equations.
In the following sections,
we discuss truncating the IMSRG at the two-body and three-body level,
yielding the so-called IMSRG(2) and IMSRG(3) truncations respectively.
\subsection{IMSRG(2)}
Truncating the flow equation at the two-body level amounts to assuming
\begin{align}
H(s) & \approx E(s) + f(s) + \Gamma(s)\,, \\
\eta(s) & \approx \eta^{(1)}(s) + \eta^{(2)}(s)\,.
\end{align}
In this approximation,
we are not including the three-body part of the initial Hamiltonian exactly.
However, the three-body force \textit{does} contribute
as it was used in obtaining the normal-ordered zero-, one-, and two-body parts
of the Hamiltonian.
The only part that is being discarded is the residual three-body part, $W$.
This is known as the normal-ordered two-body (NO2B) approximation,
which has been quite successful in nuclear many-body applications.
Using the generalized Wick's theorem,
which yields the fundamental commutators in Appendix~\ref{app:mscheme_fundamental_commutators}
or also in Appendix A of Ref.~\cite{Herg15imsrgphysrep},
one arrives at the flow equations for the Hamiltonian
\begin{align}
\phantom{\frac{d\Gamma_{ijkl}}{ds}}
& \begin{aligned}
\mathllap{\frac{dE}{ds}} & = \sum_{ab} n_{a} \bar{n}_{b}
(\genone_{ab} f_{ba} - f_{ab} \genone_{ba}) \\
& \quad + \frac{1}{4} \sum_{abcd} n_{a}n_{b}\bar{n}_c \bar{n}_d
(\gentwo_{abcd} \Gamma_{cdab} - \Gamma_{abcd}\gentwo_{cdab})\,,
\end{aligned}\label{eq:imsrg2_0body} \\
& \begin{aligned}
\mathllap{\frac{df_{ij}}{ds}} & =
\sum_{a}(\genone_{ia} f_{aj} - f_{ia} \genone_{aj}) \\
& \quad+\sum_{ab}(n_a - n_b)(\genone_{ab}\Gamma_{biaj} - f_{ab}\gentwo_{biaj}) \\
& \quad+\frac{1}{2}\sum_{abc}(\bar{n}_a \bar{n}_b n_c + n_a n_b \bar{n}_c)
(\gentwo_{ciab} \Gamma_{abcj} - \Gamma_{ciab} \gentwo_{abcj})\,,
\end{aligned}\label{eq:imsrg2_1body} \\
& \begin{aligned}
\mathllap{\frac{d\Gamma_{ijkl}}{ds}} & =
\sum_{a}(1 - P_{ij})(\genone_{ia}\Gamma_{ajkl} - f_{ia}\gentwo_{ajkl}) \\
& \quad - \sum_{a}(1 - P_{kl})(\genone_{ak}\Gamma_{ijal} - f_{ak}\gentwo_{ijal}) \\
& \quad + \frac{1}{2}\sum_{ab}(1 - n_{a} - n_{b})(\gentwo_{ijab}\Gamma_{abkl} - \Gamma_{ijab}\gentwo_{abkl}) \\
& \quad + \sum_{ab} (n_a - n_b)(1 - P_{ij})(1 - P_{kl})\gentwo_{aibk}\Gamma_{bjal}\,,
\end{aligned}\label{eq:imsrg2_2body}
\end{align}
where $n_p$ are the occupation numbers of the reference state,
$\bar{n}_p \equiv 1 - n_p$,
the $s$-dependence has been suppressed,
and the permutation operator $P_{pq}$ exchanges the indices $p$ and $q$
in the following expression.
We have broken our usual notation for single-particle index labels
to adopt the following convention for the IMSRG flow equations:
the indices $i$,~$j$,~\ldots\ are for external indices
(indices on the flowing Hamiltonian),
and the indices $a$,~$b$,~\ldots\ are for contracted indices,
which are summed over in the flow equation.
Eqs.~\eqref{eq:imsrg2_0body}-\eqref{eq:imsrg2_2body} are solved by integrating
from $s=0$ towards $s\rightarrow\infty$ with the initial conditions
$E(0) = E$, $f(0) = f$, and $\Gamma(0)=\Gamma$.
Given appropriate decoupling (see Section~\ref{sec:imsrg_generator}),
$E(\infty)$ gives the energy of the state targeted by the reference state,
for our applications typically the ground state.
The cost of this integration is dominated by the final two terms in Eq.~\eqref{eq:imsrg2_2body},
which scale like $\mathcal{O}(N^6)$,
where $N$ is the size of the single-particle basis for the calculation.
Another nice property is that the flow equation,
due to being a commutator many-body expansion,
generates only connected diagrams and thus ensures size extensivity~\cite{Herg15imsrgphysrep}.
This is true for any $B$-body truncation.
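As an illustration of how these flow equations translate into code,
the sketch below evaluates the right-hand side of Eq.~\eqref{eq:imsrg2_0body}
for dense $m$-scheme arrays with \texttt{numpy.einsum}
(the array names and shapes are assumptions);
the one- and two-body right-hand sides,
Eqs.~\eqref{eq:imsrg2_1body} and~\eqref{eq:imsrg2_2body}, translate analogously,
with the last two terms of Eq.~\eqref{eq:imsrg2_2body} dominating the $\mathcal{O}(N^6)$ cost.
\begin{verbatim}
import numpy as np

def imsrg2_dE_ds(f, Gamma, eta1, eta2, n):
    """Zero-body IMSRG(2) flow equation for dense m-scheme arrays.

    f, eta1    : (N, N) one-body matrices
    Gamma, eta2: (N, N, N, N) antisymmetrized two-body matrices
    n          : (N,) occupation numbers of the reference state
    """
    nbar = 1.0 - n
    one_body = (np.einsum('a,b,ab,ba->', n, nbar, eta1, f)
                - np.einsum('a,b,ab,ba->', n, nbar, f, eta1))
    two_body = 0.25 * (
        np.einsum('a,b,c,d,abcd,cdab->', n, n, nbar, nbar, eta2, Gamma)
        - np.einsum('a,b,c,d,abcd,cdab->', n, n, nbar, nbar, Gamma, eta2))
    return one_body + two_body
\end{verbatim}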
\subsection{IMSRG(3)}\label{sec:imsrgthree}
Truncating at the three-body level allows one to exactly include initial three-body forces.
One assumes
\begin{align}
H(s) & \approx E(s) + f(s) + \Gamma(s) + W(s)\,, \\
\eta(s) & \approx \genone(s) + \gentwo(s) + \genthree(s)\,,
\end{align}
yielding additional terms with commutators of $\genthree$ and $W$
with zero- through two-body operators
and a commutator between $\genthree$ and $W$.
The flow equations are
\begin{align}
\phantom{\frac{dW_{ijklmn}}{ds}}
& \begin{aligned}
\mathllap{\frac{dE}{ds}} & = \sum_{ab} n_{a} \bar{n}_{b}
(\genone_{ab} f_{ba} - f_{ab} \genone_{ba}) \\
& \quad + \frac{1}{4} \sum_{abcd} n_{a}n_{b}\bar{n}_c \bar{n}_d
(\gentwo_{abcd} \Gamma_{cdab} - \Gamma_{abcd}\gentwo_{cdab}) \\
& \quad + \frac{1}{36}\sum_{abcdef} n_a n_b n_c \bar{n}_d \bar{n}_e \bar{n}_f
(\genthree_{abcdef} W_{defabc} - W_{abcdef} \genthree_{defabc})\,,
\end{aligned}\label{eq:imsrg3_0body} \\
& \begin{aligned}
\mathllap{\frac{df_{ij}}{ds}} & =
\sum_{a}(\genone_{ia} f_{aj} - f_{ia} \genone_{aj}) \\
& \quad+\sum_{ab}(n_a - n_b)(\genone_{ab}\Gamma_{biaj} - f_{ab}\gentwo_{biaj}) \\
& \quad+\frac{1}{2}\sum_{abc}(\bar{n}_a \bar{n}_b n_c + n_a n_b \bar{n}_c)
(\gentwo_{ciab} \Gamma_{abcj} - \Gamma_{ciab} \gentwo_{abcj}) \\
& \quad - \frac{1}{4} \sum_{abcd}(n_a n_b \bar{n}_c \bar{n}_d -\bar{n}_a \bar{n}_b n_c n_d)
(\gentwo_{cdab} W_{abijcd} - \Gamma_{cdab} \genthree_{abijcd}) \\
& \quad + \frac{1}{12}\sum_{abcde}
(n_a n_b \bar{n}_c \bar{n}_d \bar{n}_e + \bar{n}_a \bar{n}_b n_c n_d n_e)
(\genthree_{abicde} W_{cdeabj} - W_{abicde} \genthree_{cdeabj})\,,
\end{aligned}\label{eq:imsrg3_1body} \\
& \begin{aligned}
\mathllap{\frac{d\Gamma_{ijkl}}{ds}} & =
\sum_{a}(1 - P_{ij})(\genone_{ia}\Gamma_{ajkl} - f_{ia}\gentwo_{ajkl}) \\
& \quad - \sum_{a}(1 - P_{kl})(\genone_{ak}\Gamma_{ijal} - f_{ak}\gentwo_{ijal}) \\
& \quad + \frac{1}{2}\sum_{ab}(1 - n_{a} - n_{b})(\gentwo_{ijab}\Gamma_{abkl} - \Gamma_{ijab}\gentwo_{abkl}) \\
& \quad + \sum_{ab} (n_a - n_b)(1 - P_{ij})(1 - P_{kl})\gentwo_{aibk}\Gamma_{bjal} \\
& \quad + \sum_{ab} (n_a - n_b)(\genone_{ab} W_{bijakl} - f_{ab} \genthree_{bijakl}) \\
& \quad - \frac{1}{2} \sum_{abc} (n_a \bar{n}_b \bar{n}_c + \bar{n}_a n_b n_c)
(1 - P_{kl})(\gentwo_{bcak} W_{aijbcl} - \Gamma_{bcak} \genthree_{aijbcl}) \\
& \quad + \frac{1}{2} \sum_{abc} (n_a \bar{n}_b \bar{n}_c + \bar{n}_a n_b n_c)
(1 - P_{ij})(\gentwo_{bcai} W_{aklbcj} - \Gamma_{bcai} \genthree_{aklbcj}) \\
& \quad + \frac{1}{6} \sum_{abcd}(n_a \bar{n}_b \bar{n}_c \bar{n}_d -\bar{n}_a n_b n_c n_d)
(\genthree_{aijbcd} W_{bcdakl} - W_{aijbcd} \genthree_{bcdakl}) \\
& \quad + \frac{1}{4} \sum_{abcd}(\bar{n}_a \bar{n}_b n_c n_d -n_a n_b \bar{n}_c \bar{n}_d)
(1 - P_{ij})(1 - P_{kl}) \genthree_{abicdl} W_{cdjabk}\,,
\end{aligned}\label{eq:imsrg3_2body} \\
& \begin{aligned}
\mathllap{\frac{dW_{ijklmn}}{ds}} & =
\sum_{a} P(i/jk) (\genone_{ia} W_{ajklmn} - f_{ia} \genthree_{ajklmn}) \\
& \quad - \sum_{a} P(l/mn) (\genone_{al} W_{ijkamn} - f_{al} \genthree_{ijkamn}) \\
& \quad + \sum_{a} P(ij/k) P(l/mn)
(\gentwo_{ijla} \Gamma_{akmn} - \Gamma_{ijla} \gentwo_{akmn}) \\
& \quad + \frac{1}{2} \sum_{ab} (1 - n_a - n_b) P(ij/k)
(\gentwo_{ijab} W_{abklmn} - \Gamma_{ijab} \genthree_{abklmn}) \\
& \quad - \frac{1}{2} \sum_{ab} (1 - n_a - n_b) P(l/mn)
(\gentwo_{abmn} W_{ijklab} - \Gamma_{abmn} \genthree_{ijklab}) \\
& \quad + \frac{1}{6} \sum_{abc} (n_a n_b n_c + \bar{n}_a \bar{n}_b \bar{n}_c)
(\genthree_{ijkabc} W_{abclmn} - W_{ijkabc} \genthree_{abclmn}) \\
& \quad + \frac{1}{2} \sum_{abc} (n_a n_b \bar{n}_c + \bar{n}_a \bar{n}_b n_c)
P(ij/k)P(l/mn) \\
& \qquad \qquad \times (\genthree_{abkcmn} W_{cijabl} -\genthree_{cjkabn} W_{iablmc})\,.
\end{aligned}\label{eq:imsrg3_3body}
\end{align}
Here, $P(pq/r) = 1 - P_{pr} - P_{qr}$ and $P(p/qr) = 1 - P_{pq} - P_{pr}$
ensure the antisymmetry of the three-body indices.
It is clear that the final two terms in Eq.~\eqref{eq:imsrg3_3body}
dominate the cost of the integration,
scaling like $\mathcal{O}(N^9)$ in the size of our single-particle basis.
In the absence of an initial three-body operator,
the third term in Eq.~\eqref{eq:imsrg3_3body},
the three-body part of the commutator of two two-body operators,
induces a three-body operator,
which leads to the contribution of all other terms later in the integration.
We would also like to note that
in the derivation of the flow equations here
the only assumption that was made was that
two- and three-body matrix elements are appropriately antisymmetric.
In particular, unlike several other references in the literature,
for instance Refs.~\cite{Herg15imsrgphysrep,Tsuk10imsrg},
the flow equations here do not assume $f$, $\Gamma$, and $W$ are Hermitian
(which of course they are).
While this assumption allows for the simplification of certain terms
(though nothing that changes their scaling),
it limits the expressions to commutators of anti-Hermitian with Hermitian operators only.
In Section~\ref{sec:imsrg_magnus},
we require the commutator of two anti-Hermitian operators,
for which the right-hand sides of
Eqs.~\eqref{eq:imsrg2_0body}-\eqref{eq:imsrg2_2body}
and Eqs.~\eqref{eq:imsrg3_0body}-\eqref{eq:imsrg3_3body}
are equally valid.
For numerical implementations,
it is a good idea to implement each of the fundamental commutators separately,
to allow for validation and fine-grained optimization in the most expensive commutators.
\section{Generator selection}\label{sec:imsrg_generator}
\begin{figure}[t]
\setlength{\unitlength}{0.8\columnwidth}
\begin{center}
\begin{picture}(1.0000,0.5500)
\put(0.0350,0.0450){\includegraphics[width=0.46\unitlength]{thesis/doc/images/external/H_initial.eps}}
\put(0.5400,0.0450){\includegraphics[width=0.46\unitlength]{thesis/doc/images/external/H_IMSRG_3ph_decoupling.eps}}
\put(0.0100,0.0000){\parbox{0.5\unitlength}{\centering$\braket{i|H(0)|j}$}}
\put(0.5200,0.0000){\parbox{0.5\unitlength}{\centering$\braket{i|H(\infty)|j}$}}
\put(0.0500,0.5100){\parbox{0.11\unitlength}{\centering\footnotesize0p0h}}
\put(0.1600,0.5100){\parbox{0.11\unitlength}{\centering\footnotesize1p1h}}
\put(0.2630,0.5100){\parbox{0.11\unitlength}{\centering\footnotesize2p2h}}
\put(0.3650,0.5100){\parbox{0.11\unitlength}{\centering\footnotesize3p3h}}
\put(0.5500,0.5100){\parbox{0.11\unitlength}{\centering\footnotesize0p0h}}
\put(0.6600,0.5100){\parbox{0.11\unitlength}{\centering\footnotesize1p1h}}
\put(0.7630,0.5100){\parbox{0.11\unitlength}{\centering\footnotesize2p2h}}
\put(0.8650,0.5100){\parbox{0.11\unitlength}{\centering\footnotesize3p3h}}
%
\put(0.0100,0.4320){\parbox{0.11\unitlength}{\rotatebox{90}{\centering\footnotesize0p0h}}}
\put(0.0100,0.3235){\parbox{0.11\unitlength}{\rotatebox{90}{\centering\footnotesize1p1h}}}
\put(0.0100,0.2175){\parbox{0.11\unitlength}{\rotatebox{90}{\centering\footnotesize2p2h}}}
\put(0.0100,0.1100){\parbox{0.11\unitlength}{\rotatebox{90}{\centering\footnotesize3p3h}}}
\put(0.5100,0.4320){\parbox{0.11\unitlength}{\rotatebox{90}{\centering\footnotesize0p0h}}}
\put(0.5100,0.3235){\parbox{0.11\unitlength}{\rotatebox{90}{\centering\footnotesize1p1h}}}
\put(0.5100,0.2175){\parbox{0.11\unitlength}{\rotatebox{90}{\centering\footnotesize2p2h}}}
\put(0.5100,0.1100){\parbox{0.11\unitlength}{\rotatebox{90}{\centering\footnotesize3p3h}}}
\end{picture}
\end{center}
\caption[
Schematic diagram
showing the minimal decoupling scheme
taken in the IMSRG.\@
]{\label{fig:imsrg_decoupling}
Schematic diagram
showing the minimal decoupling scheme
taken in the IMSRG.\@
Figure taken from Ref.~\cite{Herg15imsrgphysrep}.
}
\end{figure}
So far, we have not discussed specific choices of our generator $\eta$.
The possible definitions of $\eta$ hinge on the specification
of our so-called ``off-diagonal'' Hamiltonian $H_{od}$,
the part of the Hamiltonian we wish to suppress
to ensure appropriate decoupling in the remaining ``diagonal'' part.
In Section~\ref{sec:srg},
we saw that the generator choice
$\eta(s) = \comm{T_{\text{rel}}}{H(s)}$
leads to the decoupling of any two states
with a decay scale set by the difference in their kinetic energy expectation values.
The result is that the Hamiltonian evolves towards a diagonal form,
providing vastly improved convergence
and allowing many-body calculations to use significantly smaller model spaces.
To identify the desired decoupling for the IMSRG,
we begin by considering the Hamiltonian in the basis spanned by
our reference state $\refgnd$ and
$n$-particle $n$-hole ($npnh$) excitations of the reference state,
\begin{equation}
\{\refgnd, \refhp{i}{a}, \refhp{ij}{ab}, \refhp{ijk}{abc}, \ldots \}\,.
\end{equation}
For a Hamiltonian with only one- and two-body operators,
as is the approximation after normal ordering for the IMSRG(2) truncation,
the Hamiltonian in this basis is schematically represented
in the left panel of Fig.~\ref{fig:imsrg_decoupling}.
It is band-diagonal and only able to couple an $npnh$ excitation
to $(n\pm2)p(n\pm2)h$ excitations.
For a Hamiltonian with a three-body part,
the band diagonal grows to include $(n\pm3)p(n\pm3)h$ excitations.
For the IMSRG,
a decoupling towards a true diagonal form is no longer a good idea,
as one must avoid inducing significant three-body terms in the IMSRG(2)
or four- and higher-body terms in the IMSRG(3)
to maintain the validity of the truncation.
The alternative is a minimal decoupling scheme,
where the sole objective is to decouple the reference state $\refgnd$
from all $npnh$ excitations,
as shown in the right panel of Fig.~\ref{fig:imsrg_decoupling}.
Achieving this decoupling gives us the energy of the state $E$
in the zero-body part of the normal-ordered Hamiltonian
and the corresponding eigenstate by applying the unitary transformation
to the reference state, $U^{\dagger}(\infty)\refgnd$.
For some finite truncation of the flow equations,
this result is of course only approximate.
Now that we know we want to suppress the matrix elements
that couple $\refgnd$ to its excitations,
we want to identify which parts of our normal-ordered Hamiltonian
these matrix elements correspond to.
For the couplings between $\refgnd$ and $1p1h$ excitations, we find
\begin{align}
\phantom{\braket{\Phi | H |\Phi_{i}^{a}}}
& \begin{aligned}
\mathllap{\braket{\Phi | H |\Phi_{i}^{a}}} & = \braket{\Phi | H \noref{\crea{a} \annih{i}}| \Phi}
\end{aligned} \\
& \begin{aligned}
\mathllap{} & = E \braket{\Phi | \noref{\crea{a} \annih{i}} | \Phi} \\
& \quad + \sum_{pq} f_{pq} \braket{\Phi |\noref{\crea{p}\annih{q}} \noref{\crea{a} \annih{i}} | \Phi} \\
& \quad + \sum_{pqrs} \Gamma_{pqrs}
\braket{\Phi |\noref{\crea{p} \crea{q} \annih{s} \annih{r}}\noref{\crea{a} \annih{i}} | \Phi}
\end{aligned} \\
& \begin{aligned}
\mathllap{} & = \sum_{pq} f_{pq} \delta_{pi} \delta_{qa} n_{i} \bar{n}_{a}
\end{aligned} \\
& \begin{aligned}
\mathllap{} & = f_{ia}\,.
\end{aligned}
\end{align}
Via similar calculations, one finds
\begin{samepage}
\begin{subequations}
\begin{align}
\begin{split}
\braket{\Phi_{i}^{a} | H | \Phi} &= f_{ai}\,,
\end{split} \\
\begin{split}
\braket{\Phi | H | \Phi_{ij}^{ab}} &= \Gamma_{ijab}\,,
\end{split} \\
\begin{split}
\braket{\Phi_{ij}^{ab} | H | \Phi} &= \Gamma_{abij}\,,
\end{split} \\
\begin{split}
\braket{\Phi | H | \Phi_{ijk}^{abc}} &= W_{ijkabc}\,,
\end{split} \\
\begin{split}
\braket{\Phi_{ijk}^{abc} | H | \Phi} &= W_{abcijk}\,.
\end{split}
\end{align}
\end{subequations}
\end{samepage}
We define our ``off-diagonal'' normal-ordered Hamiltonian then to be
\begin{equation}
\begin{split}
H_{od} \equiv & \sum_{ia}\left(f_{ia} \noref{\crea{i}\annih{a}}
+ f_{ai} \noref{\crea{a}\annih{i}}\right) \\
& + \frac{1}{{(2!)}^2}\sum_{ijab} \left(
\Gamma_{ijab} \noref{\crea{i}\crea{j}\annih{b}\annih{a}}
+ \Gamma_{abij} \noref{\crea{a}\crea{b}\annih{j}\annih{i}}
\right) \\
& + \frac{1}{{(3!)}^2}\sum_{ijkabc} \left(
W_{ijkabc} \noref{\crea{i}\crea{j}\crea{k}\annih{c}\annih{b}\annih{a}}
+ W_{abcijk} \noref{\crea{a}\crea{b}\crea{c}\annih{k}\annih{j}\annih{i}}
\right)\,.
\end{split}
\end{equation}
We are now in a position where we can define generators that suppress these matrix elements
over the course of the flow.
Wegner's original ansatz for the generator of the SRG flow equation is
\begin{equation}
\eta(s) = \comm{H_d(s)}{H_{od}(s)}\,,
\end{equation}
where $H_d = H - H_{od}$~\cite{Wegn94srg}.
When using $H_{od}$ as defined above,
one can evaluate the commutator truncating at the two- or three-body level
depending on the truncation scheme,
giving the one-, two-, and three-body components of $\eta$
in the same form as Eqs.~\eqref{eq:imsrg3_0body}-\eqref{eq:imsrg3_3body}.
A perturbative analysis of the flow equations with this choice of generator,
as is done in Ref.~\cite{Herg16imsrglecnotes},
reveals that the two-body ``off-diagonal'' matrix elements are suppressed like
\begin{equation}
\Gamma_{abij}(s) \approx \Gamma_{abij}(0) \exp(-{(\Delta_{abij})}^{2} s)\,,
\end{equation}
where $\Delta_{abij}$ are the energy denominators.
There are multiple options for these energy denominators,
corresponding to different partitionings in MBPT (see Section~\ref{sec:mbpt}).
We choose to focus on the M{\o}ller-Plesset denominators,
with $\Delta_{abij} = \epsilon_{abij}$ from Eq.~\eqref{eq:mp_energy_denom}.
Another alternative is the Epstein-Nesbet denominators
(see Ref.~\cite{Shav09mbpt_cc_book} for details).
We have included the Wegner generator in this discussion for completeness
but will not use it in any applications.
One reason for this is that the Wegner generator is simply more expensive to construct
than the alternatives,
which give explicit expressions for the matrix elements of $\eta$,
resulting in scaling of $\mathcal{O}(N^4)$ and $\mathcal{O}(N^6)$ for IMSRG(2) and IMSRG(3),
respectively.
This is to be contrasted with the evaluation of a full commutator,
which scales like $\mathcal{O}(N^6)$ and $\mathcal{O}(N^9)$ for IMSRG(2) and IMSRG(3).
Additionally, the Wegner generator causes the system of differential equations
to be much more stiff,
making the integration much more expensive
(in terms of storage and computational requirements)
than for other generators.
The following generators directly construct the matrix elements of $\eta$, working with a basic form of
\begin{equation}
\begin{split}
\eta \equiv & \sum_{ia}\left(\genone_{ia} \noref{\crea{i}\annih{a}}
+ \genone_{ai} \noref{\crea{a}\annih{i}}\right) \\
& + \frac{1}{{(2!)}^2} \sum_{ijab} \left(
\gentwo_{ijab} \noref{\crea{i}\crea{j}\annih{b}\annih{a}}
+ \gentwo_{abij} \noref{\crea{a}\crea{b}\annih{j}\annih{i}}
\right) \\
& + \frac{1}{{(3!)}^2} \sum_{ijkabc} \left(
\genthree_{ijkabc} \noref{\crea{i}\crea{j}\crea{k}\annih{c}\annih{b}\annih{a}}
+ \genthree_{abcijk} \noref{\crea{a}\crea{b}\crea{c}\annih{k}\annih{j}\annih{i}}
\right)\,,
\end{split}
\end{equation}
where we note that for $\eta$ to be anti-Hermitian, the matrix elements must fulfill
\begin{subequations}
\begin{align}
\begin{split}
\genone_{ia} &= - \genone_{ai}\,,
\end{split} \\
\begin{split}
\gentwo_{ijab} &= - \gentwo_{abij}\,,
\end{split} \\
\begin{split}
\genthree_{ijkabc} &= - \genthree_{abcijk}\,.
\end{split}
\end{align}
\end{subequations}
The White generator corresponds to the choice
\begin{align}
\genone_{ia}(s) & = \frac{f_{ia}(s)}{\Delta_{ia}(s)}\,, \\
\gentwo_{ijab}(s) & = \frac{\Gamma_{ijab}(s)}{\Delta_{ijab}(s)}\,, \\
\genthree_{ijkabc}(s) & = \frac{W_{ijkabc}(s)}{\Delta_{ijkabc}(s)}\,,
\end{align}
where the antisymmetry of the denominators automatically gives the desired anti-Hermiticity~\cite{Whit02generator}.
The White generator suppresses off-diagonal matrix elements like
\begin{equation}
\Gamma_{abij}(s) \approx \Gamma_{abij}(0) \exp(-s)\,,
\end{equation}
that is, it suppresses all off-diagonal matrix elements with the same decay scale,
regardless of the energy differences between the states.
This is unusual and not strictly speaking in line with the renormalization group approach,
where large energy-difference modes are integrated out first.
However, we are interested in $H(\infty)$ and $E(\infty)$,
and in this limit all generators that suppress $H_{od}$ produce identical results
for $E(\infty)$ and $U^{\dagger}(\infty)\refgnd$,
up to truncation effects.
A potential difficulty with the White generator arises
when one of the energy denominators becomes very small,
leading to large matrix elements of $\eta$
and thus extremely large derivatives in the right-hand side of the flow equation.
This can be mitigated by a variation of the standard White generator,
the arctan generator,
with generator matrix elements defined as
\begin{align}
\genone_{ia}(s) & = \frac{1}{2}\text{arctan}\left(\frac{2f_{ia}(s)}{\Delta_{ia}(s)}\right)\,, \\
\gentwo_{ijab}(s) & = \frac{1}{2}\text{arctan}\left(\frac{2\Gamma_{ijab}(s)}{\Delta_{ijab}(s)}\right)\,, \\
\genthree_{ijkabc}(s) & = \frac{1}{2}\text{arctan}\left(\frac{2W_{ijkabc}(s)}{\Delta_{ijkabc}(s)}\right)\,,
\end{align}
where the arctan function regularizes any possible large matrix elements that arise due to small energy denominators.
The final generator we discuss here is the imaginary-time generator,
which was ostensibly inspired by imaginary-time evolution techniques
in Quantum Monte Carlo methods~\cite{Herg15imsrgphysrep}.
Its matrix elements are defined as
\begin{align}
\genone_{ia}(s) & = \text{sign}(\Delta_{ia}(s))f_{ia}(s)\,, \\
\gentwo_{ijab}(s) & = \text{sign}(\Delta_{ijab}(s))\Gamma_{ijab}(s)\,, \\
\genthree_{ijkabc}(s) & = \text{sign}(\Delta_{ijkabc}(s))W_{ijkabc}(s)\,.
\end{align}
A perturbative analysis of the flow equations with this generator choice shows
that off-diagonal matrix elements are suppressed like
\begin{equation}
\Gamma_{abij}(s) \approx \Gamma_{abij}(0) \exp(-|\Delta_{abij}|s)\,.
\end{equation}
The sign function in the definition of the generator ensures that
there is an absolute value around the energy denominator in the exponential,
giving a suppression for all matrix elements instead of an enhancement for some.
We also note here that the imaginary-time generator produces a ``proper'' RG flow,
where matrix elements coupling large energy differences are suppressed
before those coupling smaller energy differences.
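A minimal sketch of the White generator construction is given below
(the hole/particle index lists, dense arrays, and the M{\o}ller-Plesset sign convention
$\Delta_{ia} = f_{ii} - f_{aa}$ and $\Delta_{ijab} = f_{ii} + f_{jj} - f_{aa} - f_{bb}$
are assumptions for illustration);
the arctan and imaginary-time generators differ only in the arithmetic
applied to each matrix element.
\begin{verbatim}
import numpy as np

def white_generator(f, Gamma, holes, particles):
    """White generator with Moller-Plesset denominators (illustrative sketch)."""
    eta1 = np.zeros_like(f)
    eta2 = np.zeros_like(Gamma)
    for i in holes:
        for a in particles:
            # a small |Delta| here is exactly what the arctan variant regularizes
            Delta = f[i, i] - f[a, a]
            eta1[i, a] = f[i, a] / Delta
            eta1[a, i] = -eta1[i, a]                  # anti-Hermiticity
    for i in holes:
        for j in holes:
            for a in particles:
                for b in particles:
                    Delta = f[i, i] + f[j, j] - f[a, a] - f[b, b]
                    eta2[i, j, a, b] = Gamma[i, j, a, b] / Delta
                    eta2[a, b, i, j] = -eta2[i, j, a, b]
    return eta1, eta2
\end{verbatim}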
\section{The Magnus expansion}\label{sec:imsrg_magnus}
Working with Eqs.~\eqref{eq:imsrg2_0body}-\eqref{eq:imsrg2_2body}
and~\eqref{eq:imsrg3_0body}-\eqref{eq:imsrg3_3body},
one can solve the IMSRG by numerically integrating
the system of ordinary differential equations (ODEs)
to obtain the energy and the expectation values of other observables in the targeted state.
This approach has two main challenges:
First, the flow equations need to be solved to high precision,
as otherwise numerical effects destroy the unitarity of the transformation
even in the absence of any truncation.
This necessitates the application of sophisticated ODE solvers
to minimize this numerical error.
These solvers require the allocation of several times the memory requirement
of two- and three-body operators,
which is already substantial (on the order of gigabytes to tens of gigabytes),
and the evaluation of each integration step is substantially more expensive
(providing the benefit of reduced accumulated numerical error)
than an Euler method.
This is incidentally also a reason the White, arctan, and imaginary-time generators
are preferred over the Wegner generator,
as the stiffness of the flow equations with the Wegner generator
requires the use of stiff ODE solvers,
which are even more expensive in terms of storage and computational cost
than their non-stiff counterparts.
Second, the evolution of other operators along with the Hamiltonian
requires them to be evolved in parallel in this approach.
This means that for every additional operator the memory and computational cost
increases by the amount that it would cost to just solve the Hamiltonian.
Furthermore, additional operators may increase the stiffness of the system of ODEs
as the integration scales for their matrix elements may differ from the Hamiltonian.
This challenge can be alleviated by the ability to construct the unitary transformation
for the evolution, $U(s)$.
This is the goal of the Magnus expansion approach to solving the IMSRG flow equations~\cite{Morr15magnus}.
Given our definition of $\eta(s)$ following Eq.~\eqref{eq:srg_flow_eq},
we get a differential equation for $U(s)$,
\begin{equation}
\frac{dU(s)}{ds} = - \eta(s) U(s)\,,
\end{equation}
where $U(0) = 1$.
The formal integral of this differential equation is
\begin{equation}
U(s) = \mathcal{T}_s\left[\exp(-\int_{0}^{s} ds' \eta(s'))\right]\,,
\end{equation}
where $\mathcal{T}_s$ is the time-ordering operator with respect to $s$~\cite{Dyso49timeorder}.
The Magnus expansion postulates that a solution of the form
\begin{equation}
U(s) = \exp(\Omega(s))
\end{equation}
exists, where $\Omega(s)$ is anti-Hermitian and $\Omega(0) = 0$~\cite{Magn54magnus}.
To obtain $\Omega(s)$, one solves the differential equation of its expansion in $\eta(s)$,
\begin{equation}\label{eq:magnus_expansion}
\frac{d \Omega(s)}{ds} = \sum_{k=0}^{\infty} \frac{B_{k}}{k!} ad^{k}_{\Omega(s)}(\eta(s))\,,
\end{equation}
where $B_{k}$ are the Bernoulli numbers
and $ad^{k}_{\Omega}$ are the recursively defined commutators,
\begin{align}
ad^{0}_{\Omega(s)}(\eta(s)) & = \eta(s)\,, \\
ad^{k}_{\Omega(s)}(\eta(s)) & = \comm{\Omega(s)}{ad^{k-1}_{\Omega(s)}(\eta(s))}\,.
\end{align}
The interested reader may refer to Ref.~\cite{Blan09magnusreview} for detailed review
of the Magnus expansion.
Here we note that $ad^{k}_{\Omega(s)}(\eta(s))$ is anti-Hermitian for all $k$,
thus truncating Eq.~\eqref{eq:magnus_expansion} at any order
gives an exactly anti-Hermitian approximation to $d\Omega(s)/ds$,
which when integrated gives an exactly anti-Hermitian approximation to $\Omega(s)$.
Thus $U(s) = \exp(\Omega(s))$ is always exactly unitary,
regardless of accumulated numerical error in the solution for $\Omega(s)$.
This alleviates the requirement of using high-order ODE solvers to avoid numerical error,
and the solution of the IMSRG flow equations in this approach
can proceed using a cheap numerical integrator,
for example a simple Euler method.
To apply the obtained unitary transformation to our Hamiltonian or some other operator,
we use the Baker-Campbell-Hausdorff (BCH) formula,
\begin{equation}\label{eq:bch_formula}
H(s) = e^{\Omega(s)} H(0) e^{-\Omega(s)} = \sum_{k=0}^{\infty} \frac{1}{k!} ad^{k}_{\Omega(s)}(H(0))\,.
\end{equation}
To be concrete, the evaluation of the IMSRG flow equations in the Magnus formalism
proceeds as follows:
\begin{enumerate}
\item the generator $\eta(s)$ is constructed from $H(s)$,
\item the derivative $d\Omega(s)/ds$ is obtained via Eq.~\eqref{eq:magnus_expansion}
and applied via a simple Euler method,
\item the new evolved Hamiltonian $H(s+ds)$ is obtained via Eq.~\eqref{eq:bch_formula},
\end{enumerate}
repeating these steps until $E$ is sufficiently converged.
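A schematic implementation of one such step is sketched below,
with the generator $\eta(s)$ already constructed from $H(s)$
(representing the operators as plain NumPy arrays and passing in a generic
\texttt{commutator} function are simplifying assumptions;
in an actual IMSRG(2) or IMSRG(3) code each operator is an $(E, f, \Gamma)$
or $(E, f, \Gamma, W)$ set and the commutator evaluates the truncated
expressions of the previous sections, and the sign and ordering conventions
follow Eq.~\eqref{eq:magnus_expansion}).
\begin{verbatim}
import numpy as np
from math import factorial
from scipy.special import bernoulli

def magnus_step(Omega, H0, eta, ds, commutator, kmax=12, eps=1e-8):
    """One Euler step of the Magnus-formulation IMSRG flow (illustrative sketch)."""
    B = bernoulli(kmax)              # Bernoulli numbers B_0..B_kmax (scipy: B_1 = -1/2)
    # dOmega/ds = sum_k B_k/k! ad^k_Omega(eta), truncated once terms become tiny
    ad = eta
    dOmega = B[0] * ad
    for k in range(1, kmax + 1):
        ad = commutator(Omega, ad)
        term = (B[k] / factorial(k)) * ad
        dOmega = dOmega + term
        if np.linalg.norm(term) < eps * max(np.linalg.norm(Omega), 1.0):
            break
    # Euler update; exp(Omega) stays exactly unitary since Omega is anti-Hermitian
    Omega = Omega + ds * dOmega
    # H(s + ds) = exp(Omega) H(0) exp(-Omega) via the truncated BCH series
    ad = H0
    H = H0.copy()
    for k in range(1, kmax + 1):
        ad = commutator(Omega, ad)
        term = ad / factorial(k)
        H = H + term
        if np.linalg.norm(term) < eps:
            break
    return Omega, H
\end{verbatim}
For dense matrices one could simply pass
\texttt{commutator=lambda A, B: A @ B - B @ A};
in a truncated IMSRG code this call is replaced by the commutator of the
corresponding normal-ordered parts.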
For practical calculations, a few truncations must be made.
First, $H(s)$, $\eta(s)$, $\Omega(s)$, and all commutators must be truncated
at the $B$-body level,
leading to the Magnus(2) and Magnus(3) analogs to the IMSRG(2) and IMSRG(3) truncations.
Additionally, the Magnus and BCH expansions
[Eqs.~\eqref{eq:magnus_expansion} and~\eqref{eq:bch_formula}]
must be truncated at some finite $k$.
For the Magnus expansion, we truncate the series when the norm of the $k$-th term
drops below a threshold $\epsilon_{\text{deriv}}$,
\begin{equation}
\left\vert \frac{B_k || ad^{k}_{\Omega(s)}(\eta(s))||}{k!||\Omega(s)||} \right\vert < \epsilon_{\text{deriv}}\,.
\end{equation}
A similar condition can also be used for the truncation of the BCH expansion,
with the threshold $\epsilon_{\text{BCH}}$,
\begin{equation}
\left\vert \frac{|| ad^{k}_{\Omega(s)}(H(0))||}{k!||\Omega(s)||} \right\vert < \epsilon_{\text{BCH}}\,.
\end{equation}
An alternative, for when one is only interested in the zero-body part of the evolving Hamiltonian,
is
\begin{equation}
\left\vert \frac{\zerobodyop{ad^{k}_{\Omega(s)}(H(0))}}{k!} \right\vert < \epsilon_{\text{BCH}}\,.
\end{equation}
The Magnus expansion makes very clear the similarities and differences between
the IMSRG and coupled cluster.
Both use a nested commutator expansion,
ensuring the connected nature of the expansion
and guaranteeing size extensivity.
The IMSRG and CC seek to generate a similarity transformation
that decouples the reference state expectation value from the rest of the Hamiltonian.
However, in coupled cluster the cluster operator $T$ is non-Hermitian,
meaning that the BCH expansion for the similarity transformation truncates at a finite order.
The IMSRG generates a unitary transformation,
which means $\Omega$, the Magnus analog of the cluster operator,
is anti-Hermitian.
This leads to an infinite BCH expansion that must be truncated at some order.
In this thesis, we present various results
for many-body calculations obtained using the IMSRG.\@
Nearly all of these are done using the Magnus formalism,
as its computational benefits are invaluable when doing calculations
with three-body operators.
\section{Application to \texorpdfstring{${}^4\text{He}$}{helium 4}}\label{sec:imsrg2_he4_mscheme}
Here we consider ${}^4\text{He}$,
the lightest closed-shell nucleus.
Before we begin a discussion about the details of the system,
a few comments are in order.
The following calculation is restricted to a very small model space,
one insufficient to achieve converged results for observables,
and is also only for the IMSRG(2) truncation.
In Chapter~\ref{ch:ang_mom_coupling},
we discuss the formalism that exploits the spherical symmetry
of closed-shell systems like ${}^{4}\text{He}$
to cast the IMSRG flow equations into a more computationally tractable form,
which allows us to reach larger model spaces for the IMSRG(2)
and compute results for small model spaces for the IMSRG(3).
Thus, this implementation is a benchmark implementation for the IMSRG(2),
which we have compared against an existing publicly available IMSRG(2) implementation~\cite{Stro15imsrgcpp}.
This serves as a validation of our implementation
in addition to some IMSRG(2) and IMSRG(3) results for the pairing Hamiltonian
discussed in Appendix~\ref{app:pairing_hamiltonian_imsrg3}.
As input into our calculation, we start with the intrinsic $A$-body Hamiltonian
with only an initial two-body interaction,
\begin{equation}
H_{\text{int}} = T_{\text{int}} + \twobodyop{V}\,,
\end{equation}
with the intrinsic kinetic energy
\begin{align}
T_{\text{int}} & = T - T_{\text{cm}} \\
& = \left(1 - \frac{1}{A}\right) \sum_{i} \frac{p_{i}^{2}}{2m}
- \frac{1}{A}\sum_{i<j}\frac{p_{i} \cdot p_{j}}{m}\,.
\end{align}
We note that the first term gives us our one-body Hamiltonian
and the second term contributes to the two-body Hamiltonian along with $\twobodyop{V}$~\cite{Herg09intrinsicham}.
We work in the single-particle harmonic-oscillator basis
at several different values of $\hbar \Omega$
(see Section~\ref{sec:sp_ho}),
with the single-particle states
\begin{equation}
\ket{n_a (l_a s_a) j_a m_{j_a} t_a m_{t_a}} \equiv \ket{\alpha_a}\,,
\end{equation}
where $s_a=1/2$ and $t_a=1/2$.
A natural ordering of these states is according to their principal quantum number,
$e = 2 n + l$.
For the following calculation, we truncate the single-particle basis at $\emax=2$.
The resulting size of our single-particle basis is $N=40$.
As our reference state for ${}^4\text{He}$ we choose to fill the four $e=0$ HO states,
the most reasonable choice to target the ground state
without solving for and transforming to the Hartree-Fock basis.
\begin{figure}[t]
\centering
\includegraphics[width=0.45\textwidth]{thesis/doc/images/he4_imsrg2_energies.pdf}
\includegraphics[width=0.45\textwidth]{thesis/doc/images/he4_imsrg2_flow.pdf}
\caption[
The left panel shows $E(s=0)$ and $E(s \rightarrow \infty)$
for an IMSRG(2) calculation of ${}^4\text{He}$
for $\hbar \Omega$ ranging from $16\mev$ to $32\mev$.
The interaction used is the EM NN interaction
with a regulator cutoff $\Lambda=500\mev$
and SRG-evolved to $\lambda=1.8\invfm$.
The right panel shows the flowing energy $E(s)$
along with the energy with second- and third-order MBPT corrections included
for $\hbar \Omega=32\mev$.
The behavior in the right panel is similar for the other values of $\hbar \Omega$ shown in the left panel.
]{
The left panel shows $E(s=0)$ and $E(s \rightarrow \infty)$
for an IMSRG(2) calculation of ${}^4\text{He}$
for $\hbar \Omega$ ranging from $16\mev$ to $32\mev$.
The interaction used is the EM NN interaction
with a regulator cutoff $\Lambda=500\mev$
and SRG-evolved to $\lambda=1.8\invfm$~\cite{Ente03n3lonn}.
The right panel shows the flowing energy $E(s)$
along with the energy with second- and third-order MBPT corrections included
for $\hbar \Omega=32\mev$.
The behavior in the right panel is similar for the other values of $\hbar \Omega$ shown in the left panel.
}\label{fig:imsrg2_he4_results}
\end{figure}
For $\twobodyop{V}$, we use the EM NN potential from Ref.~\cite{Ente03n3lonn} at \nthreelo{}
with a regulator cutoff at $\Lambda=500\mev$ and SRG-evolved to $\lambda=1.8\invfm$.
The results from normal ordering our Hamiltonians at the different $\hbar \Omega$
with respect to our HO reference state
and evaluating the IMSRG(2) evolution are shown
in the left panel of Fig.~\ref{fig:imsrg2_he4_results}.
We find that the unevolved energy $E(s=0)$,
that is, the energy expectation value of the reference state,
is already good to within 30\% of the exact result,
a consequence of the SRG-softened interaction we are using.
Still, the IMSRG evolution absorbs up to $8\mev$ of correlation energy into the ground-state energy.
We also find that our implementation agrees with the implementation from Ref.~\cite{Stro15imsrgcpp}
to within $10^{-5}\mev$.
In the right panel of Fig.~\ref{fig:imsrg2_he4_results},
we show the flowing ground-state energy
as well as the ground-state energy with second- and third-order MBPT corrections.
We find that these corrections vanish as the correlations are absorbed into the ground-state energy,
indicating that we are achieving the desired decoupling.
We emphasize once again that these results are intended to be interpreted as validation
(for example, as a nuclear-like toy model)
and not as physically meaningful.
We consider the agreement between our implementation and that of Ref.~\cite{Stro15imsrgcpp}
to be \textit{a posteriori} evidence of the correctness of our implementation.
|
[STATEMENT]
lemma prj_emb: "prj\<cdot>(emb\<cdot>x) = coerce\<cdot>x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. PRJ('a)\<cdot>(EMB('b)\<cdot>x) = COERCE('b, 'a)\<cdot>x
[PROOF STEP]
by (simp add: coerce_def)
|
struct LineInfoNode
mod::Module
method::Symbol
file::Symbol
line::Int
inlined_at::Int
end
const NullLineInfo = LineInfoNode(@__MODULE__, Symbol(""), Symbol(""), 0, 0)
include("compiler/ssair/ir.jl")
include("compiler/ssair/domtree.jl")
include("compiler/ssair/slot2ssa.jl")
include("compiler/ssair/queries.jl")
include("compiler/ssair/passes.jl")
include("compiler/ssair/verify.jl")
include("compiler/ssair/legacy.jl")
macro show(s)
# return :(println($(QuoteNode(s)), " = ", $(esc(s))))
end
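# Normalize one lowered statement before SSA construction: fold :meta and line-number
# nodes into the line table `table` (tracking the current location in `loc`), translate
# :gotoifnot/:return expressions into their IR node types, and return `nothing` for
# statements that carry no runtime semantics.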
function normalize(@nospecialize(stmt), meta::Vector{Any}, table::Vector{LineInfoNode}, loc::RefValue{Int})
if isa(stmt, Expr)
if stmt.head == :meta
args = stmt.args
if length(args) > 0
a1 = args[1]
if a1 === :push_loc
let
current = loc[]
filename = args[2]::Symbol
methodname = NullLineInfo.method
mod = table[current].mod
line = 0
for i = 3:length(args)
ai = args[i]
if ai isa Symbol
methodname = ai
elseif ai isa Int32
line = Int(ai)
elseif ai isa Int64
line = Int(ai)
elseif ai isa Module
mod = ai
end
end
push!(table, LineInfoNode(mod, methodname, filename, line, current))
loc[] = length(table)
end
elseif a1 === :pop_loc
n = (length(args) > 1) ? args[2]::Int : 1
for i in 1:n
current = loc[]
current = table[current].inlined_at
current == 0 && break
loc[] = current
end
else
push!(meta, stmt)
end
end
return nothing
elseif stmt.head === :line
return nothing # deprecated - we shouldn't encounter this
elseif stmt.head === :gotoifnot
return GotoIfNot(stmt.args...)
elseif stmt.head === :return
return ReturnNode{Any}(stmt.args...)
end
elseif isa(stmt, LabelNode)
return nothing
elseif isa(stmt, LineNumberNode)
let # need to expand this node so that it is source-location independent
current = loc[]
info = table[current]
methodname = info.method
mod = info.mod
file = stmt.file
file isa Symbol || (file = info.file)
line = stmt.line
push!(table, LineInfoNode(mod, methodname, file, line, info.inlined_at))
loc[] = length(table)
end
return nothing
end
return stmt
end
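# Lower a CodeInfo to SSA IR: normalize each statement and record its line-table index,
# strip trailing junk, build the CFG and dominator tree, convert slots to SSA values,
# then run compaction, type lifting, and verification passes.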
function run_passes(ci::CodeInfo, nargs::Int, linetable::Vector{LineInfoNode})
mod = linetable[1].mod
ci.code = copy(ci.code)
meta = Any[]
lines = fill(0, length(ci.code))
let loc = RefValue(1)
for i = 1:length(ci.code)
stmt = ci.code[i]
stmt = normalize(stmt, meta, linetable, loc)
ci.code[i] = stmt
if !(stmt === nothing)
lines[i] = loc[]
end
end
end
ci.code = strip_trailing_junk!(ci.code, lines)
cfg = compute_basic_blocks(ci.code)
defuse_insts = scan_slot_def_use(nargs, ci)
domtree = construct_domtree(cfg)
ir = let code = Any[nothing for _ = 1:length(ci.code)]
argtypes = ci.slottypes[1:(nargs+1)]
IRCode(code, lines, cfg, argtypes, mod, meta)
end
ir = construct_ssa!(ci, ir, domtree, defuse_insts, nargs)
ir = compact!(ir)
verify_ir(ir)
ir = type_lift_pass!(ir)
ir = compact!(ir)
verify_ir(ir)
return ir
end
|
State Before: F : Type ?u.306222
α : Type u
β : Type v
γ : Type w
inst✝³ : TopologicalSpace α
inst✝² : PseudoMetricSpace β
inst✝¹ : PseudoMetricSpace γ
f g : α →ᵇ β
x : α
C : ℝ
inst✝ : IsEmpty α
⊢ dist f g = 0 State After: no goals Tactic: rw [(ext isEmptyElim : f = g), dist_self]
|
lemma homeomorphic_imp_homotopy_equivalent_space: "X homeomorphic_space Y \<Longrightarrow> X homotopy_equivalent_space Y"
|
```python
#Import and set magics
import numpy as np
%matplotlib inline
import pandas as pd
import datetime
import pandas_datareader
import pydst
import statsmodels.api as stat
import statsmodels.formula.api as statf
from statsmodels.sandbox.regression.predstd import wls_prediction_std
import sympy as sm
import math
import random
from scipy import optimize, interpolate
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
```
# Estimating income processes
Consider $N$ households indexed by $i$ who are in the labor market for $T$ periods indexed by $t \in \{1,2,...,T\}$.
Their **wage income** is stochastic and given by
$$P_{i,t} = \psi_{i,t} P_{i,t-1}$$
$$\tilde{Y}_{i,t} = \xi_{i,t} P_{i,t}$$
$$Y_{i,t} = \begin{cases} 0 & \text{if } \mu_{i,t} < \pi \\ \tilde{Y}_{i,t} & \text{else} \end{cases}$$
$$\psi_{i,t} \sim \text{LogNormal}(-0.5\sigma_{\psi}^2,\, \sigma_{\psi})$$
$$\xi_{i,t} \sim \text{LogNormal}(-0.5\sigma_{\xi}^2,\, \sigma_{\xi})$$
$$\mu_{i,t} \sim \text{Uniform}(0,1)$$
$$P_{i,0} = 1$$
where
* $\sigma_{\psi}$ is the standard deviation of the *permanent* shocks, $\psi_{i,t}$
* $\sigma_{\xi}$ is the standard deviation of the *transitory* shocks, $\xi_{i,t}$
* $\pi$ is the risk of unemployment
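As a reference for the moment calculations below, the following is a minimal sketch of how this process can be simulated directly from the recursion above (the function name, seed handling, and example parameter values are our own choices, not part of the original exercise):

```python
import numpy as np

def simulate_income(sigma_psi, sigma_xi, pi, T, N, seed=0):
    """Simulate the permanent-transitory income process defined above."""
    rng = np.random.default_rng(seed)
    P = np.ones(N)                                              # P_{i,0} = 1
    Y = np.zeros((T, N))
    for t in range(T):
        psi = rng.lognormal(-0.5*sigma_psi**2, sigma_psi, N)    # permanent shocks
        xi = rng.lognormal(-0.5*sigma_xi**2, sigma_xi, N)       # transitory shocks
        mu = rng.uniform(0, 1, N)
        P = psi*P                                               # P_{i,t} = psi_{i,t} P_{i,t-1}
        Y[t] = np.where(mu < pi, 0.0, xi*P)                     # unemployed with probability pi
    return Y

# e.g. Y_example = simulate_income(sigma_psi=0.1, sigma_xi=0.1, pi=0.05, T=20, N=1000)
```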
The data you have access to is:
```python
import numpy as np
dataY = np.load(r'C:\Users\musse\Documents\GitHub\projects-2020-zjr-hrv\projects-2020-zjr-hrv\dataY.npy')
T,N = dataY.shape
print(dataY)
print(dataY.shape)
```
[[0.83317569 0.72881172 0. ... 1.16771806 0.93207083 0.86711803]
[1.18542294 0.92826337 1.62913142 ... 1.13903869 0.94479246 0.78842682]
[1.14813596 0.90542496 0.70634401 ... 1.49584791 1.08969956 0. ]
...
[0.73818651 0.59958039 0.56135238 ... 2.60075173 1.07070489 0.43010036]
[1.14130879 0.85728537 0.54530761 ... 3.79294856 0.67764143 0.38720822]
[0.64908127 0.85101393 0.59517395 ... 3.32800991 0.82400879 0.5814573 ]]
(20, 50000)
**Question 1:** Calculate income growth rates as log-changes
```python
# Income growth rates as log-changes (first differences of log income)
logY = np.diff(np.log(dataY))
# printing initial arrays
print("Initial diff", logY)
# replace non-positive log-changes (including -inf from zero incomes) with NaN
logY[logY<=0] = np.nan
# printing result
print("New resulting diff: ", logY)
```
Initial diff [[-0.13382911 -inf inf ... 0.45991757 -0.22539794
-0.0722337 ]
[-0.2445394 0.56248678 -0.45247293 ... 0.10650339 -0.18697465
-0.18092569]
[-0.2374906 -0.24830202 0.49367077 ... 0.38133059 -0.31679118
-inf]
...
[-0.20796646 -0.06588123 0.6550227 ... 1.08826931 -0.88748332
-0.9120539 ]
[-0.2861601 -0.45242079 0.43603115 ... 1.35354726 -1.7222807
-0.5596557 ]
[ 0.27087056 -0.35757477 0.31896575 ... 1.52939372 -1.39594858
-0.34864367]]
New resulting diff: [[ nan nan inf ... 0.45991757 nan nan]
[ nan 0.56248678 nan ... 0.10650339 nan nan]
[ nan nan 0.49367077 ... 0.38133059 nan nan]
...
[ nan nan 0.6550227 ... 1.08826931 nan nan]
[ nan nan 0.43603115 ... 1.35354726 nan nan]
[0.27087056 nan 0.31896575 ... 1.52939372 nan nan]]
**Question 2:** Calculate the following 3 statistics from the data:
$s_{1}$: Share of observations with $Y_{i,t}=0$
$s_{2}$: Variance of income growth rates, $\text{Var}(\Delta \log Y_{i,t})$
$s_{3}$: Covariance of income growth rates one period apart, $\text{Cov}(\Delta \log Y_{i,t}, \Delta \log Y_{i,t-1})$
```python
#First, we define moments to use in both question 2 and 3:
def momments(Y):
s_sim1 = np.count_nonzero(Y==0)/np.size(Y)
Y[Y<0] = np.nan
Ydiff = np.diff(np.log(Y))
cov = pd.DataFrame(Ydiff).cov()
s_sim2 = np.zeros(Ydiff.shape[1])
for i in range(Ydiff.shape[1]):
s_sim2[i] = cov.iloc[i,i]
s_sim2 = np.mean(s_sim2)
s_sim3 = np.zeros(Ydiff.shape[1]-1)
for i in range(Ydiff.shape[1]-1):
s_sim3[i] = cov.iloc[i,(i+1)]
s_sim3 = np.mean(s_sim3)
return s_sim1, s_sim2, s_sim3
```
The statistics in Question 2 are now calculated as follows:
```python
# Data moments s1, s2, s3
s_data = [None]*3
s_data[0], s_data[1], s_data[2] = momments(dataY.T)
s_data
```
[0.049925, 0.054993983413371354, -0.022504802437365256]
**Question 3:** Simulate the income process using your own choice of $\sigma_{\psi}$, $\sigma_{\xi}$, $\pi$, $T$ and $N$. Calculate the same 3 statistics and compare them with the data statistics.
```python
#Simulated figures
T = 50
N = 1000
pi = 0.3
s_psi = 0.23
s_xi = 0.4
P_0 = 1
#Defining simulation
def sim(s_psi,s_xi,pi,N,T):
#Model
Y = np.zeros((N,T))
np.random.seed(1)
for t in range(T):
psi = np.random.lognormal(-0.5 * s_psi, s_psi, N)
xi = np.random.lognormal(-0.5 * s_xi,s_xi, N)
P = psi * P_0
Y_tilde = xi * P
mu = np.random.uniform(0,1,N)
Y[(pi<mu),t] = Y_tilde[(pi<mu)]
return Y
Y_sim = sim(s_psi,s_xi,pi,N,T)
# Simulated moments s1, s2, s3
s_sim = [None]*3
s_sim[0], s_sim[1], s_sim[2] = momments(Y_sim)
s_sim
```
[0.29988, 0.425019139369132, -0.21104509754699205]
Comparison of the simulated and actual data moments:

| statistic | simulated | data |
|---|---|---|
| $s_1$ | 0.29988 | 0.049925 |
| $s_2$ | 0.425019139369132 | 0.054993983413371354 |
| $s_3$ | -0.21104509754699205 | -0.022504802437365256 |

All three statistics are smaller in magnitude in the data than in the simulation, so the guessed parameters overstate the unemployment risk ($s_1$) as well as the volatility and negative autocovariance of income growth ($s_2$ and $s_3$).
**Question 4:** Solve the following minimization problem to estimate $\sigma_{\psi}$, $\sigma_{\xi}$ and $\pi$
$$\sigma_{\psi}^*,\, \sigma_{\xi}^*,\, \pi^* = \arg\min_{\sigma_{\psi}\geq 0,\, \sigma_{\xi}\geq 0,\, \pi\in[0,1]} \; (s_{1}^{sim}-s_{1}^{data})^2 + (s_{2}^{sim}-s_{2}^{data})^2 + (s_{3}^{sim}-s_{3}^{data})^2$$
where for each new guess of $\sigma_{\psi}$, $\sigma_{\xi}$ and $\pi$ you should re-simulate the data with the same seed and re-calculate the 3 statistics.
```python
#Defining the objective function
def objective(s):
Y = sim(s[0],s[1],s[2],N,T)
s_sim1, s_sim2, s_sim3 = momments(Y)
#obj = (s_sim1 - s_data1)**2 + (s_sim2 - s_data2)**2 + (s_sim3 - s_data3)**2
obj = (s_sim1 - s_data[0])**2 + (s_sim2 - s_data[1])**2 + (s_sim3 - s_data[2])**2
return obj
x0 = np.ones(3)/100
#Minimizing
res = optimize.minimize(objective,x0,method='L-BFGS-B',options={'eps':1e-4},bounds=((0,None),(0,None),(0,None),))
res
```
fun: 1.6144875720339336e-05
hess_inv: <3x3 LbfgsInvHessProduct with dtype=float64>
jac: array([ 2.25305304e-05, 6.20294454e-06, -2.49368487e-04])
message: b'CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH'
nfev: 104
nit: 16
status: 0
success: True
x: array([0.11493023, 0.11775912, 0.04956846])
# Wealth in the utility function
In the final period, t=T, the household solves the following problem:
$$
\begin{aligned}
v_{T}(a_{T-1})&= \max_{c_{T}}\frac{c_{T}^{1-\rho}}{1-\rho} + \kappa \frac{(a_{T}+\underline{a})^{1-\sigma}}{1-\sigma} \\
\text{s.t.} \\
a_{T} = (1+r)a_{T-1}+y-c_{T}
\end{aligned}
$$
where
* $a_t$ is end-of-period assets in period $t$
* $c_t$ is consumption in period $t$
* $\rho$ is the CRRA-coefficient for consumption utility
* $\sigma$ is the CRRA-coefficient for wealth utility
* $\underline{a}$ is an *additive* scaling factor for wealth utility
* $\kappa$ is a *multiplicative* scaling factor for wealth utility
* $r$ is the rate of return
* $y$ is income
The optimal **consumption function** is denoted $c_t^{*}(a_{t-1})$
The optimal **savings function** is denoted $a_t^{*}(a_{t-1}) = (1+r)a_{t-1} + y - c_t^{*}(a_{t-1})$.
```python
# a. parameters
rho = 2.0
sigma = 1.2
kappa = 0.6
a_ubar = 2.0
r = 0.04
y = 1.0
# b. grids
a_lag_vec = np.linspace(0,300,300)
```
**Question 1:** Find and plot the functions $v_{T}(a_{T-1})$, $c_T^{*}(a_{T-1})$, and $a_T^{*}(a_{T-1})$
```python
#Definining the given utility function
def utility (c,y,r,rho,kappa,a_lag,a_ubar,sigma):
return c**(1-rho)/(1-rho) + (kappa*(((1+r)*a_lag+y-c)+a_ubar)**(1-sigma))/(1-sigma)
```
```python
#Defining the solution function for period T
def solve_period_T(rho,kappa,a_ubar,sigma,y):
vT_grid = np.empty(a_lag_vec.size)
cT_grid = np.empty(a_lag_vec.size)
aT_grid = np.empty(a_lag_vec.size)
# solve for a_lag in grid
for i, a_lag in enumerate(a_lag_vec):
# Objective function
obj = lambda c: -utility(c,y,r,rho,kappa,a_lag_vec[i],a_ubar,sigma)
#Initial guess of consumption
x0 = a_lag_vec[i]/10 + y
#Optimization
result = optimize.minimize(obj,[x0],method='L-BFGS-B')
#Saving our results
vT_grid[i] = -result.fun
cT_grid[i] = result.x
aT_grid[i] = (1+r)*a_lag_vec[i]+y-cT_grid[i]
return vT_grid,cT_grid,aT_grid
```
```python
v_grid = [None]*(T+1)
c_grid = [None]*(T+1)
a_grid = [None]*(T+1)
v_interp_list = [None]*(T+1)
```
```python
#Solving the max problem in period T
v_grid[T],c_grid[T],a_grid[T] = solve_period_T(rho,kappa,a_ubar,sigma,y)
```
```python
#Constructing interpolator
v_interp_list[T] = interpolate.RegularGridInterpolator([a_lag_vec],v_grid[T], bounds_error=False)
```
```python
#Plotting vT
fig, ax = plt.subplots()
ax.plot(a_lag_vec,v_grid[T])
ax.set(xlabel='$a_{T-1}$', ylabel='$v_T(a_{T-1})$')
ax.grid()
fig.savefig("$v_T$")
plt.show()
```
```python
#Plotting cT
fig, ax = plt.subplots()
ax.plot(a_lag_vec,c_grid[T])
ax.set(xlabel='$a_{T-1}$', ylabel='$c_T^*(a_{T-1})$')
ax.grid()
fig.savefig("$c_T$")
plt.show()
```
```python
#Plotting aT
fig, ax = plt.subplots()
ax.plot(a_lag_vec,a_grid[T])
ax.set(xlabel='$a_{T-1}$', ylabel='$a_T^*(a_{T-1})$')
ax.grid()
fig.savefig("$a_T$")
plt.show()
```
**Question 2:** Find and plot $v_{T-1}(a_{T-2})$ and $c_{T-1}^{*}(a_{T-2})$.
In all periods *before the last*, $t < T$, the household solves:
$$
\begin{aligned}
v_{t}(a_{t-1}) &= \max_{c_{t}} \frac{c_t^{1-\rho}}{1-\rho} + \kappa \frac{(a_t+\underline{a})^{1-\sigma}}{1-\sigma} + \beta v_{t+1}(a_t) \\
\text{s.t.} \\
a_{t} &= (1+r)a_{t-1} + y - c_{t}
\end{aligned}
$$
where $\beta$ is the discount factor for future utility.
```python
beta = 0.97
T= 20
```
```python
#solve function for period t
def solve_period_t(rho,kappa,a_ubar,sigma,y,t):
vt_grid = np.empty(a_lag_vec.size)
ct_grid = np.empty(a_lag_vec.size)
at_grid = np.empty(a_lag_vec.size)
#solve for each lag in grid
for i, a_lag in enumerate(a_lag_vec):
#objective function
obj = lambda c: -utility(c,y,r,rho,kappa,a_lag_vec[i],a_ubar,sigma) - beta*v_interp_list[t+1]([((1+r)*a_lag+y-c)])
#Initial guess
x0 = a_lag_vec[i]/10 + y
#optimization
result = optimize.minimize(obj,[x0],method='L-BFGS-B',bounds=((1e-8,None),))
#saving results
vt_grid[i] = -result.fun
ct_grid[i] = result.x
at_grid[i] = (1+r)*a_lag_vec[i]+y-ct_grid[i]
return vt_grid,ct_grid,at_grid
```
```python
#Creating a loop to calculate for both question 2 and 3
for i in range(T):
t = T-i-1
v_gridt,c_gridt,a_gridt = solve_period_t(rho,kappa,a_ubar,sigma,y,t)
v_grid[t] = v_gridt
c_grid[t] = c_gridt
a_grid[t] = a_gridt
v_interp_list[t] = interpolate.RegularGridInterpolator([a_lag_vec],v_grid[t], bounds_error=False, fill_value=None)
```
```python
#Plotting vt
fig, ax = plt.subplots()
ax.plot(a_lag_vec,v_grid[19])
ax.set(xlabel='$a_{T-2}$', ylabel='$v_{T-1}(a_{T-2})$')
ax.grid()
fig.savefig("$v_[T-1]$")
plt.show()
```
```python
#Plotting c_t-1
fig, ax = plt.subplots()
ax.plot(a_lag_vec,c_grid[19])
ax.set(xlabel='$a_{T-2}$', ylabel='$c_{T-1}^*(a_{T-2})$')
ax.grid()
fig.savefig("$c_[T-1]$")
plt.show()
```
**Question 3:** Find $c_t^{*}(a_{t-1})$ for $t \in \{0,1,...,T\}$ and plot them in a single figure.
```python
#Plotting c_t for t = 1,...,T in a single figure
fig, ax = plt.subplots()
for t in range(1, T+1):
    ax.plot(a_lag_vec, c_grid[t], label=f'$t={t}$')
ax.set(xlabel='$a_{t-1}$', ylabel='$c_t^*(a_{t-1})$')
ax.legend(ncol=2, fontsize='small')
plt.show()
```
Define the saving rate as:
$$s_{t}^*(a_{t-1}) = \frac{a_{t}^*(a_{t-1}) - a_{t-1}}{y + r a_{t-1}} = \frac{r a_{t-1} + y - c_{t}^*(a_{t-1})}{y + r a_{t-1}}$$
**Question 4:** Plot $s_0^{*}(a_{-1})$. Do the rich or the poor save the most?
```python
s_0 = (r*a_lag_vec + y - c_grid[0])/(y+r*a_lag_vec)
```
```python
#Plot savingsrate
fig = plt.figure(figsize=(10,4))
ax = fig.add_subplot(1,2,1)
ax.plot(a_lag_vec,s_0)
ax.set_xlabel('$a_{t-1}$')
ax.set_ylabel('$s_0$')
ax.set_title('Saving rate')
```
The saving rate has a U shape: very poor households save a large share of their resources, very rich households also save a large share, while households in between save relatively little. Because of this shape, it is hard to give a clear-cut answer to whether the rich or the poor save the most.
**Question 5:** Can you change the parameter choices such that $s_0^{*}(a_{-1})$ is monotonically decreasing in $a_{-1}$?
```python
#Changing the parameter r, to see if the saving rate is decreasing
r1=0
s_00 = (r1*a_lag_vec + y - c_grid[0])/(y+r1*a_lag_vec)
```
```python
fig = plt.figure(figsize=(10,4))
ax = fig.add_subplot(1,2,1)
ax.plot(a_lag_vec,s_00)
ax.set_xlabel('$a_{t-1}$')
ax.set_ylabel('$s_0$')
ax.set_title('Saving rate')
```
By setting the rate of return $r$ equal to 0, we obtain a saving rate $s_0^{*}(a_{-1})$ that is monotonically decreasing in $a_{-1}$.
# Refined grid search
Let $\boldsymbol{x} = \left[\begin{array}{c}
x_1 \\
x_2\\
\end{array}\right]$ be a two-dimensional vector. Consider the following algorithm:
**Algorithm:** `grid_search()`
**Goal:** Minimize the function $f(\boldsymbol{x})$.
1. Choose a grid size $N$ and minimum and maximum values of $x_{1}$ and $x_{2}$, denoted $\overline{x_{1}} > \underline{x_{1}}$ and $\overline{x_{2}} > \underline{x_{2}}$
2. Calculate step sizes
   $$\Delta_{1} = (\overline{x_{1}}-\underline{x_{1}})/(N-1)$$
   $$\Delta_{2} = (\overline{x_{2}}-\underline{x_{2}})/(N-1)$$
3. Find the grid point with the lowest function value by solving
   $$j^{*}_{1},\, j^{*}_{2} = \arg\min_{j_{1}\in\{0,\dots,N-1\},\, j_{2}\in\{0,\dots,N-1\}} f\left(\underline{x_{1}}+j_{1}\Delta_{1},\, \underline{x_{2}}+j_{2}\Delta_{2}\right)$$
4. Return the corresponding grid point $\left(\underline{x_{1}}+j^{*}_{1}\Delta_{1},\, \underline{x_{2}}+j^{*}_{2}\Delta_{2}\right)$ and its function value
**Question 1:** Implement the grid_search() algorithm to minimize the rosen function.
```python
def rosen(x):
return (1.0-x[0])**2+2*(x[1]-x[0]**2)**2
```
```python
# settings
x1_min = 0
x1_max = 5
x2_min = 0
x2_max = 4
N = 1000
#Implementing grid_search()
def grid_search(f,x1_min,x1_max,x2_min,x2_max,N):
#calculate step sizes
delta1=(x1_max-x1_min)/(N-1)
delta2=(x2_max-x2_min)/(N-1)
f_min = np.inf
arg_x_min = None
x = None
#making a loop
for i in range(N):
for j in range(N):
x = [x1_min+i*delta1,x2_min+j*delta2]
f_val = f(x)
if f_val < f_min:
f_min = f_val
arg_x_min = x
return arg_x_min,f_min
# apply grid search
x,f = grid_search(rosen,x1_min,x1_max,x2_min,x2_max,N)
print('minimum found at' , x)
print('with the function value' ,f)
```
minimum found at [1.001001001001001, 1.001001001001001]
with the function value 3.0100230440752395e-06
**Question 2:** Implement the refined_grid_search() algorithm to minimize the rosen function
```python
K = 10
def refined_grid_search(f,x1_min,x1_max,x2_min,x2_max,N,K):
x = [None]*2
k = 0
#making a loop
for i in range(K):
if k > 0:
x1_delta = 3*(x1_max-x1_min)/(N-1)
x2_delta = 3*(x2_max-x2_min)/(N-1)
x1_min = np.maximum(x1_min,x[0]-x1_delta)
x2_min = np.maximum(x2_min,x[1]-x2_delta)
x1_max = np.minimum(x1_max,x[0]+x1_delta)
x2_max = np.minimum(x2_max,x[1]+x2_delta)
x,f_max = grid_search(f,x1_min,x1_max,x2_min,x2_max,N)
k = k + 1
if k >= K:
return x, f_max
# apply refined grid search
x,f_max = refined_grid_search(rosen,x1_min,x1_max,x2_min,x2_max,N,K)
```
```python
np.minimum(1,2)
```
1
```python
print('minimum found at', x)
print('at the function value', f_max)
```
minimum found at [1.0, 1.0]
at the function value 0.0
|
import .X
import .Idem
import group_theory.group_action
open X
namespace idem_action_X
def mul_action' (R : Type)[comm_ring R] : Idem(R) → X(R) → X(R) := λ g η,
begin
exact {
x := g.e * (η.x-η.y) + η .y,
y := η.x + g.e * (η .y -η.x) ,
inv := η.inv,
certif :=
begin
have h : g.e * (η.x - η.y) + η.y - (η.x + g.e * (η.y - η.x)) = g.e * (η.x - η.y) - (1-g.e) * (η.x - η.y),
ring,
rw h,
rw ← Idem.Idem.calculus.square_idem,
exact η.certif,
end
}
end
instance (R : Type)[comm_ring R] : has_scalar (Idem(R)) (X(R)) := ⟨mul_action' R⟩
variables (R :Type)[comm_ring R]
lemma mul_action_comp_x (g : Idem R) (η : X R): (g • η).x = g.e * (η.x-η.y) + η.y := rfl
lemma mul_action_comp_y (g : Idem R) (η : X R): (g • η).y = η.x + g.e * (η .y -η.x) := rfl
lemma mul_action_comp_inv (g : Idem R) (η : X R): (g • η).inv = η.inv := rfl
--- TODO: write a rewriter!
open Idem
open X
meta def idem_ring : tactic unit :=
`[simp only [one_e, mul_action_comp_x,mul_action_comp_y, e_comp], ring]
run_cmd add_interactive [`idem_ring]
def one_smul' (η : X R) : (1 : Idem R) • η = η := begin
ext ; idem_ring,
end
def mul_smul' (g1 g2 : Idem R) (η : X(R)) : (g1 * g2) • η = g1 • g2 • η :=
begin
ext ; idem_ring,
end
instance (R : Type)[comm_ring R]: mul_action (Idem R) (X(R)) := ⟨ one_smul' R, mul_smul' R⟩
open mul_action
def A := orbit_rel (Idem (R)) (X(R))
#check (A(R)).r
end idem_action_X
|
{-#LANGUAGE BangPatterns #-}
module Tests.Correlation
( tests ) where
import Test.Framework
import Test.Framework.Providers.QuickCheck2
import Test.Framework.Providers.HUnit
import Test.HUnit (Assertion, (@=?))
import qualified Data.Vector as V
import Statistics.Correlation.Kendall
tests :: Test
tests = testGroup "Correlation"
[ testProperty "Kendall test -- general" testKendall
, testCase "Kendall test -- special cases" testKendallSpecial
]
testKendall :: [(Double, Double)] -> Bool
testKendall xy | isNaN r1 = isNaN r2
| otherwise = r1 == r2
where
r1 = kendallBruteForce xy
r2 = kendall $ V.fromList xy
testKendallSpecial :: Assertion
testKendallSpecial = ys @=? map (kendall.V.fromList) xs
where
(xs, ys) = unzip testData
testData :: [([(Double, Double)], Double)]
testData = [ ( [(1,1), (2,2), (3,1), (1,5), (2,2)], -0.375 )
, ( [(1,3), (1,3), (1,3), (3,2), (3,5)], 0)
]
kendallBruteForce :: [(Double, Double)] -> Double
kendallBruteForce xy = (n_c - n_d) / sqrt ((n_0 - n_1) * (n_0 - n_2))
where
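    -- 'f' enumerates all unordered pairs of observations; 'g' classifies each pair
    -- as concordant, discordant, or tied in x and/or y.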
allPairs = f xy
(n_c, n_d, n_1, n_2) = foldl g (0,0,0,0) allPairs
n_0 = fromIntegral.length $ allPairs
g (!nc, !nd, !n1, !n2) ((x1, y1), (x2, y2))
| (x2 - x1) * (y2 - y1) > 0 = (nc+1, nd, n1, n2)
| (x2 - x1) * (y2 - y1) < 0 = (nc, nd+1, n1, n2)
| otherwise = if x1 == x2
then if y1 == y2
then (nc, nd, n1+1, n2+1)
else (nc, nd, n1+1, n2)
else (nc, nd, n1, n2+1)
f (x:xs) = zip (repeat x) xs ++ f xs
f _ = []
|
[STATEMENT]
lemma ip_constant:
"paodv i \<TTurnstile> onl \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V (\<lambda>(\<xi>, _). ip \<xi> = i)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. paodv i \<TTurnstile> onl \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V (\<lambda>(\<xi>, uu_). ip \<xi> = i)
[PROOF STEP]
by (inv_cterms simp add: \<sigma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V_def)
|
# Computing the 4-Velocity Time-Component $u^0$, the Magnetic Field Measured by a Comoving Observer $b^{\mu}$, and the Poynting Vector $S^i$
## Authors: Zach Etienne & Patrick Nelson
[comment]: <> (Abstract: TODO)
**Notebook Status:** <font color='green'><b> Validated </b></font>
**Validation Notes:** This module has been validated against a trusted code (the hand-written smallbPoynET in WVUThorns_diagnostics, which itself is based on expressions in IllinoisGRMHD... which was validated against the original GRMHD code of the Illinois NR group)
### NRPy+ Source Code for this module: [u0_smallb_Poynting__Cartesian.py](../edit/u0_smallb_Poynting__Cartesian/u0_smallb_Poynting__Cartesian.py)
[comment]: <> (Introduction: TODO)
<a id='toc'></a>
# Table of Contents
$$\label{toc}$$
This notebook is organized as follows
1. [Step 1](#u0bu): Computing $u^0$ and $b^{\mu}$
1. [Step 1.a](#4metric): Compute the 4-metric $g_{\mu\nu}$ and its inverse $g^{\mu\nu}$ from the ADM 3+1 variables, using the [`BSSN.ADMBSSN_tofrom_4metric`](../edit/BSSN/ADMBSSN_tofrom_4metric.py) ([**tutorial**](Tutorial-ADMBSSN_tofrom_4metric.ipynb)) NRPy+ module
1. [Step 1.b](#u0): Compute $u^0$ from the Valencia 3-velocity
1. [Step 1.c](#uj): Compute $u_j$ from $u^0$, the Valencia 3-velocity, and $g_{\mu\nu}$
1. [Step 1.d](#gamma): Compute $\gamma=$ `gammaDET` from the ADM 3+1 variables
1. [Step 1.e](#beta): Compute $b^\mu$
1. [Step 2](#poynting_flux): Defining the Poynting Flux Vector $S^{i}$
1. [Step 2.a](#g): Computing $g^{i\nu}$
1. [Step 2.b](#s): Computing $S^{i}$
1. [Step 3](#code_validation): Code Validation against `u0_smallb_Poynting__Cartesian` NRPy+ module
1. [Step 4](#appendix): Appendix: Proving Eqs. 53 and 56 in [Duez *et al* (2005)](https://arxiv.org/pdf/astro-ph/0503420.pdf)
1. [Step 5](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='u0bu'></a>
# Step 1: Computing $u^0$ and $b^{\mu}$ \[Back to [top](#toc)\]
$$\label{u0bu}$$
First some definitions. The spatial components of $b^{\mu}$ are simply the magnetic field as measured by an observer comoving with the plasma $B^{\mu}_{\rm (u)}$, divided by $\sqrt{4\pi}$. In addition, in the ideal MHD limit, $B^{\mu}_{\rm (u)}$ is orthogonal to the plasma 4-velocity $u^\mu$, which sets the $\mu=0$ component.
Note also that $B^{\mu}_{\rm (u)}$ is related to the magnetic field as measured by a *normal* observer $B^i$ via a simple projection (Eq 21 in [Duez *et al* (2005)](https://arxiv.org/pdf/astro-ph/0503420.pdf)), which results in the expressions (Eqs 23 and 24 in [Duez *et al* (2005)](https://arxiv.org/pdf/astro-ph/0503420.pdf)):
\begin{align}
\sqrt{4\pi} b^0 = B^0_{\rm (u)} &= \frac{u_j B^j}{\alpha} \\
\sqrt{4\pi} b^i = B^i_{\rm (u)} &= \frac{B^i + (u_j B^j) u^i}{\alpha u^0}\\
\end{align}
$B^i$ is related to the actual magnetic field evaluated in IllinoisGRMHD, $\tilde{B}^i$ via
$$B^i = \frac{\tilde{B}^i}{\sqrt{\gamma}},$$
where $\gamma$ is the determinant of the spatial 3-metric.
The above expressions will require that we compute
1. the 4-metric $g_{\mu\nu}$ from the ADM 3+1 variables
1. $u^0$ from the Valencia 3-velocity
1. $u_j$ from $u^0$, the Valencia 3-velocity, and $g_{\mu\nu}$
1. $\gamma$ from the ADM 3+1 variables
<a id='4metric'></a>
## Step 1.a: Compute the 4-metric $g_{\mu\nu}$ and its inverse $g^{\mu\nu}$ from the ADM 3+1 variables, using the [`BSSN.ADMBSSN_tofrom_4metric`](../edit/BSSN/ADMBSSN_tofrom_4metric.py) ([**tutorial**](Tutorial-ADMBSSN_tofrom_4metric.ipynb)) NRPy+ module \[Back to [top](#toc)\]
$$\label{4metric}$$
We are given $\gamma_{ij}$, $\alpha$, and $\beta^i$ from ADMBase, so let's first compute
$$
g_{\mu\nu} = \begin{pmatrix}
-\alpha^2 + \beta^k \beta_k & \beta_i \\
\beta_j & \gamma_{ij}
\end{pmatrix}.
$$
```python
# Step 1: Initialize needed Python/NRPy+ modules
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import NRPy_param_funcs as par # NRPy+: Parameter interface
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
from outputC import outputC # NRPy+: Basic C code output functionality
import BSSN.ADMBSSN_tofrom_4metric as AB4m # NRPy+: ADM/BSSN <-> 4-metric conversions
# Set spatial dimension = 3
DIM=3
thismodule = "smallbPoynET"
# Step 1.a: Compute the 4-metric $g_{\mu\nu}$ and its inverse
# $g^{\mu\nu}$ from the ADM 3+1 variables, using the
# BSSN.ADMBSSN_tofrom_4metric NRPy+ module
import BSSN.ADMBSSN_tofrom_4metric as AB4m
gammaDD,betaU,alpha = AB4m.setup_ADM_quantities("ADM")
AB4m.g4DD_ito_BSSN_or_ADM("ADM",gammaDD,betaU,alpha)
g4DD = AB4m.g4DD
AB4m.g4UU_ito_BSSN_or_ADM("ADM",gammaDD,betaU,alpha)
g4UU = AB4m.g4UU
```
<a id='u0'></a>
## Step 1.b: Compute $u^0$ from the Valencia 3-velocity \[Back to [top](#toc)\]
$$\label{u0}$$
According to Eqs. 9-11 of [the IllinoisGRMHD paper](https://arxiv.org/pdf/1501.07276.pdf), the Valencia 3-velocity $v^i_{(n)}$ is related to the 4-velocity $u^\mu$ via
\begin{align}
\alpha v^i_{(n)} &= \frac{u^i}{u^0} + \beta^i \\
\implies u^i &= u^0 \left(\alpha v^i_{(n)} - \beta^i\right)
\end{align}
Defining $v^i = \frac{u^i}{u^0}$, we get
$$v^i = \alpha v^i_{(n)} - \beta^i,$$
and in terms of this variable we get
\begin{align}
g_{00} \left(u^0\right)^2 + 2 g_{0i} u^0 u^i + g_{ij} u^i u^j &= \left(u^0\right)^2 \left(g_{00} + 2 g_{0i} v^i + g_{ij} v^i v^j\right)\\
\implies u^0 &= \pm \sqrt{\frac{-1}{g_{00} + 2 g_{0i} v^i + g_{ij} v^i v^j}} \\
&= \pm \sqrt{\frac{-1}{(-\alpha^2 + \beta^2) + 2 \beta_i v^i + \gamma_{ij} v^i v^j}} \\
&= \pm \sqrt{\frac{1}{\alpha^2 - \gamma_{ij}\left(\beta^i + v^i\right)\left(\beta^j + v^j\right)}}\\
&= \pm \sqrt{\frac{1}{\alpha^2 - \alpha^2 \gamma_{ij}v^i_{(n)}v^j_{(n)}}}\\
&= \pm \frac{1}{\alpha}\sqrt{\frac{1}{1 - \gamma_{ij}v^i_{(n)}v^j_{(n)}}}
\end{align}
Generally speaking, numerical errors will occasionally drive expressions under the radical to either negative values or potentially enormous values (corresponding to enormous Lorentz factors). Thus a reliable approach for computing $u^0$ requires that we first rewrite the above expression in terms of the Lorentz factor squared: $\Gamma^2=\left(\alpha u^0\right)^2$:
\begin{align}
u^0 &= \pm \frac{1}{\alpha}\sqrt{\frac{1}{1 - \gamma_{ij}v^i_{(n)}v^j_{(n)}}}\\
\implies \left(\alpha u^0\right)^2 &= \frac{1}{1 - \gamma_{ij}v^i_{(n)}v^j_{(n)}} \\
\implies \gamma_{ij}v^i_{(n)}v^j_{(n)} &= 1 - \frac{1}{\left(\alpha u^0\right)^2} \\
&= 1 - \frac{1}{\Gamma^2}
\end{align}
In order for the bottom expression to hold true, the left-hand side must be between 0 and 1. Again, this is not guaranteed due to the appearance of numerical errors. In fact, a robust algorithm will not allow $\Gamma^2$ to become too large (which might contribute greatly to the stress-energy of a given gridpoint), so let's define $\Gamma_{\rm max}$, the largest allowed Lorentz factor.
Then our algorithm for computing $u^0$ is as follows:
If
$$R=\gamma_{ij}v^i_{(n)}v^j_{(n)}>1 - \frac{1}{\Gamma_{\rm max}^2},$$
then adjust the 3-velocity $v^i$ as follows:
$$v^i_{(n)} = \sqrt{\frac{1 - \frac{1}{\Gamma_{\rm max}^2}}{R}}v^i_{(n)}.$$
After this rescaling, we are then guaranteed that if $R$ is recomputed, it will be set to its ceiling value $R=R_{\rm max} = 1 - \frac{1}{\Gamma_{\rm max}^2}$.
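Indeed, recomputing $R$ with the rescaled velocity gives $\gamma_{ij}\left(\sqrt{R_{\rm max}/R}\,v^i_{(n)}\right)\left(\sqrt{R_{\rm max}/R}\,v^j_{(n)}\right) = \frac{R_{\rm max}}{R}\,\gamma_{ij}v^i_{(n)}v^j_{(n)} = R_{\rm max}$.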
Then, regardless of whether the ceiling on $R$ was applied, $u^0$ can be safely computed via
$$
u^0 = \frac{1}{\alpha \sqrt{1-R}}.
$$
```python
ValenciavU = ixp.register_gridfunctions_for_single_rank1("AUX","ValenciavU",DIM=3)
# Step 1: Compute R = 1 - 1/max(Gamma)
R = sp.sympify(0)
for i in range(DIM):
for j in range(DIM):
R += gammaDD[i][j]*ValenciavU[i]*ValenciavU[j]
GAMMA_SPEED_LIMIT = par.Cparameters("REAL",thismodule,"GAMMA_SPEED_LIMIT",10.0) # Default value based on
# IllinoisGRMHD.
# GiRaFFE default = 2000.0
Rmax = 1 - 1/(GAMMA_SPEED_LIMIT*GAMMA_SPEED_LIMIT)
rescaledValenciavU = ixp.zerorank1()
for i in range(DIM):
rescaledValenciavU[i] = ValenciavU[i]*sp.sqrt(Rmax/R)
rescaledu0 = 1/(alpha*sp.sqrt(1-Rmax))
regularu0 = 1/(alpha*sp.sqrt(1-R))
computeu0_Cfunction = """
/* Function for computing u^0 from Valencia 3-velocity. */
/* Inputs: ValenciavU[], alpha, gammaDD[][], GAMMA_SPEED_LIMIT (C parameter) */
/* Output: u0=u^0 and velocity-limited ValenciavU[] */\n\n"""
computeu0_Cfunction += outputC([R,Rmax],["const double R","const double Rmax"],"returnstring",
params="includebraces=False,CSE_varprefix=tmpR,outCverbose=False")
computeu0_Cfunction += "if(R <= Rmax) "
computeu0_Cfunction += outputC(regularu0,"u0","returnstring",
params="includebraces=True,CSE_varprefix=tmpnorescale,outCverbose=False")
computeu0_Cfunction += " else "
computeu0_Cfunction += outputC([rescaledValenciavU[0],rescaledValenciavU[1],rescaledValenciavU[2],rescaledu0],
["ValenciavU0","ValenciavU1","ValenciavU2","u0"],"returnstring",
params="includebraces=True,CSE_varprefix=tmprescale,outCverbose=False")
print(computeu0_Cfunction)
```
/* Function for computing u^0 from Valencia 3-velocity. */
/* Inputs: ValenciavU[], alpha, gammaDD[][], GAMMA_SPEED_LIMIT (C parameter) */
/* Output: u0=u^0 and velocity-limited ValenciavU[] */
const double R = ((ValenciavU0)*(ValenciavU0))*gammaDD00 + 2*ValenciavU0*ValenciavU1*gammaDD01 + 2*ValenciavU0*ValenciavU2*gammaDD02 + ((ValenciavU1)*(ValenciavU1))*gammaDD11 + 2*ValenciavU1*ValenciavU2*gammaDD12 + ((ValenciavU2)*(ValenciavU2))*gammaDD22;
const double Rmax = 1 - 1/((GAMMA_SPEED_LIMIT)*(GAMMA_SPEED_LIMIT));
if(R <= Rmax) {
u0 = 1/(alpha*sqrt(-((ValenciavU0)*(ValenciavU0))*gammaDD00 - 2*ValenciavU0*ValenciavU1*gammaDD01 - 2*ValenciavU0*ValenciavU2*gammaDD02 - ((ValenciavU1)*(ValenciavU1))*gammaDD11 - 2*ValenciavU1*ValenciavU2*gammaDD12 - ((ValenciavU2)*(ValenciavU2))*gammaDD22 + 1));
}
else {
const double tmprescale_1 = sqrt((1 - 1/((GAMMA_SPEED_LIMIT)*(GAMMA_SPEED_LIMIT)))/(((ValenciavU0)*(ValenciavU0))*gammaDD00 + 2*ValenciavU0*ValenciavU1*gammaDD01 + 2*ValenciavU0*ValenciavU2*gammaDD02 + ((ValenciavU1)*(ValenciavU1))*gammaDD11 + 2*ValenciavU1*ValenciavU2*gammaDD12 + ((ValenciavU2)*(ValenciavU2))*gammaDD22));
ValenciavU0 = ValenciavU0*tmprescale_1;
ValenciavU1 = ValenciavU1*tmprescale_1;
ValenciavU2 = ValenciavU2*tmprescale_1;
u0 = fabs(GAMMA_SPEED_LIMIT)/alpha;
}
<a id='uj'></a>
## Step 1.c: Compute $u_j$ from $u^0$, the Valencia 3-velocity, and $g_{\mu\nu}$ \[Back to [top](#toc)\]
$$\label{uj}$$
The basic equation is
\begin{align}
u_j &= g_{\mu j} u^{\mu} \\
&= g_{0j} u^0 + g_{ij} u^i \\
&= \beta_j u^0 + \gamma_{ij} u^i \\
&= \beta_j u^0 + \gamma_{ij} u^0 \left(\alpha v^i_{(n)} - \beta^i\right) \\
&= u^0 \left(\beta_j + \gamma_{ij} \left(\alpha v^i_{(n)} - \beta^i\right) \right)\\
&= \alpha u^0 \gamma_{ij} v^i_{(n)} \\
\end{align}
```python
u0 = par.Cparameters("REAL",thismodule,"u0",1e300) # Will be overwritten in C code. Set to crazy value to ensure this.
uD = ixp.zerorank1()
for i in range(DIM):
for j in range(DIM):
uD[j] += alpha*u0*gammaDD[i][j]*ValenciavU[i]
```
<a id='beta'></a>
## Step 1.d: Compute $b^\mu$ \[Back to [top](#toc)\]
$$\label{beta}$$
We compute $b^\mu$ from the above expressions:
\begin{align}
\sqrt{4\pi} b^0 = B^0_{\rm (u)} &= \frac{u_j B^j}{\alpha} \\
\sqrt{4\pi} b^i = B^i_{\rm (u)} &= \frac{B^i + (u_j B^j) u^i}{\alpha u^0}\\
\end{align}
$B^i$ is exactly equal to the $B^i$ evaluated in IllinoisGRMHD/GiRaFFE.
Pulling this together, we currently have available as input:
+ $\tilde{B}^i$
+ $u_j$
+ $u^0$,
with the goal of outputting now $b^\mu$ and $b^2$:
```python
M_PI = par.Cparameters("#define",thismodule,"M_PI","")
BU = ixp.register_gridfunctions_for_single_rank1("AUX","BU",DIM=3)
# uBcontraction = u_i B^i
uBcontraction = sp.sympify(0)
for i in range(DIM):
uBcontraction += uD[i]*BU[i]
# uU = 3-vector representing u^i = u^0 \left(\alpha v^i_{(n)} - \beta^i\right)
uU = ixp.zerorank1()
for i in range(DIM):
uU[i] = u0*(alpha*ValenciavU[i] - betaU[i])
smallb4U = ixp.zerorank1(DIM=4)
smallb4U[0] = uBcontraction/(alpha*sp.sqrt(4*M_PI))
for i in range(DIM):
smallb4U[1+i] = (BU[i] + uBcontraction*uU[i])/(alpha*u0*sp.sqrt(4*M_PI))
```
<a id='poynting_flux'></a>
# Step 2: Defining the Poynting Flux Vector $S^{i}$ \[Back to [top](#toc)\]
$$\label{poynting_flux}$$
The Poynting flux is defined in Eq. 11 of [Kelly *et al.*](https://arxiv.org/pdf/1710.02132.pdf) (note that we choose the minus sign convention so that the Poynting luminosity across a spherical shell is $L_{\rm EM} = \int (-\alpha T^i_{\rm EM\ 0}) \sqrt{\gamma} d\Omega = \int S^r \sqrt{\gamma} d\Omega$, as in [Farris *et al.*](https://arxiv.org/pdf/1207.3354.pdf)):
$$
S^i = -\alpha T^i_{\rm EM\ 0} = -\alpha\left(b^2 u^i u_0 + \frac{1}{2} b^2 g^i{}_0 - b^i b_0\right)
$$
<a id='s'></a>
## Step 2.a: Computing $S^{i}$ \[Back to [top](#toc)\]
$$\label{s}$$
Given $g^{\mu\nu}$ computed above, we focus first on the $g^i{}_{0}$ term by computing
$$
g^\mu{}_\delta = g^{\mu\nu} g_{\nu \delta},
$$
and then the rest of the Poynting flux vector can be immediately computed from quantities defined above:
$$
S^i = -\alpha T^i_{\rm EM\ 0} = -\alpha\left(b^2 u^i u_0 + \frac{1}{2} b^2 g^i{}_0 - b^i b_0\right)
$$
```python
# Step 2.a.i: compute g^\mu_\delta:
g4UD = ixp.zerorank2(DIM=4)
for mu in range(4):
for delta in range(4):
for nu in range(4):
g4UD[mu][delta] += g4UU[mu][nu]*g4DD[nu][delta]
# Step 2.a.ii: compute b_{\mu}
smallb4D = ixp.zerorank1(DIM=4)
for mu in range(4):
for nu in range(4):
smallb4D[mu] += g4DD[mu][nu]*smallb4U[nu]
# Step 2.a.iii: compute u_0 = g_{mu 0} u^{mu} = g4DD[0][0]*u0 + g4DD[i][0]*uU[i]
u_0 = g4DD[0][0]*u0
for i in range(DIM):
u_0 += g4DD[i+1][0]*uU[i]
# Step 2.a.iv: compute b^2, setting b^2 = smallb2etk, as gridfunctions with base names ending in a digit
# are forbidden in NRPy+.
smallb2etk = sp.sympify(0)
for mu in range(4):
smallb2etk += smallb4U[mu]*smallb4D[mu]
# Step 2.a.v: compute S^i
PoynSU = ixp.zerorank1()
for i in range(DIM):
PoynSU[i] = -alpha * (smallb2etk*uU[i]*u_0 + sp.Rational(1,2)*smallb2etk*g4UD[i+1][0] - smallb4U[i+1]*smallb4D[0])
```
<a id='code_validation'></a>
# Step 3: Code Validation against `u0_smallb_Poynting__Cartesian` NRPy+ module \[Back to [top](#toc)\]
$$\label{code_validation}$$
Here, as a code validation check, we verify agreement in the SymPy expressions for u0, smallbU, smallb2etk, and PoynSU between
1. this tutorial and
2. the NRPy+ [u0_smallb_Poynting__Cartesian module](../edit/u0_smallb_Poynting__Cartesian/u0_smallb_Poynting__Cartesian.py).
```python
import sys
import u0_smallb_Poynting__Cartesian.u0_smallb_Poynting__Cartesian as u0etc
u0etc.compute_u0_smallb_Poynting__Cartesian(gammaDD,betaU,alpha,ValenciavU,BU)
if u0etc.computeu0_Cfunction != computeu0_Cfunction:
print("FAILURE: u0 C code has changed!")
sys.exit(1)
else:
print("PASSED: u0 C code matches!")
for i in range(4):
print("u0etc.smallb4U["+str(i)+"] - smallb4U["+str(i)+"] = "
+ str(u0etc.smallb4U[i]-smallb4U[i]))
print("u0etc.smallb2etk - smallb2etk = " + str(u0etc.smallb2etk-smallb2etk))
for i in range(DIM):
print("u0etc.PoynSU["+str(i)+"] - PoynSU["+str(i)+"] = "
+ str(u0etc.PoynSU[i]-PoynSU[i]))
```
PASSED: u0 C code matches!
u0etc.smallb4U[0] - smallb4U[0] = 0
u0etc.smallb4U[1] - smallb4U[1] = 0
u0etc.smallb4U[2] - smallb4U[2] = 0
u0etc.smallb4U[3] - smallb4U[3] = 0
u0etc.smallb2etk - smallb2etk = 0
u0etc.PoynSU[0] - PoynSU[0] = 0
u0etc.PoynSU[1] - PoynSU[1] = 0
u0etc.PoynSU[2] - PoynSU[2] = 0
<a id='appendix'></a>
# Step 4: Appendix: Proving Eqs. 53 and 56 in [Duez *et al* (2005)](https://arxiv.org/pdf/astro-ph/0503420.pdf)
$$\label{appendix}$$
$u^\mu u_\mu = -1$ implies
\begin{align}
g^{\mu\nu} u_\mu u_\nu &= g^{00} \left(u_0\right)^2 + 2 g^{0i} u_0 u_i + g^{ij} u_i u_j = -1 \\
\implies &g^{00} \left(u_0\right)^2 + 2 g^{0i} u_0 u_i + g^{ij} u_i u_j + 1 = 0\\
& a x^2 + b x + c = 0
\end{align}
Thus we have a quadratic equation for $u_0$, with solution given by
\begin{align}
u_0 &= \frac{-b \pm \sqrt{b^2 - 4 a c}}{2 a} \\
&= \frac{-2 g^{0i}u_i \pm \sqrt{\left(2 g^{0i} u_i\right)^2 - 4 g^{00} (g^{ij} u_i u_j + 1)}}{2 g^{00}}\\
&= \frac{-g^{0i}u_i \pm \sqrt{\left(g^{0i} u_i\right)^2 - g^{00} (g^{ij} u_i u_j + 1)}}{g^{00}}\\
\end{align}
Notice that (Eq. 4.49 in [Gourgoulhon](https://arxiv.org/pdf/gr-qc/0703035.pdf))
$$
g^{\mu\nu} = \begin{pmatrix}
-\frac{1}{\alpha^2} & \frac{\beta^i}{\alpha^2} \\
\frac{\beta^i}{\alpha^2} & \gamma^{ij} - \frac{\beta^i\beta^j}{\alpha^2}
\end{pmatrix},
$$
so we have
\begin{align}
u_0 &= \frac{-\beta^i u_i/\alpha^2 \pm \sqrt{\left(\beta^i u_i/\alpha^2\right)^2 + \frac{1}{\alpha^2} (g^{ij} u_i u_j + 1)}}{-1/\alpha^2}\\
&= \beta^i u_i \mp \sqrt{\left(\beta^i u_i\right)^2 + \alpha^2 (g^{ij} u_i u_j + 1)}\\
&= \beta^i u_i \mp \sqrt{\left(\beta^i u_i\right)^2 + \alpha^2 \left(\left[\gamma^{ij} - \frac{\beta^i\beta^j}{\alpha^2}\right] u_i u_j + 1\right)}\\
&= \beta^i u_i \mp \sqrt{\left(\beta^i u_i\right)^2 + \alpha^2 \left(\gamma^{ij}u_i u_j + 1\right) - \beta^i\beta^j u_i u_j}\\
&= \beta^i u_i \mp \sqrt{\alpha^2 \left(\gamma^{ij}u_i u_j + 1\right)}\\
\end{align}
Now, since
$$
u^0 = g^{\alpha 0} u_\alpha = -\frac{1}{\alpha^2} u_0 + \frac{\beta^i u_i}{\alpha^2},
$$
we get
\begin{align}
u^0 &= \frac{1}{\alpha^2} \left(\beta^i u_i - u_0\right) \\
&= \pm \frac{1}{\alpha^2} \sqrt{\alpha^2 \left(\gamma^{ij}u_i u_j + 1\right)}\\
&= \pm \frac{1}{\alpha} \sqrt{\gamma^{ij}u_i u_j + 1}\\
\end{align}
By convention, the relativistic Gamma factor is positive and given by $\alpha u^0$, so we choose the positive root. Thus we have derived Eq. 53 in [Duez *et al* (2005)](https://arxiv.org/pdf/astro-ph/0503420.pdf):
$$
u^0 = \frac{1}{\alpha} \sqrt{\gamma^{ij}u_i u_j + 1}.
$$
Next we evaluate
\begin{align}
u^i &= u_\mu g^{\mu i} \\
&= u_0 g^{0 i} + u_j g^{i j}\\
&= u_0 \frac{\beta^i}{\alpha^2} + u_j \left(\gamma^{ij} - \frac{\beta^i\beta^j}{\alpha^2}\right)\\
&= \gamma^{ij} u_j + u_0 \frac{\beta^i}{\alpha^2} - u_j \frac{\beta^i\beta^j}{\alpha^2}\\
&= \gamma^{ij} u_j + \frac{\beta^i}{\alpha^2} \left(u_0 - u_j \beta^j\right)\\
&= \gamma^{ij} u_j - \beta^i u^0,\\
\implies v^i &= \frac{\gamma^{ij} u_j}{u^0} - \beta^i
\end{align}
which is equivalent to Eq. 56 in [Duez *et al* (2005)](https://arxiv.org/pdf/astro-ph/0503420.pdf). Notice in the last step, we used the above definition of $u^0$.
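As a quick numerical spot-check of these relations (ours, not part of the original notebook), one can pick random ADM data and lower components $u_i$, build $u_0$ from the root chosen above, and confirm that $u^\mu u_\mu = -1$ and that Eq. 53 is recovered:

```python
import numpy as np
rng = np.random.default_rng(0)

A = rng.normal(size=(3, 3))
gammaDD = A @ A.T + 3*np.eye(3)          # a symmetric positive-definite spatial metric
gammaUU = np.linalg.inv(gammaDD)
alpha = 1.3
betaU = rng.normal(scale=0.1, size=3)    # shift vector beta^i
betaD = gammaDD @ betaU                  # beta_i = gamma_{ij} beta^j
uD = rng.normal(size=3)                  # arbitrary lower spatial components u_i

# 4-metric g_{mu nu} in ADM form and its inverse
g4DD = np.zeros((4, 4))
g4DD[0, 0] = -alpha**2 + betaU @ betaD
g4DD[0, 1:] = g4DD[1:, 0] = betaD
g4DD[1:, 1:] = gammaDD
g4UU = np.linalg.inv(g4DD)

# u_0 from the root derived above, choosing the sign that makes alpha*u^0 > 0
u_0 = betaU @ uD - alpha*np.sqrt(uD @ gammaUU @ uD + 1)
u_lower = np.concatenate(([u_0], uD))

print(np.isclose(u_lower @ g4UU @ u_lower, -1.0))                           # u^mu u_mu = -1
print(np.isclose(g4UU[0] @ u_lower, np.sqrt(uD @ gammaUU @ uD + 1)/alpha))  # Eq. 53
```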
<a id='latex_pdf_output'></a>
# Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
[Tutorial-u0_smallb_Poynting-Cartesian.pdf](Tutorial-u0_smallb_Poynting-Cartesian.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```python
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-u0_smallb_Poynting-Cartesian")
```
[pandoc warning] Duplicate link reference `[comment]' "source" (line 22, column 1)
Created Tutorial-u0_smallb_Poynting-Cartesian.tex, and compiled LaTeX file
to PDF file Tutorial-u0_smallb_Poynting-Cartesian.pdf
|
\section{Defensive Mode Parsing}
\label{sec:defmode}
Binary code that defends itself against analysis may violate the
assumptions made by the ParseAPI's standard parsing algorithm.
Enabling defensive mode parsing activates more conservative
assumptions that substantially reduce the percentage of code that is
analyzed by the ParseAPI. For this reason, defensive mode parsing is
best-suited for use of ParseAPI in conjunction with dynamic analysis
techniques that can compensate for its limited coverage of the binary
code. This mode of parsing will be brought to full functionality in
an upcoming release.
|
module upwind_mod
implicit none
integer, parameter :: r8 = selected_real_kind(12, 100)
type upwind_type
! number of space dimensions
integer :: ndims
! total number of cells in the domain
integer :: ntot
! number of cells in the ndims directions
integer, allocatable :: numCells(:)
! upwind direction
integer, allocatable :: upDirection(:)
! cell sizes in the ndims directions
real(r8), allocatable :: deltas(:)
! velocity
real(r8), allocatable :: v(:)
! domain lengths
real(r8), allocatable :: lengths(:)
! field as a flat array
real(r8), allocatable :: f(:)
! product of the dimensions, used to switch back and forth
! between the flat index and the multi-index representations
integer, allocatable :: dimProd(:)
contains
procedure :: new => upwind_new
procedure :: del => upwind_del
procedure :: advect => upwind_advect
procedure :: saveVTK => upwind_saveVTK
procedure :: getIndexSet => upwind_getIndexSet
procedure :: getFlatIndex => upwind_getFlatIndex
end type
contains
! Constructor
! @param velocity velocity field (constant)
! @param lengths domain lengths
! @param numCells number of cells in the x, y, ... directions
subroutine upwind_new(this, velocity, lengths, numCells)
class(upwind_type) :: this
real(r8), intent(in) :: velocity(:)
real(r8), intent(in) :: lengths(:)
integer, intent(in) :: numCells(:)
integer :: j
this % ndims = size(velocity)
allocate(this % upDirection(this % ndims))
allocate(this % deltas(this % ndims))
allocate(this % v(this % ndims))
allocate(this % lengths(this % ndims))
allocate(this % numCells(this % ndims))
allocate(this % dimProd(this % ndims))
this % v = velocity
this % lengths = lengths
this % numCells = numCells
! compute the total number of cells and other stuff
this % ntot = 1
do j = 1, this % ndims
this % upDirection(j) = -1
if (velocity(j) < 0.) then
this % upDirection(j) = 1
endif
this % deltas(j) = lengths(j) / numCells(j)
this % ntot = this % ntot * numCells(j)
enddo
this % dimProd(this % ndims) = 1
do j = this % ndims - 1, 1, -1
this % dimProd(j) = this % dimProd(j + 1) * this % numCells(j + 1)
enddo
allocate(this % f(this % ntot))
! initialize the field, zero everywhere except for the
! low corner cell where the field is one
this % f = 0
this % f(1) = 1
end subroutine
! Destructor
subroutine upwind_del(this)
class(upwind_type) :: this
deallocate(this % v)
deallocate(this % lengths)
deallocate(this % numCells)
deallocate(this % upDirection)
deallocate(this % deltas)
deallocate(this % dimProd)
deallocate(this % f)
end subroutine
! Advance by one time step
! @param deltaTime time step
subroutine upwind_advect(this, deltaTime)
class(upwind_type) :: this
real(r8), intent(in) :: deltaTime
real(r8), allocatable :: oldF(:)
integer :: i, j, oldIndex, upI
integer :: inds(this % ndims)
! allocate and copy the field
allocate(oldF(this % ntot))
oldF = this % f
! iterate over the cells
do concurrent (i = 1:this % ntot)
! compute the index set of this cell
call this % getIndexSet(i, inds)
do j = 1, this % ndims
! cache the cell index
oldIndex = inds(j)
! increment the cell index
inds(j) = inds(j) + this % upDirection(j)
! apply periodic BCs
inds(j) = modulo(inds(j) + this % numCells(j) - 1, this % numCells(j)) + 1
! compute the new flat index
upI = this % getFlatIndex(inds)
! update the field
this % f(i) = this % f(i) - &
& deltaTime*this % v(j)*this % upDirection(j)*(oldF(upI) - oldF(i))/this % deltas(j)
! reset the index
inds(j) = oldIndex
enddo
enddo
end subroutine
subroutine upwind_saveVTK(this, filename)
class(upwind_type) :: this
character(len=*), intent(in) :: filename
integer iunit, i
! f2008
!open(newunit = iunit, file = filename, status = 'unknown')
iunit = 10
! f95
open(unit = iunit, file = filename, status = 'unknown')
write(iunit, '(a)') '# vtk DataFile Version 2.0'
write(iunit, '(a)') 'upwind.f90'
write(iunit, '(a)') 'ASCII'
write(iunit, '(a)') 'DATASET RECTILINEAR_GRID'
! in VTK the first dimension varies fastest so need
! to invert the order of the dimensions
if (this % ndims > 2) then
write(iunit, '(a, i10, i10, i10)') 'DIMENSIONS ', &
& this % numCells(3) + 1, this % numCells(2) + 1, this % numCells(1) + 1
else
if (this % ndims > 1) then
write(iunit, '(a, i10, i10)') 'DIMENSIONS 1', &
& this % numCells(2) + 1, this % numCells(1) + 1
else
write(iunit, '(a, i10)') 'DIMENSIONS 1 1', this % numCells(1) + 1
endif
endif
write(iunit, '(a, i10, a)') 'X_COORDINATES ', this % numCells(1) + 1, ' double'
do i = 1, this % numCells(1) + 1
write(iunit, '(e20.7)') 0.0 + this % deltas(1) * (i - 1)
enddo
write(iunit, *)
if (this % ndims > 1) then
write(iunit, '(a, i10, a)') 'Y_COORDINATES ', this % numCells(2) + 1, ' double'
do i = 1, this % numCells(2) + 1
write(iunit, '(e20.7)') 0.0 + this % deltas(2) * (i - 1)
enddo
else
write(iunit, '(a)') 'Y_COORDINATES 1 double'
write(iunit, '(a)') '0.0'
endif
write(iunit, *)
if (this % ndims > 2) then
write(iunit, '(a, i10, a)') 'Z_COORDINATES ', this % numCells(3) + 1, ' double'
do i = 1, this % numCells(3) + 1
write(iunit, '(e20.7)') 0.0 + this % deltas(3) * (i - 1)
enddo
else
write(iunit, '(a)') 'Z_COORDINATES 1 double'
write(iunit, '(a)') '0.0'
endif
write(iunit, '(a, i20)') 'CELL_DATA ', this % ntot
write(iunit, '(a)') 'SCALARS f double 1'
write(iunit, '(a)') 'LOOKUP_TABLE default'
do i = 1, this % ntot
write(iunit, '(e20.7)') this % f(i)
enddo
close(iunit)
end subroutine
subroutine upwind_print(this)
class(upwind_type), intent(in) :: this
integer :: i
do i = 1, this % ntot
write(*, '(a, i10, a, e20.13)') 'i = ', i, ' f = ', this % f(i)
enddo
end subroutine
pure subroutine upwind_getIndexSet(this, flatIndex, res)
class(upwind_type), intent(in) :: this
integer, intent(in) :: flatIndex
integer, intent(out) :: res(:)
integer :: i
do i = 1, this % ndims
res(i) = mod((flatIndex - 1)/this % dimProd(i), this % numCells(i)) + 1
enddo
end subroutine
pure function upwind_getFlatIndex(this, inds) result(res)
class(upwind_type), intent(in) :: this
integer, intent(in) :: inds(:)
integer :: res
res = dot_product(this % dimProd, inds - 1) + 1
end function
end module
! Converts a string into an integer
subroutine str2int(str, i, stat)
implicit none
! Arguments
character(len=*), intent(in) :: str
integer, intent(out) :: i
integer, intent(out) :: stat
read(str,*,iostat=stat) i
end subroutine str2int
program main
use upwind_mod
implicit none
integer, parameter :: ndims = 3
integer :: argc, numCells(ndims), n, ier, numTimeSteps, i, j, &
& numThreads, maxNumThreads, threadId
logical :: doVtk
character(len=32) :: argv
real(r8) :: velocity(ndims)
real(r8) :: lengths(ndims)
real(r8) :: courant, dt, dx, val, chksum
type(upwind_type) :: up
numCells = -1
doVtk = .FALSE.
! default number of steps
numTimeSteps = 100
argc = 0
do
call get_command_argument(argc, argv)
if (len_trim(argv) == 0) exit
call str2int(argv, n, ier)
if (argc == 1) then
numCells = n
else if (argc == 2) then
numTimeSteps = n
else if (argc == 3 .and. argv == 'vtk') then
doVtk = .TRUE.
endif
argc = argc + 1
enddo
if (argc < 2) then
stop 'must specify number of cells in each direction.'
endif
write(*, '(a)', advance='no') 'number of cells: '
do i = 1, ndims
write(*, '(i10, a)', advance='no') numCells(i), ' '
enddo
write(*, *) ' ' ! new line
write(*, '(a,i10)') 'number of time steps: ', numTimeSteps
! velocity field
velocity = 1
! domain lengths
lengths = 1
! compute time step from Courant's condition
courant = 0.1_r8
dt = huge(1.0_r8)
do j = 1, ndims
dx = lengths(j) / real(numCells(j), r8)
val = courant * dx / velocity(j)
dt = min(val, dt)
enddo
! instantiate up
call up % new(velocity, lengths, numCells)
! call up % saveVTK('up0.vtk')
! advance
do i = 1, numTimeSteps
call up % advect(dt)
enddo
write(*,'(a, f15.9)') 'check sum: ', sum(up % f)
if (doVtk) then
call up % saveVTK('up.vtk')
endif
! clean up
call up % del()
end program
|
```python
import numpy as np
```
```python
x = np.ones(5)
```
```python
x
```
array([1., 1., 1., 1., 1.])
```python
print(x)
```
[1. 1. 1. 1. 1.]
```python
def f(x):
return 2+x
```
```python
f
```
<function __main__.f>
```python
print(f)
```
<function f at 0x7f8834126598>
```python
repr(f)
```
'<function f at 0x7f8834126598>'
```python
repr(x)
```
'array([1., 1., 1., 1., 1.])'
```python
str(x)
```
'[1. 1. 1. 1. 1.]'
```python
str(f)
```
'<function f at 0x7f8834126598>'
```python
print(x)
```
[1. 1. 1. 1. 1.]
```python
x.__str__()
```
'[1. 1. 1. 1. 1.]'
```python
x.__repr__()
```
'array([1., 1., 1., 1., 1.])'
```python
import sympy as sm
```
```python
x, y = sm.symbols('x, y')
```
```python
x
```
$\displaystyle x$
```python
x + y
```
$\displaystyle x + y$
```python
repr(x)
```
'x'
```python
repr(x + y)
```
'x + y'
```python
str(x + y)
```
'x + y'
```python
x = np.ones((2, 2))
```
```python
x
```
array([[1., 1.],
[1., 1.]])
```python
x[0, 0]
```
1.0
```python
a = [1, 2, 3]
type(a)
```
list
```python
a
```
[1, 2, 3]
```python
y = ['a', 1, 9.0]
```
```python
y
```
['a', 1, 9.0]
```python
import numpy as np
```
```python
np.array(y)
```
array(['a', '1', '9.0'], dtype='<U3')
```python
my_list = []
for i in range(20):
print(i)
my_list.append(i**2)
```
0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
```python
my_list
```
[0,
1,
4,
9,
16,
25,
36,
49,
64,
81,
100,
121,
144,
169,
196,
225,
256,
289,
324,
361]
```python
my_array = np.array(my_list)
```
```python
my_array
```
array([ 0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144,
169, 196, 225, 256, 289, 324, 361])
```python
my_list * 2
```
[0,
1,
4,
9,
16,
25,
36,
49,
64,
81,
100,
121,
144,
169,
196,
225,
256,
289,
324,
361,
0,
1,
4,
9,
16,
25,
36,
49,
64,
81,
100,
121,
144,
169,
196,
225,
256,
289,
324,
361]
```python
my_array * 2
```
array([ 0, 2, 8, 18, 32, 50, 72, 98, 128, 162, 200, 242, 288,
338, 392, 450, 512, 578, 648, 722])
```python
np.random.random(5)
```
array([0.64327535, 0.64564919, 0.78193309, 0.03636536, 0.37608049])
```python
a = np.random.random(5)
b = np.random.random(5)
```
```python
a
```
array([0.83327995, 0.2396199 , 0.74695168, 0.06965017, 0.39579413])
```python
b
```
array([0.42479002, 0.95725464, 0.4073338 , 0.30301574, 0.3486592 ])
```python
np.sqrt(a**2 + b**2)
```
array([0.93530852, 0.98678982, 0.85079823, 0.31091749, 0.52746206])
```python
for thing in ['a', 'b', 'c']:
print(thing)
```
a
b
c
```python
for i, thing in enumerate(['a', 'b', 'c']):
print(i)
print(thing)
```
0
a
1
b
2
c
```python
from utils2 import period2freq, freq2period
```
```python
period2freq(5.0)
```
1.2566370614359172
```python
period2freq?
```
    Signature: period2freq(period)
    Docstring: Returns frequency in rad/s given period in seconds.
    File:      ~/eng122-2020w-notebook-02/utils2.py
    Type:      function
```python
```
|
\section{Introduction}\label{sec:intro}
Runtime Verification (RV), where monitors detect and respond to
property violations at runtime, can help address several of the
verification challenges facing ultra-critical
systems~\cite{pike-rv-11,rvRushby}. As RV matures it will be employed to
verify increasingly complex properties such as checking complex
stability properties of a control system or ensuring that a critical
system is fault-tolerant. As RV is applied to more complex systems, the
monitors themselves will become increasingly sophisticated and as prone to error
as the system being monitored. Applying formal verification
tools to the monitors to ensure they are correct can help safeguard
that the last line of defense is actually effective.
%RV may be a
%way to regain the necessary level of predictability required of
%ultra-critical systems in fully autonomous vehicles that
%utilize learning to adapt to unpredictable environments.
The work reported here is part of a larger
program aimed at creating a framework for \emph{high assurance RV}. In
order to be used in ultra-critical environments, high-assurance RV
must:
\begin{enumerate}
\item \label{req:a} Provide evidence for a safety case that the RV enforces safety guarantees.
\item \label{req:b} Support verification that the specification of the monitors
is correct.
\item \label{req:c} Ensure that monitor code generated implements the specification of the
monitor.
\end{enumerate}
\noindent
These guiding principles inform the continued development of the
Copilot language and framework that is intended to
be used in RV of ultra-critical systems~\cite{copilot,pike-isse-13}. Earlier work
focused on verifying that the monitor synthesis \emph{process} is correct
(Requirement~\ref{req:c} above)~\cite{pike-icfp-12}. Here, the focus
is on the second requirement for high-assurance RV: making sure the monitor
specification is correct. Requirement~\ref{req:a}, in the spirit of Rushby's
proposal~\cite{rvRushby}, is future work.
\paragraph{Contributions}
In this paper we describe the theory and implementation of a $k$-induction based
model-checker~\cite{Sheeran00,EenS03} for Copilot called
\texttt{copilot-kind}. More precisely, \texttt{copilot-kind} is a model-checking
\emph{framework} for Copilot, with two existing backends: a lightweight
implementation of $k$-induction using Yices~\cite{Dutertre:cav2014} and a backend based on
\emph{Kind2}, implementing both $k$-induction and the IC3~algorithm~\cite{Somenzi-FMCAD11}.
After providing a brief introduction to Copilot in Section~\ref{sec:co-intro}
and to Satisfiability Modulo Theories (\textsc{smt})-based $k$-induction in Section~\ref{sec:background}, we introduce
\texttt{copilot-kind} in Section~\ref{sec:prover}. Illustrative examples of
\texttt{copilot-kind} are provided in Section~\ref{sec:example}, and
implementation details are given in Section~\ref{sec:structure}. The final two
sections discuss related work and concluding remarks, respectively.
Copilot and \texttt{copilot-kind} are open-source (BSD3) and in current use at
NASA.\footnote{\url{https://github.com/Copilot-Language}}
%This paper documents our efforts in incorporating formal verification
%of monitors into the Copilot RV framework. Initial efforts have
%focused on verifying invariants using advanced Satisfiability Modulo
%Theories (SMT) based bounded model
%checking~\cite{ClarkeBounded01}.
%The next section provides the reader with the necessary background on
%Copilot. Section~\ref{sec:background} gives a brief introduction to
%the $k-$induction proof technique we apply to verify
%monitors.
%\jonathan{To be exact, we don't limit ourselves to k-induction as Kind2 relies heavily on the IC3 algorithm. Maybe we should use the term "SMT-based model-checking" instead. That being said, as I'll try to explain, it's true IC3 didn't work significantly better than basic k-induction in all the test cases I ran.}
%Section~\ref{sec:prover} introduces the new Copilot prover
%interface. In Section~\ref{sec:example}, several examples illustrate
%the application of the tool to the Boyer-Moore majority vote
%algorithm. Section~\ref{sec:structure} provides an overview of
%implementation details. Section~\ref{sec:related} discusses related
%works. Finally, Section~\ref{sec:conclusion}
%discusses future work and concludes.
%Copilot is a stream
% language for generating embedded C-code monitors for verifying
% system properties of hard real time embedded systems.
% In this short paper, we describe
%initial results of an ongoing effort to integrate model checking
%technology into the Copilot RV framework in order to verify
%our monitors before they are deployed in ultra-critical systems.
%As interest grows in techniques such as
%adaptive flight control and fully autonomous vehicles that utilize
%learning to adapt to unpredictable environments, architectures have
%been proposed~\cite{XX} that use RV to switch from the more exotic
%uncertified software to a certified, but less capable, system when
%safety properties are violated.
|
A shapely single woman wearing an open leather jacket is pumping gas.
Another car stops at the pump next to hers.
A conservative man gets out and starts pumping gas.
He doesn’t like staring but, try as he may, he simply cannot stop staring at this woman.
Being a conservative man, he starts feeling guilty after a while.
To make amends with his guilty conscience, he calls out to her as she is about to leave.
|
-- Andreas, 2012-05-09
module DontPrune where
open import Common.Equality
open import Common.Product
data Bool : Set where
true false : Bool
test : (A : Set) →
let IF : Bool → A → A → A
IF = _
in (a b : A) →
(IF true a b ≡ a) × (IF false a b ≡ b)
test A a b = refl , refl
-- Expected result: unsolved metas
--
-- (unless someone implemented unification that produces definitions by case).
--
-- The test case should prevent overzealous pruning:
-- If the first equation pruned away the b, then the second
-- would have an unbound rhs.
|
function x = is_Nd_distributed(x)
x = iscell(x) && length(x) == 2 && ischar(x{1}) && strcmp(x{1},'mvn') && iscell(x{2});
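% Usage sketch (mu and Sigma are placeholder variable names): a value built as
%   x = {'mvn', {mu, Sigma}};
% passes every check above, so is_Nd_distributed(x) returns true; any input of
% a different shape returns false.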
|
(* Title: CCPO_Topology.thy
Author: Johannes Hölzl, TU Munich
*)
section \<open>CCPO topologies\<close>
theory CCPO_Topology
imports
"HOL-Analysis.Extended_Real_Limits"
"../Coinductive_Nat"
begin
lemma dropWhile_append:
"dropWhile P (xs @ ys) = (if \<forall>x\<in>set xs. P x then dropWhile P ys else dropWhile P xs @ ys)"
by auto
lemma dropWhile_False: "(\<And>x. x \<in> set xs \<Longrightarrow> P x) \<Longrightarrow> dropWhile P xs = []"
by simp
abbreviation (in order) "chain \<equiv> Complete_Partial_Order.chain (\<le>)"
lemma (in linorder) chain_linorder: "chain C"
by (simp add: chain_def linear)
lemma continuous_add_ereal:
assumes "0 \<le> t"
shows "continuous_on {-\<infinity>::ereal <..} (\<lambda>x. t + x)"
proof (subst continuous_on_open_vimage, (intro open_greaterThan allI impI)+)
fix B :: "ereal set" assume "open B"
show "open ((\<lambda>x. t + x) -` B \<inter> {- \<infinity><..})"
proof (cases t)
case (real t')
then have *: "(\<lambda>x. t + x) -` B \<inter> {- \<infinity><..} = (\<lambda>x. 1 * x + (-t)) ` (B \<inter> {-\<infinity> <..})"
apply (simp add: set_eq_iff image_iff Bex_def)
apply (intro allI iffI)
apply (rule_tac x= "x + ereal t'" in exI)
apply (case_tac x)
apply (auto simp: ac_simps)
done
show ?thesis
unfolding *
apply (rule ereal_open_affinity_pos)
using \<open>open B\<close>
apply (auto simp: real)
done
qed (insert \<open>0 \<le> t\<close>, auto)
qed
lemma tendsto_add_ereal:
"0 \<le> x \<Longrightarrow> 0 \<le> y \<Longrightarrow> (f \<longlongrightarrow> y) F \<Longrightarrow> ((\<lambda>z. x + f z :: ereal) \<longlongrightarrow> x + y) F"
apply (rule tendsto_compose[where f=f])
using continuous_add_ereal[where t=x]
unfolding continuous_on_def
apply (auto simp add: at_within_open[where S="{- \<infinity> <..}"])
done
lemma tendsto_LimI: "(f \<longlongrightarrow> y) F \<Longrightarrow> (f \<longlongrightarrow> Lim F f) F"
by (metis tendsto_Lim tendsto_bot)
subsection \<open>The filter \<open>at'\<close>\<close>
abbreviation (in ccpo) "compact_element \<equiv> ccpo.compact Sup (\<le>)"
lemma tendsto_unique_eventually:
fixes x x' :: "'a :: t2_space"
shows "F \<noteq> bot \<Longrightarrow> eventually (\<lambda>x. f x = g x) F \<Longrightarrow> (f \<longlongrightarrow> x) F \<Longrightarrow> (g \<longlongrightarrow> x') F \<Longrightarrow> x = x'"
by (metis tendsto_unique filterlim_cong)
lemma (in ccpo) ccpo_Sup_upper2: "chain C \<Longrightarrow> x \<in> C \<Longrightarrow> y \<le> x \<Longrightarrow> y \<le> Sup C"
by (blast intro: ccpo_Sup_upper order_trans)
lemma tendsto_open_vimage: "(\<And>B. open B \<Longrightarrow> open (f -` B)) \<Longrightarrow> f \<midarrow>l\<rightarrow> f l"
using continuous_on_open_vimage[of UNIV f] continuous_on_def[of UNIV f] by simp
lemma open_vimageI: "(\<And>x. f \<midarrow>x\<rightarrow> f x) \<Longrightarrow> open A \<Longrightarrow> open (f -` A)"
using continuous_on_open_vimage[of UNIV f] continuous_on_def[of UNIV f] by simp
lemma principal_bot: "principal x = bot \<longleftrightarrow> x = {}"
by (auto simp: filter_eq_iff eventually_principal)
definition "at' x = (if open {x} then principal {x} else at x)"
lemma at'_bot: "at' x \<noteq> bot"
by (simp add: at'_def at_eq_bot_iff principal_bot)
lemma tendsto_id_at'[simp, intro]: "((\<lambda>x. x) \<longlongrightarrow> x) (at' x)"
by (simp add: at'_def topological_tendstoI eventually_principal tendsto_ident_at)
lemma cont_at': "(f \<longlongrightarrow> f x) (at' x) \<longleftrightarrow> f \<midarrow>x\<rightarrow> f x"
using at_eq_bot_iff[of x] by (auto split: if_split_asm intro!: topological_tendstoI simp: eventually_principal at'_def)
subsection \<open>The type class \<open>ccpo_topology\<close>\<close>
text \<open>Temporarily relax type constraints for @{term "open"}.\<close>
setup \<open>Sign.add_const_constraint
(@{const_name "open"}, SOME @{typ "'a::open set \<Rightarrow> bool"})\<close>
class ccpo_topology = "open" + ccpo +
assumes open_ccpo: "open A \<longleftrightarrow> (\<forall>C. chain C \<longrightarrow> C \<noteq> {} \<longrightarrow> Sup C \<in> A \<longrightarrow> C \<inter> A \<noteq> {})"
begin
lemma open_ccpoD:
assumes "open A" "chain C" "C \<noteq> {}" "Sup C \<in> A"
shows "\<exists>c\<in>C. \<forall>c'\<in>C. c \<le> c' \<longrightarrow> c' \<in> A"
proof (rule ccontr)
assume "\<not> ?thesis"
then have *: "\<And>c. c \<in> C \<Longrightarrow> \<exists>c'\<in>C. c \<le> c' \<and> c' \<notin> A"
by auto
with \<open>chain C\<close> \<open>C \<noteq> {}\<close> have "chain (C - A)" "C - A \<noteq> {}"
by (auto intro: chain_Diff)
moreover have "Sup C = Sup (C - A)"
proof (safe intro!: antisym ccpo_Sup_least \<open>chain C\<close> chain_Diff)
fix c assume "c \<in> C"
with * obtain c' where "c' \<in> C" "c \<le> c'" "c' \<notin> A"
by auto
with \<open>c\<in>C\<close> show "c \<le> \<Squnion>(C - A)"
by (intro ccpo_Sup_upper2 \<open>chain (C - A)\<close>) auto
qed (auto intro: \<open>chain C\<close> ccpo_Sup_upper)
ultimately show False
using \<open>open A\<close> \<open>Sup C \<in> A\<close> by (auto simp: open_ccpo)
qed
lemma open_ccpo_Iic: "open {.. b}"
by (auto simp: open_ccpo) (metis Int_iff atMost_iff ccpo_Sup_upper empty_iff order_trans)
subclass topological_space
proof
show "open (UNIV::'a set)"
unfolding open_ccpo by auto
next
fix S T :: "'a set" assume "open S" "open T"
show "open (S \<inter> T)"
unfolding open_ccpo
proof (intro allI impI)
fix C assume C: "chain C" "C \<noteq> {}" and "\<Squnion>C \<in> S \<inter> T"
with open_ccpoD[OF \<open>open S\<close> C] open_ccpoD[OF \<open>open T\<close> C]
show "C \<inter> (S \<inter> T) \<noteq> {}"
unfolding chain_def by blast
qed
next
fix K :: "'a set set" assume *: "\<forall>D\<in>K. open D"
show "open (\<Union>K)"
unfolding open_ccpo
proof (intro allI impI)
fix C assume "chain C" "C \<noteq> {}" "\<Squnion>C \<in> (\<Union>K)"
with * obtain D where "D \<in> K" "\<Squnion>C \<in> D" "C \<inter> D \<noteq> {}"
by (auto simp: open_ccpo)
then show "C \<inter> (\<Union>K) \<noteq> {}"
by auto
qed
qed
lemma closed_ccpo: "closed A \<longleftrightarrow> (\<forall>C. chain C \<longrightarrow> C \<noteq> {} \<longrightarrow> C \<subseteq> A \<longrightarrow> Sup C \<in> A)"
unfolding closed_def open_ccpo by auto
lemma closed_admissible: "closed {x. P x} \<longleftrightarrow> ccpo.admissible Sup (\<le>) P"
unfolding closed_ccpo ccpo.admissible_def by auto
lemma open_singletonI_compact: "compact_element x \<Longrightarrow> open {x}"
using admissible_compact_neq[of Sup "(\<le>)" x]
by (simp add: closed_admissible[symmetric] open_closed Collect_neg_eq)
lemma closed_Ici: "closed {.. b}"
by (auto simp: closed_ccpo intro: ccpo_Sup_least)
lemma closed_Iic: "closed {b ..}"
by (auto simp: closed_ccpo intro: ccpo_Sup_upper2)
text \<open>
@{class ccpo_topology}s are also @{class t2_space}s.
This is necessary to have a unique continuous extension.
\<close>
subclass t2_space
proof
fix x y :: 'a assume "x \<noteq> y"
show "\<exists>U V. open U \<and> open V \<and> x \<in> U \<and> y \<in> V \<and> U \<inter> V = {}"
proof cases
{ fix x y assume "x \<noteq> y" "x \<le> y"
then have "open {..x} \<and> open (- {..x}) \<and> x \<in> {..x} \<and> y \<in> - {..x} \<and> {..x} \<inter> - {..x} = {}"
by (auto intro: open_ccpo_Iic closed_Ici) }
moreover assume "x \<le> y \<or> y \<le> x"
ultimately show ?thesis
using \<open>x \<noteq> y\<close> by (metis Int_commute)
next
assume "\<not> (x \<le> y \<or> y \<le> x)"
then have "open ({..x} \<inter> - {..y}) \<and> open ({..y} \<inter> - {..x}) \<and>
x \<in> {..x} \<inter> - {..y} \<and> y \<in> {..y} \<inter> - {..x} \<and> ({..x} \<inter> - {..y}) \<inter> ({..y} \<inter> - {..x}) = {}"
by (auto intro: open_ccpo_Iic closed_Ici)
then show ?thesis by auto
qed
qed
end
lemma tendsto_le_ccpo:
fixes f g :: "'a \<Rightarrow> 'b::ccpo_topology"
assumes F: "\<not> trivial_limit F"
assumes x: "(f \<longlongrightarrow> x) F" and y: "(g \<longlongrightarrow> y) F"
assumes ev: "eventually (\<lambda>x. g x \<le> f x) F"
shows "y \<le> x"
proof (rule ccontr)
assume "\<not> y \<le> x"
show False
proof cases
assume "x \<le> y"
with \<open>\<not> y \<le> x\<close>
have "open {..x}" "open (- {..x})" "x \<in> {..x}" "y \<in> - {..x}" "{..x} \<inter> - {..x} = {}"
by (auto intro: open_ccpo_Iic closed_Ici)
with topological_tendstoD[OF x, of "{..x}"] topological_tendstoD[OF y, of "- {..x}"]
have "eventually (\<lambda>z. f z \<le> x) F" "eventually (\<lambda>z. \<not> g z \<le> x) F"
by auto
with ev have "eventually (\<lambda>x. False) F" by eventually_elim (auto intro: order_trans)
with F show False by (auto simp: eventually_False)
next
assume "\<not> x \<le> y"
with \<open>\<not> y \<le> x\<close> have "open ({..x} \<inter> - {..y})" "open ({..y} \<inter> - {..x})"
"x \<in> {..x} \<inter> - {..y}" "y \<in> {..y} \<inter> - {..x}" "({..x} \<inter> - {..y}) \<inter> ({..y} \<inter> - {..x}) = {}"
by (auto intro: open_ccpo_Iic closed_Ici)
with topological_tendstoD[OF x, of "{..x} \<inter> - {..y}"]
topological_tendstoD[OF y, of "{..y} \<inter> - {..x}"]
have "eventually (\<lambda>z. f z \<le> x \<and> \<not> f z \<le> y) F" "eventually (\<lambda>z. g z \<le> y \<and> \<not> g z \<le> x) F"
by auto
with ev have "eventually (\<lambda>x. False) F" by eventually_elim (auto intro: order_trans)
with F show False by (auto simp: eventually_False)
qed
qed
lemma tendsto_ccpoI:
fixes f :: "'a::ccpo_topology \<Rightarrow> 'b::ccpo_topology"
shows "(\<And>C. chain C \<Longrightarrow> C \<noteq> {} \<Longrightarrow> chain (f ` C) \<and> f (Sup C) = Sup (f`C)) \<Longrightarrow> f \<midarrow>x\<rightarrow> f x"
by (intro tendsto_open_vimage) (auto simp: open_ccpo)
lemma tendsto_mcont:
assumes mcont: "mcont Sup (\<le>) Sup (\<le>) (f :: 'a :: ccpo_topology \<Rightarrow> 'b :: ccpo_topology)"
shows "f \<midarrow>l\<rightarrow> f l"
proof (intro tendsto_ccpoI conjI)
fix C :: "'a set" assume C: "chain C" "C \<noteq> {}"
show "chain (f`C)"
using mcont
by (intro chain_imageI[where le_a="(\<le>)"] C) (simp add: mcont_def monotone_def)
show "f (\<Squnion>C) = \<Squnion>(f ` C)"
using mcont C by (simp add: mcont_def cont_def)
qed
subsection \<open>Instances for @{class ccpo_topology}s and continuity theorems\<close>
instantiation set :: (type) ccpo_topology
begin
definition open_set :: "'a set set \<Rightarrow> bool" where
"open_set A \<longleftrightarrow> (\<forall>C. chain C \<longrightarrow> C \<noteq> {} \<longrightarrow> Sup C \<in> A \<longrightarrow> C \<inter> A \<noteq> {})"
instance
by intro_classes (simp add: open_set_def)
end
instantiation enat :: ccpo_topology
begin
instance
proof
fix A :: "enat set"
show "open A = (\<forall>C. chain C \<longrightarrow> C \<noteq> {} \<longrightarrow> \<Squnion>C \<in> A \<longrightarrow> C \<inter> A \<noteq> {})"
proof (intro iffI allI impI)
fix C x assume "open A" "chain C" "C \<noteq> {}" "\<Squnion>C \<in> A"
show "C \<inter> A \<noteq> {}"
proof cases
assume "\<Squnion>C = \<infinity>"
with \<open>\<Squnion>C \<in> A\<close> \<open>open A\<close> obtain n where "{enat n <..} \<subseteq> A"
unfolding open_enat_iff by auto
with \<open>\<Squnion>C = \<infinity>\<close> Sup_eq_top_iff[of C] show ?thesis
by (auto simp: top_enat_def)
next
assume "\<Squnion>C \<noteq> \<infinity>"
then obtain n where "C \<subseteq> {.. enat n}"
unfolding Sup_eq_top_iff top_enat_def[symmetric] by (auto simp: not_less top_enat_def)
moreover have "finite {.. enat n}"
by (auto intro: finite_enat_bounded)
ultimately have "finite C"
by (auto intro: finite_subset)
from in_chain_finite[OF \<open>chain C\<close> \<open>finite C\<close> \<open>C \<noteq> {}\<close>] \<open>\<Squnion>C \<in> A\<close> show ?thesis
by auto
qed
next
assume C: "\<forall>C. chain C \<longrightarrow> C \<noteq> {} \<longrightarrow> \<Squnion>C \<in> A \<longrightarrow> C \<inter> A \<noteq> {}"
show "open A"
unfolding open_enat_iff
proof safe
assume "\<infinity> \<in> A"
{ fix C :: "enat set" assume "infinite C"
then have "\<Squnion>C = \<infinity>"
by (auto simp: Sup_enat_def)
with \<open>infinite C\<close> C[THEN spec, of C] \<open>\<infinity> \<in> A\<close> have "C \<inter> A \<noteq> {}"
by auto }
note inf_C = this
show "\<exists>x. {enat x<..} \<subseteq> A"
proof (rule ccontr)
assume "\<not> (\<exists>x. {enat x<..} \<subseteq> A)"
with \<open>\<infinity> \<in> A\<close> have "\<And>x. \<exists>y>x. enat y \<notin> A"
by (simp add: subset_eq Bex_def) (metis enat.exhaust enat_ord_simps(2))
then have "infinite {n. enat n \<notin> A}"
unfolding infinite_nat_iff_unbounded by auto
then have "infinite (enat ` {n. enat n \<notin> A})"
by (auto dest!: finite_imageD)
from inf_C[OF this] show False
by auto
qed
qed
qed
qed
end
lemmas tendsto_inf2[THEN tendsto_compose, tendsto_intros] =
tendsto_mcont[OF mcont_inf2]
lemma isCont_inf2[THEN isCont_o2[rotated]]:
"isCont (\<lambda>x. x \<sqinter> y) (z :: _ :: {ccpo_topology, complete_distrib_lattice})"
by(simp add: isCont_def tendsto_inf2 tendsto_ident_at)
lemmas tendsto_sup1[THEN tendsto_compose, tendsto_intros] =
tendsto_mcont[OF mcont_sup1]
lemma isCont_If: "isCont f x \<Longrightarrow> isCont g x \<Longrightarrow> isCont (\<lambda>x. if Q then f x else g x) x"
by (cases Q) auto
lemma isCont_enat_case: "isCont (f (epred n)) x \<Longrightarrow> isCont g x \<Longrightarrow> isCont (\<lambda>x. co.case_enat (g x) (\<lambda>n. f n x) n) x"
by (cases n rule: enat_coexhaust) auto
end
|
program coins_change
use, intrinsic :: iso_fortran_env, only : error_unit
use greedy_coins_mod, only : nr_change_coins_greedy => nr_change_coins, &
change_coins_greedy => change_coins
use dyn_prog_coins_mod, only : nr_change_coins_dyn_prog => nr_change_coins, &
change_coins_dyn_prog => change_coins
implicit none
integer :: amount
integer, dimension(:), allocatable :: coins, change
call get_arguments(amount, coins)
print '(A, I0)', 'greedy: ', nr_change_coins_greedy(amount, coins)
change = change_coins_greedy(amount, coins)
call show_change(coins, change)
deallocate (change)
print '(A, I0)', 'dynamic programming: ', nr_change_coins_dyn_prog(amount, coins)
change = change_coins_dyn_prog(amount, coins)
call show_change(coins, change)
deallocate (coins, change)
contains
subroutine show_change(coins, change)
implicit none
integer, dimension(:), intent(in) :: coins, change
print '(A, *(I4))', ' coins : ', coins
print '(A, *(I4))', ' change: ', change
end subroutine show_change
subroutine get_arguments(amount, coins)
implicit none
integer, intent(out) :: amount
integer, dimension(:), allocatable, intent(out) :: coins
character(len=1024) :: buffer, msg
integer :: status, i, nr_coins
if (command_argument_count() < 2) then
write (unit=error_unit, fmt='(A)') 'error: expecting amount and coins'
stop 1
end if
call get_command_argument(1, buffer)
read (buffer, fmt=*, iostat=status, iomsg=msg) amount
if (status /= 0) then
write (unit=error_unit, fmt='(2A)') 'error: ', trim(msg)
stop 2
end if
nr_coins = command_argument_count() - 1
allocate (coins(nr_coins), stat=status)
if (status /= 0) then
write (unit=error_unit, fmt='(A)') 'error: can not allocate coins'
stop 3
end if
do i = 1, nr_coins
call get_command_argument(1 + i, buffer)
read (buffer, fmt=*, iostat=status, iomsg=msg) coins(i)
if (status /= 0) then
write (unit=error_unit, fmt='(2A)') 'error: ', trim(msg)
stop 2
end if
end do
end subroutine
end program coins_change
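! Usage sketch (the executable name is assumed to match the program name):
!   $ ./coins_change 67 25 10 5 1
! The first argument is the amount to change, the remaining arguments are the
! coin denominations; the program prints the number of coins and the
! per-denomination change for both the greedy and the dynamic-programming
! implementations.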
|
#!/usr/bin/env python
# -*- coding: utf-8; -*-
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
import numpy as np
from ads.common.utils import _is_dask_dataframe, _is_dask_series
class ADSData(object):
def __init__(self, X=None, y=None, name="", dataset_type=None):
r"""
This class wraps the input dataframe to various models, evaluation, and explanation frameworks.
Its primary purpose is to hold any metadata relevant to these tasks. This can include its:
- X - the independent variables as some dataframe-like structure,
- y - the dependent variable or target column as some array-like structure,
- name - a string to name the data for user convenience,
- dataset_type - the type of the X value.
As part of this initiative, ADSData knows how to turn itself into an onnxruntime compatible data
structure with the method .to_onnxrt(), which takes an onnx session as input.
Parameters
----------
X : Union[pandas.DataFrame, dask.DataFrame, numpy.ndarray, scipy.sparse.csr.csr_matrix]
If str, URI for the dataset. The dataset could be read from local or network file system, hdfs, s3 and gcs
Should be none if X_train, y_train, X_test, Y_test are provided
y: Union[str, pandas.DataFrame, dask.DataFrame, pandas.Series, dask.Series, numpy.ndarray]
If str, name of the target in X, otherwise series of labels corresponding to X
name: str, optional
Name to identify this data
dataset_type: ADSDataset optional
When this value is available, would be used to evaluate the ads task type
kwargs:
Additional keyword arguments that would be passed to the underlying Pandas read API.
"""
self.X = X
self.y = y
self.name = name
self.dataset_type = dataset_type
@staticmethod
def build(X=None, y=None, name="", dataset_type=None, **kwargs):
r"""
Returns an ADSData object built from the (source, target) or (X,y)
Parameters
----------
X : Union[pandas.DataFrame, dask.DataFrame, numpy.ndarray, scipy.sparse.csr.csr_matrix]
If str, URI for the dataset. The dataset could be read from local or network file system, hdfs, s3 and gcs
Should be none if X_train, y_train, X_test, Y_test are provided
y: Union[str, pandas.DataFrame, dask.DataFrame, pandas.Series, dask.Series, numpy.ndarray]
If str, name of the target in X, otherwise series of labels corresponding to X
name: str, optional
Name to identify this data
dataset_type: ADSDataset, optional
When this value is available, would be used to evaluate the ads task
type
kwargs:
Additional keyword arguments that would be passed to the underlying Pandas read API.
Returns
-------
ads_data: ads.common.data.ADSData
A built ADSData object
Examples
--------
>>> data = open_csv("my.csv")
>>> data_ads = ADSData.build(data, 'target')
"""
if X is None or y is None:
raise ValueError("Both X and y are required.")
if _is_dask_dataframe(X):
X = X.compute()
if _is_dask_series(y):
y = y.compute()
if dataset_type is None:
dataset_type = type(X)
if isinstance(y, str):
try:
return ADSData(
X.drop(y, axis=1), X[y], name=name, dataset_type=dataset_type
)
except AttributeError:
raise ValueError(
"If y is a string, then X must be a pandas or dask dataframe"
)
else:
return ADSData(X, y, name=name, dataset_type=dataset_type)
def __repr__(self):
return "%sShape of X:%s\nShape of y:%s" % (
self.name + "\n",
str(self.X.shape),
str(self.y.shape),
)
def to_onnxrt(
self, sess, idx_range=None, model=None, impute_values={}, **kwargs
): # pragma: no cover
r"""
Returns itself formatted as an input for the onnxruntime session inputs passed in.
Parameters
----------
sess: Session
The session object
idx_range: Range
The range of inputs to convert to onnx
model: SupportedModel
A model that supports being serialized for the onnx runtime.
kwargs: additional keyword arguments
- sess_inputs - Pass in the output from onnxruntime.InferenceSession("model.onnx").get_inputs()
- input_dtypes (list) - If sess_inputs cannot be passed in, pass in the numpy dtypes of each input
- input_shapes (list) - If sess_inputs cannot be passed in, pass in the shape of each input
- input_names (list) - If sess_inputs cannot be passed in, pass in the name of each input
Returns
-------
ort: Array
array of inputs formatted for the given session.
"""
if model._underlying_model in ["torch"]:
sess_inputs = sess.get_inputs()
in_shape, in_name, in_type = [], [], []
for i, ftr in enumerate(sess_inputs):
in_type.append(ftr.type)
in_shape.append(ftr.shape)
in_name.append(ftr.name)
ret = {}
for i, name in enumerate(in_name):
idx_range = (0, len(self.X)) if idx_range is None else idx_range
batch_size = idx_range[1] - idx_range[0]
ret[name] = (
self.X[:batch_size]
.reshape([batch_size] + list(self.X[:1].shape))
.detach()
.cpu()
.numpy()
.astype(np.float32)
)
return ret
elif model._underlying_model in ["automl"]:
X_trans = model._onnx_data_transformer(
X=self.X, impute_values=impute_values
)
inputs = {}
for idx, c in enumerate(X_trans.columns):
inputs[sess.get_inputs()[idx].name] = (
X_trans[c]
.values.reshape((X_trans.shape[0], 1))
.astype(X_trans.dtypes[idx])
)
return inputs
elif model._underlying_model in ["lightgbm", "xgboost", "sklearn"]:
idx_range = (0, len(self.X)) if idx_range is None else idx_range
inputs = []
for name, row in self.X[idx_range[0] : idx_range[1]].iterrows():
inputs.append(list(row))
return {"input": inputs}
|
//
// Copyright 2010 Scott McMurray.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef BOOST_HASH_BLOCK_CYPHERS_DETAIL_SHACAL_FUNCTIONS_HPP
#define BOOST_HASH_BLOCK_CYPHERS_DETAIL_SHACAL_FUNCTIONS_HPP
#include <boost/hash/block_cyphers/detail/basic_functions.hpp>
namespace boost {
namespace hashes {
namespace block_cyphers {
namespace detail {
//
// Implemented directly from the standard as found at
// http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf
//
// Specifically, subsection 4.1
template <unsigned word_bits_>
struct basic_shacal_functions : basic_functions<word_bits_> {
typedef typename basic_functions<word_bits_>::word_type word_type;
static word_type Ch(word_type x, word_type y, word_type z) {
return (x & y) ^ (~x & z);
}
static word_type Maj(word_type x, word_type y, word_type z) {
return (x & y) ^ (x & z) ^ (y & z);
}
};
struct shacal_functions : public basic_shacal_functions<32> {
static word_type Parity(word_type x, word_type y, word_type z) {
return x ^ y ^ z;
}
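    // Round-function schedule from FIPS 180-2, subsection 4.1.1 (SHA-1):
    // rounds 0-19 use Ch, rounds 20-39 and 60-79 use Parity, and rounds
    // 40-59 use Maj; the nested comparisons below fall through accordingly.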
static word_type f(unsigned t, word_type x, word_type y, word_type z) {
if (t < 40) {
if (t < 20) return Ch(x, y, z);
} else {
if (t < 60) return Maj(x, y, z);
}
return Parity(x, y, z);
}
};
typedef shacal_functions shacal0_functions;
typedef shacal_functions shacal1_functions;
template <unsigned word_bits_>
struct shacal2_functions;
template <>
struct shacal2_functions<32> : public basic_shacal_functions<32> {
static word_type Sigma_0(word_type x) {
return ROTR< 2>(x) ^ ROTR<13>(x) ^ ROTR<22>(x);
}
static word_type Sigma_1(word_type x) {
return ROTR< 6>(x) ^ ROTR<11>(x) ^ ROTR<25>(x);
}
static word_type sigma_0(word_type x) {
return ROTR< 7>(x) ^ ROTR<18>(x) ^ SHR< 3>(x);
}
static word_type sigma_1(word_type x) {
return ROTR<17>(x) ^ ROTR<19>(x) ^ SHR<10>(x);
}
};
template <>
struct shacal2_functions<64> : public basic_shacal_functions<64> {
static word_type Sigma_0(word_type x) {
return ROTR<28>(x) ^ ROTR<34>(x) ^ ROTR<39>(x);
}
static word_type Sigma_1(word_type x) {
return ROTR<14>(x) ^ ROTR<18>(x) ^ ROTR<41>(x);
}
static word_type sigma_0(word_type x) {
return ROTR< 1>(x) ^ ROTR< 8>(x) ^ SHR< 7>(x);
}
static word_type sigma_1(word_type x) {
return ROTR<19>(x) ^ ROTR<61>(x) ^ SHR< 6>(x);
}
};
} // namespace detail
} // namespace block_cyphers
} // namespace hashes
} // namespace boost
#endif // BOOST_HASH_BLOCK_CYPHERS_DETAIL_SHACAL_FUNCTIONS_HPP
|
#ifndef _BASE_ITRT_H
#define _BASE_ITRT_H
#include <boost/iterator/iterator_facade.hpp>
#include "../pythonmodule.hpp"
namespace nctx { namespace python{
template <typename T, typename Iter, typename G>
class base_iterator : public b::iterator_facade<base_iterator<T,Iter,G>, T const, b::random_access_traversal_tag>
{
public:
typedef base_iterator<T, Iter, G> iterator;
T const* dereference() const {return cur;} //
T next(){
if(i == last){
PyErr_SetString(PyExc_StopIteration, "No more data.");
py::throw_error_already_set();
}
T prev(cur);
increment();
return prev;
}
void increment(){
i++;
//~ std::advance(i);
if(i != last)
update(cur);
}
void decrement(){
i--;
//~ std::advance(i,-1);
if(i != first)
update(cur);
}
void advance(int n){
i += n;
//~ std::advance(i,n);
if(i >= first && i < last)
update(cur);
}
//~ auto distance_to(iterator const &other) const {
//~ std::cout << "distance to " << other.cur << ": " << this->i - other.i << std::endl;
//~ return this->i - other.i;
//~ }
bool equal(iterator const &other) const{
return this->i == other.i;
}
T operator*() {return cur;} //
size_t get_size(){
return last - first;
}
bool operator==(const iterator& rhs) const {return i == rhs.i;}
//~ bool operator!=(const base_iterator& rhs) const {return !(*this == rhs);}
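    // Note: the overload below compares against rhs.last rather than rhs.i.
    // Since begin() and end() both return *this, this appears to be a
    // deliberate choice so that Python-style iteration stops once the wrapped
    // iterator reaches last; the commented-out overload above is the
    // conventional element-wise form.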
bool operator!=(const iterator& rhs) const {return i != rhs.last;}
iterator begin(){ return *this; }
iterator end(){ return *this; }
protected:
Iter first, i, last;
T cur;
base_iterator(){}
void init(){
i = Iter(first);
if(i != last)
update(cur);
}
void update(T& out){
out = *i;
}
};
}}// proc
#endif
|
!******************************************************************************
!******************************************************************************
MODULE recording_estimation
!/* external modules */
USE recording_warning
USE shared_interface
!/* setup */
IMPLICIT NONE
PRIVATE
PUBLIC :: record_estimation
!/* explicit interface */
INTERFACE record_estimation
MODULE PROCEDURE record_estimation_eval, record_estimation_final, record_scaling, record_estimation_stop, record_estimation_scalability, record_estimation_auto_npt, record_estimation_auto_rhobeg
END INTERFACE
CONTAINS
!******************************************************************************
!******************************************************************************
SUBROUTINE record_estimation_auto_rhobeg(algorithm, rhobeg, rhoend)
!/* external objects */
CHARACTER(*), INTENT(IN) :: algorithm
REAL(our_dble), INTENT(IN) :: rhobeg
REAL(our_dble), INTENT(IN) :: rhoend
!/* internal objects */
CHARACTER(25) :: rho_char(2)
INTEGER(our_int) :: u
!------------------------------------------------------------------------------
! Algorithm
!------------------------------------------------------------------------------
WRITE(rho_char(1), '(f25.15)') rhobeg
WRITE(rho_char(2), '(f25.15)') rhoend
OPEN(NEWUNIT=u, FILE='est.respy.log', POSITION='APPEND', ACTION='WRITE')
WRITE(u, *) 'Warning: Automatic adjustment of rhobeg/rhoend for ' // algorithm // ' required. Both are set to their recommended values of (rhobeg/rhoend): (' // TRIM(ADJUSTL(rho_char(1))) // ',' // TRIM(ADJUSTL(rho_char(2))) // ')'
WRITE(u, *)
CLOSE(u)
END SUBROUTINE
!******************************************************************************
!******************************************************************************
SUBROUTINE record_estimation_auto_npt(algorithm, npt)
!/* external objects */
INTEGER(our_int), INTENT(IN) :: npt
CHARACTER(*), INTENT(IN) :: algorithm
!/* internal objects */
CHARACTER(10) :: npt_char
INTEGER(our_int) :: u
!------------------------------------------------------------------------------
! Algorithm
!------------------------------------------------------------------------------
WRITE(npt_char, '(i10)') npt
OPEN(NEWUNIT=u, FILE='est.respy.log', POSITION='APPEND', ACTION='WRITE')
WRITE(u, *) 'Warning: Automatic adjustment of NPT for ' // algorithm // ' required. NPT set to its recommended value of ' // TRIM(ADJUSTL(npt_char)) // '.'
WRITE(u, *)
CLOSE(u)
END SUBROUTINE
!******************************************************************************
!******************************************************************************
SUBROUTINE record_estimation_scalability(which)
!/* external objects */
CHARACTER(*), INTENT(IN) :: which
!/* internal objects */
CHARACTER(55) :: today
CHARACTER(55) :: now
INTEGER(our_int) :: u
!------------------------------------------------------------------------------
! Algorithm
!------------------------------------------------------------------------------
115 FORMAT(3x,A5,6X,A10,5X,A8)
125 FORMAT(3x,A6,5X,A10,5X,A8)
CALL get_time(today, now)
IF (which == 'Start') THEN
OPEN(NEWUNIT=u, FILE='.scalability.respy.log', ACTION='WRITE')
WRITE(u, 115) which, today, now
ELSE
OPEN(NEWUNIT=u, FILE='.scalability.respy.log', POSITION='APPEND', ACTION='WRITE')
WRITE(u, 125) which, today, now
END IF
CLOSE(u)
END SUBROUTINE
!******************************************************************************
!******************************************************************************
SUBROUTINE record_estimation_stop()
!/* internal objects */
INTEGER(our_int) :: u
!------------------------------------------------------------------------------
! Algorithm
!------------------------------------------------------------------------------
OPEN(NEWUNIT=u, FILE='est.respy.info', POSITION='APPEND', ACTION='WRITE')
WRITE(u, *)
WRITE(u, *) 'TERMINATED'
CLOSE(u)
END SUBROUTINE
!******************************************************************************
!******************************************************************************
SUBROUTINE record_estimation_eval(x_optim_free_scaled, x_optim_all_unscaled, val_current, num_eval, num_paras, num_types, optim_paras, start)
! We record all things related to the optimization in est.respy.log. That is why we print the values actually relevant for the optimization, i.e. free and scaled. In est.respy.info we switch to the user's perspective; all parameters are printed with their economic interpretation intact.
!/* external objects */
TYPE(OPTIMPARAS_DICT), INTENT(IN) :: optim_paras
INTEGER(our_int), INTENT(IN) :: num_paras
INTEGER(our_int), INTENT(IN) :: num_types
INTEGER(our_int), INTENT(IN) :: num_eval
REAL(our_dble), INTENT(IN) :: x_optim_free_scaled(num_free)
REAL(our_dble), INTENT(IN) :: x_optim_all_unscaled(num_paras)
REAL(our_dble), INTENT(IN) :: val_current
REAL(our_dble), INTENT(IN) :: start
!/* internal objects */
INTEGER(our_int), SAVE :: num_step = - one_int
! Automatic objects cannot have the SAVE attribute
REAL(our_dble), SAVE :: x_optim_container(100, 3) = -HUGE_FLOAT
REAL(our_dble), SAVE :: x_econ_container(100, 3) = -HUGE_FLOAT
REAL(our_dble), SAVE :: crit_vals(3)
REAL(our_dble) :: x_optim_shares((num_types - 1) * 2)
REAL(our_dble) :: shocks_cholesky(4, 4)
REAL(our_dble) :: shocks_cov(3, 4, 4)
REAL(our_dble) :: flattened_cov(3, 10)
REAL(our_dble) :: cond(3)
REAL(our_dble) :: finish
INTEGER(our_int) :: i
INTEGER(our_int) :: j
INTEGER(our_int) :: k
INTEGER(our_int) :: l
INTEGER(our_int) :: u
LOGICAL :: is_large(3) = .False.
LOGICAL :: is_start
LOGICAL :: is_step
CHARACTER(55) :: today_char
CHARACTER(55) :: now_char
CHARACTER(155) :: val_char
CHARACTER(50) :: tmp_char
!------------------------------------------------------------------------------
! Algorithm
!------------------------------------------------------------------------------
crit_vals(3) = val_current
! Determine events
is_start = (num_eval == 1)
IF (is_start) THEN
crit_vals(1) = val_current
crit_vals(2) = HUGE_FLOAT
END IF
is_step = (crit_vals(2) .GT. val_current)
! Update counters
IF (is_step) THEN
num_step = num_step + 1
crit_vals(2) = val_current
END IF
! Sometimes on the path of the optimizer, the value of the criterion
! function is just too large for pretty printing.
DO i = 1, 3
is_large(i) = (ABS(crit_vals(i)) > LARGE_FLOAT)
END DO
! Create the container for the *.log file. The subsetting is required as an automatic object cannot be saved.
If(is_start) x_optim_container(:num_free, 1) = x_optim_free_scaled
If(is_step) x_optim_container(:num_free, 2) = x_optim_free_scaled
x_optim_container(:num_free, 3) = x_optim_free_scaled
! Create the container for the *.info file.
DO i = 1, 3
CALL extract_cholesky(shocks_cholesky, x_optim_all_unscaled)
shocks_cov(i, :, :) = MATMUL(shocks_cholesky, TRANSPOSE(shocks_cholesky))
CALL spectral_condition_number(cond(i), shocks_cov(i, :, :))
k = 1
DO j = 1, 4
DO l = j, 4
flattened_cov(i, k) = shocks_cov(i, j, l)
IF (j == l) flattened_cov(i, k) = SQRT(flattened_cov(i, k))
k = k + 1
END DO
END DO
END DO
x_optim_shares = x_optim_all_unscaled(54:54 + (num_types - 1) * 2 - 1)
DO i = 1, 3
IF ((i == 1) .AND. (.NOT. is_start)) CYCLE
IF ((i == 2) .AND. (.NOT. is_step)) CYCLE
x_econ_container(:43, i) = x_optim_all_unscaled(:43)
x_econ_container(44:53, i) = flattened_cov(i, :)
x_econ_container(54:54 + (num_types - 1) * 2 - 1, i) = x_optim_shares
x_econ_container(54 + (num_types - 1) * 2:num_paras, i) = x_optim_all_unscaled(54 + (num_types - 1) * 2:num_paras)
END DO
CALL get_time(today_char, now_char)
finish = get_wtime()
100 FORMAT(1x,A4,i13,10x,A4,i10)
110 FORMAT(3x,A4,25X,A10)
120 FORMAT(3x,A4,27X,A8)
125 FORMAT(3x,A8,23X,i8)
130 FORMAT(3x,A9,5X,A25)
140 FORMAT(3x,A10,3(4x,A25))
150 FORMAT(3x,i10,3(4x,A25))
155 FORMAT(3x,A9,1x,3(4x,f25.15))
OPEN(NEWUNIT=u, FILE='est.respy.log', POSITION='APPEND', ACTION='WRITE')
WRITE(u, 100) 'EVAL', num_eval, 'STEP', num_step
WRITE(u, *)
WRITE(u, 110) 'Date', today_char
WRITE(u, 120) 'Time', now_char
WRITE(u, 125) 'Duration', INT(finish - start)
WRITE(u, *)
WRITE(u, 130) 'Criterion', char_floats(crit_vals(3:3))
WRITE(u, *)
WRITE(u, 140) 'Identifier', 'Start', 'Step', 'Current'
WRITE(u, *)
j = 1
DO i = 1, num_paras
IF(optim_paras%paras_fixed(i)) CYCLE
WRITE(u, 150) i - 1, char_floats(x_optim_container(j, :))
j = j + 1
END DO
WRITE(u, *)
WRITE(u, 155) 'Condition', LOG(cond)
WRITE(u, *)
CLOSE(u)
200 FORMAT(A25,3(4x,A25))
210 FORMAT(A25,A87)
220 FORMAT(A25,3(4x,A25))
230 FORMAT(i25,3(4x,A25))
250 FORMAT(A25)
270 FORMAT(1x,A15,13x,i25)
280 FORMAT(1x,A21,7x,i25)
val_char = ''
DO i = 1, 3
IF (is_large(i)) THEN
WRITE(tmp_char, '(4x,A25)') '---'
ELSE
WRITE(tmp_char, '(4x,f25.15)') crit_vals(i)
END IF
val_char = TRIM(val_char) // TRIM(tmp_char)
END DO
OPEN(NEWUNIT=u, FILE='est.respy.info', ACTION='WRITE')
WRITE(u, *)
WRITE(u, 250) 'Criterion Function'
WRITE(u, *)
WRITE(u, 200) '', 'Start', 'Step', 'Current'
WRITE(u, *)
WRITE(u, 210) '', val_char
WRITE(u, *)
WRITE(u, *)
WRITE(u, 250) 'Economic Parameters'
WRITE(u, *)
WRITE(u, 220) 'Identifier', 'Start', 'Step', 'Current'
WRITE(u, *)
DO i = 1, num_paras
WRITE(u, 230) (i - 1), char_floats(x_econ_container(i, :))
END DO
WRITE(u, *)
WRITE(u, 270) 'Number of Steps', num_step
WRITE(u, *)
WRITE(u, 280) 'Number of Evaluations', num_eval
CLOSE(u)
DO i = 1, 3
IF (is_large(i)) CALL record_warning(i)
END DO
END SUBROUTINE
!******************************************************************************
!******************************************************************************
SUBROUTINE record_estimation_final(success, message)
!/* external objects */
LOGICAL, INTENT(IN) :: success
CHARACTER(*), INTENT(IN) :: message
!/* internal objects */
INTEGER(our_int) :: u
!------------------------------------------------------------------------------
! Algorithm
!------------------------------------------------------------------------------
OPEN(NEWUNIT=u, FILE='est.respy.log', POSITION='APPEND', ACTION='WRITE')
WRITE(u, *) 'ESTIMATION REPORT'
WRITE(u, *)
IF (success) THEN
WRITE(u, *) ' Success True'
ELSE
WRITE(u, *) ' Success False'
END IF
WRITE(u, *) ' Message ', TRIM(message)
CLOSE(u)
END SUBROUTINE
!******************************************************************************
!******************************************************************************
SUBROUTINE record_scaling(precond_matrix, x_free_start, optim_paras, is_setup)
!/* external objects */
TYPE(OPTIMPARAS_DICT), INTENT(IN) :: optim_paras
REAL(our_dble), INTENT(IN) :: precond_matrix(num_free, num_free)
REAL(our_dble), INTENT(IN) :: x_free_start(num_free)
LOGICAL, INTENT(IN) :: is_setup
!/* internal objects */
REAL(our_dble) :: x_free_scaled(num_free)
REAL(our_dble) :: floats(3)
INTEGER(our_int) :: i
INTEGER(our_int) :: j
INTEGER(our_int) :: k
INTEGER(our_int) :: u
CHARACTER(155) :: val_char
CHARACTER(50) :: tmp_char
LOGICAL :: no_bounds
!------------------------------------------------------------------------------
! Algorithm
!------------------------------------------------------------------------------
x_free_scaled = apply_scaling(x_free_start, precond_matrix, 'do')
120 FORMAT(3x,A10,5(4x,A25))
135 FORMAT(3x,i10,3(4x,A25),A58)
OPEN(NEWUNIT=u, FILE='est.respy.log', POSITION='APPEND', ACTION='WRITE')
! The initial setup serves to remind users that scaling is going on in the background. Otherwise, they remain puzzled as there is no output for quite some time if the gradient evaluations are time consuming.
IF (is_setup) THEN
WRITE(u, *) 'PRECONDITIONING'
WRITE(u, *)
WRITE(u, 120) 'Identifier', 'Original', 'Scale', 'Transformed Value', 'Transformed Lower', 'Transformed Upper'
WRITE(u, *)
ELSE
! Sometimes the bounds are just too large for pretty printing
j = 1
DO i = 1, num_paras
IF(optim_paras%paras_fixed(i)) CYCLE
! We need to do some pre-processing for the transformed bounds.
val_char = ''
DO k = 1, 2
no_bounds = (ABS(x_optim_bounds_free_scaled(k, j)) > LARGE_FLOAT)
IF(no_bounds) THEN
WRITE(tmp_char, '(4x,A25)') '---'
ELSE
WRITE(tmp_char, '(4x,f25.15)') x_optim_bounds_free_scaled(k, j)
END IF
val_char = TRIM(val_char) // TRIM(tmp_char)
END DO
floats = (/ x_free_start(j), precond_matrix(j, j), x_free_scaled(j) /)
WRITE(u, 135) i - 1, char_floats(floats) , val_char
j = j + 1
END DO
WRITE(u, *)
END IF
CLOSE(u)
END SUBROUTINE
!******************************************************************************
!******************************************************************************
SUBROUTINE get_time(today_char, now_char)
!/* external objects */
CHARACTER(*), INTENT(OUT) :: today_char
CHARACTER(*), INTENT(OUT) :: now_char
!/* internal objects */
INTEGER(our_int) :: values(8)
!------------------------------------------------------------------------------
! Algorithm
!------------------------------------------------------------------------------
CALL DATE_AND_TIME(VALUES=values)
5503 FORMAT(i0.2,'/',i0.2,'/',i0.4)
5504 FORMAT(i0.2,':',i0.2,':',i0.2)
WRITE(today_char, 5503) values(3), values(2), values(1)
WRITE(now_char, 5504) values(5:7)
END SUBROUTINE
!******************************************************************************
!******************************************************************************
FUNCTION char_floats(floats)
!/* external objects */
REAL(our_dble), INTENT(IN) :: floats(:)
CHARACTER(50) :: char_floats(SIZE(floats))
!/* internal objects */
INTEGER(our_int) :: i
!------------------------------------------------------------------------------
! Algorithm
!------------------------------------------------------------------------------
910 FORMAT(f25.15)
900 FORMAT(A25)
DO i = 1, SIZE(floats)
IF (ABS(floats(i)) > LARGE_FLOAT) THEN
WRITE(char_floats(i), 900) '---'
ELSE
WRITE(char_floats(i), 910) floats(i)
END IF
END DO
END FUNCTION
!******************************************************************************
!******************************************************************************
END MODULE
|
function y = cvx_isaffine( x, full )
error( nargchk( 1, 2, nargin ) );
if nargin == 1,
y = true;
else
y = true( size( x ) );
end
% Copyright 2010 Michael C. Grant and Stephen P. Boyd.
% See the file COPYING.txt for full copyright information.
% The command 'cvx_where' will show where this file is located.
|
\chapter{``N'' Standard Extension for User-Level Interrupts, Version 1.1}
\label{chap:n}
\begin{commentary}
This is a placeholder for a more complete writeup of the N
extension, and to form a basis for discussion.
An ongoing topic of discussion is whether, for systems needing only M and
U privilege modes, the N extension should be supplanted by S-mode without
virtual memory (i.e., with {\tt satp} hardwired to zero).
This approach would have similar hardware cost and would simplify the
architecture.
\end{commentary}
This chapter presents a proposal for adding RISC-V user-level
interrupt and exception handling. When the N extension is present,
and the outer execution environment has delegated designated
interrupts and exceptions to user-level, then hardware can transfer
control directly to a user-level trap handler without invoking the
outer execution environment.
\begin{commentary}
User-level interrupts are primarily intended to support secure
embedded systems with only M-mode and U-mode present, but can also be
supported in systems running Unix-like operating systems to support
user-level trap handling.
When used in a Unix environment, the user-level interrupts would
likely not replace conventional signal handling, but could be used as
a building block for further extensions that generate user-level
events such as garbage collection barriers, integer overflow, and
floating-point traps.
\end{commentary}
\section{Additional CSRs}
New user-visible CSRs are added to support the N extension.
Their encodings are listed in Table~\ref{ucsrnames} in
Chapter~\ref{chap:priv-csrs}.
\subsection{User Status Register ({\tt ustatus})}
The {\tt ustatus} register is a UXLEN-bit read/write register
formatted as shown in Figure~\ref{ustatusreg}. The {\tt ustatus}
register keeps track of and controls the hart's current operating
state.
\begin{figure*}[h!]
\begin{center}
\setlength{\tabcolsep}{4pt}
\begin{tabular}{KccFc}
\\
\instbitrange{UXLEN}{5} &
\instbit{4} &
\instbitrange{3}{1} &
\instbit{0} \\
\hline
\multicolumn{1}{|c|}{\wpri} &
\multicolumn{1}{c|}{UPIE} &
\multicolumn{1}{c|}{\wpri} &
\multicolumn{1}{c|}{UIE} \\
\hline
UXLEN-5 & 1 & 3 & 1 \\
\end{tabular}
\end{center}
\vspace{-0.1in}
\caption{User-mode status register ({\tt ustatus}).}
\label{ustatusreg}
\end{figure*}
The user interrupt-enable bit UIE disables user-level interrupts when
clear. The value of UIE is copied into UPIE when a user-level trap is
taken, and the value of UIE is set to zero to provide atomicity for
the user-level trap handler.
The UIE and UPIE bits are mirrored in the {\tt mstatus} and {\tt sstatus}
registers in the same bit positions.
\begin{commentary}
There is no UPP bit to hold the previous privilege mode as it can
only be user mode.
\end{commentary}
A new instruction, URET, is used to return from traps in U-mode.
URET copies UPIE into UIE, then sets UPIE, before copying {\tt uepc}
to the {\tt pc}.
\begin{commentary}
UPIE is set after the UPIE/UIE stack is popped to enable interrupts
and help catch coding errors.
\end{commentary}
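For illustration only, system software might refer to the {\tt ustatus} fields
described above with C-style masks; the macro names below are hypothetical and
not part of this specification:
\begin{verbatim}
/* ustatus bit positions (illustrative names only) */
#define USTATUS_UIE   (1u << 0)   /* user-level interrupt enable          */
#define USTATUS_UPIE  (1u << 4)   /* UIE value saved when a trap is taken */
\end{verbatim}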
\subsection{User Interrupt Registers ({\tt uip} and {\tt uie})}
The {\tt uip} register is a UXLEN-bit read/write register containing
information on pending interrupts, while {\tt uie} is the corresponding
UXLEN-bit read/write register containing interrupt enable bits.
\begin{figure*}[h!]
{\footnotesize
\begin{center}
\setlength{\tabcolsep}{4pt}
\begin{tabular}{KcFcFc}
\instbitrange{UXLEN-1}{9} &
\instbit{8} &
\instbitrange{7}{5} &
\instbit{4} &
\instbitrange{3}{1} &
\instbit{0} \\
\hline
\multicolumn{1}{|c|}{\wpri} &
\multicolumn{1}{c|}{UEIP} &
\multicolumn{1}{c|}{\wpri} &
\multicolumn{1}{c|}{UTIP} &
\multicolumn{1}{c|}{\wpri} &
\multicolumn{1}{c|}{USIP} \\
\hline
UXLEN-9 & 1 & 3 & 1 & 3 & 1 \\
\end{tabular}
\end{center}
}
\vspace{-0.1in}
\caption{User interrupt-pending register ({\tt uip}).}
\label{uipreg}
\end{figure*}
\begin{figure*}[h!]
{\footnotesize
\begin{center}
\setlength{\tabcolsep}{4pt}
\begin{tabular}{KcFcFc}
\instbitrange{UXLEN-1}{9} &
\instbit{8} &
\instbitrange{7}{5} &
\instbit{4} &
\instbitrange{3}{1} &
\instbit{0} \\
\hline
\multicolumn{1}{|c|}{\wpri} &
\multicolumn{1}{c|}{UEIE} &
\multicolumn{1}{c|}{\wpri} &
\multicolumn{1}{c|}{UTIE} &
\multicolumn{1}{c|}{\wpri} &
\multicolumn{1}{c|}{USIE} \\
\hline
UXLEN-9 & 1 & 3 & 1 & 3 & 1 \\
\end{tabular}
\end{center}
}
\vspace{-0.1in}
\caption{User interrupt-enable register ({\tt uie}).}
\label{uiereg}
\end{figure*}
Three types of interrupts are defined: software interrupts, timer interrupts,
and external interrupts. A user-level software interrupt is triggered
on the current hart by writing 1 to its user software interrupt-pending
(USIP) bit in the {\tt uip} register. A pending user-level software
interrupt can be cleared by writing 0 to the USIP bit in {\tt uip}.
User-level software interrupts are disabled when the USIE bit in the
{\tt uie} register is clear.
The ABI should provide a mechanism to send interprocessor interrupts to other
harts, which will ultimately cause the USIP bit to be set in the recipient
hart's {\tt uip} register.
All bits besides USIP in the {\tt uip} register are read-only.
A user-level timer interrupt is pending if the UTIP bit in the {\tt uip}
register is set. User-level timer interrupts are disabled when the UTIE
bit in the {\tt uie} register is clear. The ABI should provide a
mechanism to clear a pending timer interrupt.
A user-level external interrupt is pending if the UEIP bit in the
{\tt uip} register is set. User-level external interrupts are disabled
when the UEIE bit in the {\tt uie} register is clear. The ABI
should provide facilities to mask, unmask, and query the cause of external
interrupts.
The {\tt uip} and {\tt uie} registers are subsets of the {\tt mip} and {\tt
mie} registers.
Reading any field, or writing any writable field, of {\tt uip}/{\tt uie}
effects a read or write of the homonymous field of {\tt mip}/{\tt mie}.
If S-mode is implemented, the {\tt uip} and {\tt uie} registers are also
subsets of the {\tt sip} and {\tt sie} registers.
\subsection{Machine Trap Delegation Registers ({\tt medeleg} and {\tt mideleg})}
In systems with the N extension, the {\tt medeleg} and {\tt mideleg}
registers, described in Chapter~\ref{machine}, must be implemented.
In systems that implement S-mode, {\tt medeleg} and {\tt mideleg}
behave as described in Chapter~\ref{machine}.
In systems with only M and U privilege modes, setting a bit in {\tt medeleg}
or {\tt mideleg} delegates the corresponding trap in U-mode to the U-mode trap
handler.
\subsection{Supervisor Trap Delegation Registers ({\tt sedeleg} and {\tt sideleg})}
For systems with both S-mode and the N extension, new CSRs {\tt
sedeleg} and {\tt sideleg} are added.
These registers have the same layout as the machine trap delegation registers,
{\tt medeleg} and {\tt mideleg}.
{\tt sedeleg} and {\tt sideleg} allow S-mode to delegate traps to U-mode.
Only bits corresponding to traps that have been delegated to S-mode are
writable; the others are hardwired to zero.
Setting a bit in {\tt sedeleg} or {\tt sideleg} delegates the corresponding
trap in U-mode to the U-mode trap handler.
\subsection{Other CSRs}
The {\tt uscratch}, {\tt uepc}, {\tt ucause}, {\tt utvec}, and {\tt utval}
CSRs are defined analogously to the {\tt mscratch}, {\tt mepc}, {\tt mcause},
{\tt mtvec}, and {\tt mtval} CSRs.
\begin{commentary}
A more complete writeup is to follow.
\end{commentary}
\section{N Extension Instructions}
The URET instruction is added to perform the analogous function to
MRET and SRET.
\section{Reducing Context-Swap Overhead}
The user-level interrupt-handling registers add considerable state to
the user-level context, yet will usually rarely be active in normal
use. In particular, {\tt uepc}, {\tt ucause}, and {\tt utval} are
only valid during execution of a trap handler.
An NS field can be added to {\tt mstatus} and {\tt sstatus} following
the format of the FS and XS fields to reduce context-switch overhead
when the values are not live. Execution of URET will place the {\tt
uepc}, {\tt ucause}, and {\tt utval} back into initial state.
|
[STATEMENT]
lemma node_sos_cases [elim]:
"(NodeS i p R, a, NodeS i p' R') \<in> node_sos S \<Longrightarrow>
(\<And>m . \<lbrakk> a = R:*cast(m); R' = R; (p, broadcast m, p') \<in> S \<rbrakk> \<Longrightarrow> P) \<Longrightarrow>
(\<And>m D. \<lbrakk> a = (R \<inter> D):*cast(m); R' = R; (p, groupcast D m, p') \<in> S \<rbrakk> \<Longrightarrow> P) \<Longrightarrow>
(\<And>d m. \<lbrakk> a = {d}:*cast(m); R' = R; (p, unicast d m, p') \<in> S; d \<in> R \<rbrakk> \<Longrightarrow> P) \<Longrightarrow>
(\<And>d. \<lbrakk> a = \<tau>; R' = R; (p, \<not>unicast d, p') \<in> S; d \<notin> R \<rbrakk> \<Longrightarrow> P) \<Longrightarrow>
(\<And>d. \<lbrakk> a = i:deliver(d); R' = R; (p, deliver d, p') \<in> S \<rbrakk> \<Longrightarrow> P) \<Longrightarrow>
(\<And>m. \<lbrakk> a = {i}\<not>{}:arrive(m); R' = R; (p, receive m, p') \<in> S \<rbrakk> \<Longrightarrow> P) \<Longrightarrow>
( \<lbrakk> a = \<tau>; R' = R; (p, \<tau>, p') \<in> S \<rbrakk> \<Longrightarrow> P) \<Longrightarrow>
(\<And>m. \<lbrakk> a = {}\<not>{i}:arrive(m); R' = R; p = p' \<rbrakk> \<Longrightarrow> P) \<Longrightarrow>
(\<And>i i'. \<lbrakk> a = connect(i, i'); R' = R \<union> {i'}; p = p' \<rbrakk> \<Longrightarrow> P) \<Longrightarrow>
(\<And>i i'. \<lbrakk> a = connect(i', i); R' = R \<union> {i'}; p = p' \<rbrakk> \<Longrightarrow> P) \<Longrightarrow>
(\<And>i i'. \<lbrakk> a = disconnect(i, i'); R' = R - {i'}; p = p' \<rbrakk> \<Longrightarrow> P) \<Longrightarrow>
(\<And>i i'. \<lbrakk> a = disconnect(i', i); R' = R - {i'}; p = p' \<rbrakk> \<Longrightarrow> P) \<Longrightarrow>
(\<And>i i' i''. \<lbrakk> a = connect(i', i''); R' = R; p = p'; i \<noteq> i'; i \<noteq> i'' \<rbrakk> \<Longrightarrow> P) \<Longrightarrow>
(\<And>i i' i''. \<lbrakk> a = disconnect(i', i''); R' = R; p = p'; i \<noteq> i'; i \<noteq> i'' \<rbrakk> \<Longrightarrow> P) \<Longrightarrow>
P"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>(NodeS i p R, a, NodeS i p' R') \<in> node_sos S; \<And>m. \<lbrakk>a = R:*cast(m); R' = R; (p, broadcast m, p') \<in> S\<rbrakk> \<Longrightarrow> P; \<And>m D. \<lbrakk>a = (R \<inter> D):*cast(m); R' = R; (p, groupcast D m, p') \<in> S\<rbrakk> \<Longrightarrow> P; \<And>d m. \<lbrakk>a = {d}:*cast(m); R' = R; (p, unicast d m, p') \<in> S; d \<in> R\<rbrakk> \<Longrightarrow> P; \<And>d. \<lbrakk>a = \<tau>; R' = R; (p, \<not>unicast d, p') \<in> S; d \<notin> R\<rbrakk> \<Longrightarrow> P; \<And>d. \<lbrakk>a = i:deliver(d); R' = R; (p, deliver d, p') \<in> S\<rbrakk> \<Longrightarrow> P; \<And>m. \<lbrakk>a = {i}\<not>{}:arrive(m); R' = R; (p, receive m, p') \<in> S\<rbrakk> \<Longrightarrow> P; \<lbrakk>a = \<tau>; R' = R; (p, \<tau>, p') \<in> S\<rbrakk> \<Longrightarrow> P; \<And>m. \<lbrakk>a = {}\<not>{i}:arrive(m); R' = R; p = p'\<rbrakk> \<Longrightarrow> P; \<And>i i'. \<lbrakk>a = connect(i, i'); R' = R \<union> {i'}; p = p'\<rbrakk> \<Longrightarrow> P; \<And>i i'. \<lbrakk>a = connect(i', i); R' = R \<union> {i'}; p = p'\<rbrakk> \<Longrightarrow> P; \<And>i i'. \<lbrakk>a = disconnect(i, i'); R' = R - {i'}; p = p'\<rbrakk> \<Longrightarrow> P; \<And>i i'. \<lbrakk>a = disconnect(i', i); R' = R - {i'}; p = p'\<rbrakk> \<Longrightarrow> P; \<And>i i' i''. \<lbrakk>a = connect(i', i''); R' = R; p = p'; i \<noteq> i'; i \<noteq> i''\<rbrakk> \<Longrightarrow> P; \<And>i i' i''. \<lbrakk>a = disconnect(i', i''); R' = R; p = p'; i \<noteq> i'; i \<noteq> i''\<rbrakk> \<Longrightarrow> P\<rbrakk> \<Longrightarrow> P
[PROOF STEP]
by (erule node_sos.cases) simp_all
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Propositional (intensional) equality
------------------------------------------------------------------------
module Relation.Binary.PropositionalEquality where
open import Function
open import Function.Equality using (Π; _⟶_; ≡-setoid)
open import Data.Product
open import Data.Unit.Core
open import Level
open import Relation.Binary
import Relation.Binary.Indexed as I
open import Relation.Binary.Consequences
open import Relation.Binary.HeterogeneousEquality.Core as H using (_≅_)
-- Some of the definitions can be found in the following modules:
open import Relation.Binary.Core public using (_≡_; refl; _≢_)
open import Relation.Binary.PropositionalEquality.Core public
------------------------------------------------------------------------
-- Some properties
subst₂ : ∀ {a b p} {A : Set a} {B : Set b} (P : A → B → Set p)
{x₁ x₂ y₁ y₂} → x₁ ≡ x₂ → y₁ ≡ y₂ → P x₁ y₁ → P x₂ y₂
subst₂ P refl refl p = p
cong : ∀ {a b} {A : Set a} {B : Set b}
(f : A → B) {x y} → x ≡ y → f x ≡ f y
cong f refl = refl
cong-app : ∀ {a b} {A : Set a} {B : A → Set b} {f g : (x : A) → B x} →
f ≡ g → (x : A) → f x ≡ g x
cong-app refl x = refl
cong₂ : ∀ {a b c} {A : Set a} {B : Set b} {C : Set c}
(f : A → B → C) {x y u v} → x ≡ y → u ≡ v → f x u ≡ f y v
cong₂ f refl refl = refl
proof-irrelevance : ∀ {a} {A : Set a} {x y : A} (p q : x ≡ y) → p ≡ q
proof-irrelevance refl refl = refl
setoid : ∀ {a} → Set a → Setoid _ _
setoid A = record
{ Carrier = A
; _≈_ = _≡_
; isEquivalence = isEquivalence
}
decSetoid : ∀ {a} {A : Set a} → Decidable (_≡_ {A = A}) → DecSetoid _ _
decSetoid dec = record
{ _≈_ = _≡_
; isDecEquivalence = record
{ isEquivalence = isEquivalence
; _≟_ = dec
}
}
isPreorder : ∀ {a} {A : Set a} → IsPreorder {A = A} _≡_ _≡_
isPreorder = record
{ isEquivalence = isEquivalence
; reflexive = id
; trans = trans
}
preorder : ∀ {a} → Set a → Preorder _ _ _
preorder A = record
{ Carrier = A
; _≈_ = _≡_
; _∼_ = _≡_
; isPreorder = isPreorder
}
------------------------------------------------------------------------
-- Pointwise equality
infix 4 _≗_
_→-setoid_ : ∀ {a b} (A : Set a) (B : Set b) → Setoid _ _
A →-setoid B = ≡-setoid A (Setoid.indexedSetoid (setoid B))
_≗_ : ∀ {a b} {A : Set a} {B : Set b} (f g : A → B) → Set _
_≗_ {A = A} {B} = Setoid._≈_ (A →-setoid B)
:→-to-Π : ∀ {a b₁ b₂} {A : Set a} {B : I.Setoid _ b₁ b₂} →
((x : A) → I.Setoid.Carrier B x) → Π (setoid A) B
:→-to-Π {B = B} f = record { _⟨$⟩_ = f; cong = cong′ }
where
open I.Setoid B using (_≈_)
cong′ : ∀ {x y} → x ≡ y → f x ≈ f y
cong′ refl = I.Setoid.refl B
→-to-⟶ : ∀ {a b₁ b₂} {A : Set a} {B : Setoid b₁ b₂} →
(A → Setoid.Carrier B) → setoid A ⟶ B
→-to-⟶ = :→-to-Π
------------------------------------------------------------------------
-- The old inspect idiom
-- The old inspect idiom has been deprecated, and may be removed in
-- the future. Use inspect on steroids instead.
module Deprecated-inspect where
-- The inspect idiom can be used when you want to pattern match on
-- the result r of some expression e, and you also need to
-- "remember" that r ≡ e.
-- The inspect idiom has a problem: sometimes you can only pattern
-- match on the p part of p with-≡ eq if you also pattern match on
-- the eq part, and then you no longer have access to the equality.
-- Inspect on steroids solves this problem.
data Inspect {a} {A : Set a} (x : A) : Set a where
_with-≡_ : (y : A) (eq : x ≡ y) → Inspect x
inspect : ∀ {a} {A : Set a} (x : A) → Inspect x
inspect x = x with-≡ refl
-- Example usage:
-- f x y with inspect (g x)
-- f x y | c z with-≡ eq = ...
------------------------------------------------------------------------
-- Inspect on steroids
-- Inspect on steroids can be used when you want to pattern match on
-- the result r of some expression e, and you also need to "remember"
-- that r ≡ e.
data Reveal_is_ {a} {A : Set a} (x : Hidden A) (y : A) : Set a where
[_] : (eq : reveal x ≡ y) → Reveal x is y
inspect : ∀ {a b} {A : Set a} {B : A → Set b}
(f : (x : A) → B x) (x : A) → Reveal (hide f x) is (f x)
inspect f x = [ refl ]
-- Example usage:
-- f x y with g x | inspect g x
-- f x y | c z | [ eq ] = ...
------------------------------------------------------------------------
-- Convenient syntax for equational reasoning
import Relation.Binary.EqReasoning as EqR
-- Relation.Binary.EqReasoning is more convenient to use with _≡_ if
-- the combinators take the type argument (a) as a hidden argument,
-- instead of being locked to a fixed type at module instantiation
-- time.
module ≡-Reasoning where
module _ {a} {A : Set a} where
open EqR (setoid A) public
hiding (_≡⟨_⟩_) renaming (_≈⟨_⟩_ to _≡⟨_⟩_)
infixr 2 _≅⟨_⟩_
_≅⟨_⟩_ : ∀ {a} {A : Set a} (x : A) {y z : A} →
x ≅ y → y IsRelatedTo z → x IsRelatedTo z
_ ≅⟨ x≅y ⟩ y≡z = _ ≡⟨ H.≅-to-≡ x≅y ⟩ y≡z
------------------------------------------------------------------------
-- Functional extensionality
-- If _≡_ were extensional, then the following statement could be
-- proved.
Extensionality : (a b : Level) → Set _
Extensionality a b =
{A : Set a} {B : A → Set b} {f g : (x : A) → B x} →
(∀ x → f x ≡ g x) → f ≡ g
-- If extensionality holds for a given universe level, then it also
-- holds for lower ones.
extensionality-for-lower-levels :
∀ {a₁ b₁} a₂ b₂ →
Extensionality (a₁ ⊔ a₂) (b₁ ⊔ b₂) → Extensionality a₁ b₁
extensionality-for-lower-levels a₂ b₂ ext f≡g =
cong (λ h → lower ∘ h ∘ lift) $
ext (cong (lift {ℓ = b₂}) ∘ f≡g ∘ lower {ℓ = a₂})
-- Functional extensionality implies a form of extensionality for
-- Π-types.
∀-extensionality :
∀ {a b} →
Extensionality a (suc b) →
{A : Set a} (B₁ B₂ : A → Set b) →
(∀ x → B₁ x ≡ B₂ x) → (∀ x → B₁ x) ≡ (∀ x → B₂ x)
∀-extensionality ext B₁ B₂ B₁≡B₂ with ext B₁≡B₂
∀-extensionality ext B .B B₁≡B₂ | refl = refl
|
(*
Author: René Thiemann
Akihisa Yamada
License: BSD
*)
section \<open>Complex Roots of Real Valued Polynomials\<close>
text \<open>We provide conversion functions between polynomials over the real and the complex numbers,
  and prove that the complex roots of a real-valued polynomial always come in conjugate pairs.
  We further show that two conjugate complex roots have the same order.
  As a consequence, we derive that every real-valued polynomial can be factored into real factors of
  degree at most 2, and we prove that every polynomial over the reals with odd degree has a real
  root.\<close>
theory Complex_Roots_Real_Poly
imports
"HOL-Computational_Algebra.Fundamental_Theorem_Algebra"
Polynomial_Factorization.Order_Polynomial
Polynomial_Factorization.Explicit_Roots
Polynomial_Interpolation.Ring_Hom_Poly
begin
interpretation of_real_poly_hom: map_poly_idom_hom complex_of_real..
lemma real_poly_real_coeff: assumes "set (coeffs p) \<subseteq> \<real>"
shows "coeff p x \<in> \<real>"
proof -
have "coeff p x \<in> range (coeff p)" by auto
from this[unfolded range_coeff] assms show ?thesis by auto
qed
lemma complex_conjugate_root:
assumes real: "set (coeffs p) \<subseteq> \<real>" and rt: "poly p c = 0"
shows "poly p (cnj c) = 0"
proof -
let ?c = "cnj c"
{
fix x
have "coeff p x \<in> \<real>"
by (rule real_poly_real_coeff[OF real])
hence "cnj (coeff p x) = coeff p x" by (cases "coeff p x", auto)
} note cnj_coeff = this
have "poly p ?c = poly (\<Sum>x\<le>degree p. monom (coeff p x) x) ?c"
unfolding poly_as_sum_of_monoms ..
also have "\<dots> = (\<Sum>x\<le>degree p . coeff p x * cnj (c ^ x))"
unfolding poly_sum poly_monom complex_cnj_power ..
also have "\<dots> = (\<Sum>x\<le>degree p . cnj (coeff p x * c ^ x))"
unfolding complex_cnj_mult cnj_coeff ..
also have "\<dots> = cnj (\<Sum>x\<le>degree p . coeff p x * c ^ x)"
unfolding cnj_sum ..
also have "(\<Sum>x\<le>degree p . coeff p x * c ^ x) =
poly (\<Sum>x\<le>degree p. monom (coeff p x) x) c"
unfolding poly_sum poly_monom ..
also have "\<dots> = 0" unfolding poly_as_sum_of_monoms rt ..
also have "cnj 0 = 0" by simp
finally show ?thesis .
qed
context
fixes p :: "complex poly"
assumes coeffs: "set (coeffs p) \<subseteq> \<real>"
begin
lemma map_poly_Re_poly: fixes x :: real
shows "poly (map_poly Re p) x = poly p (of_real x)"
proof -
have id: "map_poly (of_real o Re) p = p"
by (rule map_poly_idI, insert coeffs, auto)
show ?thesis unfolding arg_cong[OF id, of poly, symmetric]
by (subst map_poly_map_poly[symmetric], auto)
qed
lemma map_poly_Re_coeffs:
"coeffs (map_poly Re p) = map Re (coeffs p)"
proof (rule coeffs_map_poly)
have "lead_coeff p \<in> range (coeff p)" by auto
hence x: "lead_coeff p \<in> \<real>" using coeffs by (auto simp: range_coeff)
show "(Re (lead_coeff p) = 0) = (p = 0)"
using of_real_Re[OF x] by auto
qed
lemma map_poly_Re_0: "map_poly Re p = 0 \<Longrightarrow> p = 0"
using map_poly_Re_coeffs by auto
end
lemma real_poly_add:
assumes "set (coeffs p) \<subseteq> \<real>" "set (coeffs q) \<subseteq> \<real>"
shows "set (coeffs (p + q)) \<subseteq> \<real>"
proof -
define pp where "pp = coeffs p"
define qq where "qq = coeffs q"
show ?thesis using assms
unfolding coeffs_plus_eq_plus_coeffs pp_def[symmetric] qq_def[symmetric]
by (induct pp qq rule: plus_coeffs.induct, auto simp: cCons_def)
qed
lemma real_poly_sum:
assumes "\<And> x. x \<in> S \<Longrightarrow> set (coeffs (f x)) \<subseteq> \<real>"
shows "set (coeffs (sum f S)) \<subseteq> \<real>"
using assms
proof (induct S rule: infinite_finite_induct)
case (insert x S)
hence id: "sum f (insert x S) = f x + sum f S" by auto
show ?case unfolding id
by (rule real_poly_add[OF _ insert(3)], insert insert, auto)
qed auto
lemma real_poly_smult: fixes p :: "'a :: {idom,real_algebra_1} poly"
assumes "c \<in> \<real>" "set (coeffs p) \<subseteq> \<real>"
shows "set (coeffs (smult c p)) \<subseteq> \<real>"
using assms by (auto simp: coeffs_smult)
lemma real_poly_pCons:
assumes "c \<in> \<real>" "set (coeffs p) \<subseteq> \<real>"
shows "set (coeffs (pCons c p)) \<subseteq> \<real>"
using assms by (auto simp: cCons_def)
lemma real_poly_mult: fixes p :: "'a :: {idom,real_algebra_1} poly"
assumes p: "set (coeffs p) \<subseteq> \<real>" and q: "set (coeffs q) \<subseteq> \<real>"
shows "set (coeffs (p * q)) \<subseteq> \<real>" using p
proof (induct p)
case (pCons a p)
show ?case unfolding mult_pCons_left
by (intro real_poly_add real_poly_smult real_poly_pCons pCons(2) q,
insert pCons(1,3), auto simp: cCons_def if_splits)
qed simp
lemma real_poly_power: fixes p :: "'a :: {idom,real_algebra_1} poly"
assumes p: "set (coeffs p) \<subseteq> \<real>"
shows "set (coeffs (p ^ n)) \<subseteq> \<real>"
proof (induct n)
case (Suc n)
from real_poly_mult[OF p this]
show ?case by simp
qed simp
lemma real_poly_prod: fixes f :: "'a \<Rightarrow> 'b :: {idom,real_algebra_1} poly"
assumes "\<And> x. x \<in> S \<Longrightarrow> set (coeffs (f x)) \<subseteq> \<real>"
shows "set (coeffs (prod f S)) \<subseteq> \<real>"
using assms
proof (induct S rule: infinite_finite_induct)
case (insert x S)
hence id: "prod f (insert x S) = f x * prod f S" by auto
show ?case unfolding id
by (rule real_poly_mult[OF _ insert(3)], insert insert, auto)
qed auto
lemma real_poly_uminus:
assumes "set (coeffs p) \<subseteq> \<real>"
shows "set (coeffs (-p)) \<subseteq> \<real>"
using assms unfolding coeffs_uminus by auto
lemma real_poly_minus:
assumes "set (coeffs p) \<subseteq> \<real>" "set (coeffs q) \<subseteq> \<real>"
shows "set (coeffs (p - q)) \<subseteq> \<real>"
using assms unfolding diff_conv_add_uminus
by (intro real_poly_uminus real_poly_add, auto)
lemma fixes p :: "'a :: real_field poly"
assumes p: "set (coeffs p) \<subseteq> \<real>" and *: "set (coeffs q) \<subseteq> \<real>"
shows real_poly_div: "set (coeffs (q div p)) \<subseteq> \<real>"
and real_poly_mod: "set (coeffs (q mod p)) \<subseteq> \<real>"
proof (atomize(full), insert *, induct q)
case 0
thus ?case by auto
next
case (pCons a q)
from pCons(1,3) have a: "a \<in> \<real>" and q: "set (coeffs q) \<subseteq> \<real>" by auto
note res = pCons
show ?case
proof (cases "p = 0")
case True
with res pCons(3) show ?thesis by auto
next
case False
from pCons have IH: "set (coeffs (q div p)) \<subseteq> \<real>" "set (coeffs (q mod p)) \<subseteq> \<real>" by auto
define c where "c = coeff (pCons a (q mod p)) (degree p) / coeff p (degree p)"
{
have "coeff (pCons a (q mod p)) (degree p) \<in> \<real>"
by (rule real_poly_real_coeff, insert IH a, intro real_poly_pCons)
moreover have "coeff p (degree p) \<in> \<real>"
by (rule real_poly_real_coeff[OF p])
ultimately have "c \<in> \<real>" unfolding c_def by simp
} note c = this
from False
have r: "pCons a q div p = pCons c (q div p)" and s: "pCons a q mod p = pCons a (q mod p) - smult c p"
unfolding c_def div_pCons_eq mod_pCons_eq by simp_all
show ?thesis unfolding r s using a p c IH by (intro conjI real_poly_pCons real_poly_minus real_poly_smult)
qed
qed
lemma real_poly_factor: fixes p :: "'a :: real_field poly"
assumes "set (coeffs (p * q)) \<subseteq> \<real>"
"set (coeffs p) \<subseteq> \<real>"
"p \<noteq> 0"
shows "set (coeffs q) \<subseteq> \<real>"
proof -
have "q = p * q div p" using \<open>p \<noteq> 0\<close> by simp
hence id: "coeffs q = coeffs (p * q div p)" by simp
show ?thesis unfolding id
by (rule real_poly_div, insert assms, auto)
qed
lemma complex_conjugate_order: assumes real: "set (coeffs p) \<subseteq> \<real>"
"p \<noteq> 0"
shows "order (cnj c) p = order c p"
proof -
define n where "n = degree p"
have "degree p \<le> n" unfolding n_def by auto
thus ?thesis using assms
proof (induct n arbitrary: p)
case (0 p)
{
fix x
have "order x p \<le> degree p"
by (rule order_degree[OF 0(3)])
hence "order x p = 0" using 0 by auto
}
thus ?case by simp
next
case (Suc m p)
note order = order[OF \<open>p \<noteq> 0\<close>]
let ?c = "cnj c"
show ?case
proof (cases "poly p c = 0")
case True note rt1 = this
from complex_conjugate_root[OF Suc(3) True]
have rt2: "poly p ?c = 0" .
show ?thesis
proof (cases "c \<in> \<real>")
case True
hence "?c = c" by (cases c, auto)
thus ?thesis by auto
next
case False
hence neq: "?c \<noteq> c" by (simp add: Reals_cnj_iff)
let ?fac1 = "[: -c, 1 :]"
let ?fac2 = "[: -?c, 1 :]"
let ?fac = "?fac1 * ?fac2"
from rt1 have "?fac1 dvd p" unfolding poly_eq_0_iff_dvd .
from this[unfolded dvd_def] obtain q where p: "p = ?fac1 * q" by auto
from rt2[unfolded p poly_mult] neq have "poly q ?c = 0" by auto
hence "?fac2 dvd q" unfolding poly_eq_0_iff_dvd .
from this[unfolded dvd_def] obtain r where q: "q = ?fac2 * r" by auto
have p: "p = ?fac * r" unfolding p q by algebra
from \<open>p \<noteq> 0\<close> have nz: "?fac1 \<noteq> 0" "?fac2 \<noteq> 0" "?fac \<noteq> 0" "r \<noteq> 0" unfolding p by auto
have id: "?fac = [: ?c * c, - (?c + c), 1 :]" by simp
have cfac: "coeffs ?fac = [ ?c * c, - (?c + c), 1 ]" unfolding id by simp
have cfac: "set (coeffs ?fac) \<subseteq> \<real>" unfolding cfac by (cases c, auto simp: Reals_cnj_iff)
have "degree p = degree ?fac + degree r" unfolding p
by (rule degree_mult_eq, insert nz, auto)
also have "degree ?fac = degree ?fac1 + degree ?fac2"
by (rule degree_mult_eq, insert nz, auto)
finally have "degree p = 2 + degree r" by simp
with Suc have deg: "degree r \<le> m" by auto
from real_poly_factor[OF Suc(3)[unfolded p] cfac] nz have "set (coeffs r) \<subseteq> \<real>" by auto
from Suc(1)[OF deg this \<open>r \<noteq> 0\<close>] have IH: "order ?c r = order c r" .
{
fix cc
have "order cc p = order cc ?fac + order cc r" using \<open>p \<noteq> 0\<close> unfolding p
by (rule order_mult)
also have "order cc ?fac = order cc ?fac1 + order cc ?fac2"
by (rule order_mult, rule nz)
also have "order cc ?fac1 = (if cc = c then 1 else 0)"
unfolding order_linear' by simp
also have "order cc ?fac2 = (if cc = ?c then 1 else 0)"
unfolding order_linear' by simp
finally have "order cc p =
(if cc = c then 1 else 0) + (if cc = cnj c then 1 else 0) + order cc r" .
} note order = this
show ?thesis unfolding order IH by auto
qed
next
case False note rt1 = this
{
assume "poly p ?c = 0"
from complex_conjugate_root[OF Suc(3) this] rt1
have False by auto
}
hence rt2: "poly p ?c \<noteq> 0" by auto
from rt1 rt2 show ?thesis
unfolding order_root by simp
qed
qed
qed
lemma map_poly_of_real_Re: assumes "set (coeffs p) \<subseteq> \<real>"
shows "map_poly of_real (map_poly Re p) = p"
by (subst map_poly_map_poly, force+, rule map_poly_idI, insert assms, auto)
lemma map_poly_Re_of_real: "map_poly Re (map_poly of_real p) = p"
by (subst map_poly_map_poly, force+, rule map_poly_idI, auto)
lemma map_poly_Re_mult: assumes p: "set (coeffs p) \<subseteq> \<real>"
and q: "set (coeffs q) \<subseteq> \<real>" shows "map_poly Re (p * q) = map_poly Re p * map_poly Re q"
proof -
let ?r = "map_poly Re"
let ?c = "map_poly complex_of_real"
have "?r (p * q) = ?r (?c (?r p) * ?c (?r q))"
unfolding map_poly_of_real_Re[OF p] map_poly_of_real_Re[OF q] by simp
also have "?c (?r p) * ?c (?r q) = ?c (?r p * ?r q)" by (simp add: hom_distribs)
also have "?r \<dots> = ?r p * ?r q" unfolding map_poly_Re_of_real ..
finally show ?thesis .
qed
lemma map_poly_Re_power: assumes p: "set (coeffs p) \<subseteq> \<real>"
shows "map_poly Re (p^n) = (map_poly Re p)^n"
proof (induct n)
case (Suc n)
let ?r = "map_poly Re"
have "?r (p^Suc n) = ?r (p * p^n)" by simp
also have "\<dots> = ?r p * ?r (p^n)"
by (rule map_poly_Re_mult[OF p real_poly_power[OF p]])
also have "?r (p^n) = (?r p)^n" by (rule Suc)
finally show ?case by simp
qed simp
lemma real_degree_2_factorization_exists_complex: fixes p :: "complex poly"
assumes pR: "set (coeffs p) \<subseteq> \<real>"
shows "\<exists> qs. p = prod_list qs \<and> (\<forall> q \<in> set qs. set (coeffs q) \<subseteq> \<real> \<and> degree q \<le> 2)"
proof -
obtain n where "degree p = n" by auto
thus ?thesis using pR
proof (induct n arbitrary: p rule: less_induct)
case (less n p)
hence pR: "set (coeffs p) \<subseteq> \<real>" by auto
show ?case
proof (cases "n \<le> 2")
case True
thus ?thesis using pR
by (intro exI[of _ "[p]"], insert less(2), auto)
next
case False
hence degp: "degree p \<ge> 2" using less(2) by auto
hence "\<not> constant (poly p)" by (simp add: constant_degree)
from fundamental_theorem_of_algebra[OF this] obtain x where x: "poly p x = 0" by auto
from x have dvd: "[: -x, 1 :] dvd p" using poly_eq_0_iff_dvd by blast
have "\<exists> f. f dvd p \<and> set (coeffs f) \<subseteq> \<real> \<and> 1 \<le> degree f \<and> degree f \<le> 2"
proof (cases "x \<in> \<real>")
case True
with dvd show ?thesis
by (intro exI[of _ "[: -x, 1:]"], auto)
next
case False
let ?x = "cnj x"
let ?a = "?x * x"
let ?b = "- ?x - x"
from complex_conjugate_root[OF pR x]
have xx: "poly p ?x = 0" by auto
from False have diff: "x \<noteq> ?x" by (simp add: Reals_cnj_iff)
from dvd obtain r where p: "p = [: -x, 1 :] * r" unfolding dvd_def by auto
from xx[unfolded this] diff have "poly r ?x = 0" by simp
hence "[: -?x, 1 :] dvd r" using poly_eq_0_iff_dvd by blast
then obtain s where r: "r = [: -?x, 1 :] * s" unfolding dvd_def by auto
have "p = ([: -x, 1:] * [: -?x, 1 :]) * s" unfolding p r by algebra
also have "[: -x, 1:] * [: -?x, 1 :] = [: ?a, ?b, 1 :]" by simp
finally have "[: ?a, ?b, 1 :] dvd p" unfolding dvd_def by auto
moreover have "?a \<in> \<real>" by (simp add: Reals_cnj_iff)
moreover have "?b \<in> \<real>" by (simp add: Reals_cnj_iff)
ultimately show ?thesis by (intro exI[of _ "[:?a,?b,1:]"], auto)
qed
then obtain f where dvd: "f dvd p" and fR: "set (coeffs f) \<subseteq> \<real>" and degf: "1 \<le> degree f" "degree f \<le> 2" by auto
from dvd obtain r where p: "p = f * r" unfolding dvd_def by auto
from degp have p0: "p \<noteq> 0" by auto
with p have f0: "f \<noteq> 0" and r0: "r \<noteq> 0" by auto
from real_poly_factor[OF pR[unfolded p] fR f0] have rR: "set (coeffs r) \<subseteq> \<real>" .
have deg: "degree p = degree f + degree r" unfolding p
by (rule degree_mult_eq[OF f0 r0])
with degf less(2) have degr: "degree r < n" by auto
from less(1)[OF this refl rR] obtain qs
where IH: "r = prod_list qs" "(\<forall>q\<in>set qs. set (coeffs q) \<subseteq> \<real> \<and> degree q \<le> 2)" by auto
from IH(1) have p: "p = prod_list (f # qs)" unfolding p by auto
with IH(2) fR degf show ?thesis
by (intro exI[of _ "f # qs"], auto)
qed
qed
qed
lemma real_degree_2_factorization_exists: fixes p :: "real poly"
shows "\<exists> qs. p = prod_list qs \<and> (\<forall> q \<in> set qs. degree q \<le> 2)"
proof -
let ?cp = "map_poly complex_of_real"
let ?rp = "map_poly Re"
let ?p = "?cp p"
have "set (coeffs ?p) \<subseteq> \<real>" by auto
from real_degree_2_factorization_exists_complex[OF this]
obtain qs where p: "?p = prod_list qs" and
qs: "\<And> q. q \<in> set qs \<Longrightarrow> set (coeffs q) \<subseteq> \<real> \<and> degree q \<le> 2" by auto
have p: "p = ?rp (prod_list qs)" unfolding arg_cong[OF p, of ?rp, symmetric]
by (subst map_poly_map_poly, force, rule sym, rule map_poly_idI, auto)
from qs have "\<exists> rs. prod_list qs = ?cp (prod_list rs) \<and> (\<forall> r \<in> set rs. degree r \<le> 2)"
proof (induct qs)
case Nil
show ?case by (auto intro!: exI[of _ Nil])
next
case (Cons q qs)
then obtain rs where qs: "prod_list qs = ?cp (prod_list rs)"
and rs: "\<And> q. q\<in>set rs \<Longrightarrow> degree q \<le> 2" by force+
from Cons(2)[of q] have q: "set (coeffs q) \<subseteq> \<real>" and dq: "degree q \<le> 2" by auto
define r where "r = ?rp q"
have q: "q = ?cp r" unfolding r_def
by (subst map_poly_map_poly, force, rule sym, rule map_poly_idI, insert q, auto)
have dr: "degree r \<le> 2" using dq unfolding q by (simp add: degree_map_poly)
show ?case
by (rule exI[of _ "r # rs"], unfold prod_list.Cons qs q, insert dr rs, auto simp: hom_distribs)
qed
then obtain rs where id: "prod_list qs = ?cp (prod_list rs)" and deg: "\<forall> r \<in> set rs. degree r \<le> 2" by auto
show ?thesis unfolding p id
by (intro exI, rule conjI[OF _ deg], subst map_poly_map_poly, force, rule map_poly_idI, auto)
qed
lemma odd_degree_imp_real_root: assumes "odd (degree p)"
shows "\<exists> x. poly p x = (0 :: real)"
proof -
from real_degree_2_factorization_exists[of p] obtain qs where
id: "p = prod_list qs" and qs: "\<And> q. q \<in> set qs \<Longrightarrow> degree q \<le> 2" by auto
show ?thesis using assms qs unfolding id
proof (induct qs)
case (Cons q qs)
from Cons(3)[of q] have dq: "degree q \<le> 2" by auto
show ?case
proof (cases "degree q = 1")
case True
from roots1[OF this] show ?thesis by auto
next
case False
with dq have deg: "degree q = 0 \<or> degree q = 2" by arith
from Cons(2) have "q * prod_list qs \<noteq> 0" by fastforce
hence "q \<noteq> 0" "prod_list qs \<noteq> 0" by auto
from degree_mult_eq[OF this]
have "degree (prod_list (q # qs)) = degree q + degree (prod_list qs)" by simp
from Cons(2)[unfolded this] deg have "odd (degree (prod_list qs))" by auto
from Cons(1)[OF this Cons(3)] obtain x where "poly (prod_list qs) x = 0" by auto
thus ?thesis by auto
qed
qed simp
qed
end
|
import cPickle
import gzip
import numpy as np
def load_data():
"""Return the MNIST data as a tuple containing the training data,
the validation data, and the test data.
"""
f = gzip.open('../data/mnist.pkl.gz', 'rb')
training_data, validation_data, test_data = cPickle.load(f)
f.close()
return (training_data, validation_data, test_data)
def load_data_wrapper():
"""Return a tuple containing (training_data, validation_data, test_data)."""
training_data2, validation_data2, test_data2 = load_data()
training_inputs = [np.reshape(x, (784, 1)) for x in training_data2[0]]
training_results = [vectorized_result(y) for y in training_data2[1]]
training_data = zip(training_inputs, training_results)
validation_inputs = [np.reshape(x, (784, 1)) for x in validation_data2[0]]
validation_data = zip(validation_inputs, validation_data2[1])
test_inputs = [np.reshape(x, (784, 1)) for x in test_data2[0]]
test_data = zip(test_inputs, test_data2[1])
return (training_data, validation_data, test_data)
def vectorized_result(j):
"""Return a 10-dimensional unit vector with a 1.0 in the jth
position and zeroes elsewhere. This is used to convert a digit
(0...9) into a corresponding desired output from the neural
network."""
output_result = np.zeros((10, 1))
output_result[j] = 1.0
return output_result
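# --- Usage sketch (illustrative, not part of the original loader) -----------
# A minimal example of how the functions above fit together.  It assumes this
# file is importable and that '../data/mnist.pkl.gz' is present; like the rest
# of the module it targets Python 2 (cPickle, list-returning zip).
if __name__ == "__main__":
    training_data, validation_data, test_data = load_data_wrapper()
    x, y = training_data[0]                      # first (input, label) pair
    print "input shape:", x.shape                # (784, 1)
    print "label shape:", y.shape                # (10, 1) one-hot vector
    print "set sizes:", len(training_data), len(validation_data), len(test_data)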
|
"""Functions for preparing scores."""
from tqdm import tqdm
import scipy.interpolate
import pandas as pd
import numpy as np
from datetime import date, datetime, timedelta
import matplotlib.pyplot as plt
"""Functions for preparing scores."""
from tqdm import tqdm
import scipy.interpolate
import pandas as pd
import numpy as np
from datetime import date, datetime, timedelta
import matplotlib.pyplot as plt
import os
from . import scoresplots, datagrab
def getleaderboard(Scoreboard, WeeksAhead, leaderboardin=None, quiet=False):
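    """Build a leaderboard of past performance for one forecast horizon.

    All forecasts with horizon ``WeeksAhead`` (column ``deltaW``) are grouped by
    model; the median past score and the mean past ranking are computed and
    merged into one frame, which is optionally concatenated to ``leaderboardin``.

    Returns:
        leaderboard (pd.DataFrame): one row per model with median score, mean
        ranking, horizon, forecast type and the as-of date
    """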
Scoreboard4 = Scoreboard[Scoreboard['deltaW']==WeeksAhead].copy()
scoresframe = (Scoreboard4.groupby(['model'],as_index=False)[['score']].agg(lambda x: np.median(x))).sort_values(by=['score'], ascending=False)
scoresframe.reset_index(inplace=True,drop=True)
scoresframe = scoresframe.rename(columns={'score':'median of past scores'})
ranksframe = (Scoreboard4.groupby(['model'],as_index=False)[['rank']].agg(lambda x: np.mean(x))).sort_values(by=['rank'], ascending=True)
ranksframe.reset_index(inplace=True,drop=True)
ranksframe = ranksframe.rename(columns={'rank':'average of past rankings'})
leaderboard = scoresframe.merge(ranksframe, left_on=['model'], right_on=['model']).copy()
leaderboard['deltaW'] = WeeksAhead
auxstr = ' as of ' + Scoreboard['target_end_date'].max().strftime('%Y-%m-%d')
if 'cases' in Scoreboard.columns:
if not quiet:
            print('Leaderboard for ' + str(WeeksAhead) + '-week-ahead weekly incident case forecasts' + auxstr)
leaderboard['forecasttype'] = 'cases'
else:
if not quiet:
print('Leaderboard for ' + str(WeeksAhead) + '-week-ahead cumulative deaths forecasts' + auxstr)
leaderboard['forecasttype'] = 'deaths'
leaderboard['asofdate'] = Scoreboard['target_end_date'].max().strftime('%Y-%m-%d')
if leaderboardin:
leaderboard = pd.concat([leaderboardin, leaderboard], sort=False)
return leaderboard
def giveweightsformodels(Scoreboardx, datepred, weekcut):
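    """Collect past performance of the models available at a cut-off date.

    Only forecasts with horizon ``weekcut`` whose target_end_date lies before
    ``datepred`` minus (weekcut-1) weeks are used, so the resulting weights are
    based solely on information available when the ensemble is formed.

    Returns:
        (scoresframe, listofavailablemodels, Scoreboardearly): per-model median
        past scores, the models seen so far, and the restricted scoreboard
    """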
#str datecut e.g. '2020-07-01'
#Make sure we take only one prediction per model
datecut = datetime.strptime(datepred,'%Y-%m-%d') - timedelta(days=(weekcut-1)*7)
Scoreboard = Scoreboardx[Scoreboardx['deltaW']==weekcut].copy()
Scoreboardearly = Scoreboard[Scoreboard['target_end_date']<datecut].copy()
#Scoreboardearly.dropna(subset=['score'],inplace=True)
Scoreboardearly.reset_index(inplace=True)
listofavailablemodels = Scoreboardearly['model'].unique().tolist()
scoresframe = (Scoreboardearly.groupby(['model'],as_index=False)[['score']].agg(lambda x: np.median(x))).sort_values(by=['score'], ascending=False)
scoresframe.reset_index(inplace=True,drop=True)
scoresframe = scoresframe.rename(columns={'score':'pastscores'})
# ranksframe = (Scoreboardearly.groupby(['model'],as_index=False)[['rank']].agg(lambda x: np.mean(x))).sort_values(by=['rank'], ascending=False)
# ranksframe.reset_index(inplace=True,drop=True)
# ranksframe = ranksframe.rename(columns={'rank':'pastranks'})
return (scoresframe,listofavailablemodels,Scoreboardearly)
def getscoresforweightedmodels(Scoreboardx,datepred,weekcut,case,runtype):
"""Generates all model weighted/unweighted ensembles
Args:
scoreboardx (pd.DataFrame): The scoreboard
datepred (str): Start date on which first ensemble will be formed
case (str): 'Case' or 'Death'
weekcut (int): number of weeks ahead forecast ensemble formation
runtype (str): weighted or unweighted ensemble
Returns:
scoreboard (pd.DataFrame): scoreboard with the added ensemble for nwk
"""
#str datecut e.g. '2020-07-01'
#Make sure we take only one prediction per model
Scoreboard = Scoreboardx.copy()
datepredindate = datetime.strptime(datepred,'%Y-%m-%d')
datecut = datepredindate - timedelta(days=(weekcut-1)*7)
[scoresframe,listofavailablemodels,Scoreboardearly] = giveweightsformodels(Scoreboard,datepred,weekcut)
predday = Scoreboard[(Scoreboard['target_end_date']==datepred)&(Scoreboard['deltaW']==weekcut)].copy()
# #We exclude COVIDhub:ensemble from our own ensemble as we know it is an ensemble of the models here
# predday.drop(predday[predday['model'] == 'COVIDhub:ensemble'].index, inplace = True)
predday.drop(predday[predday['model'] == 'FDANIH:Sweight'].index, inplace = True)
predday.drop(predday[predday['model'] == 'FDANIH:Sunweight'].index, inplace = True)
preddaymerged = predday.merge(scoresframe, left_on=['model'], right_on=['model']).copy()
#preddaymerged = tempframe.merge(ranksframe, left_on=['model'], right_on=['model']).copy()
if runtype=='weighted':
preddaymerged['weights'] = np.exp(preddaymerged['pastscores'].astype(np.float64)/2)
modelname='FDANIH:Sweight'
elif runtype=='unweighted':
preddaymerged['weights'] = 1
modelname='FDANIH:Sunweight'
sumweights = preddaymerged['weights'].sum()
preddaymerged['weights'] = preddaymerged['weights']/sumweights
if preddaymerged.empty:
print('DataFrame is empty!')
print(datepred)
else:
(qso,vso) = givescoreweightedforecast(preddaymerged,case)
#plt.plot(qso,vso)
if case=='Cases':
new_row = {'model':modelname,
'target_end_date':datepredindate,
'forecast_date':datecut,
'delta':weekcut*7,
'deltaW':weekcut,
'proper':True,
'quantile':qso,
'value':vso,
'CILO':min(vso),
'PE':np.median(vso),
'CIHI':max(vso),
'cases':Scoreboard[(Scoreboard['target_end_date']==datepred)]['cases'].mean()}
elif case=='Deaths':
new_row = {'model':modelname,
'target_end_date':datepredindate,
'forecast_date':datecut,
'delta':weekcut*7,
'deltaW':weekcut,
'proper':True,
'quantile':qso,
'value':vso,
'CILO':min(vso),
'PE':np.median(vso),
'CIHI':max(vso),
'deaths':Scoreboard[(Scoreboard['target_end_date']==datepred)]['deaths'].mean()}
Scoreboard = Scoreboard.append(new_row, ignore_index=True)
Index = len(Scoreboard)-1
result = giveqandscore(Scoreboard,Index)
Scoreboard.iloc[Index, Scoreboard.columns.get_loc('score')] = result[0]
Scoreboard.iloc[Index, Scoreboard.columns.get_loc('sumpdf')] = result[1]
Scoreboard.iloc[Index, Scoreboard.columns.get_loc('prange')] = result[2]
Scoreboard.iloc[Index, Scoreboard.columns.get_loc('p')] = result[3]
#leaderboard = preddaymerged[['model', 'deltaW', 'pastscores', 'pastranks','weights']].copy()
#leaderboard['datecut'] = datecut
#leaderboard['target_end_date'] = datepred
return Scoreboard
def givescoreweightedforecast(Scoreboardx,case):
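    """Combine the quantile values of several models into one ensemble forecast.

    For every quantile in the standard list for ``case`` ('Cases' or 'Deaths'),
    the ensemble value is the sum over models of ``weights`` times the model's
    value at that quantile.

    Returns:
        (qso, vso): the quantile levels and the combined forecast values
    """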
Scoreboard = Scoreboardx.copy()
if case=='Cases':
mylist = [0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975]
elif case=='Deaths':
mylist = [0.01,0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4,
0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 0.975, 0.99]
vso = [0] * len(mylist)
for Index in range(0,len(Scoreboard)):
qs = Scoreboard.iloc[Index, Scoreboard.columns.get_loc('quantile')]
vs = Scoreboard.iloc[Index, Scoreboard.columns.get_loc('value')]
wmodel = Scoreboard.iloc[Index, Scoreboard.columns.get_loc('weights')]
for i in mylist:
loc = qs.index(i)
vso[loc] += wmodel * vs[loc]
qso = mylist
return (qso,vso)
def getweightedmodelalldates(scoreboardx, startdate, case, nwk, runtype):
"""Generates all model weighted/unweighted ensembles for an nwk
Args:
scoreboardx (pd.DataFrame): The scoreboard
startdate (str): Start date on which first ensemble will be formed
case (str): 'Case' or 'Death'
nwk (int): number of weeks ahead forecast ensemble formation
runtype (str): weighted or unweighted ensemble
Returns:
scoreboard (pd.DataFrame): scoreboard with the added ensemble for nwk
"""
#e.g. startdate '2020-08-01'
#case e.g. Cases or Deaths
scoreboard = scoreboardx.copy()
#cumleaderboard = pd.DataFrame(columns = ['model', 'deltaW', 'pastscores', 'pastranks','weights', 'datecut', 'target_end_date'])
daterange = pd.date_range(start=startdate, end=pd.to_datetime('today'),freq='W-SAT')
for datepred in daterange:
#(scoreboard,leaderboard) = getscoresforweightedmodels(scoreboard,datepred.strftime('%Y-%m-%d'),nwk,case,runtype)
#cumleaderboard = pd.concat([cumleaderboard, leaderboard], sort=False)
scoreboard = getscoresforweightedmodels(scoreboard,datepred.strftime('%Y-%m-%d'),nwk,case,runtype)
#cumleaderboard.reset_index(inplace=True,drop=True)
return scoreboard
def getscoreboard(groundtruth,model_target,otpfile='ScoreboardDataCases.pkl')-> pd.DataFrame:
"""Generates primary scores for all model competition entries
Args:
groundtruth (pd.DataFrame): The observed data
model_target (str): 'Case' or 'Death'
otpfile (str): Name of the scoreboard .pkl output file
Returns:
FirstForecasts (pd.DataFrame): check the forecast upload chronology
"""
model_targets = ['Case', 'Death']
if model_target not in model_targets:
        raise ValueError("Invalid model target. Expected one of: %s" % model_targets)
#Read the predictions file
dfPREDx = pd.read_csv('../Data/all_dataONY.csv',
na_values = ['NA', 'no info', '.'], parse_dates=True)
dfPREDx.drop_duplicates(subset=None, keep = 'first', inplace = True)
#Get the chronology of team entries to the competition
FirstForecasts = dfPREDx.sort_values('forecast_date').drop_duplicates(subset=['team'], keep='first').copy()
FirstForecasts['teamexist'] = 1
FirstForecasts['cumnumteams'] = FirstForecasts['teamexist'].cumsum()
if model_target == 'Case':
dfPRED = dfPREDx[dfPREDx['target'].str.contains('inc case')].copy()
elif model_target == 'Death':
dfPRED = dfPREDx[dfPREDx['target'].str.contains('cum death')].copy()
dfPRED.reset_index(inplace=True)
#New dataframe with merged values - this forms a single forecast unit (quantiles&corresponding values)
MerdfPRED = dfPRED.copy()
MerdfPRED = (MerdfPRED.groupby(['team','model','forecast_date','target_end_date'],
as_index=False)[['quantile','value']].agg(lambda x: list(x)))
#Develop the ultimate scoreboard including the corresponding observed data
MerdfPRED['target_end_date'] = pd.to_datetime(MerdfPRED['target_end_date'])
groundtruth['DateObserved'] = pd.to_datetime(groundtruth['DateObserved'])
Scoreboard = (MerdfPRED.merge(groundtruth, left_on=['target_end_date'], right_on=['DateObserved'])).copy()
Scoreboard.drop(columns=['DateObserved'],inplace=True)
if 'Cases' in Scoreboard.columns:
Scoreboard.rename(columns={'Cases':'cases'},inplace=True)
mylist = [0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975]
if 'Deaths' in Scoreboard.columns:
Scoreboard.rename(columns={'Deaths':'deaths'},inplace=True)
mylist = [0.01,0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4,
0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 0.975, 0.99]
Scoreboard['target_end_date']= pd.to_datetime(Scoreboard['target_end_date'])
Scoreboard['forecast_date']= pd.to_datetime(Scoreboard['forecast_date'])
Scoreboard.insert(3,'delta',(Scoreboard.target_end_date-Scoreboard.forecast_date).dt.days)
new = Scoreboard['model'].copy()
Scoreboard['team']= Scoreboard['team'].str.cat(new, sep =":")
Scoreboard.drop(columns=['model'],inplace=True)
Scoreboard.rename(columns={'team':'model'},inplace=True)
Scoreboard['deltaW'] = np.ceil(Scoreboard.delta/7)
Scoreboard['proper'] = ''
for Index in tqdm(range(0,len(Scoreboard))):
modellist = Scoreboard['quantile'].iloc[Index]
proper = all(item in modellist for item in mylist)
Scoreboard.iloc[Index, Scoreboard.columns.get_loc('proper')] = proper
#Calculate the scores and merge those with the Scoreboard dataframe
Scoreboard['score'] = ''
Scoreboard['sumpdf'] = ''
Scoreboard['prange'] = ''
Scoreboard['p'] = ''
for Index in tqdm(range(0,len(Scoreboard))):
result = giveqandscore(Scoreboard,Index)
Scoreboard.iloc[Index, Scoreboard.columns.get_loc('score')] = result[0]
Scoreboard.iloc[Index, Scoreboard.columns.get_loc('sumpdf')] = result[1]
Scoreboard.iloc[Index, Scoreboard.columns.get_loc('prange')] = result[2]
Scoreboard.iloc[Index, Scoreboard.columns.get_loc('p')] = result[3]
Scoreboard['CIHI']=pd.DataFrame(Scoreboard['value'].to_list()).max(axis=1)
Scoreboard['CILO']=pd.DataFrame(Scoreboard['value'].to_list()).min(axis=1)
Scoreboard['PE']=pd.DataFrame(Scoreboard['value'].to_list()).median(axis=1)
#Scoreboard.replace([np.inf, -np.inf], np.nan,inplace=True)
#Scoreboard.dropna(inplace=True)
Scoreboardx = Scoreboard.sort_values('forecast_date').drop_duplicates(subset=['model', 'target_end_date','deltaW'], keep='last').copy()
Scoreboardx.reset_index(drop=True,inplace=True)
Scoreboardx['scorecontr']=np.exp(Scoreboardx['score'].astype(np.float64)/2)
Scoreboardx.to_pickle(otpfile)
return FirstForecasts
def cdfpdf(df,Index,dV,withplot: bool = False, figuresdirectory: str = ''):
'''Get pdf from cdf using Scoreboard dataset.
Args:
df (pandas pd): Scoreboard dataframe
Index (int): Scoreboard Row index selection (model, forecast date,
target date, quantiles and values)
dV (int): x-axis grid point delta
withplot (bool, optional): If True, plot cdf and pdf. Defaults to False.
Returns:
xout (int array): x-axis values
pdfout (float array): pdf at xout
sum(pdfout) (float): integrated version of calculated pdf
max(cdf)-min(cdf) (float): the cdf - sum(pdfout) should be close to max(cdf)-min(cdf)
'''
#Get quantiles and values from the dataset
mydf = pd.DataFrame(list(zip(df['quantile'].iloc[Index], df['value'].iloc[Index])),
columns =['cdf', 'dp'])
#If duplicate forecasts exist for any particular day, then use the average
mydf = mydf.groupby('cdf', as_index=False).mean()
mydf.sort_values(by=['cdf'],inplace=True)
cdf = mydf.cdf.to_numpy()
dp = mydf.dp.to_numpy().round() #number of cases or deaths
#create grid x-axis
dpgrid = np.arange(np.round(min(dp))+0.5,np.round(max(dp))+0.5,1)
if len(dpgrid)<3:
#Some predictions have an extremely sharp - impulse-like distributions
xout=[np.nan,np.round(min(dp))]
pdfout=[np.nan,(max(cdf)-min(cdf))]
sumpdfout = (max(cdf)-min(cdf))
else:
#Take care of CASE:~dirac/discontinuous CDF
u, c = np.unique(dp, return_counts=True)
dup = u[c > 1]
while len(dup)>0:
for i in range(len(dup)):
dupindex = np.where(dp==dup[i])
if len(dupindex[0])>0:
for i in range(len(dupindex[0])):
dp[dupindex[0][i]] = dp[dupindex[0][i]]-(len(dupindex[0])-1)+(2*i)
u, c = np.unique(dp, return_counts=True)
dup = u[c > 1]
dp = np.sort(dp)
#recreate grid
dpgrid = np.arange(np.round(min(dp))+0.5,np.round(max(dp))+0.5,1)
#Do PCHIP interpolation
pchip_obj1 = scipy.interpolate.PchipInterpolator(dp, cdf)
#Get PDF based on the PCHIPed cdf
pdf2 = np.gradient(np.array(pchip_obj1(dpgrid), dtype=float),
np.array(dpgrid, dtype=float))
#Get the integer values xout and corresponding pdf
N=2
xout=np.convolve(dpgrid, np.ones((N,))/N, mode='valid')
pdfout=np.convolve(pdf2, np.ones((N,))/N, mode='valid')
sumpdfout=sum(pdfout)
if withplot==True:
plt.rc('xtick', labelsize=16) # fontsize of the tick labels
plt.rc('ytick', labelsize=16) # fontsize of the tick labels
print(df['model'].iloc[Index])
#Linear Interpolation for CDF based on dpgrid
probgrid = np.interp(dpgrid, dp, cdf)
#PDF based on linear interpolation
pdf = np.gradient(np.array(probgrid, dtype=float),
np.array(dpgrid, dtype=float))
if 'deaths' in df.columns:
xlab = 'Cumulative Deaths'
actual = df['deaths'].iloc[Index]
else:
            xlab = 'Weekly Incident Cases'
actual = df['cases'].iloc[Index]
#Start figure
plt.figure(num=None, figsize=(8, 12), dpi=80, facecolor='w', edgecolor='k')
plt.subplot(2, 1, 1)
plt.scatter(dp, cdf, s=80, facecolors='none', edgecolors='y')
plt.scatter(actual, 0.0, s=80, facecolors='k', edgecolors='k', marker="^")
plt.plot(dpgrid, probgrid, 'r--')
plt.plot(dpgrid,pchip_obj1(dpgrid), 'g--')
#plt.xlabel(xlab, fontsize=20)
plt.ylabel('CDF', fontsize=20)
plt.xticks(rotation=45)
#----------#
plt.subplot(2, 1, 2)
plt.plot(dpgrid, pdf, 'g', label='Linear Interpolation')
plt.plot(dpgrid, pdf2, 'r', label='Monotone Piecewise Cubic Interpolation')
plt.legend(loc='best', prop={"size":14})
plt.xlabel(xlab, fontsize=20)
plt.ylabel('PDF', fontsize=20)
plt.xticks(rotation=45)
plt.savefig(figuresdirectory+'/exampleconversion.svg',
bbox_inches = 'tight',
dpi=300)
return (xout,pdfout,sum(pdfout),max(cdf)-min(cdf))
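# --- Standalone illustration of the quantile-to-PDF step used in cdfpdf -----
# A minimal sketch on toy numbers; it mirrors the PCHIP + np.gradient approach
# above but is not called anywhere in this module.
def _quantiles_to_pdf_sketch(quantiles, values):
    """Turn (quantile, value) pairs of a forecast CDF into an approximate PDF."""
    quantiles = np.asarray(quantiles, dtype=float)
    values = np.asarray(values, dtype=float)            # must be strictly increasing
    grid = np.arange(np.round(values.min()) + 0.5, np.round(values.max()) + 0.5, 1.0)
    cdf_interp = scipy.interpolate.PchipInterpolator(values, quantiles)  # monotone CDF
    pdf = np.gradient(cdf_interp(grid), grid)           # numerical derivative of the CDF
    return grid, pdf
# Toy example: _quantiles_to_pdf_sketch([0.025, 0.25, 0.5, 0.75, 0.975],
#                                       [100, 140, 160, 185, 240])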
def giveqandscore(df,Index) -> tuple:
'''Give score for a model.
Args:
df (pandas pd): Scoreboard dataframe
Index (int): Scoreboard Row index selection (model, forecast date,
target date, quantiles and values)
Returns: -> tuple
thescore (float): x-axis values
sumpdfout (float): integrated version of calculated pdf
prange (float): the cdf - sum(pdfout), should be close to max(cdf)-min(cdf)
p (float): probability of actual data given model cdf
'''
(xout,pdfout,sumpdfout,prange)=cdfpdf(df,Index,1)
if 'cases' in df.columns:
actual = df['cases'].iloc[Index]
if 'deaths' in df.columns:
actual = df['deaths'].iloc[Index]
indexofactual = np.where(xout == actual)
if indexofactual[0].size == 0:
p = 0
thescore = np.NINF
else:
p = pdfout[indexofactual][0]
thescore = 2*np.log(p)+1+np.log(actual)+np.log(2*np.pi)
return (thescore, sumpdfout, prange, p, xout,pdfout)
def givePivotScoreFORECAST(Scoreboard,modeltypes) -> tuple:
'''Give pivot table and averaged scoreboard with modeltypes.
Pivot around forecast_date.
Args:
Scoreboard (pandas pd): Scoreboard dataframe
modeltypes (int): Scoreboard Row index selection (model, forecast date,
target date, quantiles and values)
Returns: -> tuple
MerdfPRED (pandas pd): merged scoreboard on model scores vs delta in days
pivMerdfPRED (pandas pd): pivoted MerdfPRED around forecast_date
'''
#Drop predictions from the same groups that were made on the same exact date and only
#take the final prediction
Scoreboardxx = Scoreboard.sort_values('forecast_date').drop_duplicates(subset=['model',
'target_end_date'], keep='last').copy()
MerdfPRED = (Scoreboardxx.merge(modeltypes, on=['model'])).copy()
MerdfPRED.replace([np.inf, -np.inf], np.nan,inplace=True)
MerdfPRED = (MerdfPRED.groupby(['model','modeltype','forecast_date'],
as_index=False)[['delta','score']].agg(lambda x: list(x)))
#MerdfPRED.dropna(subset=['score'],inplace=True)
MerdfPRED['median'] = MerdfPRED.apply(lambda row : np.median(row['score']), axis = 1)
MerdfPRED['nanstd'] = MerdfPRED.apply(lambda row : np.nanstd(row['score']), axis = 1)
pivMerdfPRED = MerdfPRED.pivot(index='forecast_date', columns='model', values='median')
return (MerdfPRED,pivMerdfPRED)
def givePivotScoreTARGET(Scoreboard,modeltypes) -> tuple:
'''Give pivot table and averaged scoreboard with modeltypes.
Pivot around target_end_date.
Args:
Scoreboard (pandas pd): Scoreboard dataframe
modeltypes (int): Scoreboard Row index selection (model, forecast date,
target date, quantiles and values)
Returns: -> tuple
MerdfPRED (pandas pd): merged scoreboard on model scores vs delta in days
pivMerdfPRED (pandas pd): pivoted MerdfPRED around target_end_date
'''
MerdfPRED = (Scoreboard.merge(modeltypes, on=['model'])).copy()
MerdfPRED.replace([np.inf, -np.inf], np.nan,inplace=True)
MerdfPRED = (MerdfPRED.groupby(['model','modeltype','target_end_date'],
as_index=False)[['delta','score']].agg(lambda x: list(x)))
#MerdfPRED.dropna(subset=['score'],inplace=True)
MerdfPRED['median'] = MerdfPRED.apply(lambda row : np.median(row['score']), axis = 1)
MerdfPRED['nanstd'] = MerdfPRED.apply(lambda row : np.nanstd(row['score']), axis = 1)
pivMerdfPRED = MerdfPRED.pivot(index='target_end_date', columns='model', values='median')
return (MerdfPRED,pivMerdfPRED)
def fix_scoreboard(scoreboard, kind='Case', quiet=False, plot=True):
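    """Clean the scoreboard and attach weekly rankings.

    Forecasts that do not provide the full set of required quantiles
    (``proper`` == False) are dropped, diagnostic plots are optionally drawn,
    and a ``rank`` column is added where rank 1 is the best (highest) score
    within each (target_end_date, deltaW) group.

    Returns:
        scoreboard (pd.DataFrame): the cleaned scoreboard with a ``rank`` column
    """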
#Eliminate scores that do not have the proper score quantiles
delete_row = scoreboard[scoreboard["proper"]==False].index
scoreboard.drop(delete_row,inplace=True)
scoreboard.reset_index(drop=True, inplace=True)
if plot:
scoresplots.plotdifferencescdfpdf(scoreboard, kind, quiet=quiet)
modeltypesCases = datagrab.getmodeltypes(scoreboard, quiet=quiet)
#Get the weekly forecast score rankings
grouped = scoreboard.groupby(['target_end_date','deltaW'])
scoreboard['rank'] = grouped['score'].transform(lambda x: pd.factorize(-x, sort=True)[0]+1)
return scoreboard
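# --- Usage sketch (illustrative only) ----------------------------------------
# A hedged example of how the functions above are typically chained.  The toy
# ground-truth frame, its column names and the file names are assumptions made
# for illustration; they are not part of this module, getscoreboard still needs
# the forecast CSV '../Data/all_dataONY.csv', and the relative imports mean the
# module has to be run from within its package (e.g. with `python -m ...`).
if __name__ == "__main__":
    groundtruth = pd.DataFrame({
        'DateObserved': pd.date_range('2020-07-04', periods=8, freq='W-SAT'),
        'Cases': np.arange(8) * 1000 + 20000,   # placeholder numbers, not real data
    })
    getscoreboard(groundtruth, 'Case', otpfile='ScoreboardDataCases.pkl')
    scoreboard = pd.read_pickle('ScoreboardDataCases.pkl')
    scoreboard = fix_scoreboard(scoreboard, kind='Case', quiet=True, plot=False)
    print(getleaderboard(scoreboard, WeeksAhead=4, quiet=True).head())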
|
/* filter/impulse.c
*
* Impulse detecting filters
*
* Copyright (C) 2018 Patrick Alken
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <config.h>
#include <stdlib.h>
#include <math.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_filter.h>
static int filter_impulse(const double scale, const double epsilon, const double t, const gsl_vector * x, const gsl_vector * xmedian,
gsl_vector * y, gsl_vector * xsigma, size_t * noutlier, gsl_vector_int * ioutlier);
/*
gsl_filter_impulse_alloc()
Allocate a workspace for impulse detection filtering.
Inputs: K - number of samples in window; if even, it is rounded up to
the next odd, to have a symmetric window
Return: pointer to workspace
*/
gsl_filter_impulse_workspace *
gsl_filter_impulse_alloc(const size_t K)
{
gsl_filter_impulse_workspace *w;
w = calloc(1, sizeof(gsl_filter_impulse_workspace));
if (w == 0)
{
GSL_ERROR_NULL ("failed to allocate space for workspace", GSL_ENOMEM);
}
w->movstat_workspace_p = gsl_movstat_alloc(K);
if (w->movstat_workspace_p == 0)
{
gsl_filter_impulse_free(w);
return NULL;
}
return w;
}
void
gsl_filter_impulse_free(gsl_filter_impulse_workspace * w)
{
if (w->movstat_workspace_p)
gsl_movstat_free(w->movstat_workspace_p);
free(w);
}
/*
gsl_filter_impulse()
Apply an impulse detection filter to an input vector. The filter output is
y_i = { x_i, |x_i - m_i| <= t * S_i
{ m_i, |x_i - m_i| > t * S_i
where m_i is the median of the window W_i^H and S_i is the scale estimate (MAD, IQR, S_n, Q_n)
Inputs: endtype - how to handle signal end points
scale_type - which statistic to use for scale estimate (MAD, IQR, etc)
        t          - number of standard deviations required to identify outliers (>= 0)
x - input vector, size n
y - (output) filtered vector, size n
xmedian - (output) vector of median values of x, size n
xmedian_i = median of window centered on x_i
xsigma - (output) vector of estimated local standard deviations of x, size n
xsigma_i = sigma for i-th window: scale*MAD
noutlier - (output) number of outliers detected
ioutlier - (output) int array indicating outliers identified, size n; may be NULL
ioutlier_i = 1 if outlier detected, 0 if not
w - workspace
Notes:
*/
int
gsl_filter_impulse(const gsl_filter_end_t endtype, const gsl_filter_scale_t scale_type, const double t,
const gsl_vector * x, gsl_vector * y, gsl_vector * xmedian, gsl_vector * xsigma, size_t * noutlier,
gsl_vector_int * ioutlier, gsl_filter_impulse_workspace * w)
{
const size_t n = x->size;
if (n != y->size)
{
GSL_ERROR("input and output vectors must have same length", GSL_EBADLEN);
}
else if (xmedian->size != n)
{
GSL_ERROR("xmedian vector must match input size", GSL_EBADLEN);
}
else if (xsigma->size != n)
{
GSL_ERROR("xsigma vector must match input size", GSL_EBADLEN);
}
else if ((ioutlier != NULL) && (ioutlier->size != n))
{
GSL_ERROR("ioutlier vector must match input size", GSL_EBADLEN);
}
else if (t < 0.0)
{
GSL_ERROR("t must be non-negative", GSL_EDOM);
}
else
{
int status;
double scale = 1.0;
switch (scale_type)
{
case GSL_FILTER_SCALE_MAD:
{
/* compute window medians and MADs */
gsl_movstat_mad(endtype, x, xmedian, xsigma, w->movstat_workspace_p);
break;
}
case GSL_FILTER_SCALE_IQR:
{
/* multiplication factor for IQR to estimate stddev for Gaussian signal */
scale = 0.741301109252801;
/* calculate the window medians */
gsl_movstat_median(endtype, x, xmedian, w->movstat_workspace_p);
/* calculate window IQRs */
gsl_movstat_qqr(endtype, x, 0.25, xsigma, w->movstat_workspace_p);
break;
}
case GSL_FILTER_SCALE_SN:
{
/* calculate the window medians */
gsl_movstat_median(endtype, x, xmedian, w->movstat_workspace_p);
/* calculate window S_n values */
gsl_movstat_Sn(endtype, x, xsigma, w->movstat_workspace_p);
break;
}
case GSL_FILTER_SCALE_QN:
{
/* calculate the window medians */
gsl_movstat_median(endtype, x, xmedian, w->movstat_workspace_p);
/* calculate window Q_n values */
gsl_movstat_Qn(endtype, x, xsigma, w->movstat_workspace_p);
break;
}
default:
GSL_ERROR("unknown scale type", GSL_EDOM);
break;
}
/* apply impulse detecting filter using previously computed scale estimate */
status = filter_impulse(scale, 0.0, t, x, xmedian, y, xsigma, noutlier, ioutlier);
return status;
}
}
/*
filter_impulse()
Apply an impulse detection filter to an input vector. The filter output is
y_i = { x_i, |x_i - m_i| <= t * S_i OR S_i < epsilon
{ m_i, |x_i - m_i| > t * S_i
where m_i is the median of the window W_i^H and S_i is the scale estimate (MAD, IQR, etc)
Inputs: scale - scale factor to multiply xsigma to get unbiased estimate of stddev for Gaussian data
epsilon - minimum allowed scale estimate for identifying outliers
        t        - number of standard deviations required to identify outliers (>= 0)
x - input vector, size n
xmedian - vector of median values of x, size n
xmedian_i = median of window centered on x_i
y - (output) filtered vector, size n
xsigma - (output) vector of estimated local standard deviations of x, size n
xsigma_i = S_n for i-th window
noutlier - (output) number of outliers detected
ioutlier - (output) int array indicating outliers identified, size n; may be NULL
ioutlier_i = 1 if outlier detected, 0 if not
Notes:
1) If S_i = 0 or is very small for a particular sample, then the filter may erroneously flag the
sample as an outlier, since it will act as a standard median filter. To avoid this scenario, the
parameter epsilon specifies the minimum value of S_i which can be used in the filter test. Any
samples for which S_i < epsilon are passed through unchanged.
*/
static int
filter_impulse(const double scale, const double epsilon, const double t, const gsl_vector * x, const gsl_vector * xmedian,
gsl_vector * y, gsl_vector * xsigma, size_t * noutlier, gsl_vector_int * ioutlier)
{
const size_t n = x->size;
if (n != y->size)
{
GSL_ERROR("input and output vectors must have same length", GSL_EBADLEN);
}
else if (xmedian->size != n)
{
GSL_ERROR("xmedian vector must match input size", GSL_EBADLEN);
}
else if (xsigma->size != n)
{
GSL_ERROR("xsigma vector must match input size", GSL_EBADLEN);
}
else if ((ioutlier != NULL) && (ioutlier->size != n))
{
GSL_ERROR("ioutlier vector must match input size", GSL_EBADLEN);
}
else if (t < 0.0)
{
GSL_ERROR("t must be non-negative", GSL_EDOM);
}
else
{
size_t i;
*noutlier = 0;
/* build output vector */
for (i = 0; i < n; ++i)
{
double xi = gsl_vector_get(x, i);
double xmedi = gsl_vector_get(xmedian, i);
double absdevi = fabs(xi - xmedi); /* absolute deviation for this sample */
double *xsigmai = gsl_vector_ptr(xsigma, i);
/* multiply by scale factor to get estimate of standard deviation */
*xsigmai *= scale;
/*
* If the absolute deviation for this sample is more than t stddevs
* for this window (and S_i is sufficiently large to avoid scale implosion),
* set the output value to the window median, otherwise use the original sample
*/
if ((*xsigmai >= epsilon) && (absdevi > t * (*xsigmai)))
{
gsl_vector_set(y, i, xmedi);
++(*noutlier);
if (ioutlier)
gsl_vector_int_set(ioutlier, i, 1);
}
else
{
gsl_vector_set(y, i, xi);
if (ioutlier)
gsl_vector_int_set(ioutlier, i, 0);
}
}
return GSL_SUCCESS;
}
}
|
# SIR model
## Infection dynamics (continuous)
$$
\begin{align}
\dfrac{\mathrm{d} S(t)}{\mathrm{d}t} &= - \beta S(t)I(t)
\\
\dfrac{\mathrm{d} I(t)}{\mathrm{d}t} &= (\beta S(t) - \gamma)I(t)
\\
\dfrac{\mathrm{d} R(t)}{\mathrm{d}t} &= \gamma I(t)
\end{align}
$$
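As a cross-check that is independent of the `SIR_Solver` module used below, the continuous system can be integrated directly with SciPy. A minimal sketch; the parameter values (beta = 2, gamma = 1, i.e. R0 = 2) and the initial state S = 0.999, I = 0.001 are assumptions chosen to match the later cells.
```python
import numpy as np
from scipy.integrate import solve_ivp

beta, gamma = 2.0, 1.0  # assumed: R0 = beta/gamma = 2, gamma = 1

def sir_rhs(t, y):
    S, I, R = y
    return [-beta * S * I, (beta * S - gamma) * I, gamma * I]

sol = solve_ivp(sir_rhs, (0, 20), [0.999, 0.001, 0.0],
                t_eval=np.linspace(0, 20, 201))
S_t, I_t, R_t = sol.y  # trajectories on the evaluation grid
```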
## Infection dynamics (discrete)
$$
\begin{align}
S(t+\Delta t) &= S(t) \mathrm{e}^{-\beta \Delta t I(t)}
\\
I(t+\Delta t) &= I(t)+ S(t) (1-\mathrm{e}^{-\beta \Delta t I(t)}) - \gamma \Delta t I(t)
\\
R(t+\Delta t) &= R(t) + \gamma \Delta t I(t)
\end{align}
$$
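The discrete update above can likewise be written directly in a few lines of NumPy. A minimal sketch with the same illustrative parameters; the standalone function below is an assumption for illustration and is not part of `SIR_Solver`.
```python
import numpy as np

def sir_discrete_step(S, I, R, beta, gamma, dt):
    """One step of the discrete update defined above."""
    decay = np.exp(-beta * dt * I)
    return S * decay, I + S * (1 - decay) - gamma * dt * I, R + gamma * dt * I

S, I, R = 0.999, 0.001, 0.0        # assumed initial state
beta, gamma, dt = 2.0, 1.0, 0.1    # R0 = 2, as in the cells below
I_list = [I]
for _ in range(int(20 / dt)):      # iterate up to T = 20
    S, I, R = sir_discrete_step(S, I, R, beta, gamma, dt)
    I_list.append(I)
```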
## Formulation and algorithm details
* [report.pdf](report.pdf)
```python
import numpy as np
import math
import matplotlib.pyplot as plt
%matplotlib inline
import SIR_Solver as SS
plt.style.use('default')
```
```python
prm = SS.Prameter(R0=2.0, gamma=1)
dt = 0.1
model = SS.Model(prm=prm, I=0.001, T=20, dt=dt)
model_Euler = SS.Model(prm=prm, I=0.001, T=20, dt=dt)
model_Heun = SS.Model(prm=prm, I=0.001, T=20, dt=dt)
model_Runge_Kutta = SS.Model(prm=prm, I=0.001, T=20, dt=dt)
```
```python
model.solve_Anly()
model_Euler.solve_Euler()
model_Heun.solve_Heun()
model_Runge_Kutta.solve_Runge_Kutta()
```
```python
plt.plot(model.I_list, color='black', label='Discrete model')
plt.plot(model_Euler.I_list, color='red', label='Euler')
plt.plot(model_Heun.I_list, color='blue', label='Heun')
plt.plot(model_Runge_Kutta.I_list, color='green', label='Runge-Kutta')
plt.xticks([i*50 for i in range(6)], [i*5 for i in range(6)])
plt.xlim([0,200])
plt.ylim([0.0,0.17])
plt.legend(fontsize=8)
```
|
section \<open>Prisms\<close>
theory Prisms
imports Main
begin
text \<open>Prisms are like lenses, but they act on sum types rather than product types. For now
we do not yet prove many properties about them. See \url{https://hackage.haskell.org/package/lens-4.15.2/docs/Control-Lens-Prism.html}
for more information.\<close>
record ('v, 's) prism =
prism_match :: "'s \<Rightarrow> 'v option" ("match\<index>")
prism_build :: "'v \<Rightarrow> 's" ("build\<index>")
locale wb_prism =
fixes x :: "('v, 's) prism" (structure)
assumes match_build: "match (build v) = Some v"
and build_match: "match s = Some v \<Longrightarrow> s = build v"
begin
lemma build_match_iff: "match s = Some v \<longleftrightarrow> s = build v"
using build_match match_build by blast
lemma range_build: "range build = dom match"
using build_match match_build by fastforce
end
definition prism_suml :: "('a, 'a + 'b) prism" where
"prism_suml = \<lparr> prism_match = (\<lambda> v. case v of Inl x \<Rightarrow> Some x | _ \<Rightarrow> None), prism_build = Inl \<rparr>"
lemma wb_prim_suml: "wb_prism prism_suml"
apply (unfold_locales)
apply (simp_all add: prism_suml_def sum.case_eq_if)
apply (metis option.inject option.simps(3) sum.collapse(1))
done
definition prism_diff :: "('a, 's) prism \<Rightarrow> ('b, 's) prism \<Rightarrow> bool" (infix "\<nabla>" 50) where
"prism_diff X Y = (range build\<^bsub>X\<^esub> \<inter> range build\<^bsub>Y\<^esub> = {})"
lemma prism_diff_build: "X \<nabla> Y \<Longrightarrow> build\<^bsub>X\<^esub> u \<noteq> build\<^bsub>Y\<^esub> v"
by (simp add: disjoint_iff_not_equal prism_diff_def)
definition prism_plus :: "('a, 's) prism \<Rightarrow> ('b, 's) prism \<Rightarrow> ('a + 'b, 's) prism" (infixl "+\<^sub>P" 85) where
"X +\<^sub>P Y = \<lparr> prism_match = (\<lambda> s. case (match\<^bsub>X\<^esub> s, match\<^bsub>Y\<^esub> s) of
(Some u, _) \<Rightarrow> Some (Inl u) |
(None, Some v) \<Rightarrow> Some (Inr v) |
(None, None) \<Rightarrow> None),
prism_build = (\<lambda> v. case v of Inl x \<Rightarrow> build\<^bsub>X\<^esub> x | Inr y \<Rightarrow> build\<^bsub>Y\<^esub> y) \<rparr>"
end
|
# For loop
- Used to iterate over a list (or any other sequence).
- `for i in range(...):` gives an **int** loop variable; `range` can be called as
    - `range(end)`
    - `range(start, end)`
    - `range(start, end, step)` (see the short example right after this list for the `step` form)
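A quick, minimal illustration of the three `range` forms (the numbers are arbitrary):
```python
print(list(range(5)))         # [0, 1, 2, 3, 4]
print(list(range(2, 6)))      # [2, 3, 4, 5]
print(list(range(1, 10, 3)))  # [1, 4, 7]
```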
```python
d=[1,2,3,4,5]
```
```python
d.append(107)
d
```
[1, 2, 3, 4, 5, 107]
```python
L = []
for i in range(10):
L.append(i)
L
```
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
```python
a = []
for k in range(2,10):
a.append(k)
a
```
[2, 3, 4, 5, 6, 7, 8, 9]
```python
b = []
for i in range(1,21):
b.append(i)
b
```
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
```python
sum(b)
```
210
```python
a+b;
```
```python
sum(a+b)
```
254
```python
D = []
for i in range(1,5):
for j in range(1,5):
D.append(i+2*j)
print(D)
```
[3]
[3, 5]
[3, 5, 7]
[3, 5, 7, 9]
[3, 5, 7, 9, 4]
[3, 5, 7, 9, 4, 6]
[3, 5, 7, 9, 4, 6, 8]
[3, 5, 7, 9, 4, 6, 8, 10]
[3, 5, 7, 9, 4, 6, 8, 10, 5]
[3, 5, 7, 9, 4, 6, 8, 10, 5, 7]
[3, 5, 7, 9, 4, 6, 8, 10, 5, 7, 9]
[3, 5, 7, 9, 4, 6, 8, 10, 5, 7, 9, 11]
[3, 5, 7, 9, 4, 6, 8, 10, 5, 7, 9, 11, 6]
[3, 5, 7, 9, 4, 6, 8, 10, 5, 7, 9, 11, 6, 8]
[3, 5, 7, 9, 4, 6, 8, 10, 5, 7, 9, 11, 6, 8, 10]
[3, 5, 7, 9, 4, 6, 8, 10, 5, 7, 9, 11, 6, 8, 10, 12]
```python
D = []
for i in range(1,5):
print(D)
for j in range(1,5):
D.append(i+2*j)
```
[]
[3, 5, 7, 9]
[3, 5, 7, 9, 4, 6, 8, 10]
[3, 5, 7, 9, 4, 6, 8, 10, 5, 7, 9, 11]
```python
D = []
for i in range(1,5):
for j in range(1,5):
D.append(i+2*j)
D
```
[3, 5, 7, 9, 4, 6, 8, 10, 5, 7, 9, 11, 6, 8, 10, 12]
```python
p = []  # empty list p
for i in range(1,5):
for j in range(1,5):
p.append(i+2*j)
        print('i=',i,'j=',j,'p=',p)
```
i= 1 j= 1 p= [3]
i= 1 j= 2 p= [3, 5]
i= 1 j= 3 p= [3, 5, 7]
i= 1 j= 4 p= [3, 5, 7, 9]
i= 2 j= 1 p= [3, 5, 7, 9, 4]
i= 2 j= 2 p= [3, 5, 7, 9, 4, 6]
i= 2 j= 3 p= [3, 5, 7, 9, 4, 6, 8]
i= 2 j= 4 p= [3, 5, 7, 9, 4, 6, 8, 10]
i= 3 j= 1 p= [3, 5, 7, 9, 4, 6, 8, 10, 5]
i= 3 j= 2 p= [3, 5, 7, 9, 4, 6, 8, 10, 5, 7]
i= 3 j= 3 p= [3, 5, 7, 9, 4, 6, 8, 10, 5, 7, 9]
i= 3 j= 4 p= [3, 5, 7, 9, 4, 6, 8, 10, 5, 7, 9, 11]
i= 4 j= 1 p= [3, 5, 7, 9, 4, 6, 8, 10, 5, 7, 9, 11, 6]
i= 4 j= 2 p= [3, 5, 7, 9, 4, 6, 8, 10, 5, 7, 9, 11, 6, 8]
i= 4 j= 3 p= [3, 5, 7, 9, 4, 6, 8, 10, 5, 7, 9, 11, 6, 8, 10]
i= 4 j= 4 p= [3, 5, 7, 9, 4, 6, 8, 10, 5, 7, 9, 11, 6, 8, 10, 12]
```python
A = [10*k for k in range(10)]
print(A)
```
[0, 10, 20, 30, 40, 50, 60, 70, 80, 90]
```python
AA = [[10*x+y for x in range(4)] for y in range(3)]
print(AA)
```
[[0, 10, 20, 30], [1, 11, 21, 31], [2, 12, 22, 32]]
```python
summ = 0
for i in range(1,11):
summ = summ+i
print(summ)
```
1
3
6
10
15
21
28
36
45
55
```python
product = 1
for i in range(1,11):
product = product*i
print(product)
```
1
2
6
24
120
720
5040
40320
362880
3628800
### Projectile
- Range
\begin{equation}
R=\frac{u^2\sin2\theta}{2g}
\end{equation}
- Max.Height
\begin{equation}
H=\frac{u^2\sin^2\theta}{2g}
\end{equation}
- using list
```python
import numpy as np
u=1000
g=9.8
angle=[]
Range=[]
PI=np.pi
for i in range(0,90):
angle.append(i)
Range.append(u**2*np.sin(2*PI*i/180)/(2*g))
```
- import matplotlib.pyplot as plt
- %matplotlib inline
- plt.plot(x,y)
```python
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(angle,Range)
plt.xlabel('Angle')
plt.ylabel('Range')
plt.title('Projectile')
```
- using dictionary
```python
angle = np.arange(1,90)
M = {"Range": [u**2*np.sin(2*PI*x/180)/(2*g) for x in angle],\
"Max.height": [u**2*np.sin(PI*x/180)**2/(2*g) for x in angle]}
```
```python
import pandas as pd
DF = pd.DataFrame(M)
DF
```
|     | Range        | Max.height    |
|-----|--------------|---------------|
| 0   | 1780.586566  | 15.540127     |
| 1   | 3559.003762  | 62.141575     |
| 2   | 5333.084861  | 139.747567    |
| 3   | 7100.668416  | 248.263553    |
| 4   | 8859.600901  | 387.557321    |
| ... | ...          | ...           |
| 84  | 8859.600901  | 50632.850842  |
| 85  | 7100.668416  | 50772.144611  |
| 86  | 5333.084861  | 50880.660596  |
| 87  | 3559.003762  | 50958.266588  |
| 88  | 1780.586566  | 51004.868036  |

89 rows × 2 columns
```python
DF.plot()
plt.xlabel('Angle(degree)')
plt.ylabel('Distance(m)')
plt.title('Projectile')
```
```python
import numpy as np
u=1000
g=9.8
angle=[]
Range=[]   # avoid calling this list 'range': that would shadow the built-in range()
PI=np.pi
for i in range(0,90):
    angle.append(i)
    Range.append(u**2*np.sin(2*PI*i/180)/(2*g))
```
```python
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(angle,Range)
plt.xlabel("Angle")
plt.ylabel("Range")
plt.title("Projectile")
```
```python
```
|
(* Title: JinjaThreads/Examples/ApprenticeChallenge.thy
Author: Andreas Lochbihler
*)
chapter \<open>Examples\<close>
section \<open>Apprentice challenge\<close>
theory ApprenticeChallenge
imports
"../Execute/Code_Generation"
begin
text \<open>This theory implements the apprentice challenge by Porter and Moore \cite{MoorePorter2002TOPLAS}.\<close>
definition ThreadC :: "addr J_mb cdecl"
where
"ThreadC =
(Thread, Object, [],
[(run, [], Void, \<lfloor>([], unit)\<rfloor>),
(start, [], Void, Native),
(join, [], Void, Native),
(interrupt, [], Void, Native),
(isInterrupted, [], Boolean, Native)])"
definition Container :: cname
where "Container = STR ''Container''"
definition ContainerC :: "addr J_mb cdecl"
where "ContainerC = (Container, Object, [(STR ''counter'', Integer, \<lparr>volatile=False\<rparr>)], [])"
definition String :: cname
where "String = STR ''String''"
definition StringC :: "addr J_mb cdecl"
where
"StringC = (String, Object, [], [])"
definition Job :: cname
where "Job = STR ''Job''"
definition JobC :: "addr J_mb cdecl"
where
"JobC =
(Job, Thread, [(STR ''objref'', Class Container, \<lparr>volatile=False\<rparr>)],
[(STR ''incr'', [], Class Job, \<lfloor>([],
sync(Var (STR ''objref''))
((Var (STR ''objref''))\<bullet>STR ''counter''{STR ''''} := ((Var (STR ''objref''))\<bullet>STR ''counter''{STR ''''} \<guillemotleft>Add\<guillemotright> Val (Intg 1)));;
Var this)\<rfloor>),
(STR ''setref'', [Class Container], Void, \<lfloor>([STR ''o''],
LAss (STR ''objref'') (Var (STR ''o'')))\<rfloor>),
(run, [], Void, \<lfloor>([],
while (true) (Var this\<bullet>STR ''incr''([])))\<rfloor>)
])"
definition Apprentice :: cname
where "Apprentice = STR ''Apprentice''"
definition ApprenticeC :: "addr J_mb cdecl"
where
"ApprenticeC =
(Apprentice, Object, [],
[(STR ''main'', [Class String\<lfloor>\<rceil>], Void, \<lfloor>([STR ''args''],
{STR ''container'':Class Container=None;
(STR ''container'' := new Container);;
(while (true)
{STR ''job'':Class Job=None;
(STR ''job'' := new Job);;
(Var (STR ''job'')\<bullet>STR ''setref''([Var (STR ''container'')]));;
(Var (STR ''job'')\<bullet>Type.start([]))
}
)
})\<rfloor>)])"
definition ApprenticeChallenge
where
"ApprenticeChallenge = Program (SystemClasses @ [StringC, ThreadC, ContainerC, JobC, ApprenticeC])"
definition ApprenticeChallenge_annotated
where "ApprenticeChallenge_annotated = annotate_prog_code ApprenticeChallenge"
lemma "wf_J_prog ApprenticeChallenge_annotated"
by eval
lemmas [code_unfold] =
Container_def Job_def String_def Apprentice_def
definition main :: "String.literal" where "main = STR ''main''"
ML_val \<open>
val _ = tracing "started";
val program = @{code ApprenticeChallenge_annotated};
val _ = tracing "prg";
val compiled = @{code J2JVM} program;
val _ = tracing "compiled";
@{code exec_J_rr}
@{code "1 :: nat"}
program
@{code Apprentice}
@{code main}
[ @{code Null}];
val _ = tracing "J_rr";
@{code exec_JVM_rr}
@{code "1 :: nat"}
compiled
@{code Apprentice}
@{code main}
[ @{code Null}];
val _ = tracing "JVM_rr";
\<close>
end
|
import similaripy as sim
import scipy.sparse as sps
from evaluator import Evaluator
import numpy as np
import pandas as pd
from tqdm import tqdm
class CBFRecommender(object):
"""
    A content-based recommender. It builds an item-item similarity model from the item content matrix (ICM) and recommends 10 tracks for each target playlist.
"""
def __init__(self, datareader):
self.datareader = datareader
self.prediction = []
def __str__(self):
return "CBFRec"
def fit(self, mode="cosine", al_id=True, ar_id=True, top_k=100):
self.urm = self.datareader.get_urm()
# self.icm = self.datareader.get_icm(alid=al_id, arid=ar_id)
self.icm = self.datareader.get_icm(alid=True, arid=False)
self.icm -= 0.45*self.datareader.get_icm(alid=False, arid=True)
# Train the model
print("["+mode+"]")
if mode == "cosine":
self.model = sim.cosine(self.icm,
k=top_k,
verbose=True)
elif mode == "as_cosine":
            self.model = sim.asymmetric_cosine(self.icm,
alpha=0.7,
k=top_k,
verbose=True)
elif mode == "dot":
            self.model = sim.dot_product(self.icm,
k=top_k,
verbose=True)
def recommend(self, remove_seed=True):
"""
        Compute recommendations for all target playlists and save them to a CSV file.
        :param remove_seed: if True, remove seed tracks (tracks already in the playlist) from the recommendations
"""
# Compute user recommendations
user_recommendations = sim.dot_product(self.urm,
self.model,
target_rows=list(self.datareader.target_playlists),
k=100,
verbose=False)
        # Build the top-10 recommendation for each target playlist
for t in self.datareader.target_playlists:
scores = user_recommendations[t].toarray()[0]
tracks = scores.argsort()[-100:][::-1]
if remove_seed:
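                # seed tracks of playlist t are urm.indices[urm.indptr[t]:urm.indptr[t+1]];
                # np.in1d builds a mask so they can be dropped from the ranked candidates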
hold_ix = ~np.in1d(tracks, self.urm.indices[self.urm.indptr[t]:self.urm.indptr[t+1]])
recommended_tracks = tracks[hold_ix]
recommended_tracks = recommended_tracks[0:10]
recommended_tracks_str = ' '.join([str(i) for i in recommended_tracks])
self.prediction.append([t, recommended_tracks_str])
else:
recommended_tracks_str = ' '.join([str(i) for i in tracks[:10]])
self.prediction.append([t, recommended_tracks_str])
# Save CSV
df = pd.DataFrame(self.prediction, columns=['playlist_id', 'track_ids'])
df.to_csv(str(self) + '.csv', sep=',', index=False)
if __name__ == '__main__':
from datareader import Datareader
dr = Datareader()
rec = CBFRecommender(dr)
rec.fit(mode="as_cosine", al_id=True, ar_id=True, top_k=50)
rec.recommend()
ev = Evaluator()
prova_da_valutare = pd.read_csv(str(rec) + '.csv')
dict_tua_sol = ev.csv_to_dict(prova_da_valutare)
print(ev.evaluate_dict( dict_tua_sol ))
|
{-# OPTIONS --without-K --safe #-}
module Data.Binary.Conversion.Fast.Strict where
open import Data.Binary.Definition
open import Data.Nat.DivMod
open import Data.Nat.Base using (ℕ; suc; zero)
open import Strict
open import Data.Bool
⟦_⇑⟧⟨_⟩ : ℕ → ℕ → 𝔹
⟦ suc n ⇑⟧⟨ suc w ⟩ =
let! m =! even n in!
let! ms =! ⟦ n ÷ 2 ⇑⟧⟨ w ⟩ in!
if m then 1ᵇ ms else 2ᵇ ms
⟦ zero ⇑⟧⟨ _ ⟩ = 0ᵇ
⟦ suc _ ⇑⟧⟨ zero ⟩ = 0ᵇ -- will not happen
⟦_⇑⟧ : ℕ → 𝔹
⟦ n ⇑⟧ = ⟦ n ⇑⟧⟨ n ⟩
{-# INLINE ⟦_⇑⟧ #-}
-- Without the added argument to the recursor, the function does not
-- pass the termination checker:
-- {-# TERMINATING #-}
-- ⟦_⇑⟧″ : ℕ → 𝔹
-- ⟦ zero ⇑⟧″ = 0ᵇ
-- ⟦ suc n ⇑⟧″ =
-- if rem n 2 ℕ.≡ᴮ 0
-- then 1ᵇ ⟦ n ÷ 2 ⇑⟧″
-- else 2ᵇ ⟦ n ÷ 2 ⇑⟧″
-- The "principled" version (which uses well-founded recursion) is
-- incredibly slow. (and the following doesn't even compute, because of
-- cubical)
-- open import Data.Nat.WellFounded
-- ⟦_⇑⟧‴ : ℕ → 𝔹
-- ⟦ n ⇑⟧‴ = go n (≤-wellFounded n)
-- where
-- go : ∀ n → Acc _<_ n → 𝔹
-- go zero wf = 0ᵇ
-- go (suc n) (acc wf) =
-- if rem n 2 ℕ.≡ᴮ 0
-- then 1ᵇ go (n ÷ 2) (wf (n ÷ 2) (s≤s (div2≤ n)))
-- else 2ᵇ go (n ÷ 2) (wf (n ÷ 2) (s≤s (div2≤ n)))
|
lemma continuous_on_closure_norm_le: fixes f :: "'a::metric_space \<Rightarrow> 'b::real_normed_vector" assumes "continuous_on (closure s) f" and "\<forall>y \<in> s. norm(f y) \<le> b" and "x \<in> (closure s)" shows "norm (f x) \<le> b"
|
[STATEMENT]
lemma insert_before_removes_child:
assumes "heap_is_wellformed h" and "type_wf h" and "known_ptrs h"
assumes "ptr \<noteq> ptr'"
assumes "h \<turnstile> insert_before ptr node child \<rightarrow>\<^sub>h h'"
assumes "h \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r node # children"
shows "h' \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. h' \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. h' \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
[PROOF STEP]
obtain owner_document h2 h3 disc_nodes reference_child where
"h \<turnstile> (if Some node = child then a_next_sibling node else return child) \<rightarrow>\<^sub>r reference_child" and
"h \<turnstile> get_owner_document ptr \<rightarrow>\<^sub>r owner_document" and
h2: "h \<turnstile> adopt_node owner_document node \<rightarrow>\<^sub>h h2" and
"h2 \<turnstile> get_disconnected_nodes owner_document \<rightarrow>\<^sub>r disc_nodes" and
h3: "h2 \<turnstile> set_disconnected_nodes owner_document (remove1 node disc_nodes) \<rightarrow>\<^sub>h h3" and
h': "h3 \<turnstile> a_insert_node ptr node reference_child \<rightarrow>\<^sub>h h'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>reference_child owner_document h2 disc_nodes h3. \<lbrakk>h \<turnstile> (if Some node = child then local.a_next_sibling node else return child) \<rightarrow>\<^sub>r reference_child; h \<turnstile> get_owner_document ptr \<rightarrow>\<^sub>r owner_document; h \<turnstile> adopt_node owner_document node \<rightarrow>\<^sub>h h2; h2 \<turnstile> get_disconnected_nodes owner_document \<rightarrow>\<^sub>r disc_nodes; h2 \<turnstile> set_disconnected_nodes owner_document (remove1 node disc_nodes) \<rightarrow>\<^sub>h h3; h3 \<turnstile> local.a_insert_node ptr node reference_child \<rightarrow>\<^sub>h h'\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using assms(5)
[PROOF STATE]
proof (prove)
using this:
h \<turnstile> insert_before ptr node child \<rightarrow>\<^sub>h h'
goal (1 subgoal):
1. (\<And>reference_child owner_document h2 disc_nodes h3. \<lbrakk>h \<turnstile> (if Some node = child then local.a_next_sibling node else return child) \<rightarrow>\<^sub>r reference_child; h \<turnstile> get_owner_document ptr \<rightarrow>\<^sub>r owner_document; h \<turnstile> adopt_node owner_document node \<rightarrow>\<^sub>h h2; h2 \<turnstile> get_disconnected_nodes owner_document \<rightarrow>\<^sub>r disc_nodes; h2 \<turnstile> set_disconnected_nodes owner_document (remove1 node disc_nodes) \<rightarrow>\<^sub>h h3; h3 \<turnstile> local.a_insert_node ptr node reference_child \<rightarrow>\<^sub>h h'\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(auto simp add: insert_before_def a_ensure_pre_insertion_validity_def
elim!: bind_returns_heap_E bind_returns_result_E
bind_returns_heap_E2[rotated, OF get_child_nodes_pure, rotated]
bind_returns_heap_E2[rotated, OF get_parent_pure, rotated]
bind_returns_heap_E2[rotated, OF get_ancestors_pure, rotated]
bind_returns_heap_E2[rotated, OF get_owner_document_pure, rotated]
bind_returns_heap_E2[rotated, OF next_sibling_pure, rotated]
bind_returns_heap_E2[rotated, OF get_disconnected_nodes_pure, rotated]
split: if_splits option.splits)
[PROOF STATE]
proof (state)
this:
h \<turnstile> (if Some node = child then local.a_next_sibling node else return child) \<rightarrow>\<^sub>r reference_child
h \<turnstile> get_owner_document ptr \<rightarrow>\<^sub>r owner_document
h \<turnstile> adopt_node owner_document node \<rightarrow>\<^sub>h h2
h2 \<turnstile> get_disconnected_nodes owner_document \<rightarrow>\<^sub>r disc_nodes
h2 \<turnstile> set_disconnected_nodes owner_document (remove1 node disc_nodes) \<rightarrow>\<^sub>h h3
h3 \<turnstile> local.a_insert_node ptr node reference_child \<rightarrow>\<^sub>h h'
goal (1 subgoal):
1. h' \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
[PROOF STEP]
have "h2 \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. h2 \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
[PROOF STEP]
using h2 adopt_node_removes_first_child assms(1) assms(2) assms(3) assms(6)
[PROOF STATE]
proof (prove)
using this:
h \<turnstile> adopt_node owner_document node \<rightarrow>\<^sub>h h2
\<lbrakk>heap_is_wellformed ?h; type_wf ?h; known_ptrs ?h; ?h \<turnstile> adopt_node ?owner_document ?node \<rightarrow>\<^sub>h ?h'; ?h \<turnstile> get_child_nodes ?ptr' \<rightarrow>\<^sub>r ?node # ?children\<rbrakk> \<Longrightarrow> ?h' \<turnstile> get_child_nodes ?ptr' \<rightarrow>\<^sub>r ?children
heap_is_wellformed h
type_wf h
known_ptrs h
h \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r node # children
goal (1 subgoal):
1. h2 \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
h2 \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
goal (1 subgoal):
1. h' \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
h2 \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
[PROOF STEP]
have "h3 \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children"
[PROOF STATE]
proof (prove)
using this:
h2 \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
goal (1 subgoal):
1. h3 \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
[PROOF STEP]
using h3
[PROOF STATE]
proof (prove)
using this:
h2 \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
h2 \<turnstile> set_disconnected_nodes owner_document (remove1 node disc_nodes) \<rightarrow>\<^sub>h h3
goal (1 subgoal):
1. h3 \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
[PROOF STEP]
by(auto simp add: set_disconnected_nodes_get_child_nodes
dest!: reads_writes_separate_forwards[OF get_child_nodes_reads set_disconnected_nodes_writes])
[PROOF STATE]
proof (state)
this:
h3 \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
goal (1 subgoal):
1. h' \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
h3 \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
h3 \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
goal (1 subgoal):
1. h' \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
[PROOF STEP]
using h' assms(4)
[PROOF STATE]
proof (prove)
using this:
h3 \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
h3 \<turnstile> local.a_insert_node ptr node reference_child \<rightarrow>\<^sub>h h'
ptr \<noteq> ptr'
goal (1 subgoal):
1. h' \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
[PROOF STEP]
apply(auto simp add: a_insert_node_def
elim!: bind_returns_heap_E bind_returns_heap_E2[rotated, OF get_child_nodes_pure, rotated])[1]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>h3 \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children; ptr \<noteq> ptr'; h3 \<turnstile> get_child_nodes ptr \<rightarrow>\<^sub>r x; h3 \<turnstile> set_child_nodes ptr (insert_before_list node reference_child x) \<rightarrow>\<^sub>h h'\<rbrakk> \<Longrightarrow> h' \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
[PROOF STEP]
by(auto simp add: set_child_nodes_get_child_nodes_different_pointers
elim!: reads_writes_separate_forwards[OF get_child_nodes_reads set_child_nodes_writes])
[PROOF STATE]
proof (state)
this:
h' \<turnstile> get_child_nodes ptr' \<rightarrow>\<^sub>r children
goal:
No subgoals!
[PROOF STEP]
qed
|
import data.rat
open function
namespace mth1001
section composite
def q₁ (x : ℕ) : ℤ := x + 3
def q₂ (x : ℤ) : ℚ := 2 * x
/-
When a function `f` takes values from a type (or set) `α` and returns values in a type (or set) `β`,
we write that the *domain* of `f` is `α` and the *codomain* of `f` is `β`. This is denoted
`f : α → β`.
-/
/-
Given `f : α → β` and `g : β → γ`, the *composite* of `g` and `f`, denoted `g ∘ f` is the function
`g ∘ f : α → γ` with the property that `(g ∘ f) x = g (f x)`, for every `x : α`.
-/
-- With `q₁` and `q₂` as above, `q₁ : ℕ → ℤ` and `q₂ : ℤ → ℚ`. So `q₂ ∘ q₁ : ℕ → ℚ`.
#check q₁
#check q₂
#check q₂ ∘ q₁
-- We verify, that `(q₂ ∘ q₁) 5 = q₂ (q₁ 5)`.
#eval (q₂ ∘ q₁) 5
#eval q₂ (q₁ 5)
/-
With the above functions, `q₁ ∘ q₂` is *not defined* as the codomain of `q₂` differs from the
domain of `q₁`.
-/
/-
If all the domains and codomains of two functions, say `p₁` and `p₂` are equal, then it makes sense
to consider both composites. However, `p₂ ∘ p₁` will not (in general) be equal to `p₁ ∘ p₂`.
-/
def p₁ (x : ℤ) : ℤ := 3 * x
def p₂ (y : ℤ) : ℤ := y + 4
#eval (p₂ ∘ p₁) 6 -- `(p₂ ∘ p₁) 6 = p₂ (p₁ 6) = p₂ (3*6) = p₂ 18 = 18 + 4 = 22`, but
#eval (p₁ ∘ p₂) 6 -- `(p₁ ∘ p₂) 6 = p₁ (p₂ 6) = p₁ (6 + 4) = p₁ 10 = 3 * 10 = 30`.
/-
We'll prove that the composite of two injective functions is injective.
-/
variable {α : Type*}
variable {β : Type*}
variable {γ : Type*}
theorem injective_comp {f : α → β} {g : β → γ} (h₁ : injective f) (h₂ : injective g) :
injective (g ∘ f) :=
begin
unfold injective at *, -- We use the definition of injective.
intros a₁ a₂ h, -- Assume `a₁ a₂ : α`. Assume `h : (g ∘ f) a₁ = (g ∘ f) a₂`.
have h₄ : f a₁ = f a₂,
from h₂ h, -- By injectivity of `g`, applied to `h`, we have `h₄ : f a₁ = f a₂`.
show a₁ = a₂, from h₁ h₄, -- We show `a₁ = a₂` by injectivity of `f`, applied to `h₄`.
end
/-
We'll prove that the composite of two surjective functions is surjective. The proof is
more involved than the corresponding injectivity result.
-/
theorem surjective_comp {f : α → β} {g : β → γ} (h₁ : surjective f) (h₂ : surjective g) :
surjective (g ∘ f) :=
begin
unfold surjective at *, -- We use the definition of surjective.
intro c, -- Assume `c : γ`. It suffices to show `∃ a : α, (g ∘ f) a = c`.
sorry
end
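-- A possible proof sketch (not part of the original file; the tactic choices below
-- are only one way to discharge the goal and are not spelled out in the notes):
-- begin
--   unfold surjective at *,
--   intro c,
--   cases h₂ c with b hb,   -- surjectivity of `g` gives `b` with `g b = c`
--   cases h₁ b with a ha,   -- surjectivity of `f` gives `a` with `f a = b`
--   existsi a,              -- this `a` witnesses the goal
--   show g (f a) = c,       -- `(g ∘ f) a` unfolds to `g (f a)`
--   rw [ha, hb],
-- end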
-- Exercise 145:
-- From these two results, we have that the composite of two bijective functions is bijective.
theorem bijective_comp {f : α → β} {g : β → γ} (h₁ : bijective f) (h₂ : bijective g) :
bijective (g ∘ f) :=
begin
sorry
end
end composite
end mth1001
|
```python
%load_ext sympyprinting
%matplotlib inline
import matplotlib.pyplot as plt
import sympy
from IPython.display import display
sympy.init_printing(use_unicode=False, wrap_line=False, no_global=True)
import scipy.constants
import numpy as np
```
/home/ash/anaconda2/envs/python3/lib/python3.5/site-packages/IPython/extensions/sympyprinting.py:31: UserWarning: The sympyprinting extension has moved to `sympy`, use `from sympy import init_printing; init_printing()`
warnings.warn("The sympyprinting extension has moved to `sympy`, "
# Equation of motion - SDE to be solved
### $\ddot{q}(t) + \Gamma_0\dot{q}(t) + \Omega_0^2 q(t) - \dfrac{1}{m} F(t) = 0 $
#### where q = x, y or z
Where $F(t) = \mathcal{F}_{fluct}(t) + F_{feedback}(t)$
Taken from page 46 of 'Dynamics of optically levitated nanoparticles in high vacuum' - Thesis by Jan Gieseler
Using $\mathcal{F}_{fluct}(t) = \sqrt{2m \Gamma_0 k_B T_0}\dfrac{dW(t)}{dt}$
and $F_{feedback}(t) = \Omega_0 \eta q^2 \dot{q}$
Taken from page 49 of 'Dynamics of optically levitated nanoparticles in high vacuum' - Thesis by Jan Gieseler
we get the following SDE:
$\dfrac{d^2q(t)}{dt^2} + (\Gamma_0 - \Omega_0 \eta q(t)^2)\dfrac{dq(t)}{dt} + \Omega_0^2 q(t) - \sqrt{\dfrac{2\Gamma_0 k_B T_0}{m}} \dfrac{dW(t)}{dt} = 0$
split into 2 first order ODE/SDE s
letting $v = \dfrac{dq}{dt}$
$\dfrac{dv(t)}{dt} + (\Gamma_0 - \Omega_0 \eta q(t)^2)v + \Omega_0^2 q(t) - \sqrt{\dfrac{2\Gamma_0 k_B T_0}{m}} \dfrac{dW(t)}{dt} = 0$
therefore
$\dfrac{dv(t)}{dt} = -(\Gamma_0 - \Omega_0 \eta q(t)^2)v - \Omega_0^2 q(t) + \sqrt{\dfrac{2\Gamma_0 k_B T_0}{m}} \dfrac{dW(t)}{dt} $
$v = \dfrac{dq}{dt}$ therefore $dq = v~dt$
\begin{align}
dq&=v\,dt\\
dv&=[-(\Gamma_0-\Omega_0 \eta q(t)^2)v(t) - \Omega_0^2 q(t)]\,dt + \sqrt{\frac{2\Gamma_0 k_B T_0}m}\,dW
\end{align}
### Apply Milstein Method to solve
Consider the autonomous Itō stochastic differential equation
${\mathrm {d}}X_{t}=a(X_{t})\,{\mathrm {d}}t+b(X_{t})\,{\mathrm {d}}W_{t}$
Taking $X_t = q_t$ for the 1st equation above (i.e. $dq = v~dt$) we get:
$$ a(q_t) = v $$
$$ b(q_t) = 0 $$
Taking $X_t = v_t$ for the 2nd equation above (i.e. $dv = ...$) we get:
$$a(v_t) = -(\Gamma_0-\Omega_0\eta q(t)^2)v - \Omega_0^2 q(t)$$
$$b(v_t) = \sqrt{\dfrac{2\Gamma_0 k_B T_0}m}$$
Since $b'(v_{t})=0$, the diffusion term does not depend on $v_{t}$, so Milstein's method is in this case equivalent to the Euler–Maruyama method.
We then construct these functions in python:
```python
def a_q(t, v, q):
return v
def a_v(t, v, q):
return -(Gamma0 - Omega0*eta*q**2)*v - Omega0**2*q
def b_v(t, v, q):
return np.sqrt(2*Gamma0*k_b*T_0/m)
```
Using values obtained from fitting to data from a real particle we set the following constant values describing the system. Cooling has been assumed to be off by setting $\eta = 0$.
```python
Gamma0 = 4000 # radians/second
Omega0 = 75e3*2*np.pi # radians/second
eta = 0.5e7
T_0 = 300 # K
k_b = scipy.constants.Boltzmann # J/K
m = 3.1e-19 # KG
```
partition the interval [0, T] into N equal subintervals of width $\Delta t>0$:
$ 0=\tau _{0}<\tau _{1}<\dots <\tau _{N}=T{\text{ with }}\tau _{n}:=n\Delta t{\text{ and }}\Delta t={\frac {T}{N}}$
```python
dt = 1e-10
tArray = np.arange(0, 100e-6, dt)
```
```python
print("{} Hz".format(1/dt))
```
10000000000.0 Hz
set $Y_{0}=x_{0}$
```python
q0 = 0
v0 = 0
q = np.zeros_like(tArray)
v = np.zeros_like(tArray)
q[0] = q0
v[0] = v0
```
Generate independent and identically distributed normal random variables with expected value 0 and variance dt
```python
np.random.seed(88)
dwArray = np.random.normal(0, np.sqrt(dt), len(tArray)) # independent and identically distributed normal random variables with expected value 0 and variance dt
```
Apply Milstein's method (Euler Maruyama if $b'(Y_{n}) = 0$ as is the case here):
recursively define $Y_{n}$ for $ 1\leq n\leq N $ by
$ Y_{{n+1}}=Y_{n}+a(Y_{n})\Delta t+b(Y_{n})\Delta W_{n}+{\frac {1}{2}}b(Y_{n})b'(Y_{n})\left((\Delta W_{n})^{2}-\Delta t\right)$
Perform this for the 2 first order differential equations:
```python
#%%timeit
for n, t in enumerate(tArray[:-1]):
dw = dwArray[n]
v[n+1] = v[n] + a_v(t, v[n], q[n])*dt + b_v(t, v[n], q[n])*dw + 0
q[n+1] = q[n] + a_q(t, v[n], q[n])*dt + 0
```
We now have an array of positions, $q$, and velocities, $v$, as functions of time $t$.
```python
plt.plot(tArray*1e6, v)
plt.xlabel("t (us)")
plt.ylabel("v")
```
```python
plt.plot(tArray*1e6, q)
plt.xlabel("t (us)")
plt.ylabel("q")
```
Alternatively we can use a derivative-free version of Milsteins method as a two-stage kind-of Runge-Kutta method, documented in wikipedia (https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_method_%28SDE%29) or the original in arxiv.org https://arxiv.org/pdf/1210.0933.pdf.
```python
q0 = 0
v0 = 0
X = np.zeros([len(tArray), 2])
X[0, 0] = q0
X[0, 1] = v0
```
```python
def a(t, X):
q, v = X
return np.array([v, -(Gamma0 - Omega0*eta*q**2)*v - Omega0**2*q])
def b(t, X):
q, v = X
return np.array([0, np.sqrt(2*Gamma0*k_b*T_0/m)])
```
```python
%%timeit
S = np.array([-1,1])
for n, t in enumerate(tArray[:-1]):
dw = dwArray[n]
K1 = a(t, X[n])*dt + b(t, X[n])*(dw - S*np.sqrt(dt))
Xh = X[n] + K1
K2 = a(t, Xh)*dt + b(t, Xh)*(dw + S*np.sqrt(dt))
X[n+1] = X[n] + 0.5 * (K1+K2)
```
1 loop, best of 3: 2.33 s per loop
```python
q = X[:, 0]
v = X[:, 1]
```
```python
plt.plot(tArray*1e6, v)
plt.xlabel("t (us)")
plt.ylabel("v")
```
```python
plt.plot(tArray*1e6, q)
plt.xlabel("t (us)")
plt.ylabel("q")
```
The form of $F_{feedback}(t)$ is still questionable
On page 49 of 'Dynamics of optically levitated nanoparticles in high vacuum' - Thesis by Jan Gieseler he uses the form: $F_{feedback}(t) = \Omega_0 \eta q^2 \dot{q}$
On page 2 of 'Parametric feedback cooling of levitated optomechanics in a parabolic mirror trap', a paper by Jamie and Muddassar, they use the form: $F_{feedback}(t) = \dfrac{\Omega_0 \eta q^2 \dot{q}}{q_0^2}$ where $q_0$ is the amplitude of the motion: $q(t) = q_0\sin(\omega_0 t)$
However it always shows up as a term $\delta \Gamma$ like so:
$\dfrac{d^2q(t)}{dt^2} + (\Gamma_0 + \delta \Gamma)\dfrac{dq(t)}{dt} + \Omega_0^2 q(t) - \sqrt{\dfrac{2\Gamma_0 k_B T_0}{m}} \dfrac{dW(t)}{dt} = 0$
By fitting to data we extract the following 3 parameters:
1) $A = \gamma^2 \dfrac{k_B T_0}{\pi m}\Gamma_0 $
Where:
- $\gamma$ is the conversion factor between volts and nanometres. This parameterises the amount of light / number of photons collected from the nanoparticle. With unchanged alignment and the same particle this should remain constant with changes in pressure.
- $m$ is the mass of the particle, a constant
- $T_0$ is the temperature of the environment
- $\Gamma_0$ the damping due to the environment only
2) $\Omega_0$ - the natural frequency at this trapping power
3) $\Gamma$ - the total damping on the system including environment and feedback etc...
By taking a reference save with no cooling we have $\Gamma = \Gamma_0$ and can therefore extract $A' = \gamma^2 \dfrac{k_B T_0}{\pi m}$. Since $A'$ should be constant with pressure, we can extract $\Gamma_0$ at any pressure (given a reference save and therefore a value of $A'$), and from it $\delta \Gamma$, the damping due to cooling. We can then plug this into our SDE in order to include cooling in the SDE model.
For any dataset at any pressure we can do:
$\Gamma_0 = \dfrac{A}{A'}$
And then $\delta \Gamma = \Gamma - \Gamma_0$
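A small sketch of this bookkeeping in Python (the fitted numbers below are purely illustrative, chosen so the result reproduces the $\Gamma_0$ and $\delta \Gamma$ values used in the cells that follow):
```python
# Illustrative fitted values only, not real data
A_ref = 1.2e9        # A from the reference (uncooled) save, where Gamma = Gamma0
Gamma_ref = 4000     # total damping of the reference save (rad/s)
A = 4.5e6            # A fitted at the pressure of interest
Gamma = 2215         # total damping fitted at the pressure of interest (rad/s)

A_prime = A_ref / Gamma_ref   # A' = gamma^2 * k_B * T_0 / (pi * m), assumed pressure independent
Gamma0 = A / A_prime          # damping due to the environment only
deltaGamma = Gamma - Gamma0   # damping due to feedback cooling
print(Gamma0, deltaGamma)     # 15.0 2200.0
```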
Using this form and the same derivation as above we arrive at the following form of the 2 1st order differential equations:
\begin{align}
dq&=v\,dt\\
dv&=[-(\Gamma_0 + \delta \Gamma)v(t) - \Omega_0^2 q(t)]\,dt + \sqrt{\frac{2\Gamma_0 k_B T_0}m}\,dW
\end{align}
```python
def a_q(t, v, q):
return v
def a_v(t, v, q):
return -(Gamma0 + deltaGamma)*v - Omega0**2*q
def b_v(t, v, q):
return np.sqrt(2*Gamma0*k_b*T_0/m)
```
values below are taken from a ~1e-2 mbar cooled save
```python
Gamma0 = 15 # radians/second
deltaGamma = 2200
Omega0 = 75e3*2*np.pi # radians/second
eta = 0.5e7
T_0 = 300 # K
k_b = scipy.constants.Boltzmann # J/K
m = 3.1e-19 # KG
```
```python
dt = 1e-10
tArray = np.arange(0, 100e-6, dt)
```
```python
q0 = 0
v0 = 0
q = np.zeros_like(tArray)
v = np.zeros_like(tArray)
q[0] = q0
v[0] = v0
```
```python
np.random.seed(88)
dwArray = np.random.normal(0, np.sqrt(dt), len(tArray)) # independent and identically distributed normal random variables with expected value 0 and variance dt
```
```python
for n, t in enumerate(tArray[:-1]):
dw = dwArray[n]
v[n+1] = v[n] + a_v(t, v[n], q[n])*dt + b_v(t, v[n], q[n])*dw + 0
q[n+1] = q[n] + a_q(t, v[n], q[n])*dt + 0
```
```python
plt.plot(tArray*1e6, v)
plt.xlabel("t (us)")
plt.ylabel("v")
```
```python
plt.plot(tArray*1e6, q)
plt.xlabel("t (us)")
plt.ylabel("q")
```
```python
```
```python
```
|
function isobars
% eeg_topo_murks_contour - illustrate isocontours of a surface
clear; close all;
s = 40;
w = 1/150;
cvec = 10:60:300;
p = -s:s;
[x,y] = meshgrid(p);
f = exp(-w*(x.^2 + y.^2));
f = max(f,0);
f = sqrt(f);
surf(x,y,f); shading interp; hold on; rotate3d
V = (x + 10).^2 + (y + 10).^2 + f.^2;
for k = 1:length(cvec)
c = cvec(k);
Vc = (V > c);
% Vcdil = dilate(Vc); Vcdil = double(Vcdil);
se = [0 1 0 ; 1 1 1 ; 0 1 0];
% se = ones(1);
Vcdil = conv2(Vc,se,'same');
Vcdil = Vcdil > 0;
Vc = Vcdil - Vc;
Vcvec = find(Vc);
plot3(x(Vcvec),y(Vcvec),f(Vcvec),'w.')
end
hold off
|
This timeline shows a graph for the Philippines from 1971 to 2015. No data until 1970. Number of actual observations by date: 30.
|
\chapter{RV32I Base Integer Instruction Set, Version 2.0}
\label{rv32}
This chapter describes version 2.0 of the RV32I base integer
instruction set. Much of the commentary also applies to the RV64I
variant.
\begin{commentary}
RV32I was designed to be sufficient to form a compiler target and to
support modern operating system environments. The ISA was also
designed to reduce the hardware required in a minimal implementation.
RV32I contains 47 unique instructions, though a simple implementation
might cover the eight ECALL/EBREAK/CSRR* instructions with a single
SYSTEM hardware instruction that always traps and might be able to
implement the FENCE and FENCE.I instructions as NOPs, reducing
hardware instruction count to 38 total. RV32I can emulate almost any
other ISA extension (except the A extension, which requires additional
hardware support for atomicity).
Subsets of the base integer ISA might be useful for pedagogical
purposes, but the base has been defined such that there should be
little incentive to subset a real hardware implementation beyond
omitting support for misaligned memory accesses and treating all SYSTEM
instructions as a single trap.
\end{commentary}
\section{Programmers' Model for Base Integer Subset}
Figure~\ref{gprs} shows the user-visible state for the base integer
subset. There are 31 general-purpose registers {\tt x1}--{\tt x31},
which hold integer values. Register {\tt x0} is hardwired to the
constant 0. There is no hardwired subroutine return address link
register, but the standard software calling convention uses register
{\tt x1} to hold the return address on a call. For RV32, the {\tt x}
registers are 32 bits wide, and for RV64, they are 64 bits wide. This
document uses the term XLEN to refer to the current width of an {\tt
x} register in bits (either 32 or 64).
There is one additional user-visible register: the program counter {\tt pc}
holds the address of the current instruction.
\begin{commentary}
The number of available architectural registers can have large impacts
on code size, performance, and energy consumption. Although 16
registers would arguably be sufficient for an integer ISA running
compiled code, it is impossible to encode a complete ISA with 16
registers in 16-bit instructions using a 3-address format. Although a
2-address format would be possible, it would increase instruction
count and lower efficiency. We wanted to avoid intermediate
instruction sizes (such as Xtensa's 24-bit instructions) to simplify
base hardware implementations, and once a 32-bit instruction size was
adopted, it was straightforward to support 32 integer registers. A
larger number of integer registers also helps performance on
high-performance code, where there can be extensive use of loop
unrolling, software pipelining, and cache tiling.
For these reasons, we chose a conventional size of 32 integer
registers for the base ISA. Dynamic register usage tends to be
dominated by a few frequently accessed registers, and regfile
implementations can be optimized to reduce access energy for the
frequently accessed registers~\cite{jtseng:sbbci}. The optional
compressed 16-bit instruction format mostly only accesses 8 registers
and hence can provide a dense instruction encoding, while additional
instruction-set extensions could support a much larger register space
(either flat or hierarchical) if desired.
For resource-constrained embedded applications, we have defined the
RV32E subset, which only has 16 registers (Chapter~\ref{rv32e}).
\end{commentary}
\begin{figure}[H]
{\footnotesize
\begin{center}
\begin{tabular}{p{2in}}
\instbitrange{XLEN-1}{0} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ \ \ \ x0 / zero}} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ \ x1\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ \ x2\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ \ x3\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ \ x4\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ \ x5\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ \ x6\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ \ x7\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ \ x8\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ \ x9\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x10\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x11\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x12\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x13\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x14\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x15\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x16\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x17\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x18\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x19\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x20\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x21\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x22\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x23\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x24\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x25\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x26\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x27\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x28\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x29\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x30\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{\ \ \ x31\ \ \ \ \ }} \\ \cline{1-1}
\multicolumn{1}{c}{XLEN} \\
\instbitrange{XLEN-1}{0} \\ \cline{1-1}
\multicolumn{1}{|c|}{\reglabel{pc}} \\ \cline{1-1}
\multicolumn{1}{c}{XLEN} \\
\end{tabular}
\end{center}
}
\caption{RISC-V user-level base integer register state.}
\label{gprs}
\end{figure}
\newpage
\section{Base Instruction Formats}
In the base ISA, there are four core instruction formats (R/I/S/U), as
shown in Figure~\ref{fig:baseinstformats}. All are a fixed 32 bits in
length and must be aligned on a four-byte boundary in memory. An instruction address misaligned exception is generated on a
taken branch or unconditional jump if the target address is not
four-byte aligned. No instruction fetch misaligned exception is
generated for a conditional branch that is not taken.
\begin{commentary}
The alignment constraint for base ISA instructions is relaxed to a
two-byte boundary when instruction extensions with 16-bit lengths or
other odd multiples of 16-bit lengths are added.
\end{commentary}
\vspace{-0.2in}
\begin{figure}[h]
\begin{center}
\setlength{\tabcolsep}{4pt}
\begin{tabular}{p{1.2in}@{}p{0.8in}@{}p{0.8in}@{}p{0.6in}@{}p{0.8in}@{}p{1in}l}
\\
\instbitrange{31}{25} &
\instbitrange{24}{20} &
\instbitrange{19}{15} &
\instbitrange{14}{12} &
\instbitrange{11}{7} &
\instbitrange{6}{0} \\
\cline{1-6}
\multicolumn{1}{|c|}{funct7} &
\multicolumn{1}{c|}{rs2} &
\multicolumn{1}{c|}{rs1} &
\multicolumn{1}{c|}{funct3} &
\multicolumn{1}{c|}{rd} &
\multicolumn{1}{c|}{opcode} &
R-type \\
\cline{1-6}
\\
\cline{1-6}
\multicolumn{2}{|c|}{imm[11:0]} &
\multicolumn{1}{c|}{rs1} &
\multicolumn{1}{c|}{funct3} &
\multicolumn{1}{c|}{rd} &
\multicolumn{1}{c|}{opcode} &
I-type \\
\cline{1-6}
\\
\cline{1-6}
\multicolumn{1}{|c|}{imm[11:5]} &
\multicolumn{1}{c|}{rs2} &
\multicolumn{1}{c|}{rs1} &
\multicolumn{1}{c|}{funct3} &
\multicolumn{1}{c|}{imm[4:0]} &
\multicolumn{1}{c|}{opcode} &
S-type \\
\cline{1-6}
\\
\cline{1-6}
\multicolumn{4}{|c|}{imm[31:12]} &
\multicolumn{1}{c|}{rd} &
\multicolumn{1}{c|}{opcode} &
U-type \\
\cline{1-6}
\end{tabular}
\end{center}
\caption{RISC-V base instruction formats. Each immediate subfield is
labeled with the bit position (imm[{\em x}\,]) in the immediate
value being produced, rather than the bit position within the
instruction's immediate field as is usually done. }
\label{fig:baseinstformats}
\end{figure}
The RISC-V ISA keeps the source ({\em rs1} and {\em rs2}) and
destination ({\em rd}) registers at the same position in all formats
to simplify decoding. Except for the 5-bit immediates used in CSR
instructions (Section~\ref{sec:csrinsts}), immediates are always
sign-extended, and are generally packed towards the leftmost available
bits in the instruction and have been allocated to reduce hardware
complexity. In particular, the sign bit for all immediates is always
in bit 31 of the instruction to speed sign-extension circuitry.
\begin{commentary}
Decoding register specifiers is usually on the critical paths in
implementations, and so the instruction format was chosen to keep all
register specifiers at the same position in all formats at the expense
of having to move immediate bits across formats (a property shared
with RISC-IV aka. SPUR~\cite{spur-jsscc1989}).
In practice, most immediates are either small or require all XLEN
bits. We chose an asymmetric immediate split (12 bits in regular
instructions plus a special load upper immediate instruction with 20
bits) to increase the opcode space available for regular instructions.
Immediates are sign-extended because we did not observe a benefit to
using zero-extension for some immediates as in the MIPS ISA and wanted
to keep the ISA as simple as possible.
\end{commentary}
\section{Immediate Encoding Variants}
There are a further two variants of the instruction formats (B/J)
based on the handling of immediates, as shown in
Figure~\ref{fig:baseinstformatsimm}.
The only difference between the S and B formats is that the 12-bit
immediate field is used to encode branch offsets in multiples of 2 in
the B format. Instead of shifting all bits in the
instruction-encoded immediate left by one in hardware as is
conventionally done, the middle bits (imm[10:1]) and sign bit stay in
fixed positions, while the lowest bit in S format (inst[7]) encodes a
high-order bit in B format.
Similarly, the only difference between the U and J formats is
that the 20-bit immediate is shifted left by 12 bits to form U
immediates and by 1 bit to form J immediates. The location of
instruction bits in the U and J format immediates is chosen to
maximize overlap with the other formats and with each other.
\begin{figure}[h]
\begin{small}
\begin{center}
\setlength{\tabcolsep}{4pt}
\begin{tabular}{p{0.3in}@{}p{0.8in}@{}p{0.6in}@{}p{0.18in}@{}p{0.7in}@{}p{0.6in}@{}p{0.6in}@{}p{0.3in}@{}p{0.5in}l}
\\
\multicolumn{1}{c}{\instbit{31}} &
\instbitrange{30}{25} &
\instbitrange{24}{21} &
\multicolumn{1}{c}{\instbit{20}} &
\instbitrange{19}{15} &
\instbitrange{14}{12} &
\instbitrange{11}{8} &
\multicolumn{1}{c}{\instbit{7}} &
\instbitrange{6}{0} \\
\cline{1-9}
\multicolumn{2}{|c|}{funct7} &
\multicolumn{2}{c|}{rs2} &
\multicolumn{1}{c|}{rs1} &
\multicolumn{1}{c|}{funct3} &
\multicolumn{2}{c|}{rd} &
\multicolumn{1}{c|}{opcode} &
R-type \\
\cline{1-9}
\\
\cline{1-9}
\multicolumn{4}{|c|}{imm[11:0]} &
\multicolumn{1}{c|}{rs1} &
\multicolumn{1}{c|}{funct3} &
\multicolumn{2}{c|}{rd} &
\multicolumn{1}{c|}{opcode} &
I-type \\
\cline{1-9}
\\
\cline{1-9}
\multicolumn{2}{|c|}{imm[11:5]} &
\multicolumn{2}{c|}{rs2} &
\multicolumn{1}{c|}{rs1} &
\multicolumn{1}{c|}{funct3} &
\multicolumn{2}{c|}{imm[4:0]} &
\multicolumn{1}{c|}{opcode} &
S-type \\
\cline{1-9}
\\
\cline{1-9}
\multicolumn{1}{|c|}{imm[12]} &
\multicolumn{1}{c|}{imm[10:5]} &
\multicolumn{2}{c|}{rs2} &
\multicolumn{1}{c|}{rs1} &
\multicolumn{1}{c|}{funct3} &
\multicolumn{1}{c|}{imm[4:1]} &
\multicolumn{1}{c|}{imm[11]} &
\multicolumn{1}{c|}{opcode} &
B-type \\
\cline{1-9}
\\
\cline{1-9}
\multicolumn{6}{|c|}{imm[31:12]} &
\multicolumn{2}{c|}{rd} &
\multicolumn{1}{c|}{opcode} &
U-type \\
\cline{1-9}
\\
\cline{1-9}
\multicolumn{1}{|c|}{imm[20]} &
\multicolumn{2}{c|}{imm[10:1]} &
\multicolumn{1}{c|}{imm[11]} &
\multicolumn{2}{c|}{imm[19:12]} &
\multicolumn{2}{c|}{rd} &
\multicolumn{1}{c|}{opcode} &
J-type \\
\cline{1-9}
\end{tabular}
\end{center}
\end{small}
\caption{RISC-V base instruction formats showing immediate variants.}
\label{fig:baseinstformatsimm}
\end{figure}
Figure~\ref{fig:immtypes} shows the immediates produced by each of the
base instruction formats, and is labeled to show which instruction
bit (inst[{\em y}\,]) produces each bit of the immediate value.
\begin{figure}[h]
\begin{center}
\setlength{\tabcolsep}{4pt}
\begin{tabular}{p{0.2in}@{}p{1.2in}@{}p{1.0in}@{}p{0.2in}@{}p{0.7in}@{}p{0.7in}@{}p{0.2in}l}
\\
\multicolumn{1}{c}{\instbit{31}} &
\instbitrange{30}{20} &
\instbitrange{19}{12} &
\multicolumn{1}{c}{\instbit{11}} &
\instbitrange{10}{5} &
\instbitrange{4}{1} &
\multicolumn{1}{c}{\instbit{0}} &
\\
\cline{1-7}
\multicolumn{4}{|c|}{--- inst[31] ---} &
\multicolumn{1}{c|}{inst[30:25]} &
\multicolumn{1}{c|}{inst[24:21]} &
\multicolumn{1}{c|}{inst[20]} &
I-immediate \\
\cline{1-7}
\\
\cline{1-7}
\multicolumn{4}{|c|}{--- inst[31] ---} &
\multicolumn{1}{c|}{inst[30:25]} &
\multicolumn{1}{c|}{inst[11:8]} &
\multicolumn{1}{c|}{inst[7]} &
S-immediate \\
\cline{1-7}
\\
\cline{1-7}
\multicolumn{3}{|c|}{--- inst[31] ---} &
\multicolumn{1}{c|}{inst[7]} &
\multicolumn{1}{c|}{inst[30:25]} &
\multicolumn{1}{c|}{inst[11:8]} &
\multicolumn{1}{c|}{0} &
B-immediate \\
\cline{1-7}
\\
\cline{1-7}
\multicolumn{1}{|c|}{inst[31]} &
\multicolumn{1}{c|}{inst[30:20]} &
\multicolumn{1}{c|}{inst[19:12]} &
\multicolumn{4}{c|}{--- 0 ---} &
U-immediate \\
\cline{1-7}
\\
\cline{1-7}
\multicolumn{2}{|c|}{--- inst[31] ---} &
\multicolumn{1}{c|}{inst[19:12]} &
\multicolumn{1}{c|}{inst[20]} &
\multicolumn{1}{c|}{inst[30:25]} &
\multicolumn{1}{c|}{inst[24:21]} &
\multicolumn{1}{c|}{0} &
J-immediate \\
\cline{1-7}
\end{tabular}
\end{center}
\caption{Types of immediate produced by RISC-V instructions. The fields are labeled with the
instruction bits used to construct their value. Sign extension
always uses inst[31].}
\label{fig:immtypes}
\end{figure}
\begin{commentary}
Sign-extension is one of the most critical operations on immediates
(particularly in RV64I), and in RISC-V the sign bit for all immediates
is always held in bit 31 of the instruction to allow sign-extension to
proceed in parallel with instruction decoding.
Although more complex implementations might have separate adders for
branch and jump calculations and so would not benefit from keeping the
location of immediate bits constant across types of instruction, we
wanted to reduce the hardware cost of the simplest implementations.
By rotating bits in the instruction encoding of B and J immediates
instead of using dynamic hardware muxes to multiply the immediate by
2, we reduce instruction signal fanout and immediate mux costs by
around a factor of 2. The scrambled immediate encoding will add
negligible time to static or ahead-of-time compilation. For dynamic
generation of instructions, there is some small additional
overhead, but the most common short forward branches have
straightforward immediate encodings.
\end{commentary}
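\begin{commentary}
As a non-normative illustrative sketch, the following Python functions extract
the sign-extended I-, B-, and J-type immediates from a 32-bit instruction word,
following the bit positions given in Figure~\ref{fig:immtypes}:
\begin{verbatim}
def sign_extend(value, bits):
    # interpret the low 'bits' bits of value as a two's-complement number
    mask = 1 << (bits - 1)
    return (value ^ mask) - mask

def imm_i(inst):
    # I-immediate: inst[31:20], sign-extended
    return sign_extend((inst >> 20) & 0xfff, 12)

def imm_b(inst):
    # B-immediate: imm[12]=inst[31], imm[11]=inst[7],
    # imm[10:5]=inst[30:25], imm[4:1]=inst[11:8], imm[0]=0
    imm  = ((inst >> 31) & 0x1) << 12
    imm |= ((inst >> 7)  & 0x1) << 11
    imm |= ((inst >> 25) & 0x3f) << 5
    imm |= ((inst >> 8)  & 0xf) << 1
    return sign_extend(imm, 13)

def imm_j(inst):
    # J-immediate: imm[20]=inst[31], imm[19:12]=inst[19:12],
    # imm[11]=inst[20], imm[10:1]=inst[30:21], imm[0]=0
    imm  = ((inst >> 31) & 0x1) << 20
    imm |= ((inst >> 12) & 0xff) << 12
    imm |= ((inst >> 20) & 0x1) << 11
    imm |= ((inst >> 21) & 0x3ff) << 1
    return sign_extend(imm, 21)
\end{verbatim}
\end{commentary}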
\section{Integer Computational Instructions}
Most integer computational instructions operate on XLEN bits of values
held in the integer register file. Integer computational instructions
are either encoded as register-immediate operations using the I-type
format or as register-register operations using the R-type format.
The destination is register {\em rd} for both register-immediate and
register-register instructions. No integer computational instructions
cause arithmetic exceptions.
\begin{commentary}
We did not include special instruction-set support for overflow checks
on integer arithmetic operations in the base instruction set, as many
overflow checks can be cheaply implemented using RISC-V branches.
Overflow checking for unsigned addition requires only a single
additional branch instruction after the addition:
\verb! add t0, t1, t2; bltu t0, t1, overflow!.
For signed addition, if one operand's sign is known, overflow checking
requires only a single branch after the addition:
\verb! addi t0, t1, +imm; blt t0, t1, overflow!. This covers the
common case of addition with an immediate operand.
For general signed addition, three additional instructions after the
addition are required, leveraging the observation that the sum should
be less than one of the operands if and only if the other operand is
negative.
\begin{verbatim}
add t0, t1, t2
slti t3, t2, 0
slt t4, t0, t1
bne t3, t4, overflow
\end{verbatim}
In RV64, checks of 32-bit signed additions can be optimized further by
comparing the results of ADD and ADDW on the operands.
\end{commentary}
\subsubsection*{Integer Register-Immediate Instructions}
\vspace{-0.4in}
\begin{center}
\begin{tabular}{M@{}R@{}S@{}R@{}O}
\\
\instbitrange{31}{20} &
\instbitrange{19}{15} &
\instbitrange{14}{12} &
\instbitrange{11}{7} &
\instbitrange{6}{0} \\
\hline
\multicolumn{1}{|c|}{imm[11:0]} &
\multicolumn{1}{c|}{rs1} &
\multicolumn{1}{c|}{funct3} &
\multicolumn{1}{c|}{rd} &
\multicolumn{1}{c|}{opcode} \\
\hline
12 & 5 & 3 & 5 & 7 \\
I-immediate[11:0] & src & ADDI/SLTI[U] & dest & OP-IMM \\
I-immediate[11:0] & src & ANDI/ORI/XORI & dest & OP-IMM \\
\end{tabular}
\end{center}
ADDI adds the sign-extended 12-bit immediate to register {\em rs1}.
Arithmetic overflow is ignored and the result is simply the low
XLEN bits of the result. ADDI {\em rd, rs1, 0} is used to implement the
MV {\em rd, rs1} assembler pseudoinstruction.
SLTI (set less than immediate) places the value 1 in register {\em rd}
if register {\em rs1} is less than the sign-extended immediate when
both are treated as signed numbers, else 0 is written to {\em rd}.
SLTIU is similar but compares the values as unsigned numbers (i.e.,
the immediate is first sign-extended to XLEN bits then treated as an
unsigned number). Note, SLTIU {\em rd, rs1, 1} sets {\em rd}
to 1 if {\em rs1} equals zero, otherwise sets {\em rd} to 0 (assembler
pseudoinstruction SEQZ {\em rd, rs}).
ANDI, ORI, XORI are logical operations that perform bitwise AND, OR,
and XOR on register {\em rs1} and the sign-extended 12-bit immediate
and place the result in {\em rd}. Note, XORI {\em rd, rs1, -1}
performs a bitwise logical inversion of register {\em rs1} (assembler
pseudoinstruction NOT {\em rd, rs}).
\vspace{-0.2in}
\begin{center}
\begin{tabular}{S@{}R@{}R@{}S@{}R@{}O}
\\
\instbitrange{31}{25} &
\instbitrange{24}{20} &
\instbitrange{19}{15} &
\instbitrange{14}{12} &
\instbitrange{11}{7} &
\instbitrange{6}{0} \\
\hline
\multicolumn{1}{|c|}{imm[11:5]} &
\multicolumn{1}{c|}{imm[4:0]} &
\multicolumn{1}{c|}{rs1} &
\multicolumn{1}{c|}{funct3} &
\multicolumn{1}{c|}{rd} &
\multicolumn{1}{c|}{opcode} \\
\hline
7 & 5 & 5 & 3 & 5 & 7 \\
0000000 & shamt[4:0] & src & SLLI & dest & OP-IMM \\
0000000 & shamt[4:0] & src & SRLI & dest & OP-IMM \\
0100000 & shamt[4:0] & src & SRAI & dest & OP-IMM \\
\end{tabular}
\end{center}
Shifts by a constant are encoded as a specialization of the
I-type format. The operand to be shifted is in {\em rs1}, and the
shift amount is encoded in the lower 5 bits of the I-immediate field.
The right shift type is encoded in bit 30.
SLLI is a logical left shift (zeros are shifted into the lower bits);
SRLI is a logical right shift (zeros are shifted into the upper bits);
and SRAI is an arithmetic right shift (the original sign bit is copied
into the vacated upper bits).
\vspace{-0.2in}
\begin{center}
\begin{tabular}{U@{}R@{}O}
\\
\instbitrange{31}{12} &
\instbitrange{11}{7} &
\instbitrange{6}{0} \\
\hline
\multicolumn{1}{|c|}{imm[31:12]} &
\multicolumn{1}{c|}{rd} &
\multicolumn{1}{c|}{opcode} \\
\hline
20 & 5 & 7 \\
U-immediate[31:12] & dest & LUI \\
U-immediate[31:12] & dest & AUIPC
\end{tabular}
\end{center}
LUI (load upper immediate) is used to build 32-bit constants and uses
the U-type format. LUI places the U-immediate value in the top 20
bits of the destination register {\em rd}, filling in the lowest 12
bits with zeros.
AUIPC (add upper immediate to {\tt pc}) is used to build {\tt pc}-relative
addresses and uses the U-type format. AUIPC forms a 32-bit offset from the
20-bit U-immediate, filling in the lowest 12 bits with zeros, adds this offset
to the {\tt pc}, then places the result in register {\em rd}.
\begin{commentary}
The AUIPC instruction supports two-instruction sequences to access
arbitrary offsets from the PC for both control-flow transfers and data
accesses. The combination of an AUIPC and the 12-bit immediate in a
JALR can transfer control to any 32-bit PC-relative address, while an
AUIPC plus the 12-bit immediate offset in regular load or store
instructions can access any 32-bit PC-relative data address.
The current PC can be obtained by setting the U-immediate to 0. Although
a JAL +4 instruction could also be used to obtain the PC, it might cause
pipeline breaks in simpler microarchitectures or pollute the BTB structures in
more complex microarchitectures.
\end{commentary}
\subsubsection*{Integer Register-Register Operations}
RV32I defines several arithmetic R-type operations. All operations
read the {\em rs1} and {\em rs2} registers as source operands and
write the result into register {\em rd}. The {\em funct7} and {\em
funct3} fields select the type of operation.
\vspace{-0.2in}
\begin{center}
\begin{tabular}{S@{}R@{}R@{}S@{}R@{}O}
\\
\instbitrange{31}{25} &
\instbitrange{24}{20} &
\instbitrange{19}{15} &
\instbitrange{14}{12} &
\instbitrange{11}{7} &
\instbitrange{6}{0} \\
\hline
\multicolumn{1}{|c|}{funct7} &
\multicolumn{1}{c|}{rs2} &
\multicolumn{1}{c|}{rs1} &
\multicolumn{1}{c|}{funct3} &
\multicolumn{1}{c|}{rd} &
\multicolumn{1}{c|}{opcode} \\
\hline
7 & 5 & 5 & 3 & 5 & 7 \\
0000000 & src2 & src1 & ADD/SLT/SLTU & dest & OP \\
0000000 & src2 & src1 & AND/OR/XOR & dest & OP \\
0000000 & src2 & src1 & SLL/SRL & dest & OP \\
0100000 & src2 & src1 & SUB/SRA & dest & OP \\
\end{tabular}
\end{center}
ADD performs the addition of {\em rs1} and {\em rs2}. SUB performs the
subtraction of {\em rs2} from {\em rs1}. Overflows are ignored and the low XLEN
bits of results are written to the destination {\em rd}.
SLT and SLTU perform signed and unsigned compares
respectively, writing 1 to {\em rd} if $\mbox{\em rs1} < \mbox{\em
rs2}$, 0 otherwise. Note, SLTU {\em rd}, {\em x0}, {\em rs2} sets
{\em rd} to 1 if {\em rs2} is not equal to zero, otherwise sets {\em
rd} to zero (assembler pseudoinstruction SNEZ {\em rd, rs}). AND, OR, and
XOR perform bitwise logical operations.
SLL, SRL, and SRA perform logical left, logical right, and arithmetic
right shifts on the value in register {\em rs1} by the shift amount
held in the lower 5 bits of register {\em rs2}.
\subsubsection*{NOP Instruction}
\vspace{-0.4in}
\begin{center}
\begin{tabular}{M@{}R@{}S@{}R@{}O}
\\
\instbitrange{31}{20} &
\instbitrange{19}{15} &
\instbitrange{14}{12} &
\instbitrange{11}{7} &
\instbitrange{6}{0} \\
\hline
\multicolumn{1}{|c|}{imm[11:0]} &
\multicolumn{1}{c|}{rs1} &
\multicolumn{1}{c|}{funct3} &
\multicolumn{1}{c|}{rd} &
\multicolumn{1}{c|}{opcode} \\
\hline
12 & 5 & 3 & 5 & 7 \\
0 & 0 & ADDI & 0 & OP-IMM \\
\end{tabular}
\end{center}
The NOP instruction does not change any architecturally visible state, except for
advancing the {\tt pc} and incrementing any applicable performance
counters. NOP is encoded as ADDI {\em x0, x0, 0}.
\begin{commentary}
NOPs can be used to align code segments to microarchitecturally
significant address boundaries, or to leave space for inline code
modifications. Although there are many possible ways to encode a NOP,
we define a canonical NOP encoding to allow microarchitectural
optimizations as well as for more readable disassembly output. The
other NOP encodings are made available for HINT instructions
(Section~\ref{sec:rv32i-hints}).
\end{commentary}
\section{Control Transfer Instructions}
RV32I provides two types of control transfer instructions:
unconditional jumps and conditional branches. Control transfer
instructions in RV32I do {\em not} have architecturally visible delay
slots.
\subsubsection*{Unconditional Jumps}
\vspace{-0.1in} The jump and link (JAL) instruction uses the J-type
format, where the J-immediate encodes a signed offset in multiples of
2 bytes. The offset is sign-extended and added to the {\tt pc}
to form the jump target address. Jumps can therefore target a
$\pm$\wunits{1}{MiB} range. JAL stores the address of the instruction
following the jump ({\tt pc}+4) into register {\em rd}. The standard
software calling convention uses {\tt x1} as the return address
register and {\tt x5} as an alternate link register.
\begin{commentary}
The alternate link register supports calling millicode routines (e.g.,
those to save and restore registers in compressed code) while
preserving the regular return address register. The register {\tt x5}
was chosen as the alternate link register as it maps to a temporary in
the standard calling convention, and has an encoding that is only one
bit different than the regular link register.
\end{commentary}
Plain unconditional jumps (assembler pseudoinstruction J) are encoded as a JAL
with {\em rd}={\tt x0}.
\vspace{-0.2in}
\begin{center}
\begin{tabular}{W@{}E@{}W@{}R@{}R@{}O}
\\
\multicolumn{1}{c}{\instbit{31}} &
\instbitrange{30}{21} &
\multicolumn{1}{c}{\instbit{20}} &
\instbitrange{19}{12} &
\instbitrange{11}{7} &
\instbitrange{6}{0} \\
\hline
\multicolumn{1}{|c|}{imm[20]} &
\multicolumn{1}{c|}{imm[10:1]} &
\multicolumn{1}{c|}{imm[11]} &
\multicolumn{1}{c|}{imm[19:12]} &
\multicolumn{1}{c|}{rd} &
\multicolumn{1}{c|}{opcode} \\
\hline
1 & 10 & \multicolumn{1}{c}{1} & 8 & 5 & 7 \\
\multicolumn{4}{c}{offset[20:1]} & dest & JAL \\
\end{tabular}
\end{center}
The indirect jump instruction JALR (jump and link register) uses the
I-type encoding. The target address is obtained by adding the sign-extended
12-bit I-immediate to the register {\em rs1}, then setting the
least-significant bit of the result to zero. The address of
the instruction following the jump ({\tt pc}+4) is written to register
{\em rd}. Register {\tt x0} can be used as the destination if the
result is not required.
\vspace{-0.4in}
\begin{center}
\begin{tabular}{M@{}R@{}F@{}R@{}O}
\\
\instbitrange{31}{20} &
\instbitrange{19}{15} &
\instbitrange{14}{12} &
\instbitrange{11}{7} &
\instbitrange{6}{0} \\
\hline
\multicolumn{1}{|c|}{imm[11:0]} &
\multicolumn{1}{c|}{rs1} &
\multicolumn{1}{c|}{funct3} &
\multicolumn{1}{c|}{rd} &
\multicolumn{1}{c|}{opcode} \\
\hline
12 & 5 & 3 & 5 & 7 \\
offset[11:0] & base & 0 & dest & JALR \\
\end{tabular}
\end{center}
\begin{commentary}
The unconditional jump instructions all use PC-relative addressing to
help support position-independent code. The JALR instruction was
defined to enable a two-instruction sequence to jump anywhere in a
32-bit absolute address range. A LUI instruction can first load {\em
rs1} with the upper 20 bits of a target address, then JALR can add
in the lower bits. Similarly, AUIPC then JALR can jump
anywhere in a 32-bit {\tt pc}-relative address range.
Note that the JALR instruction does not treat the 12-bit immediate as
multiples of 2 bytes, unlike the conditional branch instructions.
This avoids one more immediate format in hardware. In
practice, most uses of JALR will have either a zero immediate or be
paired with a LUI or AUIPC, so the slight reduction in range is not
significant.
Clearing the least-significant bit when calculating the JALR target
address both simplifies the hardware slightly and allows the
low bit of function pointers to be used to store auxiliary
information. Although there is potentially a slight loss of error
checking in this case, in practice jumps to an incorrect instruction
address will usually quickly raise an exception.
When used with a base {\em rs1}$=${\tt x0}, JALR can be used to implement
a single instruction subroutine call to the lowest \wunits{2}{KiB} or highest
\wunits{2}{KiB} address region from anywhere in the address space, which could
be used to implement fast calls to a small runtime library.
\end{commentary}
The JAL and JALR instructions will generate a misaligned instruction
fetch exception if the target address is not aligned to a four-byte
boundary.
\begin{commentary}
Instruction fetch misaligned exceptions are not possible on machines
that support extensions with 16-bit aligned instructions, such as the
compressed instruction-set extension, C.
\end{commentary}
Return-address prediction stacks are a common feature of
high-performance instruction-fetch units, but require accurate
detection of instructions used for procedure calls and returns to be
effective. For RISC-V, hints as to the instructions' usage are encoded
implicitly via the register numbers used. A JAL instruction should
push the return address onto a return-address stack (RAS) only when
{\em rd}$=${\tt x1}/{\tt x5}. JALR instructions should push/pop a
RAS as shown in Table~\ref{rashints}.
\begin{table}[hbt]
\centering
\begin{tabular}{|c|c|c|l|}
\hline
\em rd & \em rs1 & {\em rs1}$=${\em rd} & RAS action \\
\hline
!{\em link} & !{\em link} & - & none \\
!{\em link} & {\em link} & - & pop \\
{\em link} & !{\em link} & - & push \\
{\em link} & {\em link} & 0 & pop, then push \\
{\em link} & {\em link} & 1 & push \\
\hline
\end{tabular}
\caption{Return-address stack prediction hints encoded in register
specifiers used in the instruction. In the above, {\em link} is
true when the register is either {\tt x1} or {\tt x5}.}
\label{rashints}
\end{table}
\begin{commentary}
Some other ISAs added explicit hint bits to their indirect-jump instructions
to guide return-address stack manipulation. We use implicit hinting tied to
register numbers and the calling convention to reduce the encoding space used
for these hints.
When two different link registers ({\tt x1} and {\tt x5}) are given as
{\em rs1} and {\em rd}, then the RAS is both popped and pushed to
support coroutines. If {\em rs1} and {\em rd} are the same link
register (either {\tt x1} or {\tt x5}), the RAS is only pushed to
enable macro-op fusion of the sequences:\linebreak
{\tt lui ra, imm20; jalr ra, imm12(ra)} \ and \
{\tt auipc ra, imm20; jalr ra, imm12(ra)}
\end{commentary}
\subsubsection*{Conditional Branches}
All branch instructions use the B-type instruction format. The
12-bit B-immediate encodes signed offsets in multiples of 2, and is
added to the current {\tt pc} to give the target address. The
conditional branch range is $\pm$\wunits{4}{KiB}.
\vspace{-0.2in}
\begin{center}
\begin{tabular}{W@{}R@{}F@{}F@{}R@{}R@{}F@{}S}
\\
\multicolumn{1}{c}{\instbit{31}} &
\instbitrange{30}{25} &
\instbitrange{24}{20} &
\instbitrange{19}{15} &
\instbitrange{14}{12} &
\instbitrange{11}{8} &
\multicolumn{1}{c}{\instbit{7}} &
\instbitrange{6}{0} \\
\hline
\multicolumn{1}{|c|}{imm[12]} &
\multicolumn{1}{c|}{imm[10:5]} &
\multicolumn{1}{c|}{rs2} &
\multicolumn{1}{c|}{rs1} &
\multicolumn{1}{c|}{funct3} &
\multicolumn{1}{c|}{imm[4:1]} &
\multicolumn{1}{c|}{imm[11]} &
\multicolumn{1}{c|}{opcode} \\
\hline
1 & 6 & 5 & 5 & 3 & 4 & 1 & 7 \\
\multicolumn{2}{c}{offset[12,10:5]} & src2 & src1 & BEQ/BNE & \multicolumn{2}{c}{offset[11,4:1]} & BRANCH \\
\multicolumn{2}{c}{offset[12,10:5]} & src2 & src1 & BLT[U] & \multicolumn{2}{c}{offset[11,4:1]} & BRANCH \\
\multicolumn{2}{c}{offset[12,10:5]} & src2 & src1 & BGE[U] & \multicolumn{2}{c}{offset[11,4:1]} & BRANCH \\
\end{tabular}
\end{center}
Branch instructions compare two registers. BEQ and BNE take the
branch if registers {\em rs1} and {\em rs2} are equal or unequal
respectively. BLT and BLTU take the branch if {\em rs1} is less than
{\em rs2}, using signed and unsigned comparison respectively. BGE and
BGEU take the branch if {\em rs1} is greater than or equal to {\em rs2},
using signed and unsigned comparison respectively. Note, BGT, BGTU,
BLE, and BLEU can be synthesized by reversing the operands to BLT,
BLTU, BGE, and BGEU, respectively.
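For example, an assembler can expand the BGT pseudoinstruction by reversing
the operands of BLT (an illustrative sketch; {\tt target} is a hypothetical
label):
\begin{verbatim}
    bgt   a0, a1, target     # pseudoinstruction
    blt   a1, a0, target     # equivalent base instruction, operands reversed
\end{verbatim}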
\begin{commentary}
Signed array bounds may be checked with a single BLTU instruction, since
any negative index will compare greater than any nonnegative bound.
\end{commentary}
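A minimal sketch of this idiom, assuming {\tt a0} holds a signed index,
{\tt a1} holds a non-negative bound, and {\tt fail} is a hypothetical label:
\begin{verbatim}
    bgeu  a0, a1, fail   # taken if index >= bound, or if index is negative,
                         # since a negative index compares as a huge unsigned value
\end{verbatim}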
Software should be optimized such that the sequential code path is the
most common path, with less-frequently taken code paths placed out of
line. Software should also assume that backward branches will be
predicted taken and forward branches as not taken, at least the
first time they are encountered. Dynamic predictors should quickly
learn any predictable branch behavior.
Unlike some other architectures, the RISC-V jump (JAL with {\em
rd}={\tt x0}) instruction should always be used for unconditional
branches instead of a conditional branch instruction with an always-true
condition. RISC-V jumps are also PC-relative and support a much
wider offset range than branches, and will not pressure conditional
branch prediction tables.
\begin{commentary}
The conditional branches were designed to include arithmetic
comparison operations between two registers (as also done in PA-RISC
and Xtensa ISA), rather than use condition codes (x86, ARM, SPARC,
PowerPC), or to only compare one register against zero (Alpha, MIPS),
or two registers only for equality (MIPS). This design was motivated
by the observation that a combined compare-and-branch instruction fits
into a regular pipeline, avoids additional condition code state or use
of a temporary register, and reduces static code size and dynamic
instruction fetch traffic. Another point is that comparisons against
zero require non-trivial circuit delay (especially after the move to
static logic in advanced processes) and so are almost as expensive as
arithmetic magnitude compares. Another advantage of a fused
compare-and-branch instruction is that branches are observed earlier
in the front-end instruction stream, and so can be predicted earlier.
There is perhaps an advantage to a design with condition codes in the
case where multiple branches can be taken based on the same condition
codes, but we believe this case to be relatively rare.
We considered but did not include static branch hints in the
instruction encoding. These can reduce the pressure on dynamic
predictors, but require more instruction encoding space and
software profiling for best results, and can result in poor
performance if production runs do not match profiling runs.
We considered but did not include conditional moves or predicated
instructions, which can effectively replace unpredictable short
forward branches. Conditional moves are the simpler of the two, but
are difficult to use with conditional code that might cause exceptions
(memory accesses and floating-point operations). Predication adds
additional flag state to a system, additional instructions to set and
clear flags, and additional encoding overhead on every instruction.
Both conditional move and predicated instructions add complexity to
out-of-order microarchitectures, adding an implicit third source
operand due to the need to copy the original value of the destination
architectural register into the renamed destination physical register
if the predicate is false. Also, static compile-time decisions to use
predication instead of branches can result in lower performance on
inputs not included in the compiler training set, especially given
that unpredictable branches are rare, and becoming rarer as branch
prediction techniques improve.
We note that various microarchitectural techniques exist to
dynamically convert unpredictable short forward branches into
internally predicated code to avoid the cost of flushing pipelines on
a branch mispredict~\cite{heil-tr1996,Klauser-1998,Kim-micro2005} and
have been implemented in commercial processors~\cite{ibmpower7}.
The simplest techniques just reduce the penalty of recovering from a
mispredicted short forward branch by only flushing instructions in the
branch shadow instead of the entire fetch pipeline, or by fetching
instructions from both sides using wide instruction fetch or idle
instruction fetch slots. More complex techniques for out-of-order
cores add internal predicates on instructions in the branch shadow,
with the internal predicate value written by the branch instruction,
allowing the branch and following instructions to be executed
speculatively and out-of-order with respect to other code~\cite{ibmpower7}.
\end{commentary}
\section{Load and Store Instructions}
RV32I is a load-store architecture, where only load and store
instructions access memory and arithmetic instructions only operate on
CPU registers. RV32I provides a 32-bit user address space that is
byte-addressed and little-endian. The execution environment will
define what portions of the address space are legal to access. Loads
with a destination of {\tt x0} must still raise any exceptions and
cause any other side effects even though the load value is discarded.
\vspace{-0.4in}
\begin{center}
\begin{tabular}{M@{}R@{}F@{}R@{}O}
\\
\instbitrange{31}{20} &
\instbitrange{19}{15} &
\instbitrange{14}{12} &
\instbitrange{11}{7} &
\instbitrange{6}{0} \\
\hline
\multicolumn{1}{|c|}{imm[11:0]} &
\multicolumn{1}{c|}{rs1} &
\multicolumn{1}{c|}{funct3} &
\multicolumn{1}{c|}{rd} &
\multicolumn{1}{c|}{opcode} \\
\hline
12 & 5 & 3 & 5 & 7 \\
offset[11:0] & base & width & dest & LOAD \\
\end{tabular}
\end{center}
\vspace{-0.2in}
\begin{center}
\begin{tabular}{O@{}R@{}R@{}F@{}R@{}O}
\\
\instbitrange{31}{25} &
\instbitrange{24}{20} &
\instbitrange{19}{15} &
\instbitrange{14}{12} &
\instbitrange{11}{7} &
\instbitrange{6}{0} \\
\hline
\multicolumn{1}{|c|}{imm[11:5]} &
\multicolumn{1}{c|}{rs2} &
\multicolumn{1}{c|}{rs1} &
\multicolumn{1}{c|}{funct3} &
\multicolumn{1}{c|}{imm[4:0]} &
\multicolumn{1}{c|}{opcode} \\
\hline
7 & 5 & 5 & 3 & 5 & 7 \\
offset[11:5] & src & base & width & offset[4:0] & STORE \\
\end{tabular}
\end{center}
Load and store instructions transfer a value between the registers and
memory. Loads are encoded in the I-type format and stores are
S-type. The effective byte address is obtained by adding register
{\em rs1} to the sign-extended 12-bit offset. Loads copy a value
from memory to register {\em rd}. Stores copy the value in register
{\em rs2} to memory.
The LW instruction loads a 32-bit value from memory into {\em rd}. LH
loads a 16-bit value from memory, then sign-extends to 32-bits before
storing in {\em rd}. LHU loads a 16-bit value from memory but then
zero extends to 32-bits before storing in {\em rd}. LB and LBU are
defined analogously for 8-bit values. The SW, SH, and SB instructions
store 32-bit, 16-bit, and 8-bit values from the low bits of register
{\em rs2} to memory.
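For illustration, assuming standard assembler syntax with the base address
in {\tt x10} (offsets and registers are arbitrary):
\begin{verbatim}
    lw   x5, 8(x10)     # load the 32-bit word at address x10+8 into x5
    lhu  x6, 2(x10)     # load the halfword at x10+2, zero-extend into x6
    sb   x6, 0(x10)     # store the low byte of x6 to address x10+0
\end{verbatim}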
For best performance, the effective address for all loads and stores
should be naturally aligned for each data type (i.e., on a four-byte
boundary for 32-bit accesses, and a two-byte boundary for 16-bit
accesses). The base ISA supports misaligned accesses, but these might
run extremely slowly depending on the implementation. Furthermore,
naturally aligned loads and stores are guaranteed to execute
atomically, whereas misaligned loads and stores might not, and hence
require additional synchronization to ensure atomicity.
\begin{commentary}
Misaligned accesses are occasionally required when porting legacy
code, and are essential for good performance on many applications when
using any form of packed-SIMD extension. Our rationale for supporting
misaligned accesses via the regular load and store instructions is to
simplify the addition of misaligned hardware support. One option
would have been to disallow misaligned accesses in the base ISA and
then provide some separate ISA support for misaligned accesses, either
special instructions to help software handle misaligned accesses or a
new hardware addressing mode for misaligned accesses. Special
instructions are difficult to use, complicate the ISA, and often add
new processor state (e.g., SPARC VIS align address offset register) or
complicate access to existing processor state (e.g., MIPS LWL/LWR
partial register writes). In addition, for loop-oriented packed-SIMD
code, the extra overhead when operands are misaligned motivates
software to provide multiple forms of loop depending on operand
alignment, which complicates code generation and adds to loop startup
overhead. New misaligned hardware addressing modes take considerable
space in the instruction encoding or require very simplified
addressing modes (e.g., register indirect only).
We do not mandate atomicity for misaligned accesses so simple
implementations can just use a machine trap and software handler to
handle some or all misaligned accesses. If hardware misaligned support is
provided, software can exploit this by simply using regular load and
store instructions. Hardware can then automatically optimize accesses
depending on whether runtime addresses are aligned.
\end{commentary}
\section{Control and Status Register Instructions}
\label{sec:csrinsts}
SYSTEM instructions are used to access system functionality that might
require privileged access and are encoded using the I-type instruction
format. These can be divided into two main classes: those that
atomically read-modify-write control and status registers (CSRs), and
all other potentially privileged instructions. CSR instructions are
described in this section, with the two other user-level SYSTEM
instructions described in the following section.
\begin{commentary}
The SYSTEM instructions are defined to allow simpler implementations
to always trap to a single software trap handler. More sophisticated
implementations might execute more of each system instruction in
hardware.
\end{commentary}
\subsubsection*{CSR Instructions}
We define the full set of CSR instructions here, although in the standard
user-level base ISA, only a handful of read-only counter CSRs are accessible.
\vspace{-0.2in}
\begin{center}
\begin{tabular}{M@{}R@{}F@{}R@{}S}
\\
\instbitrange{31}{20} &
\instbitrange{19}{15} &
\instbitrange{14}{12} &
\instbitrange{11}{7} &
\instbitrange{6}{0} \\
\hline
\multicolumn{1}{|c|}{csr} &
\multicolumn{1}{c|}{rs1} &
\multicolumn{1}{c|}{funct3} &
\multicolumn{1}{c|}{rd} &
\multicolumn{1}{c|}{opcode} \\
\hline
12 & 5 & 3 & 5 & 7 \\
source/dest & source & CSRRW & dest & SYSTEM \\
source/dest & source & CSRRS & dest & SYSTEM \\
source/dest & source & CSRRC & dest & SYSTEM \\
source/dest & uimm[4:0] & CSRRWI & dest & SYSTEM \\
source/dest & uimm[4:0] & CSRRSI & dest & SYSTEM \\
source/dest & uimm[4:0] & CSRRCI & dest & SYSTEM \\
\end{tabular}
\end{center}
The CSRRW (Atomic Read/Write CSR) instruction atomically swaps values
in the CSRs and integer registers. CSRRW reads the old value of the
CSR, zero-extends the value to XLEN bits, then writes it to integer
register {\em rd}. The initial value in {\em rs1} is written to the
CSR. If {\em rd}={\tt x0}, then the instruction shall not read the CSR
and shall not cause any of the side-effects that might occur on a CSR
read.
The CSRRS (Atomic Read and Set Bits in CSR) instruction reads the
value of the CSR, zero-extends the value to XLEN bits, and writes it
to integer register {\em rd}. The initial value in integer register
{\em rs1} is treated as a bit mask that specifies bit positions to be
set in the CSR. Any bit that is high in {\em rs1} will cause the
corresponding bit to be set in the CSR, if that CSR bit is writable.
Other bits in the CSR are unaffected (though CSRs might have side
effects when written).
The CSRRC (Atomic Read and Clear Bits in CSR) instruction reads the
value of the CSR, zero-extends the value to XLEN bits, and writes it
to integer register {\em rd}. The initial value in integer register
{\em rs1} is treated as a bit mask that specifies bit positions to be
cleared in the CSR. Any bit that is high in {\em rs1} will cause the
corresponding bit to be cleared in the CSR, if that CSR bit is
writable. Other bits in the CSR are unaffected.
For both CSRRS and CSRRC, if {\em rs1}={\tt x0}, then the instruction
will not write to the CSR at all, and so shall not cause any of the
side effects that might otherwise occur on a CSR write, such as
raising illegal instruction exceptions on accesses to read-only CSRs.
Note that if {\em rs1} specifies a register holding a zero value other
than {\tt x0}, the instruction will still attempt to write the
unmodified value back to the CSR and will cause any attendant side effects.
The CSRRWI, CSRRSI, and CSRRCI variants are similar to CSRRW, CSRRS,
and CSRRC respectively, except they update the CSR using an XLEN-bit
value obtained by zero-extending a 5-bit unsigned immediate (uimm[4:0]) field
encoded in the {\em rs1} field instead of a value from an integer
register. For CSRRSI and CSRRCI, if the uimm[4:0] field is zero, then
these instructions will not write to the CSR, and shall not cause any
of the side effects that might otherwise occur on a CSR write. For
CSRRWI, if {\em rd}={\tt x0}, then the instruction shall not read the
CSR and shall not cause any of the side-effects that might occur on a
CSR read.
Some CSRs, such as the instructions retired counter, {\tt instret}, may be
modified as side effects of instruction execution. In these cases, if a CSR
access instruction reads a CSR, it reads the value prior to the execution of
the instruction. If a CSR access instruction writes a CSR, the update occurs
after the execution of the instruction. In particular, a value written to
{\tt instret} by one instruction will be the value read by the following
instruction (i.e., the increment of {\tt instret} caused by the first
instruction retiring happens before the write of the new value).
The assembler pseudoinstruction to read a CSR, CSRR {\em rd, csr}, is
encoded as CSRRS {\em rd, csr, x0}. The assembler pseudoinstruction
to write a CSR, CSRW {\em csr, rs1}, is encoded as CSRRW {\em x0, csr,
rs1}, while CSRWI {\em csr, uimm}, is encoded as CSRRWI {\em x0,
csr, uimm}.
Further assembler pseudoinstructions are defined to set and clear
bits in the CSR when the old value is not required: CSRS/CSRC {\em
csr, rs1}; CSRSI/CSRCI {\em csr, uimm}.
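For illustration, assuming standard assembler syntax ({\tt csr} below stands
for any accessible CSR):
\begin{verbatim}
    csrr   t0, instret     # expands to: csrrs  t0, instret, x0
    csrw   csr, t1         # expands to: csrrw  x0, csr, t1
    csrci  csr, 0x1        # expands to: csrrci x0, csr, 0x1
\end{verbatim}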
\subsubsection*{Timers and Counters}
\vspace{-0.2in}
\begin{center}
\begin{tabular}{M@{}R@{}F@{}R@{}S}
\\
\instbitrange{31}{20} &
\instbitrange{19}{15} &
\instbitrange{14}{12} &
\instbitrange{11}{7} &
\instbitrange{6}{0} \\
\hline
\multicolumn{1}{|c|}{csr} &
\multicolumn{1}{c|}{rs1} &
\multicolumn{1}{c|}{funct3} &
\multicolumn{1}{c|}{rd} &
\multicolumn{1}{c|}{opcode} \\
\hline
12 & 5 & 3 & 5 & 7 \\
RDCYCLE[H] & 0 & CSRRS & dest & SYSTEM \\
RDTIME[H] & 0 & CSRRS & dest & SYSTEM \\
RDINSTRET[H] & 0 & CSRRS & dest & SYSTEM \\
\end{tabular}
\end{center}
RV32I provides a number of 64-bit read-only user-level counters, which
are mapped into the 12-bit CSR address space and accessed in 32-bit
pieces using CSRRS instructions.
\begin{commentary}
Some execution environments might prohibit access to counters to
impede timing side-channel attacks.
\end{commentary}
The RDCYCLE pseudoinstruction reads the low XLEN bits of the {\tt
cycle} CSR which holds a count of the number of clock cycles
executed by the processor core on which the hart is running from
an arbitrary start time in the past. RDCYCLEH is
an RV32I-only instruction that reads bits 63--32 of the same cycle
counter. The underlying 64-bit counter should never overflow in
practice. The rate at which the cycle counter advances will depend on
the implementation and operating environment. The execution
environment should provide a means to determine the current rate
(cycles/second) at which the cycle counter is incrementing.
\begin{commentary}
RDCYCLE is intended to return the number of cycles executed by the
processor core, not the hart. Precisely defining what is a ``core'' is
difficult given some implementation choices (e.g., AMD Bulldozer).
Precisely defining what is a ``clock cycle'' is also difficult given the
range of implementations (including software emulations), but the
intent is that RDCYCLE is used for performance monitoring along with the
other performance counters. In particular, where there is one
hart/core, one would expect cycle-count/instructions-retired to
measure CPI for a hart.
Cores don't have to be exposed to software at all, and an implementor
might choose to pretend multiple harts on one physical core are
running on separate cores with one hart/core, and provide separate
cycle counters for each hart. This might make sense in a simple
barrel processor (e.g., CDC 6600 peripheral processors) where
inter-hart timing interactions are non-existent or minimal.
Where there is more than one hart/core and dynamic multithreading, it
is not generally possible to separate out cycles per hart (especially
with SMT). It might be possible to define a separate performance
counter that tried to capture the number of cycles a particular hart
was running, but this definition would have to be very fuzzy to cover
all the possible threading implementations. For example, should we
only count cycles for which any instruction was issued to execution
for this hart, and/or cycles any instruction retired, or include
cycles this hart was occupying machine resources but couldn't execute
due to stalls while other harts went into execution? Likely, ``all of
the above'' would be needed to have understandable performance stats.
This complexity of defining a per-hart cycle count, and also the need
in any case for a total per-core cycle count when tuning multithreaded
code led to just standardizing the per-core cycle counter, which also
happens to work well for the common single hart/core case.
Standardizing what happens during ``sleep'' is not practical given
that what ``sleep'' means is not standardized across execution
environments, but if the entire core is paused (entirely clock-gated
or powered-down in deep sleep), then it is not executing clock cycles,
and the cycle count shouldn't be increasing per the spec. There are
many details, e.g., whether clock cycles required to reset a processor
after waking up from a power-down event should be counted, and these
are considered execution-environment-specific details.
Even though there is no precise definition that works for all
platforms, this is still a useful facility for most platforms, and an
imprecise, common, ``usually correct'' standard here is better than no
standard. The intent of RDCYCLE was primarily performance
monitoring/tuning, and the specification was written with that goal in
mind.
\end{commentary}
The RDTIME pseudoinstruction reads the low XLEN bits of the {\tt
time} CSR, which counts wall-clock real time that has passed from an
arbitrary start time in the past. RDTIMEH is an RV32I-only instruction
that reads bits 63--32 of the same real-time counter. The underlying 64-bit
counter should never overflow in practice. The execution environment
should provide a means of determining the period of the real-time
counter (seconds/tick). The period must be constant. The
real-time clocks of all harts in a single user application
should be synchronized to within one tick of the real-time clock. The
environment should provide a means to determine the accuracy of the
clock.
\begin{commentary}
On some simple platforms, cycle count might represent a valid
implementation of RDTIME, but in this case, platforms should implement
the RDTIME instruction as an alias for RDCYCLE to make code more
portable, rather than using RDCYCLE to measure wall-clock time.
\end{commentary}
The RDINSTRET pseudoinstruction reads the low XLEN bits of the {\tt
instret} CSR, which counts the number of instructions retired by
this hart from some arbitrary start point in the past. RDINSTRETH is
an RV32I-only instruction that reads bits 63--32 of the same
instruction counter. The underlying 64-bit counter should never
overflow in practice.
The following code sequence will read a valid 64-bit cycle counter value into
{\tt x3}:{\tt x2}, even if the counter overflows between reading its upper
and lower halves.
\begin{figure}[h!]
\begin{center}
\begin{verbatim}
again:
rdcycleh x3
rdcycle x2
rdcycleh x4
bne x3, x4, again
\end{verbatim}
\end{center}
\caption{Sample code for reading the 64-bit cycle counter in RV32.}
\label{rdcycle}
\end{figure}
\begin{commentary}
We would like these basic counters to be provided in all implementations as
they are essential for basic performance analysis, adaptive and
dynamic optimization, and to allow an application to work with
real-time streams. Additional counters should be provided to help
diagnose performance problems and these should be made accessible from
user-level application code with low overhead.
We required the counters be 64 bits wide, even on RV32, as otherwise
it is very difficult for software to determine if values have
overflowed. For a low-end implementation, the upper 32 bits of each
counter can be implemented using software counters incremented by a
trap handler triggered by overflow of the lower 32 bits. The sample
code described above shows how the full 64-bit width value can be
safely read using the individual 32-bit instructions.
In some applications, it is important to be able to read multiple
counters at the same instant in time. When run under a multitasking
environment, a user thread can suffer a context switch while
attempting to read the counters. One solution is for the user thread
to read the real-time counter before and after reading the other
counters to determine if a context switch occurred in the middle of the
sequence, in which case the reads can be retried. We considered
adding output latches to allow a user thread to snapshot the counter
values atomically, but this would increase the size of the user
context, especially for implementations with a richer set of counters.
\end{commentary}
\section{Environment Call and Breakpoints}
\vspace{-0.2in}
\begin{center}
\begin{tabular}{M@{}R@{}F@{}R@{}S}
\\
\instbitrange{31}{20} &
\instbitrange{19}{15} &
\instbitrange{14}{12} &
\instbitrange{11}{7} &
\instbitrange{6}{0} \\
\hline
\multicolumn{1}{|c|}{funct12} &
\multicolumn{1}{c|}{rs1} &
\multicolumn{1}{c|}{funct3} &
\multicolumn{1}{c|}{rd} &
\multicolumn{1}{c|}{opcode} \\
\hline
12 & 5 & 3 & 5 & 7 \\
ECALL & 0 & PRIV & 0 & SYSTEM \\
EBREAK & 0 & PRIV & 0 & SYSTEM \\
\end{tabular}
\end{center}
The ECALL instruction is used to make a request to the supporting
execution environment, which is usually an operating system. The ABI
for the system will define how parameters for the environment request
are passed, but usually these will be in defined locations in the
integer register file.
The EBREAK instruction is used by debuggers to cause control to be
transferred back to a debugging environment.
\begin{commentary}
ECALL and EBREAK were previously named SCALL and SBREAK. The
instructions have the same functionality and encoding, but were
renamed to reflect that they can be used more generally than to call a
supervisor-level operating system or debugger.
\end{commentary}
\section{Memory Ordering Instructions}
\label{sec:fence}
\vspace{-0.2in}
\begin{center}
\begin{tabular}{F@{}IIIIIIIIF@{}F@{}F@{}S}
\\
\instbitrange{31}{28} &
\multicolumn{1}{c}{\instbit{27}} &
\multicolumn{1}{c}{\instbit{26}} &
\multicolumn{1}{c}{\instbit{25}} &
\multicolumn{1}{c}{\instbit{24}} &
\multicolumn{1}{c}{\instbit{23}} &
\multicolumn{1}{c}{\instbit{22}} &
\multicolumn{1}{c}{\instbit{21}} &
\multicolumn{1}{c}{\instbit{20}} &
\instbitrange{19}{15} &
\instbitrange{14}{12} &
\instbitrange{11}{7} &
\instbitrange{6}{0} \\
\hline
\multicolumn{1}{|c|}{fm} &
\multicolumn{1}{c|}{PI} &
\multicolumn{1}{c|}{PO} &
\multicolumn{1}{c|}{PR} &
\multicolumn{1}{c|}{PW} &
\multicolumn{1}{|c|}{SI} &
\multicolumn{1}{c|}{SO} &
\multicolumn{1}{c|}{SR} &
\multicolumn{1}{c|}{SW} &
\multicolumn{1}{c|}{rs1} &
\multicolumn{1}{c|}{funct3} &
\multicolumn{1}{c|}{rd} &
\multicolumn{1}{c|}{opcode} \\
\hline
4 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 5 & 3 & 5 & 7 \\
FM & \multicolumn{4}{c}{predecessor} & \multicolumn{4}{c}{successor} & 0 & FENCE & 0 & MISC-MEM \\
\end{tabular}
\end{center}
The FENCE instruction is used to order device I/O and
memory accesses as viewed by other RISC-V harts and external devices
or coprocessors. Any combination of device input (I), device output
(O), memory reads (R), and memory writes (W) may be ordered with
respect to any combination of the same. Informally, no other RISC-V
hart or external device can observe any operation in the {\em
successor} set following a FENCE before any operation in the {\em
predecessor} set preceding the FENCE.
Chapter~\ref{ch:memorymodel} provides a precise description of the
RISC-V memory consistency model.
The execution environment
will define what I/O operations are possible, and in particular, which
load and store instructions might be treated and ordered as device
input and device output operations respectively rather than memory
reads and writes. For example, memory-mapped I/O devices will
typically be accessed with uncached loads and stores that are ordered
using the I and O bits rather than the R and W bits. Instruction-set
extensions might also describe new coprocessor I/O instructions that
will also be ordered using the I and O bits in a FENCE.
\begin{table}[htp]
\begin{small}
\begin{center}
\begin{tabular}{|c|c|l|}
\hline
{\em fm} field & Mnemonic & Meaning \\
\hline
0000 & \em none & Normal Fence \\
\hline
\multirow{2}{*}{1000} & \multirow{2}{*}{TSO} & With FENCE RW,RW: exclude write-to-read ordering \\
& & Otherwise: \em Reserved for future use. \\
\hline
\multicolumn{2}{|c|}{\em other} & \em Reserved for future use. \\
\hline
\end{tabular}
\end{center}
\end{small}
\caption{Fence mode encoding.}
\label{fm}
\end{table}
The fence mode field {\em fm} defines the semantics of the FENCE.
A FENCE with {\em fm}=0000 orders all memory operations in its predecessor set before all memory operations in its successor set.
A FENCE.TSO instruction orders all load operations in its predecessor set before all memory operations in its successor set, and all store operations in its predecessor set before all store operations in its successor set.
This leaves non-AMO store operations in the FENCE.TSO's predecessor set unordered with non-AMO loads in its successor set.
The unused fields in the FENCE instructions---{\em rs1} and {\em rd}---are
reserved for finer-grain fences in future extensions. For forward
compatibility, base implementations shall ignore these fields, and standard
software shall zero these fields. Likewise, many {\em fm} and
predecessor/successor set settings in Table~\ref{fm} are also reserved
for future use. Base implementations shall treat all such reserved
configurations as normal fences with {\em fm}=0000, and standard
software shall use only non-reserved configurations.
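As an illustration of the predecessor and successor sets, the following
message-passing sketch orders a data store before a flag store on the
producer side, and a flag load before a data load on the consumer side
(register assignments and addresses are hypothetical):
\begin{verbatim}
    # producer
    sw    a1, 0(a0)     # store the data
    fence w, w          # order the data store before the flag store
    sw    a2, 4(a0)     # store the flag

    # consumer
    lw    t0, 4(a0)     # load the flag
    fence r, r          # order the flag load before the data load
    lw    t1, 0(a0)     # load the data
\end{verbatim}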
\begin{commentary}
We chose a relaxed memory model to allow high performance from simple
machine implementations and from likely future
coprocessor or accelerator extensions. We separate out I/O ordering
from memory R/W ordering to avoid unnecessary serialization within a
device-driver hart and also to support alternative non-memory paths
to control added coprocessors or I/O devices. Simple implementations
may additionally ignore the {\em predecessor} and {\em successor}
fields and always execute a conservative fence on all operations.
\end{commentary}
\vspace{-0.4in}
\begin{center}
\begin{tabular}{M@{}R@{}S@{}R@{}O}
\\
\instbitrange{31}{20} &
\instbitrange{19}{15} &
\instbitrange{14}{12} &
\instbitrange{11}{7} &
\instbitrange{6}{0} \\
\hline
\multicolumn{1}{|c|}{imm[11:0]} &
\multicolumn{1}{c|}{rs1} &
\multicolumn{1}{c|}{funct3} &
\multicolumn{1}{c|}{rd} &
\multicolumn{1}{c|}{opcode} \\
\hline
12 & 5 & 3 & 5 & 7 \\
0 & 0 & FENCE.I & 0 & MISC-MEM \\
\end{tabular}
\end{center}
The FENCE.I instruction is used to synchronize the instruction and
data streams. RISC-V does not guarantee that stores to instruction
memory will be made visible to instruction fetches on the same RISC-V
hart until a FENCE.I instruction is executed. A FENCE.I instruction
only ensures that a subsequent instruction fetch on a RISC-V hart
will see any previous data stores already visible to the same RISC-V
hart. FENCE.I does {\em not} ensure that other RISC-V harts'
instruction fetches will observe the local hart's stores in a
multiprocessor system. To make a store to instruction memory visible
to all RISC-V harts, the writing hart has to execute a data FENCE
before requesting that all remote RISC-V harts execute a FENCE.I.
The unused fields in the FENCE.I instruction, {\em imm[11:0]}, {\em rs1}, and
{\em rd}, are reserved for finer-grain fences in future extensions. For
forward compatibility, base implementations shall ignore these fields, and
standard software shall zero these fields.
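A minimal single-hart sketch, assuming the hart that writes the new
instruction word is the one that will execute it:
\begin{verbatim}
    sw      t1, 0(t0)   # store the new instruction word at address t0
    fence.i             # synchronize this hart's instruction and data streams
    jalr    ra, 0(t0)   # execute the newly written code
\end{verbatim}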
\begin{commentary}
Because FENCE.I only orders stores with a hart's own instruction fetches,
application code should only rely upon FENCE.I if the application thread will
not be migrated to a different hart. The ABI will provide mechanisms for
multiprocessor instruction-stream synchronization.
\end{commentary}
\begin{commentary}
The FENCE.I instruction was designed to support a wide variety of
implementations. A simple implementation can flush the local
instruction cache and the instruction pipeline when the FENCE.I is
executed. A more complex implementation might snoop the instruction
(data) cache on every data (instruction) cache miss, or use an
inclusive unified private L2 cache to invalidate lines from the
primary instruction cache when they are being written by a local store
instruction. If instruction and data caches are kept coherent in this
way, then only the pipeline needs to be flushed at a FENCE.I.
We considered but did not include a ``store instruction word''
instruction (as in MAJC~\cite{majc}). JIT compilers may generate a
large trace of instructions before a single FENCE.I, and amortize any
instruction cache snooping/invalidation overhead by writing translated
instructions to memory regions that are known not to reside in the
I-cache.
\end{commentary}
\section{HINT Instructions}
\label{sec:rv32i-hints}
RV32I reserves a large encoding space for HINT instructions, which are
usually used to communicate performance hints to the
microarchitecture. HINTs are encoded as integer computational
instructions with {\em rd}={\tt x0}. Hence, like the NOP instruction,
HINTs do not change any architecturally visible state, except for
advancing the {\tt pc} and any applicable performance counters.
Implementations are always allowed to ignore the encoded hints.
\begin{commentary}
This HINT encoding has been chosen so that simple implementations can ignore
HINTs altogether, and instead execute a HINT as a regular computational
instruction that happens not to mutate the architectural state. For example, ADD is
a HINT if the destination register is {\tt x0}; the five-bit {\em rs1} and {\em
rs2} fields encode arguments to the HINT. However, a simple implementation can
simply execute the HINT as an ADD of {\em rs1} and {\em rs2} that writes {\tt
x0}, which has no architecturally visible effect.
\end{commentary}
Table~\ref{tab:rv32i-hints} lists all RV32I HINT code points. 91\% of the HINT
space is reserved for standard HINTs, but none are presently defined. The
remainder of the HINT space is reserved for custom HINTs: no standard HINTs
will ever be defined in this subspace.
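For illustration, the following code points follow from
Table~\ref{tab:rv32i-hints} (all execute as no-ops):
\begin{verbatim}
    addi  x0, x0, 0     # the canonical NOP (not a HINT code point)
    addi  x0, t0, 1     # a code point reserved for standard HINTs
    slti  x0, t0, 1     # a code point reserved for custom HINTs
\end{verbatim}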
\begin{commentary}
No standard hints are presently defined (except the privileged WFI
instruction which uses a separately reserved encoding). We anticipate
standard hints to eventually include memory-system spatial and
temporal locality hints, branch prediction hints, thread-scheduling
hints, security tags, and instrumentation flags for
simulation/emulation.
\end{commentary}
\begin{table}[hbt]
\centering
\begin{tabular}{|l|l|c|l|}
\hline
Instruction & Constraints & Code Points & Purpose \\ \hline \hline
LUI & {\em rd}={\tt x0} & $2^{20}$ & \multirow{15}{*}{\em Reserved for future standard use} \\ \cline{1-3}
AUIPC & {\em rd}={\tt x0} & $2^{20}$ & \\ \cline{1-3}
\multirow{2}{*}{ADDI} & {\em rd}={\tt x0}, and either & \multirow{2}{*}{$2^{17}-1$} & \\
& {\em rs1}$\neq${\tt x0} or {\em imm}$\neq$0 & & \\ \cline{1-3}
ANDI & {\em rd}={\tt x0} & $2^{17}$ & \\ \cline{1-3}
ORI & {\em rd}={\tt x0} & $2^{17}$ & \\ \cline{1-3}
XORI & {\em rd}={\tt x0} & $2^{17}$ & \\ \cline{1-3}
ADD & {\em rd}={\tt x0} & $2^{10}$ & \\ \cline{1-3}
SUB & {\em rd}={\tt x0} & $2^{10}$ & \\ \cline{1-3}
AND & {\em rd}={\tt x0} & $2^{10}$ & \\ \cline{1-3}
OR & {\em rd}={\tt x0} & $2^{10}$ & \\ \cline{1-3}
XOR & {\em rd}={\tt x0} & $2^{10}$ & \\ \cline{1-3}
SLL & {\em rd}={\tt x0} & $2^{10}$ & \\ \cline{1-3}
SRL & {\em rd}={\tt x0} & $2^{10}$ & \\ \cline{1-3}
SRA & {\em rd}={\tt x0} & $2^{10}$ & \\ \hline \hline
SLTI & {\em rd}={\tt x0} & $2^{17}$ & \multirow{7}{*}{\em Reserved for custom use} \\ \cline{1-3}
SLTIU & {\em rd}={\tt x0} & $2^{17}$ & \\ \cline{1-3}
SLLI & {\em rd}={\tt x0} & $2^{10}$ & \\ \cline{1-3}
SRLI & {\em rd}={\tt x0} & $2^{10}$ & \\ \cline{1-3}
SRAI & {\em rd}={\tt x0} & $2^{10}$ & \\ \cline{1-3}
SLT & {\em rd}={\tt x0} & $2^{10}$ & \\ \cline{1-3}
SLTU & {\em rd}={\tt x0} & $2^{10}$ & \\ \hline
\end{tabular}
\caption{RV32I HINT instructions.}
\label{tab:rv32i-hints}
\end{table}
|
import Data.Vect
import Data.Vect.Elem
data Typ : Type where
TLam : Typ -> Typ -> Typ
TNat : Typ
data Term : Typ -> Vect len Typ -> Type where
Var : Elem a ctx -> Term a ctx
Lam : Term b (a :: ctx) -> Term (TLam a b) ctx
Fix : Term a (a :: ctx) -> Term a ctx
lookup : Vect len Typ -> Fin len -> Typ
lookup (a :: ctx) FZ = a
lookup (_ :: ctx) (FS n) = lookup ctx n
count : {ctx : Vect len Typ} -> (n : Fin len) -> Elem (lookup ctx n) ctx
count {ctx = _ :: ctx} FZ = Here
count {ctx = _ :: ctx} (FS n) = There (count n)
segfaults : {len : _} -> {ctx : Vect len Typ} ->
Term (TLam TNat TNat) ctx
segfaults = Fix (Lam (Var (count 0)))
cycleDetected : {len : _} -> {ctx : Vect len Typ} ->
Term (TLam TNat TNat) ctx
cycleDetected = Fix (Var (count 0))
|
Credits lifted from the album's liner notes.
|
lemma continuous_finite_range_constant: fixes f :: "'a::topological_space \<Rightarrow> 'b::real_normed_algebra_1" assumes "connected S" and "continuous_on S f" and "finite (f ` S)" shows "f constant_on S"
|
C
C file hstplr.f
C
SUBROUTINE HSTPLR (A,B,M,MBDCND,BDA,BDB,C,D,N,NBDCND,BDC,BDD,
1 ELMBDA,F,IDIMF,PERTRB,IERROR,W)
C
C * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
C * *
C * copyright (c) 1999 by UCAR *
C * *
C * UNIVERSITY CORPORATION for ATMOSPHERIC RESEARCH *
C * *
C * all rights reserved *
C * *
C * FISHPACK version 4.1 *
C * *
C * A PACKAGE OF FORTRAN SUBPROGRAMS FOR THE SOLUTION OF *
C * *
C * SEPARABLE ELLIPTIC PARTIAL DIFFERENTIAL EQUATIONS *
C * *
C * BY *
C * *
C * JOHN ADAMS, PAUL SWARZTRAUBER AND ROLAND SWEET *
C * *
C * OF *
C * *
C * THE NATIONAL CENTER FOR ATMOSPHERIC RESEARCH *
C * *
C * BOULDER, COLORADO (80307) U.S.A. *
C * *
C * WHICH IS SPONSORED BY *
C * *
C * THE NATIONAL SCIENCE FOUNDATION *
C * *
C * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
C
C
C
C DIMENSION OF BDA(N),BDB(N),BDC(M),BDD(M),F(IDIMF,N),
C ARGUMENTS W(SEE ARGUMENT LIST)
C
C LATEST REVISION NOVEMBER 1988
C
C PURPOSE SOLVES THE STANDARD FIVE-POINT FINITE
C DIFFERENCE APPROXIMATION ON A STAGGERED
C GRID TO THE HELMHOLTZ EQUATION IN POLAR
C COORDINATES. THE EQUATION IS
C
C (1/R)(D/DR)(R(DU/DR)) +
C (1/R**2)(D/DTHETA)(DU/DTHETA) +
C LAMBDA*U = F(R,THETA)
C
C USAGE CALL HSTPLR (A,B,M,MBDCND,BDA,BDB,C,D,N,
C NBDCND,BDC,BDD,ELMBDA,F,
C IDIMF,PERTRB,IERROR,W)
C
C ARGUMENTS
C ON INPUT A,B
C
C THE RANGE OF R, I.E. A .LE. R .LE. B.
C A MUST BE LESS THAN B AND A MUST BE
C NON-NEGATIVE.
C
C M
C THE NUMBER OF GRID POINTS IN THE INTERVAL
C (A,B). THE GRID POINTS IN THE R-DIRECTION
C ARE GIVEN BY R(I) = A + (I-0.5)DR FOR
C I=1,2,...,M WHERE DR =(B-A)/M.
C M MUST BE GREATER THAN 2.
C
C MBDCND
C INDICATES THE TYPE OF BOUNDARY CONDITIONS
C AT R = A AND R = B.
C
C = 1 IF THE SOLUTION IS SPECIFIED AT R = A
C AND R = B.
C
C = 2 IF THE SOLUTION IS SPECIFIED AT R = A
C AND THE DERIVATIVE OF THE SOLUTION
C WITH RESPECT TO R IS SPECIFIED AT R = B.
C (SEE NOTE 1 BELOW)
C
C = 3 IF THE DERIVATIVE OF THE SOLUTION
C WITH RESPECT TO R IS SPECIFIED AT
C R = A (SEE NOTE 2 BELOW) AND R = B.
C
C = 4 IF THE DERIVATIVE OF THE SOLUTION
C WITH RESPECT TO R IS SPECIFIED AT
C                    R = A (SEE NOTE 2 BELOW)
C AND THE SOLUTION IS SPECIFIED AT R = B.
C
C
C = 5 IF THE SOLUTION IS UNSPECIFIED AT
C R = A = 0 AND THE SOLUTION IS
C SPECIFIED AT R = B.
C
C = 6 IF THE SOLUTION IS UNSPECIFIED AT
C R = A = 0 AND THE DERIVATIVE OF THE
C SOLUTION WITH RESPECT TO R IS SPECIFIED
C AT R = B.
C
C NOTE 1:
C IF A = 0, MBDCND = 2, AND NBDCND = 0 OR 3,
C THE SYSTEM OF EQUATIONS TO BE SOLVED IS
C SINGULAR. THE UNIQUE SOLUTION IS
C                    DETERMINED BY EXTRAPOLATION TO THE
C SPECIFICATION OF U(0,THETA(1)).
C BUT IN THIS CASE THE RIGHT SIDE OF THE
C SYSTEM WILL BE PERTURBED BY THE CONSTANT
C PERTRB.
C
C NOTE 2:
C IF A = 0, DO NOT USE MBDCND = 3 OR 4,
C BUT INSTEAD USE MBDCND = 1,2,5, OR 6.
C
C BDA
C A ONE-DIMENSIONAL ARRAY OF LENGTH N THAT
C SPECIFIES THE BOUNDARY VALUES (IF ANY) OF
C THE SOLUTION AT R = A.
C
C WHEN MBDCND = 1 OR 2,
C BDA(J) = U(A,THETA(J)) , J=1,2,...,N.
C
C WHEN MBDCND = 3 OR 4,
C BDA(J) = (D/DR)U(A,THETA(J)) ,
C J=1,2,...,N.
C
C WHEN MBDCND = 5 OR 6, BDA IS A DUMMY
C VARIABLE.
C
C BDB
C A ONE-DIMENSIONAL ARRAY OF LENGTH N THAT
C SPECIFIES THE BOUNDARY VALUES OF THE
C SOLUTION AT R = B.
C
C WHEN MBDCND = 1,4, OR 5,
C BDB(J) = U(B,THETA(J)) , J=1,2,...,N.
C
C WHEN MBDCND = 2,3, OR 6,
C BDB(J) = (D/DR)U(B,THETA(J)) ,
C J=1,2,...,N.
C
C C,D
C THE RANGE OF THETA, I.E. C .LE. THETA .LE. D.
C C MUST BE LESS THAN D.
C
C N
C THE NUMBER OF UNKNOWNS IN THE INTERVAL
C (C,D). THE UNKNOWNS IN THE THETA-
C DIRECTION ARE GIVEN BY THETA(J) = C +
C (J-0.5)DT, J=1,2,...,N, WHERE
C DT = (D-C)/N. N MUST BE GREATER THAN 2.
C
C NBDCND
C INDICATES THE TYPE OF BOUNDARY CONDITIONS
C AT THETA = C AND THETA = D.
C
C = 0 IF THE SOLUTION IS PERIODIC IN THETA,
C I.E. U(I,J) = U(I,N+J).
C
C = 1 IF THE SOLUTION IS SPECIFIED AT
C THETA = C AND THETA = D
C (SEE NOTE BELOW).
C
C = 2 IF THE SOLUTION IS SPECIFIED AT
C THETA = C AND THE DERIVATIVE OF THE
C SOLUTION WITH RESPECT TO THETA IS
C SPECIFIED AT THETA = D
C (SEE NOTE BELOW).
C
C = 3 IF THE DERIVATIVE OF THE SOLUTION
C WITH RESPECT TO THETA IS SPECIFIED
C AT THETA = C AND THETA = D.
C
C = 4 IF THE DERIVATIVE OF THE SOLUTION
C WITH RESPECT TO THETA IS SPECIFIED
C AT THETA = C AND THE SOLUTION IS
C SPECIFIED AT THETA = D
C (SEE NOTE BELOW).
C
C NOTE:
C WHEN NBDCND = 1, 2, OR 4, DO NOT USE
C MBDCND = 5 OR 6 (THE FORMER INDICATES THAT
C THE SOLUTION IS SPECIFIED AT R = 0; THE
C LATTER INDICATES THE SOLUTION IS UNSPECIFIED
C AT R = 0). USE INSTEAD MBDCND = 1 OR 2.
C
C BDC
C A ONE DIMENSIONAL ARRAY OF LENGTH M THAT
C SPECIFIES THE BOUNDARY VALUES OF THE
C SOLUTION AT THETA = C.
C
C WHEN NBDCND = 1 OR 2,
C BDC(I) = U(R(I),C) , I=1,2,...,M.
C
C WHEN NBDCND = 3 OR 4,
C BDC(I) = (D/DTHETA)U(R(I),C),
C I=1,2,...,M.
C
C WHEN NBDCND = 0, BDC IS A DUMMY VARIABLE.
C
C BDD
C A ONE-DIMENSIONAL ARRAY OF LENGTH M THAT
C SPECIFIES THE BOUNDARY VALUES OF THE
C SOLUTION AT THETA = D.
C
C WHEN NBDCND = 1 OR 4,
C BDD(I) = U(R(I),D) , I=1,2,...,M.
C
C WHEN NBDCND = 2 OR 3,
C BDD(I) =(D/DTHETA)U(R(I),D), I=1,2,...,M.
C
C WHEN NBDCND = 0, BDD IS A DUMMY VARIABLE.
C
C ELMBDA
C THE CONSTANT LAMBDA IN THE HELMHOLTZ
C EQUATION. IF LAMBDA IS GREATER THAN 0,
C A SOLUTION MAY NOT EXIST. HOWEVER, HSTPLR
C WILL ATTEMPT TO FIND A SOLUTION.
C
C F
C A TWO-DIMENSIONAL ARRAY THAT SPECIFIES THE
C VALUES OF THE RIGHT SIDE OF THE HELMHOLTZ
C EQUATION.
C
C FOR I=1,2,...,M AND J=1,2,...,N
C F(I,J) = F(R(I),THETA(J)) .
C
C F MUST BE DIMENSIONED AT LEAST M X N.
C
C IDIMF
C THE ROW (OR FIRST) DIMENSION OF THE ARRAY
C F AS IT APPEARS IN THE PROGRAM CALLING
C HSTPLR. THIS PARAMETER IS USED TO SPECIFY
C THE VARIABLE DIMENSION OF F.
C IDIMF MUST BE AT LEAST M.
C
C W
C A ONE-DIMENSIONAL ARRAY THAT MUST BE
C PROVIDED BY THE USER FOR WORK SPACE.
C W MAY REQUIRE UP TO 13M + 4N +
C M*INT(LOG2(N)) LOCATIONS.
C THE ACTUAL NUMBER OF LOCATIONS USED IS
C COMPUTED BY HSTPLR AND IS RETURNED IN
C THE LOCATION W(1).
C
C
C ON OUTPUT
C
C F
C CONTAINS THE SOLUTION U(I,J) OF THE FINITE
C DIFFERENCE APPROXIMATION FOR THE GRID POINT
C (R(I),THETA(J)) FOR I=1,2,...,M,
C J=1,2,...,N.
C
C PERTRB
C IF A COMBINATION OF PERIODIC, DERIVATIVE,
C OR UNSPECIFIED BOUNDARY CONDITIONS IS
C SPECIFIED FOR A POISSON EQUATION
C (LAMBDA = 0), A SOLUTION MAY NOT EXIST.
C PERTRB IS A CONSTANT CALCULATED AND
C SUBTRACTED FROM F, WHICH ENSURES THAT A
C SOLUTION EXISTS. HSTPLR THEN COMPUTES THIS
C SOLUTION, WHICH IS A LEAST SQUARES SOLUTION
C TO THE ORIGINAL APPROXIMATION.
C THIS SOLUTION PLUS ANY CONSTANT IS ALSO
C A SOLUTION; HENCE, THE SOLUTION IS NOT
C UNIQUE. THE VALUE OF PERTRB SHOULD BE
C SMALL COMPARED TO THE RIGHT SIDE F.
C OTHERWISE, A SOLUTION IS OBTAINED TO AN
C ESSENTIALLY DIFFERENT PROBLEM.
C THIS COMPARISON SHOULD ALWAYS BE MADE TO
C INSURE THAT A MEANINGFUL SOLUTION HAS BEEN
C OBTAINED.
C
C IERROR
C AN ERROR FLAG THAT INDICATES INVALID INPUT
C                        PARAMETERS.  EXCEPT FOR NUMBERS 0 AND 11,
C A SOLUTION IS NOT ATTEMPTED.
C
C = 0 NO ERROR
C
C = 1 A .LT. 0
C
C = 2 A .GE. B
C
C = 3 MBDCND .LT. 1 OR MBDCND .GT. 6
C
C = 4 C .GE. D
C
C = 5 N .LE. 2
C
C = 6 NBDCND .LT. 0 OR NBDCND .GT. 4
C
C = 7 A = 0 AND MBDCND = 3 OR 4
C
C = 8 A .GT. 0 AND MBDCND .GE. 5
C
C = 9 MBDCND .GE. 5 AND NBDCND .NE. 0 OR 3
C
C = 10 IDIMF .LT. M
C
C = 11 LAMBDA .GT. 0
C
C = 12 M .LE. 2
C
C SINCE THIS IS THE ONLY MEANS OF INDICATING
C A POSSIBLY INCORRECT CALL TO HSTPLR, THE
C USER SHOULD TEST IERROR AFTER THE CALL.
C
C W
C W(1) CONTAINS THE REQUIRED LENGTH OF W.
C
C I/O NONE
C
C PRECISION SINGLE
C
C REQUIRED LIBRARY COMF, GENBUN, GNBNAUX, AND POISTG
C FILES FROM FISHPACK
C
C LANGUAGE FORTRAN
C
C HISTORY WRITTEN BY ROLAND SWEET AT NCAR IN 1977.
C RELEASED ON NCAR'S PUBLIC SOFTWARE LIBRARIES
C IN JANUARY 1980.
C
C PORTABILITY FORTRAN 77.
C
C ALGORITHM THIS SUBROUTINE DEFINES THE FINITE-
C DIFFERENCE EQUATIONS, INCORPORATES BOUNDARY
C DATA, ADJUSTS THE RIGHT SIDE WHEN THE SYSTEM
C IS SINGULAR AND CALLS EITHER POISTG OR GENBUN
C WHICH SOLVES THE LINEAR SYSTEM OF EQUATIONS.
C
C TIMING FOR LARGE M AND N, THE OPERATION COUNT
C IS ROUGHLY PROPORTIONAL TO M*N*LOG2(N).
C
C ACCURACY THE SOLUTION PROCESS EMPLOYED RESULTS IN
C A LOSS OF NO MORE THAN FOUR SIGNIFICANT
C DIGITS FOR N AND M AS LARGE AS 64.
C MORE DETAILED INFORMATION ABOUT ACCURACY
C CAN BE FOUND IN THE DOCUMENTATION FOR
C ROUTINE POISTG WHICH IS THE ROUTINE THAT
C ACTUALLY SOLVES THE FINITE DIFFERENCE
C EQUATIONS.
C
C REFERENCES U. SCHUMANN AND R. SWEET, "A DIRECT METHOD
C FOR THE SOLUTION OF POISSON'S EQUATION WITH
C NEUMANN BOUNDARY CONDITIONS ON A STAGGERED
C GRID OF ARBITRARY SIZE," J. COMP. PHYS.
C 20(1976), PP. 171-182.
C***********************************************************************
DIMENSION F(IDIMF,1)
DIMENSION BDA(*) ,BDB(*) ,BDC(*) ,BDD(*) ,
1 W(*)
C
IERROR = 0
IF (A .LT. 0.) IERROR = 1
IF (A .GE. B) IERROR = 2
IF (MBDCND.LE.0 .OR. MBDCND.GE.7) IERROR = 3
IF (C .GE. D) IERROR = 4
IF (N .LE. 2) IERROR = 5
IF (NBDCND.LT.0 .OR. NBDCND.GE.5) IERROR = 6
IF (A.EQ.0. .AND. (MBDCND.EQ.3 .OR. MBDCND.EQ.4)) IERROR = 7
IF (A.GT.0. .AND. MBDCND.GE.5) IERROR = 8
IF (MBDCND.GE.5 .AND. NBDCND.NE.0 .AND. NBDCND.NE.3) IERROR = 9
IF (IDIMF .LT. M) IERROR = 10
IF (M .LE. 2) IERROR = 12
IF (IERROR .NE. 0) RETURN
DELTAR = (B-A)/FLOAT(M)
DLRSQ = DELTAR**2
DELTHT = (D-C)/FLOAT(N)
DLTHSQ = DELTHT**2
NP = NBDCND+1
ISW = 1
MB = MBDCND
IF (A.EQ.0. .AND. MBDCND.EQ.2) MB = 6
C
C DEFINE A,B,C COEFFICIENTS IN W-ARRAY.
C
IWB = M
IWC = IWB+M
IWR = IWC+M
DO 101 I=1,M
J = IWR+I
W(J) = A+(FLOAT(I)-0.5)*DELTAR
W(I) = (A+FLOAT(I-1)*DELTAR)/DLRSQ
K = IWC+I
W(K) = (A+FLOAT(I)*DELTAR)/DLRSQ
K = IWB+I
W(K) = (ELMBDA-2./DLRSQ)*W(J)
101 CONTINUE
DO 103 I=1,M
J = IWR+I
A1 = W(J)
DO 102 J=1,N
F(I,J) = A1*F(I,J)
102 CONTINUE
103 CONTINUE
C
C ENTER BOUNDARY DATA FOR R-BOUNDARIES.
C
GO TO (104,104,106,106,108,108),MB
104 A1 = 2.*W(1)
W(IWB+1) = W(IWB+1)-W(1)
DO 105 J=1,N
F(1,J) = F(1,J)-A1*BDA(J)
105 CONTINUE
GO TO 108
106 A1 = DELTAR*W(1)
W(IWB+1) = W(IWB+1)+W(1)
DO 107 J=1,N
F(1,J) = F(1,J)+A1*BDA(J)
107 CONTINUE
108 GO TO (109,111,111,109,109,111),MB
109 A1 = 2.*W(IWR)
W(IWC) = W(IWC)-W(IWR)
DO 110 J=1,N
F(M,J) = F(M,J)-A1*BDB(J)
110 CONTINUE
GO TO 113
111 A1 = DELTAR*W(IWR)
W(IWC) = W(IWC)+W(IWR)
DO 112 J=1,N
F(M,J) = F(M,J)-A1*BDB(J)
112 CONTINUE
C
C ENTER BOUNDARY DATA FOR THETA-BOUNDARIES.
C
113 A1 = 2./DLTHSQ
GO TO (123,114,114,116,116),NP
114 DO 115 I=1,M
J = IWR+I
F(I,1) = F(I,1)-A1*BDC(I)/W(J)
115 CONTINUE
GO TO 118
116 A1 = 1./DELTHT
DO 117 I=1,M
J = IWR+I
F(I,1) = F(I,1)+A1*BDC(I)/W(J)
117 CONTINUE
118 A1 = 2./DLTHSQ
GO TO (123,119,121,121,119),NP
119 DO 120 I=1,M
J = IWR+I
F(I,N) = F(I,N)-A1*BDD(I)/W(J)
120 CONTINUE
GO TO 123
121 A1 = 1./DELTHT
DO 122 I=1,M
J = IWR+I
F(I,N) = F(I,N)-A1*BDD(I)/W(J)
122 CONTINUE
123 CONTINUE
C
C ADJUST RIGHT SIDE OF SINGULAR PROBLEMS TO INSURE EXISTENCE OF A
C SOLUTION.
C
PERTRB = 0.
IF (ELMBDA) 133,125,124
124 IERROR = 11
GO TO 133
125 GO TO (133,133,126,133,133,126),MB
126 GO TO (127,133,133,127,133),NP
127 CONTINUE
ISW = 2
DO 129 J=1,N
DO 128 I=1,M
PERTRB = PERTRB+F(I,J)
128 CONTINUE
129 CONTINUE
PERTRB = PERTRB/(FLOAT(M*N)*0.5*(A+B))
DO 131 I=1,M
J = IWR+I
A1 = PERTRB*W(J)
DO 130 J=1,N
F(I,J) = F(I,J)-A1
130 CONTINUE
131 CONTINUE
A2 = 0.
DO 132 J=1,N
A2 = A2+F(1,J)
132 CONTINUE
A2 = A2/W(IWR+1)
133 CONTINUE
C
C MULTIPLY I-TH EQUATION THROUGH BY R(I)*DELTHT**2
C
DO 135 I=1,M
J = IWR+I
A1 = DLTHSQ*W(J)
W(I) = A1*W(I)
J = IWC+I
W(J) = A1*W(J)
J = IWB+I
W(J) = A1*W(J)
DO 134 J=1,N
F(I,J) = A1*F(I,J)
134 CONTINUE
135 CONTINUE
LP = NBDCND
W(1) = 0.
W(IWR) = 0.
C
C CALL POISTG OR GENBUN TO SOLVE THE SYSTEM OF EQUATIONS.
C
IF (LP .EQ. 0) GO TO 136
CALL POISTG (LP,N,1,M,W,W(IWB+1),W(IWC+1),IDIMF,F,IERR1,W(IWR+1))
GO TO 137
136 CALL GENBUN (LP,N,1,M,W,W(IWB+1),W(IWC+1),IDIMF,F,IERR1,W(IWR+1))
137 CONTINUE
W(1) = W(IWR+1)+3.*FLOAT(M)
IF (A.NE.0. .OR. MBDCND.NE.2 .OR. ISW.NE.2) GO TO 141
A1 = 0.
DO 138 J=1,N
A1 = A1+F(1,J)
138 CONTINUE
A1 = (A1-DLRSQ*A2/16.)/FLOAT(N)
IF (NBDCND .EQ. 3) A1 = A1+(BDD(1)-BDC(1))/(D-C)
A1 = BDA(1)-A1
DO 140 I=1,M
DO 139 J=1,N
F(I,J) = F(I,J)+A1
139 CONTINUE
140 CONTINUE
141 CONTINUE
RETURN
C
C REVISION HISTORY---
C
C SEPTEMBER 1973 VERSION 1
C APRIL 1976 VERSION 2
C JANUARY 1978 VERSION 3
C DECEMBER 1979 VERSION 3.1
C FEBRUARY 1985 DOCUMENTATION UPGRADE
C NOVEMBER 1988 VERSION 3.2, FORTRAN 77 CHANGES
C-----------------------------------------------------------------------
END
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj12synthconj5 : forall (lv0 : natural) (lv1 : natural), (@eq natural (Succ (plus lv0 lv1)) (plus (Succ lv1) lv0)).
Admitted.
QuickChick conj12synthconj5.
|
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.Categories.Sets where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Isomorphism
open import Cubical.Categories.Category
open import Cubical.Categories.Functor
open import Cubical.Categories.NaturalTransformation
open Precategory
module _ ℓ where
SET : Precategory (ℓ-suc ℓ) ℓ
SET .ob = Σ (Type ℓ) isSet
SET .Hom[_,_] (A , _) (B , _) = A → B
SET .id _ = λ x → x
SET ._⋆_ f g = λ x → g (f x)
SET .⋆IdL f = refl
SET .⋆IdR f = refl
SET .⋆Assoc f g h = refl
module _ {ℓ} where
isSetExpIdeal : {A B : Type ℓ} → isSet B → isSet (A → B)
isSetExpIdeal B/set = isSetΠ λ _ → B/set
isSetLift : {A : Type ℓ} → isSet A → isSet (Lift {ℓ} {ℓ-suc ℓ} A)
isSetLift = isOfHLevelLift 2
module _ {A B : SET ℓ .ob} where
-- monic/surjectiveness
open import Cubical.Categories.Morphism
isSurjSET : (f : SET ℓ [ A , B ]) → Type _
isSurjSET f = ∀ (b : fst B) → Σ[ a ∈ fst A ] f a ≡ b
-- isMonic→isSurjSET : {f : SET ℓ [ A , B ]}
-- → isEpic {C = SET ℓ} {x = A} {y = B} f
-- → isSurjSET f
-- isMonic→isSurjSET ism b = {!!} , {!!}
instance
SET-category : isCategory (SET ℓ)
SET-category .isSetHom {_} {B , B/set} = isSetExpIdeal B/set
private
variable
ℓ ℓ' : Level
open Functor
-- Hom functors
_[-,_] : (C : Precategory ℓ ℓ') → (c : C .ob) → ⦃ isCat : isCategory C ⦄ → Functor (C ^op) (SET _)
(C [-, c ]) ⦃ isCat ⦄ .F-ob x = (C [ x , c ]) , isCat .isSetHom
(C [-, c ]) .F-hom f k = f ⋆⟨ C ⟩ k
(C [-, c ]) .F-id = funExt λ _ → C .⋆IdL _
(C [-, c ]) .F-seq _ _ = funExt λ _ → C .⋆Assoc _ _ _
_[_,-] : (C : Precategory ℓ ℓ') → (c : C .ob) → ⦃ isCat : isCategory C ⦄ → Functor C (SET _)
(C [ c ,-]) ⦃ isCat ⦄ .F-ob x = (C [ c , x ]) , isCat .isSetHom
(C [ c ,-]) .F-hom f k = k ⋆⟨ C ⟩ f
(C [ c ,-]) .F-id = funExt λ _ → C .⋆IdR _
(C [ c ,-]) .F-seq _ _ = funExt λ _ → sym (C .⋆Assoc _ _ _)
module _ {C : Precategory ℓ ℓ'} ⦃ _ : isCategory C ⦄ {F : Functor C (SET ℓ')} where
open NatTrans
-- natural transformations by pre/post composition
preComp : {x y : C .ob}
→ (f : C [ x , y ])
→ C [ x ,-] ⇒ F
→ C [ y ,-] ⇒ F
preComp f α .N-ob c k = (α ⟦ c ⟧) (f ⋆⟨ C ⟩ k)
preComp f α .N-hom {x = c} {d} k
= (λ l → (α ⟦ d ⟧) (f ⋆⟨ C ⟩ (l ⋆⟨ C ⟩ k)))
≡[ i ]⟨ (λ l → (α ⟦ d ⟧) (⋆Assoc C f l k (~ i))) ⟩
(λ l → (α ⟦ d ⟧) (f ⋆⟨ C ⟩ l ⋆⟨ C ⟩ k))
≡[ i ]⟨ (λ l → (α .N-hom k) i (f ⋆⟨ C ⟩ l)) ⟩
(λ l → (F ⟪ k ⟫) ((α ⟦ c ⟧) (f ⋆⟨ C ⟩ l)))
∎
-- properties
-- TODO: move to own file
open CatIso renaming (inv to cInv)
open Iso
Iso→CatIso : ∀ {A B : (SET ℓ) .ob}
→ Iso (fst A) (fst B)
→ CatIso {C = SET ℓ} A B
Iso→CatIso is .mor = is .fun
Iso→CatIso is .cInv = is .inv
Iso→CatIso is .sec = funExt λ b → is .rightInv b -- is .rightInv
Iso→CatIso is .ret = funExt λ b → is .leftInv b -- is .leftInv
|
State Before: 𝕜 : Type u_1
E : Type u_2
F : Type u_3
β : Type ?u.240342
inst✝⁴ : OrderedRing 𝕜
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s t : Set E
f : E →ᵃ[𝕜] F
hs : Convex 𝕜 s
⊢ Convex 𝕜 (↑f '' s) State After: case intro.intro
𝕜 : Type u_1
E : Type u_2
F : Type u_3
β : Type ?u.240342
inst✝⁴ : OrderedRing 𝕜
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s t : Set E
f : E →ᵃ[𝕜] F
hs : Convex 𝕜 s
x : E
hx : x ∈ s
⊢ StarConvex 𝕜 (↑f x) (↑f '' s) Tactic: rintro _ ⟨x, hx, rfl⟩ State Before: case intro.intro
𝕜 : Type u_1
E : Type u_2
F : Type u_3
β : Type ?u.240342
inst✝⁴ : OrderedRing 𝕜
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s t : Set E
f : E →ᵃ[𝕜] F
hs : Convex 𝕜 s
x : E
hx : x ∈ s
⊢ StarConvex 𝕜 (↑f x) (↑f '' s) State After: no goals Tactic: exact (hs hx).affine_image _
|
#ifndef PyGSL_ERROR_HELPER_H
#define PyGSL_ERROR_HELPER_H 1
#include <pygsl/intern.h>
#include <pygsl/utils.h>
#include <gsl/gsl_errno.h>
#include <pygsl/errorno.h>
/*
* 22 Sep. 2009 Pierre Schnizer
* Uncomment only if trouble with the gsl error handler (e.g. when using
* Python with threading support (typical ufuncs). At the time of this writing
* the error handler would call python to find the approbriate python exception
*
* So I used to uncomment the macro as well as the function to ensure that
* gsl_error was not called any more within the pygsl wrappper
*/
/*
#undef GSL_ERROR
#undef GSL_ERROR_VAL
#undef GSL_ERROR_NULL
#define gsl_error()
*/
/*
* handle gsl error flags.
*
* If a flag arrives check if there was already a python error. If so leave it alone.
* We cannot return two exceptions.
*
* Otherwise:
* Should I put an exception up? E.g. some function not conforming to GSL
* Convention returning a flag, instead of calling gsl_error?
* Currently I follow that idea. But I have no more information about the reason
* than the flag.
*
* Return:
* GSL_SUCCESS ... No errornous call
* GSL_FAILURE ... errornous call
*
* If you need to return the flag e.g. "int gsl_odeiv_iterate( ... " use
* PyGSL_error_flag_to_pyint instead!
*
*/
PyGSL_API_EXTERN int
PyGSL_error_flag(long flag);
/*
* Handles gsl_error flags.
* It differs from the above that it returns the integer.
*
* Negative values mean something like go one with the iteration. These are
* converted to an python integer. Positive values flag a problem. These are
* converted to python exceptions.
*/
PyGSL_API_EXTERN PyObject *
PyGSL_error_flag_to_pyint(long flag);
/*
* Add a Python trace back frame to the python interpreter.
* Input :
* module ... the module. Pass NULL if not known.
* filename ... The filename to list in the stack frame. Pass NULL if not
* known.
* funcname ... The function name to list in the stack frame. Pass NULL if
* not known.
* lineno ... The Linenumber where the error occurred.
*/
PyGSL_API_EXTERN void
PyGSL_add_traceback(PyObject *module, const char *filename, const char *funcname, int lineno);
PyGSL_API_EXTERN int
PyGSL_warning(const char *, const char*, int, int);
#ifndef _PyGSL_API_MODULE
/* Section for modules importing the functions */
#define PyGSL_error_flag (*(int (*)(long)) PyGSL_API[PyGSL_error_flag_NUM])
#define PyGSL_error_flag_to_pyint (*(PyObject * (*)(long)) PyGSL_API[PyGSL_error_flag_to_pyint_NUM])
#define PyGSL_add_traceback (*(void (*)(PyObject *, const char *, const char *, int)) PyGSL_API[PyGSL_add_traceback_NUM])
#define PyGSL_warning (*(int (*)(const char *, const char *, int, int)) PyGSL_API[PyGSL_warning_NUM])
#endif /* _PyGSL_API_MODULE */
#define PyGSL_ERROR_FLAG(flag) \
(((long) flag == GSL_SUCCESS) && (!PyErr_Occurred())) ? GSL_SUCCESS : \
PyGSL_error_flag((long) (flag))
#define PyGSL_ERROR_FLAG_TO_PYINT(flag) \
(((long) flag <= 0) && (!PyErr_Occurred())) ? PyInt_FromLong((long) flag) : \
PyGSL_error_flag_to_pyint((long) (flag))
#endif /* PyGSL_ERROR_HELPER_H */
|
-----------------------------------------------------------------------------
-- |
-- Module : DSP.Matrix.Matrix
-- Copyright : (c) Matthew Donadio 2003
-- License : GPL
--
-- Maintainer : [email protected]
-- Stability : experimental
-- Portability : portable
--
-- Basic matrix routines
--
-----------------------------------------------------------------------------
module Matrix.Matrix where
import Data.Array
import Data.Complex
-- | Matrix-matrix multiplication: A x B = C
mm_mult :: (Ix a, Integral a, Num b) => Array (a,a) b -- ^ A
-> Array (a,a) b -- ^ B
-> Array (a,a) b -- ^ C
mm_mult a b = if ac /= br
then error "mm_mult: inside dimensions inconsistent"
else array bnds [ ((i,j), mult i j) | (i,j) <- range bnds ]
where mult i j = sum [ a!(i,k) * b!(k,j) | k <- [1..ac] ]
((_,_),(ar,ac)) = bounds a
((_,_),(br,bc)) = bounds b
bnds = ((1,1),(ar,bc))
-- | Matrix-vector multiplication: A x b = c
mv_mult :: (Ix a, Integral a, Num b) => Array (a,a) b -- ^ A
-> Array a b -- ^ b
-> Array a b -- ^ c
mv_mult a b = if ac /= br
then error "mv_mult: dimensions inconsistent"
else array bnds [ (i, mult i) | i <- range bnds ]
where mult i = sum [ a!(i,k) * b!(k) | k <- [1..ac] ]
((_,_),(ar,ac)) = bounds a
(_,br) = bounds b
bnds = (1,ar)
-- | Transpose of a matrix
m_trans :: (Ix a, Integral a, Num b) => Array (a,a) b -- ^ A
-> Array (a,a) b -- ^ A^T
m_trans a = array bnds [ ((i,j), a!(j,i)) | (i,j) <- range bnds ]
where (_,(m,n)) = bounds a
bnds = ((1,1),(n,m))
-- | Hermitian transpose (conjugate transpose) of a matrix
m_hermit :: (Ix a, Integral a, RealFloat b) => Array (a,a) (Complex b) -- ^ A
-> Array (a,a) (Complex b) -- ^ A^H
m_hermit a = array bnds [ ((i,j), conjugate (a!(j,i))) | (i,j) <- range bnds ]
where (_,(m,n)) = bounds a
bnds = ((1,1),(n,m))
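-- Usage sketch (illustrative values; note that the routines above assume
-- 1-indexed arrays, cf. the ranges [1..ac]):
--
-- >>> let a = listArray ((1,1),(2,2)) [1,2,3,4] :: Array (Int,Int) Double
-- >>> mm_mult a a ! (1,1)
-- 7.0
-- >>> m_trans a ! (1,2)
-- 3.0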
|
% Syllabus Template from Arman Shokrollahi
% https://www.overleaf.com/latex/templates/syllabus-template-course-info/gbqbpcdgvxjs
\documentclass[11pt, letterpaper]{article}
%\usepackage{geometry}
\usepackage[inner=2cm,outer=2cm,top=2.5cm,bottom=2.5cm]{geometry}
\pagestyle{empty}
\usepackage{graphicx}
\usepackage{fancyhdr, lastpage, bbding, pmboxdraw}
\usepackage[usenames,dvipsnames]{color}
\definecolor{darkblue}{rgb}{0,0,.6}
\definecolor{darkred}{rgb}{.7,0,0}
\definecolor{darkgreen}{rgb}{0,.6,0}
\definecolor{red}{rgb}{.98,0,0}
\usepackage[colorlinks,pagebackref,pdfusetitle,urlcolor=darkblue,citecolor=darkblue,linkcolor=darkred,bookmarksnumbered,plainpages=false]{hyperref}
\renewcommand{\thefootnote}{\fnsymbol{footnote}}
\pagestyle{fancyplain}
\fancyhf{}
\lhead{ \fancyplain{}{Political Analysis in R} }
%\chead{ \fancyplain{}{} }
\rhead{ \fancyplain{}{Fall 2021} }%\today
%\rfoot{\fancyplain{}{page \thepage\ of \pageref{LastPage}}}
\fancyfoot[RO, LE] {page \thepage\ of \pageref{LastPage} }
\thispagestyle{plain}
%%%%%%%%%%%% LISTING %%%
\usepackage{listings}
\usepackage{caption}
\DeclareCaptionFont{white}{\color{white}}
\DeclareCaptionFormat{listing}{\colorbox{gray}{\parbox{\textwidth}{#1#2#3}}}
\captionsetup[lstlisting]{format=listing,labelfont=white,textfont=white}
\usepackage{verbatim} % used to display code
\usepackage{fancyvrb}
\usepackage{acronym}
\usepackage{amsthm}
\VerbatimFootnotes % Required, otherwise verbatim does not work in footnotes!
\definecolor{OliveGreen}{cmyk}{0.64,0,0.95,0.40}
\definecolor{CadetBlue}{cmyk}{0.62,0.57,0.23,0}
\definecolor{lightlightgray}{gray}{0.93}
\lstset{
%language=bash, % Code langugage
basicstyle=\ttfamily, % Code font, Examples: \footnotesize, \ttfamily
keywordstyle=\color{OliveGreen}, % Keywords font ('*' = uppercase)
commentstyle=\color{gray}, % Comments font
numbers=left, % Line nums position
numberstyle=\tiny, % Line-numbers fonts
stepnumber=1, % Step between two line-numbers
numbersep=5pt, % How far are line-numbers from code
backgroundcolor=\color{lightlightgray}, % Choose background color
frame=none, % A frame around the code
tabsize=2, % Default tab size
captionpos=t, % Caption-position = bottom
breaklines=true, % Automatic line breaking?
breakatwhitespace=false, % Automatic breaks only at whitespace?
showspaces=false, % Dont make spaces visible
showtabs=false, % Dont make tabls visible
columns=flexible, % Column format
morekeywords={__global__, __device__}, % CUDA specific keywords
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
\begin{center}
{\Large \textsc{POLS 3230: Political Analysis in \texttt{R}}}
\end{center}
\begin{center}
{\large Fall 2021}
\end{center}
\begin{center}
\rule{6.5in}{0.4pt}
\begin{minipage}[t]{.96\textwidth}
\begin{tabular}{llcccll}
\textbf{Professor:} & Joe Ornstein & & & & \textbf{Time:} & MWF 1:50 -- 2:40pm \\
\textbf{Email:} & \href{mailto:[email protected]}{[email protected]} & & & & \textbf{Place:} & 101D Baldwin Hall\\
\textbf{Website:} & \href{https://joeornstein.github.io/pols-3230/}{https://joeornstein.github.io/pols-3230/} & & & & &
\end{tabular}
\end{minipage}
\rule{6.5in}{0.4pt}
\end{center}
\vspace{.15cm}
\setlength{\unitlength}{1in}
\renewcommand{\arraystretch}{2}
\noindent In this course, you will learn the fundamentals of working with data using \texttt{R}, a programming language widely used among professional data scientists and academic researchers. You'll learn how to write code, explore new datasets, build visualizations, and think carefully about what conclusions you can and cannot draw from data.
\begin{figure}[h]
\centering
\href{https://xkcd.com/523/}{\includegraphics[width=0.6\textwidth]{img/decline.png}}
\end{figure}
%\begin{quotation}
% \noindent``\textit{You can't really know anything if you just remember isolated facts. If the facts don't hang together on a latticework of theory, you don't have them in a usable form. You've got to have models in your head.}''\\
% \\
% --Charlie Munger (investor, vice chairman of Berkshire Hathaway)
%\end{quotation}
% \noindent
\section*{Course Objectives}
%\vskip.15in
%\noindent\textbf{Course Objectives:}
By the end of this course, you will be able to:
\begin{itemize}
\item Write \texttt{R} scripts to import, tidy, and summarize datasets
\item Create beautiful and informative data visualizations
\item Draw thoughtful conclusions from data
\item Organize your work so that it is transparent and reproducible
% \item Manipulate, wrangle, and clean datasets using the \texttt{R} programming language
% \item Create beautiful data visualizations
% \item Organize your work so that it is transparent and reproducible
% \item Compute derivatives and solve systems of linear equations
% \item Explain the properties of probability distributions and expected values
% \item Perform hypothesis tests and fit models to data
\end{itemize}
\section*{Readings}
Before each class session, I will assign a reading that walks you through a new \texttt{R} programming skill. I will expect you to read and annotate each assignment using \href{https://joeornstein.github.io/pols-3230/index.html#hypothesis}{Hypothesis}. All the readings will be available free online (including the books listed below!), but if you're the type of person who enjoys reading a hard copy, here is a list of books you can purchase:
\begin{itemize}
\item \href{https://r4ds.had.co.nz/}{Wickham, H., \& Grolemund, G. (2016). \textit{R For Data Science: import, tidy, transform, visualize, and model data}. O'Reilly Media, Inc.}
\item \href{https://clauswilke.com/dataviz/}{Wilke, Claus O. (2019). \textit{Fundamentals of Data Visualization: A Primer on Making Informative and Compelling Figures}}
\item \href{https://socviz.co/}{Healy, Kieran (2018). \textit{Data Visualization: A Practical Introduction}. Princeton University Press.}
\end{itemize}
\section*{Assignments \& Grading}
To earn your course grade, I will expect the following:
\begin{itemize}
\item \textbf{Reading (10\%)}: Read all the assigned texts, and actively contribute to the annotated reading discussions. I will grade this on a four-point scale (check-plus, check, check-minus, frowny face) based on how regularly you post.
\item \textbf{Quizzes (30\%)}: There will be three in-class quizzes throughout the semester. I will give you a piece of code with a bunch of errors in it, and your job will be to fix the code so that it works. Points will be assigned based on how many errors you spot and fix. For Fall 2021, the quiz dates will be \textbf{September 15}, \textbf{October 13}, and \textbf{November 17}.
\item \textbf{Team Projects (40\%):} Every day in class, you will work in teams to explore some dataset. Roughly once per week, your team will submit a report on your findings. Reports that are error-free, reproducible, thoughtful, and visually appealing will earn full credit.
\item \textbf{Final Project (20\%):} To cap off the semester, you will create an original data visualization that explores a topic of your choice. Projects that are error-free, reproducible, thoughtful, and visually appealing will earn full credit, and my 3-5 favorites will receive a prize (your dataviz on a poster or coffee mug)! You can find a copy of the grading rubric \href{https://joeornstein.github.io/pols-3230/syllabus/POLS-3230-final-rubric.xlsx}{here}.
\end{itemize}
%\vskip.15in
%\noindent\textbf{Office Hours:}
\section*{Office Hours}
I will be available for meetings every Wednesday before and after class, and you can sign up for 15 minute appointments \href{https://calendly.com/jornstein/15min}{here}. My office is Baldwin 304C, but if you prefer Zoom let me know and I'll send you a link.
\section*{Tentative Course Outline}
Moltke the Elder writes that no battle plan survives first contact with the enemy. The same is true for course outlines. We may need to be flexible and deviate from the plan if some topics require more or less attention, or if something completely unexpected comes up that we want to spend a few weeks on. Caveats aside, here is what I have planned!
%\begin{center}
%\begin{minipage}{6in}
%\begin{flushleft}
%Chapter 1 \dotfill ~$\approx$ 3 days \\
%{\color{darkgreen}{\Rectangle}} ~A little of probability theory and graph theory
\subsubsection*{Week 1: Getting Started}
\textit{Pre-Class Survey, Overcoming Fear, Setting up Software}
\subsubsection*{Week 2: Intro To Data Visualization}
\textit{ggplot2, The Grammar of Graphics, Design Principles, Scatterplots}
\subsubsection*{Week 3: Fancier Data Visualizations}
\textit{Lines, Facets, Histograms, Distributions, Color, Themes}
\subsubsection*{Weeks 4-6: Tidying Messy Data}
\textit{Making New Variables, Grouping, Summarizing, Importing, Filtering, Merging}
\subsubsection*{Weeks 7-8: Space}
\textit{Working with geographic data, Drawing maps}
\subsubsection*{Weeks 9-10: Time}
\textit{Working with dates, Difference-in-difference}
\subsubsection*{Weeks 11-12: Text As Data}
\textit{Strings, Twitter, Sentiment Analysis}
\subsubsection*{Weeks 13-15: Final Projects}
\textit{Work on whatever you want, then show it off}
%\end{flushleft}
%\end{minipage}
%\end{center}
%\vskip.15in
%\noindent\textbf{Important Dates:}
%\begin{center} \begin{minipage}{3.8in}
%\begin{flushleft}
%Midterm \#1 \dotfill ~\={A}b\={a}n 16, 1393 \\
%Midterm \#2 \dotfill ~\={A}zar 21, 1393 \\
%%Project Deadline \dotfill ~Month Day \\
%Final Exam \dotfill ~Dey 18, 1393 \\
%\end{flushleft}
%\end{minipage}
%\end{center}
\subsection*{Academic Honesty}
Remember that when you joined the University of Georgia community, you agreed to abide by a code of conduct outlined in the academic honesty policy called \href{https://honesty.uga.edu/Academic-Honesty-Policy/Introduction/}{\textit{A Culture of Honesty}}. Team projects may, of course, be completed in teams, but you may not consult other people for help on the quizzes, and I expect your final projects to be your original work.
\subsection*{Mental Health and Wellness Resources}
\begin{itemize}
\item If you or someone you know needs assistance, you are encouraged to contact Student Care and Outreach in the Division of Student Affairs at 706-542-7774 or visit \href{https://sco.uga.edu}{https://sco.uga.edu}. They will help you navigate any difficult circumstances you may be facing by connecting you with the appropriate resources or services.
\item UGA has several resources for a student seeking \href{https://www.uhs.uga.edu/bewelluga/bewelluga}{mental health services} or \href{https://www.uhs.uga.edu/info/emergencies}{crisis support}.
\item If you need help managing stress, anxiety, relationships, etc., please visit \href{https://www.uhs.uga.edu/bewelluga/bewelluga}{BeWellUGA} for a list of FREE workshops, classes, mentoring, and health coaching led by licensed clinicians and health educators in the University Health Center.
\item Additional resources can be accessed through the UGA App.
\end{itemize}
%%%%%% THE END
\end{document}
|
scrape.btx <- function() {
# fail if not installed
library("jsonlite")
# scrape raw data
rawdata = readLines("https://bittrex.com/api/v1.1/public/getmarketsummaries")
edges = jsonlite::fromJSON(rawdata)
print("requests done")
# parsed edge data
market.names = edges$result$MarketName
pairs = apply(as.matrix(market.names), 1, function(row) {
# original data is a nested list
strsplit(row, "-")[[1]]
})
  # so it becomes an N-by-2 matrix
pairs = as.matrix(t(pairs))
# bittrex returns flipped coin pairs
reverse.pairs = cbind(pairs[,2], pairs[,1])
bids = as.numeric(as.character(edges$result$Bid))
asks = 1/as.numeric(as.character(edges$result$Ask))
asks[which(asks==Inf)] = 0
bids.pairs = cbind(reverse.pairs, bids)
asks.pairs = cbind(pairs, asks)
graph = rbind(asks.pairs, bids.pairs)
# return
return(graph)
}
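# Usage sketch (requires network access to the Bittrex public API):
#   g <- scrape.btx()   # N-by-3 matrix: from-currency, to-currency, rate
#   head(g)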
|
\chapter{On cellular signaling}
\label{introduction:introduction}
\section{Introduction}
Both in the context of multi-cellular and single-celled
organisms, cells are constantly
challenged to stay alive and perform
tasks in the face of unpredictable environmental
changes. For single-cell organisms these changes
can be particularly dramatic, as the external temperature,
osmolarity, and other properties are outside of cellular
control \cite{Bennett2008,Acar2008}.
For cells within multi-cellular organisms, the
microenvironment fluctuates much less, because neighboring
cells modify and regulate it in a controlled way.
However, in order to exert control over the environment,
cells must constantly communicate with one another. The
messages sent from cell to cell are themselves a form
of unpredictable environmental change that cells must deal with.
Here, I focus on this latter problem. That is, how do
cells within multi-cellular organisms accurately interpret
messages sent from their neighbors?
The potential variety of cellular signals that cells face
is explosively large \cite{Natarajan2006},
and yet cells must somehow be able to tell these signals apart.
Mammalian cells must generally be able to respond
to changes within a highly complex biochemical milieu that contains
proteins, small molecules, and ions.
Adult stem cells must be able to reliably
divide and make differentiation decisions so as to
recreate functional units of organs. Embryonic
stem cells must be able to generate entire organisms,
going from a single cell to billions that each have different
functional and morphological properties. And those embryonic stem cells must perform this
task with extreme accuracy, since even a small error at the early
stages would be compounded through the developmental process \cite{Balazsi2011}.
It is amazing that cells can respond to such an unpredictable, complex, and
ever-changing environment. Even more amazing is that they do so
using interactions among a finite set of molecules, limited
both in quantity and in type, to perform computational tasks.
In order for cells to be so responsive, they must first be
able to recognize that the environment has changed: they must have
sensors. In order for a cell to ``understand'' what has happened,
it must convert the influx of
sensory information into an internal model of its environment.
Finally, cells must map that model
onto a decision regarding what action to take in response. I refer
to the first part of this process, the conversion of external
information into an internal model, as ``signal transduction'' or, in
short, ``signaling.'' The second
part, the conversion of the internal model into a behavior, I refer
to as ``cellular decision-making.''
Understanding how cells make
decisions, as a consequence of environmental or
pathological perturbation, is at the core of cell biology.
In experimental cell biology, we purposely break the ability of a cell
to accurately process information,
or its ability to make a correct decision after processing that information,
in order to understand the
decision-making process. A cell, on the other hand, may
``unintentionally'' break those same processes, thus
resulting in pathology.
If we can understand the basis of cellular signaling and decision-making,
then we can intervene to correct such pathologies.
In this way, we hope that discoveries made in basic biology will eventually
show utility in the clinical treatment of human patients.
Cellular signaling is difficult to study, and so the degree
of uncertainty in even the best-studied systems is astonishing (as exemplified in
\ar{pathways:introduction}).
In this chapter, I outline an abstraction of the problem of cellular
signaling to give some perspective on why it is so difficult to understand.
This same abstract framework can be used to rigorously
define cell biological problems, and thus serves as a tool
for designing meaningful experiments.
By approaching the problem of cellular signal processing in this way,
we become more able to directly
answer the most basic questions in cell biology: what signals
do cells ``listen to,'' how do they model these signals internally, and how
do they use those models to make decisions?
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\input{TEXT/introduction/canon}
\input{TEXT/introduction/hierarchy}
\input{TEXT/introduction/encoding}
\input{TEXT/introduction/encodingSolution}
\input{TEXT/introduction/aims}
|
abstract type AbstractRegression end
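# LocalLinear bundles the sizes and preallocated work arrays needed for a
# locally weighted linear regression of the `ivar` variables on the
# `ivar_neighboor` variables over `k` weighted samples (see `compute` below).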
struct LocalLinear <: AbstractRegression
k::Int64
ivar::Vector{Int64}
ivar_neighboor::Vector{Int64}
res::Array{Float64,2}
beta::Array{Float64,2}
Xm::Array{Float64,2}
Xr::Array{Float64,2}
Cxx::Array{Float64,2}
Cxx2::Array{Float64,2}
Cxy::Array{Float64,2}
pred::Array{Float64,2}
X0r::Array{Float64,2}
function LocalLinear(k, ivar, ivar_neighboor)
#X = zeros(Float64,(nvn, k))
#Y = zeros(Float64,(nv, k))
#w = zeros(Float64, k)
nv = length(ivar)
nvn = length(ivar_neighboor)
res = zeros(Float64, (nv, k))
beta = zeros(Float64, (nv, nvn + 1))
Xm = zeros(Float64, (nvn, 1))
Xr = ones(Float64, (nvn + 1, k))
Cxx = zeros(Float64, (nvn + 1, nvn + 1))
Cxx2 = zeros(Float64, (nvn + 1, nvn + 1))
Cxy = zeros(Float64, (nv, nvn + 1))
pred = zeros(Float64, (nv, k))
X0r = ones(Float64, (nvn + 1, 1))
new(k, ivar, ivar_neighboor, res, beta, Xm, Xr, Cxx, Cxx2, Cxy, pred, X0r)
end
end
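# Fit the weighted local-linear model around column `ip` of `x`: fills
# `xf_mean[ivar, ip]` with the weighted prediction, overwrites `xf_tmp[ivar, :]`
# with the prediction plus residuals, and returns the weighted forecast covariance.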
function compute(ll::LocalLinear, x, xf_tmp, xf_mean, ip, X, Y, w)
ivar = ll.ivar
ivar_neighboor = ll.ivar_neighboor
ll.Xm .= sum(X .* w', dims = 2)
ll.Xr[2:end, :] .= X .- ll.Xm
mul!(ll.Cxx, (ll.Xr .* w'), ll.Xr')
mul!(ll.Cxx2, (ll.Xr .* w' .^ 2), ll.Xr')
mul!(ll.Cxy, (Y .* w'), ll.Xr')
ll.Cxx .= pinv(ll.Cxx, rtol = 0.01)
ll.Cxx2 .= ll.Cxx2 * ll.Cxx
# regression on principal components
mul!(ll.beta, ll.Cxy, ll.Cxx)
ll.X0r[2:end, :] .= x[ivar_neighboor, ip] .- ll.Xm
# weighted mean
xf_mean[ivar, ip] = ll.beta * ll.X0r
mul!(ll.pred, ll.beta, ll.Xr)
Y .-= ll.pred
xf_tmp[ivar, :] .= xf_mean[ivar, ip] .+ Y
# weigthed covariance
cov_xf = (Y * (w .* Y')) ./ (1 .- tr(ll.Cxx2))
cov_xf .= Symmetric(cov_xf .* (1 .+ tr(ll.Cxx2 * ll.X0r * ll.X0r' * ll.Cxx)))
return cov_xf
end
|
{-# LANGUAGE RecordWildCards #-}
import Control.Monad.Trans.Except
import Data.Word
import Data.Complex
import Data.Maybe
import Data.Monoid
import Control.Error.Util
import Pipes as P
import Pipes.Prelude as P
import Options.Applicative
import Data.Vector.Storable as VS hiding ((++))
import Data.Vector.Generic as VG hiding ((++))
import SDR.Util as U
import SDR.RTLSDRStream
import SDR.Filter
import SDR.ArgUtils
import SDR.FilterDesign
import SDR.Pulse
import SDR.PipeUtils
import SDR.CPUID
data Options = Options {
frequency :: Word32,
bandwidth :: Maybe Int
}
optParser :: Parser Options
optParser = Options
<$> option (fmap fromIntegral parseSize) (
long "frequency"
<> short 'f'
<> metavar "FREQUENCY"
<> help "Frequency to tune to"
)
<*> optional (option (fmap fromIntegral parseSize) (
long "bandwidth"
<> short 'b'
<> metavar "BANDWIDTH"
<> help "Filter bandwidth. From 0 to 32K."
))
opt :: ParserInfo Options
opt = info (helper <*> optParser) (fullDesc <> progDesc "Receive AM Radio" <> header "RTLSDR AM")
size = 4096
doIt Options{..} = do
info <- lift getCPUInfo
str <- sdrStream (defaultRTLSDRParams (frequency + 256000) 1024000) 1 (fromIntegral $ size * 2)
let coeffsDecim :: [Float]
coeffsDecim = VS.toList $ windowedSinc 71 0.4 blackman
deci <- lift $ fastDecimatorC info 2 coeffsDecim
let coeffsFilt :: [Float]
coeffsFilt = VS.toList $ windowedSinc 71 (fromIntegral (fromMaybe 16000 bandwidth) / 32000) blackman
filt <- lift $ fastFilterC info coeffsFilt
let coeffsResp :: [Float]
coeffsResp = VS.toList $ windowedSinc 71 0.25 blackman
resp <- lift $ fastResamplerR info 3 2 coeffsResp
pulseSink <- lift pulseAudioSink
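    -- Signal chain (sample rates implied by the parameters above): 1.024 MS/s
    -- complex input, decimated by 2 five times -> 32 kS/s, AM demodulated by
    -- taking the magnitude, then resampled by 3/2 -> 48 kS/s for pulse audio.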
lift $ runEffect $ str
>-> P.map (interleavedIQUnsignedByteToFloatFast info)
>-> P.map (VG.zipWith (*) (quarterBandUp size))
>-> firDecimator deci size
>-> firDecimator deci size
>-> firDecimator deci size
>-> firDecimator deci size
>-> firDecimator deci size
>-> firFilter filt size
>-> P.map (VG.map magnitude)
>-> dcBlockingFilter
>-> firResampler resp size
>-> P.map (VG.map (* 6))
>-> pulseSink
main = execParser opt >>= exceptT putStrLn return . doIt
|
module And.A.Proof
import Yet.Another.Path as Val
%default total
equality : Val.val === 2+3
equality = Refl
|
[STATEMENT]
lemma state_abscD[simp]:
assumes "cfg \<in> MDP.cfg_on s"
shows "state (absc cfg) = abss s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. state (absc cfg) = abss s
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
cfg \<in> MDP.cfg_on s
goal (1 subgoal):
1. state (absc cfg) = abss s
[PROOF STEP]
unfolding absc_def
[PROOF STATE]
proof (prove)
using this:
cfg \<in> MDP.cfg_on s
goal (1 subgoal):
1. state (cfg_corec (abss (state cfg)) (abst \<circ> action) (\<lambda>cfg s. cont cfg (THE x. abss x = s \<and> x \<in> set_pmf (action cfg))) cfg) = abss s
[PROOF STEP]
by auto
|
module Issue4022.Import where
open import Agda.Builtin.Nat
Binary : Set
Binary = Nat → Nat → Nat
-- Search should be able to find `plus` if:
-- * either we do not normalise the type and look for `Binary`
-- * or we do normalise the type and look for `Nat`
plus : Binary
plus = _+_
|
= Species of Allosaurus =
|
%\section{References}\label{sec:references}
%http://www.wintercorp.com/VLDB/2005\_TopTen\_Survey/2005TopTenWinners.pdf
%http://www.research.att.com/~daytona/
%http://www.research.att.com/~daytona/inuse.php
%http://nms.csail.mit.edu/~stavros/pubs/osfa.pdf
%http://nms.csail.mit.edu/~stavros/pubs/vldb2006.pdf
%Something on schema evolution although I haven't found the right thing yet.
%How to wring a table dry: ... by Vijayshankar Raman and Garret Swart
%-- clever use of huffman encoding to compact field codes, convert to
%tuple codes by concatenation, sort and delta-code tuples. Use clever
%huffman coding to be able to do some operations on compressed form.
\bibliography{references}
\bibliographystyle{abbrv}
|
Formal statement is: lemma continuous_at_sequentially: fixes f :: "'a::metric_space \<Rightarrow> 'b::topological_space" shows "continuous (at a) f \<longleftrightarrow> (\<forall>x. (x \<longlongrightarrow> a) sequentially --> ((f \<circ> x) \<longlongrightarrow> f a) sequentially)" Informal statement is: A function $f$ is continuous at $a$ if and only if for every sequence $x_n$ converging to $a$, the sequence $f(x_n)$ converges to $f(a)$.
|
/* movstat/test_Sn.c
*
* Copyright (C) 2018 Patrick Alken
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <gsl/gsl_math.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_statistics.h>
#include <gsl/gsl_sort_vector.h>
#include <gsl/gsl_test.h>
/* calculate S_n statistic for input vector using slow/naive algorithm */
static int
slow_movSn(const gsl_movstat_end_t etype, const gsl_vector * x, gsl_vector * y,
const int H, const int J)
{
const size_t n = x->size;
const int K = H + J + 1;
double *window = malloc(K * sizeof(double));
double *work = malloc(K * sizeof(double));
size_t i;
for (i = 0; i < n; ++i)
{
size_t wsize = gsl_movstat_fill(etype, x, i, H, J, window);
double Sn;
gsl_sort(window, 1, wsize);
Sn = gsl_stats_Sn_from_sorted_data(window, 1, wsize, work);
gsl_vector_set(y, i, Sn);
}
free(window);
free(work);
return GSL_SUCCESS;
}
static double
func_Sn(const size_t n, double x[], void * params)
{
double *work = malloc(n * sizeof(double));
double Sn;
(void) params;
gsl_sort(x, 1, n);
Sn = gsl_stats_Sn_from_sorted_data(x, 1, n, work);
free(work);
return Sn;
}
static void
test_Sn_proc(const double tol, const size_t n, const size_t H, const size_t J,
const gsl_movstat_end_t etype, gsl_rng *rng_p)
{
gsl_movstat_workspace *w;
gsl_vector *x = gsl_vector_alloc(n);
gsl_vector *y = gsl_vector_alloc(n);
gsl_vector *z = gsl_vector_alloc(n);
gsl_movstat_function F;
char buf[2048];
F.function = func_Sn;
F.params = NULL;
if (H == J)
w = gsl_movstat_alloc(2*H + 1);
else
w = gsl_movstat_alloc2(H, J);
  /* test moving S_n with random input */
random_vector(x, rng_p);
/* y = S_n(x) with slow brute force algorithm */
slow_movSn(etype, x, y, H, J);
/* z = S_n(x) */
gsl_movstat_Sn(etype, x, z, w);
/* test y = z */
sprintf(buf, "n=%zu H=%zu J=%zu endtype=%u Sn random", n, H, J, etype);
compare_vectors(tol, z, y, buf);
/* z = S_n(x) in-place */
gsl_vector_memcpy(z, x);
gsl_movstat_Sn(etype, z, z, w);
sprintf(buf, "n=%zu H=%zu J=%zu endtype=%u Sn random in-place", n, H, J, etype);
compare_vectors(tol, z, y, buf);
/* z = S_n(x) with user-defined function */
gsl_movstat_apply(etype, &F, x, z, w);
sprintf(buf, "n=%zu H=%zu J=%zu endtype=%u Sn user", n, H, J, etype);
compare_vectors(tol, z, y, buf);
gsl_vector_free(x);
gsl_vector_free(y);
gsl_vector_free(z);
gsl_movstat_free(w);
}
static void
test_Sn(gsl_rng * rng_p)
{
test_Sn_proc(GSL_DBL_EPSILON, 1000, 0, 0, GSL_MOVSTAT_END_PADZERO, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 1000, 5, 5, GSL_MOVSTAT_END_PADZERO, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 1000, 5, 2, GSL_MOVSTAT_END_PADZERO, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 1000, 2, 5, GSL_MOVSTAT_END_PADZERO, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 2000, 50, 0, GSL_MOVSTAT_END_PADZERO, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 2000, 0, 50, GSL_MOVSTAT_END_PADZERO, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 20, 50, 50, GSL_MOVSTAT_END_PADZERO, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 20, 1, 50, GSL_MOVSTAT_END_PADZERO, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 20, 50, 1, GSL_MOVSTAT_END_PADZERO, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 1000, 0, 0, GSL_MOVSTAT_END_PADVALUE, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 1000, 5, 5, GSL_MOVSTAT_END_PADVALUE, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 1000, 5, 2, GSL_MOVSTAT_END_PADVALUE, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 1000, 2, 5, GSL_MOVSTAT_END_PADVALUE, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 2000, 50, 0, GSL_MOVSTAT_END_PADVALUE, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 2000, 0, 50, GSL_MOVSTAT_END_PADVALUE, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 20, 50, 50, GSL_MOVSTAT_END_PADVALUE, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 20, 1, 50, GSL_MOVSTAT_END_PADVALUE, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 20, 50, 1, GSL_MOVSTAT_END_PADVALUE, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 1000, 0, 0, GSL_MOVSTAT_END_TRUNCATE, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 1000, 5, 5, GSL_MOVSTAT_END_TRUNCATE, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 1000, 5, 2, GSL_MOVSTAT_END_TRUNCATE, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 1000, 2, 5, GSL_MOVSTAT_END_TRUNCATE, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 2000, 50, 0, GSL_MOVSTAT_END_TRUNCATE, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 2000, 0, 50, GSL_MOVSTAT_END_TRUNCATE, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 20, 50, 50, GSL_MOVSTAT_END_TRUNCATE, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 20, 1, 50, GSL_MOVSTAT_END_TRUNCATE, rng_p);
test_Sn_proc(GSL_DBL_EPSILON, 20, 50, 1, GSL_MOVSTAT_END_TRUNCATE, rng_p);
}
|
# NRPy+'s Reference Metric Interface
## Author: Zach Etienne
### Formatting improvements courtesy Brandon Clark
### NRPy+ Source Code for this module: [reference_metric.py](../edit/reference_metric.py)
## Introduction:
### Why use a reference metric? Benefits of choosing the best coordinate system for the problem
When solving a partial differential equation on the computer, it is useful to first pick a coordinate system well-suited to the geometry of the problem. For example, if we are modeling a spherically-symmetric star, it would be hugely wasteful to model the star in 3-dimensional Cartesian coordinates ($x$,$y$,$z$). This is because in Cartesian coordinates, we would need to choose high sampling in all three Cartesian directions. If instead we chose to model the star in spherical coordinates ($r$,$\theta$,$\phi$), so long as the star is centered at $r=0$, we would not need to model the star with more than one point in the $\theta$ and $\phi$ directions!
A similar argument holds for stars that are *nearly* spherically symmetric. Such stars may exhibit density distributions that vary slowly in $\theta$ and $\phi$ directions (e.g., isolated neutron stars or black holes). In these cases the number of points needed to sample the angular directions will still be much smaller than in the radial direction.
Thus the choice of an appropriate reference metric may directly mitigate the [Curse of Dimensionality](https://en.wikipedia.org/wiki/Curse_of_dimensionality).
<a id='toc'></a>
# Table of Contents
$$\label{toc}$$
This notebook is organized as follows:
1. [Step 1](#define_ref_metric): Defining a reference metric, [`reference_metric.py`](../edit/reference_metric.py)
1. [Step 2](#define_geometric): Defining geometric quantities, **`ref_metric__hatted_quantities()`**
1. [Step 3](#prescribed_ref_metric): Prescribed reference metrics in [`reference_metric.py`](../edit/reference_metric.py)
1. [Step 3.a](#sphericallike): Spherical-like coordinate systems
1. [Step 3.a.i](#spherical): **`reference_metric::CoordSystem = "Spherical"`**
1. [Step 3.a.ii](#sinhspherical): **`reference_metric::CoordSystem = "SinhSpherical"`**
1. [Step 3.a.iii](#sinhsphericalv2): **`reference_metric::CoordSystem = "SinhSphericalv2"`**
1. [Step 3.b](#cylindricallike): Cylindrical-like coordinate systems
1. [Step 3.b.i](#cylindrical): **`reference_metric::CoordSystem = "Cylindrical"`**
1. [Step 3.b.ii](#sinhcylindrical): **`reference_metric::CoordSystem = "SinhCylindrical"`**
1. [Step 3.b.iii](#sinhcylindricalv2): **`reference_metric::CoordSystem = "SinhCylindricalv2"`**
1. [Step 3.c](#cartesianlike): Cartesian-like coordinate systems
1. [Step 3.c.i](#cartesian): **`reference_metric::CoordSystem = "Cartesian"`**
1. [Step 3.d](#prolatespheroidal): Prolate spheroidal coordinates
1. [Step 3.d.i](#symtp): **`reference_metric::CoordSystem = "SymTP"`**
1. [Step 3.d.ii](#sinhsymtp): **`reference_metric::CoordSystem = "SinhSymTP"`**
1. [Step 4](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='define_ref_metric'></a>
# Step 1: Defining a reference metric, [`reference_metric.py`](../edit/reference_metric.py) \[Back to [top](#toc)\]
$$\label{define_ref_metric}$$
***Note that currently only orthogonal reference metrics of dimension 3 or fewer are supported. This can be extended if desired.***
NRPy+ assumes all curvilinear coordinate systems map directly from a uniform, Cartesian numerical grid with coordinates $(x,y,z)$=(`xx[0]`,`xx[1]`,`xx[2]`). Thus, when defining reference metrics, all defined coordinate quantities must be in terms of the `xx[]` array. As we will see, this adds a great deal of flexibility.
For example, [**reference_metric.py**](../edit/reference_metric.py) requires that the *orthogonal coordinate scale factors* be defined. As described [here](https://en.wikipedia.org/wiki/Curvilinear_coordinates), the $i$th scale factor is the positive root of the metric element $g_{ii}$. In ordinary spherical coordinates $(r,\theta,\phi)$, with line element $ds^2 = g_{ij} dx^i dx^j = dr^2+ r^2 d \theta^2 + r^2 \sin^2\theta \ d\phi^2$, we would first define
* $r = xx_0$
* $\theta = xx_1$
* $\phi = xx_2$,
so that the scale factors are defined as
* `scalefactor_orthog[0]` = $1$
* `scalefactor_orthog[1]` = $r$
* `scalefactor_orthog[2]` = $r \sin \theta$
Here is the corresponding code:
```python
import sympy as sp
import NRPy_param_funcs as par
import reference_metric as rfm
r = rfm.xx[0]
th = rfm.xx[1]
ph = rfm.xx[2]
rfm.scalefactor_orthog[0] = 1
rfm.scalefactor_orthog[1] = r
rfm.scalefactor_orthog[2] = r*sp.sin(th)
# Notice that the scale factor will be given
# in terms of the fundamental Cartesian
# grid variables, and not {r,th,ph}:
print("r*sin(th) = "+str(rfm.scalefactor_orthog[2]))
```
r*sin(th) = xx0*sin(xx1)
Next suppose we wish to modify our radial coordinate $r(xx_0)$ to be an exponentially increasing function, so that our numerical grid $(xx_0,xx_1,xx_2)$ will map to a spherical grid with radial grid spacing ($\Delta r$) that *increases* with $r$. Generally we will find it useful to define $r(xx_0)$ to be an odd function, so let's choose
$$r(xx_0) = a \sinh(xx_0/s),$$
where $a$ is an overall radial scaling factor, and $s$ denotes the scale (in units of $xx_0$) over which exponential growth will take place. In our implementation below, note that we use the relation
$$\sinh(x) = \frac{e^x - e^{-x}}{2},$$
as SymPy finds it easier to evaluate exponentials than hyperbolic trigonometric functions.
```python
a,s = sp.symbols('a s',positive=True)
xx0_rescaled = rfm.xx[0] / s
r = a*(sp.exp(xx0_rescaled) - sp.exp(-xx0_rescaled))/2
# Must redefine the scalefactors since 'r' has been updated!
rfm.scalefactor_orthog[0] = 1
rfm.scalefactor_orthog[1] = r
rfm.scalefactor_orthog[2] = r*sp.sin(th)
print(rfm.scalefactor_orthog[2])
```
a*(exp(xx0/s) - exp(-xx0/s))*sin(xx1)/2
Often we will find it useful to also define the appropriate mappings from (`xx[0]`,`xx[1]`,`xx[2]`) to Cartesian coordinates (for plotting purposes) and ordinary spherical coordinates (e.g., in case initial data for a PDE are naturally written in spherical coordinates). For this purpose, reference_metric.py also declares lists **`xxCart[]`** and **`xxSph[]`**, which in this case are defined as
```python
rfm.xxSph[0] = r
rfm.xxSph[1] = th
rfm.xxSph[2] = ph
rfm.xxCart[0] = r*sp.sin(th)*sp.cos(ph)
rfm.xxCart[1] = r*sp.sin(th)*sp.sin(ph)
rfm.xxCart[2] = r*sp.cos(th)
# Here we show off SymPy's pretty_print()
# and simplify() functions. Nice, no?
sp.pretty_print(sp.simplify(rfm.xxCart[0]))
```
a*sin(xx1)*cos(xx2)*sinh(xx0/s)
<a id='define_geometric'></a>
# Step 2: Define geometric quantities, `ref_metric__hatted_quantities()` \[Back to [top](#toc)\]
$$\label{define_geometric}$$
Once `scalefactor_orthog[]` has been defined, the function **`ref_metric__hatted_quantities()`** within [reference_metric.py](../edit/reference_metric.py) can be called to define a number of geometric quantities useful for solving PDEs in curvilinear coordinate systems.
Adopting the notation of [Baumgarte, Montero, Cordero-Carrión, and Müller, PRD 87, 044026 (2012)](https://arxiv.org/abs/1211.6632), geometric quantities related to the reference metric are named "hatted" quantities. For example, the reference metric is defined as $\hat{g}_{ij}$=`ghatDD[i][j]`:
```python
rfm.ref_metric__hatted_quantities()
sp.pretty_print(sp.Matrix(sp.simplify(rfm.ghatDD)))
```
Matrix([[1, 0, 0], [0, a**2*(exp(xx0/s) - exp(-xx0/s))**2/4, 0], [0, 0, a**2*(exp(xx0/s) - exp(-xx0/s))**2*sin(xx1)**2/4]])
In addition to $\hat{g}_{ij}$, **`ref_metric__hatted_quantities()`** also provides:
* The rescaling "matrix" `ReDD[i][j]`, used for separating singular (due to chosen coordinate system) pieces of smooth rank-2 tensor components from the smooth parts, so that the smooth parts can be used within temporal and spatial differential operators.
* Inverse reference metric: $\hat{g}^{ij}$=`ghatUU[i][j]`.
* Reference metric determinant: $\det\left(\hat{g}_{ij}\right)$=`detgammahat`.
* First and second derivatives of the reference metric: $\hat{g}_{ij,k}$=`ghatDD_dD[i][j][k]`; $\hat{g}_{ij,kl}$=`ghatDD_dDD[i][j][k][l]`
* Christoffel symbols associated with the reference metric, $\hat{\Gamma}^i_{jk}$ = `GammahatUDD[i][j][k]` and their first derivatives $\hat{\Gamma}^i_{jk,l}$ = `GammahatUDD_dD[i][j][k][l]`
For example, the Christoffel symbol $\hat{\Gamma}^{xx_1}_{xx_2 xx_2}=\hat{\Gamma}^1_{22}$ is given by `GammahatUDD[1][2][2]`:
```python
sp.pretty_print(sp.simplify(rfm.GammahatUDD[1][2][2]))
```
-sin(2*xx1)/2
Given the trigonometric identity $2\sin(x)\cos(x) = \sin(2x)$, notice that the above expression is equivalent to Eq. 18 of [Baumgarte, Montero, Cordero-Carrión, and Müller, PRD 87, 044026 (2012)](https://arxiv.org/abs/1211.6632). This is expected since the sinh-radial spherical coordinate system is equivalent to ordinary spherical coordinates in the angular components.
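As a quick sanity check (a minimal sketch reusing the `sp` and `rfm` objects already defined above), we can ask SymPy to confirm that $\hat{\Gamma}^1_{22}$ is indeed equal to $-\sin(xx_1)\cos(xx_1)$:
```python
# Expected output: 0, confirming GammahatUDD[1][2][2] == -sin(xx1)*cos(xx1)
check = sp.simplify(rfm.GammahatUDD[1][2][2] + sp.sin(rfm.xx[1])*sp.cos(rfm.xx[1]))
print(check)
```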
<a id='prescribed_ref_metric'></a>
# Step 3: Prescribed reference metrics in [`reference_metric.py`](../edit/reference_metric.py) \[Back to [top](#toc)\]
$$\label{prescribed_ref_metric}$$
One need not manually define scale factors or other quantities for reference metrics, as a number of prescribed reference metrics are already defined in [reference_metric.py](../edit/reference_metric.py). These can be accessed by first setting the parameter **reference_metric::CoordSystem** to one of the following, and then calling the function **`rfm.reference_metric()`**.
```python
import NRPy_param_funcs as par
import indexedexp as ixp
import grid as gri
# Step 0a: Initialize parameters
thismodule = __name__
par.initialize_param(par.glb_param("char", thismodule, "CoordSystem", "Spherical"))
# Step 0b: Declare global variables
xx = gri.xx
xxCart = ixp.zerorank1(DIM=4) # Must be set in terms of xx[]s
Cart_to_xx = ixp.zerorank1(DIM=4) # Must be set in terms of xx[]s
Cartx,Carty,Cartz = sp.symbols("Cartx Carty Cartz", real=True)
Cart = [Cartx,Carty,Cartz]
xxSph = ixp.zerorank1(DIM=4) # Must be set in terms of xx[]s
scalefactor_orthog = ixp.zerorank1(DIM=4) # Must be set in terms of xx[]s
have_already_called_reference_metric_function = False
CoordSystem = par.parval_from_str("reference_metric::CoordSystem")
M_PI,M_SQRT1_2 = par.Cparameters("#define",thismodule,["M_PI","M_SQRT1_2"],"")
global xxmin
global xxmax
global UnitVectors
UnitVectors = ixp.zerorank2(DIM=3)
```
We will find the following plotting function useful for analyzing coordinate systems in which the radial coordinate is rescaled.
```python
def create_r_of_xx0_plots(CoordSystem, r_of_xx0,rprime_of_xx0):
import matplotlib.pyplot as plt
plt.clf()
Nr = 20
dxx0 = 1.0 / float(Nr)
xx0s = []
rs = []
deltars = []
rprimes = []
for i in range(Nr):
xx0 = (float(i) + 0.5)*dxx0
xx0s.append(xx0)
rs.append( sp.sympify(str(r_of_xx0 ).replace("xx0",str(xx0))))
rprimes.append(sp.sympify(str(rprime_of_xx0).replace("xx0",str(xx0))))
if i>0:
deltars.append(sp.log(rs[i]-rs[i-1],10))
else:
deltars.append(sp.log(2*rs[0],10))
# fig, ax = plt.subplots()
    fig = plt.figure(figsize=(12,12)) # 12 in x 12 in
ax = fig.add_subplot(221)
ax.set_title('$r(xx_0)$ for '+CoordSystem,fontsize='x-large')
ax.set_xlabel('$xx_0$',fontsize='x-large')
ax.set_ylabel('$r(xx_0)$',fontsize='x-large')
ax.plot(xx0s, rs, 'k.', label='Spacing between\nadjacent gridpoints')
# legend = ax.legend(loc='lower right', shadow=True, fontsize='x-large')
# legend.get_frame().set_facecolor('C1')
ax = fig.add_subplot(222)
ax.set_title('Grid spacing for '+CoordSystem,fontsize='x-large')
ax.set_xlabel('$xx_0$',fontsize='x-large')
    ax.set_ylabel(r'$\log_{10}(\Delta r)$',fontsize='x-large')
ax.plot(xx0s, deltars, 'k.', label='Spacing between\nadjacent gridpoints\nin $r(xx_0)$ plot')
legend = ax.legend(loc='lower right', shadow=True, fontsize='x-large')
legend.get_frame().set_facecolor('C1')
ax = fig.add_subplot(223)
ax.set_title('$r\'(xx_0)$ for '+CoordSystem,fontsize='x-large')
ax.set_xlabel('$xx_0$',fontsize='x-large')
ax.set_ylabel('$r\'(xx_0)$',fontsize='x-large')
ax.plot(xx0s, rprimes, 'k.', label='Nr=96')
# legend = ax.legend(loc='upper left', shadow=True, fontsize='x-large')
# legend.get_frame().set_facecolor('C1')
plt.tight_layout(pad=2)
plt.show()
```
<a id='sphericallike'></a>
## Step 3.a: Spherical-like coordinate systems \[Back to [top](#toc)\]
$$\label{sphericallike}$$
<a id='spherical'></a>
### Step 3.a.i: **`reference_metric::CoordSystem = "Spherical"`** \[Back to [top](#toc)\]
$$\label{spherical}$$
Standard spherical coordinates, with $(r,\theta,\phi)=(xx_0,xx_1,xx_2)$
```python
if CoordSystem == "Spherical":
# Adding assumption real=True can help simplify expressions involving xx[0] & xx[1] below.
xx[0] = sp.symbols("xx0", real=True)
xx[1] = sp.symbols("xx1", real=True)
RMAX = par.Cparameters("REAL", thismodule, ["RMAX"],10.0)
xxmin = [sp.sympify(0), sp.sympify(0), -M_PI]
xxmax = [ RMAX, M_PI, M_PI]
r = xx[0]
th = xx[1]
ph = xx[2]
Cart_to_xx[0] = sp.sqrt(Cartx ** 2 + Carty ** 2 + Cartz ** 2)
Cart_to_xx[1] = sp.acos(Cartz / Cart_to_xx[0])
Cart_to_xx[2] = sp.atan2(Carty, Cartx)
xxSph[0] = r
xxSph[1] = th
xxSph[2] = ph
# Now define xCart, yCart, and zCart in terms of x0,xx[1],xx[2].
# Note that the relation between r and x0 is not necessarily trivial in SinhSpherical coordinates. See above.
xxCart[0] = xxSph[0]*sp.sin(xxSph[1])*sp.cos(xxSph[2])
xxCart[1] = xxSph[0]*sp.sin(xxSph[1])*sp.sin(xxSph[2])
xxCart[2] = xxSph[0]*sp.cos(xxSph[1])
scalefactor_orthog[0] = sp.diff(xxSph[0],xx[0])
scalefactor_orthog[1] = xxSph[0]
scalefactor_orthog[2] = xxSph[0]*sp.sin(xxSph[1])
# Set the unit vectors
UnitVectors = [[ sp.sin(xxSph[1])*sp.cos(xxSph[2]), sp.sin(xxSph[1])*sp.sin(xxSph[2]), sp.cos(xxSph[1])],
[ sp.cos(xxSph[1])*sp.cos(xxSph[2]), sp.cos(xxSph[1])*sp.sin(xxSph[2]), -sp.sin(xxSph[1])],
[ -sp.sin(xxSph[2]), sp.cos(xxSph[2]), sp.sympify(0) ]]
```
Now let's analyze $r(xx_0)$ for **"Spherical"** coordinates.
```python
%matplotlib inline
import sympy as sp
import reference_metric as rfm
import NRPy_param_funcs as par
CoordSystem = "Spherical"
par.set_parval_from_str("reference_metric::CoordSystem",CoordSystem)
rfm.reference_metric()
RMAX = 10.0
r_of_xx0 = sp.sympify(str(rfm.xxSph[0] ).replace("RMAX",str(RMAX)))
rprime_of_xx0 = sp.sympify(str(sp.diff(rfm.xxSph[0],rfm.xx[0])).replace("RMAX",str(RMAX)))
create_r_of_xx0_plots(CoordSystem, r_of_xx0,rprime_of_xx0)
```
<a id='sinhspherical'></a>
### Step 3.a.ii: **`reference_metric::CoordSystem = "SinhSpherical"`** \[Back to [top](#toc)\]
$$\label{sinhspherical}$$
Spherical coordinates, but with $$r(xx_0) = \text{AMPL} \frac{\sinh\left(\frac{xx_0}{\text{SINHW}}\right)}{\sinh\left(\frac{1}{\text{SINHW}}\right)}.$$
SinhSpherical uses two parameters: `AMPL` and `SINHW`. `AMPL` sets the outer boundary distance; and `SINHW` sets the focusing of the coordinate points near $r=0$, where a small `SINHW` ($\sim 0.125$) will greatly focus the points near $r=0$ and a large `SINHW` will look more like an ordinary spherical polar coordinate system.
```python
if CoordSystem == "SinhSpherical":
xxmin = [sp.sympify(0), sp.sympify(0), -M_PI]
xxmax = [sp.sympify(1), M_PI, M_PI]
AMPL, SINHW = par.Cparameters("REAL",thismodule,["AMPL","SINHW"],[10.0,0.2])
# Set SinhSpherical radial coordinate by default; overwrite later if CoordSystem == "SinhSphericalv2".
r = AMPL * (sp.exp(xx[0] / SINHW) - sp.exp(-xx[0] / SINHW)) / \
(sp.exp(1 / SINHW) - sp.exp(-1 / SINHW))
th = xx[1]
ph = xx[2]
Cart_to_xx[0] = SINHW*sp.asinh(sp.sqrt(Cartx ** 2 + Carty ** 2 + Cartz ** 2)*sp.sinh(1/SINHW)/AMPL)
Cart_to_xx[1] = sp.acos(Cartz / sp.sqrt(Cartx ** 2 + Carty ** 2 + Cartz ** 2))
Cart_to_xx[2] = sp.atan2(Carty, Cartx)
xxSph[0] = r
xxSph[1] = th
xxSph[2] = ph
# Now define xCart, yCart, and zCart in terms of x0,xx[1],xx[2].
# Note that the relation between r and x0 is not necessarily trivial in SinhSpherical coordinates. See above.
xxCart[0] = xxSph[0]*sp.sin(xxSph[1])*sp.cos(xxSph[2])
xxCart[1] = xxSph[0]*sp.sin(xxSph[1])*sp.sin(xxSph[2])
xxCart[2] = xxSph[0]*sp.cos(xxSph[1])
scalefactor_orthog[0] = sp.diff(xxSph[0],xx[0])
scalefactor_orthog[1] = xxSph[0]
scalefactor_orthog[2] = xxSph[0]*sp.sin(xxSph[1])
# Set the unit vectors
UnitVectors = [[ sp.sin(xxSph[1])*sp.cos(xxSph[2]), sp.sin(xxSph[1])*sp.sin(xxSph[2]), sp.cos(xxSph[1])],
[ sp.cos(xxSph[1])*sp.cos(xxSph[2]), sp.cos(xxSph[1])*sp.sin(xxSph[2]), -sp.sin(xxSph[1])],
[ -sp.sin(xxSph[2]), sp.cos(xxSph[2]), sp.sympify(0) ]]
```
Now we explore $r(xx_0)$ for `SinhSpherical` assuming `AMPL=10.0` and `SINHW=0.2`:
```python
%matplotlib inline
import sympy as sp
import reference_metric as rfm
import NRPy_param_funcs as par
CoordSystem = "SinhSpherical"
par.set_parval_from_str("reference_metric::CoordSystem",CoordSystem)
rfm.reference_metric()
AMPL = 10.0
SINHW = 0.2
r_of_xx0 = sp.sympify(str(rfm.xxSph[0] ).replace("AMPL",str(AMPL)).replace("SINHW",str(SINHW)))
rprime_of_xx0 = sp.sympify(str(sp.diff(rfm.xxSph[0],rfm.xx[0])).replace("AMPL",str(AMPL)).replace("SINHW",str(SINHW)))
create_r_of_xx0_plots(CoordSystem, r_of_xx0,rprime_of_xx0)
```
<a id='sinhsphericalv2'></a>
### Step 3.a.iii: **`reference_metric::CoordSystem = "SinhSphericalv2"`** \[Back to [top](#toc)\]
$$\label{sinhsphericalv2}$$
The same as SinhSpherical coordinates, but with an additional `AMPL*const_dr*xx_0` term:
$$r(xx_0) = \text{AMPL} \left[\text{const_dr}\ xx_0 + \frac{\sinh\left(\frac{xx_0}{\text{SINHW}}\right)}{\sinh\left(\frac{1}{\text{SINHW}}\right)}\right].$$
```python
if CoordSystem == "SinhSphericalv2":
# SinhSphericalv2 adds the parameter "const_dr", which allows for a region near xx[0]=0 to have
# constant radial resolution of const_dr, provided the sinh() term does not dominate near xx[0]=0.
xxmin = [sp.sympify(0), sp.sympify(0), -M_PI]
xxmax = [sp.sympify(1), M_PI, M_PI]
AMPL, SINHW = par.Cparameters("REAL",thismodule,["AMPL","SINHW"],[10.0,0.2])
const_dr = par.Cparameters("REAL",thismodule,["const_dr"],0.0625)
r = AMPL*( const_dr*xx[0] + (sp.exp(xx[0] / SINHW) - sp.exp(-xx[0] / SINHW)) /
(sp.exp(1 / SINHW) - sp.exp(-1 / SINHW)) )
th = xx[1]
ph = xx[2]
# NO CLOSED-FORM EXPRESSION FOR RADIAL INVERSION.
# Cart_to_xx[0] = "NewtonRaphson"
# Cart_to_xx[1] = sp.acos(Cartz / sp.sqrt(Cartx ** 2 + Carty ** 2 + Cartz ** 2))
# Cart_to_xx[2] = sp.atan2(Carty, Cartx)
xxSph[0] = r
xxSph[1] = th
xxSph[2] = ph
# Now define xCart, yCart, and zCart in terms of x0,xx[1],xx[2].
# Note that the relation between r and x0 is not necessarily trivial in SinhSpherical coordinates. See above.
xxCart[0] = xxSph[0]*sp.sin(xxSph[1])*sp.cos(xxSph[2])
xxCart[1] = xxSph[0]*sp.sin(xxSph[1])*sp.sin(xxSph[2])
xxCart[2] = xxSph[0]*sp.cos(xxSph[1])
scalefactor_orthog[0] = sp.diff(xxSph[0],xx[0])
scalefactor_orthog[1] = xxSph[0]
scalefactor_orthog[2] = xxSph[0]*sp.sin(xxSph[1])
# Set the unit vectors
UnitVectors = [[ sp.sin(xxSph[1])*sp.cos(xxSph[2]), sp.sin(xxSph[1])*sp.sin(xxSph[2]), sp.cos(xxSph[1])],
[ sp.cos(xxSph[1])*sp.cos(xxSph[2]), sp.cos(xxSph[1])*sp.sin(xxSph[2]), -sp.sin(xxSph[1])],
[ -sp.sin(xxSph[2]), sp.cos(xxSph[2]), sp.sympify(0) ]]
```
Now we explore $r(xx_0)$ for `SinhSphericalv2` assuming `AMPL=10.0`, `SINHW=0.2`, and `const_dr=0.05`. Notice that the `const_dr` term significantly increases the grid spacing near $xx_0=0$ relative to `SinhSpherical` coordinates.
```python
%matplotlib inline
import sympy as sp
import reference_metric as rfm
import NRPy_param_funcs as par
CoordSystem = "SinhSphericalv2"
par.set_parval_from_str("reference_metric::CoordSystem",CoordSystem)
rfm.reference_metric()
AMPL = 10.0
SINHW = 0.2
const_dr = 0.05
r_of_xx0 = sp.sympify(str(rfm.xxSph[0] ).replace("AMPL",str(AMPL)).replace("SINHW",str(SINHW)).replace("const_dr",str(const_dr)))
rprime_of_xx0 = sp.sympify(str(sp.diff(rfm.xxSph[0],rfm.xx[0])).replace("AMPL",str(AMPL)).replace("SINHW",str(SINHW)).replace("const_dr",str(const_dr)))
create_r_of_xx0_plots(CoordSystem, r_of_xx0,rprime_of_xx0)
```
<a id='cylindricallike'></a>
## Step 3.b: Cylindrical-like coordinate systems \[Back to [top](#toc)\]
$$\label{cylindricallike}$$
<a id='cylindrical'></a>
### Step 3.b.i: **`reference_metric::CoordSystem = "Cylindrical"`** \[Back to [top](#toc)\]
$$\label{cylindrical}$$
Standard cylindrical coordinates, with $(\rho,\phi,z)=(xx_0,xx_1,xx_2)$
```python
if CoordSystem == "Cylindrical":
# Assuming the cylindrical radial coordinate
# is positive makes nice simplifications of
# unit vectors possible.
xx[0] = sp.symbols("xx0", real=True)
RHOMAX,ZMIN,ZMAX = par.Cparameters("REAL",thismodule,["RHOMAX","ZMIN","ZMAX"],[10.0,-10.0,10.0])
xxmin = [sp.sympify(0), -M_PI, ZMIN]
xxmax = [ RHOMAX, M_PI, ZMAX]
RHOCYL = xx[0]
PHICYL = xx[1]
ZCYL = xx[2]
Cart_to_xx[0] = sp.sqrt(Cartx ** 2 + Carty ** 2)
Cart_to_xx[1] = sp.atan2(Carty, Cartx)
Cart_to_xx[2] = Cartz
xxCart[0] = RHOCYL*sp.cos(PHICYL)
xxCart[1] = RHOCYL*sp.sin(PHICYL)
xxCart[2] = ZCYL
xxSph[0] = sp.sqrt(RHOCYL**2 + ZCYL**2)
xxSph[1] = sp.acos(ZCYL / xxSph[0])
xxSph[2] = PHICYL
scalefactor_orthog[0] = sp.diff(RHOCYL,xx[0])
scalefactor_orthog[1] = RHOCYL
scalefactor_orthog[2] = sp.diff(ZCYL,xx[2])
# Set the unit vectors
UnitVectors = [[ sp.cos(PHICYL), sp.sin(PHICYL), sp.sympify(0)],
[-sp.sin(PHICYL), sp.cos(PHICYL), sp.sympify(0)],
[ sp.sympify(0), sp.sympify(0), sp.sympify(1)]]
```
Next let's plot **"Cylindrical"** coordinates.
```python
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
R = np.linspace(0, 2, 24)
h = 2
u = np.linspace(0, 2*np.pi, 24)
x = np.outer(R, np.cos(u))
y = np.outer(R, np.sin(u))
z = h * np.outer(np.ones(np.size(u)), np.ones(np.size(u)))
r = np.arange(0,2,0.25)
theta = 2*np.pi*r*0
fig = plt.figure(figsize=(12,12)) # 12 in x 12 in
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1 = plt.axes(projection='polar')
ax1.set_rmax(2)
ax1.set_rgrids(r,labels=[])
thetas = np.linspace(0,360,24, endpoint=True)
ax1.set_thetagrids(thetas,labels=[])
# ax.grid(True)
ax1.grid(True,linewidth='1.0')
ax1.set_title("Top Down View")
plt.show()
ax2 = plt.axes(projection='3d', xticklabels=[], yticklabels=[], zticklabels=[])
#ax2.plot_surface(x,y,z, alpha=.75, cmap = 'viridis') # z in case of disk which is parallel to XY plane is constant and you can directly use h
x=np.linspace(-2, 2, 100)
z=np.linspace(-2, 2, 100)
Xc, Zc=np.meshgrid(x, z)
Yc = np.sqrt(4-Xc**2)
rstride = 10
cstride = 10
ax2.plot_surface(Xc, Yc, Zc, alpha=1.0, rstride=rstride, cstride=cstride, cmap = 'viridis')
ax2.plot_surface(Xc, -Yc, Zc, alpha=1.0, rstride=rstride, cstride=cstride, cmap = 'viridis')
ax2.set_title("Standard Cylindrical Grid in 3D")
ax2.grid(False)
plt.axis('off')
plt.show()
```
<a id='sinhcylindrical'></a>
### Step 3.b.ii: **`reference_metric::CoordSystem = "SinhCylindrical"`** \[Back to [top](#toc)\]
$$\label{sinhcylindrical}$$
Cylindrical coordinates, but with
$$\rho(xx_0) = \text{AMPLRHO} \frac{\sinh\left(\frac{xx_0}{\text{SINHWRHO}}\right)}{\sinh\left(\frac{1}{\text{SINHWRHO}}\right)}$$
and
$$z(xx_2) = \text{AMPLZ} \frac{\sinh\left(\frac{xx_2}{\text{SINHWZ}}\right)}{\sinh\left(\frac{1}{\text{SINHWZ}}\right)}$$
```python
if CoordSystem == "SinhCylindrical":
# Assuming the cylindrical radial coordinate
# is positive makes nice simplifications of
# unit vectors possible.
xx[0] = sp.symbols("xx0", real=True)
xxmin = [sp.sympify(0), -M_PI, sp.sympify(-1)]
xxmax = [sp.sympify(1), M_PI, sp.sympify(+1)]
AMPLRHO, SINHWRHO, AMPLZ, SINHWZ = par.Cparameters("REAL",thismodule,
["AMPLRHO","SINHWRHO","AMPLZ","SINHWZ"],
[ 10.0, 0.2, 10.0, 0.2])
# Set SinhCylindrical radial & z coordinates by default; overwrite later if CoordSystem == "SinhCylindricalv2".
RHOCYL = AMPLRHO * (sp.exp(xx[0] / SINHWRHO) - sp.exp(-xx[0] / SINHWRHO)) / (sp.exp(1 / SINHWRHO) - sp.exp(-1 / SINHWRHO))
# phi coordinate remains unchanged.
PHICYL = xx[1]
ZCYL = AMPLZ * (sp.exp(xx[2] / SINHWZ) - sp.exp(-xx[2] / SINHWZ)) / (sp.exp(1 / SINHWZ) - sp.exp(-1 / SINHWZ))
Cart_to_xx[0] = SINHWRHO*sp.asinh(sp.sqrt(Cartx ** 2 + Carty ** 2)*sp.sinh(1/SINHWRHO)/AMPLRHO)
Cart_to_xx[1] = sp.atan2(Carty, Cartx)
Cart_to_xx[2] = SINHWZ*sp.asinh(Cartz*sp.sinh(1/SINHWZ)/AMPLZ)
xxCart[0] = RHOCYL*sp.cos(PHICYL)
xxCart[1] = RHOCYL*sp.sin(PHICYL)
xxCart[2] = ZCYL
xxSph[0] = sp.sqrt(RHOCYL**2 + ZCYL**2)
xxSph[1] = sp.acos(ZCYL / xxSph[0])
xxSph[2] = PHICYL
scalefactor_orthog[0] = sp.diff(RHOCYL,xx[0])
scalefactor_orthog[1] = RHOCYL
scalefactor_orthog[2] = sp.diff(ZCYL,xx[2])
# Set the unit vectors
UnitVectors = [[ sp.cos(PHICYL), sp.sin(PHICYL), sp.sympify(0)],
[-sp.sin(PHICYL), sp.cos(PHICYL), sp.sympify(0)],
[ sp.sympify(0), sp.sympify(0), sp.sympify(1)]]
```
Next let's plot **"SinhCylindrical"** coordinates.
```python
fig=plt.figure()
plt.clf()
fig = plt.figure()
ax = plt.subplot(1,1,1, projection='polar')
ax.set_rmax(2)
Nr = 20
xx0s = np.linspace(0,2,Nr, endpoint=True) + 1.0/(2.0*Nr)
rs = []
AMPLRHO = 1.0
SINHW = 0.4
for i in range(Nr):
rs.append(AMPLRHO * (np.exp(xx0s[i] / SINHW) - np.exp(-xx0s[i] / SINHW)) / \
(np.exp(1.0 / SINHW) - np.exp(-1.0 / SINHW)))
ax.set_rgrids(rs,labels=[])
thetas = np.linspace(0,360,25, endpoint=True)
ax.set_thetagrids(thetas,labels=[])
# ax.grid(True)
ax.grid(True,linewidth='1.0')
plt.show()
```
<a id='sinhcylindricalv2'></a>
### Step 3.b.iii: **`reference_metric::CoordSystem = "SinhCylindricalv2"`** \[Back to [top](#toc)\]
$$\label{sinhcylindricalv2}$$
Cylindrical coordinates, but with
$$\rho(xx_0) = \text{AMPLRHO} \left[\text{const_drho}\ xx_0 + \frac{\sinh\left(\frac{xx_0}{\text{SINHWRHO}}\right)}{\sinh\left(\frac{1}{\text{SINHWRHO}}\right)}\right]$$
and
$$z(xx_2) = \text{AMPLZ} \left[\text{const_dz}\ xx_2 + \frac{\sinh\left(\frac{xx_2}{\text{SINHWZ}}\right)}{\sinh\left(\frac{1}{\text{SINHWZ}}\right)}\right]$$
```python
if CoordSystem == "SinhCylindricalv2":
# Assuming the cylindrical radial coordinate
# is positive makes nice simplifications of
# unit vectors possible.
xx[0] = sp.symbols("xx0", real=True)
# SinhCylindricalv2 adds the parameters "const_drho", "const_dz", which allows for regions near xx[0]=0
# and xx[2]=0 to have constant rho and z resolution of const_drho and const_dz, provided the sinh() terms
# do not dominate near xx[0]=0 and xx[2]=0.
xxmin = [sp.sympify(0), -M_PI, sp.sympify(-1)]
xxmax = [sp.sympify(1), M_PI, sp.sympify(+1)]
AMPLRHO, SINHWRHO, AMPLZ, SINHWZ = par.Cparameters("REAL",thismodule,
["AMPLRHO","SINHWRHO","AMPLZ","SINHWZ"],
[ 10.0, 0.2, 10.0, 0.2])
const_drho, const_dz = par.Cparameters("REAL",thismodule,["const_drho","const_dz"],[0.0625,0.0625])
RHOCYL = AMPLRHO * ( const_drho*xx[0] + (sp.exp(xx[0] / SINHWRHO) - sp.exp(-xx[0] / SINHWRHO)) / (sp.exp(1 / SINHWRHO) - sp.exp(-1 / SINHWRHO)) )
PHICYL = xx[1]
ZCYL = AMPLZ * ( const_dz *xx[2] + (sp.exp(xx[2] / SINHWZ ) - sp.exp(-xx[2] / SINHWZ )) / (sp.exp(1 / SINHWZ ) - sp.exp(-1 / SINHWZ )) )
# NO CLOSED-FORM EXPRESSION FOR RADIAL OR Z INVERSION.
# Cart_to_xx[0] = "NewtonRaphson"
# Cart_to_xx[1] = sp.atan2(Carty, Cartx)
# Cart_to_xx[2] = "NewtonRaphson"
xxCart[0] = RHOCYL*sp.cos(PHICYL)
xxCart[1] = RHOCYL*sp.sin(PHICYL)
xxCart[2] = ZCYL
xxSph[0] = sp.sqrt(RHOCYL**2 + ZCYL**2)
xxSph[1] = sp.acos(ZCYL / xxSph[0])
xxSph[2] = PHICYL
scalefactor_orthog[0] = sp.diff(RHOCYL,xx[0])
scalefactor_orthog[1] = RHOCYL
scalefactor_orthog[2] = sp.diff(ZCYL,xx[2])
# Set the unit vectors
UnitVectors = [[ sp.cos(PHICYL), sp.sin(PHICYL), sp.sympify(0)],
[-sp.sin(PHICYL), sp.cos(PHICYL), sp.sympify(0)],
[ sp.sympify(0), sp.sympify(0), sp.sympify(1)]]
```
For example, let's set up **`SinhCylindricalv2`** coordinates and output the Christoffel symbol $\hat{\Gamma}^{xx_2}_{xx_2 xx_2}$, or more simply $\hat{\Gamma}^2_{22}$:
```python
par.set_parval_from_str("reference_metric::CoordSystem","SinhCylindricalv2")
rfm.reference_metric()
sp.pretty_print(sp.simplify(rfm.GammahatUDD[2][2][2]))
```
$$\hat{\Gamma}^2_{22} = \frac{-\left(e^{\frac{2\,xx_2}{\text{SINHWZ}}} - 1\right) e^{\frac{1}{\text{SINHWZ}}}}{\text{SINHWZ}\left[-\text{SINHWZ}\,\text{const\_dz}\left(e^{\frac{2}{\text{SINHWZ}}} - 1\right) e^{\frac{xx_2}{\text{SINHWZ}}} - \left(e^{\frac{2\,xx_2}{\text{SINHWZ}}} + 1\right) e^{\frac{1}{\text{SINHWZ}}}\right]}$$
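As a quick sanity check (a sketch in plain SymPy, independent of the `rfm` module): because $z$ depends only on $xx_2$, the exact result is $\hat{\Gamma}^2_{22} = z''(xx_2)/z'(xx_2)$, which can be compared against the expression above:
```python
import sympy as sp

xx2, SINHWZ, const_dz, AMPLZ = sp.symbols("xx2 SINHWZ const_dz AMPLZ", real=True)
# z(xx2) exactly as defined in the SinhCylindricalv2 block above
ZCYL = AMPLZ*(const_dz*xx2 + (sp.exp(xx2/SINHWZ) - sp.exp(-xx2/SINHWZ)) /
                             (sp.exp(1/SINHWZ)   - sp.exp(-1/SINHWZ)))
# For a diagonal metric with ghat_{22} = (dz/dxx2)^2, Gammahat^2_{22} = z''/z'
Gammahat222 = sp.simplify(sp.diff(ZCYL, xx2, 2)/sp.diff(ZCYL, xx2))
sp.pretty_print(Gammahat222)
```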
As we will soon see, defining these "hatted" quantities will be quite useful when expressing hyperbolic ([wave-equation](https://en.wikipedia.org/wiki/Wave_equation)-like) PDEs in non-Cartesian coordinate systems.
<a id='cartesianlike'></a>
## Step 3.c: Cartesian-like coordinate systems \[Back to [top](#toc)\]
$$\label{cartesianlike}$$
<a id='cartesian'></a>
### Step 3.c.i: **`reference_metric::CoordSystem = "Cartesian"`** \[Back to [top](#toc)\]
$$\label{cartesian}$$
Standard Cartesian coordinates, with $(x,y,z)=$ `(xx0,xx1,xx2)`
```python
if CoordSystem == "Cartesian":
xmin, xmax, ymin, ymax, zmin, zmax = par.Cparameters("REAL",thismodule,
["xmin","xmax","ymin","ymax","zmin","zmax"],
[ -10.0, 10.0, -10.0, 10.0, -10.0, 10.0])
xxmin = ["xmin", "ymin", "zmin"]
xxmax = ["xmax", "ymax", "zmax"]
xxCart[0] = xx[0]
xxCart[1] = xx[1]
xxCart[2] = xx[2]
xxSph[0] = sp.sqrt(xx[0] ** 2 + xx[1] ** 2 + xx[2] ** 2)
xxSph[1] = sp.acos(xx[2] / xxSph[0])
xxSph[2] = sp.atan2(xx[1], xx[0])
Cart_to_xx[0] = Cartx
Cart_to_xx[1] = Carty
Cart_to_xx[2] = Cartz
scalefactor_orthog[0] = sp.sympify(1)
scalefactor_orthog[1] = sp.sympify(1)
scalefactor_orthog[2] = sp.sympify(1)
# Set the transpose of the matrix of unit vectors
UnitVectors = [[sp.sympify(1), sp.sympify(0), sp.sympify(0)],
[sp.sympify(0), sp.sympify(1), sp.sympify(0)],
[sp.sympify(0), sp.sympify(0), sp.sympify(1)]]
```
```python
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.gca()
Nx = 16
ax.set_xticks(np.arange(0, 1., 1./Nx))
ax.set_yticks(np.arange(0, 1., 1./Nx))
# plt.scatter(x, y)
ax.set_aspect('equal')
plt.grid()
# plt.savefig("Cartgrid.png",dpi=300)
plt.show()
# plt.close(fig)
```
<a id='prolatespheroidal'></a>
## Step 3.d: [Prolate spheroidal](https://en.wikipedia.org/wiki/Prolate_spheroidal_coordinates)-like coordinate systems \[Back to [top](#toc)\]
$$\label{prolatespheroidal}$$
<a id='symtp'></a>
### Step 3.d.i: **`reference_metric::CoordSystem = "SymTP"`** \[Back to [top](#toc)\]
$$\label{symtp}$$
Symmetric TwoPuncture coordinates, with $(\rho,\phi,z)=(xx_0\sin(xx_1), xx_2, \sqrt{xx_0^2 + \text{bScale}^2}\cos(xx_1))$
```python
if CoordSystem == "SymTP":
var1, var2= sp.symbols('var1 var2',real=True)
bScale, AW, AMAX, RHOMAX, ZMIN, ZMAX = par.Cparameters("REAL",thismodule,
["bScale","AW","AMAX","RHOMAX","ZMIN","ZMAX"],
[0.5, 0.2, 10.0, 10.0, -10.0, 10.0])
# Assuming xx0, xx1, and bScale
# are positive makes nice simplifications of
# unit vectors possible.
xx[0],xx[1] = sp.symbols("xx0 xx1", real=True)
xxmin = [sp.sympify(0), sp.sympify(0),-M_PI]
xxmax = [ AMAX, M_PI, M_PI]
AA = xx[0]
if CoordSystem == "SinhSymTP":
AA = (sp.exp(xx[0]/AW)-sp.exp(-xx[0]/AW))/2
var1 = sp.sqrt(AA**2 + (bScale * sp.sin(xx[1]))**2)
var2 = sp.sqrt(AA**2 + bScale**2)
RHOSYMTP = AA*sp.sin(xx[1])
PHSYMTP = xx[2]
ZSYMTP = var2*sp.cos(xx[1])
xxCart[0] = AA *sp.sin(xx[1])*sp.cos(xx[2])
xxCart[1] = AA *sp.sin(xx[1])*sp.sin(xx[2])
xxCart[2] = ZSYMTP
xxSph[0] = sp.sqrt(RHOSYMTP**2 + ZSYMTP**2)
xxSph[1] = sp.acos(ZSYMTP / xxSph[0])
xxSph[2] = PHSYMTP
rSph = sp.sqrt(Cartx ** 2 + Carty ** 2 + Cartz ** 2)
thSph = sp.acos(Cartz / rSph)
phSph = sp.atan2(Carty, Cartx)
# Mathematica script to compute Cart_to_xx[]
# AA = x1;
# var2 = Sqrt[AA^2 + bScale^2];
# RHOSYMTP = AA*Sin[x2];
# ZSYMTP = var2*Cos[x2];
# Solve[{rSph == Sqrt[RHOSYMTP^2 + ZSYMTP^2],
# thSph == ArcCos[ZSYMTP/Sqrt[RHOSYMTP^2 + ZSYMTP^2]],
# phSph == x3},
# {x1, x2, x3}]
Cart_to_xx[0] = sp.sqrt(-bScale**2 + rSph**2 +
sp.sqrt(bScale**4 + 2*bScale**2*rSph**2 + rSph**4 -
4*bScale**2*rSph**2*sp.cos(thSph)**2))*M_SQRT1_2 # M_SQRT1_2 = 1/sqrt(2); define this way for UnitTesting
# The sign() function in the following expression ensures the correct root is taken.
Cart_to_xx[1] = sp.acos(sp.sign(Cartz)*(
sp.sqrt(1 + rSph**2/bScale**2 -
sp.sqrt(bScale**4 + 2*bScale**2*rSph**2 + rSph**4 -
4*bScale**2*rSph**2*sp.cos(thSph)**2)/bScale**2)*M_SQRT1_2)) # M_SQRT1_2 = 1/sqrt(2); define this way for UnitTesting
Cart_to_xx[2] = phSph
```
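A quick round-trip check of the `Cart_to_xx[0]` expression (a self-contained SymPy sketch, not part of the NRPy+ module; it simply restates the SymTP relations with `M_SQRT1_2` written as $1/\sqrt{2}$): plugging $\rho = xx_0\sin(xx_1)$ and $z = \sqrt{xx_0^2+\text{bScale}^2}\cos(xx_1)$ into the inversion formula should recover $xx_0$.
```python
import sympy as sp

xx0, xx1, bScale = sp.symbols("xx0 xx1 bScale", positive=True)
rho   = xx0*sp.sin(xx1)
z     = sp.sqrt(xx0**2 + bScale**2)*sp.cos(xx1)
rSph  = sp.sqrt(rho**2 + z**2)
thSph = sp.acos(z/rSph)
xx0_of_Cart = sp.sqrt(-bScale**2 + rSph**2 +
                      sp.sqrt(bScale**4 + 2*bScale**2*rSph**2 + rSph**4 -
                              4*bScale**2*rSph**2*sp.cos(thSph)**2))/sp.sqrt(2)
# Numerical spot check: expect the original xx0 value (3.7) back
print(sp.N(xx0_of_Cart.subs({xx0: 3.7, xx1: 1.1, bScale: 0.5})))
```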
<a id='sinhsymtp'></a>
### Step 3.d.ii: **`reference_metric::CoordSystem = "SinhSymTP"`** \[Back to [top](#toc)\]
$$\label{sinhsymtp}$$
Symmetric TwoPuncture coordinates, but with the radial coordinate rescaled as $$xx_0 \to AA(xx_0) = \text{AMAX}\,\frac{\sinh\left(\frac{xx_0}{\text{AMAX}\cdot\text{SINHWAA}}\right)}{\sinh\left(\frac{1}{\text{SINHWAA}}\right)}$$
```python
if CoordSystem == "SinhSymTP":
var1, var2= sp.symbols('var1 var2',real=True)
    bScale, SINHWAA, AMAX, RHOMAX, ZMIN, ZMAX = par.Cparameters("REAL",thismodule,
                                                                ["bScale","SINHWAA","AMAX","RHOMAX","ZMIN","ZMAX"],
                                                                [0.5, 0.2, 10.0, 10.0, -10.0, 10.0])
# Assuming xx0, xx1, and bScale
# are positive makes nice simplifications of
# unit vectors possible.
xx[0],xx[1] = sp.symbols("xx0 xx1", real=True)
xxmin = [sp.sympify(0), sp.sympify(0),-M_PI]
xxmax = [ AMAX, M_PI, M_PI]
AA = xx[0]
if CoordSystem == "SinhSymTP":
# With xxmax[0] == AMAX, sinh(xx0/AMAX) will evaluate to a number between 0 and 1.
# Similarly, sinh(xx0/(AMAX*SINHWAA)) / sinh(1/SINHWAA) will also evaluate to a number between 0 and 1.
# Then AA = AMAX*sinh(xx0/(AMAX*SINHWAA)) / sinh(1/SINHWAA) will evaluate to a number between 0 and AMAX.
        AA = AMAX * (sp.exp(xx[0] / (AMAX*SINHWAA)) - sp.exp(-xx[0] / (AMAX*SINHWAA))) / (sp.exp(1 / SINHWAA) - sp.exp(-1 / SINHWAA))
var1 = sp.sqrt(AA**2 + (bScale * sp.sin(xx[1]))**2)
var2 = sp.sqrt(AA**2 + bScale**2)
RHOSYMTP = AA*sp.sin(xx[1])
PHSYMTP = xx[2]
ZSYMTP = var2*sp.cos(xx[1])
xxCart[0] = AA *sp.sin(xx[1])*sp.cos(xx[2])
xxCart[1] = AA *sp.sin(xx[1])*sp.sin(xx[2])
xxCart[2] = ZSYMTP
xxSph[0] = sp.sqrt(RHOSYMTP**2 + ZSYMTP**2)
xxSph[1] = sp.acos(ZSYMTP / xxSph[0])
xxSph[2] = PHSYMTP
scalefactor_orthog[0] = sp.diff(AA,xx[0]) * var1 / var2
scalefactor_orthog[1] = var1
scalefactor_orthog[2] = AA * sp.sin(xx[1])
# Set the transpose of the matrix of unit vectors
UnitVectors = [[sp.sin(xx[1]) * sp.cos(xx[2]) * var2 / var1,
sp.sin(xx[1]) * sp.sin(xx[2]) * var2 / var1,
AA * sp.cos(xx[1]) / var1],
[AA * sp.cos(xx[1]) * sp.cos(xx[2]) / var1,
AA * sp.cos(xx[1]) * sp.sin(xx[2]) / var1,
-sp.sin(xx[1]) * var2 / var1],
[-sp.sin(xx[2]), sp.cos(xx[2]), sp.sympify(0)]]
```
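As with the earlier examples, one can ask the `rfm` module for the resulting quantities. Here is a small sketch (assuming the `par` and `rfm` modules imported earlier in this notebook are still in scope) that checks that the rows of `UnitVectors` form an orthonormal triad:
```python
import sympy as sp
par.set_parval_from_str("reference_metric::CoordSystem","SinhSymTP")
rfm.reference_metric()
# The rows of UnitVectors should form an orthonormal triad:
for i in range(3):
    for j in range(3):
        dot_ij = sp.simplify(sum(rfm.UnitVectors[i][k]*rfm.UnitVectors[j][k] for k in range(3)))
        print(i, j, dot_ij)   # expect 1 on the diagonal, 0 off the diagonal
```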
<a id='latex_pdf_output'></a>
# Step 4: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
[Tutorial-Reference_Metric.pdf](Tutorial-Reference_Metric.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```python
!jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-Reference_Metric.ipynb
!pdflatex -interaction=batchmode Tutorial-Reference_Metric.tex
!pdflatex -interaction=batchmode Tutorial-Reference_Metric.tex
!pdflatex -interaction=batchmode Tutorial-Reference_Metric.tex
!rm -f Tut*.out Tut*.aux Tut*.log
```
[NbConvertApp] Converting notebook Tutorial-Reference_Metric.ipynb to latex
[NbConvertApp] Support files will be in Tutorial-Reference_Metric_files/
[NbConvertApp] Making directory Tutorial-Reference_Metric_files
[NbConvertApp] Making directory Tutorial-Reference_Metric_files
[NbConvertApp] Making directory Tutorial-Reference_Metric_files
[NbConvertApp] Making directory Tutorial-Reference_Metric_files
[NbConvertApp] Making directory Tutorial-Reference_Metric_files
[NbConvertApp] Making directory Tutorial-Reference_Metric_files
[NbConvertApp] Making directory Tutorial-Reference_Metric_files
[NbConvertApp] Writing 135717 bytes to Tutorial-Reference_Metric.tex
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
|
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
```
# Reading Data and Preprocessing
```python
data = pd.read_csv('agaricus-lepiota-subset.csv') # reading data
```
```python
data.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>p</th>
<th>k</th>
<th>y</th>
<th>e</th>
<th>f</th>
<th>s</th>
<th>f.1</th>
<th>c</th>
<th>n</th>
<th>b</th>
<th>...</th>
<th>k.1</th>
<th>p.1</th>
<th>w</th>
<th>p.2</th>
<th>w.1</th>
<th>o</th>
<th>e.1</th>
<th>w.2</th>
<th>v</th>
<th>p.3</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>e</td>
<td>k</td>
<td>s</td>
<td>n</td>
<td>f</td>
<td>n</td>
<td>a</td>
<td>c</td>
<td>b</td>
<td>y</td>
<td>...</td>
<td>s</td>
<td>o</td>
<td>o</td>
<td>p</td>
<td>n</td>
<td>o</td>
<td>p</td>
<td>o</td>
<td>v</td>
<td>l</td>
</tr>
<tr>
<th>1</th>
<td>p</td>
<td>k</td>
<td>s</td>
<td>e</td>
<td>f</td>
<td>f</td>
<td>f</td>
<td>c</td>
<td>n</td>
<td>b</td>
<td>...</td>
<td>s</td>
<td>w</td>
<td>w</td>
<td>p</td>
<td>w</td>
<td>o</td>
<td>e</td>
<td>w</td>
<td>v</td>
<td>d</td>
</tr>
<tr>
<th>2</th>
<td>p</td>
<td>k</td>
<td>y</td>
<td>c</td>
<td>f</td>
<td>m</td>
<td>a</td>
<td>c</td>
<td>b</td>
<td>y</td>
<td>...</td>
<td>y</td>
<td>c</td>
<td>c</td>
<td>p</td>
<td>w</td>
<td>n</td>
<td>n</td>
<td>w</td>
<td>c</td>
<td>d</td>
</tr>
<tr>
<th>3</th>
<td>p</td>
<td>k</td>
<td>s</td>
<td>n</td>
<td>f</td>
<td>y</td>
<td>f</td>
<td>c</td>
<td>n</td>
<td>b</td>
<td>...</td>
<td>s</td>
<td>p</td>
<td>p</td>
<td>p</td>
<td>w</td>
<td>o</td>
<td>e</td>
<td>w</td>
<td>v</td>
<td>d</td>
</tr>
<tr>
<th>4</th>
<td>e</td>
<td>x</td>
<td>s</td>
<td>g</td>
<td>f</td>
<td>n</td>
<td>f</td>
<td>w</td>
<td>b</td>
<td>g</td>
<td>...</td>
<td>k</td>
<td>w</td>
<td>w</td>
<td>p</td>
<td>w</td>
<td>t</td>
<td>p</td>
<td>w</td>
<td>n</td>
<td>g</td>
</tr>
</tbody>
</table>
<p>5 rows × 23 columns</p>
</div>
```python
print(data.columns)
```
Index(['p', 'k', 'y', 'e', 'f', 's', 'f.1', 'c', 'n', 'b', 't', '?', 's.1',
'k.1', 'p.1', 'w', 'p.2', 'w.1', 'o', 'e.1', 'w.2', 'v', 'p.3'],
dtype='object')
```python
data = data.reindex(np.random.permutation(data.index)) # shuffling the data
y = data['p'].to_numpy() # labels from dataframe to numpy array
x = data.drop(columns='p').to_numpy() # data from dataframe to numpy array
```
```python
# replacing 'p' with '1' and 'e' with '0'
y = np.where(y=='p', '1', y)
y = np.where(y=='e', '0', y)
```
```python
y
```
array(['1', '0', '1', '0', '1', '1', '1', '0', '0', '1', '0', '0', '0',
'1', '0', '0', '1', '1', '1', '0', '1', '1', '1', '0', '1', '0',
'0', '1', '0', '1', '0', '0', '0', '0', '0', '0', '0', '1', '1',
'0', '0', '0', '0', '1', '1', '1', '1', '1', '0', '0', '0', '1',
'0', '1', '1', '1', '1', '0', '1', '1', '1', '0', '1', '0', '1',
'1', '1', '1', '0', '0', '0', '0', '1', '0', '1', '0', '0', '1',
'0', '1', '0', '0', '0', '0', '1', '0', '1', '1', '1', '1', '0',
'0', '1', '0', '0', '0', '0', '1', '0', '1', '0', '1', '0', '1',
'0', '0', '0', '0', '1', '0', '1', '0', '0', '0', '0', '1', '1',
'1', '1', '1', '0', '0', '1', '1', '0', '0', '0', '0', '1', '0',
'0', '0', '1', '1', '0', '1', '1', '0', '0', '0', '1', '0', '0',
'1', '1'], dtype=object)
```python
# Data is fairly balanced
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
labels = ['p', 'e']
num = [(y == '1').sum(), (y == '0').sum()]
ax.bar(labels,num)
plt.show()
```
```python
print(x.shape)
print(y.shape)
```
(145, 22)
(145,)
```python
# computing different possible outcomes for every feature
outcomes = []
for i in range(22):
outcomes.append(list(set(x[...,i])))
```
```python
outcomes
```
[['k', 'b', 'x', 'f'],
['s', 'f', 'y'],
['g', 'c', 'e', 'w', 'n'],
['t', 'f'],
['f', 'y', 's', 'm', 'n'],
['a', 'f'],
['w', 'c'],
['b', 'n'],
['g', 'o', 'y', 'p', 'w', 'b', 'n'],
['t', 'e'],
['b', '?', 'c'],
['s', 'k', 'y'],
['s', 'k', 'y'],
['o', 'c', 'p', 'w', 'n'],
['o', 'c', 'p', 'w', 'n'],
['p'],
['w', 'o', 'n'],
['t', 'o', 'n'],
['n', 'e', 'p'],
['o', 'y', 'w', 'b', 'n'],
['c', 'y', 's', 'v', 'n'],
['d', 'l', 'g', 'p']]
# Support and Confidence
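For each (feature, outcome) pair, the support is the number of rows in which the feature takes that outcome, and the confidence with respect to a class is the fraction of those rows carrying that class label; this is what the next cell computes for the classes `e` ('0') and `p` ('1'):
\begin{equation}
\text{support}(f{=}v)=\left|\{i: x_{i,f}=v\}\right|,
\qquad
\text{confidence}(f{=}v \Rightarrow c)=\frac{\left|\{i: x_{i,f}=v \;\wedge\; y_i=c\}\right|}{\text{support}(f{=}v)}
\end{equation}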
```python
possible_outcomes = []
for i in data.columns:
for j in set(data[i]):
possible_outcomes.append((i,j))
```
```python
support = [] # list of support
econf = [] # list of confidence for features and e output
pconf = [] # list of confidence for features and p output
rule = [] # list of rules based on the confidence of the outcome
for feat, out in possible_outcomes:
v = np.array(data[feat])
sup = (v==out).sum()
support.append(sup)
e_conf = (np.dot((y=='0').astype(int), (v==out).astype(int)))/sup
p_conf = (np.dot((y=='1').astype(int), (v==out).astype(int)))/sup
econf.append(e_conf)
pconf.append(p_conf)
```
```python
df = pd.DataFrame([support, econf, pconf], columns=possible_outcomes)
df.columns = pd.MultiIndex.from_tuples(df.columns, names=['feature', 'outcomes'])
```
```python
# first row is support, second row is 'e' confidence, third row is 'p' confidence
df
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead tr th {
text-align: left;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr>
<th>feature</th>
<th colspan="2" halign="left">p</th>
<th colspan="4" halign="left">k</th>
<th colspan="3" halign="left">y</th>
<th>e</th>
<th>...</th>
<th>w.2</th>
<th colspan="5" halign="left">v</th>
<th colspan="4" halign="left">p.3</th>
</tr>
<tr>
<th>outcomes</th>
<th>e</th>
<th>p</th>
<th>k</th>
<th>b</th>
<th>x</th>
<th>f</th>
<th>s</th>
<th>f</th>
<th>y</th>
<th>g</th>
<th>...</th>
<th>n</th>
<th>y</th>
<th>s</th>
<th>c</th>
<th>v</th>
<th>n</th>
<th>d</th>
<th>l</th>
<th>g</th>
<th>p</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>78.000000</td>
<td>67.000000</td>
<td>83.000000</td>
<td>19.000000</td>
<td>25.00</td>
<td>18.0</td>
<td>91.000000</td>
<td>16.0000</td>
<td>38.000000</td>
<td>12.000000</td>
<td>...</td>
<td>12.000000</td>
<td>4.00</td>
<td>13.000000</td>
<td>22.000000</td>
<td>94.000000</td>
<td>12.000000</td>
<td>25.00</td>
<td>69.000000</td>
<td>25.00</td>
<td>26.000000</td>
</tr>
<tr>
<th>1</th>
<td>0.512821</td>
<td>0.567164</td>
<td>0.566265</td>
<td>0.473684</td>
<td>0.52</td>
<td>0.5</td>
<td>0.527473</td>
<td>0.5625</td>
<td>0.552632</td>
<td>0.583333</td>
<td>...</td>
<td>0.583333</td>
<td>0.25</td>
<td>0.692308</td>
<td>0.272727</td>
<td>0.606383</td>
<td>0.416667</td>
<td>0.56</td>
<td>0.550725</td>
<td>0.56</td>
<td>0.461538</td>
</tr>
<tr>
<th>2</th>
<td>0.487179</td>
<td>0.432836</td>
<td>0.433735</td>
<td>0.526316</td>
<td>0.48</td>
<td>0.5</td>
<td>0.472527</td>
<td>0.4375</td>
<td>0.447368</td>
<td>0.416667</td>
<td>...</td>
<td>0.416667</td>
<td>0.75</td>
<td>0.307692</td>
<td>0.727273</td>
<td>0.393617</td>
<td>0.583333</td>
<td>0.44</td>
<td>0.449275</td>
<td>0.44</td>
<td>0.538462</td>
</tr>
</tbody>
</table>
<p>3 rows × 79 columns</p>
</div>
# Functions
```python
def split(k, x, y):
    # in this function, I construct the k folds and split the data into train and test sets
# k: number of folds
# x: input
# y: output
x_train = []
y_train = []
x_test = []
y_test = []
l = math.ceil(x.shape[0]/k) # len of each fold
for i in range(k):
# in each step I am using 1 fold as a test set and the others as train
x_test.append(x[l*i: min(l*(i+1), x.shape[0])])
y_test.append(y[l*i: min(l*(i+1), x.shape[0])])
x_train.append(np.concatenate((x[0:l*i], x[min(l*(i+1), x.shape[0]):x.shape[0]])))
y_train.append(np.concatenate((y[0:l*i], y[min(l*(i+1), y.shape[0]):y.shape[0]])))
return x_train, y_train, x_test, y_test
```
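As a quick shape check (a sketch using the 145-row dataset loaded above), splitting with $k=5$ should give five test folds of $\lceil 145/5 \rceil = 29$ rows each and training splits of 116 rows:
```python
x_tr, y_tr, x_te, y_te = split(5, x, y)
print(len(x_tr), x_te[0].shape, x_tr[0].shape)  # expect: 5 (29, 22) (116, 22)
```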
```python
def train(x, y, outcomes):
# In this function, I am finding rules for each possible outcome of the feature based on support and confidence for each output
# x: input
# y: output
    # outcomes: possible outcomes of features
rule = [] # list of rules for different features and outcomes
for i in range(x.shape[1]):
r = []
for j in outcomes[i]:
v = x[...,i] # v is a feature column and j can be different outcomes of that feature
sup = (v == j).sum() # support for that outcome
e_conf = (np.dot((y=='0').astype(int), (v==j).astype(int)))/sup # e confidence
p_conf = (np.dot((y=='1').astype(int), (v==j).astype(int)))/sup # p confidence
if e_conf > p_conf:
r.append('0')
else:
r.append('1')
rule.append(r)
return rule
```
```python
def feature_selection(x, y, rule, outcomes):
# In this function, I am selecting the best feature to use as a classifier based on accuracy
# x: input
# y: output
# rule: rules found in training
# outcomes: possible outcomes of features
acc = [] # list of accuracies for features
for i in range(x.shape[1]):
count = 0
for j in range(x.shape[0]):
if rule[i][outcomes[i].index(x[j][i])] == y[j]:
count += 1
acc.append(count/x.shape[0])
# Bar plot of accuracy based on each feature
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.bar(list(data.columns[1:]), acc)
plt.show()
return int(np.array(acc).argmax()) # returning the index of highest accuracy
```
```python
def predict(x_test, best_feature, rule, outcomes):
    # In this function, I am making predictions based on the rules found in training
# x_test: test set for prediction
# best_feature: highest accuracy feature selected
# rule: rules found in training
# outcomes: possible outcomes of features
pred = []
for i in x_test[...,best_feature]:
pred.append(rule[best_feature][outcomes[best_feature].index(i)])
return pred
```
## Precision, Recall, Accuracy and F-Measure
\begin{equation}
\text { Precision }=\frac{t p}{t p+f p}
\end{equation}
\begin{equation}
\text { Recall }=\frac{t p}{t p+f n}
\end{equation}
\begin{equation}
\text { Accuracy }=\frac{t p+t n}{t p+t n+f p+f n}
\end{equation}
\begin{equation}
F=2 \cdot \frac{\text { precision } \cdot \text { recall }}{\text { precision }+\text { recall }}
\end{equation}
```python
def evaluate(pred, y):
# In this function, I am computing evaluation metrics based on the equations above
# pred: array of prediction
# y: correct labels
tp = np.dot((pred == '1'), (y == '1')).sum()
fp = np.dot((pred == '1'), (y == '0')).sum()
fn = np.dot((pred == '0'), (y == '1')).sum()
tn = np.dot((pred == '0'), (y == '0')).sum()
precision = tp/(tp+fp)
recall = tp/(tp+fn)
acc = (tp+tn)/(tp+tn+fp+fn)
f_measure = (2*precision*recall)/(precision+recall)
return precision, recall, acc, f_measure
```
```python
def kfold(k, x, y):
    # In this function, I combine the functions above and run k-fold cross-validation
# k: number of folds
# x: input
# y: labels
x_train, y_train, x_test, y_test = split(k, x, y)
for i in range(k):
rule = train(x_train[i], y_train[i], outcomes)
best_feature = feature_selection(x_train[i], y_train[i], rule, outcomes)
pred = predict(x_test[i], best_feature, rule, outcomes)
pred = np.array(pred)
precision, recall, acc, f = evaluate(pred, y_test[i])
print("precision: {} \t recall: {} \t accuracy: {} \t f-measure: {} ".format(precision, recall, acc, f))
```
```python
kfold(5, x, y)
```
|
(*
Copyright (C) 2019 Susi Lehtola
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: mgga_exc *)
$include "mgga_c_scan.mpl"
(* Coefficients of the rSCAN switching function from SI, in reversed(!) order: 7, 6, ..., 0 *)
rscan_fc := [-0.051848879792, 0.516884468372, -1.915710236206, 3.061560252175, -1.535685604549, -0.4352, -0.64, 1]:
np53 := rs -> n_total(rs)^(5/3):
(* First regularization: tau^u -> tau^u + tau^r *)
rscan_alpha0 := (rs, z, xt, ts0, ts1) ->
(np53(rs)*m_max(t_total(z, ts0, ts1) - xt^2/8, 0))/((K_FACTOR_C*np53(rs) + 2^(2/3)*params_a_taur)*t_total(z, 1, 1)):
(* Second regularization: alpha -> alpha^3/(alpha^2 + alpha_r) *)
rscan_alpha := (rs, z, xt, ts0, ts1) -> rscan_alpha0(rs, z, xt, ts0, ts1)^3/(rscan_alpha0(rs, z, xt, ts0, ts1)^2 + params_a_alphar):
(* f(alpha) replaced with a polynomial for alpha in [0, 2.5] *)
rscan_f_alpha_small := (a,ff) -> add(ff[8-i]*a^i, i=0..7):
rscan_f_alpha_large := a -> -params_a_d*exp(params_a_c2/(1 - a)):
rscan_f_alpha := (a, ff) -> my_piecewise3( a <= 2.5, rscan_f_alpha_small(m_min(a, 2.5),ff), rscan_f_alpha_large(m_max(a, 2.5)) ):
(* set parameters of f_alpha *)
params_a_alphar := 1e-3:
params_a_taur := 1e-4:
rscan_f := (rs, z, xt, xs0, xs1, ts0, ts1) ->
f_pbe(rs, z, xt, xs0, xs1) + rscan_f_alpha(rscan_alpha(rs, z, xt, ts0, ts1), rscan_fc)*(
+ scan_e0(rs, z, X2S*2^(1/3)*xt)
- f_pbe(rs, z, xt, xs0, xs1)
):
f := (rs, z, xt, xs0, xs1, us0, us1, ts0, ts1) ->
rscan_f(rs, z, xt, xs0, xs1, ts0, ts1):
|
-- Andreas, 2012-03-30
module Issue593 where
import Common.Level
open import Common.Equality
open import Common.Irrelevance
record Unit : Set where
constructor unit
bla6 : (F : Unit -> Set) ->
let X : Unit -> Unit -> Set
X = _
in (z : Unit) -> X z z ≡ F z
bla6 F z = refl
-- non-linearity for singleton types should not matter
bla7 : (F : Unit -> Set) ->
let X : Set
X = _
in (z : Unit) -> X ≡ F z
bla7 F z = refl
-- should eta expand z to unit
-- * a more involved singleton type:
record R (A : Set) : Set where
constructor r
field
f1 : A -> Unit
f2 : A
Sing : Set1
Sing = (A : Set) -> A -> R (A -> Unit)
test : (F : Sing -> Set) ->
let X : Set
X = _
in (z : Sing) -> X ≡ F z
test F z = refl
-- * something with irrelevance
Sing' : Set1
Sing' = (A : Set) -> A -> R (Squash A)
test' : (F : Sing' -> Set) ->
let X : Sing' -> Sing' -> Set
X = _
in (z : Sing') -> X z z ≡ F z
test' F z = refl
-- non-linearity should not matter
|
lemma assumes "eventually (\<lambda>z. f z = g z) (at z)" "z = z'" shows zorder_cong:"zorder f z = zorder g z'" and zor_poly_cong:"zor_poly f z = zor_poly g z'"
|
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.solinas64_2e384m317_7limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition carry :
{ carry : feBW_loose -> feBW_tight
| forall a, phiBW_tight (carry a) = (phiBW_loose a) }.
Proof.
Set Ltac Profiling.
Time synthesize_carry ().
Show Ltac Profile.
Time Defined.
Print Assumptions carry.
|
{-# OPTIONS --without-K --safe #-}
module Categories.Adjoint.Instance.Core where
-- The adjunction between the forgetful functor from Cats to Groupoids
-- and the Core functor.
open import Level using (_⊔_)
import Function
open import Categories.Adjoint using (_⊣_)
open import Categories.Category using (Category)
import Categories.Category.Construction.Core as C
open import Categories.Category.Groupoid using (Groupoid)
open import Categories.Category.Instance.Cats using (Cats)
open import Categories.Category.Instance.Groupoids using (Groupoids)
open import Categories.Functor using (Functor; _∘F_; id)
open import Categories.Functor.Instance.Core using (Core)
import Categories.Morphism as Morphism
import Categories.Morphism.Reasoning as MR
open import Categories.Morphism.IsoEquiv using (⌞_⌟)
open import Categories.NaturalTransformation using (ntHelper)
open import Categories.NaturalTransformation.NaturalIsomorphism using (refl; _≃_)
-- The forgetful functor from Groupoids to Cats
Forgetful : ∀ {o ℓ e} → Functor (Groupoids o ℓ e) (Cats o ℓ e)
Forgetful = record
{ F₀ = category
; F₁ = Function.id
; identity = refl
; homomorphism = refl
; F-resp-≈ = Function.id
}
where open Groupoid
-- Core is right-adjoint to the forgetful functor from Groupoids to
-- Cats
CoreAdj : ∀ {o ℓ e} → Forgetful {o} {ℓ ⊔ e} {e} ⊣ Core
CoreAdj = record
{ unit = ntHelper record { η = unit ; commute = λ {G} {H} F → unit-commute {G} {H} F }
; counit = ntHelper record { η = counit ; commute = counit-commute }
; zig = λ {G} → zig {G}
; zag = zag
}
where
open Groupoid using (category)
module Core = Functor Core
unit : ∀ G → Functor (category G) (C.Core (category G))
unit G = record
{ F₀ = Function.id
; F₁ = λ f → record { from = f ; to = f ⁻¹ ; iso = iso }
; identity = ⌞ Equiv.refl ⌟
; homomorphism = ⌞ Equiv.refl ⌟
; F-resp-≈ = λ eq → ⌞ eq ⌟
}
where open Groupoid G
unit-commute : ∀ {G H} (F : Functor (category G) (category H)) →
unit H ∘F F ≃ Core.F₁ F ∘F unit G
unit-commute {G} {H} F = record
{ F⇒G = record { η = λ _ → ≅.refl ; commute = λ _ → ⌞ MR.id-comm-sym (category H) ⌟ ; sym-commute = λ _ → ⌞ MR.id-comm (category H) ⌟ }
; F⇐G = record { η = λ _ → ≅.refl ; commute = λ _ → ⌞ MR.id-comm-sym (category H) ⌟ ; sym-commute = λ _ → ⌞ MR.id-comm (category H) ⌟ }
; iso = λ _ → record { isoˡ = ⌞ identityˡ ⌟ ; isoʳ = ⌞ identityˡ ⌟ }
}
where
open Category (category H)
open Morphism (category H)
counit : ∀ C → Functor (C.Core C) C
counit C = record
{ F₀ = Function.id
; F₁ = _≅_.from
; identity = Equiv.refl
; homomorphism = Equiv.refl
; F-resp-≈ = λ where ⌞ eq ⌟ → eq
}
where
open Category C
open Morphism C
counit-commute : ∀ {C D} (F : Functor C D) →
counit D ∘F Core.F₁ F ≃ F ∘F counit C
counit-commute {C} {D} F = record
{ F⇒G = record { η = λ _ → D.id ; commute = λ _ → MR.id-comm-sym D ; sym-commute = λ _ → MR.id-comm D }
; F⇐G = record { η = λ _ → D.id ; commute = λ _ → MR.id-comm-sym D ; sym-commute = λ _ → MR.id-comm D }
; iso = λ _ → _≅_.iso ≅.refl
}
where
module D = Category D
open Morphism D
zig : ∀ {G} → counit (category G) ∘F unit G ≃ id
zig {G} = record
{ F⇒G = record { η = λ _ → G.id ; commute = λ _ → MR.id-comm-sym (category G) ; sym-commute = λ _ → MR.id-comm (category G) }
; F⇐G = record { η = λ _ → G.id ; commute = λ _ → MR.id-comm-sym (category G) ; sym-commute = λ _ → MR.id-comm (category G) }
; iso = λ _ → _≅_.iso ≅.refl
}
where
module G = Groupoid G
open Morphism G.category
zag : ∀ {B} → Core.F₁ (counit B) ∘F unit (Core.F₀ B) ≃ id
zag {B} = record
{ F⇒G = record { η = λ _ → ≅.refl ; commute = λ _ → ⌞ MR.id-comm-sym B ⌟ ; sym-commute = λ _ → ⌞ MR.id-comm B ⌟ }
; F⇐G = record { η = λ _ → ≅.refl ; commute = λ _ → ⌞ MR.id-comm-sym B ⌟ ; sym-commute = λ _ → ⌞ MR.id-comm B ⌟ }
; iso = λ _ → record { isoˡ = ⌞ identityˡ ⌟ ; isoʳ = ⌞ identityˡ ⌟ }
}
where
open Category B
open Morphism B
|
module Autoencoder where
import AI.HNN.FF.Network (Network)
import Numeric.LinearAlgebra.HMatrix (Vector, Matrix)
import System.Exit
import qualified AI.HNN.FF.Network as Network
import qualified Data.Vector
import qualified Numeric.LinearAlgebra.HMatrix as Vector
newtype Encoder = Encoder (Matrix Double)
deriving (Show, Read, Eq)
newtype Decoder = Decoder (Matrix Double)
deriving (Show, Read, Eq)
type LayerCount = Int
type EpochCount = Int
type Autoencoder = (Encoder, Decoder)
generateAutoencoderIO :: [Vector Double] -> LayerCount -> EpochCount -> IO Autoencoder
generateAutoencoderIO v l e = do
  -- One hidden layer of l neurons; input and output layers sized to the data vectors
  net <- Network.createNetwork (Vector.size $ head v) [l] (Vector.size $ head v)
  -- Train the network to reproduce its input (targets = inputs), learning rate 0.8, tanh activation
  let Network.Network x = Network.trainNTimes e 0.8 Network.tanh Network.tanh' net (zipWith (,) v v)
  -- The two trained weight matrices become the encoder (input -> hidden) and decoder (hidden -> output)
  case Data.Vector.toList x of
    [enc, dec] -> return (Encoder enc, Decoder dec)
    _ -> error "colossal failure"
encode :: Encoder -> Vector Double -> Vector Double
encode (Encoder m) v = Vector.app m $ Vector.vjoin [v, 1]
decode :: Decoder -> Vector Double -> Vector Double
decode (Decoder m) v = Vector.app m $ Vector.vjoin [v, 1]
|
[STATEMENT]
lemma mono_if: "\<lbrakk>le t t'; le e e'\<rbrakk> \<Longrightarrow> le (If b t e) (If b t' e')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>le t t'; le e e'\<rbrakk> \<Longrightarrow> le (if b then t else e) (if b then t' else e')
[PROOF STEP]
by auto
|
function gb=gabor_fn(sigma,theta,lambda,psi,gamma)
% 2-D Gabor kernel: sigma = std of the Gaussian envelope, theta = orientation (rad),
% lambda = carrier wavelength, psi = phase offset, gamma = aspect ratio (sigma_y = sigma/gamma)
sigma_x = sigma;
sigma_y = sigma/gamma;
% Bounding box
nstds = 3;
xmax = max(abs(nstds*sigma_x*cos(theta)),abs(nstds*sigma_y*sin(theta)));
xmax = ceil(max(1,xmax));
ymax = max(abs(nstds*sigma_x*sin(theta)),abs(nstds*sigma_y*cos(theta)));
ymax = ceil(max(1,ymax));
xmin = -xmax; ymin = -ymax;
[x,y] = meshgrid(xmin:xmax,ymin:ymax);
% Rotation
x_theta=x*cos(theta)+y*sin(theta);
y_theta=-x*sin(theta)+y*cos(theta);
gb= 1/(2*pi*sigma_x *sigma_y) * exp(-.5*(x_theta.^2/sigma_x^2+y_theta.^2/sigma_y^2)).*cos(2*pi/lambda*x_theta+psi);
|
using ThreeBodyDecay
# Final-state masses m1, m2, m3 and parent mass m0 (GeV)
tbs = let m1 = 0.938, m2 = 0.49367, m3 = 0.13957, m0 = 2.46867
    ThreeBodySystem(m1,m2,m3, m0=m0)
end
using Plots
# Kinematic (Dalitz-plot) boundary: σ3 versus σ1 at cosθ = ∓1
let
    σ1v = LinRange(tbs.mthsq[1], tbs.sthsq[1],300)
    σ3m = [σ3of1(-1.0,σ,tbs.msq) for σ in σ1v]
    σ3p = [σ3of1( 1.0,σ,tbs.msq) for σ in σ1v]
    plot(σ1v, [σ3m σ3p], lab="")
end
# Uniform (flat) phase-space sample over the Dalitz plot, shown as a 2D histogram
let
    σ3v, σ1v = flatDalitzPlotSample31(tbs; Nev=1000000)
    histogram2d(σ1v, σ3v, lab="", bins=100)
end
|