Countryside 4900 Francis Rd, Richmond, BC V7C 1J8 Strata No NWS578. Built in 1977. Two levels. Frame-wood construction. 93 units in Strata and Development. One pet allowed. Maintenance fee includes cable/satellite, gardening, management, recreation facility. Countryside is a very popular complex in West Richmond with fabulous amenities: clubhouse, outdoor pool, pool table, party room and more. It also features a garden area, green belt, fireplace and built-in vacuum. This is a central location close to transit, restaurants and coffee shops, just a few minutes' walk to Seafair Shopping Centre with Safeway, Shoppers Drug Mart, KFC, CIBC Bank, TD Bank, medical services and recreation facilities. Direct access to No 1 Road, Francis Road and other major routes allows an easy commute to surrounding destinations including Steveston Village and Terra Nova Rural Park. Parksville Estates is just a short stroll to Alfred B Dixon Elementary, Grauer Elementary and Hugh Boyd Secondary Schools, West Richmond Community Centre with its great sport facilities, Hugh Boyd Park, West Richmond Pitch & Putt Golf Course, Stagecoach Theatre Arts and popular restaurants including Takeya Sushi, KFC and many others, and is minutes to the popular Dyke walk and Garry Point Park.
Listings Listed By: 1. RE/MAX Westcoast 2. Royal LePage Regency Realty 3. Interlink Realty 4. Domicile Real Estate Corp.
705 4900 FRANCIS R.. 3 Bed, 2 Bath, 1362 Sqft. $685,800 Domicile Real Estate Corp.
|
function [traj, infStates] = tapas_hgf(r, p, varargin)
% Calculates the trajectories of the agent's representations under the HGF
%
% This function can be called in two ways:
%
% (1) tapas_hgf(r, p)
%
% where r is the structure generated by tapas_fitModel and p is the parameter vector in native space;
%
% (2) tapas_hgf(r, ptrans, 'trans')
%
% where r is the structure generated by tapas_fitModel, ptrans is the parameter vector in
% transformed space, and 'trans' is a flag indicating this.
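%
% Usage sketch (names as in the calling conventions above):
%
%     [traj, infStates] = tapas_hgf(r, p);               % p in native space
%     [traj, infStates] = tapas_hgf(r, ptrans, 'trans'); % ptrans in transformed space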
%
% --------------------------------------------------------------------------------------------------
% Copyright (C) 2012-2013 Christoph Mathys, TNU, UZH & ETHZ
%
% This file is part of the HGF toolbox, which is released under the terms of the GNU General Public
% Licence (GPL), version 3. You can redistribute it and/or modify it under the terms of the GPL
% (either version 3 or, at your option, any later version). For further details, see the file
% COPYING or <http://www.gnu.org/licenses/>.
% Transform parameters back to their native space if needed
if ~isempty(varargin) && strcmp(varargin{1},'trans')
p = tapas_hgf_transp(r, p);
end
% Number of levels
l = length(p)/5;
if l ~= floor(l)
error('tapas:hgf:UndetNumLevels', 'Cannot determine number of levels');
end
% Unpack parameters
mu_0 = p(1:l);
sa_0 = p(l+1:2*l);
rho = p(2*l+1:3*l);
ka = p(3*l+1:4*l-1);
om = p(4*l:5*l-2);
th = exp(p(5*l-1));
al = 1/p(5*l);
% Add dummy "zeroth" trial
u = [0; r.u(:,1)];
% Number of trials (including prior)
n = length(u);
% Assume that if u has more than one column, the last contains t
try
if r.c_prc.irregular_intervals
if size(u,2) > 1
t = [0; r.u(:,end)];
else
error('tapas:hgf:InputSingleColumn', 'Input matrix must contain more than one column if irregular_intervals is set to true.');
end
else
t = ones(n,1);
end
catch
if size(u,2) > 1
t = [0; r.u(:,end)];
else
t = ones(n,1);
end
end
% Initialize updated quantities
% Representations
mu = NaN(n,l);
pi = NaN(n,l);
% Other quantities
muhat = NaN(n,l);
pihat = NaN(n,l);
v = NaN(n,l);
w = NaN(n,l-1);
da = NaN(n,l);
dau = NaN(n,1);
% Representation priors
% Note: first entries of the other quantities remain
% NaN because they are undefined and are thrown away
% at the end; their presence simply leads to consistent
% trial indices.
mu(1,:) = mu_0;
pi(1,:) = 1./sa_0;
% Representation update loop
% Pass through trials
for k = 2:1:n
if not(ismember(k-1, r.ign))
%%%%%%%%%%%%%%%%%%%%%%
% Effect of input u(k)
%%%%%%%%%%%%%%%%%%%%%%
% 1st level
% ~~~~~~~~~
% Prediction
muhat(k,1) = mu(k-1,1) +t(k) *rho(1);
% Precision of prediction
pihat(k,1) = 1/(1/pi(k-1,1) +t(k) *exp(ka(1) *mu(k-1,2) +om(1)));
% Input prediction error
dau(k) = u(k) -muhat(k,1);
% Updates
pi(k,1) = pihat(k,1) +1/al;
mu(k,1) = muhat(k,1) +1/pihat(k,1) *1/(1/pihat(k,1) +al) *dau(k);
% Volatility prediction error
da(k,1) = (1/pi(k,1) +(mu(k,1) -muhat(k,1))^2) *pihat(k,1) -1;
if l > 2
% Pass through higher levels
% ~~~~~~~~~~~~~~~~~~~~~~~~~~
for j = 2:l-1
% Prediction
muhat(k,j) = mu(k-1,j) +t(k) *rho(j);
% Precision of prediction
pihat(k,j) = 1/(1/pi(k-1,j) +t(k) *exp(ka(j) *mu(k-1,j+1) +om(j)));
% Weighting factor
v(k,j-1) = t(k) *exp(ka(j-1) *mu(k-1,j) +om(j-1));
w(k,j-1) = v(k,j-1) *pihat(k,j-1);
% Updates
pi(k,j) = pihat(k,j) +1/2 *ka(j-1)^2 *w(k,j-1) *(w(k,j-1) +(2 *w(k,j-1) -1) *da(k,j-1));
if pi(k,j) <= 0
error('tapas:hgf:NegPostPrec', 'Negative posterior precision. Parameters are in a region where model assumptions are violated.');
end
mu(k,j) = muhat(k,j) +1/2 *1/pi(k,j) *ka(j-1) *w(k,j-1) *da(k,j-1);
% Volatility prediction error
da(k,j) = (1/pi(k,j) +(mu(k,j) -muhat(k,j))^2) *pihat(k,j) -1;
end
end
% Last level
% ~~~~~~~~~~
% Prediction
muhat(k,l) = mu(k-1,l) +t(k) *rho(l);
% Precision of prediction
pihat(k,l) = 1/(1/pi(k-1,l) +t(k) *th);
% Weighting factor
v(k,l) = t(k) *th;
v(k,l-1) = t(k) *exp(ka(l-1) *mu(k-1,l) +om(l-1));
w(k,l-1) = v(k,l-1) *pihat(k,l-1);
% Updates
pi(k,l) = pihat(k,l) +1/2 *ka(l-1)^2 *w(k,l-1) *(w(k,l-1) +(2 *w(k,l-1) -1) *da(k,l-1));
if pi(k,l) <= 0
error('tapas:hgf:NegPostPrec', 'Negative posterior precision. Parameters are in a region where model assumptions are violated.');
end
mu(k,l) = muhat(k,l) +1/2 *1/pi(k,l) *ka(l-1) *w(k,l-1) *da(k,l-1);
% Volatility prediction error
da(k,l) = (1/pi(k,l) +(mu(k,l) -muhat(k,l))^2) *pihat(k,l) -1;
else
mu(k,:) = mu(k-1,:);
pi(k,:) = pi(k-1,:);
muhat(k,:) = muhat(k-1,:);
pihat(k,:) = pihat(k-1,:);
v(k,:) = v(k-1,:);
w(k,:) = w(k-1,:);
da(k,:) = da(k-1,:);
end
end
% Remove representation priors
mu(1,:) = [];
pi(1,:) = [];
% Check validity of trajectories
if any(isnan(mu(:))) || any(isnan(pi(:)))
error('tapas:hgf:VarApproxInvalid', 'Variational approximation invalid. Parameters are in a region where model assumptions are violated.');
else
% Check for implausible jumps in trajectories
dmu = diff(mu);
dpi = diff(pi);
rmdmu = repmat(sqrt(mean(dmu.^2)),length(dmu),1);
rmdpi = repmat(sqrt(mean(dpi.^2)),length(dpi),1);
jumpTol = 256;
if any(abs(dmu(:)) > jumpTol*rmdmu(:)) || any(abs(dpi(:)) > jumpTol*rmdpi(:))
error('tapas:hgf:VarApproxInvalid', 'Variational approximation invalid. Parameters are in a region where model assumptions are violated.');
end
end
% Remove other dummy initial values
muhat(1,:) = [];
pihat(1,:) = [];
v(1,:) = [];
w(1,:) = [];
da(1,:) = [];
dau(1) = [];
% Create result data structure
traj = struct;
traj.mu = mu;
traj.sa = 1./pi;
traj.muhat = muhat;
traj.sahat = 1./pihat;
traj.v = v;
traj.w = w;
traj.da = da;
traj.dau = dau;
% Updates with respect to prediction
traj.ud = mu -muhat;
% Psi (precision weights on prediction errors)
psi = NaN(n-1,l);
psi(:,1) = 1./(al*pi(:,1));
psi(:,2:l) = pihat(:,1:l-1)./pi(:,2:l);
traj.psi = psi;
% Epsilons (precision-weighted prediction errors)
epsi = NaN(n-1,l);
epsi(:,1) = psi(:,1) .*dau;
epsi(:,2:l) = psi(:,2:l) .*da(:,1:l-1);
traj.epsi = epsi;
% Full learning rate (full weights on prediction errors)
wt = NaN(n-1,l);
wt(:,1) = psi(:,1);
wt(:,2:l) = 1/2 *(v(:,1:l-1) *diag(ka(1:l-1))) .*psi(:,2:l);
traj.wt = wt;
% Create matrices for use by the observation model
infStates = NaN(n-1,l,4);
infStates(:,:,1) = traj.muhat;
infStates(:,:,2) = traj.sahat;
infStates(:,:,3) = traj.mu;
infStates(:,:,4) = traj.sa;
return;
|
struct SemDiffProximal{A, B, C, D} <: SemDiff
algorithm::A
options::B
operator_g::C
operator_h::D
end
SemDiffProximal(;algorithm = ProximalAlgorithms.PANOC(), options = Dict{Symbol, Any}(), operator_g, operator_h = nothing, kwargs...) =
SemDiffProximal(algorithm, options, operator_g, operator_h)
############################################################################
### Pretty Printing
############################################################################
function Base.show(io::IO, struct_inst::SemDiffProximal)
print_type_name(io, struct_inst)
print_field_types(io, struct_inst)
end
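# A minimal construction sketch (illustrative: assumes ProximalOperators.jl is
# available for the `NormL1` operator; any proximable operator works for `operator_g`):
#
#     using ProximalOperators
#     diff = SemDiffProximal(operator_g = NormL1(0.01))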
|
Formal statement is: lemma homeomorphic_affinity: fixes S :: "'a::real_normed_vector set" assumes "c \<noteq> 0" shows "S homeomorphic ((\<lambda>x. a + c *\<^sub>R x) ` S)" Informal statement is: If $S$ is a set in a real vector space and $c \neq 0$, then $S$ is homeomorphic to the set $a + cS = \{a + cx : x \in S\}$.
|
\documentclass[a4paper]{article}
\usepackage{fullpage}
\usepackage{latexsym} % for some symbols
\usepackage{amsmath} % for maths
\usepackage{amssymb} % for Real number symbol
\usepackage{graphicx}
\usepackage{algorithm}
\usepackage{algorithmic}
\usepackage{subfigure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Notation
\newcommand{\normal}{\mathcal{N}}
\newcommand{\thetaall}{\tilde{\Theta}}
\newcommand{\vect}[2]{\begin{bmatrix} #1 \\ #2 \end{bmatrix}}
\newcommand{\mat}[4]{\begin{bmatrix} #1 & #2\\ #3& #4 \end{bmatrix}}
\newcommand{\dotprod}[2]{\langle #1 , #2 \rangle}
\newcommand{\trace}{\mathrm{tr}}
\newcommand{\deter}{\mathrm{det}}
% end notation
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\title{Distance between two 2D Gaussians}
\author{Cheng Soon Ong}
\date{24 March 2017}
\begin{document}
\maketitle
\section{Motivation}
In astronomy cross identification is the task of finding the same object in the sky in two images.
The location of each pixel is known, but the challenge lies in the fact that the two images are
measured in different wavelengths.
When doing radio cross identification we need to compare two distributions, one from the optical
image and a second from the radio image.
We assume here that each empirical distribution is well approximated by a single two dimensional
Gaussian.
The first step to cross identification is then to compute the distance between the two Gaussians.
\section{General abstract setup}
One way to measure the difference between two distributions is by using the Kullback-Leibler (KL)
divergence. This is asymmetric, and one often looks at the average of the KL divergence in
both directions. This section selects the relevant results
from~\cite{nielsen09clumnd,nielsen11staefd}.
Let $\normal(\mu, S)$ denote the $d$ dimensional normal distribution with mean $\mu$ and covariance
matrix $S$. We rewrite this Gaussian in its canonical decomposition in terms of its natural
parameters. The sufficient statistics are stacked onto a tuple containing a $d$ dimensional vector
and a $d\times d$ matrix
\[
\tilde{x} = (x, -\frac{1}{2}xx^\top)
\]
associated with the natural parameters
\begin{equation}
\label{eq:nat-param}
\thetaall = (\theta, \Theta) = \left(S^{-1}\mu, \frac{1}{2} S^{-1}\right)
\end{equation}
The KL divergence between two Gaussian distributions
\[
N_p = \normal(\mu_p, S_p)\qquad\mbox{and}\qquad N_q = \normal(\mu_q, S_q)
\]
is given by the Bregman divergence with generator $F$
\begin{align}
KL(N_p||N_q) &= D_F(\thetaall_q || \thetaall_p)\\
&= F(\thetaall_q) - F(\thetaall_p) - \dotprod{(\thetaall_q - \thetaall_p)}{\nabla F(\thetaall_p)}
\label{eq:bregman-div}
\end{align}
The function $F$ turns out to be the log normaliser specifying the exponential family
(of the Gaussian)
\begin{equation}
\label{eq:log-normaliser}
F(\thetaall) = \frac{1}{4}\trace(\Theta^{-1} \theta\theta^\top)
-\frac{1}{2} \log \deter \Theta + \frac{d}{2}\log 2\pi.
\end{equation}
The gradient of $F$ is given by
\begin{equation}
\label{eq:grad-log-normaliser}
\nabla F(\thetaall) = \left(
\frac{1}{2} \Theta^{-1}\theta
,
-\frac{1}{2} \Theta^{-1} - \frac{1}{4} (\Theta^{-1}\theta)(\Theta^{-1}\theta)^\top
\right).
\end{equation}
The inner product in \eqref{eq:bregman-div} is the sum of the inner products of the
vectors and matrices,
\[
\dotprod{\thetaall_p}{\thetaall_q} = \dotprod{\theta_p}{\theta_q} + \dotprod{\Theta_p}{\Theta_q}
\]
where the matrix inner product is $\dotprod{\Theta_p}{\Theta_q} = \trace(\Theta_p\Theta_q^\top)$.
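As a sanity check, expanding \eqref{eq:bregman-div} with this $F$ recovers the familiar
closed form for the KL divergence between two Gaussians (a standard identity, stated here
for reference),
\[
KL(N_p||N_q) = \frac{1}{2}\left(\trace(S_q^{-1}S_p)
+ (\mu_q - \mu_p)^\top S_q^{-1}(\mu_q - \mu_p) - d
+ \log\frac{\deter S_q}{\deter S_p}\right).
\]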
\section{Two dimensional Gaussian}
We explicitly show each element of the vector and matrix for a 2 dimensional Gaussian with
mean $\mu$ and covariance matrix $S$,
\[
\normal\left( \vect{\mu_1}{\mu_2}, \mat{s_{11}}{s_{12}}{s_{21}}{s_{22}}\right).
\]
The determinant and inverse of $S$ are given by
\begin{equation}
\label{eq:det-S}
a := \deter S = s_{11} s_{22} - s_{12} s_{21}
\end{equation}
and
\begin{equation}
\label{eq:inv-S}
S^{-1} = \frac{1}{a}\mat{s_{22}}{-s_{12}}{-s_{21}}{s_{11}}
\end{equation}
respectively.
We can then explicitly compute the parameters for the KL divergence in the previous section.
Starting from the right most term in \eqref{eq:bregman-div},
since $d=2$ the constant is $\log 2\pi$.
Recall the relationship between determinants and inverses:
\[
\deter S^{-1} = \frac{1}{\deter S}
\]
Because we are only considering a two dimensional problem, constants are squared in the determinant,
i.e. $\deter(c S) = c^2 \deter S$.
By the definition of $\Theta$, we have
\begin{align*}
\frac{1}{2} \log \deter \Theta &= \frac{1}{2} \log \deter (\frac{1}{2} S^{-1}) \\
&= \frac{1}{2} \log \frac{1}{4} \frac{1}{\deter S}\\
&= \frac{1}{2} \log \frac{1}{4a}
\end{align*}
where the last line substitutes \eqref{eq:det-S}.
By the definition of $\theta$,
\begin{equation}
\label{eq:theta}
\theta = S^{-1}\mu = \frac{1}{a}\vect{s_{22}\mu_1 - s_{12}\mu_2}{s_{11}\mu_2 - s_{21}\mu_1}.
\end{equation}
We also require
\begin{align}
\Theta^{-1}\theta &= 2SS^{-1}\mu = 2\mu\nonumber\\
&=\vect{2\mu_1}{2\mu_2}\label{eq:2mean},
\end{align}
which allows us to compute terms in the gradient. By multiplying
\eqref{eq:2mean} and \eqref{eq:theta} we have
\begin{align*}
\Theta^{-1}\theta\theta^\top &= \frac{1}{a}\vect{2\mu_1}{2\mu_2}
\vect{s_{22}\mu_1 - s_{12}\mu_2}{s_{11}\mu_2 - s_{21}\mu_1}^\top\\
&=\frac{2}{a}\mat{\mu_1(s_{22}\mu_1 - s_{12}\mu_2)}{\mu_1(s_{11}\mu_2 - s_{21}\mu_1)}{\mu_2(s_{22}\mu_1 - s_{12}\mu_2)}{\mu_2(s_{11}\mu_2 - s_{21}\mu_1)},
\end{align*}
allowing us to calculate the first term of the log normaliser
\[
\frac{1}{4}\trace(\Theta^{-1} \theta\theta^\top)= \frac{1}{2a}
\left(\mu_1(s_{22}\mu_1 - s_{12}\mu_2) + \mu_2(s_{11}\mu_2 - s_{21}\mu_1)\right).
\]
Substituting into \eqref{eq:log-normaliser}, we have
\begin{equation}
\label{eq:gauss-log-normaliser}
F(\thetaall) = \frac{1}{2a}
\left(\mu_1(s_{22}\mu_1 - s_{12}\mu_2) + \mu_2(s_{11}\mu_2 - s_{21}\mu_1)\right)
- \frac{1}{2} \log \frac{1}{4a} + \log 2\pi.
\end{equation}
Substituting into \eqref{eq:grad-log-normaliser}, we have
\begin{equation}
\label{eq:gauss-grad-log-normaliser-vec}
\frac{1}{2}\Theta^{-1}\theta = \mu
\end{equation}
and
\begin{equation}
\label{eq:gauss-grad-log-normaliser-mat}
-\frac{1}{2} \Theta^{-1} - \frac{1}{4} (\Theta^{-1}\theta)(\Theta^{-1}\theta)^\top
=
- S - \mu\mu^\top.
\end{equation}
It is likely that \eqref{eq:gauss-log-normaliser} and \eqref{eq:gauss-grad-log-normaliser-mat}
are true in general for all Gaussians (not just 2D Gaussians).
\bibliographystyle{alpha}
\bibliography{gaussian}
\end{document}
|
## Copyright (c) 2018-2021, Carnegie Mellon University
## See LICENSE for details
Import(simt);
Class(FFTXCUDAOpts, FFTXOpts, simt.TitanVDefaults, rec(
tags := [],
operations := rec(Print := s -> Print("<FFTX CUDA options record>")),
max_threads := 2048,
max_blocks := 1024,
max_heap := 1024 * 1024 * 1024
));
cudaOpts := function(arg)
local opts;
opts := Copy(FFTXCUDAOpts);
opts.breakdownRules.Circulant := [Circulant_PRDFT_FDataNT];
opts.breakdownRules.PRDFT := List([PRDFT1_Base1, PRDFT1_Base2, PRDFT1_CT, PRDFT1_PF, PRDFT_PD, PRDFT_Rader], _noT);
opts.breakdownRules.IPRDFT := List([ IPRDFT1_Base1, IPRDFT1_Base2, IPRDFT1_CT, IPRDFT_PD, IPRDFT_Rader ], _noT);
opts.breakdownRules.PRDFT3 := List([ PRDFT3_Base1, PRDFT3_Base2, PRDFT3_CT ], _noT);
return opts;
end;
Declare(ParseOptsCUDA);
Class(FFTXCUDADefaultConf, rec(
getOpts := (self, t) >> ParseOptsCUDA(self, t),
operations := rec(Print := s -> Print("<FFTX CUDA Default Configuration>")),
useCUDA := true
));
Class(FFTXCUDADeviceDefaultConf, rec(
getOpts := (self, t) >> ParseOptsCUDA(self, t),
operations := rec(Print := s -> Print("<FFTX CUDA Device Default Configuration>")),
useCUDADevice := true
));
cudaConf := rec(
defaultName := "defaultCUDAConf",
defaultOpts := (arg) >> FFTXCUDADefaultConf,
devFunc := true,
confHandler := cudaOpts
);
fftx.FFTXGlobals.registerConf(cudaConf);
getTargetOS := function()
local tgt;
if LocalConfig.osinfo.isWindows() then
tgt := "win-x64-cuda";
elif LocalConfig.osinfo.isLinux() then
tgt := "linux-cuda";
elif LocalConfig.osinfo.isDarwin() then
tgt := "linux-cuda"; ## may work
fi;
return tgt;
end;
#--
Class(FFTXCUDADeviceOpts, FFTXCUDAOpts, simt.TitanVDefaults, rec(
tags := [],
devFunc := true,
target := rec ( name := getTargetOS() ),
operations := rec(Print := s -> Print("<FFTX CUDA Device options record>"))
));
cudaDeviceOpts := function(arg) # specific to WarpX size 100...
local opts;
opts := Copy(FFTXCUDADeviceOpts);
opts.breakdownRules.Circulant := [Circulant_PRDFT_FDataNT];
opts.breakdownRules.PRDFT := List([PRDFT1_Base1, PRDFT1_Base2, CopyFields(PRDFT1_CT,
rec(allChildren := P ->Filtered(PRDFT1_CT.allChildren(P), i->When(P[1] = 100, Cols(i[1]) = 4, true)))),
PRDFT_PD], _noT);
opts.breakdownRules.IPRDFT := List([ IPRDFT1_Base1, IPRDFT1_Base2, IPRDFT1_CT, IPRDFT_PD ], _noT);
opts.breakdownRules.PRDFT3 := List([ PRDFT3_Base1, PRDFT3_Base2, PRDFT3_CT ], _noT);
opts.breakdownRules.DFT := [ DFT_Base,
CopyFields(DFT_CT, rec(children := nt ->Filtered(DFT_CT.children(nt), i->When(nt.params[1] = 100, Cols(i[1]) = 4, true)))),
DFT_PD ];
opts.breakdownRules.TTensorInd := [dsA_base, L_dsA_L_base, dsA_L_base, L_dsA_base];
return opts;
end;
cudaDeviceConf := rec(
defaultName := "defaultCUDADeviceConf",
defaultOpts := (arg) >> FFTXCUDADeviceDefaultConf,
confHandler := cudaDeviceOpts
);
fftx.FFTXGlobals.registerConf(cudaDeviceConf);
# this is a first experimental opts-deriving logic. It needs to be made extensible and done properly
ParseOptsCUDA := function(conf, t)
local tt, _tt, _tt2, _conf, _opts, _HPCSupportedSizesCUDA, _thold,
MAX_KERNEL, MAX_PRIME, MIN_SIZE, MAX_SIZE, size1, filter;
# all dimensions need to be in this array for the high perf MDDFT conf to kick in for now
# size 320 is problematic at this point and needs attention. Need support for 3 stages to work first
MAX_KERNEL := 21;
MAX_PRIME := 13;
MIN_SIZE := 32;
MAX_SIZE := 320;
_thold := MAX_KERNEL;
filter := (e) -> When(e[1] * e[2] <= _thold ^ 2, e[1] <= _thold and e[2] <= _thold, e[1] <= _thold and e[2] >= _thold);
size1 := Filtered([MIN_SIZE..MAX_SIZE], i -> ForAny(DivisorPairs(i), filter) and ForAll(Factors(i), j -> not IsPrime(j) or j <= MAX_PRIME));
_HPCSupportedSizesCUDA := size1;
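# i.e. the sizes in [MIN_SIZE..MAX_SIZE] that admit a divisor pair compatible with the
# kernel threshold and whose prime factors are all <= MAX_PRIME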
# _HPCSupportedSizesCUDA := [80, 96, 100, 224, 320];
# _thold := 16;
if IsBound(conf.useCUDADevice) then
# # detect real MD convolution
# _tt := Collect(t, RCDiag)::Collect(t, MDPRDFT)::Collect(t, IMDPRDFT)::Collect(t, TTensorI);
# if Length(_tt) = 4 then
# _conf := FFTXGlobals.confWarpXCUDADevice();
# _opts := FFTXGlobals.getOpts(_conf);
# return _opts;
# fi;
# detect batch of DFT/PRDFT
if ((Length(Collect(t, TTensorInd)) >= 1) or let(lst := Collect(t, TTensorI), (Length(lst) >= 1) and ForAll(lst, l->l.params[2] > 1))) and
((Length(Collect(t, DFT)) = 1) or (Length(Collect(t, PRDFT)) = 1) or (Length(Collect(t, IPRDFT)) = 1)) then
_conf := FFTXGlobals.confBatchFFTCUDADevice();
_opts := FFTXGlobals.getOpts(_conf);
# opts for high performance CUDA cuFFT
if ForAll(Flat(List(Collect(t, @(1, [DFT, PRDFT, IPRDFT])), j-> j.params[1])), i -> i in _HPCSupportedSizesCUDA) then
_opts.breakdownRules.TTwiddle := [ TTwiddle_Tw1 ];
_opts.tags := [ASIMTKernelFlag(ASIMTGridDimX), ASIMTBlockDimY, ASIMTBlockDimX];
_opts.globalUnrolling := 2*_thold + 1;
_opts.breakdownRules.TTensorI := [CopyFields(IxA_L_split, rec(switch := true)),
fftx.platforms.cuda.L_IxA_SIMT, fftx.platforms.cuda.IxA_L_SIMT]::_opts.breakdownRules.TTensorI;
_opts.breakdownRules.DFT := [CopyFields(DFT_tSPL_CT, rec(switch := true,
filter := e-> When(e[1]*e[2] <= _thold^2, e[1] <= _thold and e[2] <= _thold, e[1] <= _thold and e[2] >= _thold)))]::_opts.breakdownRules.DFT;
_opts.unparser.simt_synccluster := _opts.unparser.simt_syncblock;
_opts.postProcessSums := (s, opts) -> let(s1 := ApplyStrategy(s, [ MergedRuleSet(RulesFuncSimp, RulesSums, RulesSIMTFission) ], BUA, opts),
When(Collect(t, PRDFT)::Collect(t, IPRDFT) = [],
FixUpCUDASigmaSPL_3Stage(s1, opts),
FixUpCUDASigmaSPL_3Stage_Real(s1, opts)));
_opts.postProcessCode := (c, opts) -> FixUpTeslaV_Code(c, opts);
# _opts.postProcessCode := (c, opts) -> FixUpTeslaV_Code(PingPong_3Stages(c, opts), opts);
_opts.fixUpTeslaV_Code := true;
_opts.operations.Print := s -> Print("<FFTX CUDA HPC Batch DFT options record>");
fi;
return _opts;
fi;
# detect 3D DFT/Batch DFT
_tt := Collect(t, MDDFT)::Collect(t, MDPRDFT)::Collect(t, IMDPRDFT);
if Length(_tt) = 1 and Length(_tt[1].params[1]) = 3 then
_conf := FFTXGlobals.confFFTCUDADevice();
_opts := FFTXGlobals.getOpts(_conf);
# opts for high performance CUDA cuFFT
if ForAll(_tt[1].params[1], i-> i in _HPCSupportedSizesCUDA) then
_opts.breakdownRules.MDDFT := [fftx.platforms.cuda.MDDFT_tSPL_Pease_SIMT];
_opts.breakdownRules.MDPRDFT := [fftx.platforms.cuda.MDPRDFT_tSPL_Pease_SIMT];
_opts.breakdownRules.IMDPRDFT := [fftx.platforms.cuda.IMDPRDFT_tSPL_Pease_SIMT];
_opts.breakdownRules.TTwiddle := [ TTwiddle_Tw1 ];
_opts.globalUnrolling := 2*_thold + 1;
_opts.breakdownRules.TTensorI := [CopyFields(IxA_L_split, rec(switch := true)),
fftx.platforms.cuda.L_IxA_SIMT, fftx.platforms.cuda.IxA_L_SIMT]::_opts.breakdownRules.TTensorI;
_opts.breakdownRules.DFT := [CopyFields(DFT_tSPL_CT, rec(switch := true,
filter := e-> When(e[1]*e[2] <= _thold^2, e[1] <= _thold and e[2] <= _thold, e[1] <= _thold and e[2] >= _thold)))]::_opts.breakdownRules.DFT;
_opts.unparser.simt_synccluster := _opts.unparser.simt_syncblock;
# _opts.postProcessSums := (s, opts) -> let(s1 := ApplyStrategy(s, [ MergedRuleSet(RulesFuncSimp, RulesSums, RulesSIMTFission) ], BUA, opts),
# FixUpCUDASigmaSPL_3Stage(s1, opts));
_opts.postProcessSums := (s, opts) -> let(s1 := ApplyStrategy(s, [ MergedRuleSet(RulesFuncSimp, RulesSums, RulesSIMTFission) ], BUA, opts),
When(Collect(t, MDPRDFT)::Collect(t, IMDPRDFT) = [],
FixUpCUDASigmaSPL_3Stage(s1, opts),
FixUpCUDASigmaSPL_3Stage_Real(s1, opts)));
_opts.postProcessCode := (c, opts) -> FixUpTeslaV_Code(c, opts);
# _opts.postProcessCode := (c, opts) -> FixUpTeslaV_Code(PingPong_3Stages(c, opts), opts);
_opts.fixUpTeslaV_Code := true;
if ((Length(Collect(t, TTensorInd)) >= 1) or let(lst := Collect(t, TTensorI), (Length(lst) >= 1) and ForAll(lst, l->l.params[2] > 1))) then
_opts.operations.Print := s -> Print("<FFTX CUDA HPC Batch MDDFT/MDPRDFT/MDIPRDFT options record>");
_opts.tags := [ASIMTKernelFlag(ASIMTGridDimX), ASIMTGridDimY, ASIMTBlockDimY, ASIMTBlockDimX];
else
_opts.operations.Print := s -> Print("<FFTX CUDA HPC MDDFT/MDPRDFT/MDIPRDFT options record>");
_opts.tags := [ASIMTKernelFlag(ASIMTGridDimX), ASIMTBlockDimY, ASIMTBlockDimX];
fi;
_opts.HPCSupportedSizesCUDA := _HPCSupportedSizesCUDA;
fi;
return _opts;
fi;
# detect 3D DFT/iDFT but non-convolution case
_tt := Collect(t, MDDFT);
if Length(_tt) = 2 and ForAll(_tt, i->Length(i.params[1]) = 3) and Sum(List(_tt, i->i.params[2])) = Product(_tt[1].params[1]) then
_conf := FFTXGlobals.confFFTCUDADevice();
_opts := FFTXGlobals.getOpts(_conf);
# opts for high performance CUDA cuFFT
if Length(Filtered(_tt, i -> ObjId(i) = MDDFT)) > 0 and ForAll(_tt[1].params[1], i-> i in _HPCSupportedSizesCUDA) then
_opts.breakdownRules.MDDFT := [fftx.platforms.cuda.MDDFT_tSPL_Pease_SIMT];
_opts.breakdownRules.MDPRDFT := [fftx.platforms.cuda.MDPRDFT_tSPL_Pease_SIMT];
_opts.breakdownRules.IMDPRDFT := [fftx.platforms.cuda.IMDPRDFT_tSPL_Pease_SIMT];
_opts.breakdownRules.TTwiddle := [ TTwiddle_Tw1 ];
_opts.tags := [ASIMTKernelFlag(ASIMTGridDimX), ASIMTBlockDimY, ASIMTBlockDimX];
_opts.globalUnrolling := 2*_thold + 1;
_opts.breakdownRules.TTensorI := [CopyFields(IxA_L_split, rec(switch := true)),
fftx.platforms.cuda.L_IxA_SIMT, fftx.platforms.cuda.IxA_L_SIMT]::_opts.breakdownRules.TTensorI;
_opts.breakdownRules.DFT := [CopyFields(DFT_tSPL_CT, rec(switch := true,
filter := e-> When(e[1]*e[2] <= _thold^2, e[1] <= _thold and e[2] <= _thold, e[1] <= _thold and e[2] >= _thold)))]::_opts.breakdownRules.DFT;
_opts.unparser.simt_synccluster := _opts.unparser.simt_syncblock;
_opts.postProcessSums := (s, opts) -> let(s1 := ApplyStrategy(s, [ MergedRuleSet(RulesFuncSimp, RulesSums, RulesSIMTFission) ], BUA, opts),
FixUpCUDASigmaSPL_3Stage(s1, opts));
_opts.postProcessCode := (c, opts) -> FixUpTeslaV_Code(PingPong_3Stages(c, opts), opts);
_opts.fixUpTeslaV_Code := true;
_opts.operations.Print := s -> Print("<FFTX CUDA HPC MDDFT options record>");
fi;
return _opts;
fi;
# promote with default conf rules
tt := _promote1(Copy(t));
if ObjId(tt) = TFCall then
_tt := tt.params[1];
# check for convolution
if (ObjId(_tt) in [MDRConv, MDRConvR, IOPrunedMDRConv]) or ((ObjId(_tt) in [TTensorI, TTensorInd]) and (ObjId(_tt.params[1]) in [MDRConv, MDRConvR])) then
_conf := FFTXGlobals.confMDRConvCUDADevice();
_opts := FFTXGlobals.getOpts(_conf);
# opts for high performance CUDA cuFFT
if (ObjId(_tt) in [MDRConv, MDRConvR, IOPrunedMDRConv] and ForAll(_tt.params[1], i-> i in _HPCSupportedSizesCUDA)) or
(ObjId(_tt) in [TTensorI, TTensorInd] and ForAll(_tt.params[1].params[1], i-> i in _HPCSupportedSizesCUDA)) then
_opts.breakdownRules.MDDFT := [fftx.platforms.cuda.MDDFT_tSPL_Pease_SIMT];
_opts.breakdownRules.MDPRDFT := [fftx.platforms.cuda.MDPRDFT_tSPL_Pease_SIMT];
_opts.breakdownRules.IMDPRDFT := [fftx.platforms.cuda.IMDPRDFT_tSPL_Pease_SIMT];
_opts.breakdownRules.TTwiddle := [ TTwiddle_Tw1 ];
_opts.globalUnrolling := 2*_thold + 1;
_opts.breakdownRules.TTensorI := [CopyFields(IxA_L_split, rec(switch := true)),
fftx.platforms.cuda.L_IxA_SIMT, fftx.platforms.cuda.IxA_L_SIMT]::_opts.breakdownRules.TTensorI;
_opts.breakdownRules.DFT := [CopyFields(DFT_tSPL_CT, rec(switch := true,
filter := e-> When(e[1]*e[2] <= _thold^2, e[1] <= _thold and e[2] <= _thold, e[1] <= _thold and e[2] >= _thold)))]::_opts.breakdownRules.DFT;
_opts.unparser.simt_synccluster := _opts.unparser.simt_syncblock;
# _opts.postProcessSums := (s, opts) -> let(s1 := ApplyStrategy(s, [ MergedRuleSet(RulesFuncSimp, RulesSums, RulesSIMTFission) ], BUA, opts),
# FixUpCUDASigmaSPL_3Stage(s1, opts));
_opts.postProcessSums := (s, opts) -> let(s1 := ApplyStrategy(s, [ MergedRuleSet(RulesFuncSimp, RulesSums, RulesSIMTFission) ], BUA, opts),
When(Collect(t, MDPRDFT)::Collect(t, IMDPRDFT) = [],
FixUpCUDASigmaSPL_3Stage(s1, opts),
FixUpCUDASigmaSPL_3Stage_Real(s1, opts)));
_opts.postProcessCode := (c, opts) -> FixUpTeslaV_Code(c, opts);
# _opts.postProcessCode := (c, opts) -> FixUpTeslaV_Code(PingPong_3Stages(c, opts), opts);
_opts.fixUpTeslaV_Code := true;
if ((Length(Collect(t, TTensorInd)) >= 1) or let(lst := Collect(t, TTensorI), (Length(lst) >= 1) and ForAll(lst, l->l.params[2] > 1))) then
_opts.operations.Print := s -> Print("<FFTX CUDA HPC Batch MDRConv/MDRConvR/IOPrunedMDRConv options record>");
_opts.tags := [ASIMTKernelFlag(ASIMTGridDimX), ASIMTGridDimY, ASIMTBlockDimY, ASIMTBlockDimX];
else
_opts.operations.Print := s -> Print("<FFTX CUDA HPC MDRConv/MDRConvR/IOPrunedMDRConv options record>");
_opts.tags := [ASIMTKernelFlag(ASIMTGridDimX), ASIMTBlockDimY, ASIMTBlockDimX];
fi;
_opts.HPCSupportedSizesCUDA := _HPCSupportedSizesCUDA;
fi;
return _opts;
fi;
# check for Hockney. This is for N=130
if ObjId(_tt) = IOPrunedMDRConv and _tt.params[1] = [130,130,130] then
_conf := FFTXGlobals.confHockneyMlcCUDADevice();
_opts := FFTXGlobals.getOpts(_conf);
return _opts;
fi;
# check for general Hockney.
if ObjId(_tt) = IOPrunedMDRConv then
_conf := FFTXGlobals.confMDRConvCUDADevice();
_opts := FFTXGlobals.getOpts(_conf);
_opts.tags := [ASIMTKernelFlag(ASIMTGridDimY), ASIMTGridDimX, ASIMTBlockDimZ];
return _opts;
fi;
fi;
# check for WarpX
_conf := FFTXGlobals.confWarpXCUDADevice();
_opts := FFTXGlobals.getOpts(_conf);
tt := _opts.preProcess(Copy(t));
if ObjId(tt) = TFCall and ObjId(tt.params[1]) = TCompose then
_tt := tt.params[1].params[1];
# detect promoted WarpX
if IsList(_tt) and Length(_tt) = 3 and List(_tt, ObjId) = [ TNoDiagPullinRight, TRC, TNoDiagPullinLeft ] then
return _opts;
fi;
fi;
# we are doing nothing special
return FFTXGlobals.getOpts(conf);
fi;
if IsBound(conf.useCUDA) then
return FFTXGlobals.getOpts(conf);
fi;
# Here we have to handle GPU configs
Error("Don't know how to derive opts!\n");
end;
|
Formal statement is: lemma real_affinity_eq: "m \<noteq> 0 \<Longrightarrow> m * x + c = y \<longleftrightarrow> x = inverse m * y + - (c / m)" for m :: "'a::linordered_field" Informal statement is: If $m \neq 0$, then $m x + c = y$ if and only if $x = \frac{1}{m} y - \frac{c}{m}$.
|
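# setmerge: sum `variable` in Y by (trip, set), optionally restricted by `filter`
# (or an explicit row `index`), merge the totals onto X as column `varname`, and
# normalize by swept area `sa` to express the result per km^2. Non-finite values
# are zeroed; if no rows match, a zero-filled column is appended instead.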
setmerge = function(X, Y, varname, filter, variable, index=NULL) {
factors = c("trip", "set")
#browser()
print(varname)
if (!is.null(filter)) {
i = filter.class(Y, filter )
} else {
i = index # "index" is the override mechanism
}
if (length(i)>0) {
y = sum.data(Y[i,], factors, variable)
names(y) = c(factors, varname)
X = merge(x=X, y=y, by=factors, all.x=T )
bi = X[which(X$trip == 'S01122015' & X$set==1 & X$station ==101),]
#print('before')
#print(bi$R0.mass)
#print(bi$sa)
X[,varname] = X[,varname] / X$sa # express as x / km2
#ci = X[which(X$trip == 'S01122015' & X$set==1 & X$station ==101),]
#print(ci$R0.mass)
X[!is.finite(X[,varname]),varname] = 0
#if (varname=='R0.mass'){
# stop("......")
#}
} else {
dummy = rep(0, dim(X)[1])
oldnames = names(X)
X = cbind(X, dummy)
names(X) = c(oldnames, varname)
}
return(X)
}
|
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
class WholeDatasetFit(BaseEstimator, TransformerMixin):
"""
WholeDatasetFit is a transformer that fits on a predefined dataset and ignores X passed in fit.
This is useful in pipelines with cross validation used in a competition. This estimator is able to leak data from the test set for example.
"""
def __init__(self, transformer, whole_dataset):
self.transformer = transformer
self.whole_dataset = whole_dataset
def transform(self, X):
return self.transformer.transform(X)
    def fit(self, X, y=None):
        # Fit on the predefined dataset, ignoring X; return self per the sklearn convention.
        self.transformer.fit(self.whole_dataset, None)
        return self
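
# A minimal usage sketch (names are illustrative): fit a scaler on the combined
# train and test matrices so every cross-validation fold shares the same scaling.
#
#     from sklearn.preprocessing import StandardScaler
#     whole = np.vstack([X_train, X_test])
#     scaler = WholeDatasetFit(StandardScaler(), whole_dataset=whole)
#     Xt = scaler.fit(X_train).transform(X_train)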
|
If the degree of $p$ is at most $n$ and the degree of $q$ is at most $n$, then the degree of $p + q$ is at most $n$.
|
Sanders County has three active airports in Thompson Falls, Plains and Hot Springs. Addresses are featured below. If you have any questions, please contact the appropriate airport manager.
THOMPSON FALLS AIRPORT HAS FUEL - The new fuel system at the TF Airport is in operation as of December 2016, and is comparable to those systems at other Montana general aviation airports, such as Libby, Superior, Polson, Ronan, and Stevensville. Thompson Falls Airport Board members have been lobbying for a county-owned fuel system at the airport for 8 years, and project contributors include: Sanders County (Owner), Robert Peccia & Associates (Engineer), Bjorn Johnson Construction (Contractor), Mascott Equipment (Equipment and assembly). The project was funded with FAA non-primary entitlement funds. This money comes from aviation fuel taxes and passenger ticket taxes. Additional funding came from a MT Dept. of Transportation grant. The MDT grant came from a fund collected from MT state aviation fuel taxes. There was also a private donor who contributed $15,000 to help with the initial fuel purchase. A portion of the taxes paid on the sale of the fuel from this tank will go back to the state and federal funds that helped to fund this fuel system. The new fuel system installed at the Thompson Falls Airport will contain up to 6000 gallons of 100 octane Low Lead (100LL) aviation fuel for piston engine aircraft. It will be available to the flying public 24/7/365 via a self-serve credit card terminal. If any problems occur while using the credit card system, please call 827-6924.
|
SUBROUTINE RU_TEMP ( field, t, td, iret )
C************************************************************************
C* RU_TEMP *
C* *
C* This subroutine decodes a temperature/dewpoint field. The field *
C* is encoded as TTtDD where TT is temperature in degrees Celsius, *
C* t is approximate tenths of degree and sign indicator, and DD is *
C* the dewpoint depression in degrees Celsius. *
C* *
C* RU_TEMP ( FIELD, T, TD, IRET ) *
C* *
C* Input parameters: *
C* FIELD CHAR* Encoded group *
C* *
C* Output parameters: *
C* T REAL Temperature *
C* TD REAL Dewpoint temperature *
C* IRET INTEGER Return code *
C* 0 = normal return *
C** *
C* Log: *
C* M. desJardins/GSFC 6/86 *
C************************************************************************
INCLUDE 'GEMPRM.PRM'
C*
CHARACTER*(*) field
C------------------------------------------------------------------------
iret = 0
C*
t = RMISSD
td = RMISSD
C
C* The first three digits of the field contain encoded temperature.
C* If the temperature is missing (as indicated by a /) then the
C* conversion to integer will fail.
C
CALL ST_INTG ( field (1:3), itemp, ier )
IF ( ier .eq. 0 ) THEN
C
C* If the integer is even, the temperature is positive. Otherwise,
C* the temperature is negative.
C
isign = MOD ( itemp, 2 )
IF ( isign .eq. 1 ) itemp = - itemp
t = FLOAT ( itemp ) / 10.
C
C* The last two digits contain the encoded dewpoint depression.
C
CALL ST_INTG ( field (4:5), idep, ier )
C
C* Check that the dewpoint value is not missing.
C
IF ( ier .eq. 0 ) THEN
C
C* If the dewpoint given is less than 50, the dewpoint is
C* given in tenths of a degree. For numbers greater than
C* 55, the dewpoint is given in whole degrees plus 50.
C* The values 51 - 55 are not used.
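C*
C*	    For example (an illustrative group, not from the original doc):
C*	    field = '05715' gives itemp = 57, which is odd, so t = -5.7;
C*	    idep = 15 is at most 50, so ddep = 1.5 and td = -7.2.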
C
IF ( idep .le. 50 ) THEN
ddep = FLOAT ( idep ) / 10.
ELSE
ddep = FLOAT ( idep - 50 )
END IF
td = t - ddep
END IF
END IF
C*
RETURN
END
|
```python
import numpy as np
from scipy.integrate import simps
#my things
from FermatPrincipleCartesian import *
from Geometry import *
from Symbolic import *
from sympy import Matrix
from RealData import PrepareData
from ForwardEquation import *
def LMSolContinous(dataDict,mu = 0.5):
'''
``rays`` origin and dir are in ENU frame.
data is d = dtec = int_i ne ds - int_i0 ne ds.
neFunc = f(beta)
g(beta) = int_i f(beta) + rho_i ds - int_i0 f(beta) + rho_i0 ds
minimize (dobs - d)Cdinv(dobs - d) + mu (log(neFunc) - log(neprior))Cminv(log(neFunc) - log(neprior))
Solve in continuous basis.
Steps:
1. propagate rays
2. dd = d - g
3. wdd = Cdinv.dd
4. S = G^t.Cdinv.G + mu*lambda^t.Cminv.lambda
5. T = Sinv
6. dm = T.G^t.wdd
'''
#first fit just iri layers and global offsets
Nsol = 0
print("Constructing the model with {0} solitons".format(Nsol))
model = ForwardModel(dataDict['numAntennas'],dataDict['numDirections'],dataDict['numTimes'],
pathlength=2000,filename='ww-background',numThreads=1,
numSolitons = Nsol,radioArray = None)
#a priori
params = model.getForwardKernelParams()
g = model.doForward(dataDict['rays'],N=100,load=False)
dd = dataDict['dtec'] - g
    Cd = np.eye(np.size(dd))*np.var(g)*1.2 # observation covariance: sized by the data vector (was np.size(params))
Cdinv = np.linalg.pinv(Cd)
wdd = Cdinv.dot(dd)
rays = model.calcRays(dataDict['rays'],load=True)
plotWavefront(lambda x,y,z : model.generateSolitonModel()(x,y,z,0),rays,*getSolitonCube(model))
g = model.doForward(dataDict['rays'],N=100,load=True)
dd = dataDict['dtec'] - g
print("Computing observation covariance.")
Cd = np.eye(np.size(params))*np.var(g)*1.2
Cdinv = np.linalg.pinv(Cd)
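    # Steps 4-6 from the docstring: S = J^T.Cdinv.J (the mu*Cminv prior term is not yet
    # implemented; see the commented-out block below), T = pinv(S), dbeta = T.J^T.Cdinv.dd.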
    J = model.doJkernel(dataDict['rays'],N=100,load=True) # assumed fix: `self.doJkernel(inRays, ...)` had no class context; `model` and the propagated rays appear intended
S = J.transpose().dot(Cdinv).dot(J)
T = np.linalg.pinv(S)
wdd = J.transpose().dot(Cdinv).dot(dd)
dbeta = T.dot(wdd)
params += dbeta
model.setModelParams(params)
#monte carlo L.Cminv.L
#neFunc = model.solitonModelSymbolic
    #paramDict = model.getModelParamDict()
    #L = []
    #for param in paramDict.keys():
# L.append(neFunc.diff(param))
def testForwardProblem():
sol = SolitonModel(8)
neFunc = sol.generateSolitonModel()
theta = np.linspace(-np.pi/8.,np.pi/8.,2)
#phi = np.linspace(0,2*np.pi,6)
rays = []
origin = ac.ITRS(sol.enu.location).cartesian.xyz.to(au.km).value
for t in theta:
for p in theta:
direction = ac.SkyCoord(np.sin(t),
np.sin(p),
1.,frame=sol.enu).transform_to('itrs').cartesian.xyz.value
rays.append(Ray(origin,direction))
forwardProblem = ForwardProblem(sol)
times = np.zeros(len(rays))
d = forwardProblem.doForward(rays,times,N=1000)
print(d)
#plotWavefront(f.nFunc.subs({'t':0}),rays,*getSolitonCube(sol))
#plotFuncCube(f.nFunc.subs({'t':0}), *getSolitonCube(sol),rays=rays)
if __name__ == '__main__':
np.random.seed(1234)
#testForwardProblem()
dataDict = PrepareData(infoFile='SB120-129/WendysBootes.npz',
dataFolder='SB120-129/',
timeStart = 0, timeEnd = 0,
arrayFile='arrays/lofar.hba.antenna.cfg',load=True)
LMSolContinous(dataDict,mu = 0.5)
#LMSolContinous(**dataDict)
```
```python
import pylab as plt
plt.hist(dataDict['dtec'])
plt.show()
```
```python
```
|
Formal statement is: lemma bilinear_rneg: "bilinear h \<Longrightarrow> h x (- y) = - h x y" Informal statement is: If $h$ is bilinear, then $h(x, -y) = -h(x, y)$.
|
/-
Copyright (c) 2022 Arthur Paulino, Damiano Testa. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Arthur Paulino, Damiano Testa
! This file was ported from Lean 3 source module tactic.move_add
! leanprover-community/mathlib commit 205a628e1fdc9220cb86832d66a50d1381abc103
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Tactic.Core
import Mathbin.Algebra.Group.Basic
/-!
# `move_add`: a tactic for moving summands
Calling `move_add [a, ← b, c]`, recursively looks inside the goal for expressions involving a sum.
Whenever it finds one, it moves the summands that unify to `a, b, c`, removing all parentheses.
See the doc-string for `tactic.interactive.move_add` for more information.
## Implementation notes
This file defines a general `move_op` tactic, intended for reordering terms in an expression
obtained by repeated applications of a given associative, commutative binary operation. The
user decides the final reordering. Applying `move_op` without specifying the order will simply
remove all parentheses from the expression.
The main user-facing tactics are `move_add` and `move_mul`, dealing with addition and
multiplication, respectively.
In what is below, we talk about `move_add` for definiteness, but everything applies
to `move_mul` and to the more general `move_op`.
The implementation of `move_add` only moves the terms specified by the user (and rearranges
parentheses).
Note that the tactic `abel` already implements a very solid heuristic for normalizing terms in an
additive commutative semigroup and produces expressions in more or less standard form.
The scope of `move_add` is different: it is designed to make it easy to move individual terms
around a sum.
## Future work
* Add support for `neg/div/inv` in additive/multiplicative groups?
* Currently the tactic has special support for `+` and `*`. Every other operation is outsourced
to `ac_refl` (see the proof of `reorder_hyp`). Should there be the desire for specialized support
of other operations (e.g. `∪, ∩, ⊓, ⊔, ...`), that is the definition to modify, at least in the
first instance.
* Add functionality for moving terms across the two sides of an in/dis/equality.
E.g. it might be desirable to have `to_lhs [a]` converting `b + c = a + d` to `- a + b + c = d`.
* Add a non-recursive version for use in `conv` mode.
* Revise tests?
-/
namespace Tactic
namespace MoveOp
/-!
Throughout this file, `op : pexpr` denotes an arbitrary (binary) operation. We do not use,
but implicitly imagine, that this operation is associative, since we extract iterations of
such operations, with complete disregard of the order in which these iterations arise.
-/
/-- Given a list `un` of `α`s and a list `bo` of `bool`s, return the sublist of `un`
consisting of the entries of `un` whose corresponding entry in `bo` is `tt`.
Used for error management: `un` is the list of user inputs, `bo` is the list encoding which input
is unused (`tt`) and which input is used (`ff`).
`return_unused` returns the unused user inputs.
If `bo` is shorter than `un`, `return_unused` will include the remainder of `un`.
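For example, `return_unused [a, b, c] [tt, ff]` returns `[a, c]`.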
-/
def returnUnused {α : Type _} : List α → List Bool → List α
| un, [] => un
| [], bo => []
| u :: us, b :: bs => if b then u :: return_unused us bs else return_unused us bs
#align tactic.move_op.return_unused Tactic.MoveOp.returnUnused
/-- Given a list `lp` of `bool × pexpr` and a list `l_un` of `expr`, scan the elements of `lp` one
at a time and produce 3 sublists of `l_un`.
If `(tf,pe)` is the first element of `lp`, we look for the first element of `l_un` that unifies with
`pe.to_expr`. If no such element exists, then we discard `(tf,pe)` and move along.
If `eu ∈ l_un` is the first element of `l_un` that unifies with `pe.to_expr`, then we add `eu` as
the next element of either the first or the second list, depending on the boolean `tf` and we remove
`eu` from the list `l_un`. In this case, we continue our scanning with the next element of `lp`,
replacing `l_un` by `l_un.erase eu`.
Once we exhaust the elements of `lp`, we return the four lists:
* `l_tt`: the list of elements of `l_un` that came from an element of `lp` whose boolean was `tt`,
* `l_ff`: the list of elements of `l_un` that came from an element of `lp` whose boolean was `ff`,
* `l_un`: the un-unified elements of `l_un`,
* `l_m`: a "mask" list of booleans corresponding to the elements of `lp` that were placed in `l_un`.
The un-unified elements of `l_un` get used for error management: they keep track of which user inputs
are superfluous. -/
unsafe def move_left_or_right :
List (Bool × expr) →
List expr → List Bool → tactic (List expr × List expr × List expr × List Bool)
| [], l_un, l_m => return ([], [], l_un, l_m)
| be :: l, l_un, l_m => do
let ex :: _ ← l_un.filterM fun e' => succeeds <| unify be.2 e' |
move_left_or_right l l_un (l_m.append [true])
let (l_tt, l_ff, l_un, l_m) ← move_left_or_right l (l_un.eraseₓ ex) (l_m.append [false])
if be.1 then return (ex :: l_tt, l_ff, l_un, l_m) else return (l_tt, ex :: l_ff, l_un, l_m)
#align tactic.move_op.move_left_or_right tactic.move_op.move_left_or_right
/-- We adapt `move_left_or_right` to our goal:
1. we convert a list of pairs `bool × pexpr` to a list of pairs `bool × expr`,
2. we use the extra input `sl : list expr` to perform the unification and sorting step
`move_left_or_right`,
3. we jam the third factor inside the first two.
-/
unsafe def final_sort (lp : List (Bool × pexpr)) (sl : List expr) :
tactic (List expr × List Bool) := do
let lp_exp : List (Bool × expr) ←
lp.mapM fun x => do
let e ← to_expr x.2 true false
return (x.1, e)
let (l1, l2, l3, is_unused) ← move_left_or_right lp_exp sl []
return (l1 ++ l3 ++ l2, is_unused)
#align tactic.move_op.final_sort tactic.move_op.final_sort
/-- `as_given_op op e` unifies the head term of `e`, which is a ≥2-argument function application,
with the binary operation `op`, failing if it cannot. -/
unsafe def as_given_op (op : pexpr) : expr → tactic expr
| expr.app (expr.app F a) b => do
to_expr op tt ff >>= unify F
return F
| _ => failed
#align tactic.move_op.as_given_op tactic.move_op.as_given_op
/-- `(e, unused) ← reorder_oper op lp e` converts an expression `e` to a similar looking one.
The tactic scans the expression `e` looking for subexpressions that begin with the given binary
operation `op`. As soon as `reorder_oper` finds one such subexpression,
* it extracts the "`op`-summands" in the subexpression,
* it rearranges them according to the rules determined by `lp`,
* it recurses into each `op`-summand.
The `unused` output is a list of booleans. It is keeping track of which of the inputs provided
by `lp` is actually used to perform the rearrangements. It is useful to report unused inputs.
Here are two examples:
```lean
#eval trace $ reorder_oper ``((=)) [(ff,``(2)), (tt,``(7))] `(∀ x y : ℕ, 2 = 0)
-- (ℕ → ℕ → 0 = 2, [ff, tt])
-- the input `[(ff,``(2)), (tt,``(7))]` instructs Lean to move `2` to the right and `7`
-- to the left. Lean reports that `2` is not unused and `7` is unused as `[ff, tt]`.
#eval trace $ reorder_oper ``((+)) [(ff,``(2)), (tt,``(5))]
`(λ (e : ℕ), ∀ (x : ℕ), ∃ (y : ℕ),
2 + x * (y + (e + 5)) + y = x + 2 + e → 2 + x = x + 5 + (2 + y))
/- `2` moves to the right, `5` moves to the left. Lean reports that `2, 5` are not unused
as `[ff,ff]`
(λ (e : ℕ), ∀ (x : ℕ), ∃ (y : ℕ),
x * (5 + y + e) + y + 2 = x + e + 2 → x + 2 = 5 + x + y + 2, [ff, ff]) -/
```
TODO: use `ext_simplify_core` instead of traversing the expression manually
-/
unsafe def reorder_oper (op : pexpr) (lp : List (Bool × pexpr)) : expr → tactic (expr × List Bool)
| F'@(expr.app F b) => do
let is_op ← try_core (as_given_op op F')
match is_op with
| some op => do
let (sort_list, is_unused) ← list_binary_operands op F' >>= final_sort lp
let sort_all ←
sort_list fun e => do
let (e, lu) ← reorder_oper e
pure (e, [lu, is_unused].transpose.map List.and)
let (recs, list_unused) := sort_all
let recs_0 :: recs_rest ← pure recs |
throwError"internal error: cannot have 0 operands"
let summed := recs_rest (fun e f => op [e, f]) recs_0
return (summed, list_unused List.and)
| none => do
let [(Fn, unused_F), (bn, unused_b)] ← [F, b].mapM <| reorder_oper
return <| (expr.app Fn bn, [unused_F, unused_b].transpose.map List.and)
| expr.pi na bi e f => do
let [en, fn] ← [e, f].mapM <| reorder_oper
return (expr.pi na bi en.1 fn.1, [en.2, fn.2].transpose.map List.and)
| expr.lam na bi e f => do
let [en, fn] ← [e, f].mapM <| reorder_oper
return (expr.lam na bi en.1 fn.1, [en.2, fn.2].transpose.map List.and)
| expr.mvar na pp e => do
let en
←-- is it really needed to recurse here?
reorder_oper
e
return (expr.mvar na pp en.1, [en.2].transpose.map List.and)
| expr.local_const na pp bi e => do
let en
←-- is it really needed to recurse here?
reorder_oper
e
return (expr.local_const na pp bi en.1, [en.2].transpose.map List.and)
| expr.elet na e f g => do
let [en, fn, gn] ← [e, f, g].mapM <| reorder_oper
return (expr.elet na en.1 fn.1 gn.1, [en.2, fn.2, gn.2].transpose.map List.and)
| expr.macro ma le => do
let len
←-- is it really needed to recurse here?
le.mapM <|
reorder_oper
let (lee, lb) := len.unzip
return (expr.macro ma lee, lb List.and)
| e => pure (e, lp.map fun _ => true)
#align tactic.move_op.reorder_oper tactic.move_op.reorder_oper
/- ./././Mathport/Syntax/Translate/Expr.lean:330:4: warning: unsupported (TODO): `[tacs] -/
/- ./././Mathport/Syntax/Translate/Expr.lean:330:4: warning: unsupported (TODO): `[tacs] -/
/-- Passes the user input `na` to `reorder_oper` at a single location, that could either be
`none` (referring to the goal) or `some name` (referring to hypothesis `name`). Replaces the
given hypothesis/goal with the rearranged one that `reorder_hyp` receives from `reorder_oper`.
Returns a pair consisting of a boolean and a further list of booleans.
The single boolean is `tt` iff the tactic did *not* change the goal on which it was acting.
The list of booleans records which variable in `ll` has been unified in the application:
`tt` means that the corresponding variable has *not* been unified.
This definition is useful to streamline error catching. -/
unsafe def reorder_hyp (op : pexpr) (lp : List (Bool × pexpr)) (na : Option Name) :
tactic (Bool × List Bool) := do
let (thyp, hyploc) ←
match na with
| none => do
let t ← target
return (t, none)
| some na => do
let hl ← get_local na
let th ← infer_type hl
return (th, some hl)
let (reordered, is_unused) ← reorder_oper op lp thyp
unify reordered thyp >> return (tt, is_unused) <|> do
let neq
←-- the current `do` block takes place where the reordered expression is not equal to the original
mk_app
`eq [thyp, reordered]
let nop ← to_expr op tt ff
let pre ← pp reordered
let (_, prf) ←
solve_aux neq <|
match nop with
| q(Add.add) => sorry
| q(Mul.mul) => sorry
| _ =>
ac_refl <|>
fail
f! "the associative/commutative lemmas used do not suffice to prove that the initial goal equals:
{pre}
Hint: try adding `is_associative` or `is_commutative` instances.
"
match hyploc with
| none => replace_target reordered prf
| some hyploc => replace_hyp hyploc reordered prf >> skip
return (ff, is_unused)
#align tactic.move_op.reorder_hyp tactic.move_op.reorder_hyp
section ParsingArgumentsForMoveOp
/- ./././Mathport/Syntax/Translate/Tactic/Mathlib/Core.lean:38:34: unsupported: setup_tactic_parser -/
/-- `move_op_arg` is a single elementary argument that `move_op` takes for the
variables to be moved. It is either a `pexpr`, or a `pexpr` preceded by a `←`. -/
unsafe def move_op_arg (prec : Nat) : parser (Bool × pexpr) :=
Prod.mk <$> Option.isSome <$> (tk "<-")? <*> parser.pexpr prec
#align tactic.move_op.move_op_arg tactic.move_op.move_op_arg
/-- `move_pexpr_list_or_texpr` is either a list of `move_op_arg`, possibly empty, or a single
`move_op_arg`. -/
unsafe def move_pexpr_list_or_texpr : parser (List (Bool × pexpr)) :=
list_of (move_op_arg 0) <|> List.ret <$> move_op_arg tac_rbp <|> return []
#align tactic.move_op.move_pexpr_list_or_texpr tactic.move_op.move_pexpr_list_or_texpr
end ParsingArgumentsForMoveOp
end MoveOp
/- ./././Mathport/Syntax/Translate/Tactic/Mathlib/Core.lean:38:34: unsupported: setup_tactic_parser -/
open MoveOp
/-- `move_op args locat op` is the non-interactive version of the main tactics `move_add` and
`move_mul` of this file. Given as input `args` (a list of terms of a sequence of operands),
`locat` (hypotheses or goal where the tactic should act) and `op` (the operation to use),
`move_op` attempts to perform the rearrangement of the terms determined by `args`.
Currently, the tactic uses only `add/mul_comm, add/mul_assoc, add/mul_left_comm`, so other
operations will not actually work.
-/
unsafe def move_op (args : parse move_pexpr_list_or_texpr) (locat : parse location) (op : pexpr) :
tactic Unit := do
let locas ← locat.get_locals
let tg ← target
let locas_with_tg := if locat.include_goal then locas ++ [tg] else locas
let ner ←
locas_with_tg.mapM fun e => reorder_hyp op args e.local_pp_name <|> reorder_hyp op args none
let (unch_tgts, unus_vars) := ner.unzip
let str_unva ←
match
(returnUnused args (unus_vars.transpose.map List.and)).map fun e : Bool × pexpr => e.2 with
| [] => pure []
| [pe] => do
let nm ← to_expr pe true false >>= fun ex => pp ex.replace_mvars
return [f! "'{nm}' is an unused variable"]
| pes => do
let nms ←
(pes.mapM fun e => to_expr e true false) >>= fun exs =>
(exs.map expr.replace_mvars).mapM pp
return [f! "'{nms}' are unused variables"]
let str_tgts :=
match locat with
| loc.wildcard => if unch_tgts.and then [f!"nothing changed"] else []
| loc.ns names =>
let linames := returnUnused locas unch_tgts
(if none ∈ returnUnused names unch_tgts then [f!"Goal did not change"] else []) ++
if linames ≠ [] then [f! "'{linames.reverse}' did not change"] else []
let [] ← pure (str_tgts ++ str_unva) |
fail (format.intercalate "\n" (str_tgts ++ str_unva))
assumption <|> try (tactic.reflexivity reducible)
#align tactic.move_op tactic.move_op
namespace Interactive
/--
Calling `move_add [a, ← b, c]`, recursively looks inside the goal for expressions involving a sum.
Whenever it finds one, it moves the summands that unify to `a, b, c`, removing all parentheses.
Repetitions are allowed, and are processed following the user-specified ordering.
The terms preceded by a `←` get placed to the left, the ones without the arrow get placed to the
right. Unnamed terms stay in place. Due to re-parenthesizing, doing `move_add` with no argument
may change the goal. Also, the *order* in which the terms are provided matters: the tactic reads
them from left to right. This is especially important if there are multiple matches for the typed
terms in the given expressions.
A single call of `move_add` moves terms across different sums in the same expression.
Here is an example.
```lean
import tactic.move_add
example {a b c d : ℕ} (h : c = d) : c + b + a = b + a + d :=
begin
move_add [← a, b], -- Goal: `a + c + b = a + d + b` -- both sides changed
congr,
exact h
end
example {a b c d : ℕ} (h : c = d) : c + b * c + a * c = a * d + d + b * d :=
begin
move_add [_ * c, ← _ * c], -- Goal: `a * c + c + b * c = a * d + d + b * d`
-- the first `_ * c` unifies with `b * c` and moves to the right
-- the second `_ * c` unifies with `a * c` and moves to the left
congr;
assumption
end
```
The list of expressions that `move_add` takes is optional and a single expression can be passed
without brackets. Thus `move_add ← f` and `move_add [← f]` mean the same.
Finally, `move_add` can also target one or more hypotheses. If `hp₁, hp₂` are in the
local context, then `move_add [f, ← g] at hp₁ hp₂` performs the rearranging at `hp₁` and `hp₂`.
As usual, passing `⊢` refers to acting on the goal.
## Reporting sub-optimal usage
The tactic could fail to prove the reordering. One potential cause is when there are multiple
matches for the rearrangements and an earlier rewrite makes a subsequent one fail. Another
possibility is that the rearranged expression changes the *Type* of some expression and the
tactic gets stumped. Please, report bugs and failures in the Zulip chat!
There are three kinds of unwanted use for `move_add` that result in errors, where the tactic fails
and flags the unwanted use.
1. `move_add [vars]? at *` reports globally unused variables and whether *all* goals
are unchanged, not *each unchanged goal*.
2. If a target of `move_add [vars]? at targets` is left unchanged by the tactic, then this will be
flagged (unless we are using `at *`).
3. If a user-provided expression never unifies, then the variable is flagged.
In these cases, the tactic produces an error, reporting unused inputs and unchanged targets as
appropriate.
For instance, `move_add ← _` always fails reporting an unchanged goal, but never an unused variable.
## Comparison with existing tactics
* `tactic.interactive.abel`
performs a "reduction to normal form" that allows it to close goals involving sums with higher
success rate than `move_add`. If the goal is an equality of two sums that are simply obtained by
reparenthesizing and permuting summands, then `move_add [appropriate terms]` can close the goal.
Compared to `abel`, `move_add` has the advantage of allowing the user to specify the beginning and
the end of the final sum, so that from there the user can continue with the proof.
* `tactic.interactive.ac_change`
supports a wide variety of operations. At the moment, `move_add` works with addition, `move_mul`
works with multiplication. There is the possibility of supporting other operations, using the
non-interactive tactic `tactic.move_op`.
Still, on several experiments, `move_add` had a much quicker performance than `ac_change`.
Also, for `move_add` the user need only specify a few terms: the tactic itself takes care of
producing the full rearrangement and proving it "behind the scenes".
### Remark:
It is still possible that the same output of `move_add [exprs]` can be achieved by a proper sublist
of `[exprs]`, even if the tactic does not flag anything. For instance, giving the full re-ordering
of the expressions in the target that we want to achieve will not complain that there are unused
variables, since all the user-provided variables have been matched. Of course, specifying the order
of all-but-the-last variable suffices to determine the permutation. E.g., with a goal of
`a + b = 0`, applying either one of `move_add [b,a]`, or `move_add a`, or `move_add ← b` has the
same effect and changes the goal to `b + a = 0`. These are all valid uses of `move_add`.
-/
unsafe def move_add (args : parse move_pexpr_list_or_texpr) (locat : parse location) :
tactic Unit :=
move_op args locat ``((· + ·))
#align tactic.interactive.move_add tactic.interactive.move_add
/-- See the doc-string for `tactic.interactive.move_add` and mentally
replace addition with multiplication throughout. ;-) -/
unsafe def move_mul (args : parse move_pexpr_list_or_texpr) (locat : parse location) :
tactic Unit :=
move_op args locat ``(Mul.mul)
#align tactic.interactive.move_mul tactic.interactive.move_mul
/-- `move_oper` behaves like `move_add` except that it also takes an associative, commutative,
binary operation as input. The operation must be passed as a list consisting of a single element.
For instance
```lean
example (a b : ℕ) : max a b = max b a :=
by move_oper [max] [← a, b] at *
```
solves the goal. For more details, see the `move_add` doc-string, replacing `add` with your
intended operation.
-/
unsafe def move_oper (op : parse pexpr_list) (args : parse move_pexpr_list_or_texpr)
(locat : parse location) : tactic Unit := do
let [op] ← pure op |
fail "only one operation is allowed"
move_op args locat op
#align tactic.interactive.move_oper tactic.interactive.move_oper
add_tactic_doc
{ Name := "move_add"
category := DocCategory.tactic
declNames := [`tactic.interactive.move_add]
tags := ["arithmetic"] }
add_tactic_doc
{ Name := "move_mul"
category := DocCategory.tactic
declNames := [`tactic.interactive.move_mul]
tags := ["arithmetic"] }
end Interactive
end Tactic
|
[STATEMENT]
lemma rem_condless_valid_10:
fixes PROB as
assumes "as \<in> (valid_plans PROB)"
shows "(rem_condless_act s [] as \<in> valid_plans PROB)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rem_condless_act s [] as \<in> valid_plans PROB
[PROOF STEP]
using assms valid_plans_def rem_condless_valid_1 rem_condless_valid_4
[PROOF STATE]
proof (prove)
using this:
as \<in> valid_plans PROB
valid_plans ?prob \<equiv> {as. set as \<subseteq> ?prob}
exec_plan ?s ?as = exec_plan ?s (rem_condless_act ?s [] ?as)
set ?as \<subseteq> ?A \<Longrightarrow> set (rem_condless_act ?s [] ?as) \<subseteq> ?A
goal (1 subgoal):
1. rem_condless_act s [] as \<in> valid_plans PROB
[PROOF STEP]
by blast
|
%kcpRGBNTSCtoRC 'converts an image from RGB NTSC to Retinal Cone '
% This MatLab function was automatically generated by a converter (KhorosToMatLab) from the Khoros cpRGBNTSCtoRC.pane file
%
% Parameters:
% InputFile: i 'Input ', required: 'First Input data object'
% OutputFile: o 'Output', required: 'Resulting output data object'
%
% Example: o = kcpRGBNTSCtoRC(i, {'i','';'o',''})
%
% Khoros helpfile follows below:
%
% PROGRAM
% cpRGBNTSCtoRC - converts an image from RGB NTSC to Retinal Cone
%
% DESCRIPTION
% call ckfromRGBNTSC with the parameter -retinalcone to convert an image from RGB NTSC to Retinal Cone
%
%
%
% EXAMPLES
%
% "SEE ALSO"
%
% RESTRICTIONS
%
% REFERENCES
% All references for the COLOR toolbox are on the file $COLOR/Readme.html
%
% COPYRIGHT
% Copyright (C) 1997 Rafael Santos. Khoros (C) Khoral Research, Inc.
%
function varargout = kcpRGBNTSCtoRC(varargin)
if nargin ==0
Inputs={};arglist={'',''};
elseif nargin ==1
Inputs=varargin{1};arglist={'',''};
elseif nargin ==2
Inputs=varargin{1}; arglist=varargin{2};
else error('Usage: [out1,..] = kcpRGBNTSCtoRC(Inputs,arglist).');
end
if size(arglist,2)~=2
error('arglist must be of form {''ParameterTag1'',value1;''ParameterTag2'',value2}')
end
narglist={'i', '__input';'o', '__output'};
maxval={0,0};
minval={0,0};
istoggle=[0,0];
was_set=istoggle * 0;
paramtype={'InputFile','OutputFile'};
% identify the input arrays and assign them to the arguments as stated by the user
if ~iscell(Inputs)
Inputs = {Inputs};
end
NumReqOutputs=1; nextinput=1; nextoutput=1;
for ii=1:size(arglist,1)
wasmatched=0;
for jj=1:size(narglist,1)
if strcmp(arglist{ii,1},narglist{jj,1}) % a given argument was matched to the possible arguments
wasmatched = 1;
was_set(jj) = 1;
if strcmp(narglist{jj,2}, '__input')
if (nextinput > length(Inputs))
error(['Input ' narglist{jj,1} ' has no corresponding input!']);
end
narglist{jj,2} = 'OK_in';
nextinput = nextinput + 1;
elseif strcmp(narglist{jj,2}, '__output')
if (nextoutput > nargout)
error(['Output nr. ' narglist{jj,1} ' is not present in the assignment list of outputs !']);
end
if (isempty(arglist{ii,2}))
narglist{jj,2} = 'OK_out';
else
narglist{jj,2} = arglist{ii,2};
end
nextoutput = nextoutput + 1;
if (minval{jj} == 0)
NumReqOutputs = NumReqOutputs - 1;
end
elseif ischar(arglist{ii,2}) % ischar replaces the deprecated isstr
narglist{jj,2} = arglist{ii,2};
else
if strcmp(paramtype{jj}, 'Integer') & (round(arglist{ii,2}) ~= arglist{ii,2})
error(['Argument ' arglist{ii,1} ' is of integer type but non-integer number ' arglist{ii,2} ' was supplied']);
end
if (minval{jj} ~= 0 | maxval{jj} ~= 0)
if (minval{jj} == 1 & maxval{jj} == 1 & arglist{ii,2} < 0)
error(['Argument ' arglist{ii,1} ' must be bigger or equal to zero!']);
elseif (minval{jj} == -1 & maxval{jj} == -1 & arglist{ii,2} > 0)
error(['Argument ' arglist{ii,1} ' must be smaller or equal to zero!']);
elseif (minval{jj} == 2 & maxval{jj} == 2 & arglist{ii,2} <= 0)
error(['Argument ' arglist{ii,1} ' must be bigger than zero!']);
elseif (minval{jj} == -2 & maxval{jj} == -2 & arglist{ii,2} >= 0)
error(['Argument ' arglist{ii,1} ' must be smaller than zero!']);
elseif (minval{jj} ~= maxval{jj} & arglist{ii,2} < minval{jj})
error(['Argument ' arglist{ii,1} ' must be bigger than ' num2str(minval{jj})]);
elseif (minval{jj} ~= maxval{jj} & arglist{ii,2} > maxval{jj})
error(['Argument ' arglist{ii,1} ' must be smaller than ' num2str(maxval{jj})]);
end
end
end
if ~strcmp(narglist{jj,2},'OK_out') & ~strcmp(narglist{jj,2},'OK_in')
narglist{jj,2} = arglist{ii,2};
end
end
end
if (wasmatched == 0 & ~strcmp(arglist{ii,1},''))
error(['Argument ' arglist{ii,1} ' is not a valid argument for this function']);
end
end
% match the remaining inputs/outputs to the unused arguments and test for missing required inputs
for jj=1:size(narglist,1)
if strcmp(paramtype{jj}, 'Toggle')
if (narglist{jj,2} ==0)
narglist{jj,1} = '';
end;
narglist{jj,2} = '';
end;
if ~strcmp(narglist{jj,2},'__input') && ~strcmp(narglist{jj,2},'__output') && istoggle(jj) && ~ was_set(jj)
narglist{jj,1} = '';
narglist{jj,2} = '';
end;
if strcmp(narglist{jj,2}, '__input')
if (minval{jj} == 0) % meaning this input is required
if (nextinput > length(Inputs)) % length, not size: Inputs is a cell array
error(['Required input ' narglist{jj,1} ' has no corresponding input in the list!']);
else
narglist{jj,2} = 'OK_in';
nextinput = nextinput + 1;
end
else % this is an optional input
if (nextinput <= length(Inputs))
narglist{jj,2} = 'OK_in';
nextinput = nextinput + 1;
else
narglist{jj,1} = '';
narglist{jj,2} = '';
end;
end;
else
if strcmp(narglist{jj,2}, '__output')
if (minval{jj} == 0) % this is a required output
if (nextoutput > nargout & nargout > 1)
error(['Required output ' narglist{jj,1} ' is not stated in the assignment list!']);
else
narglist{jj,2} = 'OK_out';
nextoutput = nextoutput + 1;
NumReqOutputs = NumReqOutputs-1;
end
else % this is an optional output
if (nargout - nextoutput >= NumReqOutputs)
narglist{jj,2} = 'OK_out';
nextoutput = nextoutput + 1;
else
narglist{jj,1} = '';
narglist{jj,2} = '';
end;
end
end
end
end
if nargout
varargout = cell(1,nargout);
else
varargout = cell(1,1);
end
global KhorosRoot
if exist('KhorosRoot') && ~isempty(KhorosRoot)
w=['"' KhorosRoot];
else
if ispc
w='"C:\Program Files\dip\khorosBin\';
else
[s,w] = system('which cantata');
w=['"' w(1:end-8)];
end
end
[varargout{:}]=callKhoros([w 'ckfromRGBNTSC" -retinalcone'],Inputs,narglist);
|
/*
Copyright 2014, 2015 Rogier van Dalen.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef MATH_SEQUENCE_HPP_INCLUDED
#define MATH_SEQUENCE_HPP_INCLUDED
#include <vector>
#include <list>
#include <stdexcept>
#include <algorithm> // for std::reverse.
#include <iosfwd>
#include <boost/utility/enable_if.hpp>
#include <boost/mpl/bool.hpp>
#include <boost/optional.hpp>
#include <boost/functional/hash_fwd.hpp>
#include "utility/overload_order.hpp"
#include "utility/disable_if_same.hpp"
#include "range/core.hpp"
#include "range/for_each_macro.hpp"
#include "range/empty_view.hpp"
#include "range/tuple.hpp"
#include "range/std/view_optional.hpp"
#include "range/std/container.hpp"
#include "range/equal.hpp"
#include "range/less_lexicographical.hpp"
#include "range/hash_range.hpp"
#include "rime/if.hpp"
#include "rime/call_if.hpp"
#include "rime/assert.hpp"
#include "magma.hpp"
namespace math {
/**
Semiring that contains a sequence of zero or more symbols, or it can be
the multiplicative annihilator.
All specialised classes can be converted to this one implicitly; the conversion
the other way around is explicit, because it may not be possible.
\ref times concatenates two sequences.
\ref plus returns the longest common prefix (if \a Direction is \ref left) or
suffix (if \a Direction is \ref right).
\ref divide is only defined in direction \a Direction, and then only if the divisor is
a prefix (or suffix) of the dividend.
\ref compare implements a strict weak ordering by sorting elements
lexicographically from \a Direction.
\ref choose picks the shortest sequence first, and uses lexicographical order
from \a Direction as a tie-breaker.
This makes the sequence a semiring with \ref times and \ref choose in both
directions, whatever the value of \a Direction.
Sequences support Boost.Hash, if \c boost/functional/hash.hpp is included.
Sequences of different types that compare equal have the same hash value.
The details of how the hashes are computed (in what direction, for example) are
not defined, and likely to change in future versions.
\sa math::empty_sequence, math::single_sequence, math::optional_sequence,
math::sequence_annihilator
\todo Should a bi-directional version be implemented, which would not provide
the plus operation, but allow division in either direction?
*/
template <class Symbol, class Direction = left> class sequence;
template <class Symbol, class Direction = left> class empty_sequence;
template <class Symbol, class Direction = left> class single_sequence;
template <class Symbol, class Direction = left> class optional_sequence;
template <class Symbol, class Direction = left> class sequence_annihilator;
template <class Symbol, class Direction> struct sequence_tag;
template <class Symbol, class Direction>
struct decayed_magma_tag <sequence <Symbol, Direction>>
{ typedef sequence_tag <Symbol, Direction> type; };
template <class Symbol, class Direction>
struct decayed_magma_tag <empty_sequence <Symbol, Direction>>
{ typedef sequence_tag <Symbol, Direction> type; };
template <class Symbol, class Direction>
struct decayed_magma_tag <single_sequence <Symbol, Direction>>
{ typedef sequence_tag <Symbol, Direction> type; };
template <class Symbol, class Direction>
struct decayed_magma_tag <optional_sequence <Symbol, Direction>>
{ typedef sequence_tag <Symbol, Direction> type; };
template <class Symbol, class Direction>
struct decayed_magma_tag <sequence_annihilator <Symbol, Direction>>
{ typedef sequence_tag <Symbol, Direction> type; };
template <class Symbol, class Direction> class sequence {
private:
bool is_annihilator_;
std::vector <Symbol> symbols_;
public:
/**
Initialise with no symbols.
(Multiplicative identity.)
*/
sequence() : is_annihilator_ (false), symbols_() {}
/**
Initialise with the empty sequence.
*/
sequence (empty_sequence <Symbol, Direction> const &)
: is_annihilator_ (false), symbols_() {}
/**
Initialise with a sequence with one element.
*/
sequence (single_sequence <Symbol, Direction> const & s)
: is_annihilator_ (false), symbols_ (1, s.symbol()) {}
/**
Initialise with a sequence with zero or one element.
*/
sequence (optional_sequence <Symbol, Direction> const & s)
: is_annihilator_ (false)
{
if (!s.empty())
symbols_.push_back (s.symbol().get());
assert (s.empty() == this->empty());
}
/**
Initialise as the multiplicative annihilator.
(Additive identity.)
*/
sequence (sequence_annihilator <Symbol, Direction> const &)
: is_annihilator_ (true) {}
/**
Initialise with a range of symbols.
*/
template <class Range, class Enable = typename
boost::enable_if <range::is_range <Range>>::type>
explicit sequence (Range && range)
: is_annihilator_ (false)
{
RANGE_FOR_EACH (element, range)
symbols_.push_back (element);
}
/**
Initialise with a range of symbols.
(Optimised version using the move constructor of std::vector.)
*/
explicit sequence (std::vector <Symbol> && range)
: is_annihilator_ (false), symbols_ (std::move (range)) {}
/**
Return \c true iff this is an annihilator.
*/
bool is_annihilator() const { return is_annihilator_; }
/**
Return \c true iff this contains a symbol sequence of zero elements.
\pre This is not an annihilator.
*/
bool empty() const {
assert (!is_annihilator());
return range::empty (symbols_);
}
/**
Return a range containing the symbols.
\pre This is not an annihilator.
*/
std::vector <Symbol> const & symbols() const {
assert (!is_annihilator());
return symbols_;
}
};
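/* A hypothetical usage sketch (not part of the original header), assuming the
free functions \c math::times and \c math::plus that \c magma.hpp declares for
magma types:

    std::vector <char> abc = {'a', 'b', 'c'};
    std::vector <char> abd = {'a', 'b', 'd'};
    math::sequence <char> s (abc);
    math::sequence <char> t (abd);
    auto product = math::times (s, t);  // Concatenation: a b c a b d.
    auto common = math::plus (s, t);    // Longest common prefix: a b.
*/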
/**
Sequence that is known at compile time to be of zero length.
\sa math::sequence, math::single_sequence, math::optional_sequence
*/
template <class Symbol, class Direction> class empty_sequence {
public:
empty_sequence() {}
/**
Initialise with a sequence, which must be empty.
\throw magma_not_convertible If the sequence is non-empty.
*/
explicit empty_sequence (sequence <Symbol, Direction> const & s) {
if (s.is_annihilator() || !s.empty())
throw magma_not_convertible();
}
/**
Initialise with single_sequence, which is never possible and always throws.
\throw magma_not_convertible
*/
explicit empty_sequence (single_sequence <Symbol, Direction> const &)
{ throw magma_not_convertible(); }
/**
Initialise with optional_sequence, which must be empty.
\throw magma_not_convertible If the sequence is non-empty.
*/
explicit empty_sequence (optional_sequence <Symbol, Direction> const & s) {
if (!s.empty())
throw magma_not_convertible();
}
/**
Initialise with a range of symbols.
This range must be empty.
*/
template <class Range, class Enable = typename
boost::enable_if <range::is_range <Range>>::type>
explicit empty_sequence (Range && range)
{ rime::assert_ (range::empty (range)); }
rime::false_type is_annihilator() const { return rime::false_; }
rime::true_type empty() const { return rime::true_; }
range::empty_view symbols() const { return range::empty_view(); }
};
/**
Sequence that is known at compile time to be of length one.
\sa math::sequence, math::empty_sequence, math::optional_sequence
*/
template <class Symbol, class Direction> class single_sequence {
private:
Symbol symbol_;
/**
Extract the only symbol from a sequence.
If the sequence has a length other than 1, throw \c magma_not_convertible.
*/
static Symbol get_only_symbol_from (sequence <Symbol, Direction> const & s)
{
if (s.is_annihilator())
throw magma_not_convertible();
auto && symbols = s.symbols();
if (range::empty (symbols) || !range::empty (range::drop (symbols)))
throw magma_not_convertible();
else
return range::first (symbols);
}
public:
/**
Initialise with an explicit symbol.
\todo Add overloads for Symbol && and Symbol &?
*/
explicit single_sequence (Symbol const & symbol)
: symbol_ (symbol) {}
/**
Initialise with a range of symbols, which must have one element.
*/
template <class Range,
class Enable1 = typename
boost::enable_if <range::is_range <Range>>::type,
class Enable2 = typename
utility::disable_if_same_or_derived <Symbol, Range>::type>
explicit single_sequence (Range && range)
: symbol_ (range::first (range))
{ rime::assert_ (range::empty (range::drop (range))); }
/**
Convert from sequence: explicit.
\pre The sequence must contain exactly one element.
\throw magma_not_convertible If the sequence has a length other than 1.
*/
explicit single_sequence (sequence <Symbol, Direction> const & that)
: symbol_ (get_only_symbol_from (that)) {}
rime::false_type empty() const { return rime::false_; }
rime::false_type is_annihilator() const { return rime::false_; }
Symbol const & symbol() const { return symbol_; }
range::tuple <Symbol const &> symbols() const
{ return range::tie (symbol_); }
};
/**
Sequence that is known at compile time to be of length zero or one.
\sa math::sequence
*/
template <class Symbol, class Direction> class optional_sequence {
private:
boost::optional <Symbol> symbol_;
/**
Extract an optional element from a range of length zero or one.
*/
template <class Range>
static boost::optional <Symbol> extract_from_range (Range && range)
{
if (range::empty (range))
return boost::optional <Symbol>();
else {
auto chopped = range::chop (range);
assert (range::empty (chopped.rest()));
return boost::optional <Symbol> (chopped.move_first());
}
}
public:
/**
Initialise empty.
*/
optional_sequence() {}
/**
Initialise with an explicit symbol.
*/
explicit optional_sequence (Symbol const & symbol)
: symbol_ (symbol) {}
/**
Convert empty, from empty_sequence.
*/
optional_sequence (empty_sequence <Symbol, Direction> const & that)
: symbol_ () {}
/**
Convert with one symbol, from single_sequence.
*/
optional_sequence (
single_sequence <Symbol, Direction> const & that)
: symbol_ (that.symbol()) {}
/**
Convert from sequence: explicit.
\pre The sequence must contain either zero or one element.
\throw magma_not_convertible
If the sequence is an annihilator or has a greater length.
*/
explicit optional_sequence (sequence <Symbol, Direction> const & that) {
if (that.is_annihilator())
throw magma_not_convertible();
auto && symbols = that.symbols();
if (!range::empty (symbols)) {
if (!range::empty (range::drop (symbols)))
throw magma_not_convertible();
else
symbol_ = range::first (symbols);
}
assert (that.empty() == this->empty());
}
/**
Initialise with a range of symbols, which must have zero or one elements.
*/
template <class Range,
class Enable1 = typename
boost::enable_if <range::is_range <Range>>::type,
class Enable2 = typename
utility::disable_if_same_or_derived <Symbol, Range>::type>
explicit optional_sequence (Range && range)
: symbol_ (extract_from_range (std::forward <Range> (range))) {}
rime::false_type is_annihilator() const { return rime::false_; }
bool empty() const { return !symbol_; }
boost::optional <Symbol> const & symbol() const { return symbol_; }
typename range::result_of <range::callable::view_optional (
boost::optional <Symbol> const &)>::type
symbols() const
{ return range::view_optional (symbol_); }
};
/**
Sequence that is known at compile time to be the multiplicative annihilator:
Multiplying this with any sequence yields a sequence_annihilator.
\sa math::sequence
*/
template <class Symbol, class Direction> class sequence_annihilator {
public:
sequence_annihilator() {}
/**
Construct from a sequence, which must be an annihilator.
\throw magma_not_convertible If the argument is not an annihilator.
*/
explicit sequence_annihilator (sequence <Symbol, Direction> const & s) {
if (!s.is_annihilator())
throw magma_not_convertible();
}
rime::true_type is_annihilator() const { return rime::true_; }
};
namespace detail {
template <class Type> struct is_sequence_tag : boost::mpl::false_ {};
template <class Symbol, class Direction>
struct is_sequence_tag <sequence_tag <Symbol, Direction>>
: boost::mpl::true_ {};
} // namespace detail
MATH_MAGMA_GENERATE_OPERATORS (detail::is_sequence_tag)
namespace operation {
template <bool value> struct return_bool {
template <class ... Arguments>
rime::bool_ <value> operator() (Arguments const & ...) const
{ return rime::bool_ <value>(); }
};
namespace sequence_detail {
/**
Return the range direction associated with the direction.
<c>range_direction<left>::type</c> is <c>::direction::front</c>.
<c>range_direction<right>::type</c> is <c>::direction::back</c>.
*/
template <class Direction> struct range_direction;
template <> struct range_direction <left>
{ typedef ::direction::front type; };
template <> struct range_direction <right>
{ typedef ::direction::back type; };
} // namespace sequence_detail
/* Queries. */
template <class Symbol, class Direction>
struct equal <sequence_tag <Symbol, Direction>>
{
private:
struct when_first_annihilator {
template <class Sequence1, class Sequence2> auto operator() (
Sequence1 const & sequence1, Sequence2 const & sequence2) const
RETURNS (sequence2.is_annihilator());
};
struct when_neither_annihilator {
template <class Sequence1, class Sequence2> auto operator() (
Sequence1 const & sequence1, Sequence2 const & sequence2) const
RETURNS (range::equal (sequence1.symbols(), sequence2.symbols()));
};
struct when_first_not_annihilator {
template <class Sequence1, class Sequence2> auto operator() (
Sequence1 const & sequence1, Sequence2 const & sequence2) const
RETURNS (rime::call_if (sequence2.is_annihilator(),
return_bool <false>(), when_neither_annihilator(),
sequence1, sequence2));
};
public:
template <class Sequence1, class Sequence2> auto operator () (
Sequence1 const & sequence1, Sequence2 const & sequence2) const
RETURNS (rime::call_if (sequence1.is_annihilator(),
when_first_annihilator(), when_first_not_annihilator(),
sequence1, sequence2));
};
template <class Symbol, class Direction>
struct compare <sequence_tag <Symbol, Direction>>
{
private:
// It is recommended that the "best" element comes first in the strict
// weak order, so the annihilator is last in the sorting order.
struct when_first_annihilator {
template <class Sequence1, class Sequence2> auto operator() (
Sequence1 const & sequence1, Sequence2 const & sequence2) const
RETURNS (rime::false_);
};
struct when_neither_annihilator {
template <class Sequence1, class Sequence2> auto operator() (
Sequence1 const & sequence1, Sequence2 const & sequence2) const
RETURNS (range::less_lexicographical (
sequence1.symbols(), sequence2.symbols(),
typename sequence_detail::range_direction <Direction>::type()));
};
struct when_first_not_annihilator {
template <class Sequence1, class Sequence2> auto operator() (
Sequence1 const & sequence1, Sequence2 const & sequence2) const
RETURNS (rime::call_if (sequence2.is_annihilator(),
return_bool <true>(), when_neither_annihilator(),
sequence1, sequence2));
};
public:
template <class Sequence1, class Sequence2> auto operator () (
Sequence1 const & sequence1, Sequence2 const & sequence2) const
RETURNS (rime::call_if (sequence1.is_annihilator(),
when_first_annihilator(), when_first_not_annihilator(),
sequence1, sequence2));
};
/* Produce. */
template <class Symbol, class Direction, class Sequence>
struct identity <sequence_tag <Symbol, Direction>, callable::times,
Sequence>
{
empty_sequence <Symbol, Direction> operator() () const
{ return empty_sequence <Symbol, Direction>(); }
};
template <class Symbol, class Direction, class Sequence>
struct identity <sequence_tag <Symbol, Direction>, callable::plus,
Sequence>
{
sequence_annihilator <Symbol, Direction> operator() () const
{ return sequence_annihilator <Symbol, Direction>(); }
};
// identity for choose: forward to identity for plus.
template <class Symbol, class Direction, class Sequence>
struct identity <sequence_tag <Symbol, Direction>, callable::choose,
Sequence>
: identity <sequence_tag <Symbol, Direction>, callable::plus, Sequence> {};
template <class Symbol, class Direction, class Sequence>
struct annihilator <sequence_tag <Symbol, Direction>, callable::times,
Sequence>
{
sequence_annihilator <Symbol, Direction> operator() () const
{ return sequence_annihilator <Symbol, Direction>(); }
};
/* Operations. */
/**
To make this a semiring with "choose" and "times", the order for "choose"
prefers shorter sequences and uses a lexicographical comparison as a
tie-breaker.
*/
template <class Symbol, class Direction>
struct order <sequence_tag <Symbol, Direction>, callable::choose>
{
typedef typename sequence_detail::range_direction <Direction>::type
range_direction;
typedef empty_sequence <Symbol, Direction> empty;
typedef single_sequence <Symbol, Direction> single;
typedef optional_sequence <Symbol, Direction> optional;
typedef sequence <Symbol, Direction> sequence_type;
typedef sequence_annihilator <Symbol, Direction> annihilator;
// Left is empty_sequence.
rime::false_type operator() (
empty const & left, empty const & right) const
{ return rime::false_; }
rime::true_type operator() (
empty const & left, single const & right) const
{ return rime::true_; }
bool operator() (empty const & left, optional const & right) const
{ return !right.empty(); }
rime::true_type operator() (
empty const & left, annihilator const & right) const
{ return rime::true_; }
bool operator() (empty const & left, sequence_type const & right) const
{ return right.is_annihilator() || !right.empty(); }
// Left is single_sequence.
rime::false_type operator() (
single const & left, empty const & right) const
{ return rime::false_; }
bool operator() (single const & left, single const & right) const
{ return left.symbol() < right.symbol(); }
bool operator() (single const & left, optional const & right) const {
if (right.empty())
return false;
else
return left.symbol() < right.symbol();
}
rime::true_type operator() (
single const & left, annihilator const & right) const
{ return rime::true_; }
bool operator() (single const & left, sequence_type const & right) const
{
if (right.is_annihilator())
return true;
if (right.empty())
return false;
if (range::size (right.symbols()) == 1)
return left.symbol()
< range::first (right.symbols(), range_direction());
return true;
}
// Left is optional_sequence.
rime::false_type operator() (
optional const & left, empty const & right) const
{ return rime::false_; }
bool operator() (optional const & left, single const & right) const {
if (left.empty())
return true;
else
return left.symbol() < right.symbol();
}
bool operator() (optional const & left, optional const & right) const {
if (left.empty())
return !right.empty();
if (right.empty())
return false;
return left.symbol() < right.symbol();
}
rime::true_type operator() (
optional const & left, annihilator const & right) const
{ return rime::true_; }
bool operator() (optional const & left, sequence_type const & right)
const
{
if (right.is_annihilator())
return true;
if (left.empty())
return !right.empty();
if (right.empty())
return false;
if (range::size (right.symbols()) == 1)
return left.symbol()
< range::first (right.symbols(), range_direction());
return true;
}
// Left is an annihilator.
template <class Sequence>
rime::false_type operator() (
annihilator const & left, Sequence const & right) const
{ return rime::false_; }
// Left is a sequence.
bool operator() (sequence_type const & left, empty const & right) const
{ return !left.is_annihilator() && left.empty(); }
bool operator() (sequence_type const & left, single const & right) const
{
if (left.is_annihilator())
return false;
if (left.empty())
return true;
if (range::size (left.symbols()) == 1)
return range::first (left.symbols(), range_direction())
< right.symbol();
return false;
}
bool operator() (sequence_type const & left, optional const & right)
const
{
if (left.is_annihilator())
return false;
if (left.empty())
return !right.empty();
if (right.empty())
return false;
if (range::size (left.symbols()) == 1)
return range::first (left.symbols(), range_direction())
< right.symbol();
return false;
}
rime::false_type operator() (
sequence_type const & left, annihilator const & right) const
{ return rime::false_; }
bool operator() (
sequence_type const & left, sequence_type const & right) const
{
if (left.is_annihilator())
return false;
if (right.is_annihilator())
return true;
auto && left_symbols = left.symbols();
auto && right_symbols = right.symbols();
if (range::size (left_symbols) < range::size (right_symbols))
return true;
if (range::size (right_symbols) < range::size (left_symbols))
return false;
return range::less_lexicographical (left_symbols, right_symbols);
}
};
// Even though "order" is defined, this overrides the default implementation
// to get the types right.
template <class Symbol, class Direction>
struct choose <sequence_tag <Symbol, Direction>>
: associative, commutative, path_operation
{
typedef typename sequence_detail::range_direction <Direction>::type
range_direction;
typedef empty_sequence <Symbol, Direction> empty;
typedef single_sequence <Symbol, Direction> single;
typedef optional_sequence <Symbol, Direction> optional;
typedef sequence <Symbol, Direction> sequence_type;
typedef sequence_annihilator <Symbol, Direction> annihilator;
struct implementation {
// One argument is known to be an annihilator.
template <class Sequence> Sequence operator() (
annihilator const &, Sequence const & right,
utility::overload_order <1> *) const
{ return right; }
template <class Sequence> Sequence operator() (
Sequence const & left, annihilator const &,
utility::overload_order <2> *) const
{ return left; }
// One argument is known-empty.
template <class Right> empty operator() (
empty const &, Right const &,
utility::overload_order <3> *) const
{ return empty(); }
template <class Left> empty operator() (
Left const &, empty const &,
utility::overload_order <4> *) const
{ return empty(); }
// One argument is known-single.
single operator() (single const & left, single const & right,
utility::overload_order <5> *) const
{
if (left.symbol() < right.symbol())
return left;
else
return right;
}
// Case where one argument is at most one symbol long.
template <class Short, class Long> optional when_short (
Short const & s, Long const & l) const
{
if (l.is_annihilator())
return s;
if (s.empty() || l.empty())
return optional();
if (!range::empty (range::drop (
l.symbols(), range_direction())))
// l is more than one symbol long.
return s;
if (s.symbol() < range::first (l.symbols(), range_direction()))
return s;
else
return optional (l);
}
// One argument is an optional_sequence.
template <class Sequence> optional operator() (
optional const & left, Sequence const & right,
utility::overload_order <6> *) const
{ return when_short (left, right); }
template <class Sequence> optional operator() (
Sequence const & left, optional const & right,
utility::overload_order <7> *) const
{ return when_short (right, left); }
// One argument is a single_sequence.
template <class Sequence> optional operator() (
single const & left, Sequence const & right,
utility::overload_order <6> *) const
{ return when_short (left, right); }
template <class Sequence> optional operator() (
Sequence const & left, single const & right,
utility::overload_order <7> *) const
{ return when_short (right, left); }
// Both are sequences.
sequence_type operator() (
sequence_type const & left, sequence_type const & right,
utility::overload_order <8> *) const
{
if (left.is_annihilator())
return right;
if (right.is_annihilator())
return left;
auto left_size = range::size (left.symbols());
auto right_size = range::size (right.symbols());
if (left_size < right_size)
return left;
if (right_size < left_size)
return right;
if (range::less_lexicographical (
left.symbols(), right.symbols()))
return left;
else
return right;
}
};
template <class Left, class Right> auto operator() (
Left const & left, Right const & right) const
RETURNS (implementation() (left, right, utility::pick_overload()));
};
// Since "choose" selects the shortest sequence first, and compares
// lexicographically as a tie-breaker, sequences in one direction are
// semirings in both directions.
template <class Symbol, class Direction> struct is_semiring <
sequence_tag <Symbol, Direction>, either,
callable::times, callable::choose>
: rime::true_type {};
// Concatenate
template <class Symbol, class Direction>
struct times <sequence_tag <Symbol, Direction>>
: associative
{
private:
typedef sequence_annihilator <Symbol, Direction> annihilator_type;
typedef empty_sequence <Symbol, Direction> empty_type;
typedef single_sequence <Symbol, Direction> single_type;
typedef optional_sequence <Symbol, Direction> optional_type;
typedef sequence <Symbol, Direction> sequence_type;
struct implementation {
// Annihilator: annihilates.
auto operator() (
annihilator_type const & left, annihilator_type const &,
utility::overload_order <1> *) const
RETURNS (left);
template <class Sequence> auto operator() (
annihilator_type const & annihilator, Sequence const &,
utility::overload_order <1> *) const
RETURNS (annihilator);
template <class Sequence> auto operator() (
Sequence const &, annihilator_type const & annihilator,
utility::overload_order <1> *) const
RETURNS (annihilator);
// Empty symbol: return the other one.
auto operator() (
empty_type const & left, empty_type const &,
utility::overload_order <2> *) const
RETURNS (left);
template <class Sequence> auto operator() (
empty_type const &, Sequence const & right,
utility::overload_order <2> *) const
RETURNS (right);
template <class Sequence> auto operator() (
Sequence const & left, empty_type const &,
utility::overload_order <2> *) const
RETURNS (left);
// Non-empty: each operand can be one of three types.
void append_symbols (
std::vector <Symbol> & symbols, single_type const & s) const
{ symbols.push_back (s.symbol()); }
void append_symbols (
std::vector <Symbol> & symbols, optional_type const & s) const
{
if (!s.empty())
symbols.push_back (s.symbol().get());
}
void append_symbols (
std::vector <Symbol> & symbols, sequence_type const & s) const
{
auto && s_symbols = s.symbols();
symbols.insert (symbols.end(),
s_symbols.begin(), s_symbols.end());
}
template <class Sequence1, class Sequence2>
sequence <Symbol, Direction> operator() (
Sequence1 const & sequence1, Sequence2 const & sequence2,
utility::overload_order <3> *) const
{
if (sequence1.is_annihilator())
return sequence1;
if (sequence2.is_annihilator())
return sequence2;
std::vector <Symbol> concatenation;
append_symbols (concatenation, sequence1);
append_symbols (concatenation, sequence2);
return sequence <Symbol, Direction> (std::move (concatenation));
}
};
public:
template <class Left, class Right> auto operator() (
Left const & left, Right const & right) const
RETURNS (implementation() (left, right, utility::pick_overload()));
// This returns the wrong type but is useful for debugging:
/*
template <class Left, class Right> sequence <Symbol, Direction>
operator() (Left const & left, Right const & right) const
{ return implementation() (left, right, utility::pick_overload()); }
*/
};
/**
Addition for sequences: find the longest common prefix (for \a Direction = left)
or the longest common suffix (for \a Direction = right).
*/
template <class Symbol, class Direction>
struct plus <sequence_tag <Symbol, Direction>>
: associative, commutative, idempotent
{
private:
typedef sequence_annihilator <Symbol, Direction> annihilator_type;
typedef empty_sequence <Symbol, Direction> empty_type;
typedef single_sequence <Symbol, Direction> single_type;
typedef optional_sequence <Symbol, Direction> optional_type;
typedef sequence <Symbol, Direction> sequence_type;
struct implementation {
// Annihilator is the additive identity.
template <class Sequence> auto operator() (
annihilator_type const &, Sequence const & right,
utility::overload_order <1> *) const
RETURNS (right);
template <class Sequence> auto operator() (
Sequence const & left, annihilator_type const &,
utility::overload_order <2> *) const
RETURNS (left);
// Empty sequence: return the empty sequence.
template <class Sequence> auto operator() (
empty_type const & empty,
Sequence const &,
utility::overload_order <3> *) const
RETURNS (empty);
template <class Sequence> auto operator() (
Sequence const &, empty_type const & empty,
utility::overload_order <4> *) const
RETURNS (empty);
// Two sequences: different implementation for left and right.
/*
\todo This should really be one implementation for left and right
sequences.
This is easy (take the implementation for the right sequence, and
s/range::back/direction/), except for dealing with "push_back".
This requires range::subrange.
*/
sequence <Symbol, left> operator() (
sequence <Symbol, left> const & sequence1,
sequence <Symbol, left> const & sequence2,
utility::overload_order <5> * pick) const
{
// Annihilator is the additive identity.
if (sequence1.is_annihilator())
return sequence2;
if (sequence2.is_annihilator())
return sequence1;
// This should be replaced by something with range::subrange.
std::vector <Symbol> longest_common_prefix;
auto symbols1 = range::view (sequence1.symbols());
auto symbols2 = range::view (sequence2.symbols());
while (!range::empty (symbols1) && !range::empty (symbols2)
&& range::first (symbols1) == range::first (symbols2))
{
longest_common_prefix.push_back (range::first (symbols1));
symbols1 = range::drop (symbols1);
symbols2 = range::drop (symbols2);
}
return sequence <Symbol, left> (longest_common_prefix);
}
sequence <Symbol, right> operator() (
sequence <Symbol, right> const & sequence1,
sequence <Symbol, right> const & sequence2,
utility::overload_order <5> * pick) const
{
// Annihilator is the additive identity.
if (sequence1.is_annihilator())
return sequence2;
if (sequence2.is_annihilator())
return sequence1;
std::list <Symbol> longest_common_suffix;
auto symbols1 = range::view (sequence1.symbols(), range::back);
auto symbols2 = range::view (sequence2.symbols(), range::back);
while (!range::empty (symbols1, range::back)
&& !range::empty (symbols2, range::back)
&& range::first (symbols1, range::back)
== range::first (symbols2, range::back))
{
longest_common_suffix.push_front (
range::first (symbols1, range::back));
symbols1 = range::drop (symbols1, range::back);
symbols2 = range::drop (symbols2, range::back);
}
return sequence <Symbol, right> (longest_common_suffix);
}
// At least one of the arguments has zero or one elements.
// The common prefix therefore is at most one element long.
static Symbol const & first_symbol (single_type const & s)
{ return s.symbol(); }
static Symbol const & first_symbol (optional_type const & s)
{ return s.symbol().get(); }
static Symbol const & first_symbol (
sequence <Symbol, left> const & s)
{ return range::first (s.symbols(), range::front); }
static Symbol const & first_symbol (
sequence <Symbol, right> const & s)
{ return range::first (s.symbols(), range::back); }
template <class Sequence1, class Sequence2>
optional_type operator() (
Sequence1 const & sequence1, Sequence2 const & sequence2,
utility::overload_order <5> * pick) const
{
typedef optional_type result_type;
// Annihilator is the additive identity.
if (sequence1.is_annihilator())
return result_type (sequence2);
if (sequence2.is_annihilator())
return result_type (sequence1);
if (sequence1.empty() || sequence2.empty())
return result_type();
Symbol const & symbol = first_symbol (sequence1);
if (symbol == first_symbol (sequence2))
return result_type (symbol);
else
return result_type();
}
};
public:
template <class Left, class Right> auto operator() (
Left const & left, Right const & right) const
RETURNS (implementation() (left, right, utility::pick_overload()));
// This returns the wrong type but is useful for debugging:
/*
template <class Left, class Right> sequence <Symbol, Direction>
operator() (Left const & left, Right const & right) const
{ return implementation() (left, right, utility::pick_overload()); }
*/
};
// The direction matches the direction of the sequence:
// left and right sequences are left and right semirings over times and
// plus.
template <class Symbol, class Direction> struct is_semiring <
sequence_tag <Symbol, Direction>, Direction,
callable::times, callable::plus>
: rime::true_type {};
/**
The prefix (or suffix, for right sequences) of the dividend must be equal to the divisor.
\return The dividend without that prefix (or suffix).
*/
template <class Symbol, class Direction>
struct divide <sequence_tag <Symbol, Direction>, Direction>
: throw_if_undefined
{
typedef sequence <Symbol, Direction> sequence_type;
typedef empty_sequence <Symbol, Direction> empty_sequence_type;
typedef single_sequence <Symbol, Direction> single_sequence_type;
typedef optional_sequence <Symbol, Direction> optional_sequence_type;
typedef sequence_annihilator <Symbol, Direction>
sequence_annihilator_type;
struct implementation {
/**
Implementation for the run-time type.
This works for all sequences, but it returns the general type, which
is not efficient.
*/
sequence_type operator() (sequence_type const & dividend,
sequence_type const & divisor, utility::overload_order <10> *)
const
{
if (dividend.is_annihilator()) {
if (divisor.is_annihilator())
throw operation_undefined();
else
return dividend;
} else if (divisor.is_annihilator())
throw divide_by_zero();
// Start at the front of the sequence for a left division, and
// at the back for a right division.
auto start = typename
sequence_detail::range_direction <Direction>::type();
auto divisor_symbols = range::view (divisor.symbols(), start);
auto dividend_symbols = range::view (dividend.symbols(), start);
while (!range::empty (divisor_symbols, start)) {
if (range::empty (dividend_symbols, start))
throw operation_undefined();
if (range::first (dividend_symbols, start)
!= range::first (divisor_symbols, start))
throw operation_undefined();
divisor_symbols = range::drop (divisor_symbols, start);
dividend_symbols = range::drop (dividend_symbols, start);
}
return sequence_type (dividend_symbols);
}
/* Specialisations. */
/* 1. annihilators. */
sequence_type operator() (
sequence_annihilator_type const & dividend,
sequence_annihilator_type const & divisor,
utility::overload_order <1> *) const
{ throw operation_undefined(); }
// Divide by annihilator.
template <class Dividend> sequence_type operator() (
Dividend const & dividend,
sequence_annihilator_type const & divisor,
utility::overload_order <1> *) const
{ throw divide_by_zero(); }
// Divide annihilator by something else.
template <class Divisor> sequence_annihilator_type operator() (
sequence_annihilator_type const & dividend,
Divisor const & divisor, utility::overload_order <1> *) const
{
if (divisor.is_annihilator())
throw operation_undefined();
else
return dividend;
}
/* 2. Empty divisor. */
template <class Dividend> auto operator() (
Dividend const & dividend, empty_sequence_type const & divisor,
utility::overload_order <2> *) const
RETURNS (dividend);
/* 3. Empty dividend. */
template <class Divisor> empty_sequence_type operator() (
empty_sequence_type const & dividend, Divisor const & divisor,
utility::overload_order <3> *) const
{
if (divisor.is_annihilator())
throw divide_by_zero();
else if (!divisor.empty())
throw operation_undefined();
else
return dividend;
}
/* 4. Combinations of single, optional, and general sequence. */
Symbol const & first_symbol (sequence <Symbol, left> const & s)
const
{ return range::first (s.symbols()); }
Symbol const & first_symbol (sequence <Symbol, right> const & s)
const
{ return range::first (s.symbols(), range::back); }
empty_sequence_type operator() (
single_sequence_type const & dividend,
single_sequence_type const & divisor,
utility::overload_order <4> *) const
{
if (dividend.symbol() == divisor.symbol())
return empty_sequence_type();
else
throw operation_undefined();
}
optional_sequence_type operator() (
single_sequence_type const & dividend,
optional_sequence_type const & divisor,
utility::overload_order <4> *) const
{
// Correct case.
if (divisor.empty())
return dividend;
if (dividend.symbol() == divisor.symbol().get())
return empty_sequence_type();
throw operation_undefined();
}
optional_sequence_type operator() (
single_sequence_type const & dividend,
sequence_type const & divisor,
utility::overload_order <4> *) const
{
if (divisor.is_annihilator())
throw divide_by_zero();
// Correct case.
if (divisor.empty())
return dividend;
if (range::size (divisor.symbols()) == 1
&& dividend.symbol() == first_symbol (divisor))
return empty_sequence_type();
// Otherwise, the division is not defined.
throw operation_undefined();
}
// Optional-symbol dividend.
empty_sequence_type operator() (
optional_sequence_type const & dividend,
single_sequence_type const & divisor,
utility::overload_order <4> *) const
{
if (!dividend.empty()
&& dividend.symbol().get() == divisor.symbol())
return empty_sequence_type();
throw operation_undefined();
}
optional_sequence_type operator() (
optional_sequence_type const & dividend,
optional_sequence_type const & divisor,
utility::overload_order <4> *) const
{
if (divisor.empty())
return dividend;
if (!dividend.empty() && !divisor.empty()
&& dividend.symbol() == divisor.symbol())
return empty_sequence_type();
throw operation_undefined();
}
optional_sequence_type operator() (
optional_sequence_type const & dividend,
sequence_type const & divisor,
utility::overload_order <4> *) const
{
if (divisor.is_annihilator())
throw divide_by_zero();
// Correct case.
if (divisor.empty())
return dividend;
if (!dividend.empty()
&& range::size (divisor.symbols()) == 1
&& dividend.symbol() == first_symbol (divisor))
return empty_sequence_type();
// Otherwise, the division is not defined.
throw operation_undefined();
}
// Negotiate between std::vector and the Range library.
std::vector <Symbol> drop_copy (
std::vector <Symbol> const & symbols) const
{
auto result_symbols_source = range::drop (symbols, typename
sequence_detail::range_direction <Direction>::type());
return std::vector <Symbol> (
result_symbols_source.begin(), result_symbols_source.end());
}
// Divisor is a single_sequence or an optional_sequence.
sequence_type operator() (
sequence_type const & dividend,
single_sequence_type const & divisor,
utility::overload_order <4> *) const
{
if (dividend.is_annihilator())
return dividend;
if (dividend.empty())
throw operation_undefined();
if (first_symbol (dividend) != divisor.symbol())
throw operation_undefined();
return sequence_type (drop_copy (dividend.symbols()));
}
sequence_type operator() (
sequence_type const & dividend,
optional_sequence_type const & divisor,
utility::overload_order <4> *) const
{
if (divisor.empty() || dividend.is_annihilator())
return dividend;
if (dividend.empty())
throw operation_undefined();
if (first_symbol (dividend) != divisor.symbol().get())
throw operation_undefined();
return sequence_type (drop_copy (dividend.symbols()));
}
};
template <class Sequence1, class Sequence2> auto operator() (
Sequence1 const & sequence1, Sequence2 const & sequence2) const
RETURNS (
implementation() (sequence1, sequence2, utility::pick_overload()));
};
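/* A hypothetical sketch of the semantics above, with Direction = left: dividing
the sequence [ a b c ] by its prefix [ a b ] yields [ c ], while dividing it by
[ b ], which is not a prefix, throws operation_undefined. */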
template <class Symbol, class Direction>
struct reverse <sequence_tag <Symbol, Direction>, callable::times>
{
typedef typename opposite_direction <Direction>::type other_direction;
sequence <Symbol, other_direction> operator() (
sequence <Symbol, Direction> const & s) const
{
if (s.is_annihilator())
return sequence_annihilator <Symbol, other_direction>();
else {
std::vector <Symbol> symbols = s.symbols();
std::reverse (symbols.begin(), symbols.end());
return sequence <Symbol, other_direction> (std::move (symbols));
}
}
empty_sequence <Symbol, other_direction> operator() (
empty_sequence <Symbol, Direction> const & s) const
{ return empty_sequence <Symbol, other_direction>(); }
single_sequence <Symbol, other_direction> operator() (
single_sequence <Symbol, Direction> const & s) const
{ return single_sequence <Symbol, other_direction> (s.symbol()); }
sequence_annihilator <Symbol, other_direction> operator() (
sequence_annihilator <Symbol, Direction> const & s) const
{ return sequence_annihilator <Symbol, other_direction>(); }
};
template <class Symbol, class Direction>
struct print <sequence_tag <Symbol, Direction>>
{
template <class Stream>
void operator() (Stream & stream,
sequence <Symbol, Direction> const & s) const
{
if (s.is_annihilator())
stream << "<annihilator>";
else {
stream << "[ ";
RANGE_FOR_EACH (symbol, s.symbols())
stream << symbol << " ";
stream << "]";
}
}
template <class Stream>
void operator() (Stream & stream,
empty_sequence <Symbol, Direction> const &) const
{ stream << "[]"; }
template <class Stream>
void operator() (Stream & stream,
single_sequence <Symbol, Direction> const & s) const
{ stream << "[ " << s.symbol() << "]"; }
template <class Stream>
void operator() (Stream & stream,
sequence_annihilator <Symbol, Direction> const &) const
{ stream << "<annihilator>"; }
};
/**
If the sequence types are the same, return that type.
If the sequence types are not the same, return sequence <...> or
optional_sequence <...>.
*/
// Same type.
template <class Symbol, class Direction, class Sequence>
struct unify_type <sequence_tag <Symbol, Direction>, Sequence, Sequence>
{ typedef Sequence type; };
template <class Symbol, class Direction, class Sequence1, class Sequence2>
struct unify_type <sequence_tag <Symbol, Direction>,
Sequence1, Sequence2>
{ typedef sequence <Symbol, Direction> type; };
// Mixing empty/single sequences.
template <class Symbol, class Direction>
struct unify_type <sequence_tag <Symbol, Direction>,
empty_sequence <Symbol, Direction>,
single_sequence <Symbol, Direction>>
{ typedef optional_sequence <Symbol, Direction> type; };
template <class Symbol, class Direction>
struct unify_type <sequence_tag <Symbol, Direction>,
empty_sequence <Symbol, Direction>,
optional_sequence <Symbol, Direction>>
{ typedef optional_sequence <Symbol, Direction> type; };
template <class Symbol, class Direction>
struct unify_type <sequence_tag <Symbol, Direction>,
single_sequence <Symbol, Direction>,
empty_sequence <Symbol, Direction>>
{ typedef optional_sequence <Symbol, Direction> type; };
template <class Symbol, class Direction>
struct unify_type <sequence_tag <Symbol, Direction>,
single_sequence <Symbol, Direction>,
optional_sequence <Symbol, Direction>>
{ typedef optional_sequence <Symbol, Direction> type; };
template <class Symbol, class Direction>
struct unify_type <sequence_tag <Symbol, Direction>,
optional_sequence <Symbol, Direction>,
empty_sequence <Symbol, Direction>>
{ typedef optional_sequence <Symbol, Direction> type; };
template <class Symbol, class Direction>
struct unify_type <sequence_tag <Symbol, Direction>,
optional_sequence <Symbol, Direction>,
single_sequence <Symbol, Direction>>
{ typedef optional_sequence <Symbol, Direction> type; };
} // namespace operation
/*
Hash support for Boost.Hash.
This makes all values (even of different types) that compare equal have equal
hashes.
This means that an annihilator will always have the same (random) hash.
Empty sequences have a hash value defined by range::hash_range.
Single-symbol sequences have the hash value of the one symbol.
Longer sequences use all symbols for the hash value.
*/
namespace sequence_detail {
// A random number that will hopefully not come up much.
static std::size_t constexpr annihilator_hash =
std::size_t (0x84c8fa43d5283350 & std::size_t (-1));
} // sequence_detail
template <class Symbol, class Direction> inline
std::size_t hash_value (
sequence_annihilator <Symbol, Direction> const & s)
{ return sequence_detail::annihilator_hash; }
template <class Symbol, class Direction> inline
std::size_t hash_value (sequence <Symbol, Direction> const & s)
{
if (s.is_annihilator())
return sequence_detail::annihilator_hash;
else
return range::hash_range (s.symbols());
}
template <class Symbol, class Direction> inline
std::size_t hash_value (empty_sequence <Symbol, Direction> const & s)
// Return whatever hash_range returns for empty sequences.
{ return range::hash_range (s.symbols()); }
template <class Symbol, class Direction> inline
std::size_t hash_value (single_sequence <Symbol, Direction> const & s)
{ return boost::hash <Symbol>() (s.symbol()); }
template <class Symbol, class Direction> inline
std::size_t hash_value (optional_sequence <Symbol, Direction> const & s)
{ return range::hash_range (s.symbols()); }
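/* A hypothetical illustration of the guarantee above, assuming the user has
included boost/functional/hash.hpp:

    math::single_sequence <char> one ('a');
    math::sequence <char> general (one);
    assert (hash_value (one) == hash_value (general));
*/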
} // namespace math
#endif // MATH_SEQUENCE_HPP_INCLUDED
|
# python3 simclr_feature_generation.py
# --output_dir /export/medical_ai/ucsf/simclr_rtog/model_resnet50_gp4plus_pretrained_lr=0.05_b=256/features/RTOG-9413_features/
# --input_dir /export/medical_ai/ucsf/RTOG-9413/tissue_pickles_v2/
# --checkpoint_path /export/medical_ai/ucsf/simclr_rtog/model_resnet50_gp4plus_pretrained_lr=0.05_b=256/checkpoint-epoch18.pt
# --base_model resnet50
import tqdm
import torch
import os
import pickle
import numpy as np
import argparse
import torch.backends.cudnn as cudnn
from torchvision import transforms
from torchvision import models
from torch.autograd import Function
from multiprocessing import Process, Queue
from PIL import Image
import multiprocessing
from torch import nn
import time
cudnn.benchmark = True
use_cuda = torch.cuda.is_available()
torch.manual_seed(1)
parser = argparse.ArgumentParser(description='Classifies tissue patches and saves the result per slide in a pickle.')
parser.add_argument('--checkpoint_path', type=str)
parser.add_argument('--output_dir', type=str)
parser.add_argument('--input_dir', type=str)
parser.add_argument('--num_loaders', type=int, default=2)
parser.add_argument('--out_dim', type=int, default=128,
help='Feature dimensionality')
parser.add_argument('--base_model', type=str, default='resnet34')
args = parser.parse_args()
use_cuda = True
num_classes = 6
class ResNetSimCLR(nn.Module):
def __init__(self, base_model, out_dim):
super().__init__()
print(base_model)
if base_model == 'resnet18':
self.backbone = models.resnet18(pretrained=False, num_classes=out_dim)
elif base_model == 'resnet34':
self.backbone = models.resnet34(pretrained=False, num_classes=out_dim)
elif base_model == 'resnet50':
self.backbone = models.resnet50(pretrained=False, num_classes=out_dim)
else:
raise ValueError('Invalid base_model!')
dim_mlp = self.backbone.fc.in_features
# add mlp projection head
self.backbone.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.backbone.fc)
def forward(self, x):
return self.backbone(x)
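# A quick sanity check of the wrapper above (hypothetical shapes; left commented
# out so that the script's behaviour is unchanged):
#   model = ResNetSimCLR('resnet50', out_dim=128)
#   feats = model(torch.randn(2, 3, 224, 224))  # -> torch.Size([2, 128])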
def chunk_it(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
def equal_chunks(l, n):
""" Yield n successive chunks from l."""
newn = int(1.0 * len(l) / n + 0.5)
for i in range(0, n-1):
yield l[i*newn:i*newn+newn]
yield l[n*newn-newn:]
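# Illustration of the two chunking helpers (not executed):
#   list(chunk_it(list(range(10)), 4))     -> [[0,1,2,3], [4,5,6,7], [8,9]]
#   list(equal_chunks(list(range(10)), 3)) -> [[0,1,2], [3,4,5], [6,7,8,9]]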
def prep_data(files, queue, finish_queue, test_transforms):
"""Loads tissue pickle and applies transforms
Args:
files (list): [description]
queue (Queue): data queue to submit preprocessed samples to
finish_queue (Queue): queue to indicate completion
test_transforms (Transforms): list of transforms to apply.
"""
for file in files:
if os.path.exists(output_path + "/" + file):
continue
try:
with open(base_path + file, "rb") as f:
tiles = pickle.load(f)
except Exception:
print("failed: ", file)
continue
processed_imgs = []
for img in tiles:
processed_imgs += [test_transforms(img)]
tiles = torch.stack(processed_imgs).float()
queue.put((tiles, file))
queue.put((None,None))
finish_queue.get()
return
def process_data(queue, num_preprocessing_threads):
"""Loads data from a queue and processing it with a model and saves the feature vectors.
Args:
queue (Queue): Queue that is being filled with data.
num_preprocessing_threads (Int): Number of threads preprocessing.
"""
pbar = tqdm.tqdm(total=len(all_files))
model = ResNetSimCLR(args.base_model, args.out_dim)
# if use_cuda:
# model = torch.nn.DataParallel(model).cuda()
model = torch.nn.DataParallel(model).cuda()
checkpoint = torch.load(args.checkpoint_path)
state_dict = checkpoint.get('state_dict', checkpoint)
model.load_state_dict(state_dict)
model.eval()
processed = []
num_nones = 0
while True:
tiles, filename = queue.get()
if tiles is None:
num_nones += 1
if num_nones == num_preprocessing_threads:
break
continue
batches = list(chunk_it(tiles, 250))
features = []
softmax = []
for batch in batches:
with torch.no_grad():
feats = model(batch.float().cuda())
features += [feats.cpu().detach()]
features = np.array(torch.cat(features))
with open(output_path + "/" + filename, "wb") as f:
pickle.dump(features, f)
pbar.update(1)
pbar.close()
# Resize every tile to 224x224 before converting it to a tensor.
test_transforms = transforms.Compose([transforms.ToPILImage(), transforms.Resize((224,224), Image.NEAREST), transforms.ToTensor()])
print("will resize to 224 by 224")
base_path = args.input_dir
output_path = args.output_dir
all_files = os.listdir(base_path)
worker_files = equal_chunks(all_files, args.num_loaders)
worker_files = list(worker_files)
data_queue = Queue(50)
finish_queue = Queue()
# start preprocessing threads
loader_threads = []
for batch_files in list(worker_files):
loader_thread = Process(target=prep_data, args=[batch_files, data_queue, finish_queue, test_transforms], daemon=True)
loader_thread.start()
loader_threads += [loader_thread]
process_data(data_queue, len(worker_files))
# let preprocessing threads know they can stop. Otherwise the last few objects they create
# are deleted when they exit.
for i in range(len(worker_files)):
finish_queue.put(None)
data_queue.close()
|
Formal statement is: lemma cmod_power2: "(cmod z)\<^sup>2 = (Re z)\<^sup>2 + (Im z)\<^sup>2" Informal statement is: The square of the absolute value of a complex number is the sum of the squares of its real and imaginary parts.
|
Engineers Without Borders is an international nonprofit organization that works to bring simple, sustainable engineering solutions to developing regions of the world. The primary goal of the organization is to undertake projects that will be continued by local agencies and that improve sanitation, water quality, agricultural capacity, and infrastructure for communities.
The UC Davis chapter was established by a group of undergraduate and graduate students in 2005. Members have traveled to Xix, Guatemala; Nkokonjeru, Uganda; and Quincucirca, Bolivia to work on projects, including education about safe drinking water and sanitation.
Projects are supported by fundraising efforts in Davis and by the involved students themselves. Students take an active role in all parts of each project: planning, design of solutions, contact with the local agencies, and construction and implementation of solutions in the community.
If you are interested in becoming a member, check out the http://ewbucd.weebly.com/ webpage or email the officers at [email protected].
General meetings are held on Monday evenings in the third floor conference room of Ghausi Hall. Check the website for exact times and dates.
They run the annual Wine into Water event.
|
/-
Copyright (c) 2022 Julian Kuelshammer. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author : Julian Kuelshammer
-/
import data.int.basic
import tactic.linear_combination
import tactic.linarith
/-
# Quotients in Lean
Upon request, let's try to see how to construct number systems like the integers or the
rational numbers in Lean. Note that this is the mathematical way to do it, not the way it
is actually done in Lean: there, integers are defined as the disjoint union of ℕ with
itself, where the first copy is interpreted as the usual natural numbers while the second
copy is interpreted as the numbers `-(1+n)` where `n : ℕ`. Similarly, ℚ is constructed as
pairs of coprime integers (p,q). This makes them computationally a bit better behaved
than our quotient construction.
## Equivalence relations in Lean
Lean knows what an equivalence relation is. It is a reflexive, symmetric and transitive relation.
A relation on a set `X` is a function `X → X → Prop`, i.e. a function that takes two elements
of a set `X` and outputs a truth value depending on whether they are related or not.
```
def reflexive := ∀ x, x ∼ x
def symmetric := ∀ ⦃x y⦄, x ∼ y → y ∼ x
def transitive := ∀ ⦃x y z⦄, x ∼ y → y ∼ z → x ∼ z
def equivalence := reflexive r ∧ symmetric r ∧ transitive r
```
-/
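/- An illustrative aside (not one of the exercises): the relation `R` below relates
`(a, b)` and `(c, d)` exactly when they represent the same integer `a - b = c - d`;
since subtraction is not well-behaved on ℕ, this is stated additively as
`a + d = c + b`. For instance `(5, 3)` and `(4, 2)` are related, because
`5 + 2 = 4 + 3`, and both pairs stand for the integer `2`. -/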
def R (r s : ℕ × ℕ) : Prop :=
r.1+s.2=s.1+r.2
lemma R_def (r s : ℕ × ℕ) :
R r s ↔ r.1 + s.2 = s.1 + r.2 :=
begin
sorry
end
lemma R_refl : reflexive R :=
begin
sorry
end
lemma R_symm : symmetric R :=
begin
sorry
end
lemma R_trans : transitive R :=
begin
/- The lemma add_right_inj could be helpful at some point. -/
sorry
end
lemma R_equiv : equivalence R :=
begin
sorry
end
/- A setoid on a Type is a relation together with the fact that
this relation is an equivalence relation. -/
instance s : setoid (ℕ × ℕ) :=
_
structure int_plane_non_zero :=
(fst : ℤ) (snd : ℤ) (non_zero : snd ≠ 0)
def S (r s : int_plane_non_zero) : Prop :=
r.1 * s.2 = s.1 * r.2
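/- An illustrative aside: `S` relates `(p, q)` and `(r, s)` (with `q, s ≠ 0`) exactly
when the fractions `p/q` and `r/s` are equal, stated multiplicatively as
`p * s = r * q` to avoid division. For instance the pairs for `1/2` and `2/4` are
related, since `1 * 4 = 2 * 2`. -/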
lemma S_def (r s : int_plane_non_zero) :
S r s ↔ r.1 * s.2 = s.1 * r.2 :=
begin
sorry
end
lemma S_refl : reflexive S :=
begin
sorry
end
lemma S_symm : symmetric S :=
begin
sorry
end
lemma S_trans : transitive S :=
begin
/- The following lemma would be helpful at some point: mul_right_inj' -/
sorry
end
lemma S_equiv : equivalence S :=
begin
sorry
end
instance t : setoid (int_plane_non_zero) :=
_
|
/*
* Copyright (c) 2013-2020, The PurpleI2P Project
*
* This file is part of Purple i2pd project and licensed under BSD3
*
* See full license text in LICENSE file at top of project tree
*/
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <map>
#include <string>
#include <boost/program_options/cmdline.hpp>
#include <boost/program_options/options_description.hpp>
#include <boost/program_options/parsers.hpp>
#include <boost/program_options/variables_map.hpp>
#include "Identity.h"
#include "Config.h"
#include "version.h"
#include "Log.h"
using namespace boost::program_options;
namespace i2p {
namespace config {
options_description m_OptionsDesc;
variables_map m_Options;
void Init()
{
options_description general("General options");
general.add_options()
("help", "Show this message")
("version", "Show i2pd version")
("conf", value<std::string>()->default_value(""), "Path to main i2pd config file (default: try ~/.i2pd/i2pd.conf or /var/lib/i2pd/i2pd.conf)")
("tunconf", value<std::string>()->default_value(""), "Path to config with tunnels list and options (default: try ~/.i2pd/tunnels.conf or /var/lib/i2pd/tunnels.conf)")
("tunnelsdir", value<std::string>()->default_value(""), "Path to extra tunnels' configs folder (default: ~/.i2pd/tunnels.d or /var/lib/i2pd/tunnels.d")
("pidfile", value<std::string>()->default_value(""), "Path to pidfile (default: ~/i2pd/i2pd.pid or /var/lib/i2pd/i2pd.pid)")
("log", value<std::string>()->default_value(""), "Logs destination: stdout, file, syslog (stdout if not set)")
("logfile", value<std::string>()->default_value(""), "Path to logfile (stdout if not set, autodetect if daemon)")
("loglevel", value<std::string>()->default_value("warn"), "Set the minimal level of log messages (debug, info, warn, error, none)")
("logclftime", bool_switch()->default_value(false), "Write full CLF-formatted date and time to log (default: disabled, write only time)")
("family", value<std::string>()->default_value(""), "Specify a family, router belongs to")
("datadir", value<std::string>()->default_value(""), "Path to storage of i2pd data (RI, keys, peer profiles, ...)")
("host", value<std::string>()->default_value("0.0.0.0"), "External IP")
("ifname", value<std::string>()->default_value(""), "Network interface to bind to")
("ifname4", value<std::string>()->default_value(""), "Network interface to bind to for ipv4")
("ifname6", value<std::string>()->default_value(""), "Network interface to bind to for ipv6")
("nat", bool_switch()->default_value(true), "Should we assume we are behind NAT? (default: enabled)")
("port", value<uint16_t>()->default_value(0), "Port to listen for incoming connections (default: auto)")
("ipv4", bool_switch()->default_value(true), "Enable communication through ipv4 (default: enabled)")
("address4", value<std::string>()->default_value(""), "Local address to bind ipv4 transport sockets to")
("ipv6", bool_switch()->default_value(false), "Enable communication through ipv6 (default: disabled)")
("address6", value<std::string>()->default_value(""), "Local address to bind ipv6 transport sockets to")
("reservedrange", bool_switch()->default_value(true), "Check remote RI for being in blacklist of reserved IP ranges (default: enabled)")
("netid", value<int>()->default_value(I2PD_NET_ID), "Specify NetID. Main I2P is 2")
("daemon", bool_switch()->default_value(false), "Router will go to background after start (default: disabled)")
("service", bool_switch()->default_value(false), "Router will use system folders like '/var/lib/i2pd' (default: disabled)")
("notransit", bool_switch()->default_value(false), "Router will not accept transit tunnels at startup (default: disabled)")
("floodfill", bool_switch()->default_value(false), "Router will be floodfill (default: disabled)")
("bandwidth", value<std::string>()->default_value(""), "Bandwidth limit: integer in KBps or letters: L (32), O (256), P (2048), X (>9000)")
("share", value<int>()->default_value(100), "Limit of transit traffic from max bandwidth in percents. (default: 100)")
("ntcp", bool_switch()->default_value(false), "Ignored. Always false")
("ssu", bool_switch()->default_value(true), "Enable SSU transport (default: enabled)")
("ntcpproxy", value<std::string>()->default_value(""), "Ignored")
#ifdef _WIN32
("svcctl", value<std::string>()->default_value(""), "Ignored")
("insomnia", bool_switch()->default_value(false), "Prevent system from sleeping (default: disabled)")
("close", value<std::string>()->default_value("ask"), "Action on close: minimize, exit, ask")
#endif
;
options_description limits("Limits options");
limits.add_options()
("limits.coresize", value<uint32_t>()->default_value(0), "Maximum size of corefile in Kb (0 - use system limit)")
("limits.openfiles", value<uint16_t>()->default_value(0), "Maximum number of open files (0 - use system default)")
("limits.transittunnels", value<uint16_t>()->default_value(2500), "Maximum active transit sessions (default:2500)")
("limits.ntcpsoft", value<uint16_t>()->default_value(0), "Threshold to start probabilistic backoff with ntcp sessions (default: use system limit)")
("limits.ntcphard", value<uint16_t>()->default_value(0), "Maximum number of ntcp sessions (default: use system limit)")
("limits.ntcpthreads", value<uint16_t>()->default_value(1), "Maximum number of threads used by NTCP DH worker (default: 1)")
;
options_description httpserver("HTTP Server options");
httpserver.add_options()
("http.enabled", value<bool>()->default_value(true), "Enable or disable webconsole")
("http.address", value<std::string>()->default_value("127.0.0.1"), "Webconsole listen address")
("http.port", value<uint16_t>()->default_value(7070), "Webconsole listen port")
("http.auth", value<bool>()->default_value(false), "Enable Basic HTTP auth for webconsole")
("http.user", value<std::string>()->default_value("i2pd"), "Username for basic auth")
("http.pass", value<std::string>()->default_value(""), "Password for basic auth (default: random, see logs)")
("http.strictheaders", value<bool>()->default_value(true), "Enable strict host checking on WebUI")
("http.hostname", value<std::string>()->default_value("localhost"), "Expected hostname for WebUI")
("http.webroot", value<std::string>()->default_value("/"), "WebUI root path (default: / )")
("http.lang", value<std::string>()->default_value("english"), "WebUI language (default: english )")
;
options_description httpproxy("HTTP Proxy options");
httpproxy.add_options()
("httpproxy.enabled", value<bool>()->default_value(true), "Enable or disable HTTP Proxy")
("httpproxy.address", value<std::string>()->default_value("127.0.0.1"), "HTTP Proxy listen address")
("httpproxy.port", value<uint16_t>()->default_value(4444), "HTTP Proxy listen port")
("httpproxy.keys", value<std::string>()->default_value("transient-proxy"), "File to persist HTTP Proxy keys. Transient by default")
("httpproxy.signaturetype", value<i2p::data::SigningKeyType>()->
default_value(i2p::data::SIGNING_KEY_TYPE_EDDSA_SHA512_ED25519), "Signature type for new keys. 7 (EdDSA) by default")
("httpproxy.inbound.length", value<std::string>()->default_value("3"), "HTTP proxy inbound tunnel length")
("httpproxy.outbound.length", value<std::string>()->default_value("3"), "HTTP proxy outbound tunnel length")
("httpproxy.inbound.quantity", value<std::string>()->default_value("5"), "HTTP proxy inbound tunnels quantity")
("httpproxy.outbound.quantity", value<std::string>()->default_value("5"), "HTTP proxy outbound tunnels quantity")
("httpproxy.latency.min", value<std::string>()->default_value("0"), "HTTP proxy min latency for tunnels")
("httpproxy.latency.max", value<std::string>()->default_value("0"), "HTTP proxy max latency for tunnels")
("httpproxy.outproxy", value<std::string>()->default_value(""), "HTTP proxy upstream out proxy url")
("httpproxy.addresshelper", value<bool>()->default_value(true), "Enable or disable addresshelper")
("httpproxy.i2cp.leaseSetType", value<std::string>()->default_value("3"), "Local destination's LeaseSet type")
("httpproxy.i2cp.leaseSetEncType", value<std::string>()->default_value("0,4"), "Local destination's LeaseSet encryption type")
("httpproxy.i2cp.leaseSetPrivKey", value<std::string>()->default_value(""), "LeaseSet private key")
;
options_description socksproxy("SOCKS Proxy options");
socksproxy.add_options()
("socksproxy.enabled", value<bool>()->default_value(true), "Enable or disable SOCKS Proxy")
("socksproxy.address", value<std::string>()->default_value("127.0.0.1"), "SOCKS Proxy listen address")
("socksproxy.port", value<uint16_t>()->default_value(4447), "SOCKS Proxy listen port")
("socksproxy.keys", value<std::string>()->default_value("transient-proxy"), "File to persist SOCKS Proxy keys. Transient by default")
("socksproxy.signaturetype", value<i2p::data::SigningKeyType>()->
default_value(i2p::data::SIGNING_KEY_TYPE_EDDSA_SHA512_ED25519), "Signature type for new keys. 7 (EdDSA) by default")
("socksproxy.inbound.length", value<std::string>()->default_value("3"), "SOCKS proxy inbound tunnel length")
("socksproxy.outbound.length", value<std::string>()->default_value("3"), "SOCKS proxy outbound tunnel length")
("socksproxy.inbound.quantity", value<std::string>()->default_value("5"), "SOCKS proxy inbound tunnels quantity")
("socksproxy.outbound.quantity", value<std::string>()->default_value("5"), "SOCKS proxy outbound tunnels quantity")
("socksproxy.latency.min", value<std::string>()->default_value("0"), "SOCKS proxy min latency for tunnels")
("socksproxy.latency.max", value<std::string>()->default_value("0"), "SOCKS proxy max latency for tunnels")
("socksproxy.outproxy.enabled", value<bool>()->default_value(false), "Enable or disable SOCKS outproxy")
("socksproxy.outproxy", value<std::string>()->default_value("127.0.0.1"), "Upstream outproxy address for SOCKS Proxy")
("socksproxy.outproxyport", value<uint16_t>()->default_value(9050), "Upstream outproxy port for SOCKS Proxy")
("socksproxy.i2cp.leaseSetType", value<std::string>()->default_value("3"), "Local destination's LeaseSet type")
("socksproxy.i2cp.leaseSetEncType", value<std::string>()->default_value("0,4"), "Local destination's LeaseSet encryption type")
("socksproxy.i2cp.leaseSetPrivKey", value<std::string>()->default_value(""), "LeaseSet private key")
;
options_description sam("SAM bridge options");
sam.add_options()
("sam.enabled", value<bool>()->default_value(true), "Enable or disable SAM Application bridge")
("sam.address", value<std::string>()->default_value("127.0.0.1"), "SAM listen address")
("sam.port", value<uint16_t>()->default_value(7656), "SAM listen port")
("sam.singlethread", value<bool>()->default_value(true), "Sessions run in the SAM bridge's thread")
;
options_description bob("BOB options");
bob.add_options()
("bob.enabled", value<bool>()->default_value(false), "Enable or disable BOB command channel")
("bob.address", value<std::string>()->default_value("127.0.0.1"), "BOB listen address")
("bob.port", value<uint16_t>()->default_value(2827), "BOB listen port")
;
options_description i2cp("I2CP options");
i2cp.add_options()
("i2cp.enabled", value<bool>()->default_value(false), "Enable or disable I2CP")
("i2cp.address", value<std::string>()->default_value("127.0.0.1"), "I2CP listen address")
("i2cp.port", value<uint16_t>()->default_value(7654), "I2CP listen port")
("i2cp.singlethread", value<bool>()->default_value(true), "Destinations run in the I2CP server's thread")
;
options_description i2pcontrol("I2PControl options");
i2pcontrol.add_options()
("i2pcontrol.enabled", value<bool>()->default_value(false), "Enable or disable I2P Control Protocol")
("i2pcontrol.address", value<std::string>()->default_value("127.0.0.1"), "I2PCP listen address")
("i2pcontrol.port", value<uint16_t>()->default_value(7650), "I2PCP listen port")
("i2pcontrol.password", value<std::string>()->default_value("itoopie"), "I2PCP access password")
("i2pcontrol.cert", value<std::string>()->default_value("i2pcontrol.crt.pem"), "I2PCP connection certificate")
("i2pcontrol.key", value<std::string>()->default_value("i2pcontrol.key.pem"), "I2PCP connection certificate key")
;
bool upnp_default = false;
#if (defined(USE_UPNP) && (defined(WIN32_APP) || defined(ANDROID)))
upnp_default = true; // enable UPNP for windows GUI and android by default
#endif
options_description upnp("UPnP options");
upnp.add_options()
("upnp.enabled", value<bool>()->default_value(upnp_default), "Enable or disable UPnP: automatic port forwarding")
("upnp.name", value<std::string>()->default_value("I2Pd"), "Name i2pd appears in UPnP forwarding list")
;
options_description precomputation("Precomputation options");
precomputation.add_options()
("precomputation.elgamal",
#if defined(__x86_64__)
value<bool>()->default_value(false),
#else
value<bool>()->default_value(true),
#endif
"Enable or disable elgamal precomputation table")
;
options_description reseed("Reseed options");
reseed.add_options()
("reseed.verify", value<bool>()->default_value(false), "Verify .su3 signature")
("reseed.threshold", value<uint16_t>()->default_value(25), "Minimum number of known routers before requesting reseed")
("reseed.floodfill", value<std::string>()->default_value(""), "Path to router info of floodfill to reseed from")
("reseed.file", value<std::string>()->default_value(""), "Path to local .su3 file or HTTPS URL to reseed from")
("reseed.zipfile", value<std::string>()->default_value(""), "Path to local .zip file to reseed from")
("reseed.proxy", value<std::string>()->default_value(""), "url for reseed proxy, supports http/socks")
("reseed.urls", value<std::string>()->default_value(
"https://reseed.i2p-projekt.de/,"
"https://reseed.diva.exchange/,"
"https://reseed-fr.i2pd.xyz/,"
"https://reseed.memcpy.io/,"
"https://reseed.onion.im/,"
"https://i2pseed.creativecowpat.net:8443/,"
"https://reseed.i2pgit.org/,"
"https://i2p.novg.net/"
), "Reseed URLs, separated by comma")
("reseed.yggurls", value<std::string>()->default_value(
"http://[324:9de3:fea4:f6ac::ace]:7070/"
), "Reseed URLs through the Yggdrasil, separated by comma")
;
options_description addressbook("AddressBook options");
addressbook.add_options()
("addressbook.defaulturl", value<std::string>()->default_value(
"http://shx5vqsw7usdaunyzr2qmes2fq37oumybpudrd4jjj4e4vk4uusa.b32.i2p/hosts.txt"
), "AddressBook subscription URL for initial setup")
("addressbook.subscriptions", value<std::string>()->default_value(""), "AddressBook subscriptions URLs, separated by comma")
("addressbook.hostsfile", value<std::string>()->default_value(""), "File to dump addresses in hosts.txt format");
options_description trust("Trust options");
trust.add_options()
("trust.enabled", value<bool>()->default_value(false), "Enable explicit trust options")
("trust.family", value<std::string>()->default_value(""), "Router Family to trust for first hops")
("trust.routers", value<std::string>()->default_value(""), "Only Connect to these routers")
("trust.hidden", value<bool>()->default_value(false), "Should we hide our router from other routers?")
;
// Save deprecated websocket options for compatibility
options_description websocket("Websocket Options");
websocket.add_options()
("websockets.enabled", value<bool>()->default_value(false), "Deprecated option")
("websockets.address", value<std::string>()->default_value(""), "Deprecated option")
("websockets.port", value<uint16_t>()->default_value(0), "Deprecated option")
;
options_description exploratory("Exploratory Options");
exploratory.add_options()
("exploratory.inbound.length", value<int>()->default_value(2), "Exploratory inbound tunnel length")
("exploratory.outbound.length", value<int>()->default_value(2), "Exploratory outbound tunnel length")
("exploratory.inbound.quantity", value<int>()->default_value(3), "Exploratory inbound tunnels quantity")
("exploratory.outbound.quantity", value<int>()->default_value(3), "Exploratory outbound tunnels quantity")
;
options_description ntcp2("NTCP2 Options");
ntcp2.add_options()
("ntcp2.enabled", value<bool>()->default_value(true), "Enable NTCP2 (default: enabled)")
("ntcp2.published", value<bool>()->default_value(true), "Publish NTCP2 (default: enabled)")
("ntcp2.port", value<uint16_t>()->default_value(0), "Port to listen for incoming NTCP2 connections (default: auto)")
("ntcp2.addressv6", value<std::string>()->default_value("::"), "Address to publish NTCP2 with")
("ntcp2.proxy", value<std::string>()->default_value(""), "Proxy URL for NTCP2 transport")
;
options_description nettime("Time sync options");
nettime.add_options()
("nettime.enabled", value<bool>()->default_value(false), "Disable time sync (default: disabled)")
("nettime.ntpservers", value<std::string>()->default_value(
"0.pool.ntp.org,"
"1.pool.ntp.org,"
"2.pool.ntp.org,"
"3.pool.ntp.org"
), "Comma separated list of NTCP servers")
("nettime.ntpsyncinterval", value<int>()->default_value(72), "NTP sync interval in hours (default: 72)")
;
options_description persist("Network information persisting options");
persist.add_options()
("persist.profiles", value<bool>()->default_value(true), "Persist peer profiles (default: true)")
("persist.addressbook", value<bool>()->default_value(true), "Persist full addresses (default: true)")
;
options_description cpuext("CPU encryption extensions options");
cpuext.add_options()
("cpuext.aesni", bool_switch()->default_value(true), "Use auto detection for AESNI CPU extensions. If false, AESNI will be not used")
("cpuext.avx", bool_switch()->default_value(true), "Use auto detection for AVX CPU extensions. If false, AVX will be not used")
("cpuext.force", bool_switch()->default_value(false), "Force usage of CPU extensions. Useful when cpuinfo is not available on virtual machines")
;
options_description meshnets("Meshnet transports options");
meshnets.add_options()
("meshnets.yggdrasil", bool_switch()->default_value(false), "Support transports through the Yggdrasil (deafult: false)")
("meshnets.yggaddress", value<std::string>()->default_value(""), "Yggdrasil address to publish")
;
m_OptionsDesc
.add(general)
.add(limits)
.add(httpserver)
.add(httpproxy)
.add(socksproxy)
.add(sam)
.add(bob)
.add(i2cp)
.add(i2pcontrol)
.add(upnp)
.add(precomputation)
.add(reseed)
.add(addressbook)
.add(trust)
.add(websocket) // deprecated
.add(exploratory)
.add(ntcp2)
.add(nettime)
.add(persist)
.add(cpuext)
.add(meshnets)
;
}
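// Illustrative i2pd.conf fragment matching the options registered above (values
// are examples only; dotted option names map to INI sections):
//   loglevel = info
//   ipv6 = true
//   [httpproxy]
//   port = 4444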
void ParseCmdline(int argc, char* argv[], bool ignoreUnknown)
{
try
{
auto style = boost::program_options::command_line_style::unix_style
| boost::program_options::command_line_style::allow_long_disguise;
style &= ~ boost::program_options::command_line_style::allow_guessing;
if (ignoreUnknown)
store(command_line_parser(argc, argv).options(m_OptionsDesc).style (style).allow_unregistered().run(), m_Options);
else
store(parse_command_line(argc, argv, m_OptionsDesc, style), m_Options);
}
catch (boost::program_options::error& e)
{
ThrowFatal ("Error while parsing arguments: ", e.what());
std::cerr << "args: " << e.what() << std::endl;
exit(EXIT_FAILURE);
}
if (!ignoreUnknown && (m_Options.count("help") || m_Options.count("h")))
{
std::cout << "i2pd version " << I2PD_VERSION << " (" << I2P_VERSION << ")" << std::endl;
std::cout << m_OptionsDesc;
exit(EXIT_SUCCESS);
}
else if (m_Options.count("version"))
{
std::cout << "i2pd version " << I2PD_VERSION << " (" << I2P_VERSION << ")" << std::endl;
std::cout << "Boost version "
<< BOOST_VERSION / 100000 << "." // maj. version
<< BOOST_VERSION / 100 % 1000 << "." // min. version
<< BOOST_VERSION % 100 // patch version
<< std::endl;
#if defined(OPENSSL_VERSION_TEXT)
std::cout << OPENSSL_VERSION_TEXT << std::endl;
#endif
#if defined(LIBRESSL_VERSION_TEXT)
std::cout << LIBRESSL_VERSION_TEXT << std::endl;
#endif
exit(EXIT_SUCCESS);
}
}
void ParseConfig(const std::string& path)
{
if (path == "") return;
std::ifstream config(path, std::ios::in);
if (!config.is_open())
{
ThrowFatal ("Missing or unreadable config file: ", path);
std::cerr << "missing/unreadable config file: " << path << std::endl;
exit(EXIT_FAILURE);
}
try
{
store(boost::program_options::parse_config_file(config, m_OptionsDesc), m_Options);
}
catch (boost::program_options::error& e)
{
ThrowFatal ("Error while parsing config file: ", e.what());
std::cerr << e.what() << std::endl;
exit(EXIT_FAILURE);
};
}
void Finalize()
{
notify(m_Options);
}
bool IsDefault(const char *name)
{
if (!m_Options.count(name))
throw "try to check non-existent option";
if (m_Options[name].defaulted())
return true;
return false;
}
bool GetOptionAsAny(const char *name, boost::any& value)
{
if (!m_Options.count(name))
return false;
value = m_Options[name];
return true;
}
bool GetOptionAsAny(const std::string& name, boost::any& value)
{
return GetOptionAsAny (name.c_str (), value);
}
} // namespace config
} // namespace i2p
|
If $f$ and $g$ are holomorphic on open sets $A$ and $B$, respectively, and $f$ and $g$ agree on $A \cap B$, then the function $h$ defined by $h(z) = f(z)$ if $z \in A$ and $h(z) = g(z)$ if $z \in B$ is holomorphic on $A \cup B$.
|
State Before: S : Type u_1
R : Type u_2
inst✝² : AddMonoidWithOne R
inst✝¹ : SetLike S R
s : S
inst✝ : AddSubmonoidWithOneClass S R
n : ℕ
⊢ ↑n ∈ s State After: no goals Tactic: induction n <;> simp [zero_mem, add_mem, one_mem, *]
|
# Copyright 2020 The TensorStore Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorstore.downsample."""
import numpy as np
import pytest
import tensorstore as ts
pytestmark = pytest.mark.asyncio
async def test_downsample_store_float32():
t = ts.array(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32))
downsampled = ts.downsample(t, [1, 2], method='mean')
np.testing.assert_equal(
np.array([[1.5, 3], [4.5, 6]], dtype=np.float32),
await downsampled.read())
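# Note: with downsample factors [1, 2], each output element is the mean over a
# 1x2 block along axis 1; the trailing odd column forms a partial block, so the
# row [1, 2, 3] becomes [(1 + 2) / 2, 3 / 1] = [1.5, 3].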
async def test_downsample_store_uint32():
t = ts.array(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint32))
downsampled = ts.downsample(t, [2, 2], method='mean')
np.testing.assert_equal(
np.array([[3, 4]], dtype=np.uint32),
await downsampled.read())
async def test_downsample_spec():
t = ts.array(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32))
spec = t.spec()
downsampled_spec = ts.downsample(spec, [1, 2], method='mean')
downsampled = await ts.open(downsampled_spec)
np.testing.assert_equal(
np.array([[1.5, 3], [4.5, 6]], dtype=np.float32),
await downsampled.read())
|
PAR’THA EXPANSE – The surprised crew of the medical ship Blackwell were thrown off their feet by a cloaked minefield that damaged the ship’s port nacelle and scattered high concentrations of verteron particles.
The Blackwell was cut off from the other fleet ships after a second set of explosions from the minefield. Minor injuries were reported across the ship, and the crew reports that it is holding at full stop while working to repair the damage.
Leading up to these events, the Blackwell was making slow progress to the planet Arndall at the behest of House Tadere to give medical assistance to wounded Valcarian soldiers coming off the front line. At the lower warp speeds required in the Expanse, the journey was due to take over two months, giving the crew ample time to prepare before they were waylaid by the minefield.
Other ships in the Andaris Task Force have also encountered a number of dangers in the Par’tha Expanse, starting with a recent shoreleave, during which an area native attempted to assassinate a Starfleet crew member on Deep Space 26. The boy was subdued and held for questioning, pending possible extradition. The investigation into the motive for this attack is ongoing.
After shoreleave, the Atlantis, another ship in the Andaris Task Force, continued work on recent damage to the ship’s computer systems. They brought the ship to operational capacity and welcomed a new commanding officer, Capt. S’Ranna, and a new First Officer, LtCmdr. Udro Nepra.
The Atlantis was then dispatched to Aelann, one of the most prominent trading hubs in the Freeworlds, where it would lead negotiations with Taventa Robotics, which hopes to begin a trade partnership with the Federation.
“This could potentially signal the end of high-risk away teams,” Capt. S’Ranna remarked in regard to the autonomous robots, an exciting prospect considering the danger that many away teams encounter, and surely a comfort to families at home.
|
if !isdefined(:LombScargle) using LombScargle end
function find_n_peaks_in_pgram(period::Vector, power::Vector; num_peaks::Integer = 1, exclude_period_factor::Real = 1.2 )
@assert num_peaks>= 1
peak_periods = zeros(num_peaks)
peaks_found = 1
peak_periods[peaks_found] = period[findmax(power)[2]]
while peaks_found < num_peaks
idx_active = trues(length(period))
for j in 1:length(period)
for i in 1:peaks_found
if peak_periods[i]/exclude_period_factor <= period[j] <= peak_periods[i]*exclude_period_factor
idx_active[j] = false
end # if near a previous peak
end # for over peaks
end # for over periods
peaks_found += 1
peak_periods[peaks_found] = period[idx_active][findmax(power[idx_active])[2]]
end # while more peaks to be found
peak_periods
end
function find_n_peaks(time::Vector, obs::Vector, sigma::Vector; num_peaks::Integer = 1, exclude_period_factor::Real = 1.2 )
pgram = lombscargle(time, obs, sigma)
find_n_peaks_in_pgram(period(pgram), power(pgram), num_peaks=num_peaks, exclude_period_factor=exclude_period_factor )
end
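# A minimal usage sketch (illustrative only; assumes LombScargle.jl is available
# and synthetic data with a known period):
#   t = sort(rand(200)) .* 100.0
#   obs = sin.(2pi .* t ./ 7.5) .+ 0.1 .* randn(200)
#   sigma = fill(0.1, 200)
#   find_n_peaks(t, obs, sigma, num_peaks=2)  # the first peak should be near 7.5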
|
.ds TL "Hardware"
.ds TI "RELEASE NOTES"
.NH "Compatibility Information"
.PP
It is impossible for Mark Williams Company to test more than
a small fraction of the many computers, controllers, BIOSes, disks,
and other devices that purport to be compatible with the IBM PC.
The \*(CO system has been installed on tens of thousands of computers
throughout the world, and we have received reports from many of
our customers who have successfully installed and run \*(CO on
their systems (as well as from the few who could not do so).
.PP
This section names the machines, add-on cards, and BIOSes
that have been reported either to work or not to work
with the \*(CO operating system.
.PP
Before you continue, please note the following caveats:
.PP
.I
First, this is only a partial list of the hardware on which \*(CO runs.
.R
We receive confirmation of new machine configurations almost daily.
If you believe that you have a machine, BIOS, or add-on board that
is \fBnot\fR compatible with \*(CO but is listed below,
please telephone our technical support department.
.PP
.I
Second, manufacturers change their hardware as part of redesign or
product improvement.
.R
These changes can include logic, timing, firmware, or functionality.
Although we do try to support tested
products, Mark Williams Company cannot guarantee compatibility
with products not under its control.
.PP
If you believe that your computer cannot run \*(CO,
please contact the Mark Williams Company technical support department.
\fIIf you do not find your machine in this section, that does not mean
that it will not run \*(CO; odds are that it will.\fR
Whatever happens, please contact Mark Williams Company and
let us know what happened, so we can make your experience available
to future users of \*(CO.
.SH "Compatible Systems"
.PP
The following 386/486 systems have been tested and have
been found to be compatible with \*(CO.
Note that configurations vary, especially with respect to disk controllers,
so not all possible configurations have been tested.
.nf
.sp \n(pDu
.ta 3.5i
ACROS 325SE Notebook ALR PowerFlex, 386SX, 386/220
ALR VEISA Tower 486/33 AMI 386SX, 386
Arche 386/25 AST NB4/25s
AST Premium 386/33 AST Premium 486/33E (EISA)
AT&T 6386 Austin 386SX, 386/33
AUVA VIP, 300, 350/25 Bitwise 33-386 Portable
Cheetah International i486/25 Compaq 386, 386 Portable
CompuAdd 320, 325 Compudyne 386
Computer Directions 386SX Comtex 386/20
Dataworld 386 Dell System 300, 310, 325
Dyna 386/20 EDP 386SX
EPS 386 Five Star 386/20
Gateway 2000 (RLL and ESDI) Gateway 486, 33MHz (IDE)
GCH 386 AT Giga-Byte 386-33
Hauppauge 386 IBM Thinkpad 750P*
Intel 301 Jameco 3550
JDR M386 Leading Edge 386, D3
Leading Technology 386SX Logix 386-25
MAXAR 386 Micro-1 386
Micro-Designs 386, 25MHz Micro Express 386
Micronics 386 Micronics 386SX
Mitsubishi 386 Mitsubishi MAXY 386NOTE
MTEK MS-35, MS-37, MS-41 Mylex MAE486, 33 MHz EISA
Mylex MNE486, 33 MHz EISA Mylex MWS386, 25 MHz ISA
Mylex MTX386, 16/20 MHz ISA Mylex MTI386, 25/33 MHz ISA
NCR 386	NEC 386/25, Powermate 386/20, 386SX
Northgate 386/16, 486
Northgate Elegance 386/25 ISA Northgate Elegance 433e EISA
Novex 386 Olivetti M380
Omega 386/20 Opti 486 WB, 493 chipset motherboard
Optima 386 Packard Bell 610, Axcel 386SX
PC Brand 386/20, 386/25 PC Pros 486
PC Systems 386-20 Philips P3302
Pulse 386-SX SEFCO 16 MHz 386SX
Smart Micro 386 Solution 386
Standard Brands 386-25, 386/SX Sunnytech 386-20
Sys Technologies 386 Tandon 386/20, 386/33
Tandy 4000DX, 4000SX Tera-Tek 386
Texas Instruments Notebook ThinkPad models 350, 360, 750
Tri-Star 386 Unibit DS316
UTI 386 Victor 386, V386S
Wyse 3216 Zenith Supersport 386SX
Zenith Mastersport 386SL Zenith TurboSport 386, 386/33
ZEOS 386, 386SX, 386 Portable ZEOS Notebook 386SX
.fi
.PP
\fI*When used with no virtual consoles or STREAMS.\fR
.SH "Compatible Serial Cards"
.PP
The \*(CO driver
.B asy
supports up to 32 devices (i.e., serial ports) using the same card and driver.
This lets \*(CO support a wealth of serial cards.
The following serial cards, both intelligent and dumb, have been tested with
\*(CO and have been found to work.
.nf
.sp \n(pDu
.ta 3.5i
Applied Digital SIO4
Arnet Multiport 4/8
Arnet TwinPort 2
Bocaboard BB1008
Comtrol Hostess 550 08
Comtrol Hostess 550 16
Connect Tech DFlex-4/8 serial card
Digiboard PC/16 serial card
DigiCHANNEL PC/X
Gtek PCSS-8TX serial card
Sea Level Systems Turbo Comm+2/4/8 serial card
SEFCO serial adapter
Sritek FastCom
Sritek FastCom Plus
Stargate Plus 8 serial card
.fi
.SH "Compatible Disk Controllers and Drives"
.PP
\*(CO supports a variety of disk controllers and drives,
in a variety of formats.
The following controllers and drives
have been tested with \*(CO and have been found to work.
.nf
.sp \n(pDu
.ta 3.5i
Adaptec AHA-1540A SCSI Host Adapter
Adaptec AHA-1542A SCSI Host Adapter
Adaptec AHA-1540B SCSI Host Adapter
Adaptec AHA-1542B SCSI Host Adapter
Adaptec AHA-1542C SCSI Host Adapter
Adaptec AHA-174X SCSI Host Adapter (when jumpered to 154X mode)
Adaptec 2372B RLL Controller
Adaptec 2372C RLL 1:1 Controller
AMI SCSI FAST Controller (100% Adaptec 154X compatible)
Bustek SCSI Host Adapter (100% Adaptec 154X compatible)
Data Technology DTC7287 1:1 RLL
DPT Smart Cache Plus SCSI controller #PM2012B/90/95 (Western Digital emulation)
DPT Smart Connex SCSI Host Adapter (Western Digital emulation)
DTK PTI-217 IDE HD/FD
Galaxy DC 600B Cache (alternate polling only)
Hedaka HED-827-02 IDE controller
Kalok KL3100 IDE drive
Maxtor 7080AT IDE hard-disk drive
Micropolis 1684-07 SCSI hard-disk drive
.\"Mylex DCE376DR EISA SCSI Adapter (WD emulation)
National Computer Ltd NDC545 MFM
Perstor PS180-16FN RLL disk
Seagate ST01, ST02 SCSI Host Adapter
Seagate ST-225, ST-4096 MFM disk
Seagate ST-4144 RLL disk
Seagate ST-296N SCSI disk
Ultrastore Ultra 12 ESDI
Western Digital 930xx series IDE hard disks
Western Digital Caviar 2200 IDE hard disks
Western Digital Piranha hard disks
Western Digital WD1006V-MM2 1:1 MFM
Western Digital WD1006V-SR2 1:1 RLL
Western Digital WD1007-WAH ESDI (feature level F001 with on-board BIOS)
Western Digital WD1007-WAH2 ESDI (feature level F003 with on-board BIOS)
.fi
.PP
Please note that for the DPT Caching ESDI Controller, you must use
alternate polling.
For details on alternate polling, see the chapter on installation in these
notes.
.SH "Compatible CD-ROM Devices"
.PP
\*(CO works with the following CD-ROM devices:
.nf
.sp \n(pDu
.ta 3.5i
Sony CD-ROM model CDU31A, plugged into its own controller
Mitsumi CD-ROM models FX001, FX001 high speed,
FX001D, or LU005, plugged into its own controller.
.fi
Any SCSI CD-ROM plugged into the following controllers:
Adaptec 1542, Seagate ST01/02, Future Domain TMC-845/850/860/875/885,
or Future Domain TMC-840/841/880/881.
.SH "Compatible Tape Devices"
.PP
\*(CO is known to work with the following tape devices:
.PP
Archive Viper 60, 150, 250, and 525 SCSI-tape drives
.br
Conner CS250Q, CS525Q, CS1350Q, CS2000DAT, and CS4000 DAT drives
.br
Conner Python 4520NT, 4521NT, 4330XT, and 4331XT DAT drives
.br
QIC-40 and QIC-80 floppy-tape
drives from Colorado, Archive, Mountain, and Summit, and IBM
.br
.SH "Compatible Video Cards"
.PP
\*(CO supports virtually all EGA, VGA, and SVGA video cards
in both standard and Local Bus versions.
Exceptions are noted in the incompatible list.
Please note that the following applies
.I only
to \*(CO itself:
it does
.I not
apply to the X Window System, which at present supports
a much narrower range of video cards.
.PP
Video cards by the following manufacturers
have been tested with \*(CO, and have been found to work:
.nf
.sp \n(pDu
ATI
BOCA
BTC 1505 Monochrome Graphic Printer Card (does \fInot\fR work with X)
Diamond Speedstar (does \fInot\fR work with X)
DTK Graphicsmith
Genoa
Oak Technologies (does \fInot\fR work with X)
Orchid
Paradise
SEFCO monochrome adapter
Trident
Video 7
Western Digital
.fi
.SH "Compatible Peripherals"
.PP
The following peripheral devices have been tested with \*(CO and have
been found to work.
.nf
.sp \n(pDu
.ta 3.5i
Cherry keyboards
DTK PEI-301 32-bit memory expansion
Honeywell keyboards
IBM monochrome printer card
IBM keyboards
Keytronic KB101 PLUS keyboard
Linksys Parallel Link HLP-100
Linksys Parallel Link HPL-100
MicroSwitch keyboards
NMB Technologies keyboards
Northgate Omnikey 102 keyboard
Syquest SCSI removable cartridge disks
.fi
.SH "Compatible BIOS ROMs"
.PP
The following BIOS ROMs have been tested with \*(CO,
and have been found to be compatible.
.nf
.sp \n(pDu
.ta 3.5i
AMI 386, 486
DTK 386
Mylex 386, 486
OPTI-Modular
PHOENIX 386, 486
PHOENIX 386SX
.fi
.PP
.I
Please note that the AMI 386 BIOS does not present a problem during
installation.
.R
However, due to a bug in the BIOS, certain versions of the AMI 386 BIOS
fail to reset the system correctly when you reboot your system by typing
.BR "<ctrl><alt><del>" .
If you have this BIOS,
press the \fB<reset>\fR button to reboot your system.
.PP
Certain releases of the AMI BIOS fail to correctly
reset the keyboard controller until after the memory test has completed.
On these systems,
you will not be able to prematurely exit from the memory test by using the
.B <Esc>
key, but you may be able to exit by using the
.B "<Num Lock>"
key.
.PP
Certain releases of the AMI 486 BIOS incorrectly diagnose
external cache memory as being bad after rebooting \*(CO via
.B /etc/reboot
or a
.B <ctrl><alt><del>
key sequence.
If your system exhibits these symptoms, press the
reset button to reboot your system.
.PP
Versions of the Award BIOS have an option to load the BIOS into RAM
at start-up.
Enabling this option causes the \*(CO installation to abort at the point
where it attempts to generate a new system.
You should disable this option and restart your system before you attempt
to install \*(CO.
.SH "Incompatible Hardware"
.PP
The following hardware is known \fInot\fP to work with this
release of \*(CO.
.nf
.sp \n(pDu
All mice that use a bus port rather than a COM port
All 8088-, 8086-, and 80286-based computers
All 80386DX-based computers that have a CPU chip labeled ``16-bit only''
ALR Venture 386SX Laptop \(em boot disk fails to install keyboard driver
American Multi-Source model 1004 MFM/RLL
Chicony 101B IDE adapter
Chicony keyboards (cannot be used with \fBnkb\fR loadable keyboard driver)
Dataworld 386/33 (video incompatibility)
Dataworld Laptop 386 model NM325SX (disk incompatibility)
.\"DTK 386 machines
Fujitsu 2612ET IDE hard disk
Gateway NOMAD 425DXL (486 notebook)
IBM MicroChannel PS/1 and PS/2 computers
JETkey v3.0 Fasttest Keyboard BIOS (on some inexpensive clone motherboards)
Keyboards built around the Winbond W83C42 1415QA140792900001 keyboard chip
Laser computers; they use the VTEK BIOS, which has problems associated with it
Microsoft InPort Mouse
Miniscribe IDE interface
NEC G8BUT ESDI disk controller; uses proprietary, on-board BIOS
NEC VGB video card; requires configuration for eight-bit transfer mode
OMTI 8620 disk controller
Orchid Privilege 386SX-16 motherboard
ThinkPad machines, models 700 and 720
VTEK BIOS
Western Digital 1004-27X, 1004-WX1, 1002 series
Western Digital XTGEN, XTGEN+, XTGEN-2, XTGEN-R
XT (i.e., all eight-bit) disk controllers
Zenith Z449 video card (older versions cause panics)
.fi
|
#pragma once
#include <type_traits>
#include <gsl/gsl>
#include "sly/macros.h"
#include "sly/types.h"
template <class T>
struct is_unique_ptr : std::false_type
{};
template <class T, class D>
struct is_unique_ptr<std::unique_ptr<T, D>> : std::true_type
{};
template <class T>
struct unique_ptr_type
{};
template <class T, class D>
struct unique_ptr_type<std::unique_ptr<T, D>>
{
using type = std::unique_ptr<T, D>;
using underlying_type = T;
using delete_type = D;
};
template <class T>
struct is_shared_ptr : std::false_type
{};
template <class T>
struct is_shared_ptr<std::shared_ptr<T>> : std::true_type
{};
template <typename T>
struct is_indexable : std::false_type
{ };
template <typename T, typename A>
struct is_indexable<std::vector<T, A>> : std::true_type
{ };
template <typename T, std::size_t N>
struct is_indexable<std::array<T, N>> : std::true_type
{ };
namespace sly {
typedef s32 StatusCode;
typedef const char* ErrorMessage;
static const StatusCode SUCCESS = (StatusCode)0;
static const StatusCode UNKNOWN = (StatusCode)-1;
template <typename T, typename = typename std::is_reference<T>::type>
struct retval { };
template <typename T>
struct retval<T, std::false_type> {
public:
using type = T;
using type_pointer = typename std::remove_reference<T>::type*;
using type_reference = typename std::remove_reference<T>::type&;
//using unique_ptr = typename is_unique_ptr<T>::value;
retval(T value) :
_value(std::move(value)),
_statusCode(SUCCESS) {}
retval(StatusCode code) : _statusCode(code) {}
//retval(const retval&) = delete;
retval& operator =(const retval&) = delete;
~retval() = default;
template <class V, typename Y = T, typename std::enable_if_t<is_unique_ptr<Y>::value || is_shared_ptr<Y>::value>* = nullptr>
V to() const {
return reinterpret_cast<V>(_value.get());
}
template <class V, typename Y = T, typename std::enable_if_t<!is_unique_ptr<Y>::value && !is_shared_ptr<Y>::value>* = nullptr>
V to() const {
return reinterpret_cast<V>(_value);
}
template<typename V>
operator V() const {
return to<V>();
}
type result() { return _value; }
type_reference ref() { return _value; }
template <typename Y = T, typename std::enable_if_t<is_unique_ptr<Y>::value || is_shared_ptr<Y>::value>* = nullptr>
operator type_reference() const {
return *_value.get();
}
template <typename Y = T, typename std::enable_if_t<is_unique_ptr<Y>::value || is_shared_ptr<Y>::value>* = nullptr>
operator typename unique_ptr_type<Y>::underlying_type&() {
return *_value.get();
}
template <typename Y = T, typename std::enable_if_t<!is_unique_ptr<Y>::value && !is_shared_ptr<Y>::value>* = nullptr>
operator type() { return result(); }
template <typename Y = T, typename std::enable_if_t<std::is_pointer<Y>::value || is_unique_ptr<Y>::value || is_shared_ptr<Y>::value>* = nullptr>
type_reference operator->() { return _value; }
template <typename Y = T, typename std::enable_if_t<!std::is_pointer<Y>::value && !is_unique_ptr<Y>::value && !is_shared_ptr<Y>::value>* = nullptr>
type_pointer operator->() { return &_value; }
//template <typename N, typename Y = T, typename std::enable_if_t<std::is_pointer<Y>::value || is_unique_ptr<Y>::value || is_shared_ptr<Y>::value || is_indexable<Y>::value>* = nullptr>
template<typename N>
auto operator[](const N index) { return _value[index]; }
StatusCode statusCode() const { return _statusCode; }
bool_t succeeded() { return statusCode() == SUCCESS; }
bool_t failed() { return statusCode() != SUCCESS; }
template<typename X>
X as() {
return (X)result;
}
private:
StatusCode _statusCode;
T _value;
};
template <typename T>
struct retval<T, std::true_type> {
public:
using type = T;
using type_pointer = typename std::remove_reference<T>::type*;
retval(T value) :
_value(std::addressof(value)),
_statusCode(SUCCESS) {}
retval(StatusCode code) : _statusCode(code), _value(nullptr) {}
retval(const retval&) = delete;
retval& operator =(const retval&) = delete;
~retval() = default;
T result() { return *_value; }
operator T() { return result(); }
type_pointer operator->() { return _value; }
StatusCode statusCode() const { return _statusCode; }
bool_t succeeded() { return statusCode() == SUCCESS; }
bool_t failed() { return statusCode() != SUCCESS; }
template<typename X>
X as() {
return (X)result();
}
private:
StatusCode _statusCode;
type_pointer _value;
};
template <>
struct retval<void, std::false_type> {
public:
retval() : _statusCode(SUCCESS) {}
retval(StatusCode code) : _statusCode(code) {}
retval(const retval&) = delete;
retval& operator =(const retval&) = delete;
~retval() = default;
void value() const { }
StatusCode statusCode() const { return _statusCode; }
bool_t succeeded() { return statusCode() == SUCCESS; }
bool_t failed() { return statusCode() != SUCCESS; }
template<typename X>
X as() {
return (X)result;
}
private:
StatusCode _statusCode;
};
template <typename T>
retval<T> value(const T& value) {
auto result = retval<T> ( std::move(value) ) ;
return result;
}
template <typename T>
retval<T&> reference(T& value) {
return retval<T&> ( value );
}
retval<void> success();
retval<void> failed();
template <typename T>
retval<T> failed(StatusCode statusCode, ErrorMessage message) {
Expects(statusCode != SUCCESS);
return retval<T> { statusCode };
}
template <typename T>
retval<T> failed() {
return retval<T> { UNKNOWN };
}
}
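// A minimal usage sketch (illustrative only; not part of this header):
//
//   sly::retval<int> parsePort(const std::string& s) {
//       if (s.empty()) return sly::failed<int>();
//       return sly::value(std::stoi(s));
//   }
//
//   auto r = parsePort("8080");
//   if (r.succeeded()) { int port = r.result(); }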
|
from .layer import layer
from .preprocess.util import Flatten
from .optimizers import optimizer
from ml import losses
import numpy as np
optimizers_dict={"sgd":optimizer.Optimizer,"SGD":optimizer.Optimizer}
class Graph():
def __init__(self,flatten=True):
self.in_flag=flatten
self.history=[]
def add(self,ly):
if not isinstance(ly,layer.Layer):
raise ValueError("Not a layer instance")
else:
if not self.history:self.history.append(ly)
else:
self.with_bias=ly.set_bias
ly.set_weights(self.history[-1].get_units)
self.history.append(ly)
@property
def get_graph(self):
print("\t\tNetwork Graph:\n")
for i in self.history:
print(i.__class__.__name__,"\tunits:%d\tactivation:%s\n"%(i.get_units,i.get_activation.__name__))
def train(self,X=None,Y=None,lr=0.1,epochs=10,optimizer=None):
if not optimizer:
raise ValueError("expected an optimizer")
# flatten before handing the data to the optimizer; otherwise the
# flattened array is never used
if self.in_flag:
X=Flatten(X)
if isinstance(optimizer,str):
try:
op=optimizers_dict[optimizer]
except KeyError:
raise ValueError("invalid optimizer provided")
self.opt=op(X,Y,lr,epochs,self.history)
self.opt.optimize()
def next_layer(self,l):return self.history[self.history.index(l)+1]
def prev_layer(self,l):return self.history[self.history.index(l)-1]
def predict(self,X):
class_vect=[]
for j in X:
pred=j
for i in self.history:
pred=i(pred)
class_vect.append(np.argmax(pred))
return class_vect
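# A minimal usage sketch (illustrative only; the concrete layer constructor and
# its arguments are assumptions about this package's API):
#   g = Graph()
#   g.add(layer.Layer(...))  # hidden layer
#   g.add(layer.Layer(...))  # output layer
#   g.get_graph              # print the network graph
#   g.train(X_train, Y_train, lr=0.1, epochs=10, optimizer="sgd")
#   preds = g.predict(X_test)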
|
Early on April 30 a special train from Fort Macleod arrived with police officers and doctors. Premier Frederick Haultain arrived at the disaster site on May 1, where he met with engineers who had investigated the top of Turtle Mountain. Though new fissures had formed at the peak, they felt there was limited further risk to the town; the CPR's chief engineer was convinced that Frank was in imminent danger from another slide. Siding with the latter, Haultain ordered the town evacuated, and the Geological Survey of Canada (GSC) sent two of its top geologists to investigate further. They reported that the slide had created two new peaks on the mountain and that the north peak, overlooking the town, was not in imminent danger of collapse. As a result, the evacuation order was lifted on May 10 and Frank's citizens returned. The North-West Mounted Police, reinforced by officers who arrived from Cranbrook, Macleod and Calgary, kept tight control of the town and ensured that no cases of looting occurred during the evacuation.
|
module TestDay12
import AOC2021.Day12.part1, AOC2021.Day12.part2
using Test
test_input_short = """
dc-end
HN-start
start-kj
dc-start
dc-HN
LN-dc
HN-end
kj-sa
kj-HN
kj-dc"""
test_input_long = """
fs-end
he-DX
fs-he
start-DX
pj-DX
end-zg
zg-sl
zg-pj
pj-he
RW-he
fs-DX
pj-RW
zg-RW
start-pj
he-WI
zg-he
pj-fs
start-RW"""
@testset "Day 12" begin
@testset "part 1" begin
@test part1(test_input_short) == 19
@test part1(test_input_long) == 226
end
@testset "part 2" begin
@test part2(test_input_short) == 103
@test part2(test_input_long) == 3509
end
end
end # module
|
module TestOperations
using Test
using MLJBase
using ..Models
@testset "errors for deserialized machines" begin
filename = joinpath(@__DIR__, "machine.jlso")
m = machine(filename)
@test_throws ArgumentError predict(m)
end
@testset "error for operations on nodes" begin
X = source()
m = machine(OneHotEncoder(), X)
@test_throws ArgumentError transform(m)
end
end
true
|
#include <boost/test/unit_test.hpp>
#include "logs.h"
#include "route.h"
#include "track.h"
using namespace GPS;
// Documentation is provided in the form of comments throughout this file.
/********************************************
* Test Suite for TotalHeightGain - N0693955 *
* *******************************************/
/*
* The function of this test suite is to test the correctness
* of the totalHeightGain function which I implemented. It aims to
* check for typical, atypical, and edge case scenarios for the
* totalHeightGain function.
*/
/*
* STRUCTURE OF THE TEST SUITE
*
* Test Case 1 is a general purpose test case that tests the basic functionality.
*
* Test Cases 2-3 handle testing for inclination and declination above sea level.
*
* Test Cases 4-5 handle testing for inclination and declination below sea level
*
* Test Case 6 handles testing for inclination from below sea level to above sea level.
*
* Test Case 7 handles testing for declination from above sea level to below sea level.
*
* Test Case 8 handles testing for points below sea level that don't follow a continuous inclination.
*
* Test Case 9 handles testing for points above sea level that don't follow a continuous declination.
*
* Test Case 10 handles a singular point.
*
*/
BOOST_AUTO_TEST_SUITE( TotalHeightGain_n0693955 )
const bool isFileName = true;
/*************
* TEST CASES *
**************/
/***********************************************************************************/
//TEST CASE 1
/*
* This test case checks the usual functionality of the function,
* wherein the function ignores negative height differences and
* adds the positive height differences
*/
BOOST_AUTO_TEST_CASE( ABBM )
{
Route route = Route(LogFiles::GPXRoutesDir + "ABBM-n0693955.gpx", isFileName);
BOOST_CHECK_EQUAL(route.totalHeightGain(), 50);
}
/***********************************************************************************/
//TEST CASE 2
/*
* This test case checks if the function evaluates the height values
* correctly when the heights are inclining in succession. Since all the
* differences in heights will give a positive non-zero value, these
* differences should be added to the total height gain.
*
* Possible Scenario: Going on an inclination from sea level to above sea level.
*/
BOOST_AUTO_TEST_CASE( IncliningHeights1 )
{
Route route = Route(LogFiles::GPXRoutesDir + "IncliningHeights1-n0693955.gpx", isFileName);
BOOST_CHECK_EQUAL(route.totalHeightGain(), 200);
}
/***********************************************************************************/
//TEST CASE 3
/*
* This test case checks if the function evaluates the height values
* correctly when the heights are declining in succession. Since all the
* differences in height values would be negative in this scenario,
* these differences should not be added to the total height gain.
*
* Possible Scenario: Going on a declination from above sea level to sea level.
*/
BOOST_AUTO_TEST_CASE( DecliningHeights1 )
{
Route route = Route(LogFiles::GPXRoutesDir + "DecliningHeights1-n0693955.gpx", isFileName);
BOOST_CHECK_EQUAL(route.totalHeightGain(), 0);
}
/***********************************************************************************/
//TEST CASE 4
/*
* This test case checks if the function evaluates the height values
* correctly when the heights are inclining in succession. Since all the
* height values here are negative, this test case checks if it evaluates the
* difference in each case correctly.
*
* Possible Scenario: Going on an inclination from below sea level to sea level.
*/
BOOST_AUTO_TEST_CASE( IncliningHeights2 )
{
Route route = Route(LogFiles::GPXRoutesDir + "IncliningHeights2-n0693955.gpx", isFileName);
BOOST_CHECK_EQUAL(route.totalHeightGain(), 200);
}
/***********************************************************************************/
//TEST CASE 5
/*
* This test case checks if the function evaluates the height values
* correctly when the heights are declining in succession. Since all the values
* here are negative, this test case checks if it evaluates the difference
* in each case correctly.
*
* Possible Scenario: Going on a declination from sea level to below sea level.
*/
BOOST_AUTO_TEST_CASE( DecliningHeights2 )
{
Route route = Route(LogFiles::GPXRoutesDir + "DecliningHeights2-n0693955.gpx", isFileName);
BOOST_CHECK_EQUAL(route.totalHeightGain(), 0);
}
/***********************************************************************************/
//TEST CASE 6
/*
* This test case checks if the function evaluates the difference in
* elevations correctly when the height being subtracted is a
* negative value.
*
* Possible Scenario: Going on an inclination from below sea level to above sea level.
*/
BOOST_AUTO_TEST_CASE( STG )
{
Route route = Route(LogFiles::GPXRoutesDir + "STG-n0693955.gpx", isFileName);
BOOST_CHECK_EQUAL(route.totalHeightGain(), 200);
}
/***********************************************************************************/
//TEST CASE 7
/*
* This purpose of this test case is similar to the previous one, however in this case
* the height being subtracted initially is not a negative elevation value.
*
* Possible Scenario: Going on a declination from above sea level to below sea level.
*/
BOOST_AUTO_TEST_CASE( GTS )
{
Route route = Route(LogFiles::GPXRoutesDir + "GTS-n0693955.gpx", isFileName);
BOOST_CHECK_EQUAL(route.totalHeightGain(), 0);
}
/***********************************************************************************/
//TEST CASE 8
/*
* This test case checks if the function evaluates correctly when all the
* height values present are negative.
*
* Possible Scenario: All points below sea level.
*/
BOOST_AUTO_TEST_CASE( AllNegatives )
{
Route route = Route(LogFiles::GPXRoutesDir + "AllNegatives-n0693955.gpx", isFileName);
BOOST_CHECK_EQUAL(route.totalHeightGain(), 50);
}
/***********************************************************************************/
//TEST CASE 9
/*
* This test case checks if the function evaluates correctly when all the
* height values present are positive.
*
* Possible Scenario: All points above sea level.
*/
BOOST_AUTO_TEST_CASE( AllPositives )
{
Route route = Route(LogFiles::GPXRoutesDir + "AllPositives-n0693955.gpx", isFileName);
BOOST_CHECK_EQUAL(route.totalHeightGain(), 100);
}
/***********************************************************************************/
//TEST CASE 10
/*
* This test case disregards the data if there is only one point,
* and therefore, only one height value. The total height gain
* cannot be calculated if there is only one value available for
* a subtraction where two values must be present.
*
* Possible Scenario: Singular point, regardless of whether it is above, below, or
* at sea level.
*
* NOTE: This test case is set up to throw a std::invalid_argument exception, as the GPX file
* holds just one height value.
*/
BOOST_AUTO_TEST_CASE ( SingleHeight )
{
Route route = Route(LogFiles::GPXRoutesDir + "SingleHeight-n0693955.gpx", isFileName);
BOOST_CHECK_THROW(route.totalHeightGain(), std::invalid_argument);
}
/***********************************************************************************/
BOOST_AUTO_TEST_SUITE_END()
|
\documentclass[12pt,openany]{book}
\usepackage{lmodern}
\usepackage{amssymb,amsmath}
\usepackage{ifxetex,ifluatex}
\usepackage{fixltx2e} % provides \textsubscript
\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\else % if luatex or xelatex
\ifxetex
\usepackage{mathspec}
\else
\usepackage{fontspec}
\fi
\defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase}
\fi
% use upquote if available, for straight quotes in verbatim environments
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
% use microtype if available
\IfFileExists{microtype.sty}{%
\usepackage{microtype}
\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
}{}
\usepackage[margin=1in]{geometry}
\usepackage{hyperref}
\hypersetup{unicode=true,
pdftitle={Analysis of the automobile-loss-prediction dataset},
pdfauthor={Jared Musil \& Jake McNair},
pdfborder={0 0 0},
breaklinks=true}
\urlstyle{same} % don't use monospace font for urls
\usepackage{natbib}
\bibliographystyle{apalike}
\usepackage{longtable,booktabs}
\usepackage{graphicx,grffile}
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi}
\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi}
\makeatother
% Scale images if necessary, so that they will not overflow the page
% margins by default, and it is still possible to overwrite the defaults
% using explicit options in \includegraphics[width, height, ...]{}
\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
\IfFileExists{parskip.sty}{%
\usepackage{parskip}
}{% else
\setlength{\parindent}{0pt}
\setlength{\parskip}{6pt plus 2pt minus 1pt}
}
\setlength{\emergencystretch}{3em} % prevent overfull lines
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{5}
% Redefines (sub)paragraphs to behave more like sections
\ifx\paragraph\undefined\else
\let\oldparagraph\paragraph
\renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}}
\fi
\ifx\subparagraph\undefined\else
\let\oldsubparagraph\subparagraph
\renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}}
\fi
%%% Use protect on footnotes to avoid problems with footnotes in titles
\let\rmarkdownfootnote\footnote%
\def\footnote{\protect\rmarkdownfootnote}
%%% Change title format to be more compact
\usepackage{titling}
% Create subtitle command for use in maketitle
\newcommand{\subtitle}[1]{
\posttitle{
\begin{center}\large#1\end{center}
}
}
\setlength{\droptitle}{-2em}
\title{Analysis of the ``automobile-loss-prediction'' dataset}
\pretitle{\vspace{\droptitle}\centering\huge}
\posttitle{\par}
\subtitle{Illinois State University - ACC 471 - Final Report}
\author{Jared Musil \& Jake McNair}
\preauthor{\centering\large\emph}
\postauthor{\par}
\predate{\centering\large\emph}
\postdate{\par}
\date{2017-11-30}
\usepackage{booktabs}
\usepackage{amsthm}
\makeatletter
\def\thm@space@setup{%
\thm@preskip=8pt plus 2pt minus 4pt
\thm@postskip=\thm@preskip
}
\makeatother
\begin{document}
\maketitle
{
\setcounter{tocdepth}{1}
\tableofcontents
}
\chapter{Introduction}\label{intro}
The ability to utilize analytics to predict automobile losses is an
area of active research and application throughout the insurance and
fin-tech industries. All of the ``big four'' US-domiciled auto insurers
(State Farm, Geico, Allstate, and Progressive) are actively engaging
in research to operationalize analytical models and increase operational
efficiency {[}citation needed\ldots{}{]}. This dataset is representative
of claims data common to all of these auto insurance providers, and the
industry at large.
From a consumer standpoint, this has the potential to reduce average
claim times, reduce premium costs, and improve claims decisions (total
loss, not total loss).
Throughout this report, the columns of our dataset will be referred to as
factors, and the rows of our dataset will be referred to as records.
This follows the terminology used by the R statistical
programming language, which was the analytical tool used in this report.
R was chosen to allow for reproducible research and full transparency
of the methods used to arrive at our conclusions. The code itself has
been omitted from the report for brevity, but is available for review
and reuse at the following URL:
\url{https://github.com/jaredmusil/acc471-final-report}
\chapter{Problem Description}\label{problem-description}
This data set consists of three types of entities: (a) the specification
of an auto in terms of various characteristics, (b) its assigned
insurance risk rating, (c) its normalized losses in use as compared to
other cars. The second rating corresponds to the degree to which the
auto is more risky than its price indicates. Cars are initially assigned
a risk-factor symbol associated with their price. Then, if a car is more
risky (or less), this symbol is adjusted by moving it up (or down) the
scale. Actuaries call this process ``symboling''. A value of +3
indicates that the auto is risky, and -3 that it is probably pretty safe.
\includegraphics{bookdown-demo_files/figure-latex/risk-scale-1.pdf}
The third factor is the relative average loss payment per insured
vehicle year. This value is normalized for all autos within a particular
size classification (two-door small, station wagons, sports/speciality,
etc\ldots{}), and represents the average loss per car per year.
\chapter{Data}\label{data}
Before doing any analysis, the factors within the dataset were first
checked for missing or invalid data. The individual factors can be
described as follows: 15 continuous, 10 nominal, and 1 integer.
Seven of the factors contained missing or improperly coded data. In this
dataset in particular, all missing data has been coded with the value of
\texttt{?}. In all cases below, the records containing the missing data
have been removed.
\begin{longtable}[]{@{}lll@{}}
\toprule
Index & Factor & Number of records missing a value\tabularnewline
\midrule
\endhead
2 & normalized-losses & 41\tabularnewline
6 & num-of-doors & 2\tabularnewline
19 & bore & 4\tabularnewline
20 & stroke & 4\tabularnewline
22 & horsepower & 2\tabularnewline
23 & peak-rpm & 2\tabularnewline
26 & price & 4\tabularnewline
\bottomrule
\end{longtable}
Of the original 205 records, 41 were removed because they contained
missing data for the \texttt{normalized-losses} factor, which was coded
as a \texttt{?}. This resulted in a dataset of 164 records of clean
data. No other factors needed cleaning up, as the data was properly
coded for each record.
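A minimal sketch of this cleaning step in R (the data-frame name
\texttt{autos} is illustrative; the actual code is available in the
repository linked above):
\begin{verbatim}
# Sketch: treat "?" as missing, then drop incomplete records
autos[autos == "?"] <- NA
autos <- autos[complete.cases(autos), ]
\end{verbatim}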
\begin{table}
\caption{\label{tab:data-dictionary}Data Dictionary - Initial}
\centering
\begin{tabular}[t]{rlll}
\toprule
N & Description & Values & Keep\\
\midrule
1 & symboling & -3, -2, -1, 0, 1, 2, 3 & No\\
2 & normalized-losses & continuous from [65 to 256] & Yes\\
3 & make & alfa-romero, audi, bmw, chevrolet, dodge, honda, isuzu, jaguar, mazda, mercedes-benz, mercury, mitsubishi, nissan, peugot, plymouth, porsche, renault, saab, subaru, toyota, volkswagen, volvo & Yes\\
4 & fuel-type & diesel, gas & Yes\\
5 & aspiration & std, turbo & Yes\\
\addlinespace
6 & num-of-doors & four, two & Yes\\
7 & body-style & hardtop, wagon, sedan, hatchback, convertible & Yes\\
8 & drive-wheels & 4wd, fwd, rwd & Yes\\
9 & engine-location & front, rear & Yes\\
10 & wheel-base & continuous from [86.6 to 120.9] & Yes\\
\addlinespace
11 & length & continuous from [141.1 to 208.1] & Yes\\
12 & width & continuous from [60.3 to 72.3] & Yes\\
13 & height & continuous from [47.8 to 59.8] & Yes\\
14 & curb-weight & continuous from [1488 to 4066] & Yes\\
15 & engine-type & dohc, dohcv, l, ohc, ohcf, ohcv, rotor & Yes\\
\addlinespace
16 & num-of-cylinders & eight, five, four, six, three, twelve, two & Yes\\
17 & engine-size & continuous from [61 to 326] & Yes\\
18 & fuel-system & 1bbl, 2bbl, 4bbl, idi, mfi, mpfi, spdi, spfi & Yes\\
19 & bore & continuous from [2.54 to 3.94] & Yes\\
20 & stroke & continuous from [2.07 to 4.17] & Yes\\
\addlinespace
21 & compression-ratio & continuous from [7 to 23] & Yes\\
22 & horsepower & continuous from [48 to 288] & Yes\\
23 & peak-rpm & continuous from [4,150 to 6,600] & Yes\\
24 & city-mpg & continuous from [13 to 49] & Yes\\
25 & highway-mpg & continuous from [16 to 54] & Yes\\
26 & price & continuous from [5,118 to 45,400] & Yes\\
\bottomrule
\end{tabular}
\end{table}
Of these factors, 10 of the initial 26 were removed, resulting in the 16
factors that will be used in analysis. These factors are noted in the
\texttt{Keep} column of the table above.
The objective factor in the dataset is determined to be
\texttt{symboling}.
Next, the data was partitioned into three groups named \emph{training},
\emph{test}, and \emph{validation}, so that models could be fit, tuned,
and evaluated on separate data; a sketch of one way to produce such a
split follows the summary of the cleaned dataset below.
\begin{verbatim}
## X1 X2 X3 X4 X5
## Min. :-2.0000 Min. : 65 toyota :31 diesel: 15 std :136
## 1st Qu.: 0.0000 1st Qu.: 94 nissan :18 gas :149 turbo: 28
## Median : 1.0000 Median :115 mazda :15
## Mean : 0.7927 Mean :122 honda :13
## 3rd Qu.: 2.0000 3rd Qu.:150 subaru :12
## Max. : 3.0000 Max. :256 volvo :11
## (Other):64
## X6 X7 X8 X9 X10
## ? : 1 convertible: 2 4wd: 8 front:164 Min. : 86.60
## four:95 hardtop : 5 fwd:106 1st Qu.: 94.50
## two :68 hatchback :60 rwd: 50 Median : 96.55
## sedan :80 Mean : 98.16
## wagon :17 3rd Qu.:100.40
## Max. :115.60
##
## X11 X12 X13 X14 X15
## Min. :141.1 Min. :60.3 Min. :49.40 Min. :1488 dohc : 8
## 1st Qu.:165.7 1st Qu.:64.0 1st Qu.:52.00 1st Qu.:2091 l : 8
## Median :172.0 Median :65.4 Median :54.10 Median :2368 ohc :124
## Mean :172.2 Mean :65.6 Mean :53.77 Mean :2458 ohcf : 12
## 3rd Qu.:177.8 3rd Qu.:66.5 3rd Qu.:55.50 3rd Qu.:2786 ohcv : 8
## Max. :202.6 Max. :71.7 Max. :59.80 Max. :4066 rotor: 4
##
## X16 X17 X18 X19 X20
## eight: 1 Min. : 61.0 1bbl:11 3.62 :20 3.03 :14
## five : 7 1st Qu.: 97.0 2bbl:63 3.15 :15 3.15 :14
## four :137 Median :109.0 4bbl: 3 3.19 :15 3.4 :13
## six : 14 Mean :118.0 idi :15 2.97 :12 3.23 :12
## three: 1 3rd Qu.:131.8 mfi : 1 3.03 :10 2.64 :11
## two : 4 Max. :258.0 mpfi:66 2.91 : 7 3.29 : 9
## spdi: 5 (Other):85 (Other):91
## X21 X22 X23 X24
## Min. : 7.00 Min. : 48.00 Min. :4150 Min. :15.00
## 1st Qu.: 8.70 1st Qu.: 69.00 1st Qu.:4800 1st Qu.:22.00
## Median : 9.00 Median : 91.00 Median :5200 Median :26.00
## Mean :10.13 Mean : 96.21 Mean :5138 Mean :26.27
## 3rd Qu.: 9.40 3rd Qu.:114.00 3rd Qu.:5500 3rd Qu.:31.00
## Max. :23.00 Max. :200.00 Max. :6600 Max. :49.00
##
## X25 X26
## Min. :18.00 Min. : 5118
## 1st Qu.:28.00 1st Qu.: 7446
## Median :32.00 Median : 9268
## Mean :31.85 Mean :11467
## 3rd Qu.:37.00 3rd Qu.:14559
## Max. :54.00 Max. :35056
##
\end{verbatim}
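One way such a partition might be produced in R (a sketch only; the
split proportions are an assumption, as the report does not state them):
\begin{verbatim}
# Sketch: random 50/30/20 split into training/validation/test sets
set.seed(471)
grp <- sample(c("training", "validation", "test"), nrow(autos),
              replace = TRUE, prob = c(0.5, 0.3, 0.2))
training   <- autos[grp == "training", ]
validation <- autos[grp == "validation", ]
test       <- autos[grp == "test", ]
\end{verbatim}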
\chapter{Methods Used}\label{methods-used}
A number of analytical methods are available for use, such as decision
trees, classification trees, regression, and multiple regression. Not all
of these techniques make sense for our purposes, as they are used to
predict different types of information.
We utilized \texttt{X} methods in our analysis, while settling on
regression trees for our final recommendation.
The main goal of our analysis is to predict how risky a particular car
is, and therefore regression trees make the most sense.
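For reference, a regression tree of this kind can be fit in R with the
\texttt{rpart} package (a sketch under the assumption that the cleaned
data frame is named \texttt{autos}; the report's actual code is in the
linked repository):
\begin{verbatim}
# Sketch: fit a regression tree for symboling, then prune it at the
# complexity value that minimizes cross-validated error
library(rpart)
names(autos) <- make.names(names(autos))  # normalized-losses -> normalized.losses
fit <- rpart(symboling ~ ., data = autos, method = "anova")
best_cp <- fit$cptable[which.min(fit$cptable[, "xerror"]), "CP"]
pruned <- prune(fit, cp = best_cp)
\end{verbatim}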
\begin{figure}[htbp]
\centering
\includegraphics{img/04-decision-tree-flowchart.png}
\caption{\textbf{Source:}
\url{http://www.simafore.com/blog/bid/62482/2-main-differences-between-classification-and-regression-trees}}
\end{figure}
\chapter{Results}\label{results}
\ldots{}
\section{Regression Tree}\label{regression-tree}
\includegraphics{bookdown-demo_files/figure-latex/regression-tree-1.pdf}
\includegraphics{bookdown-demo_files/figure-latex/regression-tree-pruned-1.pdf}
\ldots{}
\textbf{Lift Chart}
\ldots{}
\textbf{Decile Chart}
\ldots{}
\section{Classification Tree}\label{classification-tree}
\textbf{Lift Chart}
\ldots{}
\textbf{Decile Chart}
\ldots{}
\chapter{Recommendations}\label{recommendations}
\ldots{}
\chapter{Future Analysis}\label{future-analysis}
As with any data analysis, the quality of the input data will determine
the quality of the resulting models. In this case we started with 26
factors. A good way to increase the quality of the model would be to
provide it with more factors and potentially more levels within the
factors.
All of this data also relates only to the automobile itself, and
does not account for the individual driving it. While some behavioral
and demographic factors, such as race and religion, are protected by
federal law from being used in analysis (CITE), others, such as gender,
are allowed. Including these behavioral factors as inputs into the model
would be an opportunity to strengthen the existing model. Technology, and
in particular the increase of telematics within vehicles and
internet-of-things (IoT) connected devices, will increase the ubiquity
and variety of this data stream. With the advances in autonomous
vehicles, behavioral factors may impact results less, but this is
something to monitor for the future of auto risk classification.
\chapter{Conclusion}\label{conclusion}
Given the results of this analysis, we
\bibliography{packages,book}
\end{document}
|
The imaginary part of $rcis(r, a)$ is $r \sin(a)$.
|
module Main
import Minesweeper.REPL
main : IO ()
main = repl
|
proposition homotopic_paths_assoc: "\<lbrakk>path p; path_image p \<subseteq> s; path q; path_image q \<subseteq> s; path r; path_image r \<subseteq> s; pathfinish p = pathstart q; pathfinish q = pathstart r\<rbrakk> \<Longrightarrow> homotopic_paths s (p +++ (q +++ r)) ((p +++ q) +++ r)"
|
[STATEMENT]
lemma monadP_qbs_empty_iff:
"qbs_space X = {} \<longleftrightarrow> qbs_space (monadP_qbs X) = {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (qbs_space X = {}) = (qbs_space (monadP_qbs X) = {})
[PROOF STEP]
proof auto
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>x. \<lbrakk>qbs_space X = {}; x \<in> monadP_qbs_Px X\<rbrakk> \<Longrightarrow> False
2. \<And>x. \<lbrakk>monadP_qbs_Px X = {}; x \<in> qbs_space X\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
fix x
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>x. \<lbrakk>qbs_space X = {}; x \<in> monadP_qbs_Px X\<rbrakk> \<Longrightarrow> False
2. \<And>x. \<lbrakk>monadP_qbs_Px X = {}; x \<in> qbs_space X\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
assume 1:"qbs_space X = {}"
"x \<in> monadP_qbs_Px X"
[PROOF STATE]
proof (state)
this:
qbs_space X = {}
x \<in> monadP_qbs_Px X
goal (2 subgoals):
1. \<And>x. \<lbrakk>qbs_space X = {}; x \<in> monadP_qbs_Px X\<rbrakk> \<Longrightarrow> False
2. \<And>x. \<lbrakk>monadP_qbs_Px X = {}; x \<in> qbs_space X\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
qbs_space X = {}
x \<in> monadP_qbs_Px X
[PROOF STEP]
obtain \<alpha> \<mu> where "qbs_prob X \<alpha> \<mu>"
[PROOF STATE]
proof (prove)
using this:
qbs_space X = {}
x \<in> monadP_qbs_Px X
goal (1 subgoal):
1. (\<And>\<alpha> \<mu>. qbs_prob X \<alpha> \<mu> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using rep_monadP_qbs_Px
[PROOF STATE]
proof (prove)
using this:
qbs_space X = {}
x \<in> monadP_qbs_Px X
?s \<in> monadP_qbs_Px ?X \<Longrightarrow> \<exists>\<alpha> \<mu>. ?s = qbs_prob_space (?X, \<alpha>, \<mu>) \<and> qbs_prob ?X \<alpha> \<mu>
goal (1 subgoal):
1. (\<And>\<alpha> \<mu>. qbs_prob X \<alpha> \<mu> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
qbs_prob X \<alpha> \<mu>
goal (2 subgoals):
1. \<And>x. \<lbrakk>qbs_space X = {}; x \<in> monadP_qbs_Px X\<rbrakk> \<Longrightarrow> False
2. \<And>x. \<lbrakk>monadP_qbs_Px X = {}; x \<in> qbs_space X\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
thus False
[PROOF STATE]
proof (prove)
using this:
qbs_prob X \<alpha> \<mu>
goal (1 subgoal):
1. False
[PROOF STEP]
using empty_quasi_borel_iff[of X] qbs_empty_not_qbs_prob[of \<alpha> \<mu>] 1(1)
[PROOF STATE]
proof (prove)
using this:
qbs_prob X \<alpha> \<mu>
(qbs_space X = {}) = (X = empty_quasi_borel)
\<not> qbs_prob empty_quasi_borel \<alpha> \<mu>
qbs_space X = {}
goal (1 subgoal):
1. False
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
False
goal (1 subgoal):
1. \<And>x. \<lbrakk>monadP_qbs_Px X = {}; x \<in> qbs_space X\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. \<lbrakk>monadP_qbs_Px X = {}; x \<in> qbs_space X\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
fix x
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. \<lbrakk>monadP_qbs_Px X = {}; x \<in> qbs_space X\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
assume 1:"monadP_qbs_Px X = {}"
"x \<in> qbs_space X"
[PROOF STATE]
proof (state)
this:
monadP_qbs_Px X = {}
x \<in> qbs_space X
goal (1 subgoal):
1. \<And>x. \<lbrakk>monadP_qbs_Px X = {}; x \<in> qbs_space X\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
monadP_qbs_Px X = {}
x \<in> qbs_space X
[PROOF STEP]
interpret qp: qbs_prob X "\<lambda>r. x" "return real_borel 0"
[PROOF STATE]
proof (prove)
using this:
monadP_qbs_Px X = {}
x \<in> qbs_space X
goal (1 subgoal):
1. qbs_prob X (\<lambda>r. x) (return real_borel 0)
[PROOF STEP]
by(auto intro!: qbs_probI prob_space_return)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. \<lbrakk>monadP_qbs_Px X = {}; x \<in> qbs_space X\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
have "qbs_prob_space (X,\<lambda>r. x,return real_borel 0) \<in> monadP_qbs_Px X"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. qbs_prob_space (X, \<lambda>r. x, return real_borel 0) \<in> monadP_qbs_Px X
[PROOF STEP]
by(simp add: monadP_qbs_Px_def)
[PROOF STATE]
proof (state)
this:
qbs_prob_space (X, \<lambda>r. x, return real_borel 0) \<in> monadP_qbs_Px X
goal (1 subgoal):
1. \<And>x. \<lbrakk>monadP_qbs_Px X = {}; x \<in> qbs_space X\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
thus False
[PROOF STATE]
proof (prove)
using this:
qbs_prob_space (X, \<lambda>r. x, return real_borel 0) \<in> monadP_qbs_Px X
goal (1 subgoal):
1. False
[PROOF STEP]
by(simp add: 1)
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
|
# Settings before server session starts
if (file.exists("data/data.RData")) {
# load information sent through "launch_application"
load("data/data.RData")
}
shiny::shinyServer(
function(input, output) {
if (!exists("app_data")) {
app_data <- NULL
}
if (!exists("git_hash")) {
git_hash <- NULL
}
if (!exists("github_repo")) {
github_repo <- NULL
}
if (isTRUE(getOption("shiny.testmode"))) {
# Load static/dummy data if this is a test run
app_data <- SKDEresultater::testdata
}
output$plot_kvalitet <- shiny::renderUI({
if (length(input$valgtBo) == 0) {
return(NULL)
} else {
plotly::plotlyOutput("plotly_plot")
}
})
output$plot_variasjon <- shiny::renderUI({
plotly::plotlyOutput("heatmap", width = "auto", height = "800px")
})
output$heatmap <- plotly::renderPlotly({
SKDEresultater::create_heatmap(data = variasjon_data())
})
variasjon_data <- shiny::reactive({
if (input$valgtVariasjon == "Gynekologi") {
return(data::gyn)
} else if (input$valgtVariasjon == "Fødselshjelp") {
return(data::fodsel)
} else if (input$valgtVariasjon == "Dagkirurgi") {
return(data::dagkir2)
} else if (input$valgtVariasjon == "Kols") {
return(data::kols)
} else if (input$valgtVariasjon == "Barn") {
return(data::barn)
} else if (input$valgtVariasjon == "Nyfødt") {
return(data::nyfodt)
} else if (input$valgtVariasjon == "Eldre") {
return(data::eldre)
} else if (input$valgtVariasjon == "Ortopedi") {
return(data::ortopedi)
}
return(NULL)
})
output$pick_kvalitet <- shiny::renderUI({
mulige_valg <- as.character(unique(SKDEresultater::testdata$bohf))
shinyWidgets::radioGroupButtons(
inputId = "valgtKvalitet",
choices = c("Trombolyse", "Hofteprotese"),
justified = TRUE
)
})
output$pick_variasjon <- shiny::renderUI({
shinyWidgets::radioGroupButtons(
inputId = "valgtVariasjon",
choices = c("Fødselshjelp",
"Gynekologi",
"Ortopedi",
"Dagkirurgi",
"Kols",
"Eldre",
"Nyfødt",
"Barn"),
justified = TRUE
)
})
output$plotly_plot <- plotly::renderPlotly({
if (input$valgtKvalitet == "Trombolyse") {
mydata <- SKDEresultater::testdata
data_to_plot <- dplyr::filter(mydata, mydata$bohf %in% input$valgtBo)
} else {
return(NULL)
}
return(SKDEresultater::dotplot(data_to_plot = data_to_plot,
all_data = mydata,
ref_line = 30
)
)
})
bo_picker <- shiny::reactive({
mulige_valg <- c("Finnmark", "UNN", "Nordland", "Helgeland")
shinyWidgets::checkboxGroupButtons(
inputId = "valgtBo",
choices = mulige_valg,
justified = TRUE,
checkIcon = list(yes = icon("ok", lib = "glyphicon"), no = icon("remove", lib = "glyphicon"))
)
})
output$pick_bo <- shiny::renderUI({
bo_picker()
})
}
)
|
--
-- Propagation : train supervised dataset and judge images
--
module CNN.Propagation (
trainLayers
, update
, evaluateLayers
, judgeImage
, forwardProp
) where
import Control.Monad
import Data.List (foldl')
import Debug.Trace
import Numeric.LinearAlgebra
import CNN.Algebra
import CNN.Image
import CNN.Layer
import CNN.LayerType
{-
trainLayers
IN : layers
reversed layers (for backward prop)
       training data
OUT: difference of layers
-}
trainLayers :: [Layer] -> [Layer] -> Trainer -> [Maybe Layer]
trainLayers [] _ (i, c) = []
trainLayers ls rls (i, c) = dls
where
(y, op') = judgeImage ls i
d = [reshape (size c) (y `vsub` c)]
(_, dls) = backwardProp (zip (tail op') rls) (d, [])
{-
update
IN : learning rate
differences of layers of each training data
original layers
OUT: updated layers
-}
update :: Double -> [Layer] -> [[Maybe Layer]] -> [Layer]
update lr [] _ = []
update lr ls [] = ls
update lr (l:ls) (dl:dls) = updateLayer lr l dl : update lr ls dls
{-
evaluateLayers
IN : layers
trainer data
OUT: result (output)
ratio of correct answer
-}
{- Previous implementation
evaluate :: [Layer] -> [Trainer] -> [(Vector R, Double)]
evaluate _ [] = []
evaluate ls (s:ss) = (op, rt) : evaluate ls ss
where
op = flatten $ head (head $ forwardProp ls [fst s])
rt = (snd s) <.> op
-}
evaluateLayers :: [Layer] -> Double -> Trainer -> Double
evaluateLayers ls rr (i, c) = rr + c <.> y
where
(y, op) = judgeImage ls i
judgeImage :: [Layer] -> Image -> (Vector R, [Image])
judgeImage ls im = (flatten $ head $ head y, op)
where
(y, op) = splitAt 1 $ foldl' forwardProp' [im] ls
---
{-
fowardProp
IN : layers
OUT: image list
list of output of each layer
-}
forwardProp :: [Layer] -> [Image] -> [Image]
forwardProp [] is = is
forwardProp (l:ls) (i:is) = forwardProp ls (forwardLayer l i ++ is)
forwardProp' :: [Image] -> Layer -> [Image]
forwardProp' (i:is) l = forwardLayer l i ++ is
{- |
backwardProp
IN : list of image and layer pair
delta from previous step
difference of layers
OUT: delta
difference of layers
-}
backwardProp :: [(Image, Layer)] -> (Delta, [Maybe Layer])
-> (Delta, [Maybe Layer])
backwardProp [] (_, ls) = ([], ls)
backwardProp ((im,l):ols) (d, ls) = backwardProp ols (d', l':ls)
where
(d', l') = backwardLayer l im d
{-
selectLayer :: Layer -> Bool
selectLayer (ConvLayer _ _) = False
selectLayer (FullConnLayer _) = True
selectLayer _ = False
-}
|
Our student athletes take their talents to colleges, universities, and professional leagues when they leave Country Day. Please check back on this section as we profile the many athletes playing at the next level.
Amelie Fackenthal, ‘18, is a swimmer at Stanford and a USA Swimming National Junior team member (2017-18). Amelie is interested in pursuing a major in Human Biology or Psychology at Stanford.
She is a Freshman member of the Stanford Swimming and Diving Team and just won both the 2019 PAC-12 Championship and the 2019 NCAA Division 1 National Championship. At the NCAA Championship, she earned five All-American honors, placing 13th individually in the 100 Freestyle, as well as Top 8 in 4 relay teams, the 200/400 Free Relays and 200/400 Medley Relays.
Yasmin Gupta, ‘18, is majoring in pre-medicine and minoring in both kinesiology and psychology at Cornell College in Mount Vernon, Iowa, to which she was accepted on a full-ride basketball scholarship.
Marigot Fackenthal (‘17) is a sophomore at Cornell University, majoring in Mechanical Engineering with a minor in Astronomy.
She led Cornell Women’s Fencing Team as the Vice Captain in 2019, earned NCAA Fencing National Championship Institutional Alternate status and was inducted into Cornell University’s “400 Club” which honors varsity student-athletes who achieve perfect 4.0 semester grade point averages.
Marigot has served as an Undergraduate Teaching Assistant for Cornell’s Physics Department as well as for the Mechanical & Aerospace Engineering Department. She is also working as a Research Assistant in Cornell’s Bewley Applied Turbulence Lab.
Jenny Kerbs, ‘16, attends Vassar College in Poughkeepsie, New York, where she plays volleyball. She has become a reliable hitter for the Brewers, playing in 96 of Vassar's 106 sets as a sophomore.
Claire started her collegiate career as a swimmer at Harvard. She was the top nationally ranked 15-16 swimmer in the 800 LCM FR-R (8:19.32) (4th for 15-18) and was fourth at the 2014 Summer Junior Nationals as a lead-off.
Logan Winfield, ‘13, attended Colorado School of Mines, where he was a cross country distance runner. He graduated with a degree in Chemical and Biochemical Engineering.
In 2017, for the first time in the program’s history, the NCAA-champion Colorado School of Mines men’s cross country team was named the USTFCCCA Scholar Team of the Year. The Orediggers earned the honor as the highest-finishing team at NCAA Championships to have earned All-Academic Team honors with a team GPA over 3.0. Logan received individual All-Academic honors.
Morgan Bennett-Smith played soccer at Occidental College, where he was named first-team all-conference and all-academic his sophomore and junior seasons. After graduating with a degree in Biology, he moved to Saudi Arabia to attend King Abdullah University of Science and Technology, where he is earning a master's in coral genomics at the Red Sea Research Center.
|
[STATEMENT]
lemma lowered_lotteries: "lowered.lotteries \<subseteq> lotteries"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lowered.lotteries \<subseteq> lotteries
[PROOF STEP]
unfolding lotteries_on_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {p. set_pmf p \<subseteq> alts'} \<subseteq> {p. set_pmf p \<subseteq> alts}
[PROOF STEP]
using alts'_subset
[PROOF STATE]
proof (prove)
using this:
alts' \<subseteq> alts
goal (1 subgoal):
1. {p. set_pmf p \<subseteq> alts'} \<subseteq> {p. set_pmf p \<subseteq> alts}
[PROOF STEP]
by blast
|
------------------------------------------------------------------------------
-- The ABP using the Agda standard library
------------------------------------------------------------------------------
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
module FOT.FOTC.Program.ABP.ABP-SL where
open import Codata.Musical.Notation
open import Codata.Musical.Stream
open import Data.Bool
open import Data.Product
open import Relation.Nullary
------------------------------------------------------------------------------
Bit : Set
Bit = Bool
-- Data type used to model the fair unreliable transmission channel.
data Err (A : Set) : Set where
ok : (x : A) → Err A
error : Err A
-- The mutual sender functions.
-- TODO (2019-01-04): Agda doesn't accept this definition which was
-- accepted by a previous version.
{-# TERMINATING #-}
sendA : {A : Set} → Bit → Stream A → Stream (Err Bit) → Stream (A × Bool)
awaitA : {A : Set} → Bit → Stream A → Stream (Err Bit) → Stream (A × Bit)
sendA b (i ∷ is) ds = (i , b) ∷ ♯ awaitA b (i ∷ is) ds
awaitA b (i ∷ is) (ok b' ∷ ds) with b ≟ b'
... | yes p = sendA (not b) (♭ is) (♭ ds)
... | no ¬p = (i , b) ∷ ♯ (awaitA b (i ∷ is) (♭ ds))
awaitA b (i ∷ is) (error ∷ ds) = (i , b) ∷ ♯ (awaitA b (i ∷ is) (♭ ds))
-- The receiver functions.
-- TODO (2019-01-04): Agda doesn't accept this definition which was
-- accepted by a previous version.
{-# TERMINATING #-}
ackA : {A : Set} → Bit → Stream (Err (A × Bit)) → Stream Bit
ackA b (ok (_ , b') ∷ bs) with b ≟ b'
... | yes p = b ∷ ♯ (ackA (not b) (♭ bs))
... | no ¬p = not b ∷ ♯ (ackA b (♭ bs))
ackA b (error ∷ bs) = not b ∷ ♯ (ackA b (♭ bs))
-- 25 June 2014. Requires the TERMINATING flag when using
-- --without-K. See Agda Issue 1214.
-- TODO (03 December 2015): Report the issue.
{-# TERMINATING #-}
outA : {A : Set} → Bit → Stream (Err (A × Bit)) → Stream A
outA b (ok (i , b') ∷ bs) with b ≟ b'
... | yes p = i ∷ ♯ (outA (not b) (♭ bs))
... | no ¬p = outA b (♭ bs)
outA b (error ∷ bs) = outA b (♭ bs)
-- Model the fair unreliable transmission channel.
-- TODO (2019-01-04): Agda doesn't accept this definition which was
-- accepted by a previous version.
{-# TERMINATING #-}
corruptA : {A : Set} → Stream Bit → Stream A → Stream (Err A)
corruptA (true ∷ os) (_ ∷ xs) = error ∷ ♯ (corruptA (♭ os) (♭ xs))
corruptA (false ∷ os) (x ∷ xs) = ok x ∷ ♯ (corruptA (♭ os) (♭ xs))
-- 25 June 2014. Requires the TERMINATING flag when using
-- --without-K. See Agda Issue 1214.
-- TODO (03 December 2015): Report the issue.
-- The ABP transfer function.
{-# TERMINATING #-}
abpTransA : {A : Set} → Bit → Stream Bit → Stream Bit → Stream A → Stream A
abpTransA {A} b os₁ os₂ is = outA b bs
where
as : Stream (A × Bit)
bs : Stream (Err (A × Bit))
cs : Stream Bit
ds : Stream (Err Bit)
as = sendA b is ds
bs = corruptA os₁ as
cs = ackA b bs
ds = corruptA os₂ cs
|
```python
import numpy as np
from scipy import io
from scipy import sparse
from scipy.sparse import csgraph
from scipy import fftpack
from scipy import signal
from scipy import linalg
from matplotlib import pyplot as plt
from matplotlib import collections
import mpl_toolkits.mplot3d.art3d as art3d
import seaborn as sns
import igraph
import graph3d
%matplotlib inline
```
```python
sns.set_palette('husl')
```
# Signal smoothness with respect to graph structure
```python
# Define a ring graph
n = 100
Aring = np.roll(np.eye(n), -1, axis=1) + np.roll(np.eye(n), 1, axis=1)
# Define a signal over the ring graph
f = np.sin(np.linspace(-8*np.pi, 8*np.pi, n))
```
```python
# Generate coords for plotting
x = np.cos(np.linspace(-2*np.pi, 2*np.pi, n))
y = np.sin(np.linspace(-2*np.pi, 2*np.pi, n))
xy = np.column_stack([x, y])
startcoords = xy[np.where(Aring)[0]]
endcoords = xy[np.where(Aring)[1]]
Aring_lines = [[tuple(x), tuple(y)] for x, y in zip(startcoords, endcoords)]
```
```python
Acrossed = Aring.copy()
# Randomly connect 40 vertices of the ring
s = np.random.randint(0, len(Aring), 40)
e = np.random.randint(0, len(Aring), 40)
for i, j in zip(s, e):
Acrossed[i, j] = 1
Acrossed[j, i] = 1
# Regenerate coords for plotting
startcoords = xy[np.where(Acrossed)[0]]
endcoords = xy[np.where(Acrossed)[1]]
Acrossed_lines = [[tuple(x), tuple(y)] for x, y in zip(startcoords, endcoords)]
```
```python
# Compute laplacian eigendecompositions
Lring = csgraph.laplacian(Aring)
lring, uring = linalg.eigh(Lring)
Lcrossed = csgraph.laplacian(Acrossed)
lcrossed, ucrossed = linalg.eigh(Lcrossed)
```
```python
# Compute graph Fourier transform of signals
fhat_ring = np.conj(uring.T) @ f
fhat_crossed = np.conj(ucrossed.T) @ f
```
```python
# Plot frequency spectrum for ring and crossed ring
fig = plt.figure(figsize=(10,6))
lc = collections.LineCollection(Aring_lines, linewidths=1, colors='k')
ax0 = fig.add_subplot(2, 2, 1, projection='3d')
graph3d.Graph3d(xy[:,0], xy[:,1], f, zmin=-4, zmax=4, fig=fig, ax=ax0, lc=lc)
ax0.set_title(r'$A_{ring}$')
lc = collections.LineCollection(Acrossed_lines, linewidths=1, colors='k')
ax1 = fig.add_subplot(2, 2, 3, projection='3d')
graph3d.Graph3d(xy[:,0], xy[:,1], f, zmin=-4, zmax=4, fig=fig, ax=ax1, lc=lc)
ax1.set_title(r'$A_{crossed}$')
ax2 = fig.add_subplot(2, 2, 2)
ax2.stem(lring, fhat_ring)
ax2.set_ylabel(r'$\hat{f}(\lambda)$')
ax4 = fig.add_subplot(2, 2, 4)
ax4.stem(lcrossed, fhat_crossed)
ax4.set_ylabel(r'$\hat{f}(\lambda)$')
ax4.set_xlabel(r'$\lambda$')
plt.tight_layout()
plt.savefig('../img/signal_smoothness_0.pdf', bbox_inches='tight')
```
# Local metrics of smoothness
#### The edge derivative of $f$ with respect to edge $e=(i,j)$
\begin{equation}
\frac{\partial f}{\partial e} \bigg|_i = \sqrt{W_{i,j}} [f(j) - f(i)]
\end{equation}
The local variation can be measured by the square root of the sum of the squared differences
between signal values at adjacent vertices.
#### The graph gradient of $f$ at vertex $i$
\begin{equation}
|| \nabla_i f || = \bigg[ \bigg\{ \frac{\partial f}{\partial e} \bigg|_i \bigg\} \bigg|_{e \in \epsilon \ s.t. \ e=(i,j) \text{ for some } j \in V} \bigg]
\end{equation}
#### The local variation at vertex $i$
\begin{equation}
|| \nabla_i f ||_2 = \bigg[ \sum_{\text{e connected to i}} \bigg( \frac{\partial f}{\partial e} \bigg|_i \bigg)^2 \bigg]^{1/2} = \bigg[ \sum_{j \in N_i} W_{i,j} [f(j) - f(i)]^2 \bigg]^{1/2}
\end{equation}
```python
def graph_gradient(A, f, i):
    # j : unique neighbors of i (possibly asymmetrical)
    # Note: for the binary weights used here W**0.5 == W, so this matches
    # the edge-derivative definition above (up to sign convention).
    j = np.unique(np.concatenate([np.where(A[i, :])[0],
                                  np.where(A[:, i])[0]]))
    return A[i, j]*(f[i] - f[j])
```
```python
graph_gradient(Aring, f, 50)
```
array([ 0.50229597, -0.43893102])
```python
def local_variation(A, f, i):
local_var = np.sqrt(np.sum(graph_gradient(A, f, i)**2))
return local_var
```
```python
local_variation(Aring, f, 50)
```
0.66705448799508404
# Global metrics of smoothness
#### Discrete p-Dirichlet form of $f$
\begin{equation}
S_p(f) = \frac{1}{p} \sum_{i \in V} \bigg[ \sum_{j \in N_i} W_{i,j} [f(j) - f(i)]^2 \bigg]^{\frac{p}{2}}
\end{equation}
For $p=1$, $S_1$ is simply the sum of local variations across all vertices.
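As a direct check of the $p=1$ description, the sum of local variations can be computed with the `local_variation` helper defined earlier (a quick sketch; note that the vectorized `dirichlet_p` below is written for the $p=2$ case):
```python
# S_1: sum of local variations across all vertices (the p = 1 case above)
S1_ring = sum(local_variation(Aring, f, i) for i in range(len(Aring)))
S1_crossed = sum(local_variation(Acrossed, f, i) for i in range(len(Acrossed)))
S1_ring, S1_crossed
```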
For $p=2$, $S_2$ is a quadratic function of the Laplacian:
#### Graph Laplacian Quadratic Form
\begin{equation}
\begin{split}
S_2(f) = \frac{1}{2} \sum_{i \in V} \bigg[ \sum_{j \in N_i} W_{i,j}
[f(j) - f(i)]^2 \bigg]^{\frac{1}{2}} = \sum_{(i, j) \in \epsilon}
W_{i,j} [f(j) - f(i)]^2 \\
= f^T L f
\end{split}
\end{equation}
$S_2$ is small when $f$ has similar values at strongly-connected vertices.
```python
def dirichlet_p(A, f, p=2):
    # Vectorized over directed edges; this edge-wise form agrees with the
    # per-vertex definition above for p = 2 (the only case used below).
    i, j = np.where(A)
    return (1/p)*np.sum((A[i, j]*(f[i] - f[j])**2)**(p/2))
```
```python
def quad_laplace(L, f):
return f @ L @ f
```
```python
# Is the dirichlet p form equivalent to the quadratic form?
dirichlet_ring = dirichlet_p(Aring, f, p=2)
quadratic_ring = quad_laplace(Lring, f)
np.isclose(dirichlet_ring, quadratic_ring)
```
True
```python
dirichlet_crossed = dirichlet_p(Acrossed, f, p=2)
quadratic_crossed = quad_laplace(Lcrossed, f)
np.isclose(dirichlet_crossed, quadratic_crossed)
```
True
```python
print('Graph laplacian quadratic form:')
print('\n')
print(r'Ring: ', f @ Lring @ f)
print(r'Crossed ring: ', f @ Lcrossed @ f)
```
Graph laplacian quadratic form:
Ring: 12.4889116701
Crossed ring: 56.6338735435
|
#' Benchmark regular models
#'
#' A function to benchmark a collection of regular machine learning models.
#' @param benchmarking_data A dataframe from the output of \code{\link{get_benchmarking_data}} function. This dataset contains species occurrence coordinates together with a set of environmental data points.
#' @param learners A list of mlr learner objects which specify which models to use (i.e. Random Forests). The following learners are supported: "classif.logreg", "classif.gbm", "classif.multinom", "classif.naiveBayes", "classif.xgboost", "classif.ksvm".
#' @param dataset_type A character string indicating spatial partitioning method. This is used in order to avoid spatial autocorrelation issues.
#' @param sample Logical. Indicates whether benchmarking should be done on an undersampled dataset. This is useful for testing model efficiency with an imbalanced dataset (i.e. few observations and many background (pseudo-absence) points).
#'
#' @return Benchmarking object (class bmr). This object can be accessed by other functions in order to obtain the benchmark results.
#' @examples
#' \dontrun{
#' # download benchmarking data
#' benchmarking_data <- get_benchmarking_data("Lynx lynx",
#' limit = 1500)
#'
#' # create a list of algorithms to compare
#' # here it is important to specify predict.type as "prob"
#' learners <- list(mlr::makeLearner("classif.randomForest",
#' predict.type = "prob"),
#' mlr::makeLearner("classif.logreg",
#' predict.type = "prob"))
#'
#' # run the model benchmarking process
#' # if you have previously used a partitioning method you should specify it here
#' bmr <- benchmark_sdm(benchmarking_data$df_data,
#' learners = learners,
#' dataset_type = "default")
#'
#' # for benchmarking an imbalanced dataset you can undersample
#' bmr <- benchmark_sdm(benchmarking_data$df_data,
#' learners = learners,
#' dataset_type = "default",
#' sample = TRUE)
#'
#' # inspect the benchmark results
#' bmr
#' }
#' @export
benchmark_sdm <- function(benchmarking_data, learners, dataset_type = "default", sample = FALSE) {
benchmarking_data$label <- as.factor(benchmarking_data$label)
if (dataset_type == "default") {
# choose benchmarking metrics
ms <- list(mlr::auc)
# use undersampling
if (sample) {
task_default <- mlr::makeClassifTask(data = benchmarking_data, target = "label")
task <- mlr::undersample(task_default, rate = 1/8)
} else {
task <- mlr::makeClassifTask(data = benchmarking_data, target = "label")
}
bmr <- mlr::benchmark(learners = learners, tasks = task, measures = ms, show.info = TRUE, models = TRUE)
return(bmr)
} else if (dataset_type == "checkerboard1" | dataset_type == "checkerboard2") {
rdesc <- mlr::makeResampleDesc("CV", iters = 2)
ms <- list(mlr::auc)
# assign spatial partitioning vector to split the data
blocking <- benchmarking_data$grp_checkerboard
benchmarking_data$grp_checkerboard <- NULL
if (sample) {
task_default <- mlr::makeClassifTask(data = benchmarking_data, target = "label", blocking = blocking)
task <- mlr::undersample(task_default, rate = 1/8)
} else {
task <- mlr::makeClassifTask(data = benchmarking_data, target = "label", blocking = blocking)
}
bmr <- mlr::benchmark(learners = learners, tasks = task, measures = ms, show.info = TRUE, resampling = rdesc, models = TRUE)
return(bmr)
} else if (dataset_type == "block") {
rdesc <- mlr::makeResampleDesc("CV", iters = 4)
ms <- list(mlr::auc)
blocking <- as.factor(benchmarking_data$grp)
benchmarking_data$grp <- NULL
if (sample) {
task_default <- mlr::makeClassifTask(data = benchmarking_data, target = "label", blocking = blocking)
task <- mlr::undersample(task_default, rate = 1/8)
} else {
task <- mlr::makeClassifTask(data = benchmarking_data, target = "label", blocking = blocking)
}
bmr <- mlr::benchmark(learners = learners, tasks = task, measures = ms, show.info = TRUE, resampling = rdesc, models = TRUE)
return(bmr)
}
}
|
/-
Copyright (c) 2022 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser
-/
import data.finset.locally_finite
import data.dfinsupp.interval
import data.dfinsupp.multiset
import data.nat.interval
/-!
# Finite intervals of multisets
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file provides the `locally_finite_order` instance for `multiset α` and calculates the
cardinality of its finite intervals.
## Implementation notes
We implement the intervals via the intervals on `dfinsupp`, rather than via filtering
`multiset.powerset`; this is because `(multiset.replicate n x).powerset` has `2^n` entries not `n+1`
entries as it contains duplicates. We do not go via `finsupp` as this would be noncomputable, and
multisets are typically used computationally.
-/
open finset dfinsupp function
open_locale big_operators pointwise
variables {α : Type*} {β : α → Type*}
namespace multiset
variables [decidable_eq α] (f g : multiset α)
instance : locally_finite_order (multiset α) :=
locally_finite_order.of_Icc (multiset α)
(λ f g, (finset.Icc f.to_dfinsupp g.to_dfinsupp).map
(multiset.equiv_dfinsupp.to_equiv.symm.to_embedding))
(λ f g x, by simp)
lemma Icc_eq :
finset.Icc f g =
(finset.Icc f.to_dfinsupp g.to_dfinsupp).map
(multiset.equiv_dfinsupp.to_equiv.symm.to_embedding) := rfl
lemma card_Ico :
(finset.Ico f g).card = ∏ i in f.to_finset ∪ g.to_finset, (g.count i + 1 - f.count i) - 1 :=
by rw [card_Ico_eq_card_Icc_sub_one, card_Icc]
lemma card_Ioc :
(finset.Ioc f g).card = ∏ i in f.to_finset ∪ g.to_finset, (g.count i + 1 - f.count i) - 1 :=
by rw [card_Ioc_eq_card_Icc_sub_one, card_Icc]
lemma card_Ioo :
(finset.Ioo f g).card = ∏ i in f.to_finset ∪ g.to_finset, (g.count i + 1 - f.count i) - 2 :=
by rw [card_Ioo_eq_card_Icc_sub_two, card_Icc]
lemma card_Iic :
(finset.Iic f).card = ∏ i in f.to_finset, (f.count i + 1) :=
by simp_rw [Iic_eq_Icc, card_Icc, bot_eq_zero, to_finset_zero, empty_union, count_zero, tsub_zero]
end multiset
|
open import Agda.Primitive public using (lzero)
{-# BUILTIN LEVELZERO lzero #-}
|
lemma sigma_sets_top: "sp \<in> sigma_sets sp A"
|
\input{../../assignment-header}
%========================================================================
\title{Assignment 08: Pendulum Swing-Up}
\date{Assigned: March 16 --- Due: March 28 at 11:55pm}
\author{Optimal Control for Robotics}
%========================================================================
\begin{document}
\maketitle
%=================================================
\section*{Introduction}
In this assignment you will compute the optimal swing-up trajectory for a simple
pendulum using multiple shooting. We will keep things simple:
use Euler's method on a uniform time grid,
with one simulation step per multiple shooting segment.
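As a concrete sketch of that setup (illustrative only: the function name,
the packing of the decision vector, and the unit pendulum parameters are
assumptions, and this is not the provided solution), the Euler-step defect
constraints might look like:
\begin{verbatim}
% Sketch: defect constraints for multiple shooting with Euler steps.
% z packs the states (2 x n) and controls (1 x n); h is the step size.
function [c, ceq] = defectConstraints(z, n, h)
    x = reshape(z(1:2*n), 2, n);
    u = z(2*n+1:end);
    dyn = @(x, u) [x(2); u - sin(x(1))];      % unit-parameter pendulum
    ceq = zeros(2, n-1);
    for k = 1:n-1
        xNext = x(:,k) + h * dyn(x(:,k), u(k));  % one Euler step per segment
        ceq(:,k) = x(:,k+1) - xNext;             % defect must vanish
    end
    ceq = ceq(:);
    c = [];   % no inequality constraints in this sketch
end
\end{verbatim}
FMINCON drives every defect to zero, which stitches the simulated segments
into one continuous trajectory.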
\section*{Deliverables}
Implement the function \texttt{simplePendulumOptimBvp} using the template provided.
\section*{Write-Up:}
There is no separate write-up for this assignment. Instead,
please include the total time you spent working on this assignment in
the comments near the top of your implementation of\\
\texttt{simplePendulumOptimBvp}.
Be sure to clearly organize and document your code.
\section*{Comments}
In this assignment I provide a simple script to run your optimization,
along with an encrypted version of the solution.
You should run this script with a variety of different parameters to learn
more about how trajectory optimization behaves with different inputs.
Once you've done that, then start working on implementing your code.
\par If you run out of time and your assignment does not work correctly,
then add a notes section to the comments in the top of \texttt{simplePendulumOptimBvp}.
You should clearly state that the function does not work, why you think it does
not work, and your best guess about how to go about debugging it if you had time.
\par The file \texttt{simplePendulumOptimBvpSoln.p} provides you with a correct
implementation of multiple shooting, against which you can compare your code.
This implementation is not guaranteed to obtain the \quotes{true} solution to
the trajectory optimization problem. If FMINCON converges, then the resulting
trajectory is an approximation of a locally optimal trajectory.
\par One final note: \texttt{simplePendulumOptimBvpSoln.p} uses a very simple
initialization routine. If you are clever you can probably beat it: either by
finding the same solution more quickly or by avoiding a local minimum.
For example, the optimal solution might require three swings,
but \texttt{simplePendulumOptimBvpSoln.p} only manages to find a solution with
one swing (which has a higher objective function value).
%=================================================
\end{document}
|
theory Closures2
imports
Closures
Well_Quasi_Orders.Well_Quasi_Orders
begin
section \<open>Closure under \<open>SUBSEQ\<close> and \<open>SUPSEQ\<close>\<close>
text \<open>Properties about the embedding relation\<close>
lemma subseq_strict_length:
assumes a: "subseq x y" "x \<noteq> y"
shows "length x < length y"
using a
by (induct) (auto simp add: less_Suc_eq)
lemma subseq_wf:
shows "wf {(x, y). subseq x y \<and> x \<noteq> y}"
proof -
have "wf (measure length)" by simp
moreover
have "{(x, y). subseq x y \<and> x \<noteq> y} \<subseteq> measure length"
unfolding measure_def by (auto simp add: subseq_strict_length)
ultimately
show "wf {(x, y). subseq x y \<and> x \<noteq> y}" by (rule wf_subset)
qed
lemma subseq_good:
shows "good subseq (f :: nat \<Rightarrow> ('a::finite) list)"
using wqo_on_imp_good[where f="f", OF wqo_on_lists_over_finite_sets]
by simp
lemma subseq_Higman_antichains:
assumes a: "\<forall>(x::('a::finite) list) \<in> A. \<forall>y \<in> A. x \<noteq> y \<longrightarrow> \<not>(subseq x y) \<and> \<not>(subseq y x)"
shows "finite A"
proof (rule ccontr)
assume "infinite A"
then obtain f::"nat \<Rightarrow> 'a::finite list" where b: "inj f" and c: "range f \<subseteq> A"
by (auto simp add: infinite_iff_countable_subset)
from subseq_good[where f="f"]
obtain i j where d: "i < j" and e: "subseq (f i) (f j) \<or> f i = f j"
unfolding good_def
by auto
have "f i \<noteq> f j" using b d by (auto simp add: inj_on_def)
moreover
have "f i \<in> A" using c by auto
moreover
have "f j \<in> A" using c by auto
ultimately have "\<not>(subseq (f i) (f j))" using a by simp
with e show "False" by auto
qed
subsection \<open>Sub- and Supersequences\<close>
definition
"SUBSEQ A \<equiv> {x::('a::finite) list. \<exists>y \<in> A. subseq x y}"
definition
"SUPSEQ A \<equiv> {x::('a::finite) list. \<exists>y \<in> A. subseq y x}"
lemma SUPSEQ_atom [simp]:
shows "SUPSEQ {[c]} = UNIV \<cdot> {[c]} \<cdot> UNIV"
unfolding SUPSEQ_def conc_def
by (auto dest: list_emb_ConsD)
lemma SUPSEQ_union [simp]:
shows "SUPSEQ (A \<union> B) = SUPSEQ A \<union> SUPSEQ B"
unfolding SUPSEQ_def by auto
lemma SUPSEQ_conc [simp]:
shows "SUPSEQ (A \<cdot> B) = SUPSEQ A \<cdot> SUPSEQ B"
unfolding SUPSEQ_def conc_def
apply(auto)
apply(drule list_emb_appendD)
apply(auto)
by (metis list_emb_append_mono)
lemma SUPSEQ_star [simp]:
shows "SUPSEQ (A\<star>) = UNIV"
apply(subst star_unfold_left)
apply(simp only: SUPSEQ_union)
apply(simp)
done
subsection \<open>Regular expression that recognises every character\<close>
definition
Allreg :: "'a::finite rexp"
where
"Allreg \<equiv> \<Uplus>(Atom ` UNIV)"
lemma Allreg_lang [simp]:
shows "lang Allreg = (\<Union>a. {[a]})"
unfolding Allreg_def by auto
lemma Star_Allreg_lang [simp]:
shows "lang (Star Allreg) = UNIV"
by simp
fun
UP :: "'a::finite rexp \<Rightarrow> 'a rexp"
where
"UP (Zero) = Zero"
| "UP (One) = Star Allreg"
| "UP (Atom c) = Times (Star Allreg) (Times (Atom c) (Star Allreg))"
| "UP (Plus r1 r2) = Plus (UP r1) (UP r2)"
| "UP (Times r1 r2) = Times (UP r1) (UP r2)"
| "UP (Star r) = Star Allreg"
lemma lang_UP:
fixes r::"'a::finite rexp"
shows "lang (UP r) = SUPSEQ (lang r)"
by (induct r) (simp_all)
lemma SUPSEQ_regular:
fixes A::"'a::finite lang"
assumes "regular A"
shows "regular (SUPSEQ A)"
proof -
from assms obtain r::"'a::finite rexp" where "lang r = A" by auto
then have "lang (UP r) = SUPSEQ A" by (simp add: lang_UP)
then show "regular (SUPSEQ A)" by auto
qed
lemma SUPSEQ_subset:
fixes A::"'a::finite list set"
shows "A \<subseteq> SUPSEQ A"
unfolding SUPSEQ_def by auto
lemma SUBSEQ_complement:
shows "- (SUBSEQ A) = SUPSEQ (- (SUBSEQ A))"
proof -
have "- (SUBSEQ A) \<subseteq> SUPSEQ (- (SUBSEQ A))"
by (rule SUPSEQ_subset)
moreover
have "SUPSEQ (- (SUBSEQ A)) \<subseteq> - (SUBSEQ A)"
proof (rule ccontr)
assume "\<not> (SUPSEQ (- (SUBSEQ A)) \<subseteq> - (SUBSEQ A))"
then obtain x where
a: "x \<in> SUPSEQ (- (SUBSEQ A))" and
b: "x \<notin> - (SUBSEQ A)" by auto
from a obtain y where c: "y \<in> - (SUBSEQ A)" and d: "subseq y x"
by (auto simp add: SUPSEQ_def)
from b have "x \<in> SUBSEQ A" by simp
then obtain x' where f: "x' \<in> A" and e: "subseq x x'"
by (auto simp add: SUBSEQ_def)
from d e have "subseq y x'"
by (rule subseq_order.order_trans)
then have "y \<in> SUBSEQ A" using f
by (auto simp add: SUBSEQ_def)
with c show "False" by simp
qed
ultimately show "- (SUBSEQ A) = SUPSEQ (- (SUBSEQ A))" by simp
qed
definition
minimal :: "'a::finite list \<Rightarrow> 'a lang \<Rightarrow> bool"
where
"minimal x A \<equiv> (\<forall>y \<in> A. subseq y x \<longrightarrow> subseq x y)"
subsection \<open>Closure of @{const SUBSEQ} and @{const SUPSEQ}\<close>
lemma closure_SUPSEQ:
fixes A::"'a::finite lang"
shows "regular (SUPSEQ A)"
proof -
obtain M where a: "finite M" and b: "SUPSEQ A = SUPSEQ M"
using main_lemma by blast
have "regular M" using a by (rule finite_regular)
then have "regular (SUPSEQ M)" by (rule SUPSEQ_regular)
then show "regular (SUPSEQ A)" using b by simp
qed
lemma closure_SUBSEQ:
fixes A::"'a::finite lang"
shows "regular (SUBSEQ A)"
proof -
have "regular (SUPSEQ (- SUBSEQ A))" by (rule closure_SUPSEQ)
then have "regular (- SUBSEQ A)" by (subst SUBSEQ_complement) (simp)
then have "regular (- (- (SUBSEQ A)))" by (rule closure_complement)
then show "regular (SUBSEQ A)" by simp
qed
end
|
lemma reduced_labelling_unique: "r \<le> n \<Longrightarrow> \<forall>i<r. x i = 0 \<Longrightarrow> r = n \<or> x r \<noteq> 0 \<Longrightarrow> reduced n x = r"
|
There are multiple preachers that come throughout the year to preach loudly on campus. One is confirmed to be Pastor Tom Griner, hailing from Carson City, NV. Earlier in the year he brought with him a construction worker from Fremont. The one who does most of the talking has been known to talk about his three virgin daughters, and to refer to claims that between twenty and thirty percent of college students have, or had, a sexually transmitted infection. (This statistic may come from a Columbia University Health Services study, Go Ask Alice, 1999).
They make quite a scene with their very large signs. Sometimes they draw a large crowd, composed mostly of students attracted to a scene and some who want to question or heckle the signholders. Many students pose questions on the order of "What about dinosaurs?" and "Where did Cain get his wife?" that may be intended to sidestep the preachers' intended message. A few students are more daring and do things like distributing condoms or blowing them up into balloons. Some same-sex students have also kissed each other in front of the preachers.
From queries, their reasons for visiting Davis range from converting the students to the "correct" or "true" path, to warning us about what they see as students' sinning ways and the idea that Judgment is nearing, and to provoking new thought in the students. Many students find their message to be un-Christian, homophobic, racist, sexist, narrow-minded, and just plain mean, though others do not. Ultimately the message is in the eye of the beholder.
The preachers are from different groups. Tom Griner states that he is not affiliated with the man carrying the "You Make Me Sick" sign, and sees it as ridiculous. Griner's own sign usually reads "Fear God", which he says is controversial enough. He states that his message, though sensational, has afforded him the opportunity to pray with many students, and to have dialogue with still more. Some, he says, are shaken and leave thinking.
Also widely known is Brother Jed Smock, who most recently appeared on January 28th, 2008.
Fred Phelps is apparently coming to Davis pretty soon.
If nothing else, their actions serve as a catalyst for strong emotions and gut reactions, prompting debates and arguments among the student body and even those off-campus. The debates sparked by these demonstrations may be the strongest legacy of their visits to Davis.
Local Opinions
http://www.californiaaggie.com/media/storage/paper981/news/2005/05/06/Features/TheSigns.Of.Small.Men1320456.shtml Aggie columnist: The signs of small men
Past Appearances:
October 2006
April 26, 2005
April 28, 2005
May 16, 2005
January 17, 2006
Dick and Luke Otterstad, father and son preaching duo:
Counter sign: Kyuhee Baik, Emily Siu
November 7, 2006
January 28th, 2008
Brother Jed Smock
20050516 17:09:12 The Atheists and Agnostics had a banner out. Users/JimSchwab
20050516 17:09:58 I noticed them (it was hard not to). One guy held a sign that detailed everything he hated. It was interesting that he included Roman Catholics and anyone who watched TV in the list. Another guy held a sign that said Trust Jesus or Burn in Hell. The crowd seemed to be having a great time and at one point one of the guys put down his sign so he could arm wrestle a student challenger. It was all very amusing. Users/SharlaDaly
20050516 17:59:52 The other flag was for the Davis Atheist/Agnostic Alliance or something along those lines, when the younger of the two had mentioned that Atheists were the worst of all Users/JarredOral
20050516 17:59:52 Let's see. To answer Edwin... I asked them to explain dinosaurs. I asked one of them if he would help me get my smog checked by sticking his mouth on my car's exhaust pipe. I asked the other if he thought God made him in one day, because it certainly looked as though god rushed it (thanks bill hicks). I also consistently yelled out contradictions to his argument, cheered for the flag people, and tried to get a bunch of people to get in on some Buddhist chanting. I think next time they come, we should get a loud stereo system and just stand in front of them, turn it up really loud, and drown out their shit. Also, would anyone be interested in getting together a group of say 100 students or so to contribute $1 in order to create an equally big sign that says something we all agree upon? Something like, ATTENTION: CIRCUS SIDESHOW OF OBNOXIOUS HUMAN BEINGS RIGHT HERE. COME ENJOY THE LAUGHTER or perhaps something like GOD HATES PREACHERS WITH BIG SIGNS. If anyone is interested, feel free to post up here. You could also send me a message at [email protected] if you're interested. Users/MichaelGiardina
20050516 22:37:45 I think showing hate and trying to shut them up is not the most productive way to respond. Users/KenjiYamada
20050516 23:11:01 I don't think that's http://www.brojed.org/ Bro Jed, unless there are, god forbid, two of them out there. The Official Bro Jed has come to Davis on occasion in the past, though. Looks like he skipped us this California tour. Aw shucks. Users/JessicaLuedtke
20050516 23:43:36 Oops. I think his name was Jeremiah or Jedediah or something, so I assumed his name was Bro Jed. Someone post his correct name. Users/JeyKottalam
20050517 09:32:19 Nice pics, Rob... I was cursing myself for not bringing my camera yesterday, but I'm glad someone got some good shots. Users/KalenRidenour
20050517 10:58:28 I can never decide whether I hate these guys or love 'em for the hours of endless entertainment they bring. Users/GeorgeLewis
I'm glad they exist and are allowed to speak. Diversity of thought is always important. I'm also glad that their views are generally poorly viewed, but that's simply because I don't agree with them. They aren't here to debate, they are here to convert, and that's fine. Not my cup of tea, but then I drink coffee. I'd reserve negative feelings for the small-minded people who would try to silence them. Users/JabberWokky
Diversity of thought is important if everyone's open-minded. Close-mindedness and diversity of thought doesn't get us anywhere, and who could be more close-minded than these folks. These guys are certainly no less small-minded than someone trying to silence them may be. So you might as well express your distaste with these guys... Users/TheShah
20050517 13:47:56 What could be more upfront than the fact that "women" is listed on that sign? Users/AmeliaCarlson
20050517 13:52:35 It's actually "rebellious women". Hey, at least they don't have a problem with homosexuals in general, just the child molesting ones, and you know, I have a problem with them, too :) We can pretend that's what they mean, anyways.... Users/JessicaLuedtke
It does say "rebellious women", but while other multi-word categories used a continuous type, this one did not. Seems like some point is being made...
Actually, statistics show that very, very, very few pedophiles are homosexual, regardless of whether they attack boys or girls. Pedophiles are predominantly hetero. Users/GeorgeLewis GEL
Perhaps I should have been clearer. I have a problem with child molesters, regardless of their sexual orientation. Users/JessicaLuedtke
20050517 14:15:17 Man, imagine working at Kinko's or someplace and having to fill the order on that YOU MAKE ME SICK sign. The fonts alone would be such a headache. Users/MikeIvanov
20050517 14:22:42 I can't believe they bought a bunch of Friends Urging Campus Kindness Platform vinyl signs. Those things are so bad for the environment. Can't they be good stewards of God's creation? Users/BrentLaabs
20050517 16:00:09 Users/KrisFricke http://www.livejournal.com/users/emosnail/172422.html wrote on EMOSNAIL regarding these preachers. Users/KenBloom
I think I should emphasize something that Kris said: whether or not it sounds like it would be a pleasant arrangement has absolutely no bearing on whether or not something is theologically true. Users/KenBloom
20050517 21:51:16 nbsp Anybody have an idea what they meant by ankle biters and why they make them sick? I thought ankle biters referred to children (who youd think would do less sinning than the lot of us). Im confused, anybody have a guess? Users/AbbyLawson
20050517 22:12:35 nbsp An ankle biter as they used it was someone who quipped a comment as they passed by, but didnt stick around for the guy to attack and yell at. Users/JeyKottalam
20050517 22:12:35 I asked him this exact question. He told me that an anklebiter is like a little dog that just makes a lot of noise and tries to bite your ankles, that an anklebiter is someone who throws out judgments, comments, and random words that are intended to flame the recipient without encouraging respectful conversation. Although this is, in fact, what most people do to them (harass)... it seemed to me that these guys werent aware that this is exactly what they do as well. Another interesting point Id like to bring up, is that one of the guys claimed that being liked was something that would very much frighten him. He cited passages from the bible, wherein jesus said that true messages must be spread through harsh resistance. He then said that if he went anywhere and everybody liked him, that he would feel quite scared. This is really interesting, if not for the message but for psychology. What would cause such a selfconcept? Users/MichaelGiardina
20050518 08:39:33 nbsp Some guy who I recognized threw a cup of water at Jeremiah yesterday. I dont know about everyone else, but that sounds like an idiotic idea. Everyone pretty much agree? Users/GeorgeLewis
20050518 09:17:04 nbsp Well, I can see why someone would throw water on him. He thinks hes preaching about God, but instead hes preaching hate towards others not exactly like himself. Personally, a person walking around with a big sign telling me that I am going to burn in hell b/c I am catholic that to me is a reason to perhaps cool this lunatic off with a glass of water. Who does he think he is after all to say others are going to hell when he is the one preaching intolerance and hate. Users/AshleyOrsaba
20050518 09:21:18 Well, he is clearly wrong. But I am wrong about a lot of things, and you don't throw water on me. Actually, I am never wrong; that was just an example. Also, doing crap like that makes them feel really good about themselves, makes them feel justified, almost like martyrs if you will. That feeling of satisfaction I would prefer not to let them have. That is just sinking below their level and completely uncalled for. You have to remember this is coming from the president of AGASA! Users/GeorgeLewis
20050518 12:53:18 I don't think it's fair to claim that they're preaching hate. These preachers don't hate (for example) homosexuals; they love homosexuals, and want them to go to heaven. But according to the preachers' own beliefs, the homosexuals CAN'T go to heaven until they stop being homosexual, so, out of love, NOT hate, the preachers try to convince the homosexuals that their behavior is wrong. Of course, I disagree with their assumptions here, but I do think that they honestly believe it themselves. Disagree with the preachers if you want, but don't call them hateful. Furthermore, I don't recall ever seeing Brother Jed (the guy who's so proud of his three virgin daughters) with a big sign, so I'm not sure he should be listed here. I like Brother Jed. He dresses nicely, speaks eloquently (although rather loudly), and occasionally makes good points (although his facts often could use a bit more research). I once chased down a thief who stole Jed's Bible and convinced him to give it back; in gratitude, Jed gave me a copy of his book *Who Will Rise Up?* I disagree with Jed on many issues, but he's a pretty cool guy nonetheless. AND he rejects the doctrine of original sin, which gains him a LOT of points in my book. Users/BarnabasTruman
When he was here a few years back (I guess it must have been 1999), one of his daughters, who was about 10 at the time, was there with him. She was an amazing speaker, especially considering the hostility of the crowd. I don't agree with their theology, but still found her very admirable (as well as quite friendly and ordinary when talking one on one), and I've wondered since then who she'll grow up to be. Users/JessicaLuedtke
20050518 15:55:51 I disagree. The preachers (May 16) are preaching with hate and malice. I really don't see how a sign that says You make me SICK could possibly convey love. I think it certainly had a different tone from the April 28th appearance, despite the fact that it's some of the same guys. The earlier signs only said things like WARNING, as if the preachers were trying to help sinners get to heaven, not insult them. Users/AbbyLawson
These were different guys. On the 15th (I think) there were two groups of preachers here, but they didn't come together and didn't really know each other. They didn't plan to be here on the same day. Pastor Tom Griner and company are not nearly as extreme as Jeremiah and Frank. That is why the signs are different. Also, the signs are merely to attract attention. Jeremiah admitted he didn't really have a problem with some of the things on there, for example non-Christian tattoos (as long as they weren't blasphemous or offensive). Either way, any way you look at it, they are pretty much wrong about everything, regardless of their attitudes or motivations. Users/GeorgeLewis GEL
20050518 18:19:18 Do these guys have jobs? How do they eat? Where do they live? Users/ArlenAbraham
20050518 21:53:33 They don't eat... or sleep for that matter. That's why they're so successful. Gratz to all who yelled at them. Just a warning though: engaging in debate with these guys is a bad idea because they don't come to convince people intellectually, but rather to provoke. If you want reasoned debate, I invite you to check out the Agnostic and Atheist Student Association. Anyway, I like having these guys around because it provides for an entertaining lunch hour. Users/JustinKhoo
20050604 09:33:24 I respect most of these people and, in fact, downright admire them. Although I completely disagree with what they are saying, look at what they are doing. These are real people, with real families, real homes. They are not getting paid to do this and are probably not supported financially at all. Within their framework, within how they think, they don't hate Jewish people, or gays, or whatnot, for the most part. Moreover, they are there with the signs to attract the unsaved. They want to save us from our sinful ways so we don't go to hell. They are out there because they want to help us. That is why I respect them although I disagree with them. And oh yeah, the people out there from the LGBT center are total and complete assholes. Users/ChristopherMckenzie
20050722 00:29:45 You may be right that they don't hate gays or Jews, but they certainly hate Mormons. Are they really deserving of any accolades for yelling at people, telling them they're going to suffer for eternity, and telling Mormons they hate them? Users/JustinKhoo
20050722 01:50:45 JustinKhoo... The answer to your question is: no. Users/MichaelGiardina
20060117 16:09:43 That's pretty clever, to connect MLK to anti-homosexuality. Kudos. Users/RoyWright
20060117 21:12:27 The best part about Dick and Luke on the 17th of Jan... this guy ran up while they were mid-protest to announce he was going to go home and Tonight I'm going to assfuck my girlfriend! Users/AdamGhaziTehrani
20060117 22:49:07 I walked through the quad today, late to class, and I saw the signs and a man yelling at the man with the sign. Did anyone stick around to hear the whole bit? And that sign that read MLK does not equal Tolerance? WTF? Users/KarlaFung
20060117 22:55:22 I thought it said MLK (not equal) Intolerance, as a protest back at the preachers? Users/AllisonEriksen
20060515 00:34:09 Aren't racism and sodomy bad things? Users/CarlosOverstreet
20061010 14:32:51 Hey, they were here today, anyone got pictures? The abortion guy joined in so it got rather fun. Users/DavidPoole
20061010 14:35:06 They were there today, but they got outclassed by the fire department demonstrations. At least at the noon passing time, no one seemed to pay them much attention. Users/ErinBadillo
20061010 14:35:53 Actually I am glad these people exist. Users/DavidPoole
20061010 18:39:28 They live to yell at people; I'd say they're addicted to the thrill. One day, everyone should decide to ignore them and they won't have any fun. Users/KarlMogel
20061022 10:43:17 They are probably making such inflammatory signs to get a response of anger and hate. They probably feel that by being virtually crucified and mocked by the populace, they become like Christ on the cross, and therefore better Christians. Martyrs of sorts, despite the fact that they're just acting obnoxious, and are thus getting mocked and attacked. Users/MatthewTom
20061022 15:04:54 I think these guys are funny, and it always puts me in a philosophical mood when I see them yelling their absurdities. Not because of what they're saying, but because people crowd around. The other day there was a guy with a religious sign getting tons of attention and a girl on a skateboard with an I give Free Hugs sign that was getting no attention. Why? Anyway, if someone were to come up with a GOD HATES PREACHERS WITH BIG SIGNS sign, I'd totally pitch in a buck. You could probably get more than you paid for the sign if you put out a hat and a small sign saying help me pay for the big sign. Users/AmyGoogenspa
20061101 16:33:14 Well, at least the majority of homosexuals who are not child molesters aren't going to hell. Users/BrianTrott
20061113 16:52:38 I wish these guys would go away. They just hurt my religion. Unfortunately every group has its nut cases (right-wing nutjobs, terrorists, etc.) and the nuts are always the loudest. These guys do not represent the whole of Christianity; in fact they don't really represent any of it. They don't convert anyone and they are actually driving people away from Christ. Jesus never stood in a public place and screamed at people with a big sign telling them they are going to burn in hell. He visited the homes of the people who were hated the most in his day (prostitutes, tax collectors, Romans) and told them that God loves them, forgives their sins, and wants them to come to heaven. The message these guys preach is completely different, and I appreciate them making a mockery of my religion about as much as a Muslim appreciates a terrorist going on TV saying that it is the will of Allah to kill innocent people. One more thing: I saw a sin-list sign that had football fans on the list and I thought, Football fans?!? Users/JamesHaile
20061116 00:25:12 To speak for the reasonable Christians: they do not represent the teachings of Christianity. Christianity is about love for all people in the world, regardless of what wrong things they do. Another principal teaching is that judging is not something you should do, because we have all done bad things and therefore cannot judge others; if we do, we are hypocrites. So I apologize on behalf of reasonable Christians who do not support the way these people preach their message. If you want to really know about Christianity (not to be converted, just a conversation), feel free to contact me. Users/ChrisDietrich
20070214 02:43:30 These guys are cool. I welcome them as well as equally cool ultraliberals. Users/GregWebb
20070214 11:24:15 Last October they gave me a sin test; I failed and was judged a bad person... I love free speech; it lets nutjobs bring signs to campus and lets everyone else have a laugh. Users/AndrewPeake
20070214 11:43:49 You should have taken that Internet Dante's Test; it's the only true way to know. Users/SteveOstrowski
For some, this issue raises the larger question of what really constitutes hate speech. They wonder, is it necessarily hateful for a person to express a moral conviction that calls into question another person's behavior? In their view, the answer cannot always be yes. Otherwise it would be considered hateful to speak against such things as greedy CEOs, deceitful government officials, theft, or murder, things that social consensus finds abhorrent. On the other hand, they contend, the issue cannot turn only on social consensus. Then the question would become, at exactly what point, in terms of the ratio of public opinion, does it become “hateful” to express a contrary conviction? What do we do with such dissenters? And if the dissenters later become the majority, what then? Therefore, they argue that social consensus alone is an ineffective and unreliable guide for either moral behavior or classifying speech. They claim that the current method of classifying hate speech is inadequate because it categorizes people (for example, preachers) as hateful simply because their message may be offensive, even though they may not actually feel hate toward anyone.
This problem became all too clear in a January 2007 roundtable discussion aired on National Public Radio, wherein panel members and callers struggled unsuccessfully to answer the question of whether certain “hate” words were ever considered appropriate to use, and if so, under what circumstances. Some guests argued that racial epithets such as the “N-word”, or words that are targeted at a person’s sexual orientation, were never appropriate and always qualified as hate speech. Other guests pointed out that many groups use such words amongst themselves in affectionate, playful contexts, and have in fact appropriated them for legitimate use. For example, many gays frequently refer to each other using the “F-word”, and African Americans often use the “N-word” amongst themselves. But the apparent rules become fuzzy because those words are also used affectionately by people across boundary lines. Close friends of different races and/or sexual orientations frequently use “inappropriate” words with each other in an irreverent, playful manner, in contexts where neither party becomes offended. The program’s guests and callers disagreed on whether society should attempt to arbitrarily prohibit or discourage this sort of speech.
Some argue that arbitrary systems of speech classification ultimately fail because the question of what qualifies as hate speech really turns on what the hearers believe about the speakers’ thoughts or intentions.
Some people look to genetics as a possible guideline. If certain behaviors are hardwired into a person’s essential framework by evolution, they argue, then it can hardly be appropriate to call those behaviors into moral question. For decades, there has been an effort in many quarters to find a genetic basis for sexual orientation. Some scientists claim their work on this front has begun to furnish answers, though other scientists disagree. A possible problem for those who seek to explain sexual orientation on the basis of genetics is that scientists also claim to have isolated genetic origins for behaviors such as lust, greed, theft, racism, promiscuity, and adultery. (How convenient, say moralists, that everything our forefathers taught us was immoral, can now be defended on the basis of our genes.) Still other scientists claim to have identified genetic sources for traditionally “good” parts of the human experience, such as faith and love. If everything can be explained on the basis of genetics, then, of course, all moral bets are off. But then offensive speech must receive the same protections, along with speech that is often merely uncomfortable, such as public preaching.
It is also important to remember that the term hate speech is purely subjective, and therefore defies formalized definition or classification, which makes it a dangerous and unpredictable focus of litigation. All speech that carries a viewpoint is, and should continue to be, protected free speech, from Larry Flynt to the Amish. That does not prevent a person from judging somebody else's views as abhorrent, but the ability to voice one's views is one of our first liberties and a cornerstone of a free and equal society. The fact that extreme views of intolerance can exist is a perhaps strange, but certainly eloquent, confirmation of the tolerance and freedom of our society. As a result there are people who support their activities and condemn their views. Eric Robinson
20070226 15:13:02 After being referred to this page by the COTD entry, all I can say is: At least the Otterstads' Christianity is real, not the sugarcoated, trendy coffee shop Christianity that the majority of Christians emulate. Hold those signs high and keep up the good work, Dick and Luke! Users/MarieGruca
It is great to see the unbiased opinion of an individual from out of town, welcome to the wiki? ~DavePoole
There are ways to counter them: http://sostrowski.livejournal.com/24583.html Users/SteveOstrowski
20070228 13:48:00 Out of curiosity, why do they need to be countered? Users/EricRobinson
Political gain, personal satisfaction, and amusement. But when it comes to these types of issues it's best to make a game out of it. The fact of the matter is that Preachers with Signs gives conservative Christians a bad name, but there is also the opportunity to take the high road and dismiss these guys. Users/SteveOstrowski
20070301 11:28:16 Christians probably can't do much to avoid earning what many people consider to be a bad name. People want to sin, and they want to try to feel good about themselves anyway. Some Christians believe that if they love people, they have a responsibility to remind them of the truth, even if it means enduring a predictable backlash. Except for a few fruitcakes, most of these Christians do not relish the response they know they will get, but they accept it. After all, their Leader warned them to expect people to hate them, just as they hated Him, that is, Christ. As one Christian famously put it, Everyone who does evil hates the light, and will not come into the light for fear that his deeds will be exposed. John the Apostle. I respect some of these Preachers with Signs. They make me ask myself if there is anything I believe in strongly enough that I would stand with a sign, surrounded by a crowd of mockers, just in case I might be able to reach one or two. Users/EricRobinson
The fact of the matter is that there are churches and Christian organizations already in place here in Davis and at the university. These preachers come down for a day, cause disruption, and leave. That isn't anywhere close to the hours and dedication the other Christian organizations put into ministry to students. And as for the Bible, if one is disruptive to the congregation by causing factions and leading people away, then those Christian organizations can biblically refuse to associate themselves with these preachers. Users/SteveOstrowski
20070301 15:05:11 Judging from the smiles and bemused expressions on many faces in the pictures, I'd say the disruption is harmless. As you personally stated, you like to screw with them for amusement's sake. As for the Christian organizations on campus, more power to them. My daughter is heavily involved with Fellowship of Christian Athletes. I can easily see both perspectives. Sometimes it is good for outside parties to come in and stir things up. It gets everyone's blood pumping a little faster and makes people think and talk and get creative. If the campus organizations are smart, instead of complaining, they'll figure out a way to springboard off these events without denigrating the preachers involved, some of whom are good men. Users/EricRobinson
20070301 15:48:35 That is a good point. I know a few campuses that have folks pledge to pay five bucks for every hour these guys are on campus. The money goes to the QSU or LGBTIQ center. It would be kinda fun to tell them they are fundraising for us. Users/AndrewPeake
That's the spirit, because every gay pride parade at UC Davis increases donor support for conservative groups on and off the UC Davis campus. Generally, displays of any nature are used by the opposing organizations for exploitation. In reality activists (unless collecting money or influence) are pawns for the other side. Users/SteveOstrowski
20070301 15:55:14 See, now there you go. It could be a symbiotic relationship. My guess is that these guys would not mind a bit to know about the fundraising program as long as they get to preach. Everybody wins. :) Users/EricRobinson
Thanks for adding to this discussion; it has been really positive, I think. Honestly, it would be kinda nice to do even more to make these guys a positive experience, because many see them so negatively. I personally am glad whenever they come, not because I wish them to convert people or spread a message, but because it does indeed make people think. Though some will get angry, they are not harmed, and they will think about it, and about why we allow people here to yell and shout their beliefs. I am honestly glad that these individuals can come to show us what a great land this is, that it allows people the freedom to express themselves and act according to their heart and passion. Does anyone know when they are coming again? ~Dave
I agree to a point, but I would say that events like the Christian/Muslim debate have more knowledge content and inspire more thinking than these guys. Anyone can shout out stuff and be obnoxious. However, if one is theologically inclined you could get a decent debate going. Users/SteveOstrowski
20070301 23:05:51 Rather than debating, I want to be a person of conviction with a clear message and simply trust that those with an ear to hear will hear. That's mainly how Jesus worked. A small % of his audience got it, and the rest were either ambivalent or angry. He didn't debate much with the latter two groups. He just stated the unvarnished truth and let the chips fall. When he did debate, his zingers were so cool that his opponents' jaws dropped, but the evidence suggests that these came extemporaneously from the Father, not from clever argumentation. Interestingly, it does say that Jesus raised his voice loudly and shouted stuff in public places. His critics probably considered him arrogant, obnoxious, and narrow-minded when he said stuff like, All you who are weary and weighed down, come to me and I will give you rest, and No man comes to the Father except by me, and I am the Good Shepherd. All who came before me were thieves and robbers. Users/EricRobinson
There should only be one united Church where there is only one message. Preachers with Signs divides Christians and disrupts their operations. There is the carrot and then there is the stick. You will find that the stick doesn't usually work on a university campus. Your morality may be sound, but the tone and body language read as hate to people. It means you need a better PR department, not necessarily a new message. But I think that generally people like Preachers with Signs divide Christian denominations into smaller denominations and sects, and this division is not helpful. Users/SteveOstrowski
20081120 15:02:37 The You make me sick sign makes me sick. But People that talk to pets more than God made me laugh. Hey, if God is omnipresent, aren't pets God?
Yes. Users/gurglemeow
20090116 22:13:29 Our very own preachers with signs made it onto holytaco.com. The preachers with signs have become an internet phenomenon!
Link:
http://www.holytaco.com/innermonologuechristianextremistmakingsign Users/AynReyes
20090227 14:37:18 I saw someone trying to debate with one of them a few weeks ago. He just yelled at them and talked so loud that the other person couldn't say anything. I'm pretty sure he then proclaimed himself the winner because the other person walked away. Users/WaylandLee
20100324 01:48:07 I was pleased to find that God had not permitted the Westboro Baptist Church folks to make it out of Kansas this year; apparently they had car trouble. They would probably blame the devil, but I suppose that's beside the point. My name is Mike; that's me holding the God does not hate you (and I think you're pretty cool) sign in Chris's final pic. I suppose that if God needs us to tell people how evil and vile they are, then he isn't God.
There was recently a priest in Russia, Fr. Danil Sysoyev, who preached that any religion touting a God who requires the blood of those who disagree with Him is but a demonic lie. Fr. Danil was murdered by an Islamic extremist shortly thereafter. His final sermon offers a healthy and nonjudgmental correction to these self-styled preachers of the God who so loved the world: http://www.youtube.com/watch?v=unIrR38NHf8 . What kind of twisted love places such impossible conditions on its intended recipients? Love me or I will kill you is not the love which Jesus reveals in the Gospel.
The preachers have a right to free speech on a public campus. According to the Constitution, unless they are infringing upon public health, safety, or morals, they can say whatever they want. I happen to be a big supporter of the Constitution.
Nevertheless, there needs to be a deliberate and conscious response on the part of the Davis community, and specifically the Christian subset of that community. The university cannot, and I believe should not, involve itself, so I guess it's up to us.
I am pleased with the growing response from the local Christian community. I believe the best response it can make is twofold: one, it should offer an alternative voice to onlookers without offering any attention to the visiting preachers, and two, it should provide some positive distraction. The worst response possible would be to offer any attention to the itinerant preachers. I have heard that this past year there were plans for the Davis Swing Dancers club (which has a large overlap with the local evangelical Christian community) to have a prolonged dancing event on the quad for the duration of the preachers' visit to serve as a distraction. I have been told the event was still a success despite the preachers being absent, and I hope it will continue to be a success in the future when they are present.
On the whole, I believe that the Davis community is intelligent enough to know that the name of Jesus is not a weapon, and that the sign-wielders do not offer a fair representation of the Christian message. For those who would like to chalk it all up to a blanket category of organized religion, a social phenomenon, I would suggest that the same kind of hateful speech can be made without religion, and that the arts as a non-religious social phenomenon could just as easily be implicated as a vessel of hate in the hands of the wrong persons, e.g. Josef Goebbels. I doubt there are many anti-religionists who would suggest we do away with the arts as a solution to our social ills.
To conclude: you don't have to be straight, Calvinist, a biblical inerrantist, anti-feminist, or even pro-life for God to love you. You don't even have to believe in Him, if I may use the masculine pronoun. All you have to do is be. That's enough. How you respond to that love is completely up to you, because love is found only in freedom.
Users/MichaelLahr
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj1synthconj5 : forall (lv0 : natural), (@eq natural (lv0) lv0).
Admitted.
QuickChick conj1synthconj5.
|
Require Import LTactics GenEncode.
Require Export LNat.
(** ** Encoding for L-terms *)
Run TemplateProgram (tmGenEncode "term_enc" term).
Hint Resolve term_enc_correct : Lrewrite.
(* register the non-constant constructors *)
Instance term_var : computableTime var (fun n _ => (1, tt)).
Proof.
extract constructor. solverec.
Defined.
Instance term_app : computableTime app (fun s1 _ => (1, (fun s2 _ => (1, tt)))).
Proof.
extract constructor. solverec.
Defined.
Instance term_lam : computableTime lam (fun s _ => (1, tt)).
Proof.
extract constructor. solverec.
Defined.
|
lemma triangle_points_closer: fixes a::complex shows "\<lbrakk>x \<in> convex hull {a,b,c}; y \<in> convex hull {a,b,c}\<rbrakk> \<Longrightarrow> norm(x - y) \<le> norm(a - b) \<or> norm(x - y) \<le> norm(b - c) \<or> norm(x - y) \<le> norm(c - a)"
|
<!-- dom:TITLE: PHY321: Two-body problems, gravitational forces, scattering and begin Lagrangian formalism -->
# PHY321: Two-body problems, gravitational forces, scattering and begin Lagrangian formalism
<!-- dom:AUTHOR: [Morten Hjorth-Jensen](http://mhjgit.github.io/info/doc/web/) at Department of Physics and Astronomy and Facility for Rare Ion Beams (FRIB), Michigan State University, USA & Department of Physics, University of Oslo, Norway -->
<!-- Author: -->
**[Morten Hjorth-Jensen](http://mhjgit.github.io/info/doc/web/)**, Department of Physics and Astronomy and Facility for Rare Ion Beams (FRIB), Michigan State University, USA and Department of Physics, University of Oslo, Norway
Date: **Mar 29, 2021**
Copyright 1999-2021, [Morten Hjorth-Jensen](http://mhjgit.github.io/info/doc/web/). Released under CC Attribution-NonCommercial 4.0 license
## Aims and Overarching Motivation
### Monday
1. Physical interpretation of various orbit types and summary gravitational forces, examples on whiteboard and handwritten notes
2. Start discussion two-body scattering
**Reading suggestion**: Taylor chapter 8 and sections 14.1-14.2 and Lecture notes
### Wednesday
1. Two-body scattering
**Reading suggestion**: Taylor and sections 14.3-14.6
### Friday
1. Lagrangian formalism
**Reading suggestion**: Taylor Sections 6.1-6.2
### Code example with gravitational force
The code example here is meant to illustrate how we can make a plot of the final orbit. We solve the equations in polar coordinates (the example here uses the minimum of the potential as initial value) and then we transform back to cartesian coordinates and plot $x$ versus $y$. We see that we get a perfect circle when we place ourselves at the minimum of the potential energy, as expected.
```python
%matplotlib inline
# Common imports
import numpy as np
import pandas as pd
from math import *
import matplotlib.pyplot as plt
# Simple Gravitational Force -alpha/r
DeltaT = 0.01
#set up arrays
tfinal = 8.0
n = ceil(tfinal/DeltaT)
# set up arrays for t, v and r
t = np.zeros(n)
v = np.zeros(n)
r = np.zeros(n)
phi = np.zeros(n)
x = np.zeros(n)
y = np.zeros(n)
# Constants of the model, setting all variables to one for simplicity
alpha = 1.0
AngMom = 1.0 # The angular momentum
m = 1.0 # scale mass to one
c1 = AngMom*AngMom/(m*m)
c2 = AngMom*AngMom/m
rmin = (AngMom*AngMom/m/alpha)
# Initial conditions, place yourself at the potential min
r0 = rmin
v0 = 0.0 # starts at rest
r[0] = r0
v[0] = v0
phi[0] = 0.0
# Start integrating using the Velocity-Verlet method
for i in range(n-1):
# Set up acceleration
a = -alpha/(r[i]**2)+c1/(r[i]**3)
# update velocity, time and position using the Velocity-Verlet method
r[i+1] = r[i] + DeltaT*v[i]+0.5*(DeltaT**2)*a
anew = -alpha/(r[i+1]**2)+c1/(r[i+1]**3)
v[i+1] = v[i] + 0.5*DeltaT*(a+anew)
t[i+1] = t[i] + DeltaT
phi[i+1] = t[i+1]*c2/(r0**2)
# Find cartesian coordinates for easy plot
x = r*np.cos(phi)
y = r*np.sin(phi)
fig, ax = plt.subplots(3,1)
ax[0].set_xlabel('time')
ax[0].set_ylabel('radius')
ax[0].plot(t,r)
ax[1].set_xlabel('time')
ax[1].set_ylabel('Angle $\cos{\phi}$')
ax[1].plot(t,np.cos(phi))
ax[2].set_ylabel('y')
ax[2].set_xlabel('x')
ax[2].plot(x,y)
#save_fig("Phasespace")
plt.show()
```
## Changing initial conditions
Try to change the initial value for $r$ and see what kind of orbits you get.
In order to test different energies, it can be useful to look at the plot of the effective potential discussed above.
However, for orbits different from a circle the above code would need modifications in order to allow us to display, say, an ellipse. For the latter, it is much easier to run our code in cartesian coordinates, as done here. In this code we also test energy conservation and see that it is conserved to numerical precision. The code here is a simple extension of the code we developed for homework 4.
```python
%matplotlib inline
# Common imports
import numpy as np
import pandas as pd
from math import *
import matplotlib.pyplot as plt
DeltaT = 0.01
#set up arrays
tfinal = 10.0
n = ceil(tfinal/DeltaT)
# set up arrays
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
E = np.zeros(n)
# Constants of the model
m = 1.0 # mass, you can change these
alpha = 1.0
# Initial conditions as compact 2-dimensional arrays
x0 = 0.5; y0= 0.
r0 = np.array([x0,y0])
v0 = np.array([0.0,1.0])
r[0] = r0
v[0] = v0
rabs = sqrt(sum(r[0]*r[0]))
E[0] = 0.5*m*(v[0,0]**2+v[0,1]**2)-alpha/rabs
# Start integrating using the Velocity-Verlet method
for i in range(n-1):
# Set up the acceleration
rabs = sqrt(sum(r[i]*r[i]))
a = -alpha*r[i]/(rabs**3)
# update velocity, time and position using the Velocity-Verlet method
r[i+1] = r[i] + DeltaT*v[i]+0.5*(DeltaT**2)*a
rabs = sqrt(sum(r[i+1]*r[i+1]))
anew = -alpha*r[i+1]/(rabs**3)
v[i+1] = v[i] + 0.5*DeltaT*(a+anew)
E[i+1] = 0.5*m*(v[i+1,0]**2+v[i+1,1]**2)-alpha/rabs
t[i+1] = t[i] + DeltaT
# Plot position as function of time
fig, ax = plt.subplots(3,1)
ax[0].set_ylabel('y')
ax[0].set_xlabel('x')
ax[0].plot(r[:,0],r[:,1])
ax[1].set_xlabel('time')
ax[1].set_ylabel('x position')
ax[1].plot(t,r[:,0])
ax[2].set_xlabel('time')
ax[2].set_ylabel('y position')
ax[2].plot(t,r[:,1])
fig.tight_layout()
#save_fig("2DimGravity")
plt.show()
print(E)
```
## Scattering and Cross Sections
Scattering experiments don't measure entire trajectories. For elastic
collisions, they measure the distribution of final scattering angles
at best. Most experiments use targets thin enough so that the number
of scatterings is typically zero or one. The cross section, $\sigma$,
describes the cross-sectional area for particles to scatter with an
individual target atom or nucleus. Cross section measurements form the
basis for MANY fields of physics. The cross section, and the
differential cross section, encapsulate everything measurable for a
collision where all that is measured is the final state, e.g. the
outgoing particle had momentum $\boldsymbol{p}_f$. By studying cross sections,
one can infer information about the potential interaction between the
two particles. Inferring, or constraining, the potential from the
cross section is a classic *inverse* problem. Collisions are
either elastic or inelastic. Elastic collisions are those for which
the two bodies are in the same internal state before and after the
collision. If the collision excites one of the participants into a
higher state, or transforms the particles into different species, or
creates additional particles, the collision is inelastic. Here, we
consider only elastic collisions.
## Scattering: Coulomb forces
For Coulomb forces, the cross section is infinite because the range of
the Coulomb force is infinite, but for interactions such as the strong
interaction in nuclear or particle physics, there is no long-range
force and cross-sections are finite. Even for Coulomb forces, the part
of the cross section that corresponds to a specific scattering angle,
$d\sigma/d\Omega$, which is a function of the scattering angle
$\theta_s$, is still finite.
If a particle travels through a thin target, the chance the particle
scatters is $P_{\rm scatt}=\sigma dN/dA$, where $dN/dA$ is the number
of scattering centers per area the particle encounters. If the density
of the target is $\rho$ particles per volume, and if the thickness of
the target is $t$, the areal density (number of target scatterers per
area) is $dN/dA=\rho t$. Because one wishes to quantify the collisions
independently of the target, experimentalists measure scattering
probabilities, then divide by the areal density to obtain
cross-sections,
$$
\begin{eqnarray}
\sigma=\frac{P_{\rm scatt}}{dN/dA}.
\end{eqnarray}
$$
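As a quick numerical illustration (a minimal sketch; the beam and target numbers below are made up for the example, not taken from an experiment), one can back out a cross section from a measured scattering probability and the areal density of a thin target:

```python
# Illustrative numbers only: a thin gold foil and an assumed scattering probability
rho = 5.9e28                    # scattering centers per m^3 (roughly the atomic density of gold)
thickness = 1.0e-6              # target thickness t in m
areal_density = rho*thickness   # dN/dA in m^-2
P_scatt = 1.2e-5                # assumed fraction of beam particles that scattered
sigma = P_scatt/areal_density   # cross section in m^2
print(f'sigma = {sigma:.2e} m^2 = {sigma/1e-28:.2f} barn')
```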
## Scattering, more details
Instead of merely stating that a particle collided, one can measure
the probability the particle scattered by a given angle. The
scattering angle $\theta_s$ is defined so that at zero the particle is
unscattered and at $\theta_s=\pi$ the particle is scattered directly
backward. Scattering angles are often described in the center-of-mass
frame, but that is a detail we will neglect for this first discussion,
where we will consider the scattering of particles moving classically
under the influence of fixed potentials $U(\boldsymbol{r})$. Because the
distribution of scattering angles can be measured, one expresses the
differential cross section,
<!-- Equation labels as ordinary links -->
<div id="_auto1"></div>
$$
\begin{equation}
\frac{d^2\sigma}{d\cos\theta_s~d\phi}.
\label{_auto1} \tag{1}
\end{equation}
$$
Usually, the literature expresses differential cross sections as
<!-- Equation labels as ordinary links -->
<div id="_auto2"></div>
$$
\begin{equation}
d\sigma/d\Omega=\frac{d\sigma}{d\cos\theta d\phi}=\frac{1}{2\pi}\frac{d\sigma}{d\cos\theta},
\label{_auto2} \tag{2}
\end{equation}
$$
where the last equivalency is true when the scattering does not depend
on the azimuthal angle $\phi$, as is the case for spherically
symmetric potentials.
The differential solid angle $d\Omega$ can be thought of as the area
subtended by a measurement, $dA_d$, divided by $r^2$, where $r$ is the
distance to the detector,
$$
\begin{eqnarray}
dA_d=r^2 d\Omega.
\end{eqnarray}
$$
With this definition $d\sigma/d\Omega$ is independent of the distance
from which one places the detector, or the size of the detector (as
long as it is small).
## Differential scattering cross sections
Differential scattering cross sections are calculated by assuming a
random distribution of impact parameters $b$. These represent the
distance in the $xy$ plane for particles moving in the $z$ direction
relative to the scattering center. An impact parameter $b=0$ refers to
being aimed directly at the target's center. The impact parameter
describes the transverse distance from the $z=0$ axis for the
trajectory when it is still far away from the scattering center and
has not yet passed it. The differential cross section can be expressed
in terms of the impact parameter,
<!-- Equation labels as ordinary links -->
<div id="_auto3"></div>
$$
\begin{equation}
d\sigma=2\pi bdb,
\label{_auto3} \tag{3}
\end{equation}
$$
which is the area of a thin ring of radius $b$ and thickness $db$. In
classical physics, one can calculate the trajectory given the incoming
kinetic energy $E$ and the impact parameter if one knows the mass and
potential.
## More on Differential Cross Sections
From the trajectory, one then finds the scattering angle
$\theta_s(b)$. The differential cross section is then
<!-- Equation labels as ordinary links -->
<div id="_auto4"></div>
$$
\begin{equation}
\frac{d\sigma}{d\Omega}=\frac{1}{2\pi}\frac{d\sigma}{d\cos\theta_s}=b\frac{db}{d\cos\theta_s}=\frac{b}{(d/db)\cos\theta_s(b)}.
\label{_auto4} \tag{4}
\end{equation}
$$
Typically, one would calculate $\cos\theta_s$ and $(d/db)\cos\theta_s$
as functions of $b$. This is sufficient to plot the differential cross
section as a function of $\theta_s$.
The total cross section is
<!-- Equation labels as ordinary links -->
<div id="_auto5"></div>
$$
\begin{equation}
\sigma_{\rm tot}=\int d\Omega\frac{d\sigma}{d\Omega}=2\pi\int d\cos\theta_s~\frac{d\sigma}{d\Omega}.
\label{_auto5} \tag{5}
\end{equation}
$$
Even if the total cross section is infinite, e.g. Coulomb forces, one
can still have a finite differential cross section as we will see
later on.
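Before specializing to the Coulomb case, it is instructive to check Eq. ([4](#_auto4)) numerically for a potential where the answer is known in closed form. The sketch below (an added illustration, not part of the original notes) uses hard-sphere scattering, where $b=R\cos(\theta_s/2)$ and the differential cross section is the constant $R^2/4$:

```python
import numpy as np

# Hard-sphere scattering off a fixed sphere of radius R: b = R*cos(theta_s/2),
# hence cos(theta_s) = 2*(b/R)**2 - 1, and Eq. (4) should give dsigma/dOmega = R^2/4.
R = 1.0
b = np.linspace(1e-4, R - 1e-4, 1001)
costheta = 2*(b/R)**2 - 1.0               # cos(theta_s) as a function of b
dcostheta_db = np.gradient(costheta, b)   # numerical derivative (d/db)cos(theta_s)
dsigma_dOmega = b/dcostheta_db            # Eq. (4)
print(dsigma_dOmega[::250])               # every entry should be close to R**2/4 = 0.25
```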
## Rutherford Scattering
This refers to the calculation of $d\sigma/d\Omega$ due to an inverse
square force, $F_{12}=\pm\alpha/r^2$ for repulsive/attractive
interaction. Rutherford compared the scattering of $\alpha$ particles
($^4$He nuclei) off of a nucleus and found the scattering angle at
which the formula began to fail. This corresponded to the impact
parameter for which the trajectories would strike the nucleus. This
provided the first measure of the size of the atomic nucleus. At the
time, the distribution of the positive charge (the protons) was
considered to be just as spread out amongst the atomic volume as the
electrons. After Rutherford's experiment, it was clear that the radius
of the nucleus tended to be roughly 4 orders of magnitude smaller than
that of the atom, which is less than the size of a football relative
to Spartan Stadium.
## Rutherford Scattering, more details
In order to calculate differential cross section, we must find how the
impact parameter is related to the scattering angle. This requires
analysis of the trajectory. We consider our previous expression for
the trajectory where we derived the elliptic form for the trajectory,
For that case we considered an attractive
force with the particle's energy being negative, i.e. it was
bound. However, the same form will work for positive energy, and
repulsive forces can be considered by simply flipping the sign of
$\alpha$. For positive energies, the trajectories will be hyperbolas,
rather than ellipses, with the asymptotes of the trajectories
representing the directions of the incoming and outgoing
tracks.
## Rutherford Scattering, final trajectories
We have
<!-- Equation labels as ordinary links -->
<div id="eq:ruthtraj"></div>
$$
\begin{equation}\label{eq:ruthtraj} \tag{6}
r=\frac{1}{\frac{m\alpha}{L^2}+A\cos\theta}.
\end{equation}
$$
Once $A$ is large enough, which will happen when the energy is
positive, the denominator will become negative for a range of
$\theta$; these are the angles that the scattered particle never
reaches. The asymptotic angles $\theta'$ are those for which
the denominator goes to zero,
<!-- Equation labels as ordinary links -->
<div id="_auto6"></div>
$$
\begin{equation}
\cos\theta'=-\frac{m\alpha}{AL^2}.
\label{_auto6} \tag{7}
\end{equation}
$$
## Rutherford Scattering, Closest Approach
The trajectory's point of closest approach is at $\theta=0$ and the
two angles $\theta'$, which have this value of $\cos\theta'$, are the
angles of the incoming and outgoing particles. From
Fig (**to come**), one can see that the scattering angle
$\theta_s$ is given by,
<!-- Equation labels as ordinary links -->
<div id="eq:sthetover2"></div>
$$
\begin{eqnarray}
\label{eq:sthetover2} \tag{8}
2\theta'-\pi&=&\theta_s,~~~\theta'=\frac{\pi}{2}+\frac{\theta_s}{2},\\
\nonumber
\sin(\theta_s/2)&=&-\cos\theta'\\
\nonumber
&=&\frac{m\alpha}{AL^2}.
\end{eqnarray}
$$
Now that we have $\theta_s$ in terms of $m,\alpha,L$ and $A$, we wish
to re-express $L$ and $A$ in terms of the impact parameter $b$ and the
energy $E$. This will set us up to calculate the differential cross
section, which requires knowing $db/d\theta_s$. It is easy to write
the angular momentum as
<!-- Equation labels as ordinary links -->
<div id="_auto7"></div>
$$
\begin{equation}
L^2=p_0^2b^2=2mEb^2.
\label{_auto7} \tag{9}
\end{equation}
$$
## Rutherford Scattering, getting there
Finding $A$ is more complicated. To accomplish this we realize that
the point of closest approach occurs at $\theta=0$, so from
Eq. ([6](#eq:ruthtraj))
<!-- Equation labels as ordinary links -->
<div id="eq:rminofA"></div>
$$
\begin{eqnarray}
\label{eq:rminofA} \tag{10}
\frac{1}{r_{\rm min}}&=&\frac{m\alpha}{L^2}+A,\\
\nonumber
A&=&\frac{1}{r_{\rm min}}-\frac{m\alpha}{L^2}.
\end{eqnarray}
$$
Next, $r_{\rm min}$ can be found in terms of the energy because at the
point of closest approach the kinetic energy is due purely to the
motion perpendicular to $\hat{r}$ and
<!-- Equation labels as ordinary links -->
<div id="_auto8"></div>
$$
\begin{equation}
E=-\frac{\alpha}{r_{\rm min}}+\frac{L^2}{2mr_{\rm min}^2}.
\label{_auto8} \tag{11}
\end{equation}
$$
## Rutherford Scattering, More Manipulations
One can solve the quadratic equation for $1/r_{\rm min}$,
<!-- Equation labels as ordinary links -->
<div id="_auto9"></div>
$$
\begin{equation}
\frac{1}{r_{\rm min}}=\frac{m\alpha}{L^2}+\sqrt{(m\alpha/L^2)^2+2mE/L^2}.
\label{_auto9} \tag{12}
\end{equation}
$$
We can plug the expression for $r_{\rm min}$ into the expression for $A$, Eq. ([10](#eq:rminofA)),
<!-- Equation labels as ordinary links -->
<div id="_auto10"></div>
$$
\begin{equation}
A=\sqrt{(m\alpha/L^2)^2+2mE/L^2}=\sqrt{\alpha^2/(4E^2b^4)+1/b^2}
\label{_auto10} \tag{13}
\end{equation}
$$
## Rutherford Scattering, final expression
Finally, we insert the expression for $A$ into that for the scattering angle, Eq. ([8](#eq:sthetover2)),
<!-- Equation labels as ordinary links -->
<div id="eq:scattangle"></div>
$$
\begin{eqnarray}
\label{eq:scattangle} \tag{14}
\sin(\theta_s/2)&=&\frac{m\alpha}{AL^2}\\
\nonumber
&=&\frac{a}{\sqrt{a^2+b^2}}, ~~a\equiv \frac{\alpha}{2E}
\end{eqnarray}
$$
## Rutherford Scattering, the Differential Cross Section
The differential cross section can now be found by differentiating the
expression for $\theta_s$ with $b$,
<!-- Equation labels as ordinary links -->
<div id="eq:rutherford"></div>
$$
\begin{eqnarray}
\label{eq:rutherford} \tag{15}
\frac{1}{2}\cos(\theta_s/2)d\theta_s&=&\frac{ab~db}{(a^2+b^2)^{3/2}}=\frac{bdb}{a^2}\sin^3(\theta_s/2),\\
\nonumber
d\sigma&=&2\pi bdb=\frac{\pi a^2}{\sin^3(\theta_s/2)}\cos(\theta_s/2)d\theta_s\\
\nonumber
&=&\frac{\pi a^2}{2\sin^4(\theta_s/2)}\sin\theta_s d\theta_s\\
\nonumber
\frac{d\sigma}{d\cos\theta_s}&=&\frac{\pi a^2}{2\sin^4(\theta_s/2)},\\
\nonumber
\frac{d\sigma}{d\Omega}&=&\frac{a^2}{4\sin^4(\theta_s/2)}.
\end{eqnarray}
$$
where $a= \alpha/(2E)$. This is the Rutherford formula for the differential
cross section. It diverges as $\theta_s\rightarrow 0$ because
scatterings with arbitrarily large impact parameters still scatter to
arbitrarily small scattering angles. The expression for
$d\sigma/d\Omega$ is the same whether the interaction is repulsive or
attractive.
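A quick plot of Eq. ([15](#eq:rutherford)) (an added illustration, in units where $a=\alpha/(2E)=1$) makes the strong forward peaking and the divergence at $\theta_s\rightarrow 0$ visible:

```python
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt

# Rutherford differential cross section, Eq. (15), in units where a = alpha/(2E) = 1
theta = np.linspace(0.05, np.pi, 500)   # avoid theta_s = 0, where the formula diverges
dsigma_dOmega = 1.0/(4.0*np.sin(theta/2)**4)
plt.semilogy(theta, dsigma_dOmega)
plt.xlabel(r'$\theta_s$ [rad]')
plt.ylabel(r'$d\sigma/d\Omega$ [units of $a^2$]')
plt.show()
```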
## Rutherford Scattering, Example
Consider a particle of mass $m$ and charge $z$ with kinetic energy $E$
(Let it be the center-of-mass energy) incident on a heavy nucleus of
mass $M$ and charge $Z$ and radius $R$. We want to find the angle at which the
Rutherford scattering formula breaks down.
Let $\alpha=Zze^2/(4\pi\epsilon_0)$. The scattering angle in Eq. ([14](#eq:scattangle)) is
$$
\sin(\theta_s/2)=\frac{a}{\sqrt{a^2+b^2}}, ~~a\equiv \frac{\alpha}{2E}.
$$
The impact parameter $b$ for which the point of closest approach
equals $R$ can be found by using angular momentum conservation,
$$
\begin{eqnarray*}
p_0b&=&b\sqrt{2mE}=Rp_f=R\sqrt{2m(E-\alpha/R)},\\
b&=&R\frac{\sqrt{2m(E-\alpha/R)}}{\sqrt{2mE}}\\
&=&R\sqrt{1-\frac{\alpha}{ER}}.
\end{eqnarray*}
$$
## Rutherford Scattering, Example, wrapping up
Putting these together
$$
\theta_s=2\sin^{-1}\left\{
\frac{a}{\sqrt{a^2+R^2(1-\alpha/(RE))}}
\right\},~~~a=\frac{\alpha}{2E}.
$$
It was from this departure of the experimentally measured
$d\sigma/d\Omega$ from the Rutherford formula that allowed Rutherford
to infer the radius of the gold nucleus, $R$.
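Plugging in concrete numbers (an added sketch; the beam energy and nuclear radius below are assumptions chosen for illustration, not values from the notes) gives a feel for where the formula fails:

```python
import numpy as np

# Illustrative inputs: alpha particles (z=2) on gold (Z=79), with an assumed
# beam energy above the Coulomb barrier and R ~ 9 fm for the touching radii.
# We use e^2/(4*pi*eps0) = 1.44 MeV*fm.
z, Z = 2, 79
E = 30.0                 # center-of-mass kinetic energy in MeV (assumed)
R = 9.0                  # fm (assumed)
alpha = Z*z*1.44         # MeV*fm
a = alpha/(2*E)          # fm
theta_s = 2*np.arcsin(a/np.sqrt(a**2 + R**2*(1 - alpha/(R*E))))
print(f'Rutherford scattering breaks down for theta_s > {np.degrees(theta_s):.0f} degrees')
```

Note that for energies below the Coulomb barrier, $\alpha/(RE)>1$ and no classical trajectory reaches $r_{\rm min}=R$, so the Rutherford formula holds at all angles.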
## Variational Calculus
The calculus of variations involves
problems where the quantity to be minimized or maximized is an integral.
The usual minimization problem one faces involves taking a function
${\cal L}(x)$, then finding the single value $x$ for which ${\cal L}$
is either a maximum or minimum. In multivariate calculus one also
learns to solve problems where you minimize for multiple variables,
${\cal L}(x_1,x_2,\cdots x_n)$, and finding the points $(x_1,\cdots,
x_n)$ in an $n$-dimensional space that maximize or minimize the
function. Here, we consider what seems to be a much more ambitious
problem. Imagine you have a function ${\cal L}(x(t),\dot{x}(t),t)$,
and you wish to find the extrema for an infinite number of values of
$x$, i.e. $x$ at each point $t$. The function ${\cal L}$ will not only
depend on $x$ at each point $t$, but also on the slope at each point,
plus an additional dependence on $t$. Note we are NOT finding an
optimum value of $t$, we are finding the set of optimum values of $x$
at each point $t$, or equivalently, finding the function $x(t)$.
## Variational Calculus, introducing the action
One treats the function $x(t)$ as being unknown while minimizing the action
$$
S=\int_{t_1}^{t_2}dt~{\cal L}(x(t),\dot{x}(t),t).
$$
Thus, we are minimizing $S$ with respect to an infinite number of
values of $x(t_i)$ at points $t_i$. As an additional criterion, we will
assume that $x(t_1)$ and $x(t_2)$ are fixed, and that we will
only consider variations of $x$ between the boundaries. The dependence
on the derivative, $\dot{x}=dx/dt$, is crucial because otherwise the
solution would involve simply finding the one value of $x$ that
minimized ${\cal L}$, and $x(t)$ would equal a constant if there were no
explicit $t$ dependence. Furthermore, $x$ wouldn't need to be
continuous at the boundary.
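To make "minimizing with respect to an infinite number of values of $x(t_i)$" concrete, the sketch below (an added illustration, not part of the original notes) discretizes the action for a particle in a harmonic potential, fixes the endpoints, and hands the interior grid points to a standard optimizer. The minimizing path reproduces the classical trajectory:

```python
import numpy as np
from scipy.optimize import minimize

# Discretized action for L = 0.5*m*xdot^2 - 0.5*k*x^2 with fixed endpoints
# x(0) = 0 and x(T) = 1; the interior grid values are the variational parameters.
m, k, T, N = 1.0, 1.0, 1.0, 50
t = np.linspace(0.0, T, N)
dt = t[1] - t[0]
x0, xT = 0.0, 1.0

def action(x_inner):
    x = np.concatenate(([x0], x_inner, [xT]))
    xdot = np.diff(x)/dt                 # slope on each interval
    xmid = 0.5*(x[1:] + x[:-1])          # midpoint values for the potential term
    return np.sum(0.5*m*xdot**2 - 0.5*k*xmid**2)*dt

res = minimize(action, np.linspace(x0, xT, N)[1:-1], method='BFGS')
x_num = np.concatenate(([x0], res.x, [xT]))
omega = np.sqrt(k/m)
x_classical = xT*np.sin(omega*t)/np.sin(omega*T)  # exact path for these endpoints
print('max deviation from classical path:', np.max(np.abs(x_num - x_classical)))
```

Here $T$ is shorter than half an oscillation period, so the stationary path is a genuine minimum of the discretized action rather than a saddle point.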
## Variational Calculus, general Action
In the general case we have an integral of the type
$$
S[q]= \int_{t_1}^{t_2} {\cal L}(q(t),\dot{q}(t),t)dt,
$$
where $S$ is the quantity which is sought minimized or maximized. The
problem is that although ${\cal L}$ is a function of the general variables
$q(t),\dot{q}(t),t$ (note our change of variables), the exact dependence of $q$ on $t$ is not known.
This means again that even though the integral has fixed limits $t_1$
and $t_2$, the path of integration is not known. In our case the unknown
quantities are the positions and general velocities of a given number
of objects and we wish to choose an integration path which makes the
functional $S[q]$ stationary. This means that we want to find minima,
maxima, or saddle points. In physics we normally search for minima.
Our task is therefore to find the minimum of $S[q]$ so that its
variation $\delta S$ is zero subject to specific constraints. The
constraints can be treated via the technique of Lagrangian multipliers
as we will see below.
## Variational Calculus, Optimal Path
We assume the existence of an optimum path, that is a path for which
$S[q]$ is stationary. There are infinitely many such paths. The
difference between two paths $\delta q$ is called the variation of
$q$.
We call the variation $\eta(t)$ and it is scaled by a factor $\alpha$.
The function $\eta(t)$ is arbitrary except for
$$
\eta(t_1)=\eta(t_2)=0,
$$
and we assume that we can model the change in $q$ as
$$
q(t,\alpha) = q(t)+\alpha\eta(t),
$$
and
$$
\delta q = q(t,\alpha) -q(t,0)=\alpha\eta(t).
$$
## Variational Calculus, Condition for an Extreme Value
We choose $q(t,\alpha=0)$ as the unknown path that will minimize $S$. The value
$q(t,\alpha\ne 0)$ describes a neighbouring path.
We have
$$
S[q(\alpha)]= \int_{t_1}^{t_2} {\cal L}(q(t,\alpha),\dot{q}(t,\alpha),t)dt.
$$
The condition for an extreme of
$$
S[q(\alpha)]= \int_{t_1}^{t_2} {\cal L}(q(t,\alpha),\dot{q}(t,\alpha),t)dt,
$$
is
$$
\left[\frac{\partial S[q(\alpha)]}{\partial \alpha}\right]_{\alpha=0} =0.
$$
## Variational Calculus, $\alpha$ Dependence
The $\alpha$ dependence is contained in $q(t,\alpha)$ and $\dot{q}(t,\alpha)$ meaning that
$$
\left[\frac{\partial S[q(\alpha)]}{\partial \alpha}\right]=\int_{t_1}^{t_2} \left( \frac{\partial {\cal L}}{\partial q}\frac{\partial q}{\partial \alpha}+\frac{\partial {\cal L}}{\partial \dot{q}}\frac{\partial \dot{q}}{\partial \alpha}\right)dt.
$$
We have defined
$$
\frac{\partial q(t,\alpha)}{\partial \alpha}=\eta(t)
$$
and thereby
$$
\frac{\partial \dot{q}(t,\alpha)}{\partial \alpha}=\frac{d(\eta(t))}{dt}.
$$
## Integrating by Parts
Using
$$
\frac{\partial q(t,\alpha)}{\partial \alpha}=\eta(t),
$$
and
$$
\frac{\partial \dot{q}(t,\alpha)}{\partial \alpha}=\frac{d(\eta(t))}{dt},
$$
in the integral gives
$$
\left[\frac{\partial S[q(\alpha)]}{\partial \alpha}\right]=\int_{t_1}^{t_2} \left( \frac{\partial {\cal L}}{\partial q}\eta(t)+\frac{\partial {\cal L}}{\partial \dot{q}}\frac{d(\eta(t))}{dt}\right)dt.
$$
Integrating the second term by parts
$$
\int_{t_1}^{t_2} \frac{\partial {\cal L}}{\partial \dot{q}}\frac{d(\eta(t))}{dt}dt =\eta(t)\frac{\partial {\cal L}}{\partial \dot{q}}\Big|_{t_1}^{t_2}-
\int_{t_1}^{t_2} \eta(t)\frac{d}{dt}\frac{\partial {\cal L}}{\partial \dot{q}}dt,
$$
and since the first term disappears due to $\eta(t_1)=\eta(t_2)=0$, we obtain
$$
\left[\frac{\partial S[q(\alpha)]}{\partial \alpha}\right]=\int_{t_1}^{t_2} \left( \frac{\partial {\cal L}}{\partial q}-\frac{d}{dt}\frac{\partial {\cal L}}{\partial \dot{q}}
\right)\eta(t)dt=0.
$$
## Euler-Lagrange Equations
The latter can be written as
$$
\left[\frac{\partial S[q(\alpha)]}{\partial \alpha}\right]_{\alpha=0}=\int_{t_1}^{t_2} \left( \frac{\partial {\cal L}}{\partial q}-\frac{d}{dt}\frac{\partial {\cal L}}{\partial \dot{q}}\right)\delta q(t)dt=\delta S = 0.
$$
The condition for a stationary value is thus the differential equation
$$
\frac{\partial {\cal L}}{\partial q}-\frac{d}{dt}\frac{\partial {\cal L}}{\partial \dot{q}}=0,
$$
known as the **Euler-Lagrange** equation.
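As a closing check (an added illustration using sympy's built-in Euler-Lagrange helper), we can verify symbolically that this equation applied to the harmonic-oscillator Lagrangian reproduces Newton's second law:

```python
import sympy as sp
from sympy.calculus.euler import euler_equations

# L = m*xdot^2/2 - k*x^2/2; the Euler-Lagrange equation should give m*xddot = -k*x
t = sp.symbols('t')
m, k = sp.symbols('m k', positive=True)
x = sp.Function('x')
L = m*sp.diff(x(t), t)**2/2 - k*x(t)**2/2
print(euler_equations(L, x(t), t))
# [Eq(-k*x(t) - m*Derivative(x(t), (t, 2)), 0)]
```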
|
#Tools to study and correct for trends in spectroscopic success rate (ssr)
#Initial LRG model fitting taken from Rongpu Zhou's notebook
import sys, os, glob, time, warnings, gc
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table, vstack, hstack, join
import fitsio
from scipy.optimize import curve_fit, minimize
import LSS.common_tools as common
elgcol = ['SUBSET','EBV','PRIORITY','TARGETID','OII_FLUX','OII_FLUX_IVAR','ELG_LOP','ELG_VLO','TSNR2_ELG','TSNR2_LRG','PHOTSYS','MASKBITS','FIBERFLUX_G','FIBERFLUX_R','FIBERFLUX_Z','COADD_FIBERSTATUS','Z','ZWARN','DELTACHI2']
def ELG_goodobs(data,fbs_col='COADD_FIBERSTATUS'):#,dt_col='DESI_TARGET'):
mask = data[fbs_col]==0
print(fbs_col,np.sum(mask), np.sum(~mask), np.sum(~mask)/len(mask))
# Remove "no data" fibers
mask &= data['ZWARN'] & 2**9==0
print('& No data', np.sum(mask), np.sum(~mask), np.sum(~mask)/len(mask))
# Apply imaging mask
#mask &= data['lrg_mask']==0
#print('& LRG imaging mask', np.sum(mask), np.sum(~mask), np.sum(~mask)/len(mask))
data['q'] = ELG_goodz(data)#data['ZWARN']==0
print('failure rate is '+str(np.sum(~data['q'])/len(data)))
return data
def ELG_goodz(data,zcol='Z'):
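# Good-redshift criterion for ELGs: combine the significance of the [OII]
# emission line with the redshift-fit DELTACHI2; the o2c > 0.9 cut below
# matches the selection applied to the 'o2c' column in get_ELG_data_full.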
o2c = np.log10(data['OII_FLUX'] * np.sqrt(data['OII_FLUX_IVAR']))+0.2*np.log10(data['DELTACHI2'])
sel = o2c > 0.9
return sel
def LRG_goodobs(data,fbs_col='COADD_FIBERSTATUS',dt_col='DESI_TARGET'):
mask = data[fbs_col]==0
print(fbs_col,np.sum(mask), np.sum(~mask), np.sum(~mask)/len(mask))
# Remove "no data" fibers
mask &= data['ZWARN'] & 2**9==0
print('& No data', np.sum(mask), np.sum(~mask), np.sum(~mask)/len(mask))
# Apply LRG mask
#mask &= data['lrg_mask']==0
#print('& LRG imaging mask', np.sum(mask), np.sum(~mask), np.sum(~mask)/len(mask))
# Remove QSO targets
mask &= data[dt_col] & 2**2 ==0
print('& Remove QSO targets', np.sum(mask), np.sum(~mask), np.sum(~mask)/len(mask))
data = data[mask]
data['q'] = LRG_goodz(data)#data['ZWARN']==0
#data['q'] &= data['Z']<1.5
#data['q'] &= data['DELTACHI2']>15
print('failure rate is '+str(np.sum(~data['q'])/len(data)))
return data
def LRG_goodz(data,zcol='Z'):
sel = data['ZWARN']==0
sel &= data[zcol]<1.5
sel &= data['DELTACHI2']>15
return sel
def get_ELG_data_full(tracer,surveys=['DA02'],versions=['test'],specrels=['guadalupe']):
cats = []
for sur,ver,sr in zip(surveys,versions,specrels):
dir = '/global/cfs/cdirs/desi/survey/catalogs/'+sur+'/LSS/'+sr+'/LSScats/'+ver+'/'
tfn = tracer
if sur == 'DA02':
tfn+='zdone'
fn = dir+tfn+'_full.dat.fits'
data = Table(fitsio.read(fn))
print(len(data))
sel = data['ZWARN'] != 999999
data = data[sel]
print(len(data))
data['q'] = data['o2c'] > 0.9
cats.append(data)
if len(cats) == 1:
cat = cats[0]
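# Convert template signal-to-noise (TSNR2) into effective exposure times;
# the 8.60 and 12.15 coefficients are the TSNR2-to-EFFTIME conversion
# factors used consistently throughout this module.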
cat['EFFTIME_ELG'] = 8.60 * cat['TSNR2_ELG']
cat['EFFTIME_LRG'] = 12.15 * cat['TSNR2_LRG']
cat['zfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_Z']) - 1.211 * cat['EBV']
cat['FIBERFLUX_Z_EC'] = cat['FIBERFLUX_Z']*10**(0.4*1.211*cat['EBV'])
gextc = 3.214
cat['gfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_G']) - gextc * cat['EBV']
cat['FIBERFLUX_G_EC'] = cat['FIBERFLUX_G']*10**(0.4*gextc*cat['EBV'])
rextc = 2.165
cat['rfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_R']) - rextc * cat['EBV']
cat['FIBERFLUX_R_EC'] = cat['FIBERFLUX_R']*10**(0.4*rextc*cat['EBV'])
cat['qf'] = np.array(cat['q'], dtype=float)
return cat
def get_BGS_data_full(tracer,surveys=['DA02'],versions=['test'],specrels=['guadalupe']):
cats = []
for sur,ver,sr in zip(surveys,versions,specrels):
dir = '/global/cfs/cdirs/desi/survey/catalogs/'+sur+'/LSS/'+sr+'/LSScats/'+ver+'/'
tfn = tracer
if sur == 'DA02':
tfn+='zdone'
fn = dir+tfn+'_full.dat.fits'
data = Table(fitsio.read(fn))
print(len(data))
sel = data['ZWARN'] != 999999
data = data[sel]
print(len(data))
gz = data['ZWARN'] == 0
gz &= data['DELTACHI2'] > 40
data['q'] = gz
cats.append(data)
if len(cats) == 1:
cat = cats[0]
cat['EFFTIME_ELG'] = 8.60 * cat['TSNR2_ELG']
cat['EFFTIME_LRG'] = 12.15 * cat['TSNR2_LRG']
cat['EFFTIME_BGS'] = 12.15/89.8 * cat['TSNR2_BGS']
cat['zfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_Z']) - 1.211 * cat['EBV']
cat['FIBERFLUX_Z_EC'] = cat['FIBERFLUX_Z']*10**(0.4*1.211*cat['EBV'])
gextc = 3.214
cat['gfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_G']) - gextc * cat['EBV']
cat['FIBERFLUX_G_EC'] = cat['FIBERFLUX_G']*10**(0.4*gextc*cat['EBV'])
rextc = 2.165
cat['rfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_R']) - rextc * cat['EBV']
cat['FIBERFLUX_R_EC'] = cat['FIBERFLUX_R']*10**(0.4*rextc*cat['EBV'])
cat['qf'] = np.array(cat['q'], dtype=float)
return cat
def get_QSO_data_full(tracer,surveys=['DA02'],versions=['test'],specrels=['guadalupe']):
cats = []
for sur,ver,sr in zip(surveys,versions,specrels):
dir = '/global/cfs/cdirs/desi/survey/catalogs/'+sur+'/LSS/'+sr+'/LSScats/'+ver+'/'
tfn = tracer
if sur == 'DA02':
tfn+='zdone'
fn = dir+tfn+'_full.dat.fits'
data = Table(fitsio.read(fn))
print(len(data))
sel = data['ZWARN'] != 999999
sel &= data['SPECTYPE'] != 'STAR'
data = data[sel]
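# Z_not4clus*0 == 0 evaluates to False for NaN (NaN*0 is NaN), so this
# keeps only finite redshifts before removing the sentinel values below.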
wz = data['Z_not4clus']*0 == 0
wz &= data['Z_not4clus'] != 999999
wz &= data['Z_not4clus'] != 1.e20
print(len(data),len(wz),np.sum(wz))
data['q'] = wz
cats.append(data)
if len(cats) == 1:
cat = cats[0]
cat['EFFTIME_ELG'] = 8.60 * cat['TSNR2_ELG']
cat['EFFTIME_QSO'] = 8.60/0.255 * cat['TSNR2_QSO']
cat['EFFTIME_LRG'] = 12.15 * cat['TSNR2_LRG']
cat['zfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_Z']) - 1.211 * cat['EBV']
cat['FIBERFLUX_Z_EC'] = cat['FIBERFLUX_Z']*10**(0.4*1.211*cat['EBV'])
gextc = 3.214
cat['gfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_G']) - gextc * cat['EBV']
cat['FIBERFLUX_G_EC'] = cat['FIBERFLUX_G']*10**(0.4*gextc*cat['EBV'])
rextc = 2.165
cat['rfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_R']) - rextc * cat['EBV']
cat['FIBERFLUX_R_EC'] = cat['FIBERFLUX_R']*10**(0.4*rextc*cat['EBV'])
cat['qf'] = np.array(cat['q'], dtype=float)
return cat
def get_ELG_data(specrel='fuji',tr='ELG_LOP',maskbits=[1,11,12,13],notqso=True):
maintids = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/main/LSS/'+tr+'targetsDR9v1.1.1.fits',columns=['TARGETID','DESI_TARGET','MASKBITS','NOBS_G','NOBS_R','NOBS_Z'])
maintids = common.cutphotmask(maintids,maskbits)
elgcatdir = '/global/cfs/cdirs/desi/users/raichoor/spectro/'+specrel
sv3 = fitsio.read(elgcatdir+'/sv3-elg-fuji-tiles.fits',columns=elgcol)
st = []
for i in range(0,len(sv3)):
st.append(sv3['SUBSET'][i][:4])
st = np.array(st)
wg = st == "thru"
sv3 = sv3[wg]
if tr != 'ELG':
print('cutting SV3 to main '+tr)
sel = sv3[tr] == True
print('length before is '+str(len(sv3)))
sv3 = sv3[sel]
print('length after is '+str(len(sv3)))
sel = sv3['PRIORITY'] > 10000
sv3 = sv3[sel]
print('length after cutting to priority > 10000 '+str(len(sv3)))
sv3 = ELG_goodobs(Table(sv3))
sv3 = join(sv3,maintids,keys=['TARGETID'])
print('length after join to main targets to get DESI_TARGET and cut on maskbits values '+str(len(sv3)))
elgcatdirg = '/global/cfs/cdirs/desi/users/raichoor/spectro/guadalupe'
main = fitsio.read(elgcatdirg+'/main-elg-guadalupe-tiles.fits',columns=elgcol)
st = []
for i in range(0,len(main)):
st.append(main['SUBSET'][i][:4])
st = np.array(st)
wg = st == "thru"
main = main[wg]
if tr != 'ELG':
print('cutting main to main '+tr)
sel = main[tr] == True
print('length before is '+str(len(main)))
main = main[sel]
print('length after is '+str(len(main)))
main = ELG_goodobs(Table(main))
main = join(main,maintids,keys=['TARGETID'])
print('length after join to main targets to get DESI_TARGET and cut on maskbits values '+str(len(main)))
sv1 = fitsio.read(elgcatdir+'/sv1-elg-fuji-tiles.fits',columns=elgcol)
if tr != 'ELG':
print('cutting SV1 to main '+tr)
sel = sv1[tr] == True
print('length before is '+str(len(sv1)))
sv1 = sv1[sel]
print('length after is '+str(len(sv1)))
sv1 = ELG_goodobs(Table(sv1))
sv1 = join(sv1,maintids,keys=['TARGETID'])
print('length after join to main targets to get DESI_TARGET and cut on maskbits values '+str(len(sv1)))
#cat = vstack([sv1, sv3, main], join_type='inner')
#cat = vstack([sv1, main], join_type='inner')
cat = main
print(len(cat))
if notqso:
# Remove QSO targets
mask = cat['DESI_TARGET'] & 2**2 ==0
print(' Remove QSO targets', np.sum(mask), np.sum(~mask), np.sum(~mask)/len(mask))
cat = cat[mask]
cat['EFFTIME_ELG'] = 8.60 * cat['TSNR2_ELG']
cat['EFFTIME_LRG'] = 12.15 * cat['TSNR2_LRG']
cat['zfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_Z']) - 1.211 * cat['EBV']
cat['FIBERFLUX_Z_EC'] = cat['FIBERFLUX_Z']*10**(0.4*1.211*cat['EBV'])
gextc = 3.214
cat['gfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_G']) - gextc * cat['EBV']
cat['FIBERFLUX_G_EC'] = cat['FIBERFLUX_G']*10**(0.4*gextc*cat['EBV'])
rextc = 2.165
cat['rfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_R']) - rextc * cat['EBV']
cat['FIBERFLUX_R_EC'] = cat['FIBERFLUX_R']*10**(0.4*rextc*cat['EBV'])
cat['qf'] = np.array(cat['q'], dtype=float)
return cat
def get_LRG_data(specrel='fuji'):
maintids = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/main/LSS/LRGtargetsDR9v1.1.1.fits',columns=['TARGETID','lrg_mask'])
sel = maintids['lrg_mask'] == 0
maintids = maintids[sel]
zcatdir = '/global/cfs/cdirs/desi/spectro/redux/'+specrel+'/zcatalog/'
perexpall = Table(fitsio.read(zcatdir+'ztile-sv1-dark-perexp.fits'))
sel = np.isin(perexpall['TARGETID'],maintids['TARGETID'])
perexplrg = perexpall[sel]
del perexpall
perexplrg = LRG_goodobs(perexplrg,'COADD_FIBERSTATUS','SV1_DESI_TARGET')
cat_1xall = Table(fitsio.read(zcatdir+'ztile-sv1-dark-1x_depth.fits'))
sel = np.isin(cat_1xall['TARGETID'],maintids['TARGETID'])
cat_1xlrg = cat_1xall[sel]
del cat_1xall
cat_1xlrg = LRG_goodobs(cat_1xlrg,'COADD_FIBERSTATUS','SV1_DESI_TARGET')
cat_deepall = Table(fitsio.read(zcatdir+'ztile-sv1-dark-cumulative.fits'))
sel = np.isin(cat_deepall['TARGETID'],maintids['TARGETID'])
cat_deeplrg = cat_deepall[sel]
del cat_deepall
cat_deeplrg = LRG_goodobs(cat_deeplrg,'COADD_FIBERSTATUS','SV1_DESI_TARGET')
cat_sv3all = Table(fitsio.read(zcatdir+'ztile-sv3-dark-cumulative.fits'))
sel = np.isin(cat_sv3all['TARGETID'],maintids['TARGETID'])
sel &= cat_sv3all['PRIORITY'] == 103200 #we don't want to include the failed repeats in the statistics
cat_sv3lrg = cat_sv3all[sel]
del cat_sv3all
cat_sv3lrg = LRG_goodobs(cat_sv3lrg,'COADD_FIBERSTATUS','SV3_DESI_TARGET')
if specrel == 'fuji':
specrelmain = 'guadalupe'
zcatdirm = '/global/cfs/cdirs/desi/spectro/redux/'+specrelmain+'/zcatalog/'
cat_mainall = Table(fitsio.read(zcatdirm+'ztile-main-dark-cumulative.fits'))
sel = np.isin(cat_mainall['TARGETID'],maintids['TARGETID'])
cat_mainlrg = cat_mainall[sel]
del cat_mainall
cat_mainlrg = LRG_goodobs(cat_mainlrg,'COADD_FIBERSTATUS','DESI_TARGET')
cat = vstack([perexplrg, cat_1xlrg, cat_mainlrg, cat_deeplrg, cat_sv3lrg], join_type='inner')
print(len(cat))
cat['EFFTIME_ELG'] = 8.60 * cat['TSNR2_ELG']
cat['EFFTIME_LRG'] = 12.15 * cat['TSNR2_LRG']
cat['zfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_Z']) - 1.211 * cat['EBV']
cat['FIBERFLUX_Z_EC'] = cat['FIBERFLUX_Z']*10**(0.4*1.211*cat['EBV'])
cat['qf'] = np.array(cat['q'], dtype=float)
return cat
def fit_cons(dl,el,minv=0,step=0.01):
c = minv
newcost = np.sum((dl-c)**2./el**2.)
oldcost = newcost + 1
while newcost < oldcost:
oc = c
oldcost = newcost
c += step
newcost = np.sum((dl-c)**2./el**2.)
return oldcost,oc # return the constant that achieved the minimum cost, not the overshot c
class LRG_ssr:
def __init__(self,specrel='fuji',efftime_min=500,efftime_max=2000):
self.cat = get_LRG_data(specrel)
mask = self.cat['EFFTIME_LRG']>efftime_min
mask &= self.cat['EFFTIME_LRG']<efftime_max
self.cat = self.cat[mask]
def cost(self,q_predict):
return np.sum((self.cat['qf']-q_predict)**2)
def wrapper(self,params):
q_predict = 1-self.failure_rate(self.cat['FIBERFLUX_Z_EC'], self.cat['EFFTIME_LRG'], *params)
return self.cost(q_predict)
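# failure-rate model: exponential falloff in the S/N proxy flux*sqrt(efftime)
# plus a flux-dependent floor, clipped to the unit interval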
def failure_rate(self,flux, efftime, a, b, c):
sn = flux * np.sqrt(efftime)
return np.clip(np.exp(-(sn+a)/b)+c/flux, 0, 1)
def add_modpre(self,data):
res = minimize(self.wrapper, [0, 10., 0.01], bounds=((-200, 200), (0, 100), (0., 1)),
method='Powell', tol=1e-6)
pars = res.x
print(pars)
dflux = data['FIBERFLUX_Z']*10**(0.4*1.211*data['EBV'])#data['FIBERFLUX_Z_EC']
deff = 12.15 * data['TSNR2_LRG']#data['EFFTIME_LRG']
data['mod_success_rate'] = 1. -self.failure_rate(dflux,deff,*pars)
return data
class BGS_ssr:
def __init__(self,specrel='fuji',efftime_min=100,efftime_max=300):
self.cat = get_BGS_data_full('BGS_BRIGHT')
mask = self.cat['EFFTIME_BGS']>efftime_min
mask &= self.cat['EFFTIME_BGS']<efftime_max
self.cat = self.cat[mask]
self.selgz = self.cat['q'] == 1
ha,bine = np.histogram(self.cat['EFFTIME_BGS'])
hf,_ = np.histogram(self.cat['EFFTIME_BGS'][~self.selgz],bins=bine) #use the same bin edges as ha so the ratio below is well defined
self.nzf = hf/ha
print(self.nzf)
self.nzfe = np.sqrt(hf)/ha
bc = []
bs = bine[1]-bine[0]
for i in range(0,len(bine)-1):
bc.append(bine[i]+bs/2.)
self.bc = np.array(bc)
self.bine = bine
def cost(self,q_predict):
return np.sum((self.cat['qf']-q_predict)**2)
def wrapper(self,params):
q_predict = 1-self.failure_rate(self.cat['FIBERFLUX_R_EC'], self.cat['EFFTIME_BGS'], *params)
return self.cost(q_predict)
def failure_rate(self,flux, efftime, a, b, c):
sn = flux * np.sqrt(efftime)
return np.clip(np.exp(-(sn+a)/b)+c/flux, 0, .5)
def add_modpre(self,data):
res = minimize(self.wrapper, [0, 10., 0.01], bounds=((-200, 200), (0, 100), (0., 1)),
method='Powell', tol=1e-6)
pars = res.x
print(pars,self.wrapper(pars))
dflux = data['FIBERFLUX_R']*10**(0.4*2.165*data['EBV'])#data['FIBERFLUX_R_EC']
deff = 12.15/89.8 * data['TSNR2_BGS']#data['EFFTIME_BGS']
data['mod_success_rate'] = 1. -self.failure_rate(dflux,deff,*pars)
#print(len(data),np.sum(data['mod_success_rate']))
ha,_ = np.histogram(deff,bins=self.bine)
gz = data['ZWARN'] == 0
gz &= data['DELTACHI2'] > 40
hf,_ = np.histogram(deff[gz],weights=1/data[gz]['mod_success_rate'],bins=self.bine)
plt.errorbar(self.bc,1.-self.nzf,self.nzfe,fmt='ko')
plt.errorbar(self.bc,hf/ha,self.nzfe,fmt='rd')
plt.show()
return data
class ELG_ssr:
def __init__(self,specrel='fuji',efftime_min=450,efftime_max=1500):
self.cat = get_ELG_data_full('ELG_LOPnotqso')#get_ELG_data(specrel)
mask = self.cat['EFFTIME_ELG']>efftime_min
mask &= self.cat['EFFTIME_ELG']<efftime_max
self.cat = self.cat[mask]
self.selgz = self.cat['q'] == 1
ha,bine = np.histogram(self.cat['EFFTIME_ELG'])
hf,_ = np.histogram(self.cat['EFFTIME_ELG'][~self.selgz],bins=bine) #use the same bin edges as ha so the ratio below is well defined
self.nzf = hf/ha
print(self.nzf)
self.nzfe = np.sqrt(hf)/ha
bc = []
bs = bine[1]-bine[0]
for i in range(0,len(bine)-1):
bc.append(bine[i]+bs/2.)
self.bc = np.array(bc)
self.bine = bine
self.vis_5hist = False
def cost(self,q_predict):
return np.sum((self.cat['qf']-q_predict)**2)
def wrapper(self,params):
q_predict = 1-self.failure_rate(self.cat['FIBERFLUX_G_EC'], self.cat['EFFTIME_ELG'], *params)
return self.cost(q_predict)
def wrapper_hist(self,params):
h_predict = self.failure_rate_eff(self.bc, *params)
diff = self.nzf-h_predict
cost = np.sum((diff/self.nzfe)**2.)
return cost
def failure_rate(self,flux, efftime, a, b, c):
#sn = flux * np.sqrt(efftime)
#return np.clip(np.exp(-(sn+a)/b)+c/flux, 0, 1)
return np.clip(np.exp(-(efftime+a)/b)+c/flux, 0, 1)
def failure_rate_eff(self, efftime, a, b, c):
#sn = flux * np.sqrt(efftime)
#return np.clip(np.exp(-(sn+a)/b)+c/flux, 0, 1)
return np.clip(np.exp(-(efftime+a)/b)+c, 0, 1)
def hist_norm(self,fluxc):
nzfper = []
consl = []
nb = 5
pstep = 100//5
costt = 0
for i in range(0,nb):
sel = self.cat['FIBERFLUX_G_EC'] > np.percentile(self.cat['FIBERFLUX_G_EC'],i*pstep)
sel &= self.cat['FIBERFLUX_G_EC'] < np.percentile(self.cat['FIBERFLUX_G_EC'],(i+1)*pstep)
mf = np.median(self.cat['FIBERFLUX_G_EC'][sel])
if self.vis_5hist:
print(mf)
#fper.append(mf)
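# rescale the fiducial zfail weights linearly in fiber flux about the median mft;
# fluxc is the single coefficient being fit by hist_norm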
wtf = (fluxc*(self.mft-self.cat['FIBERFLUX_G_EC'])/self.mft+1)*(self.wts_fid-1)+1
selw = wtf < 1
wtf[selw] = 1
ha,_ = np.histogram(self.cat['EFFTIME_ELG'][sel],bins=self.bine)
hf,_ = np.histogram(self.cat['EFFTIME_ELG'][sel&self.selgz],weights=wtf[sel&self.selgz],bins=self.bine)
#if self.vis_5hist:
# print(mf)
# print(np.sum(ha))
# print(np.sum(hf))
dl = hf/ha
nzfper.append(dl)
def ccost(c):
return np.sum((dl-c)**2./self.nzfpere[i]**2.)
resc = minimize(ccost, np.ones(1))
bc = resc.x
cost = ccost(bc)
consl.append(bc)
costt += cost
if self.vis_5hist:
for i in range(0,nb):
plt.errorbar(self.bc,nzfper[i],self.nzfpere[i])
plt.plot(self.bc,np.ones(len(self.bc))*consl[i],'k:')
plt.show()
return costt
def add_modpre(self,data):
res = minimize(self.wrapper_hist, [-200, 10., 0.01], bounds=((-10000, 0), (0, 10000), (0., 1)),
method='Powell', tol=1e-6)
pars = res.x
print(pars,self.wrapper_hist(pars))
gextc = 3.214
dflux = data['FIBERFLUX_G']*10**(0.4*gextc*data['EBV']) #data['FIBERFLUX_G_EC']
deff = 8.60 * data['TSNR2_ELG']#data['EFFTIME_ELG']
#data['mod_success_rate'] = 1. -self.failure_rate(dflux,deff,*pars)
data['mod_success_rate'] = 1. -self.failure_rate_eff(deff,*pars)
assr = 1. -self.failure_rate_eff(self.cat['EFFTIME_ELG'],*pars)
relssr = assr/np.max(assr)
drelssr = data['mod_success_rate']/np.max(assr)#np.max(data['mod_success_rate'])
seld = deff > 450
seld &= deff < 1500
print(len(relssr),len(drelssr[seld]),np.max(assr),np.max(data[seld]['mod_success_rate']))
self.wts_fid = 1/relssr
nzfper = []
nzfpere = []
fper = []
self.mft = np.median(self.cat['FIBERFLUX_G_EC'])
nb = 5
pstep = 100//5
for i in range(0,nb):
sel = self.cat['FIBERFLUX_G_EC'] > np.percentile(self.cat['FIBERFLUX_G_EC'],i*pstep)
sel &= self.cat['FIBERFLUX_G_EC'] < np.percentile(self.cat['FIBERFLUX_G_EC'],(i+1)*pstep)
mf = np.median(self.cat['FIBERFLUX_G_EC'][sel])
fper.append(mf)
ha,_ = np.histogram(self.cat['EFFTIME_ELG'][sel],bins=self.bine)
hf,_ = np.histogram(self.cat['EFFTIME_ELG'][sel&self.selgz],bins=self.bine)
hfw,_ = np.histogram(self.cat['EFFTIME_ELG'][sel&self.selgz],weights=self.wts_fid[sel&self.selgz],bins=self.bine)
nzfper.append(hf/ha)
nzfpere.append(np.sqrt(ha-hf)/ha)
#plt.plot(self.bc,hfw/ha)
#plt.title('inputs')
#plt.show()
self.nzfpere = nzfpere
rest = minimize(self.hist_norm, np.ones(1))#, bounds=((-10, 10)),
#method='Powell', tol=1e-6)
fcoeff = rest.x
self.vis_5hist = True
print(fcoeff,self.hist_norm(fcoeff))#,self.hist_norm(0.),self.hist_norm(1.))
wtf = (fcoeff*(self.mft-dflux)/self.mft+1)*(1/drelssr-1)+1
sel = wtf < 1
wtf[sel] = 1
data['WEIGHT_ZFAIL'] = wtf
return data
# nb = 5
# pstep = 100//5
# costt = 0
#
# seld = np.ones(len(dflux),dtype='bool')
# dflux = dflux[seld]
# deff =deff[seld]
# dselgz = data[seld]['o2c'] > 0.9
# wtf = (1/drelssr[seld]-1)+1
#print('are weight arrays equal?',np.array_equal(self.wts_fid,wtf))
# for i in range(0,nb):
# sel = dflux > np.percentile(dflux,i*pstep)
# sel &= dflux < np.percentile(dflux,(i+1)*pstep)
# mf = np.median(dflux[sel])
#
#
#
# ha,_ = np.histogram(deff[sel],bins=self.bine)
# hf,_ = np.histogram(deff[sel&dselgz],weights=wtf[sel&dselgz],bins=self.bine)
class QSO_ssr:
def __init__(self,specrel='fuji',efftime_min=450,efftime_max=1500):
self.cat = get_QSO_data_full('QSO')#get_ELG_data(specrel)
mask = self.cat['EFFTIME_QSO']>efftime_min
mask &= self.cat['EFFTIME_QSO']<efftime_max
self.cat = self.cat[mask]
self.selgz = self.cat['q'] == 1
ha,bine = np.histogram(self.cat['EFFTIME_QSO'])
hf,_ = np.histogram(self.cat['EFFTIME_QSO'][~self.selgz],bins=bine) #use the same bin edges as ha so the ratio below is well defined
self.nzf = hf/ha
print(self.nzf)
self.nzfe = np.sqrt(hf)/ha
bc = []
bs = bine[1]-bine[0]
for i in range(0,len(bine)-1):
bc.append(bine[i]+bs/2.)
self.bc = np.array(bc)
self.bine = bine
self.vis_5hist = False
def cost(self,q_predict):
return np.sum((self.cat['qf']-q_predict)**2)
def wrapper(self,params):
q_predict = 1-self.failure_rate(self.cat['FIBERFLUX_R_EC'], self.cat['EFFTIME_QSO'], *params)
return self.cost(q_predict)
def wrapper_hist(self,params):
h_predict = self.failure_rate_eff(self.bc, *params)
diff = self.nzf-h_predict
cost = np.sum((diff/self.nzfe)**2.)
return cost
def failure_rate(self,flux, efftime, a, b, c):
#sn = flux * np.sqrt(efftime)
#return np.clip(np.exp(-(sn+a)/b)+c/flux, 0, 1)
return np.clip(np.exp(-(efftime+a)/b)+c/flux, 0, 1)
def failure_rate_eff(self, efftime, a, b, c):
#sn = flux * np.sqrt(efftime)
#return np.clip(np.exp(-(sn+a)/b)+c/flux, 0, 1)
return np.clip(np.exp(-(efftime+a)/b)+c, 0, 1)
def hist_norm(self,fluxc):
nzfper = []
consl = []
nb = 5
pstep = 100//5
costt = 0
for i in range(0,nb):
sel = self.cat['FIBERFLUX_R_EC'] > np.percentile(self.cat['FIBERFLUX_R_EC'],i*pstep)
sel &= self.cat['FIBERFLUX_R_EC'] < np.percentile(self.cat['FIBERFLUX_R_EC'],(i+1)*pstep)
mf = np.median(self.cat['FIBERFLUX_R_EC'][sel])
if self.vis_5hist:
print(mf)
#fper.append(mf)
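# rescale the fiducial zfail weights linearly in fiber flux about the median mft;
# fluxc is the single coefficient being fit by hist_norm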
wtf = (fluxc*(self.mft-self.cat['FIBERFLUX_R_EC'])/self.mft+1)*(self.wts_fid-1)+1
selw = wtf < 1
wtf[selw] = 1
ha,_ = np.histogram(self.cat['EFFTIME_QSO'][sel],bins=self.bine)
hf,_ = np.histogram(self.cat['EFFTIME_QSO'][sel&self.selgz],weights=wtf[sel&self.selgz],bins=self.bine)
#if self.vis_5hist:
# print(mf)
# print(np.sum(ha))
# print(np.sum(hf))
dl = hf/ha
nzfper.append(dl)
def ccost(c):
return np.sum((dl-c)**2./self.nzfpere[i]**2.)
resc = minimize(ccost, np.ones(1))
bc = resc.x
cost = ccost(bc)
consl.append(bc)
costt += cost
if self.vis_5hist:
for i in range(0,nb):
plt.errorbar(self.bc,nzfper[i],self.nzfpere[i])
plt.plot(self.bc,np.ones(len(self.bc))*consl[i],'k:')
plt.show()
return costt
def add_modpre(self,data):
res = minimize(self.wrapper_hist, [-0.001, 1, 0.4], bounds=((-1000, 0), (0, 1000), (0., 1)),
method='Powell', tol=1e-6)
pars = res.x
print(pars,self.wrapper_hist(pars))
plt.errorbar(self.bc,self.nzf,self.nzfe,fmt='ko')
mod = self.failure_rate_eff(self.bc, *pars)
plt.plot(self.bc,mod,'k--')
plt.show()
gextc = 3.214
rextc = 2.165
dflux = data['FIBERFLUX_R']*10**(0.4*rextc*data['EBV']) #data['FIBERFLUX_R_EC']
deff = 8.60/0.255 * data['TSNR2_QSO']#data['EFFTIME_QSO']
#data['mod_success_rate'] = 1. -self.failure_rate(dflux,deff,*pars)
data['mod_success_rate'] = 1. -self.failure_rate_eff(deff,*pars)
assr = 1. -self.failure_rate_eff(self.cat['EFFTIME_QSO'],*pars)
relssr = assr/np.max(assr)
drelssr = data['mod_success_rate']/np.max(assr)#np.max(data['mod_success_rate'])
seld = deff > 450
seld &= deff < 1500
print(len(relssr),len(drelssr[seld]),np.max(assr),np.max(data[seld]['mod_success_rate']))
self.wts_fid = 1/relssr
nzfper = []
nzfpere = []
fper = []
self.mft = np.median(self.cat['FIBERFLUX_R_EC'])
nb = 5
pstep = 100//5
for i in range(0,nb):
sel = self.cat['FIBERFLUX_R_EC'] > np.percentile(self.cat['FIBERFLUX_R_EC'],i*pstep)
sel &= self.cat['FIBERFLUX_R_EC'] < np.percentile(self.cat['FIBERFLUX_R_EC'],(i+1)*pstep)
mf = np.median(self.cat['FIBERFLUX_R_EC'][sel])
fper.append(mf)
ha,_ = np.histogram(self.cat['EFFTIME_QSO'][sel],bins=self.bine)
hf,_ = np.histogram(self.cat['EFFTIME_QSO'][sel&self.selgz],bins=self.bine)
hfw,_ = np.histogram(self.cat['EFFTIME_QSO'][sel&self.selgz],weights=self.wts_fid[sel&self.selgz],bins=self.bine)
nzfper.append(hf/ha)
nzfpere.append(np.sqrt(ha-hf)/ha)
#plt.plot(self.bc,hfw/ha)
#plt.title('inputs')
#plt.show()
self.nzfpere = nzfpere
rest = minimize(self.hist_norm, np.ones(1))#, bounds=((-10, 10)),
#method='Powell', tol=1e-6)
fcoeff = rest.x
self.vis_5hist = True
print(fcoeff,self.hist_norm(fcoeff))#,self.hist_norm(0.),self.hist_norm(1.))
wtf = (fcoeff*(self.mft-dflux)/self.mft+1)*(1/drelssr-1)+1
sel = wtf < 1
wtf[sel] = 1
data['WEIGHT_ZFAIL'] = wtf
return data
# print(mf)
# print(np.sum(ha))
# print(np.sum(hf))
# dl = hf/ha
# plt.plot(self.bc,dl)
# plt.show()
|
""" --Juana Valeria Hurtado Rincon [email protected]
Signal Processing and Pattern Recognition Research Group
Universidad Nacional de Colombia """
from numpy import fft
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
from emokit import emotiv
import gevent
def readEEG(Fs,time,ch):
"""Return EEG matrix nxm (n:time,m:channels) from Emotiv
Parameters: Fs: -int- sampling frequency
time: -int- time recording
ch: -int- number of channels
Returns: EEG: -ndarray- EEG signal
headset: -object- emotiv headset -> emotiv.Emotiv()"""
headset = emotiv.Emotiv()
gevent.spawn(headset.setup)
gevent.sleep(1)
samples = int(Fs*time)
EEG = np.zeros(shape=(samples, ch))
for i in xrange(samples):
packet = headset.dequeue()
EEG[i,0] = int(''.join(map(str,list(packet.F3)))) #Tuple to int
EEG[i,1] = int(''.join(map(str,list(packet.F4))))
EEG[i,2] = int(''.join(map(str,list(packet.P7))))
EEG[i,3] = int(''.join(map(str,list(packet.FC6))))
EEG[i,4] = int(''.join(map(str,list(packet.F7))))
EEG[i,5] = int(''.join(map(str,list(packet.F8))))
EEG[i,6] = int(''.join(map(str,list(packet.T7))))
EEG[i,7] = int(''.join(map(str,list(packet.P8))))
EEG[i,8] = int(''.join(map(str,list(packet.FC5))))
EEG[i,9] = int(''.join(map(str,list(packet.AF4))))
EEG[i,10] = int(''.join(map(str,list(packet.T8))))
EEG[i,11] = int(''.join(map(str,list(packet.O2))))
EEG[i,12] = int(''.join(map(str,list(packet.O1))))
EEG[i,13] = int(''.join(map(str,list(packet.AF3))))
#gevent.sleep(0)
#headset.close()
return(EEG, headset)
def readEEG2(Fs,time,ch,headset):
"""Return EEG matrix nxm (n:time,m:channels) from Emotiv
Parameters: Fs: -int- sampling frequency
time: -int- time recording
ch: -int- number of channels
headset: -object- emotiv headset -> emotiv.Emotiv()
Returns: EEG: -ndarray- EEG signal"""
samples = int(Fs*time)
EEG = np.zeros(shape=(samples, ch))
for i in xrange(samples):
packet = headset.dequeue()
EEG[i,0] = int(''.join(map(str,list(packet.F3)))) #Tuple to int
EEG[i,1] = int(''.join(map(str,list(packet.F4))))
EEG[i,2] = int(''.join(map(str,list(packet.P7))))
EEG[i,3] = int(''.join(map(str,list(packet.FC6))))
EEG[i,4] = int(''.join(map(str,list(packet.F7))))
EEG[i,5] = int(''.join(map(str,list(packet.F8))))
EEG[i,6] = int(''.join(map(str,list(packet.T7))))
EEG[i,7] = int(''.join(map(str,list(packet.P8))))
EEG[i,8] = int(''.join(map(str,list(packet.FC5))))
EEG[i,9] = int(''.join(map(str,list(packet.AF4))))
EEG[i,10] = int(''.join(map(str,list(packet.T8))))
EEG[i,11] = int(''.join(map(str,list(packet.O2))))
EEG[i,12] = int(''.join(map(str,list(packet.O1))))
EEG[i,13] = int(''.join(map(str,list(packet.AF3))))
#gevent.sleep(0)
#headset.close()
return(EEG)
def normalizeEEG(EEG):
"""Return EEG normalized matrix
Parameters: EEG: -ndarray- EEG matrix(time x ch)
Returns: EEG_n: -ndarray- Normalized EEG signal"""
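# mean-center each channel, then offset it by 100*(u+1) so channels stack vertically when plotted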
EEG_n = np.zeros(shape=(EEG.shape))
for u in range(EEG.shape[1]):
EEG_n[:,u] = (( EEG[:,u] - np.mean(EEG[:,u]))+100*(u+1)).transpose()
return(EEG_n)
def getChannels(EEG,ch_list):
"""Return the EEG data of the given channel list and the new ch number
Parameters: EEG: -ndarray- EEG matrix(time x ch)
ch_list: -1xn array- contains the numbers that corresponds with the desired channels
0:F3, 1:F4, 2:P7, 3:FC6, 4:F7, 5:F8, 6:T7, 7:P8, 8:FC5, 9:AF4, 10:T8, 11:O2, 12:O1, 13:AF3
Returns: EEG_c: -ndarray- EEG data for the selected channels
ch: -int- New number of channels
Examples: 1) getChannels(EEG,[0,3,4,5,10,11,12])
2)ch_list = [0,1,2,3,4,5,6,7,8,9,10,11,12,13]
getChannels(EEG,ch_list)"""
EEG_c = np.zeros(shape=(EEG.shape[0], len(ch_list)))
for c in range(len(ch_list)):
EEG_c[:,c] = EEG[:,ch_list[c]]
ch = len(ch_list)
return EEG_c, ch
def saveEEGTxt(EEG,filename,Header):
"""Save the EEG signal in a .txt file
Parameters: EEG: -ndarray- EEG matrix(time x ch)
filename: -str- Name of the file
header: -str- Header of the file"""
m = np.asmatrix(EEG)
np.savetxt(filename + '.txt', m,fmt='%.d',header=str(Header), comments='')
def rhythmsBasicWaves( fs, dx, EEG):
"""Return the basic rhythms (delta, theta, alpha, beta) energy mean of the given EEG signal
Parameters: EEG: -ndarray- EEG matrix(time x ch)
fs: -int- Sampling frequency
dx: -int- 1/fs
Returns: rhythm_mean: -1x4 array- Rhythms energy """
#fs: sampling frequency
#dx: 1/fs
#EEG: EEG matrix of size (time, ch)
#Delta rhythms: 0-4 Hz
#Theta activity: 4-7 Hz
#Alpha rhythms: 8-13 Hz
#Beta rhythms: 14-60 Hz
delta = [0.8,4]
theta = [4,7]
alfa = [8,13]
beta = [14,60]
freq_rhythms=[delta[0],delta[1],theta[0],theta[1],alfa[0],alfa[1],beta[0],beta[1]] # frequency-band edges of the rhythms
X = EEG # EEG data matrix
[m,n] = X.shape # size of the loaded matrix
if (m % 2 != 0): # force an even number of samples
X = X[0:m-1,:] # (avoids problems when taking the positive half of the transform)
[m,n] = X.shape
Y = np.zeros(shape=(m,n),dtype=complex) # preallocate work arrays; complex so FFT values are not truncated
f = np.zeros(shape=(m,n))
Yf = np.zeros(shape=(m//2,n),dtype=complex)
ff = np.zeros(shape=(m//2,n))
prom = np.zeros(len(freq_rhythms)//2)
for i in range(n):
Y[:,i] = fft.fft(X[:,i]) # fast Fourier transform
Y[:,i]= fft.fftshift(Y[:,i]) # center the spectrum at zero frequency
Yf[:,i] = Y[len(Y)//2:,i] # keep the positive half
f[:,i] = fft.fftfreq(len(X[:,i]),dx) # frequency axis; dx must match the sampling step
f[:,i] = fft.fftshift(f[:,i]) # center at zero frequency
ff[:,i] = f[len(f)//2:,i] # keep the positive half
Yff = np.sum(Yf,axis=1) # sum the channels in frequency
ff = ff[:,0]
posi = []
for p in range(len(freq_rhythms)):
freq_rhythms2 = min(ff, key=lambda x:abs(x-freq_rhythms[p])) # frequency-axis value closest to each band edge
posi.append(np.where(ff == freq_rhythms2)) # record its position on the frequency axis
q = 0
for j in range(len(freq_rhythms)//2): # mean energy in each frequency band
ini = posi[q][0][0]
fin = posi[q+1][0][0]
prom[j] = np.mean(np.square(np.real(Yff[ini:fin])))
q=q+2
#print 'delta, theta, alfa, beta'
rhythm_mean = prom
return rhythm_mean
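# Illustrative call (sampling values are assumptions): with 128 Hz Emotiv data
# in EEG, rhythmsBasicWaves(128, 1./128, EEG) returns the mean band energies
# ordered as [delta, theta, alpha, beta]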
def rhythmsAllWaves( fs, dx, EEG):
"""Return the rhythms energy mean of the given EEG signal
Parameters: EEG: -ndarray- EEG matrix(time x ch)
fs: -int- Sampling frequency
dx: -int- 1/fs
Returns: rhythm_mean: -1x8 array- Rhythms energy
delta, theta, low_alpha, high_alpha, low_beta, high_beta, low_gamma, high_gamma"""
#fs: sampling frequency
#dx: 1/fs
#EEG: EEG matrix of size (time, ch)
#Delta rhythms: 0-4 Hz
#Theta activity: 4-7 Hz
#Alpha rhythms: 8-13 Hz
#Beta rhythms: 14-60 Hz
delta = [1.5,3]
theta = [4,7]
low_alpha = [7.5,12.5]
high_alpha = [8,15]
low_beta = [12.5, 18]
high_beta = [18,30]
low_gamma = [30,40]
high_gamma = [35,60]
freq_rhythms=[delta[0],delta[1],theta[0],theta[1],low_alpha[0],low_alpha[1],high_alpha[0],high_alpha[1],low_beta[0],low_beta[1],high_beta[0],high_beta[1],low_gamma[0],low_gamma[1],high_gamma[0],high_gamma[1]] # frequency-band edges of the rhythms
X = EEG # EEG data matrix
[m,n] = X.shape # size of the loaded matrix
if (m % 2 != 0): # force an even number of samples
X = X[0:m-1,:] # (avoids problems when taking the positive half of the transform)
[m,n] = X.shape
Y = np.zeros(shape=(m,n),dtype=complex) # preallocate work arrays; complex so FFT values are not truncated
f = np.zeros(shape=(m,n))
Yf = np.zeros(shape=(m//2,n),dtype=complex)
ff = np.zeros(shape=(m//2,n))
prom = np.zeros(len(freq_rhythms)//2)
for i in range(n):
Y[:,i] = fft.fft(X[:,i]) # fast Fourier transform
Y[:,i]= fft.fftshift(Y[:,i]) # center the spectrum at zero frequency
Yf[:,i] = Y[len(Y)//2:,i] # keep the positive half
f[:,i] = fft.fftfreq(len(X[:,i]),dx) # frequency axis; dx must match the sampling step
f[:,i] = fft.fftshift(f[:,i]) # center at zero frequency
ff[:,i] = f[len(f)//2:,i] # keep the positive half
Yff = np.sum(Yf,axis=1) # sum the channels in frequency
ff = ff[:,0]
posi = []
for p in range(len(freq_rhythms)):
freq_rhythms2 = min(ff, key=lambda x:abs(x-freq_rhythms[p])) # frequency-axis value closest to each band edge
posi.append(np.where(ff == freq_rhythms2)) # record its position on the frequency axis
q = 0
for j in range(len(freq_rhythms)//2): # mean energy in each frequency band
ini = posi[q][0][0]
fin = posi[q+1][0][0]
prom[j] = np.mean(np.square(np.real(Yff[ini:fin])))
q=q+2
#print 'delta, theta, alfa, beta'
return prom
def rhythmsFromFile( fs, dx,filename):
#fs: sampling frequency
#dx: 1/fs
#filename: name (string) of a .txt file which contains the EEG data recorded over a time t
#Delta rhythms: 0-4 Hz
#Theta activity: 4-7 Hz
#Alpha rhythms: 8-13 Hz
#Beta rhythms: 14-60 Hz
delta = [0,4]
theta = [4,7]
alfa = [8,13]
beta = [14,60]
freq_rhythms=[delta[0],delta[1],theta[0],theta[1],alfa[0],alfa[1],beta[0],beta[1]] # frequency-band edges of the rhythms
X = np.loadtxt(filename, skiprows=1) # load a txt file with the EEG
XT = X.transpose()
[m,n] = XT.shape # size of the loaded matrix
X = XT
if m % 2 != 0: # force an even number of samples
X = X[0:m-1,:] # (avoids problems when taking the positive half of the transform)
[m,n] = X.shape
Y = np.zeros(shape=(m,n),dtype=complex) # preallocate work arrays; complex so FFT values are not truncated
f = np.zeros(shape=(m,n))
Yf = np.zeros(shape=(m//2,n),dtype=complex)
ff = np.zeros(shape=(m//2,n))
prom = np.zeros(shape=(1,4))
for i in range(n):
Y[:,i] = fft.fft(X[:,i]) # fast Fourier transform
Y[:,i]= fft.fftshift(Y[:,i]) # center the spectrum at zero frequency
Yf[:,i] = Y[len(Y)//2:,i] # keep the positive half
f[:,i] = fft.fftfreq(len(X[:,i]),dx) # frequency axis; dx must match the sampling step
f[:,i] = fft.fftshift(f[:,i]) # center at zero frequency
ff[:,i] = f[len(f)//2:,i] # keep the positive half
Yff = np.sum(Yf,axis=1) # sum the channels in frequency
ff = ff[:,0]
posi = []
for p in range(len(freq_rhythms)):
freq_rhythms2 = min(ff, key=lambda x:abs(x-freq_rhythms[p])) # frequency-axis value closest to each band edge
posi.append(np.where(ff == freq_rhythms2)) # record its position on the frequency axis
q = 0
for j in range(4): # mean energy in each frequency band
ini = posi[q][0][0]
fin = posi[q+1][0][0]
prom[0,j] = np.mean(np.square(np.real(Yff[ini:fin])))
q=q+2
#print 'delta, theta, alfa, beta'
#print prom
return prom[0]
def normalizeRhythms(rhy):
"""Return normalized rhythms
Parameters: rhy: -1xn array- Rhythms energy
Returns: rhy_n: -1xn array- Normalized Rhythms"""
rhy_n = (rhy - min(rhy))/(max(rhy) - min(rhy))*100 # min-max scaling to the range [0, 100] %
return(rhy_n)
def GraphBar(win,y,c):
""" Plot a Bar graphic for each value of y(n array) with its respective color c(n array)"""
bg1 = pg.BarGraphItem(x=[0], height=[y[0]], width=0.5, brush=c[0])
bg2 = pg.BarGraphItem(x=[1], height=[y[1]], width=0.5, brush=c[1])
bg3 = pg.BarGraphItem(x=[2], height=[y[2]], width=0.5, brush=c[2])
bg4 = pg.BarGraphItem(x=[3], height=[y[3]], width=0.5, brush=c[3])
win.addItem(bg1)
win.addItem(bg2)
win.addItem(bg3)
win.addItem(bg4)
win.getAxis('bottom').setTicks([[(0, 'Delta'), (1, 'Theta'), (2, 'Alpha'), (3, 'Beta')]])
win.setLabel('left', 'Energy', units='%')
def wavesDiagram(E,Deg):
"""Return the points to plot the Waves Diagram
Parameters: E: -1xn array- Energy of each rhythm
Deg: -1xn array- Plot angle for each rhythm
Returns: x: -1xn array- plot points in x axis
y: -1xn array- plot points in y axis
Examples: 1) Deg = [90., 45., 330., 300., 240., 210., 150., 120.]
xr,yr = mn.wavesDiagram(r,Deg)
2)Deg = [90., 45., 0. ,315., 270. , 225., 180., 135.]
xr,yr = mn.wavesDiagram(r,Deg)"""
x = np.zeros(len(E)+1)
y = np.zeros(len(E)+1)
for i in range(len(E)):
toDeg = np.pi / 180.
x[i] = E[i]* np.cos(Deg[i]*toDeg)
y[i] = E[i]* np.sin(Deg[i]*toDeg)
x[-1] = x[0]
y[-1] = y[0]
return x,y
|
{-# OPTIONS --prop --without-K --rewriting #-}
module Calf.Types.Bool where
open import Calf.Prelude
open import Calf.Metalanguage
open import Data.Bool public using (Bool; true; false; if_then_else_)
bool : tp pos
bool = U (meta Bool)
|
library(dplyr)
## requires targets df
age_table <- function(df) {
by_target_age <- df %>%
filter(!is.na(Age)) %>%
group_by(Age) %>%
summarise(total = n())
return(by_target_age)
}
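## Illustrative usage with a hypothetical data frame:
## age_table(data.frame(Age = c(30, 30, 41, NA)))
## -> one row per Age with its count, NA rows dropped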
|
@doc """
KMeansOptions <: Options
Collect the data necessary during computation of the k means clustering, i.e.
* `points::Vector{P}` – the given data
* `centers::Vector{P}` – the cluster centers
* `assignment::Vector{<:Int}` – a vector the same length as `points` assigning each of them to a cluster
* `stop::StoppingCriterion` – a stopping criterion
Here `P` is a data type for points on the manifold the `points` (and `centers`) live on.
This manifold is stored in the [`KMeansProblem`](@ref).
# Constructor
KMeansOptions(
points::Vector{P},
centers::Vector{P},
stop::StoppingCriterion=StopAfterIteration(100)
)
Initialize the options. The assignment is set to zero and initialized at the beginning of
the algorithm.
"""
struct KMeansOptions{P} <: Options
points::Vector{P}
centers::Vector{P}
assignment::Vector{<:Int}
stop::StoppingCriterion
function KMeansOptions{P}(points::Vector{P}, centers::Vector{P}, stop::StoppingCriterion) where {P}
return new(points, centers, zeros(Int,length(points)), stop)
end
end
function KMeansOptions(points::Vector{P}, centers::Vector{P}, stop::StoppingCriterion=StopAfterIteration(100)) where {P}
return KMeansOptions{P}(points, centers, stop)
end
@doc """
KMeansProblem <: Problem
Store the fixed data necessary for [`kmeans`](@ref), i.e. only a `Manifold M`.
"""
struct KMeansProblem{TM <: Manifold} <: Problem
M::TM
end
function initialize_solver!(p::KMeansProblem, o::KMeansOptions)
k_means_update_assignment!(p,o)
end
function step_solver!(p::KMeansProblem, o::KMeansOptions, ::Int)
# (1) Update assignments
k_means_update_assignment!(p,o)
# (2) Update centers
for i=1:length(o.centers)
sel = o.assignment .== i # broadcast comparison; a plain == would return a single Bool
any(sel) && mean!(p.M, o.centers[i], o.points[sel])
end
end
function k_means_update_assignment!(p::KMeansProblem, o::KMeansOptions)
for i=1:length(o.points)
o.assignment[i] = argmin([ distance(p.M,o.points[i],c) for c in o.centers ] )
end
end
"""
kmeans( M::Manifold, pts::Vector{P};
num_centers=5,
centers = pts[1:num_centers],
stop=StopAfterIteration(100),
kwargs...
)
Compute a simple k-means on a Riemannian manifold `M` for the points `pts`.
The `num_centers` defaults to `5` and the initial centers `centers` are set to the first
`num_centers` data items. The stopping criterion is set by default to 100 iterations.
The `kwargs...` can be used to initialize [`RecordOptions`](https://manoptjl.org/stable/plans/index.html#RecordOptions-1) or [`DebugOptions`](https://manoptjl.org/stable/plans/index.html#DebugOptions-1)
decorators from [Manopt.jl](https://manoptjl.org)
Returns the final [`KMeansOptions`](@ref) including the final assignment vector and the centers.
"""
function kmeans(M::Manifold, pts::Vector{P};
num_centers = 5,
centers = pts[1:num_centers],
stop=StopAfterIteration(100),
kwargs...
) where {P}
p = KMeansProblem(M)
o = KMeansOptions(pts,centers,stop)
o = decorate_options(o; kwargs...)
oR = solve(p,o)
return get_options(oR)
end
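# Illustrative usage sketch (assumes Manifolds.jl's `Sphere` and LinearAlgebra's
# `normalize` are available; the data here is made up):
#   M = Sphere(2)
#   pts = [normalize(randn(3)) for _ in 1:100]
#   o = kmeans(M, pts; num_centers = 3)
#   o.assignment   # cluster index for each point
#   o.centers      # Riemannian mean of each cluster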
|
function mixread(filename_cstr, blockinfo, blocks_ptr) bind(c)
use, intrinsic :: iso_fortran_env, only: real64
use, intrinsic :: iso_c_binding
use g2k_c_binding
implicit none
type, bind(c) :: blocksinfo_t
integer(c_int) :: nblocks
integer(c_int) :: nelectrons
integer(c_int) :: ncsfstotal
integer(c_int) :: norbitals
integer(c_int) :: nvectotal
integer(c_int) :: nvecsize
end type
type, bind(c) :: block_t
integer(c_int) :: blockid
integer(c_int) :: ncsfs
integer(c_int) :: nevs
integer(c_int) :: iatjp, iaspa
real(c_double) :: eav
type(c_ptr) :: eigenstates
type(c_ptr) :: eigenenergies
end type
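! For reference, a C-side struct matching block_t (an illustration assuming the
! usual bind(c) layout, not taken from the original headers) would be:
!   typedef struct { int blockid, ncsfs, nevs, iatjp, iaspa;
!                    double eav; double *eigenstates, *eigenenergies; } block_t;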
integer, parameter :: dp = real64
character(kind=c_char), intent(in) :: filename_cstr(1)
type(blocksinfo_t), intent(out) :: blockinfo
type(c_ptr), intent(out) :: blocks_ptr
type(block_t), pointer :: blocks(:)
type(block_t) :: block
integer(c_int) :: mixread
character(:), allocatable :: filename
integer :: fhandle, ios
character(255) :: iom
character(6) :: g92mix
integer :: errlineno = 0
integer :: ib, nb, ncfblk, nevblk, iatjp, iaspa
integer :: i, j, dummy_int
real(c_double), pointer :: eval(:), evec(:,:)
filename = from_cstring(filename_cstr)
open(newunit=fhandle, file=filename, form="unformatted", status="old", iostat=ios, iomsg=iom)
if(ios /= 0) then
print *, "ERROR: Unable to open file:", ios, iom
mixread = 1
return
endif
! Check header
read(fhandle, iostat=ios, iomsg=iom) g92mix
if(ios /= 0 .or. g92mix /= "G92MIX") then
print *, "ERROR: Bad file header -- not a G92MIX file?"
mixread = 3
return
endif
! Read the global information of the mixing file (number of blocks etc.).
! Corresponds to the following READ in rmixextract:
!
! READ (nfmix) nelec, ncftot, nw, nvectot, nvecsiz, nblock
!
errlineno=__LINE__; read(fhandle, iostat=ios, iomsg=iom) &
blockinfo%nelectrons, blockinfo%ncsfstotal, blockinfo%norbitals, &
blockinfo%nvectotal, blockinfo%nvecsize, blockinfo%nblocks
if(ios /= 0) go to 999
allocate(blocks(blockinfo%nblocks))
blocks_ptr = c_loc(blocks)
do ib = 1, blockinfo%nblocks
! ncfblk -- total number of CSFs in the block
! nevblk -- number of states in the block
! eav -- energy of the state
read(fhandle, iostat=ios, iomsg=iom) nb, ncfblk, nevblk, blocks(ib)%iatjp, blocks(ib)%iaspa
if(ios /= 0) go to 999
blocks(ib)%blockid = nb
blocks(ib)%ncsfs = ncfblk
blocks(ib)%nevs = nevblk
! The original allocations:
! CALL alloc (pnteval, nevblk, 8)
! CALL alloc (pntevec, nevblk*ncfblk, 8)
! CALL alloc (pntiset, ncfblk, 4)
allocate(eval(nevblk), evec(ncfblk, nevblk))
blocks(ib)%eigenstates = c_loc(evec)
blocks(ib)%eigenenergies = c_loc(eval)
errlineno=__LINE__; read(fhandle, iostat=ios, iomsg=iom) (dummy_int, i = 1, nevblk)
if(ios /= 0) go to 999
errlineno=__LINE__; read(fhandle, iostat=ios, iomsg=iom) blocks(ib)%eav, (eval(i), i = 1, nevblk)
if(ios /= 0) go to 999
errlineno=__LINE__; read(fhandle, iostat=ios, iomsg=iom) ((evec(i,j), i = 1, ncfblk), j = 1, nevblk)
if(ios /= 0) go to 999
do j = 1, nevblk
eval(j) = blocks(ib)%eav + eval(j)
enddo
enddo
close(fhandle)
mixread = 0 ! no error
return
! Error handling for IO errors (reachable via goto)
999 continue
print '(a)', "Terminating mixread() with IO error."
print '(a,a,":",i0)', " at: ", __FILE__, errlineno
print '(" while reading: ",a)', filename
print *, ios, iom
close(fhandle)
mixread = 2
return
end function mixread
|
import ray
import argparse
import numpy as np
import scipy.optimize
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
parser = argparse.ArgumentParser(description="Run the L-BFGS example.")
parser.add_argument("--node-ip-address", default=None, type=str, help="The IP address of this node.")
parser.add_argument("--scheduler-address", default=None, type=str, help="The address of the scheduler.")
if __name__ == "__main__":
args = parser.parse_args()
# If node_ip_address and scheduler_address are provided, then this command
# will connect the driver to the existing scheduler. If not, it will start
# a local scheduler and connect to it.
ray.init(start_ray_local=(args.node_ip_address is None),
node_ip_address=args.node_ip_address,
scheduler_address=args.scheduler_address,
num_workers=(10 if args.node_ip_address is None else None))
# Define the dimensions of the data and of the model.
image_dimension = 784
label_dimension = 10
w_shape = [image_dimension, label_dimension]
w_size = np.prod(w_shape)
b_shape = [label_dimension]
b_size = np.prod(b_shape)
dim = w_size + b_size
# Define a function for initializing the network. Note that this code does not
# initialize the network weights. If it did, the weights would be randomly
# initialized on each worker and would differ from worker to worker. We pass
# the weights into the remote functions loss and grad so that the weights are
# the same on each worker.
def net_initialization():
x = tf.placeholder(tf.float32, [None, image_dimension])
w = tf.Variable(tf.zeros(w_shape))
b = tf.Variable(tf.zeros(b_shape))
y = tf.nn.softmax(tf.matmul(x, w) + b)
y_ = tf.placeholder(tf.float32, [None, label_dimension])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
cross_entropy_grads = tf.gradients(cross_entropy, [w, b])
sess = tf.Session()
# In order to set the weights of the TensorFlow graph on a worker, we add
# assignment nodes. To get the network weights (as a list of numpy arrays)
# and to set the network weights (from a list of numpy arrays), use the
# methods get_weights and set_weights. This can be done from within a remote
# function or on the driver.
def get_and_set_weights_methods():
assignment_placeholders = []
assignment_nodes = []
for var in tf.trainable_variables():
assignment_placeholders.append(tf.placeholder(var.value().dtype, var.get_shape().as_list()))
assignment_nodes.append(var.assign(assignment_placeholders[-1]))
def get_weights():
return [v.eval(session=sess) for v in tf.trainable_variables()]
def set_weights(new_weights):
sess.run(assignment_nodes, feed_dict={p: w for p, w in zip(assignment_placeholders, new_weights)})
return get_weights, set_weights
get_weights, set_weights = get_and_set_weights_methods()
return sess, cross_entropy, cross_entropy_grads, x, y_, get_weights, set_weights
# By default, when a reusable variable is used by a remote function, the
# initialization code will be rerun at the end of the remote task to ensure
# that the state of the variable is not changed by the remote task. However,
# the initialization code may be expensive. This case is one example, because
# a TensorFlow network is constructed. In this case, we pass in a special
# reinitialization function which gets run instead of the original
# initialization code. As users, if we pass in custom reinitialization code,
# we must ensure that no state is leaked between tasks.
def net_reinitialization(net_vars):
return net_vars
# Create a reusable variable for the network.
ray.reusables.net_vars = ray.Reusable(net_initialization, net_reinitialization)
# Load the weights into the network.
def load_weights(theta):
sess, _, _, _, _, get_weights, set_weights = ray.reusables.net_vars
set_weights([theta[:w_size].reshape(w_shape), theta[w_size:].reshape(b_shape)])
# Compute the loss on a batch of data.
@ray.remote
def loss(theta, xs, ys):
sess, cross_entropy, _, x, y_, _, _ = ray.reusables.net_vars
load_weights(theta)
return float(sess.run(cross_entropy, feed_dict={x: xs, y_: ys}))
# Compute the gradient of the loss on a batch of data.
@ray.remote
def grad(theta, xs, ys):
sess, _, cross_entropy_grads, x, y_, _, _ = ray.reusables.net_vars
load_weights(theta)
gradients = sess.run(cross_entropy_grads, feed_dict={x: xs, y_: ys})
return np.concatenate([g.flatten() for g in gradients])
# Compute the loss on the entire dataset.
def full_loss(theta):
theta_id = ray.put(theta)
loss_ids = [loss.remote(theta_id, xs_id, ys_id) for (xs_id, ys_id) in batch_ids]
return sum(ray.get(loss_ids))
# Compute the gradient of the loss on the entire dataset.
def full_grad(theta):
theta_id = ray.put(theta)
grad_ids = [grad.remote(theta_id, xs_id, ys_id) for (xs_id, ys_id) in batch_ids]
return sum(ray.get(grad_ids)).astype("float64") # This conversion is necessary for use with fmin_l_bfgs_b.
# From the perspective of scipy.optimize.fmin_l_bfgs_b, full_loss is simply a
# function which takes some parameters theta, and computes a loss. Similarly,
# full_grad is a function which takes some parameters theta, and computes the
# gradient of the loss. Internally, these functions use Ray to distribute the
# computation of the loss and the gradient over the data that is represented
# by the remote object IDs x_batches and y_batches and which is potentially
# distributed over a cluster. However, these details are hidden from
# scipy.optimize.fmin_l_bfgs_b, which simply uses it to run the L-BFGS
# algorithm.
# Load the mnist data and turn the data into remote objects.
print "Downloading the MNIST dataset. This may take a minute."
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
batch_size = 100
num_batches = mnist.train.num_examples // batch_size
batches = [mnist.train.next_batch(batch_size) for _ in range(num_batches)]
print("Putting MNIST in the object store.")
batch_ids = [(ray.put(xs), ray.put(ys)) for (xs, ys) in batches]
# Initialize the network weights with small random values.
theta_init = 1e-2 * np.random.normal(size=dim)
# Use L-BFGS to minimize the loss function.
print "Running L-BFGS."
result = scipy.optimize.fmin_l_bfgs_b(full_loss, theta_init, maxiter=10, fprime=full_grad, disp=True)
|
Set Implicit Arguments.
Require Import Coq.Lists.SetoidList.
Definition equiv_2 A B p1 p2 := forall (a : A) (b : B), p1 a b <-> p2 a b.
Lemma equiv_2_trans : forall A B a b c, @equiv_2 A B a b -> equiv_2 b c -> equiv_2 a c.
unfold equiv_2; intros; split; intros.
eapply H0; eapply H; eauto.
eapply H; eapply H0; eauto.
Qed.
Lemma InA_eq_In_iff : forall elt (ls : list elt) (x : elt), InA eq x ls <-> List.In x ls.
induction ls; simpl; intros.
intuition.
eapply InA_nil in H; eauto.
split; intros.
inversion H; subst.
eauto.
right.
eapply IHls.
eauto.
destruct H.
subst.
econstructor 1.
eauto.
econstructor 2.
eapply IHls.
eauto.
Qed.
Lemma InA_weaken :
forall A (P : A -> A -> Prop) (x : A) (ls : list A),
InA P x ls ->
forall (P' : A -> A -> Prop) x',
(forall y, P x y -> P' x' y) ->
InA P' x' ls.
induction 1; simpl; intuition.
Qed.
Lemma equiv_InA : forall elt (eq1 eq2 : elt -> elt -> Prop), equiv_2 eq1 eq2 -> equiv_2 (InA eq1) (InA eq2).
unfold equiv_2; split; intros; eapply InA_weaken; eauto; intros; eapply H; eauto.
Qed.
Lemma In_InA : forall A (x : A) ls,
List.In x ls
-> InA eq x ls.
intros; eapply InA_eq_In_iff; eauto.
Qed.
Lemma InA_In : forall A (x : A) ls,
InA eq x ls ->
List.In x ls.
intros; eapply InA_eq_In_iff; eauto.
Qed.
Local Hint Constructors List.NoDup NoDupA.
Lemma NoDupA_NoDup : forall A ls,
@NoDupA A eq ls
-> List.NoDup ls.
induction 1; intuition auto using In_InA.
Qed.
Lemma NoDup_NoDupA : forall A ls,
List.NoDup ls ->
@NoDupA A eq ls.
induction 1; intuition auto using InA_In.
Qed.
|
include("startup.jl")
for col in names(tps)
tps[!, col] = collect(tps[!, col])
end
tps_specific = stack(tps, ["ageMonths", "cogScore", "ECHOTPCoded", brainmeta...], [:subject, :timepoint])
subj_specific = stack(tps, [:mother_HHS_Education, :simple_race], [:subject])
grp = groupby(subj_specific, [:subject, :variable])
subj_specific = DataFrames.combine(grp, :value=> (v-> coalesce(v...))=> :value)
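# subject-level variables repeat across timepoints; coalesce(v...) keeps the
# first non-missing value for each (subject, variable) pair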
##
tps_specific = vcat(tps_specific, DataFrame(subject = get(species, :subject),
timepoint = get(species, :timepoint),
variable = fill("sample", nsamples(species)),
value = samplenames(species))
)
for s in samples(species)
subject = s.subject
timepoint = s.timepoint
for f in features(species)
variable = name(f)
value = species[f, s]
push!(tps_specific, (; subject, timepoint, variable, value))
end
end
CSV.write("data/wrangled/tidy_timepoints_with_brain.csv", tps_specific)
CSV.write("data/wrangled/tidy_subjects.csv", subj_specific)
##
|
```python
import sympy as sy
import scipy as sp
import numpy as np
import random as rd
import plotly as py
from ipywidgets import interact
from matplotlib import pyplot
from stat_dist import *
```
##### Gaussian distribution is:
$$ f(x; \mu, \sigma) = \frac{1}{\sqrt{2 \pi \sigma^2}} e^{-\frac{(x-\mu)^2}{2\sigma^2}}$$
where $\mu$ is the mean value ($1^{st}$ moment) and $\sigma$ is the standard deviation (square root of the $2^{nd}$ central moment).
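As a quick sanity check, independent of the `stat_dist` helpers, the sample moments of Gaussian draws should approach $\mu$ and $\sigma$ (the values below are illustrative):
```python
import numpy as np
mu, sigma = 2.0, 1.5
x = np.random.normal(mu, sigma, size=100000)
print(x.mean(), x.std())  # both should be close to mu and sigma
```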
```python
N = int(input())
l, r = map(float, input().split())
```
```python
def g(mu, sigma):
x = gauss(mu-3*sigma, mu+3*sigma, mu, sigma, N)
stat(x, mu-3*sigma, mu+3*sigma, 0.001, N)
interact(g, mu = (-10, 10, 0.01), sigma = (0.01, 10, 0.01))
```
#### Uniform distribution on set $\Omega$ is:
$$p(x; \Omega) = \frac{1}{\mu(\Omega)}$$
where $\mu(\Omega) = \int_{\Omega} d\mu$ is Lebesgue measure of a set $\Omega$.
For example:
1. $\Omega = [a,b]$: $$p(x) = \frac{1}{b-a}$$,
2. $\Omega = \{a_1,a_2,...,a_N\}$: $$p(a_i) = \frac{1}{N}$$
and so on...
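A quick empirical check of case 2 with illustrative values:
```python
import numpy as np
a = np.random.randint(0, 5, size=100000)  # Omega = {0,...,4}, so N = 5
print(np.bincount(a) / float(a.size))     # each relative frequency is near 1/5
```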
```python
N = int(input())
l, r = map(float, input().split())
x = uniform(l, r, N)
stat(x, l, r, 0.001, N)
```
#### Gamma distribution is:
$$f(x; \alpha, \beta) = \frac{\beta^{\alpha} x^{\alpha-1}e^{-\beta x}}{\Gamma(\alpha)}$$
```python
N = int(input())
h = float(input())
```
```python
def gam(alfa, beta):
x = gamma(h, alfa, beta, N)
stat(x, 0, h, 0.001, N)
interact(gam, alfa = (0.01, 10, 0.01), beta = (0.01, 10, 0.01))
```
stat_dist.py contains some more probability distribution functions, whose graphs can be constructed in the same manner.
|
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
#include <pybind11/stl.h>
#include <pybind11/stl_bind.h>
#include <pybind11/operators.h>
#include <boost/archive/text_oarchive.hpp>
#include <mspass/utility/AntelopePf.h>
#include <mspass/utility/AttributeMap.h>
#include <mspass/utility/dmatrix.h>
#include <mspass/utility/Metadata.h>
#include <mspass/utility/MetadataDefinitions.h>
#include <mspass/utility/ProcessingHistory.h>
#include <mspass/utility/SphericalCoordinate.h>
#include "python/utility/Publicdmatrix_py.h"
#include "python/utility/boost_any_converter_py.h"
namespace mspass {
namespace mspasspy {
namespace py=pybind11;
using namespace std;
using namespace mspass::utility;
/* This is what the pybind11 documentation calls a trampoline class,
needed to handle virtual functions in the abstract base class BasicMetadata. */
class PyBasicMetadata : public BasicMetadata {
public:
int get_int(const std::string key) const override {
PYBIND11_OVERLOAD_PURE(
int,
BasicMetadata,
get_int,
key
);
};
double get_double(const std::string key) const override {
PYBIND11_OVERLOAD_PURE(
double,
BasicMetadata,
get_double,
key
);
};
bool get_bool(const std::string key) const override {
PYBIND11_OVERLOAD_PURE(
bool,
BasicMetadata,
get_bool,
key
);
};
std::string get_string(const std::string key) const override {
PYBIND11_OVERLOAD_PURE(
std::string,
BasicMetadata,
get_string,
key
);
};
void put(const std::string key,const double val) override {
PYBIND11_OVERLOAD_PURE(
void,
BasicMetadata,
put,
key,
val
);
};
void put(const std::string key,const int val) override {
PYBIND11_OVERLOAD_PURE(
void,
BasicMetadata,
put,
key,
val
);
};
void put(const std::string key,const bool val) override {
PYBIND11_OVERLOAD_PURE(
void,
BasicMetadata,
put,
key,
val
);
};
void put(const std::string key,const std::string val) override {
PYBIND11_OVERLOAD_PURE(
void,
BasicMetadata,
put,
key,
val
);
};
};
/* Trampoline class for BasicProcessingHistory - new for 2020 API change */
class PyBasicProcessingHistory : public BasicProcessingHistory {
public:
size_t number_of_stages() override {
PYBIND11_OVERLOAD(
size_t,
BasicProcessingHistory,
number_of_stages, //extra comma needed for reasons given in pybind docs
);
}
};
/* Special iterator data structure for python */
struct PyMetadataIterator {
PyMetadataIterator(const Metadata &md, py::object ref) : md(md), ref(ref) { }
std::string next() {
if (index == md.end())
throw py::stop_iteration();
std::string key = index->first;
++index;
return key;
}
const Metadata &md;
py::object ref; // keep a reference
std::map<std::string,boost::any>::const_iterator index = md.begin();
};
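/* With the bindings defined below, this iterator lets Python walk Metadata keys
   directly; an illustrative (hypothetical) snippet:
       md = Metadata({"sta": "AAK", "delta": 0.01})
       for key in md:
           print(key, md[key])
*/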
/* The following Python C API are needed to construct the PyMsPASSError
exception with pybind11. This is following the example from:
https://www.pierov.org/2020/03/01/python-custom-exceptions-c-extensions/
*/
static PyObject *MsPASSError_tp_str(PyObject *selfPtr)
{
py::str ret;
try {
py::handle self(selfPtr);
py::tuple args = self.attr("args");
ret = py::str(args[0]);
} catch (py::error_already_set &e) {
ret = "";
}
/* ret will go out of scope when returning, therefore increase its reference
count, and transfer it to the caller (like PyObject_Str). */
ret.inc_ref();
return ret.ptr();
}
static PyObject *MsPASSError_get_message(PyObject *selfPtr, void *closure)
{
return MsPASSError_tp_str(selfPtr);
}
static PyObject *MsPASSError_get_severity(PyObject *selfPtr, void *closure)
{
try {
py::handle self(selfPtr);
py::tuple args = self.attr("args");
py::object severity;
if(args.size() < 2)
severity = py::cast(ErrorSeverity::Fatal);
else {
severity = args[1];
if(py::isinstance<py::str>(args[1])) {
severity = py::cast(string2severity(std::string(py::str(args[1]))));
} else if(!py::isinstance(args[1], py::cast(ErrorSeverity::Fatal).get_type())) {
severity = py::cast(ErrorSeverity::Fatal);
}
}
severity.inc_ref();
return severity.ptr();
} catch (py::error_already_set &e) {
/* We could simply backpropagate the exception with e.restore, but
exceptions like OSError return None when an attribute is not set. */
py::none ret;
ret.inc_ref();
return ret.ptr();
}
}
static PyGetSetDef MsPASSError_getsetters[] = {
{"message", (getter)MsPASSError_get_message, NULL, NULL},
{"severity", (getter)MsPASSError_get_severity, NULL, NULL},
{NULL}
};
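/* These getters expose the exception payload on the Python side; an
   illustrative (hypothetical) usage, assuming MsPASSError is registered with
   this getset table:
       try: ...
       except MsPASSError as e: print(e.message, e.severity)
*/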
PYBIND11_MODULE(utility, m) {
m.attr("__name__") = "mspasspy.ccore.utility";
m.doc() = "A submodule for utility namespace of ccore";
py::class_<SphericalCoordinate>(m,"SphericalCoordinate","Encapsulates concept of spherical coordinates")
.def(py::init<>())
.def(py::init<const SphericalCoordinate&>())
.def(py::init([](py::array_t<double> uv) {
py::buffer_info info = uv.request();
if (info.ndim != 1 || info.shape[0] != 3)
throw py::value_error("SphericalCoordinate expects a vector of 3 elements");
return UnitVectorToSpherical(static_cast<double*>(info.ptr));
}))
/* The use of capsule to avoid copy is found at
https://github.com/pybind/pybind11/issues/1042 */
.def_property_readonly("unit_vector", [](const SphericalCoordinate& self) {
double* v = SphericalToUnitVector(self);
auto capsule = py::capsule(v, [](void *v) { delete[] reinterpret_cast<double*>(v); });
return py::array(3, v, capsule);
},"Return the unit vector equivalent to direction defined in sphereical coordinates")
.def_readwrite("radius", &SphericalCoordinate::radius,"R of spherical coordinates")
.def_readwrite("theta", &SphericalCoordinate::theta,"zonal angle of spherical coordinates")
.def_readwrite("phi", &SphericalCoordinate::phi,"azimuthal angle of spherical coordinates")
;
py::class_<BasicMetadata,PyBasicMetadata>(m,"BasicMetadata")
.def(py::init<>())
;
py::enum_<MDtype>(m,"MDtype")
.value("Real",MDtype::Real)
.value("Real32",MDtype::Real32)
.value("Double",MDtype::Double)
.value("Real64",MDtype::Real64)
.value("Integer",MDtype::Integer)
.value("Int32",MDtype::Int32)
.value("Long",MDtype::Long)
.value("Int64",MDtype::Int64)
.value("String",MDtype::String)
.value("Boolean",MDtype::Boolean)
.value("Double_Array",MDtype::Double_Array)
.value("Invalid",MDtype::Invalid)
;
py::class_<Metadata,BasicMetadata> md(m,"Metadata");
md.def(py::init<>())
.def(py::init<const Metadata&>())
.def(py::init<std::ifstream&,const std::string>())
/* The order of the following type check matters. Note that due to
* the pybind11's asymmetric conversion behavior from bytes to string,
* we have to handle bytes before strings. The same applies to the
* put and __setitem__ methods.
*/
.def(py::init([](py::dict d) {
auto md = new Metadata();
for(auto i : d) {
if(py::isinstance<py::float_>(i.second))
md->put(std::string(py::str(i.first)), (i.second.cast<double>()));
else if(py::isinstance<py::bool_>(i.second))
md->put(std::string(py::str(i.first)), (i.second.cast<bool>()));
else if(py::isinstance<py::int_>(i.second))
md->put(std::string(py::str(i.first)), (i.second.cast<long>()));
else if(py::isinstance<py::bytes>(i.second))
md->put_object(std::string(py::str(i.first)), py::reinterpret_borrow<py::object>(i.second));
else if(py::isinstance<py::str>(i.second))
md->put(std::string(py::str(i.first)), std::string(py::str(i.second)));
else
md->put_object(std::string(py::str(i.first)), py::reinterpret_borrow<py::object>(i.second));
}
return md;
}))
.def("get_double",&Metadata::get_double,"Retrieve a real number by a specified key")
.def("get_long",&Metadata::get_long,"Return a long integer by a specified key")
.def("get_bool",&Metadata::get_bool,"Return a (C) boolean defined by a specified key")
.def("get_string",&Metadata::get_string,"Return a string indexed by a specified key")
.def("get",&Metadata::get_any,"Return the value indexed by a specified key")
.def("__getitem__",&Metadata::get_any,"Return the value indexed by a specified key")
.def("put", [](Metadata& md, const py::bytes k, const py::object v) {
if(py::isinstance<py::float_>(v))
md.put(std::string(py::str(k.attr("__str__")())), (v.cast<double>()));
else if(py::isinstance<py::bool_>(v))
md.put(std::string(py::str(k.attr("__str__")())), (v.cast<bool>()));
else if(py::isinstance<py::int_>(v))
md.put(std::string(py::str(k.attr("__str__")())), (v.cast<long>()));
else if(py::isinstance<py::bytes>(v))
md.put_object(std::string(py::str(k.attr("__str__")())), v);
else if(py::isinstance<py::str>(v))
md.put(std::string(py::str(k.attr("__str__")())), std::string(py::str(v)));
else
md.put_object(std::string(py::str(k.attr("__str__")())), v);
})
.def("put",py::overload_cast<const std::string,const double>(&BasicMetadata::put))
.def("put",py::overload_cast<const std::string,const bool>(&BasicMetadata::put))
.def("put",py::overload_cast<const std::string,const long>(&Metadata::put_long))
.def("put",[](Metadata& md, const std::string k, const py::bytes v) {
md.put_object(k, py::reinterpret_borrow<py::object>(v));
})
.def("put",py::overload_cast<const std::string,const std::string>(&BasicMetadata::put))
.def("put",py::overload_cast<const std::string,const py::object>(&Metadata::put_object))
.def("__setitem__", [](Metadata& md, const py::bytes k, const py::object v) {
if(py::isinstance<py::float_>(v))
md.put(std::string(py::str(k.attr("__str__")())), (v.cast<double>()));
else if(py::isinstance<py::bool_>(v))
md.put(std::string(py::str(k.attr("__str__")())), (v.cast<bool>()));
else if(py::isinstance<py::int_>(v))
md.put(std::string(py::str(k.attr("__str__")())), (v.cast<long>()));
else if(py::isinstance<py::bytes>(v))
md.put_object(std::string(py::str(k.attr("__str__")())), v);
else if(py::isinstance<py::str>(v))
md.put(std::string(py::str(k.attr("__str__")())), std::string(py::str(v)));
else
md.put_object(std::string(py::str(k.attr("__str__")())), v);
})
.def("__setitem__",py::overload_cast<const std::string,const double>(&BasicMetadata::put))
.def("__setitem__",py::overload_cast<const std::string,const bool>(&BasicMetadata::put))
.def("__setitem__",py::overload_cast<const std::string,const long>(&Metadata::put_long))
.def("__setitem__",[](Metadata& md, const std::string k, const py::bytes v) {
md.put_object(k, py::reinterpret_borrow<py::object>(v));
})
.def("__setitem__",py::overload_cast<const std::string,const std::string>(&BasicMetadata::put))
.def("__setitem__",py::overload_cast<const std::string,const py::object>(&Metadata::put_object))
.def("put_double",&Metadata::put_double,"Interface class for doubles")
.def("put_bool",&Metadata::put_bool,"Interface class for boolean")
.def("put_string",&Metadata::put_string,"Interface class for strings")
.def("put_long",&Metadata::put_long,"Interface class for long ints")
/* Intentionally do NOT enable put_int. Found type skew problems if
* called from python. Best avoided.
.def("put_int",&Metadata::put_int,"Interface class for generic ints")
*/
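/* A hedged illustration of the resulting python behavior: integer values
 * dispatch through the generic put()/__setitem__ lambdas above, which store
 * them via v.cast<long>(), so md["n"] = 3 followed by md.get_long("n")
 * returns 3. */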
.def("keys",&Metadata::keys,"Return a list of the keys of all defined attributes")
.def("type",[](const Metadata &md, const std::string &key) -> std::string {
std::string typ = md.type(key);
if(typ == "pybind11::object")
return py::str(boost::any_cast<pybind11::object>(md.get_any(key)).get_type());
else if (typ.substr(0, 26) == "std::__cxx11::basic_string" || typ.substr(0, 22) == "std::__1::basic_string")
return std::string("string");
else
return typ;
},"Return a demangled typename for value associated with a key")
.def("modified",&Metadata::modified,"Return a list of all attributes that have been changes since construction")
.def("clear_modified",&Metadata::clear_modified,"Clear container used to mark altered Metadata")
/* For unknown reasons could not make this overload work.
* Ended up commenting out char * section of C++ code - baggage in python
* anyway.
.def("is_defined",py::overload_cast<const std::string>(&Metadata::is_defined))
.def("is_defined",py::overload_cast<const char *>(&Metadata::is_defined))
*/
.def("is_defined",&Metadata::is_defined,"Test if a key has a defined value")
.def("__contains__",&Metadata::is_defined,"Test if a key has a defined value")
.def("append_chain",&Metadata::append_chain,"Create or append to a string attribute that defines a chain")
.def("erase",&Metadata::erase,"Delete contents associated with a single key")
.def("__delitem__",&Metadata::erase,"Clears contents associated with a key")
.def("__len__",&Metadata::size,"Return len(self)")
.def("__iter__", [](py::object s) { return PyMetadataIterator(s.cast<const Metadata &>(), s); })
.def("__reversed__", [](const Metadata &s) -> Metadata {
throw py::type_error(std::string("'") +
py::cast(s).attr("__class__").attr("__name__").cast<std::string>() +
"' object is not reversible");
})
.def("__str__", [](const Metadata &s) -> std::string {
if(s.size() == 0)
return "{}";
std::string strout("{");
for(auto index = s.begin(); index != s.end(); ++index) {
std::string key = index->first;
key = std::string(py::repr(py::cast(key)));
if(index->second.type() == typeid(py::object)) {
py::str val = py::repr(boost::any_cast<py::object>(index->second));
key = key + ": " + std::string(val) + ", ";
}
else if (index->second.type() == typeid(double))
key = key + ": " + std::to_string(boost::any_cast<double>(index->second)) + ", ";
else if (index->second.type() == typeid(bool) &&
boost::any_cast<bool>(index->second) == true)
key = key + ": True, ";
else if (index->second.type() == typeid(bool) &&
boost::any_cast<bool>(index->second) == false)
key = key + ": False, ";
else if (index->second.type() == typeid(long))
key = key + ": " + std::to_string(boost::any_cast<long>(index->second)) + ", ";
/* The py::repr function will get the double/single
* quotes right based on the content of the string */
else
key = key + ": " + std::string(py::repr(py::cast(
boost::any_cast<string>(index->second)))) + ", ";
strout += key;
}
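// strip the trailing ", " appended by the loop above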
strout.pop_back();
strout.pop_back();
return strout + "}";
})
.def("__repr__", [](const Metadata &s) -> std::string {
return py::cast(s).attr("__class__").attr("__name__").cast<std::string>() +
"(" + std::string(py::str(py::cast(s).attr("__str__")())) + ")";
})
.def("change_key",&Metadata::change_key,"Change key to access an attribute")
.def(py::self += py::self)
.def(py::self + py::self)
/* these are needed to allow the class to be pickled */
.def(py::pickle(
[](const Metadata &self) {
string sbuf;
sbuf=serialize_metadata(self);
return py::make_tuple(sbuf);
},
[](py::tuple t) {
string sbuf=t[0].cast<std::string>();
return Metadata(restore_serialized_metadata(sbuf));
}
))
;
py::class_<PyMetadataIterator>(md, "Metadata_keyIterator")
.def("__iter__", [](PyMetadataIterator &it) -> PyMetadataIterator& { return it; })
.def("__next__", &PyMetadataIterator::next);
py::class_<AntelopePf,Metadata>(m,"AntelopePf")
.def(py::init<>())
.def(py::init<const AntelopePf&>())
.def(py::init<std::string>(),"Construct from a file")
.def(py::init<std::list<string>>(),"Construct from a list of strings defining lines to be parsed")
.def("get_tbl",&AntelopePf::get_tbl,"Fetch contents of a block of the pf file defined by Tbl&")
.def("get_branch",&AntelopePf::get_branch,"Fetch contents of a block of the pf defined by an Arr&")
.def("arr_keys",&AntelopePf::arr_keys,"Return a list of all branch (&Arr) keys")
.def("tbl_keys",&AntelopePf::tbl_keys,"Fetch a list of keys for all &Tbl blocks")
.def("ConvertToMetadata",&AntelopePf::ConvertToMetadata,"Convert to a flat Metadata space (no branches)")
;
py::class_<Metadata_typedef>(m,"Metadata_typedef")
.def(py::init<>())
.def_readwrite("tag",&Metadata_typedef::tag,"Name key for this metadata")
.def_readwrite("mdt",&Metadata_typedef::mdt,"Type of any value associated with this key")
;
/* We need this definition to bind dmatrix to a numpy array as described
in this section of the pybind11 documentation:
https://pybind11.readthedocs.io/en/stable/advanced/pycpp/numpy.html
Leans heavily on the example here:
https://github.com/pybind/pybind11/blob/master/tests/test_buffers.cpp
*/
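/* A hedged usage sketch from the python side (module path assumed from the
 * __add__ lambda below; numpy interoperates via the buffer protocol):
 *
 *   import numpy as np
 *   from mspasspy.ccore.utility import dmatrix
 *
 *   m = dmatrix(3, 2)              # 3x2 matrix of doubles
 *   m.zero()                       # initialize all elements to zero
 *   a = np.asarray(m)              # typically a no-copy view of the storage
 *   b = dmatrix(np.ones((3, 2)))   # construct from an array (data copied)
 */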
py::class_<dmatrix>(m, "dmatrix", py::buffer_protocol())
.def(py::init<>())
.def(py::init<size_t,size_t>())
.def(py::init<const dmatrix&>())
/* This wraps construction from a numpy array (the buffer contents are copied) */
.def(py::init([](py::array_t<double, py::array::f_style | py::array::forcecast> b) {
py::buffer_info info = b.request();
if (info.ndim != 2)
throw std::runtime_error("dmatrix python wrapper: Incompatible buffer dimension!");
auto v = new dmatrix(info.shape[0], info.shape[1]);
memcpy(v->get_address(0,0), info.ptr, sizeof(double) * v->rows() * v->columns());
return v;
}))
.def_buffer([](dmatrix &m) -> py::buffer_info {
return py::buffer_info(
m.get_address(0,0), /* Pointer to buffer */
sizeof(double), /* Size of one scalar */
py::format_descriptor<double>::format(), /* Python struct-style format descriptor */
2, /* Number of dimensions */
{ m.rows(), m.columns() }, /* Buffer dimensions */
{ sizeof(double), /* Strides (in bytes) for each index - inverted from example*/
sizeof(double) * m.rows() }
);
})
.def("rows",&dmatrix::rows,"Rows in the matrix")
.def("columns",&dmatrix::columns,"Columns in the matrix")
.def("__len__",&dmatrix::rows,"Rows in the matrix")
.def_property_readonly("size", [](dmatrix& self) {
return static_cast<Publicdmatrix&>(self).length;
},"The size of the matrix")
.def("zero",&dmatrix::zero,"Initialize a matrix to all zeros")
.def("transpose",[](const dmatrix &self) {
return tr(self);
},"Matrix transpose")
.def_property_readonly("shape", [](const dmatrix& self) {
return py::make_tuple(self.rows(), self.columns());
},"Return the unit vector equivalent to direction defined in sphereical coordinates")
.def(py::self + py::self,"Operator +")
.def("__add__", [](const dmatrix &a, py::object b) {
return py::module_::import("mspasspy.ccore.utility").attr("dmatrix")(
py::cast(a).attr("__getitem__")(py::reinterpret_steal<py::slice>(
PySlice_New(Py_None, Py_None, Py_None))).attr("__add__")(b));
})
.def(py::self - py::self,"Operator -")
.def("__sub__", [](const dmatrix &a, py::object b) {
return py::module_::import("mspasspy.ccore.utility").attr("dmatrix")(
py::cast(a).attr("__getitem__")(py::reinterpret_steal<py::slice>(
PySlice_New(Py_None, Py_None, Py_None))).attr("__sub__")(b));
})
.def(py::self * py::self,"Operator *")
.def(py::self * double(),"Operator *")
.def(double() * py::self,"Operator *")
.def(py::self += py::self,"Operator +=")
.def(py::self -= py::self,"Operator -=")
.def("__getitem__", [](dmatrix &m, py::slice slice) {
size_t start, stop, step, slicelength;
if (!slice.compute(m.rows(), &start, &stop, &step, &slicelength))
throw py::error_already_set();
double* packet;
try{
packet = m.get_address(start,0);
} catch (MsPASSError& e) {
packet = nullptr;
}
std::vector<ssize_t> size(2);
size[0] = slicelength;
size[1] = m.columns();
std::vector<ssize_t> stride(2);
stride[0] = sizeof(double) * step;
stride[1] = sizeof(double) * m.rows();
// The following undocumented trick is from
// https://github.com/pybind/pybind11/issues/323
py::str dummyDataOwner;
py::array rows(py::dtype(py::format_descriptor<double>::format()), size,
stride, packet, dummyDataOwner);
return rows;
})
.def("__getitem__", [](const dmatrix &m, std::pair<int, int> i) {
return py::cast(m).attr("__getitem__")(py::reinterpret_steal<py::slice>(
PySlice_New(Py_None, Py_None, Py_None))).attr("__getitem__")(i);
})
.def("__getitem__", [](dmatrix &m, int i) {
return py::cast(m).attr("__getitem__")(py::reinterpret_steal<py::slice>(
PySlice_New(Py_None, Py_None, Py_None))).attr("__getitem__")(i);
})
.def("__getitem__", [](const dmatrix &m, std::pair<py::slice, int> i) {
return py::cast(m).attr("__getitem__")(py::reinterpret_steal<py::slice>(
PySlice_New(Py_None, Py_None, Py_None))).attr("__getitem__")(i);
})
.def("__getitem__", [](const dmatrix &m, std::pair<int, py::slice> i) {
return py::cast(m).attr("__getitem__")(py::reinterpret_steal<py::slice>(
PySlice_New(Py_None, Py_None, Py_None))).attr("__getitem__")(i);
})
.def("__getitem__", [](const dmatrix &m, std::pair<py::slice, py::slice> i) {
return py::cast(m).attr("__getitem__")(py::reinterpret_steal<py::slice>(
PySlice_New(Py_None, Py_None, Py_None))).attr("__getitem__")(i);
})
.def("__setitem__", [](dmatrix &m, int i, py::object const b) {
py::cast(m).attr("__getitem__")(py::reinterpret_steal<py::slice>(
PySlice_New(Py_None, Py_None, Py_None))).attr("__setitem__")(i, b);
})
.def("__setitem__", [](dmatrix &m, py::slice i, py::object const b) {
py::cast(m).attr("__getitem__")(py::reinterpret_steal<py::slice>(
PySlice_New(Py_None, Py_None, Py_None))).attr("__setitem__")(i, b);
})
.def("__setitem__", [](dmatrix &m, std::pair<int, int> i, py::object const b) {
py::cast(m).attr("__getitem__")(py::reinterpret_steal<py::slice>(
PySlice_New(Py_None, Py_None, Py_None))).attr("__setitem__")(i, b);
})
.def("__setitem__", [](dmatrix &m, std::pair<py::slice, int> i, py::object const b) {
py::cast(m).attr("__getitem__")(py::reinterpret_steal<py::slice>(
PySlice_New(Py_None, Py_None, Py_None))).attr("__setitem__")(i, b);
})
.def("__setitem__", [](dmatrix &m, std::pair<int, py::slice> i, py::object const b) {
py::cast(m).attr("__getitem__")(py::reinterpret_steal<py::slice>(
PySlice_New(Py_None, Py_None, Py_None))).attr("__setitem__")(i, b);
})
.def("__setitem__", [](dmatrix &m, std::pair<py::slice, py::slice> i, py::object const b) {
py::cast(m).attr("__getitem__")(py::reinterpret_steal<py::slice>(
PySlice_New(Py_None, Py_None, Py_None))).attr("__setitem__")(i, b);
})
.def("__str__", [](const dmatrix &m) -> std::string {
return std::string(py::str(py::cast(m).attr("__getitem__")(py::reinterpret_steal<py::slice>(
PySlice_New(Py_None, Py_None, Py_None))).attr("__str__")()));
})
.def("__repr__", [](const dmatrix &m) -> std::string {
std::string strout("dmatrix(");
strout += std::string(py::str(py::cast(m).attr("__str__")())) + ")";
size_t pos = strout.find('\n');
while(pos != string::npos)
{
strout.insert(++pos, 8, ' ');
pos = strout.find('\n', pos);
}
return strout;
})
;
py::enum_<ErrorSeverity>(m,"ErrorSeverity")
.value("Fatal",ErrorSeverity::Fatal)
.value("Invalid",ErrorSeverity::Invalid)
.value("Suspect",ErrorSeverity::Suspect)
.value("Complaint",ErrorSeverity::Complaint)
.value("Debug",ErrorSeverity::Debug)
.value("Informational",ErrorSeverity::Informational)
;
/* The following magic were based on the great example from:
https://www.pierov.org/2020/03/01/python-custom-exceptions-c-extensions/
This appears to be the cleanest and easiest way to implement a custom
exception with pybind11.
The PyMsPASSError is the python object inherited from the base exception
object on the python side. This is done by the PyErr_NewException call.
The register_exception_translator is the pybind11 interface that catches
and translates any MsPASSError thrown from the C++ side and throws a new
PyMsPASSError to python side.
*/
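/* A hedged python-side sketch of handling the translated exception
 * (some_ccore_function is a hypothetical call that raises MsPASSError):
 *
 *   from mspasspy.ccore.utility import MsPASSError
 *   try:
 *       some_ccore_function()
 *   except MsPASSError as err:
 *       print(str(err))   # message rendered by the custom tp_str set above
 */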
static PyObject *PyMsPASSError = PyErr_NewException("mspasspy.ccore.utility.MsPASSError", NULL, NULL);
if (PyMsPASSError) {
PyTypeObject *as_type = reinterpret_cast<PyTypeObject *>(PyMsPASSError);
as_type->tp_str = MsPASSError_tp_str;
for (int i = 0; MsPASSError_getsetters[i].name != NULL; i++) {
PyObject *descr = PyDescr_NewGetSet(as_type, MsPASSError_getsetters+i);
auto dict = py::reinterpret_borrow<py::dict>(as_type->tp_dict);
dict[py::handle(PyDescr_NAME(descr))] = py::handle(descr);
}
Py_XINCREF(PyMsPASSError);
m.add_object("MsPASSError", py::handle(PyMsPASSError));
}
py::register_exception_translator([](std::exception_ptr p) {
try {
if (p) {
std::rethrow_exception(p);
}
} catch (MsPASSError &e) {
py::tuple args(2);
args[0] = e.what();
args[1] = e.severity();
PyErr_SetObject(PyMsPASSError, args.ptr());
}
});
/* this set of functions are companions to MsPASSError needed as a
workaround for problem that MsPASSError method are not visible to
error handlers for a caught MsPASSError exception. */
m.def("error_says_data_bad",&error_says_data_bad,
"Test if what message from MsPASSError defines data as invalid and should be killed")
;
m.def("error_severity_string",&parse_message_error_severity,
"Return a string defining error severity of a MsPASSError exception")
;
m.def("error_severity",&message_error_severity,
"Return an ErrorSeverity object defining severity of a MsPASSError being handled")
;
m.def("pfread",&pfread,"parameter file reader",
py::return_value_policy::copy,
py::arg("pffile")
);
m.def("get_mdlist",&get_mdlist,"retrieve list with keys and types",
py::return_value_policy::copy
);
py::enum_<MDDefFormat>(m,"MDDefFormat")
.value("PF",MDDefFormat::PF)
.value("YAML",MDDefFormat::YAML)
;
py::class_<MetadataDefinitions>(m,"MetadataDefinitions","Load a catalog of valid metadata names with types defined")
.def(py::init<>())
.def(py::init<string>())
.def(py::init<std::string,MDDefFormat>())
.def("is_defined",&MetadataDefinitions::is_defined,"Test if a key is defined")
.def("concept",&MetadataDefinitions::concept,"Return a string with a brief description of the concept this attribute captures")
.def("type",&MetadataDefinitions::type,"Return a description of the type of this attribute")
.def("add",&MetadataDefinitions::add,"Append a new attribute to the catalog")
.def("has_alias",&MetadataDefinitions::has_alias,"Returns true if a specified key as an alterate name - alias")
.def("is_alias",&MetadataDefinitions::is_alias,"Return true if a key is an alias")
.def("aliases",&MetadataDefinitions::aliases,"Return a list of aliases for a particular key")
.def("unique_name",&MetadataDefinitions::unique_name,"Returns the unique key name associated with an alias")
.def("add_alias",&MetadataDefinitions::add_alias,"Add an alias for a particular atrribute key")
.def("keys",&MetadataDefinitions::keys,"Return a list of all valid keys")
.def("writeable",&MetadataDefinitions::writeable,"Test if an attribute should be saved")
.def("readonly",&MetadataDefinitions::readonly,"Test if an attribute is marked readonly")
.def("set_readonly",&MetadataDefinitions::set_readonly,"Force an attribute to be marked readonly")
.def("set_writeable",&MetadataDefinitions::set_writeable,"Force an attribute to be marked as writeable")
.def("is_normalized",&MetadataDefinitions::is_normalized,"Test to see if an attribute is stored in a master collection (table)")
.def("unique_id_key",&MetadataDefinitions::unique_id_key,"Return the key for a unique id to fetch an attribute from a master collection (table)")
.def("collection",&MetadataDefinitions::collection,"Return the table (collection) name for an attribute defined in a master table")
.def("normalize_data",&MetadataDefinitions::normalize_data,"Faster method to return unique_id_key and table name")
.def("apply_aliases",&MetadataDefinitions::apply_aliases,"Apply a set of alias names to Metadata or child of Metadata")
.def("clear_aliases",&MetadataDefinitions::clear_aliases,"Clear aliases in a Metadata or child of Metadata")
.def(py::self += py::self)
;
/* These are needed for mspass extensions of Core data objects */
py::class_<LogData>(m,"LogData","Many mspass create error and log messages with this structure")
.def(py::init<>())
.def(py::init<int,std::string,MsPASSError&>())
.def(py::init<int,std::string,std::string,ErrorSeverity>())
.def(py::init([](py::dict d) {
auto ld = new LogData();
for(auto i : d) {
if(std::string(py::str(i.first)) == "job_id")
ld->job_id = i.second.cast<long>();
else if(std::string(py::str(i.first)) == "p_id")
ld->p_id = i.second.cast<long>();
else if(std::string(py::str(i.first)) == "algorithm")
ld->algorithm = i.second.cast<std::string>();
else if(std::string(py::str(i.first)) == "message")
ld->message = i.second.cast<std::string>();
else if(std::string(py::str(i.first)) == "badness")
ld->badness = i.second.cast<ErrorSeverity>();
}
return ld;
}))
.def_readwrite("job_id",&LogData::job_id,"Return the job id defined for this log message")
.def_readwrite("p_id",&LogData::p_id,"Return the process id of the procedure that threw the defined message")
.def_readwrite("algorithm",&LogData::algorithm,"Return the algorithm of the procedure that threw the defined message")
.def_readwrite("badness",&LogData::badness,"Return a error level code")
.def_readwrite("message",&LogData::message,"Return the actual posted message")
.def("__str__", [](const LogData &ld) -> std::string {
return std::string("{'job_id': ") + std::to_string(ld.job_id) +
", 'p_id': " + std::to_string(ld.p_id) +
", 'algorithm': " + std::string(py::repr(py::cast(ld.algorithm))) +
", 'message': " + std::string(py::repr(py::cast(ld.message))) + ", 'badness': " +
std::string(py::str(py::cast(ld.badness))) + "}";
})
.def("__repr__", [](const LogData &ld) -> std::string {
std::string strout("LogData(");
return strout + std::string(py::str(py::cast(ld).attr("__str__")())) + ")";
})
;
py::class_<ErrorLogger>(m,"ErrorLogger","Used to post any nonfatal errors without aborting a program or family of parallel programs")
.def(py::init<>())
.def(py::init<const ErrorLogger&>())
.def(py::init<int>())
.def("set_job_id",&ErrorLogger::set_job_id)
.def("get_job_id",&ErrorLogger::get_job_id)
.def("log_error",py::overload_cast<const MsPASSError&>(&ErrorLogger::log_error),"log error thrown as MsPASSError")
.def("log_error",py::overload_cast<const std::string,const std::string,const ErrorSeverity>(&ErrorLogger::log_error),"log a message at a specified severity level")
.def("log_verbose",&ErrorLogger::log_verbose,"Log an informational message - tagged as log message")
.def("get_error_log",&ErrorLogger::get_error_log,"Return all posted entries")
.def("size",&ErrorLogger::size,"Return number of entries in this log")
.def("__len__",&ErrorLogger::size,"Return number of entries in this log")
.def("worst_errors",&ErrorLogger::worst_errors,"Return a list of only the worst errors")
.def("__getitem__", [](ErrorLogger &self, size_t i) {
return py::cast(self).attr("get_error_log")().attr("__getitem__")(i);
})
;
/* New classes in 2020 API revision - object level history preservation */
py::enum_<ProcessingStatus>(m,"ProcessingStatus")
.value("RAW",ProcessingStatus::RAW)
.value("ORIGIN",ProcessingStatus::ORIGIN)
.value("VOLATILE",ProcessingStatus::VOLATILE)
.value("SAVED",ProcessingStatus::SAVED)
.value("UNDEFINED",ProcessingStatus::UNDEFINED)
;
py::enum_<AtomicType>(m,"AtomicType")
.value("TIMESERIES",AtomicType::TIMESERIES)
.value("SEISMOGRAM",AtomicType::SEISMOGRAM)
.value("UNDEFINED",AtomicType::UNDEFINED)
;
py::class_<BasicProcessingHistory,PyBasicProcessingHistory>
//py::class_<BasicProcessingHistory>
(m,"BasicProcessingHistory","Base class - hold job history data")
.def(py::init<>())
.def("jobid",&BasicProcessingHistory::jobid,
"Return job id string")
.def("jobname",&BasicProcessingHistory::jobname,
"Return job name string defining main python script driving this processing chain")
.def("set_jobid",&BasicProcessingHistory::set_jobid,
"Set a unique id so jobname + id is unique")
.def("set_jobname",&BasicProcessingHistory::set_jobname,
"Set the base job name defining the main python script for this run")
;
py::class_<NodeData>
(m,"NodeData","Data structure used in ProcessingHistory to processing tree node data")
.def(py::init<>())
.def(py::init<const NodeData&>())
.def_readwrite("status",&NodeData::status,"ProcessingStatus value at this node")
.def_readwrite("uuid",&NodeData::uuid,"uuid of data stage associated with this node")
.def_readwrite("algorithm",&NodeData::algorithm,"algorithm that created data linked to this node position")
.def_readwrite("algid",&NodeData::algid,
"id defining an instance of a particular algorithm (defines what parameter choices were used)")
.def_readwrite("stage",&NodeData::stage,
"Processing stage counter for this node of the processing tree")
.def_readwrite("type",&NodeData::type,"Type of data this process handled as this input")
.def("__str__", [](const NodeData &nd) -> std::string {
return std::string("{'status': ") + std::string(py::str(py::cast(nd.status))) +
", 'uuid': " + std::string(py::repr(py::cast(nd.uuid))) +
", 'algorithm': " + std::string(py::repr(py::cast(nd.algorithm))) +
", 'algid': " + std::string(py::repr(py::cast(nd.algid))) +
", 'stage': " + std::to_string(nd.stage) +
", 'type': " + std::string(py::str(py::cast(nd.type))) +
"}";
})
.def("__repr__", [](const NodeData &nd) -> std::string {
std::string strout("NodeData(");
return strout + std::string(py::str(py::cast(nd).attr("__str__")())) + ")";
})
.def(py::pickle(
[](const NodeData &self) {
stringstream ssnd;
boost::archive::text_oarchive arnd(ssnd);
arnd<<self;
return py::make_tuple(ssnd.str());
},
[](py::tuple t) {
stringstream ssnd(t[0].cast<std::string>());
boost::archive::text_iarchive arnd(ssnd);
NodeData nd;
arnd>>nd;
return nd;
}
))
;
py::class_<ProcessingHistory,BasicProcessingHistory>
(m,"ProcessingHistory","Used to save object level processing history.")
.def(py::init<>())
.def(py::init<const std::string,const std::string>())
.def(py::init<const ProcessingHistory&>())
.def("is_empty",&ProcessingHistory::is_empty,
"Return true if the processing chain is empty")
.def("is_raw",&ProcessingHistory::is_raw,
"Return True if the data are raw data with no previous processing")
.def("is_origin",&ProcessingHistory::is_origin,
"Return True if the data are marked as an origin - commonly an intermediate save")
.def("is_volatile",&ProcessingHistory::is_volatile,
"Return True if the data are unsaved, partially processed data")
.def("is_saved",&ProcessingHistory::is_saved,
"Return True if the data are saved and history can be cleared")
.def("number_of_stages",&ProcessingHistory::number_of_stages,
"Return count of the number of processing steps applied so far")
.def("set_as_origin",&ProcessingHistory::set_as_origin,
"Load data defining this as the top of a processing history chain",
py::arg("alg"),
py::arg("algid"),
py::arg("uuid"),
py::arg("type"),
py::arg("define_as_raw") = false)
.def("set_as_raw",[](ProcessingHistory &self, const string alg,const string algid,
const string uuid,const AtomicType typ){
self.set_as_origin(alg, algid, uuid, typ, true);
},
"Load data defining this as the raw input of a processing history chain")
.def("new_ensemble_process",&ProcessingHistory::new_ensemble_process,
"Set up history chain to define the current data as result of reduction - output form multiple inputs",
py::arg("alg"),
py::arg("algid"),
py::arg("type"),
py::arg("parents"),
py::arg("create_newid") = true)
.def("add_one_input",&ProcessingHistory::add_one_input,
"A single input datum after initialization with new_ensemble_process or accumulate",
py::arg("newinput"))
.def("add_many_inputs",&ProcessingHistory::add_many_inputs,
"Add multiple inputs after initialization with new_ensemble_process or accumulate",
py::arg("inputs"))
.def("accumulate",&ProcessingHistory::accumulate,
"History accumulator for spark reduce operators",
py::arg("alg"),
py::arg("algid"),
py::arg("type"),
py::arg("newinput")
)
.def("_merge",&ProcessingHistory::merge,
"Merge the history nodes from another",
py::arg("newinput")
)
.def("new_map",py::overload_cast<const std::string,const std::string,
const AtomicType,const ProcessingStatus>
(&ProcessingHistory::new_map),
"Set history chain to define the current data as a one-to-one map from parent",
py::arg("alg"),
py::arg("algid"),
py::arg("type"),
py::arg("newstatus") = ProcessingStatus::VOLATILE)
.def("new_map",py::overload_cast<const std::string,const std::string,
const AtomicType,const ProcessingHistory&,
const ProcessingStatus>
(&ProcessingHistory::new_map),
"Set history chain to define the current data as a one-to-one map from parent",
py::arg("alg"),
py::arg("algid"),
py::arg("type"),
py::arg("data_to_clone"),
py::arg("newstatus") = ProcessingStatus::VOLATILE)
.def("map_as_saved",&ProcessingHistory::map_as_saved,
"Load data defining this as the end of chain that was or will soon be saved")
.def("clear_history",&ProcessingHistory::clear,
"Clear this history chain - use with caution")
.def("get_nodes", &ProcessingHistory::get_nodes,
"Retrieve the nodes multimap that defines the tree stucture branches")
.def("stage",&ProcessingHistory::stage,
"Return the current stage number (counter of processing stages applied in this run)")
.def("id",&ProcessingHistory::id,"Return current uuid")
.def("created_by",&ProcessingHistory::created_by ,"Return the algorithm name and id that created current node")
.def("current_nodedata",&ProcessingHistory::current_nodedata,"Return all the attributes of current")
.def("newid",&ProcessingHistory::newid,"Create a new uuid for current data")
.def("set_id",&ProcessingHistory::set_id,"Set current uuid to valued passed")
.def("inputs",&ProcessingHistory::inputs,
"Return a list of uuids of all data that were inputs to defined uuid (current or any ancestor)")
.def("number_inputs",py::overload_cast<const std::string>(&ProcessingHistory::number_inputs, py::const_),
"Return the number of inputs used to generate a specified uuid of the process chain")
.def("number_inputs",py::overload_cast<>(&ProcessingHistory::number_inputs, py::const_),
"Return the number of inputs used to create the current data")
.def_readwrite("elog",&ProcessingHistory::elog)
.def("__str__", [](const ProcessingHistory &ph) -> std::string {
return std::string(py::str(py::cast(ph.current_nodedata())));
})
.def("__repr__", [](const ProcessingHistory &ph) -> std::string {
std::string strout("ProcessingHistory(");
return strout + std::string(py::str(py::cast(ph).attr("__str__")())) + ")";
})
.def(py::pickle(
[](const ProcessingHistory &self) {
stringstream ssph;
boost::archive::text_oarchive arph(ssph);
arph<<self;
return py::make_tuple(ssph.str());
},
[](py::tuple t) {
stringstream ssph(t[0].cast<std::string>());
boost::archive::text_iarchive arph(ssph);
ProcessingHistory ph;
arph>>ph;
return ph;
}
))
;
/* this pair of functions are potentially useful for interactive queries of
ProcessingHistory data */
m.def("algorithm_history",&algorithm_history,
"Return a list of algorithms applied to produce current data object",
py::return_value_policy::copy,
py::arg("h"))
;
m.def("algorithm_outputs",&algorithm_outputs,
"Return a list of uuids of data created by a specified algorithm",
py::return_value_policy::copy,
py::arg("h"),
py::arg("algorithm"),
py::arg("algid") )
;
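/* A hedged python-side usage sketch (h is a ProcessingHistory taken from a
 * processed datum; the algorithm name and id strings are hypothetical):
 *
 *   from mspasspy.ccore.utility import algorithm_history, algorithm_outputs
 *   algs = algorithm_history(h)               # algorithms applied so far
 *   uuids = algorithm_outputs(h, "agc", "0")  # outputs of one algorithm
 */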
}
} // namespace mspasspy
} // namespace mspass
|
State Before: α : Type u_1
β : Type ?u.897639
γ : Type ?u.897642
δ : Type ?u.897645
m : MeasurableSpace α
μ ν : Measure α
f g : α →ₛ ℝ≥0∞
⊢ lintegral (map Prod.fst (pair f g)) μ ⊔ lintegral (map Prod.snd (pair f g)) μ ≤
∑ x in SimpleFunc.range (pair f g), (x.fst ⊔ x.snd) * ↑↑μ (↑(pair f g) ⁻¹' {x})
State After: α : Type u_1
β : Type ?u.897639
γ : Type ?u.897642
δ : Type ?u.897645
m : MeasurableSpace α
μ ν : Measure α
f g : α →ₛ ℝ≥0∞
⊢ (∑ x in SimpleFunc.range (pair f g), x.fst * ↑↑μ (↑(pair f g) ⁻¹' {x})) ⊔
∑ x in SimpleFunc.range (pair f g), x.snd * ↑↑μ (↑(pair f g) ⁻¹' {x}) ≤
∑ x in SimpleFunc.range (pair f g), (x.fst ⊔ x.snd) * ↑↑μ (↑(pair f g) ⁻¹' {x})
Tactic: rw [map_lintegral, map_lintegral]
State Before: α : Type u_1
β : Type ?u.897639
γ : Type ?u.897642
δ : Type ?u.897645
m : MeasurableSpace α
μ ν : Measure α
f g : α →ₛ ℝ≥0∞
⊢ (∑ x in SimpleFunc.range (pair f g), x.fst * ↑↑μ (↑(pair f g) ⁻¹' {x})) ⊔
∑ x in SimpleFunc.range (pair f g), x.snd * ↑↑μ (↑(pair f g) ⁻¹' {x}) ≤
∑ x in SimpleFunc.range (pair f g), (x.fst ⊔ x.snd) * ↑↑μ (↑(pair f g) ⁻¹' {x})
State After: case refine'_1
α : Type u_1
β : Type ?u.897639
γ : Type ?u.897642
δ : Type ?u.897645
m : MeasurableSpace α
μ ν : Measure α
f g : α →ₛ ℝ≥0∞
a : ℝ≥0∞ × ℝ≥0∞
x✝ : a ∈ SimpleFunc.range (pair f g)
⊢ a.fst ≤ a.fst ⊔ a.snd
case refine'_2
α : Type u_1
β : Type ?u.897639
γ : Type ?u.897642
δ : Type ?u.897645
m : MeasurableSpace α
μ ν : Measure α
f g : α →ₛ ℝ≥0∞
a : ℝ≥0∞ × ℝ≥0∞
x✝ : a ∈ SimpleFunc.range (pair f g)
⊢ a.snd ≤ a.fst ⊔ a.snd
Tactic: refine' sup_le _ _ <;> refine' Finset.sum_le_sum fun a _ => mul_le_mul_right' _ _
State Before: case refine'_1
α : Type u_1
β : Type ?u.897639
γ : Type ?u.897642
δ : Type ?u.897645
m : MeasurableSpace α
μ ν : Measure α
f g : α →ₛ ℝ≥0∞
a : ℝ≥0∞ × ℝ≥0∞
x✝ : a ∈ SimpleFunc.range (pair f g)
⊢ a.fst ≤ a.fst ⊔ a.snd
case refine'_2
α : Type u_1
β : Type ?u.897639
γ : Type ?u.897642
δ : Type ?u.897645
m : MeasurableSpace α
μ ν : Measure α
f g : α →ₛ ℝ≥0∞
a : ℝ≥0∞ × ℝ≥0∞
x✝ : a ∈ SimpleFunc.range (pair f g)
⊢ a.snd ≤ a.fst ⊔ a.snd
State After: case refine'_2
α : Type u_1
β : Type ?u.897639
γ : Type ?u.897642
δ : Type ?u.897645
m : MeasurableSpace α
μ ν : Measure α
f g : α →ₛ ℝ≥0∞
a : ℝ≥0∞ × ℝ≥0∞
x✝ : a ∈ SimpleFunc.range (pair f g)
⊢ a.snd ≤ a.fst ⊔ a.snd
Tactic: exact le_sup_left
State Before: case refine'_2
α : Type u_1
β : Type ?u.897639
γ : Type ?u.897642
δ : Type ?u.897645
m : MeasurableSpace α
μ ν : Measure α
f g : α →ₛ ℝ≥0∞
a : ℝ≥0∞ × ℝ≥0∞
x✝ : a ∈ SimpleFunc.range (pair f g)
⊢ a.snd ≤ a.fst ⊔ a.snd
State After: no goals
Tactic: exact le_sup_right
State Before: α : Type u_1
β : Type ?u.897639
γ : Type ?u.897642
δ : Type ?u.897645
m : MeasurableSpace α
μ ν : Measure α
f g : α →ₛ ℝ≥0∞
⊢ ∑ x in SimpleFunc.range (pair f g), (x.fst ⊔ x.snd) * ↑↑μ (↑(pair f g) ⁻¹' {x}) = lintegral (f ⊔ g) μ
State After: no goals
Tactic: rw [sup_eq_map₂, map_lintegral]
|
module FastKeywords
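# Predicates for detecting keyword-argument syntax in expressions: `iskw`
# matches only `:kw` nodes whose name is a Symbol, while `iskw_or_eq` also
# accepts `=` assignments (the form written at macro call sites).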
iskw(ex) = false
iskw(ex::Expr) = ex.head == :kw && length(ex.args) == 2 && (ex.args[1] isa Symbol)
iskw_or_eq(ex) = false
iskw_or_eq(ex::Expr) = (ex.head == :(=) || ex.head == :kw) && length(ex.args) == 2 && (ex.args[1] isa Symbol)
include("KW.jl")
include("fastcall.jl")
include("fastkw.jl")
export @kw, @fastkw, @fastcall
end # module
|
using AlgebraicDynamics.UWDDynam
using Catlab.WiringDiagrams
using Test
const UWD = UndirectedWiringDiagram
@testset "UWDDynam" begin
dx(x, p, t) = [x[1]^2, 2*x[1]-x[2]]
dy(y, p, t) = [1 - y[1]^2]
r = ContinuousResourceSharer{Float64}(2, dx)
@test eltype(r) == Float64
# identity
d = UWD(2)
add_box!(d, 2)
add_junctions!(d, 2)
set_junction!(d, [1,2])
set_junction!(d, [1,2], outer=true)
r2 = oapply(d, [r])
@test nstates(r) == nstates(r2)
@test nports(r) == nports(r2)
@test portmap(r) == portmap(r2)
x0 = [10.0, 7.5]
@test eval_dynamics(r, x0) == eval_dynamics(r2, x0)
@test exposed_states(r, x0) == exposed_states(r2, x0)
h = 0.1
drs = oapply(d, [euler_approx(r, h)])
drs2 = euler_approx(r2, h)
@test eval_dynamics(drs, x0) == eval_dynamics(drs2, x0)
drs3 = euler_approx(r2)
drs4 = oapply(d, [euler_approx(r)])
@test eval_dynamics(drs, x0) == eval_dynamics(drs3, x0, [h])
@test eval_dynamics(drs, x0) == eval_dynamics(drs4, x0, [h])
# merge
d = UWD(1)
add_box!(d, 2)
add_junctions!(d, 1)
set_junction!(d, [1,1])
set_junction!(d, [1], outer=true)
r2 = oapply(d, [r])
@test nstates(r2) == 1
@test nports(r2) == 1
@test portmap(r2) == [1]
@test eval_dynamics(r2, [5.0]) == [30.0]
@test exposed_states(r2, [5.0]) == [5.0]
r = ContinuousResourceSharer{Float64}(2, (u,p,t) -> [u[1]*p[1], u[2]*p[2] + t])
r2 = oapply(d, [r])
@test eval_dynamics(r2, [5.0], [1.0, 2.0], 10.0) == [25.0]
# copy
r = ContinuousResourceSharer{Float64}(1, 2, dx, [2])
d = UWD(2)
add_box!(d, 1)
add_junctions!(d, 1)
set_junction!(d, [1])
set_junction!(d, [1,1], outer = true)
r2 = oapply(d, r)
@test nstates(r2) == 2
@test nports(r2) == 2
@test portmap(r2) == [2,2]
@test eval_dynamics(r2, x0) == eval_dynamics(r, x0)
@test exposed_states(r2, x0) == [x0[2], x0[2]]
# copy states and merge back together
r = ContinuousResourceSharer{Float64}(2, 2, dx, [1,1])
d = UWD(1)
add_box!(d, 2)
add_junctions!(d, 1)
set_junction!(d, [1,1])
set_junction!(d, [1], outer = true)
r2 = oapply(d, r)
@test nstates(r2) == 2
@test nports(r2) == 1
@test portmap(r2) == [1]
@test eval_dynamics(r2, x0) == eval_dynamics(r, x0)
@test exposed_states(r2, x0) == [x0[1]]
# copy states and merge with another resource sharer
r = ContinuousResourceSharer{Float64}(1, dy)
rcopy = ContinuousResourceSharer{Float64}(2, 1, dy, [1,1])
d = HypergraphDiagram{Nothing, Symbol}(2)
add_box!(d, 1, name = :r); add_box!(d, 2, name = :copy); add_box!(d, 1, name = :r)
add_junctions!(d, 2)
set_junction!(d, [1,1,2,2])
set_junction!(d, [1,2], outer = true)
xs = Dict(:r => r, :copy => rcopy)
r2 = oapply(d, xs)
@test nstates(r2) == 1
@test nports(r2) == 2
@test portmap(r2) == [1,1]
@test eval_dynamics(r2, [7.0]) == [3 *(1 - 7.0^2)]
@test exposed_states(r2, [7.0]) == [7.0, 7.0]
# add a state
d = UWD(2)
add_box!(d, 1)
add_junctions!(d, 2)
set_junction!(d, [1])
set_junction!(d, [1,2], outer = true)
r2 = oapply(d, [r])
@test nstates(r2) == 2
@test nports(r2) == 2
@test portmap(r2) == [1,2]
@test eval_dynamics(r2, [7.0, 11.0]) == [-48.0, 0.0]
@test exposed_states(r2, [7.0, 11.0]) == [7.0, 11.0]
# lots of boxes
r = ContinuousResourceSharer{Float64}(2, dx)
s = ContinuousResourceSharer{Float64}(1, dy)
xs = Dict(:r => r, :s => s)
d = HypergraphDiagram{Nothing, Symbol}(5)
add_box!(d, 2, name = :r); add_box!(d, 1, name = :s); add_box!(d, 2, name = :r)
add_junctions!(d, 4)
set_junction!(d, [1,1,1,4,2])
set_junction!(d, [1,1,2,3,3], outer = true)
r2 = oapply(d, xs)
@test nstates(r2) == 4
@test nports(r2) == 5
@test portmap(r2) == [1,1, 3, 4,4]
x0 = [2.0, 7.0, 3.0, 5.0]
@test eval_dynamics(r2, x0) == [3.0, 49.0, 11.0, 0.0]
@test exposed_states(r2, x0) == [2.0, 2.0, 3.0, 5.0, 5.0]
h = 0.1
dr = oapply(d, euler_approx([r,s,r], h))
dr2 = euler_approx(r2, h)
dr3 = oapply(d, euler_approx([r,s,r]))
dr4 = euler_approx(r2)
@test eval_dynamics(dr, x0) == [2.3, 11.9, 4.1, 5.0]
@test eval_dynamics(dr, x0) == eval_dynamics(dr2, x0)
@test eval_dynamics(dr, x0) == eval_dynamics(dr3, x0, [h], 0)
@test eval_dynamics(dr, x0) == eval_dynamics(dr4, x0, [h], 0)
h = 0.25
dr = oapply(d, euler_approx(xs, h))
dr2 = euler_approx(r2, h)
dr3 = oapply(d, euler_approx(xs))
dr4 = euler_approx(r2)
@test eval_dynamics(dr, x0) == eval_dynamics(dr2, x0)
@test eval_dynamics(dr, x0) == eval_dynamics(dr3, x0, [h], 0)
@test eval_dynamics(dr, x0) == eval_dynamics(dr4, x0, [h], 0)
# substitute and oapply commute
d = UWD(4)
add_box!(d, 1); add_box!(d, 2)
add_junctions!(d, 3)
set_junction!(d, [1,2,1])
set_junction!(d, [1,2,3,3], outer = true)
din = UWD(2)
add_box!(din, 1); add_box!(din, 2)
add_junctions!(din, 2)
set_junction!(din, [1,1,2])
set_junction!(din, [1,2], outer = true)
dtot = ocompose(d, 2, din)
s = ContinuousResourceSharer{Float64}(1, 2, dx, [2])
r1 = oapply(dtot, [s,s,r])
r2 = oapply(d, [s, oapply(din, [s,r])])
@test nstates(r1) == nstates(r2)
@test nports(r1) == nports(r2)
@test portmap(r1) == portmap(r2)
x0 = [2.0, 3.0, 5.0, 7.0, 11.0]
@test eval_dynamics(r1, x0) == eval_dynamics(r2, x0)
end # test set
|
\documentclass[010-intro.tex]{subfiles}
\begin{document}
\subsection{Communities in Data Science History: The Carpentries}
Programming is not taught in many academic disciplines.
But as research came to rely more heavily on computational tools,
the need to teach and learn these tools in academia grew.
Software Carpentry was founded in 1998 by Greg Wilson and Brent Gorda with the goal of
teaching researchers the computing skills to get their work done more efficiently and effectively.
These workshops are primarily focused on general programming skills for scientific computing,
and the lesson materials were made open source in 2005 with support from the Python Software Foundation (PSF).
Software Carpentry continues to grow with the support from the Alfred P. Sloan Foundation and Mozilla Science Lab in 2012
\cite{CarpentriesHowWe, jordanCarpentries2020Annual}.
% and NumFOCUS was also founded as a 501(c)(3) public charity status as a nonprofit in the United States with the mission to support open source projects and help sustain them.
The following year, 2013, the first workshops geared towards Librarians were run.
In 2014, Data Carpentry was founded by Karen Cranston, Hilmar Lapp, Tracy Teal, and Ethan White
with support from the National Science Foundation (NSF)
and with the goal of creating materials focused on data literacy aimed at novices in specific research domains.
James Baker was able to expand on Library Carpentry with support from the Software Sustainability Institute (SSI),
with the goal of teaching information sciences and best practices in data structures.
The Software Carpentry Foundation was also founded under NumFOCUS in 2014.
In 2015, Data Carpentry received support from the Gordon and Betty Moore Foundation, and
in 2018, with the help of Community Initiatives, Software Carpentry, Data Carpentry, and Library Carpentry
merged to form The Carpentries.
From 2012 to 2020, The Carpentries ran
2,700 workshops across 71 countries and reached at least 66,000 novice learners
\cite{CarpentriesHowWe, jordanCarpentries2020Annual}.
Today, the Carpentries support over 50 lessons across their 3 Lesson Programs
(Data Carpentry, Library Carpentry, and Software Carpentry),
with over 150 lesson maintainers
\cite{chenPointContactEach2021}.
These lessons cover all the basic data literacy, data management, data science, and software programming
skills in specific curricular domains (Figure \ref{fig:carpentries-venn}).
Instead of having one-off lesson materials maintained by a small set of authors,
The Carpentries' lessons are kept up to date with a rotating set of maintainers for each lesson
and leverage the broader community to maintain the teaching infrastructure.
This serves as an efficient way to teach workshops, maintain lessons, and train new instructors.
\begin{figure}[!hbtp]
\centering
\includegraphics[scale=0.5]{figs/050-intro/carpentries-venn}
\caption[Differences between Carpentries Lesson programs]{
The differences between the 3 Carpentries lesson programs: Data Carpentry, Library Carpentry, and Software Carpentry.
Data Carpentry focuses more on researchers who work with data in a specific domain,
Library Carpentry focuses more on programming tasks in the library and information sciences, and
Software Carpentry focuses more on programming concepts.
Figure adapted from the Carpentries Trainer Training lesson \cite{thecarpentriesCarpentryTrainerTraining}.
}
\label{fig:carpentries-venn}
\end{figure}
\end{document}
|
#include <b1/rodeos/wasm_ql.hpp>
#include <b1/rodeos/callbacks/chaindb.hpp>
#include <b1/rodeos/callbacks/compiler_builtins.hpp>
#include <b1/rodeos/callbacks/console.hpp>
#include <b1/rodeos/callbacks/memory.hpp>
#include <b1/rodeos/callbacks/unimplemented.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/sequenced_index.hpp>
#include <boost/multi_index_container.hpp>
#include <eosio/abi.hpp>
#include <eosio/bytes.hpp>
#include <eosio/vm/watchdog.hpp>
#include <fc/log/logger.hpp>
#include <fc/scoped_exit.hpp>
#include <mutex>
using namespace std::literals;
namespace ship_protocol = eosio::ship_protocol;
using boost::multi_index::indexed_by;
using boost::multi_index::member;
using boost::multi_index::multi_index_container;
using boost::multi_index::ordered_non_unique;
using boost::multi_index::sequenced;
using boost::multi_index::tag;
using eosio::ship_protocol::action_receipt_v0;
using eosio::ship_protocol::action_trace_v1;
using eosio::ship_protocol::transaction_trace_v0;
namespace eosio {
// todo: abieos support for pair. Used by extensions_type.
template <typename S>
void to_json(const std::pair<uint16_t, std::vector<char>>&, S& stream) {
eosio::check(false, eosio::convert_stream_error(stream_error::bad_variant_index));
}
} // namespace eosio
namespace b1::rodeos::wasm_ql {
template <class... Ts>
struct overloaded : Ts... {
using Ts::operator()...;
};
template <class... Ts>
overloaded(Ts...)->overloaded<Ts...>;
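// C++17 visitor helper: merges a set of lambdas into a single overload set
// for std::visit (used below in is_signatures_empty and
// is_context_free_data_empty).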
// todo: relax some of these limits
// todo: restore max_function_section_elements to 1023 and use nodeos's hard fork
struct wasm_ql_backend_options {
// static constexpr std::uint32_t max_mutable_global_bytes = 1024;
// static constexpr std::uint32_t max_table_elements = 1024;
// static constexpr std::uint32_t max_section_elements = 8191;
// static constexpr std::uint32_t max_function_section_elements = 8000;
// static constexpr std::uint32_t max_import_section_elements = 1023;
// static constexpr std::uint32_t max_element_segment_elements = 8191;
// static constexpr std::uint32_t max_data_segment_bytes = 8191;
// static constexpr std::uint32_t max_linear_memory_init = 64 * 1024;
// static constexpr std::uint32_t max_func_local_bytes = 8192;
// static constexpr std::uint32_t max_local_sets = 1023;
// static constexpr std::uint32_t eosio_max_nested_structures = 1023;
// static constexpr std::uint32_t max_br_table_elements = 8191;
// static constexpr std::uint32_t max_symbol_bytes = 8191;
// static constexpr std::uint32_t max_memory_offset = (33 * 1024 * 1024 - 1);
static constexpr std::uint32_t max_pages = 528; // 33 MiB
static constexpr std::uint32_t max_call_depth = 251;
};
struct callbacks;
using rhf_t = registered_host_functions<callbacks>;
using backend_t = eosio::vm::backend<rhf_t, eosio::vm::jit, wasm_ql_backend_options>;
struct callbacks : action_callbacks<callbacks>,
chaindb_callbacks<callbacks>,
compiler_builtins_callbacks<callbacks>,
console_callbacks<callbacks>,
context_free_system_callbacks<callbacks>,
db_callbacks<callbacks>,
memory_callbacks<callbacks>,
query_callbacks<callbacks>,
unimplemented_callbacks<callbacks> {
wasm_ql::thread_state& thread_state;
rodeos::chaindb_state& chaindb_state;
rodeos::db_view_state& db_view_state;
callbacks(wasm_ql::thread_state& thread_state, rodeos::chaindb_state& chaindb_state,
rodeos::db_view_state& db_view_state)
: thread_state{ thread_state }, chaindb_state{ chaindb_state }, db_view_state{ db_view_state } {}
auto& get_state() { return thread_state; }
auto& get_chaindb_state() { return chaindb_state; }
auto& get_db_view_state() { return db_view_state; }
};
std::once_flag registered_callbacks;
void register_callbacks() {
action_callbacks<callbacks>::register_callbacks<rhf_t>();
chaindb_callbacks<callbacks>::register_callbacks<rhf_t>();
compiler_builtins_callbacks<callbacks>::register_callbacks<rhf_t>();
console_callbacks<callbacks>::register_callbacks<rhf_t>();
context_free_system_callbacks<callbacks>::register_callbacks<rhf_t>();
db_callbacks<callbacks>::register_callbacks<rhf_t>();
memory_callbacks<callbacks>::register_callbacks<rhf_t>();
query_callbacks<callbacks>::register_callbacks<rhf_t>();
unimplemented_callbacks<callbacks>::register_callbacks<rhf_t>();
}
struct backend_entry {
eosio::name name; // only for wasms loaded from disk
eosio::checksum256 hash; // only for wasms loaded from chain
std::unique_ptr<backend_t> backend;
};
struct by_age;
struct by_name;
struct by_hash;
using backend_container = multi_index_container<
backend_entry,
indexed_by<sequenced<tag<by_age>>, //
ordered_non_unique<tag<by_name>, member<backend_entry, eosio::name, &backend_entry::name>>,
ordered_non_unique<tag<by_hash>, member<backend_entry, eosio::checksum256, &backend_entry::hash>>>>;
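/* Cache of compiled wasm backends with age-based eviction: entries are kept
 * in insertion order (by_age) and looked up either by account name (wasms
 * loaded from disk) or by code hash (wasms loaded from chain). get() removes
 * an entry; callers hand it back through add(), which evicts the oldest
 * entries once wasm_cache_size is exceeded. */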
class backend_cache {
private:
std::mutex mutex;
const wasm_ql::shared_state& shared_state;
backend_container backends;
public:
backend_cache(const wasm_ql::shared_state& shared_state) : shared_state{ shared_state } {}
void add(backend_entry&& entry) {
std::lock_guard<std::mutex> lock{ mutex };
auto& ind = backends.get<by_age>();
ind.push_back(std::move(entry));
while (ind.size() > shared_state.wasm_cache_size) ind.pop_front();
}
std::optional<backend_entry> get(eosio::name name) {
std::optional<backend_entry> result;
std::lock_guard<std::mutex> lock{ mutex };
auto& ind = backends.get<by_name>();
auto it = ind.find(name);
if (it == ind.end())
return result;
ind.modify(it, [&](auto& x) { result = std::move(x); });
ind.erase(it);
return result;
}
std::optional<backend_entry> get(const eosio::checksum256& hash) {
std::optional<backend_entry> result;
std::lock_guard<std::mutex> lock{ mutex };
auto& ind = backends.get<by_hash>();
auto it = ind.find(hash);
if (it == ind.end())
return result;
ind.modify(it, [&](auto& x) { result = std::move(x); });
ind.erase(it);
return result;
}
};
shared_state::shared_state(std::shared_ptr<chain_kv::database> db)
: backend_cache(std::make_shared<wasm_ql::backend_cache>(*this)), db(std::move(db)) {}
shared_state::~shared_state() {}
std::optional<std::vector<uint8_t>> read_code(wasm_ql::thread_state& thread_state, eosio::name account) {
std::optional<std::vector<uint8_t>> code;
if (!thread_state.shared->contract_dir.empty()) {
auto filename = thread_state.shared->contract_dir + "/" + (std::string)account + ".wasm";
std::ifstream wasm_file(filename, std::ios::binary);
if (wasm_file.is_open()) {
ilog("compiling ${f}", ("f", filename));
wasm_file.seekg(0, std::ios::end);
int len = wasm_file.tellg();
if (len < 0)
throw std::runtime_error("wasm file length is -1");
code.emplace(len);
wasm_file.seekg(0, std::ios::beg);
wasm_file.read((char*)code->data(), code->size());
wasm_file.close();
}
}
return code;
}
std::optional<eosio::checksum256> get_contract_hash(db_view_state& db_view_state, eosio::name account) {
std::optional<eosio::checksum256> result;
auto meta = get_state_row<ship_protocol::account_metadata>(
db_view_state.kv_state.view,
std::make_tuple(eosio::name{ "account.meta" }, eosio::name{ "primary" }, account));
if (!meta)
return result;
auto& meta0 = std::get<ship_protocol::account_metadata_v0>(meta->second);
if (!meta0.code->vm_type && !meta0.code->vm_version)
result = meta0.code->code_hash;
return result;
}
std::optional<std::vector<uint8_t>> read_contract(db_view_state& db_view_state, const eosio::checksum256& hash,
eosio::name account) {
std::optional<std::vector<uint8_t>> result;
auto code_row = get_state_row<ship_protocol::code>(
db_view_state.kv_state.view,
std::make_tuple(eosio::name{ "code" }, eosio::name{ "primary" }, uint8_t(0), uint8_t(0), hash));
if (!code_row)
return result;
auto& code0 = std::get<ship_protocol::code_v0>(code_row->second);
// todo: avoid copy
result.emplace(code0.code.pos, code0.code.end);
ilog("compiling ${h}: ${a}", ("h", eosio::convert_to_json(hash))("a", (std::string)account));
return result;
}
void run_action(wasm_ql::thread_state& thread_state, const std::vector<char>& contract_kv_prefix,
ship_protocol::action& action, action_trace_v1& atrace, const rocksdb::Snapshot* snapshot,
const std::chrono::steady_clock::time_point& stop_time, std::vector<std::vector<char>>& memory) {
if (std::chrono::steady_clock::now() >= stop_time)
throw eosio::vm::timeout_exception("execution timed out");
chain_kv::write_session write_session{ *thread_state.shared->db, snapshot };
db_view_state db_view_state{ state_account, *thread_state.shared->db, write_session, contract_kv_prefix };
std::optional<backend_entry> entry = thread_state.shared->backend_cache->get(action.account);
std::optional<std::vector<uint8_t>> code;
if (!entry)
code = read_code(thread_state, action.account);
std::optional<eosio::checksum256> hash;
if (!entry && !code) {
hash = get_contract_hash(db_view_state, action.account);
if (hash) {
entry = thread_state.shared->backend_cache->get(*hash);
if (!entry)
code = read_contract(db_view_state, *hash, action.account);
}
}
// todo: fail? silent success like normal transactions?
if (!entry && !code)
throw std::runtime_error("account " + (std::string)action.account + " has no code");
if (!entry) {
entry.emplace();
if (hash)
entry->hash = *hash;
else
entry->name = action.account;
std::call_once(registered_callbacks, register_callbacks);
entry->backend = std::make_unique<backend_t>(*code, nullptr);
rhf_t::resolve(entry->backend->get_module());
}
auto se = fc::make_scoped_exit([&] { thread_state.shared->backend_cache->add(std::move(*entry)); });
fill_status_sing sing{ state_account, db_view_state, false };
if (!sing.exists())
throw std::runtime_error("No fill_status records found; is filler running?");
auto& fill_status = sing.get();
// todo: move these out of thread_state since future enhancements could cause state to accidentally leak between
// queries
thread_state.max_console_size = thread_state.shared->max_console_size;
thread_state.receiver = action.account;
thread_state.action_data = action.data;
thread_state.action_return_value.clear();
std::visit([&](auto& stat) { thread_state.block_num = stat.head; }, fill_status);
thread_state.block_info.reset();
chaindb_state chaindb_state;
callbacks cb{ thread_state, chaindb_state, db_view_state };
entry->backend->set_wasm_allocator(&thread_state.wa);
try {
eosio::vm::watchdog wd{ stop_time - std::chrono::steady_clock::now() };
entry->backend->timed_run(wd, [&] {
entry->backend->initialize(&cb);
(*entry->backend)(cb, "env", "apply", action.account.value, action.account.value, action.name.value);
});
} catch (...) {
atrace.console = std::move(thread_state.console);
throw;
}
atrace.console = std::move(thread_state.console);
memory.push_back(std::move(thread_state.action_return_value));
atrace.return_value = memory.back();
} // run_action
const std::vector<char>& query_get_info(wasm_ql::thread_state& thread_state,
const std::vector<char>& contract_kv_prefix) {
rocksdb::ManagedSnapshot snapshot{ thread_state.shared->db->rdb.get() };
chain_kv::write_session write_session{ *thread_state.shared->db, snapshot.snapshot() };
db_view_state db_view_state{ state_account, *thread_state.shared->db, write_session, contract_kv_prefix };
std::string result = "{\"server_type\":\"wasm-ql\"";
{
global_property_kv table{ { db_view_state } };
bool found = false;
if (table.primary_index.begin() != table.primary_index.end()) {
auto record = table.primary_index.begin().value();
if (auto* obj = std::get_if<ship_protocol::global_property_v1>(&record)) {
found = true;
result += ",\"chain_id\":" + eosio::convert_to_json(obj->chain_id);
}
}
if (!found)
throw std::runtime_error("No global_property_v1 records found; is filler running?");
}
{
fill_status_sing sing{ state_account, db_view_state, false };
if (sing.exists()) {
std::visit(
[&](auto& obj) {
result += ",\"head_block_num\":\"" + std::to_string(obj.head) + "\"";
result += ",\"head_block_id\":" + eosio::convert_to_json(obj.head_id);
result += ",\"last_irreversible_block_num\":\"" + std::to_string(obj.irreversible) + "\"";
result += ",\"last_irreversible_block_id\":" + eosio::convert_to_json(obj.irreversible_id);
},
sing.get());
} else
throw std::runtime_error("No fill_status records found; is filler running?");
}
result += "}";
thread_state.action_return_value.assign(result.data(), result.data() + result.size());
return thread_state.action_return_value;
}
struct get_block_params {
std::string block_num_or_id = {};
};
EOSIO_REFLECT(get_block_params, block_num_or_id)
const std::vector<char>& query_get_block(wasm_ql::thread_state& thread_state,
const std::vector<char>& contract_kv_prefix, std::string_view body) {
get_block_params params;
std::string s{ body.begin(), body.end() };
eosio::json_token_stream stream{ s.data() };
try {
from_json(params, stream);
} catch (std::exception& e) {
throw std::runtime_error("An error occurred deserializing get_block_params: "s + e.what());
}
rocksdb::ManagedSnapshot snapshot{ thread_state.shared->db->rdb.get() };
chain_kv::write_session write_session{ *thread_state.shared->db, snapshot.snapshot() };
db_view_state db_view_state{ state_account, *thread_state.shared->db, write_session, contract_kv_prefix };
std::string bn_json = "\"" + params.block_num_or_id + "\"";
eosio::json_token_stream bn_stream{ bn_json.data() };
std::optional<std::pair<std::shared_ptr<const chain_kv::bytes>, block_info>> info;
if (params.block_num_or_id.size() == 64) {
eosio::checksum256 id;
try {
from_json(id, bn_stream);
} catch (std::exception& e) {
throw std::runtime_error("An error occurred deserializing block_num_or_id: "s + e.what());
}
info = get_state_row_secondary<block_info>(db_view_state.kv_state.view,
std::make_tuple(eosio::name{ "block.info" }, eosio::name{ "id" }, id));
} else {
uint32_t num;
try {
from_json(num, bn_stream);
} catch (std::exception& e) {
throw std::runtime_error("An error occurred deserializing block_num_or_id: "s + e.what());
}
info = get_state_row<block_info>(db_view_state.kv_state.view,
std::make_tuple(eosio::name{ "block.info" }, eosio::name{ "primary" }, num));
}
if (info) {
auto& obj = std::get<block_info_v0>(info->second);
uint32_t ref_block_prefix;
memcpy(&ref_block_prefix, obj.id.value.begin() + 8, sizeof(ref_block_prefix));
std::string result = "{";
result += "\"block_num\":" + eosio::convert_to_json(obj.num);
result += ",\"id\":" + eosio::convert_to_json(obj.id);
result += ",\"timestamp\":" + eosio::convert_to_json(obj.timestamp);
result += ",\"producer\":" + eosio::convert_to_json(obj.producer);
result += ",\"confirmed\":" + eosio::convert_to_json(obj.confirmed);
result += ",\"previous\":" + eosio::convert_to_json(obj.previous);
result += ",\"transaction_mroot\":" + eosio::convert_to_json(obj.transaction_mroot);
result += ",\"action_mroot\":" + eosio::convert_to_json(obj.action_mroot);
result += ",\"schedule_version\":" + eosio::convert_to_json(obj.schedule_version);
result += ",\"producer_signature\":" + eosio::convert_to_json(obj.producer_signature);
result += ",\"ref_block_prefix\":" + eosio::convert_to_json(ref_block_prefix);
result += "}";
thread_state.action_return_value.assign(result.data(), result.data() + result.size());
return thread_state.action_return_value;
}
throw std::runtime_error("block " + params.block_num_or_id + " not found");
} // query_get_block
struct get_abi_params {
eosio::name account_name = {};
};
EOSIO_REFLECT(get_abi_params, account_name)
struct get_abi_result {
eosio::name account_name;
std::optional<eosio::abi_def> abi;
};
EOSIO_REFLECT(get_abi_result, account_name, abi)
const std::vector<char>& query_get_abi(wasm_ql::thread_state& thread_state, const std::vector<char>& contract_kv_prefix,
std::string_view body) {
get_abi_params params;
std::string s{ body.begin(), body.end() };
eosio::json_token_stream stream{ s.data() };
try {
from_json(params, stream);
} catch (std::exception& e) {
throw std::runtime_error("An error occurred deserializing get_abi_params: "s + e.what());
}
rocksdb::ManagedSnapshot snapshot{ thread_state.shared->db->rdb.get() };
chain_kv::write_session write_session{ *thread_state.shared->db, snapshot.snapshot() };
db_view_state db_view_state{ state_account, *thread_state.shared->db, write_session, contract_kv_prefix };
auto acc = get_state_row<ship_protocol::account>(
db_view_state.kv_state.view,
std::make_tuple(eosio::name{ "account" }, eosio::name{ "primary" }, params.account_name));
if (!acc)
throw std::runtime_error("account " + (std::string)params.account_name + " not found");
auto& acc0 = std::get<ship_protocol::account_v0>(acc->second);
get_abi_result result;
result.account_name = acc0.name;
if (acc0.abi.pos != acc0.abi.end) {
result.abi.emplace();
eosio::from_bin(*result.abi, acc0.abi);
}
// todo: avoid the extra copy
auto json = eosio::convert_to_json(result);
thread_state.action_return_value.assign(json.begin(), json.end());
return thread_state.action_return_value;
} // query_get_abi
// Ignores data field
struct action_no_data {
eosio::name account = {};
eosio::name name = {};
std::vector<ship_protocol::permission_level> authorization = {};
};
struct extension_hex_data {
uint16_t type = {};
eosio::bytes data = {};
};
EOSIO_REFLECT(extension_hex_data, type, data)
EOSIO_REFLECT(action_no_data, account, name, authorization)
struct transaction_for_get_keys : ship_protocol::transaction_header {
std::vector<action_no_data> context_free_actions = {};
std::vector<action_no_data> actions = {};
std::vector<extension_hex_data> transaction_extensions = {};
};
EOSIO_REFLECT(transaction_for_get_keys, base ship_protocol::transaction_header, context_free_actions, actions,
transaction_extensions)
struct get_required_keys_params {
transaction_for_get_keys transaction = {};
std::vector<eosio::public_key> available_keys = {};
};
EOSIO_REFLECT(get_required_keys_params, transaction, available_keys)
struct get_required_keys_result {
std::vector<eosio::public_key> required_keys = {};
};
EOSIO_REFLECT(get_required_keys_result, required_keys)
const std::vector<char>& query_get_required_keys(wasm_ql::thread_state& thread_state, std::string_view body) {
get_required_keys_params params;
std::string s{ body.begin(), body.end() };
eosio::json_token_stream stream{ s.data() };
try {
from_json(params, stream);
} catch (std::exception& e) {
throw std::runtime_error("An error occurred deserializing get_required_keys_params: "s + e.what());
}
get_required_keys_result result;
for (auto& action : params.transaction.context_free_actions)
if (!action.authorization.empty())
throw std::runtime_error("Context-free actions may not have authorizations");
for (auto& action : params.transaction.actions)
if (!action.authorization.empty())
throw std::runtime_error("Actions may not have authorizations"); // todo
// todo: avoid the extra copy
auto json = eosio::convert_to_json(result);
thread_state.action_return_value.assign(json.begin(), json.end());
return thread_state.action_return_value;
} // query_get_required_keys
struct send_transaction_params {
std::vector<eosio::signature> signatures = {};
std::string compression = {};
eosio::bytes packed_context_free_data = {};
eosio::bytes packed_trx = {};
};
EOSIO_REFLECT(send_transaction_params, signatures, compression, packed_context_free_data, packed_trx)
struct send_transaction_results {
eosio::checksum256 transaction_id; // todo: redundant with processed.id
transaction_trace_v0 processed;
};
EOSIO_REFLECT(send_transaction_results, transaction_id, processed)
const std::vector<char>& query_send_transaction(wasm_ql::thread_state& thread_state,
const std::vector<char>& contract_kv_prefix, std::string_view body,
bool return_trace_on_except) {
send_transaction_params params;
{
std::string s{ body.begin(), body.end() };
eosio::json_token_stream stream{ s.data() };
try {
from_json(params, stream);
} catch (std::exception& e) {
throw std::runtime_error("An error occurred deserializing send_transaction_params: "s + e.what());
}
}
if (params.compression != "0" && params.compression != "none")
throw std::runtime_error("Compression must be 0 or none"); // todo
ship_protocol::packed_transaction trx{ 0,
{ ship_protocol::prunable_data_type::full_legacy{
std::move(params.signatures), params.packed_context_free_data.data } },
params.packed_trx.data };
rocksdb::ManagedSnapshot snapshot{ thread_state.shared->db->rdb.get() };
std::vector<std::vector<char>> memory;
send_transaction_results results;
results.processed = query_send_transaction(thread_state, contract_kv_prefix, trx, snapshot.snapshot(), memory,
return_trace_on_except);
// todo: hide variants during json conversion
// todo: avoid the extra copy
auto json = eosio::convert_to_json(results);
thread_state.action_return_value.assign(json.begin(), json.end());
return thread_state.action_return_value;
} // query_send_transaction
bool is_signatures_empty(const ship_protocol::prunable_data_type& data) {
return std::visit(overloaded{ [](const ship_protocol::prunable_data_type::none&) { return true; },
[](const auto& v) { return v.signatures.empty(); } },
data.prunable_data);
}
bool is_context_free_data_empty(const ship_protocol::prunable_data_type& data) {
return std::visit(overloaded{ [](const ship_protocol::prunable_data_type::none&) { return true; },
[](const ship_protocol::prunable_data_type::full_legacy& v) {
return v.packed_context_free_data.pos == v.packed_context_free_data.end;
},
[](const auto& v) { return v.context_free_segments.empty(); } },
data.prunable_data);
}
transaction_trace_v0 query_send_transaction(wasm_ql::thread_state& thread_state, //
const std::vector<char>& contract_kv_prefix, //
const ship_protocol::packed_transaction& trx, //
const rocksdb::Snapshot* snapshot, //
std::vector<std::vector<char>>& memory, //
bool return_trace_on_except) {
eosio::input_stream s{ trx.packed_trx };
ship_protocol::transaction unpacked;
try {
eosio::from_bin(unpacked, s);
} catch (std::exception& e) { throw std::runtime_error("An error occurred deserializing packed_trx: "s + e.what()); }
if (s.end != s.pos)
throw std::runtime_error("Extra data in packed_trx");
if (!is_signatures_empty(trx.prunable_data))
throw std::runtime_error("Signatures must be empty"); // todo
if (trx.compression)
throw std::runtime_error("Compression must be 0 or none"); // todo
if (!is_context_free_data_empty(trx.prunable_data))
throw std::runtime_error("packed_context_free_data must be empty");
// todo: verify query transaction extension is present, but no others
// todo: redirect if transaction extension not present?
if (!unpacked.transaction_extensions.empty())
throw std::runtime_error("transaction_extensions must be empty");
// todo: check expiration, ref_block_num, ref_block_prefix
if (unpacked.delay_sec.value)
throw std::runtime_error("delay_sec must be 0"); // queries can't be deferred
if (!unpacked.context_free_actions.empty())
throw std::runtime_error("context_free_actions must be empty"); // todo: is there a case where CFA makes sense?
for (auto& action : unpacked.actions)
if (!action.authorization.empty())
throw std::runtime_error("authorization must be empty"); // todo
// todo: fill transaction_id
transaction_trace_v0 tt;
tt.action_traces.reserve(unpacked.actions.size());
auto start_time = std::chrono::steady_clock::now();
auto stop_time = start_time + std::chrono::milliseconds{ thread_state.shared->max_exec_time_ms };
for (auto& action : unpacked.actions) {
tt.action_traces.emplace_back();
auto& at = tt.action_traces.back().emplace<action_trace_v1>();
at.action_ordinal.value = tt.action_traces.size(); // starts at 1
at.receiver = action.account;
at.act = action;
try {
run_action(thread_state, contract_kv_prefix, action, at, snapshot, stop_time, memory);
} catch (eosio::vm::timeout_exception&) { //
throw std::runtime_error(
"timeout after " +
std::to_string(std::chrono::duration_cast<std::chrono::milliseconds>(stop_time - start_time).count()) +
" ms");
} catch (std::exception& e) {
if (!return_trace_on_except)
throw;
// todo: errorcode
at.except = tt.except = e.what();
tt.status = ship_protocol::transaction_status::soft_fail;
break;
}
at.receipt.emplace();
auto& r = at.receipt->emplace<action_receipt_v0>();
r.receiver = action.account;
}
return tt;
} // query_send_transaction
} // namespace b1::rodeos::wasm_ql
|
{-# OPTIONS --safe #-}
module Cubical.Relation.Binary.Poset where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.Equiv.HalfAdjoint
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.Univalence
open import Cubical.Foundations.Transport
open import Cubical.Foundations.SIP
open import Cubical.Data.Sigma
open import Cubical.Reflection.RecordEquiv
open import Cubical.Reflection.StrictEquiv
open import Cubical.Displayed.Base
open import Cubical.Displayed.Auto
open import Cubical.Displayed.Record
open import Cubical.Displayed.Universe
open import Cubical.Relation.Binary.Base
open Iso
open BinaryRelation
private
variable
ℓ ℓ' ℓ'' ℓ₀ ℓ₀' ℓ₁ ℓ₁' : Level
record IsPoset {A : Type ℓ} (_≤_ : A → A → Type ℓ') : Type (ℓ-max ℓ ℓ') where
no-eta-equality
constructor isposet
field
is-set : isSet A
is-prop-valued : isPropValued _≤_
is-refl : isRefl _≤_
is-trans : isTrans _≤_
is-antisym : isAntisym _≤_
unquoteDecl IsPosetIsoΣ = declareRecordIsoΣ IsPosetIsoΣ (quote IsPoset)
record PosetStr (ℓ' : Level) (A : Type ℓ) : Type (ℓ-max ℓ (ℓ-suc ℓ')) where
constructor posetstr
field
_≤_ : A → A → Type ℓ'
isPoset : IsPoset _≤_
infixl 7 _≤_
open IsPoset isPoset public
Poset : ∀ ℓ ℓ' → Type (ℓ-max (ℓ-suc ℓ) (ℓ-suc ℓ'))
Poset ℓ ℓ' = TypeWithStr ℓ (PosetStr ℓ')
poset : (A : Type ℓ) (_≤_ : A → A → Type ℓ') (h : IsPoset _≤_) → Poset ℓ ℓ'
poset A _≤_ h = A , posetstr _≤_ h
record IsPosetEquiv {A : Type ℓ₀} {B : Type ℓ₁}
(M : PosetStr ℓ₀' A) (e : A ≃ B) (N : PosetStr ℓ₁' B)
: Type (ℓ-max (ℓ-max ℓ₀ ℓ₀') ℓ₁')
where
constructor
isposetequiv
-- Shorter qualified names
private
module M = PosetStr M
module N = PosetStr N
field
pres≤ : (x y : A) → x M.≤ y ≃ equivFun e x N.≤ equivFun e y
PosetEquiv : (M : Poset ℓ₀ ℓ₀') (N : Poset ℓ₁ ℓ₁') → Type (ℓ-max (ℓ-max ℓ₀ ℓ₀') (ℓ-max ℓ₁ ℓ₁'))
PosetEquiv M N = Σ[ e ∈ ⟨ M ⟩ ≃ ⟨ N ⟩ ] IsPosetEquiv (M .snd) e (N .snd)
isPropIsPoset : {A : Type ℓ} (_≤_ : A → A → Type ℓ') → isProp (IsPoset _≤_)
isPropIsPoset _≤_ = isOfHLevelRetractFromIso 1 IsPosetIsoΣ
(isPropΣ isPropIsSet
λ isSetA → isPropΣ (isPropΠ2 (λ _ _ → isPropIsProp))
λ isPropValued≤ → isProp×2
(isPropΠ (λ _ → isPropValued≤ _ _))
(isPropΠ5 λ _ _ _ _ _ → isPropValued≤ _ _)
(isPropΠ4 λ _ _ _ _ → isSetA _ _))
𝒮ᴰ-Poset : DUARel (𝒮-Univ ℓ) (PosetStr ℓ') (ℓ-max ℓ ℓ')
𝒮ᴰ-Poset =
𝒮ᴰ-Record (𝒮-Univ _) IsPosetEquiv
(fields:
data[ _≤_ ∣ autoDUARel _ _ ∣ pres≤ ]
prop[ isPoset ∣ (λ _ _ → isPropIsPoset _) ])
where
open PosetStr
open IsPoset
open IsPosetEquiv
PosetPath : (M N : Poset ℓ ℓ') → PosetEquiv M N ≃ (M ≡ N)
PosetPath = ∫ 𝒮ᴰ-Poset .UARel.ua
-- an easier way of establishing an equivalence of posets
module _ {P : Poset ℓ₀ ℓ₀'} {S : Poset ℓ₁ ℓ₁'} (e : ⟨ P ⟩ ≃ ⟨ S ⟩) where
private
module P = PosetStr (P .snd)
module S = PosetStr (S .snd)
module _ (isMon : ∀ x y → x P.≤ y → equivFun e x S.≤ equivFun e y)
(isMonInv : ∀ x y → x S.≤ y → invEq e x P.≤ invEq e y) where
open IsPosetEquiv
open IsPoset
makeIsPosetEquiv : IsPosetEquiv (P .snd) e (S .snd)
pres≤ makeIsPosetEquiv x y = propBiimpl→Equiv (P.isPoset .is-prop-valued _ _)
(S.isPoset .is-prop-valued _ _)
(isMon _ _) (isMonInv' _ _)
where
isMonInv' : ∀ x y → equivFun e x S.≤ equivFun e y → x P.≤ y
isMonInv' x y ex≤ey = transport (λ i → retEq e x i P.≤ retEq e y i) (isMonInv _ _ ex≤ey)
module PosetReasoning (P' : Poset ℓ ℓ') where
private P = fst P'
open PosetStr (snd P')
open IsPoset
_≤⟨_⟩_ : (x : P) {y z : P} → x ≤ y → y ≤ z → x ≤ z
x ≤⟨ p ⟩ q = isPoset .is-trans x _ _ p q
_◾ : (x : P) → x ≤ x
x ◾ = isPoset .is-refl x
infixr 0 _≤⟨_⟩_
infix 1 _◾
|
(*
* Copyright 2014, NICTA
*
* This software may be distributed and modified according to the terms of
* the BSD 2-Clause license. Note that NO WARRANTY is provided.
* See "LICENSE_BSD2.txt" for details.
*
* @TAG(NICTA_BSD)
*)
section "Signed Words"
theory Signed_Words
imports "~~/src/HOL/Word/Word"
begin
text \<open>Signed words as a separate (isomorphic) word length class. Useful for tagging words in C.\<close>
typedef ('a::len0) signed = "UNIV :: 'a set" ..
lemma card_signed [simp]: "CARD (('a::len0) signed) = CARD('a)"
unfolding type_definition.card [OF type_definition_signed]
by simp
instantiation signed :: (len0) len0
begin
definition
len_signed [simp]: "len_of (x::'a::len0 signed itself) = len_of TYPE('a)"
instance ..
end
instance signed :: (len) len
by (intro_classes, simp)
type_synonym 'a sword = "'a signed word"
type_synonym sword8 = "8 sword"
type_synonym sword16 = "16 sword"
type_synonym sword32 = "32 sword"
type_synonym sword64 = "64 sword"
end
|
[STATEMENT]
lemma utp_order_fpl: "utp_order H = fpl \<P> H"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. utp_order H = fpl \<P> H
[PROOF STEP]
by (auto simp add: utp_order_def upred_lattice_def fps_def Healthy_def)
|
(*
Authors: Jose Divasón
Sebastiaan Joosten
René Thiemann
Akihisa Yamada
*)
subsection \<open>Record Based Version\<close>
theory Finite_Field_Factorization_Record_Based
imports
Finite_Field_Factorization
Matrix_Record_Based
Poly_Mod_Finite_Field_Record_Based
"HOL-Types_To_Sets.Types_To_Sets"
Jordan_Normal_Form.Matrix_IArray_Impl
Jordan_Normal_Form.Gauss_Jordan_IArray_Impl
Polynomial_Interpolation.Improved_Code_Equations
Polynomial_Factorization.Missing_List
begin
hide_const(open) monom coeff
text \<open>Whereas @{thm finite_field_factorization} provides a result for polynomials over GF(p),
we now develop a theorem which speaks about integer polynomials modulo p.\<close>
lemma (in poly_mod_prime_type) finite_field_factorization_modulo_ring:
assumes g: "(g :: 'a mod_ring poly) = of_int_poly f"
and sf: "square_free_m f"
and fact: "finite_field_factorization g = (d,gs)"
and c: "c = to_int_mod_ring d"
and fs: "fs = map to_int_poly gs"
shows "unique_factorization_m f (c, mset fs)"
proof -
have [transfer_rule]: "MP_Rel f g" unfolding g MP_Rel_def by (simp add: Mp_f_representative)
have sg: "square_free g" by (transfer, rule sf)
have [transfer_rule]: "M_Rel c d" unfolding M_Rel_def c by (rule M_to_int_mod_ring)
have fs_gs[transfer_rule]: "list_all2 MP_Rel fs gs"
unfolding fs list_all2_map1 MP_Rel_def[abs_def] Mp_to_int_poly by (simp add: list.rel_refl)
have [transfer_rule]: "rel_mset MP_Rel (mset fs) (mset gs)"
using fs_gs using rel_mset_def by blast
have [transfer_rule]: "MF_Rel (c,mset fs) (d,mset gs)" unfolding MF_Rel_def by transfer_prover
from finite_field_factorization[OF sg fact]
have uf: "unique_factorization Irr_Mon g (d,mset gs)" by auto
from uf[untransferred] show "unique_factorization_m f (c, mset fs)" .
qed
text \<open>We now have to implement @{const finite_field_factorization}.\<close>
context
fixes p :: int
and ff_ops :: "'i arith_ops_record" (* finite-fields *)
begin
fun power_poly_f_mod_i :: "('i list \<Rightarrow> 'i list) \<Rightarrow> 'i list \<Rightarrow> nat \<Rightarrow> 'i list" where
"power_poly_f_mod_i modulus a n = (if n = 0 then modulus (one_poly_i ff_ops)
else let (d,r) = Divides.divmod_nat n 2;
rec = power_poly_f_mod_i modulus (modulus (times_poly_i ff_ops a a)) d in
if r = 0 then rec else modulus (times_poly_i ff_ops rec a))"
declare power_poly_f_mod_i.simps[simp del]
fun power_polys_i :: "'i list \<Rightarrow> 'i list \<Rightarrow> 'i list \<Rightarrow> nat \<Rightarrow> 'i list list" where
"power_polys_i mul_p u curr_p (Suc i) = curr_p #
power_polys_i mul_p u (mod_field_poly_i ff_ops (times_poly_i ff_ops curr_p mul_p) u) i"
| "power_polys_i mul_p u curr_p 0 = []"
lemma length_power_polys_i[simp]: "length (power_polys_i x y z n) = n"
by (induct n arbitrary: x y z, auto)
definition berlekamp_mat_i :: "'i list \<Rightarrow> 'i mat" where
"berlekamp_mat_i u = (let n = degree_i u;
ze = arith_ops_record.zero ff_ops; on = arith_ops_record.one ff_ops;
mul_p = power_poly_f_mod_i (\<lambda> v. mod_field_poly_i ff_ops v u)
[ze, on] (nat p);
xks = power_polys_i mul_p u [on] n
in mat_of_rows_list n (map (\<lambda> cs. cs @ replicate (n - length cs) ze) xks))"
definition berlekamp_resulting_mat_i :: "'i list \<Rightarrow> 'i mat" where
"berlekamp_resulting_mat_i u = (let Q = berlekamp_mat_i u;
n = dim_row Q;
QI = mat n n (\<lambda> (i,j). if i = j then arith_ops_record.minus ff_ops (Q $$ (i,j)) (arith_ops_record.one ff_ops) else Q $$ (i,j))
in (gauss_jordan_single_i ff_ops (transpose_mat QI)))"
definition berlekamp_basis_i :: "'i list \<Rightarrow> 'i list list" where
"berlekamp_basis_i u = (map (poly_of_list_i ff_ops o list_of_vec)
(find_base_vectors_i ff_ops (berlekamp_resulting_mat_i u)))"
primrec berlekamp_factorization_main_i :: "'i \<Rightarrow> 'i \<Rightarrow> nat \<Rightarrow> 'i list list \<Rightarrow> 'i list list \<Rightarrow> nat \<Rightarrow> 'i list list" where
"berlekamp_factorization_main_i ze on d divs (v # vs) n = (
if v = [on] then berlekamp_factorization_main_i ze on d divs vs n else
if length divs = n then divs else
let of_int = arith_ops_record.of_int ff_ops;
facts = filter (\<lambda> w. w \<noteq> [on])
[ gcd_poly_i ff_ops u (minus_poly_i ff_ops v (if s = 0 then [] else [of_int (int s)])) .
u \<leftarrow> divs, s \<leftarrow> [0 ..< nat p]];
(lin,nonlin) = List.partition (\<lambda> q. degree_i q = d) facts
in lin @ berlekamp_factorization_main_i ze on d nonlin vs (n - length lin))"
| "berlekamp_factorization_main_i ze on d divs [] n = divs"
definition berlekamp_monic_factorization_i :: "nat \<Rightarrow> 'i list \<Rightarrow> 'i list list" where
"berlekamp_monic_factorization_i d f = (let
vs = berlekamp_basis_i f
in berlekamp_factorization_main_i (arith_ops_record.zero ff_ops) (arith_ops_record.one ff_ops) d [f] vs (length vs))"
partial_function (tailrec) dist_degree_factorize_main_i ::
"'i \<Rightarrow> 'i \<Rightarrow> nat \<Rightarrow> 'i list \<Rightarrow> 'i list \<Rightarrow> nat \<Rightarrow> (nat \<times> 'i list) list
\<Rightarrow> (nat \<times> 'i list) list" where
[code]: "dist_degree_factorize_main_i ze on dv v w d res = (if v = [on] then res else if d + d > dv
then (dv, v) # res else let
w = power_poly_f_mod_i (\<lambda> f. mod_field_poly_i ff_ops f v) w (nat p);
d = Suc d;
gd = gcd_poly_i ff_ops (minus_poly_i ff_ops w [ze,on]) v
in if gd = [on] then dist_degree_factorize_main_i ze on dv v w d res else
let v' = div_field_poly_i ff_ops v gd
in dist_degree_factorize_main_i ze on (degree_i v') v' (mod_field_poly_i ff_ops w v') d ((d,gd) # res))"
definition distinct_degree_factorization_i
:: "'i list \<Rightarrow> (nat \<times> 'i list) list" where
"distinct_degree_factorization_i f = (let ze = arith_ops_record.zero ff_ops;
on = arith_ops_record.one ff_ops in if degree_i f = 1 then [(1,f)] else
dist_degree_factorize_main_i ze on (degree_i f) f [ze,on] 0 [])"
definition finite_field_factorization_i :: "'i list \<Rightarrow> 'i \<times> 'i list list" where
"finite_field_factorization_i f = (if degree_i f = 0 then (lead_coeff_i ff_ops f,[]) else let
a = lead_coeff_i ff_ops f;
u = smult_i ff_ops (arith_ops_record.inverse ff_ops a) f;
gs = (if use_distinct_degree_factorization then distinct_degree_factorization_i u else [(1,u)]);
(irr,hs) = List.partition (\<lambda> (i,f). degree_i f = i) gs
in (a,map snd irr @ concat (map (\<lambda> (i,g). berlekamp_monic_factorization_i i g) hs)))"
end
context prime_field_gen
begin
lemma power_polys_i: assumes i: "i < n" and [transfer_rule]: "poly_rel f f'" "poly_rel g g'"
and h: "poly_rel h h'"
shows "poly_rel (power_polys_i ff_ops g f h n ! i) (power_polys g' f' h' n ! i)"
using i h
proof (induct n arbitrary: h h' i)
case (Suc n h h' i) note * = this
note [transfer_rule] = *(3)
show ?case
proof (cases i)
case 0
with Suc show ?thesis by auto
next
case (Suc j)
with *(2-) have "j < n" by auto
note IH = *(1)[OF this]
show ?thesis unfolding Suc by (simp, rule IH, transfer_prover)
qed
qed simp
lemma power_poly_f_mod_i: assumes m: "(poly_rel ===> poly_rel) m (\<lambda> x'. x' mod m')"
shows "poly_rel f f' \<Longrightarrow> poly_rel (power_poly_f_mod_i ff_ops m f n) (power_poly_f_mod m' f' n)"
proof -
from m have m: "\<And> x x'. poly_rel x x' \<Longrightarrow> poly_rel (m x) (x' mod m')"
unfolding rel_fun_def by auto
show "poly_rel f f' \<Longrightarrow> poly_rel (power_poly_f_mod_i ff_ops m f n) (power_poly_f_mod m' f' n)"
proof (induct n arbitrary: f f' rule: less_induct)
case (less n f f')
note f[transfer_rule] = less(2)
show ?case
proof (cases "n = 0")
case True
show ?thesis
by (simp add: True power_poly_f_mod_i.simps power_poly_f_mod_binary,
rule m[OF poly_rel_one])
next
case False
hence n: "(n = 0) = False" by simp
obtain q r where div: "Divides.divmod_nat n 2 = (q,r)" by force
from this[unfolded divmod_nat_def] n have "q < n" by auto
note IH = less(1)[OF this]
have rec: "poly_rel (power_poly_f_mod_i ff_ops m (m (times_poly_i ff_ops f f)) q)
(power_poly_f_mod m' (f' * f' mod m') q)"
by (rule IH, rule m, transfer_prover)
have other: "poly_rel
(m (times_poly_i ff_ops (power_poly_f_mod_i ff_ops m (m (times_poly_i ff_ops f f)) q) f))
(power_poly_f_mod m' (f' * f' mod m') q * f' mod m')"
by (rule m, rule poly_rel_times[unfolded rel_fun_def, rule_format, OF rec f])
show ?thesis unfolding power_poly_f_mod_i.simps[of _ _ _ n] Let_def
power_poly_f_mod_binary[of _ _ n] div split n if_False using rec other by auto
qed
qed
qed
lemma berlekamp_mat_i[transfer_rule]: "(poly_rel ===> mat_rel R)
(berlekamp_mat_i p ff_ops) berlekamp_mat"
proof (intro rel_funI)
fix f f'
let ?ze = "arith_ops_record.zero ff_ops"
let ?on = "arith_ops_record.one ff_ops"
assume f[transfer_rule]: "poly_rel f f'"
have deg: "degree_i f = degree f'" by transfer_prover
{
fix i j
assume i: "i < degree f'" and j: "j < degree f'"
define cs where "cs = (\<lambda>cs :: 'i list. cs @ replicate (degree f' - length cs) ?ze)"
define cs' where "cs' = (\<lambda>cs :: 'a mod_ring poly. coeffs cs @ replicate (degree f' - length (coeffs cs)) 0)"
define poly where "poly = power_polys_i ff_ops
(power_poly_f_mod_i ff_ops (\<lambda>v. mod_field_poly_i ff_ops v f) [?ze, ?on] (nat p)) f [?on]
(degree f')"
define poly' where "poly' = (power_polys (power_poly_f_mod f' [:0, 1:] (nat p)) f' 1 (degree f'))"
have *: "poly_rel (power_poly_f_mod_i ff_ops (\<lambda>v. mod_field_poly_i ff_ops v f) [?ze, ?on] (nat p))
(power_poly_f_mod f' [:0, 1:] (nat p))"
by (rule power_poly_f_mod_i, transfer_prover, simp add: poly_rel_def one zero)
have [transfer_rule]: "poly_rel (poly ! i) (poly' ! i)"
unfolding poly_def poly'_def
by (rule power_polys_i[OF i f *], simp add: poly_rel_def one)
have *: "list_all2 R (cs (poly ! i)) (cs' (poly' ! i))"
unfolding cs_def cs'_def by transfer_prover
from list_all2_nthD[OF *[unfolded poly_rel_def], of j] j
have "R (cs (poly ! i) ! j) (cs' (poly' ! i) ! j)" unfolding cs_def by auto
hence "R
(mat_of_rows_list (degree f')
(map (\<lambda>cs. cs @ replicate (degree f' - length cs) ?ze)
(power_polys_i ff_ops
(power_poly_f_mod_i ff_ops (\<lambda>v. mod_field_poly_i ff_ops v f) [?ze, ?on] (nat p)) f [?on]
(degree f'))) $$
(i, j))
(mat_of_rows_list (degree f')
(map (\<lambda>cs. coeffs cs @ replicate (degree f' - length (coeffs cs)) 0)
(power_polys (power_poly_f_mod f' [:0, 1:] (nat p)) f' 1 (degree f'))) $$
(i, j))"
unfolding mat_of_rows_list_def length_map length_power_polys_i power_polys_works
length_power_polys index_mat[OF i j] split
unfolding poly_def cs_def poly'_def cs'_def using i
by auto
} note main = this
show "mat_rel R (berlekamp_mat_i p ff_ops f) (berlekamp_mat f')"
unfolding berlekamp_mat_i_def berlekamp_mat_def Let_def nat_p[symmetric] deg
unfolding mat_rel_def
by (intro conjI allI impI, insert main, auto)
qed
lemma berlekamp_resulting_mat_i[transfer_rule]: "(poly_rel ===> mat_rel R)
(berlekamp_resulting_mat_i p ff_ops) berlekamp_resulting_mat"
proof (intro rel_funI)
fix f f'
assume "poly_rel f f'"
from berlekamp_mat_i[unfolded rel_fun_def, rule_format, OF this]
have bmi: "mat_rel R (berlekamp_mat_i p ff_ops f) (berlekamp_mat f')" .
show "mat_rel R (berlekamp_resulting_mat_i p ff_ops f) (berlekamp_resulting_mat f')"
unfolding berlekamp_resulting_mat_def Let_def berlekamp_resulting_mat_i_def
by (rule gauss_jordan_i[unfolded rel_fun_def, rule_format],
insert bmi, auto simp: mat_rel_def one intro!: minus[unfolded rel_fun_def, rule_format])
qed
lemma berlekamp_basis_i[transfer_rule]: "(poly_rel ===> list_all2 poly_rel)
(berlekamp_basis_i p ff_ops) berlekamp_basis"
unfolding berlekamp_basis_i_def[abs_def] berlekamp_basis_code[abs_def] o_def
by transfer_prover
lemma berlekamp_factorization_main_i[transfer_rule]:
"((=) ===> list_all2 poly_rel ===> list_all2 poly_rel ===> (=) ===> list_all2 poly_rel)
(berlekamp_factorization_main_i p ff_ops (arith_ops_record.zero ff_ops)
(arith_ops_record.one ff_ops))
berlekamp_factorization_main"
proof (intro rel_funI, clarify, goal_cases)
case (1 _ d xs xs' ys ys' _ n)
let ?ze = "arith_ops_record.zero ff_ops"
let ?on = "arith_ops_record.one ff_ops"
let ?of_int = "arith_ops_record.of_int ff_ops"
from 1(2) 1(1) show ?case
proof (induct ys ys' arbitrary: xs xs' n rule: list_all2_induct)
case (Cons y ys y' ys' xs xs' n)
note trans[transfer_rule] = Cons(1,2,4)
obtain clar0 clar1 clar2 where clarify: "\<And> s u. gcd_poly_i ff_ops u
(minus_poly_i ff_ops y
(if s = 0 then [] else [?of_int (int s)])) = clar0 s u"
"[0..<nat p] = clar1"
"[?on] = clar2" by auto
define facts where "facts = concat (map (\<lambda>u. concat
(map (\<lambda>s. if gcd_poly_i ff_ops u
(minus_poly_i ff_ops y (if s = 0 then [] else [?of_int (int s)])) \<noteq>
[?on]
then [gcd_poly_i ff_ops u
(minus_poly_i ff_ops y (if s = 0 then [] else [?of_int (int s)]))]
else [])
[0..<nat p])) xs)"
define Facts where "Facts = [w\<leftarrow>concat
(map (\<lambda>u. map (\<lambda>s. gcd_poly_i ff_ops u
(minus_poly_i ff_ops y
(if s = 0 then [] else [?of_int (int s)])))
[0..<nat p])
xs) . w \<noteq> [?on]]"
have Facts: "Facts = facts"
unfolding Facts_def facts_def clarify
proof (induct xs)
case (Cons x xs)
show ?case by (simp add: Cons, induct clar1, auto)
qed simp
define facts' where "facts' = concat
(map (\<lambda>u. concat
(map (\<lambda>x. if gcd u (y' - [:of_nat x:]) \<noteq> 1
then [gcd u (y' - [:of_int (int x):])] else [])
[0..<nat p]))
xs')"
have id: "\<And> x. of_int (int x) = of_nat x" "[?on] = one_poly_i ff_ops"
by (auto simp: one_poly_i_def)
have facts[transfer_rule]: "list_all2 poly_rel facts facts'"
unfolding facts_def facts'_def
apply (rule concat_transfer[unfolded rel_fun_def, rule_format])
apply (rule list.map_transfer[unfolded rel_fun_def, rule_format, OF _ trans(3)])
apply (rule concat_transfer[unfolded rel_fun_def, rule_format])
apply (rule list_all2_map_map)
proof (unfold id)
fix f f' x
assume [transfer_rule]: "poly_rel f f'" and x: "x \<in> set [0..<nat p]"
hence *: "0 \<le> int x" "int x < p" by auto
from of_int[OF this] have rel[transfer_rule]: "R (?of_int (int x)) (of_nat x)" by auto
{
assume "0 < x"
with * have *: "0 < int x" "int x < p" by auto
have "(of_nat x :: 'a mod_ring) = of_int (int x)" by simp
also have "\<dots> \<noteq> 0" unfolding of_int_of_int_mod_ring using * unfolding p
by (transfer', auto)
}
with rel have [transfer_rule]: "poly_rel (if x = 0 then [] else [?of_int (int x)]) [:of_nat x:]"
unfolding poly_rel_def by (auto simp add: cCons_def p)
show "list_all2 poly_rel
(if gcd_poly_i ff_ops f (minus_poly_i ff_ops y (if x = 0 then [] else [?of_int (int x)])) \<noteq> one_poly_i ff_ops
then [gcd_poly_i ff_ops f (minus_poly_i ff_ops y (if x = 0 then [] else [?of_int (int x)]))]
else [])
(if gcd f' (y' - [:of_nat x:]) \<noteq> 1 then [gcd f' (y' - [:of_nat x:])] else [])"
by transfer_prover
qed
have id1: "berlekamp_factorization_main_i p ff_ops ?ze ?on d xs (y # ys) n = (
if y = [?on] then berlekamp_factorization_main_i p ff_ops ?ze ?on d xs ys n else
if length xs = n then xs else
(let fac = facts;
(lin, nonlin) = List.partition (\<lambda>q. degree_i q = d) fac
in lin @ berlekamp_factorization_main_i p ff_ops ?ze ?on d nonlin ys (n - length lin)))"
unfolding berlekamp_factorization_main_i.simps Facts[symmetric]
by (simp add: o_def Facts_def Let_def)
have id2: "berlekamp_factorization_main d xs' (y' # ys') n = (
if y' = 1 then berlekamp_factorization_main d xs' ys' n
else if length xs' = n then xs' else
(let fac = facts';
(lin, nonlin) = List.partition (\<lambda>q. degree q = d) fac
in lin @ berlekamp_factorization_main d nonlin ys' (n - length lin)))"
by (simp add: o_def facts'_def nat_p)
have len: "length xs = length xs'" by transfer_prover
have id3: "(y = [?on]) = (y' = 1)"
by (transfer_prover_start, transfer_step+, simp add: one_poly_i_def finite_field_ops_int_def)
show ?case
proof (cases "y' = 1")
case True
hence id4: "(y' = 1) = True" by simp
show ?thesis unfolding id1 id2 id3 id4 if_True
by (rule Cons(3), transfer_prover)
next
case False
hence id4: "(y' = 1) = False" by simp
note id1 = id1[unfolded id3 id4 if_False]
note id2 = id2[unfolded id4 if_False]
show ?thesis
proof (cases "length xs' = n")
case True
thus ?thesis unfolding id1 id2 Let_def len using trans by simp
next
case False
hence id: "(length xs' = n) = False" by simp
have id': "length [q\<leftarrow>facts . degree_i q = d] = length [q\<leftarrow>facts'. degree q = d]"
by transfer_prover
have [transfer_rule]: "list_all2 poly_rel (berlekamp_factorization_main_i p ff_ops ?ze ?on d [x\<leftarrow>facts . degree_i x \<noteq> d] ys
(n - length [q\<leftarrow>facts . degree_i q = d]))
(berlekamp_factorization_main d [x\<leftarrow>facts' . degree x \<noteq> d] ys'
(n - length [q\<leftarrow>facts' . degree q = d]))"
unfolding id'
by (rule Cons(3), transfer_prover)
show ?thesis unfolding id1 id2 Let_def len id if_False
unfolding partition_filter_conv o_def split by transfer_prover
qed
qed
qed simp
qed
lemma berlekamp_monic_factorization_i[transfer_rule]:
"((=) ===> poly_rel ===> list_all2 poly_rel)
(berlekamp_monic_factorization_i p ff_ops) berlekamp_monic_factorization"
unfolding berlekamp_monic_factorization_i_def[abs_def] berlekamp_monic_factorization_def[abs_def] Let_def
by transfer_prover
lemma dist_degree_factorize_main_i:
"poly_rel F f \<Longrightarrow> poly_rel G g \<Longrightarrow> list_all2 (rel_prod (=) poly_rel) Res res
\<Longrightarrow> list_all2 (rel_prod (=) poly_rel)
(dist_degree_factorize_main_i p ff_ops
(arith_ops_record.zero ff_ops) (arith_ops_record.one ff_ops) (degree_i F) F G d Res)
(dist_degree_factorize_main f g d res)"
proof (induct f g d res arbitrary: F G Res rule: dist_degree_factorize_main.induct)
case (1 v w d res V W Res)
let ?ze = "arith_ops_record.zero ff_ops"
let ?on = "arith_ops_record.one ff_ops"
note simp = dist_degree_factorize_main.simps[of v w d]
dist_degree_factorize_main_i.simps[of p ff_ops ?ze ?on "degree_i V" V W d]
have v[transfer_rule]: "poly_rel V v" by (rule 1)
have w[transfer_rule]: "poly_rel W w" by (rule 1)
have res[transfer_rule]: "list_all2 (rel_prod (=) poly_rel) Res res" by (rule 1)
have [transfer_rule]: "poly_rel [?on] 1"
by (simp add: one poly_rel_def)
have id1: "(V = [?on]) = (v = 1)" unfolding finite_field_ops_int_def by transfer_prover
have id2: "degree_i V = degree v" by transfer_prover
note simp = simp[unfolded id1 id2]
note IH = 1(1,2)
show ?case
proof (cases "v = 1")
case True
with res show ?thesis unfolding id2 simp by simp
next
case False
with id1 have "(v = 1) = False" by auto
note simp = simp[unfolded this if_False]
note IH = IH[OF False]
show ?thesis
proof (cases "degree v < d + d")
case True
thus ?thesis unfolding id2 simp using res v by auto
next
case False
hence "(degree v < d + d) = False" by auto
note simp = simp[unfolded this if_False]
let ?P = "power_poly_f_mod_i ff_ops (\<lambda>f. mod_field_poly_i ff_ops f V) W (nat p)"
let ?G = "gcd_poly_i ff_ops (minus_poly_i ff_ops ?P [?ze, ?on]) V"
let ?g = "gcd (w ^ CARD('a) mod v - monom 1 1) v"
define G where "G = ?G"
define g where "g = ?g"
note simp = simp[unfolded Let_def, folded G_def g_def]
note IH = IH[OF False refl refl refl]
have [transfer_rule]: "poly_rel [?ze,?on] (monom 1 1)" unfolding poly_rel_def
by (auto simp: coeffs_monom one zero)
have id: "w ^ CARD('a) mod v = power_poly_f_mod v w (nat p)"
unfolding power_poly_f_mod_def by (simp add: p)
have P[transfer_rule]: "poly_rel ?P (w ^ CARD('a) mod v)" unfolding id
by (rule power_poly_f_mod_i[OF _ w], transfer_prover)
have g[transfer_rule]: "poly_rel G g" unfolding G_def g_def by transfer_prover
have id3: "(G = [?on]) = (g = 1)" by transfer_prover
note simp = simp[unfolded id3]
show ?thesis
proof (cases "g = 1")
case True
from IH(1)[OF this[unfolded g_def] v P res] True
show ?thesis unfolding id2 simp by simp
next
case False
have vg: "poly_rel (div_field_poly_i ff_ops V G) (v div g)" by transfer_prover
have "poly_rel (mod_field_poly_i ff_ops ?P
(div_field_poly_i ff_ops V G)) (w ^ CARD('a) mod v mod (v div g))" by transfer_prover
note IH = IH(2)[OF False[unfolded g_def] refl vg[unfolded G_def g_def] this[unfolded G_def g_def],
folded g_def G_def]
have "list_all2 (rel_prod (=) poly_rel) ((Suc d, G) # Res) ((Suc d, g) # res)"
using g res by auto
note IH = IH[OF this]
from False have "(g = 1) = False" by simp
note simp = simp[unfolded this if_False]
show ?thesis unfolding id2 simp using IH by simp
qed
qed
qed
qed
lemma distinct_degree_factorization_i[transfer_rule]: "(poly_rel ===> list_all2 (rel_prod (=) poly_rel))
(distinct_degree_factorization_i p ff_ops) distinct_degree_factorization"
proof
fix F f
assume f[transfer_rule]: "poly_rel F f"
have id: "(degree_i F = 1) = (degree f = 1)" by transfer_prover
note d = distinct_degree_factorization_i_def distinct_degree_factorization_def
let ?ze = "arith_ops_record.zero ff_ops"
let ?on = "arith_ops_record.one ff_ops"
show "list_all2 (rel_prod (=) poly_rel) (distinct_degree_factorization_i p ff_ops F)
(distinct_degree_factorization f)"
proof (cases "degree f = 1")
case True
with id f show ?thesis unfolding d by auto
next
case False
from False id have "?thesis = (list_all2 (rel_prod (=) poly_rel)
(dist_degree_factorize_main_i p ff_ops ?ze ?on (degree_i F) F [?ze, ?on] 0 [])
(dist_degree_factorize_main f (monom 1 1) 0 []))" unfolding d Let_def by simp
also have \<dots>
by (rule dist_degree_factorize_main_i[OF f], auto simp: poly_rel_def
coeffs_monom one zero)
finally show ?thesis .
qed
qed
lemma finite_field_factorization_i[transfer_rule]:
"(poly_rel ===> rel_prod R (list_all2 poly_rel))
(finite_field_factorization_i p ff_ops) finite_field_factorization"
unfolding finite_field_factorization_i_def finite_field_factorization_def Let_def lead_coeff_i_def'
by transfer_prover
text \<open>Since the implementation is sound, we can now combine it with the soundness result
of the finite field factorization.\<close>
lemma finite_field_i_sound:
assumes f': "f' = of_int_poly_i ff_ops (Mp f)"
and berl_i: "finite_field_factorization_i p ff_ops f' = (c',fs')"
and sq: "square_free_m f"
and fs: "fs = map (to_int_poly_i ff_ops) fs'"
and c: "c = arith_ops_record.to_int ff_ops c'"
shows "unique_factorization_m f (c, mset fs)
\<and> c \<in> {0 ..< p}
\<and> (\<forall> fi \<in> set fs. set (coeffs fi) \<subseteq> {0 ..< p})"
proof -
define f'' :: "'a mod_ring poly" where "f'' = of_int_poly (Mp f)"
have rel_f[transfer_rule]: "poly_rel f' f''"
by (rule poly_rel_of_int_poly[OF f'], simp add: f''_def)
interpret pff: idom_ops "poly_ops ff_ops" poly_rel
by (rule idom_ops_poly)
obtain c'' fs'' where berl: "finite_field_factorization f'' = (c'',fs'')" by force
from rel_funD[OF finite_field_factorization_i rel_f, unfolded rel_prod_conv assms(2) split berl]
have rel[transfer_rule]: "R c' c''" "list_all2 poly_rel fs' fs''" by auto
from to_int[OF rel(1)] have cc': "c = to_int_mod_ring c''" unfolding c by simp
have c: "c \<in> {0 ..< p}" unfolding cc'
by (metis Divides.pos_mod_bound Divides.pos_mod_sign M_to_int_mod_ring atLeastLessThan_iff
gr_implies_not_zero nat_le_0 nat_p not_le poly_mod.M_def zero_less_card_finite)
{
fix f
assume "f \<in> set fs'"
with rel(2) obtain f' where "poly_rel f f'" unfolding list_all2_conv_all_nth set_conv_nth
by auto
hence "is_poly ff_ops f" using fun_cong[OF Domainp_is_poly, of f]
unfolding Domainp_iff[abs_def] by auto
}
hence fs': "Ball (set fs') (is_poly ff_ops)" by auto
define mon :: "'a mod_ring poly \<Rightarrow> bool" where "mon = monic"
have [transfer_rule]: "(poly_rel ===> (=)) (monic_i ff_ops) mon" unfolding mon_def
by (rule poly_rel_monic)
have len: "length fs' = length fs''" by transfer_prover
have fs': "fs = map to_int_poly fs''" unfolding fs
proof (rule nth_map_conv[OF len], intro allI impI)
fix i
assume i: "i < length fs'"
obtain f g where id: "fs' ! i = f" "fs'' ! i = g" by auto
from i rel(2)[unfolded list_all2_conv_all_nth[of _ fs' fs'']] id
have "poly_rel f g" by auto
from to_int_poly_i[OF this] have "to_int_poly_i ff_ops f = to_int_poly g" .
thus "to_int_poly_i ff_ops (fs' ! i) = to_int_poly (fs'' ! i)" unfolding id .
qed
have f: "f'' = of_int_poly f" unfolding poly_eq_iff f''_def
by (simp add: to_int_mod_ring_hom.injectivity to_int_mod_ring_of_int_M Mp_coeff)
have *: "unique_factorization_m f (c, mset fs)"
using finite_field_factorization_modulo_ring[OF f sq berl cc' fs'] by auto
have fs': "(\<forall>fi\<in>set fs. set (coeffs fi) \<subseteq> {0..<p})" unfolding fs'
using range_to_int_mod_ring[where 'a = 'a]
by (auto simp: coeffs_to_int_poly p)
with c fs *
show ?thesis by blast
qed
end
definition finite_field_factorization_main :: "int \<Rightarrow> 'i arith_ops_record \<Rightarrow> int poly \<Rightarrow> int \<times> int poly list" where
"finite_field_factorization_main p f_ops f \<equiv>
let (c',fs') = finite_field_factorization_i p f_ops (of_int_poly_i f_ops (poly_mod.Mp p f))
in (arith_ops_record.to_int f_ops c', map (to_int_poly_i f_ops) fs')"
lemma(in prime_field_gen) finite_field_factorization_main:
assumes res: "finite_field_factorization_main p ff_ops f = (c,fs)"
and sq: "square_free_m f"
shows "unique_factorization_m f (c, mset fs)
\<and> c \<in> {0 ..< p}
\<and> (\<forall> fi \<in> set fs. set (coeffs fi) \<subseteq> {0 ..< p})"
proof -
obtain c' fs' where
res': "finite_field_factorization_i p ff_ops (of_int_poly_i ff_ops (Mp f)) = (c', fs')" by force
show ?thesis
by (rule finite_field_i_sound[OF refl res' sq],
insert res[unfolded finite_field_factorization_main_def res'], auto)
qed
definition finite_field_factorization_int :: "int \<Rightarrow> int poly \<Rightarrow> int \<times> int poly list" where
"finite_field_factorization_int p = (
if p \<le> 65535
then finite_field_factorization_main p (finite_field_ops32 (uint32_of_int p))
else if p \<le> 4294967295
then finite_field_factorization_main p (finite_field_ops64 (uint64_of_int p))
else finite_field_factorization_main p (finite_field_ops_integer (integer_of_int p)))"
context poly_mod_prime begin
lemmas finite_field_factorization_main_integer = prime_field_gen.finite_field_factorization_main
[OF prime_field.prime_field_finite_field_ops_integer, unfolded prime_field_def mod_ring_locale_def,
unfolded poly_mod_type_simps, internalize_sort "'a :: prime_card", OF type_to_set, unfolded remove_duplicate_premise, cancel_type_definition, OF non_empty]
lemmas finite_field_factorization_main_uint32 = prime_field_gen.finite_field_factorization_main
[OF prime_field.prime_field_finite_field_ops32, unfolded prime_field_def mod_ring_locale_def,
unfolded poly_mod_type_simps, internalize_sort "'a :: prime_card", OF type_to_set, unfolded remove_duplicate_premise, cancel_type_definition, OF non_empty]
lemmas finite_field_factorization_main_uint64 = prime_field_gen.finite_field_factorization_main
[OF prime_field.prime_field_finite_field_ops64, unfolded prime_field_def mod_ring_locale_def,
unfolded poly_mod_type_simps, internalize_sort "'a :: prime_card", OF type_to_set, unfolded remove_duplicate_premise, cancel_type_definition, OF non_empty]
lemma finite_field_factorization_int:
assumes sq: "poly_mod.square_free_m p f"
and result: "finite_field_factorization_int p f = (c,fs)"
shows "poly_mod.unique_factorization_m p f (c, mset fs)
\<and> c \<in> {0 ..< p}
\<and> (\<forall> fi \<in> set fs. set (coeffs fi) \<subseteq> {0 ..< p})"
using finite_field_factorization_main_integer[OF _ sq, of c fs]
finite_field_factorization_main_uint32[OF _ _ sq, of c fs]
finite_field_factorization_main_uint64[OF _ _ sq, of c fs]
result[unfolded finite_field_factorization_int_def]
by (auto split: if_splits)
end
end
|
function fid = writefort15( f15dat, f15out, boudat )
%
if ( strcmp(strtrim(f15out), 'fort.15') )
disp('Error: an output file name must not be fort.15') ;
return ;
end
fid = fopen( f15out, 'w' ) ;
fprintf(fid, '%s\n', f15dat.rundes ) ; % RUNDES
fprintf(fid, '%s\n', f15dat.runid ) ; % RUNID
% NFOVER
L = length(f15dat.nfover);
dfmt = repmat('%g ',1, L);
fprintf( fid, [dfmt ' \t ! NFOVER \n'], f15dat.nfover ) ;
% NABOUT
fprintf(fid, '%d \t ! NABOUT \n', f15dat.nabout ) ;
% NSCREEN
fprintf(fid, '%d \t ! NSCREEN \n', f15dat.nscreen ) ;
% IHOT
fprintf(fid, '%d \t ! IHOT \n', f15dat.ihot ) ;
% ICS
fprintf(fid, '%d \t ! ICS \n', f15dat.ics ) ;
% IM
fprintf(fid, '%d \t ! IM \n', f15dat.im ) ;
% IDEN
if f15dat.im == 20 || f15dat.im == 30
fprintf( fid, '%d \t ! IDEN \n', f15dat.iden ) ;
end
% NOLIBF
fprintf(fid, '%d \t ! NOLIBF \n', f15dat.nolibf ) ;
% NOLIFA
fprintf(fid, '%d \t ! NOLIFA \n', f15dat.nolifa ) ;
% NOLICA
fprintf(fid, '%d \t ! NOLICA \n', f15dat.nolica ) ;
% NOLICAT
fprintf(fid, '%d \t ! NOLICAT \n', f15dat.nolicat ) ;
% NWP
fprintf(fid, '%d \t ! NWP \n', f15dat.nwp ) ;
if ( f15dat.nwp > 0 )
for l = 1: f15dat.nwp
fprintf(fid, '%s\n', f15dat.AttrName(l).name ) ;
end
end
% NCOR
fprintf(fid, '%d \t \t ! NCOR \n', f15dat.ncor ) ;
% NTIP
fprintf(fid, '%d \t \t ! NTIP \n', f15dat.ntip ) ;
% NWS
fprintf(fid, '%d \t \t ! NWS \n', f15dat.nws ) ;
% NRAMP
fprintf(fid, '%d \t \t ! NRAMP \n', f15dat.nramp ) ;
% G
fprintf(fid, '%f \t ! G \n', f15dat.gravity ) ;
% TAU0
fprintf(fid, '%g \t \t ! TAU0 \n', f15dat.tau0 ) ;
% Tau0FullDomainMin, Tau0FullDomainMax
if ( abs(f15dat.tau0 + 5.0) < 1e-10 )
fprintf(fid, '%f %f \t ! Tau0FullDomainMin, Tau0FullDomainMax \n', f15dat.tau0minmax ) ;
end
% DTDP
fprintf( fid, '%g \t \t ! DTDP \n', f15dat.dtdp ) ;
% STATIM
fprintf( fid, '%g \t \t ! STATIM \n', f15dat.statim ) ;
% REFTIM
fprintf( fid, '%g \t \t ! REFTIM \n', f15dat.reftim ) ;
% WTIMINC
if f15dat.nws == 8
fprintf( fid, '%d %d %d %d %d %g', f15dat.wtimnc ) ;
fprintf( fid, ' \t ! YYYY MM DD HH24 StormNumber BLAdj \n' ) ;
elseif f15dat.nws >= 19
fprintf( fid, '%d %d %d %d %d %g %d', f15dat.wtimnc ) ;
fprintf( fid, ' \t ! YYYY MM DD HH24 StormNumber BLAdj geofactor \n' ) ;
elseif f15dat.nws > 0
fprintf( fid, '%d ', f15dat.wtimnc ) ;
fprintf( fid, ' \t ! WTMINC \n' ) ;
end
% RNDY
fprintf( fid, '%g \t \t ! RNDY \n', f15dat.rndy ) ;
% DRAMP
L = length(f15dat.dramp);
dfmt = repmat('%g ',1, L);
fprintf( fid, [dfmt ' \t \t ! DRAMP \n'], f15dat.dramp ) ;
% A00, B00, C00
fprintf( fid, '%g %g %g \t ! A00, B00, C00 \n', f15dat.a00b00c00 ) ;
% H0
len = length(f15dat.h0) ;
for k = 1: len
fprintf( fid, '%g ', f15dat.h0(k) ) ;
end
fprintf( fid, ' \t ! H0, 2*dummy, VELMIN \n' ) ;
% SLAM0, SFEA0
fprintf( fid, '%f %f \t \t ! SLAM0, SFEA0 \n', f15dat.slam ) ;
% CF
if ( f15dat.nolibf <= 2 )
fprintf( fid, '%f ', f15dat.taucf ) ;
fprintf( fid, ' \t ! CF \n' ) ;
end
% ESLM, ESLC
if ( f15dat.im <= 2 || f15dat.im == 10 || f15dat.im >= 111111 )
fprintf( fid, '%f ', f15dat.elsm ) ;
fprintf( fid, ' \t ! ELSM \n' ) ;
end
% CORI
fprintf( fid, '%f \t ! CORI \n', f15dat.cori ) ;
% NTIF
fprintf( fid, '%d \t \t ! NTIF \n', f15dat.ntif ) ;
% Tidal potential
for k = 1: f15dat.ntif
fprintf( fid, '%s \n', f15dat.tipotag(k).name ) ;
fprintf( fid, '%f %16.9e %f %f %f', f15dat.tipotag(k).val ) ;
fprintf( fid, ' \t ! TPK, AMIGT, ETRF, FFT, FACET \n' ) ;
end
% NBFR
fprintf( fid, '%d \t \t ! NBFR \n', f15dat.nbfr ) ;
for k = 1: f15dat.nbfr
fprintf( fid, '%s \n', f15dat.bountag(k).name ) ;
fprintf( fid, '%16.9e %f %f \n', f15dat.bountag(k).val ) ;
end
% Open boundary harmonic forcing
for k = 1: f15dat.nbfr
fprintf(fid, '%s \n', f15dat.opealpha(k).name ) ;
fprintf(fid, '%16.9e %16.10g \n', f15dat.opealpha(k).val' ) ;
end
% ANGINN
fprintf( fid, '%g \t \t ! ANGINN \n', f15dat.anginn ) ;
% Land boundary
ibtype = [2 12 22 32 52] ;
sm = 0 ;
if ~isempty(boudat)
for k = 1: length(ibtype)
sm = sm + sum(~(boudat.ibtype - ibtype(k))) ; % count boundary segments whose ibtype matches ibtype(k)
end
end
if ( sm > 0 )
fprintf( fid, '%d \t \t ! NFFR \n', f15dat.nffr ) ;
nm = 0 ;
for ib = 1: boudat.nbou
ibty = boudat.ibtype(ib);
switch ibty
case {2,12,22,32,52}
nm = nm + boudat.nvell(ib) ;
otherwise
end
end
for k = 1: f15dat.nffr
fprintf( fid, '%s \n', f15dat.fbountag(k).name ) ;
fprintf( fid, '%15.8e ', f15dat.fbounspec(k).val ) ;
fprintf( fid, '\n') ;
end
for k = 1: f15dat.nffr
fprintf( fid, '%s \n', f15dat.boualpha(k).name ) ;
% val = fscanf(fid, '%f ' ) ; % Must be revisited
icnt = 0 ;
for ib = 1: boudat.nbou
ibty = boudat.ibtype(ib);
switch ibty
case {2,12,22,52}
for ir = 1: boudat.nvell(ib)
icnt = icnt + 1 ;
fprintf(fid, '%16.9e ', f15dat.qnam(k).val(icnt,1:2) ) ;
fprintf(fid, ' \n' ) ;
end
case 32
for ir = 1: boudat.nvell(ib)
icnt = icnt + 1 ;
fprintf(fid, '%15.8e ', f15dat.qnam(k).val(icnt,1:5) ) ;
fprintf(fid, ' \n' ) ;
end
otherwise
end
end
end
end
% NOUTE, TOUTSE, TOUTFE, NSPOOLE
fprintf( fid, '%d %g %g %d', f15dat.oute ) ;
fprintf( fid, ' \t ! NOUTE, TOUTSE, TOUTFE, NSPOOLE \n' ) ;
% NSTAE
fprintf( fid, '%d \t \t ! NSTAE \n', f15dat.nstae ) ;
% STAE location
if ( f15dat.nstae > 0 )
for k = 1: f15dat.nstae
fprintf(fid, '%f %f %s \n', f15dat.elvstaloc(k,1:2), f15dat.elvstaname{k} ) ;
end
end
% NOUTV, TOUTV, TOUTFV, NSPOOLV
fprintf( fid, '%d %g %g %d', f15dat.outv ) ;
fprintf( fid, ' \t ! NOUTV, TOUTV, TOUTFV, NSPOOLV \n') ;
% NSTAV
fprintf( fid, '%d \t \t ! NSTAV \n', f15dat.nstav ) ;
% STAV location
if ( f15dat.nstav > 0 )
for k = 1: f15dat.nstav
fprintf( fid, '%f %f %s \n', f15dat.velstaloc(k,1:2), f15dat.velstaname{k} ) ;
end
end
% NOUTM, TOUTM, TOUTFM, NSPOOLM
if ( f15dat.nws ~= 0 )
fprintf(fid, '%d %g %g %d', f15dat.outm ) ;
fprintf(fid, ' \t ! NOUTM, TOUTM, TOUTFM, NSPOOLM \n') ;
% NSTAM
fprintf(fid, '%d \t \t ! NSTAM \n', f15dat.nstam ) ;
% STAM location
if ( f15dat.nstam > 0 )
for k = 1: f15dat.nstam
fprintf( fid, '%f %f %s \n', f15dat.metstaloc(k,1:2), f15dat.metstaname{k} ) ;
end
end
end
% NOUTGE
fprintf( fid, '%d %g %g %d', f15dat.outge ) ;
fprintf( fid, ' \t ! NOUTGE, ... \n') ;
% NOUTGV
fprintf( fid, '%d %g %g %d', f15dat.outgv ) ;
fprintf( fid, ' \t ! NOUTGV, ... \n') ;
% NOUTGC
if ( f15dat.im == 10 )
fprintf( fid, '%d %g %g %d', f15dat.outgc ) ;
fprintf( fid, ' \t ! NOUTGC, ... \n' ) ;
end
% NOUTGM
if ( f15dat.nws ~= 0 )
fprintf( fid, '%d %g %g %d', f15dat.outgm ) ;
fprintf( fid, ' \t ! NOUTGM, ... \n' ) ;
end
% NFREQ
fprintf( fid, '%d \t \t ! NFREQ \n', f15dat.nfreq ) ;
for k = 1: f15dat.nfreq
fprintf( fid, '%s \n', f15dat.harfreq(k).name ) ;
fprintf( fid, '%16.9e %f %f \n', f15dat.harfreq(k).val ) ;
end
% THAS, THAF, NHAINC, FMV
fprintf( fid, '%g %g %d %g \t ! THAS, THAF, NHAINC, FMV \n', f15dat.outhar ) ;
% NHASE, NHASV, NHAGE, NHAGV
fprintf( fid, '%d ', f15dat.outhar_flag ) ;
fprintf( fid, ' \t ! NHASE, NHASV, NHAGE, NHAGV \n' ) ;
% NHSTAR, NHSINC
fprintf( fid, '%d %d \t ! NHSTAR, NHSINC \n', f15dat.nhstar) ;
% ITITER, ISLDIA, CONVCR, ITMAX
fprintf( fid, '%d %d %16.9e %d \t ! ITITER, ISLDIA, CONVCR, ITMAX \n', f15dat.ititer ) ;
% Extra lines including NETCDF & namelist
for k = 1: f15dat.nextraline
fprintf( fid, '%s\n', f15dat.extraline(k).msg ) ;
end
% Control namelists (if present)
if find(strcmp(fieldnames(f15dat),'controllist'),1)
for k = 1: length(f15dat.controllist)
fprintf( fid, '! -- Begin %s Control Namelist -- \n', f15dat.controllist(k).type ) ;
fprintf( fid, '&%sControl\n', f15dat.controllist(k).type ) ;
for m = 1:length(f15dat.controllist(k).var)
val = f15dat.controllist(k).var(m).val;
if ~ischar(val); val = num2str(val); end
fprintf( fid, '%s = %s,\n',f15dat.controllist(k).var(m).name,val) ;
end
fprintf( fid, '/\n') ;
fprintf( fid, '! -- End %s Control Namelist -- \n', f15dat.controllist(k).type ) ;
end
end
fclose(fid) ;
end
|
# install.packages("devtools", dependencies = TRUE, INSTALL_opts = '--no-lock')
# install.packages("rjson")
# library(devtools)
# if("devtools" %in% rownames(installed.packages()) == FALSE) {
# install.packages("devtools", dependencies = TRUE, INSTALL_opts = '--no-lock')
# }
# library("devtools")
#if("SPEI" %in% rownames(installed.packages()) == FALSE) {
# install.packages('SPEI', dependencies = TRUE, INSTALL_opts = '--no-lock')
#}
library("SPEI")
#if("rjson" %in% rownames(installed.packages()) == FALSE) {
# install.packages("rjson", dependencies = TRUE, INSTALL_opts = '--no-lock')
#}
library("rjson")
args <- commandArgs(trailingOnly = TRUE)
name <- args[1]
lat <- as.numeric(args[2])
rcp <- c("rcp26", "rcp45", "rcp85")
years <- c(2050, 2100)
speiData <- data.frame(matrix(ncol = 7, nrow = 60))
colnames(speiData) <- c("date", paste0("rcp26", "_", years), paste0("rcp45", "_", years), paste0("rcp85", "_", years))
speiData["date"] <- format(seq(as.Date("2046-01-01"), as.Date("2050-12-01"), "month"), "%Y-%m")
# Calculate potential evapotranspiration with the Penman method and derive
# 12-month SPEI for the five-year windows 2046-2050 and 2096-2100
for(r in rcp) {
for(year in years) {
data <- fromJSON(file = file.path("./portfolio/climate_risk_dash/data/temp", paste0(name, "_", r, "_", year, ".json")))
pen <- penman(Tmin = data$tasmin, Tmax = data$tasmax, U2 = data$sfcWind, lat = lat, Rs = data$rsds, RH = data$hurs, P = data$ps)
p <- data$pr - pen
speiData[paste0(r, "_", year)] <- spei(p, 12)$fitted
print(year)
}
}
write.csv(speiData, file = file.path("./portfolio/climate_risk_dash/report", paste0(name, "_speiData.csv")))
|
# Testing code
import numpy as np
import unittest
import subprocess
from .. import netcdf_read_write
class Tests(unittest.TestCase):
def test_pixel_node_writer(self):
"""
See if the writing function for pixel-node files produces a pixel-node file.
        The behavior has been finicky for float32 vs. float64.
        Writing a full test for float32 would be good (although the example grd file gets pretty close).
"""
grid_def = [-120, -114, 32, 37];
inc = [0.02, 0.02];
filename = 'test_outfile.nc'
lons = np.arange(grid_def[0], grid_def[1] + 0.00001, inc[0])
lats = np.arange(grid_def[2], grid_def[3] + 0.00001, inc[1])
# Test a write function
grid = np.zeros((len(lats), len(lons)));
netcdf_read_write.write_netcdf4(lons, lats, grid, filename);
netcdf_read_write.parse_pixelnode_registration(filename);
subprocess.call(['rm', filename], shell=False);
subprocess.call(['rm', 'gmt.history'], shell=False);
# Test a read-write cycle on an example grid
[x, y, z] = netcdf_read_write.read_any_grd("Tectonic_Utils/read_write/test/example_grd.grd");
netcdf_read_write.write_netcdf4(x, y, z, "Tectonic_Utils/read_write/test/written_example.grd");
netcdf_read_write.parse_pixelnode_registration("Tectonic_Utils/read_write/test/written_example.grd");
subprocess.call(['rm', 'gmt.history'], shell=False);
return;
if __name__ == "__main__":
unittest.main();
|
lemma filterlim_divide_at_infinity: fixes f g :: "'a \<Rightarrow> 'a :: real_normed_field" assumes "filterlim f (nhds c) F" "filterlim g (at 0) F" "c \<noteq> 0" shows "filterlim (\<lambda>x. f x / g x) at_infinity F"
|
/*
* Copyright 2020 Makani Technologies LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SIM_MATH_ODE_SOLVER_GSL_H_
#define SIM_MATH_ODE_SOLVER_GSL_H_
#include <gsl/gsl_errno.h>
#include <gsl/gsl_odeiv2.h>
#include <stdint.h>
#include <vector>
#include "common/macros.h"
#include "sim/math/ode_solver.h"
#include "sim/sim_types.h"
namespace sim {
// Wrapper for the GSL ODE library.
class GslOdeSolver : public OdeSolver {
public:
explicit GslOdeSolver(const OdeSystem &ode_system,
const SimOdeSolverParams ¶ms);
~GslOdeSolver() {
if (ode_driver_ != nullptr) gsl_odeiv2_driver_free(ode_driver_);
}
OdeSolverStatus Integrate(double t0, double tf, const std::vector<double> &x0,
double *t_int, std::vector<double> *x) override;
private:
// Static callback function for the GSL ODE solver.
//
// Args:
// t: Time at which derivative will be evaluated.
// x: Array of length num_states() containing the state at which
// the derivative will be evaluated.
// dx: Array of length num_states() into which the derivative is stored.
// context: Pointer to the OdeSolver class containing the OdeSystem.
//
// Returns:
// GSL_SUCCESS if the derivative was calculated successfully,
// GSL_FAILURE if the time step was too large, or
// GSL_EBADFUNC if the integration should be aborted immediately.
static int32_t GslCallback(double t, const double x[], double dx[],
void *context);
// Solver parameters.
const SimOdeSolverParams ¶ms_;
// ODE to be solved.
const OdeSystem &ode_system_;
// Parameters for GSL.
gsl_odeiv2_system sys_;
gsl_odeiv2_driver *ode_driver_;
DISALLOW_COPY_AND_ASSIGN(GslOdeSolver);
};
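// A minimal usage sketch (illustrative only; "MyOdeSystem" stands for some
// OdeSystem implementation, and the parameter values are placeholders, not
// part of this header):
//
//   MyOdeSystem ode_system;
//   SimOdeSolverParams params{};
//   GslOdeSolver solver(ode_system, params);
//   std::vector<double> x0 = {1.0, 0.0}, x;
//   double t_int;
//   OdeSolverStatus status = solver.Integrate(0.0, 1.0, x0, &t_int, &x);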
} // namespace sim
#endif // SIM_MATH_ODE_SOLVER_GSL_H_
|
{-# OPTIONS --universe-polymorphism #-}
module Desc where
--********************************************
-- Prelude
--********************************************
-- Some preliminary stuff, to avoid relying on the stdlib
--****************
-- Universe polymorphism
--****************
data Level : Set where
zero : Level
suc : Level -> Level
{-# BUILTIN LEVEL Level #-}
{-# BUILTIN LEVELZERO zero #-}
{-# BUILTIN LEVELSUC suc #-}
max : Level -> Level -> Level
max zero m = m
max (suc n) zero = suc n
max (suc n) (suc m) = suc (max n m)
{-# BUILTIN LEVELMAX max #-}
data Lifted {l : Level} (A : Set l) : Set (suc l) where
lifter : A → Lifted A
lift : {i : Level} -> Set i -> Set (suc i)
lift x = Lifted x
unlift : {l : Level}{A : Set l} -> Lifted A -> A
unlift (lifter a) = a
--****************
-- Sigma and friends
--****************
data Sigma {i j : Level}(A : Set i) (B : A -> Set j) : Set (max i j) where
_,_ : (x : A) (y : B x) -> Sigma A B
pair : {i j : Level}{A : Set i}{B : A -> Set j} ->
(x : A) (y : B x) -> Sigma {i = i}{j = j} A B
pair x y = x , y
_*_ : {i j : Level}(A : Set i)(B : Set j) -> Set (max i j)
A * B = Sigma A \_ -> B
fst : {i j : Level}{A : Set i}{B : A -> Set j} -> Sigma A B -> A
fst (a , _) = a
snd : {i j : Level}{A : Set i}{B : A -> Set j} (p : Sigma A B) -> B (fst p)
snd (a , b) = b
data Zero {i : Level} : Set i where
data Unit {i : Level} : Set i where
Void : Unit
--****************
-- Sum and friends
--****************
data _+_ {i j : Level}(A : Set i)(B : Set j) : Set (max i j) where
l : A -> A + B
r : B -> A + B
--****************
-- Equality
--****************
data _==_ {l : Level}{A : Set l}(x : A) : A -> Set l where
refl : x == x
cong : {l m : Level}{A : Set l}{B : Set m}
(f : A -> B){x y : A} -> x == y -> f x == f y
cong f refl = refl
cong2 : {l m n : Level}{A : Set l}{B : Set m}{C : Set n}
(f : A -> B -> C){x y : A}{z t : B} ->
x == y -> z == t -> f x z == f y t
cong2 f refl refl = refl
trans : {l : Level}{A : Set l}{x y z : A} -> x == y -> y == z -> x == z
trans refl refl = refl
proof-lift-unlift-eq : {l : Level}{A : Set l}(x : Lifted A) -> lifter (unlift x) == x
proof-lift-unlift-eq (lifter a) = refl
postulate
reflFun : {l m : Level}{A : Set l}{B : A -> Set m}(f : (a : A) -> B a)(g : (a : A) -> B a)-> ((a : A) -> f a == g a) -> f == g
--********************************************
-- Desc code
--********************************************
-- In the paper, we have presented Desc as the grammar of inductive
-- types. Hence, the codes in the paper closely follow this
-- grammar:
data DescPaper : Set1 where
oneP : DescPaper
sigmaP : (S : Set) -> (S -> DescPaper) -> DescPaper
indx : DescPaper -> DescPaper
hindx : Set -> DescPaper -> DescPaper
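-- As a small illustration (our example, not from the paper's code), the
-- natural numbers arise in this grammar as a binary choice between a
-- nullary constructor and a single recursive argument:
natDescPaper : DescPaper
natDescPaper = sigmaP (Unit {zero} + Unit {zero}) natCase
  where
    natCase : Unit {zero} + Unit {zero} -> DescPaper
    natCase (l _) = oneP
    natCase (r _) = indx oneP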
-- We take advantage of this model to give you an alternative
-- presentation. This alternative model is the one implemented in
-- Epigram. It is also the one which inspired the code for indexed
-- descriptions.
-- With sigma, we are actually "quoting" a standard type-former,
-- namely:
-- |Sigma : (S : Set) -> (S -> Set) -> Set|
-- With:
-- |sigma : (S : Set) -> (S -> Desc) -> Desc|
-- In the alternative presentation, we go further and present all our
-- codes as quotations of standard type-formers:
data Desc {l : Level} : Set (suc l) where
id : Desc
const : Set l -> Desc
prod : Desc -> Desc -> Desc
sigma : (S : Set l) -> (S -> Desc) -> Desc
pi : (S : Set l) -> (S -> Desc) -> Desc
-- Note that we replace |oneP| by a more general |const| code. Whereas
-- |oneP| was interpreted as the unit set, |const K| is
-- interpreted as |K|, for any |K : Set|. Extensionally,
-- |const K| and |sigma K (\_ -> Unit)| are equivalent. However,
-- |const| is *first-order*, unlike its equivalent encoding. From a
-- definitional perspective, we are giving more opportunities to the
-- type-system, hence reducing the burden on the programmer. For the same
-- reason, we introduce |prod| that overlaps with |pi|.
-- This reorganisation is strictly equivalent to the |DescPaper|. For
-- instance, we can encode |indx| and |hindx| using the following
-- code:
indx2 : {l : Level} -> Desc {l = l} -> Desc {l = l}
indx2 D = prod id D
hindx2 : Set -> Desc -> Desc
hindx2 H D = prod (pi H (\_ -> id)) D
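-- For the record (a sketch of ours, not from the paper): |oneP| is
-- recovered as |const Unit|, and the |sigma|-encoding of |const|
-- mentioned above is spelt out as follows.
oneP2 : {l : Level} -> Desc {l = l}
oneP2 = const Unit

constAsSigma : {l : Level} -> Set l -> Desc {l = l}
constAsSigma K = sigma K (\_ -> const Unit)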
--********************************************
-- Desc interpretation
--********************************************
[|_|]_ : {l : Level} -> Desc -> Set l -> Set l
[| id |] Z = Z
[| const X |] Z = X
[| prod D D' |] Z = [| D |] Z * [| D' |] Z
[| sigma S T |] Z = Sigma S (\s -> [| T s |] Z)
[| pi S T |] Z = (s : S) -> [| T s |] Z
--********************************************
-- Fixpoint construction
--********************************************
data Mu {l : Level}(D : Desc {l = l}) : Set l where
con : [| D |] (Mu D) -> Mu D
--********************************************
-- Predicate: All
--********************************************
All : {l : Level}(D : Desc)(X : Set)(P : X -> Set l) -> [| D |] X -> Set l
All id X P x = P x
All (const Z) X P x = Unit
All (prod D D') X P (d , d') = (All D X P d) * (All D' X P d')
All (sigma S T) X P (a , b) = All (T a) X P b
All (pi S T) X P f = (s : S) -> All (T s) X P (f s)
all : {l : Level}(D : Desc)(X : Set)(P : X -> Set l)(R : (x : X) -> P x)(x : [| D |] X) -> All D X P x
all id X P R x = R x
all (const Z) X P R z = Void
all (prod D D') X P R (d , d') = all D X P R d , all D' X P R d'
all (sigma S T) X P R (a , b) = all (T a) X P R b
all (pi S T) X P R f = \ s -> all (T s) X P R (f s)
--********************************************
-- Map
--********************************************
-- This one is bonus: one could rightfully expect our so-called
-- functors to have a morphism part! Here it is.
map : {l : Level}(D : Desc)(X Y : Set l)(f : X -> Y)(v : [| D |] X) -> [| D |] Y
map id X Y sig x = sig x
map (const Z) X Y sig z = z
map (prod D D') X Y sig (d , d') = map D X Y sig d , map D' X Y sig d'
map (sigma S T) X Y sig (a , b) = (a , map (T a) X Y sig b)
map (pi S T) X Y sig f = \x -> map (T x) X Y sig (f x)
-- Together with the proof that they respect the functor laws:
-- map id = id
proof-map-id : {l : Level}(D : Desc)(X : Set l)(v : [| D |] X) -> map D X X (\x -> x) v == v
proof-map-id id X v = refl
proof-map-id (const Z) X v = refl
proof-map-id (prod D D') X (v , v') = cong2 (\x y -> (x , y)) (proof-map-id D X v) (proof-map-id D' X v')
proof-map-id (sigma S T) X (a , b) = cong (\x -> (a , x)) (proof-map-id (T a) X b)
proof-map-id (pi S T) X f = reflFun (\a -> map (T a) X X (\x -> x) (f a)) f (\a -> proof-map-id (T a) X (f a))
-- map (f . g) = map f . map g
proof-map-compos : {l : Level}(D : Desc)(X Y Z : Set l)
(f : X -> Y)(g : Y -> Z)
(v : [| D |] X) ->
map D X Z (\x -> g (f x)) v == map D Y Z g (map D X Y f v)
proof-map-compos id X Y Z f g v = refl
proof-map-compos (const K) X Y Z f g v = refl
proof-map-compos (prod D D') X Y Z f g (v , v') = cong2 (\x y -> (x , y))
(proof-map-compos D X Y Z f g v)
(proof-map-compos D' X Y Z f g v')
proof-map-compos (sigma S T) X Y Z f g (a , b) = cong (\x -> (a , x)) (proof-map-compos (T a) X Y Z f g b)
proof-map-compos (pi S T) X Y Z f g fc = reflFun (\a -> map (T a) X Z (\x -> g (f x)) (fc a))
(\a -> map (T a) Y Z g (map (T a) X Y f (fc a)))
(\a -> proof-map-compos (T a) X Y Z f g (fc a))
--********************************************
-- Elimination principle: induction
--********************************************
-- One would like to write the following:
{-
ind : {l : Level}
(D : Desc)
(P : Mu D -> Set l) ->
( (x : [| D |] (Mu D)) ->
All D (Mu D) P x -> P (con x)) ->
(v : Mu D) ->
P v
ind D P ms (con xs) = ms xs (all D (Mu D) P (\x -> ind D P ms x) xs)
-}
-- But the termination checker is unhappy.
-- So we write the following:
module Elim {l : Level}
(D : Desc)
(P : Mu D -> Set l)
(ms : (x : [| D |] (Mu D)) ->
All D (Mu D) P x -> P (con x))
where
mutual
ind : (x : Mu D) -> P x
ind (con xs) = ms xs (hyps D xs)
hyps : (D' : Desc)
(xs : [| D' |] (Mu D)) ->
All D' (Mu D) P xs
hyps id x = ind x
hyps (const Z) z = Void
hyps (prod D D') (d , d') = hyps D d , hyps D' d'
hyps (sigma S T) (a , b) = hyps (T a) b
hyps (pi S T) f = \s -> hyps (T s) (f s)
ind : {l : Level}
(D : Desc)
(P : Mu D -> Set l) ->
( (x : [| D |] (Mu D)) ->
All D (Mu D) P x -> P (con x)) ->
(v : Mu D) ->
P v
ind D P ms x = Elim.ind D P ms x
--********************************************
-- Examples
--********************************************
--****************
-- Nat
--****************
data NatConst : Set where
Ze : NatConst
Su : NatConst
natCases : NatConst -> Desc
natCases Ze = const Unit
natCases Su = id
NatD : Desc
NatD = sigma NatConst natCases
Nat : Set
Nat = Mu NatD
ze : Nat
ze = con (Ze , Void)
su : Nat -> Nat
su n = con (Su , n)
-- Now we can get addition for example:
plusCase : (xs : [| NatD |] Nat) ->
All NatD Nat (\_ -> Nat -> Nat) xs -> Nat -> Nat
plusCase ( Ze , Void ) hs y = y
plusCase ( Su , n ) hs y = su (hs y)
plus : Nat -> Nat -> Nat
plus x = ind NatD (\ _ -> (Nat -> Nat)) plusCase x
-- Do this in Epigram and you will see that, with a bit of
-- elaboration, this is *not* hieroglyphic.
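-- A quick sanity check, using only the definitions above:
two : Nat
two = plus (su ze) (su ze)
-- |two| normalises to |su (su ze)|.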
--****************
-- List
--****************
data ListConst : Set where
Nil : ListConst
Cons : ListConst
listCases : Set -> ListConst -> Desc
listCases X Nil = const Unit
listCases X Cons = sigma X (\_ -> id)
ListD : Set -> Desc
ListD X = sigma ListConst (listCases X)
List : Set -> Set
List X = Mu (ListD X)
nil : {X : Set} -> List X
nil = con ( Nil , Void )
cons : {X : Set} -> X -> List X -> List X
cons x t = con ( Cons , ( x , t ))
--****************
-- Tree
--****************
data TreeConst : Set where
Leaf : TreeConst
Node : TreeConst
treeCases : Set -> TreeConst -> Desc
treeCases X Leaf = const Unit
treeCases X Node = sigma X (\_ -> prod id id)
TreeD : Set -> Desc
TreeD X = sigma TreeConst (treeCases X)
Tree : Set -> Set
Tree X = Mu (TreeD X)
leaf : {X : Set} -> Tree X
leaf = con (Leaf , Void)
node : {X : Set} -> X -> Tree X -> Tree X -> Tree X
node x le ri = con (Node , (x , (le , ri)))
--********************************************
-- Finite sets
--********************************************
-- If we weren't such big fans of levitating things, we would
-- implement finite sets with:
{-
data En : Set where
nE : En
cE : En -> En
spi : (e : En)(P : EnumT e -> Set) -> Set
spi nE P = Unit
spi (cE e) P = P EZe * spi e (\e -> P (ESu e))
switch : (e : En)(P : EnumT e -> Set)(b : spi e P)(x : EnumT e) -> P x
switch nE P b ()
switch (cE e) P b EZe = fst b
switch (cE e) P b (ESu n) = switch e (\e -> P (ESu e)) (snd b) n
-}
-- But no, we make it fly in Desc:
--****************
-- En
--****************
-- As we have no tags here, we use Nat instead of List.
EnD : Desc
EnD = NatD
En : Set
En = Nat
nE : En
nE = ze
cE : En -> En
cE e = su e
--****************
-- EnumT
--****************
-- Because I don't want to fall back on wacky unicode symbols, I will
-- write EnumT for #, EZe for 0, and ESu for 1+. Sorry about that.
data EnumT : (e : En) -> Set where
EZe : {e : En} -> EnumT (cE e)
ESu : {e : En} -> EnumT e -> EnumT (cE e)
--****************
-- Small Pi
--****************
-- This corresponds to the small pi |\pi|.
casesSpi : {l : Level}(xs : [| EnD |] En) ->
All EnD En (\e -> (EnumT e -> Set l) -> Set l) xs ->
(EnumT (con xs) -> Set l) -> Set l
casesSpi (Ze , Void) hs P' = Unit
casesSpi (Su , n) hs P' = P' EZe * hs (\e -> P' (ESu e))
spi : {l : Level}(e : En)(P : EnumT e -> Set l) -> Set l
spi {x} e P = ind EnD (\E -> (EnumT E -> Set x) -> Set x) casesSpi e P
--****************
-- Switch
--****************
casesSwitch : {l : Level}
(xs : [| EnD |] En) ->
All EnD En (\e -> (P' : EnumT e -> Set l)
(b' : spi e P')
(x' : EnumT e) -> P' x') xs ->
(P' : EnumT (con xs) -> Set l)
(b' : spi (con xs) P')
(x' : EnumT (con xs)) -> P' x'
casesSwitch (Ze , Void) hs P' b' ()
casesSwitch (Su , n) hs P' b' EZe = fst b'
casesSwitch (Su , n) hs P' b' (ESu e') = hs (\e -> P' (ESu e)) (snd b') e'
switch : {l : Level}
(e : En)
(P : EnumT e -> Set l)
(b : spi e P)
(x : EnumT e) -> P x
switch {x} e P b xs = ind EnD
(\e -> (P : EnumT e -> Set x)
(b : spi e P)
(xs : EnumT e) -> P xs)
casesSwitch e P b xs
--****************
-- Desc
--****************
-- In the following, we implement Desc in itself. As usual, we have a
-- finite set of constructors -- the name of the codes. Note that we
-- could really define these as a finite set built above. However, in
-- Agda, it's horribly verbose. For the sake of clarity, we won't do
-- that here.
data DescDef : Set1 where
DescId : DescDef
DescConst : DescDef
DescProd : DescDef
DescSigma : DescDef
DescPi : DescDef
-- We slightly diverge here from the presentation of the paper: note
-- the presence of terminating "const Unit". Recall our Lisp-ish
-- notation for nested tuples:
-- |[a b c]|
-- Corresponds to
-- |[a , [ b , [c , []]]]|
-- So, if we want to write constructors using our Lisp-ish notation, the interpretation
-- [| DescD |] (Mu DescD) has to evaluate to [ constructor , [ arg1 , [ arg2 , []]]]
-- Hence, we define Desc's code as follows:
descCases : DescDef -> Desc
descCases DescId = const Unit
descCases DescConst = sigma Set (\_ -> const Unit)
descCases DescProd = prod id (prod id (const Unit))
descCases DescSigma = sigma Set (\S -> prod (pi (lift S) (\_ -> id)) (const Unit))
descCases DescPi = sigma Set (\S -> prod (pi (lift S) (\_ -> id)) (const Unit))
DescD : Desc
DescD = sigma DescDef descCases
DescIn : Set1
DescIn = Mu DescD
-- So that the constructors are:
-- (Note the annoying |pair|s to set the implicit levels. I could not
-- get rid of the yellow otherwise)
idIn : DescIn
idIn = con (pair {i = suc zero} {j = suc zero} DescId Void)
constIn : Set -> DescIn
constIn K = con (pair {i = suc zero} {j = suc zero} DescConst (K , Void))
prodIn : (D D' : DescIn) -> DescIn
prodIn D D' = con (pair {i = suc zero} {j = suc zero} DescProd (D , ( D' , Void )))
sigmaIn : (S : Set)(D : S -> DescIn) -> DescIn
sigmaIn S D = con (pair {i = suc zero} {j = suc zero} DescSigma (S , ((\s -> D (unlift s)) , Void )))
piIn : (S : Set)(D : S -> DescIn) -> DescIn
piIn S D = con (pair {i = suc zero} {j = suc zero} DescPi (S , ((\s -> D (unlift s)) , Void )))
-- At this stage, we could prove the isomorphism between |DescIn| and
-- |Desc|. While not technically difficult, it is long and
-- laborious. We have carried this proof on the more complex and
-- interesting |IDesc| universe, in IDesc.agda.
--********************************************
-- Tagged description
--********************************************
TagDesc : {l : Level} -> Set (suc l)
TagDesc = Sigma En (\e -> spi e (\_ -> Desc))
de : TagDesc -> Desc
de (B , F) = sigma (EnumT B) (\E -> switch B (\_ -> Desc) F E)
--********************************************
-- Catamorphism
--********************************************
cata : (D : Desc)
(T : Set) ->
([| D |] T -> T) ->
(Mu D) -> T
cata D T phi x = ind D (\_ -> T) (\x ms -> phi (replace D T x ms)) x
where replace : (D' : Desc)(T : Set)(xs : [| D' |] (Mu D))(ms : All D' (Mu D) (\_ -> T) xs) -> [| D' |] T
replace id T x y = y
replace (const Z) T z z' = z
replace (prod D D') T (x , x') (y , y') = replace D T x y , replace D' T x' y'
replace (sigma A B) T (a , b) t = a , replace (B a) T b t
replace (pi A B) T f t = \s -> replace (B s) T (f s) (t s)
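-- As a usage sketch (relying only on the |Nat| encoding above), we can
-- double a number by replacing each successor with two:
doubleAlg : [| NatD |] Nat -> Nat
doubleAlg (Ze , Void) = ze
doubleAlg (Su , n) = su (su n)
double : Nat -> Nat
double = cata NatD Nat doubleAlg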
--********************************************
-- Free monad construction
--********************************************
_**_ : TagDesc -> (X : Set) -> TagDesc
(e , D) ** X = cE e , (const X , D)
--********************************************
-- Substitution
--********************************************
apply : (D : TagDesc)(X Y : Set) ->
(X -> Mu (de (D ** Y))) ->
[| de (D ** X) |] (Mu (de (D ** Y))) ->
Mu (de (D ** Y))
apply (E , B) X Y sig (EZe , x) = sig x
apply (E , B) X Y sig (ESu n , t) = con (ESu n , t)
subst : (D : TagDesc)(X Y : Set) ->
Mu (de (D ** X)) ->
(X -> Mu (de (D ** Y))) ->
Mu (de (D ** Y))
subst D X Y x sig = cata (de (D ** X)) (Mu (de (D ** Y))) (apply D X Y sig) x
|
/-
Copyright (c) 2018 Patrick Massot. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Patrick Massot, Johannes Hölzl
-/
import algebra.ring.prod
import ring_theory.ideal.quotient
import ring_theory.subring.basic
import topology.algebra.group
/-!
# Topological (semi)rings
A topological (semi)ring is a (semi)ring equipped with a topology such that all operations are
continuous. Besides this definition, this file proves that the topological closure of a subring
(resp. an ideal) is a subring (resp. an ideal) and defines products and quotients
of topological (semi)rings.
## Main Results
- `subring.topological_closure`/`subsemiring.topological_closure`: the topological closure of a
`subring`/`subsemiring` is itself a `sub(semi)ring`.
- `prod.topological_semiring`/`prod.topological_ring`: The product of two topological
(semi)rings.
- `pi.topological_semiring`/`pi.topological_ring`: The arbitrary product of topological
(semi)rings.
- `ideal.closure`: The closure of an ideal is an ideal.
- `topological_ring_quotient`: The quotient of a topological ring by an ideal is a
topological ring.
-/
open classical set filter topological_space function
open_locale classical topological_space filter
section topological_semiring
variables (α : Type*)
/-- A topological semiring is a semiring `R` where addition and multiplication are continuous.
We allow for non-unital and non-associative semirings as well.
The `topological_semiring` class should *only* be instantiated in the presence of a
`non_unital_non_assoc_semiring` instance; if there is an instance of `non_unital_non_assoc_ring`,
then `topological_ring` should be used. Note: in the presence of `non_assoc_ring`, these classes are
mathematically equivalent (see `topological_semiring.has_continuous_neg_of_mul` or
`topological_semiring.to_topological_ring`). -/
class topological_semiring [topological_space α] [non_unital_non_assoc_semiring α]
extends has_continuous_add α, has_continuous_mul α : Prop
/-- A topological ring is a ring `R` where addition, multiplication and negation are continuous.
If `R` is a (unital) ring, then continuity of negation can be derived from continuity of
multiplication as it is multiplication with `-1`. (See
`topological_semiring.has_continuous_neg_of_mul` and
`topological_semiring.to_topological_add_group`) -/
class topological_ring [topological_space α] [non_unital_non_assoc_ring α]
extends topological_semiring α, has_continuous_neg α : Prop
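/- A quick sanity check, using only the standard mathlib lemmas
`continuous_const`, `continuous_id`, `continuous.mul` and `continuous.add`:
in a topological ring, polynomial maps such as `λ y, x * y + 1` are
continuous. -/
example {R : Type*} [topological_space R] [ring R] [topological_ring R] (x : R) :
  continuous (λ y : R, x * y + 1) :=
(continuous_const.mul continuous_id : continuous (λ y : R, x * y)).add continuous_const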
variables {α}
/-- If `R` is a ring with a continuous multiplication, then negation is continuous as well since it
is just multiplication with `-1`. -/
lemma topological_semiring.has_continuous_neg_of_mul [topological_space α] [non_assoc_ring α]
[has_continuous_mul α] : has_continuous_neg α :=
{ continuous_neg :=
by simpa using (continuous_const.mul continuous_id : continuous (λ x : α, (-1) * x)) }
/-- If `R` is a ring which is a topological semiring, then it is automatically a topological
ring. This exists so that one can place a topological ring structure on `R` without explicitly
proving `continuous_neg`. -/
lemma topological_semiring.to_topological_ring [topological_space α] [non_assoc_ring α]
(h : topological_semiring α) : topological_ring α :=
{ ..h,
..(by { haveI := h.to_has_continuous_mul,
exact topological_semiring.has_continuous_neg_of_mul } : has_continuous_neg α) }
@[priority 100] -- See note [lower instance priority]
instance topological_ring.to_topological_add_group [non_unital_non_assoc_ring α]
[topological_space α] [topological_ring α] : topological_add_group α :=
{ ..topological_ring.to_topological_semiring.to_has_continuous_add,
..topological_ring.to_has_continuous_neg }
@[priority 50]
instance discrete_topology.topological_semiring [topological_space α]
[non_unital_non_assoc_semiring α] [discrete_topology α] : topological_semiring α := ⟨⟩
@[priority 50]
instance discrete_topology.topological_ring [topological_space α]
[non_unital_non_assoc_ring α] [discrete_topology α] : topological_ring α := ⟨⟩
section
variables [topological_space α] [semiring α] [topological_semiring α]
namespace subsemiring
instance (S : subsemiring α) :
topological_semiring S :=
{ ..S.to_submonoid.has_continuous_mul,
..S.to_add_submonoid.has_continuous_add }
end subsemiring
/-- The (topological-space) closure of a subsemiring of a topological semiring is
itself a subsemiring. -/
def subsemiring.topological_closure (s : subsemiring α) : subsemiring α :=
{ carrier := closure (s : set α),
..(s.to_submonoid.topological_closure),
..(s.to_add_submonoid.topological_closure ) }
@[simp] lemma subsemiring.topological_closure_coe (s : subsemiring α) :
(s.topological_closure : set α) = closure (s : set α) :=
rfl
instance subsemiring.topological_closure_topological_semiring (s : subsemiring α) :
topological_semiring (s.topological_closure) :=
{ ..s.to_add_submonoid.topological_closure_has_continuous_add,
..s.to_submonoid.topological_closure_has_continuous_mul }
lemma subsemiring.subring_topological_closure (s : subsemiring α) :
s ≤ s.topological_closure :=
subset_closure
lemma subsemiring.is_closed_topological_closure (s : subsemiring α) :
is_closed (s.topological_closure : set α) :=
by convert is_closed_closure
lemma subsemiring.topological_closure_minimal
(s : subsemiring α) {t : subsemiring α} (h : s ≤ t) (ht : is_closed (t : set α)) :
s.topological_closure ≤ t :=
closure_minimal h ht
/-- If a subsemiring of a topological semiring is commutative, then so is its
topological closure. -/
def subsemiring.comm_semiring_topological_closure [t2_space α] (s : subsemiring α)
(hs : ∀ (x y : s), x * y = y * x) : comm_semiring s.topological_closure :=
{ ..s.topological_closure.to_semiring,
..s.to_submonoid.comm_monoid_topological_closure hs }
end
section
variables {β : Type*} [topological_space α] [topological_space β]
/-- The product topology on the cartesian product of two topological semirings
makes the product into a topological semiring. -/
instance [non_unital_non_assoc_semiring α] [non_unital_non_assoc_semiring β]
[topological_semiring α] [topological_semiring β] : topological_semiring (α × β) := {}
/-- The product topology on the cartesian product of two topological rings
makes the product into a topological ring. -/
instance [non_unital_non_assoc_ring α] [non_unital_non_assoc_ring β]
[topological_ring α] [topological_ring β] : topological_ring (α × β) := {}
end
instance {β : Type*} {C : β → Type*} [∀ b, topological_space (C b)]
[Π b, non_unital_non_assoc_semiring (C b)]
[Π b, topological_semiring (C b)] : topological_semiring (Π b, C b) := {}
instance {β : Type*} {C : β → Type*} [∀ b, topological_space (C b)]
[Π b, non_unital_non_assoc_ring (C b)]
[Π b, topological_ring (C b)] : topological_ring (Π b, C b) := {}
section mul_opposite
open mul_opposite
instance [non_unital_non_assoc_semiring α] [topological_space α] [has_continuous_add α] :
has_continuous_add αᵐᵒᵖ :=
{ continuous_add := continuous_induced_rng $ (@continuous_add α _ _ _).comp
(continuous_unop.prod_map continuous_unop) }
instance [non_unital_non_assoc_semiring α] [topological_space α] [topological_semiring α] :
topological_semiring αᵐᵒᵖ := {}
instance [non_unital_non_assoc_ring α] [topological_space α] [has_continuous_neg α] :
has_continuous_neg αᵐᵒᵖ :=
{ continuous_neg := continuous_induced_rng $ (@continuous_neg α _ _ _).comp continuous_unop }
instance [non_unital_non_assoc_ring α] [topological_space α] [topological_ring α] :
topological_ring αᵐᵒᵖ := {}
end mul_opposite
section add_opposite
open add_opposite
instance [non_unital_non_assoc_semiring α] [topological_space α] [has_continuous_mul α] :
has_continuous_mul αᵃᵒᵖ :=
{ continuous_mul := by convert
(continuous_op.comp $ (@continuous_mul α _ _ _).comp $ continuous_unop.prod_map continuous_unop) }
instance [non_unital_non_assoc_semiring α] [topological_space α] [topological_semiring α] :
topological_semiring αᵃᵒᵖ := {}
instance [non_unital_non_assoc_ring α] [topological_space α] [topological_ring α] :
topological_ring αᵃᵒᵖ := {}
end add_opposite
section
variables {R : Type*} [non_unital_non_assoc_ring R] [topological_space R]
lemma topological_ring.of_add_group_of_nhds_zero [topological_add_group R]
(hmul : tendsto (uncurry ((*) : R → R → R)) ((𝓝 0) ×ᶠ (𝓝 0)) $ 𝓝 0)
(hmul_left : ∀ (x₀ : R), tendsto (λ x : R, x₀ * x) (𝓝 0) $ 𝓝 0)
(hmul_right : ∀ (x₀ : R), tendsto (λ x : R, x * x₀) (𝓝 0) $ 𝓝 0) : topological_ring R :=
begin
refine {..‹topological_add_group R›, ..},
have hleft : ∀ x₀ : R, 𝓝 x₀ = map (λ x, x₀ + x) (𝓝 0), by simp,
have hadd : tendsto (uncurry ((+) : R → R → R)) ((𝓝 0) ×ᶠ (𝓝 0)) (𝓝 0),
{ rw ← nhds_prod_eq,
convert continuous_add.tendsto ((0 : R), (0 : R)),
rw zero_add },
rw continuous_iff_continuous_at,
rintro ⟨x₀, y₀⟩,
rw [continuous_at, nhds_prod_eq, hleft x₀, hleft y₀, hleft (x₀*y₀), filter.prod_map_map_eq,
tendsto_map'_iff],
suffices :
tendsto ((λ (x : R), x + x₀ * y₀) ∘ (λ (p : R × R), p.1 + p.2) ∘
(λ (p : R × R), (p.1*y₀ + x₀*p.2, p.1*p.2)))
((𝓝 0) ×ᶠ (𝓝 0)) (map (λ (x : R), x + x₀ * y₀) $ 𝓝 0),
{ convert this using 1,
{ ext, simp only [comp_app, mul_add, add_mul], abel },
{ simp only [add_comm] } },
refine tendsto_map.comp (hadd.comp (tendsto.prod_mk _ hmul)),
exact hadd.comp (((hmul_right y₀).comp tendsto_fst).prod_mk ((hmul_left x₀).comp tendsto_snd))
end
lemma topological_ring.of_nhds_zero
(hadd : tendsto (uncurry ((+) : R → R → R)) ((𝓝 0) ×ᶠ (𝓝 0)) $ 𝓝 0)
(hneg : tendsto (λ x, -x : R → R) (𝓝 0) (𝓝 0))
(hmul : tendsto (uncurry ((*) : R → R → R)) ((𝓝 0) ×ᶠ (𝓝 0)) $ 𝓝 0)
(hmul_left : ∀ (x₀ : R), tendsto (λ x : R, x₀ * x) (𝓝 0) $ 𝓝 0)
(hmul_right : ∀ (x₀ : R), tendsto (λ x : R, x * x₀) (𝓝 0) $ 𝓝 0)
(hleft : ∀ x₀ : R, 𝓝 x₀ = map (λ x, x₀ + x) (𝓝 0)) : topological_ring R :=
begin
haveI := topological_add_group.of_comm_of_nhds_zero hadd hneg hleft,
exact topological_ring.of_add_group_of_nhds_zero hmul hmul_left hmul_right
end
end
variables {α} [topological_space α]
section
variables [non_unital_non_assoc_ring α] [topological_ring α]
/-- In a topological ring, the left-multiplication `add_monoid_hom` is continuous. -/
lemma mul_left_continuous (x : α) : continuous (add_monoid_hom.mul_left x) :=
continuous_const.mul continuous_id
/-- In a topological ring, the right-multiplication `add_monoid_hom` is continuous. -/
lemma mul_right_continuous (x : α) : continuous (add_monoid_hom.mul_right x) :=
continuous_id.mul continuous_const
end
variables [ring α] [topological_ring α]
namespace subring
instance (S : subring α) :
topological_ring S :=
topological_semiring.to_topological_ring S.to_subsemiring.topological_semiring
end subring
/-- The (topological-space) closure of a subring of a topological ring is
itself a subring. -/
def subring.topological_closure (S : subring α) : subring α :=
{ carrier := closure (S : set α),
..S.to_submonoid.topological_closure,
..S.to_add_subgroup.topological_closure }
instance subring.topological_closure_topological_ring (s : subring α) :
topological_ring (s.topological_closure) :=
{ ..s.to_add_subgroup.topological_closure_topological_add_group,
..s.to_submonoid.topological_closure_has_continuous_mul }
lemma subring.subring_topological_closure (s : subring α) :
s ≤ s.topological_closure := subset_closure
lemma subring.is_closed_topological_closure (s : subring α) :
is_closed (s.topological_closure : set α) := by convert is_closed_closure
lemma subring.topological_closure_minimal
(s : subring α) {t : subring α} (h : s ≤ t) (ht : is_closed (t : set α)) :
s.topological_closure ≤ t := closure_minimal h ht
/-- If a subring of a topological ring is commutative, then so is its topological closure. -/
def subring.comm_ring_topological_closure [t2_space α] (s : subring α)
(hs : ∀ (x y : s), x * y = y * x) : comm_ring s.topological_closure :=
{ ..s.topological_closure.to_ring,
..s.to_submonoid.comm_monoid_topological_closure hs }
end topological_semiring
section topological_comm_ring
variables {α : Type*} [topological_space α] [comm_ring α] [topological_ring α]
/-- The closure of an ideal in a topological ring as an ideal. -/
def ideal.closure (S : ideal α) : ideal α :=
{ carrier := closure S,
smul_mem' := λ c x hx, map_mem_closure (mul_left_continuous _) hx $ λ a, S.mul_mem_left c,
..(add_submonoid.topological_closure S.to_add_submonoid) }
@[simp] lemma ideal.coe_closure (S : ideal α) : (S.closure : set α) = closure S := rfl
end topological_comm_ring
section topological_ring
variables {α : Type*} [topological_space α] [comm_ring α] (N : ideal α)
open ideal.quotient
instance topological_ring_quotient_topology : topological_space (α ⧸ N) :=
show topological_space (quotient _), by apply_instance
-- note for the reader: in the following, `mk` is `ideal.quotient.mk`, the canonical map `R → R/I`.
variable [topological_ring α]
lemma quotient_ring.is_open_map_coe : is_open_map (mk N) :=
begin
intros s s_op,
change is_open (mk N ⁻¹' (mk N '' s)),
rw quotient_ring_saturate,
exact is_open_Union (λ ⟨n, _⟩, is_open_map_add_left n s s_op)
end
lemma quotient_ring.quotient_map_coe_coe : quotient_map (λ p : α × α, (mk N p.1, mk N p.2)) :=
is_open_map.to_quotient_map
((quotient_ring.is_open_map_coe N).prod (quotient_ring.is_open_map_coe N))
((continuous_quot_mk.comp continuous_fst).prod_mk (continuous_quot_mk.comp continuous_snd))
(by rintro ⟨⟨x⟩, ⟨y⟩⟩; exact ⟨(x, y), rfl⟩)
instance topological_ring_quotient : topological_ring (α ⧸ N) :=
topological_semiring.to_topological_ring
{ continuous_add :=
have cont : continuous (mk N ∘ (λ (p : α × α), p.fst + p.snd)) :=
continuous_quot_mk.comp continuous_add,
(quotient_map.continuous_iff (quotient_ring.quotient_map_coe_coe N)).mpr cont,
continuous_mul :=
have cont : continuous (mk N ∘ (λ (p : α × α), p.fst * p.snd)) :=
continuous_quot_mk.comp continuous_mul,
(quotient_map.continuous_iff (quotient_ring.quotient_map_coe_coe N)).mpr cont }
end topological_ring
/-!
### Lattice of ring topologies
We define a type class `ring_topology α` which endows a ring `α` with a topology such that all ring
operations are continuous.
Ring topologies on a fixed ring `α` are ordered, by reverse inclusion. They form a complete lattice,
with `⊥` the discrete topology and `⊤` the indiscrete topology.
Any function `f : α → β` induces `coinduced f : topological_space α → ring_topology β`. -/
universes u v
/-- A ring topology on a ring `α` is a topology for which addition, negation and multiplication
are continuous. -/
@[ext]
structure ring_topology (α : Type u) [ring α]
extends topological_space α, topological_ring α : Type u
namespace ring_topology
variables {α : Type*} [ring α]
instance inhabited {α : Type u} [ring α] : inhabited (ring_topology α) :=
⟨{to_topological_space := ⊤,
continuous_add := continuous_top,
continuous_mul := continuous_top,
continuous_neg := continuous_top}⟩
@[ext]
lemma ext' {f g : ring_topology α} (h : f.is_open = g.is_open) : f = g :=
by { ext, rw h }
/-- The ordering on ring topologies on the ring `α`.
`t ≤ s` if every set open in `s` is also open in `t` (`t` is finer than `s`). -/
instance : partial_order (ring_topology α) :=
partial_order.lift ring_topology.to_topological_space $ ext
local notation `cont` := @continuous _ _
private def def_Inf (S : set (ring_topology α)) : ring_topology α :=
let Inf_S' := Inf (to_topological_space '' S) in
{ to_topological_space := Inf_S',
continuous_add :=
begin
apply continuous_Inf_rng,
rintros _ ⟨⟨t, tr⟩, haS, rfl⟩, resetI,
have h := continuous_Inf_dom (set.mem_image_of_mem to_topological_space haS) continuous_id,
have h_continuous_id := @continuous.prod_map _ _ _ _ t t Inf_S' Inf_S' _ _ h h,
exact @continuous.comp _ _ _ (id _) (id _) t _ _ continuous_add h_continuous_id,
end,
continuous_mul :=
begin
apply continuous_Inf_rng,
rintros _ ⟨⟨t, tr⟩, haS, rfl⟩, resetI,
have h := continuous_Inf_dom (set.mem_image_of_mem to_topological_space haS) continuous_id,
have h_continuous_id := @continuous.prod_map _ _ _ _ t t Inf_S' Inf_S' _ _ h h,
exact @continuous.comp _ _ _ (id _) (id _) t _ _ continuous_mul h_continuous_id,
end,
continuous_neg :=
begin
apply continuous_Inf_rng,
rintros _ ⟨⟨t, tr⟩, haS, rfl⟩, resetI,
have h := continuous_Inf_dom (set.mem_image_of_mem to_topological_space haS) continuous_id,
exact @continuous.comp _ _ _ (id _) (id _) t _ _ continuous_neg h,
end }
/-- Ring topologies on `α` form a complete lattice, with `⊥` the discrete topology and `⊤` the
indiscrete topology.
The infimum of a collection of ring topologies is the topology generated by all their open sets
(which is a ring topology).
The supremum of two ring topologies `s` and `t` is the infimum of the family of all ring topologies
contained in the intersection of `s` and `t`. -/
instance : complete_semilattice_Inf (ring_topology α) :=
{ Inf := def_Inf,
Inf_le := λ S a haS, by { apply topological_space.complete_lattice.Inf_le, use [a, ⟨ haS, rfl⟩] },
le_Inf :=
begin
intros S a hab,
apply topological_space.complete_lattice.le_Inf,
rintros _ ⟨b, hbS, rfl⟩,
exact hab b hbS,
end,
..ring_topology.partial_order }
instance : complete_lattice (ring_topology α) :=
complete_lattice_of_complete_semilattice_Inf _
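-- A quick sanity check: the lattice structure makes the standard order
-- lemmas available, e.g. `⊥` is the least ring topology.
example (t : ring_topology α) : ⊥ ≤ t := bot_le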
/-- Given `f : α → β` and a topology on `α`, the coinduced ring topology on `β` is the finest
topology such that `f` is continuous and `β` is a topological ring. -/
def coinduced {α β : Type*} [t : topological_space α] [ring β] (f : α → β) :
ring_topology β :=
Inf {b : ring_topology β | (topological_space.coinduced f t) ≤ b.to_topological_space}
lemma coinduced_continuous {α β : Type*} [t : topological_space α] [ring β] (f : α → β) :
cont t (coinduced f).to_topological_space f :=
begin
rw continuous_iff_coinduced_le,
refine le_Inf _,
rintros _ ⟨t', ht', rfl⟩,
exact ht',
end
/-- The forgetful functor from ring topologies on `α` to additive group topologies on `α`. -/
def to_add_group_topology (t : ring_topology α) : add_group_topology α :=
{ to_topological_space := t.to_topological_space,
to_topological_add_group := @topological_ring.to_topological_add_group _ _ t.to_topological_space
t.to_topological_ring }
/-- The order embedding from ring topologies on `α` to additive group topologies on `α`. -/
def to_add_group_topology.order_embedding : order_embedding (ring_topology α)
(add_group_topology α) :=
{ to_fun := λ t, t.to_add_group_topology,
inj' :=
begin
intros t₁ t₂ h_eq,
dsimp only at h_eq,
ext,
have h_t₁ : t₁.to_topological_space = t₁.to_add_group_topology.to_topological_space := rfl,
rw [h_t₁, h_eq],
refl,
end,
map_rel_iff' :=
begin
intros t₁ t₂,
rw [embedding.coe_fn_mk],
have h_le : t₁ ≤ t₂ ↔ t₁.to_topological_space ≤ t₂.to_topological_space := by refl,
rw h_le,
refl,
end }
end ring_topology
|
Measure K of the November 2006 Election decided the fate of Second Street Crossing, a proposed shopping center that would include a Target.
The certified election returns show that the measure passed with 51.5% of the vote (11,761 votes for, and 11,087 votes against).
ProView: http://targetindavis.com
ConView: http://dontbigboxdavis.org
According to campaign spending filings through October 26, 2006, Target Corp. had spent $269,795 in support of Measure K. Dont BigBox Davis had spent $20,453 opposing Measure K. Footnote(St. John, Claire. http://www.davisenterprise.com/articles/2006/10/31/news/061new1.txt Target spends $269K on campaign. The Davis Enterprise. 2006-10-31.)
Discussion: Target Debate
|
-- Andreas, 2017-10-04, ignore irrelevant arguments during with-abstraction
-- Feature request by xekoukou
{-# OPTIONS --allow-unsolved-metas --show-irrelevant #-}
-- {-# OPTIONS -v tc.abstract:100 #-}
open import Agda.Builtin.Equality
open import Agda.Builtin.Bool
not : Bool → Bool
not true = false
not false = true
but : Bool → .(Bool → Bool) → Bool
but true f = false
but false f = true
test : (x : Bool) → but x not ≡ true
test x with but x not' where
not' : Bool → Bool
not' true = false
not' false = true
test x | true = refl
test x | false = _ -- unsolved meta ok
|
EVB's office is right along the Warriors victory parade route, and in eager anticipation, my partner and I wanted to do something special for our favorite player: Draymond Green.
In commemoration of his infamous "Yup." interview with Roz at the 2015 victory parade, and his 2017 interview in which he expressed his desire to "annihilate" the Cavs, we created two banners that produced results not even we were expecting.
If it weren't for his buddies, Draymond might never have seen the banner.
Huge thanks to Scott Strazzante for snapping the shot that made it into the Chronicle.
|
[STATEMENT]
lemma (in ab_group_add) eq_add_iff: "x = x + y \<longleftrightarrow> y = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (x = x + y) = (y = (0::'a))
[PROOF STEP]
using add_left_imp_eq[of x y 0]
[PROOF STATE]
proof (prove)
using this:
x + y = x + (0::'a) \<Longrightarrow> y = (0::'a)
goal (1 subgoal):
1. (x = x + y) = (y = (0::'a))
[PROOF STEP]
by auto
|
# This doesn't actually put anything into the type domain by itself;
# it transforms a number into a bitstype value (or a tuple of them) that can hold it.
# An earlier version of make_number_from_tuple:
#function make_number_from_tuple( n::Tuple ; base = 1000 )
#  return foldl( (a,b)-> a*base+b, big(0), n )
#end
function make_number_from_tuple( n::Tuple ; base = 1000 )
# BigInts to ensure known return type
fn(a::BigInt,b) = BigInt( a*base+b )
return foldl( fn, big(0), n )
end
function make_tuple_from_number( n; last_digits = tuple(), base = 1000 )
if -base < n < base
return ( oftype(base,n),last_digits...)
end
next_n, next_digit = fldmod(n,base)
return make_tuple_from_number( next_n; last_digits = (oftype( base, next_digit ),last_digits...), base = base )
end
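# For example:
#   make_tuple_from_number(1234567; base = 1000)       # -> (1, 234, 567)
#   make_number_from_tuple((1, 234, 567); base = 1000) # -> 1234567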
# for testing:
function tuple_number_round_trip( n ; base = 1000 )
t = make_tuple_from_number( n; base=base )
n2 = make_number_from_tuple( t; base=base )
@assert( n == n2 )
return n2
end
function to_type_constant( n::Integer )
if Int == Int32 && typemin(Int32) <= n <= typemax(Int32)
# only for machines where Int is Int32
return Int32(n)
elseif typemin(Int64) <= n <= typemax(Int64)
return Int64(n)
elseif typemin(Int128) <= n <= typemax(Int128)
return Int128(n)
else
return make_tuple_from_number(n )
end
end
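# E.g. to_type_constant(big(2)^200) returns a digit tuple; tuples of Ints are
# isbits, so the result can be used where a plain bitstype constant is needed,
# and from_type_constant recovers the original BigInt.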
function from_type_constant( n::Integer )
return BigInt(n)
end
function from_type_constant( n::Tuple )
return make_number_from_tuple( n )
end
|
1890
|
module module_mosaic_cloudchem
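! indices of the bulk cloud-water aerosol species used throughout this
! module (they index the rbulk_cwaer arrays in the routines below)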
integer, parameter :: l_so4_aqyy = 1
integer, parameter :: l_no3_aqyy = 2
integer, parameter :: l_cl_aqyy = 3
integer, parameter :: l_nh4_aqyy = 4
integer, parameter :: l_na_aqyy = 5
integer, parameter :: l_oin_aqyy = 6
integer, parameter :: l_bc_aqyy = 7
integer, parameter :: l_oc_aqyy = 8
integer, parameter :: nyyy = 8
contains
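!-----------------------------------------------------------------------
! mosaic_cloudchem_driver - for each grid cell whose cloud water exceeds
! qcldwtr_cutoff, maps the host-model species into the mosaic working
! arrays, runs the single-cell aqueous chemistry, and maps results back
!-----------------------------------------------------------------------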
subroutine mosaic_cloudchem_driver( &
id, ktau, ktauc, dtstepc, config_flags, &
p_phy, t_phy, rho_phy, alt, &
cldfra, ph_no2, &
moist, chem, &
gas_aqfrac, numgas_aqfrac, &
ids,ide, jds,jde, kds,kde, &
ims,ime, jms,jme, kms,kme, &
its,ite, jts,jte, kts,kte )
use module_state_description, only: &
num_moist, num_chem, p_qc
use module_configure, only: grid_config_rec_type
use module_data_mosaic_asect, only: cw_phase, nphase_aer
use module_data_mosaic_other, only: k_pegbegin, name
use module_mosaic_driver, only: mapaer_tofrom_host
implicit none
integer, intent(in) :: &
id, ktau, ktauc, &
numgas_aqfrac, &
ids, ide, jds, jde, kds, kde, &
ims, ime, jms, jme, kms, kme, &
its, ite, jts, jte, kts, kte
type(grid_config_rec_type), intent(in) :: config_flags
real, intent(in) :: &
dtstepc
real, intent(in), &
dimension( ims:ime, kms:kme, jms:jme ) :: &
p_phy, t_phy, rho_phy, alt, cldfra, ph_no2
real, intent(in), &
dimension( ims:ime, kms:kme, jms:jme, 1:num_moist ) :: &
moist
real, intent(inout), &
dimension( ims:ime, kms:kme, jms:jme, 1:num_chem ) :: &
chem
real, intent(inout), &
dimension( ims:ime, kms:kme, jms:jme, numgas_aqfrac ) :: &
gas_aqfrac
integer :: it, jt, kt, kpeg, k_pegshift, l, mpeg
integer :: icase
integer :: igaschem_onoff, iphotol_onoff, iradical_onoff
real :: gas_aqfrac_box(numgas_aqfrac)
real :: ph_aq_box
real, parameter :: qcldwtr_cutoff = 1.0e-6
real :: qcldwtr
if ((cw_phase .le. 0) .or. (cw_phase .gt. nphase_aer)) then
print *, '*** mosaic_cloudchem_driver - cw_phase not active'
return
end if
icase = 0
iphotol_onoff = 0
if (config_flags%phot_opt .gt. 0) iphotol_onoff = 1
igaschem_onoff = 0
if (config_flags%gaschem_onoff .gt. 0) igaschem_onoff = 1
if ((igaschem_onoff .le. 0) .or. (iphotol_onoff .le. 0)) then
iradical_onoff = 0
else
iradical_onoff = 1
end if
iradical_onoff = 0
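! note - the unconditional assignment just above overrides the logic
! before it, so the aqueous radical chemistry is always disabled here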
do 3920 jt = jts, jte
do 3910 it = its, ite
do 3800 kt = kts, kte
qcldwtr = moist(it,kt,jt,p_qc)
if (qcldwtr .le. qcldwtr_cutoff) goto 3800
k_pegshift = k_pegbegin - kts
kpeg = kt + k_pegshift
mpeg = 1
icase = icase + 1
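! diagnostic dump hook - disabled in normal runs, since ktau never
! equals -13579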
if (ktau .eq. -13579) then
call mosaic_cloudchem_dumpaa( &
id, ktau, ktauc, dtstepc, config_flags, &
p_phy, t_phy, rho_phy, alt, &
cldfra, ph_no2, &
moist, chem, &
gas_aqfrac, numgas_aqfrac, &
ids,ide, jds,jde, kds,kde, &
ims,ime, jms,jme, kms,kme, &
its,ite, jts,jte, kts,kte, &
qcldwtr_cutoff, &
it, jt, kt )
end if
call mapaer_tofrom_host( 0, &
ims,ime, jms,jme, kms,kme, &
its,ite, jts,jte, kts,kte, &
it, jt, kt, kt, &
num_moist, num_chem, moist, chem, &
t_phy, p_phy, rho_phy )
call mosaic_cloudchem_1box( &
id, ktau, ktauc, dtstepc, &
iphotol_onoff, iradical_onoff, &
ph_no2(it,kt,jt), &
ph_aq_box, gas_aqfrac_box, &
numgas_aqfrac, it, jt, kt, kpeg, mpeg, icase )
call mapaer_tofrom_host( 1, &
ims,ime, jms,jme, kms,kme, &
its,ite, jts,jte, kts,kte, &
it, jt, kt, kt, &
num_moist, num_chem, moist, chem, &
t_phy, p_phy, rho_phy )
gas_aqfrac(it,kt,jt,:) = gas_aqfrac_box(:)
3800 continue
3910 continue
3920 continue
return
end subroutine mosaic_cloudchem_driver
subroutine mosaic_cloudchem_1box( &
id, ktau, ktauc, dtstepc, &
iphotol_onoff, iradical_onoff, &
photol_no2_box, &
ph_aq_box, gas_aqfrac_box, &
numgas_aqfrac, it, jt, kt, kpeg, mpeg, icase )
use module_state_description, only: &
num_moist, num_chem
use module_data_mosaic_asect, only: &
msectional, &
maxd_asize, maxd_atype, &
cw_phase, nsize_aer, ntype_aer, &
lptr_so4_aer, lptr_no3_aer, lptr_cl_aer, lptr_co3_aer, &
lptr_msa_aer, lptr_nh4_aer, lptr_na_aer, lptr_ca_aer, &
lptr_oin_aer, lptr_bc_aer, lptr_oc_aer
use module_data_mosaic_other, only: &
l2maxd, ltot2, rsub
use module_data_cmu_bulkaqchem, only: &
meqn1max
implicit none
integer, intent(in) :: &
id, ktau, ktauc, &
numgas_aqfrac, it, jt, kt, kpeg, mpeg, &
icase, iphotol_onoff, iradical_onoff
real, intent(in) :: &
dtstepc, photol_no2_box
real, intent(inout) :: ph_aq_box
real, intent(inout), dimension( numgas_aqfrac ) :: gas_aqfrac_box
integer :: iphase
integer :: icase_in, idecomp_hmsa_hso5, &
iradical_in, istat_aqop
integer :: lptr_yyy_cwaer(maxd_asize,maxd_atype,nyyy)
real :: co2_mixrat_in
real :: ph_cmuaq_cur
real :: photol_no2_in
real :: xprescribe_ph
real :: yaq_beg(meqn1max), yaq_end(meqn1max)
real :: rbox(l2maxd), rbox_sv1(l2maxd)
real :: rbulk_cwaer(nyyy,2)
real, dimension( maxd_asize, maxd_atype ) :: fr_partit_cw
iphase = cw_phase
lptr_yyy_cwaer(:,:,l_so4_aqyy) = lptr_so4_aer(:,:,iphase)
lptr_yyy_cwaer(:,:,l_no3_aqyy) = lptr_no3_aer(:,:,iphase)
lptr_yyy_cwaer(:,:,l_cl_aqyy ) = lptr_cl_aer( :,:,iphase)
lptr_yyy_cwaer(:,:,l_nh4_aqyy) = lptr_nh4_aer(:,:,iphase)
lptr_yyy_cwaer(:,:,l_na_aqyy ) = lptr_na_aer( :,:,iphase)
lptr_yyy_cwaer(:,:,l_oin_aqyy) = lptr_oin_aer(:,:,iphase)
lptr_yyy_cwaer(:,:,l_bc_aqyy ) = lptr_bc_aer( :,:,iphase)
lptr_yyy_cwaer(:,:,l_oc_aqyy ) = lptr_oc_aer( :,:,iphase)
rbox(1:ltot2) = max( 0.0, rsub(1:ltot2,kpeg,mpeg) )
rbox_sv1(1:ltot2) = rbox(1:ltot2)
icase_in = icase
iradical_in = 1
idecomp_hmsa_hso5 = 1
co2_mixrat_in = 350.0
photol_no2_in = photol_no2_box
xprescribe_ph = -1.0e31
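! a large negative xprescribe_ph is (presumably) a sentinel telling
! aqoperator1 to compute the aqueous ph rather than use a prescribed one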
if ((iphotol_onoff .le. 0) .or. (iradical_onoff .le. 0)) then
photol_no2_in = 0.0
iradical_in = 0
end if
gas_aqfrac_box(:) = 0.0
call interface_to_aqoperator1( &
istat_aqop, &
dtstepc, &
rbox, gas_aqfrac_box, &
rbulk_cwaer, lptr_yyy_cwaer, &
co2_mixrat_in, photol_no2_in, xprescribe_ph, &
iradical_in, idecomp_hmsa_hso5, &
yaq_beg, yaq_end, ph_cmuaq_cur, &
numgas_aqfrac, id, it, jt, kt, kpeg, mpeg, ktau, icase_in )
ph_aq_box = ph_cmuaq_cur
call partition_cldwtr( &
rbox, fr_partit_cw, &
it, jt, kt, kpeg, mpeg, icase_in )
call distribute_bulk_changes( &
rbox, rbox_sv1, fr_partit_cw, &
rbulk_cwaer, lptr_yyy_cwaer, &
it, jt, kt, kpeg, mpeg, icase_in )
rsub(1:ltot2,kpeg,mpeg) = max( 0.0, rbox(1:ltot2) )
if (msectional .lt. 1000000000) then
call cloudchem_apply_move_sections( &
rbox, rbox_sv1, &
it, jt, kt, kpeg, mpeg, icase_in )
end if
return
end subroutine mosaic_cloudchem_1box
subroutine interface_to_aqoperator1( &
istat_aqop, &
dtstepc, &
rbox, gas_aqfrac_box, &
rbulk_cwaer, lptr_yyy_cwaer, &
co2_mixrat_in, photol_no2_in, xprescribe_ph, &
iradical_in, idecomp_hmsa_hso5, &
yaq_beg, yaq_end, ph_cmuaq_cur, &
numgas_aqfrac, id, it, jt, kt, kpeg, mpeg, ktau, icase )
use module_state_description, only: &
num_chem, param_first_scalar, p_qc, &
p_nh3, p_hno3, p_hcl, p_sulf, p_h2so4, p_hcho, &
p_ora1, p_so2, p_h2o2, p_o3, p_ho, &
p_ho2, p_no3, p_no, p_no2, p_hono, &
p_pan, p_ch3o2, p_ch3oh, p_op1
use module_data_cmu_bulkaqchem, only: &
meqn1max, naers, ngas, &
na4, naa, nac, nae, nah, nahmsa, nahso5, &
nan, nao, nar, nas, naw, &
ng4, nga, ngc, ngch3co3h, ngch3o2, ngch3o2h, ngch3oh, &
ngh2o2, nghcho, nghcooh, nghno2, ngho2, &
ngn, ngno, ngno2, ngno3, ngo3, ngoh, ngpan, ngso2
use module_cmu_bulkaqchem, only: aqoperator1
use module_data_mosaic_asect, only: &
maxd_asize, maxd_atype, &
cw_phase, nsize_aer, ntype_aer, &
lptr_so4_aer, lptr_no3_aer, lptr_cl_aer, lptr_co3_aer, &
lptr_msa_aer, lptr_nh4_aer, lptr_na_aer, lptr_ca_aer, &
lptr_oin_aer, lptr_bc_aer, lptr_oc_aer, &
mw_cl_aer, mw_na_aer, mw_nh4_aer, mw_no3_aer, mw_so4_aer
use module_data_mosaic_other, only: &
aboxtest_units_convert, cairclm, &
ktemp, l2maxd, ptotclm, rcldwtr_sub
implicit none
integer, intent(in) :: &
iradical_in, idecomp_hmsa_hso5, &
numgas_aqfrac, id, it, jt, kt, kpeg, mpeg, ktau, icase
integer, intent(inout) :: &
istat_aqop
integer, intent(in) :: lptr_yyy_cwaer(maxd_asize,maxd_atype,nyyy)
real, intent(in) :: &
dtstepc, co2_mixrat_in, &
photol_no2_in, xprescribe_ph
real, intent(inout) :: ph_cmuaq_cur
real, intent(inout), dimension( 1:l2maxd ) :: rbox
real, intent(inout), dimension( nyyy, 2 ) :: rbulk_cwaer
real, intent(inout), dimension( 1:numgas_aqfrac ) :: gas_aqfrac_box
real, intent(inout), dimension( meqn1max ) :: yaq_beg, yaq_end
integer :: i, iphase, isize, itype
integer :: iaq, istat_fatal, istat_warn
integer :: l, lunxx, lyyy
integer :: p1st
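! eps below is the ratio of the molecular weights of water vapor and dry
! air (18.016/28.966 = 0.622), used in the liquid-water-content conversion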
real, parameter :: eps=0.622
real :: cair_moleperm3
real :: dum, dumb
real :: factgas, factlwc, factpatm, factphoto
real :: factaerbc, factaercl, factaerna, factaernh4, &
factaerno3, factaeroc, factaeroin, factaerso4
real :: lwc
real :: p_atm, photo_in
real :: rh
real :: temp, tstep_beg_sec, tstep_end_sec
real :: totsulf_beg, totsulf_end
real :: gas(ngas), aerosol(naers)
real :: gas_aqfrac_cmu(ngas)
double precision tstep_beg_sec_dp, tstep_end_sec_dp, &
temp_dp, p_atm_dp, lwc_dp, rh_dp, &
co2_mixrat_in_dp, photo_in_dp, ph_cmuaq_cur_dp, &
xprescribe_ph_dp
double precision gas_dp(ngas), gas_aqfrac_cmu_dp(ngas), &
aerosol_dp(naers), yaq_beg_dp(meqn1max), yaq_end_dp(meqn1max)
p1st = param_first_scalar
factpatm = 1.0/1.01325e6
factlwc = 28.966*eps*1.0e6*cairclm(kpeg)
factphoto = 1.6
factgas = 1.0e6
dum = cairclm(kpeg)*1.0e12
factaerso4 = dum*mw_so4_aer
factaerno3 = dum*mw_no3_aer
factaercl = dum*mw_cl_aer
factaernh4 = dum*mw_nh4_aer
factaerna = dum*mw_na_aer
factaeroin = dum
factaeroc = dum
factaerbc = dum
if (aboxtest_units_convert .eq. 10) then
factpatm = 1.0
factlwc = 1.0
factphoto = 1.0
factgas = 1.0
factaerso4 = 1.0
factaerno3 = 1.0
factaercl = 1.0
factaernh4 = 1.0
factaerna = 1.0
factaeroin = 1.0
factaeroc = 1.0
factaerbc = 1.0
end if
temp = rbox(ktemp)
lwc = rcldwtr_sub(kpeg,mpeg) * factlwc
p_atm = ptotclm(kpeg) * factpatm
p_atm = cairclm(kpeg)*1.0e3*0.082058e0*temp
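! note - this recomputes p_atm from the ideal gas law, overriding the
! ptotclm-based value on the previous line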
photo_in = photol_no2_in * factphoto
rh = 1.0
iaq = 1
tstep_beg_sec = 0.0
tstep_end_sec = dtstepc
gas(:) = 0.0
gas(nga ) = rbox(p_nh3 ) * factgas
gas(ngn ) = rbox(p_hno3 ) * factgas
gas(ngc ) = rbox(p_hcl ) * factgas
if(p_sulf > param_first_scalar ) gas(ng4 ) = rbox(p_sulf ) * factgas
if(p_h2so4 > param_first_scalar ) gas(ng4 ) = rbox(p_h2so4 ) * factgas
gas(nghcho ) = rbox(p_hcho ) * factgas
gas(nghcooh ) = rbox(p_ora1 ) * factgas
gas(ngso2 ) = rbox(p_so2 ) * factgas
gas(ngh2o2 ) = rbox(p_h2o2 ) * factgas
gas(ngo3 ) = rbox(p_o3 ) * factgas
gas(ngoh ) = rbox(p_ho ) * factgas
gas(ngho2 ) = rbox(p_ho2 ) * factgas
gas(ngno3 ) = rbox(p_no3 ) * factgas
gas(ngno ) = rbox(p_no ) * factgas
gas(ngno2 ) = rbox(p_no2 ) * factgas
gas(nghno2 ) = rbox(p_hono ) * factgas
gas(ngpan ) = rbox(p_pan ) * factgas
gas(ngch3o2 ) = rbox(p_ch3o2 ) * factgas
gas(ngch3oh ) = rbox(p_ch3oh ) * factgas
gas(ngch3o2h) = rbox(p_op1 ) * factgas
aerosol(:) = 0.0
rbulk_cwaer(:,:) = 0.0
iphase = cw_phase
do itype = 1, ntype_aer
do isize = 1, nsize_aer(itype)
do lyyy = 1, nyyy
l = lptr_yyy_cwaer(isize,itype,lyyy)
if (l .ge. p1st) rbulk_cwaer(lyyy,1) = rbulk_cwaer(lyyy,1) + rbox(l)
end do
end do
end do
aerosol(na4) = rbulk_cwaer(l_so4_aqyy,1) * factaerso4
aerosol(nan) = rbulk_cwaer(l_no3_aqyy,1) * factaerno3
aerosol(nac) = rbulk_cwaer(l_cl_aqyy, 1) * factaercl
aerosol(naa) = rbulk_cwaer(l_nh4_aqyy,1) * factaernh4
aerosol(nas) = rbulk_cwaer(l_na_aqyy, 1) * factaerna
aerosol(nar) = rbulk_cwaer(l_oin_aqyy,1) * factaeroin
aerosol(nae) = rbulk_cwaer(l_bc_aqyy, 1) * factaerbc
aerosol(nao) = rbulk_cwaer(l_oc_aqyy, 1) * factaeroc
tstep_beg_sec_dp = 0.0d0
if (tstep_beg_sec .ne. 0.0) tstep_beg_sec_dp = tstep_beg_sec
tstep_end_sec_dp = 0.0d0
if (tstep_end_sec .ne. 0.0) tstep_end_sec_dp = tstep_end_sec
temp_dp = 0.0d0
if (temp .ne. 0.0) temp_dp = temp
p_atm_dp = 0.0d0
if (p_atm .ne. 0.0) p_atm_dp = p_atm
lwc_dp = 0.0d0
if (lwc .ne. 0.0) lwc_dp = lwc
rh_dp = 0.0d0
if (rh .ne. 0.0) rh_dp = rh
co2_mixrat_in_dp = 0.0d0
if (co2_mixrat_in .ne. 0.0) co2_mixrat_in_dp = co2_mixrat_in
photo_in_dp = 0.0d0
if (photo_in .ne. 0.0) photo_in_dp = photo_in
xprescribe_ph_dp = 0.0d0
if (xprescribe_ph .ne. 0.0) xprescribe_ph_dp = xprescribe_ph
ph_cmuaq_cur_dp = 0.0d0
if (ph_cmuaq_cur .ne. 0.0) ph_cmuaq_cur_dp = ph_cmuaq_cur
do i = 1, ngas
gas_dp(i) = 0.0d0
if (gas(i) .ne. 0.0) gas_dp(i) = gas(i)
end do
do i = 1, naers
aerosol_dp(i) = 0.0d0
if (aerosol(i) .ne. 0.0) aerosol_dp(i) = aerosol(i)
end do
! gas_aqfrac_cmu is an output of aqoperator1 and has not been set yet,
! so initialize it (and its double-precision copy) to zero here
gas_aqfrac_cmu(:) = 0.0
do i = 1, ngas
gas_aqfrac_cmu_dp(i) = 0.0d0
end do
do i = 1, meqn1max
yaq_beg_dp(i) = 0.0d0
if (yaq_beg(i) .ne. 0.0) yaq_beg_dp(i) = yaq_beg(i)
end do
do i = 1, meqn1max
yaq_end_dp(i) = 0.0d0
if (yaq_end(i) .ne. 0.0) yaq_end_dp(i) = yaq_end(i)
end do
cair_moleperm3 = 1.0e3*p_atm_dp/(0.082058e0*temp_dp)
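! sulfur balance diagnostic - 96, 113, and 111 are the molecular weights
! of so4, hso5, and hmsa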
totsulf_beg = ( aerosol_dp(na4)/96. &
+ aerosol_dp(nahso5)/113. + aerosol_dp(nahmsa)/111. &
+ (gas_dp(ngso2) + gas_dp(ng4))*cair_moleperm3 )*96.0
call aqoperator1( &
istat_fatal, istat_warn, &
tstep_beg_sec_dp, tstep_end_sec_dp, &
gas_dp, aerosol_dp, gas_aqfrac_cmu_dp, &
temp_dp, p_atm_dp, lwc_dp, rh_dp, &
co2_mixrat_in_dp, photo_in_dp, xprescribe_ph_dp, &
iradical_in, idecomp_hmsa_hso5, iaq, &
yaq_beg_dp, yaq_end_dp, ph_cmuaq_cur_dp )
totsulf_end = ( aerosol_dp(na4)/96. &
+ aerosol_dp(nahso5)/113. + aerosol_dp(nahmsa)/111. &
+ (gas_dp(ngso2) + gas_dp(ng4))*cair_moleperm3 )*96.0
tstep_beg_sec = tstep_beg_sec_dp
tstep_end_sec = tstep_end_sec_dp
temp = temp_dp
p_atm = p_atm_dp
lwc = lwc_dp
rh = rh_dp
ph_cmuaq_cur = ph_cmuaq_cur_dp
do i = 1, ngas
gas(i) = gas_dp(i)
end do
do i = 1, naers
aerosol(i) = aerosol_dp(i)
end do
do i = 1, ngas
gas_aqfrac_cmu(i) = gas_aqfrac_cmu_dp(i)
end do
do i = 1, meqn1max
yaq_beg(i) = yaq_beg_dp(i)
end do
do i = 1, meqn1max
yaq_end(i) = yaq_end_dp(i)
end do
istat_aqop = 0
if (istat_fatal .ne. 0) then
write(6,*) &
'*** mosaic_cloudchem_driver, subr interface_to_aqoperator1'
write(6,'(a,4i5,2i10)') &
' id,it,jt,kt, istat_fatal, warn =', &
id, it, jt, kt, istat_fatal, istat_warn
istat_aqop = -10
end if
dum = totsulf_end - totsulf_beg
dumb = max( totsulf_beg, totsulf_end )
if (abs(dum) .gt. max(1.0e-3,1.0e-3*dumb)) then
write(6,*) &
'*** mosaic_cloudchem_driver, sulfur balance warning'
write(6,'(a,4i5,1p,3e12.4)') &
' id,it,jt,kt, total_sulfur_beg, _end, _error =', &
id, it, jt, kt, totsulf_beg, totsulf_end, dum
end if
rbox(p_nh3 ) = gas(nga ) / factgas
rbox(p_hno3 ) = gas(ngn ) / factgas
rbox(p_hcl ) = gas(ngc ) / factgas
if(p_sulf .gt. param_first_scalar) rbox(p_sulf ) = gas(ng4) / factgas
if(p_h2so4 .gt. param_first_scalar) rbox(p_h2so4 ) = gas(ng4) / factgas
rbox(p_hcho ) = gas(nghcho ) / factgas
rbox(p_ora1 ) = gas(nghcooh ) / factgas
rbox(p_so2 ) = gas(ngso2 ) / factgas
rbox(p_h2o2 ) = gas(ngh2o2 ) / factgas
rbox(p_o3 ) = gas(ngo3 ) / factgas
rbox(p_ho ) = gas(ngoh ) / factgas
rbox(p_ho2 ) = gas(ngho2 ) / factgas
rbox(p_no3 ) = gas(ngno3 ) / factgas
rbox(p_no ) = gas(ngno ) / factgas
rbox(p_no2 ) = gas(ngno2 ) / factgas
rbox(p_hono ) = gas(nghno2 ) / factgas
rbox(p_pan ) = gas(ngpan ) / factgas
rbox(p_ch3o2 ) = gas(ngch3o2 ) / factgas
rbox(p_ch3oh ) = gas(ngch3oh ) / factgas
rbox(p_op1 ) = gas(ngch3o2h) / factgas
gas_aqfrac_box(:) = 0.0
if (p_nh3 .le. numgas_aqfrac) &
gas_aqfrac_box(p_nh3 ) = gas_aqfrac_cmu(nga )
if (p_hno3 .le. numgas_aqfrac) &
gas_aqfrac_box(p_hno3 ) = gas_aqfrac_cmu(ngn )
if (p_hcl .le. numgas_aqfrac) &
gas_aqfrac_box(p_hcl ) = gas_aqfrac_cmu(ngc )
if (p_sulf .le. numgas_aqfrac .and. p_sulf .gt. param_first_scalar) &
gas_aqfrac_box(p_sulf ) = gas_aqfrac_cmu(ng4 )
if (p_h2so4 .le. numgas_aqfrac .and. p_h2so4 .gt. param_first_scalar) &
gas_aqfrac_box(p_h2so4 ) = gas_aqfrac_cmu(ng4 )
if (p_hcho .le. numgas_aqfrac) &
gas_aqfrac_box(p_hcho ) = gas_aqfrac_cmu(nghcho )
if (p_ora1 .le. numgas_aqfrac) &
gas_aqfrac_box(p_ora1 ) = gas_aqfrac_cmu(nghcooh )
if (p_so2 .le. numgas_aqfrac) &
gas_aqfrac_box(p_so2 ) = gas_aqfrac_cmu(ngso2 )
if (p_h2o2 .le. numgas_aqfrac) &
gas_aqfrac_box(p_h2o2 ) = gas_aqfrac_cmu(ngh2o2 )
if (p_o3 .le. numgas_aqfrac) &
gas_aqfrac_box(p_o3 ) = gas_aqfrac_cmu(ngo3 )
if (p_ho .le. numgas_aqfrac) &
gas_aqfrac_box(p_ho ) = gas_aqfrac_cmu(ngoh )
if (p_ho2 .le. numgas_aqfrac) &
gas_aqfrac_box(p_ho2 ) = gas_aqfrac_cmu(ngho2 )
if (p_no3 .le. numgas_aqfrac) &
gas_aqfrac_box(p_no3 ) = gas_aqfrac_cmu(ngno3 )
if (p_no .le. numgas_aqfrac) &
gas_aqfrac_box(p_no ) = gas_aqfrac_cmu(ngno )
if (p_no2 .le. numgas_aqfrac) &
gas_aqfrac_box(p_no2 ) = gas_aqfrac_cmu(ngno2 )
if (p_hono .le. numgas_aqfrac) &
gas_aqfrac_box(p_hono ) = gas_aqfrac_cmu(nghno2 )
if (p_pan .le. numgas_aqfrac) &
gas_aqfrac_box(p_pan ) = gas_aqfrac_cmu(ngpan )
if (p_ch3o2 .le. numgas_aqfrac) &
gas_aqfrac_box(p_ch3o2 ) = gas_aqfrac_cmu(ngch3o2 )
if (p_ch3oh .le. numgas_aqfrac) &
gas_aqfrac_box(p_ch3oh ) = gas_aqfrac_cmu(ngch3oh )
if (p_op1 .le. numgas_aqfrac) &
gas_aqfrac_box(p_op1 ) = gas_aqfrac_cmu(ngch3o2h)
rbulk_cwaer(l_so4_aqyy,2) = aerosol(na4) / factaerso4
rbulk_cwaer(l_no3_aqyy,2) = aerosol(nan) / factaerno3
rbulk_cwaer(l_cl_aqyy, 2) = aerosol(nac) / factaercl
rbulk_cwaer(l_nh4_aqyy,2) = aerosol(naa) / factaernh4
rbulk_cwaer(l_na_aqyy, 2) = aerosol(nas) / factaerna
rbulk_cwaer(l_oin_aqyy,2) = aerosol(nar) / factaeroin
rbulk_cwaer(l_bc_aqyy, 2) = aerosol(nae) / factaerbc
rbulk_cwaer(l_oc_aqyy, 2) = aerosol(nao) / factaeroc
return
end subroutine interface_to_aqoperator1
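!-----------------------------------------------------------------------
! partition_cldwtr - computes fr_partit_cw(isize,itype), the fraction of
! any bulk cloud-water change assigned to each bin, as a weighted blend
! (partit_wght_mass) of the bin's share of total mass and total number
!-----------------------------------------------------------------------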
subroutine partition_cldwtr( &
rbox, fr_partit_cw, &
it, jt, kt, kpeg, mpeg, icase )
use module_state_description, only: &
param_first_scalar
use module_data_mosaic_asect, only: &
maxd_asize, maxd_atype, &
cw_phase, nsize_aer, ntype_aer, ncomp_aer, &
massptr_aer, numptr_aer, &
dens_aer, mw_aer, volumlo_sect, volumhi_sect
use module_data_mosaic_other, only: &
aboxtest_units_convert, cairclm, &
ktemp, l2maxd, ptotclm, rcldwtr_sub
implicit none
integer, intent(in) :: it, jt, kt, kpeg, mpeg, icase
real, intent(inout), dimension( 1:l2maxd ) :: rbox
real, intent(inout), dimension( maxd_asize, maxd_atype ) :: &
fr_partit_cw
integer :: iphase, isize, itype
integer :: jdone_mass, jdone_numb, jpos, jpos_mass, jpos_numb
integer :: l, ll, lunxx
integer :: p1st
real, parameter :: partit_wght_mass = 0.5
real :: dum, duma, dumb, dumc, dummass, dumnumb, dumvolu
real :: tmass, tnumb, umass, unumb, wmass, wnumb
real, dimension( maxd_asize, maxd_atype ) :: fmass, fnumb, xmass, xnumb
p1st = PARAM_FIRST_SCALAR
iphase = cw_phase
tmass = 0.0
tnumb = 0.0
umass = 0.0
unumb = 0.0
do itype = 1, ntype_aer
do isize = 1, nsize_aer(itype)
dummass = 0.0
dumvolu = 0.0
do ll = 1, ncomp_aer(itype)
l = massptr_aer(ll,isize,itype,iphase)
if (l .ge. p1st) then
dum = max( 0.0, rbox(l) )*mw_aer(ll,itype)
dummass = dummass + dum
dumvolu = dumvolu + dum/dens_aer(ll,itype)
end if
end do
l = numptr_aer(isize,itype,iphase)
dumnumb = max( 0.0, rbox(l) )
if (dumnumb .gt. dumvolu/volumlo_sect(isize,itype)) then
dumnumb = dumvolu/volumlo_sect(isize,itype)
rbox(l) = dumnumb
else if (dumnumb .lt. dumvolu/volumhi_sect(isize,itype)) then
dumnumb = dumvolu/volumhi_sect(isize,itype)
rbox(l) = dumnumb
end if
if (dummass .lt. 1.0e-37) dummass = 0.0
xmass(isize,itype) = dummass
if (dumnumb .lt. 1.0e-37) dumnumb = 0.0
xnumb(isize,itype) = dumnumb
tmass = tmass + xmass(isize,itype)
tnumb = tnumb + xnumb(isize,itype)
umass = max( umass, xmass(isize,itype) )
unumb = max( unumb, xnumb(isize,itype) )
end do
end do
jdone_mass = 0
jdone_numb = 0
jpos_mass = 0
jpos_numb = 0
do itype = 1, ntype_aer
do isize = 1, nsize_aer(itype)
fmass(isize,itype) = 0.0
if (tmass .ge. 1.0e-35) then
fmass(isize,itype) = xmass(isize,itype)/tmass
else if (umass .gt. 0.0) then
if ( (jdone_mass .eq. 0) .and. &
(xmass(isize,itype) .eq. umass) ) then
jdone_mass = 1
fmass(isize,itype) = 1.0
end if
end if
if (fmass(isize,itype) .gt. 0) jpos_mass = jpos_mass + 1
fnumb(isize,itype) = 0.0
if (tnumb .ge. 1.0e-35) then
fnumb(isize,itype) = xnumb(isize,itype)/tnumb
else if (unumb .gt. 0.0) then
if ( (jdone_numb .eq. 0) .and. &
(xnumb(isize,itype) .eq. unumb) ) then
jdone_numb = 1
fnumb(isize,itype) = 1.0
end if
end if
if (fnumb(isize,itype) .gt. 0) jpos_numb = jpos_numb + 1
end do
end do
if ((jpos_mass .eq. 1) .or. (jpos_numb .eq. 1)) then
do itype = 1, ntype_aer
do isize = 1, nsize_aer(itype)
if (jpos_mass .eq. 1) then
if (fmass(isize,itype) .gt. 0) fmass(isize,itype) = 1.0
end if
if (jpos_numb .eq. 1) then
if (fnumb(isize,itype) .gt. 0) fnumb(isize,itype) = 1.0
end if
end do
end do
end if
fr_partit_cw(:,:) = 0.0
if ((jpos_mass .eq. 0) .and. (jpos_numb .eq. 0)) then
itype = 1
isize = (nsize_aer(itype)+1)/2
fr_partit_cw(isize,itype) = 1.0
else if (jpos_mass .eq. 0) then
fr_partit_cw(:,:) = fnumb(:,:)
else if (jpos_numb .eq. 0) then
fr_partit_cw(:,:) = fmass(:,:)
else
wmass = max( 0.0, min( 1.0, partit_wght_mass ) )
wnumb = 1.0 - wmass
fr_partit_cw(:,:) = wmass*fmass(:,:) + wnumb*fnumb(:,:)
jpos = 0
do itype = 1, ntype_aer
do isize = 1, nsize_aer(itype)
if (fr_partit_cw(isize,itype) .gt. 0.0) jpos = jpos + 1
end do
end do
if (jpos .eq. 1) then
do itype = 1, ntype_aer
do isize = 1, nsize_aer(itype)
if (fr_partit_cw(isize,itype) .gt. 0.0) &
fr_partit_cw(isize,itype) = 1.0
end do
end do
end if
end if
return
end subroutine partition_cldwtr
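!-----------------------------------------------------------------------
! distribute_bulk_changes - applies the bulk concentration changes from
! the aqueous chemistry to the individual bins using fr_partit_cw;
! negative changes are applied iteratively so that no bin goes negative
!-----------------------------------------------------------------------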
subroutine distribute_bulk_changes( &
rbox, rbox_sv1, fr_partit_cw, &
rbulk_cwaer, lptr_yyy_cwaer, &
it, jt, kt, kpeg, mpeg, icase )
use module_state_description, only: &
param_first_scalar
use module_data_mosaic_asect, only: &
maxd_asize, maxd_atype, &
cw_phase, nsize_aer, ntype_aer, &
lptr_so4_aer, lptr_no3_aer, lptr_cl_aer, lptr_co3_aer, &
lptr_msa_aer, lptr_nh4_aer, lptr_na_aer, lptr_ca_aer, &
lptr_oin_aer, lptr_bc_aer, lptr_oc_aer
use module_data_mosaic_other, only: l2maxd, lunout, name
implicit none
integer, intent(in) :: it, jt, kt, kpeg, mpeg, icase
integer, intent(in) :: lptr_yyy_cwaer(maxd_asize,maxd_atype,nyyy)
real, intent(inout), dimension( 1:l2maxd ) :: rbox, rbox_sv1
real, intent(in), dimension( maxd_asize, maxd_atype ) :: &
fr_partit_cw
real, intent(in), dimension( nyyy, 2 ) :: rbulk_cwaer
integer :: iphase, isize, itype
integer :: idone, icount, ncount
integer :: jpos, jpos_sv
integer :: l, lunxx, lunxxaa, lunxxbb, lyyy
integer :: p1st
real :: duma, dumb, dumc
real :: fr, frsum_cur
real :: fr_cur(maxd_asize,maxd_atype)
real :: del_r_current, del_r_remain
real :: del_rbulk_cwaer(nyyy)
p1st = param_first_scalar
do lyyy = 1, nyyy
del_rbulk_cwaer(lyyy) = rbulk_cwaer(lyyy,2) - rbulk_cwaer(lyyy,1)
end do
iphase = cw_phase
jpos = 0
do itype = 1, ntype_aer
do isize = 1, nsize_aer(itype)
if (fr_partit_cw(isize,itype) .gt. 0) jpos = jpos + 1
end do
end do
jpos_sv = jpos
if (jpos_sv .eq. 1) then
do lyyy = 1, nyyy
do itype = 1, ntype_aer
do isize = 1, nsize_aer(itype)
fr = fr_partit_cw(isize,itype)
if (fr .eq. 1.0) then
l = lptr_yyy_cwaer(isize,itype,lyyy)
if (l .ge. p1st) rbox(l) = rbulk_cwaer(lyyy,2)
end if
end do
end do
end do
goto 7900
end if
do 3900 lyyy = 1, nyyy
if (del_rbulk_cwaer(lyyy) .eq. 0.0) then
goto 3900
else if (del_rbulk_cwaer(lyyy) .gt. 0.0) then
do itype = 1, ntype_aer
do isize = 1, nsize_aer(itype)
fr = fr_partit_cw(isize,itype)
if (fr .gt. 0.0) then
l = lptr_yyy_cwaer(isize,itype,lyyy)
if (l .ge. p1st) then
rbox(l) = rbox(l) + fr*del_rbulk_cwaer(lyyy)
end if
end if
end do
end do
goto 3900
end if
del_r_remain = del_rbulk_cwaer(lyyy)
fr_cur(:,:) = fr_partit_cw(:,:)
ncount = max( 1, jpos_sv*2 )
icount = 0
do while (icount .le. ncount)
icount = icount + 1
del_r_current = del_r_remain
jpos = 0
frsum_cur = 0.0
do itype = 1, ntype_aer
do isize = 1, nsize_aer(itype)
fr = fr_cur(isize,itype)
if (fr .gt. 0.0) then
l = lptr_yyy_cwaer(isize,itype,lyyy)
if (l .ge. p1st) then
duma = fr*del_r_current
dumb = rbox(l) + duma
if (dumb .gt. 0.0) then
jpos = jpos + 1
else if (dumb .eq. 0.0) then
fr_cur(isize,itype) = 0.0
else
duma = -rbox(l)
dumb = 0.0
fr_cur(isize,itype) = 0.0
end if
del_r_remain = del_r_remain - duma
rbox(l) = dumb
frsum_cur = frsum_cur + fr_cur(isize,itype)
else
fr_cur(isize,itype) = 0.0
end if
end if
end do
end do
if (jpos .eq. jpos_sv) then
idone = 1
else if (del_r_remain .ge. 0.0) then
idone = 2
else if (abs(del_r_remain) .le. 1.0e-7*abs(del_rbulk_cwaer(lyyy))) then
idone = 3
else if (frsum_cur .le. 0.0) then
idone = 4
else if (jpos .le. 0) then
idone = 5
else
idone = 0
end if
if (idone .gt. 0) then
lunxxaa = 6
if ((lunxxaa .gt. 0) .and. (icount .gt. (1+jpos_sv)/2)) then
write(lunxxaa,9800) &
'distribute_bulk_changes - icount>jpos_sv/2 - i,j,k'
write(lunxxaa,9810) it, jt, kt
write(lunxxaa,9800) 'icase, lyyy, idone, icount, jpos, jpos_sv'
write(lunxxaa,9810) icase, lyyy, idone, icount, jpos, jpos_sv
end if
goto 3900
end if
fr_cur(:,:) = fr_cur(:,:)/frsum_cur
end do
lunxxbb = 6
if (lunxxbb .gt. 0) then
write(lunxxbb,9800)
write(lunxxbb,9800) &
'distribute_bulk_changes - icount>ncount - i,j,k'
write(lunxxbb,9810) it, jt, kt
write(lunxxbb,9800) 'icase, lyyy, icount, ncount, jpos_sv, jpos'
write(lunxxbb,9810) icase, lyyy, icount, ncount, jpos_sv, jpos
write(lunxxbb,9800) 'rbulk_cwaer(1), del_rbulk_cwaer, del_r_remain, frsum_cur, (frsum_cur-1.0)'
write(lunxxbb,9820) rbulk_cwaer(lyyy,1), del_rbulk_cwaer(lyyy), &
del_r_remain, frsum_cur, (frsum_cur-1.0)
end if
9800 format( a )
9801 format( 3a )
9810 format( 7i10 )
9820 format( 7(1pe10.2) )
9840 format( 2i3, 5(1pe14.6) )
3900 continue
7900 continue
return
end subroutine distribute_bulk_changes
subroutine cloudchem_apply_move_sections( &
rbox, rbox_sv1, &
it, jt, kt, kpeg, mpeg, icase )
use module_state_description, only: &
param_first_scalar
use module_data_mosaic_asect, only: &
msectional, &
maxd_asize, maxd_atype, &
cw_phase, nsize_aer, ntype_aer, ncomp_aer, &
massptr_aer, numptr_aer, mw_aer, dens_aer, &
lptr_so4_aer, lptr_no3_aer, lptr_cl_aer, lptr_co3_aer, &
lptr_msa_aer, lptr_nh4_aer, lptr_na_aer, lptr_ca_aer, &
lptr_oin_aer, lptr_bc_aer, lptr_oc_aer, &
drymass_aftgrow, drymass_pregrow, &
drydens_aftgrow, drydens_pregrow
use module_data_mosaic_other, only: l2maxd, name, rsub
use module_mosaic_movesect, only: move_sections
implicit none
integer, intent(in) :: it, jt, kt, kpeg, mpeg, icase
real, intent(inout), dimension( 1:l2maxd ) :: rbox, rbox_sv1
integer :: idum_msect
integer :: iphase, isize, itype
integer :: l, ll, lunxx
integer :: p1st
integer :: lptr_dum(maxd_asize,maxd_atype)
real :: densdefault
real :: dmaft, dmpre, dvaft, dvpre
real :: duma, dumb, dumc
real :: smallmassbb
p1st = param_first_scalar
iphase = cw_phase
densdefault = 2.0
smallmassbb = 1.0e-30
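! For each bin, compute the dry mass and mean dry density before
! (rbox_sv1) and after (rbox) cloud chemistry; move_sections uses
! these to detect particle growth.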
do 1800 itype = 1, ntype_aer
do 1800 isize = 1, nsize_aer(itype)
dmaft = 0.0
dmpre = 0.0
dvaft = 0.0
dvpre = 0.0
do ll = 1, ncomp_aer(itype)
l = massptr_aer(ll,isize,itype,iphase)
if (l .ge. p1st) then
duma = mw_aer(ll,itype)
dmaft = dmaft + duma*rbox(l)
dmpre = dmpre + duma*rbox_sv1(l)
duma = duma/dens_aer(ll,itype)
dvaft = dvaft + duma*rbox(l)
dvpre = dvpre + duma*rbox_sv1(l)
end if
end do
drymass_aftgrow(isize,itype) = dmaft
drymass_pregrow(isize,itype) = dmpre
if (min(dmaft,dvaft) .le. smallmassbb) then
drydens_aftgrow(isize,itype) = densdefault
else
drydens_aftgrow(isize,itype) = dmaft/dvaft
end if
if (min(dmpre,dvpre) .le. smallmassbb) then
drydens_pregrow(isize,itype) = densdefault
else
drydens_pregrow(isize,itype) = dmpre/dvpre
end if
1800 continue
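! Transfer particles between size sections according to the
! pre/post-growth dry masses and densities computed above.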
idum_msect = msectional
call move_sections( 2, it, jt, kpeg, mpeg )
msectional = idum_msect
return
end subroutine cloudchem_apply_move_sections
subroutine mosaic_cloudchem_dumpaa( &
id, ktau, ktauc, dtstepc, config_flags, &
p_phy, t_phy, rho_phy, alt, &
cldfra, ph_no2, &
moist, chem, &
gas_aqfrac, numgas_aqfrac, &
ids,ide, jds,jde, kds,kde, &
ims,ime, jms,jme, kms,kme, &
its,ite, jts,jte, kts,kte, &
qcldwtr_cutoff, &
itcur, jtcur, ktcur )
use module_state_description, only: &
num_moist, num_chem, p_qc
use module_configure, only: grid_config_rec_type
use module_data_mosaic_asect
use module_data_mosaic_other, only: k_pegbegin, name
use module_mosaic_driver, only: mapaer_tofrom_host
implicit none
integer, intent(in) :: &
id, ktau, ktauc, &
numgas_aqfrac, &
ids, ide, jds, jde, kds, kde, &
ims, ime, jms, jme, kms, kme, &
its, ite, jts, jte, kts, kte, &
itcur, jtcur, ktcur
type(grid_config_rec_type), intent(in) :: config_flags
real, intent(in) :: &
dtstepc, qcldwtr_cutoff
real, intent(in), &
dimension( ims:ime, kms:kme, jms:jme ) :: &
p_phy, t_phy, rho_phy, alt, cldfra, ph_no2
real, intent(in), &
dimension( ims:ime, kms:kme, jms:jme, 1:num_moist ) :: &
moist
real, intent(inout), &
dimension( ims:ime, kms:kme, jms:jme, 1:num_chem ) :: &
chem
real, intent(inout), &
dimension( ims:ime, kms:kme, jms:jme, numgas_aqfrac ) :: &
gas_aqfrac
integer :: it, jt, kt, l, ll, n
integer :: isize, itype
real :: dumai, dumcw
real :: qcldwtr
it = itcur
jt = jtcur
kt = ktcur
write(*,*)
write(*,*)
write(*,*)
write(*,9100)
write(*,9102) ktau, it, jt, kt
9100 format( 7('----------') )
9102 format( &
'mosaic_cloudchem_dumpaa - ktau, i, j, k =', 4i5 )
itype = 1
do 2900 isize = 1, nsize_aer(itype)
write(*,9110) isize
9110 format( / 'isize =', i3 / &
' k cldwtr mass-ai numb-ai mass-cw numb-cw' )
do 2800 kt = kte, kts, -1
dumai = 0.0
dumcw = 0.0
do ll = 1, ncomp_aer(itype)
l = massptr_aer(ll,isize,itype,1)
dumai = dumai + chem(it,kt,jt,l)
l = massptr_aer(ll,isize,itype,2)
dumcw = dumcw + chem(it,kt,jt,l)
end do
write(*,9120) kt, &
moist(it,kt,jt,p_qc), &
dumai, chem(it,kt,jt,numptr_aer(isize,itype,1)), &
dumcw, chem(it,kt,jt,numptr_aer(isize,itype,2))
9120 format( i3, 1p, e10.2, 2(3x, 2e10.2) )
2800 continue
2900 continue
write(*,*)
write(*,9100)
write(*,*)
kt = ktcur
if ((ktau .eq. 30) .and. (it .eq. 23) .and. &
(jt .eq. 1) .and. (kt .eq. 11)) then
qcldwtr = moist(it,kt,jt,p_qc)
write(*,*)
write(*,*)
write(*,9102) ktau, it, jt, kt
write(*,*)
write( *, '(3(1pe10.2,3x,a))' ) &
(chem(it,kt,jt,l), name(l)(1:10), l=1,num_chem)
write(*,*)
write( *, '(3(1pe10.2,3x,a))' ) &
p_phy(it,kt,jt), 'p_phy ', &
t_phy(it,kt,jt), 't_phy ', &
rho_phy(it,kt,jt), 'rho_phy ', &
alt(it,kt,jt), 'alt ', &
qcldwtr, 'qcldwtr ', &
qcldwtr_cutoff, 'qcldwtrcut'
write(*,*)
write(*,9100)
write(*,*)
end if
return
end subroutine mosaic_cloudchem_dumpaa
end module module_mosaic_cloudchem
|
theory v1_isar_new_K
imports Main "../QML"
begin
text \<open>
@{text "Compossibility\<^sub>6'"}: Necessarily, given a table, @{term x}, made from a hunk, @{term y},
for any table, @{term x'}, which might be made from a hunk, @{term z}, distinct from @{term y}, it
is also possible that both @{term x} is a table made from @{term y} and @{term x'} is a table made
from @{term z}.
@{text "Origin Uniqueness\<^sub>6'"}:
Necessarily, if @{term x} is a table made from @{term y} and @{term x'} is a table made
from @{term z} and @{text "y\<noteq>z"}, then @{text "x\<noteq>x'"}.
\<close>
text \<open>
@{text "Origin Essentialism\<^sub>6'"}: Necessarily, given a table, @{term x}, made from a hunk, @{term y}, any table, @{term x'},
which might be made from a hunk, @{term z}, distinct from @{term y}, is distinct from @{term x}.
\<close>
subsection \<open>Our Formulation\<close>
text \<open>
We paraphrased the arguments mentioned above to a more logically readable form, without distorting
their essence.
\<close>
text \<open>
@{text "Compossibility\<^sub>6"}: If any table, say @{term x}, is made from any hunk of matter, say @{term y},
then necessarily if any table, say @{term x'}, is made from any hunk of matter, say @{term z},
such that @{term y} and @{term z} are distinct, then it is possible that both tables @{term x} (made from @{term y})
and @{term x'} (made from @{term z}) exist together.
@{text "Origin Uniqueness\<^sub>6"}:
If any table, say @{term x}, is made from any hunk of matter, say @{term y}, and any table,
say @{term x'}, is made from any hunk of matter, say @{term z}, such that @{term y} and @{term z} are distinct,
then @{term x} and @{term x'} are distinct.
\<close>
text \<open>
@{text "Origin Essentialism\<^sub>6"}: If any table, say @{term x}, is made from any hunk of matter, say @{term y},
then necessarily if any table, say @{term x'}, is made from any hunk of matter, say @{term z},
such that @{term y} and @{term z} are distinct, then @{term x} and @{term x'} are distinct.
\<close>
text \<open>We will be using this version henceforth.\<close>
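text \<open>
Schematically, in standard modal notation (our informal rendering of the three principles,
matching the formal statements below):
\begin{itemize}
\item @{text "Compossibility\<^sub>6"}: $\forall x\,y\,x'\,z\;(Txy \to \Box((y \ne z \land Tx'z) \to \Diamond(Txy \land Tx'z)))$
\item @{text "Origin Uniqueness\<^sub>6"}: $\forall x\,y\,x'\,z\;((y \ne z \land Txy \land Tx'z) \to x \ne x')$
\item @{text "Origin Essentialism\<^sub>6"}: $\forall x\,y\,x'\,z\;(Txy \to \Box((y \ne z \land Tx'z) \to x \ne x'))$
\end{itemize}
\<close>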
subsection \<open>Overview of the Proof\<close>
text \<open>
\begin{enumerate}
\item Setting-up the worlds:
We begin in world @{term w} by assuming the antecedent of the thesis @{text "Origin Essentialism\<^sub>6"}
@{text "table_x1_from_y1: T x y w"}. On expanding the necessity operator in its consequent,
we obtain @{text "antecedent: ((y\<^bold>\<noteq>\<^sup>Lz) \<^bold>\<and> T x' z) v"}, i.e. in @{term v} it holds both that @{term y}
and @{term z} are distinct and that table @{term x'} is made from @{term z}.
We fix all universally quantified variables to maintain uniformity of meaning.
We assume the negation of the consequent in a world @{term v}, i.e. that tables @{term x} and @{term x'} are
identical, @{text "identity: (x\<^bold>=\<^sup>Lx') v"}.
\item Deriving co-existence of tables @{term x} and @{term x'} in @{term u}:
We use @{text "Compossibility\<^sub>6"} to show that tables @{term x} and @{term x'} co-exist in a world.
We derive the @{text "Compossibility\<^sub>6 Consequent"} in @{term u} using @{term antecedent}. Using this
we obtain @{text "Origin Uniqueness\<^sub>6 Antecedent"}
@{text "origin_uniqueness1_ante: (((y\<^bold>\<noteq>\<^sup>Lz) \<^bold>\<and> (T x y) \<^bold>\<and> (T x' z))) u"}.
\item Falsifying @{term identity}:
We obtain @{text "Origin Uniqueness\<^sub>6 Consequent"} @{text "((x\<^bold>\<noteq>\<^sup>Lx')) u"} using previously derived
@{term origin_uniqueness6_ante}. We use this result to falsify @{term identity} (see the schematic sketch after this list).
\end{enumerate}
\<close>
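text \<open>
A schematic sketch of the world structure the proof traverses (our summary): a starting world
@{term w} in which @{text "T x y"} holds; an arbitrary @{term w}-accessible world @{term v} in
which the antecedent and the assumed @{term identity} hold; and a @{term v}-accessible world
@{term u}, obtained from @{text "Compossibility\<^sub>6"}, in which both tables exist together and
@{text "Origin Uniqueness\<^sub>6"} yields the contradiction.
\<close>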
(* (T x y) \<equiv> x made from y *)
consts makeTable :: "\<mu> \<Rightarrow> \<mu> \<Rightarrow> \<sigma>" ("T")
(* lemma necessity_of_distinctness: "\<lfloor>(\<^bold>\<forall>x. \<^bold>\<forall>y.(\<^bold>\<not>(x\<^bold>=\<^sup>Ly) \<^bold>\<rightarrow> \<^bold>\<box>((x\<^bold>\<noteq>\<^sup>Ly)))\<rfloor>" by auto *)
lemma
assumes compossibility6: "\<lfloor>\<^bold>\<forall>x. \<^bold>\<forall>y. \<^bold>\<forall>x'. \<^bold>\<forall>z. T x y \<^bold>\<rightarrow> \<^bold>\<box>((y\<^bold>\<noteq>\<^sup>Lz \<^bold>\<and> T x' z) \<^bold>\<rightarrow> \<^bold>\<diamond>(T x y \<^bold>\<and> T x' z))\<rfloor>"
assumes origin_uniqueness6: "\<lfloor>\<^bold>\<forall>x. \<^bold>\<forall>y. \<^bold>\<forall>x'. \<^bold>\<forall>z. (y\<^bold>\<noteq>\<^sup>Lz \<^bold>\<and> T x y \<^bold>\<and> T x' z) \<^bold>\<rightarrow> x\<^bold>\<noteq>\<^sup>Lx'\<rfloor>"
shows origin_essentialism6: "\<lfloor>\<^bold>\<forall>x. \<^bold>\<forall>y. \<^bold>\<forall>x'. \<^bold>\<forall>z. T x y \<^bold>\<rightarrow> \<^bold>\<box>((y\<^bold>\<noteq>\<^sup>Lz \<^bold>\<and> T x' z) \<^bold>\<rightarrow> x\<^bold>\<noteq>\<^sup>Lx')\<rfloor>"
text \<open>\<^item> Setting-up the worlds\<close>
(*<*)
proof(rule allI)
fix w
show "(\<^bold>\<forall>x. \<^bold>\<forall>y. \<^bold>\<forall>x'. \<^bold>\<forall>z. T x y \<^bold>\<rightarrow> \<^bold>\<box>(((y\<^bold>\<noteq>\<^sup>Lz) \<^bold>\<and> (T x' z)) \<^bold>\<rightarrow> (x\<^bold>\<noteq>\<^sup>Lx'))) w"
proof(rule allI)
fix x
show "(\<^bold>\<forall>y. \<^bold>\<forall>x'. \<^bold>\<forall>z. T x y \<^bold>\<rightarrow> \<^bold>\<box>(((y\<^bold>\<noteq>\<^sup>Lz) \<^bold>\<and> (T x' z)) \<^bold>\<rightarrow> (x\<^bold>\<noteq>\<^sup>Lx'))) w"
proof(rule allI)
fix y
show "(\<^bold>\<forall>x'. \<^bold>\<forall>z. T x y \<^bold>\<rightarrow> \<^bold>\<box>(((y\<^bold>\<noteq>\<^sup>Lz) \<^bold>\<and> (T x' z)) \<^bold>\<rightarrow> (x\<^bold>\<noteq>\<^sup>Lx'))) w"
proof(rule allI)
fix x'
show "(\<^bold>\<forall>z. T x y \<^bold>\<rightarrow> \<^bold>\<box>(((y\<^bold>\<noteq>\<^sup>Lz) \<^bold>\<and> (T x' z)) \<^bold>\<rightarrow> (x\<^bold>\<noteq>\<^sup>Lx'))) w"
proof(rule allI)
fix z
show "(T x y \<^bold>\<rightarrow> \<^bold>\<box>(((y\<^bold>\<noteq>\<^sup>Lz) \<^bold>\<and> (T x' z)) \<^bold>\<rightarrow> (x\<^bold>\<noteq>\<^sup>Lx'))) w"
(*>*)
proof(rule impI)
assume table_x1_from_y1: "T x y w"
show "(\<^bold>\<box>(((y\<^bold>\<noteq>\<^sup>Lz) \<^bold>\<and> (T x' z)) \<^bold>\<rightarrow> (x\<^bold>\<noteq>\<^sup>Lx'))) w"
proof(rule allI)
fix v
show "(w r v) \<longrightarrow> (((((y\<^bold>\<noteq>\<^sup>Lz) \<^bold>\<and> (T x' z)) \<^bold>\<rightarrow> (x\<^bold>\<noteq>\<^sup>Lx'))) v)"
proof (rule impI)
assume "w r v"
show "(((((y\<^bold>\<noteq>\<^sup>Lz) \<^bold>\<and> (T x' z)) \<^bold>\<rightarrow> (x\<^bold>\<noteq>\<^sup>Lx'))) v)"
proof(rule impI)
assume antecedent: "(y\<^bold>\<noteq>\<^sup>Lz \<^bold>\<and> T x' z) v"
show "((x\<^bold>\<noteq>\<^sup>Lx')) v"
proof(rule notI)
assume identity: "(x\<^bold>=\<^sup>Lx') v"
from antecedent have table_x2_from_y2: "(T x' z) v" by (rule conjE)
from antecedent have non_overlapping: "(y\<^bold>\<noteq>\<^sup>Lz) v" by (rule conjE)
text\<open>\<^item> Deriving @{text "T x y \<^bold>\<and> T x' z"} in @{term u} using @{term compossibility6}\<close>
from compossibility6 have "(\<^bold>\<forall>x. \<^bold>\<forall>y. \<^bold>\<forall>x'. \<^bold>\<forall>z. T x y \<^bold>\<rightarrow> \<^bold>\<box>((y\<^bold>\<noteq>\<^sup>Lz \<^bold>\<and> T x' z) \<^bold>\<rightarrow> \<^bold>\<diamond>(T x y \<^bold>\<and> T x' z))) w"..
(*<*)
then have "(\<^bold>\<forall>y. \<^bold>\<forall>x'. \<^bold>\<forall>z. T x y \<^bold>\<rightarrow> \<^bold>\<box>((y\<^bold>\<noteq>\<^sup>Lz \<^bold>\<and> T x' z) \<^bold>\<rightarrow> \<^bold>\<diamond>(T x y \<^bold>\<and> T x' z))) w" by (rule allE)
then have "(\<^bold>\<forall>x'. \<^bold>\<forall>z. T x y \<^bold>\<rightarrow> \<^bold>\<box>((y\<^bold>\<noteq>\<^sup>Lz \<^bold>\<and> T x' z) \<^bold>\<rightarrow> \<^bold>\<diamond>(T x y \<^bold>\<and> T x' z))) w" by (rule allE)
then have "(\<^bold>\<forall>z. T x y \<^bold>\<rightarrow> \<^bold>\<box>((y\<^bold>\<noteq>\<^sup>Lz \<^bold>\<and> T x' z) \<^bold>\<rightarrow> \<^bold>\<diamond>(T x y \<^bold>\<and> T x' z))) w" by (rule allE)
then have "(T x y \<^bold>\<rightarrow> \<^bold>\<box>((y\<^bold>\<noteq>\<^sup>Lz \<^bold>\<and> T x' z) \<^bold>\<rightarrow> \<^bold>\<diamond>(T x y \<^bold>\<and> T x' z))) w" by (rule allE)
(*>*)
then have "(\<^bold>\<box>((y\<^bold>\<noteq>\<^sup>Lz \<^bold>\<and> T x' z) \<^bold>\<rightarrow> \<^bold>\<diamond>(T x y \<^bold>\<and> T x' z))) w"
using table_x1_from_y1 by (rule mp)
then have "((y\<^bold>\<noteq>\<^sup>Lz \<^bold>\<and> T x' z) \<^bold>\<rightarrow> \<^bold>\<diamond>(T x y \<^bold>\<and> T x' z)) v" using `w r v` by auto
then have "\<^bold>\<diamond>(T x y \<^bold>\<and> T x' z) v" using antecedent by (rule mp)
then obtain u where u: "v r u \<and> ((T x y \<^bold>\<and> T x' z) u)" by (rule exE)
text\<open>\<^item> Framing the @{term origin_uniqueness6_ante}\<close>
then have origin_uniqueness6_ante: "(((y\<^bold>\<noteq>\<^sup>Lz) \<^bold>\<and> (T x y) \<^bold>\<and> (T x' z))) u"
using non_overlapping by auto
from u have "v r u" by (rule conjE)
text\<open>\<^item> Falsifying the @{term identity} using @{term origin_uniqueness6}\<close>
from origin_uniqueness6 have "(\<^bold>\<forall>x. \<^bold>\<forall>y. \<^bold>\<forall>x'. \<^bold>\<forall>z. (((y\<^bold>\<noteq>\<^sup>Lz) \<^bold>\<and> (T x y) \<^bold>\<and> (T x' z)) \<^bold>\<rightarrow> ((x\<^bold>\<noteq>\<^sup>Lx')))) u"..
(*<*)
then have "(\<^bold>\<forall>y. \<^bold>\<forall>x'. \<^bold>\<forall>z. (((y\<^bold>\<noteq>\<^sup>Lz) \<^bold>\<and> (T x y) \<^bold>\<and> (T x' z)) \<^bold>\<rightarrow> ((x\<^bold>\<noteq>\<^sup>Lx')))) u" by (rule allE)
then have "(\<^bold>\<forall>x'. \<^bold>\<forall>z. (((y\<^bold>\<noteq>\<^sup>Lz) \<^bold>\<and> (T x y) \<^bold>\<and> (T x' z)) \<^bold>\<rightarrow> ((x\<^bold>\<noteq>\<^sup>Lx')))) u" by (rule allE)
then have "(\<^bold>\<forall>z. (((y\<^bold>\<noteq>\<^sup>Lz) \<^bold>\<and> (T x y) \<^bold>\<and> (T x' z)) \<^bold>\<rightarrow> ((x\<^bold>\<noteq>\<^sup>Lx')))) u" by (rule allE)
then have "((((y\<^bold>\<noteq>\<^sup>Lz) \<^bold>\<and> (T x y) \<^bold>\<and> (T x' z)) \<^bold>\<rightarrow> ((x\<^bold>\<noteq>\<^sup>Lx')))) u" by (rule allE)
(*>*)
then have "((x\<^bold>\<noteq>\<^sup>Lx')) u" using origin_uniqueness6_ante by (rule mp)
then show "False" using identity and `v r u` by auto
(*<*)
qed
qed
qed
qed
qed
qed
qed
qed
qed
qed
(*>*)
end
|
Formal statement is: lemma locally_diff_closed: "\<lbrakk>locally P S; closedin (top_of_set S) t\<rbrakk> \<Longrightarrow> locally P (S - t)" Informal statement is: If $S$ is locally $P$ and $t$ is closed in $S$, then $S - t$ is locally $P$.
|
section\<open>Main ZF Theory: Everything Except AC\<close>
theory ZF imports List_ZF IntDiv_ZF CardinalArith begin
(*The theory of "iterates" logically belongs to Nat, but can't go there because
primrec isn't available until after Datatype.*)
subsection\<open>Iteration of the function @{term F}\<close>
consts iterates :: "[i=>i,i,i] => i" ("(_^_ '(_'))" [60,1000,1000] 60)
primrec
"F^0 (x) = x"
"F^(succ(n)) (x) = F(F^n (x))"
definition
iterates_omega :: "[i=>i,i] => i" ("(_^\<omega> '(_'))" [60,1000] 60) where
"F^\<omega> (x) == \<Union>n\<in>nat. F^n (x)"
lemma iterates_triv:
"[| n\<in>nat; F(x) = x |] ==> F^n (x) = x"
by (induct n rule: nat_induct, simp_all)
lemma iterates_type [TC]:
"[| n \<in> nat; a \<in> A; !!x. x \<in> A ==> F(x) \<in> A |]
==> F^n (a) \<in> A"
by (induct n rule: nat_induct, simp_all)
lemma iterates_omega_triv:
"F(x) = x ==> F^\<omega> (x) = x"
by (simp add: iterates_omega_def iterates_triv)
lemma Ord_iterates [simp]:
"[| n\<in>nat; !!i. Ord(i) ==> Ord(F(i)); Ord(x) |]
==> Ord(F^n (x))"
by (induct n rule: nat_induct, simp_all)
lemma iterates_commute: "n \<in> nat ==> F(F^n (x)) = F^n (F(x))"
by (induct_tac n, simp_all)
subsection\<open>Transfinite Recursion\<close>
text\<open>Transfinite recursion for definitions based on the
three cases of ordinals\<close>
definition
transrec3 :: "[i, i, [i,i]=>i, [i,i]=>i] =>i" where
"transrec3(k, a, b, c) ==
transrec(k, \<lambda>x r.
if x=0 then a
else if Limit(x) then c(x, \<lambda>y\<in>x. r`y)
else b(Arith.pred(x), r ` Arith.pred(x)))"
lemma transrec3_0 [simp]: "transrec3(0,a,b,c) = a"
by (rule transrec3_def [THEN def_transrec, THEN trans], simp)
lemma transrec3_succ [simp]:
"transrec3(succ(i),a,b,c) = b(i, transrec3(i,a,b,c))"
by (rule transrec3_def [THEN def_transrec, THEN trans], simp)
lemma transrec3_Limit:
"Limit(i) ==>
transrec3(i,a,b,c) = c(i, \<lambda>j\<in>i. transrec3(j,a,b,c))"
by (rule transrec3_def [THEN def_transrec, THEN trans], force)
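(*A hypothetical illustration of transrec3: ordinal addition j |-> i++j could be
  cast in this form as transrec3(j, i, %x r. succ(r), %x r. \<Union>y\<in>x. r`y),
  giving i++0 = i, i++succ(n) = succ(i++n), and a supremum at limit stages.*)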
declaration \<open>fn _ =>
Simplifier.map_ss (Simplifier.set_mksimps (fn ctxt =>
map mk_eq o Ord_atomize o Variable.gen_all ctxt))
\<close>
end
|
From mathcomp Require Import ssreflect ssrbool ssrfun eqtype ssrnat seq.
From mathcomp Require Import div choice fintype tuple finfun bigop.
From mathcomp Require Import prime binomial ssralg finset.
Set Implicit Arguments.
Unset Strict Implicit.
Unset Printing Implicit Defensive.
(* Erik Martin-Dorel, 2016 *)
(** * Tactic for rewriting under bigops *)
(** ** When the bigop appears in the goal *)
(** [under_big] allows one to apply a given tactic under the bigop
that corresponds to the specified arguments. *)
Ltac under_big b i Hi tac :=
let b' := eval hnf in b in
match b' with
| @BigOp.bigop ?R ?I ?idx ?r ?f =>
match f with
| fun x => @BigBody ?R ?I x ?op (@?P x) (@?F1 x) =>
(* erewrite (@eq_bigr R idx op I r P F1 _); (*not robust enough*) *)
pattern b;
match goal with
| [|- ?G b] =>
refine (@eq_rect_r _ _ G _ b
(@eq_bigr R idx op I r P F1 _ _ : _ = @BigOp.bigop _ _ _ _ (fun i => _)));
[|move=> i Hi; tac;
try reflexivity (* instead of "; first reflexivity" *) ];
cbv beta
end
end
end.
(** The following tactic can be used to add support for patterns to
tactic notation:
It will search for the first subterm of the goal matching [pat], and
then call [tac] with that subterm.
Inspired by Ralf Jung's post on 2016-02-25 to the ssreflect mailing list.
*)
Ltac find_pat pat tac :=
match goal with |- context [?x] =>
unify pat x with typeclass_instances;
tryif tac x then idtac else fail
end.
(** [under] allows one to apply a given tactic under some bigop:
if [pat] is a local variable (let-in) that appears in the goal,
only the occurrences of [pat] will be rewritten;
otherwise the occurrences of the first bigop that matches [pat]
will be rewritten. *)
Tactic Notation "under" open_constr(pat) simple_intropattern(i) simple_intropattern(Hi) tactic(tac) :=
tryif match goal with [|- context [pat]] => is_var pat end
then under_big pat i Hi tac
else find_pat pat ltac:(fun b => under_big b i Hi tac).
(** A shortcut when we want to rewrite the first occurrence of [bigop _ _ _] *)
Notation big := (bigop _ _ _) (only parsing).
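(* A minimal usage sketch (hypothetical goal; compare the commented-out
   tests at the end of this file): on a goal containing
   [\big[addn/0]_(i < n) (a i + b i)], the call
     under big i _ rewrite addnC.
   rewrites the body of the first bigop matching [big]. *)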
(** [swap under big ? _ tac] : shortcut for [(under big ? _ tac); last first] *)
Tactic Notation "swap" tactic(tac) :=
tac; last first.
(** ** When the bigop appears in some hypothesis *)
(** [under_big_in] allows one to apply a given tactic under the bigop
that corresponds to the specified arguments, in some hypothesis. *)
Ltac under_big_in H b i Hi tac :=
let b' := eval hnf in b in
match b' with
| @BigOp.bigop ?R ?I ?idx ?r ?f =>
match f with
| fun x => @BigBody ?R ?I x ?op (@?P x) (@?F1 x) =>
(* erewrite (@eq_bigr R idx op I r P F1 _); (*not robust enough*) *)
pattern b in H;
match type of H with
| ?G b =>
let e := fresh in
let new := fresh in
refine (let e := G _ in _);
shelve_unifiable;
suff new : e;
[ try clear H ; try rename new into H
| refine (@eq_rect _ _ G H _
(@eq_bigr R idx op I r P F1 _ _ : _ = @BigOp.bigop _ _ _ _ (fun i => _)));
move=> i Hi; tac;
try reflexivity (* instead of "; first reflexivity" *)
]; try unfold e in * |- *; try clear e ; cbv beta
end
end
end.
Ltac find_pat_in H pat tac :=
match type of H with context [?x] =>
unify pat x with typeclass_instances;
tryif tac x then idtac else fail
end.
(** [under...in] allows one to apply a given tactic under some bigop:
if [pat] is a local variable (let-in) that appears in H,
only the occurrences of [pat] will be rewritten;
otherwise the occurrences of the first bigop that matches [pat]
will be rewritten. *)
Tactic Notation "under" open_constr(pat) "in" hyp(H) simple_intropattern(i) simple_intropattern(Hi) tactic(tac) :=
tryif match type of H with context [pat] => is_var pat end
then under_big_in H pat i Hi tac
else find_pat_in H pat ltac:(fun b => under_big_in H b i Hi tac).
(** * Similar material, for the bigop predicates *)
(** ** When the bigop appears in the goal *)
(** [underp_big] allows one to apply a given tactic for rewriting the
predicate of the bigop corresponding to the specified arguments. *)
Ltac underp_big b i tac :=
let b' := eval hnf in b in
match b' with
| @BigOp.bigop ?R ?I ?idx ?r ?f =>
match f with
| fun x => @BigBody ?R ?I x ?op (@?P1 x) (@?F x) =>
pattern b;
match goal with
| [|- ?G b] =>
refine (@eq_rect_r _ _ G _ b
(@eq_bigl R idx op I r P1 _ F _ : _ = @BigOp.bigop _ _ _ _ (fun i => _)));
[|move=> i; tac;
try reflexivity (* instead of "; first reflexivity" *) ];
cbv beta
end
end
end.
(** [underp] allows one to apply a given tactic for rewriting
some bigop predicate:
if [pat] is a local variable (let-in) that appears in the goal,
only the occurrences of [pat] will be rewritten;
otherwise the occurrences of the first bigop that matches [pat]
will be rewritten. *)
Tactic Notation "underp" open_constr(pat) simple_intropattern(i) tactic(tac) :=
tryif match goal with [|- context [pat]] => is_var pat end
then underp_big pat i tac
else find_pat pat ltac:(fun b => underp_big b i tac).
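(* Usage sketch, mirroring the commented-out test [testp1] below:
     underp big J rewrite setIT.
   rewrites the predicate of the first bigop matching [big],
   e.g. turning [#|J :&: [set: A]| == k] into [#|J| == k]. *)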
(** ** When the bigop appears in some hypothesis *)
(** [underp_big_in] allows one to apply a given tactic for rewriting the
predicate of the bigop corresponding to the specified arguments,
in some hypothesis. *)
Ltac underp_big_in H b i tac :=
let b' := eval hnf in b in
match b' with
| @BigOp.bigop ?R ?I ?idx ?r ?f =>
match f with
| fun x => @BigBody ?R ?I x ?op (@?P1 x) (@?F x) =>
pattern b in H;
match type of H with
| ?G b =>
let e := fresh in
let new := fresh in
refine (let e := G _ in _);
shelve_unifiable;
suff new : e;
[ try clear H ; try rename new into H
| refine (@eq_rect _ _ G H _
(@eq_bigl R idx op I r P1 _ F _ : _ = @BigOp.bigop _ _ _ _ (fun i => _)));
move=> i; tac;
try reflexivity (* instead of "; first reflexivity" *)
]; try unfold e in * |- *; try clear e ; cbv beta
end
end
end.
(** [underp...in] allows one to apply a given tactic for rewriting
some bigop predicate:
if [pat] is a local variable (let-in) that appears in H,
only the occurrences of [pat] will be rewritten;
otherwise the occurrences of the first bigop that matches [pat]
will be rewritten. *)
Tactic Notation "underp" open_constr(pat) "in" hyp(H) simple_intropattern(i) tactic(tac) :=
tryif match type of H with context [pat] => is_var pat end
then underp_big_in H pat i tac
else find_pat_in H pat ltac:(fun b => underp_big_in H b i tac).
(*
(** * Tests and examples *)
Section Tests.
(* A test lemma covering several testcases. *)
Let test1 (n : nat) (R : ringType) (f1 f2 g : nat -> R) :
(\big[+%R/0%R]_(i < n) ((f1 i + f2 i) * g i) +
\big[+%R/0%R]_(i < n) ((f1 i + f2 i) * g i) =
\big[+%R/0%R]_(i < n) ((f1 i + f2 i) * g i) +
\big[+%R/0%R]_(i < n) (f1 i * g i) + \big[+%R/0%R]_(i < n) (f2 i * g i))%R.
Proof.
set b1 := {2}(bigop _ _ _).
Fail under b1 x _ rewrite GRing.mulrDr.
under b1 x _ rewrite GRing.mulrDl. (* only b1 is rewritten *)
Undo 1. rewrite /b1.
under b1 x _ rewrite GRing.mulrDl. (* 3 occurrences are rewritten *)
rewrite big_split /=.
by rewrite GRing.addrA.
Qed.
(* A test with a side-condition. *)
Let test2 (n : nat) (R : fieldType) (f : nat -> R) :
(forall k : 'I_n, f k != 0%R) ->
(\big[+%R/0%R]_(k < n) (f k / f k) = n%:R)%R.
Proof.
move=> Hneq0.
swap under big ? _ rewrite GRing.divff. (* the bigop variable becomes "i" *)
done.
rewrite big_const cardT /= size_enum_ord /GRing.natmul.
case: {Hneq0} n =>// n.
by rewrite iteropS iterSr GRing.addr0.
Qed.
(* Another test lemma when the bigop appears in some hypothesis *)
Let test3 (n : nat) (R : fieldType) (f : nat -> R) :
(forall k : 'I_n, f k != 0%R) ->
(\big[+%R/0%R]_(k < n) (f k / f k) +
\big[+%R/0%R]_(k < n) (f k / f k) = n%:R + n%:R)%R -> True.
Proof.
move=> Hneq0 H.
set b1 := {2}big in H.
under b1 in H ? _ rewrite GRing.divff. (* only b1 is rewritten *)
done.
Undo 2.
move: H.
under b1 ? _ rewrite GRing.divff.
done.
done.
Qed.
(* A test lemma for [underp] *)
Let testp1 (A : finType) (n : nat) (F : A -> nat) :
\big[addn/O]_(0 <= k < n)
\big[addn/O]_(J in {set A} | #|J :&: [set: A]| == k)
\big[addn/O]_(j in J) F j >= 0.
Proof.
under big k _ underp big J rewrite setIT. (* the bigop variables are kept *)
done.
Qed.
(* A test lemma for [underp...in] *)
Let testp2 (A : finType) (n : nat) (F : A -> nat) :
\big[addn/O]_(J in {set A} | #|J :&: [set: A]| == 1)
\big[addn/O]_(j in J) F j = \big[addn/O]_(j in A) F j -> True.
Proof.
move=> H.
underp big in H J rewrite setIT. (* the bigop variable "J" is kept *)
done.
Qed.
End Tests.
*)
|
proposition Cauchy_theorem_homotopic_loops: assumes hom: "homotopic_loops S g h" and "open S" and f: "f holomorphic_on S" and vpg: "valid_path g" and vph: "valid_path h" shows "contour_integral g f = contour_integral h f"
|
from mountaintools import client as mt
import mlprocessors as mlpr
from ..pycommon.nwb_to_dict import nwb_to_dict
import numpy as np
import imageio
import base64
class ExtractTwoPhotonSeriesMp4(mlpr.Processor):
    NAME = 'ExtractTwoPhotonSeriesMp4'
    VERSION = '0.1.1'

    # Inputs
    nwb_in = mlpr.Input()

    # Outputs
    mp4_out = mlpr.Output()

    def run(self):
        nwb_obj = nwb_to_dict(self.nwb_in, use_cache=True)
        npy_path = nwb_obj['acquisition']['TwoPhotonSeries']['_datasets']['data']['_data']
        npy_path2 = mt.realizeFile(npy_path)
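        # The cached NWB dict may reference a file that can no longer be
        # retrieved; if so, rebuild the dict without the cache and retry once.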
        if not npy_path2:
            nwb_obj = nwb_to_dict(self.nwb_in, use_cache=False)
            npy_path = nwb_obj['acquisition']['TwoPhotonSeries']['_datasets']['data']['_data']
            npy_path2 = mt.realizeFile(npy_path)
        if not npy_path2:
            self._set_error('Unable to realize npy file: {}'.format(npy_path))
            return
        X = np.load(npy_path2)
        # Note that there is a bug in imageio.mimwrite that prevents us from
        # writing to a memory buffer.
        # See: https://github.com/imageio/imageio/issues/157
        imageio.mimwrite(self.mp4_out, X, format='mp4', fps=10)
class TwoPhotonSeries:
    def __init__(self):
        super().__init__()

    def javascript_state_changed(self, prev_state, state):
        self._set_status('running', 'Running TwoPhotonSeries')
        nwb_path = state.get('nwb_path', None)
        download_from = state.get('download_from', [])
        if not nwb_path:
            self._set_error('Missing nwb_path')
            return
        mt.configDownloadFrom(download_from)
        nwb_path2 = mt.realizeFile(nwb_path)
        if not nwb_path2:
            self._set_error('Unable to realize nwb file: {}'.format(nwb_path))
            return
        self._set_status('running', 'Extracting .mp4 data')
        outputs = ExtractTwoPhotonSeriesMp4.execute(nwb_in=nwb_path2, mp4_out={'ext': '.mp4'}).outputs
        self._set_status('running', 'Reading .mp4 data')
        mp4_fname = mt.realizeFile(outputs['mp4_out'])
        with open(mp4_fname, 'rb') as f:
            video_data = f.read()
        self._set_status('running', 'Encoding .mp4 data')
        video_data_b64 = base64.b64encode(video_data).decode()
        video_url = 'data:video/mp4;base64,{}'.format(video_data_b64)
        self._set_status('running', 'Setting .mp4 data to python state')
        self.set_python_state(dict(
            video_url=video_url,
            status='finished',
            status_message=''
        ))

    def _set_error(self, error_message):
        self._set_status('error', error_message)

    def _set_status(self, status, status_message=''):
        self.set_python_state(dict(status=status, status_message=status_message))
|
[GOAL]
E : Type u_1
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : TopologicalSpace E
inst✝¹ : ContinuousAdd E
inst✝ : ContinuousSMul ℝ E
s : Set E
x : E
h : StarConvex ℝ x s
hne : Set.Nonempty s
⊢ ContractibleSpace ↑s
[PROOFSTEP]
refine'
(contractible_iff_id_nullhomotopic s).2
⟨⟨x, h.mem hne⟩, ⟨⟨⟨fun p => ⟨p.1.1 • x + (1 - p.1.1) • (p.2 : E), _⟩, _⟩, fun x => _, fun x => _⟩⟩⟩
[GOAL]
case refine'_1
E : Type u_1
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : TopologicalSpace E
inst✝¹ : ContinuousAdd E
inst✝ : ContinuousSMul ℝ E
s : Set E
x : E
h : StarConvex ℝ x s
hne : Set.Nonempty s
p : ↑unitInterval × ↑s
⊢ ↑p.fst • x + (1 - ↑p.fst) • ↑p.snd ∈ s
[PROOFSTEP]
exact h p.2.2 p.1.2.1 (sub_nonneg.2 p.1.2.2) (add_sub_cancel'_right _ _)
[GOAL]
case refine'_2
E : Type u_1
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : TopologicalSpace E
inst✝¹ : ContinuousAdd E
inst✝ : ContinuousSMul ℝ E
s : Set E
x : E
h : StarConvex ℝ x s
hne : Set.Nonempty s
⊢ Continuous fun p =>
{ val := ↑p.fst • x + (1 - ↑p.fst) • ↑p.snd, property := (_ : ↑p.fst • x + (1 - ↑p.fst) • ↑p.snd ∈ s) }
[PROOFSTEP]
exact
((continuous_subtype_val.fst'.smul continuous_const).add
((continuous_const.sub continuous_subtype_val.fst').smul continuous_subtype_val.snd')).subtype_mk
_
[GOAL]
case refine'_3
E : Type u_1
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : TopologicalSpace E
inst✝¹ : ContinuousAdd E
inst✝ : ContinuousSMul ℝ E
s : Set E
x✝ : E
h : StarConvex ℝ x✝ s
hne : Set.Nonempty s
x : ↑s
⊢ ContinuousMap.toFun
(ContinuousMap.mk fun p =>
{ val := ↑p.fst • x✝ + (1 - ↑p.fst) • ↑p.snd, property := (_ : ↑p.fst • x✝ + (1 - ↑p.fst) • ↑p.snd ∈ s) })
(0, x) =
↑(ContinuousMap.id ↑s) x
[PROOFSTEP]
ext1
[GOAL]
case refine'_3.a
E : Type u_1
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : TopologicalSpace E
inst✝¹ : ContinuousAdd E
inst✝ : ContinuousSMul ℝ E
s : Set E
x✝ : E
h : StarConvex ℝ x✝ s
hne : Set.Nonempty s
x : ↑s
⊢ ↑(ContinuousMap.toFun
(ContinuousMap.mk fun p =>
{ val := ↑p.fst • x✝ + (1 - ↑p.fst) • ↑p.snd, property := (_ : ↑p.fst • x✝ + (1 - ↑p.fst) • ↑p.snd ∈ s) })
(0, x)) =
↑(↑(ContinuousMap.id ↑s) x)
[PROOFSTEP]
simp
[GOAL]
case refine'_4
E : Type u_1
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : TopologicalSpace E
inst✝¹ : ContinuousAdd E
inst✝ : ContinuousSMul ℝ E
s : Set E
x✝ : E
h : StarConvex ℝ x✝ s
hne : Set.Nonempty s
x : ↑s
⊢ ContinuousMap.toFun
(ContinuousMap.mk fun p =>
{ val := ↑p.fst • x✝ + (1 - ↑p.fst) • ↑p.snd, property := (_ : ↑p.fst • x✝ + (1 - ↑p.fst) • ↑p.snd ∈ s) })
(1, x) =
↑(ContinuousMap.const ↑s { val := x✝, property := (_ : x✝ ∈ s) }) x
[PROOFSTEP]
ext1
[GOAL]
case refine'_4.a
E : Type u_1
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : TopologicalSpace E
inst✝¹ : ContinuousAdd E
inst✝ : ContinuousSMul ℝ E
s : Set E
x✝ : E
h : StarConvex ℝ x✝ s
hne : Set.Nonempty s
x : ↑s
⊢ ↑(ContinuousMap.toFun
(ContinuousMap.mk fun p =>
{ val := ↑p.fst • x✝ + (1 - ↑p.fst) • ↑p.snd, property := (_ : ↑p.fst • x✝ + (1 - ↑p.fst) • ↑p.snd ∈ s) })
(1, x)) =
↑(↑(ContinuousMap.const ↑s { val := x✝, property := (_ : x✝ ∈ s) }) x)
[PROOFSTEP]
simp
|
The Virginia Institute of Autism’s pioneering social impact program has struck a chord with news organizations around the state and in the District of Columbia. On September 10th, the Charlottesville Daily Progress ran a front-page story on VIAble Ventures, our program that develops micro-businesses specifically designed to employ adults with autism.
The Associated Press picked up the story and it was published by multiple regional news sites, including Lynchburg, Lexington (KY), Tidewater, and a DC television station.
|