import numpy
import logging
import sys
from apgl.graph import *
from apgl.generator import *
from apgl.util import *

logging.basicConfig(stream=sys.stdout, level=logging.INFO)

class MatrixGraphProfile(object):
    def __init__(self):
        self.n = 1000
        self.m = 10000
        numFeatures = 0
        vList = VertexList(self.n, numFeatures)
        #self.graph = SparseGraph(vList)
        #self.graph = DenseGraph(vList)
        self.graph = PySparseGraph(vList)

    def profileAddEdge(self):
        V = numpy.random.randint(0, self.n, (self.m, 2))
        u = numpy.random.rand(self.m)

        def runAdd():
            for i in range(self.m):
                self.graph.addEdge(V[i, 0], V[i, 1], u[i])

        ProfileUtils.profile('runAdd()', globals(), locals())

    def profileGetEdge(self):
        V = numpy.random.randint(0, self.n, (self.m, 2))
        u = numpy.random.rand(self.m)
        numEdges = 1000
        for i in range(numEdges):
            self.graph.addEdge(V[i, 0], V[i, 1], u[i])

        def runGet():
            for i in range(self.m):
                u = self.graph.getEdge(V[i, 0], V[i, 1])

        ProfileUtils.profile('runGet()', globals(), locals())

    def profileNeighbours(self):
        V = numpy.random.randint(0, self.n, (self.m, 2))
        u = numpy.random.rand(self.m)
        numEdges = 1000
        for i in range(numEdges):
            self.graph.addEdge(V[i, 0], V[i, 1], u[i])
        v = numpy.random.randint(0, self.n, self.m)

        def runNeighbours():
            for i in range(self.m):
                u = self.graph.neighbours(v[i])

        ProfileUtils.profile('runNeighbours()', globals(), locals())

profiler = MatrixGraphProfile()
#profiler.profileGetEdge()
profiler.profileNeighbours()
#PySparseGraph much faster
|
[STATEMENT]
lemma SkipRule: "p \<subseteq> q \<Longrightarrow> Valid p (Basic id) a q"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. p \<subseteq> q \<Longrightarrow> Valid p SKIP a q
[PROOF STEP]
by (auto simp:Valid_def)
|
# Quantum Channel Discrimination
Date : December 30, 2021
This notebook contains material supporting a paper, currently titled *Five Starter Pieces: Quantum Information Science via Semi-definite Programs*, by Vikesh Siddhu ([email protected]) and Sridhar Tayur ([email protected]). The paper is available at this **[arXiv](http://arxiv.org/abs/2112.08276)** link. The arXiv paper is released under the **[arXiv.org perpetual, non-exclusive license](https://arxiv.org/licenses/nonexclusive-distrib/1.0/license.html)**, and this code is released under the **[MIT license](https://opensource.org/licenses/MIT)**.
This notebook depends upon various packages including [numpy](https://numpy.org/) >= 1.19.5, [picos](https://picos-api.gitlab.io/picos/index.html) >= 2.2.55, and [cvxopt](http://cvxopt.org/) >= 1.2.5.
[Open in Colab](https://colab.research.google.com/github/vsiddhu/SDP-Quantum-OR/blob/master/Notebook%203%20-%20Quantum%20Channel%20Discrimination.ipynb)
## Introduction
The probability of correctly distinguishing two uniformly chosen quantum channels $\mathcal{B}_1$ and $\mathcal{B}_2$ without using entangled inputs is
\begin{equation}
q^* = \frac{1}{2}(1 + \frac{1}{2} \max_{\rho}|| \mathcal{B}_1(\rho) - \mathcal{B}_2(\rho) ||_1)
\end{equation}
and using entangled inputs is
\begin{equation}
s^* = \frac{1}{2}(1 + \frac{1}{2}|| \mathcal{B}_1 - \mathcal{B}_2 ||_{\diamond}).
\end{equation}
Here $||.||_{\diamond}$ refers to the diamond norm of a linear map discussed next.
### Diamond Norm
A linear map $\mathcal{B}:\hat{ \mathcal{H}_a} \mapsto \hat{ \mathcal{H}_b}$ has diamond norm
$$
|| \mathcal{B} ||_{\diamond} =
\max_{ ||\rho_{aa}||_1 \leq 1} ||\mathcal{B} \otimes \mathcal{I} (\rho_{aa})||_1
$$
where $\mathcal{I}$ is the identity map on $\hat{ \mathcal{H}_a}$, and $||X||_1 = \rm Tr (\sqrt{X X^{\dagger}})$.
The map $\mathcal{B}$ has a Choi-Jamiolkowski representation
$$
\mathcal{J}_{ba}(\mathcal{B}) = \mathcal{B} \otimes \mathcal{I} (\gamma)
$$
where $\gamma = \sum_{ij} | ii \rangle \langle jj|$. Using this representation one can compute $|| \mathcal{B} ||_{\diamond}$ as the optimum value of the semi-definite program (SDP)
\begin{align}
\begin{aligned}
\text{maximize} & \; \frac{1}{2} \; \big( \rm Tr(\mathcal{J}_{ba}(\mathcal{B})X) + \rm Tr(\mathcal{J}_{ba}(\mathcal{B})X)^* \big) & \\
\text{subject to} & \;
\begin{pmatrix}
I_b \otimes \rho_a & X \\
X^{\dagger} & I_b \otimes \sigma_a
\end{pmatrix}
\succeq 0, & \\
& \rm Tr(\rho_a) = 1, & \\
& \rm Tr(\sigma_a) = 1, & \\
\text{and} \; & \quad X \in \hat{\mathcal{H}}_{ba},
\end{aligned}
\end{align}
where $I_b$ is the identity matrix on $\mathcal{H}_b$. The SDP above has a dual formulation,
\begin{align}
\begin{aligned}
\text{minimize} & \; \frac{1}{2} \big( \mu + \nu \big) & \\
\text{subject to} & \;
\begin{pmatrix}
N_{ba} & -\mathcal{J}_{ba}(\mathcal{B})\\
-\mathcal{J}_{ba}(\mathcal{B})^{\dagger} & M_{ba}
\end{pmatrix}
\succeq 0,\\
& \; \rm Tr_b (N_{ba}) \preceq \mu I_a \\
\text{and} & \; \rm Tr_b (M_{ba}) \preceq \nu I_a.
\end{aligned}
\end{align}
In what follows, we consider various examples of this SDP.
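To make the Choi-Jamiolkowski construction above concrete, here is a small numpy sketch (our illustration, not part of the original notebook; the helper name `cjFromKraus` is ours) that builds $\gamma$ and $\mathcal{J}_{ba}(\mathcal{B}) = \sum_k (K_k \otimes I)\, \gamma\, (K_k \otimes I)^{\dagger}$ from a Kraus representation $\mathcal{B}(\rho) = \sum_k K_k \rho K_k^{\dagger}$.
```python
import numpy as np

def cjFromKraus(krausOps, d):
    # gamma = |v><v| with v = sum_i |ii>, i.e. the unnormalized
    # gamma = sum_ij |ii><jj| used in the text
    v = sum(np.kron(np.eye(d)[:, [i]], np.eye(d)[:, [i]]) for i in range(d))
    gamma = v @ v.conj().T
    # Apply the channel to the first factor and the identity to the second
    return sum(np.kron(K, np.eye(d)) @ gamma @ np.kron(K, np.eye(d)).conj().T
               for K in krausOps)

# Sanity check: the identity channel (single Kraus operator I) gives gamma itself
print(cjFromKraus([np.eye(2)], 2).real)
```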
```python
%pylab inline
```
Populating the interactive namespace from numpy and matplotlib
```python
# For Google Colab use, commands installing packages
try:
    import google.colab
    IN_COLAB = True
except ImportError:
    IN_COLAB = False

# Install PICOS and CVXOPT in Google Colab
if IN_COLAB:
    !pip install -q picos
    !pip install -q cvxopt
```
```python
import picos as pic
import cvxopt as cvx
```
```python
print('Solvers supported on this installation of picos:', pic.solvers.all_solvers().keys())
```
Solvers supported on this installation of picos: dict_keys(['cplex', 'cvxopt', 'ecos', 'glpk', 'gurobi', 'mosek', 'mskfsn', 'scip', 'smcp'])
```python
print('Solvers available to picos on this machine :', pic.solvers.available_solvers())
```
Solvers available to picos on this machine : ['cvxopt', 'mosek', 'mskfsn']
### Example 1
Diamond norm of a qubit depolarizing channel
\begin{equation}
\Delta(\rho) = \lambda \rho + \frac{(1-\lambda)}{2} \rm Tr(\rho) I,
\end{equation}
where $-1/3 \leq \lambda \leq 1$. The Choi-Jamiolkowski representation of this channel is
\begin{equation}
\mathcal{J}_{ba}(\Delta) =
\begin{pmatrix}
(1 + \lambda)/2 & 0 & 0 & \lambda \\
0 & (1 - \lambda)/2 & 0 & 0 \\
0 & 0 & (1 - \lambda)/2 & 0 \\
\lambda & 0 & 0 & (1 + \lambda)/2
\end{pmatrix}.
\end{equation}
The depolarizing channel above can also be written using the parameter $p := 3(1-\lambda)/4$ (equivalently, $\lambda = 1 - 4p/3$, the relation used in the code below), which lies between $0$ and $1$.
```python
#Example 1 using the primal formulation
cjMat = lambda lm : np.array([[(1. + lm)/2, 0., 0., lm],
                              [0., (1. - lm)/2, 0., 0.],
                              [0., 0., (1. - lm)/2, 0.],
                              [lm, 0., 0., (1 + lm)/2]])

p = 1
lmVal = 1.-4.*p/3.
da = 2
db = 2
gammaBA = cjMat(lmVal)
```
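As a hedged sanity check (ours, reusing the `cjFromKraus` sketch from the introduction), the matrix returned by `cjMat` agrees with the Choi matrix built from the standard Kraus operators $\{\sqrt{1-p}\, I, \sqrt{p/3}\, X, \sqrt{p/3}\, Y, \sqrt{p/3}\, Z\}$ of the depolarizing channel.
```python
X = np.array([[0., 1.], [1., 0.]])
Y = np.array([[0., -1j], [1j, 0.]])
Z = np.array([[1., 0.], [0., -1.]])
kraus = [np.sqrt(1. - p)*np.eye(2), np.sqrt(p/3.)*X, np.sqrt(p/3.)*Y, np.sqrt(p/3.)*Z]
print(np.allclose(cjFromKraus(kraus, 2), gammaBA))   # expect True
```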
```python
#Constants
#----------
gammaPic = pic.Constant("gammaBA", gammaBA)
shpBA = np.shape( gammaBA )
shpB = np.shape(pic.partial_trace(gammaPic, subsystems=(1),dimensions=2))
shpA = np.shape(pic.partial_trace(gammaPic, subsystems=(0),dimensions=2))
iMatB = pic.Constant('Ib', np.eye(shpB[0]))
#Variables
#----------
rhoPic = pic.HermitianVariable("rhoA", shpA)
sigPic = pic.HermitianVariable("sigA", shpA)
XPic = pic.ComplexVariable("X", shpBA)
```
```python
prob1P = pic.Problem()
#Constraint
#----------
prob1P.add_constraint(((iMatB @ rhoPic & XPic) // (XPic.H & iMatB @ sigPic)) >> 0)
prob1P.add_constraint(pic.trace(rhoPic) == 1)
prob1P.add_constraint(pic.trace(sigPic) == 1)
#Objective
#----------
obj = pic.trace(gammaPic | XPic).real
prob1P.set_objective('max',obj)
```
```python
#User-readable view of the problem composed in PICOS
print(prob1P)
```
-------------------------------------
Complex Semidefinite Program
maximize Re(⟨gammaBA, X⟩)
over
4×4 complex variable X
2×2 hermitian variable rhoA, sigA
subject to
[Ib⊗rhoA, X; Xᴴ, Ib⊗sigA] ≽ 0
tr(rhoA) = 1
tr(sigA) = 1
-------------------------------------
```python
#Solve the problem using cvxopt as a solver
prob1P.solve(verbosity=False,solver='cvxopt')
```
<primal feasible solution pair (claimed optimal) from cvxopt>
```python
#Solver claims to have found optimal solution
dNorm1P = prob1P.value
```
```python
#Example 1 Dual Formulation
#Constants
#----------
iMatA = pic.Constant('Ia', np.eye(shpA[0]))
#Variables
#----------
NPicBA = pic.HermitianVariable("Nba", shpBA)
MPicBA = pic.HermitianVariable("Mba", shpBA)
mu = pic.RealVariable("mu")
nu = pic.RealVariable("nu")
```
```python
prob1D = pic.Problem()
#Constraint
#----------
prob1D.add_constraint(((NPicBA & -gammaPic) // (-gammaPic.H & MPicBA)) >> 0)
NPicA = pic.partial_trace(NPicBA,subsystems=(0),dimensions=2)
MPicA = pic.partial_trace(MPicBA,subsystems=(0),dimensions=2)
prob1D.add_constraint(MPicA<<mu*iMatA)
prob1D.add_constraint(NPicA<<nu*iMatA)
#Objective
#----------
obj = (mu + nu)/2
prob1D.set_objective('min',obj)
```
```python
#User-readable view of the problem composed in PICOS
print(prob1D)
```
---------------------------------------
Complex Semidefinite Program
minimize (mu + nu)/2
over
4×4 hermitian variable Mba, Nba
1×1 real variable mu, nu
subject to
[Nba, -gammaBA; -gammaBAᵀ, Mba] ≽ 0
Mba.{tr([2×2])⊗[2×2]} ≼ mu·Ia
Nba.{tr([2×2])⊗[2×2]} ≼ nu·Ia
---------------------------------------
```python
#Solve the problem using cvxopt as a solver
prob1D.solve(verbosity=False,solver='cvxopt')
```
<primal feasible solution pair (claimed optimal) from cvxopt>
```python
#Solver claims to have found optimal solution
dNorm1D = prob1D.value
```
```python
dNorm1Alg = 1.0
print('Diamond Norm of a Depolarizing Channel')
print('Using Primal SDP = ', dNorm1P)
print('Using Dual SDP = ', dNorm1D)
print('Actual Value', dNorm1Alg)
print('Difference between algebraic and primal numeric', abs(dNorm1Alg - dNorm1P))
print('Difference between algebraic and dual numeric', abs(dNorm1Alg - dNorm1D))
```
Diamond Norm of a Depolarizing Channel
Using Primal SDP = 0.9999999999206617
Using Dual SDP = 0.9999999999718845
Actual Value 1.0
Difference between algebraic and primal numeric 7.933831369655309e-11
Difference between algebraic and dual numeric 2.811550992021239e-11
* As mentioned in Sec. 6, any quantum channel (completely positive trace preserving map) $\mathcal{B}$ has a diamond norm $|| \mathcal{B}||_{\diamond} = 1$. The numerics above simply confirm this fact in the case of a qubit depolarizing channel. In what follows we discuss the advantage of using entanglement to distinguish quantum channels.
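Since the same primal SDP is rebuilt for each example below, it can be convenient to package the construction into a helper. The following wrapper is our sketch (not part of the original notebook; the function name and signature are ours), mirroring the PICOS calls used above, for an arbitrary Choi matrix on $b \otimes a$.
```python
def diamondNormPrimal(J, db, da, solver='cvxopt'):
    # Primal diamond-norm SDP for the Choi matrix J of a map from a to b
    JPic = pic.Constant("Jba", J)
    iB = pic.Constant('Ib', np.eye(db))
    rho = pic.HermitianVariable("rhoA", (da, da))
    sig = pic.HermitianVariable("sigA", (da, da))
    Xv = pic.ComplexVariable("X", np.shape(J))
    prob = pic.Problem()
    prob.add_constraint(((iB @ rho & Xv) // (Xv.H & iB @ sig)) >> 0)
    prob.add_constraint(pic.trace(rho) == 1)
    prob.add_constraint(pic.trace(sig) == 1)
    prob.set_objective('max', pic.trace(JPic | Xv).real)
    prob.solve(verbosity=False, solver=solver)
    return prob.value

# e.g. any channel's Choi matrix should give a value close to 1:
# diamondNormPrimal(cjMat(0.25), 2, 2)
```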
### Example 2: Distinguishing Depolarizing from Identity
In the notation of the introduction at the top of this notebook, let $\mathcal{B}_1$ be the qubit identity channel $\mathcal{I}$ and $\mathcal{B}_2$ be the qubit depolarizing channel
\begin{equation}
\Delta(\rho) = \lambda \rho + \frac{(1-\lambda)}{2} \rm Tr(\rho) I,
\end{equation}
where $-1/3 \leq \lambda \leq 1$.
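For reference, here is a short derivation (ours, following the definitions in the introduction) of the no-entanglement success probability used below. Since
\begin{equation}
\rho - \Delta(\rho) = (1-\lambda)\Big(\rho - \frac{1}{2} I\Big)
\end{equation}
for any density operator $\rho$, and $|| \rho - \frac{1}{2} I ||_1$ attains its maximum value $1$ at pure states, the formula for $q^*$ gives
\begin{equation}
q^* = \frac{1}{2}\Big(1 + \frac{1-\lambda}{2}\Big) = \frac{3-\lambda}{4}.
\end{equation}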
```python
#Example 2. Using the primal formulation
upsBA = cjMat(1)
lmVal = -1/3
thetaBA = cjMat(lmVal)
gamma2BA = upsBA - thetaBA
```
```python
#Primal SDP
#Constants
#----------
gamma2Pic = pic.Constant("gamma2BA", gamma2BA)
shpBA = np.shape( gamma2Pic )
shpB = np.shape(pic.partial_trace(gamma2Pic, subsystems=(1),dimensions=2))
shpA = np.shape(pic.partial_trace(gamma2Pic, subsystems=(0),dimensions=2))
iMatB = pic.Constant('Ib', np.eye(shpB[0]))
#Variables
#----------
rhoPic = pic.HermitianVariable("rhoA", shpA)
sigPic = pic.HermitianVariable("sigA", shpA)
XPic = pic.ComplexVariable("X", shpBA)
prob2P = pic.Problem()
#Constraint
#----------
prob2P.add_constraint(((iMatB @ rhoPic & XPic ) // (XPic.H & iMatB @ sigPic)) >> 0)
prob2P.add_constraint(pic.trace(rhoPic) == 1)
prob2P.add_constraint(pic.trace(sigPic) == 1)
#Objective
#----------
obj = pic.trace(gamma2Pic | XPic).real
prob2P.set_objective('max',obj)
```
```python
#User-readable view of the problem composed in PICOS
print(prob2P)
```
-------------------------------------
Complex Semidefinite Program
maximize Re(⟨gamma2BA, X⟩)
over
4×4 complex variable X
2×2 hermitian variable rhoA, sigA
subject to
[Ib⊗rhoA, X; Xᴴ, Ib⊗sigA] ≽ 0
tr(rhoA) = 1
tr(sigA) = 1
-------------------------------------
```python
#Solve the problem using cvxopt as a solver
prob2P.solve(verbosity=False,solver='cvxopt')
```
<primal feasible solution pair (claimed optimal) from cvxopt>
```python
#Solver claims to have found optimal solution
dNorm2P = prob2P.value
```
```python
#Example 2 Dual Formulation
#Constants
#----------
iMatA = pic.Constant('Ia', np.eye(shpA[0]))
#Variables
#----------
NPicBA = pic.HermitianVariable("Nba", shpBA)
MPicBA = pic.HermitianVariable("Mba", shpBA)
mu = pic.RealVariable("mu")
nu = pic.RealVariable("nu")
```
```python
prob2D = pic.Problem()
#Constraint
#----------
prob2D.add_constraint(((NPicBA & -gamma2Pic) // (-gamma2Pic.H & MPicBA)) >> 0)
NPicA = pic.partial_trace(NPicBA,subsystems=(0),dimensions=2)
MPicA = pic.partial_trace(MPicBA,subsystems=(0),dimensions=2)
prob2D.add_constraint(MPicA<<mu*iMatA)
prob2D.add_constraint(NPicA<<nu*iMatA)
#Objective
#----------
obj = (mu + nu)/2
prob2D.set_objective('min',obj)
```
```python
#User-readable view of the problem composed in PICOS
print(prob2D)
```
-----------------------------------------
Complex Semidefinite Program
minimize (mu + nu)/2
over
4×4 hermitian variable Mba, Nba
1×1 real variable mu, nu
subject to
[Nba, -gamma2BA; -gamma2BAᵀ, Mba] ≽ 0
Mba.{tr([2×2])⊗[2×2]} ≼ mu·Ia
Nba.{tr([2×2])⊗[2×2]} ≼ nu·Ia
-----------------------------------------
```python
#Solve the problem using cvxopt as a solver
prob2D.solve(verbosity=False,solver='cvxopt')
```
<primal feasible solution pair (claimed optimal) from cvxopt>
```python
#Solver claims to have found optimal solution
dNorm2D = prob2D.value
```
```python
print('Diamond Norm distance between identity and equal probability Pauli error Channel')
print('Using Primal SDP = ', dNorm2P)
print('Using Dual SDP = ', dNorm2D)
print('Difference between primal and dual values', abs(dNorm2D - dNorm2P))
```
Diamond Norm distance between identity and equal probability Pauli error Channel
Using Primal SDP = 1.9999999997151092
Using Dual SDP = 1.9999999999322486
Difference between primal and dual values 2.1713941755763244e-10
```python
pE = (1 + dNorm2D/2)/2
print('Probability of distinguishing with an entangled input s* = ', pE)
print('Probability of distinguishing without an entangled input q* = ', (3 - lmVal)/4)
```
Probability of distinguishing with an entangled input s* = 0.9999999999830622
Probability of distinguishing without an entangled input q* = 0.8333333333333334
The value pE obtained above agrees with the algebraic value $s^* = (7 - 3\lambda)/8$ stated in Sec. 6. This value is larger than $q^* = (3-\lambda)/4$, the probability of distinguishing without entangled inputs.
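As a quick hedged check (ours), one can compare `pE` with this algebraic expression directly:
```python
sStar = (7 - 3*lmVal)/8
print('Algebraic s* =', sStar, ' |pE - s*| =', abs(pE - sStar))
```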
### Example 3: Distinguishing Werner-Holevo channels
\begin{equation}
\mathcal{B}_1(\rho) = \frac{1}{d+1} \big( \rm Tr(\rho) I + \rho^T\big),
\quad \text{and} \quad
\mathcal{B}_2(\rho) = \frac{1}{d-1} \big( \rm Tr(\rho) I - \rho^T\big).
\end{equation}
These channels have Choi-Jamiolkowski representations
\begin{equation}
\mathcal{J}_{ba}(\mathcal{B}_1) = \frac{1}{d+1}( I \otimes I + S_{ba}),
\quad \text{and} \quad
\mathcal{J}_{ba}(\mathcal{B}_2) = \frac{1}{d-1}( I \otimes I - S_{ba}),
\end{equation}
respectively, where $S_{ba} |i \rangle |j \rangle = |j \rangle |i \rangle$. We compute their diamond norm distance
\begin{equation}
|| \mathcal{B}_1 - \mathcal{B}_2 ||_{\diamond},
\end{equation}
from which the probability $s^* = \frac{1}{2}(1 + \frac{1}{2}|| \mathcal{B}_1 - \mathcal{B}_2 ||_{\diamond})$ can be computed with relative ease.
```python
d = 3
iMatJoint = np.eye(d*d)
```
```python
wMatrix = np.zeros(shape=(d*d,d*d))
for i in range(d):
    for j in range(d):
        k1 = i*d + j
        k2 = j*d + i
        wMatrix[k1,k2] = 1.
```
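Two hedged sanity checks (ours): the swap operator built above is symmetric and squares to the identity, $S_{ba}^2 = I$.
```python
print(np.allclose(wMatrix, wMatrix.T), np.allclose(wMatrix @ wMatrix, iMatJoint))
```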
```python
thBA = (iMatJoint + wMatrix)/(d+1)
psiBA = (iMatJoint - wMatrix)/(d-1)
```
```python
#Example 3 using the primal formulation
#Constants
#----------
gamma3Pic = pic.Constant("gammaBA", thBA - psiBA)
shpBA = np.shape( gamma3Pic )
shpB = np.shape(pic.partial_trace(gamma3Pic, subsystems=(1),dimensions=d))
shpA = np.shape(pic.partial_trace(gamma3Pic, subsystems=(0),dimensions=d))
iMatB = pic.Constant('Ib', np.eye(shpB[0]))
#Variables
#----------
rhoPic = pic.HermitianVariable("rhoA", shpA)
sigPic = pic.HermitianVariable("sigA", shpA)
XPic = pic.ComplexVariable("X", shpBA)
prob3P = pic.Problem()
#Constraint
#----------
prob3P.add_constraint(((iMatB @ rhoPic & XPic) // (XPic.H & iMatB @ sigPic)) >> 0)
prob3P.add_constraint(pic.trace(rhoPic) == 1)
prob3P.add_constraint(pic.trace(sigPic) == 1)
#Objective
#----------
obj = pic.trace(gamma3Pic | XPic).real
prob3P.set_objective('max',obj)
```
```python
#User-readable view of the problem composed in PICOS
print(prob3P)
```
-------------------------------------
Complex Semidefinite Program
maximize Re(⟨gammaBA, X⟩)
over
9×9 complex variable X
3×3 hermitian variable rhoA, sigA
subject to
[Ib⊗rhoA, X; Xᴴ, Ib⊗sigA] ≽ 0
tr(rhoA) = 1
tr(sigA) = 1
-------------------------------------
```python
#Solve the problem using cvxopt as a solver
prob3P.solve(verbosity=False,solver='cvxopt')
```
<primal feasible solution pair (claimed optimal) from cvxopt>
```python
#Solver claims to have found optimal solution
dNorm3P = prob3P.value
```
```python
#Example 3 Dual Formulation
#Constants
#----------
iMatA = pic.Constant('Ia', np.eye(shpA[0]))
#Variables
#----------
NPicBA = pic.HermitianVariable("Nba", shpBA)
MPicBA = pic.HermitianVariable("Mba", shpBA)
mu = pic.RealVariable("mu")
nu = pic.RealVariable("nu")
prob3D = pic.Problem()
#Constraint
#----------
prob3D.add_constraint(((NPicBA & -gamma3Pic) // (-gamma3Pic.H & MPicBA)) >> 0)
NPicA = pic.partial_trace(NPicBA,subsystems=(0),dimensions=d)
MPicA = pic.partial_trace(MPicBA,subsystems=(0),dimensions=d)
prob3D.add_constraint(MPicA<<mu*iMatA)
prob3D.add_constraint(NPicA<<nu*iMatA)
#Objective
#----------
obj = (mu + nu)/2
prob3D.set_objective('min',obj)
```
```python
#User-readable view of the problem composed in PICOS
print(prob3D)
```
---------------------------------------
Complex Semidefinite Program
minimize (mu + nu)/2
over
9×9 hermitian variable Mba, Nba
1×1 real variable mu, nu
subject to
[Nba, -gammaBA; -gammaBAᵀ, Mba] ≽ 0
Mba.{tr([3×3])⊗[3×3]} ≼ mu·Ia
Nba.{tr([3×3])⊗[3×3]} ≼ nu·Ia
---------------------------------------
```python
#Solve the problem using cvxopt as a solver
prob3D.solve(verbosity=False,solver='cvxopt')
```
<primal feasible solution pair (claimed optimal) from cvxopt>
```python
#Solver claims to have found optimal solution
dNorm3D = prob3D.value
```
```python
print('Diamond Norm distance between the two Werner-Holevo channels')
print('Using Primal SDP = ', dNorm3P)
print('Using Dual SDP = ', dNorm3D)
print('Difference between primal and dual values', abs(dNorm3D - dNorm3P))
```
Diamond Norm distance between the two Werner-Holevo channels
Using Primal SDP = 1.9999999997341638
Using Dual SDP = 1.9999999999261795
Difference between primal and dual values 1.9201573664417992e-10
```python
pE = (1 + dNorm3D/2)/2
print('Probability of distinguishing with an entangled input s* = ', pE)
print('Probability of distinguishing without an entangled input q* = ', .5 + 1/(d+1))
```
Probability of distinguishing with an entangled input s* = 0.9999999999815449
Probability of distinguishing without an entangled input q* = 0.75
The value pE obtained above agrees with the algebraic value $s^* = 1$ stated in Sec. 6. This value is larger than $q^* = \frac{1}{2} + \frac{1}{d+1}$, the probability of distinguishing without entangled inputs.
|
#include "../common/print.hh"
#include "../word_index.hh"
#include "../../util/file.hh"
#include "../../util/read_compressed.hh"
#include <boost/lexical_cast.hpp>
#include <iostream>
#include <vector>
int main(int argc, char *argv[]) {
  if (argc != 4) {
    std::cerr << "Usage: " << argv[0] << " counts vocabulary order\n"
                 "The counts file contains records with 4-byte vocabulary ids followed by 8-byte\n"
                 "counts. Each record has order many vocabulary ids.\n"
                 "The vocabulary file contains the words delimited by NULL in order of id.\n"
                 "The vocabulary file may not be compressed because it is mmapped but the counts\n"
                 "file can be compressed.\n";
    return 1;
  }
  util::ReadCompressed counts(util::OpenReadOrThrow(argv[1]));
  util::scoped_fd vocab_file(util::OpenReadOrThrow(argv[2]));
  lm::VocabReconstitute vocab(vocab_file.get());
  unsigned int order = boost::lexical_cast<unsigned int>(argv[3]);
  std::vector<char> record(sizeof(uint32_t) * order + sizeof(uint64_t));
  while (std::size_t got = counts.ReadOrEOF(&*record.begin(), record.size())) {
    UTIL_THROW_IF(got != record.size(), util::Exception, "Read " << got << " bytes at the end of file, which is not a complete record of length " << record.size());
    const lm::WordIndex *words = reinterpret_cast<const lm::WordIndex*>(&*record.begin());
    for (const lm::WordIndex *i = words; i != words + order; ++i) {
      UTIL_THROW_IF(*i >= vocab.Size(), util::Exception, "Vocab ID " << *i << " is larger than the vocab file's maximum of " << vocab.Size() << ". Are you sure you have the right order and vocab file for these counts?");
      std::cout << vocab.Lookup(*i) << ' ';
    }
    // TODO don't use std::cout because it is slow. Add fast uint64_t printing support to FileStream.
    std::cout << *reinterpret_cast<const uint64_t*>(words + order) << '\n';
  }
}
|
/-
Copyright (c) 2020 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.category_theory.monoidal.functor
import Mathlib.PostPort
universes u v
namespace Mathlib
/-!
# Endofunctors as a monoidal category.
We give the monoidal category structure on `C ⥤ C`,
and show that when `C` itself is monoidal, it embeds via a monoidal functor into `C ⥤ C`.
## TODO
Can we use this to show coherence results, e.g. a cheap proof that `λ_ (𝟙_ C) = ρ_ (𝟙_ C)`?
I suspect this is harder than is usually made out.
-/
namespace category_theory
/--
The category of endofunctors of any category is a monoidal category,
with tensor product given by composition of functors
(and horizontal composition of natural transformations).
-/
def endofunctor_monoidal_category (C : Type u) [category C] : monoidal_category (C ⥤ C) :=
monoidal_category.mk (fun (F G : C ⥤ C) => F ⋙ G)
(fun (F G F' G' : C ⥤ C) (α : F ⟶ G) (β : F' ⟶ G') => α ◫ β) 𝟭
(fun (F G H : C ⥤ C) => functor.associator F G H) (fun (F : C ⥤ C) => functor.left_unitor F)
fun (F : C ⥤ C) => functor.right_unitor F
/--
Tensoring on the right gives a monoidal functor from `C` into endofunctors of `C`.
-/
@[simp] theorem tensoring_right_monoidal_to_lax_monoidal_functor_to_functor (C : Type u)
[category C] [monoidal_category C] :
lax_monoidal_functor.to_functor
(monoidal_functor.to_lax_monoidal_functor (tensoring_right_monoidal C)) =
monoidal_category.tensoring_right C :=
Eq.refl
(lax_monoidal_functor.to_functor
(monoidal_functor.to_lax_monoidal_functor (tensoring_right_monoidal C)))
end Mathlib
|
module Syntax.Number where

import Lvl
open import Logic.Propositional
open import Numeral.Natural
open import Type

record Numeral {ℓ} (T : Type{ℓ}) : Typeω where
  field
    {restriction-ℓ} : Lvl.Level
    restriction : ℕ → Type{restriction-ℓ}
    num : (n : ℕ) → ⦃ _ : restriction(n) ⦄ → T

open Numeral ⦃ ... ⦄ public using (num)

{-# BUILTIN FROMNAT num #-}

InfiniteNumeral = Numeral

module InfiniteNumeral {ℓ} {T : Type{ℓ}} where
  intro : (ℕ → T) → InfiniteNumeral(T)
  Numeral.restriction-ℓ (intro(_)) = Lvl.𝟎
  Numeral.restriction (intro(_)) _ = ⊤
  Numeral.num (intro(f)) n ⦃ _ ⦄ = f(n)

-- record InfiniteNumeral {ℓ} (T : Type{ℓ}) : Type{ℓ} where
--   field
--     num : ℕ → T

-- instance
--   Numeral-from-InfiniteNumeral : ∀{ℓ}{T} → ⦃ _ : InfiniteNumeral{ℓ}(T) ⦄ → Numeral{ℓ}(T)
--   Numeral.restriction-ℓ ( Numeral-from-InfiniteNumeral ) = Lvl.𝟎
--   Numeral.restriction ( Numeral-from-InfiniteNumeral ) (_) = ⊤
--   num ⦃ Numeral-from-InfiniteNumeral ⦃ infNum ⦄ ⦄ (n) ⦃ _ ⦄ = InfiniteNumeral.num(infNum) (n)

instance
  ℕ-InfiniteNumeral : InfiniteNumeral (ℕ)
  ℕ-InfiniteNumeral = InfiniteNumeral.intro(id) where
    id : ℕ → ℕ
    id x = x

instance
  Level-InfiniteNumeral : InfiniteNumeral (Lvl.Level)
  Level-InfiniteNumeral = InfiniteNumeral.intro(f) where
    f : ℕ → Lvl.Level
    f(ℕ.𝟎) = Lvl.𝟎
    f(ℕ.𝐒(n)) = Lvl.𝐒(f(n))

record NegativeNumeral {ℓ} (T : Type{ℓ}) : Typeω where
  field
    {restriction-ℓ} : Lvl.Level
    restriction : ℕ → Type{restriction-ℓ}
    num : (n : ℕ) → ⦃ _ : restriction(n) ⦄ → T

open NegativeNumeral ⦃ ... ⦄ public using () renaming (num to -num)

{-# BUILTIN FROMNEG -num #-}

InfiniteNegativeNumeral = NegativeNumeral

module InfiniteNegativeNumeral {ℓ} {T : Type{ℓ}} where
  intro : (ℕ → T) → InfiniteNegativeNumeral(T)
  NegativeNumeral.restriction-ℓ (intro(_)) = Lvl.𝟎
  NegativeNumeral.restriction (intro(_)) _ = ⊤
  NegativeNumeral.num (intro(f)) n ⦃ _ ⦄ = f(n)

-- record InfiniteNegativeNumeral {ℓ} (T : Type{ℓ}) : Type{ℓ} where
--   field
--     num : ℕ → T
-- open InfiniteNegativeNumeral ⦃ ... ⦄ public

-- instance
--   NegativeNumeral-from-InfiniteNegativeNumeral : ∀{ℓ}{T} → ⦃ _ : InfiniteNegativeNumeral{ℓ}(T) ⦄ → NegativeNumeral{ℓ}(T)
--   NegativeNumeral.restriction-ℓ ( NegativeNumeral-from-InfiniteNegativeNumeral ) = Lvl.𝟎
--   NegativeNumeral.restriction ( NegativeNumeral-from-InfiniteNegativeNumeral ) (_) = ⊤
--   -num ⦃ NegativeNumeral-from-InfiniteNegativeNumeral ⦃ infNegNum ⦄ ⦄ (n) ⦃ _ ⦄ = InfiniteNegativeNumeral.num(infNegNum) (n)
|
The overpayment scam is another time-honored classic that's made a successful leap to the online world. It goes like this: A business (usually a small, seemingly vulnerable one) receives a check from a buyer, usually of a big-ticket item. But suddenly there's a hitch — the buyer has mistakenly written the check for more than the correct amount. To avoid the trouble of writing another check, the buyer asks that the check be deposited and the difference wired back to them. We all know where this is going, right? The check bounces, the wire transfer is gone, the scammer is untraceable and the business owner is also responsible for bank fees.
Overpayment scams are pretty easy to avoid with just a few safeguards. There are so many warning signs with these cons that you should be able to stop things before they start. A no-check policy would instantly solve the problem, and it's also best to steer clear of wire transfers. If you must accept checks for some reason, always ask for all of the buyer's contact information and then double-check — or, better yet, deal with them in person and ask for a check from a local bank. Wait for checks to clear before you use any of the funds and, above all, never accept a check for more than the agreed-upon amount.
|
function SNR = hsvargplvmShowSNR(model, layers, displ)
% Compute (and optionally print) the signal-to-noise ratio SNR = var(Y)*beta
% for every model in the requested layers of a hierarchical var-GPLVM model.
if nargin < 3
    displ = true;
end
if nargin < 2 || isempty(layers)
    layers = 1:model.H;
end

for h = layers
    if displ
        fprintf('# SNR Layer %d\n', h)
    end
    for m = 1:model.layer{h}.M
        % Prefer the original (untransformed) data if it is stored.
        if isfield(model.layer{h}.comp{m}, 'mOrig')
            varY = var(model.layer{h}.comp{m}.mOrig(:));
        else
            varY = var(model.layer{h}.comp{m}.m(:));
        end
        beta = model.layer{h}.comp{m}.beta;
        SNR{h}(m) = varY * beta;  % 1/beta is the noise variance
        if displ
            fprintf(' Model %d: %f (varY=%f, 1/beta=%f)\n', m, SNR{h}(m), varY, 1/beta)
        end
    end
end
|
module Compo1
import Incrementer
import Twicer
%default total
-- Realized (twicer . incrementer).
rlz_compo1_attrs : RealizedAttributes
rlz_compo1_attrs = MkRealizedAttributes (MkCosts 300 30 3) 0.9
-- The following does not work because 301 ≠ 200+100
-- rlz_compo1_attrs = MkRealizedAttributes (MkCosts 301 30 3) 0.9
rlz_compo1 : RealizedFunction (Int -> Int) Compo1.rlz_compo1_attrs
rlz_compo1 = compose rlz_twicer rlz_incrementer
|
/*
* KeyValueTable.hpp
*
* Created on: Jun 14, 2015
* Author: jcassidy
*/
#ifndef KEYVALUETABLE_HPP_
#define KEYVALUETABLE_HPP_
#include <string>
#include <vector>
#include <boost/serialization/serialization.hpp>
#include <cassert>
#include "OSMEntity.hpp"
#include "ValueTable.hpp"
class KeyValueTable {
public:
    unsigned addKey(const std::string k) {
        return keys_.addValue(k);
    }
    unsigned addValue(const std::string v) {
        return values_.addValue(v);
    }
    const std::string& getKey(unsigned ki) const {
        return keys_.getValue(ki);
    }
    const std::string& getValue(unsigned vi) const {
        return values_.getValue(vi);
    }
    std::pair<std::string, std::string> getKeyValue(std::pair<unsigned, unsigned> p) const {
        return std::make_pair(keys_.getValue(p.first), values_.getValue(p.second));
    }
    bool keyValid(unsigned ki) const {
        return keys_.valueValid(ki);
    }
    bool valueValid(unsigned vi) const {
        return values_.valueValid(vi);
    }
    unsigned getIndexForKeyString(const std::string s) const {
        return keys_.getIndexOfValue(s);
    }
    unsigned getIndexForValueString(const std::string s) const {
        return values_.getIndexOfValue(s);
    }
    std::function<const std::string&(unsigned) > keyLookup() const {
        return [this](unsigned i) {
            return cref(keys_.getValue(i));
        };
    }
    std::function<const std::string&(unsigned) > valueLookup() const {
        return [this](unsigned i) {
            return cref(values_.getValue(i));
        };
    }
    std::function<std::pair<const std::string&, const std::string&>(std::pair<unsigned, unsigned>) > pairLookup() const {
        return [this](const std::pair<unsigned, unsigned> p) {
            return std::make_pair(cref(keys_.getValue(p.first)), cref(values_.getValue(p.second)));
        };
    }
    const std::vector<std::string>& keys() const {
        return keys_.values();
    }
    const std::vector<std::string>& values() const {
        return values_.values();
    }
private:
    ValueTable keys_;
    ValueTable values_;

    template<class Archive> void serialize(Archive& ar, const unsigned ver) {
        ar & keys_ & values_;
    }
    friend class boost::serialization::access;
};

/** A key-value table which is bound to a specific set of tags.
 *
 */
class BoundKeyValueTable : public KeyValueTable {
public:
    BoundKeyValueTable(OSMEntity* e = nullptr) : entity_(e) {
    }
    void addTag(unsigned ki, unsigned vi) {
        assert(entity_);
        assert(keyValid(ki));
        assert(valueValid(vi));
        entity_->addTag(ki, vi);
    }
    void activeEntity(OSMEntity* e) {
        entity_ = e;
    }
    OSMEntity* activeEntity() const {
        return entity_;
    }
private:
    OSMEntity* entity_ = nullptr;
};
#endif /* KEYVALUETABLE_HPP_ */
|
#pragma once
#include <algorithm>
#include <array>
#include <cstdint>
#include <initializer_list>
#include <iterator>
#include <sstream>
#include <string>
#include <absl/types/span.h>
#include <gsl/gsl>
#include "chainerx/axes.h"
#include "chainerx/constant.h"
#include "chainerx/dims.h"
#include "chainerx/error.h"
namespace chainerx {
class Strides;
class Shape : public Dims {
    using BaseVector = Dims;

public:
    using const_iterator = BaseVector::const_iterator;
    using const_reverse_iterator = BaseVector::const_reverse_iterator;
    // TODO(niboshi): Declare other types required for this class to be a container.

    Shape() = default;
    ~Shape() = default;

    // by iterators
    template <typename InputIt>
    Shape(InputIt first, InputIt last) {
        if (std::distance(first, last) > kMaxNdim) {
            throw DimensionError{"too many dimensions: ", std::distance(first, last)};
        }
        insert(begin(), first, last);
    }

    // by span
    explicit Shape(absl::Span<const int64_t> dims) : Shape{dims.begin(), dims.end()} {}

    // by initializer list
    Shape(std::initializer_list<int64_t> dims) : Shape{dims.begin(), dims.end()} {}

    // copy
    Shape(const Shape&) = default;
    Shape& operator=(const Shape&) = default;

    // move
    Shape(Shape&&) = default;
    Shape& operator=(Shape&&) = default;

    int64_t GetTotalSize() const;

    std::string ToString() const;

    int8_t ndim() const noexcept { return gsl::narrow_cast<int8_t>(size()); }

    const int64_t& operator[](int8_t index) const {
        if (!(0 <= index && static_cast<size_t>(index) < size())) {
            throw IndexError{"Shape index ", index, " out of bounds for shape with ", size(), " size."};
        }
        return this->Dims::operator[](index);
    }

    int64_t& operator[](int8_t index) {
        if (!(0 <= index && static_cast<size_t>(index) < size())) {
            throw IndexError{"Shape index ", index, " out of bounds for shape with ", size(), " size."};
        }
        return this->Dims::operator[](index);
    }

    // span
    absl::Span<const int64_t> span() const { return {*this}; }
};
namespace internal {
bool IsContiguous(const Shape& shape, const Strides& strides, int64_t item_size);
// Returns true if a reduction can take place under the given conditions, only considering the number of dimensions.
// Otherwise, returns false.
//
// TODO(hvy): Check the dimension lengths too and reconsider the interface. E.g. return void and assert inside the function if only used for
// assertions.
bool IsValidReductionShape(const Shape& in_shape, const Axes& axes, const Shape& out_shape, bool allow_keepdims);
int64_t CountItemsAlongAxes(const Shape& shape, const Axes& axes);
Shape BroadcastShapes(const Shape& shape0, const Shape& shape1);
// Returns a shape where axes are reduced.
Shape ReduceShape(const Shape& shape, const Axes& axes, bool keepdims);
// Returns a shape with additional axes, with length 1.
Shape ExpandShape(const Shape& shape, const Axes& axes);
Shape TransposeShape(const Shape& shape, const Axes& axes);
} // namespace internal
std::ostream& operator<<(std::ostream& os, const Shape& shape);
void CheckEqual(const Shape& lhs, const Shape& rhs);
} // namespace chainerx
|
||| An Idealised HDL for linear wirings.
|||
||| Module : Idealised.idr
||| Copyright : (c) Jan de Muijnck-Hughes
||| License : see LICENSE
|||
module Circuits.Idealised
import public Circuits.Idealised.Core
import public Circuits.Idealised.Types
import public Circuits.Idealised.Terms
import public Circuits.Idealised.AST
import public Circuits.Idealised.Lexer
import public Circuits.Idealised.Parser
import public Circuits.Idealised.Check
import public Circuits.Idealised.Interp
-- [ EOF ]
|
## Global Variables
# <----- 0. Clear Memory and Restart R ----->
gc()
rm(list = ls())
.rs.restartR()
# <----- 1. Install packages (if required) ----->
#install.packages(
# c(
# 'dplyr',
# 'devtools',
# 'data.table',
# 'stringr',
# 'rvest',
# 'readr',
# 'filesstrings',
# 'reticulate',
# 'TTR',
# 'QuantTools',
# 'stringi',
# 'lubridate',
# 'BatchGetSymbols',
# 'tidyquant',
# 'quantmod',
# 'tidyr',
# 'tidyverse',
# 'ggplot2',
# 'purrr',
# 'DescTools',
# 'xlsx',
# 'rmarkdown',
# 'forecast',
# 'zoo',
# 'reshape2',
# 'markdown',
# 'knitr',
# 'bookdown',
# 'xts',
# 'downloader',
# 'kableExtra'
# )
#)
# <----- 2. Load required libraries ----->
library(dplyr)
library(devtools)
library(data.table)
library(stringr)
library(rvest)
library(readr)
library(filesstrings)
library(reticulate)
library(TTR)
library(QuantTools)
library(stringi)
library(lubridate)
library(BatchGetSymbols)
library(tidyquant)
library(quantmod)
library(tidyr)
library(ggplot2)
library(purrr)
library(DescTools)
library(xlsx)
library(rmarkdown)
library(forecast)
library(zoo)
library(reshape2)
library(markdown)
library(knitr)
library(bookdown)
library(xts)
library(downloader)
library(kableExtra)
library(RcppAlgos)
library(gtools)
# <----- 3. Set working directory and file paths ----->
# - where data, function, variable and template files live
root.directory <- "C:/Data_Files/"
if(!dir.exists(root.directory)){dir.create(root.directory)}
if(getwd() != root.directory){setwd(root.directory)}
# - Keep template (R, Py and C++) files in C:/Data_Files
template.directory <- paste0(root.directory,"Template_Directory/")
if(!dir.exists(template.directory)){dir.create(template.directory)}
# - Store R functions
function.directory <- paste0(root.directory,"Function_Directory/")
if(!dir.exists(function.directory)){dir.create(function.directory)}
# - Store R variables
variable.directory <- paste0(root.directory,"Variable_Directory/")
if(!dir.exists(variable.directory)){dir.create(variable.directory)}
# <----- 4. Load functions from function.directory, templates from template.directory and variables from variable.directory ----->
lapply(list.files(function.directory,full.names = T),function(x)(source(x)))
lapply(list.files(template.directory,full.names = T),function(x)(source(x)))
lapply(list.files(variable.directory,full.names = T),function(x)(source(x)))
# <----- 5. Change/update required parameters ----->
# - Date and Version Control Variables
p.year <- 2021 #enter current year
p.current.version <- 1.05 #enter the current version number - would like to run weekly
p.prior.version <- p.current.version - .01
|
Formal statement is: lemma in_cbox_complex_iff: "x \<in> cbox a b \<longleftrightarrow> Re x \<in> {Re a..Re b} \<and> Im x \<in> {Im a..Im b}" Informal statement is: A complex number $x$ is in the closed box $[a,b]$ if and only if its real and imaginary parts are in the corresponding closed intervals.
|
Formal statement is: lemma replace_0: assumes "j < n" "a \<in> s" and p: "\<forall>x\<in>s - {a}. x j = 0" and "x \<in> s" shows "x \<le> a" Informal statement is: If $j < n$, $a \in s$, and $x \in s$, and if $y_j = 0$ for every $y \in s - \{a\}$, then $x \leq a$.
|
/-
Copyright (c) 2020 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro, Floris van Doorn, Yury Kudryashov
-/
import topology.algebra.order.monotone_continuity
import topology.instances.nnreal
import tactic.positivity
/-!
# Square root of a real number
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
In this file we define
* `nnreal.sqrt` to be the square root of a nonnegative real number.
* `real.sqrt` to be the square root of a real number, defined to be zero on negative numbers.
Then we prove some basic properties of these functions.
## Implementation notes
We define `nnreal.sqrt` as the noncomputable inverse to the function `x ↦ x * x`. We use general
theory of inverses of strictly monotone functions to prove that `nnreal.sqrt x` exists. As a side
effect, `nnreal.sqrt` is a bundled `order_iso`, so for `nnreal` numbers we get continuity as well as
theorems like `sqrt x ≤ y ↔ x ≤ y * y` for free.
Then we define `real.sqrt x` to be `nnreal.sqrt (real.to_nnreal x)`. We also define a Cauchy
sequence `real.sqrt_aux (f : cau_seq ℚ abs)` which converges to `sqrt (mk f)` but do not prove (yet)
that this sequence actually converges to `sqrt (mk f)`.
## Tags
square root
-/
open set filter
open_locale filter nnreal topology
namespace nnreal
variables {x y : ℝ≥0}
/-- Square root of a nonnegative real number. -/
@[pp_nodot] noncomputable def sqrt : ℝ≥0 ≃o ℝ≥0 :=
order_iso.symm $ pow_order_iso 2 two_ne_zero
lemma sqrt_le_sqrt_iff : sqrt x ≤ sqrt y ↔ x ≤ y :=
sqrt.le_iff_le
lemma sqrt_lt_sqrt_iff : sqrt x < sqrt y ↔ x < y :=
sqrt.lt_iff_lt
lemma sqrt_eq_iff_sq_eq : sqrt x = y ↔ y ^ 2 = x :=
sqrt.to_equiv.apply_eq_iff_eq_symm_apply.trans eq_comm
lemma sqrt_le_iff : sqrt x ≤ y ↔ x ≤ y ^ 2 :=
sqrt.to_galois_connection _ _
lemma le_sqrt_iff : x ≤ sqrt y ↔ x ^ 2 ≤ y :=
(sqrt.symm.to_galois_connection _ _).symm
@[simp] lemma sqrt_eq_zero : sqrt x = 0 ↔ x = 0 :=
sqrt_eq_iff_sq_eq.trans $ by rw [eq_comm, sq, zero_mul]
@[simp] lemma sqrt_zero : sqrt 0 = 0 := sqrt_eq_zero.2 rfl
@[simp] lemma sqrt_one : sqrt 1 = 1 := sqrt_eq_iff_sq_eq.2 $ one_pow _
@[simp] lemma sq_sqrt (x : ℝ≥0) : (sqrt x)^2 = x := sqrt.symm_apply_apply x
@[simp] lemma mul_self_sqrt (x : ℝ≥0) : sqrt x * sqrt x = x := by rw [← sq, sq_sqrt]
@[simp] lemma sqrt_sq (x : ℝ≥0) : sqrt (x^2) = x := sqrt.apply_symm_apply x
@[simp] lemma sqrt_mul_self (x : ℝ≥0) : sqrt (x * x) = x := by rw [← sq, sqrt_sq x]
lemma sqrt_mul (x y : ℝ≥0) : sqrt (x * y) = sqrt x * sqrt y :=
by rw [sqrt_eq_iff_sq_eq, mul_pow, sq_sqrt, sq_sqrt]
/-- `nnreal.sqrt` as a `monoid_with_zero_hom`. -/
noncomputable def sqrt_hom : ℝ≥0 →*₀ ℝ≥0 := ⟨sqrt, sqrt_zero, sqrt_one, sqrt_mul⟩
lemma sqrt_inv (x : ℝ≥0) : sqrt (x⁻¹) = (sqrt x)⁻¹ := map_inv₀ sqrt_hom x
lemma sqrt_div (x y : ℝ≥0) : sqrt (x / y) = sqrt x / sqrt y := map_div₀ sqrt_hom x y
lemma continuous_sqrt : continuous sqrt := sqrt.continuous
end nnreal
namespace real
/-- An auxiliary sequence of rational numbers that converges to `real.sqrt (mk f)`.
Currently this sequence is not used in `mathlib`. -/
def sqrt_aux (f : cau_seq ℚ abs) : ℕ → ℚ
| 0 := rat.mk_nat (f 0).num.to_nat.sqrt (f 0).denom.sqrt
| (n + 1) := let s := sqrt_aux n in max 0 $ (s + f (n+1) / s) / 2
theorem sqrt_aux_nonneg (f : cau_seq ℚ abs) : ∀ i : ℕ, 0 ≤ sqrt_aux f i
| 0 := by rw [sqrt_aux, rat.mk_nat_eq, rat.mk_eq_div];
apply div_nonneg; exact int.cast_nonneg.2 (int.of_nat_nonneg _)
| (n + 1) := le_max_left _ _
/- TODO(Mario): finish the proof
theorem sqrt_aux_converges (f : cau_seq ℚ abs) : ∃ h x, 0 ≤ x ∧ x * x = max 0 (mk f) ∧
mk ⟨sqrt_aux f, h⟩ = x :=
begin
rcases sqrt_exists (le_max_left 0 (mk f)) with ⟨x, x0, hx⟩,
suffices : ∃ h, mk ⟨sqrt_aux f, h⟩ = x,
{ exact this.imp (λ h e, ⟨x, x0, hx, e⟩) },
apply of_near,
rsuffices ⟨δ, δ0, hδ⟩ : ∃ δ > 0, ∀ i, abs (↑(sqrt_aux f i) - x) < δ / 2 ^ i,
{ intros }
end -/
/-- The square root of a real number. This returns 0 for negative inputs. -/
@[pp_nodot] noncomputable def sqrt (x : ℝ) : ℝ :=
nnreal.sqrt (real.to_nnreal x)
/-quotient.lift_on x
(λ f, mk ⟨sqrt_aux f, (sqrt_aux_converges f).fst⟩)
(λ f g e, begin
rcases sqrt_aux_converges f with ⟨hf, x, x0, xf, xs⟩,
rcases sqrt_aux_converges g with ⟨hg, y, y0, yg, ys⟩,
refine xs.trans (eq.trans _ ys.symm),
rw [← @mul_self_inj_of_nonneg ℝ _ x y x0 y0, xf, yg],
congr' 1, exact quotient.sound e
end)-/
variables {x y : ℝ}
@[simp, norm_cast] lemma coe_sqrt {x : ℝ≥0} : (nnreal.sqrt x : ℝ) = real.sqrt x :=
by rw [real.sqrt, real.to_nnreal_coe]
@[continuity]
lemma continuous_sqrt : continuous sqrt :=
nnreal.continuous_coe.comp $ nnreal.sqrt.continuous.comp continuous_real_to_nnreal
theorem sqrt_eq_zero_of_nonpos (h : x ≤ 0) : sqrt x = 0 :=
by simp [sqrt, real.to_nnreal_eq_zero.2 h]
theorem sqrt_nonneg (x : ℝ) : 0 ≤ sqrt x := nnreal.coe_nonneg _
@[simp] theorem mul_self_sqrt (h : 0 ≤ x) : sqrt x * sqrt x = x :=
by rw [sqrt, ← nnreal.coe_mul, nnreal.mul_self_sqrt, real.coe_to_nnreal _ h]
@[simp] theorem sqrt_mul_self (h : 0 ≤ x) : sqrt (x * x) = x :=
(mul_self_inj_of_nonneg (sqrt_nonneg _) h).1 (mul_self_sqrt (mul_self_nonneg _))
theorem sqrt_eq_cases : sqrt x = y ↔ y * y = x ∧ 0 ≤ y ∨ x < 0 ∧ y = 0 :=
begin
split,
{ rintro rfl,
cases le_or_lt 0 x with hle hlt,
{ exact or.inl ⟨mul_self_sqrt hle, sqrt_nonneg x⟩ },
{ exact or.inr ⟨hlt, sqrt_eq_zero_of_nonpos hlt.le⟩ } },
{ rintro (⟨rfl, hy⟩|⟨hx, rfl⟩),
exacts [sqrt_mul_self hy, sqrt_eq_zero_of_nonpos hx.le] }
end
theorem sqrt_eq_iff_mul_self_eq (hx : 0 ≤ x) (hy : 0 ≤ y) :
sqrt x = y ↔ y * y = x :=
⟨λ h, by rw [← h, mul_self_sqrt hx], λ h, by rw [← h, sqrt_mul_self hy]⟩
theorem sqrt_eq_iff_mul_self_eq_of_pos (h : 0 < y) :
sqrt x = y ↔ y * y = x :=
by simp [sqrt_eq_cases, h.ne', h.le]
@[simp] lemma sqrt_eq_one : sqrt x = 1 ↔ x = 1 :=
calc sqrt x = 1 ↔ 1 * 1 = x :
sqrt_eq_iff_mul_self_eq_of_pos zero_lt_one
... ↔ x = 1 : by rw [eq_comm, mul_one]
@[simp] theorem sq_sqrt (h : 0 ≤ x) : (sqrt x)^2 = x :=
by rw [sq, mul_self_sqrt h]
@[simp] theorem sqrt_sq (h : 0 ≤ x) : sqrt (x ^ 2) = x :=
by rw [sq, sqrt_mul_self h]
theorem sqrt_eq_iff_sq_eq (hx : 0 ≤ x) (hy : 0 ≤ y) :
sqrt x = y ↔ y ^ 2 = x :=
by rw [sq, sqrt_eq_iff_mul_self_eq hx hy]
theorem sqrt_mul_self_eq_abs (x : ℝ) : sqrt (x * x) = |x| :=
by rw [← abs_mul_abs_self x, sqrt_mul_self (abs_nonneg _)]
theorem sqrt_sq_eq_abs (x : ℝ) : sqrt (x ^ 2) = |x| :=
by rw [sq, sqrt_mul_self_eq_abs]
@[simp] theorem sqrt_zero : sqrt 0 = 0 := by simp [sqrt]
@[simp] theorem sqrt_one : sqrt 1 = 1 := by simp [sqrt]
@[simp] theorem sqrt_le_sqrt_iff (hy : 0 ≤ y) : sqrt x ≤ sqrt y ↔ x ≤ y :=
by rw [sqrt, sqrt, nnreal.coe_le_coe, nnreal.sqrt_le_sqrt_iff, real.to_nnreal_le_to_nnreal_iff hy]
@[simp] theorem sqrt_lt_sqrt_iff (hx : 0 ≤ x) : sqrt x < sqrt y ↔ x < y :=
lt_iff_lt_of_le_iff_le (sqrt_le_sqrt_iff hx)
theorem sqrt_lt_sqrt_iff_of_pos (hy : 0 < y) : sqrt x < sqrt y ↔ x < y :=
by rw [sqrt, sqrt, nnreal.coe_lt_coe, nnreal.sqrt_lt_sqrt_iff, to_nnreal_lt_to_nnreal_iff hy]
theorem sqrt_le_sqrt (h : x ≤ y) : sqrt x ≤ sqrt y :=
by { rw [sqrt, sqrt, nnreal.coe_le_coe, nnreal.sqrt_le_sqrt_iff], exact to_nnreal_le_to_nnreal h }
theorem sqrt_lt_sqrt (hx : 0 ≤ x) (h : x < y) : sqrt x < sqrt y :=
(sqrt_lt_sqrt_iff hx).2 h
theorem sqrt_le_iff : sqrt x ≤ y ↔ 0 ≤ y ∧ x ≤ y ^ 2 :=
begin
rw [← and_iff_right_of_imp (λ h, (sqrt_nonneg x).trans h), and.congr_right_iff],
exact sqrt_le_left
end
lemma sqrt_lt (hx : 0 ≤ x) (hy : 0 ≤ y) : sqrt x < y ↔ x < y ^ 2 :=
by rw [←sqrt_lt_sqrt_iff hx, sqrt_sq hy]
lemma sqrt_lt' (hy : 0 < y) : sqrt x < y ↔ x < y ^ 2 :=
by rw [←sqrt_lt_sqrt_iff_of_pos (pow_pos hy _), sqrt_sq hy.le]
/- note: if you want to conclude `x ≤ sqrt y`, then use `le_sqrt_of_sq_le`.
if you have `x > 0`, consider using `le_sqrt'` -/
theorem le_sqrt (hx : 0 ≤ x) (hy : 0 ≤ y) : x ≤ sqrt y ↔ x ^ 2 ≤ y :=
le_iff_le_iff_lt_iff_lt.2 $ sqrt_lt hy hx
lemma le_sqrt' (hx : 0 < x) : x ≤ sqrt y ↔ x ^ 2 ≤ y := le_iff_le_iff_lt_iff_lt.2 $ sqrt_lt' hx
theorem abs_le_sqrt (h : x^2 ≤ y) : |x| ≤ sqrt y :=
by rw ← sqrt_sq_eq_abs; exact sqrt_le_sqrt h
theorem sq_le (h : 0 ≤ y) : x^2 ≤ y ↔ -sqrt y ≤ x ∧ x ≤ sqrt y :=
begin
split,
{ simpa only [abs_le] using abs_le_sqrt },
{ rw [← abs_le, ← sq_abs],
exact (le_sqrt (abs_nonneg x) h).mp },
end
theorem neg_sqrt_le_of_sq_le (h : x^2 ≤ y) : -sqrt y ≤ x :=
((sq_le ((sq_nonneg x).trans h)).mp h).1
theorem le_sqrt_of_sq_le (h : x^2 ≤ y) : x ≤ sqrt y :=
((sq_le ((sq_nonneg x).trans h)).mp h).2
@[simp] theorem sqrt_inj (hx : 0 ≤ x) (hy : 0 ≤ y) : sqrt x = sqrt y ↔ x = y :=
by simp [le_antisymm_iff, hx, hy]
@[simp] theorem sqrt_eq_zero (h : 0 ≤ x) : sqrt x = 0 ↔ x = 0 :=
by simpa using sqrt_inj h le_rfl
theorem sqrt_eq_zero' : sqrt x = 0 ↔ x ≤ 0 :=
by rw [sqrt, nnreal.coe_eq_zero, nnreal.sqrt_eq_zero, real.to_nnreal_eq_zero]
theorem sqrt_ne_zero (h : 0 ≤ x) : sqrt x ≠ 0 ↔ x ≠ 0 :=
by rw [not_iff_not, sqrt_eq_zero h]
theorem sqrt_ne_zero' : sqrt x ≠ 0 ↔ 0 < x :=
by rw [← not_le, not_iff_not, sqrt_eq_zero']
@[simp] theorem sqrt_pos : 0 < sqrt x ↔ 0 < x :=
lt_iff_lt_of_le_iff_le (iff.trans
(by simp [le_antisymm_iff, sqrt_nonneg]) sqrt_eq_zero')
alias sqrt_pos ↔ _ sqrt_pos_of_pos
section
open tactic tactic.positivity
/-- Extension for the `positivity` tactic: a square root is nonnegative, and is strictly positive if
its input is. -/
@[positivity]
meta def _root_.tactic.positivity_sqrt : expr → tactic strictness
| `(real.sqrt %%a) := do
(do -- if can prove `0 < a`, report positivity
positive pa ← core a,
positive <$> mk_app ``sqrt_pos_of_pos [pa]) <|>
nonnegative <$> mk_app ``sqrt_nonneg [a] -- else report nonnegativity
| _ := failed
end
@[simp] theorem sqrt_mul (hx : 0 ≤ x) (y : ℝ) : sqrt (x * y) = sqrt x * sqrt y :=
by simp_rw [sqrt, ← nnreal.coe_mul, nnreal.coe_eq, real.to_nnreal_mul hx, nnreal.sqrt_mul]
@[simp] theorem sqrt_mul' (x) {y : ℝ} (hy : 0 ≤ y) : sqrt (x * y) = sqrt x * sqrt y :=
by rw [mul_comm, sqrt_mul hy, mul_comm]
@[simp] theorem sqrt_inv (x : ℝ) : sqrt x⁻¹ = (sqrt x)⁻¹ :=
by rw [sqrt, real.to_nnreal_inv, nnreal.sqrt_inv, nnreal.coe_inv, sqrt]
@[simp] theorem sqrt_div (hx : 0 ≤ x) (y : ℝ) : sqrt (x / y) = sqrt x / sqrt y :=
by rw [division_def, sqrt_mul hx, sqrt_inv, division_def]
@[simp] theorem sqrt_div' (x) {y : ℝ} (hy : 0 ≤ y) : sqrt (x / y) = sqrt x / sqrt y :=
by rw [division_def, sqrt_mul' x (inv_nonneg.2 hy), sqrt_inv, division_def]
@[simp] theorem div_sqrt : x / sqrt x = sqrt x :=
begin
cases le_or_lt x 0,
{ rw [sqrt_eq_zero'.mpr h, div_zero] },
{ rw [div_eq_iff (sqrt_ne_zero'.mpr h), mul_self_sqrt h.le] },
end
theorem sqrt_div_self' : sqrt x / x = 1 / sqrt x :=
by rw [←div_sqrt, one_div_div, div_sqrt]
theorem sqrt_div_self : sqrt x / x = (sqrt x)⁻¹ :=
by rw [sqrt_div_self', one_div]
lemma lt_sqrt (hx : 0 ≤ x) : x < sqrt y ↔ x ^ 2 < y :=
by rw [←sqrt_lt_sqrt_iff (sq_nonneg _), sqrt_sq hx]
lemma sq_lt : x^2 < y ↔ -sqrt y < x ∧ x < sqrt y := by rw [←abs_lt, ←sq_abs, lt_sqrt (abs_nonneg _)]
theorem neg_sqrt_lt_of_sq_lt (h : x^2 < y) : -sqrt y < x := (sq_lt.mp h).1
theorem lt_sqrt_of_sq_lt (h : x^2 < y) : x < sqrt y := (sq_lt.mp h).2
lemma lt_sq_of_sqrt_lt {x y : ℝ} (h : sqrt x < y) : x < y ^ 2 :=
by { have hy := x.sqrt_nonneg.trans_lt h,
rwa [←sqrt_lt_sqrt_iff_of_pos (sq_pos_of_pos hy), sqrt_sq hy.le] }
/-- The natural square root is at most the real square root -/
lemma nat_sqrt_le_real_sqrt {a : ℕ} : ↑(nat.sqrt a) ≤ real.sqrt ↑a :=
begin
rw real.le_sqrt (nat.cast_nonneg _) (nat.cast_nonneg _),
norm_cast,
exact nat.sqrt_le' a,
end
/-- The real square root is at most the natural square root plus one -/
lemma real_sqrt_le_nat_sqrt_succ {a : ℕ} : real.sqrt ↑a ≤ nat.sqrt a + 1 :=
begin
rw real.sqrt_le_iff,
split,
{ norm_cast, simp, },
{ norm_cast, exact le_of_lt (nat.lt_succ_sqrt' a), },
end
instance : star_ordered_ring ℝ :=
{ nonneg_iff := λ r, by
{ refine ⟨λ hr, ⟨sqrt r, show r = sqrt r * sqrt r, by rw [←sqrt_mul hr, sqrt_mul_self hr]⟩, _⟩,
rintros ⟨s, rfl⟩,
exact mul_self_nonneg s },
..real.ordered_add_comm_group }
end real
open real
variables {α : Type*}
lemma filter.tendsto.sqrt {f : α → ℝ} {l : filter α} {x : ℝ} (h : tendsto f l (𝓝 x)) :
tendsto (λ x, sqrt (f x)) l (𝓝 (sqrt x)) :=
(continuous_sqrt.tendsto _).comp h
variables [topological_space α] {f : α → ℝ} {s : set α} {x : α}
lemma continuous_within_at.sqrt (h : continuous_within_at f s x) :
continuous_within_at (λ x, sqrt (f x)) s x :=
h.sqrt
lemma continuous_at.sqrt (h : continuous_at f x) : continuous_at (λ x, sqrt (f x)) x := h.sqrt
lemma continuous_on.sqrt (h : continuous_on f s) : continuous_on (λ x, sqrt (f x)) s :=
λ x hx, (h x hx).sqrt
@[continuity]
lemma continuous.sqrt (h : continuous f) : continuous (λ x, sqrt (f x)) := continuous_sqrt.comp h
|
(* Title: HOL/UNITY/Comp/Client.thy
Author: Lawrence C Paulson, Cambridge University Computer Laboratory
Copyright 1998 University of Cambridge
*)
section{*Distributed Resource Management System: the Client*}
theory Client imports "../Rename" AllocBase begin
type_synonym
tokbag = nat --{*tokbags could be multisets...or any ordered type?*}
record state =
giv :: "tokbag list" --{*input history: tokens granted*}
ask :: "tokbag list" --{*output history: tokens requested*}
rel :: "tokbag list" --{*output history: tokens released*}
tok :: tokbag --{*current token request*}
record 'a state_d =
state +
dummy :: 'a --{*new variables*}
(*Array indexing is translated to list indexing as A[n] == A!(n-1). *)
(** Release some tokens **)
definition
rel_act :: "('a state_d * 'a state_d) set"
where "rel_act = {(s,s').
\<exists>nrel. nrel = size (rel s) &
s' = s (| rel := rel s @ [giv s!nrel] |) &
nrel < size (giv s) &
ask s!nrel \<le> giv s!nrel}"
(** Choose a new token requirement **)
(** Including s'=s suppresses fairness, allowing the non-trivial part
of the action to be ignored **)
definition
tok_act :: "('a state_d * 'a state_d) set"
where "tok_act = {(s,s'). s'=s | s' = s (|tok := Suc (tok s mod NbT) |)}"
definition
ask_act :: "('a state_d * 'a state_d) set"
where "ask_act = {(s,s'). s'=s |
(s' = s (|ask := ask s @ [tok s]|))}"
definition
Client :: "'a state_d program"
where "Client =
mk_total_program
({s. tok s \<in> atMost NbT &
giv s = [] & ask s = [] & rel s = []},
{rel_act, tok_act, ask_act},
\<Union>G \<in> preserves rel Int preserves ask Int preserves tok.
Acts G)"
definition
(*Maybe want a special theory section to declare such maps*)
non_dummy :: "'a state_d => state"
where "non_dummy s = (|giv = giv s, ask = ask s, rel = rel s, tok = tok s|)"
definition
(*Renaming map to put a Client into the standard form*)
client_map :: "'a state_d => state*'a"
where "client_map = funPair non_dummy dummy"
declare Client_def [THEN def_prg_Init, simp]
declare Client_def [THEN def_prg_AllowedActs, simp]
declare rel_act_def [THEN def_act_simp, simp]
declare tok_act_def [THEN def_act_simp, simp]
declare ask_act_def [THEN def_act_simp, simp]
lemma Client_ok_iff [iff]:
"(Client ok G) =
(G \<in> preserves rel & G \<in> preserves ask & G \<in> preserves tok &
Client \<in> Allowed G)"
by (auto simp add: ok_iff_Allowed Client_def [THEN def_total_prg_Allowed])
text{*Safety property 1: ask, rel are increasing*}
lemma increasing_ask_rel:
"Client \<in> UNIV guarantees Increasing ask Int Increasing rel"
apply (auto intro!: increasing_imp_Increasing simp add: guar_def preserves_subset_increasing [THEN subsetD])
apply (auto simp add: Client_def increasing_def)
apply (safety, auto)+
done
declare nth_append [simp] append_one_prefix [simp]
text{*Safety property 2: the client never requests too many tokens.
With no Substitution Axiom, we must prove the two invariants
simultaneously. *}
lemma ask_bounded_lemma:
"Client ok G
==> Client Join G \<in>
Always ({s. tok s \<le> NbT} Int
{s. \<forall>elt \<in> set (ask s). elt \<le> NbT})"
apply auto
apply (rule invariantI [THEN stable_Join_Always2], force)
prefer 2
apply (fast elim!: preserves_subset_stable [THEN subsetD] intro!: stable_Int)
apply (simp add: Client_def, safety)
apply (cut_tac m = "tok s" in NbT_pos [THEN mod_less_divisor], auto)
done
text{*export version, with no mention of tok in the postcondition, but
unfortunately tok must be declared local.*}
lemma ask_bounded:
"Client \<in> UNIV guarantees Always {s. \<forall>elt \<in> set (ask s). elt \<le> NbT}"
apply (rule guaranteesI)
apply (erule ask_bounded_lemma [THEN Always_weaken])
apply (rule Int_lower2)
done
text{*** Towards proving the liveness property ***}
lemma stable_rel_le_giv: "Client \<in> stable {s. rel s \<le> giv s}"
by (simp add: Client_def, safety, auto)
lemma Join_Stable_rel_le_giv:
"[| Client Join G \<in> Increasing giv; G \<in> preserves rel |]
==> Client Join G \<in> Stable {s. rel s \<le> giv s}"
by (rule stable_rel_le_giv [THEN Increasing_preserves_Stable], auto)
lemma Join_Always_rel_le_giv:
"[| Client Join G \<in> Increasing giv; G \<in> preserves rel |]
==> Client Join G \<in> Always {s. rel s \<le> giv s}"
by (force intro: AlwaysI Join_Stable_rel_le_giv)
lemma transient_lemma:
"Client \<in> transient {s. rel s = k & k<h & h \<le> giv s & h pfixGe ask s}"
apply (simp add: Client_def mk_total_program_def)
apply (rule_tac act = rel_act in totalize_transientI)
apply (auto simp add: Domain_unfold Client_def)
apply (blast intro: less_le_trans prefix_length_le strict_prefix_length_less)
apply (auto simp add: prefix_def genPrefix_iff_nth Ge_def)
apply (blast intro: strict_prefix_length_less)
done
lemma induct_lemma:
"[| Client Join G \<in> Increasing giv; Client ok G |]
==> Client Join G \<in> {s. rel s = k & k<h & h \<le> giv s & h pfixGe ask s}
LeadsTo {s. k < rel s & rel s \<le> giv s &
h \<le> giv s & h pfixGe ask s}"
apply (rule single_LeadsTo_I)
apply (frule increasing_ask_rel [THEN guaranteesD], auto)
apply (rule transient_lemma [THEN Join_transient_I1, THEN transient_imp_leadsTo, THEN leadsTo_imp_LeadsTo, THEN PSP_Stable, THEN LeadsTo_weaken])
apply (rule Stable_Int [THEN Stable_Int, THEN Stable_Int])
apply (erule_tac f = giv and x = "giv s" in IncreasingD)
apply (erule_tac f = ask and x = "ask s" in IncreasingD)
apply (erule_tac f = rel and x = "rel s" in IncreasingD)
apply (erule Join_Stable_rel_le_giv, blast)
apply (blast intro: order_less_imp_le order_trans)
apply (blast intro: sym order_less_le [THEN iffD2] order_trans
prefix_imp_pfixGe pfixGe_trans)
done
lemma rel_progress_lemma:
"[| Client Join G \<in> Increasing giv; Client ok G |]
==> Client Join G \<in> {s. rel s < h & h \<le> giv s & h pfixGe ask s}
LeadsTo {s. h \<le> rel s}"
apply (rule_tac f = "%s. size h - size (rel s) " in LessThan_induct)
apply (auto simp add: vimage_def)
apply (rule single_LeadsTo_I)
apply (rule induct_lemma [THEN LeadsTo_weaken], auto)
apply (blast intro: order_less_le [THEN iffD2] dest: common_prefix_linear)
apply (drule strict_prefix_length_less)+
apply arith
done
lemma client_progress_lemma:
"[| Client Join G \<in> Increasing giv; Client ok G |]
==> Client Join G \<in> {s. h \<le> giv s & h pfixGe ask s}
LeadsTo {s. h \<le> rel s}"
apply (rule Join_Always_rel_le_giv [THEN Always_LeadsToI], simp_all)
apply (rule LeadsTo_Un [THEN LeadsTo_weaken_L])
apply (blast intro: rel_progress_lemma)
apply (rule subset_refl [THEN subset_imp_LeadsTo])
apply (blast intro: order_less_le [THEN iffD2] dest: common_prefix_linear)
done
text{*Progress property: all tokens that are given will be released*}
lemma client_progress:
"Client \<in>
Increasing giv guarantees
(INT h. {s. h \<le> giv s & h pfixGe ask s} LeadsTo {s. h \<le> rel s})"
apply (rule guaranteesI, clarify)
apply (blast intro: client_progress_lemma)
done
text{*This shows that the Client won't alter other variables in any state
that it is combined with*}
lemma client_preserves_dummy: "Client \<in> preserves dummy"
by (simp add: Client_def preserves_def, clarify, safety, auto)
text{** Obsolete lemmas from first version of the Client **}
lemma stable_size_rel_le_giv:
"Client \<in> stable {s. size (rel s) \<le> size (giv s)}"
by (simp add: Client_def, safety, auto)
text{*clients return the right number of tokens*}
lemma ok_guar_rel_prefix_giv:
"Client \<in> Increasing giv guarantees Always {s. rel s \<le> giv s}"
apply (rule guaranteesI)
apply (rule AlwaysI, force)
apply (blast intro: Increasing_preserves_Stable stable_rel_le_giv)
done
end
|
In SecureWorks’ 2017 Cybersecurity Threat Insights Report, we found those cracks are often the result of failing to implement basic security hygiene — the effective combination of people, processes and technologies to protect systems and data. Strong security hygiene requires knowing your assets, your data, and the controls protecting them. Yet in the report, our examination of 163 incident response engagements during the first half of 2016 uncovered failures ranging from poor patch management to a failure to protect the extended enterprise to ineffective preparation for incident response.
To understand what organizations need to do to prioritize the right areas for security spending and what can be done to more effectively prevent, remediate and respond to threats, cybersecurity leaders need to start with the fundamentals.
While much of the media focus is often on sophisticated, targeted attacks, the vast majority of the incidents for which SecureWorks was engaged in the first half of 2016 (88 percent) were opportunistic attacks that did not target a specific organization. Among the incidents in the report in which the initial access vector was known, phishing was used 38 percent of the time, making it the most common attack methodology used by attackers. Scan and exploit was the second most common at 22 percent, while strategic web compromises and credential abuse comprised 21 percent and 15 percent, respectively.
Removable media was involved in four percent of the incidents.
In terms of defense, the implication here is clear: organizations need to put an emphasis on addressing the challenge posed by phishing. Part of that requires educating and training employees to spot phishing emails when they hit their inboxes. Often, there are telltale signs—misspellings, requests for the recipient to do something out of the norm, etc.—but sometimes there are not. In targeted attacks, spear-phishing emails can be even more sneaky than most. It is common for advanced threat groups to perform extensive reconnaissance on their targets before launching an attack, allowing them to create convincing emails that take into account details such as the recipient’s job duties and what IT assets and data they have access to. With that kind of information at an attacker’s disposal, it is likely that someone in the organization will fall victim, making anti-phishing technologies like email filtering critical.
Phishing can often lead to credential theft. Once a phisher has a victim’s username, password or authentication information, they can abuse it to gain access to an account, service or network and take other actions—including data theft. In one incident noted in the report, a threat actor compromised a third-party organization providing help desk services to its true target. After compromising the third-party environment, the threat actor accessed their actual target. Once inside, the adversary gained access to administrator accounts, used them to access Citrix servers, and stole credentials from those servers for other systems. Protecting user credentials and enforcing password and passphrase best practices is a critical part of security. Another critical part is controlling user access and privileges. To prevent potential abuse by attackers or insider threats, user privileges should be limited to the lowest level necessary—a strategy that could cause culture clashes between the organization and users accustomed to not being limited, but also one that could keep an attack from spreading if a machine is compromised.
Strategic web compromises involve attackers infecting legitimate websites their targets are likely to visit in hopes of infecting their computers when they do. These types of drive-by download attacks are particularly sneaky because they take advantage of the trust the visitor has in the site. Although they sometimes use zero-days, the vulnerabilities are likely known issues the attacker is hoping the target has not yet patched. As a result, protecting against these types of attacks starts with an effective patch management strategy that identifies the vulnerabilities affecting your IT environment and rolls out the appropriate updates as promptly as possible.
Organizations should scan their networks and develop an inventory of their software and devices, then prioritize their patching according to the risk of an attack and the damage it could do if successful. In addition, vulnerability management extends to weaving security into the app development process and ensuring the safety of non-commodity code developed internally or by a third-party partner.
Of course, corporate security teams are hardly the only ones doing vulnerability scans. In the case of the recent WannaCry ransomware attacks for example, the threat actors scanned Internet IP addresses for machines vulnerable to a Microsoft Windows vulnerability. This type of high-volume scanning of Internet-facing systems is a common way for threat actors to find systems they can exploit, and as noted above, was observed in nearly a quarter of the incidents examined in the report. One of the reasons the ransomware spread so quickly was that many organizations did not promptly apply Microsoft’s update despite it having been available since March. Buying the latest technology will not solve the problem posed by an unsecure Web server left accessible via the Internet.
The bottom line is that organizations need to take a risk-based approach to security that goes beyond regulatory compliance. Our Threat Insights Report outlines a number of recommendations.
Understand the extended enterprise. Take a data-centric approach. Define your key assets, know where they reside and who has access to them, including third parties.
Increase visibility. By collecting and monitoring security events, you will be able to reduce the time it takes to detect and respond to incidents as well as identify trends within the infrastructure. At a minimum, maintain logs on the following systems for 13 months: firewall, IDS/IPS, DNS, VPN, Active Directory, Web Services and critical servers and systems.
Build a culture of security. Everyone within the organization must take responsibility for protecting information. This involves getting buy-in from C-level leaders as well as other parts of the business outside IT in order to sell the importance of smart security behaviors.
Train your users. Employees unfortunately remain the weakest link. Phishing and social engineering remain popular for attackers seeking to infect enterprises and SMBs alike. Training employees to spot suspicious behavior can significantly improve your ability to block malicious activity.
Too often, the answer for these challenges is to buy the latest technology. However, to truly improve their security, chief information security officers need to focus more on people and processes. One of the mistakes many CISOs make is to take a compliance-first approach to security. Taking that type of checkbox approach does not best serve the organization. When it comes to cybersecurity, compliance should be thought of as a floor as opposed to a ceiling. For example, SecureWorks has talked to security teams at financial institutions who spent as much as 40 percent of their time on compliance initiatives rather than security initiatives that matter to their organizations. Ironically, putting a strong emphasis on security will address most compliance requirements.
Cybersecurity is not a problem that can be solved with technology alone. Developing an effective security strategy means understanding your needs, where your critical data and assets are, and what the risk levels are to that information and those devices. It means training employees, building an effective patch management program, and operationalizing threat intelligence to harden your defenses. It means implementing strategies like strong passwords and multi-factor authentication to control access to critical systems. Whether sophisticated attackers are at your doorstep or not, it won’t take any sophistication to break in if the door is unlocked.
This article originally appeared in the August 2017 issue of Security Today.
|
Formal statement is: lemma ksimplex_0: "n = 0 \<Longrightarrow> s = {(\<lambda>x. p)}" Informal statement is: If $n = 0$, then the simplex $s$ is the singleton set containing the constant function $\lambda x.\ p$.
|
#' Join two tbls together
#'
#' These are generic functions that dispatch to individual tbl methods - see the
#' method documentation for details of individual data sources. `x` and
#' `y` should usually be from the same data source, but if `copy` is
#' `TRUE`, `y` will automatically be copied to the same source as `x`.
#'
#' @section Join types:
#'
#' Currently dplyr supports four types of mutating joins, two types of filtering joins, and
#' a nesting join.
#'
#' \strong{Mutating joins} combine variables from the two data.frames:
#'
#' \describe{
#' \item{`inner_join()`}{return all rows from `x` where there are matching
#' values in `y`, and all columns from `x` and `y`. If there are multiple matches
#' between `x` and `y`, all combination of the matches are returned.}
#'
#' \item{`left_join()`}{return all rows from `x`, and all columns from `x`
#' and `y`. Rows in `x` with no match in `y` will have `NA` values in the new
#' columns. If there are multiple matches between `x` and `y`, all combinations
#' of the matches are returned.}
#'
#' \item{`right_join()`}{return all rows from `y`, and all columns from `x`
#' and y. Rows in `y` with no match in `x` will have `NA` values in the new
#' columns. If there are multiple matches between `x` and `y`, all combinations
#' of the matches are returned.}
#'
#' \item{`full_join()`}{return all rows and all columns from both `x` and `y`.
#' Where there are not matching values, returns `NA` for the one missing.}
#' }
#'
#'
#' \strong{Filtering joins} keep cases from the left-hand data.frame:
#'
#' \describe{
#' \item{`semi_join()`}{return all rows from `x` where there are matching
#' values in `y`, keeping just columns from `x`.
#'
#' A semi join differs from an inner join because an inner join will return
#' one row of `x` for each matching row of `y`, where a semi
#' join will never duplicate rows of `x`.}
#'
#' \item{`anti_join()`}{return all rows from `x` where there are not
#' matching values in `y`, keeping just columns from `x`.}
#' }
#'
#' \strong{Nesting joins} create a list column of data.frames:
#'
#' \describe{
#' \item{`nest_join()`}{return all rows and all columns from `x`. Adds a
#' list column of tibbles. Each tibble contains all the rows from `y`
#' that match that row of `x`. When there is no match, the list column is
#' a 0-row tibble with the same column names and types as `y`.
#'
#' `nest_join()` is the most fundamental join since you can recreate the other joins from it.
#' An `inner_join()` is a `nest_join()` plus an [tidyr::unnest()], and `left_join()` is a
#' `nest_join()` plus an `unnest(.drop = FALSE)`.
#' A `semi_join()` is a `nest_join()` plus a `filter()` where you check that every element of data has
#' at least one row, and an `anti_join()` is a `nest_join()` plus a `filter()` where you check that
#' every element has zero rows (see the example after this list).
#' }
#' }
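#'
#' For example (a sketch, assuming the tidyr package is installed), an inner
#' join can be recreated from a nest join as
#' `band_members %>% nest_join(band_instruments) %>% tidyr::unnest(band_instruments)`.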
#'
#' @section Grouping:
#'
#' Groups are ignored for the purpose of joining, but the result preserves
#' the grouping of `x`.
#'
#' @param x,y tbls to join
#' @param by a character vector of variables to join by. If `NULL`, the
#' default, `*_join()` will do a natural join, using all variables with
#' common names across the two tables. A message lists the variables so
#' that you can check they're right (to suppress the message, simply
#' explicitly list the variables that you want to join).
#'
#' To join by different variables on x and y use a named vector.
#' For example, `by = c("a" = "b")` will match `x.a` to
#' `y.b`.
#' @param copy If `x` and `y` are not from the same data source,
#' and `copy` is `TRUE`, then `y` will be copied into the
#' same src as `x`. This allows you to join tables across srcs, but
#' it is a potentially expensive operation so you must opt into it.
#' @param suffix If there are non-joined duplicate variables in `x` and
#' `y`, these suffixes will be added to the output to disambiguate them.
#' Should be a character vector of length 2.
#' @param name the name of the list column nesting joins create. If `NULL` the name of `y` is used.
#' @param keep If `TRUE` the by columns are kept in the nesting joins.
#' @param ... other parameters passed onto methods, for instance, `na_matches`
#' to control how `NA` values are matched. See \link{join.tbl_df} for more.
#' @name join
#' @examples
#' # "Mutating" joins combine variables from the LHS and RHS
#' band_members %>% inner_join(band_instruments)
#' band_members %>% left_join(band_instruments)
#' band_members %>% right_join(band_instruments)
#' band_members %>% full_join(band_instruments)
#'
#' # "Filtering" joins keep cases from the LHS
#' band_members %>% semi_join(band_instruments)
#' band_members %>% anti_join(band_instruments)
#'
#' # "Nesting" joins keep cases from the LHS and nests the RHS
#' band_members %>% nest_join(band_instruments)
#'
#' # To suppress the message, supply by
#' band_members %>% inner_join(band_instruments, by = "name")
#' # This is good practice in production code
#'
#' # Use a named `by` if the join variables have different names
#' band_members %>% full_join(band_instruments2, by = c("name" = "artist"))
#' # Note that only the key from the LHS is kept
NULL
#' @rdname join
#' @export
inner_join <- function(x, y, by = NULL, copy = FALSE, suffix = c(".x", ".y"), ...) {
UseMethod("inner_join")
}
#' @rdname join
#' @export
left_join <- function(x, y, by = NULL, copy = FALSE, suffix = c(".x", ".y"), ...) {
UseMethod("left_join")
}
#' @rdname join
#' @export
right_join <- function(x, y, by = NULL, copy = FALSE, suffix = c(".x", ".y"), ...) {
UseMethod("right_join")
}
#' @rdname join
#' @export
full_join <- function(x, y, by = NULL, copy = FALSE, suffix = c(".x", ".y"), ...) {
UseMethod("full_join")
}
#' @rdname join
#' @export
semi_join <- function(x, y, by = NULL, copy = FALSE, ...) {
UseMethod("semi_join")
}
#' @rdname join
#' @export
nest_join <- function(x, y, by = NULL, copy = FALSE, keep = FALSE, name = NULL, ...) {
UseMethod("nest_join")
}
#' @rdname join
#' @export
anti_join <- function(x, y, by = NULL, copy = FALSE, ...) {
UseMethod("anti_join")
}
#' Join data frame tbls
#'
#' See [join] for a description of the general purpose of the
#' functions.
#'
#' @inheritParams inner_join
#' @param ... included for compatibility with the generic; otherwise ignored.
#' @param na_matches
#' Use `"never"` to always treat two `NA` or `NaN` values as
#' different, like joins for database sources, similarly to
#' `merge(incomparables = FALSE)`.
#' The default, `"na"`, always treats two `NA` or `NaN` values as equal, like [merge()].
#' Users and package authors can change the default behavior by calling
#' `pkgconfig::set_config("dplyr::na_matches" = "never")`.
#' @examples
#' if (require("Lahman")) {
#' batting_df <- as_tibble(Batting)
#' person_df <- as_tibble(Master)
#'
#' uperson_df <- as_tibble(Master[!duplicated(Master$playerID), ])
#'
#' # Inner join: match batting and person data
#' inner_join(batting_df, person_df)
#' inner_join(batting_df, uperson_df)
#'
#' # Left join: match, but preserve batting data
#' left_join(batting_df, uperson_df)
#'
#' # Anti join: find batters without person data
#' anti_join(batting_df, person_df)
#' # or people who didn't bat
#' anti_join(person_df, batting_df)
#' }
#' @name join.tbl_df
NULL
check_na_matches <- function(na_matches) {
na_matches <- match.arg(na_matches, choices = c("na", "never"))
accept_na_match <- (na_matches == "na")
accept_na_match
}
check_valid_names <- function(names, warn_only = FALSE) {
which_na <- which(is.na(names))
alert <- if (warn_only) warn else abort
if (length(which_na)) {
alert(glue("Column `{cols}` cannot have NA as name",
cols = glue_collapse(which_na, sep = ", ")
))
}
if (any(dup <- duplicated(names))){
alert(glue("Column `{cols}` must have a unique name",
cols = names[dup]
))
}
}
#' @export
#' @rdname join.tbl_df
inner_join.tbl_df <- function(x, y, by = NULL, copy = FALSE,
suffix = c(".x", ".y"), ...,
na_matches = pkgconfig::get_config("dplyr::na_matches")) {
check_valid_names(tbl_vars(x))
check_valid_names(tbl_vars(y))
by <- common_by(by, x, y)
suffix <- check_suffix(suffix)
na_matches <- check_na_matches(na_matches)
y <- auto_copy(x, y, copy = copy)
vars <- join_vars(tbl_vars(x), tbl_vars(y), by, suffix)
by_x <- check_by_x(vars$idx$x$by)
by_y <- vars$idx$y$by
aux_x <- vars$idx$x$aux
aux_y <- vars$idx$y$aux
by_names <- vars$alias[seq_len(length(by_y))]
y_split <- vec_group_pos(set_names(y[, by_y, drop = FALSE], by_names))
matches <- vec_match(
set_names(x[, by_x, drop = FALSE], by_names),
y_split$key
)
# expand indices
x_indices <- seq_len(nrow(x))[!is.na(matches)]
y_indices <- y_split$pos[matches[!is.na(matches)]]
x_indices <- rep(x_indices, lengths(y_indices))
  y_indices <- vec_c(!!!y_indices, .ptype = integer())
x_slice <- vec_slice(x, x_indices)
y_slice <- vec_slice(y, y_indices)
# joined columns, cast to their common types
out <- new_list(ncol(x) + length(aux_y), names = vars$alias)
# join columns, perhaps with casting,
# x columns stay in same position
join_ptype <- vec_ptype2(x[, by_x, drop = FALSE], set_names(y[, by_y, drop = FALSE], names(x)[by_x]))
out[by_x] <- vec_cast(x_slice[, by_x, drop = FALSE], to = join_ptype)
# other columns from x
out[aux_x] <- x_slice[, aux_x, drop = FALSE]
# then columns from y
out[ncol(x) + seq_along(aux_y)] <- y_slice[, aux_y, drop = FALSE]
reconstruct_join(as_tibble(out), x, vars)
}
#' @importFrom tibble add_column
#' @export
#' @rdname join.tbl_df
nest_join.tbl_df <- function(x, y, by = NULL, copy = FALSE, keep = FALSE, name = NULL, ...) {
name_var <- name %||% as_label(enexpr(y))
check_valid_names(tbl_vars(x))
check_valid_names(tbl_vars(y))
by <- common_by(by, x, y)
y <- auto_copy(x, y, copy = copy)
vars <- join_vars(tbl_vars(x), tbl_vars(y), by)
by_x <- check_by_x(vars$idx$x$by)
by_names <- vars$alias[seq_len(length(by_x))]
by_y <- vars$idx$y$by
aux_y <- vars$idx$y$aux
aux_x <- vars$idx$x$aux
if (keep) {
aux_y <- c(by_y, aux_y)
}
y_split <- vec_group_pos(set_names(y[, by_y, drop = FALSE], by_names))
matches <- vec_match(
set_names(x[, by_x, drop = FALSE], by_names),
y_split$key
)
# expand indices
y_indices <- y_split$pos
# joined columns, cast to their common types
joined <- x[, by_x, drop = FALSE]
joined <- set_names(joined, vars$alias[seq_len(ncol(joined))])
joined[] <- map2(joined, y[, by_y, drop = FALSE], function(joined_i, y_i) {
vec_cast(joined_i, to = vec_ptype_common(joined_i, y_i))
})
  # columns from x (no casting needed)
x_result <- set_names(
x[, aux_x, drop = FALSE],
vars$alias[seq2(ncol(joined) + 1, ncol(x))]
)
# columns from y
y_keep <- if (keep) y else y[, aux_y, drop = FALSE]
y_result_list <- map(matches, function(idx) {
if (identical(idx, NA_integer_)) {
vec_slice(y_keep, 0L)
} else {
vec_slice(y_keep, y_indices[[idx]])
}
})
out <- add_column(joined, !!!x_result, !!name_var := y_result_list)
reconstruct_join(out, x, vars)
}
check_by_x <- function(by_x) {
if (length(by_x) == 0L) {
abort(
"`by` must specify variables to join by",
"dplyr_join_empty_by"
)
}
by_x
}
#' @export
#' @rdname join.tbl_df
left_join.tbl_df <- function(x, y, by = NULL, copy = FALSE,
suffix = c(".x", ".y"), ...,
na_matches = pkgconfig::get_config("dplyr::na_matches")) {
check_valid_names(tbl_vars(x))
check_valid_names(tbl_vars(y))
by <- common_by(by, x, y)
suffix <- check_suffix(suffix)
na_matches <- check_na_matches(na_matches)
y <- auto_copy(x, y, copy = copy)
vars <- join_vars(tbl_vars(x), tbl_vars(y), by, suffix)
by_x <- check_by_x(vars$idx$x$by)
by_y <- vars$idx$y$by
aux_x <- vars$idx$x$aux
aux_y <- vars$idx$y$aux
# unique values and where they are in each
y_split <- vec_group_pos(y[, by_y, drop = FALSE])
# matching uniques in x with uniques in y
matches <- vec_match(x[, by_x, drop = FALSE], set_names(y_split$key, names(x)[by_x]))
# for each unique value in x, expand the ids according to the number
# of matches in y
x_indices <- vec_c(!!!map2(matches, seq_along(matches), function(match, ids, rhs_id) {
if (is.na(match)) {
ids
} else {
vec_repeat(ids, each = length(rhs_id[[match]]))
}
}, rhs_id = y_split$pos), .ptype = integer())
x_slice <- vec_slice(x, x_indices)
# same for ids of y
y_indices <- vec_c(!!!map2(matches, seq_along(matches), function(match, ids, rhs_id) {
if (is.na(match)) {
NA_integer_
} else {
rhs_id[[match]]
}
}, rhs_id = y_split$pos), .ptype = integer())
y_slice <- vec_slice(y, y_indices)
out <- new_list(ncol(x) + length(aux_y), names = vars$alias)
# join columns, perhaps with casting,
# x columns stay in same position
join_ptype <- vec_ptype2(x[, by_x, drop = FALSE], set_names(y[, by_y, drop = FALSE], names(x)[by_x]))
out[by_x] <- vec_cast(x_slice[, by_x, drop = FALSE], to = join_ptype)
# other columns from x
out[aux_x] <- x_slice[, aux_x, drop = FALSE]
# then columns from y
out[ncol(x) + seq_along(aux_y)] <- y_slice[, aux_y, drop = FALSE]
reconstruct_join(as_tibble(out), x, vars)
}
#' @export
#' @rdname join.tbl_df
right_join.tbl_df <- function(x, y, by = NULL, copy = FALSE,
suffix = c(".x", ".y"), ...,
na_matches = pkgconfig::get_config("dplyr::na_matches")) {
check_valid_names(tbl_vars(x))
check_valid_names(tbl_vars(y))
by <- common_by(by, x, y)
suffix <- check_suffix(suffix)
na_matches <- check_na_matches(na_matches)
y <- auto_copy(x, y, copy = copy)
vars <- join_vars(tbl_vars(x), tbl_vars(y), by, suffix)
by_x <- check_by_x(vars$idx$x$by)
by_y <- vars$idx$y$by
aux_x <- vars$idx$x$aux
aux_y <- vars$idx$y$aux
alias <- vars$alias
# unique values and where they are in each
x_split <- vec_group_pos(x[, by_x, drop = FALSE])
# matching uniques in x with uniques in y
matches <- vec_match(
y[, by_y, drop = FALSE],
set_names(x_split$key, names(y)[by_y])
)
# for each unique value in y, expand the ids according to the number
# of matches in x
y_indices <- vec_c(!!!map2(matches, seq_along(matches), function(match, id, lhs_id) {
if (is.na(match)) {
id
} else {
vec_repeat(id, each = length(lhs_id[[match]]))
}
}, lhs_id = x_split$pos), .ptype = integer())
# same for ids of x
x_indices <- vec_c(!!!map2(matches, seq_along(matches), function(match, id, lhs_id) {
if (is.na(match)) {
NA_integer_
} else {
vec_repeat(lhs_id[[match]], times = length(id))
}
}, lhs_id = x_split$pos), .ptype = integer())
x_slice <- vec_slice(x, x_indices)
y_slice <- vec_slice(y, y_indices)
out <- new_list(ncol(x) + length(aux_y), names = vars$alias)
# the joined columns (taken from `y`) and then cast to common type
join_ptype <- vec_ptype2(x[, by_x, drop = FALSE], set_names(y[, by_y, drop = FALSE], names(x)[by_x]))
out[by_x] <- vec_cast(set_names(y_slice[, by_y, drop = FALSE], names(x)[by_x]), to = join_ptype)
  # other columns from x
out[aux_x] <- x_slice[, aux_x, drop = FALSE]
# then columns from y
out[ncol(x) + seq_along(aux_y)] <- y_slice[, aux_y, drop = FALSE]
reconstruct_join(as_tibble(out), x, vars)
}
#' @export
#' @rdname join.tbl_df
full_join.tbl_df <- function(x, y, by = NULL, copy = FALSE,
suffix = c(".x", ".y"), ...,
na_matches = pkgconfig::get_config("dplyr::na_matches")) {
check_valid_names(tbl_vars(x))
check_valid_names(tbl_vars(y))
by <- common_by(by, x, y)
suffix <- check_suffix(suffix)
na_matches <- check_na_matches(na_matches)
y <- auto_copy(x, y, copy = copy)
vars <- join_vars(tbl_vars(x), tbl_vars(y), by, suffix)
by_x <- check_by_x(vars$idx$x$by)
by_y <- vars$idx$y$by
aux_x <- vars$idx$x$aux
aux_y <- vars$idx$y$aux
by_names <- vars$alias[seq_len(length(by_x))]
# unique values and where they are in each
x_split <- vec_group_pos(set_names(x[, by_x, drop = FALSE], by_names))
y_split <- vec_group_pos(set_names(y[, by_y, drop = FALSE], by_names))
# matching uniques in x with uniques in y and vice versa
x_matches <- vec_match(x_split$key, y_split$key)
y_matches <- vec_match(y_split$key, x_split$key)
# expand x indices from x matches
x_indices_one <- vec_c(
!!!map2(x_matches, x_split$pos, function(match, ids, rhs_id) {
if (is.na(match)) {
ids
} else {
vec_repeat(ids, each = length(rhs_id[[match]]))
}
}, rhs_id = y_split$pos),
.ptype = integer()
)
x_indices_two <- rep(NA_integer_,
sum(lengths(y_split$pos[is.na(y_matches)]))
)
# rows in x
y_indices_one <- vec_c(
!!!map2(x_matches, x_split$pos, function(match, ids, rhs_id) {
if (is.na(match)) {
vec_repeat(NA_integer_, length(ids))
} else {
vec_repeat(rhs_id[[match]], times = length(ids))
}
}, rhs_id = y_split$pos),
.ptype = integer()
)
# rows in y and not in x
y_indices_two <- vec_c(!!!y_split$pos[is.na(y_matches)], .ptype = integer())
out <- new_list(ncol(x) + length(aux_y), names = vars$alias)
out[by_x] <- vec_rbind(
vec_slice(x[, by_x, drop = FALSE], x_indices_one),
set_names(vec_slice(y[, by_y, drop = FALSE], y_indices_two), names(x)[by_x])
)
  # other columns from x
out[aux_x] <- vec_slice(x[, aux_x, drop = FALSE], c(x_indices_one, x_indices_two))
# columns from y
out[ncol(x) + seq_along(aux_y)] <- vec_slice(y[, aux_y, drop = FALSE], c(y_indices_one, y_indices_two))
reconstruct_join(as_tibble(out), x, vars)
}
#' @export
#' @rdname join.tbl_df
semi_join.data.frame <- function(x, y, by = NULL, copy = FALSE, ...,
na_matches = pkgconfig::get_config("dplyr::na_matches")) {
check_valid_names(tbl_vars(x))
check_valid_names(tbl_vars(y))
by <- common_by(by, x, y)
by_x <- check_by_x(by$x)
suffix <- check_suffix(c(".x", ".y"))
na_matches <- check_na_matches(na_matches)
vars <- join_vars(tbl_vars(x), tbl_vars(y), by, suffix)
by_x <- check_by_x(vars$idx$x$by)
by_y <- vars$idx$y$by
y <- auto_copy(x, y, copy = copy)
y_split <- vec_group_pos(
set_names(y[, by_y, drop = FALSE], names(x)[by_x])
)
indx <- which(!is.na(vec_match(x[, by_x, drop = FALSE], y_split$key)))
x[indx, , drop = FALSE]
}
#' @export
#' @rdname join.tbl_df
anti_join.data.frame <- function(x, y, by = NULL, copy = FALSE, ...,
na_matches = pkgconfig::get_config("dplyr::na_matches")) {
check_valid_names(tbl_vars(x))
check_valid_names(tbl_vars(y))
by <- common_by(by, x, y)
by_x <- check_by_x(by$x)
suffix <- check_suffix(c(".x", ".y"))
na_matches <- check_na_matches(na_matches)
vars <- join_vars(tbl_vars(x), tbl_vars(y), by, suffix)
by_x <- check_by_x(vars$idx$x$by)
by_y <- vars$idx$y$by
y <- auto_copy(x, y, copy = copy)
y_split <- vec_group_pos(
set_names(y[, by_y, drop = FALSE], names(x)[by_x])
)
indx <- which(is.na(vec_match(x[, by_x, drop = FALSE], y_split$key)))
x[indx, , drop = FALSE]
}
reconstruct_join <- function(out, x, vars) {
if (is_grouped_df(x)) {
groups_in_old <- match(group_vars(x), tbl_vars(x))
groups_in_alias <- match(groups_in_old, vars$x)
out <- grouped_df(out, vars$alias[groups_in_alias], group_by_drop_default(x))
}
out
}
#' @export
inner_join.data.frame <- function(x, y, by = NULL, copy = FALSE, ...) {
as.data.frame(inner_join(as_tibble(x), y, by = by, copy = copy, ...))
}
#' @export
left_join.data.frame <- function(x, y, by = NULL, copy = FALSE, ...) {
as.data.frame(left_join(as_tibble(x), y, by = by, copy = copy, ...))
}
#' @export
#' @rdname join.tbl_df
nest_join.data.frame <- function(x, y, by = NULL, copy = FALSE, keep = FALSE, name = NULL, ... ) {
as.data.frame(nest_join(as_tibble(x), y, by = by, copy = copy, ..., keep = keep, name = name))
}
#' @export
right_join.data.frame <- function(x, y, by = NULL, copy = FALSE, ...) {
as.data.frame(right_join(as_tibble(x), y, by = by, copy = copy, ...))
}
#' @export
full_join.data.frame <- function(x, y, by = NULL, copy = FALSE, ...) {
as.data.frame(full_join(as_tibble(x), y, by = by, copy = copy, ...))
}
# Helpers -----------------------------------------------------------------
check_suffix <- function(x) {
if (!is.character(x) || length(x) != 2) {
bad_args("suffix", "must be a character vector of length 2, ",
"not {friendly_type_of(x)} of length {length(x)}"
)
}
if (any(is.na(x))) {
bad_args("suffix", "can't be NA")
}
if (all(x == "")) {
bad_args("suffix", "can't be empty string for both `x` and `y` suffixes")
}
list(x = x[[1]], y = x[[2]])
}
|
input := FileTools:-Text:-ReadFile("AoC-2021-9-input.txt" ):
ogrid := ((parse~)@StringTools:-Explode)~(subs(""=NULL,StringTools:-Split(input))):
gridw := nops(ogrid[1]);
gridl := nops(ogrid);
grid := [ [ 9 $ (gridw+2) ], map(l->[9,l[],9], ogrid)[], [ 9 $ (gridw+2) ] ]:
lowpoints := NULL:
risklevel := 0:
for i from 2 to gridl+1 do
for j from 2 to gridw+1 do
if grid[i][j] < grid[i-1][j]
and grid[i][j] < grid[i+1][j]
and grid[i][j] < grid[i][j-1]
and grid[i][j] < grid[i][j+1]
then
lowpoints := lowpoints, [i,j];
risklevel := risklevel + grid[i][j]+1;
end if;
end do;
end do:
answer1 := risklevel;
counted := table(sparse=0):
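# basinsize recursively flood-fills outward from a low point: the 'counted'
# table ensures each cell is visited only once, and height-9 cells act as
# basin boundaries.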
basinsize := proc( input )
global grid, counted;
local nb, x, y, v;
x, y, v := op(input);
if v = 9 or counted[input] <> 0 then
return 0;
end if;
nb := [[x, y-1], [x, y+1], [x-1, y], [x+1,y]];
nb := remove( n->(min(n)<2 or n[1]>gridl+2 or n[2]>gridw+2), nb);
nb := map(n->[n[1], n[2], grid[n[1]][n[2]]], nb);
#nb := remove(n->n[3]=9 or n[3] < v, nb); # for non-9 bounded basins
counted[ input ] := 1;
if nb = [] then
return 1;
else
return add(map(thisproc, nb))+1;
end if;
end proc:
bs := map(l->basinsize([l[], grid[l[1]][l[2]]]), [lowpoints]):
answer2 := mul( sort(bs)[-3..-1] );
|
Require Import VerdiRaft.Raft.
Require Import VerdiRaft.TraceUtil.
Section InputBeforeOutputInterface.
Context {orig_base_params : BaseParams}.
Context {one_node_params : OneNodeParams orig_base_params}.
Context {raft_params : RaftParams orig_base_params}.
Section inner.
Variable client : clientId.
Variables id : nat.
Definition input_before_output (tr : list (name * (raft_input + list raft_output))) :=
before_func (is_input_with_key client id) (is_output_with_key client id) tr.
End inner.
Class input_before_output_interface : Prop :=
{
output_implies_input_before_output :
forall client id failed net tr,
step_failure_star step_failure_init (failed, net) tr ->
key_in_output_trace client id tr ->
input_before_output client id tr
}.
End InputBeforeOutputInterface.
|
State Before: b : Bool
⊢ (b::zero b) = zero b
State After: no goals
Tactic: cases b <;> rfl
|
[STATEMENT]
lemma rI3: "dNOR (\<I>\<^sub>R R)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. dNOR (\<I>\<^sub>R R)
[PROOF STEP]
unfolding Int_rel_def dNOR_def conn
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>w. contains (\<lambda>v. True) (R w) = True
[PROOF STEP]
by simp
|
lemma pole_theorem_open:
  assumes holg: "g holomorphic_on S"
    and S: "open S"
    and eq: "\<And>z. z \<in> S - {a} \<Longrightarrow> g z = (z - a) * f z"
  shows "(\<lambda>z. if z = a then deriv g a else f z - g a/(z - a)) holomorphic_on S"
|
# IMPORTS
from tkinter.filedialog import askopenfilename
import json
import importlib
import threading
import time
from tabulate import tabulate
import numpy as np
import matplotlib.pyplot as plt
import os
import shutil
import webbrowser
from wikipedia import wikipedia
import colorama
from termcolor import cprint
from win10toast import ToastNotifier
# CLASS CONTAINING GLOBAL VARIABLES
class GlobalVariables:
totalGoals = 0
totalAssists = 0
statObjectsList = []
completePlayerStats = []
tour = type
dictFilename = ""
txtFilename = ""
pathDelimiter = "../"
tkinterFilePath = ""
maxPercentageGoalInvolvement = 0
mostValuablePlayer = ""
mostValuablePlayerWikiInfo = ""
AG_PlayerDataDict = {}
HM_PlayerDataDict = {}
NS_PlayerDataDict = {}
TZ_PlayerDataDict = {}
script_helper = type
Search_Number = 0
# CLASS FOR CALCULATING PLAYER STATS
class PlayerStats():
def __init__(self, name, goals, assists):
self.name = name
self.goals = goals
self.assists = assists
self.percentage_goal_involvement = np.around(((self.goals*3 + self.assists*2) / (
GlobalVariables.totalGoals*3 + GlobalVariables.totalAssists*2) * 100), decimals=2)
self.single_player_complete_stat = []
self.single_player_complete_stat.extend(
[self.name, self.goals, self.assists, self.percentage_goal_involvement])
GlobalVariables.completePlayerStats.append(
self.single_player_complete_stat)
if self.percentage_goal_involvement > GlobalVariables.maxPercentageGoalInvolvement:
GlobalVariables.maxPercentageGoalInvolvement = self.percentage_goal_involvement
GlobalVariables.mostValuablePlayer = self.name
# FINDING LAST MODIFIED FILE
def get_new_file(filesPath):
filename = ""
cont = 'n'
list_of_files = os.listdir(filesPath)
if len(list_of_files) == 0:
print("\nUnable to find any file")
else:
        # Compare raw float modification times; formatting them as strings
        # first would make the comparison lexicographic rather than numeric
        modified_time_of_new_file = os.path.getmtime(
            filesPath+list_of_files[0])
        filename = list_of_files[0]
        for i in list_of_files:
            mtime = os.path.getmtime(filesPath+i)
            if mtime > modified_time_of_new_file:
                modified_time_of_new_file = mtime
                filename = i
if filename.endswith(".txt"):
print("\nNew file found: ", filename)
cont = input("Continue? (y/n): ").lower().strip()
else:
print("\nUnable to find any file")
cont = 'n'
if cont == 'y' or cont == '':
return filename
elif cont == 'n':
manual_file_select = input(
"Enter filename manually (eg. jan1) or Enter to skip: ").lower().strip()
manual_file_select = manual_file_select+".txt"
if manual_file_select not in list_of_files:
print("Cannot find", manual_file_select+". Opening filepicker")
abs_txt_path = askopenfilename(
filetypes=[('Text Document', '*.txt')])
tfilename = str(abs_txt_path).split('/')[-1]
GlobalVariables.tkinterFilePath = abs_txt_path
return tfilename
else:
return manual_file_select
else:
raise Exception("Invalid input. Exiting")
# MAKE A COPY OF FILE IF THE FILE IS CHOSEN WITH tkinter
def write_to_file(tkinter_chosen_file_path):
if tkinter_chosen_file_path != GlobalVariables.pathDelimiter+"files/"+GlobalVariables.txtFilename:
shutil.copyfile(tkinter_chosen_file_path,
GlobalVariables.pathDelimiter+"files/"+GlobalVariables.txtFilename)
# TO GET THE RESPECTIVE WORKING DICTIONARY FROM script_helper.py
def get_working_dict_from_helper():
GlobalVariables.script_helper = importlib.import_module("script_helper")
GlobalVariables.tour, GlobalVariables.dictFilename = GlobalVariables.script_helper.get_working_dict(
GlobalVariables.txtFilename)
# CALCULATING TOTAL GOALS
def sum_of_goals(every_player_stats):
goal_list = []
goal_list = np.array(goal_list)
for i in every_player_stats:
goal_list = np.append(goal_list, i[1])
return (int(np.sum(goal_list)))
# CALCULATING TOTAL ASSISTS
def sum_of_assists(every_player_stats):
assist_list = []
assist_list = np.array(assist_list)
for i in every_player_stats:
assist_list = np.append(assist_list, i[2])
return (int(np.sum(assist_list)))
# DOING WIKIPEDIA SEARCH ON MVP
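# Tries up to three Wikipedia search results for the MVP's name, keeping the
# first two-sentence summary that mentions football; falls back to "None" if
# a lookup raises an error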
def wiki_search():
while GlobalVariables.Search_Number < 3:
search_term = GlobalVariables.mostValuablePlayer
category = "football"
try:
keyword = wikipedia.search(search_term)[
GlobalVariables.Search_Number]
WikiInfo = str(wikipedia.summary(
keyword, sentences=2, auto_suggest=False))
if WikiInfo.lower().find(category) != -1:
GlobalVariables.mostValuablePlayerWikiInfo = WikiInfo
return
else:
GlobalVariables.Search_Number += 1
wiki_search()
except:
GlobalVariables.mostValuablePlayerWikiInfo = "None"
# SAVING EVERY PLAYER'S TOTAL STATS
def record_player_stats(list_element, player_dict, alph):
if player_dict != {}:
if list_element[0] in player_dict.keys():
goal_updated = player_dict[list_element[0]][0] + list_element[1]
assist_updated = player_dict[list_element[0]][1] + list_element[2]
count = player_dict[list_element[0]][2][2] + 1
avg_goals = np.around(goal_updated / count, decimals=2)
avg_assists = np.around(assist_updated / count, decimals=2)
val_updated = {list_element[0]: [
goal_updated, assist_updated, [avg_goals, avg_assists, count]]}
if alph == "AG":
GlobalVariables.AG_PlayerDataDict.update(val_updated)
elif alph == "HM":
GlobalVariables.HM_PlayerDataDict.update(val_updated)
elif alph == "NS":
GlobalVariables.NS_PlayerDataDict.update(val_updated)
else:
GlobalVariables.TZ_PlayerDataDict.update(val_updated)
else:
remaining_lists_dict = {list_element[0]: [
list_element[1], list_element[2], [list_element[1], list_element[2], 1]]}
if alph == "AG":
GlobalVariables.AG_PlayerDataDict.update(remaining_lists_dict)
elif alph == "HM":
GlobalVariables.HM_PlayerDataDict.update(remaining_lists_dict)
elif alph == "NS":
GlobalVariables.NS_PlayerDataDict.update(remaining_lists_dict)
else:
GlobalVariables.TZ_PlayerDataDict.update(remaining_lists_dict)
if player_dict == {}:
remaining_lists_dict = {list_element[0]: [
list_element[1], list_element[2], [list_element[1], list_element[2], 1]]}
if alph == "AG":
GlobalVariables.AG_PlayerDataDict.update(remaining_lists_dict)
elif alph == "HM":
GlobalVariables.HM_PlayerDataDict.update(remaining_lists_dict)
elif alph == "NS":
GlobalVariables.NS_PlayerDataDict.update(remaining_lists_dict)
else:
GlobalVariables.TZ_PlayerDataDict.update(remaining_lists_dict)
# TABULATE PRETTY PRINTING
def tabular_display(table, headers):
print("\n")
print(tabulate(table, headers, tablefmt="pretty"))
# MAIN
def main():
# DISPLAYING TITLE
display_statements_list = ["PES 21 myClub Tour"]
width = len(display_statements_list[0])
colorama.init()
print('+-' + '-' * width + '-+')
for s in display_statements_list:
cprint('| {0:^{1}} |'.format(s, width), color='green')
print('+-' + '-'*(width) + '-+')
# FINDING CORRECT PATH pathDelimiter FOR BAT EXECUTION
try:
os.listdir(GlobalVariables.pathDelimiter+"files")
except FileNotFoundError:
GlobalVariables.pathDelimiter = "./"
# FINDING THE LATEST MODIFIED FILE
GlobalVariables.txtFilename = get_new_file(
GlobalVariables.pathDelimiter+"files/")
if GlobalVariables.txtFilename == '':
raise FileNotFoundError("No file chosen")
# GETTING THE RELEVANT TOUR DICTIONARY
get_working_dict_from_helper_thread = threading.Thread(
target=get_working_dict_from_helper, daemon=False)
get_working_dict_from_helper_thread.start()
# READING FROM THE TXT FILE
every_player_stats = []
single_player_stat = []
try:
if GlobalVariables.tkinterFilePath == "":
openfilepath = GlobalVariables.pathDelimiter+"files/"+GlobalVariables.txtFilename
else:
openfilepath = GlobalVariables.tkinterFilePath
write_to_file(GlobalVariables.tkinterFilePath)
with open(file=openfilepath, mode="r", encoding="utf-8") as working_file:
for line in working_file:
if line.startswith("Player Name"):
continue
else:
line = line.strip().split("\t")
param1 = str(line[0])
param2 = int(line[1])
param3 = int(line[2])
single_player_stat.extend([param1, param2, param3])
every_player_stats.append(list(single_player_stat))
single_player_stat.clear()
except FileNotFoundError:
raise FileNotFoundError("There is no such file in the directory\n")
# FINDING TOTAL GOALS & ASSIST NUMBERS
GlobalVariables.totalGoals = sum_of_goals(every_player_stats)
GlobalVariables.totalAssists = sum_of_assists(every_player_stats)
# STORING ALL OBJECTS
for item in every_player_stats:
GlobalVariables.statObjectsList.append(
PlayerStats(item[0], item[1], item[2]))
# WIKIPEDIA SEARCH THREAD
wiki_search_thread = threading.Thread(target=wiki_search, daemon=False)
print(GlobalVariables.mostValuablePlayerWikiInfo)
wiki_search_thread.start()
# SAVING EVERY PLAYERS TOTAL STATS
analysis_mode = input(
"Turn on player analysis mode? (y/n): ").lower().strip()
if analysis_mode == 'y' or analysis_mode == '':
try:
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_ag.json", mode="r", encoding="utf8") as input_json:
try:
player_dict_ag = json.load(input_json)
except:
player_dict_ag = {}
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_hm.json", mode="r", encoding="utf8") as input_json:
try:
player_dict_hm = json.load(input_json)
except:
player_dict_hm = {}
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_ns.json", mode="r", encoding="utf8") as input_json:
try:
player_dict_ns = json.load(input_json)
except:
player_dict_ns = {}
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_tz.json", mode="r", encoding="utf8") as input_json:
try:
player_dict_tz = json.load(input_json)
except:
player_dict_tz = {}
except FileNotFoundError:
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_ag.json", mode="w+", encoding="utf8"):
player_dict_ag = {}
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_hm.json", mode="w+", encoding="utf8"):
player_dict_hm = {}
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_ns.json", mode="w+", encoding="utf8"):
player_dict_ns = {}
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_tz.json", mode="w+", encoding="utf8"):
player_dict_tz = {}
alphabets_P1 = list(map(chr, range(65, 72)))
alphabets_P2 = list(map(chr, range(72, 78)))
alphabets_P3 = list(map(chr, range(78, 84)))
alphabets_P4 = list(map(chr, range(84, 91)))
for i in GlobalVariables.completePlayerStats:
player_name_first_char = i[0][0]
if player_name_first_char in alphabets_P1:
record_player_stats(i, player_dict_ag, "AG")
elif player_name_first_char in alphabets_P2:
record_player_stats(i, player_dict_hm, "HM")
elif player_name_first_char in alphabets_P3:
record_player_stats(i, player_dict_ns, "NS")
elif player_name_first_char in alphabets_P4:
record_player_stats(i, player_dict_tz, "TZ")
else:
print("Unicode error:", player_name_first_char,
"in", i[0]+". Skipping")
pass
player_dict_ag.update(GlobalVariables.AG_PlayerDataDict)
player_dict_hm.update(GlobalVariables.HM_PlayerDataDict)
        player_dict_ns.update(GlobalVariables.NS_PlayerDataDict)
player_dict_tz.update(GlobalVariables.TZ_PlayerDataDict)
GlobalVariables.AG_PlayerDataDict.clear()
GlobalVariables.HM_PlayerDataDict.clear()
GlobalVariables.NS_PlayerDataDict.clear()
GlobalVariables.TZ_PlayerDataDict.clear()
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_ag.json", mode="w+", encoding="utf8") as output_json:
json.dump(player_dict_ag, output_json, sort_keys=True, indent=4)
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_hm.json", mode="w+", encoding="utf8") as output_json:
json.dump(player_dict_hm, output_json, sort_keys=True, indent=4)
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_ns.json", mode="w+", encoding="utf8") as output_json:
json.dump(player_dict_ns, output_json, sort_keys=True, indent=4)
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_tz.json", mode="w+", encoding="utf8") as output_json:
json.dump(player_dict_tz, output_json, sort_keys=True, indent=4)
# SORTING TABLE
table = GlobalVariables.completePlayerStats
sorting_method = input(
"\nSpecify sorting method (1:Default | 2:Name | 3:Goals | 4:Assist | 5:Percentage involvement): ").lower().strip()
if sorting_method == "1" or sorting_method == '':
pass
elif sorting_method == '2':
table.sort(key=lambda x: x[0], reverse=False)
elif sorting_method == '3':
table.sort(key=lambda x: x[1], reverse=True)
elif sorting_method == '4':
table.sort(key=lambda x: x[2], reverse=True)
elif sorting_method == '5':
table.sort(key=lambda x: x[3], reverse=True)
else:
print("Input not valid. Default sorting selected")
pass
# DISPLAYING THE COMPLETE PLAYER STATS IN A PRETTY TABULAR FORMAT ALONG WITH TOTAL GOALS & ASSISTS
    tabular_display(table, ["Name", "Goals", "Assists", "% Involvement"])
print("Total Goals:", GlobalVariables.totalGoals,
"\tTotal Assists:", GlobalVariables.totalAssists)
# DISPLAYING MVP NAME AND SHOWING WINDOWS 10 TOAST
print("\nMVP: ", end='')
try:
cprint(GlobalVariables.mostValuablePlayer, color='green')
except:
print(GlobalVariables.mostValuablePlayer)
try:
n = ToastNotifier()
n.show_toast("PES 21 myCLUB Tour", "Most valuable player is " +
GlobalVariables.mostValuablePlayer, icon_path=GlobalVariables.pathDelimiter+"internal/icons/logo.ico", threaded=True)
except:
pass
# DISPLAYING MVP'S WIKIPEDIA INFO
disp_mvp_info = input("\nDo you want to know about " +
GlobalVariables.mostValuablePlayer+"? (y/n): ").lower().strip()
if disp_mvp_info == 'y' or disp_mvp_info == '':
wiki_search_thread.join()
if GlobalVariables.mostValuablePlayerWikiInfo == "" or GlobalVariables.mostValuablePlayerWikiInfo == "None":
print("\n[Sorry. Wikipedia search was not successful]\n")
else:
print("\n"+str(GlobalVariables.mostValuablePlayerWikiInfo)+"\n")
srch_about_mvp = input("Want to know more (y/n): ").lower().strip()
if srch_about_mvp == 'y':
try:
url = "https://www.google.com.tr/search?q={}".format(
GlobalVariables.mostValuablePlayer)
print("Searching about",
GlobalVariables.mostValuablePlayer+". Opening web-browser")
time.sleep(0.3)
webbrowser.open_new_tab(url)
except:
raise Exception("\nError in opening the webbrowser")
# UPDATING THE WORKING DICTIONARY WITH NEW FILE'S TOTAL GOALS
get_working_dict_from_helper_thread.join()
year_suffix = GlobalVariables.script_helper.get_year_suffix(GlobalVariables.txtFilename)
raw_file_name = GlobalVariables.txtFilename.split(".txt")[0]
updated_key = year_suffix + '-' + \
(GlobalVariables.txtFilename[0:3] + "-" + raw_file_name[3:]).title()
GlobalVariables.tour.update({updated_key: GlobalVariables.totalGoals})
with open(GlobalVariables.pathDelimiter+"internal/"+GlobalVariables.dictFilename+".json", mode="w+", encoding="utf8") as dict_json:
json.dump(GlobalVariables.tour, dict_json, indent=4)
# SHOWING THE GRAPH
graph_show = input("\nSave graphical data? (y/n): ").lower().strip()
if graph_show == 'y' or graph_show == '':
key_lists = []
value_lists = []
for key, value in GlobalVariables.tour.items():
key_lists.append(key)
value_lists.append(value)
x_pos = np.arange(len(key_lists))
y_pos = value_lists
plt.rcdefaults()
plt.bar(x_pos, y_pos, align='center', alpha=0.5)
plt.xticks(x_pos, key_lists)
plt_title = GlobalVariables.script_helper.get_year(GlobalVariables.txtFilename) + " " + GlobalVariables.script_helper.getQuarter(GlobalVariables.txtFilename)
plt.title(plt_title)
plt.ylabel('Number of goals')
plt.xlabel('Tour event')
for i, v in enumerate(y_pos):
plt.text(x=i, y=v+1, s=str(v))
plt_graph_filepath = GlobalVariables.pathDelimiter + "statistics/" + plt_title + ".jpg"
mng = plt.get_current_fig_manager()
mng.window.state("zoomed")
plt.savefig(plt_graph_filepath, format='JPEG')
plt.show()
print("\nGraph saved at", plt_graph_filepath)
print("\nDONE!")
if __name__ == "__main__":
main()
# Code developed by
# https://github.com/gokulmanohar
|
lemma tendsto_norm_zero_iff: "((\<lambda>x. norm (f x)) \<longlongrightarrow> 0) F \<longleftrightarrow> (f \<longlongrightarrow> 0) F"
|
context('reloading')
is_module_loaded = function (path) {
path %in% names(box:::loaded_mods)
}
unload_all = function () {
modenv = box:::loaded_mods
rm(list = names(modenv), envir = modenv)
}
tempfile_dir = function (...) {
file = tempfile()
dir.create(file)
file
}
create_nested_test_module = function (dir) {
mod = file.path(dir, 'mod', 'a')
dir.create(mod, recursive = TRUE)
writeLines("#' @export\nbox::use(./sub)", file.path(mod, '__init__.r'))
writeLines("#' @export\nvalue = 1L", file.path(mod, 'sub.r'))
}
edit_nested_test_module = function (dir) {
mod = file.path(dir, 'mod', 'a')
writeLines("#' @export\nvalue = 2L", file.path(mod, 'sub.r'))
}
test_that('module can be reloaded', {
# Required since other tests have side-effects.
# Tear-down would be helpful here, but not supported by testthat.
unload_all()
box::use(mod/a)
expect_equal(length(box:::loaded_mods), 1L)
counter = a$get_counter()
a$inc()
expect_equal(a$get_counter(), counter + 1L)
box::reload(a)
expect_true(is_module_loaded(box:::path(a)))
expect_length(box:::loaded_mods, 1L)
expect_equal(a$get_counter(), counter)
})
test_that('reload checks its arguments', {
expect_error(box::reload(123))
expect_error(box::reload(foo))
box::use(mod/a)
expect_error(box::reload((a)))
})
test_that('reload includes module dependencies', {
# This test case actually edits a dependency and reloads the edit. The
# purpose of this is to ensure that reloading doesn’t merely call `.on_load`
# again, but actually does reload the changes from disk.
dir = tempfile_dir()
on.exit(unlink(dir, recursive = TRUE))
old_path = options(box.path = dir)
on.exit(options(old_path), add = TRUE)
create_nested_test_module(dir)
box::use(mod/a)
expect_equal(a$sub$value, 1L)
edit_nested_test_module(dir)
box::reload(a)
expect_equal(a$sub$value, 2L)
# To do:
# * modules with compiled source,
# * tricky packages loaded as modules, e.g. packages that call
# system.file(), and alike, and
# * modules with S4 classes/object,
})
test_that('reload includes transitive dependencies', {
# Unlike in the previous test, this test uses `.on_load` as an indicator of
# reloading, to keep things simpler.
box::use(mod/reload/a)
expect_messages(
box::reload(a),
has = c('^c unloaded', '^c loaded')
)
})
test_that('reload of transitive imports skips packages', {
box::use(mod/reload/pkg)
expect_error(box::reload(pkg), NA)
})
test_that('`reload` shows expected errors', {
old_opts = options(useFancyQuotes = FALSE)
on.exit(options(old_opts))
expect_box_error(
box::reload(mod/a),
'"reload" expects a module object, got "mod/a"'
)
expect_box_error(
box::reload(./a),
'"reload" expects a module object, got "./a"'
)
expect_box_error(box::reload(na), 'object "na" not found')
x = 1L
expect_box_error(
box::reload(x),
'"reload" expects a module object, got "x", which is of type "integer" instead'
)
})
|
# Week 2
## Introduction to Solid State
```python
import numpy as np
import matplotlib.pyplot as plt
import os
import subprocess
import MSD as msd
from scipy import stats
def get_diffusion(file, atom):
    # Scan the OUTPUT file for the diffusion table and return the row for
    # the requested atom (None if it is not found)
    d = None
    with open(file) as f:
        y = False
        for line in f:
            if "atom D " in line:
                y = True
            if y and str(atom) in line:
                d = line.split()
                break
    return d
```
Now that you are familiar with molecular dynamics, you are going to use it to tackle some real world problems. In the next three weeks you will investigate the transport properties of a simple fluorite material - Ca$F_2$. The transport properties of a material underpin many modern technological applications. For example, solid oxide fuel cells (SOFCs - an alternative to batteries) depend on the movement of charge carriers through the solid electrolyte, while the corrosive oxidation of nuclear fuel materials depends on the diffusion of oxygen into the lattice. Due to the importance of these transport properties, scientists and engineers spend large amounts of their time trying to optimise them using different stoichiometries, by introducing defects and by using different synthesis techniques. Over the next three weeks you will investigate how the transport properties of Ca$F_2$ are affected by temperature, structural defects (Schottky and Frenkel) and chemical dopants (e.g. different cations). A rough breakdown looks as follows
- Week 2
- Introduction to DL_POLY
- Tutorial on the calculation of diffusion coefficients
- Tutorial on the Arhennius equation
    - Molecular dynamics simulations of stoichiometric Ca$F_2$
- Week 3
- Frenkel and Schottky defects
- Week 4
- Dopants
## Introduction to DL_POLY
DL_POLY is a molecular dynamics program maintained by Daresbury laboratories. In contrast to pylj, DL_POLY is a three-dimensional molecular dynamics code that is used worldwide by computational scientists for molecular simulation, but it should be noted that the theory is exactly the same and any understanding gained from pylj is completely applicable to DL_POLY. For the next three weeks you will use DL_POLY to run short molecular dynamics simulations of Ca$F_2$. You first need to understand the input files required by DL_POLY.
- CONTROL - This is the file that contains all of the simulation parameters, e.g. simulation temperature, pressure, number of steps etc.
- CONFIG - This is the file that contains the structure - i.e. the atomic coordinates of each atom.
- FIELD - This is the file that contains the force field or potential model e.g. Lennard Jones.
Contained within the folder "Input" you will find a file called input.txt. This is the main file that you will interact with over the next three weeks and is used to generate the FIELD, CONTROL and CONFIG files. Essentially, it is easier to meddle with input.txt than with the three DL_POLY files every time you want to change something. To run METADISE we will use the subprocess python module. You specify the program you want to run and the directory to run it in; you will need to ensure the file path is correct.
```python
subprocess.call('H:/Third_year_lab/Advanced_Practical_Chemistry_Teaching-master/progs/metadise.exe', cwd='H:/Third_year_lab/Advanced_Practical_Chemistry_Teaching-master/course/week_2/Input/')
os.rename('Input/control_o0001.dlp', 'Input/CONTROL')
os.rename('Input/config__o0001.dlp', 'Input/CONFIG')
os.rename('Input/field___o0001.dlp', 'Input/FIELD')
```
Now you should have a CONFIG, CONTROL and FIELD file within the input directory. In theory you could just call the DL_POLY program on this directory and your simulation would run. However, we need to tweak the CONTROL file in order to set up our desired simulation. Make a new subdirectory in the week 2 directory named "Example", copy CONFIG, CONTROL and FIELD to that subdirectory, and then edit the CONTROL file.
We want to change the following
`Temperature 300 ---> Temperature 1500`
`Steps 5001 ---> Steps 40000`
`ensemble nve ---> ensemble npt hoover 0.1 0.5`
`trajectory nstraj= 1 istraj= 250 keytrj=0 ---> trajectory nstraj= 0 istraj= 100 keytrj=0`
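If you would rather make these edits programmatically than by hand, the sketch below does the swaps with simple string replacement (an illustration only; it assumes your CONTROL file lives in Example/ and contains exactly the default lines shown above).
```python
# Swap the default CONTROL settings for the ones we want
replacements = {
    "Temperature 300": "Temperature 1500",
    "Steps 5001": "Steps 40000",
    "ensemble nve": "ensemble npt hoover 0.1 0.5",
    "trajectory nstraj= 1 istraj= 250 keytrj=0": "trajectory nstraj= 0 istraj= 100 keytrj=0",
}

with open("Example/CONTROL") as f:
    control = f.read()

for old, new in replacements.items():
    control = control.replace(old, new)

with open("Example/CONTROL", "w") as f:
    f.write(control)
```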
Now your simulation is ready. As a point of interest, it is always good to check your structure before and after the simulation. You can view the CONFIG file in three dimensions using the VESTA program. It is available for free at http://www.jp-minerals.org/vesta/en/download.html . Download it and use it to view your CONFIG; a demonstrator can help if necessary. VESTA can generate nice pictures which will look very good in a lab report.
*Figure 1. Fluorite Ca$F_2$ unit cell visualised in VESTA.*
To run DL_POLY from within a notebook use the below command. Keep in mind that this simulation will take 20 or so minutes so be patient.
```python
subprocess.call('H:/Third_year_lab/Advanced_Practical_Chemistry_Teaching-master/progs/dlpoly_classic.exe', cwd='H:/Third_year_lab/Advanced_Practical_Chemistry_Teaching-master/course/week_2/Example/')
```
0
Once DL_POLY has completed you will find several files relating to your simulation.
- HISTORY - This file contains the configuration of your system at each step during the simulation. You can view this as a movie using the VMD program - Ask a demonstrator for details
- REVCON - This is the configuration at the end of the simulation - Can be viewed in VESTA - why not check to see how it has changed.
- STATIS - Contains the stats at each step in the simulation.
- OUTPUT - Contains a summary of the simulation, including calculated system properties.
It is now important to understand how we can actually use the details of the simulation to extract information about the properties of the material, e.g. diffusion coefficients and activation energies.
## Mean Squared Displacements - Calculating diffusion coefficients
As we have seen, molecules in liquids, gases and solids do not stay in the same place; they move constantly. Think about a drop of dye in a glass of water: as time passes the dye distributes throughout the water. This process is called diffusion and is common throughout nature.
Using the dye as an example, the motion of a dye molecule is not simple. As it moves it is jostled by collisions with other molecules, preventing it from moving in a straight path. If the path is examined in close detail, it will be seen to be a good approximation to a random walk. In mathematics, a random walk is a series of steps, each taken in a random direction. This was analysed by Albert Einstein in a study of Brownian motion, and he showed that the mean square of the distance travelled by a particle following a random walk is proportional to the time elapsed.
\begin{align}
\langle r^2 \rangle & = 6 D t + C
\end{align}
where $\langle r^2 \rangle$ is the mean squared displacement, $t$ is time, $D$ is the diffusion coefficient and $C$ is a constant.
## What is the mean squared displacement
Going back to the example of the dye in water, let's assume for the sake of simplicity that we are in one dimension. Each step can either be forwards or backwards and we cannot predict which. From a given starting position, what distance is our dye molecule likely to travel after 1000 steps? This can be determined simply by adding together the steps, taking into account that steps backwards subtract from the total while steps forwards add to it. Since both directions are equally probable, we come to the surprising conclusion that the expected distance travelled is zero.
By instead summing the square of the distance we will always be adding positive numbers to our total, which now increases linearly with time. Based upon equation 1 it should now be clear that a plot of $\langle r^2 \rangle$ vs time will produce a line whose gradient is equal to 6D, giving us direct access to the diffusion coefficient of the system.
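As a quick sanity check (a toy one-dimensional random walk, independent of any DL_POLY output), the snippet below shows that the mean displacement of many walkers stays near zero while the mean squared displacement grows linearly with the number of steps.
```python
import numpy as np

rng = np.random.default_rng(42)
n_walkers, n_steps = 500, 1000

# Each step is +1 or -1 with equal probability
steps = rng.choice([-1, 1], size=(n_walkers, n_steps))
positions = np.cumsum(steps, axis=1)

print("Mean displacement after 1000 steps:", positions[:, -1].mean())
# The MSD comes out close to n_steps, i.e. it grows linearly with time
print("Mean squared displacement after 1000 steps:", (positions[:, -1] ** 2).mean())
```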
Let's now explore this with a real simulation. Run a short DL_POLY simulation on the input files provided.
You will use a small MSD program called MSD.py to analyse your simulation results. First you need to read in the data; the HISTORY file contains a list of the atomic coordinates held by the atoms during the simulation.
```python
# Read in the HISTORY file
## Provide the path to the simulation and the atom that you want data for.
data = msd.read_history("Example/HISTORY", "F")
```
data is a dictionary variable containing the atomic trajectories, lattice vectors, total number of atoms, and total number of timesteps.
data = {'trajectories':trajectories, 'lv':lv, 'timesteps':timesteps, 'natoms':natoms}
The next step is to calculate the MSD.
```python
# Run the MSD calculation
msd_data = msd.run_msd(data)
```
run_msd returns a dictionary containing the total MSD, the dimensional MSD values and the time.
msd_data = {'msd': msd, 'xmsd': xmsd, 'ymsd': ymsd, 'zmsd': zmsd, 'time': time}
This can then be plotted to give a nice linear relationship.
```python
plt.plot(msd_data['time'], msd_data['msd'], lw=2, color="red", label="MSD")
plt.plot(msd_data['time'], msd_data['xmsd'], lw=2, color="blue", label="X-MSD")
plt.plot(msd_data['time'], msd_data['ymsd'], lw=2, color="green", label="Y-MSD")
plt.plot(msd_data['time'], msd_data['zmsd'], lw=2, color="black", label="Z-MSD")
plt.ylabel("MSD (" r'$\AA$' ")", fontsize=15)
plt.xlabel("Time / ps", fontsize=15)
plt.ylim(0, np.amax(msd_data['msd']))
plt.xlim(0, np.amax(msd_data['time']))
plt.legend(loc=2, frameon=False)
plt.show()
```
To calculate the gradient we need to perform a linear regression on the data.
```python
slope, intercept, r_value, p_value, std_err = stats.linregress(msd_data['time'], msd_data['msd'])
```
The gradient is equal to 6D (the factor of 6 is 2 x the dimensionality, here 3). So our final diffusion coefficient for the simulation is given by
```python
diffusion_coefficient = (np.average(slope) / 6)
print("Diffusion Coefficient: ", diffusion_coefficient, " X 10 ^-9 (m^-2)")
```
Diffusion Coefficient:  1.4550282026323467  X 10 ^-9 (m^2 s^-1)
## Simulation Length
It is important to consider the length of your simulation (number of steps). Create a new folder called "Example_2", copy the CONFIG, FIELD and CONTROL files from your previous simulation but this time change the number of steps to 10000. Now rerun the simulation.
```python
subprocess.call('H:/Third_year_lab/Advanced_Practical_Chemistry_Teaching-master/progs/dlpoly_classic.exe', cwd='H:/Third_year_lab/Advanced_Practical_Chemistry_Teaching-master/course/week_2/Example_2/')
```
0
```python
data = msd.read_history("Example_2/HISTORY", "F")
msd_data = msd.run_msd(data)
plt.plot(msd_data['time'], msd_data['msd'], lw=2, color="red", label="MSD")
plt.plot(msd_data['time'], msd_data['xmsd'], lw=2, color="blue", label="X-MSD")
plt.plot(msd_data['time'], msd_data['ymsd'], lw=2, color="green", label="Y-MSD")
plt.plot(msd_data['time'], msd_data['zmsd'], lw=2, color="black", label="Z-MSD")
plt.ylabel("MSD (" r'$\AA$' ")", fontsize=15)
plt.xlabel("Time / ps", fontsize=15)
plt.ylim(0, np.amax(msd_data['msd']))
plt.xlim(0, np.amax(msd_data['time']))
plt.legend(loc=2, frameon=False)
plt.show()
```
```python
slope, intercept, r_value, p_value, std_err = stats.linregress(msd_data['time'], msd_data['msd'])
diffusion_coefficient = (np.average(slope) / 6)
print("Diffusion Coefficient: ", diffusion_coefficient, " X 10 ^-9 (m^-2)")
```
Diffusion Coefficient:  1.3602930070828145  X 10 ^-9 (m^2 s^-1)
You will hopefully see that your MSD plot has become considerably less linear. This shows that your simulation has not run long enough and your results will be unreliable. You should also see a change in the value of your diffusion coefficient. The length of your simulation is something that you should keep in mind for the next 3 weeks.
## Arrhenius
The next step is to use the diffusion coefficients to calculate the activation energy for F diffusion. This requires diffusion coefficients from a range of temperatures. Common sense and chemical intuition suggest that the higher the temperature, the faster a given chemical reaction will proceed. Quantitatively, this relationship between the rate at which a reaction proceeds and its temperature is determined by the Arrhenius equation. At higher temperatures, molecules collide more frequently and with greater kinetic energy, so a larger fraction of collisions can overcome the activation energy - the amount of energy required to ensure that a reaction happens.
\begin{align}
k = A e^{-E_a / RT}
\end{align}
where $k$ is the rate coefficient, $A$ is a constant (the pre-exponential factor), $E_a$ is the activation energy, $R$ is the universal gas constant, and $T$ is the temperature (in kelvin).
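Taking the natural logarithm linearises the equation, $\ln k = \ln A - E_a/RT$, so plotting $\ln D$ against $1/T$ gives a straight line with gradient $-E_a/R$. A minimal sketch of this analysis is shown below; the temperatures and diffusion coefficients are placeholders for your own results.
```python
import numpy as np
from scipy import stats

# Placeholder data - replace with your own simulation results
temperatures = np.array([1100.0, 1200.0, 1300.0, 1400.0, 1500.0])   # K
diffusion_coefficients = np.array([0.2, 0.5, 1.0, 1.8, 3.0])        # x 10^-9 m^2 s^-1

R = 8.314  # gas constant, J K^-1 mol^-1
slope, intercept, r_value, p_value, std_err = stats.linregress(
    1.0 / temperatures, np.log(diffusion_coefficients))
activation_energy = -slope * R  # J mol^-1
print("Activation energy: ", activation_energy / 1000.0, " kJ mol^-1")
```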
## Week 2 Exercise
Using what you have learned over the last 45 mins, your task this week is to calculate the activation energy of F diffusion in Ca$F_2$. You will need to select a temperature range and carry out simulations at several temperatures within that range.
#### Questions to answer
- In what temperature range is Ca$F_2$ completely solid i.e. no diffusion?
- In what range is fluorine essentially liquid i.e. fluorine diffusion with no calcium diffusion?
- What is the melting temperature?
- Plot an Arrhenius plot and determine the activation energies in each temperature range - you will need to rearrange the equation.
You are encouraged to split the work up within your group and to learn how to view the simulation "movie" using VMD (Ask a demonstrator). VMD is a fantastic program that allows you to visualise your simulation, included below is a video showing a short snippet of an MD simulation of Ca$F_2$. A single F atom has been highlighted to show that diffusion is occurring.
*(Video of the Ca$F_2$ MD simulation embedded in the original notebook.)*
Furthermore, VMD can also be used to generate images showing the entire trajectory of the simulation, e.g.
<center>
<br>
<i>Figure 2. A figure showing all positions occupied by F during an MD simulation at 1500 K. F positions are shown in orange and Ca atoms are shown in green.</i>
<br>
</center>
To save you time you can use the function declared at the start of this notebook to pull out a diffusion coefficient directly from the simulation output file. MSD.py is a small code to allow visualisation of the MSD plot but it is not necessary every time you want the diffusion coefficient.
It is up to you how you organise/create your directories but it is recommended that you start a new notebook. Use the commands/functions used in this notebook to generate your input files, run DL_POLY and extract the diffusion coefficients. Then write your own code to generate an Arrhenius plot and calculate the activation energies.
If you finish early then feel free to start week 3.
|
[STATEMENT]
lemma (in is_cat_finite_obj_coprod) is_cat_finite_obj_prod_op:
"op_ntcf \<pi> : U <\<^sub>C\<^sub>F\<^sub>.\<^sub>\<Prod>\<^sub>.\<^sub>f\<^sub>i\<^sub>n A : I \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> op_cat \<CC>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. op_ntcf \<pi> : U <\<^sub>C\<^sub>F\<^sub>.\<^sub>\<Prod>\<^sub>.\<^sub>f\<^sub>i\<^sub>n A : I \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> op_cat \<CC>
[PROOF STEP]
by (intro is_cat_finite_obj_prodI)
(
cs_concl cs_shallow
cs_simp: cat_op_simps
cs_intro: cat_fin_obj_coprod_index_in_\<omega> cat_cs_intros cat_op_intros
)
|
#!/usr/bin/gap
# time gap -o 8g hierarchy.gap
# Build clifford groups, up to 3 qubits, and
# search for T operators defined by field extensions......
Print("running hierarchy.gap\n");;
# See:
# https://www.mathstat.dal.ca/~selinger/papers/clifford.pdf
r2 := Sqrt(2);;
ir2 := 1/r2;;
i := [[1, 0], [0, 1]];;
w := [[E(4), 0], [0, E(4)]];;
x := [[0, 1], [1, 0]];;
z := [[1, 0], [0, -1]];;
s := [[1, 0], [0, E(4)]];;
h := [[ir2, ir2], [ir2, -ir2]];;
Cliff1 := Group(w, s, h);; # Order 192
Pauli1 := Group(w, x, z);; # Order 32
for U in Cliff1 do
found := false;;
#Udag := Inverse(U);
#Print("found? ");
for g in Pauli1 do
if (U*g*Inverse(U)*Inverse(g) in Pauli1) then found:=true; break; fi;
od;
if not found then Print("Not found\n"); fi;
od;
xi := KroneckerProduct(x, i);;
ix := KroneckerProduct(i, x);;
zi := KroneckerProduct(z, i);;
iz := KroneckerProduct(i, z);;
si := KroneckerProduct(s, i);;
is := KroneckerProduct(i, s);;
hi := KroneckerProduct(h, i);;
ih := KroneckerProduct(i, h);;
wi := KroneckerProduct(w, i);;
cz := [
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, -1]];;
Cliff2 := Group(si, is, hi, ih, wi, cz);; # Order 92160
Order(Cliff2);
#for g in Cliff2 do Print(g, "\n"); od;
Phase2 := Group(wi);;
Pauli2 := Group(wi, xi, ix, zi, iz);;
# Works:
for U in Cliff2 do
found := false;;
#Udag := Inverse(U);
#Print("found? ");
for g in Pauli2 do
if (U*g*Inverse(U)*Inverse(g) in Pauli2) then found:=true; break; fi;
od;
if not found then Print("Not found\n"); fi;
od;
Hadamard4 := (1/2)*[
[0, 1, 0, 1],
[2, 0, 2, 0],
[0, 1, 0, -1],
[2, 0, -2, 0]];
#Print(Hadamard4*Hadamard4, "\n");
#Print(Hadamard4, " in Cliff2 ", Hadamard4 in Cliff2, "\n");
a := [
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[-1, 0, 0, 0]];;
# Print(a in Cliff2, "\n"); # true
#hom:=EpimorphismFromFreeGroup(Cliff2:names:=["si", "is", "hi", "ih", "wi", "cz"]);
#Print(PreImagesRepresentative(hom, a), "\n"); # si^3*is^3*hi*si^2*ih*is^2*ih*cz*hi*si
#Print(PreImagesRepresentative(hom, a*a), "\n"); # (si^2*hi)^2
#Print(PreImagesRepresentative(hom, a*a*a), "\n"); # si^2*is^3*hi*(si^2*hi*si)^2*si*ih*is^2*ih*cz*hi*si
cls := ConjugacyClass(Cliff2, a);;
#Print(a, a in cls, "\n"); # true
#Print(a*a, a*a in cls, "\n"); # false
#Print(a*a*a, a*a*a in cls, "\n"); # true
#Print(a*a*a*a, a*a*a*a in cls, "\n"); # false
#Print(a*a*a*a*a, a*a*a*a*a in cls, "\n"); # true
#Print(a*a*a*a*a*a, a*a*a*a*a*a in cls, "\n"); # false
#Print(a*a*a*a*a*a*a, a*a*a*a*a*a*a in cls, "\n"); # true
#Print(a*a*a*a*a*a*a*a, a*a*a*a*a*a*a*a in cls, "\n"); # false
#Print(cz, cz in cls, "\n"); # false
#b := [
# [1, 0, 0, 0],
# [0, 1, 0, 0],
# [0, 0, 0, 1],
# [0, 0, -1, 0]];;
#Print(cz, cz in ConjugacyClass(Cliff2, b), "\n"); # false
#for b in ConjugacyClass(Cliff2, cz) do Print(b, "\n"); od;
#a1 := [
# [0, 0, 1, 0],
# [0, 0, 0, 1],
# [0, 1, 0, 0],
# [-1, 0, 0, 0]];;
#Print(a1 in Pauli2, "\n"); # false
#Print(a1 in Cliff2, "\n"); # true
#Print(a1 in cls, "\n"); # true
#Print(Eigenvalues(Cyclotomics, b), "\n"); # fail
#Print(a*a, "\n");
#Print(a*a*a, "\n");
#Print(a*a*a*a, "\n");
# Print(Order(G2));
Hadamard8 := (1/2)*[
[ 0, 1, 0,-1, 0, 1, 0,-1],
[ 1, 0, 1, 0, 1, 0, 1, 0],
[ 0, 1, 0, 1, 0, 1, 0, 1],
[-1, 0, 1, 0,-1, 0, 1, 0],
[ 0, 1, 0,-1, 0,-1, 0, 1],
[ 1, 0, 1, 0,-1, 0,-1, 0],
[ 0, 1, 0, 1, 0,-1, 0,-1],
[-1, 0, 1, 0, 1, 0,-1, 0]];
#Print(Hadamard8, "\n");
#Print(Hadamard8*Hadamard8, "\n"); # = identity
U2 := [
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]];
in_third_level_2 := function(U2)
# Is U2 in the third level of the clifford hierarchy ?
local A;
for g in Pauli2 do
A := U2*g*Inverse(U2)*Inverse(g);
if A in Cliff2 then continue; fi;
return false; # no
od;
return true; # yes
end;;
in_fourth_level_2 := function(U2)
# Is U2 in the fourth level of the clifford hierarchy ?
local A;
for g in Pauli2 do
A := U2*g*Inverse(U2)*Inverse(g);
        if in_third_level_2(A) then continue; fi;  # test the commutator A, not U2 itself
return false; # no
od;
return true; # yes
end;;
in_fifth_level_2 := function(U2)
# Is U2 in the fifth level of the clifford hierarchy ?
local A;
for g in Pauli2 do
A := U2*g*Inverse(U2)*Inverse(g);
        if in_fourth_level_2(A) then continue; fi;  # test the commutator A, not U2 itself
return false; # no
od;
return true; # yes
end;;
get_level_2 := function(U2)
if U2 in Phase2 then return "0\c"; fi;
if U2 in Pauli2 then return "1\c"; fi;
if U2 in Cliff2 then return "2\c"; fi;
if in_third_level_2(U2) then return "3\c"; fi;
if in_fourth_level_2(U2) then return "4\c"; fi;
if in_fifth_level_2(U2) then return "5\c"; fi;
return ".\c";
end;;
if true then
Print("# Where do control 1-qubit Pauli gates live ?\n");
for g in Pauli1 do
#Print(g, "\n");
U2{[3,4]}{[3,4]} := g;
Print(get_level_2(U2));
od;
Print("\n");
Print("# Where do control 1-qubit clifford gates live ?\n");
for g in Cliff1 do
#Print(g, "\n");
U2{[3,4]}{[3,4]} := g;
Print(get_level_2(U2));
od;
Print("\n");
fi;
# ---------------------------------------------------
#
# 3 qubit gates
#
xii := KroneckerProduct(xi, i);;
ixi := KroneckerProduct(i, xi);;
iix := KroneckerProduct(i, ix);;
zii := KroneckerProduct(zi, i);;
izi := KroneckerProduct(i, zi);;
iiz := KroneckerProduct(i, iz);;
sii := KroneckerProduct(si, i);;
isi := KroneckerProduct(i, si);;
iis := KroneckerProduct(i, is);;
hii := KroneckerProduct(hi, i);;
ihi := KroneckerProduct(i, hi);;
iih := KroneckerProduct(i, ih);;
wii := KroneckerProduct(wi, i);;
icz := KroneckerProduct(i, cz);;
czi := KroneckerProduct(cz, i);;
ca := [
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, -1, 0, 0, 0]];;
d := [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[-1, 0, 0, 0, 0, 0, 0, 0]];;
ca1 := [
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, -1, 0, 0, 0]];;
cb := [
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, -1, 0, 0, 0],
[0, 0, 0, 0, 0, -1, 0, 0]];;
cc := [
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, -1, 0, 0, 0],
[0, 0, 0, 0, 0, -1, 0, 0],
[0, 0, 0, 0, 0, 0, -1, 0]];;
Tofolli := [
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 0]];;
Cliff3 := Group(sii, isi, iis, hii, ihi, iih, wii, icz, czi);; # Order 743178240
Pauli3 := Group(wii, xii, ixi, iix, zii, izi, iiz);; # Order 256
# Print(Order(Pauli3), "\n");;
# ca in Cliff3 = false
# ca in third level of clifford hierarchy = true
Cliff3_12 := Group(sii, isi, hii, ihi, wii, czi);;
Cliff3_13 := Group(sii, iis, hii, iih, wii);; # cz on 1&3 ??
Cliff3_23 := Group(isi, iis, ihi, iih, wii, icz);;
#SmCliff3 := Group(sii, isi, iis, hii, ihi, iih, icz, czi);; # Order 743178240
Print("warming up...\n");
Order(Cliff3);; # need this line otherwise membership test eats all memory...!
Print("Ok\n");
#Print("Hadamard8 in Cliff3 ", Hadamard8 in Cliff3, "\n"); # true
in_first_level := function(U3)
return U3 in Pauli3;
end;;
in_second_level := function(U3)
if U3 in Cliff3_12 then return true; fi;
if U3 in Cliff3_13 then return true; fi;
if U3 in Cliff3_23 then return true; fi;
if U3 in Cliff3 then return true; fi;
return false;
end;;
#in_second_level := function(U3)
# # Is U3 in the second level of the clifford hierarchy ?
# local A;
# for g in Pauli3 do
# A := U3*g*Inverse(U3)*Inverse(g);
# if in_first_level(A) then continue; fi;
# return false; # no
# od;
# return true; # yes
#end;;
in_third_level := function(U3)
# Is U3 in the third level of the clifford hierarchy ?
local A;
for g in Pauli3 do
A := U3*g*Inverse(U3)*Inverse(g);
if in_second_level(A) then continue; fi;
return false; # no
od;
return true; # yes
end;;
#Print(in_third_level(ca1), "\n"); # true
#quit;
in_fourth_level := function(U3)
# Is U3 in the fourth level of the clifford hierarchy ?
local A;
for g in Pauli3 do
A := U3*g*Inverse(U3)*Inverse(g);
if in_third_level(A) then continue; fi;
return false; # no
od;
return true; # yes
end;;
in_fifth_level := function(U3)
# Is U3 in the fifth level of the clifford hierarchy ?
local A;
for g in Pauli3 do
A := U3*g*Inverse(U3)*Inverse(g);
if in_fourth_level(A) then continue; fi;
return false; # no
od;
return true; # yes
end;;
get_level := function(U3)
if U3 in Pauli3 then return "1\c"; fi;
if in_second_level(U3) then return "2\c"; fi;
if in_third_level(U3) then return "3\c"; fi;
if in_fourth_level(U3) then return "4\c"; fi;
    if in_fifth_level(U3) then return "5\c"; fi;
return ".\c";
end;;
#Print("in_third_level(d):", in_third_level(cc), "\n"); # true
#Print("in_third_level(ca):", in_third_level(ca), "\n"); # true
#Print("in_third_level(cb):", in_third_level(cb), "\n"); # true
#Print("in_third_level(cc):", in_third_level(cc), "\n"); # true
#Print("in_third_level(Tofolli):", in_third_level(Tofolli), "\n"); # true
#Print("in_third_level(Tofolli*ca):", in_third_level(Tofolli*ca), "\n"); # false
U3 := [
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1]];
Print("# Where do control control 1-qubit Pauli gates live ?\n");
for g in Pauli1 do
#Print(g, "\n");
U3{[7,8]}{[7,8]} := g;
Print(get_level(U3));
od;
Print("\n");
Print("# Where do control control 1-qubit clifford gates live ?\n");
for g in Cliff1 do
#Print(g, "\n");
U3{[7,8]}{[7,8]} := g;
Print(get_level(U3));
od;
Print("\n");
Print("# Where do control 2-qubit Pauli gates live ?\n");
for g in Pauli2 do
#Print(g, "\n");
U3{[5,6,7,8]}{[5,6,7,8]} := g;
Print(get_level(U3));
od;
Print("\n");
Print("# Where do control 2-qubit clifford gates live ?\n");
for g in Cliff2 do
#Print(g, "\n");
U3{[5,6,7,8]}{[5,6,7,8]} := g;
Print(get_level(U3));
od;
Print("\n");
Print("Done.\n");
|
\chapter{BSPSyncGPGPU: GPU Performance Multi Kernel model}
|
function varargout = ReadData3D(varargin)
% This function ReadData3D allows the user to open medical 3D files. It
% supports the following formats :
%
% Dicom Files ( .dcm , .dicom )
% V3D Philips Scanner ( .v3d )
% GIPL Guys Image Processing Lab ( .gipl )
% HDR/IMG Analyze ( .hdr )
% ISI Files ( .isi )
% NifTi ( .nii )
% RAW files ( .raw , .* )
% VMP BrainVoyager ( .vmp )
% XIF HDllab/ATL ultrasound ( .xif )
% VTK Visualization Toolkit ( .vtk )
% Insight Meta-Image ( .mha, .mhd )
% Micro CT ( .vff )
% PAR/REC Philips ( .par, .rec)
%
% usage:
%
% [V,info]=ReadData3D;
%
% or,
%
% [V,info]=ReadData3D(filename)
%
% or,
%
% [V,info]=ReadData3D(filename,real);
%
%
% outputs,
% V : The 3D Volume
% info : Struct with info about the data
% Always the following fields are present
% info.Filename : Name of file
% info.Dimensions : Dimensions of Volume
% info.PixelDimensions : Size of one pixel / voxel
% real : If set to true (default), convert the raw data to
% type Single-precision and rescale data to real units
% (in CT Hounsfield). When false, it returns the raw-data.
%
% Warning!
% The read functions are not fully implemented as defined in
% the file-format standards, thus do not use this function for
% critical applications.
%
%
% Function is written by D.Kroon University of Twente (July 2010)
% Edit the above text to modify the response to help ReadData3D
% Last Modified by GUIDE v2.5 09-Nov-2010 14:12:50
% Begin initialization code - DO NOT EDIT
gui_Singleton = 0;
gui_State = struct('gui_Name', mfilename, ...
'gui_Singleton', gui_Singleton, ...
'gui_OpeningFcn', @ReadData3D_OpeningFcn, ...
'gui_OutputFcn', @ReadData3D_OutputFcn, ...
'gui_LayoutFcn', [] , ...
'gui_Callback', []);
if (nargin>2) && ischar(varargin{1})
gui_State.gui_Callback = str2func(varargin{1});
end
if nargout
[varargout{1:nargout}] = gui_mainfcn(gui_State, varargin{:});
else
gui_mainfcn(gui_State, varargin{:});
end
% End initialization code - DO NOT EDIT
% --- Executes just before ReadData3D is made visible.
function ReadData3D_OpeningFcn(hObject, eventdata, handles, varargin)
% This function has no output args, see OutputFcn.
% hObject handle to figure
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
% varargin command line arguments to ReadData3D (see VARARGIN)
% Choose default command line output for ReadData3D
handles.output = hObject;
% Update handles structure
guidata(hObject, handles);
% UIWAIT makes ReadData3D wait for user response (see UIRESUME)
% uiwait(handles.figure1);
%---- Start supported file formats ----%
data.fileformat(1).ext='*.dcm';
data.fileformat(1).type='Dicom Files';
data.fileformat(1).folder='dicom';
data.fileformat(1).functioninfo='dicom_read_header';
data.fileformat(1).functionread='dicom_read_volume';
data.fileformat(2).ext='*.gipl';
data.fileformat(2).type='GIPL Guys Image Processing Lab';
data.fileformat(2).folder='gipl';
data.fileformat(2).functioninfo='gipl_read_header';
data.fileformat(2).functionread='gipl_read_volume';
data.fileformat(3).ext='*.hdr';
data.fileformat(3).type='HDR/IMG Analyze';
data.fileformat(3).folder='hdr';
data.fileformat(3).functioninfo='hdr_read_header';
data.fileformat(3).functionread='hdr_read_volume';
data.fileformat(4).ext='*.isi';
data.fileformat(4).type='ISI Files';
data.fileformat(4).folder='isi';
data.fileformat(4).functioninfo='isi_read_header';
data.fileformat(4).functionread='isi_read_volume';
data.fileformat(5).ext='*.nii';
data.fileformat(5).type='NifTi';
data.fileformat(5).folder='nii';
data.fileformat(5).functioninfo='nii_read_header';
data.fileformat(5).functionread='nii_read_volume';
data.fileformat(6).ext='*.raw';
data.fileformat(6).type='RAW files';
data.fileformat(6).folder='raw';
data.fileformat(6).functioninfo='raw_read_header';
data.fileformat(6).functionread='raw_read_volume';
data.fileformat(7).ext='*.v3d';
data.fileformat(7).type='V3D Philips Scanner';
data.fileformat(7).folder='v3d';
data.fileformat(7).functioninfo='v3d_read_header';
data.fileformat(7).functionread='v3d_read_volume';
data.fileformat(8).ext='*.vmp';
data.fileformat(8).type='VMP BrainVoyager';
data.fileformat(8).folder='vmp';
data.fileformat(8).functioninfo='vmp_read_header';
data.fileformat(8).functionread='vmp_read_volume';
data.fileformat(9).ext='*.xif';
data.fileformat(9).type='XIF HDllab/ATL ultrasound';
data.fileformat(9).folder='xif';
data.fileformat(9).functioninfo='xif_read_header';
data.fileformat(9).functionread='xif_read_volume';
data.fileformat(10).ext='*.vtk';
data.fileformat(10).type='VTK Visualization Toolkit';
data.fileformat(10).folder='vtk';
data.fileformat(10).functioninfo='vtk_read_header';
data.fileformat(10).functionread='vtk_read_volume';
data.fileformat(11).ext='*.mha';
data.fileformat(11).type='Insight Meta-Image';
data.fileformat(11).folder='mha';
data.fileformat(11).functioninfo='mha_read_header';
data.fileformat(11).functionread='mha_read_volume';
data.fileformat(12).ext='*.vff';
data.fileformat(12).type='Micro CT';
data.fileformat(12).folder='vff';
data.fileformat(12).functioninfo='vff_read_header';
data.fileformat(12).functionread='vff_read_volume';
data.fileformat(13).ext='*.par';
data.fileformat(13).type='Philips PAR/REC';
data.fileformat(13).folder='par';
data.fileformat(13).functioninfo='par_read_header';
data.fileformat(13).functionread='par_read_volume';
%---- End supported file formats ----%
% Get path of ReadData3D
functionname='ReadData3D.m';
functiondir=which(functionname);
functiondir=functiondir(1:end-length(functionname));
% Add the file-reader functions also to the matlab path
addpath([functiondir '/subfunctions']);
for i=1:length(data.fileformat), addpath(fullfile(functiondir, data.fileformat(i).folder)); end
% Make popuplist file formats
fileformatcell=cell(1,length(data.fileformat));
for i=1:length(data.fileformat), fileformatcell{i}=[data.fileformat(i).type ' (' data.fileformat(i).ext ')']; end
set(handles.popupmenu_format,'String',fileformatcell);
% Check if last filename is present from a previous time
data.configfile=[functiondir '/lastfile.mat'];
filename='';
fileformatid=1;
if(exist(data.configfile,'file')), load(data.configfile); end
data.handles=handles;
data.lastfilename=[];
data.volume=[];
data.info=[];
% If filename is selected, look if the extention is known
found=0;
if(~isempty(varargin))
filename=varargin{1}; [pathstr,name,ext]=fileparts(filename);
for i=1:length(data.fileformat)
if(strcmp(data.fileformat(i).ext(2:end),ext)), found=1; fileformatid=i; end
end
end
% Rescale the data back to original units.
if(length(varargin)>1), real=varargin{2}; else real=true; end
data.real=real;
data.filename=filename;
data.fileformatid=fileformatid;
set(handles.checkbox_real,'Value',data.real);
set(handles.edit_filename,'String',data.filename)
set(handles.popupmenu_format,'Value',data.fileformatid);
% Store all data
setMyData(data);
if(found==0)
% Show Dialog File selection
uiwait(handles.figure1);
else
% Load the File directly
loaddata();
end
% --- Outputs from this function are returned to the command line.
function varargout = ReadData3D_OutputFcn(hObject, eventdata, handles)
% varargout cell array for returning output args (see VARARGOUT);
% hObject handle to figure
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
% Get default command line output from handles structure
if(ishandle(hObject))
data=getMyData();
else
data=[];
end
if(~isempty(data))
varargout{1} = data.volume;
varargout{2} = data.info;
else
varargout{1}=[];
varargout{2}=[];
end
if(ishandle(hObject))
close(hObject)
end
function edit_filename_Callback(hObject, eventdata, handles)
% hObject handle to edit_filename (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
% Hints: get(hObject,'String') returns contents of edit_filename as text
% str2double(get(hObject,'String')) returns contents of edit_filename as a double
% --- Executes during object creation, after setting all properties.
function edit_filename_CreateFcn(hObject, eventdata, handles)
% hObject handle to edit_filename (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles empty - handles not created until after all CreateFcns called
% Hint: edit controls usually have a white background on Windows.
% See ISPC and COMPUTER.
if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))
set(hObject,'BackgroundColor','white');
end
% --- Executes on button press in pushbutton_browse.
function pushbutton_browse_Callback(hObject, eventdata, handles)
% hObject handle to pushbutton_browse (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
data=getMyData();
[extlist extlistid]=FileDialogExtentionList(data);
[filename, dirname,filterindex] = uigetfile(extlist, 'Select a dicom file',fileparts(data.filename));
if(filterindex>0)
if(extlistid(filterindex)~=0)
data.fileformatid=extlistid(filterindex);
set( handles.popupmenu_format,'Value',data.fileformatid);
end
if(filename==0), return; end
filename=[dirname filename];
data.filename=filename;
setMyData(data);
set(handles.edit_filename,'String',data.filename)
end
function [extlist extlistid]=FileDialogExtentionList(data)
extlist=cell(length(data.fileformat)+1,2);
extlistid=zeros(length(data.fileformat)+1,1);
ext=data.fileformat(data.fileformatid).ext;
type=data.fileformat(data.fileformatid).type;
extlistid(1)=data.fileformatid;
extlist{1,1}=ext; extlist{1,2}=[type ' (' ext ')'];
j=1;
for i=1:length(data.fileformat);
if(i~=data.fileformatid)
j=j+1;
ext=data.fileformat(i).ext;
type=data.fileformat(i).type;
extlistid(j)=i;
extlist{j,1}=ext; extlist{j,2}=[type ' (' ext ')'];
end
end
extlist{end,1}='*.*';
extlist{end,2}='All Files (*.*)';
function setMyData(data)
% Store data struct in figure
setappdata(gcf,'dataload3d',data);
function data=getMyData()
% Get data struct stored in figure
data=getappdata(gcf,'dataload3d');
% --- Executes on button press in pushbutton_cancel.
function pushbutton_cancel_Callback(hObject, eventdata, handles)
% hObject handle to pushbutton_cancel (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
setMyData([]);
uiresume;
% --- Executes on selection change in popupmenu_format.
function popupmenu_format_Callback(hObject, eventdata, handles)
% hObject handle to popupmenu_format (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
% Hints: contents = cellstr(get(hObject,'String')) returns popupmenu_format contents as cell array
% contents{get(hObject,'Value')} returns selected item from popupmenu_format
data=getMyData();
data.fileformatid=get( handles.popupmenu_format,'Value');
setMyData(data);
% --- Executes during object creation, after setting all properties.
function popupmenu_format_CreateFcn(hObject, eventdata, handles)
% hObject handle to popupmenu_format (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles empty - handles not created until after all CreateFcns called
% Hint: popupmenu controls usually have a white background on Windows.
% See ISPC and COMPUTER.
if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))
set(hObject,'BackgroundColor','white');
end
% --- Executes on button press in pushbutton_load.
function pushbutton_load_Callback(hObject, eventdata, handles)
% hObject handle to pushbutton_load (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
data=getMyData();
data.filename=get(handles.edit_filename,'string');
loaddata();
pause(0.1);
uiresume
function loaddata()
data=getMyData();
set(data.handles.figure1,'Pointer','watch'); drawnow('expose');
if(~strcmp(data.lastfilename,data.filename))
% Get info
fhandle = str2func( data.fileformat(data.fileformatid).functioninfo);
data.info=feval(fhandle,data.filename);
data.lastfilename=data.filename;
end
fhandle = str2func( data.fileformat(data.fileformatid).functionread);
data.volume=feval(fhandle,data.info);
if(data.real)
data.volume=single(data.volume);
if(isfield(data.info,'RescaleSlope')),
data.volume=data.volume*data.info.RescaleSlope;
else
disp('RescaleSlope not available, assuming 1')
end
if(isfield(data.info,'RescaleIntercept')),
data.volume=data.volume+data.info.RescaleIntercept;
else
disp('RescaleIntercept not available, assuming 0')
end
end
setMyData(data);
set(data.handles.figure1,'Pointer','arrow')
% Save the filename, for the next time this function is used
filename=data.filename; fileformatid=data.fileformatid;
try save(data.configfile,'filename','fileformatid'); catch ME; disp(ME.message); end
% --- Executes on button press in pushbutton_info.
function pushbutton_info_Callback(hObject, eventdata, handles)
% hObject handle to pushbutton_info (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
data=getMyData();
data.filename=get(handles.edit_filename,'string');
if(~strcmp(data.lastfilename,data.filename))
% Get info
set(data.handles.figure1,'Pointer','watch'); drawnow('expose');
fhandle = str2func( data.fileformat(data.fileformatid).functioninfo);
data.info=feval(fhandle,data.filename);
data.lastfilename=data.filename;
set(data.handles.figure1,'Pointer','arrow')
end
setMyData(data);
% Show info
InfoData3D(data.info);
% --- Executes on button press in checkbox_real.
function checkbox_real_Callback(hObject, eventdata, handles)
% hObject handle to checkbox_real (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
% Hint: get(hObject,'Value') returns toggle state of checkbox_real
data=getMyData();
data.real=get(handles.checkbox_real,'Value');
setMyData(data);
|
[STATEMENT]
lemma foldl_At_eq[simp]: "s \<bullet>\<bullet> ts = s \<bullet>\<bullet> ts' \<longleftrightarrow> ts = ts'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (s \<bullet>\<bullet> ts = s \<bullet>\<bullet> ts') = (ts = ts')
[PROOF STEP]
apply(rule)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. s \<bullet>\<bullet> ts = s \<bullet>\<bullet> ts' \<Longrightarrow> ts = ts'
2. ts = ts' \<Longrightarrow> s \<bullet>\<bullet> ts = s \<bullet>\<bullet> ts'
[PROOF STEP]
prefer 2
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. ts = ts' \<Longrightarrow> s \<bullet>\<bullet> ts = s \<bullet>\<bullet> ts'
2. s \<bullet>\<bullet> ts = s \<bullet>\<bullet> ts' \<Longrightarrow> ts = ts'
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. s \<bullet>\<bullet> ts = s \<bullet>\<bullet> ts' \<Longrightarrow> ts = ts'
[PROOF STEP]
apply(blast dest:foldl_At_eq_lemma foldl_At_eq_length)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
State Before: C : Type u
inst✝¹ : Category C
D : Type u₂
inst✝ : Category D
W✝ X Y Z : C
f : X ⟶ Z
g : Y ⟶ Z
t : PullbackCone f g
W : C
k l : W ⟶ t.pt
h₀ : k ≫ fst t = l ≫ fst t
h₁ : k ≫ snd t = l ≫ snd t
⊢ k ≫ t.π.app none = l ≫ t.π.app none State After: C : Type u
inst✝¹ : Category C
D : Type u₂
inst✝ : Category D
W✝ X Y Z : C
f : X ⟶ Z
g : Y ⟶ Z
t : PullbackCone f g
W : C
k l : W ⟶ t.pt
h₀ : k ≫ fst t = l ≫ fst t
h₁ : k ≫ snd t = l ≫ snd t
⊢ k ≫ t.π.app WalkingCospan.left ≫ (cospan f g).map inl = l ≫ t.π.app WalkingCospan.left ≫ (cospan f g).map inl Tactic: rw [← t.w inl] State Before: C : Type u
inst✝¹ : Category C
D : Type u₂
inst✝ : Category D
W✝ X Y Z : C
f : X ⟶ Z
g : Y ⟶ Z
t : PullbackCone f g
W : C
k l : W ⟶ t.pt
h₀ : k ≫ fst t = l ≫ fst t
h₁ : k ≫ snd t = l ≫ snd t
⊢ k ≫ t.π.app WalkingCospan.left ≫ (cospan f g).map inl = l ≫ t.π.app WalkingCospan.left ≫ (cospan f g).map inl State After: C : Type u
inst✝¹ : Category C
D : Type u₂
inst✝ : Category D
W✝ X Y Z : C
f : X ⟶ Z
g : Y ⟶ Z
t : PullbackCone f g
W : C
k l : W ⟶ t.pt
h₀ : k ≫ fst t = l ≫ fst t
h₁ : k ≫ snd t = l ≫ snd t
⊢ k ≫ fst t ≫ f = l ≫ fst t ≫ f Tactic: dsimp [h₀]
|
module Data.Compress.Utils.FiniteBuffer
import Data.Seq.Unsized
export
record FiniteBuffer a where
constructor FB
max_size : Int
size : Int
buffer : Seq a
export
empty : Nat -> FiniteBuffer a
empty n = FB (cast n) 0 empty
export
take_last : Nat -> FiniteBuffer a -> Maybe (List a)
take_last n fb = guard (cast n <= fb.size) $> (toList $ take n $ drop (cast (fb.size - cast n)) fb.buffer)
infixr 5 +<
infixr 5 +<><
export
(+<) : FiniteBuffer a -> a -> FiniteBuffer a
(FB max_size size buffer) +< x =
let
len = size + 1
buf = snoc buffer x
in
if len > max_size then FB max_size max_size $ tail buf else FB max_size len buf
export
(+<><) : Foldable f => FiniteBuffer a -> f a -> FiniteBuffer a
buf +<>< elems = foldl (+<) buf elems
export
length : FiniteBuffer a -> Nat
length = cast . size
|
function u=randDirVec(numDim,N)
%%RANDDIRVEC Generate a uniformly distributed random direction vector
% (unit vector) in an arbitrary number of dimensions.
%
%INPUTS: numDim The number of dimensions of the random unit vector. This
% is >=1.
% N The number of random direction vectors to generate. If
% this parameter is omitted, then N=1 is used.
%
%OUTPUTS: u A numDimXN matrix of N numDim-dimensional unit vectors that
% are uniformly distributed on the unit-sphere (or hypersphere).
%
%One cannot simply generate a uniformly-distributed direction vector by
%setting each dimension to a Uniform(-1,1) random variable and then
%normalizing the result. Rather, following the algorithm in Chapter 3.4.1,
%E6 of [1], the elements of the vector must be generated as normal 0-1
%random variables and then the vector normalized.
%
%REFERENCES:
%[1] D. Knuth, The Art of Computer Programming: Seminumerical Algorithms,
% 3rd ed. Reading, MA: Addison-Wesley, 1998, vol. 2.
%
%September 2014 David F. Crouse, Naval Research Laboratory, Washington D.C.
%(UNCLASSIFIED) DISTRIBUTION STATEMENT A. Approved for public release.
if(nargin<2)
N=1;
end
u=zeros(numDim,N);
for curN=1:N
    %Knuth, Chapter 3.4.1, E6 (Random point on an n-dimensional sphere with
    %radius one).
u(:,curN)=randn(numDim,1);
u(:,curN)=u(:,curN)/norm(u(:,curN));
%Deal with the instance that norm(u)=0, which will occur with an extremely
%small probability due to the limited precision of the random number
%generation.
if(~isfinite(u(:,curN)))
u(:,curN)=zeros(numDim,1);
u(1,curN)=1;
end
end
end
%LICENSE:
%
%The source code is in the public domain and not licensed or under
%copyright. The information and software may be used freely by the public.
%As required by 17 U.S.C. 403, third parties producing copyrighted works
%consisting predominantly of the material produced by U.S. government
%agencies must provide notice with such work(s) identifying the U.S.
%Government material incorporated and stating that such material is not
%subject to copyright protection.
%
%Derived works shall not identify themselves in a manner that implies an
%endorsement by or an affiliation with the Naval Research Laboratory.
%
%RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF THE
%SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY THE NAVAL
%RESEARCH LABORATORY FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE ACTIONS
%OF RECIPIENT IN THE USE OF THE SOFTWARE.
|
import algebra.ring
namespace my_ring
variables {R : Type*} [ring R]
variables a b c : R
#check add_right_neg
#check sub_eq_add_neg
#check sub_eq_add_neg c
-- BEGIN
theorem self_sub (a : R) : a - a = 0 :=
begin
rw sub_eq_add_neg a,
rw add_right_neg,
end
-- END
#check self_sub
end my_ring
|
/-
Copyright (c) 2020 Aaron Anderson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Aaron Anderson
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.order.rel_iso
import Mathlib.order.lattice_intervals
import Mathlib.order.order_dual
import Mathlib.PostPort
universes u_2 l u_1
namespace Mathlib
/-!
# Modular Lattices
This file defines Modular Lattices, a kind of lattice useful in algebra.
For examples, look to the subobject lattices of abelian groups, submodules, and ideals, or consider
any distributive lattice.
## Main Definitions
- `is_modular_lattice` defines a modular lattice to be one such that
`x ≤ z → (x ⊔ y) ⊓ z ≤ x ⊔ (y ⊓ z)`
- `inf_Icc_order_iso_Icc_sup` gives an order isomorphism between the intervals
`[a ⊓ b, a]` and `[b, a ⊔ b]`.
This corresponds to the diamond (or second) isomorphism theorems of algebra.
## Main Results
- `is_modular_lattice_iff_sup_inf_sup_assoc`:
Modularity is equivalent to the `sup_inf_sup_assoc`: `(x ⊓ z) ⊔ (y ⊓ z) = ((x ⊓ z) ⊔ y) ⊓ z`
- `distrib_lattice.is_modular_lattice`: Distributive lattices are modular.
## To do
- Relate atoms and coatoms in modular lattices
-/
/-- A modular lattice is one with a limited associativity between `⊓` and `⊔`. -/
class is_modular_lattice (α : Type u_2) [lattice α] where
sup_inf_le_assoc_of_le : ∀ {x : α} (y : α) {z : α}, x ≤ z → (x ⊔ y) ⊓ z ≤ x ⊔ y ⊓ z
theorem sup_inf_assoc_of_le {α : Type u_1} [lattice α] [is_modular_lattice α] {x : α} (y : α)
{z : α} (h : x ≤ z) : (x ⊔ y) ⊓ z = x ⊔ y ⊓ z :=
le_antisymm (is_modular_lattice.sup_inf_le_assoc_of_le y h)
(le_inf (sup_le_sup_left inf_le_left x) (sup_le h inf_le_right))
theorem is_modular_lattice.sup_inf_sup_assoc {α : Type u_1} [lattice α] [is_modular_lattice α]
{x : α} {y : α} {z : α} : x ⊓ z ⊔ y ⊓ z = (x ⊓ z ⊔ y) ⊓ z :=
Eq.symm (sup_inf_assoc_of_le y inf_le_right)
theorem inf_sup_assoc_of_le {α : Type u_1} [lattice α] [is_modular_lattice α] {x : α} (y : α)
{z : α} (h : z ≤ x) : x ⊓ y ⊔ z = x ⊓ (y ⊔ z) :=
sorry
protected instance order_dual.is_modular_lattice {α : Type u_1} [lattice α] [is_modular_lattice α] :
is_modular_lattice (order_dual α) :=
is_modular_lattice.mk
fun (x y z : order_dual α) (xz : x ≤ z) =>
le_of_eq
(eq.mpr (id (Eq._oldrec (Eq.refl ((x ⊔ y) ⊓ z = x ⊔ y ⊓ z)) inf_comm))
(eq.mpr (id (Eq._oldrec (Eq.refl (z ⊓ (x ⊔ y) = x ⊔ y ⊓ z)) sup_comm))
(eq.mpr (id (Eq._oldrec (Eq.refl (z ⊓ (y ⊔ x) = x ⊔ y ⊓ z)) (propext eq_comm)))
(eq.mpr (id (Eq._oldrec (Eq.refl (x ⊔ y ⊓ z = z ⊓ (y ⊔ x))) inf_comm))
(eq.mpr (id (Eq._oldrec (Eq.refl (x ⊔ z ⊓ y = z ⊓ (y ⊔ x))) sup_comm))
(eq.mpr
((fun (a a_1 : order_dual α) (e_1 : a = a_1) (ᾰ ᾰ_1 : order_dual α)
(e_2 : ᾰ = ᾰ_1) => congr (congr_arg Eq e_1) e_2)
(z ⊓ y ⊔ x) ((z ⊔ coe_fn order_dual.of_dual y) ⊓ x) (Eq.refl (z ⊓ y ⊔ x))
(z ⊓ (y ⊔ x)) (z ⊔ coe_fn order_dual.of_dual y ⊓ x) (Eq.refl (z ⊓ (y ⊔ x))))
(sup_inf_assoc_of_le (coe_fn order_dual.of_dual y)
(iff.mpr order_dual.dual_le xz))))))))
/-- The diamond isomorphism between the intervals `[a ⊓ b, a]` and `[b, a ⊔ b]` -/
def inf_Icc_order_iso_Icc_sup {α : Type u_1} [lattice α] [is_modular_lattice α] (a : α) (b : α) :
↥(set.Icc (a ⊓ b) a) ≃o ↥(set.Icc b (a ⊔ b)) :=
rel_iso.mk
(equiv.mk (fun (x : ↥(set.Icc (a ⊓ b) a)) => { val := ↑x ⊔ b, property := sorry })
(fun (x : ↥(set.Icc b (a ⊔ b))) => { val := a ⊓ ↑x, property := sorry }) sorry sorry)
sorry
namespace is_compl
/-- The diamond isomorphism between the intervals `set.Iic a` and `set.Ici b`. -/
def Iic_order_iso_Ici {α : Type u_1} [bounded_lattice α] [is_modular_lattice α] {a : α} {b : α}
(h : is_compl a b) : ↥(set.Iic a) ≃o ↥(set.Ici b) :=
order_iso.trans (order_iso.set_congr (set.Iic a) (set.Icc (a ⊓ b) a) sorry)
(order_iso.trans (inf_Icc_order_iso_Icc_sup a b)
(order_iso.set_congr (set.Icc b (a ⊔ b)) (set.Ici b) sorry))
end is_compl
theorem is_modular_lattice_iff_sup_inf_sup_assoc {α : Type u_1} [lattice α] :
is_modular_lattice α ↔ ∀ (x y z : α), x ⊓ z ⊔ y ⊓ z = (x ⊓ z ⊔ y) ⊓ z :=
sorry
namespace distrib_lattice
protected instance is_modular_lattice {α : Type u_1} [distrib_lattice α] : is_modular_lattice α :=
is_modular_lattice.mk
fun (x y z : α) (xz : x ≤ z) =>
eq.mpr (id (Eq._oldrec (Eq.refl ((x ⊔ y) ⊓ z ≤ x ⊔ y ⊓ z)) inf_sup_right))
(eq.mpr (id (Eq._oldrec (Eq.refl (x ⊓ z ⊔ y ⊓ z ≤ x ⊔ y ⊓ z)) (iff.mpr inf_eq_left xz)))
(le_refl (x ⊔ y ⊓ z)))
end distrib_lattice
namespace is_modular_lattice
protected instance is_modular_lattice_Iic {α : Type u_1} [bounded_lattice α] [is_modular_lattice α]
{a : α} : is_modular_lattice ↥(set.Iic a) :=
mk fun (x y z : ↥(set.Iic a)) (xz : x ≤ z) => sup_inf_le_assoc_of_le (↑y) xz
protected instance is_modular_lattice_Ici {α : Type u_1} [bounded_lattice α] [is_modular_lattice α]
{a : α} : is_modular_lattice ↥(set.Ici a) :=
mk fun (x y z : ↥(set.Ici a)) (xz : x ≤ z) => sup_inf_le_assoc_of_le (↑y) xz
end Mathlib
|
State Before: α : Type u_1
β : Type u_2
γ : Type ?u.28906
s s' : Finset α
t t' : Finset β
a : α
b : β
p : α → Prop
inst✝ : DecidablePred p
⊢ filter (fun x => p x.fst) (s ×ˢ t) = filter p s ×ˢ t State After: no goals Tactic: simpa using filter_product p fun _ => true
|
import Do.Mut
/-! # Early Return -/
open Lean
/- Disable the automatic monadic lifting feature described in the paper.
We want to make it clear that we do not depend on it. -/
set_option autoLift false
def runCatch [Monad m] (x : ExceptT α m α) : m α :=
ExceptT.run x >>= fun
| Except.ok x => pure x
| Except.error e => pure e
/-- Count syntax nodes satisfying `p`. -/
partial def Lean.Syntax.count (stx : Syntax) (p : Syntax → Bool) : Nat :=
stx.getArgs.foldl (fun n arg => n + arg.count p) (if p stx then 1 else 0)
syntax "return" term : stmt
syntax "return" : expander
macro_rules
| `(do' $s) => do -- (1')
-- optimization: fall back to original rule (1) if now `return` statement was expanded
let s' ← expandStmt (← `(stmt| expand! return in $s))
if s'.raw.count (· matches `(stmt| return $_)) == s.raw.count (· matches `(stmt| return $_)) then
`(d! $s)
else
`(ExceptCpsT.runCatch (d! $s'))
macro_rules
| `(stmt| expand! return in return $e) => `(stmt| throw $e) -- (R1)
| `(stmt| expand! return in $e:term) => `(stmt| ExceptCpsT.lift $e) -- (R2)
variable [Monad m]
variable (ma ma' : m α)
variable (b : Bool)
example [LawfulMonad m] :
(do' let x ← ma;
return x)
= ma
:= by simp
example : Id.run
(do' let x := 1; return x)
= 1
:= rfl
example [LawfulMonad m] :
(do' if b then {
let x ← ma;
return x
};
ma')
=
(if b then ma else ma')
:= by cases b <;> simp
example [LawfulMonad m] :
(do' let y ←
if b then {
let x ← ma;
return x
} else {
ma'
};
pure y)
=
(if b then ma else ma')
:= by cases b <;> simp
|
State Before: α : Type u
β : Type v
⊢ ∀ (f : ℕ → α → β) (l₁ l₂ : List α) (arr : Array β),
mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr)) State After: α : Type u
β : Type v
f : ℕ → α → β
l₁ l₂ : List α
arr : Array β
⊢ mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr)) Tactic: intros f l₁ l₂ arr State Before: α : Type u
β : Type v
f : ℕ → α → β
l₁ l₂ : List α
arr : Array β
⊢ mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr)) State After: α : Type u
β : Type v
f : ℕ → α → β
l₁ l₂ : List α
arr : Array β
len : ℕ
e : length (l₁ ++ l₂) = len
⊢ mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr)) Tactic: generalize e : (l₁ ++ l₂).length = len State Before: α : Type u
β : Type v
f : ℕ → α → β
l₁ l₂ : List α
arr : Array β
len : ℕ
e : length (l₁ ++ l₂) = len
⊢ mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr)) State After: α : Type u
β : Type v
f : ℕ → α → β
len : ℕ
⊢ ∀ (l₁ l₂ : List α) (arr : Array β),
length (l₁ ++ l₂) = len → mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr)) Tactic: revert l₁ l₂ arr State Before: α : Type u
β : Type v
f : ℕ → α → β
len : ℕ
⊢ ∀ (l₁ l₂ : List α) (arr : Array β),
length (l₁ ++ l₂) = len → mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr)) State After: case zero
α : Type u
β : Type v
f : ℕ → α → β
l₁ l₂ : List α
arr : Array β
h : length (l₁ ++ l₂) = Nat.zero
⊢ mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr))
case succ
α : Type u
β : Type v
f : ℕ → α → β
len : ℕ
ih :
∀ (l₁ l₂ : List α) (arr : Array β),
length (l₁ ++ l₂) = len → mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr))
l₁ l₂ : List α
arr : Array β
h : length (l₁ ++ l₂) = Nat.succ len
⊢ mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr)) Tactic: induction' len with len ih <;> intros l₁ l₂ arr h State Before: case zero
α : Type u
β : Type v
f : ℕ → α → β
l₁ l₂ : List α
arr : Array β
h : length (l₁ ++ l₂) = Nat.zero
⊢ mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr)) State After: case zero
α : Type u
β : Type v
f : ℕ → α → β
l₁ l₂ : List α
arr : Array β
h : length (l₁ ++ l₂) = Nat.zero
l₁_nil : l₁ = []
⊢ mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr)) Tactic: have l₁_nil : l₁ = [] := by cases l₁; rfl; contradiction State Before: case zero
α : Type u
β : Type v
f : ℕ → α → β
l₁ l₂ : List α
arr : Array β
h : length (l₁ ++ l₂) = Nat.zero
l₁_nil : l₁ = []
⊢ mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr)) State After: case zero
α : Type u
β : Type v
f : ℕ → α → β
l₁ l₂ : List α
arr : Array β
h : length (l₁ ++ l₂) = Nat.zero
l₁_nil : l₁ = []
l₂_nil : l₂ = []
⊢ mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr)) Tactic: have l₂_nil : l₂ = [] := by cases l₂; rfl; rw [List.length_append] at h; contradiction State Before: case zero
α : Type u
β : Type v
f : ℕ → α → β
l₁ l₂ : List α
arr : Array β
h : length (l₁ ++ l₂) = Nat.zero
l₁_nil : l₁ = []
l₂_nil : l₂ = []
⊢ mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr)) State After: case zero
α : Type u
β : Type v
f : ℕ → α → β
l₁ l₂ : List α
arr : Array β
h : length (l₁ ++ l₂) = Nat.zero
l₁_nil : l₁ = []
l₂_nil : l₂ = []
⊢ mapIdx.go f ([] ++ []) arr = mapIdx.go f [] (toArray (mapIdx.go f [] arr)) Tactic: rw [l₁_nil, l₂_nil] State Before: case zero
α : Type u
β : Type v
f : ℕ → α → β
l₁ l₂ : List α
arr : Array β
h : length (l₁ ++ l₂) = Nat.zero
l₁_nil : l₁ = []
l₂_nil : l₂ = []
⊢ mapIdx.go f ([] ++ []) arr = mapIdx.go f [] (toArray (mapIdx.go f [] arr)) State After: no goals Tactic: simp only [mapIdx.go, Array.toList_eq, Array.toArray_data] State Before: α : Type u
β : Type v
f : ℕ → α → β
l₁ l₂ : List α
arr : Array β
h : length (l₁ ++ l₂) = Nat.zero
⊢ l₁ = [] State After: case nil
α : Type u
β : Type v
f : ℕ → α → β
l₂ : List α
arr : Array β
h : length ([] ++ l₂) = Nat.zero
⊢ [] = []
case cons
α : Type u
β : Type v
f : ℕ → α → β
l₂ : List α
arr : Array β
head✝ : α
tail✝ : List α
h : length (head✝ :: tail✝ ++ l₂) = Nat.zero
⊢ head✝ :: tail✝ = [] Tactic: cases l₁ State Before: case nil
α : Type u
β : Type v
f : ℕ → α → β
l₂ : List α
arr : Array β
h : length ([] ++ l₂) = Nat.zero
⊢ [] = []
case cons
α : Type u
β : Type v
f : ℕ → α → β
l₂ : List α
arr : Array β
head✝ : α
tail✝ : List α
h : length (head✝ :: tail✝ ++ l₂) = Nat.zero
⊢ head✝ :: tail✝ = [] State After: case cons
α : Type u
β : Type v
f : ℕ → α → β
l₂ : List α
arr : Array β
head✝ : α
tail✝ : List α
h : length (head✝ :: tail✝ ++ l₂) = Nat.zero
⊢ head✝ :: tail✝ = [] Tactic: rfl State Before: case cons
α : Type u
β : Type v
f : ℕ → α → β
l₂ : List α
arr : Array β
head✝ : α
tail✝ : List α
h : length (head✝ :: tail✝ ++ l₂) = Nat.zero
⊢ head✝ :: tail✝ = [] State After: no goals Tactic: contradiction State Before: α : Type u
β : Type v
f : ℕ → α → β
l₁ l₂ : List α
arr : Array β
h : length (l₁ ++ l₂) = Nat.zero
l₁_nil : l₁ = []
⊢ l₂ = [] State After: case nil
α : Type u
β : Type v
f : ℕ → α → β
l₁ : List α
arr : Array β
l₁_nil : l₁ = []
h : length (l₁ ++ []) = Nat.zero
⊢ [] = []
case cons
α : Type u
β : Type v
f : ℕ → α → β
l₁ : List α
arr : Array β
l₁_nil : l₁ = []
head✝ : α
tail✝ : List α
h : length (l₁ ++ head✝ :: tail✝) = Nat.zero
⊢ head✝ :: tail✝ = [] Tactic: cases l₂ State Before: case nil
α : Type u
β : Type v
f : ℕ → α → β
l₁ : List α
arr : Array β
l₁_nil : l₁ = []
h : length (l₁ ++ []) = Nat.zero
⊢ [] = []
case cons
α : Type u
β : Type v
f : ℕ → α → β
l₁ : List α
arr : Array β
l₁_nil : l₁ = []
head✝ : α
tail✝ : List α
h : length (l₁ ++ head✝ :: tail✝) = Nat.zero
⊢ head✝ :: tail✝ = [] State After: case cons
α : Type u
β : Type v
f : ℕ → α → β
l₁ : List α
arr : Array β
l₁_nil : l₁ = []
head✝ : α
tail✝ : List α
h : length (l₁ ++ head✝ :: tail✝) = Nat.zero
⊢ head✝ :: tail✝ = [] Tactic: rfl State Before: case cons
α : Type u
β : Type v
f : ℕ → α → β
l₁ : List α
arr : Array β
l₁_nil : l₁ = []
head✝ : α
tail✝ : List α
h : length (l₁ ++ head✝ :: tail✝) = Nat.zero
⊢ head✝ :: tail✝ = [] State After: case cons
α : Type u
β : Type v
f : ℕ → α → β
l₁ : List α
arr : Array β
l₁_nil : l₁ = []
head✝ : α
tail✝ : List α
h : length l₁ + length (head✝ :: tail✝) = Nat.zero
⊢ head✝ :: tail✝ = [] Tactic: rw [List.length_append] at h State Before: case cons
α : Type u
β : Type v
f : ℕ → α → β
l₁ : List α
arr : Array β
l₁_nil : l₁ = []
head✝ : α
tail✝ : List α
h : length l₁ + length (head✝ :: tail✝) = Nat.zero
⊢ head✝ :: tail✝ = [] State After: no goals Tactic: contradiction State Before: case succ
α : Type u
β : Type v
f : ℕ → α → β
len : ℕ
ih :
∀ (l₁ l₂ : List α) (arr : Array β),
length (l₁ ++ l₂) = len → mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr))
l₁ l₂ : List α
arr : Array β
h : length (l₁ ++ l₂) = Nat.succ len
⊢ mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr)) State After: case succ.nil
α : Type u
β : Type v
f : ℕ → α → β
len : ℕ
ih :
∀ (l₁ l₂ : List α) (arr : Array β),
length (l₁ ++ l₂) = len → mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr))
l₂ : List α
arr : Array β
h : length ([] ++ l₂) = Nat.succ len
⊢ mapIdx.go f ([] ++ l₂) arr = mapIdx.go f l₂ (toArray (Array.toList arr))
case succ.cons
α : Type u
β : Type v
f : ℕ → α → β
len : ℕ
ih :
∀ (l₁ l₂ : List α) (arr : Array β),
length (l₁ ++ l₂) = len → mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr))
l₂ : List α
arr : Array β
head : α
tail : List α
h : length (head :: tail ++ l₂) = Nat.succ len
⊢ mapIdx.go f (List.append tail l₂) (Array.push arr (f (Array.size arr) head)) =
mapIdx.go f l₂ (toArray (mapIdx.go f tail (Array.push arr (f (Array.size arr) head)))) Tactic: cases' l₁ with head tail <;> simp only [mapIdx.go] State Before: case succ.nil
α : Type u
β : Type v
f : ℕ → α → β
len : ℕ
ih :
∀ (l₁ l₂ : List α) (arr : Array β),
length (l₁ ++ l₂) = len → mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr))
l₂ : List α
arr : Array β
h : length ([] ++ l₂) = Nat.succ len
⊢ mapIdx.go f ([] ++ l₂) arr = mapIdx.go f l₂ (toArray (Array.toList arr)) State After: no goals Tactic: simp only [nil_append, Array.toList_eq, Array.toArray_data] State Before: case succ.cons
α : Type u
β : Type v
f : ℕ → α → β
len : ℕ
ih :
∀ (l₁ l₂ : List α) (arr : Array β),
length (l₁ ++ l₂) = len → mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr))
l₂ : List α
arr : Array β
head : α
tail : List α
h : length (head :: tail ++ l₂) = Nat.succ len
⊢ mapIdx.go f (List.append tail l₂) (Array.push arr (f (Array.size arr) head)) =
mapIdx.go f l₂ (toArray (mapIdx.go f tail (Array.push arr (f (Array.size arr) head)))) State After: case succ.cons
α : Type u
β : Type v
f : ℕ → α → β
len : ℕ
ih :
∀ (l₁ l₂ : List α) (arr : Array β),
length (l₁ ++ l₂) = len → mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr))
l₂ : List α
arr : Array β
head : α
tail : List α
h : length (head :: tail ++ l₂) = Nat.succ len
⊢ mapIdx.go f (tail ++ l₂) (Array.push arr (f (Array.size arr) head)) =
mapIdx.go f l₂ (toArray (mapIdx.go f tail (Array.push arr (f (Array.size arr) head)))) Tactic: simp only [List.append_eq] State Before: case succ.cons
α : Type u
β : Type v
f : ℕ → α → β
len : ℕ
ih :
∀ (l₁ l₂ : List α) (arr : Array β),
length (l₁ ++ l₂) = len → mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr))
l₂ : List α
arr : Array β
head : α
tail : List α
h : length (head :: tail ++ l₂) = Nat.succ len
⊢ mapIdx.go f (tail ++ l₂) (Array.push arr (f (Array.size arr) head)) =
mapIdx.go f l₂ (toArray (mapIdx.go f tail (Array.push arr (f (Array.size arr) head)))) State After: case succ.cons.e
α : Type u
β : Type v
f : ℕ → α → β
len : ℕ
ih :
∀ (l₁ l₂ : List α) (arr : Array β),
length (l₁ ++ l₂) = len → mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr))
l₂ : List α
arr : Array β
head : α
tail : List α
h : length (head :: tail ++ l₂) = Nat.succ len
⊢ length (tail ++ l₂) = len Tactic: rw [ih] State Before: case succ.cons.e
α : Type u
β : Type v
f : ℕ → α → β
len : ℕ
ih :
∀ (l₁ l₂ : List α) (arr : Array β),
length (l₁ ++ l₂) = len → mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr))
l₂ : List α
arr : Array β
head : α
tail : List α
h : length (head :: tail ++ l₂) = Nat.succ len
⊢ length (tail ++ l₂) = len State After: case succ.cons.e
α : Type u
β : Type v
f : ℕ → α → β
len : ℕ
ih :
∀ (l₁ l₂ : List α) (arr : Array β),
length (l₁ ++ l₂) = len → mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr))
l₂ : List α
arr : Array β
head : α
tail : List α
h : length tail + length l₂ = len
⊢ length (tail ++ l₂) = len Tactic: simp only [cons_append, length_cons, length_append, Nat.succ.injEq] at h State Before: case succ.cons.e
α : Type u
β : Type v
f : ℕ → α → β
len : ℕ
ih :
∀ (l₁ l₂ : List α) (arr : Array β),
length (l₁ ++ l₂) = len → mapIdx.go f (l₁ ++ l₂) arr = mapIdx.go f l₂ (toArray (mapIdx.go f l₁ arr))
l₂ : List α
arr : Array β
head : α
tail : List α
h : length tail + length l₂ = len
⊢ length (tail ++ l₂) = len State After: no goals Tactic: simp only [length_append, h]
|
If $f$ is L-Lipschitz on $X$, then $f$ is uniformly continuous on $X$.
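A sketch of the standard $\varepsilon$-$\delta$ argument (for $L > 0$; if $L = 0$ then $f$ is constant and the claim is immediate): given $\varepsilon > 0$, take $\delta = \varepsilon / L$, so that for all $x, y \in X$
\begin{align*}
\mathrm{dist}(x, y) < \delta \implies \mathrm{dist}(f(x), f(y)) \le L\,\mathrm{dist}(x, y) < L \cdot \frac{\varepsilon}{L} = \varepsilon,
\end{align*}
with $\delta$ independent of $x$ and $y$.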
|
# Problem set 3 (90 pts)
## Important note: the template for your solution filename is Name_Surname_PS3.ipynb
## Problem 1 (25 pts)
- (5 pts) Prove that $\mathrm{vec}(AXB) = (B^\top \otimes A)\, \mathrm{vec}(X)$ if $\mathrm{vec}(X)$ is a columnwise reshape of a matrix into a long vector. What changes if the reshape is rowwise?
**Note:** To make a columnwise reshape in Python one should use ```np.reshape(X, -1, order='f')```, where the string ```'f'``` stands for the Fortran ordering.
- (2 pts) What is the complexity of a naive computation of $(A \otimes B) x$? Show how it can be reduced.
- (3 pts) Let matrices $A$ and $B$ have eigendecompositions $A = S_A\Lambda_A S_A^{-1}$ and $B = S_B\Lambda_B S^{-1}_B$. Find eigenvectors and eigenvalues of the matrix $A\otimes I + I \otimes B$, where dimension of $I$ coincides with the dimension of $A$ and $B$.
- (10 pts) Let $A = \mathrm{diag}\left(\frac{1}{1000},\frac{2}{1000},\dots \frac{999}{1000}, 1, 1000 \right)$. Estimate analytically the number of iterations required to solve linear system with $A$ with the relative accuracy $10^{-4}$ using
- Richardson iteration with the optimal choice of parameter (use $2$-norm)
- Chebyshev iteration (use $2$-norm)
- Conjugate gradient method (use $A$-norm).
- (5 pts) Provide numerical confirmation of your estimate from theoretical point of view
**Task 1.1**
Let $N \in \mathbb{N}$. I will denote $[N] = \{0, 1, 2, \dots, N-1\}$. Let $A$ be a matrix. By $A_{i, j}$ I will denote the matrix element in the $i$-th row and $j$-th column.
Let $X \in \mathbb{R}^{N\times M}$, $A \in \mathbb{R}^{K \times N}$, $B \in \mathbb{R}^{M \times L}$. Then $B^T \otimes A \in \mathbb{R}^{KL \times NM}$, moreover,
\begin{gather*}
n \in [N], m \in [M], k \in [K], l \in [L] \Rightarrow \\
\Rightarrow (B^T \otimes A)_{Kl + k, Nm + n} = B_{m, l} A_{k, n}
\end{gather*}
Concerning $\mathrm{vec}(X)$ we have the following:
\begin{gather*}
n \in [N], m \in [M] \Rightarrow\\\Rightarrow
\mathrm{vec}(X)_{Nm + n} = X_{n, m}
\end{gather*}
Analogously,
\begin{gather*}
k \in [K], l \in [L] \Rightarrow\\\Rightarrow
\mathrm{vec}(AXB)_{Kl + k} = \left(AXB\right)_{k, l} = A_{k,\cdot} X B_{\cdot, l}
\end{gather*}
Here $A_{k, \cdot}$ is the $k$-th row of the matrix $A$ and $B_{\cdot, l}$ is the $l$-th column of the matrix $B$.
Move on to our task:
\begin{gather*}
\text{Let } k \in [K], l \in [L] \Rightarrow\\
\Rightarrow \left((B^T \otimes A) \mathrm{vec}(X)\right)_{Kl + k} = \sum\limits_{n \in [N], m \in [M]} B_{m, l} A_{k, n} \mathrm{vec}(X)_{Nm + n} =\\=
\sum\limits_{n \in [N], m \in [M]} B_{m, l} A_{k, n} X_{n, m} = \sum\limits_{n \in [N], m \in [M]} A_{k, n} X_{n, m} B_{m, l} = A_{k, \cdot} X B_{\cdot,l} = \mathrm{vec}(AXB)_{Kl + k}
\end{gather*}
So, we finally obtain, that $\mathrm{vec}(AXB) = (B^T \otimes A) \mathrm{vec}(X)$.
In case of using rowwise reshape the changes will be the following:
\begin{gather*}
\begin{cases}
(A\otimes B^T)_{Lk + l, Mn + m} = B_{m, l}A_{k, n}\\
\mathrm{vec}_{rw}(X)_{Mn + m} = X_{n, m}\\
\mathrm{vec}_{rw}(AXB)_{Lk + l} = A_{k,\cdot} X B_{\cdot, l}
\end{cases} \Rightarrow \mathrm{vec}_{rw}(AXB) = (A\otimes B^T)\mathrm{vec}_{rw}(X)
\end{gather*}
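Both identities are easy to check numerically (a minimal `numpy` sketch; the shapes and the helper names `vec_cw`, `vec_rw` are ours):

```python
import numpy as np

np.random.seed(0)
A = np.random.rand(3, 4)   # K x N
X = np.random.rand(4, 5)   # N x M
B = np.random.rand(5, 2)   # M x L

vec_cw = lambda M: np.reshape(M, -1, order='F')  # columnwise (Fortran) reshape
vec_rw = lambda M: np.reshape(M, -1, order='C')  # rowwise reshape

print(np.allclose(vec_cw(A @ X @ B), np.kron(B.T, A) @ vec_cw(X)))  # True
print(np.allclose(vec_rw(A @ X @ B), np.kron(A, B.T) @ vec_rw(X)))  # True
```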
**Task 1.2**
At first, we need the following statement:
**Statement** *(mixed product property)* Let $A, B, C, D$ be matrices (or vectors, which can be seen as special case of matrices) such, that $AB$ and $CD$ is well-defined (appropriate dimensions are coincide). Then:
\begin{gather*}
(A\otimes B)(C \otimes D) = (AC) \otimes (BD)
\end{gather*}
Actually, it is well-known property, a proof can be found, for example, in the following paper [The Kronecker Product](https://digitalcommons.unf.edu/cgi/viewcontent.cgi?article=1025&context=etd), Theorem 7.
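For completeness, here is a quick numerical check of the mixed product property (a minimal `numpy` sketch with arbitrary compatible shapes):

```python
import numpy as np

np.random.seed(0)
A, C = np.random.rand(2, 3), np.random.rand(3, 4)
B, D = np.random.rand(5, 2), np.random.rand(2, 3)
print(np.allclose(np.kron(A, B) @ np.kron(C, D), np.kron(A @ C, B @ D)))  # True
```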
Move on to our task. Let $A \in \mathbb{R}^{k \times l}$, $B \in \mathbb{R}^{m\times n}$, $x \in \mathbb{R}^{ln}$. In the naive computation of $(A \otimes B) x$ we first compute the matrix $C = A \otimes B$, which requires $O(klmn)$ operations and $O(klmn)$ memory. Then we simply multiply $x$ by $C$, which (assuming we don't use fast techniques like the Strassen algorithm) also requires $O(klmn)$ operations. Therefore:
\begin{gather*}
\textbf{Naive algorithm summary}\\
\text{Computations: } O(klmn)\\
\text{Memory: } O(klmn)
\end{gather*}
We come up with the following solution:
At first, from the *mixed product property* we have the following:
\begin{gather*}
(A I_l) \otimes (I_m B) = (A \otimes I_m) (I_l \otimes B)\\
I_l \otimes B = \mathrm{diag}(\underbrace{B, \dots, B}_{l \text{ times}}) = \begin{bmatrix}
B & 0 & \dots& & 0\\
0 & B & 0 & \dots & \\
\vdots & \ddots & \ddots & \ddots & \vdots\\
& \dots & 0 & B& 0\\
0 & \dots & & 0 & B
\end{bmatrix} \in \mathbb{R}^{lm \times ln}\\
A \otimes I_m = \begin{bmatrix}
A_{1,1} I_m & \dots & A_{1,l} I_m\\
\vdots & \ddots & \vdots\\
A_{k, 1} I_m & \dots & A_{k, l} I_m
\end{bmatrix} \in \mathbb{R}^{km \times ml}
\end{gather*}
To multiply $x$ by $I_l \otimes B$ we need $O(lmn)$ operations (we multiply $x[ni:n(i + 1)]$ by $B$ for $i \in [l]$, and each such multiplication takes $O(mn)$ operations). Let $y = (I_l \otimes B) x$. Now we need to multiply $y$ by $A \otimes I_m$. Note that each row of the matrix $A \otimes I_m$ contains only $l$ non-zero elements, corresponding to a row of the matrix $A$, and the matrix $A \otimes I_m$ has $mk$ rows in total. Therefore, multiplying $y$ by $A \otimes I_m$ takes no more than $O(lmk)$ operations. At every step we occupy no more than $O(ln + lm + km + kl + mn)$ memory (summing the storage of the matrices $A$ and $B$, the vectors $x$ and $y$, and the result vector). Finally:
\begin{gather*}
\textbf{More efficient implementation summary}\\
\text{Computations: } O(lmk + lmn)\\
\text{Memory: } O(ln + lm + km + kl + mn)
\end{gather*}
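The same complexity can also be achieved via the reshape identity from Task 1.1, $(A \otimes B)\,\mathrm{vec}(X) = \mathrm{vec}(B X A^\top)$ (column-major $\mathrm{vec}$, $X \in \mathbb{R}^{n \times l}$). Below is a minimal `numpy` sketch of this idea; the helper name `kron_matvec` is ours:

```python
import numpy as np

def kron_matvec(A, B, x):
    """Compute (A ⊗ B) x without forming the Kronecker product.

    Uses (A ⊗ B) vec(X) = vec(B X A^T) with column-major vec,
    so the cost is O(lmn + lmk) instead of O(klmn)."""
    k, l = A.shape
    m, n = B.shape
    X = np.reshape(x, (n, l), order='F')
    return np.reshape(B @ X @ A.T, -1, order='F')

# sanity check against the naive computation
np.random.seed(0)
A, B = np.random.rand(3, 4), np.random.rand(5, 6)
x = np.random.rand(4 * 6)
print(np.allclose(kron_matvec(A, B, x), np.kron(A, B) @ x))  # True
```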
**Task 1.3**
Let $A \in \mathbb{C}^{n \times n}$ have eigenvectors $a_1, \dots, a_n$ with eigenvalues $\lambda_1, \lambda_2, \dots, \lambda_n$ and let $B \in \mathbb{C}^{n \times n}$ have eigenvectors $b_1, \dots, b_n$ with eigenvalues $\mu_1, \dots, \mu_n$ (the eigenvectors are just the columns of $S_A$ and $S_B$ respectively). Let's show that $v_{ij} = a_i \otimes b_j$, $i, j \in \{1, \dots, n\}$, are eigenvectors of $A \otimes I + I \otimes B$.
1. Let $(i_1, j_1) \neq (i_2, j_2)$. Then:
\begin{gather*}
(a_{i_1} \otimes b_{j_1})^T (a_{i_2} \otimes b_{j_2}) = (a_{i_1}^T \otimes b_{j_1}^T) (a_{i_2} \otimes b_{j_2}) =\\=
(a_{i_1}^T a_{i_2}) \otimes (b_{j_1}^T b_{j_2}) = 0 \quad (\text{because } a_{i_1}^T a_{i_2} = 0 \text{ or } b_{j_1}^T b_{j_2} = 0 \text{, assuming the eigenvectors are chosen orthonormal})
\end{gather*}
So we obtain that the $v_{ij}$ are pairwise orthogonal, hence linearly independent, and therefore form a basis in $\mathbb{R}^{n^2}$. (In the general non-symmetric case, linear independence follows directly from the invertibility of $S_A \otimes S_B$, whose columns are exactly the vectors $v_{ij}$.)
2. We have the following:
\begin{gather*}
\left(A \otimes I + I \otimes B\right) v_{ij} = (A \otimes I) (a_i \otimes b_j) + (I \otimes B)(a_i \otimes b_j) = \\
= (Aa_i) \otimes (Ib_j) + (Ia_i) \otimes (Bb_j) = (\lambda_i + \mu_j) a_i \otimes b_j
\end{gather*}
So we obtain that the matrix $A \otimes I + I \otimes B$ has eigenvectors $a_i \otimes b_j$, $i, j \in \{1, \dots n\}$, with corresponding eigenvalues $\lambda_i + \mu_j$.
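A quick numerical check of this statement (a small `numpy` sketch; symmetric $A$ and $B$ are used so that the spectra are easy to compare):

```python
import numpy as np

np.random.seed(0)
n = 4
A = np.random.rand(n, n); A = A + A.T   # symmetric, so eigenvectors are orthonormal
B = np.random.rand(n, n); B = B + B.T
M = np.kron(A, np.eye(n)) + np.kron(np.eye(n), B)
lamA, lamB = np.linalg.eigvalsh(A), np.linalg.eigvalsh(B)
expected = np.sort((lamA[:, None] + lamB[None, :]).ravel())
print(np.allclose(np.sort(np.linalg.eigvalsh(M)), expected))  # True
```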
**Task 1.4**
We have $A = \mathrm{diag}\left(\frac{1}{1000},\frac{2}{1000},\dots \frac{999}{1000}, 1, 1000 \right)$. Let's estimate the number of iterations different methods require to reach the relative accuracy $ \frac{\Vert x - x^*\Vert}{\Vert x^* \Vert} = 10^{-4}$
1. **Richardson iterations**
From the lecture, we know, that
\begin{gather*}
\Vert e_k\Vert_2 \leq q^{k} \Vert e_0\Vert_2, \,\, e_i = x_i - x^*, \,\, q = \frac{\mathrm{cond}(A) - 1}{\mathrm{cond}(A)+1} \Rightarrow\\
\frac{\Vert x_n - x^*\Vert_2}{\Vert x^* \Vert_2} \leq q^{n} \frac{\Vert x_0 - x^* \Vert_2}{\Vert x^* \Vert_2}, \quad x_0 \text{ is initial approximation}
\end{gather*}
Given that for our matrix $q = \frac{10^6 - 1}{10^6 + 1}$ and supposing $\frac{\Vert x_0 - x^* \Vert_2}{\Vert x^* \Vert_2} \approx 1$, we obtain that we need about $5 \cdot 10^6$ iterations to reach the desired relative accuracy
2. **Chebyshev iterations**
As we know from the lecture,
\begin{gather*}
\Vert e_n \Vert_2 \leq \max\limits_{\xi \in [\lambda_{\min}, \lambda_{\max}]}\left|c T_{n}\left( \frac{2 \xi - (\lambda_{\max} + \lambda_{\min})}{\lambda_{\max} - \lambda_{\min}} \right)\right| \Vert e_0 \Vert_2
\end{gather*}
Here $T_n$ is the $n$-th Chebyshev polynomial and $\left|cT_n\left(\frac{-(\lambda_{\max} + \lambda_{\min})}{\lambda_{\max} - \lambda_{\min}}\right)\right| = 1 = \left|cT_n\left(\frac{\lambda_{\max} + \lambda_{\min}}{\lambda_{\max} - \lambda_{\min}}\right)\right|$ (the last equality follows from the properties of Chebyshev polynomial).
We know, the following:
\begin{gather*}
\begin{cases}
\lambda_{\max} = 1000\\
\lambda_{\min} = \frac{1}{1000}
\end{cases} \Rightarrow |c| = \frac{1}{T_n\left(\frac{10^6 + 1}{10^6 - 1}\right)}
\underbrace{\Rightarrow}_{\text{property of } T_n} |c| = \frac{1}{\cosh\left(n \,\mathrm{arccosh}\left(\frac{10^6 + 1}{10^6 - 1}\right)\right)}
\end{gather*}
Since $\left|T_{n}\left( \frac{2 \xi - (\lambda_{\max} + \lambda_{\min})}{\lambda_{\max} - \lambda_{\min}} \right)\right| \leq 1$ on $\xi \in [\lambda_{\min}, \lambda_{\max}]$, then:
\begin{gather*}
\Vert e_n \Vert_2 \leq \frac{\Vert e_0 \Vert_2}{\cosh\left(n \,\mathrm{arccosh}\left(\frac{10^6 + 1}{10^6 - 1}\right)\right)} \Rightarrow\\
\Rightarrow
\frac{\Vert x_n - x^*\Vert_2}{\Vert x^* \Vert_2} \leq \frac{1}{\cosh\left(n \,\mathrm{arccosh}\left(\frac{10^6 + 1}{10^6 - 1}\right)\right)} \frac{\Vert x_0 - x^* \Vert_2}{\Vert x^* \Vert_2}
\end{gather*}
By evaluating $\cosh\left(n \,\mathrm{arccosh}\left(\frac{10^6 + 1}{10^6 - 1}\right)\right)$ for different $n$ and supposing $\frac{\Vert x_0 - x^* \Vert_2}{\Vert x^* \Vert_2} \approx 1$, we obtain that we need about $5 \cdot 10^3$ iterations to reach the desired accuracy
3. **CG iterations**
From the lectures we know the following:
\begin{gather*}
\frac{\Vert x_n - x^* \Vert_A }{\Vert x^* \Vert_A} \leq \inf_{q_n, q_n(0)=1} \max\limits_{\lambda\in[\lambda_{\min},\lambda_{\max}]} |q_n({\lambda})|, \quad q_n \in \mathrm{Poly}(n)
\end{gather*}
As we know, the $\inf$ in the equation above is attained at $q_n(\lambda) = c T_{n}\left( \frac{2 \lambda - (\lambda_{\max} + \lambda_{\min})}{\lambda_{\max} - \lambda_{\min}} \right)$ with $q_n(0) = 1$. Similarly to the previous subtask, we obtain:
\begin{gather*}
\frac{\Vert x_n - x^* \Vert_A }{\Vert x^* \Vert_A} \leq \frac{1}{\cosh\left(n \,\mathrm{arccosh}\left(\frac{10^6 + 1}{10^6 - 1}\right)\right)}
\end{gather*}
Therefore, we have the same estimate for the number of iterations needed.
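Before the numerical experiment below, both estimates can be reproduced directly from the bounds above by solving $q^n = 10^{-4}$ and $\cosh\left(n\,\mathrm{arccosh}\left(\frac{10^6+1}{10^6-1}\right)\right) = 10^{4}$ for $n$ (a small sketch):

```python
import numpy as np

kappa = 1e6  # cond(A)
q = (kappa - 1) / (kappa + 1)
n_richardson = np.log(1e-4) / np.log(q)
n_chebyshev = np.arccosh(1e4) / np.arccosh((kappa + 1) / (kappa - 1))
print(f"Richardson: ~{n_richardson:.1e} iterations")   # ~4.6e+06
print(f"Chebyshev/CG: ~{n_chebyshev:.1e} iterations")  # ~5.0e+03
```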
**Task 1.4 (numerical confirmation)**
```python
# Your solution is here
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
sns.set()
def get_cond_A(diag_A):
'''
Returns condition number of the matrix A in diagonal
form with positive diagonal elements
'''
return np.max(diag_A)/np.min(diag_A)
def get_true_solution(diag_A, f):
'''
Returns true solution for the equation Ax = f
A in diagonal form
'''
return f/diag_A
def get_relative_accuracy_factor(diag_A, f, x_0):
x_true = get_true_solution(diag_A, f)
return np.linalg.norm(x_0 - x_true, ord=2)/np.linalg.norm(x_true, ord=2)
def get_cheb_decay(n, _lambda_max):
return 1./np.cosh(n * np.arccosh(
(_lambda_max**2 + 1.)/(_lambda_max**2 - 1.)))
def create_A_norm(diag_A):
def A_norm(x):
return np.sqrt(np.sum(diag_A * x * x))
return A_norm
def richardson_iterations(diag_A, f, x_0, rel_err=1e-4):
rich_theory_est = []
rich_prac_est = []
cond_A = get_cond_A(diag_A)
q = (cond_A - 1)/(cond_A + 1)
relative_factor = get_relative_accuracy_factor(diag_A, f, x_0)
n_iterations = int(
np.ceil(np.log(rel_err/relative_factor)/np.log(q)))
print("Richardson iterations will be repeated {} times".format(
n_iterations))
rich_theory_est.append(relative_factor)
rich_prac_est.append(relative_factor)
tau = 2/(np.max(diag_A) + np.min(diag_A))
x_true = get_true_solution(diag_A, f)
x_true_norm = np.linalg.norm(x_true, ord=2)
e = x_0 - x_true
for i_iter in tqdm(range(n_iterations)):
e -= tau*diag_A*e
rich_theory_est.append(rich_theory_est[-1]*q)
rich_prac_est.append(np.linalg.norm(e, ord=2)/x_true_norm)
print('Richardson iterations practice rel. error = {0:3f}'.format(
float(rich_prac_est[-1])))
return rich_theory_est, rich_prac_est
def launch_cheb_iters_once(niters, diag_A, e_0, _lambda_max):
lm = _lambda_max
roots = [np.cos((np.pi * (2 * i + 1)) / (2 * niters)) for i in range(niters)]
taus = [2./(lm + 1./lm - (lm - 1./lm) * r) for r in roots]
e = e_0.copy()
good_perm = np.array([i for i in range(0, niters)])
np.random.shuffle(good_perm)
for i in range(niters):
e -= taus[good_perm[i]] * (diag_A * e)
return e
def chebyshev_iterations(diag_A, f, x_0, rel_err=1e-4, n_launches=100):
cheb_theory_est = []
cheb_prac_est = []
_lambda_max=np.max(diag_A)
relative_factor = get_relative_accuracy_factor(diag_A, f, x_0)
n_iterations = 0
x_true = get_true_solution(diag_A, f)
x_true_norm = np.linalg.norm(x_true, ord=2)
while True:
n_iterations += 1
if get_cheb_decay(
n_iterations, _lambda_max) * relative_factor < rel_err:
break
print("Chebyshev iteratinos will be repeated {} times".format(
n_iterations))
n_diff = n_iterations//n_launches
launches = list(range(1, n_iterations + 1, n_diff))
if launches[-1] < n_iterations:
launches.append(n_iterations)
for niters in launches:
cheb_theory_est.append(
get_cheb_decay(niters, _lambda_max)*relative_factor)
cheb_prac_est.append(np.linalg.norm(
launch_cheb_iters_once(
niters, diag_A, x_0 - x_true, _lambda_max), ord=2)/x_true_norm)
print('Chebyshev iterations practice rel. error = {0:3f}'.format(
float(cheb_prac_est[-1])))
return cheb_theory_est, cheb_prac_est, launches
def cg_iteration_once(diag_A, f, r, p, x):
r_sq = np.sum(r*r)
alpha = r_sq/(np.sum(diag_A * p * p))
x_new = x + alpha * p
r_new = r - alpha * diag_A * p
beta = np.sum(r_new * r_new)/r_sq
p_new = r_new + beta * p
return r_new, p_new, x_new
def cg_iterations(diag_A, f, x_0, rel_err=1e-4):
cg_theory_est = []
cg_prac_est = []
_lambda_max = np.max(diag_A)
A_norm = create_A_norm(diag_A)
x_true = get_true_solution(diag_A, f)
x_true_norm = A_norm(x_true)
n_iterations = 0
while True:
n_iterations += 1
if get_cheb_decay(
n_iterations, _lambda_max) < rel_err:
break
print("CG iteratinos will be repeated {} times".format(
n_iterations))
r = f - diag_A * x_0
p = r.copy()
x = x_0.copy()
for i_iter in range(n_iterations):
r, p, x = cg_iteration_once(diag_A, f, r, p, x)
cg_theory_est.append(
get_cheb_decay(i_iter, _lambda_max))
cg_prac_est.append(A_norm(x - x_true)/x_true_norm)
if cg_prac_est[-1] < rel_err:
print('CG iterations: Early stop at {} iteration'.format(i_iter))
break
print('CG iterations practice rel. error = {0:3f}'.format(
float(cg_prac_est[-1])))
return cg_theory_est, cg_prac_est
np.random.seed(42)
A = np.diag(np.concatenate([np.arange(1., 1000)/1000., np.array([1., 1000.])]))
f = np.random.rand(A.shape[0])
x_0 = np.random.rand(A.shape[0])
diag_A = np.diag(A)
rich_th, rich_prac = richardson_iterations(diag_A, f, x_0, rel_err=1e-4)
cheb_th, cheb_prac, launches = chebyshev_iterations(diag_A, f, x_0, rel_err=1e-4)
cg_th, cg_prac = cg_iterations(diag_A, f, x_0, rel_err=1e-4)
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(15, 4))
ax[0].semilogy(rich_th, label='theory')
ax[0].semilogy(rich_prac, label='practice')
ax[0].legend()
ax[0].set_title('Richardson')
ax[0].set_xlabel('n iterations')
ax[0].set_ylabel('relative err.')
ax[1].semilogy(launches, cheb_th, label='theory')
ax[1].semilogy(launches, cheb_prac, label='practice')
ax[1].legend()
ax[1].set_title('Chebyshev')
ax[1].set_xlabel('n iterations')
ax[1].set_ylabel('relative err.')
ax[2].semilogy(cg_th, label='theory')
ax[2].semilogy(cg_prac, label='practice')
ax[2].legend()
ax[2].set_title('CG')
ax[2].set_xlabel('n iterations')
ax[2].set_ylabel('relative err.')
plt.show()
```
**Conclusion**
As we can see, for the *Richardson* and *Chebyshev* iteration techniques the theoretical estimate of the number of iterations coincides with the practical result. However, theory and practice diverge dramatically for *CG*. This divergence can be explained by the following:
As we know,
$$\frac{\Vert x_n - x^* \Vert_A }{\Vert x^* \Vert_A} \leq \inf_{q_n, q_n(0)=1} \max_{j} |q_n({\lambda_j})|, \quad q_n \in \mathrm{Poly}(n)$$
However, for theoretical estimations we have performed the relaxation:
$$
\frac{\Vert x_n - x^* \Vert_A }{\Vert x^* \Vert_A} \leq \inf_{q_n, q_n(0)=1} \max\limits_{\lambda\in[\lambda_{\min},\lambda_{\max}]} |q_n({\lambda})|, \quad q_n \in \mathrm{Poly}(n)
$$
This can be the reason why in practice *CG* converges much faster, especially taking into account that most of the eigenvalues of $A$ are clustered in the segment $[0, 1]$.
## Problem 2 (40 pts)
### Spectral graph partitioning and inverse iteration
Given a connected graph $G$ and its corresponding graph Laplacian matrix $L = D - A$ with eigenvalues $0=\lambda_1, \lambda_2, ..., \lambda_n$, where $D$ is its degree matrix and $A$ is its adjacency matrix, the *Fiedler vector* is an eigenvector corresponding to the second smallest eigenvalue $\lambda_2$ of $L$. The Fiedler vector can be used for graph partitioning: positive values correspond to one part of the graph and negative values to the other.
### Inverse power method (15 pts)
To find the Fiedler vector we will use the inverse iteration with adaptive shifts (Rayleigh quotient iteration).
* (5 pts) Write down the orthoprojection matrix on the space orthogonal to the eigenvector of $L$, corresponding to the eigenvalue $0$ and prove (analytically) that it is indeed an orthoprojection.
* (5 pts) Implement the spectral partitioning as the function ```partition```:
**Task 2.1.1**
As we know, the eigenvector corresponding to the eigenvalue $0$ equals (up to a non-zero scalar) $e = (1, 1, \dots, 1)^T$. Let's prove that the orthoprojection matrix is as follows:
\begin{gather*}
P = \begin{bmatrix}
\frac{n - 1}{n} & \frac{-1}{n} & \dots & \frac{-1}{n}\\
\frac{-1}{n} & \frac{n - 1}{n} & \ddots & \vdots\\
\vdots & \ddots & \ddots & \frac{-1}{n}\\
\frac{-1}{n} & \dots & \frac{-1}{n} & \frac{n - 1}{n}
\end{bmatrix}_{n\times n} = I_n - \frac{1}{n} \begin{bmatrix}
1 & \dots & 1\\
\vdots &\ddots & \vdots\\
1 & \dots & 1
\end{bmatrix}_{n\times n}
\end{gather*}
Let's prove that $P$ is indeed an orthoprojection. Consider two linear spaces: $E = \mathrm{span}(e)$ and $Q = E^{\perp}$. Our orthoprojection operator $L_{proj}$ projects any vector $v \in \mathbb{R}^{n}$ onto $Q$.
We have the following: $v^E \in E \Leftrightarrow v^E = \alpha e$. Consider any $v \in \mathbb{R}^{n}$. Then there exist unique vectors $v^E \in E$ and $v^Q \in Q$ such that $v = v^E + v^Q$. Then $\sum_{i = 1}^{n} v_i = (v, e) = (v^E, e) \Rightarrow v^E = \left(\frac{\sum\limits_{i = 1}^{n} v_i}{n}\right) e$. Consequently, $v^{Q} = v - \left(\frac{\sum\limits_{i = 1}^{n} v_i}{n}\right) e \Rightarrow L_{proj}( v ) = v - \left(\frac{\sum\limits_{i = 1}^{n} v_i}{n}\right) e$. Obviously, the matrix $P$ defines exactly this orthoprojection operator.
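A quick `numpy` verification of the defining properties of $P$ (a minimal sketch):

```python
import numpy as np

n = 5
P = np.eye(n) - np.ones((n, n)) / n
e = np.ones(n)
print(np.allclose(P @ P, P),   # idempotent
      np.allclose(P, P.T),     # symmetric
      np.allclose(P @ e, 0))   # annihilates the kernel vector e
```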
**Task 2.1.2**
Let's implement the `partition` function
```python
import numpy as np
import scipy.sparse as spsp
from scipy.sparse.linalg import spsolve
def early_stop_activator_factory(eps):
def early_stop_activate(eigs, A, x):
if len(eigs) < 1:
return False, 0.0
# check if we have already reached an eigenpair
l = eigs[-1]
x_mul = A.dot(x)
# print(np.linalg.norm(x_mul - l * x, ord=2))
if np.linalg.norm(x_mul - l * x, ord=2) < eps:
return True, eps
if len(eigs) < 2:
return False, 0.0
l_prev = eigs[-2]
l = eigs[-1]
diff_norm = np.linalg.norm(l - l_prev)
_norm = np.linalg.norm(l)
rel_tol = diff_norm/_norm
x_mul = A.dot(x)
return rel_tol <= eps, rel_tol
return early_stop_activate
def orthoproject(x):
return x - np.mean(x)
def rayleigh_quotient(A, x):
return np.sum(A.dot(x) * x)/np.sum(x * x)
def solve_A_shifted(A, sigma, x):
A_shifted = A - spsp.diags(
(sigma,), (0,), shape=A.shape, format=A.format)
return spsp.linalg.spsolve(A_shifted, x)
def inverse_iteration(A, sigma, x):
x = solve_A_shifted(A, sigma, x)
x = orthoproject(x)  # keep the iterate orthogonal to the kernel of L at every step
x = x / np.linalg.norm(x, ord=2)
return x
def create_graph_laplacian(A):
L = -A.copy()
L.setdiag(-np.asarray(L.sum(axis=-1)).reshape(-1))
return L
# INPUT:
# A - adjacency matrix (scipy.sparse.csr_matrix)
# num_iter_fix - number of iterations with fixed shift (int)
# shift - (float number)
# num_iter_adapt - number of iterations with adaptive shift (int) -- Rayleigh quotient iteration steps
# x0 - initial guess (1D numpy.ndarray)
# OUTPUT:
# x - normalized Fiedler vector (1D numpy.ndarray)
# eigs - eigenvalue estimations at each step (1D numpy.ndarray)
# eps - relative tolerance (float)
def partition(A, shift, num_iter_fix, num_iter_adapt, x0, eps, verbose=True):
L = create_graph_laplacian(A)
x = orthoproject(x0)
x = x/np.linalg.norm(x, ord=2)
eigs = []
early_stop_activate = early_stop_activator_factory(eps)
for i_fix_iter in range(num_iter_fix):
eigs.append(rayleigh_quotient(L, x))
activate, tol = early_stop_activate(eigs, L, x)
x_new = inverse_iteration(L, shift, x)
if activate or np.any(np.isnan(x_new)):
if verbose:
print(
'Early stop activated on fix stage,',
'step {}'.format(i_fix_iter))
return x, np.asarray(eigs), tol
x = x_new
for i_adapt_iter in range(num_iter_adapt):
r = rayleigh_quotient(L, x)
eigs.append(r)
activate, tol = early_stop_activate(eigs, L, x)
x_new = inverse_iteration(L, r, x)
if activate or np.any(np.isnan(x_new)):
if verbose:
print(
'Early stop activated on adapt stage,',
'step {}'.format(i_adapt_iter))
return x, np.asarray(eigs), tol
x = x_new
_, tol = early_stop_activate(eigs, L, x)
return x, np.asarray(eigs), tol
```
Test of our function
```python
# an adjacency matrix
A = np.array([[0, 1., 1., 0], [1., 0, 1., 0], [1., 1, 0, 1], [0, 0, 1, 0]])
sA = spsp.csr_matrix(A)
# graph laplacian matrix
L = np.diag(A.sum(axis=-1)) - A
w, v = np.linalg.eig(L)
f_w, f_v = w[-1], v[:,-1]
f_v = orthoproject(f_v)
f_v /= np.linalg.norm(f_v, ord=2)
print('True Fiedler value: ', f_w)
print('True Fiedler vector: ', f_v*np.sign(f_v[0]))
np.random.seed(42)
x_0 = np.random.rand(4)
f_v_i, f_ws_i, tol = partition(sA, 1e-1, 2, 4, x_0, 1e-10)
print('Got Fiedler value: ', f_ws_i[-1])
print('Got Fiedler vector: ', f_v_i*np.sign(f_v_i[0]))
```
True Fiedler value: 0.9999999999999999
True Fiedler vector: [ 4.08248290e-01 4.08248290e-01 -4.63182642e-17 -8.16496581e-01]
Got Fiedler value: 1.000000587076576
Got Fiedler vector: [ 4.08248290e-01 4.08248291e-01 2.28300038e-12 -8.16496581e-01]
Algorithm must halt before `num_iter_fix + num_iter_adapt` iterations if the following condition is satisfied $$ \boxed{\|\lambda_k - \lambda_{k-1}\|_2 / \|\lambda_k\|_2 \leq \varepsilon} \text{ at some step } k.$$
Do not forget to use the orthogonal projection from above in the iterative process to get the correct eigenvector.
It is also a good idea to use ```shift=0``` before the adaptive strategy is used. This, however, is not possible since the matrix $L$ is singular, and sparse decompositions in ```scipy``` do not work in this case. Therefore, we first use a very small shift instead.
* (3 pts) Generate a random `lollipop_graph` using `networkx` library and find its partition. [Draw](https://networkx.github.io/documentation/networkx-1.9/examples/drawing/labels_and_colors.html) this graph with vertices colored according to the partition.
* (2 pts) Start the method with a random initial guess ```x0```, set ```num_iter_fix=0``` and comment why the method can converge to a wrong eigenvalue.
**Task 2.1.3**
Let's create a `lollipop_graph` and look at the partitioning of the graph produced by the implemented function
```python
import matplotlib.pyplot as plt
import networkx as nx
def draw_partition(graph, f_vec, title='Partitioned lollipop graph'):
n_pos = list(np.where(f_vec > 0)[0])
n_neg = list(np.where(f_vec <= 0)[0])
print('Num positive components: {}'.format(len(n_pos)))
print('Num negative components: {}'.format(len(n_neg)))
pos=nx.spring_layout(graph)
plt.figure(figsize=(8, 6), dpi=100)
nx.draw_networkx_nodes(graph,pos,
nodelist=n_pos,
node_color='r',
node_size=25,
alpha=0.8)
nx.draw_networkx_nodes(graph,pos,
nodelist=n_neg,
node_color='b',
node_size=25,
alpha=0.8)
nx.draw_networkx_edges(graph,pos,width=1.0,alpha=0.5)
plt.axis('off')
plt.title(title)
plt.show()
```
```python
import networkx as nx
np.random.seed(42)
n_max = 40
m_max = 40
n = np.random.randint(0, n_max)
m = np.random.randint(0, m_max)
print('Create lollipop graph with n={} and m={}'.format(n, m))
llg = nx.generators.classic.lollipop_graph(n, m)
A = nx.linalg.graphmatrix.adjacency_matrix(llg)
shift=1e-2
num_iter_fix = 4
num_iter_adapt = 4
eps=1e-10
x_0 = np.random.rand(A.shape[0])
vec, eigs, tol = partition(A, shift, num_iter_fix, num_iter_adapt, x_0, eps)
draw_partition(llg, vec)
true_eig = nx.linalg.algebraic_connectivity(llg)
assert np.abs(true_eig - eigs[-1]) < 1e-12
```
**Task 2.1.4**
Let's do the partitioning with `num_iter_fix = 0`
```python
import networkx as nx
np.random.seed(42)
num_iter_fix = 0
num_iter_adapt = 50
eps=1e-16
llg = nx.generators.classic.lollipop_graph(10, 10)
A = nx.linalg.graphmatrix.adjacency_matrix(llg)
x_0 = np.random.rand(A.shape[0])
vec, eigs, tol = partition(A, 1., num_iter_fix, num_iter_adapt, x_0, eps)
print('Obtained tolerance: ', tol)
print('Obtained Fiedler eigenvalue: ', eigs[-1])
_, true_eigs, _ = spsp.linalg.svds(create_graph_laplacian(A).asfptype(), k=19)
print('True Fiedler eigenvalue: ', true_eigs[0])
print(
'Obtained eigenvalue is a graph laplacian matrix eigenvalue: ',
np.isclose(true_eigs, eigs[-1], atol=1e-16).any())
```
Early stop activated on adapt stage, step 11
Obtained tolerance:  0.0
Obtained Fiedler eigenvalue: 3.909491608266256
True Fiedler eigenvalue:  0.03795998478447482
Obtained eigenvalue is a graph laplacian matrix eigenvalue: True
**Conclusion**
As we can see, the Rayleigh iterations converged to an eigenvalue of the graph Laplacian matrix $L$; however, it is not the second smallest eigenvalue of $L$. This can be explained in the following way:
As we know, the step in Rayleigh iterations looks as follows:
\begin{gather*}
x_{i + 1} = \frac{( L - R(x_i)I)^{-1}x_i}{\Vert ( L - R(x_i)I)^{-1}x_i \Vert }\\
R(x_i) = \frac{\langle x_i, L x_i\rangle}{\langle x_i, x_i\rangle}
\end{gather*}
If we generate the initial guess $x_0$ randomly, then $R(x_0)$ can be any number in the segment $[\lambda_{\min}, \lambda_{\max}]$, where $\lambda_{\min}, \lambda_{\max}$ are the smallest and the largest eigenvalues of $L$ respectively. Therefore, with high probability $R(x_0)$ will be close to an eigenvalue $\lambda$ of $L$ which is different from (and far from) the second smallest eigenvalue of $L$. Because of this, the matrix $(L - R(x_0)I)^{-1}$ (and, with high probability, the matrices $(L - R(x_i)I)^{-1}$ on the subsequent steps) will have a large singular value corresponding to that eigenvalue $\lambda$. Hence, the Rayleigh iterations will amplify convergence towards $\lambda$, not towards the second smallest eigenvalue of $L$.
### Spectral graph properties (15 pts)
* (5 pts) Prove that multiplicity of the eigenvalue $0$ in the spectrum of the graphs Laplacian is the number of its connected components.
* (10 pts) The second-smallest eigenvalue of $L(G)$, $\lambda_2(L(G))$, is often called the algebraic connectivity of the
graph $G$. A basic intuition behind the use of this term is that a graph with a higher algebraic
connectivity typically has more edges, and can therefore be thought of as being “more connected”.
To check this statement, create few graphs with equal number of vertices using `networkx`, one of them should be $C_{30}$ - simple cyclic graph, and one of them should be $K_{30}$ - complete graph. (You also can change the number of vertices if it makes sense for your experiments, but do not make it trivially small).
* Find the algebraic connectivity for the each graph using inverse iteration.
* Plot the dependency $\lambda_2(G_i)$ on $|E_i|$.
* Draw a partition for a chosen graph from the generated set.
* Comment on the results.
**Task 2.2.1**
Suppose we have a graph $G$ with $n$ vertices and $k$ connected components. Consider the graph laplacian matrix $L$ of this graph. Let $\{v_1^j, v_2^j, \dots, v_{n_j}^j\} \subset \mathbb{R}^n$ be columns of the matrix $L$ which correspond to the $j$-th connected component, $j \in \{1, 2, \dots k\}$. Actually, $\sum\limits_{j = 1}^{k} n_j = n$.
Consider any $\lambda \in \mathbb{R}^n$ such, that $L\lambda = 0$ ($\lambda \in \mathrm{ker}(L)$). Therefore,
\begin{gather*}
\sum\limits_{j = 1}^{k}\sum\limits_{i = 1}^{n_j} \lambda_i^j v_i^j = 0
\end{gather*}
(Here $\lambda_i^j$ is the element of vector $\lambda$ which corresponds to column $v_i^j$)
Now, let's note the following fact:
**Statement 1**
\begin{gather*}
j_1 \neq j_2 \Rightarrow \forall i_1 \in \{1, \dots, n_{j_1}\}, \forall i_2 \in \{1, \dots, n_{j_2}\}\, : \, (v_{i_1}^{j_1}, v_{i_2}^{j_2}) = 0
\end{gather*}
$\triangleright$
Suppose, that $v_{i_1}^{j_1}$ corresponds to the vertex $h_1$ and $v_{i_2}^{j_2}$ corresponds to the vertex $h_2$ in the graph $G$.
Consider the $h$-th element of the vectors $v_{i_1}^{j_1}$ and $v_{i_2}^{j_2}$. Let $\alpha_1 = v_{i_1}^{j_1}[h]$ and $\alpha_2 = v_{i_2}^{j_2}[h]$. Now let $k \in \{1, 2\}$ and suppose that $\alpha_k \neq 0$. This means one of the two following cases:
1. $h_k = h$ (in this case $\alpha_k > 0$, since it equals the degree of the vertex $h_k$). But this means that $\alpha_{3 - k} = 0$, because the vertex $h_k$ is not connected with the vertex $h_{3 - k}$, as they belong to different connected components.
2. $h_k \neq h$. Since $\alpha_k \neq 0$ (actually, $\alpha_k = -1$), $h_k$ is connected with $h$. But this means that $h$ is not connected with $h_{3 - k}$ (and $h \neq h_{3 - k}$) for the same reason: $h_k$ and $h_{3 - k}$ belong to different components. Therefore, $\alpha_{3 - k} = 0$.
Taking the aforementioned properties into account, we obtain, that $(v_{i_1}^{j_1}, v_{i_2}^{j_2}) = 0$.
$\triangleleft$
Statement 1 means, that linear subspaces $\mathrm{span}(v_1^j, \dots v_{n_j}^{j})\, , \, j \in \{1, \dots k\}$ are orthogonal to each other.
Therefore:
\begin{gather*}
\sum\limits_{j = 1}^{k}\sum\limits_{i = 1}^{n_j} \lambda_i^j v_i^j = 0 \Leftrightarrow \forall j \in \{1, \dots k\}\,:\, \sum\limits_{i = 1}^{n_j} \lambda_i^j v_i^j = 0
\end{gather*}
Now, let us fix any $j$. Let $i^* = \arg\max\limits_{i \in \{1, \dots n_j\}} |\lambda_i^j|$ (if the argmax is not unique, we select any of them). Without loss of generality, $\lambda_{i^*}^j \geq 0$. Consider the case when $\lambda_{i^*}^j > 0$. Let $h_{i}\, , \, i \in \{1, \dots n_j\}$ be the vertices of the graph $G$, which correspond to the columns $v_{i}^{j}$.
Consider the vertex $h_{i^*}$. Let $h_{i_1}, h_{i_2}, \dots, h_{i_m}$ be the neighbours of $h_{i^*}$. This means that $v_{i^*}^j[h_{i^*}] = m$ and $v_{i_p}^j[h_{i^*}] = -1$ for $p \in \{1, \dots m\}$. Note that $\forall i \in \{1, \dots n_j\}\, , \, i \notin \{ i^*, i_1, \dots i_m\}\, : \, v_{i}^j[h_{i^*}] = 0$. We have the following:
\begin{gather*}
\sum\limits_{i = 1}^{n_j} \lambda_i^j v_i^j = 0 \Rightarrow \sum\limits_{i = 1}^{n_j} \lambda_i^j v_i^j[h_{i^*}] = 0 \Rightarrow
\lambda_{i^*}^j v_{i^*}^j[h_{i^*}] + \sum\limits_{p = 1}^{m} \lambda_{i_p}^j v_{i_p}^j[h_{i^*}] = 0 \Rightarrow\\\Rightarrow
m \lambda_{i^*}^j - \sum\limits_{p = 1}^{m} \lambda_{i_p}^j = 0 \underbrace{\Rightarrow}_{\lambda_{i^*}^j \geq \lambda_{i_p}^j} \lambda_{i^*}^j = \lambda_{i_p}^j\, , \, p \in \{1, \dots m\}
\end{gather*}
Repeating the above argument for the neighbours of $h_{i_1}, \dots, h_{i_m}$ (note that we have proven that $\lambda_{i_p}^j = \max\limits_{i \in \{1, \dots n_j\}} |\lambda_i^j|$ for $p \in \{1, \dots m\}$), then for the neighbours of the neighbours and so forth, we will cover all vertices of the connected component $j$. So we conclude that:
\begin{gather*}
\lambda_1^j = \lambda_2^j = \dots = \lambda_{n_j}^j
\end{gather*}
Let's summarize, what we actually have obtained:
\begin{gather*}
L \lambda = 0 \Leftrightarrow (\Leftarrow \text{ is obvious}, \Rightarrow \text{ we have just proven}) \Leftrightarrow \\
\begin{cases}
\lambda_{i_1} = \alpha_1 \in \mathbb{R}, i \in \{1, \dots, n_1\}\\
\lambda_{i_2} = \alpha_2 \in \mathbb{R}, i \in \{1, \dots, n_2\}\\
\dots \\
\lambda_{i_k} = \alpha_k \in \mathbb{R}, i \in \{1, \dots, n_k\}\\
\end{cases}
\end{gather*}
Obviously, this means that $\mathrm{dim} \left(\mathrm{ker} (L)\right) = k$. Therefore, the eigenvalue $0$ has multiplicity $k$.
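This statement is easy to confirm numerically on a graph with several components (a small sketch using `networkx`; the example graph is arbitrary):

```python
import numpy as np
import networkx as nx

# a graph with 3 connected components
G = nx.disjoint_union_all([nx.cycle_graph(5), nx.cycle_graph(7), nx.path_graph(4)])
L = nx.laplacian_matrix(G).toarray().astype(float)
print("zero eigenvalues:     ", np.sum(np.abs(np.linalg.eigvalsh(L)) < 1e-10))  # 3
print("connected components: ", nx.number_connected_components(G))              # 3
```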
**Task 2.2.2**
```python
import networkx as nx
import numpy as np
import random
from collections import defaultdict
from IPython.display import clear_output
from tqdm import tqdm
random.seed(42)
np.random.seed(42)
n_vertices = 30
n_step = 1 # step in count of edges using in graph
n_repeat = 5 # count of generated graphs with the same count of edges
n_min_edges = n_vertices
n_max_edges = (n_vertices * (n_vertices - 1))//2
# parameters of iteration algorithm:
shift = 1e-2
num_iter_fix = 10
num_iter_adapt = 10
eps = 1e-16
x_0 = np.random.rand(n_vertices)
def get_alg_connectivity(graph):
A = nx.linalg.graphmatrix.adjacency_matrix(graph)
vec, eigs, tol = partition(
A, shift, num_iter_fix, num_iter_adapt, x_0, eps, verbose=False)
return eigs[-1]
results = defaultdict(list)
for curr_n_edges in tqdm(range(n_min_edges, n_max_edges, n_step)):
for _ in range(n_repeat):
graph = nx.dense_gnm_random_graph(n_vertices, curr_n_edges)
eig = get_alg_connectivity(graph)
results[curr_n_edges].append(eig)
# obtaining connectivity for cyclic graph
cgraph = nx.cycle_graph(n_vertices)
ceig = get_alg_connectivity(cgraph)
# obtaining connectivity for complete graph
kgraph = nx.complete_graph(n_vertices)
keig = get_alg_connectivity(kgraph)
```
0%| | 0/405 [00:00<?, ?it/s]D:\Anaconda3\envs\diplom_env\lib\site-packages\scipy\sparse\_index.py:124: SparseEfficiencyWarning: Changing the sparsity structure of a csr_matrix is expensive. lil_matrix is more efficient.
self._set_arrayXarray(i, j, x)
D:\Anaconda3\envs\diplom_env\lib\site-packages\scipy\sparse\linalg\dsolve\linsolve.py:206: MatrixRankWarning: Matrix is exactly singular
warn("Matrix is exactly singular", MatrixRankWarning)
100%|████████████████████████████████████████| 405/405 [00:38<00:00, 10.49it/s]
Now let's show the dependence of $\lambda_2(G_i)$ on $|E_i|$ for random graphs with $n = 30$ vertices
```python
import seaborn as sns
sns.set()
keys = np.repeat(np.array(list(results.keys())), n_repeat)
values = np.array(list(results.values())).reshape(-1)
plt.scatter(keys, values, s=1.)
plt.title('Algebraic connectivity')
plt.xlabel('n edges')
plt.plot(n_vertices, ceig, 'ro', label=r'$C_n$')
plt.plot((n_vertices * (n_vertices - 1))//2, keig, 'go', label=r'$K_n$')
plt.legend()
plt.show()
```
Consider the graph $C_n$ and show its partition
```python
random.seed(41)
np.random.seed(41)
graph = nx.cycle_graph(n_vertices)
A = nx.linalg.graphmatrix.adjacency_matrix(graph)
vec, eigs, tol = partition(
A, shift, num_iter_fix, num_iter_adapt, x_0, eps, verbose=False)
draw_partition(graph, vec, title='Cycle Graph partitioning')
```
**Conclusion**
As we can see from the `Algebraic connectivity` plot, the algebraic connectivity correlates with the number of edges of the graph quite well, so it can indeed be considered a measure of the `connectivity` of a graph.
### Image bipartition (10 pts)
Let us deal here with a graph constructed from a binarized image.
Consider the rule that graph vertices are only pixels with value $1$, and each vertex can have no more than $8$ connected vertices (pixel neighbours), i.e. the graph degree is limited by 8.
* (3 pts) Find an image with minimal size equal to $(256, 256)$ and binarize it such that graph built on black pixels has exactly $1$ connected component.
* (5 pts) Write a function that constructs sparse adjacency matrix from the binarized image, taking into account the rule from above.
* (2 pts) Find the partition of the resulting graph and draw the image in accordance with partition.
**Task 2.3**
At first, let's load the image
```python
# Your solution is here
import numpy as np
from PIL import Image
image = Image.open('./dragon.jpg')
plt.figure(dpi=200)
plt.axis('off')
plt.imshow(image)
plt.show()
```
Now, let's binarize the image
```python
thresh = 200
fn = lambda x : 255 if x > thresh else 0
bin_image = image.convert('L').point(fn, mode='1')
pix_array = 1 - np.array(bin_image).astype(int) # `white` color corresponds to 0
plt.figure(dpi=200)
plt.axis('off')
plt.imshow(1 - pix_array)
plt.show()
```
Now, let's create the adjacency matrix $A$ which corresponds to our image.
First, we set the size of the matrix $A$ to the product of the image dimensions.
In the further steps we remove the unnecessary dimensions (which correspond to white pixels)
```python
gi = lambda i, j : i * pix_array.shape[1] + j
adj_m_shape = pix_array.shape[0] * pix_array.shape[1]
A = spsp.csr_matrix((adj_m_shape, adj_m_shape))
```
Here we perform the filling of the matrix $A$. We map pixel `i,j` of the original image to the index `a(i, j) = i*image.shape[1] + j`, and set `A[a(i_1, j_1), a(i_2, j_2)]` equal to 1 iff pixels `i_1, j_1` and `i_2, j_2` are connected.
```python
def adj_by_direction(arr, sparse_Adj, i, j):
if i == 1 and j == 1:
_sum = arr[:-1, :-1] + arr[1:, 1:]
p_col = [0, 1]
p_row = [0, 1]
elif i == 0 and j == 1:
_sum = arr[:, 1:] + arr[:, :-1]
p_col = [0, 0]
p_row = [0, 1]
elif i == -1 and j == 1:
_sum = arr[:-1, 1:] + arr[1:, :-1]
p_col = [1, 0]
p_row = [0, 1]
elif i == -1 and j == 0:
_sum = arr[1:, :] + arr[:-1, :]
p_col = [0, 1]
p_row = [0, 0]
else:
return None
col, row = np.where(_sum == 2)
ind_1 = (col + p_col[0])*arr.shape[1] + (row + p_row[0])
ind_2 = (col + p_col[1])*arr.shape[1] + (row + p_row[1])
sparse_Adj[ind_1, ind_2] = 1
sparse_Adj[ind_2, ind_1] = 1
return None
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
adj_by_direction(pix_array, A, i, j)
# elements, which corresponds to `black color`
nnvals = np.where(A.getnnz(1)>0)[0]
# here we remove all white-colored pixels
A = A[A.getnnz(1)>0][:,A.getnnz(0)>0]
print(nnvals)
```
[ 172 427 428 ... 102692 102693 102694]
Now, we launch the `partition`.
```python
shift = 1e-8
num_iter_fix = 20
num_iter_adapt = 20
eps = 1e-16
x_0 = np.random.rand(len(nnvals))
vec, eigs, tol = partition(
A, shift, num_iter_fix, num_iter_adapt, x_0, eps, verbose=False)
print(eigs[-1])
```
1.9144195321750935e-05
Here we determine the indices that correspond to the positive part of the partition
```python
pos_ind = np.where(vec > 0)[0]
im_pos_ind = nnvals[pos_ind]
i_pos = im_pos_ind // pix_array.shape[1]
j_pos = im_pos_ind % pix_array.shape[1]
```
Here we assign a new color to the positively partitioned pixels and draw the final partitioned image
```python
pix_array_copy = pix_array.copy()
pix_array_copy[i_pos, j_pos] = 3
```
```python
plt.figure(dpi=200)
plt.axis('off')
plt.imshow(1 - pix_array_copy)
plt.show()
```
## Problem 3 (25 pts)
**Disclaimer**: this problem is released for the first time, so some typos may be found.
## Mathematical model (Navier-Stokes equations)
The governing equations for two-dimensional incompressible
flows can be written in a dimensionless form as:
\begin{equation}\tag{1}
\dfrac{\partial \omega}{\partial t} = \dfrac{1}{Re} \big(\dfrac{\partial^2 \omega}{\partial x^2} + \dfrac{\partial^2 \omega}{\partial y^2}\big) - \big(\dfrac{\partial \psi}{\partial y} \dfrac{\partial \omega}{\partial x} - \dfrac{\partial \psi}{\partial x} \dfrac{\partial \omega}{\partial y}\big),
\end{equation}
along with the kinematic relationship between vorticity $\omega(x,y,t)$ and stream function $\psi(x,y,t)$ according to the Poisson equation, which is given as:
\begin{equation}\tag{2}
\dfrac{\partial^2 \psi}{\partial x^2} + \dfrac{\partial^2 \psi}{\partial y^2} = -\omega.
\end{equation}
We consider equations (1) and (2) in the computational domain $\Omega = [0, 2\pi] \times [0, 2\pi]$ and impose the following periodic boundary conditions:
$$\omega(x,0,t) =\omega(x, 2\pi, t), \quad \omega(0,y,t) =\omega(2\pi, y, t), \quad t \geq 0,$$
and the same for $\psi(x,y,t)$.
Note: the Reynolds number, referred to as $Re$, is a fundamental physical constant that in particular determines whether the fluid flow is laminar or turbulent.
## The animation below represents a particular solution of the Navier-Stokes equations (1) and (2) and you will get it at the end of this problem
# Fourier-Galerkin pseudospectral method
Fourier series expansion based methods are often used for solving problems with periodic boundary conditions. One of the most accurate methods for solving the Navier–Stokes equations in periodic domains is **the pseudospectral method**, which exploits the Fast Fourier Transform (FFT) algorithm.
Outline: the main idea of spectral methods is to write the solution of a differential equation as a sum of certain "basis functions" (e.g. Fourier series, Chebyshev polynomials etc) and then to choose the coefficients in the sum in order to satisfy the differential equation as well as possible.
Comprehensive survey of such methods can be found in [this book](https://depts.washington.edu/ph506/Boyd.pdf).
### Discrete Fourier Transform
We discretize the domain $[0,L_x]\times[0, L_y]$ by introducing a computation **grid** consisting of $N_x \times N_y$ equally spaced points.
The discrete grid coordinates for $i = 0, 1, \ldots, N_x$ and $j = 0, 1, \ldots, N_y$ are given by:
$$x_i = \frac{i L_x}{N_x}, \quad y_j = \frac{j L_y}{N_y}.$$
Note, that since the domain is periodic $x_0 = x_{N_x}$ and $y_0 = y_{N_y}$.
Then, any discrete function $u_{i,j} = u(x_i,y_j)$ can be transformed to the Fourier space using the Discrete Fourier Transform (DFT):
$$
\tilde{u}_{m,n} = \sum_{i = 0}^{N_x - 1}\sum_{j = 0}^{N_y - 1} u_{i, j}e^{
-\mathbf{i}(\frac{2\pi m}{L_x}x_i + \frac{2\pi n}{L_y}y_j)},
$$
and its inverse transform is:
$$ u_{i,j} = \frac{1}{N_x N_y} \sum_{m = -\frac{N_x}{2}}^{\frac{N_x}{2} - 1}\sum_{n = -\frac{N_y}{2}}^{\frac{N_y}{2} - 1} \tilde{u}_{m, n}e^{\mathbf{i}(\frac{2\pi m}{L_x}x_i + \frac{2\pi n}{L_y}y_j)},$$
where $i$ and $j$ represent indices for the physical space (i.e. coordinates in the introduced grid), $m$ and $n$ are indices in the Fourier space (i.e. frequencies).
We also introduce wavenumbers:
$$k_x = \frac{2\pi m}{L_x}, \quad k_y = \frac{2 \pi n}{L_y}.$$
**Bonus question:** how DFT coefficients $\tilde{u}_{m,n}$ relate to coefficients in the truncated Fourier series of $u(x,y)$?
### Differentiation
In Fourier space we can easily perform differentiation with respect to $x$ and $y$. For example, the
first and the second order derivatives of any function $u$ in discrete
domain becomes:
$$ \left(\dfrac{\partial u}{\partial x}\right)_{i,j} = \frac{1}{N_x N_y}\sum_{m = -\frac{N_x}{2}}^{\frac{N_x}{2} - 1}\sum_{n = -\frac{N_y}{2}}^{\frac{N_y}{2} - 1} \tilde{u}_{m, n} (\mathbf{i}k_x) e^{\mathbf{i}(k_x x_i + k_y y_j)}, $$
$$ \left(\dfrac{\partial^2 u}{\partial x^2}\right)_{i,j} = \frac{1}{N_x N_y}\sum_{m = -\frac{N_x}{2}}^{\frac{N_x}{2} - 1}\sum_{n = -\frac{N_y}{2}}^{\frac{N_y}{2} - 1} \tilde{u}_{m, n} (-k_x^2) e^{\mathbf{i}(k_x x_i + k_y y_j)}, $$
and similarly for the derivatives w.r.t. $y$
Assume $L_x = L_y = L = 2\pi$, $N_x = N_y = N$ for simplicity. Then, differentiation $\frac{\partial}{\partial x}$ in the Fourier space can be implemented as follows:
```python
k1d = np.fft.fftfreq(3) * 3
print(k1d)
```
[ 0. 1. -1.]
```python
import numpy as np
import matplotlib.pyplot as plt
```
```python
def dudx(u_tilde, N):
k1d = np.fft.fftfreq(N) * N
return u_tilde * (1j * k1d)
```
Note that we use ```np.fft.fftfreq(N)``` to determine the order of frequencies used by the particular ```numpy``` implementation (see the documentation of the ```numpy.fft``` module for details).
Consider the following example:
```python
L = 2*np.pi # size of computational domain
d = 7
N = 2**d
```
```python
# discretize the domain $[0, 2\pi] \times [0, 2\pi]$ with uniform grid
ls = np.linspace(0, L, N, endpoint=False)
xx, yy = np.meshgrid(ls, ls, indexing='xy')
# define simple periodic function
u = np.sin(xx) * np.sin(yy)
# first, compute du/dx analytically
u_x = np.cos(xx) * np.sin(yy)
# next, compute du/dx in Fourier space
u_tilde = np.fft.ifft2(u)
u_tilde_x = dudx(u_tilde, N)
u_x_fourier = np.fft.ifft2(u_tilde_x)
# check the result
err = np.linalg.norm(u_x - u_x_fourier)
print("error = ", err)
```
error = 5.417463750901354e-13
- (5 pts) Similarly to the implementation of ```dudx(u_tilde, N)``` given above, your first task is to implement the other derivatives arising in the Navier-Stokes equations (1), (2). Loops are prohibited!
```python
def dudy(u_tilde, N):
k2d = np.fft.fftfreq(N) * N
return u_tilde * (-1j * k2d[:, np.newaxis])
def d2udx2(u_tilde, N):
k1d = np.fft.fftfreq(N) * N
return u_tilde * (-1 * k1d ** 2)
def d2udy2(u_tilde, N):
k2d = np.fft.fftfreq(N) * N
return u_tilde * (-1 * k2d[:, np.newaxis] ** 2)
```
Let's test the implemented functions using the same test function as we used for `dudx`
```python
# discretize the domain $[0, 2\pi] \times [0, 2\pi]$ with uniform grid
ls = np.linspace(0, L, N, endpoint=False)
xx, yy = np.meshgrid(ls, ls, indexing='xy')
# the function
u = np.sin(xx) * np.sin(yy)
# compute du/dy analytically
u_y = np.sin(xx) * np.cos(yy)
# compute d2u/dx2 analytically
u2_x2 = - u
# compute d2u/dy2 analytically
u2_y2 = - u
# u_tilde
u_tilde = np.fft.ifft2(u)
# compute the derivatives using the Fourier technique
u_y_fourier = np.fft.fft2(dudy(u_tilde, N))
u2_x2_fourier = np.fft.fft2(d2udx2(u_tilde, N))
u2_y2_fourier = np.fft.fft2(d2udy2(u_tilde, N))
# check the result
print("u_y error: ", np.linalg.norm(u_y - u_y_fourier))
print("u2_x2_error: ", np.linalg.norm(u2_x2 - u2_x2_fourier))
print("u2_y2_error: ", np.linalg.norm(u2_y2 - u2_y2_fourier))
```
u_y error: 5.678806548086086e-13
u2_x2_error: 2.679559280454021e-11
u2_y2_error: 2.684774733733834e-11
### Navier-Stokes equations in the Fourier space
After transforming Eq. (1) and Eq. (2) to the Fourier space, the governing equations become:
\begin{equation}\tag{3}
\frac{\partial \tilde{\omega}_{m,n}}{\partial t} = \frac{1}{Re}[(-k_x^2 - k_y^2)\tilde{\omega}_{m,n}] - \tilde{N},
\end{equation}
\begin{equation}\tag{4}
(-k_x^2 - k_y^2)\tilde{\psi}_{m,n} = -\tilde{\omega}_{m,n},
\end{equation}
where $\tilde{N}$ represents the non-linear term which is computed using 2D convolutions as follows:
$$\tilde{N} = (\mathbf{i}k_y \tilde{\psi}_{m,n}) \circ (\mathbf{i}k_x \tilde{\omega}_{m,n}) - (\mathbf{i}k_x \tilde{\psi}_{m,n}) \circ (\mathbf{i}k_y \tilde{\omega}_{m,n}),$$
i.e. multiplications in physical space become convolutions in the Fourier space.
To clarify where these convolutions come from, consider two discrete functions $u$ and $v$ represented by their DFT (1D for simplicity):
$$ u_{i} = \frac{1}{N_x} \sum_{m = -\frac{N_x}{2}}^{\frac{N_x}{2} - 1} \tilde{u}_{m}e^{\mathbf{i}\frac{2\pi m}{L_x}x_i},$$
$$ v_{i} = \frac{1}{N_x} \sum_{n = -\frac{N_x}{2}}^{\frac{N_x}{2} - 1}\tilde{v}_{n}e^{\mathbf{i}\frac{2\pi n}{L_x}x_i}.$$
Then, the direct multiplication results in:
$$ u_{i} v_{i} = \frac{1}{N_x^2} \sum_{k = -N_x}^{N_x - 2} \tilde{w}_{k}e^{\mathbf{i}\frac{2\pi k}{L_x}x_i},$$
where the coefficients $\tilde{w}_{k}$ are computed as follows (check it!):
$$\tilde{w}_{k} = \sum_{m + n = k}\tilde{u}_m\tilde{v}_n.$$
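This formula can be checked numerically in 1D (a small sketch; note that with `numpy`'s DFT conventions the sum over $m + n = k$ is taken modulo $N_x$, i.e. the convolution is circular — which is exactly why the *full* convolution used below has to be truncated):

```python
import numpy as np

Nx = 8
rng = np.random.default_rng(0)
u, v = rng.random(Nx), rng.random(Nx)
U, V = np.fft.fft(u), np.fft.fft(v)
# circular convolution of the spectra: w_k = sum_m U_m * V_{(k - m) mod Nx}
W = np.array([np.sum(U * V[(k - np.arange(Nx)) % Nx]) for k in range(Nx)])
print(np.allclose(np.fft.fft(u * v), W / Nx))  # True
```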
Below we provide a possible implementation of the 2D convolution using the ```scipy.signal``` module. Note that the *full* convolution introduces higher frequencies that should be truncated in a proper way.
```python
from scipy import signal
def conv2d_scipy(u_tilde, v_tilde, N):
# np.fft.fftshift is used to align implementation and formulas
full_conv = signal.convolve(np.fft.fftshift(u_tilde),\
np.fft.fftshift(v_tilde), mode='full')
trunc_conv = full_conv[N//2:-N//2+1, N//2:-N//2+1]
return np.fft.ifftshift(trunc_conv)/(N*N)
```
(10 pts) Your second task is to implement the same 2D convolution, but this time using the *Convolution Theorem*.
Hint: From the lecture course you should know that applying the *Convolution Theorem* is straightforward when computing **circular** (or periodic) convolutions. However, for this task you should use an appropriate zero-padding by a factor of two (with further truncation).
**Implementation** of `conv2d` is here:
```python
def pad_or_truncate(d_arr, s_arr, dim1, dim2):
lb1 = dim1//2
rb1 = -dim1//2
lb2 = dim2//2
rb2 = -dim2//2
d_arr[:lb1, :lb2] = s_arr[:lb1, :lb2]
d_arr[rb1:, rb2:] = s_arr[rb1:, rb2:]
d_arr[:lb1, rb2:] = s_arr[:lb1, rb2:]
d_arr[rb1:, :lb2] = s_arr[rb1:, :lb2]
return d_arr
def zeropad(arr):
dim1 = arr.shape[0]
dim2 = arr.shape[1]
p_arr = np.zeros((dim1 * 2, dim2 * 2), dtype=arr.dtype)  # preserve (possibly complex) dtype
return pad_or_truncate(p_arr, arr, dim1, dim2)
def truncate(p_arr):
dim1 = p_arr.shape[0]
dim2 = p_arr.shape[1]
assert dim1 % 2 == 0 and dim2 % 2 == 0
dim1 = dim1//2
dim2 = dim2//2
arr = np.zeros((dim1, dim2), dtype=p_arr.dtype)  # preserve (possibly complex) dtype
return pad_or_truncate(arr, p_arr, dim1, dim2)
def conv2d(u_tilde, v_tilde, N):
p_u_tilde = zeropad(u_tilde)
p_v_tilde = zeropad(v_tilde)
p_u = np.fft.fft2(p_u_tilde)
p_v = np.fft.fft2(p_v_tilde)
p_mul = p_u * p_v
p_mul_tilde = np.fft.ifft2(p_mul)
mul_tilde = truncate(p_mul_tilde)
return mul_tilde / N**2
```
```python
# check yourself
u_tilde = np.random.rand(N, N)
v_tilde = np.random.rand(N, N)
err = np.linalg.norm(conv2d(u_tilde, v_tilde, N) - conv2d_scipy(u_tilde, v_tilde, N))
print("error =", err) # should be close to machine precision
```
error = 3.4790371266337506e-15
**Poisson solver**
Finally, we need to solve the Poisson equation Eq. (2) which can be easily computed in the Fourier space according to the Eq. (4).
(5 pts) Implement the inverse of the Laplacian operator according to the template provided below. Note: the Laplacian operator with periodic boundary conditions is singular (since the constant function is in its nullspace). So, in order to avoid division by zero:
1. Assume the problem is always consistent (i.e. $\tilde{\omega}_{0,0} = 0$),
2. Assume $\tilde{\psi}_{0,0} = 0$ (i.e. return normal solution). Loops are prohibited!
```python
def laplace_inverse(omega_tilde, N):
    # solve the Poisson problem in Fourier space: (-kx^2 - ky^2) * psi_tilde = omega_tilde,
    # where the argument plays the role of the right-hand side
    k1d = np.fft.fftfreq(N) * N
    k2 = k1d[np.newaxis, :]**2 + k1d[:, np.newaxis]**2
    k2[0, 0] = 1.0                 # dummy value: consistency (omega_tilde[0, 0] = 0) is assumed
    psi_tilde = -omega_tilde / k2
    psi_tilde[0, 0] = 0.0          # return the normal solution
    return psi_tilde
```
```python
# check yourself
# consider simple solution
sol_analytic = np.sin(xx)*np.sin(yy)
# compute corresponding right hand side analytically
rhs = -2*np.sin(xx)*np.sin(yy)
# solve Poisson problem in Fourier space
rhs_tilde = np.fft.fft2(rhs)
sol_tilde = laplace_inverse(rhs_tilde, N)
sol = np.fft.ifft2(sol_tilde)
# check error is small
err = np.linalg.norm(sol - sol_analytic)
print("error =", err)
```
error = 1.8561658787461062e-14
**Time integration**
Eqs. (3) and (4) can be considered as semi-discrete ordinary differential equations (ODEs) obtained after (spectral) spatial discretization of the partial differential equations (1) and (2):
\begin{equation}\tag{5}
\frac{d \tilde{\omega}}{dt} = \mathcal{L}(\tilde{\omega}, \tilde{\psi}),
\end{equation}
where $\mathcal{L}( \tilde{\omega} , \tilde{\psi})$ is the discrete operator of spatial derivatives including non-linear convective terms, linear diffusive terms, and $\tilde{\psi}$ which is obtained from the Poisson equation (4).
(5 pts) Implement $\mathcal{L}$ according to the template provided below
```python
def L_op(omega_tilde, psi_tilde, N, Re=1):
    # right-hand side of Eq. (3); the sign convention assumes fft2-based spectra,
    # with x along the last axis and y along the first one (as in dudx above)
    k1d = np.fft.fftfreq(N) * N
    kx = k1d[np.newaxis, :]
    ky = k1d[:, np.newaxis]
    diffusion = -(kx**2 + ky**2) * omega_tilde / Re
    # non-linear term: pointwise products in physical space become convolutions here
    N_tilde = conv2d(1j * ky * psi_tilde, 1j * kx * omega_tilde, N) \
            - conv2d(1j * kx * psi_tilde, 1j * ky * omega_tilde, N)
    return diffusion - N_tilde
```
We integrate in time using fourth-order Runge–Kutta scheme that can be written in the following form:
$$\tilde{\omega}^{(1)} = \tilde{\omega}^{n} + \frac{\Delta t}{2}\mathcal{L}(\tilde{\omega}^{n}, \tilde{\psi}^{n})$$
$$\tilde{\omega}^{(2)} = \tilde{\omega}^{n} + \frac{\Delta t}{2}\mathcal{L}(\tilde{\omega}^{(1)}, \tilde{\psi}^{(1)})$$
$$\tilde{\omega}^{(3)} = \tilde{\omega}^{n} + \Delta t\mathcal{L}(\tilde{\omega}^{(2)}, \tilde{\psi}^{(2)})$$
$$\tilde{\omega}^{n+1} = \frac{1}{3}(-\tilde{\omega}^{n} + \tilde{\omega}^{(1)} + 2\tilde{\omega}^{(2)} + \tilde{\omega}^{(3)}) + \frac{\Delta t}{6}\mathcal{L}(\tilde{\omega}^{(3)}, \tilde{\psi}^{(3)})$$
```python
def integrate_runge_kutta(omega0_tilde, N, n_steps, tau, Re):
omega_prev = omega0_tilde
psi_prev = laplace_inverse(-omega_prev, N)
for step in range(n_steps):
if(step%100 == 0):
print(step)
omega_1 = omega_prev + (tau/2)*L_op(omega_prev, psi_prev, N, Re)
psi_1 = -laplace_inverse(omega_1, N)
omega_2 = omega_prev + (tau/2)*L_op(omega_1, psi_1, N, Re)
psi_2 = -laplace_inverse(omega_2, N)
omega_3 = omega_prev + tau*L_op(omega_2, psi_2, N, Re)
psi_3 = -laplace_inverse(omega_3, N)
omega_next = (1./3)*(-omega_prev + omega_1 + 2*omega_2 + omega_3) + (tau/6)*L_op(omega_3, psi_3, N, Re)
psi_next = -laplace_inverse(omega_next, N)
omega_prev = omega_next
psi_prev = psi_next
return omega_prev
```
### Validation with analytical solution
We first consider the Taylor-Green vortex (known analytical solution of the Navier-Stokes equations) to validate our solver:
```python
# Taylor-Green vortex -- analytical solution for validation purposes
def taylor_green_vortex(xx, yy, t, N, Re):
k = 3
omega = 2*k*np.cos(k*xx)*np.cos(k*yy)*np.exp(-2*k**2*t*(1/Re))
return omega
```
```python
Re = 1000
tau = 1e-2 # timestep
n_steps = 100
T = tau * n_steps # final time
omega0 = taylor_green_vortex(xx, yy, 0, N, Re) # initial vorticity
omega0_tilde = np.fft.fft2(omega0) # convert to the Fourier space
omegaT_tilde = integrate_runge_kutta(omega0_tilde, N, n_steps, tau, Re) # integrate in time in the Fourier space
omegaT = np.real(np.fft.ifft2(omegaT_tilde)) # return back to physical space
```
0
```python
# check the error is small
omegaT_analytical = taylor_green_vortex(xx, yy, T, N, Re)
err = np.linalg.norm(omegaT_analytical - omegaT)
print("error =", err)
```
error = 2.3043898350926834e-12
### Shear layer problem
Finally, we consider another (more interesting) initial vorticity that produces the dynamics shown in the GIF at the beginning of this problem.
```python
# initial condition that evolves like a vortex
def shear_layer0(xx, yy, N):
delta = 0.05
sigma = 15/np.pi
a = delta*np.cos(yy[:, :N//2]) - sigma*(np.cosh(sigma*(xx[:, :N//2] - np.pi/2)))**(-2)
b = delta*np.cos(yy[:, N//2:]) + sigma*(np.cosh(sigma*(3*np.pi/2 - xx[:, N//2:])))**(-2)
return np.concatenate((a, b), axis=1)
```
```python
Re = 10000
tau = 1e-3 # timestep
n_steps = 10000
T = tau * n_steps # final time
omega0 = shear_layer0(xx, yy, N) # initial vorticity
omega0_tilde = np.fft.fft2(omega0) # convert to the Fourier space
omegaT_tilde = integrate_runge_kutta(omega0_tilde, N, n_steps, tau, Re) # integrate in time in the Fourier space
omegaT = np.real(np.fft.ifft2(omegaT_tilde)) # return back to physical space
```
0
100
...
9900
```python
# plot the solution at the final timestamp
plt.imshow(omegaT, cmap='jet')  # omegaT is already the real vorticity at the final time
```
|
module Impure.LFRef.Welltyped where
open import Prelude
open import Data.List hiding ([_])
open import Data.List.All hiding (map)
open import Data.Vec as Vec hiding ([_]; map)
open import Data.Star hiding (_▻▻_; map)
open import Data.Sum hiding (map)
open import Extensions.List as L using ()
open import Impure.LFRef.Syntax hiding (subst)
open import Relation.Binary.List.Pointwise using (Rel)
Ctx : (n : ℕ) → Set
Ctx n = Vec (Type n) n
-- store typings
World : Set
World = List (Type 0)
weaken₁-tp : ∀ {n} → Type n → Type (suc n)
weaken₁-tp tp = tp tp/ wk
_:+:_ : ∀ {n} → Type n → Ctx n → Ctx (suc n)
a :+: Γ = (weaken₁-tp a) ∷ (Vec.map (flip _tp/_ wk) Γ)
weaken+-tm : ∀ {m} n → Term m → Term (n + m)
weaken+-tm n t = t / (wk⋆ n)
weaken+-tp : ∀ n → Type 0 → Type n
weaken+-tp zero t = t
weaken+-tp (suc n) t = subst Type (+-right-identity (suc n)) (t tp/ (wk⋆ (suc n)))
weaken+-tele : ∀ {m n} k → Tele n m → Tele (n + k) m
weaken+-tele k T = subst (flip Tele _) (+-comm k _) (T tele/ (wk⋆ k))
-- mutually inductive welltypedness judgments for kinds/types and terms respectively
data _,_,_⊢_teleok : ∀ {n m} → (𝕊 : Sig) → World → Ctx n → Tele n m → Set
data _,_,_⊢_::_ : ∀ {n m} (𝕊 : Sig) → World → Ctx n → Type n → Tele n m → Set
data _,_,_⊢_∶_ : ∀ {n} (𝕊 : Sig) → World → Ctx n → Term n → Type n → Set
data _,_,_⊢_teleok where
ε : ∀ {n 𝕊 Σ} {Γ : Ctx n} → 𝕊 , Σ , Γ ⊢ ε teleok
_⟶_ : ∀ {n m 𝕊 Σ Γ} {A : Type n} {K : Tele (suc n) m}→
𝕊 , Σ , Γ ⊢ A :: ε →
𝕊 , Σ , (A :+: Γ) ⊢ K teleok →
𝕊 , Σ , Γ ⊢ (A ⟶ K) teleok
data _,_,_⊢_∶ⁿ_ {n} (𝕊 : Sig) (Σ : World) (Γ : Ctx n) :
∀ {m} → List (Term n) → Tele n m → Set where
ε : 𝕊 , Σ , Γ ⊢ [] ∶ⁿ ε
_⟶_ : ∀ {m A t ts} {B : Tele (suc n) m}→
𝕊 , Σ , Γ ⊢ t ∶ A →
𝕊 , Σ , Γ ⊢ ts ∶ⁿ (B tele/ (sub t)) →
𝕊 , Σ , Γ ⊢ (t ∷ ts) ∶ⁿ (A ⟶ B)
-- specialize the return type of a constructor from its welltyped arguments
_con[_/_] : ∀ {n} → (C : ConType) → (ts : List (Term n)) → length ts ≡ (ConType.m C) → Type n
_con[_/_] {n} C ts p =
(ConType.tp C) [
map
(flip _/_ (subst (Vec _) p (fromList ts)))
(ConType.indices C)
]
-- specialize the return type of a function from its welltyped arguments
_fun[_/_] : ∀ {n m} → Type m → (ts : List (Term n)) → length ts ≡ m → Type n
_fun[_/_] {n} {m} a ts p = a tp/ subst (Vec _) p ((fromList ts))
data _,_,_⊢_::_ where
Ref : ∀ {n 𝕊 Σ} {Γ : Ctx n} {A} →
𝕊 , Σ , Γ ⊢ A :: ε →
----------------------
𝕊 , Σ , Γ ⊢ Ref A :: ε
Unit : ∀ {n 𝕊 Σ} {Γ : Ctx n} →
---------------------
𝕊 , Σ , Γ ⊢ Unit :: ε
_[_] : ∀ {n 𝕊 Σ} {Γ : Ctx n} {k K ts} →
(Sig.types 𝕊) L.[ k ]= K →
𝕊 , [] , [] ⊢ (proj₂ K) teleok →
𝕊 , Σ , Γ ⊢ ts ∶ⁿ (weaken+-tele n (proj₂ K)) →
-------------------------
𝕊 , Σ , Γ ⊢ k [ ts ] :: ε
data _,_,_⊢_∶_ where
unit : ∀ {n 𝕊 Σ} {Γ : Ctx n} →
-----------------------
𝕊 , Σ , Γ ⊢ unit ∶ Unit
var : ∀ {n 𝕊 Σ} {Γ : Ctx n} {i A} →
Γ [ i ]= A →
---------------------
𝕊 , Σ , Γ ⊢ var i ∶ A
con : ∀ {n 𝕊 Σ} {Γ : Ctx n} {c C ts} →
(Sig.constructors 𝕊) L.[ c ]= C →
(p : 𝕊 , Σ , Γ ⊢ ts ∶ⁿ weaken+-tele n (ConType.args C)) →
(q : length ts ≡ (ConType.m C)) →
------------------------------------
𝕊 , Σ , Γ ⊢ con c ts ∶ (C con[ ts / q ])
loc : ∀ {n 𝕊 Σ} {Γ : Ctx n} {i S} →
Σ L.[ i ]= S →
---------------------
𝕊 , Σ , Γ ⊢ loc i ∶ Ref (weaken+-tp n S)
data _,_,_⊢ₑ_∶_ : ∀ {n} (𝕊 : Sig) → World → Ctx n → Exp n → Type n → Set where
tm : ∀ {n t} {Γ : Ctx n} {𝕊 Σ A} →
𝕊 , Σ , Γ ⊢ t ∶ A →
---------------------
𝕊 , Σ , Γ ⊢ₑ tm t ∶ A
_·★[_]_ : ∀ {n fn ts 𝕊 Σ φ} {Γ : Ctx n} →
(Sig.funs 𝕊) L.[ fn ]= φ →
(q : length ts ≡ (Fun.m φ)) →
(p : 𝕊 , Σ , Γ ⊢ ts ∶ⁿ weaken+-tele n (Fun.args φ)) →
-----------------------------------------------------
𝕊 , Σ , Γ ⊢ₑ (fn ·★ ts) ∶ ((Fun.returntype φ) fun[ ts / q ])
ref : ∀ {n x A 𝕊 Σ} {Γ : Ctx n} →
𝕊 , Σ , Γ ⊢ₑ x ∶ A →
--------------------------
𝕊 , Σ , Γ ⊢ₑ ref x ∶ Ref A
!_ : ∀ {n x A} {Γ : Ctx n} {𝕊 Σ} →
𝕊 , Σ , Γ ⊢ₑ x ∶ Ref A →
----------------------
𝕊 , Σ , Γ ⊢ₑ (! x) ∶ A
_≔_ : ∀ {n i x A} {Γ : Ctx n} {𝕊 Σ} →
𝕊 , Σ , Γ ⊢ₑ i ∶ Ref A →
𝕊 , Σ , Γ ⊢ₑ x ∶ A →
--------------------------
𝕊 , Σ , Γ ⊢ₑ (i ≔ x) ∶ Unit
data _,_,_⊢ₛ_∶_ : ∀ {n} (𝕊 : Sig) → World → Ctx n → SeqExp n → Type n → Set where
ret : ∀ {n x A 𝕊 Σ} {Γ : Ctx n} →
𝕊 , Σ , Γ ⊢ₑ x ∶ A →
---------------------
𝕊 , Σ , Γ ⊢ₛ ret x ∶ A
lett : ∀ {n x c A B 𝕊 Σ} {Γ : Ctx n} →
𝕊 , Σ , Γ ⊢ₑ x ∶ A →
𝕊 , (Σ) , (A :+: Γ) ⊢ₛ c ∶ weaken₁-tp B →
----------------------------------------
𝕊 , Σ , Γ ⊢ₛ lett x c ∶ B
-- telescopes as context transformers
_⊢⟦_⟧ : ∀ {n m} → Ctx n → Tele n m → Ctx (n + m)
Γ ⊢⟦ ε ⟧ = subst Ctx (sym $ +-right-identity _) Γ
_⊢⟦_⟧ {n} Γ (_⟶_ {m = m} x T) = subst Ctx (sym $ +-suc n m) ((x :+: Γ) ⊢⟦ T ⟧)
_⊢_fnOk : Sig → Fun → Set
_⊢_fnOk 𝕊 φ = 𝕊 , [] , ([] ⊢⟦ Fun.args φ ⟧) ⊢ₑ (Fun.body φ) ∶ (Fun.returntype φ)
-- valid signature contexts
record _,_⊢ok {n} (𝕊 : Sig) (Γ : Ctx n) : Set where
field
funs-ok : All (λ x → 𝕊 ⊢ x fnOk) (Sig.funs 𝕊)
-- store welltypedness relation
-- as a pointwise lifting of the welltyped relation on closed expressions between a world and a store
_,_⊢_ : Sig → World → Store → Set
_,_⊢_ 𝕊 Σ μ = Rel (λ A x → 𝕊 , Σ , [] ⊢ (proj₁ x) ∶ A) Σ μ
-- a useful lemma about telescoped terms
tele-fit-length : ∀ {n m 𝕊 Σ Γ ts} {T : Tele n m} → 𝕊 , Σ , Γ ⊢ ts ∶ⁿ T → length ts ≡ m
tele-fit-length ε = refl
tele-fit-length (x ⟶ p) with tele-fit-length p
tele-fit-length (x ⟶ p) | refl = refl
|
REBOL [
Title: "invoice-status"
Date: 12-Sep-2017
Name: "invoice-status.r"
Author: "Mike Yaunish"
File: %invoice-status.r
Version: 1.0
Purpose: {DB-Rider go script. For database:billing_complete and table:invoicestatus}
]
display-query-results {SELECT * FROM invoicestatus WHERE ID > '0' ORDER BY ID DESC}
|
(* Title: HOL/Auth/n_g2kAbsAfter_lemma_on_inv__92.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_g2kAbsAfter Protocol Case Study*}
theory n_g2kAbsAfter_lemma_on_inv__92 imports n_g2kAbsAfter_base
begin
section{*All lemmas on causal relation between inv__92 and some rule r*}
lemma n_n_RecvReq_i1Vsinv__92:
assumes a1: "(r=n_n_RecvReq_i1 )" and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "?P3 s"
apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Ident ''Chan3_1'') ''Cmd'')) (Const InvAck)) (eqn (IVar (Ident ''CurCmd'')) (Const Empty))))" in exI, auto) done
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_n_SendInvAck_i1Vsinv__92:
assumes a1: "(r=n_n_SendInvAck_i1 )" and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "?P3 s"
apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Ident ''Chan2_1'') ''Cmd'')) (Const Inv)) (eqn (IVar (Ident ''CurCmd'')) (Const ReqS))) (eqn (IVar (Field (Ident ''AChan3_1'') ''Cmd'')) (Const InvAck))))" in exI, auto) done
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_n_RecvInvAck_i1Vsinv__92:
assumes a1: "(r=n_n_RecvInvAck_i1 )" and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_n_SendGntS_i1Vsinv__92:
assumes a1: "(r=n_n_SendGntS_i1 )" and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_n_SendGntE_i1Vsinv__92:
assumes a1: "(r=n_n_SendGntE_i1 )" and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_n_ARecvReq_i1Vsinv__92:
assumes a1: "(r=n_n_ARecvReq_i1 )" and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "?P3 s"
apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Ident ''Chan3_1'') ''Cmd'')) (Const InvAck)) (eqn (IVar (Ident ''CurCmd'')) (Const Empty))))" in exI, auto) done
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_n_ASendInvAck_i1Vsinv__92:
assumes a1: "(r=n_n_ASendInvAck_i1 )" and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "?P3 s"
apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Ident ''AChan2_1'') ''Cmd'')) (Const Inv)) (eqn (IVar (Ident ''CurCmd'')) (Const ReqS))) (eqn (IVar (Field (Ident ''Chan3_1'') ''Cmd'')) (Const InvAck))))" in exI, auto) done
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_n_ARecvInvAck_i1Vsinv__92:
assumes a1: "(r=n_n_ARecvInvAck_i1 )" and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_n_ASendGntS_i1Vsinv__92:
assumes a1: "(r=n_n_ASendGntS_i1 )" and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_n_ASendGntE_i1Vsinv__92:
assumes a1: "(r=n_n_ASendGntE_i1 )" and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_n_SendInvS_i1Vsinv__92:
assumes a1: "r=n_n_SendInvS_i1 " and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_SendReqEI_i1Vsinv__92:
assumes a1: "r=n_n_SendReqEI_i1 " and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_ASendReqEI_i1Vsinv__92:
assumes a1: "r=n_n_ASendReqEI_i1 " and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_ASendReqIS_j1Vsinv__92:
assumes a1: "r=n_n_ASendReqIS_j1 " and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_ASendReqES_i1Vsinv__92:
assumes a1: "r=n_n_ASendReqES_i1 " and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_ARecvGntE_i1Vsinv__92:
assumes a1: "r=n_n_ARecvGntE_i1 " and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_ARecvGntS_i1Vsinv__92:
assumes a1: "r=n_n_ARecvGntS_i1 " and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_ASendInvE_i1Vsinv__92:
assumes a1: "r=n_n_ASendInvE_i1 " and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_ASendInvS_i1Vsinv__92:
assumes a1: "r=n_n_ASendInvS_i1 " and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_SendReqES_i1Vsinv__92:
assumes a1: "r=n_n_SendReqES_i1 " and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_SendInvE_i1Vsinv__92:
assumes a1: "r=n_n_SendInvE_i1 " and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_ASendReqSE_j1Vsinv__92:
assumes a1: "r=n_n_ASendReqSE_j1 " and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_RecvGntS_i1Vsinv__92:
assumes a1: "r=n_n_RecvGntS_i1 " and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_SendReqEE_i1Vsinv__92:
assumes a1: "r=n_n_SendReqEE_i1 " and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_RecvGntE_i1Vsinv__92:
assumes a1: "r=n_n_RecvGntE_i1 " and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_Store_i1Vsinv__92:
assumes a1: "\<exists> d. d\<le>N\<and>r=n_n_Store_i1 d" and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_AStore_i1Vsinv__92:
assumes a1: "\<exists> d. d\<le>N\<and>r=n_n_AStore_i1 d" and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_n_SendReqS_j1Vsinv__92:
assumes a1: "r=n_n_SendReqS_j1 " and
a2: "(f=inv__92 )"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
Formal statement is: lemma measurable_translation_subtract: "S \<in> lmeasurable \<Longrightarrow> ((\<lambda>x. x - a) ` S) \<in> lmeasurable" Informal statement is: If $S$ is a Lebesgue measurable set, then the set $S - a$ is also Lebesgue measurable.
|
// Copyright Oliver Kowalke 2009.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#define BOOST_CONTEXT_SOURCE
#include <boost/context/stack_utils.hpp>
extern "C" {
#include <windows.h>
}
#include <cmath>
#include <csignal>
#include <boost/assert.hpp>
namespace {
static SYSTEM_INFO system_info_()
{
SYSTEM_INFO si;
::GetSystemInfo( & si);
return si;
}
static SYSTEM_INFO system_info()
{
static SYSTEM_INFO si = system_info_();
return si;
}
}
namespace boost {
namespace ctx {
BOOST_CONTEXT_DECL
std::size_t default_stacksize()
{
static std::size_t size = 256 * 1024;
return size;
}
BOOST_CONTEXT_DECL
std::size_t minimum_stacksize()
{
static std::size_t stacksize(
static_cast< std::size_t >( system_info().dwAllocationGranularity) );
return stacksize;
}
BOOST_CONTEXT_DECL
std::size_t maximum_stacksize()
{
BOOST_ASSERT( ! is_stack_unbound() );
static std::size_t stacksize = 8 * 1024 * 1024;
return stacksize;
}
// Windows seems not to provide a limit for the stacksize
BOOST_CONTEXT_DECL
bool is_stack_unbound()
{ return true; }
BOOST_CONTEXT_DECL
std::size_t pagesize()
{
static std::size_t pagesize(
static_cast< std::size_t >( system_info().dwPageSize) );
return pagesize;
}
BOOST_CONTEXT_DECL
std::size_t page_count( std::size_t stacksize)
{
return static_cast< std::size_t >(
std::ceil(
static_cast< float >( stacksize) / pagesize() ) );
}
}}
|
\chapter{Clustering}
\begin{multicols*}{2}
\noindent Clustering is the process of grouping a set of objects into classes of similar objects.\\
\noindent Ideally, we would measure the similarity of documents by semantic similarity. In practice, however, we use Euclidean distance and cosine similarity, which capture term-statistical similarity.
\section{Partition Clustering: K-means Clustering}
\noindent Algorithm:
\begin{itemize}
\item Pick $k$ seeds
\item Assign each data point to the nearest cluster
\item Compute the centroids of the $k$ clusters
\item Repeat until converged (a Python sketch follows below)
\end{itemize}
\noindent The sum of squared distances from each point to its cluster centroid should decrease with each iteration:
$$E=\sum_k \sum_{i \in C_k} (d_i - c_k)^2$$
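\noindent The loop above maps directly onto code. Below is a minimal Python sketch (my own illustration, assuming the data sit in a NumPy array; it does not handle empty clusters):
\begin{verbatim}
import numpy as np

def kmeans(X, k, iters=100, seed=0):
    rng = np.random.default_rng(seed)
    # Pick k data points as the initial seeds
    centroids = X[rng.choice(len(X), k, replace=False)]
    for _ in range(iters):
        # Assign each point to its nearest centroid
        dists = np.linalg.norm(X[:, None] - centroids[None, :], axis=2)
        labels = dists.argmin(axis=1)
        # Recompute the k centroids
        new = np.array([X[labels == j].mean(axis=0) for j in range(k)])
        if np.allclose(new, centroids):   # converged
            break
        centroids = new
    return labels, centroids
\end{verbatim}
\noindent Each iteration can only decrease (or keep) $E$, which is why the loop converges.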
\section{Hierarchical Agglomerative Clustering}
\noindent Start with points as individual clusters. At each step, merge the closest pair of clusters until only one cluster is left.
\noindent Algorithm:
\begin{itemize}
\item Compute the proximity matrix
\item Let each data point be a cluster
\item Repeat:
\begin{itemize}
\item Merge the two closest clusters
\item Update the proximity matrix
\end{itemize}
\item Until only a single cluster remains
\end{itemize}
\noindent Four ways to update the proximity matrix:
\begin{itemize}
\item Single-link: similarity of closest points (see the sketch after this list)
\item Complete-link: similarity of furthest points
\item Centroid: similarity of centroids
\item Average-link: Average cosine between pairs of points
\end{itemize}
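\noindent As referenced above, here is a naive Python sketch of the single-link variant (my own illustration; it is $O(n^3)$, so real implementations such as SciPy's \texttt{linkage} use cleverer updates):
\begin{verbatim}
import numpy as np

def single_link_hac(X):
    clusters = [[i] for i in range(len(X))]              # each point starts alone
    D = np.linalg.norm(X[:, None] - X[None, :], axis=2)  # proximity matrix
    merges = []
    while len(clusters) > 1:
        # Find the closest pair of clusters; single link uses the
        # distance between their two closest members
        a, b, best = 0, 1, float("inf")
        for i in range(len(clusters)):
            for j in range(i + 1, len(clusters)):
                d = min(D[p, q] for p in clusters[i] for q in clusters[j])
                if d < best:
                    a, b, best = i, j, d
        merges.append((clusters[a], clusters[b], best))
        clusters[a] = clusters[a] + clusters[b]          # merge the pair
        del clusters[b]
    return merges
\end{verbatim}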
\section{Hierarchical Divisive Clustering}
Start with one cluster. At each step, split a cluster until each cluster contains a single point.
\section{Evaluation}
\noindent Good clustering means the intra-class similarity is high and inter-class similarity is low. \\
\noindent Purity: the ratio between the count of the dominant class in a cluster and the size of that cluster, where $n_{ij}$ is the number of members of class $j$ in cluster $\omega_i$ of size $n_i$:
$$\text{Purity}(\omega_i) = \frac{1}{n_i} \max_{j} \, n_{ij}$$
\subsection{Rand Index}
\begin{center}
\begin{tabular}{ |c|c c| }
\hline
Number of points & Same cluster & Different cluster \\
\hline
Same Truth & A & C \\
Different Truth & B & D \\
\hline
\end{tabular}
\end{center}
$$RI=\frac{A+D}{A+B+C+D}$$
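\noindent A Python sketch computing both metrics from hypothetical predicted and true label vectors (overall purity here is the size-weighted average of the per-cluster values defined above):
\begin{verbatim}
from collections import Counter
from itertools import combinations

def purity(pred, truth):
    total = 0
    for c in set(pred):
        members = [t for p, t in zip(pred, truth) if p == c]
        total += Counter(members).most_common(1)[0][1]  # dominant class count
    return total / len(pred)

def rand_index(pred, truth):
    A = B = C = D = 0
    for (p1, t1), (p2, t2) in combinations(zip(pred, truth), 2):
        if t1 == t2 and p1 == p2:
            A += 1   # same truth, same cluster
        elif t1 != t2 and p1 == p2:
            B += 1   # different truth, same cluster
        elif t1 == t2 and p1 != p2:
            C += 1   # same truth, different cluster
        else:
            D += 1   # different truth, different cluster
    return (A + D) / (A + B + C + D)
\end{verbatim}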
\end{multicols*}
|
module Main
import Data.Vect
data Box = X | O | Empty
isEmpty : Box -> Bool
isEmpty Empty = True
isEmpty _ = False
Board : (size : Nat) -> Type
Board size = Vect size (Vect size Box)
newBoard : (size : Nat) -> Board size
newBoard size = replicate size (replicate size Empty)
markColumn : Box -> Vect x Box -> Fin x -> Maybe (Vect x Box)
markColumn mark (x :: xs) FZ = case x of
Empty => Just (mark :: xs)
_ => Nothing
markColumn mark (x :: xs) (FS y) = case markColumn mark xs y of
Nothing => Nothing
Just rest => Just (x :: rest)
markSquare : Vect x (Vect y Box) -> Box -> Fin x -> Fin y -> Maybe (Vect x (Vect y Box))
markSquare (x :: xs) mark FZ j = case markColumn mark x j of
Nothing => Nothing
Just new_x => Just (new_x :: xs)
markSquare (x :: xs) mark (FS i) j = case markSquare xs mark i j of
Nothing => Nothing
Just rest => Just (x :: rest)
checkVect : Vect x Box -> Maybe Box
checkVect [] = Just Empty
checkVect (x :: xs) = let rest = checkVect xs in
match x rest where
match : Box -> Maybe Box -> Maybe Box
match x Nothing = Nothing
match X (Just X) = Just X
match X (Just O) = Nothing
match X (Just Empty) = Just X
match O (Just X) = Nothing
match O (Just O) = Just O
match O (Just Empty) = Just O
match Empty (Just y) = Nothing
checkRows : Vect x (Vect y Box) -> Maybe Box
checkRows [] = Nothing
checkRows (x :: xs) = case checkVect x of
Just X => Just X
Just O => Just O
_ => checkRows xs
checkCols : Vect x (Vect y Box) -> Maybe Box
checkCols board = checkRows (transpose board)
checkDiags : Board size -> Maybe Box
checkDiags {size} board = case checkVect (diag board) of
Just a => Just a
Nothing => case checkVect (diag (reverse board)) of
Just a => Just a
Nothing => Nothing
findLegalMoves : Vect x (Vect y Box) -> Nat -> List (Nat, Nat)
findLegalMoves [] i = []
findLegalMoves (x :: xs) i = let hits = the (List Nat) (findIndices isEmpty (toList x)) in
(zip (replicate (length hits) i) hits) ++ (findLegalMoves xs (i+1))
checkDraw : Board size -> Bool
checkDraw board = (length (findLegalMoves board 0)) == 0
checkVictory : Board size -> Maybe Box
checkVictory b = case checkRows b of
Just X => Just X
Just O => Just O
Nothing => case checkCols b of
Just X => Just X
Just O => Just O
Nothing => case checkDiags b of
Just X => Just X
Just O => Just O
Nothing => case checkDraw b of
True => Just Empty
False => Nothing
readNumber : IO (Maybe Nat)
readNumber = do input <- getLine
if all isDigit (unpack input)
then pure (Just (cast input))
else pure Nothing
getMove : Board size -> IO (Fin size, Fin size)
getMove board {size} = do putStr "Enter row: "
Just row <- readNumber | Nothing => getMove board
let fRow = natToFin row size
putStr "Enter col: "
Just col <- readNumber | Nothing => getMove board
let fCol = natToFin col size
case finalizeMove fRow fCol of
Nothing => getMove board
Just move => pure move
where
finalizeMove : Maybe (Fin b) -> Maybe (Fin b) -> Maybe (Fin b, Fin b)
finalizeMove Nothing y = Nothing
finalizeMove (Just x) Nothing = Nothing
finalizeMove (Just x) (Just y) = Just (x, y)
formattedRow : Vect n String -> String
formattedRow [] = ""
formattedRow (x :: xs) = x ++ (formattedRow xs)
stringify : Box -> String
stringify X = "X"
stringify O = "O"
stringify Empty = " "
showBoard : Vect n (Vect m Box) -> String
showBoard [] = ""
showBoard (x :: xs) {m} = (formattedRow (intersperse "|" (map stringify x))) ++ "\n" ++
rest xs where
rest : Vect n' (Vect m Box) -> String
rest [] = showBoard xs
rest xs = (formattedRow (replicate (2 * m) "-")) ++ "\n" ++ (showBoard xs)
flipPlayer : Box -> Box
flipPlayer X = O
flipPlayer O = X
flipPlayer Empty = Empty
runGame : Board size -> Box -> IO ()
runGame [] player = putStrLn "weird board!"
runGame board player = do putStrLn (showBoard board)
putStrLn ((stringify player) ++ "'s turn: ")
move <- getMove board
case markSquare board player (fst move) (snd move) of
Nothing => runGame board player
Just new_board => case checkVictory new_board of
Nothing => runGame new_board (flipPlayer player)
Just Empty => putStrLn "Draw"
Just b => putStrLn ((stringify b) ++ " wins!")
main : IO ()
main = do runGame (newBoard 3) X
|
full_algorithm <- function(Kc, Kr, S0, S1, MW, wt, minimum_threshold) {
# Two-way Tanimoto Algorithm
# ===========================
# Parameters:
# Kc Integer, how many neighbors to select for consumers
# Kr Integer, how many neighbors to select for resources
# S0 A large set of species and their preys, with column structure ['taxon', 'taxonomy', 'resource', 'non-resource']
# S1 The subset of S0 where we want to predict new preys, string vector with taxa name
# MW        Minimum weight to accept a candidate as a prey
# Output
# A vector of sets (the preys for each species)
predictions <- matrix(nrow = length(S1), ncol = 3, data = "", dimnames = list(c(S1), c('consumer','resource_empirical','resource_predictions'))) # empty object for resource predictions
predictions[, 'consumer'] <- S1
pb <- txtProgressBar(min = 0,max = length(S1), style = 3)
for(i in 1:length(S1)) { # loop through each taxon in S1
candidates <- matrix(nrow = 0, ncol = 2, dimnames = list(c(), c('resource', 'weight')), data = NA) # empty matrix for resource candidate list for S1[i], with taxon name and weight
resources.S1 <- unlist(strsplit(S0[S1[i], 'resource'], " \\|\\ ")) # resources of S1[i]
# Add resources that are already listed as resources for S1[i] in predictions[, 'resource_empirical'] or
# Find similar resources to resources for S1[i] in S1
if(length(resources.S1) > 0) {
empirical <- character()
for(j in 1:length(resources.S1)) { #loop through empirical resources for S1
if(resources.S1[j] %in% S1) {
empirical <- c(empirical, resources.S1[j]) # observed resource found in S1 are automatically added to the column resource_empirical
} else { # selecting Kr most similar resources in S1
# Let's assume for this part that we are not compiling a different similarity measure for predators and preys.
similarity.resources <- similarity_full_algorithm(S0 = unique(S0[which(S0[, 'taxon'] %in% S1 | S0[, 'taxon'] == resources.S1[j]), ]), # S1 in S0 + resource for which similarity has to be measured
S1 = resources.S1[j], # resource for which similarity has to be measured
wt = wt,
taxa = 'resource')
similar.resource <- matrix(nrow = nrow(similarity.resources), ncol = 2, dimnames = list(c(), c('resource','similarity')), data = NA) # importing K nearest neighbors resources
similar.resource[, 'resource'] <- names(similarity.resources[order(similarity.resources, decreasing = TRUE), ])
similar.resource[, 'similarity'] <- similarity.resources[order(similarity.resources, decreasing = TRUE)]
to.remove <- which(similar.resource[,'resource'] == resources.S1[j])
if(length(to.remove) > 0){ # remove resources.S1[j] in case it gets through (just keeping it consistent with other similarity evaluations further down in the catalogue, even though it is not necessary in this portion)
similar.resource <- similar.resource[-which(similar.resource[,'resource'] == resources.S1[j]), ]
}
# If multiple taxa with same similarity, randomly select those that will be used as similar resources.
if(similar.resource[Kr+1, 'similarity'] == similar.resource[Kr, 'similarity']) {
same.similarity <- which(similar.resource[, 'similarity'] == similar.resource[Kr, 'similarity'])
similar.resource[same.similarity, ] <- similar.resource[sample(same.similarity), ]
similar.resource <- similar.resource[1:Kr, ]
} else {
similar.resource <- similar.resource[1:Kr, ]
}# if for random draw
for(l in 1:Kr) { # extracting resource candidates
if(all.equal(similar.resource[, 'similarity'], rep('0',Kr)) == TRUE) { # if similarities all == 0, break
break
} else if(similar.resource[l, 'similarity'] == '0') { # if similarity l == 0, no candidates provided
NULL
# minimum threshold: trying it out as a parameter.. might not make sense, have to discuss it. If we keep it, the previous else-ifs can be removed
} else if(similar.resource[l, 'similarity'] < minimum_threshold) {
NULL
} else if((similar.resource[l, 'resource'] %in% candidates[, 'resource']) == TRUE) { # if candidate is already in candidate list, add resource' with wt to its weight
candidates[which(candidates[, 'resource'] == similar.resource[l]), 'weight'] <- as.numeric(candidates[which(candidates[, 'resource'] == similar.resource[l]), 'weight']) + as.numeric(similar.resource[l, 'similarity'])
} else {
candidates <- rbind(candidates, similar.resource[l, ]) # if candidate is not in the list, add it resource' with wt to its weight
}#if3
}#l
}#if
}#j
predictions[S1[i], 'resource_empirical'] <- paste(empirical, collapse = ' | ')
}#if1
# Identify similar consumers to S1[i]
similarity.consumers <- similarity_full_algorithm(S0 = S0,
S1 = S1[i],
wt = wt,
taxa = 'consumer')
similar.consumer <- matrix(nrow = nrow(similarity.consumers), ncol = 2, dimnames = list(c(), c('consumer','similarity')), data = NA) # importing K nearest neighbors consumers
similar.consumer[, 'consumer'] <- names(similarity.consumers[order(similarity.consumers, decreasing = TRUE), ])
similar.consumer[, 'similarity'] <- similarity.consumers[order(similarity.consumers, decreasing = TRUE)]
to.remove <- which(similar.consumer[,'consumer'] == S1[i])
if(length(to.remove) > 0){ # remove S1[i] itself in case it gets through (keeping this consistent with the other similarity evaluations in the catalogue)
similar.consumer <- similar.consumer[-which(similar.consumer[,'consumer'] == S1[i]), ]
}
# If multiple taxa with same similarity, randomly select those that will be used as similar resources.
if(similar.consumer[Kc+1, 'similarity'] == similar.consumer[Kc, 'similarity']) {
same.similarity <- which(similar.consumer[, 'similarity'] == similar.consumer[Kc, 'similarity'])
similar.consumer[same.similarity, ] <- similar.consumer[sample(same.similarity), ]
similar.consumer <- similar.consumer[1:Kc, ]
} else {
similar.consumer <- similar.consumer[1:Kc, ]
}# if for random draw
# Does the similarity value matter when attributing preys?
# If yes, we could add an argument called wt_predator.
# if(wt_predator == FALSE) {
# resources <- unique of all prey species of all similar predators
# } else {}
for(j in 1:Kc) { #loop through consumers
if(all.equal(similar.consumer[, 'similarity'], rep('0',Kc)) == TRUE) { # if similarities all == 0, break
break
} else if(similar.consumer[j, 'similarity'] == '0') { # if similarity l == 0, no candidates provided
NULL
} else if(similar.consumer[j, 'similarity'] < minimum_threshold) {
NULL
} else {
# It's possible that consumers in the list have high taxonomic similarity, but no recorded resource
candidate.resource <- unlist(strsplit(S0[similar.consumer[j, 'consumer'], 'resource'], " \\|\\ ")) # list of resources for consumer j
# candidate.resource <- candidate.resource[(candidate.resource %in% resources.S1) == FALSE] # subtracting candidate resources that are already listed as resources for S1[i] and hence considered in the preceding code segment
for(k in 1:length(candidate.resource)) { # loop through resources of consumer j
if(length(candidate.resource) == 0) { # if candidate resource list is empty, break
break
} else if(candidate.resource[1] == "") { # if candidate list is an empty vector "", break
break
} else if(candidate.resource[k] == S1[i]) {
# #// FIXME: if candidate resource is taxon for which predictions are being made, break (unless we want to allow CANNIBALISM). Add argument for cannibalism allowed or not
NULL
} else if((candidate.resource[k] %in% S1) == TRUE) {
if((candidate.resource[k] %in% candidates[, 'resource']) == TRUE) {# if candidate is already in candidate list, add 1 to its weight
candidates[which(candidates[, 'resource'] == candidate.resource[k]), 'weight'] <- as.numeric(candidates[which(candidates[, 'resource'] == candidate.resource[k]), 'weight']) + 1
} else {
candidates <- rbind(candidates, c(candidate.resource[k], 1)) # if candidate is not in the list, add it with 1 to its weight
}#if2
} else {
similarity.resources <- similarity_full_algorithm(S0 = unique(S0[which(S0[, 'taxon'] %in% S1 | S0[, 'taxon'] == candidate.resource[k]), ]), # S1 in S0 + resource for which similarity has to be measured
S1 = candidate.resource[k], # resource for which similarity has to be measured
wt = wt,
taxa = 'resource')
similar.resource <- matrix(nrow = nrow(similarity.resources), ncol = 2, dimnames = list(c(), c('resource','similarity')), data = NA) # importing K nearest neighbors resources
similar.resource[, 'resource'] <- names(similarity.resources[order(similarity.resources, decreasing = TRUE), ])
similar.resource[, 'similarity'] <- similarity.resources[order(similarity.resources, decreasing = TRUE)]
to.remove <- which(similar.resource[,'resource'] == candidate.resource[k])
if(length(to.remove) > 0){ # remove candidate.resource[k] in case it gets through (keeping this consistent with the other similarity evaluations in the catalogue)
similar.resource <- similar.resource[-which(similar.resource[,'resource'] == candidate.resource[k]), ]
}
# If multiple taxa with same similarity, randomly select those that will be used as similar resources.
if(similar.resource[Kr+1, 'similarity'] == similar.resource[Kr, 'similarity']) {
same.similarity <- which(similar.resource[, 'similarity'] == similar.resource[Kr, 'similarity'])
similar.resource[same.similarity, ] <- similar.resource[sample(same.similarity), ]
similar.resource <- similar.resource[1:Kr, ]
} else {
similar.resource <- similar.resource[1:Kr, ]
}# if for random draw
for(l in 1:Kr) { # extracting resource candidates
if(all.equal(similar.resource[, 'similarity'], rep('0',Kr)) == TRUE) { # if similarities all == 0, break
break
} else if(similar.resource[l, 'similarity'] == '0') { # if similarity l == 0, no candidates provided
NULL
# minimum threshold: trying it out as a parameter.. might not make sense, have to discuss it. If we keep it, the previous else-ifs can be removed
} else if(similar.resource[l, 'similarity'] < minimum_threshold) {
NULL
} else if((similar.resource[l, 'resource'] %in% candidates[, 'resource']) == TRUE) { # if candidate is already in candidate list, add 1 to its weight
candidates[which(candidates[, 'resource'] == similar.resource[l]), 'weight'] <- as.numeric(candidates[which(candidates[, 'resource'] == similar.resource[l]), 'weight']) + as.numeric(similar.resource[l, 'similarity'])
} else {
candidates <- rbind(candidates, similar.resource[l, ]) # if candidate is not in the list, add it with its weight = similarity
}#if3
}#l
} #if1
}#k
}#if
}#j
candidates <- candidates[which(candidates[, 'weight'] >= MW), ] # remove candidates with a weight below MW
if(is.matrix(candidates) == TRUE) { #if it's a vector, there's only one predicted resource, no need to order
candidates <- candidates[order(candidates[, 'weight']), ] # sort candidates according to their weight
predictions[S1[i], 'resource_predictions'] <- paste(candidates[, 'resource'], collapse = ' | ')
} else {
predictions[S1[i], 'resource_predictions'] <- paste(candidates['resource'], collapse = ' | ')
}#if
setTxtProgressBar(pb, i)
}#i
close(pb)
return(predictions)
}#full algorithm function
|
lemma poly_eqI: "(\<And>n. coeff p n = coeff q n) \<Longrightarrow> p = q"
|
If $a \neq 0$, then the degree of the monomial $a x^n$ is $n$.
|
Require Import GHC.Base.
(* For use when skipping a list comprehension binding because of `skip constructor` *)
Definition nil_skipped {a} (_skipped : String) : list a := nil.
|
# ---
# title: 371. Sum of Two Integers
# id: problem371
# author: Indigo
# date: 2021-09-26
# difficulty: Medium
# categories: Bit Manipulation
# link: <https://leetcode.com/problems/sum-of-two-integers/description/>
# hidden: true
# ---
#
# Calculate the sum of two integers _a_ and _b_, but you are **not allowed** to
# use the operators `+` and `-`.
#
# **Example 1:**
#
#
#
# Input: a = 1, b = 2
# Output: 3
#
#
# **Example 2:**
#
#
#
# Input: a = -2, b = 3
# Output: 1
#
#
#
## @lc code=start
using LeetCode
function get_sum371(a::Int, b::Int)::Int
while b != 0
carry = (a & b) << 1  # bits set in both operands produce a carry
a = a ⊻ b             # XOR adds the bits without carrying
b = carry             # feed the carry back into the next round
end
a
end
## @lc code=end
|
\documentclass[11pt]{article}
\usepackage{url}
\usepackage{pgfplots}
\pgfplotsset{compat=newest}
\setlength\topmargin{-0.6cm}
\setlength\textheight{23.4cm}
\setlength\textwidth{17.0cm}
\setlength\oddsidemargin{0cm}
\begin{document}
\title{Ling 572 Reading 2}
\author{Daniel Campos \tt {[email protected]}}
\date{02/19/2019}
\maketitle
\section{ Q1: What is a hyperplane? What does each axis in the feature space represent? }
A hyperplane is a subspace of the feature space that is used to separate data. The hyperplane has one dimension less than its feature space, which lets it divide the space into two halves. Each axis in the feature space represents a feature that is used in classification, ranging from word occurrences to pixel symmetry.
\section{ Q2: What does SVM try to optimize?}
The SVM tries to maximize the width of the margin around the separating hyperplane. In other words, the SVM looks for the hyperplane whose margin is as wide as possible while still separating the positive and negative training points.
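For reference (this formulation is standard and not taken from the reading), the margin width is $2/\Vert w \Vert$, so maximizing it is equivalent to
$$\min_{w,b} \frac{1}{2}\Vert w \Vert^2 \quad \text{subject to} \quad y_i(w \cdot x_i + b) \geq 1 \text{ for all } i.$$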
\section{ Q3:What is a kernel? What’s the benefit of using kernels?}
A kernel is a method used in machine learning to implicitly transform data into a higher-dimensional space in which a clear dividing margin between classes exists. While one can manually transform data into a higher dimensionality, this can be computationally expensive; by using kernels, SVMs reap the same benefits through a single inner-product computation.
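A standard worked example (my own, not from the reading): in two dimensions the quadratic kernel $K(x,z) = (x \cdot z)^2$ expands as
$$(x_1 z_1 + x_2 z_2)^2 = x_1^2 z_1^2 + 2 x_1 x_2 z_1 z_2 + x_2^2 z_2^2 = \phi(x) \cdot \phi(z), \quad \phi(x) = (x_1^2,\ \sqrt{2}\,x_1 x_2,\ x_2^2),$$
so the SVM works in the three-dimensional feature space without ever constructing $\phi(x)$ explicitly.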
\section{Q4: What is soft margin? Why do we need it?}
A soft margin is a modification to the SVM method that allows the margin to contain some training examples. It is needed because training data commonly have noisy labels or are not linearly separable. It is a simple way to tolerate a few points inside the margin in order to build the best overall hyperplane.
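For reference, the standard soft-margin objective (again not taken from the reading) adds slack variables $\xi_i$ measuring how far each point violates the margin, with $C$ trading off margin width against violations:
$$\min_{w,b,\xi} \frac{1}{2}\Vert w \Vert^2 + C \sum_i \xi_i \quad \text{subject to} \quad y_i(w \cdot x_i + b) \geq 1 - \xi_i,\ \ \xi_i \geq 0.$$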
\end{document}
|
Peter Cousins covers what we should know if we are to take on a cave survey for real.
Caving has entered a new phase as cavers look long and hard at the caves of the Himalayas.
Some cave rescue teams are considering the difficulties of rescuing an injured caver from some of the latest super-severe pots that are being opened up by exploration.
Digging on Yorkshire’s Gragareth has revealed a new and serious undertaking: The Mohole.
New rescue dumps have been placed in Little Neath River Cave.
Descent (19), November 1971 SECOND-HAND Collector's copy: price includes postage and takes condition and scarcity into account. See Description tab for details of condition and the Notes tab for general information about second-hand copies of Descent.
Descent (9), December 1969 SECOND-HAND Collector's copy: price includes postage and takes condition and scarcity into account. See Description tab for details of condition and the Notes tab for general information about second-hand copies of Descent.
Descent (26), January 1974 SECOND-HAND Collector's copy: price includes postage and takes condition and scarcity into account. See Description tab for details of condition and the Notes tab for general information about second-hand copies of Descent.
Descent (29), August 1974 SECOND-HAND Collector's copy: price includes postage and takes condition and scarcity into account. See Description tab for details of condition and the Notes tab for general information about second-hand copies of Descent.
Descent (16), January 1971 SECOND-HAND Collector's copy: price includes postage and takes condition and scarcity into account. See Description tab for details of condition and the Notes tab for general information about second-hand copies of Descent.
Descent (42), July 1979 SECOND-HAND Collector's copy: price includes postage and takes condition and scarcity into account. See Description tab for details of condition and the Notes tab for general information about second-hand copies of Descent.
Descent (41), March 1979 SECOND-HAND Collector's copy: price includes postage and takes condition and scarcity into account. See Description tab for details of condition and the Notes tab for general information about second-hand copies of Descent.
Descent (33), April 1976 SECOND-HAND Collector's copy: price includes postage and takes condition and scarcity into account. See Description tab for details of condition and the Notes tab for general information about second-hand copies of Descent.
|
\section{Conclusion and outlook}
|
(* Author: Tobias Nipkow, Dmitriy Traytel *)
section \<open>Framework Instantiations using Marked Regular Expressions\<close>
(*<*)
theory Position_Autos
imports
Automaton
begin
(*>*)
subsection \<open>Marked Regular Expressions\<close>
type_synonym 'a mrexp = "(bool * 'a) rexp"
abbreviation "strip \<equiv> map_rexp snd"
primrec mrexps :: "'a rexp \<Rightarrow> ('a mrexp) set" where
"mrexps Zero = {Zero}"
| "mrexps One = {One}"
| "mrexps (Atom a) = {Atom (True, a), Atom (False, a)}"
| "mrexps (Plus r s) = case_prod Plus ` (mrexps r \<times> mrexps s)"
| "mrexps (Times r s) = case_prod Times ` (mrexps r \<times> mrexps s)"
| "mrexps (Star r) = Star ` mrexps r"
lemma finite_mrexps[simp]: "finite (mrexps r)"
by (induction r) auto
lemma strip_mrexps: "strip ` mrexps r = {r}"
by (induction r) (auto simp: set_eq_subset subset_iff image_iff)
fun Lm :: "'a mrexp \<Rightarrow> 'a lang" where
"Lm Zero = {}" |
"Lm One = {}" |
"Lm (Atom(m,a)) = (if m then {[a]} else {})" |
"Lm (Plus r s) = Lm r \<union> Lm s" |
"Lm (Times r s) = Lm r @@ lang(strip s) \<union> Lm s" |
"Lm (Star r) = Lm r @@ star(lang(strip r))"
fun final :: "'a mrexp \<Rightarrow> bool" where
"final Zero = False" |
"final One = False" |
"final (Atom(m,a)) = m" |
"final (Plus r s) = (final r \<or> final s)" |
"final (Times r s) = (final s \<or> nullable s \<and> final r)" |
"final (Star r) = final r"
abbreviation read :: "'a \<Rightarrow> 'a mrexp \<Rightarrow> 'a mrexp" where
"read a \<equiv> map_rexp (\<lambda>(m,x). (m \<and> a=x, x))"
lemma read_mrexps[simp]: "r \<in> mrexps s \<Longrightarrow> read a r \<in> mrexps s"
by (induction s arbitrary: a r) (auto simp: image_iff)
fun follow :: "bool \<Rightarrow> 'a mrexp \<Rightarrow> 'a mrexp" where
"follow m Zero = Zero" |
"follow m One = One" |
"follow m (Atom(_,a)) = Atom(m,a)" |
"follow m (Plus r s) = Plus (follow m r) (follow m s)" |
"follow m (Times r s) =
Times (follow m r) (follow (final r \<or> m \<and> nullable r) s)" |
"follow m (Star r) = Star(follow (final r \<or> m) r)"
lemma follow_mrexps[simp]: "r \<in> mrexps s \<Longrightarrow> follow b r \<in> mrexps s"
by (induction s arbitrary: b r) (auto simp: image_iff)
lemma strip_read[simp]: "strip (read a r) = strip r"
by (simp add: map_map_rexp split_def)
lemma Nil_notin_Lm[simp]: "[] \<notin> Lm r"
by (induction r) (auto split: if_splits)
lemma Nil_in_lang_strip[simp]: "[] \<in> lang(r) \<longleftrightarrow> [] \<in> lang(strip r)"
by (induction r) auto
lemma strip_follow[simp]: "strip(follow m r) = strip r"
by (induction r arbitrary: m) (auto split: if_splits)
lemma conc_lemma: "[] \<notin> A \<Longrightarrow> {w : A @@ B. w \<noteq> [] \<and> P(hd w)} = {w : A. w \<noteq> [] \<and> P(hd w)} @@ B"
unfolding conc_def by auto (metis hd_append2)+
lemma Lm_read: "Lm (read a r) = {w : Lm r. w \<noteq> [] \<and> hd w = a}"
proof (induction r)
case (Times r1 r2) thus ?case
using conc_lemma[OF Nil_notin_Lm, where P = "\<lambda>x. x=a" and r1 = r1] by auto
next
case Star thus ?case using conc_lemma[OF Nil_notin_Lm, where P = "\<lambda>x. x=a"] by simp
qed (auto split: if_splits)
lemma tl_conc[simp]: "[] \<notin> A \<Longrightarrow>tl ` (A @@ B) = tl ` A @@ B"
by (fastforce simp: image_def Bex_def tl_append split: list.split)
lemma Nil_in_tl_Lm_if_final[simp]: "final r \<Longrightarrow> [] : tl ` Lm r"
by (induction r) (auto simp: nullable_iff image_Un)
lemma Nil_notin_tl_if_not_final: "\<not> final r \<Longrightarrow> [] \<notin> tl ` Lm r"
by (induction r) (auto simp: nullable_iff Nil_tl singleton_in_conc intro!: image_eqI[rotated])
lemma Lm_follow: "Lm (follow m r) = tl ` Lm r \<union> (if m then lang(strip r) else {}) - {[]}"
proof (induction r arbitrary: m)
case (Atom mb) thus ?case by (cases mb) auto
next
case (Times r s) thus ?case
by (simp add: Un_Diff image_Un conc_Un_distrib nullable_iff
conc_Diff_if_Nil1 Nil_notin_tl_if_not_final Un_ac)
next
case (Star r) thus ?case
by (simp add: Un_Diff conc_Un_distrib
conc_Diff_if_Nil1 Nil_notin_tl_if_not_final star_Diff_Nil_fold)
qed auto
subsection \<open>Mark Before Atom\<close>
text\<open>Position automaton where mark is placed before atoms.\<close>
abbreviation "empty_mrexp \<equiv> map_rexp (\<lambda>a. (False,a))"
lemma empty_mrexp_mrexps[simp]: "empty_mrexp r \<in> mrexps r"
by (induction r) auto
lemma nullable_empty_mrexp[simp]: "nullable (empty_mrexp r) = nullable r"
by (induct r) auto
definition "init_b r = (follow True (empty_mrexp r), nullable r)"
lemma init_b_mrexps[simp]: "init_b r \<in> mrexps r \<times> UNIV"
unfolding init_b_def by auto
fun delta_b where
"delta_b a (r,b) = (let r' = read a r in (follow False r', final r'))"
lemma delta_b_mrexps[simp]: "rb \<in> mrexps r \<times> UNIV \<Longrightarrow> delta_b a rb \<in> mrexps r \<times> UNIV"
by (auto simp: Let_def)
lemma fold_delta_b_init_b_mrexps[simp]: "fold delta_b w (init_b s) \<in> mrexps s \<times> UNIV"
by (induction w arbitrary: s rule: rev_induct) auto
fun L_b where
"L_b (r,b) = Lm r \<union> (if b then {[]} else {})"
abbreviation "final_b \<equiv> snd"
lemma Lm_empty: "Lm (empty_mrexp r) = {}"
by (induction r) auto
lemma final_read_Lm: "final(read a r) \<longleftrightarrow> [a] \<in> Lm r"
by (induction r) (auto simp: nullable_iff concI_if_Nil2 singleton_in_conc split: if_splits)
global_interpretation before: rexp_DFA init_b delta_b final_b L_b
defines before_closure = before.closure
and check_eqv_b = before.check_eqv
and reachable_b = before.reachable
and automaton_b = before.automaton
and match_b = before.match
proof (standard, goal_cases)
case (1 r) show "L_b (init_b r) = lang r"
by(auto simp add: init_b_def Lm_follow Lm_empty map_map_rexp nullable_iff)
next
case (2 a rb) show "L_b (delta_b a rb) = Deriv a (L_b rb)"
by (cases rb) (auto simp add: Deriv_def final_read_Lm image_def Lm_read Lm_follow)
next
case (3 rb) show "final_b rb \<longleftrightarrow> [] \<in> L_b rb" by (cases rb) simp
next
case (4 s)
have "{fold delta_b w (init_b s) |w. True} \<subseteq> mrexps s \<times> UNIV"
by (intro subsetI, elim CollectE exE) (simp only: fold_delta_b_init_b_mrexps)
then show "finite {fold delta_b w (init_b s) |w. True}" by (rule finite_subset) simp
qed
subsection \<open>Mark After Atom\<close>
text\<open>Position automaton where mark is placed after atoms. This is the
Glushkov and McNaughton/Yamada construction.\<close>
definition "init_a r = (True, empty_mrexp r)"
lemma init_a_mrexps[simp]: "init_a r \<in> UNIV \<times> mrexps r"
unfolding init_a_def by auto
fun delta_a where
"delta_a a (b,r) = (False, read a (follow b r))"
lemma delta_a_mrexps[simp]: "br \<in> UNIV \<times> mrexps r \<Longrightarrow> delta_a a br \<in> UNIV \<times> mrexps r"
by auto
lemma fold_delta_a_init_a_mrexps[simp]: "fold delta_a w (init_a s) \<in> UNIV \<times> mrexps s"
by (induction w arbitrary: s rule: rev_induct) auto
fun final_a where
"final_a (b,r) \<longleftrightarrow> final r \<or> b \<and> nullable r"
fun L_a where
"L_a (b,r) = Lm (follow b r) \<union> (if final_a(b,r) then {[]} else {})"
lemma nonfinal_empty_mrexp: "\<not> final (empty_mrexp r)"
by (induction r) auto
lemma Cons_eq_tl_iff[simp]: "x # xs = tl ys \<longleftrightarrow> (\<exists>y. ys = y # x # xs)"
by (cases ys) auto
lemma tl_eq_Cons_iff[simp]: "tl ys = x # xs \<longleftrightarrow> (\<exists>y. ys = y # x # xs)"
by (cases ys) auto
global_interpretation after: rexp_DFA init_a delta_a final_a L_a
defines after_closure = after.closure
and check_eqv_a = after.check_eqv
and reachable_a = after.reachable
and automaton_a = after.automaton
and match_a = after.match
proof (standard, goal_cases)
case (1 r) show "L_a (init_a r) = lang r"
by (auto simp: init_a_def nonfinal_empty_mrexp Lm_follow Lm_empty map_map_rexp nullable_iff)
next
case (2 a br) show "L_a (delta_a a br) = Deriv a (L_a br)"
by (cases br) (simp add: Deriv_def final_read_Lm Lm_read Lm_follow,
fastforce simp: image_def neq_Nil_conv)
next
case (3 br) show "final_a br \<longleftrightarrow> [] \<in> L_a br" by (cases br) simp
next
case (4 s)
have "{fold delta_a w (init_a s) |w. True} \<subseteq> UNIV \<times> mrexps s"
by (intro subsetI, elim CollectE exE) (simp only: fold_delta_a_init_a_mrexps)
then show "finite {fold delta_a w (init_a s) |w. True}" by (rule finite_subset) simp
qed
text \<open>
The ``before'' automaton is a quotient of the ``after'' automaton.
The proof below follows an informal proof given by Helmut Seidl in personal communication.
\<close>
fun hom_ab where
"hom_ab (b, r) = (follow b r, final_a (b, r))"
lemma hom_delta: "hom_ab (delta_a x br) = delta_b x (hom_ab br)"
by(cases br) (auto simp add: Let_def)
lemma hom_deltas: "hom_ab (fold delta_a w br) = fold delta_b w (hom_ab br)"
by (induct w arbitrary: br) (auto simp add: hom_delta)
lemma hom_init: "hom_ab (init_a r) = init_b r"
unfolding init_a_def init_b_def hom_ab.simps by (simp add: nonfinal_empty_mrexp)
lemma reachable_ab: "reachable_b as r = hom_ab ` reachable_a as r"
unfolding after.reachable before.reachable by (force simp: hom_init hom_deltas)
theorem card_reachable_ab: "card (reachable_b as r) \<le> card (reachable_a as r)"
unfolding reachable_ab using after.finite_reachable by (rule card_image_le)
text\<open>The implementation by Fischer et al.:\<close>
(* better: shift b m r and move m b r *)
fun shift :: "bool \<Rightarrow> 'a mrexp \<Rightarrow> 'a \<Rightarrow> 'a mrexp" where
"shift _ One _ = One" |
"shift _ Zero _ = Zero" |
"shift m (Atom (_,x)) c = Atom (m \<and> (x=c),x)" |
"shift m (Plus r s) c = Plus (shift m r c) (shift m s c)" |
"shift m (Times r s) c =
Times (shift m r c) (shift (final r \<or> m \<and> nullable r) s c)" |
"shift m (Star r) c = Star (shift (final r \<or> m) r c)"
lemma shift_read_follow: "shift m r x = read x (follow m r)"
by (induction m r x rule: shift.induct) auto
text\<open>In the spirit of Asperti, and similarly quadratic because of the need
to call final1 in move.\<close>
fun final1 :: "'a mrexp \<Rightarrow> 'a \<Rightarrow> bool" where
"final1 Zero _ = False" |
"final1 One _ = False" |
"final1 (Atom(m,a)) x = (m \<and> a=x)" |
"final1 (Plus r s) x = (final1 r x \<or> final1 s x)" |
"final1 (Times r s) x = (final1 s x \<or> nullable s \<and> final1 r x)" |
"final1 (Star r) x = final1 r x"
fun move :: "'a \<Rightarrow> 'a mrexp \<Rightarrow> bool \<Rightarrow> 'a mrexp" where
"move _ One _ = One" |
"move _ Zero _ = Zero" |
"move c (Atom (_,a)) m = Atom (m, a)" |
"move c (Plus r s) m = Plus (move c r m) (move c s m)" |
"move c (Times r s) m =
Times (move c r m) (move c s (final1 r c \<or> m \<and> nullable r))" |
"move c (Star r) m = Star (move c r (final1 r c \<or> m))"
lemma nullable_read[simp]: "nullable (read c r) = nullable r"
by (induction r) auto
lemma final_read_final1: "final (read c r) = final1 r c"
by (induction r) auto
lemma move_follow_read: "move c r m = follow m (read c r)"
by (induction c r m rule: move.induct) (auto simp: final_read_final1)
(*<*)
end
(*>*)
|
module WebAssembly.Validation.Modules.ElementSegments
import WebAssembly.Structure
import WebAssembly.Validation.Conventions
import WebAssembly.Validation.Instructions
import WebAssembly.Validation.Types
||| [🔗 Spec](https://webassembly.github.io/spec/core/valid/modules.html#element-segments)
|||
||| ```
||| C.tables[x] = limits funcref    C ⊢ expr : [i32]    C ⊢ expr const    (C.funcs[y] = functype)*
||| ----------------------------------------------------------------------------------------------
||| C ⊢ {table x, offset expr, init y*} ok
||| ```
public export
data ValidElem : Context -> Elem -> Type where
MkValidElem : (c : Context)
-> (x : TableIdx)
-> (expr : Expr)
-> (ys : List FuncIdx)
-> {auto in_bounds: InBounds x (tables c)}
-> ValidSequence c expr ([] ->> [TI32])
-> ConstExpr c expr
-> (InBounds y ys -> InBounds y (funcs c))
-> ValidElem c (MkElem x expr ys)
|
/-
Copyright (c) 2017 Kevin Buzzard. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kevin Buzzard, Mario Carneiro
! This file was ported from Lean 3 source module data.complex.basic
! leanprover-community/mathlib commit 92ca63f0fb391a9ca5f22d2409a6080e786d99f7
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Data.Real.Sqrt
/-!
# The complex numbers
The complex numbers are modelled as ℝ^2 in the obvious way and it is shown that they form a field
of characteristic zero. For the result that the complex numbers are algebraically closed, see
`FieldTheory.AlgebraicClosure`.
-/
open BigOperators
open Set Function
/-! ### Definition and basic arithmetic -/
/-- Complex numbers consist of two `Real`s: a real part `re` and an imaginary part `im`. -/
structure Complex : Type where
re : ℝ
im : ℝ
#align complex Complex
notation "ℂ" => Complex
namespace Complex
open ComplexConjugate
noncomputable instance : DecidableEq ℂ :=
Classical.decEq _
/-- The equivalence between the complex numbers and `ℝ × ℝ`. -/
@[simps apply]
def equivRealProd : ℂ ≃ ℝ × ℝ where
toFun z := ⟨z.re, z.im⟩
invFun p := ⟨p.1, p.2⟩
left_inv := fun ⟨_, _⟩ => rfl
right_inv := fun ⟨_, _⟩ => rfl
#align complex.equiv_real_prod Complex.equivRealProd
@[simp]
theorem eta : ∀ z : ℂ, Complex.mk z.re z.im = z
| ⟨_, _⟩ => rfl
#align complex.eta Complex.eta
@[ext]
theorem ext : ∀ {z w : ℂ}, z.re = w.re → z.im = w.im → z = w
| ⟨_, _⟩, ⟨_, _⟩, rfl, rfl => rfl
#align complex.ext Complex.ext
theorem ext_iff {z w : ℂ} : z = w ↔ z.re = w.re ∧ z.im = w.im :=
⟨fun H => by simp [H], fun h => ext h.1 h.2⟩
#align complex.ext_iff Complex.ext_iff
theorem re_surjective : Surjective re := fun x => ⟨⟨x, 0⟩, rfl⟩
#align complex.re_surjective Complex.re_surjective
theorem im_surjective : Surjective im := fun y => ⟨⟨0, y⟩, rfl⟩
#align complex.im_surjective Complex.im_surjective
@[simp]
theorem range_re : range re = univ :=
re_surjective.range_eq
#align complex.range_re Complex.range_re
@[simp]
theorem range_im : range im = univ :=
im_surjective.range_eq
#align complex.range_im Complex.range_im
-- Porting note: refactored instance to allow `norm_cast` to work
/-- The natural inclusion of the real numbers into the complex numbers.
The name `Complex.ofReal` is reserved for the bundled homomorphism. -/
@[coe]
def ofReal' (r : ℝ) : ℂ :=
⟨r, 0⟩
instance : Coe ℝ ℂ :=
⟨ofReal'⟩
/- Porting note: `simp` attribute removed as this has a variable as head symbol of
the left-hand side (after whnfR)-/
@[norm_cast]
theorem ofReal_re (r : ℝ) : Complex.re (r : ℂ) = r :=
rfl
#align complex.of_real_re Complex.ofReal_re
@[simp, norm_cast]
theorem ofReal_im (r : ℝ) : (r : ℂ).im = 0 :=
rfl
#align complex.of_real_im Complex.ofReal_im
theorem ofReal_def (r : ℝ) : (r : ℂ) = ⟨r, 0⟩ :=
rfl
#align complex.of_real_def Complex.ofReal_def
@[simp, norm_cast]
theorem ofReal_inj {z w : ℝ} : (z : ℂ) = w ↔ z = w :=
⟨congrArg re, by apply congrArg⟩
#align complex.of_real_inj Complex.ofReal_inj
-- Porting note: made coercion explicit
theorem ofReal_injective : Function.Injective ((↑) : ℝ → ℂ) := fun _ _ => congrArg re
#align complex.of_real_injective Complex.ofReal_injective
-- Porting note: made coercion explicit
instance canLift : CanLift ℂ ℝ (↑) fun z => z.im = 0 where
prf z hz := ⟨z.re, ext rfl hz.symm⟩
#align complex.can_lift Complex.canLift
/-- The product of a set on the real axis and a set on the imaginary axis of the complex plane,
denoted by `s ×ℂ t`. -/
def Set.reProdIm (s t : Set ℝ) : Set ℂ :=
re ⁻¹' s ∩ im ⁻¹' t
#align set.re_prod_im Complex.Set.reProdIm
infixl:72 " ×ℂ " => Set.reProdIm
theorem mem_reProdIm {z : ℂ} {s t : Set ℝ} : z ∈ s ×ℂ t ↔ z.re ∈ s ∧ z.im ∈ t :=
Iff.rfl
#align complex.mem_re_prod_im Complex.mem_reProdIm
instance : Zero ℂ :=
⟨(0 : ℝ)⟩
instance : Inhabited ℂ :=
⟨0⟩
@[simp]
theorem zero_re : (0 : ℂ).re = 0 :=
rfl
#align complex.zero_re Complex.zero_re
@[simp]
theorem zero_im : (0 : ℂ).im = 0 :=
rfl
#align complex.zero_im Complex.zero_im
@[simp, norm_cast]
theorem ofReal_zero : ((0 : ℝ) : ℂ) = 0 :=
rfl
#align complex.of_real_zero Complex.ofReal_zero
@[simp]
theorem ofReal_eq_zero {z : ℝ} : (z : ℂ) = 0 ↔ z = 0 :=
ofReal_inj
#align complex.of_real_eq_zero Complex.ofReal_eq_zero
theorem ofReal_ne_zero {z : ℝ} : (z : ℂ) ≠ 0 ↔ z ≠ 0 :=
not_congr ofReal_eq_zero
#align complex.of_real_ne_zero Complex.ofReal_ne_zero
instance : One ℂ :=
⟨(1 : ℝ)⟩
@[simp]
theorem one_re : (1 : ℂ).re = 1 :=
rfl
#align complex.one_re Complex.one_re
@[simp]
theorem one_im : (1 : ℂ).im = 0 :=
rfl
#align complex.one_im Complex.one_im
@[simp, norm_cast]
theorem ofReal_one : ((1 : ℝ) : ℂ) = 1 :=
rfl
#align complex.of_real_one Complex.ofReal_one
@[simp]
theorem ofReal_eq_one {z : ℝ} : (z : ℂ) = 1 ↔ z = 1 :=
ofReal_inj
#align complex.of_real_eq_one Complex.ofReal_eq_one
theorem ofReal_ne_one {z : ℝ} : (z : ℂ) ≠ 1 ↔ z ≠ 1 :=
not_congr ofReal_eq_one
#align complex.of_real_ne_one Complex.ofReal_ne_one
instance : Add ℂ :=
⟨fun z w => ⟨z.re + w.re, z.im + w.im⟩⟩
@[simp]
theorem add_re (z w : ℂ) : (z + w).re = z.re + w.re :=
rfl
#align complex.add_re Complex.add_re
@[simp]
theorem add_im (z w : ℂ) : (z + w).im = z.im + w.im :=
rfl
#align complex.add_im Complex.add_im
section
set_option linter.deprecated false
@[simp]
theorem bit0_re (z : ℂ) : (bit0 z).re = bit0 z.re :=
rfl
#align complex.bit0_re Complex.bit0_re
@[simp]
theorem bit1_re (z : ℂ) : (bit1 z).re = bit1 z.re :=
rfl
#align complex.bit1_re Complex.bit1_re
@[simp]
theorem bit0_im (z : ℂ) : (bit0 z).im = bit0 z.im :=
Eq.refl _
#align complex.bit0_im Complex.bit0_im
@[simp]
theorem bit1_im (z : ℂ) : (bit1 z).im = bit0 z.im :=
add_zero _
#align complex.bit1_im Complex.bit1_im
@[simp, norm_cast]
theorem ofReal_add (r s : ℝ) : ((r + s : ℝ) : ℂ) = r + s :=
ext_iff.2 <| by simp [ofReal']
#align complex.of_real_add Complex.ofReal_add
@[simp, norm_cast]
theorem ofReal_bit0 (r : ℝ) : ((bit0 r : ℝ) : ℂ) = bit0 (r : ℂ) :=
ext_iff.2 <| by simp [bit0]
#align complex.of_real_bit0 Complex.ofReal_bit0
@[simp, norm_cast]
theorem ofReal_bit1 (r : ℝ) : ((bit1 r : ℝ) : ℂ) = bit1 (r : ℂ) :=
ext_iff.2 <| by simp [bit1]
#align complex.of_real_bit1 Complex.ofReal_bit1
end
instance : Neg ℂ :=
⟨fun z => ⟨-z.re, -z.im⟩⟩
@[simp]
theorem neg_re (z : ℂ) : (-z).re = -z.re :=
rfl
#align complex.neg_re Complex.neg_re
@[simp]
theorem neg_im (z : ℂ) : (-z).im = -z.im :=
rfl
#align complex.neg_im Complex.neg_im
@[simp, norm_cast]
theorem ofReal_neg (r : ℝ) : ((-r : ℝ) : ℂ) = -r :=
ext_iff.2 <| by simp [ofReal']
#align complex.of_real_neg Complex.ofReal_neg
instance : Sub ℂ :=
⟨fun z w => ⟨z.re - w.re, z.im - w.im⟩⟩
instance : Mul ℂ :=
⟨fun z w => ⟨z.re * w.re - z.im * w.im, z.re * w.im + z.im * w.re⟩⟩
@[simp]
theorem mul_re (z w : ℂ) : (z * w).re = z.re * w.re - z.im * w.im :=
rfl
#align complex.mul_re Complex.mul_re
@[simp]
theorem mul_im (z w : ℂ) : (z * w).im = z.re * w.im + z.im * w.re :=
rfl
#align complex.mul_im Complex.mul_im
@[simp, norm_cast]
theorem ofReal_mul (r s : ℝ) : ((r * s : ℝ) : ℂ) = r * s :=
ext_iff.2 <| by simp [ofReal']
#align complex.of_real_mul Complex.ofReal_mul
theorem ofReal_mul_re (r : ℝ) (z : ℂ) : (↑r * z).re = r * z.re := by simp [ofReal']
#align complex.of_real_mul_re Complex.ofReal_mul_re
theorem ofReal_mul_im (r : ℝ) (z : ℂ) : (↑r * z).im = r * z.im := by simp [ofReal']
#align complex.of_real_mul_im Complex.ofReal_mul_im
theorem ofReal_mul' (r : ℝ) (z : ℂ) : ↑r * z = ⟨r * z.re, r * z.im⟩ :=
ext (ofReal_mul_re _ _) (ofReal_mul_im _ _)
#align complex.of_real_mul' Complex.ofReal_mul'
/-! ### The imaginary unit, `I` -/
/-- The imaginary unit. -/
def I : ℂ :=
⟨0, 1⟩
set_option linter.uppercaseLean3 false in
#align complex.I Complex.I
@[simp]
theorem I_re : I.re = 0 :=
rfl
set_option linter.uppercaseLean3 false in
#align complex.I_re Complex.I_re
@[simp]
theorem I_im : I.im = 1 :=
rfl
set_option linter.uppercaseLean3 false in
#align complex.I_im Complex.I_im
@[simp]
theorem I_mul_I : I * I = -1 :=
ext_iff.2 <| by simp
set_option linter.uppercaseLean3 false in
#align complex.I_mul_I Complex.I_mul_I
theorem I_mul (z : ℂ) : I * z = ⟨-z.im, z.re⟩ :=
ext_iff.2 <| by simp
set_option linter.uppercaseLean3 false in
#align complex.I_mul Complex.I_mul
theorem I_ne_zero : (I : ℂ) ≠ 0 :=
mt (congr_arg im) zero_ne_one.symm
set_option linter.uppercaseLean3 false in
#align complex.I_ne_zero Complex.I_ne_zero
theorem mk_eq_add_mul_I (a b : ℝ) : Complex.mk a b = a + b * I :=
ext_iff.2 <| by simp [ofReal']
set_option linter.uppercaseLean3 false in
#align complex.mk_eq_add_mul_I Complex.mk_eq_add_mul_I
@[simp]
theorem re_add_im (z : ℂ) : (z.re : ℂ) + z.im * I = z :=
ext_iff.2 <| by simp [ofReal']
#align complex.re_add_im Complex.re_add_im
theorem mul_I_re (z : ℂ) : (z * I).re = -z.im := by simp
set_option linter.uppercaseLean3 false in
#align complex.mul_I_re Complex.mul_I_re
theorem mul_I_im (z : ℂ) : (z * I).im = z.re := by simp
set_option linter.uppercaseLean3 false in
#align complex.mul_I_im Complex.mul_I_im
theorem I_mul_re (z : ℂ) : (I * z).re = -z.im := by simp
set_option linter.uppercaseLean3 false in
#align complex.I_mul_re Complex.I_mul_re
theorem I_mul_im (z : ℂ) : (I * z).im = z.re := by simp
set_option linter.uppercaseLean3 false in
#align complex.I_mul_im Complex.I_mul_im
@[simp]
theorem equivRealProd_symm_apply (p : ℝ × ℝ) : equivRealProd.symm p = p.1 + p.2 * I := by
ext <;> simp [Complex.equivRealProd, ofReal']
#align complex.equiv_real_prod_symm_apply Complex.equivRealProd_symm_apply
/-! ### Commutative ring instance and lemmas -/
/- We use a nonstandard formula for the `ℕ` and `ℤ` actions to make sure there is no
diamond from the other actions they inherit through the `ℝ`-action on `ℂ` and action transitivity
defined in `Data.Complex.Module`. -/
instance : Nontrivial ℂ :=
pullback_nonzero re rfl rfl
-- Porting note: proof needed modifications and rewritten fields
instance addCommGroup : AddCommGroup ℂ :=
{ zero := (0 : ℂ)
add := (· + ·)
neg := Neg.neg
sub := Sub.sub
nsmul := fun n z => ⟨n • z.re - 0 * z.im, n • z.im + 0 * z.re⟩
zsmul := fun n z => ⟨n • z.re - 0 * z.im, n • z.im + 0 * z.re⟩
zsmul_zero':= by intros; ext <;> simp
nsmul_zero := by intros; ext <;> simp
nsmul_succ := by
intros; ext <;> simp [AddMonoid.nsmul_succ, add_mul, add_comm]
zsmul_succ' := by
intros; ext <;> simp [SubNegMonoid.zsmul_succ', add_mul, add_comm]
zsmul_neg' := by
intros; ext <;> simp [zsmul_neg', add_mul]
add_assoc := by intros; ext <;> simp [add_assoc]
zero_add := by intros; ext <;> simp
add_zero := by intros; ext <;> simp
add_comm := by intros; ext <;> simp [add_comm]
add_left_neg := by intros; ext <;> simp }
instance Complex.addGroupWithOne : AddGroupWithOne ℂ :=
{ Complex.addCommGroup with
natCast := fun n => ⟨n, 0⟩
natCast_zero := by
ext <;> simp [Nat.cast, AddMonoidWithOne.natCast_zero]
natCast_succ := fun _ => by ext <;> simp [Nat.cast, AddMonoidWithOne.natCast_succ]
intCast := fun n => ⟨n, 0⟩
intCast_ofNat := fun _ => by ext <;> rfl
intCast_negSucc := fun n => by
ext
· simp [AddGroupWithOne.intCast_negSucc]
show -(1: ℝ) + (-n) = -(↑(n + 1))
simp [Nat.cast_add, add_comm]
· simp [AddGroupWithOne.intCast_negSucc]
show im ⟨n, 0⟩ = 0
rfl
one := 1 }
-- Porting note: proof needed modifications and rewritten fields
instance commRing : CommRing ℂ :=
{ Complex.addGroupWithOne with
zero := (0 : ℂ)
add := (· + ·)
one := 1
mul := (· * ·)
npow := @npowRec _ ⟨(1 : ℂ)⟩ ⟨(· * ·)⟩
add_comm := by intros; ext <;> simp [add_comm]
left_distrib := by
intros; ext <;> simp [mul_re, mul_im] <;> ring
right_distrib := by
intros; ext <;> simp [mul_re, mul_im] <;> ring
zero_mul := by intros; ext <;> simp [zero_mul]
mul_zero := by intros; ext <;> simp [mul_zero]
mul_assoc := by intros; ext <;> simp [mul_assoc] <;> ring
one_mul := by intros; ext <;> simp [one_mul]
mul_one := by intros; ext <;> simp [mul_one]
mul_comm := by intros; ext <;> simp [mul_comm] ; ring }
/-- This shortcut instance ensures we do not find `Ring` via the noncomputable `Complex.field`
instance. -/
instance : Ring ℂ := by infer_instance
/-- This shortcut instance ensures we do not find `CommSemiring` via the noncomputable
`Complex.field` instance. -/
instance : CommSemiring ℂ :=
inferInstance
/-- The "real part" map, considered as an additive group homomorphism. -/
def reAddGroupHom : ℂ →+ ℝ where
toFun := re
map_zero' := zero_re
map_add' := add_re
#align complex.re_add_group_hom Complex.reAddGroupHom
@[simp]
theorem coe_reAddGroupHom : (reAddGroupHom : ℂ → ℝ) = re :=
rfl
#align complex.coe_re_add_group_hom Complex.coe_reAddGroupHom
/-- The "imaginary part" map, considered as an additive group homomorphism. -/
def imAddGroupHom : ℂ →+ ℝ where
toFun := im
map_zero' := zero_im
map_add' := add_im
#align complex.im_add_group_hom Complex.imAddGroupHom
@[simp]
theorem coe_imAddGroupHom : (imAddGroupHom : ℂ → ℝ) = im :=
rfl
#align complex.coe_im_add_group_hom Complex.coe_imAddGroupHom
section
set_option linter.deprecated false
@[simp]
theorem I_pow_bit0 (n : ℕ) : I ^ bit0 n = (-1) ^ n := by rw [pow_bit0', Complex.I_mul_I]
set_option linter.uppercaseLean3 false in
#align complex.I_pow_bit0 Complex.I_pow_bit0
@[simp]
theorem I_pow_bit1 (n : ℕ) : I ^ bit1 n = (-1) ^ n * I := by rw [pow_bit1', Complex.I_mul_I]
set_option linter.uppercaseLean3 false in
#align complex.I_pow_bit1 Complex.I_pow_bit1
--Porting note: new theorem
@[simp, norm_cast]
theorem ofReal_ofNat (n : ℕ) [n.AtLeastTwo] : ((OfNat.ofNat n : ℝ) : ℂ) = OfNat.ofNat n :=
rfl
@[simp]
theorem re_ofNat (n : ℕ) [n.AtLeastTwo] : (OfNat.ofNat n : ℂ).re = OfNat.ofNat n :=
rfl
@[simp]
theorem im_ofNat (n : ℕ) [n.AtLeastTwo] : (OfNat.ofNat n : ℂ).im = 0 :=
rfl
end
/-! ### Complex conjugation -/
/-- This defines the complex conjugate as the `star` operation of the `StarRing ℂ`. It
is recommended to use the ring endomorphism version `starRingEnd`, available under the
notation `conj` in the locale `ComplexConjugate`. -/
instance : StarRing ℂ where
star z := ⟨z.re, -z.im⟩
star_involutive x := by simp only [eta, neg_neg]
star_mul a b := by ext <;> simp [add_comm] <;> ring
star_add a b := by ext <;> simp [add_comm]
@[simp]
theorem conj_re (z : ℂ) : (conj z).re = z.re :=
rfl
#align complex.conj_re Complex.conj_re
@[simp]
theorem conj_im (z : ℂ) : (conj z).im = -z.im :=
rfl
#align complex.conj_im Complex.conj_im
theorem conj_ofReal (r : ℝ) : conj (r : ℂ) = r :=
ext_iff.2 <| by simp [star]
#align complex.conj_of_real Complex.conj_ofReal
@[simp]
theorem conj_I : conj I = -I :=
ext_iff.2 <| by simp
set_option linter.uppercaseLean3 false in
#align complex.conj_I Complex.conj_I
section
set_option linter.deprecated false
theorem conj_bit0 (z : ℂ) : conj (bit0 z) = bit0 (conj z) :=
ext_iff.2 <| by simp [bit0]
#align complex.conj_bit0 Complex.conj_bit0
theorem conj_bit1 (z : ℂ) : conj (bit1 z) = bit1 (conj z) :=
ext_iff.2 <| by simp [bit0]
#align complex.conj_bit1 Complex.conj_bit1
end
-- @[simp]
/- Porting note: `simp` attribute removed as the result could be proved
by `simp only [@map_neg, Complex.conj_I, @neg_neg]`
-/
theorem conj_neg_I : conj (-I) = I :=
ext_iff.2 <| by simp
set_option linter.uppercaseLean3 false in
#align complex.conj_neg_I Complex.conj_neg_I
theorem eq_conj_iff_real {z : ℂ} : conj z = z ↔ ∃ r : ℝ, z = r :=
⟨fun h => ⟨z.re, ext rfl <| eq_zero_of_neg_eq (congr_arg im h)⟩, fun ⟨h, e⟩ => by
rw [e, conj_ofReal]⟩
#align complex.eq_conj_iff_real Complex.eq_conj_iff_real
theorem eq_conj_iff_re {z : ℂ} : conj z = z ↔ (z.re : ℂ) = z :=
eq_conj_iff_real.trans ⟨by rintro ⟨r, rfl⟩ ; simp [ofReal'], fun h => ⟨_, h.symm⟩⟩
#align complex.eq_conj_iff_re Complex.eq_conj_iff_re
theorem eq_conj_iff_im {z : ℂ} : conj z = z ↔ z.im = 0 :=
⟨fun h => add_self_eq_zero.mp (neg_eq_iff_add_eq_zero.mp (congr_arg im h)), fun h =>
ext rfl (neg_eq_iff_add_eq_zero.mpr (add_self_eq_zero.mpr h))⟩
#align complex.eq_conj_iff_im Complex.eq_conj_iff_im
-- `simpNF` complains about this being provable by `is_R_or_C.star_def` even
-- though it's not imported by this file.
-- Porting note: linter `simpNF` not found
@[simp]
theorem star_def : (Star.star : ℂ → ℂ) = conj :=
rfl
#align complex.star_def Complex.star_def
/-! ### Norm squared -/
/-- The norm squared function. -/
-- Porting note: `@[pp_nodot]` not found
-- @[pp_nodot]
def normSq : ℂ →*₀ ℝ where
toFun z := z.re * z.re + z.im * z.im
map_zero' := by simp
map_one' := by simp
map_mul' z w := by
dsimp
ring
#align complex.norm_sq Complex.normSq
theorem normSq_apply (z : ℂ) : normSq z = z.re * z.re + z.im * z.im :=
rfl
#align complex.norm_sq_apply Complex.normSq_apply
@[simp]
theorem normSq_ofReal (r : ℝ) : normSq r = r * r := by
simp [normSq, ofReal']
#align complex.norm_sq_of_real Complex.normSq_ofReal
@[simp]
theorem normSq_mk (x y : ℝ) : normSq ⟨x, y⟩ = x * x + y * y :=
rfl
#align complex.norm_sq_mk Complex.normSq_mk
theorem normSq_add_mul_I (x y : ℝ) : normSq (x + y * I) = x ^ 2 + y ^ 2 := by
rw [← mk_eq_add_mul_I, normSq_mk, sq, sq]
set_option linter.uppercaseLean3 false in
#align complex.norm_sq_add_mul_I Complex.normSq_add_mul_I
theorem normSq_eq_conj_mul_self {z : ℂ} : (normSq z : ℂ) = conj z * z := by
ext <;> simp [normSq, mul_comm, ofReal']
#align complex.norm_sq_eq_conj_mul_self Complex.normSq_eq_conj_mul_self
-- @[simp]
/- Porting note: `simp` attribute removed as linter reports this can be proved
by `simp only [@map_zero]` -/
theorem normSq_zero : normSq 0 = 0 :=
normSq.map_zero
#align complex.norm_sq_zero Complex.normSq_zero
-- @[simp]
/- Porting note: `simp` attribute removed as linter reports this can be proved
by `simp only [@map_one]` -/
theorem normSq_one : normSq 1 = 1 :=
normSq.map_one
#align complex.norm_sq_one Complex.normSq_one
@[simp]
theorem normSq_I : normSq I = 1 := by simp [normSq]
set_option linter.uppercaseLean3 false in
#align complex.norm_sq_I Complex.normSq_I
theorem normSq_nonneg (z : ℂ) : 0 ≤ normSq z :=
add_nonneg (mul_self_nonneg _) (mul_self_nonneg _)
#align complex.norm_sq_nonneg Complex.normSq_nonneg
@[simp]
theorem range_normSq : range normSq = Ici 0 :=
Subset.antisymm (range_subset_iff.2 normSq_nonneg) fun x hx =>
⟨Real.sqrt x, by rw [normSq_ofReal, Real.mul_self_sqrt hx]⟩
#align complex.range_norm_sq Complex.range_normSq
theorem normSq_eq_zero {z : ℂ} : normSq z = 0 ↔ z = 0 :=
⟨fun h =>
ext (eq_zero_of_mul_self_add_mul_self_eq_zero h)
(eq_zero_of_mul_self_add_mul_self_eq_zero <| (add_comm _ _).trans h),
fun h => h.symm ▸ normSq_zero⟩
#align complex.norm_sq_eq_zero Complex.normSq_eq_zero
@[simp]
theorem normSq_pos {z : ℂ} : 0 < normSq z ↔ z ≠ 0 :=
(normSq_nonneg z).lt_iff_ne.trans <| not_congr (eq_comm.trans normSq_eq_zero)
#align complex.norm_sq_pos Complex.normSq_pos
@[simp]
theorem normSq_neg (z : ℂ) : normSq (-z) = normSq z := by simp [normSq]
#align complex.norm_sq_neg Complex.normSq_neg
@[simp]
theorem normSq_conj (z : ℂ) : normSq (conj z) = normSq z := by simp [normSq]
#align complex.norm_sq_conj Complex.normSq_conj
theorem normSq_mul (z w : ℂ) : normSq (z * w) = normSq z * normSq w :=
normSq.map_mul z w
#align complex.norm_sq_mul Complex.normSq_mul
theorem normSq_add (z w : ℂ) : normSq (z + w) = normSq z + normSq w + 2 * (z * conj w).re := by
dsimp [normSq] ; ring
#align complex.norm_sq_add Complex.normSq_add
theorem re_sq_le_normSq (z : ℂ) : z.re * z.re ≤ normSq z :=
le_add_of_nonneg_right (mul_self_nonneg _)
#align complex.re_sq_le_norm_sq Complex.re_sq_le_normSq
theorem im_sq_le_normSq (z : ℂ) : z.im * z.im ≤ normSq z :=
le_add_of_nonneg_left (mul_self_nonneg _)
#align complex.im_sq_le_norm_sq Complex.im_sq_le_normSq
theorem mul_conj (z : ℂ) : z * conj z = normSq z :=
ext_iff.2 <| by simp [normSq, mul_comm, sub_eq_neg_add, add_comm, ofReal']
#align complex.mul_conj Complex.mul_conj
theorem add_conj (z : ℂ) : z + conj z = (2 * z.re : ℝ) :=
ext_iff.2 <| by simp [two_mul, ofReal']
#align complex.add_conj Complex.add_conj
/-- The coercion `ℝ → ℂ` as a `RingHom`. -/
def ofReal : ℝ →+* ℂ where
toFun x := (x : ℂ)
map_one' := ofReal_one
map_zero' := ofReal_zero
map_mul' := ofReal_mul
map_add' := ofReal_add
#align complex.of_real Complex.ofReal
@[simp]
theorem ofReal_eq_coe (r : ℝ) : ofReal r = r :=
rfl
#align complex.of_real_eq_coe Complex.ofReal_eq_coe
@[simp]
theorem I_sq : I ^ 2 = -1 := by rw [sq, I_mul_I]
set_option linter.uppercaseLean3 false in
#align complex.I_sq Complex.I_sq
@[simp]
theorem sub_re (z w : ℂ) : (z - w).re = z.re - w.re :=
rfl
#align complex.sub_re Complex.sub_re
@[simp]
theorem sub_im (z w : ℂ) : (z - w).im = z.im - w.im :=
  rfl
#align complex.sub_im Complex.sub_im
@[simp, norm_cast]
theorem ofReal_sub (r s : ℝ) : ((r - s : ℝ) : ℂ) = r - s :=
ext_iff.2 <| by simp [ofReal']
#align complex.of_real_sub Complex.ofReal_sub
@[simp, norm_cast]
theorem ofReal_pow (r : ℝ) (n : ℕ) : ((r ^ n : ℝ) : ℂ) = (r : ℂ) ^ n := by
induction n <;> simp [*, ofReal_mul, pow_succ]
#align complex.of_real_pow Complex.ofReal_pow
theorem sub_conj (z : ℂ) : z - conj z = (2 * z.im : ℝ) * I :=
ext_iff.2 <| by simp [two_mul, sub_eq_add_neg, ofReal']
#align complex.sub_conj Complex.sub_conj
theorem normSq_sub (z w : ℂ) : normSq (z - w) = normSq z + normSq w - 2 * (z * conj w).re := by
rw [sub_eq_add_neg, normSq_add]
simp only [RingHom.map_neg, mul_neg, neg_re, normSq_neg]
ring
#align complex.norm_sq_sub Complex.normSq_sub
/-! ### Inversion -/
noncomputable instance : Inv ℂ :=
⟨fun z => conj z * ((normSq z)⁻¹ : ℝ)⟩
theorem inv_def (z : ℂ) : z⁻¹ = conj z * ((normSq z)⁻¹ : ℝ) :=
rfl
#align complex.inv_def Complex.inv_def
@[simp]
theorem inv_re (z : ℂ) : z⁻¹.re = z.re / normSq z := by simp [inv_def, division_def, ofReal']
#align complex.inv_re Complex.inv_re
@[simp]
theorem inv_im (z : ℂ) : z⁻¹.im = -z.im / normSq z := by simp [inv_def, division_def, ofReal']
#align complex.inv_im Complex.inv_im
@[simp, norm_cast]
theorem ofReal_inv (r : ℝ) : ((r⁻¹ : ℝ) : ℂ) = (r : ℂ)⁻¹ :=
ext_iff.2 <| by simp [ofReal']
#align complex.of_real_inv Complex.ofReal_inv
protected theorem inv_zero : (0⁻¹ : ℂ) = 0 := by
rw [← ofReal_zero, ← ofReal_inv, inv_zero]
#align complex.inv_zero Complex.inv_zero
protected theorem mul_inv_cancel {z : ℂ} (h : z ≠ 0) : z * z⁻¹ = 1 := by
rw [inv_def, ← mul_assoc, mul_conj, ← ofReal_mul, mul_inv_cancel (mt normSq_eq_zero.1 h),
ofReal_one]
#align complex.mul_inv_cancel Complex.mul_inv_cancel
/-! ### Field instance and lemmas -/
noncomputable instance : Field ℂ :=
{ inv := Inv.inv
mul_inv_cancel := @Complex.mul_inv_cancel
inv_zero := Complex.inv_zero }
section
set_option linter.deprecated false
@[simp]
theorem I_zpow_bit0 (n : ℤ) : I ^ bit0 n = (-1) ^ n := by rw [zpow_bit0', I_mul_I]
set_option linter.uppercaseLean3 false in
#align complex.I_zpow_bit0 Complex.I_zpow_bit0
@[simp]
theorem I_zpow_bit1 (n : ℤ) : I ^ bit1 n = (-1) ^ n * I := by rw [zpow_bit1', I_mul_I]
set_option linter.uppercaseLean3 false in
#align complex.I_zpow_bit1 Complex.I_zpow_bit1
end
theorem div_re (z w : ℂ) : (z / w).re = z.re * w.re / normSq w + z.im * w.im / normSq w := by
simp [div_eq_mul_inv, mul_assoc, sub_eq_add_neg]
#align complex.div_re Complex.div_re
theorem div_im (z w : ℂ) : (z / w).im = z.im * w.re / normSq w - z.re * w.im / normSq w := by
simp [div_eq_mul_inv, mul_assoc, sub_eq_add_neg, add_comm]
#align complex.div_im Complex.div_im
theorem conj_inv (x : ℂ) : conj x⁻¹ = (conj x)⁻¹ :=
star_inv' _
#align complex.conj_inv Complex.conj_inv
@[simp, norm_cast]
theorem ofReal_div (r s : ℝ) : ((r / s : ℝ) : ℂ) = r / s :=
map_div₀ ofReal r s
#align complex.of_real_div Complex.ofReal_div
@[simp, norm_cast]
theorem ofReal_zpow (r : ℝ) (n : ℤ) : ((r ^ n : ℝ) : ℂ) = (r : ℂ) ^ n :=
map_zpow₀ ofReal r n
#align complex.of_real_zpow Complex.ofReal_zpow
@[simp]
theorem div_I (z : ℂ) : z / I = -(z * I) :=
(div_eq_iff_mul_eq I_ne_zero).2 <| by simp [mul_assoc]
set_option linter.uppercaseLean3 false in
#align complex.div_I Complex.div_I
@[simp]
theorem inv_I : I⁻¹ = -I := by
rw [inv_eq_one_div, div_I, one_mul]
set_option linter.uppercaseLean3 false in
#align complex.inv_I Complex.inv_I
-- @[simp]
/- Porting note: `simp` attribute removed as linter reports this can be proved
by `simp only [@map_inv₀]` -/
theorem normSq_inv (z : ℂ) : normSq z⁻¹ = (normSq z)⁻¹ :=
map_inv₀ normSq z
#align complex.norm_sq_inv Complex.normSq_inv
-- @[simp]
/- Porting note: `simp` attribute removed as linter reports this can be proved
by `simp only [@map_div₀]` -/
theorem normSq_div (z w : ℂ) : normSq (z / w) = normSq z / normSq w :=
map_div₀ normSq z w
#align complex.norm_sq_div Complex.normSq_div
/-! ### Cast lemmas -/
@[simp, norm_cast]
theorem ofReal_nat_cast (n : ℕ) : ((n : ℝ) : ℂ) = n :=
map_natCast ofReal n
#align complex.of_real_nat_cast Complex.ofReal_nat_cast
@[simp, norm_cast]
theorem nat_cast_re (n : ℕ) : (n : ℂ).re = n := by rw [← ofReal_nat_cast, ofReal_re]
#align complex.nat_cast_re Complex.nat_cast_re
@[simp, norm_cast]
theorem nat_cast_im (n : ℕ) : (n : ℂ).im = 0 := by rw [← ofReal_nat_cast, ofReal_im]
#align complex.nat_cast_im Complex.nat_cast_im
@[simp, norm_cast]
theorem ofReal_int_cast (n : ℤ) : ((n : ℝ) : ℂ) = n :=
map_intCast ofReal n
#align complex.of_real_int_cast Complex.ofReal_int_cast
@[simp, norm_cast]
theorem int_cast_re (n : ℤ) : (n : ℂ).re = n := by rw [← ofReal_int_cast, ofReal_re]
#align complex.int_cast_re Complex.int_cast_re
@[simp, norm_cast]
theorem int_cast_im (n : ℤ) : (n : ℂ).im = 0 := by rw [← ofReal_int_cast, ofReal_im]
#align complex.int_cast_im Complex.int_cast_im
@[simp, norm_cast]
theorem ofReal_rat_cast (n : ℚ) : ((n : ℝ) : ℂ) = (n : ℂ) :=
map_ratCast ofReal n
#align complex.of_real_rat_cast Complex.ofReal_rat_cast
-- Porting note: removed `norm_cast` attribute because the RHS can't start with `↑`
@[simp]
theorem rat_cast_re (q : ℚ) : (q : ℂ).re = (q : ℂ) := by
rw [← ofReal_rat_cast, ofReal_re]
#align complex.rat_cast_re Complex.rat_cast_re
-- Porting note: removed `norm_cast` attribute because the RHS can't start with `↑`
@[simp]
theorem rat_cast_im (q : ℚ) : (q : ℂ).im = 0 := by
rw [← ofReal_rat_cast, ofReal_im]
#align complex.rat_cast_im Complex.rat_cast_im
/-! ### Characteristic zero -/
instance charZero : CharZero ℂ :=
charZero_of_inj_zero fun n h => by
rwa [← ofReal_nat_cast, ofReal_eq_zero, Nat.cast_eq_zero] at h
#align complex.char_zero_complex Complex.charZero
/-- A complex number `z` plus its conjugate `conj z` is `2` times its real part. -/
theorem re_eq_add_conj (z : ℂ) : (z.re : ℂ) = (z + conj z) / 2 := by
have : (↑(↑2 : ℝ) : ℂ) = (2 : ℂ) := by rfl
simp only [add_conj, ofReal_mul, ofReal_one, ofReal_bit0, this,
mul_div_cancel_left (z.re : ℂ) two_ne_zero]
#align complex.re_eq_add_conj Complex.re_eq_add_conj
/-- A complex number `z` minus its conjugate `conj z` is `2i` times its imaginary part. -/
theorem im_eq_sub_conj (z : ℂ) : (z.im : ℂ) = (z - conj z) / (2 * I) := by
have : (↑2 : ℝ ) * I = 2 * I := by rfl
simp only [sub_conj, ofReal_mul, ofReal_one, ofReal_bit0, mul_right_comm, this,
mul_div_cancel_left _ (mul_ne_zero two_ne_zero I_ne_zero : 2 * I ≠ 0)]
#align complex.im_eq_sub_conj Complex.im_eq_sub_conj
/-! ### Absolute value -/
namespace AbsTheory
-- We develop enough theory to bundle `abs` into an `AbsoluteValue` before making things public;
-- this is so there's not two versions of it hanging around.
local notation "abs" z => Real.sqrt (normSq z)
private theorem mul_self_abs (z : ℂ) : ((abs z) * abs z) = normSq z :=
Real.mul_self_sqrt (normSq_nonneg _)
private theorem abs_nonneg' (z : ℂ) : 0 ≤ abs z :=
Real.sqrt_nonneg _
theorem abs_conj (z : ℂ) : (abs conj z) = abs z := by simp
#align complex.abs_theory.abs_conj Complex.AbsTheory.abs_conj
private theorem abs_re_le_abs (z : ℂ) : |z.re| ≤ abs z := by
rw [mul_self_le_mul_self_iff (abs_nonneg z.re) (abs_nonneg' _), abs_mul_abs_self, mul_self_abs]
apply re_sq_le_normSq
private theorem re_le_abs (z : ℂ) : z.re ≤ abs z :=
(abs_le.1 (abs_re_le_abs _)).2
private theorem abs_mul (z w : ℂ) : (abs z * w) = (abs z) * abs w := by
rw [normSq_mul, Real.sqrt_mul (normSq_nonneg _)]
private theorem abs_add (z w : ℂ) : (abs z + w) ≤ (abs z) + abs w :=
(mul_self_le_mul_self_iff (abs_nonneg' (z + w)) (add_nonneg (abs_nonneg' z) (abs_nonneg' w))).2 <|
by
rw [mul_self_abs, add_mul_self_eq, mul_self_abs, mul_self_abs, add_right_comm, normSq_add,
add_le_add_iff_left, mul_assoc, mul_le_mul_left (zero_lt_two' ℝ), ←
Real.sqrt_mul <| normSq_nonneg z, ← normSq_conj w, ← map_mul]
exact re_le_abs (z * conj w)
/-- The complex absolute value function, defined as the square root of the norm squared. -/
noncomputable def _root_.Complex.abs : AbsoluteValue ℂ ℝ where
toFun x := abs x
map_mul' := abs_mul
nonneg' := abs_nonneg'
eq_zero' _ := (Real.sqrt_eq_zero <| normSq_nonneg _).trans normSq_eq_zero
add_le' := abs_add
#align complex.abs Complex.abs
end AbsTheory
theorem abs_def : (Complex.abs : ℂ → ℝ) = fun z => (normSq z).sqrt :=
rfl
#align complex.abs_def Complex.abs_def
theorem abs_apply {z : ℂ} : Complex.abs z = (normSq z).sqrt :=
rfl
#align complex.abs_apply Complex.abs_apply
@[simp, norm_cast]
theorem abs_ofReal (r : ℝ) : Complex.abs r = |r| := by
simp [Complex.abs, normSq_ofReal, Real.sqrt_mul_self_eq_abs]
#align complex.abs_of_real Complex.abs_ofReal
nonrec theorem abs_of_nonneg {r : ℝ} (h : 0 ≤ r) : Complex.abs r = r :=
(Complex.abs_ofReal _).trans (abs_of_nonneg h)
#align complex.abs_of_nonneg Complex.abs_of_nonneg
theorem abs_of_nat (n : ℕ) : Complex.abs n = n :=
calc
Complex.abs n = Complex.abs (n : ℝ) := by rw [ofReal_nat_cast]
_ = _ := Complex.abs_of_nonneg (Nat.cast_nonneg n)
#align complex.abs_of_nat Complex.abs_of_nat
theorem mul_self_abs (z : ℂ) : Complex.abs z * Complex.abs z = normSq z :=
Real.mul_self_sqrt (normSq_nonneg _)
#align complex.mul_self_abs Complex.mul_self_abs
theorem sq_abs (z : ℂ) : Complex.abs z ^ 2 = normSq z :=
Real.sq_sqrt (normSq_nonneg _)
#align complex.sq_abs Complex.sq_abs
@[simp]
theorem sq_abs_sub_sq_re (z : ℂ) : Complex.abs z ^ 2 - z.re ^ 2 = z.im ^ 2 := by
rw [sq_abs, normSq_apply, ← sq, ← sq, add_sub_cancel']
#align complex.sq_abs_sub_sq_re Complex.sq_abs_sub_sq_re
@[simp]
theorem sq_abs_sub_sq_im (z : ℂ) : Complex.abs z ^ 2 - z.im ^ 2 = z.re ^ 2 := by
rw [← sq_abs_sub_sq_re, sub_sub_cancel]
#align complex.sq_abs_sub_sq_im Complex.sq_abs_sub_sq_im
@[simp]
theorem abs_I : Complex.abs I = 1 := by simp [Complex.abs]
set_option linter.uppercaseLean3 false in
#align complex.abs_I Complex.abs_I
@[simp]
theorem abs_two : Complex.abs 2 = 2 :=
calc
Complex.abs 2 = Complex.abs (2 : ℝ) := by rfl
_ = (2 : ℝ) := Complex.abs_of_nonneg (by norm_num)
#align complex.abs_two Complex.abs_two
@[simp]
theorem range_abs : range Complex.abs = Ici 0 :=
Subset.antisymm
(by simp only [range_subset_iff, Ici, mem_setOf_eq, map_nonneg, forall_const])
(fun x hx => ⟨x, Complex.abs_of_nonneg hx⟩)
#align complex.range_abs Complex.range_abs
@[simp]
theorem abs_conj (z : ℂ) : Complex.abs (conj z) = Complex.abs z :=
AbsTheory.abs_conj z
#align complex.abs_conj Complex.abs_conj
@[simp]
theorem abs_prod {ι : Type _} (s : Finset ι) (f : ι → ℂ) :
Complex.abs (s.prod f) = s.prod fun I => Complex.abs (f I) :=
map_prod Complex.abs _ _
#align complex.abs_prod Complex.abs_prod
-- @[simp]
/- Porting note: `simp` attribute removed as linter reports this can be proved
by `simp only [@map_pow]` -/
theorem abs_pow (z : ℂ) (n : ℕ) : Complex.abs (z ^ n) = Complex.abs z ^ n :=
map_pow Complex.abs z n
#align complex.abs_pow Complex.abs_pow
-- @[simp]
/- Porting note: `simp` attribute removed as linter reports this can be proved
by `simp only [@map_zpow₀]` -/
theorem abs_zpow (z : ℂ) (n : ℤ) : Complex.abs (z ^ n) = Complex.abs z ^ n :=
map_zpow₀ Complex.abs z n
#align complex.abs_zpow Complex.abs_zpow
theorem abs_re_le_abs (z : ℂ) : |z.re| ≤ Complex.abs z :=
Real.abs_le_sqrt <| by
rw [normSq_apply, ← sq]
exact le_add_of_nonneg_right (mul_self_nonneg _)
#align complex.abs_re_le_abs Complex.abs_re_le_abs
theorem abs_im_le_abs (z : ℂ) : |z.im| ≤ Complex.abs z :=
Real.abs_le_sqrt <| by
rw [normSq_apply, ← sq, ← sq]
exact le_add_of_nonneg_left (sq_nonneg _)
#align complex.abs_im_le_abs Complex.abs_im_le_abs
theorem re_le_abs (z : ℂ) : z.re ≤ Complex.abs z :=
(abs_le.1 (abs_re_le_abs _)).2
#align complex.re_le_abs Complex.re_le_abs
theorem im_le_abs (z : ℂ) : z.im ≤ Complex.abs z :=
(abs_le.1 (abs_im_le_abs _)).2
#align complex.im_le_abs Complex.im_le_abs
@[simp]
theorem abs_re_lt_abs {z : ℂ} : |z.re| < Complex.abs z ↔ z.im ≠ 0 := by
rw [Complex.abs, AbsoluteValue.coe_mk, MulHom.coe_mk, Real.lt_sqrt (abs_nonneg _), normSq_apply,
_root_.sq_abs, ← sq, lt_add_iff_pos_right, mul_self_pos]
#align complex.abs_re_lt_abs Complex.abs_re_lt_abs
@[simp]
theorem abs_im_lt_abs {z : ℂ} : |z.im| < Complex.abs z ↔ z.re ≠ 0 := by
simpa using @abs_re_lt_abs (z * I)
#align complex.abs_im_lt_abs Complex.abs_im_lt_abs
@[simp]
theorem abs_abs (z : ℂ) : |Complex.abs z| = Complex.abs z :=
_root_.abs_of_nonneg (AbsoluteValue.nonneg _ z)
#align complex.abs_abs Complex.abs_abs
-- Porting note: probably should be golfed
theorem abs_le_abs_re_add_abs_im (z : ℂ) : Complex.abs z ≤ |z.re| + |z.im| := by
simpa [re_add_im] using Complex.abs.add_le z.re (z.im * I)
#align complex.abs_le_abs_re_add_abs_im Complex.abs_le_abs_re_add_abs_im
-- Porting note: added so `two_pos` in the next proof works
-- TODO: move somewhere else
instance : NeZero (1 : ℝ) :=
⟨by apply one_ne_zero⟩
theorem abs_le_sqrt_two_mul_max (z : ℂ) : Complex.abs z ≤ Real.sqrt 2 * max (|z.re|) (|z.im|) := by
cases' z with x y
simp only [abs_apply, normSq_mk, ← sq]
by_cases hle : |x| ≤ |y|
· calc
Real.sqrt (x ^ 2 + y ^ 2) ≤ Real.sqrt (y ^ 2 + y ^ 2) :=
Real.sqrt_le_sqrt (add_le_add_right (sq_le_sq.2 hle) _)
_ = Real.sqrt 2 * max (|x|) (|y|) := by
rw [max_eq_right hle, ← two_mul, Real.sqrt_mul two_pos.le, Real.sqrt_sq_eq_abs]
· have hle' := le_of_not_le hle
rw [add_comm]
calc
Real.sqrt (y ^ 2 + x ^ 2) ≤ Real.sqrt (x ^ 2 + x ^ 2) :=
Real.sqrt_le_sqrt (add_le_add_right (sq_le_sq.2 hle') _)
_ = Real.sqrt 2 * max (|x|) (|y|) := by
rw [max_eq_left hle', ← two_mul, Real.sqrt_mul two_pos.le, Real.sqrt_sq_eq_abs]
#align complex.abs_le_sqrt_two_mul_max Complex.abs_le_sqrt_two_mul_max
theorem abs_re_div_abs_le_one (z : ℂ) : |z.re / Complex.abs z| ≤ 1 :=
if hz : z = 0 then by simp [hz, zero_le_one]
else by simp_rw [_root_.abs_div, abs_abs,
div_le_iff (AbsoluteValue.pos Complex.abs hz), one_mul, abs_re_le_abs]
#align complex.abs_re_div_abs_le_one Complex.abs_re_div_abs_le_one
theorem abs_im_div_abs_le_one (z : ℂ) : |z.im / Complex.abs z| ≤ 1 :=
if hz : z = 0 then by simp [hz, zero_le_one]
else by simp_rw [_root_.abs_div, abs_abs,
div_le_iff (AbsoluteValue.pos Complex.abs hz), one_mul, abs_im_le_abs]
#align complex.abs_im_div_abs_le_one Complex.abs_im_div_abs_le_one
-- Porting note: removed `norm_cast` attribute because the RHS can't start with `↑`
@[simp]
theorem abs_cast_nat (n : ℕ) : Complex.abs (n : ℂ) = n := by
rw [← ofReal_nat_cast, abs_of_nonneg (Nat.cast_nonneg n)]
#align complex.abs_cast_nat Complex.abs_cast_nat
@[simp, norm_cast]
theorem int_cast_abs (n : ℤ) : (|↑n|) = Complex.abs n := by
rw [← ofReal_int_cast, abs_ofReal]
#align complex.int_cast_abs Complex.int_cast_abs
theorem normSq_eq_abs (x : ℂ) : normSq x = (Complex.abs x) ^ 2 := by
simp [abs, sq, abs_def, Real.mul_self_sqrt (normSq_nonneg _)]
#align complex.norm_sq_eq_abs Complex.normSq_eq_abs
/-- We put a partial order on ℂ so that `z ≤ w` exactly if `w - z` is real and nonnegative.
Complex numbers with different imaginary parts are incomparable.
-/
protected def partialOrder : PartialOrder ℂ where
le z w := z.re ≤ w.re ∧ z.im = w.im
lt z w := z.re < w.re ∧ z.im = w.im
lt_iff_le_not_le z w := by
dsimp
rw [lt_iff_le_not_le]
tauto
le_refl x := ⟨le_rfl, rfl⟩
le_trans x y z h₁ h₂ := ⟨h₁.1.trans h₂.1, h₁.2.trans h₂.2⟩
le_antisymm z w h₁ h₂ := ext (h₁.1.antisymm h₂.1) h₁.2
#align complex.partial_order Complex.partialOrder
namespace _root_.ComplexOrder
-- Porting note: made section into namespace to allow scoping
scoped[ComplexOrder] attribute [instance] Complex.partialOrder
end _root_.ComplexOrder
section ComplexOrder
open ComplexOrder
theorem le_def {z w : ℂ} : z ≤ w ↔ z.re ≤ w.re ∧ z.im = w.im :=
Iff.rfl
#align complex.le_def Complex.le_def
theorem lt_def {z w : ℂ} : z < w ↔ z.re < w.re ∧ z.im = w.im :=
Iff.rfl
#align complex.lt_def Complex.lt_def
@[simp, norm_cast]
theorem real_le_real {x y : ℝ} : (x : ℂ) ≤ (y : ℂ) ↔ x ≤ y := by simp [le_def, ofReal']
#align complex.real_le_real Complex.real_le_real
@[simp, norm_cast]
theorem real_lt_real {x y : ℝ} : (x : ℂ) < (y : ℂ) ↔ x < y := by simp [lt_def, ofReal']
#align complex.real_lt_real Complex.real_lt_real
@[simp, norm_cast]
theorem zero_le_real {x : ℝ} : (0 : ℂ) ≤ (x : ℂ) ↔ 0 ≤ x :=
real_le_real
#align complex.zero_le_real Complex.zero_le_real
@[simp, norm_cast]
theorem zero_lt_real {x : ℝ} : (0 : ℂ) < (x : ℂ) ↔ 0 < x :=
real_lt_real
#align complex.zero_lt_real Complex.zero_lt_real
theorem not_le_iff {z w : ℂ} : ¬z ≤ w ↔ w.re < z.re ∨ z.im ≠ w.im := by
rw [le_def, not_and_or, not_le]
#align complex.not_le_iff Complex.not_le_iff
theorem not_lt_iff {z w : ℂ} : ¬z < w ↔ w.re ≤ z.re ∨ z.im ≠ w.im := by
rw [lt_def, not_and_or, not_lt]
#align complex.not_lt_iff Complex.not_lt_iff
theorem not_le_zero_iff {z : ℂ} : ¬z ≤ 0 ↔ 0 < z.re ∨ z.im ≠ 0 :=
not_le_iff
#align complex.not_le_zero_iff Complex.not_le_zero_iff
theorem not_lt_zero_iff {z : ℂ} : ¬z < 0 ↔ 0 ≤ z.re ∨ z.im ≠ 0 :=
not_lt_iff
#align complex.not_lt_zero_iff Complex.not_lt_zero_iff
theorem eq_re_ofReal_le {r : ℝ} {z : ℂ} (hz : (r : ℂ) ≤ z) : z = z.re := by
ext
rfl
simp only [← (Complex.le_def.1 hz).2, Complex.zero_im, Complex.ofReal_im]
#align complex.eq_re_of_real_le Complex.eq_re_ofReal_le
/-- With `z ≤ w` iff `w - z` is real and nonnegative, `ℂ` is a strictly ordered ring.
-/
protected def strictOrderedCommRing : StrictOrderedCommRing ℂ :=
{ zero_le_one := ⟨zero_le_one, rfl⟩
add_le_add_left := fun w z h y => ⟨add_le_add_left h.1 _, congr_arg₂ (· + ·) rfl h.2⟩
mul_pos := fun z w hz hw => by
simp [lt_def, mul_re, mul_im, ← hz.2, ← hw.2, mul_pos hz.1 hw.1]
mul_comm := by intros; ext <;> ring_nf }
#align complex.strict_ordered_comm_ring Complex.strictOrderedCommRing
scoped[ComplexOrder] attribute [instance] Complex.strictOrderedCommRing
/-- With `z ≤ w` iff `w - z` is real and nonnegative, `ℂ` is a star ordered ring.
(That is, a star ring in which the nonnegative elements are those of the form `star z * z`.)
-/
protected def starOrderedRing : StarOrderedRing ℂ :=
{ nonneg_iff := fun r => by
refine' ⟨fun hr => ⟨Real.sqrt r.re, _⟩, fun h => _⟩
· have h₁ : 0 ≤ r.re := by
rw [le_def] at hr
exact hr.1
have h₂ : r.im = 0 := by
rw [le_def] at hr
exact hr.2.symm
ext
· simp only [ofReal_im, star_def, ofReal_re, sub_zero, conj_re, mul_re, mul_zero, ←
Real.sqrt_mul h₁ r.re, Real.sqrt_mul_self h₁]
· simp only [h₂, add_zero, ofReal_im, star_def, zero_mul, conj_im, mul_im, mul_zero,
neg_zero]
· obtain ⟨s, rfl⟩ := h
simp only [← normSq_eq_conj_mul_self, normSq_nonneg, zero_le_real, star_def]
add_le_add_left := by intros; simp [le_def] at *; assumption }
#align complex.star_ordered_ring Complex.starOrderedRing
scoped[ComplexOrder] attribute [instance] Complex.starOrderedRing
end ComplexOrder
/-! ### Cauchy sequences -/
local notation "abs'" => Abs.abs
theorem isCauSeq_re (f : CauSeq ℂ Complex.abs) : IsCauSeq abs' fun n => (f n).re := fun ε ε0 =>
(f.cauchy ε0).imp fun i H j ij =>
lt_of_le_of_lt (by simpa using abs_re_le_abs (f j - f i)) (H _ ij)
#align complex.is_cau_seq_re Complex.isCauSeq_re
theorem isCauSeq_im (f : CauSeq ℂ Complex.abs) : IsCauSeq abs' fun n => (f n).im := fun ε ε0 =>
(f.cauchy ε0).imp fun i H j ij =>
lt_of_le_of_lt (by simpa using abs_im_le_abs (f j - f i)) (H _ ij)
#align complex.is_cau_seq_im Complex.isCauSeq_im
/-- The real part of a complex Cauchy sequence, as a real Cauchy sequence. -/
noncomputable def cauSeqRe (f : CauSeq ℂ Complex.abs) : CauSeq ℝ abs' :=
⟨_, isCauSeq_re f⟩
#align complex.cau_seq_re Complex.cauSeqRe
/-- The imaginary part of a complex Cauchy sequence, as a real Cauchy sequence. -/
noncomputable def cauSeqIm (f : CauSeq ℂ Complex.abs) : CauSeq ℝ abs' :=
⟨_, isCauSeq_im f⟩
#align complex.cau_seq_im Complex.cauSeqIm
theorem isCauSeq_abs {f : ℕ → ℂ} (hf : IsCauSeq Complex.abs f) :
IsCauSeq abs' (Complex.abs ∘ f) := fun ε ε0 =>
let ⟨i, hi⟩ := hf ε ε0
⟨i, fun j hj => lt_of_le_of_lt
(Complex.abs.abs_abv_sub_le_abv_sub _ _) (hi j hj)⟩
#align complex.is_cau_seq_abs Complex.isCauSeq_abs
/-- The limit of a Cauchy sequence of complex numbers. -/
noncomputable def limAux (f : CauSeq ℂ Complex.abs) : ℂ :=
⟨CauSeq.lim (cauSeqRe f), CauSeq.lim (cauSeqIm f)⟩
#align complex.lim_aux Complex.limAux
theorem equiv_limAux (f : CauSeq ℂ Complex.abs) :
f ≈ CauSeq.const Complex.abs (limAux f) := fun ε ε0 =>
(exists_forall_ge_and
(CauSeq.equiv_lim ⟨_, isCauSeq_re f⟩ _ (half_pos ε0))
(CauSeq.equiv_lim ⟨_, isCauSeq_im f⟩ _ (half_pos ε0))).imp
fun i H j ij => by
cases' H _ ij with H₁ H₂
apply lt_of_le_of_lt (abs_le_abs_re_add_abs_im _)
dsimp [limAux] at *
have := add_lt_add H₁ H₂
rwa [add_halves] at this
#align complex.equiv_lim_aux Complex.equiv_limAux
instance : CauSeq.IsComplete ℂ Complex.abs :=
⟨fun f => ⟨limAux f, equiv_limAux f⟩⟩
open CauSeq
theorem lim_eq_lim_im_add_lim_re (f : CauSeq ℂ Complex.abs) :
lim f = ↑(lim (cauSeqRe f)) + ↑(lim (cauSeqIm f)) * I :=
lim_eq_of_equiv_const <|
calc
f ≈ _ := equiv_limAux f
_ = CauSeq.const Complex.abs (↑(lim (cauSeqRe f)) + ↑(lim (cauSeqIm f)) * I) :=
CauSeq.ext fun _ =>
Complex.ext (by simp [limAux, cauSeqRe, ofReal']) (by simp [limAux, cauSeqIm, ofReal'])
#align complex.lim_eq_lim_im_add_lim_re Complex.lim_eq_lim_im_add_lim_re
theorem lim_re (f : CauSeq ℂ Complex.abs) : lim (cauSeqRe f) = (lim f).re := by
rw [lim_eq_lim_im_add_lim_re] ; simp [ofReal']
#align complex.lim_re Complex.lim_re
theorem lim_im (f : CauSeq ℂ Complex.abs) : lim (cauSeqIm f) = (lim f).im := by
rw [lim_eq_lim_im_add_lim_re] ; simp [ofReal']
#align complex.lim_im Complex.lim_im
theorem isCauSeq_conj (f : CauSeq ℂ Complex.abs) :
IsCauSeq Complex.abs fun n => conj (f n) := fun ε ε0 =>
let ⟨i, hi⟩ := f.2 ε ε0
⟨i, fun j hj => by
rw [← RingHom.map_sub, abs_conj] ; exact hi j hj⟩
#align complex.is_cau_seq_conj Complex.isCauSeq_conj
/-- The complex conjugate of a complex Cauchy sequence, as a complex Cauchy sequence. -/
noncomputable def cauSeqConj (f : CauSeq ℂ Complex.abs) : CauSeq ℂ Complex.abs :=
⟨_, isCauSeq_conj f⟩
#align complex.cau_seq_conj Complex.cauSeqConj
theorem lim_conj (f : CauSeq ℂ Complex.abs) : lim (cauSeqConj f) = conj (lim f) :=
Complex.ext (by simp [cauSeqConj, (lim_re _).symm, cauSeqRe])
(by simp [cauSeqConj, (lim_im _).symm, cauSeqIm, (lim_neg _).symm] ; rfl)
#align complex.lim_conj Complex.lim_conj
/-- The absolute value of a complex Cauchy sequence, as a real Cauchy sequence. -/
noncomputable def cauSeqAbs (f : CauSeq ℂ Complex.abs) : CauSeq ℝ abs' :=
⟨_, isCauSeq_abs f.2⟩
#align complex.cau_seq_abs Complex.cauSeqAbs
theorem lim_abs (f : CauSeq ℂ Complex.abs) : lim (cauSeqAbs f) = Complex.abs (lim f) :=
lim_eq_of_equiv_const fun ε ε0 =>
let ⟨i, hi⟩ := equiv_lim f ε ε0
⟨i, fun j hj => lt_of_le_of_lt (Complex.abs.abs_abv_sub_le_abv_sub _ _) (hi j hj)⟩
#align complex.lim_abs Complex.lim_abs
variable {α : Type _} (s : Finset α)
@[simp, norm_cast]
theorem ofReal_prod (f : α → ℝ) : ((∏ i in s, f i : ℝ) : ℂ) = ∏ i in s, (f i : ℂ) :=
map_prod ofReal _ _
#align complex.of_real_prod Complex.ofReal_prod
@[simp, norm_cast]
theorem ofReal_sum (f : α → ℝ) : ((∑ i in s, f i : ℝ) : ℂ) = ∑ i in s, (f i : ℂ) :=
map_sum ofReal _ _
#align complex.of_real_sum Complex.ofReal_sum
@[simp]
theorem re_sum (f : α → ℂ) : (∑ i in s, f i).re = ∑ i in s, (f i).re :=
reAddGroupHom.map_sum f s
#align complex.re_sum Complex.re_sum
@[simp]
theorem im_sum (f : α → ℂ) : (∑ i in s, f i).im = ∑ i in s, (f i).im :=
imAddGroupHom.map_sum f s
#align complex.im_sum Complex.im_sum
end Complex
|
/*
Copyright (C) 2016 Quaternion Risk Management Ltd
All rights reserved.
This file is part of ORE, a free-software/open-source library
for transparent pricing and risk analysis - http://opensourcerisk.org
ORE is free software: you can redistribute it and/or modify it
under the terms of the Modified BSD License. You should have received a
copy of the license along with this program.
The license is also available online at <http://opensourcerisk.org>
This program is distributed on the basis that it will form a useful
contribution to risk analytics and model standardisation, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the license for more details.
*/
#include "toplevelfixture.hpp"
#include <boost/test/unit_test.hpp>
#include <qle/termstructures/flatcorrelation.hpp>
#include <qle/termstructures/interpolatedcorrelationcurve.hpp>
#include <ql/quotes/simplequote.hpp>
#include <ql/time/calendars/nullcalendar.hpp>
#include <boost/make_shared.hpp>
using namespace QuantExt;
using namespace QuantLib;
using namespace boost::unit_test_framework;
BOOST_FIXTURE_TEST_SUITE(QuantExtTestSuite, qle::test::TopLevelFixture)
BOOST_AUTO_TEST_SUITE(CorrelationTermstructureTest)
BOOST_AUTO_TEST_CASE(testFlatCorrelation) {
boost::shared_ptr<SimpleQuote> q = boost::make_shared<SimpleQuote>(0.02);
Handle<Quote> hq(q);
Handle<FlatCorrelation> flatCorr(boost::make_shared<FlatCorrelation>(0, NullCalendar(), hq, Actual365Fixed()));
// check we get the expected quote value
BOOST_CHECK_MESSAGE(flatCorr->correlation(1) == 0.02, "unexpected correlation value: " << flatCorr->correlation(1));
// move market data
q->setValue(0.03);
BOOST_CHECK_MESSAGE(flatCorr->correlation(1) == 0.03, "unexpected correlation value: " << flatCorr->correlation(1));
// check failures
q->setValue(-1.1);
BOOST_CHECK_THROW(flatCorr->correlation(1), QuantLib::Error);
q->setValue(1.1);
BOOST_CHECK_THROW(flatCorr->correlation(1), QuantLib::Error);
}
BOOST_AUTO_TEST_CASE(testInterpolatedCorrelationCurve) {
// build interpolated correlation curve
std::vector<Time> times;
std::vector<boost::shared_ptr<SimpleQuote> > simpleQuotes;
std::vector<Handle<Quote> > quotes;
Size numYears = 10;
for (Size i = 1; i < numYears; i++) {
Real corr = 0.1;
simpleQuotes.push_back(boost::make_shared<SimpleQuote>(corr));
quotes.push_back(Handle<Quote>(simpleQuotes.back()));
times.push_back(static_cast<Time>(i));
}
Handle<PiecewiseLinearCorrelationCurve> interpCorr(
boost::make_shared<PiecewiseLinearCorrelationCurve>(times, quotes, Actual365Fixed(), NullCalendar()));
Time t = 1;
while (t < numYears) {
BOOST_CHECK_MESSAGE(interpCorr->correlation(t) == 0.1,
"unexpected correlation value: " << interpCorr->correlation(t));
t += 0.5;
}
// Now check quotes update
for (Size i = 0; i < simpleQuotes.size(); ++i) {
simpleQuotes[i]->setValue(1);
}
t = 1;
while (t < numYears) {
BOOST_CHECK_MESSAGE(interpCorr->correlation(t) == 1,
"unexpected correlation value: " << interpCorr->correlation(t));
t += 0.5;
}
// Now check interpolation
for (Size i = 0; i < simpleQuotes.size(); ++i) {
simpleQuotes[i]->setValue(0.1 + 0.01 * i);
}
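    // With quotes 0.1 + 0.01 * i at t = 1, ..., 9, linear interpolation gives
    // e.g. 0.105 at t = 1.5; the check at t = 11, past the last pillar,
    // exercises flat extrapolation at the final quote, 0.18.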
Real tol = 1.0E-8;
BOOST_CHECK_CLOSE(interpCorr->correlation(1.5), 0.105, tol);
BOOST_CHECK_CLOSE(interpCorr->correlation(2.5), 0.115, tol);
BOOST_CHECK_CLOSE(interpCorr->correlation(3.5), 0.125, tol);
BOOST_CHECK_CLOSE(interpCorr->correlation(11), 0.18, tol);
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END()
|
State Before:
α : Sort u_1
p : α → Prop
x✝¹ : ∃! a, p a
a : α
ha : (fun a => p a) a
he : ∀ (y : α), (fun a => p a) y → y = a
x✝ : Subtype p
b : α
hb : p b
⊢ { val := b, property := hb } = default
State After:
case e_val
α : Sort u_1
p : α → Prop
x✝¹ : ∃! a, p a
a : α
ha : (fun a => p a) a
he : ∀ (y : α), (fun a => p a) y → y = a
x✝ : Subtype p
b : α
hb : p b
⊢ b = a
Tactic: congr
State Before:
case e_val
α : Sort u_1
p : α → Prop
x✝¹ : ∃! a, p a
a : α
ha : (fun a => p a) a
he : ∀ (y : α), (fun a => p a) y → y = a
x✝ : Subtype p
b : α
hb : p b
⊢ b = a
State After: no goals
Tactic: exact he b hb
|
The function $x \mapsto xy$ is bounded linear.
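Spelled out (assuming, as is usual for this statement, that the ambient space is a real normed algebra): for fixed $y$ the map $f(x) = xy$ satisfies $f(x_1 + x_2) = f(x_1) + f(x_2)$ and $f(c\,x) = c\,f(x)$, and it is bounded because $\|f(x)\| = \|xy\| \le \|x\|\,\|y\|$, i.e. with constant $\|y\|$.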
|
Formal statement is: lemma Lim_null_comparison: fixes f :: "'a \<Rightarrow> 'b::real_normed_vector" assumes "eventually (\<lambda>x. norm (f x) \<le> g x) net" "(g \<longlongrightarrow> 0) net" shows "(f \<longlongrightarrow> 0) net"
Informal statement is: If $f$ is eventually bounded by $g$ and $g$ converges to $0$, then $f$ converges to $0$.
|
Require Import RndHoare.sigma_algebra.
Require Import RndHoare.measurable_function.
Require Import RndHoare.regular_conditional_prob.
Require Import RndHoare.random_oracle.
Require Import RndHoare.random_variable.
Require Import RndHoare.meta_state.
Require Import RndHoare.probabilistic_pred.
Require Import RndHoare.imperative.
Import Randomized.
Module Normal.
Class Imperative : Type := {
acmd: Type;
expr: Type
}.
Inductive cmd {imp: Imperative}: Type :=
| Sifthenelse: expr -> cmd -> cmd -> cmd
| Swhile: expr -> cmd -> cmd
| Satomic: acmd -> cmd
| Ssequence: cmd -> cmd -> cmd
| Sskip: cmd.
Class SmallStepSemantics {imp: Imperative} : Type := {
state: Type;
state_sig: SigmaAlgebra state;
cmd_state_sig := (left_discreste_prod_sigma_alg cmd state);
ora: RandomOracle;
SFo: SigmaAlgebraFamily RandomHistory;
HBSFo: HistoryBasedSigF ora;
eval_bool: state -> expr -> option bool;
atomic_step: forall c: acmd, state -> forall {Omega: RandomVarDomain}, ProgState Omega state -> Prop
}.
Existing Instances state_sig cmd_state_sig ora SFo HBSFo.
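(* Small-step relation: a command/state pair steps to a (possibly branching)
   probabilistic program state over some random-variable domain; only the
   atomic rule may branch, the remaining rules yield non-branching states. *)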
Inductive step {imp: Imperative} {sss: SmallStepSemantics}: cmd * state -> forall {Omega: RandomVarDomain}, ProgState Omega (cmd * state)%type -> Prop :=
| step_atomic: forall (ac: acmd) (s: state) (Omega: RandomVarDomain) (cs: ProgState Omega state),
atomic_step ac s cs -> step (Satomic ac, s) (ProgState_pair_left Sskip cs)
| step_if_true: forall e c1 c2 s, eval_bool s e = Some true -> step (Sifthenelse e c1 c2, s) (non_branch_tstate (c1, s))
| step_if_false: forall e c1 c2 s, eval_bool s e = Some false -> step (Sifthenelse e c1 c2, s) (non_branch_tstate (c2, s))
| step_while_true: forall e c s, eval_bool s e = Some true -> step (Swhile e c, s) (non_branch_tstate (Ssequence c (Swhile e c), s))
| step_while_false: forall e c s, eval_bool s e = Some false -> step (Swhile e c, s) (non_branch_tstate (Sskip, s))
| step_skip: forall c s, step (Ssequence Sskip c, s) (non_branch_tstate (c, s)).
End Normal.
Instance normal_imp {Nimp: Normal.Imperative}: Imperative := Build_Imperative Normal.cmd Normal.expr Normal.Sskip Normal.Ssequence.
Instance normal_sss {Nimp: Normal.Imperative} {Nsss: Normal.SmallStepSemantics}: SmallStepSemantics := Build_SmallStepSemantics normal_imp Normal.state Normal.state_sig Normal.ora Normal.SFo Normal.HBSFo Normal.step.
Section HoareSound.
Context {Nimp: Normal.Imperative} {Nsss: Normal.SmallStepSemantics}.
Lemma Consequence: forall P P' Q Q' c, derives P' P -> derives Q Q' -> triple P c Q -> triple P' c Q'.
Proof.
intros.
unfold triple in *.
intros.
specialize (H _ s1); simpl in H.
specialize (H0 _ s2); simpl in H0.
specialize (H1 o1 s1 (H H2) o2 s2 H3).
destruct H1; auto.
Qed.
Lemma Sequence: forall P Q R c1 c2, triple P c1 Q -> triple Q c2 R -> triple P (Ssequence c1 c2) R.
Proof.
intros ? ? ? ? ? TRIPLE1 TRIPLE2.
unfold triple in *; intros.
destruct H0 as [path [? ?]].
Abort.
End HoareSound.
|
The second phase of building work undertaken by the Hospitallers began in the early 13th century and lasted decades. The outer walls were built in the last major construction on the site, lending the Krak des Chevaliers its current appearance. Standing 9 metres (30 ft) high, the outer circuit had towers that projected strongly from the wall. While the towers of the inner court had a square plan and did not project far beyond the wall, the towers of the 13th-century outer walls were rounded. This design was new and even contemporary Templar castles did not have rounded towers. The technique was developed at Château Gaillard in France by Richard the Lionheart between 1196 and 1198. The extension to the southeast is of lesser quality than the rest of the circuit and was built at an unknown date. Probably around the 1250s a postern was added to the north wall.
|
function [fathom] = ft2fathom(ft)
% Convert length from feet to fathoms.
% Chad A. Greene 2012
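% 1 fathom = 6 feet, so the factor below is 1/6.
% Example: ft2fathom(6) returns ~1.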
fathom = ft*0.1666666666667;
|
import os
import pandas as pd
import pprint
import pickle
import numpy as np
from sklearn import metrics
import argparse
import logging
import itertools
import copy
DATA_PREFIX = ['train', 'val', 'test']
AGG_METRIC = ['mean', 'std', 'min', 'max']
def load_data(base_dir, folds):
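    """Load the ground-truth labels (the last pickle column) of every fold,
    grouped by split ('train'/'val'/'test')."""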
data = {'train': [], 'val': [], 'test': []}
for i in range(folds):
for el in DATA_PREFIX:
ys = pickle.load(
open(os.path.join(base_dir, el+'_'+str(i)+".pkl"), 'rb'))[:, -1]
data[el].append(ys)
return data
def check_data(base_dir, folds):
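    """Return True iff every <split>_<fold>.pkl file exists under base_dir."""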
for i in range(folds):
for el in DATA_PREFIX:
if not os.path.isfile(os.path.join(base_dir, el+'_'+str(i)+".pkl")):
logging.error("File not present at {}".format(
os.path.join(base_dir, el+'_'+str(i)+".pkl")))
return False
return True
"""
def make_path(arch, kl, neg_r, rho):
# exp_c-configs.fb15k_config_7_4.yml_k-0_n--0.5_r-0.01
#return "exp_c-{}_k-{}_n-{}_r-{}".format(arch, kl, neg_r, rho)
return "exp_c-{}_k-{}_n-{}_r-{}".format(arch, kl, neg_r, rho)
def get_params(directory):
exps = os.listdir(directory)
neg_rewards = set()
rhos = set()
kls = set()
archs = set()
ex = set()
for exp in exps:
if os.path.isdir(os.path.join(directory, exp)):
delim1 = exp.find("_c-")
delim2 = exp.find("_k-")
delim3 = exp.find("_n-")
delim4 = exp.find("_r-")
neg_r = exp[delim3+3:delim4]
rho = exp[delim4+3:]
kl = exp[delim2+3:delim3]
arch = exp[delim1+3:delim2]
neg_rewards.add(neg_r)
rhos.add(rho)
kls.add(kl)
archs.add(arch)
return (list(sorted(neg_rewards, key=float)), list(sorted(rhos, key=float)), list(sorted(kls, key=float)), list(sorted(archs, key=float)))
"""
def get_string(x):
return str(x).replace('/','.').replace(' ','.')
def get_params():
return get_params2()
def get_params2():
#COPIED FROM CREATE_MULTINODE_JOBS.PY
neg_reward = [-1, -2]
rho = [0.1, 0.125]
config = ['configs/fb15k_config.yml']
kldiv_lambda = [0, 1]
exclude_t_ids = ['2 5']
hidden_unit_list = ['90 40','7 5 5 3']
default_value = [0, -0.05, -0.1]
#
names = ['neg_reward','rho','kldiv_lambda','config','exclude_t_ids','hidden_unit_list','default_value']
all_params = [neg_reward,rho, kldiv_lambda, config,exclude_t_ids,hidden_unit_list,default_value]
short_names = ['n','r','k','c','ex','hul','df']
assert len(names) == len(all_params)
assert len(all_params) == len(short_names)
timing_key = 'hidden_unit_list'
timing = [10]*len(hidden_unit_list)
#assert(len(globals()[timing_key]) == len(timing))
assert len(all_params[names.index(timing_key)]) == len(timing),'len of timing should be same as len of timing_key param'
timing_dict = dict(zip(all_params[names.index(timing_key)],timing))
all_jobs = list(itertools.product(*all_params))
additional_names = ['train_ml','eval_ml']
additional_job_list = [
[0,0],
[1,1]
]
names = names + additional_names
additional_short_names = ['tml','eml']
short_names = short_names + additional_short_names
assert len(names) == len(short_names)
name2short = dict(zip(names,short_names))
all_jobs = list(itertools.product(all_jobs,additional_job_list))
sorted_names = copy.deepcopy(names)
sorted_names.sort()
#### WRITTEN AGAIN with MODIFICATION
for i,key in enumerate(additional_names):
all_params.append([x[i] for x in additional_job_list])
#
name2list = dict(zip(names,all_params))
for key in sorted_names:
name2list[key] = [ get_string(x) for x in name2list[key]]
all_settings = {}
for i, setting in enumerate(all_jobs):
setting = list(itertools.chain(*setting))
name_setting = {n: get_string(s) for n, s in zip(names, setting)}
log_str = '_'.join(['%s-%s' % (name2short[n], name_setting[n]) for n in sorted_names])
all_settings[log_str] = name_setting
#return all_settings, name2list, high level params, rows_cols
return all_settings, name2list,['config','exclude_t_ids','kldiv_lambda','train_ml','eval_ml','default_value','hidden_unit_list'],['rho','neg_reward']
def get_params1():
#COPY FROM CREATE_MULTINODE_JOBS.PY
neg_reward = [-1, -2]
rho = [0.125]
config = ['configs/fb15k_config_90_40.yml']
kldiv_lambda = [0, 1]
exclude_t_ids = ['2 5',None]
#exclude_t_ids = [None]
names = ['neg_reward','rho','kldiv_lambda','config','exclude_t_ids']
all_params = [neg_reward,rho, kldiv_lambda, config,exclude_t_ids]
short_names = ['n','r','k','c','ex']
name2list = dict(zip(names,all_params))
name2short = dict(zip(names,short_names))
sorted_names = copy.deepcopy(names)
sorted_names.sort()
for key in sorted_names:
name2list[key] = [ get_string(x) for x in name2list[key]]
sorted_names = copy.deepcopy(names)
all_settings = {}
sorted_names.sort()
all_jobs = list(itertools.product(*all_params))
for i, setting in enumerate(all_jobs):
setting = list(setting)
name_setting = {n: get_string(s) for n, s in zip(names, setting)}
log_str = '_'.join(['%s-%s' % (name2short[n], name_setting[n]) for n in sorted_names])
all_settings[log_str] = name_setting
#return all_settings, name2list, high level params, rows_cols
return all_settings, name2list,['config','kldiv_lambda','rho'],['exclude_t_ids','neg_reward']
def check_exp(directory, data, folds):
return check_exp2(directory, data, folds)
def check_exp2(directory, data, folds):
    # Every split's prediction file must be present; fail on the first missing one.
    for el in DATA_PREFIX[1:]:
        preds_fname = os.path.join(
            directory, '{}_ml_{}'.format(el, args.eval_ml))
        if not os.path.isfile(preds_fname):
            logging.error("File {} does not exist".format(preds_fname))
            return False
    return True
def check_exp1(directory, data, folds):
for i in range(folds):
for el in DATA_PREFIX[1:]:
if el == 'val':
preds_fname = os.path.join(
directory, 'exp_'+str(i), 'val_pred_ml_{}.txt'.format(args.eval_ml))
else:
preds_fname = os.path.join(
directory, 'exp_'+str(i), 'test_pred_ml_{}.txt'.format(args.eval_ml))
if os.path.isfile(preds_fname):
pred_data = np.loadtxt(preds_fname).tolist()
if len(data[el][i]) != len(pred_data):
logging.error(
"Length of {} does not match with ground truth data".format(preds_fname))
return False
else:
logging.error("File {} does not exist".format(preds_fname))
return False
return True
def begin_checks(base_dir, folds, runs):
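    """Check the ground-truth data and every experiment directory across all
    runs; return (ok, list_of_invalid_experiment_dirs)."""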
data_present = check_data(base_dir, folds)
if not data_present:
logging.error("Data Check failed")
return (False, [])
data = load_data(base_dir, folds)
logging.info("Data Check Passed")
#cols, rows, kls, archs = get_params(os.path.join(base_dir, "run_1"))
all_settings, name2list,exp_classes, rows_cols = get_params()
invalid_exps = []
for run in range(1, runs+1):
for this_setting in all_settings:
directory = os.path.join(base_dir, "run_"+str(run), 'exp_'+this_setting)
if not check_exp(directory, data, folds):
invalid_exps.append(directory)
#
if (len(invalid_exps) == 0):
logging.info("All experiments are ok")
return (True, [])
else:
logging.error("Following experiments have error\n{}".format(
str(pprint.pformat(invalid_exps))))
logging.error("Length is {}".format(len(invalid_exps)))
return (False, invalid_exps)
def write_invalid(invalid_exps, fname):
if fname is not None:
with open(fname, 'w') as f:
for exp in invalid_exps:
f.write('/'.join(exp.split('/')[-2:])+'\n')
logging.error("Written invalid experiments to {}".format(fname))
def calc_exp(directory, data, folds):
return calc_exp_read(directory, data,folds)
def calc_exp_read(directory, data, folds):
#directory/test_ml_0
#directory/test_ml_1
#directory/val_ml_0
#directory/val_ml_1
rv = []
count = []
for el in DATA_PREFIX[1:]:
fname = os.path.join(directory, '{}_ml_{}'.format(el, args.eval_ml))
f = pd.read_csv(fname,header=None).to_numpy()
#assert f.shape[0] == 5
if f.shape[0] != folds:
print("Num folds: {}. #folds present: {}. ASSUMING 0 performance for them".format(folds, f.shape[0]))
rv.append(f[:,-1].sum()/folds)
count.append(f.shape[0])
#
return (rv[0], rv[1], count[0], count[1])
def calc_exp_compute(directory, data, folds):
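    """Pool predictions across all folds and compute micro-averaged F1
    (restricted to the template ids in args.t_ids) on val and test."""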
predictions = {'val': [], 'test': []}
true = {'val': [], 'test': []}
logging.info("Calculating results for experiment {}".format(directory))
for i in range(folds):
for el in DATA_PREFIX[1:]:
if el == 'val':
preds_fname = os.path.join(
directory, 'exp_'+str(i), 'valid_preds.txt')
else:
preds_fname = os.path.join(
directory, 'exp_'+str(i), 'test_preds.txt')
pred_data = np.loadtxt(preds_fname).tolist()
predictions[el].extend(pred_data)
true[el].extend(data[el][i])
mif_val = metrics.f1_score(true['val'], predictions['val'], labels = args.t_ids, average='micro')
mif_test = metrics.f1_score(true['test'], predictions['test'], labels= args.t_ids, average='micro')
return (mif_val, mif_test)
"""
def calc_run_results(directory, rows, cols, KLs, Archs, data, folds):
results_one_run = {'neg_re': [], 'rho': [], 'val': [], 'test': [], 'kl':[], 'arch':[]}
for col in cols:
for row in rows:
for kl in KLs:
for arch in Archs:
exp_directory = os.path.join(directory, make_path(arch, kl, col, row))
val, test = calc_exp(exp_directory, data, folds)
results_one_run['neg_re'].append(col)
results_one_run['rho'].append(row)
results_one_run['val'].append(val)
results_one_run['test'].append(test)
results_one_run['kl'].append(kl)
results_one_run['arch'].append(arch)
df = pd.DataFrame(data=results_one_run)
return df
"""
def calc_run_results(directory, all_settings, name2list, data, folds):
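    """Collect val/test metrics for every hyper-parameter setting of one run
    into a DataFrame; also return the list of hyper-parameter column names."""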
results_one_run = None
param_keys = None
for key in all_settings:
this_setting = all_settings[key]
exp_directory = os.path.join(directory,'exp_'+key)
val, test,val_count, test_count = calc_exp(exp_directory, data, folds)
if results_one_run is None:
results_one_run = {}
param_keys = list(this_setting.keys())
for this_param in this_setting:
results_one_run[this_param] = [this_setting[this_param]]
#
results_one_run['val'] = [val]
results_one_run['test'] = [test]
results_one_run['val_count'] = [val_count]
results_one_run['test_count'] = [test_count]
else:
for this_param in this_setting:
results_one_run[this_param].append(this_setting[this_param])
#
results_one_run['val'].append(val)
results_one_run['test'].append(test)
results_one_run['val_count'].append(val_count)
results_one_run['test_count'].append(test_count)
df = pd.DataFrame(data=results_one_run)
return df, param_keys
def write_to_file(table, header, f):
f.write(header+'\n')
table.to_csv(f, float_format='%.4f')
f.write('\n')
if __name__ == "__main__":
logging.basicConfig(format='%(levelname)s :: %(asctime)s - %(message)s',
level=logging.INFO, datefmt='%d/%m/%Y %I:%M:%S %p')
parser = argparse.ArgumentParser()
parser.add_argument(
'-d', '--dir', help="Path of the base directory", required=True)
parser.add_argument('--folds', type=int, default=5, required=True)
parser.add_argument('--eval_ml', type=int, default=0, required=True, help='collate results of multi label evaluation?' )
parser.add_argument('--runs', type=int, default=5, required=True)
parser.add_argument(
'--ifile', help='File to write the experiment names which failed', type=str, default=None)
parser.add_argument('--t_ids', nargs='+', type=int, required=False,default=[1,2,3,4,5,6], help='List of templates')
args = parser.parse_args()
ok, invalid_exps = begin_checks(args.dir, args.folds, args.runs)
if not ok and len(invalid_exps) > 0:
write_invalid(invalid_exps, args.ifile)
exit(0)
logging.info("All Checks passed")
#cols, rows, KLs, Archs = get_params(os.path.join(args.dir, "run_1"))
#all_settings, name2list = get_params()
all_settings, name2list,exp_classes, rows_cols = get_params()
data = load_data(args.dir, args.folds)
all_run_results = []
for run in range(1, args.runs+1):
logging.info("Calculating results for run {}".format(run))
df,param_keys = calc_run_results(os.path.join(args.dir, "run_"+str(run)), all_settings, name2list, data, args.folds)
df['run_id'] = run
#df = calc_run_results(os.path.join(
# args.dir, "run_"+str(run)), rows, cols, KLs, Archs data, args.folds)
all_run_results.append(df)
final_result = pd.concat(all_run_results)
final_result = final_result.reset_index(drop=True)
agg_table = final_result.groupby(param_keys).agg({DATA_PREFIX[1]:AGG_METRIC, DATA_PREFIX[2]: AGG_METRIC, DATA_PREFIX[1]+'_count': 'sum', DATA_PREFIX[2]+'_count': 'sum'})
#agg_table.columns = [d+"_"+a for d,
# a in itertools.product(DATA_PREFIX[1:], AGG_METRIC)]
agg_table.columns = ['_'.join(x) for x in agg_table.columns.to_flat_index()]
agg_table = agg_table.reset_index()
agg_table.to_csv(os.path.join(args.dir, 'all_performance.csv'))
fh = open(os.path.join(args.dir, 'summary.csv'), 'w')
for this_param in itertools.product(*([name2list[x] for x in exp_classes] + [DATA_PREFIX[1:], AGG_METRIC])):
d = this_param[-2]
a = this_param[-1]
selection = None
for ind in range(len(exp_classes)):
this_selection= agg_table[exp_classes[ind]] == this_param[ind]
if selection is None:
selection = this_selection
else:
selection = selection & this_selection
#
tbl = agg_table[selection].pivot_table(columns = rows_cols[0], index = rows_cols[1], values = d+'_'+a, fill_value = -1)
print(tbl)
header = ' '.join(list(map(lambda x: x[0] + '_' + str(x[1]), zip(exp_classes + ['',''], this_param))))
write_to_file(tbl, header, fh)
"""
for d, a, kl, arch in itertools.product(DATA_PREFIX[1:], AGG_METRIC, KLs, Archs):
tbl = agg_table[agg_table["kl"]==kl & agg_table["arch"]==arch].pivot_table(
columns='neg_re', index='rho', values=d+'_'+a, fill_value=-1)
print(tbl)
write_to_file(tbl, (d+'_'+a+'_'+kl+'_'+arch).upper(), fh)
"""
logging.info("Written results to {}".format(
os.path.join(args.dir, 'summary.csv')))
|
{-# OPTIONS --without-K --safe #-}
module Math.Combinatorics.Function.Properties.Lemma where
open import Data.Unit using (tt)
open import Data.Product
open import Data.Sum
open import Data.Nat
open import Data.Nat.Properties
open import Data.Nat.DivMod
open import Data.Nat.Solver using (module +-*-Solver)
open import Relation.Binary
open import Relation.Binary.PropositionalEquality
open import Relation.Nullary
open import Relation.Nullary.Decidable
open import Function
open import Algebra.FunctionProperties
open ≤-Reasoning
1≤n⇒n≢0 : ∀ {n} → 1 ≤ n → n ≢ 0
1≤n⇒n≢0 .{suc _} (s≤s z≤n) ()
-- TODO: use m<n⇒0<n∸m
m<n⇒n∸m≢0 : ∀ {m n} → m < n → n ∸ m ≢ 0
m<n⇒n∸m≢0 {m} {n} m<n n∸m≡0 = (λ x → x (sym n∸m≡0)) $ <⇒≢ $ +-cancelʳ-< 0 (n ∸ m) $ begin-strict
m <⟨ m<n ⟩
n ≡⟨ sym $ m∸n+n≡m (<⇒≤ m<n) ⟩
(n ∸ m) + m ∎
≤⇒≡∨< : ∀ {m n} → m ≤ n → (m ≡ n) ⊎ (m < n)
≤⇒≡∨< {m} {n} m≤n with m ≟ n
... | yes m≡n = inj₁ m≡n
... | no m≢n = inj₂ (≤∧≢⇒< m≤n m≢n)
^-monoˡ-≤ : ∀ n → (_^ n) Preserves _≤_ ⟶ _≤_
^-monoˡ-≤ 0 {m} {o} m≤o = ≤-refl
^-monoˡ-≤ (suc n) {m} {o} m≤o = begin
m ^ suc n ≡⟨⟩
m * m ^ n ≤⟨ *-mono-≤ m≤o (^-monoˡ-≤ n m≤o) ⟩
o * o ^ n ≡⟨⟩
o ^ suc n ∎
1≤[1+m]^n : ∀ m n → 1 ≤ (1 + m) ^ n
1≤[1+m]^n m zero = ≤-refl
1≤[1+m]^n m (suc n) = begin
1 * 1 ≤⟨ *-mono-≤ (s≤s {0} {m} z≤n) (1≤[1+m]^n m n) ⟩
suc m * suc m ^ n ∎
^-monoʳ-≤ : ∀ n → (suc n ^_) Preserves _≤_ ⟶ _≤_
^-monoʳ-≤ n {.0} {o} z≤n = begin
1 ≤⟨ 1≤[1+m]^n n o ⟩
suc n ^ o ∎
^-monoʳ-≤ n {.(suc _)} {.(suc _)} (s≤s {m} {o} m≤o) = begin
suc n ^ suc m ≡⟨⟩
suc n * suc n ^ m ≤⟨ *-monoʳ-≤ (suc n) (^-monoʳ-≤ n m≤o) ⟩
suc n * suc n ^ o ≡⟨⟩
suc n ^ suc o ∎
^-mono-≤ : ∀ {m n o p} → m ≤ n → o ≤ p → suc m ^ o ≤ suc n ^ p
^-mono-≤ {m} {n} {o} {p} m≤n o≤p = begin
suc m ^ o ≤⟨ ^-monoˡ-≤ o (s≤s m≤n) ⟩
suc n ^ o ≤⟨ ^-monoʳ-≤ n o≤p ⟩
suc n ^ p ∎
*-cancelʳ-≤′ : ∀ m n {o} → False (o ≟ 0) → m * o ≤ n * o → m ≤ n
*-cancelʳ-≤′ m n {suc o} tt = *-cancelʳ-≤ m n o
-- TODO: update stdlib
*-cancelʳ-≡′ : ∀ m n {o} → False (o ≟ 0) → m * o ≡ n * o → m ≡ n
*-cancelʳ-≡′ m n {suc o} tt = *-cancelʳ-≡ m n
*-monoʳ-<′ : ∀ n → False (n ≟ 0) → (n *_) Preserves _<_ ⟶ _<_
*-monoʳ-<′ (suc n) tt gt = *-monoʳ-< n gt
*-monoˡ-<′ : ∀ n → False (n ≟ 0) → (_* n) Preserves _<_ ⟶ _<_
*-monoˡ-<′ (suc n) tt gt = *-monoˡ-< n gt
m≡n+o⇒m∸o≡n : ∀ m n o → m ≡ n + o → m ∸ o ≡ n
m≡n+o⇒m∸o≡n m n o m≡n+o = trans (cong (_∸ o) m≡n+o) (m+n∸n≡m n o)
lemma₃ : ∀ m n → (∃ λ o → (n ≡ o + m)) ⊎ (n < m)
lemma₃ m n with compare m n
lemma₃ m .(suc (m + k)) | less .m k = inj₁ (suc k , cong suc (+-comm m k))
lemma₃ m .m | equal .m = inj₁ (0 , +-identityˡ m)
lemma₃ .(suc (n + k)) n | greater .n k = inj₂ (s≤s (≤-stepsʳ k ≤-refl))
m≡n*o⇒n≡m/o : ∀ m n o → (wit : False (o ≟ 0)) → m ≡ n * o → n ≡ _/_ m o {wit}
m≡n*o⇒n≡m/o m n o@(suc o-1) tt m≡n*o = sym $ begin-equality
m / o ≡⟨ cong (_/ o) $ m≡n*o ⟩
(n * o) / o ≡⟨ m*n/n≡m n o ⟩
n ∎
m*n≡o⇒m≡o/n : ∀ m n o → (wit : False (n ≟ 0)) → m * n ≡ o → m ≡ _/_ o n {wit}
m*n≡o⇒m≡o/n m n o wit m*n≡o = m≡n*o⇒n≡m/o o m n wit (sym m*n≡o)
*-pres-≢0 : ∀ {a b} → a ≢ 0 → b ≢ 0 → a * b ≢ 0
*-pres-≢0 {0} {b} a≢0 b≢0 a*b≡0 = a≢0 refl
*-pres-≢0 {suc a} {0} a≢0 b≢0 a*b≡0 = b≢0 refl
-- TODO numbering
lemma₅ : ∀ m n o p → (m + n) * (o * p) ≡ (o * (m * p)) + n * (o * p)
lemma₅ = solve 4 (λ m n o p →
(m :+ n) :* (o :* p) := (o :* (m :* p)) :+ (n :* (o :* p))) refl
where open +-*-Solver
lemma₇ : ∀ m n o → m * n * o ≡ m * o * n
lemma₇ = solve 3 (λ m n o → m :* n :* o := m :* o :* n) refl
where open +-*-Solver
lemma₈ : ∀ m n o → m * (n * o) ≡ n * m * o
lemma₈ = solve 3 (λ m n o → m :* (n :* o) := n :* m :* o) refl
where open +-*-Solver
lemma₉ : ∀ m n o p → m * n * (o * p) ≡ (m * o) * (n * p)
lemma₉ = solve 4 (λ m n o p → m :* n :* (o :* p) := (m :* o) :* (n :* p)) refl
where open +-*-Solver
lemma₁₀ : ∀ m n o p → m * n * o * p ≡ (m * o) * (n * p)
lemma₁₀ = solve 4 (λ m n o p → m :* n :* o :* p := (m :* o) :* (n :* p)) refl
where open +-*-Solver
lemma₁₁ : ∀ m n o p → (m * n) * (o * p) ≡ m * o * p * n
lemma₁₁ = solve 4 (λ m n o p → (m :* n) :* (o :* p) := m :* o :* p :* n) refl
where open +-*-Solver
lemma₁₂ : ∀ m n o → m * n * o ≡ n * (m * o)
lemma₁₂ = solve 3 (λ m n o → m :* n :* o := n :* (m :* o)) refl
where open +-*-Solver
lemma₁₃ : ∀ m n o → m * n * n * o ≡ o * m * n * n
lemma₁₃ = solve 3 (λ m n o → m :* n :* n :* o := o :* m :* n :* n) refl
where open +-*-Solver
lemma₁₄ : ∀ m n o → m * n * (n * o * o) ≡ m * (n * o) * (n * o)
lemma₁₄ = solve 3 (λ m n o → m :* n :* (n :* o :* o) := m :* (n :* o) :* (n :* o)) refl
where open +-*-Solver
lemma₁₅ : ∀ n → (2 + 2 * n) * (1 + 2 * n) ≡ 2 * (1 + 2 * n) * (1 + n)
lemma₁₅ = solve 1 (λ n →
(con 2 :+ con 2 :* n) :* (con 1 :+ con 2 :* n) :=
con 2 :* (con 1 :+ con 2 :* n) :* (con 1 :+ n)
) refl
where open +-*-Solver
lemma₁₆ : ∀ m n o p → m * n * (o * p * p) ≡ m * o * (n * p * p)
lemma₁₆ = solve 4 (λ m n o p →
m :* n :* (o :* p :* p) := m :* o :* (n :* p :* p)
) refl
where open +-*-Solver
lemma₁₇ : ∀ m n o p → m * n * (o * p) ≡ o * m * (n * p)
lemma₁₇ = solve 4 (λ m n o p → m :* n :* (o :* p) := o :* m :* (n :* p)) refl
where open +-*-Solver
|
C$Procedure DSKI04 ( DSK, fetch integer type 4 data )
SUBROUTINE DSKI04 ( HANDLE, DLADSC, ITEM, START, ROOM, N, VALUES )
C$ Abstract
C
C Fetch integer data from a type 4 DSK segment.
C
C$ Disclaimer
C
C THIS SOFTWARE AND ANY RELATED MATERIALS WERE CREATED BY THE
C CALIFORNIA INSTITUTE OF TECHNOLOGY (CALTECH) UNDER A U.S.
C GOVERNMENT CONTRACT WITH THE NATIONAL AERONAUTICS AND SPACE
C ADMINISTRATION (NASA). THE SOFTWARE IS TECHNOLOGY AND SOFTWARE
C PUBLICLY AVAILABLE UNDER U.S. EXPORT LAWS AND IS PROVIDED "AS-IS"
C TO THE RECIPIENT WITHOUT WARRANTY OF ANY KIND, INCLUDING ANY
C WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A
C PARTICULAR USE OR PURPOSE (AS SET FORTH IN UNITED STATES UCC
C SECTIONS 2312-2313) OR FOR ANY PURPOSE WHATSOEVER, FOR THE
C SOFTWARE AND RELATED MATERIALS, HOWEVER USED.
C
C IN NO EVENT SHALL CALTECH, ITS JET PROPULSION LABORATORY, OR NASA
C BE LIABLE FOR ANY DAMAGES AND/OR COSTS, INCLUDING, BUT NOT
C LIMITED TO, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND,
C INCLUDING ECONOMIC DAMAGE OR INJURY TO PROPERTY AND LOST PROFITS,
C REGARDLESS OF WHETHER CALTECH, JPL, OR NASA BE ADVISED, HAVE
C REASON TO KNOW, OR, IN FACT, SHALL KNOW OF THE POSSIBILITY.
C
C RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF
C THE SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY
C CALTECH AND NASA FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE
C ACTIONS OF RECIPIENT IN THE USE OF THE SOFTWARE.
C
C$ Required_Reading
C
C DAS
C DSK
C
C$ Keywords
C
C DAS
C DSK
C FILES
C
C$ Declarations
IMPLICIT NONE
INCLUDE 'dla.inc'
INCLUDE 'dskdsc.inc'
INCLUDE 'dsk04.inc'
INTEGER HANDLE
INTEGER DLADSC ( * )
INTEGER ITEM
INTEGER START
INTEGER ROOM
INTEGER N
INTEGER VALUES ( * )
C$ Brief_I/O
C
C Variable I/O Description
C -------- --- --------------------------------------------------
C HANDLE I DSK file handle.
C DLADSC I DLA descriptor.
C ITEM I Keyword identifying item to fetch.
C START I Start index.
C ROOM I Amount of room in output array.
C N O Number of values returned.
C VALUES O Array containing requested item.
C
C$ Detailed_Input
C
C HANDLE is the handle of a DSK file containing a type 4
C segment from which data are to be fetched.
C
C DLADSC is the DLA descriptor associated with the segment
C from which data are to be fetched.
C
C ITEM is an integer "keyword" parameter designating the
C item to fetch. In the descriptions below, note
C that "model" refers to the model represented by
C the designated segment. This model may be a
C subset of a larger model.
C
C See the INCLUDE file dsk04.inc for values
C associated with the keyword parameters.
C
C
C START is the start index within the specified data item
C from which data are to be fetched. The index of
C the first element of each data item is 1. START
C has units of integers; for example, the start
C index of the second plate is 4, since each plate
C occupies three integers.
C
C ROOM is the amount of room in the output array. It is
C permissible to provide an output array that has
C too little room to fetch an item in one call. ROOM
C has units of integers: for example, the room
C required to fetch one plate is 3.
C
C$ Detailed_Output
C
C N is the number of elements fetched to the output
C array VALUES. N is normally in the range
C 1:ROOM; if an error occurs on the call, N is
C undefined.
C
C VALUES is a contiguous set of elements of the item
C designated by ITEM. The correspondence of
C                 VALUES to the elements of the data item is:
C
C VALUES(1) ITEM(START)
C ... ...
C VALUES(N) ITEM(START+N-1)
C
C If an error occurs on the call, VALUES is
C undefined.
C
C$ Parameters
C
C See the INCLUDE files
C
C dla.inc
C dsk04.inc
C dskdsc.inc
C
C$ Exceptions
C
C 1) If the input handle is invalid, the error will be diagnosed by
C routines in the call tree of this routine.
C
C 2) If a file read error occurs, the error will be diagnosed by
C routines in the call tree of this routine.
C
C 3) If the input DLA descriptor is invalid, the effect of this
C routine is undefined. The error *may* be diagnosed by routines
C in the call tree of this routine, but there are no
C guarantees.
C
C 4) If ROOM is non-positive, the error SPICE(VALUEOUTOFRANGE)
C is signaled.
C
C 5) If the input keyword parameter is not recognized, the error
C SPICE(NOTSUPPORTED) is signaled.
C
C 6) If START is less than 1 or greater than the size of the
C item to be fetched, the error SPICE(INDEXOUTOFRANGE) is
C signaled.
C
C$ Files
C
C See input argument HANDLE.
C
C$ Particulars
C
C DSK files are built using the DLA low-level format and
C the DAS architecture; DLA files are a specialized type of DAS
C file in which data are organized as a doubly linked list of
C segments. Each segment's data belong to contiguous components of
C character, double precision, and integer type.
C
C Note that the DSK descriptor for the segment is not needed by
C this routine; the DLA descriptor contains the base address and
C size information for the integer, double precision, and character
C components of the segment, and these suffice for the purpose of
C fetching data.
C
C$ Examples
C
C     The original header provides no example. The fragment below is
C     a hypothetical usage sketch only; the file name and the
C     surrounding DAS/DLA calls are assumptions, not part of this
C     routine's specification.
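C
C        CALL DASOPR ( 'shape.bds', HANDLE )
C        CALL DLABFS ( HANDLE, DLADSC, FOUND )
C
C        IF ( FOUND ) THEN
C
C           Fetch the number of nested grid dimensions into NDIM(1).
C
C           CALL DSKI04 ( HANDLE, DLADSC, KWNDIM, 1, 1, N, NDIM )
C
C        END IF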
C
C$ Restrictions
C
C 1) This is a prototype routine. The interface is not expected
C to change, but there are no guarantees.
C
C 2) This routine uses discovery check-in to boost execution
C speed. However, this routine is in violation of NAIF
C standards for use of discovery check-in: routines called
C from this routine may signal errors. If errors are signaled
C in called routines, this routine's name will be missing from
C the traceback message.
C
C 3) This routine does not initialize the nested grid addressing
C routines.
C
C
C$ Literature_References
C
C None.
C
C$ Author_and_Institution
C
C N.J. Bachman (JPL)
C
C$ Version
C
C- DSKBRIEF Version 1.1.0, 06-OCT-2016 (NJB)
C
C              Removed call to ZZDSK4GI. This routine no longer
C              initializes the nested grid addressing routines.
C
C Removed unused variables.
C
C- DSKBRIEF Version 1.0.0, 04-OCT-2012 (NJB)
C
C-&
C$ Index_Entries
C
C fetch integer data from a type_4_dsk segment
C
C-&
C
C SPICELIB functions
C
LOGICAL DLASSG
LOGICAL FAILED
LOGICAL RETURN
C
C Local parameters
C
C
C DBFSIZ is the size of a d.p. buffer used to
C read parameters from the segment.
C
INTEGER DBFSIZ
PARAMETER ( DBFSIZ = 19 + DSKDSZ )
C
C Local variables
C
DOUBLE PRECISION DBUFF ( DBFSIZ )
INTEGER B
INTEGER DBASE
INTEGER E
INTEGER IBASE
INTEGER NC
INTEGER NDAT
INTEGER NDIMS
INTEGER NR
INTEGER PIXPTR
INTEGER PRVDSC ( DLADSZ )
INTEGER PRVHAN
INTEGER SIZE
LOGICAL PASS1
C
C Saved variables
C
SAVE DBUFF
SAVE NC
SAVE NR
SAVE PASS1
SAVE PIXPTR
SAVE PRVDSC
SAVE PRVHAN
C
C Initial values
C
DATA PASS1 / .TRUE. /
DATA PRVHAN / 0 /
DATA PRVDSC / DLADSZ * 0 /
IF ( RETURN() ) THEN
RETURN
END IF
C
C Use discovery check-in. This is done for efficiency; note
C however that this routine does not meet SPICE standards for
C discovery check-in eligibility.
C
IF ( ROOM .LE. 0 ) THEN
CALL CHKIN ( 'DSKI04' )
CALL SETMSG ( 'ROOM was #; must be positive.' )
CALL ERRINT ( '#', ROOM )
CALL SIGERR ( 'SPICE(VALUEOUTOFRANGE)' )
CALL CHKOUT ( 'DSKI04' )
RETURN
END IF
IBASE = DLADSC ( IBSIDX )
DBASE = DLADSC ( DBSIDX )
C
C Either a new file or new segment in the same file
C will require looking up the segment parameters.
C To determine whether the segment is new, we don't
C need to compare the entire DLA descriptor: just
C comparing the three base addresses of the descriptor
C against the saved base addresses is sufficient.
C
IF ( PASS1 .OR. .NOT.
. DLASSG( HANDLE, PRVHAN, DLADSC, PRVDSC ) ) THEN
C
C Treat the input file and segment as new.
C
C Read the d.p. parameters first. These are located at the
C beginning of the d.p. component of the segment.
C
CALL DASRDD ( HANDLE, DBASE+1, DBASE+DBFSIZ, DBUFF )
C
C Update the pixel pointer.
C
PIXPTR = IBASE + NINT( DBUFF(IXPIXP) )
C
C Update the grid dimensions.
C
NC = NINT( DBUFF(IXNC) )
NR = NINT( DBUFF(IXNR) )
C
C This call may be reinstated for N0067. It's currently
C unnecessary.
C
C CALL ZZDSK4GI ( HANDLE, DLADSC )
C
IF ( .NOT. FAILED() ) THEN
PASS1 = .FALSE.
C
C Update the saved handle value.
C
PRVHAN = HANDLE
C
C Update the saved DLA descriptor.
C
CALL MOVEI ( DLADSC, DLADSZ, PRVDSC )
END IF
END IF
C
C Branch based on the item to be returned.
C
C Note that we haven't checked the validity of START; we'll do this
C after the IF block.
C
IF ( ITEM .EQ. KWRAW ) THEN
C
C Return the specified raw data.
C
C The raw grid has NR rows and NC/2 columns.
C There are two 16-bit pixels per stored integer.
C The data are stored in row-major order.
C
C The data are returned in packed form: two adjacent
C 16-bit values are returned in each integer.
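C
C        As a hypothetical illustration only (the ordering of the
C        two pixels within a stored integer is an assumption here,
C        not taken from this routine), a stored integer W could be
C        unpacked into its two 16-bit pixels as
C
C           P1 = W / 2**16
C           P2 = W - ( P1 * 2**16 )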
C
NDAT = ( NC / 2 ) * NR
C
C START must be in the range 1:NDAT.
C
IF ( ( START .LT. 1 ) .OR. ( START .GT. NDAT ) ) THEN
CALL CHKIN ( 'DSKI04' )
CALL SETMSG ( 'START must be in the range 1:# but was #.' )
CALL ERRINT ( '#', NDAT )
CALL ERRINT ( '#', START )
         CALL SIGERR  ( 'SPICE(INDEXOUTOFRANGE)'                   )
CALL CHKOUT ( 'DSKI04' )
RETURN
END IF
C
C Let B be the base address of the set of stored
C integers we'll read.
C
B = PIXPTR + START - 2
C
C Read data into the output array.
C
N = MIN ( ROOM, NDAT - START + 1 )
CALL DASRDI ( HANDLE, B+1, B+N, VALUES )
C
C     Exit here, since we're not going to use the generic
C     data transfer code at the end of this routine.
C
C There's no CHKOUT call here since we're using
C discovery check-in.
C
RETURN
ELSE IF ( ITEM .EQ. KWNDIM ) THEN
C
C The item is the number of nested grid dimensions.
C
SIZE = 1
B = IBASE + IXNDIM
E = B
ELSE IF ( ITEM .EQ. KWGDIM ) THEN
C
C The item is the array of grid dimensions.
C
B = IBASE + IXNDIM
CALL DASRDI ( HANDLE, B, B, NDIMS )
B = IBASE + IXGDIM
SIZE = 2 * NDIMS
ELSE
CALL CHKIN ( 'DSKI04' )
CALL SETMSG ( 'Keyword parameter # was not recognized.' )
CALL ERRINT ( '#', ITEM )
CALL SIGERR ( 'SPICE(NOTSUPPORTED)' )
CALL CHKOUT ( 'DSKI04' )
RETURN
END IF
C
C The valid range for START is 1:SIZE.
C
IF ( ( START .LT. 1 ) .OR. ( START .GT. SIZE ) ) THEN
CALL CHKIN ( 'DSKI04' )
CALL SETMSG ( 'START must be in the range defined ' //
. 'by the size of the data associated ' //
. 'with the keyword parameter #, ' //
. 'namely 1:#. Actual value of START ' //
. 'was #.' )
CALL ERRINT ( '#', ITEM )
CALL ERRINT ( '#', SIZE )
CALL ERRINT ( '#', START )
CALL SIGERR ( 'SPICE(INDEXOUTOFRANGE)' )
CALL CHKOUT ( 'DSKI04' )
RETURN
END IF
C
C     Read the requested data. B is the address of the first element
C     of the item, so the element at index START has address
C     B + START - 1.
C
      N = MIN ( ROOM,  SIZE - START + 1 )

      B = B + START - 1
      E = B + N - 1

      CALL DASRDI ( HANDLE, B, E, VALUES )
RETURN
END
|
Require Import ConstructiveEpsilon.
Module Enum_WO.
Definition Dec {X} (p : X -> Prop) := forall x : X, {p x} + {~p x}.
Definition Enum X := { g & forall x : X, exists n : nat, g n = Some x}.
Definition WO X := forall p : X -> Prop, Dec p -> ex p -> sigT p.
Definition WO_nat := constructive_indefinite_ground_description_nat.
(* Every enumerable type has a witness operator. *)
Lemma lemma X : Enum X -> WO X.
Proof.
intros [g Hg] p Dec_p H.
enough (exists n, match g n with Some x => p x | _ => False end) as [n Gn]%WO_nat.
- destruct (g n); now eauto.
- intros n. destruct (g n); auto.
    - destruct H as [x Hx], (Hg x) as [n Hn].
exists n. now rewrite Hn.
Qed.
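
  (* A small, hypothetical sanity check (not part of the original
     development): nat is enumerable via the identity enumeration,
     so by the lemma it carries a witness operator. *)
  Example WO_nat' : WO nat.
  Proof.
    apply lemma.
    exists (fun n => Some n).
    intros x. exists x. reflexivity.
  Qed.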
End Enum_WO.
|
import System
import System.Concurrency.Pipe
{-- TODO: REWRITE EVERYTHING!
senderError : (senderID : Int) -> (errMsg : String) -> IO ()
senderError senderID errMsg =
putStrLn $ "S" ++ (show senderID) ++ ": ERROR: " ++ errMsg
receiverError : (receiverID : Int) -> (errMsg : String) -> IO ()
receiverError receiverID errMsg =
putStrLn $ "R" ++ (show receiverID) ++ ": ERROR: " ++ errMsg
testReceiveEmpty : IO ()
testReceiveEmpty =
do putStrLn "--- Begin testReceiveEmpty ---"
cRef <- makeChannel
chan <- readIORef cRef
rawMsg <- receive chan
case rawMsg of
Nothing => putStrLn "Success."
Just m =>
putStrLn "ERROR: Got a message from the void"
putStrLn "--- End testReceiveEmpty ---\n"
testSendReceive_SENDER : (chan : Channel) -> IO ()
testSendReceive_SENDER chan =
do sendAndSignal chan (prepare "Hello")
putStrLn "S1: Sent 'Hello' and signalled"
testSendReceive_RECEIVER : (chan : Channel) -> IO ()
testSendReceive_RECEIVER chan =
do rawMsg <- receive chan
case rawMsg of
Nothing => receiverError 1 "Received Nothing"
Just msg => case unsafeOpen msg String of
"Hello" => putStrLn "R1: Success."
_ => receiverError 1 "Didn't receive 'Hello'"
testSendReceive : IO ()
testSendReceive =
do putStrLn "--- Begin testSendReceive ---"
cRef <- makeChannel
chan_SENDER <- makeSender cRef
chan_RECEIVER <- makeReceiver cRef
let sender = testSendReceive_SENDER chan_SENDER
let receiver = testSendReceive_RECEIVER chan_RECEIVER
pid_SENDER <- fork sender
sleep 1
pid_RECEIVER <- fork receiver
sleep 1
putStrLn "--- End testSendReceive ---\n"
testReceiveReply_SENDER : (chan : Channel) -> IO ()
testReceiveReply_SENDER chan =
do rawReply <- receive chan
case rawReply of
Nothing => senderError 1 "Received Nothing as reply"
Just msg => case unsafeOpen msg String of
"World" => putStrLn "S1: Success."
_ => senderError 1 "Didn't receive 'World'"
testReceiveReply_RECEIVER : (chan : Channel) -> IO ()
testReceiveReply_RECEIVER chan =
do sendAndSignal chan (prepare "World")
putStrLn "R1: Sent 'World' and signalled"
testReceiveReply : IO ()
testReceiveReply =
do putStrLn "--- Begin testReceiveReply ---"
cRef <- makeChannel
chan_SENDER <- makeSender cRef
chan_RECEIVER <- makeReceiver cRef
let sender = testReceiveReply_SENDER chan_SENDER
let receiver = testReceiveReply_RECEIVER chan_RECEIVER
-- fork in reverse order for minimal risk of runtime disruption
pid_RECEIVER <- fork receiver
sleep 1
pid_SENDER <- fork sender
sleep 1
putStrLn "--- End testReceiveReply ---\n"
testSendSignalAwait_SENDER : (chan : Channel) -> IO ()
testSendSignalAwait_SENDER chan =
do sendAndSignal chan (prepare "Hello")
putStrLn "S1: Sent 'Hello' and signalled"
testSendSignalAwait_RECEIVER : (chan : Channel) -> IO ()
testSendSignalAwait_RECEIVER chan =
do rawMsg <- await chan
case rawMsg of
Nothing => receiverError 1 "Await got Nothing"
Just msg => case unsafeOpen msg String of
"Hello" => putStrLn "R1: Success."
_ => receiverError 1 "Await didn't get 'Hello'"
testSendSignalAwait : IO ()
testSendSignalAwait =
do putStrLn "--- Begin testSendSignalAwait ---"
cRef <- makeChannel
chan_SENDER <- makeSender cRef
chan_RECEIVER <- makeReceiver cRef
let sender = testSendSignalAwait_SENDER chan_SENDER
let receiver = testSendSignalAwait_RECEIVER chan_RECEIVER
pid_SENDER <- fork sender
pid_RECEIVER <- fork receiver
sleep 1
putStrLn "--- End testSendSignalAwait ---\n"
testSendSignalAwait2_SENDER : (chan : Channel) -> IO ()
testSendSignalAwait2_SENDER chan =
do sleep 1
sendAndSignal chan (prepare "Hello")
putStrLn "S1: Sent 'Hello' and signalled"
testSendSignalAwait2_RECEIVER : (chan : Channel) -> IO ()
testSendSignalAwait2_RECEIVER chan =
do rawMsg <- await chan
case rawMsg of
Nothing => receiverError 1 "Await got Nothing"
Just msg => case unsafeOpen msg String of
"Hello" => putStrLn "R1: Success."
_ => receiverError 1 "Got something weird"
-- test that `await` works, as long as there is some update at some point
testSendSignalAwait2 : IO ()
testSendSignalAwait2 =
do putStrLn "--- Begin testSendSignalAwait2 ---"
cRef <- makeChannel
chan_SENDER <- makeSender cRef
chan_RECEIVER <- makeReceiver cRef
let sender = testSendSignalAwait2_SENDER chan_SENDER
let receiver = testSendSignalAwait2_RECEIVER chan_RECEIVER
pid_SENDER <- fork sender
pid_RECEIVER <- fork receiver
sleep 2
putStrLn "--- End testSendSignalAwait2 ---\n"
testAwaitTimedOut_RECEIVER : (chan : Channel) -> IO ()
testAwaitTimedOut_RECEIVER chan =
do timeoutRes <- awaitTimeout chan 500000
case timeoutRes of
TimedOut => putStrLn "R1: Success."
Succeeded msg => receiverError 1 "Got a message from the void"
testAwaitTimedOut : IO ()
testAwaitTimedOut =
do putStrLn "--- Begin testAwaitTimedOut ---"
cRef <- makeChannel
chan_RECEIVER <- makeReceiver cRef
let receiver = testAwaitTimedOut_RECEIVER chan_RECEIVER
pid_RECEIVER <- fork receiver
sleep 1
putStrLn "--- End testAwaitTimedOut ---\n"
testSendSignalAwaitTimedOut_SENDER : (chan : Channel) -> IO ()
testSendSignalAwaitTimedOut_SENDER chan =
do sendAndSignal chan (prepare "Hello")
putStrLn "S1: Sent 'Hello' and signalled"
testSendSignalAwaitTimedOut_RECEIVER : (chan : Channel) -> IO ()
testSendSignalAwaitTimedOut_RECEIVER chan =
do timeoutRes <- awaitTimeout chan 500000
case timeoutRes of
TimedOut => receiverError 1 "Timed out"
Succeeded rawMsg => case unsafeOpen rawMsg String of
"Hello" => putStrLn "R1: Success."
_ => receiverError 1 "Didn't get 'Hello'"
testSendSignalAwaitTimedOut : IO ()
testSendSignalAwaitTimedOut =
do putStrLn "--- Begin testSendSignalAwaitTimedOut ---"
cRef <- makeChannel
chan_SENDER <- makeSender cRef
chan_RECEIVER <- makeReceiver cRef
let sender = testSendSignalAwaitTimedOut_SENDER chan_SENDER
let receiver = testSendSignalAwaitTimedOut_RECEIVER chan_RECEIVER
pid_SENDER <- fork sender
pid_RECEIVER <- fork receiver
sleep 1
putStrLn "--- End testSendSignalAwaitTimedOut ---\n"
testSendSignalAwaitTooSlow_SENDER : (chan : Channel) -> IO ()
testSendSignalAwaitTooSlow_SENDER chan =
do sleep 1
sendAndSignal chan (prepare "Sorry I'm late")
putStrLn "S1: After 1 second, sent \"Sorry I'm late\" and signalled"
testSendSignalAwaitTooSlow_RECEIVER : (chan : Channel) -> IO ()
testSendSignalAwaitTooSlow_RECEIVER chan =
do timeoutRes <- awaitTimeout chan 500000
case timeoutRes of
TimedOut => putStrLn "R1: Success."
Succeeded rawMsg => receiverError 1 "Somehow got a message."
testSendSignalAwaitTooSlow : IO ()
testSendSignalAwaitTooSlow =
do putStrLn "--- Begin testSendSignalAwaitTooSlow ---"
cRef <- makeChannel
chan_SENDER <- makeSender cRef
chan_RECEIVER <- makeReceiver cRef
let sender = testSendSignalAwaitTooSlow_SENDER chan_SENDER
let receiver = testSendSignalAwaitTooSlow_RECEIVER chan_RECEIVER
pid_SENDER <- fork sender
pid_RECEIVER <- fork receiver
sleep 2
putStrLn "--- End testSendSignalAwaitTooSlow ---\n"
testSendSignalPeek_SENDER : (chan : Channel) -> IO ()
testSendSignalPeek_SENDER chan =
do sendAndSignal chan (prepare "Hello")
putStrLn "S1: Sent 'Hello' and signalled"
testSendSignalPeek_RECEIVER : (chan : Channel) -> IO ()
testSendSignalPeek_RECEIVER chan =
do maybePeek <- peek chan
case maybePeek of
Nothing => receiverError 1 "Peek got Nothing"
Just rawMsg => case unsafeOpen rawMsg String of
-- `peek` internals, i.e. msg isn't accidentally
-- dequeued, tested in MTTestQueues.idr
"Hello" => putStrLn "R1: Success."
_ => receiverError 1 "Did not get 'Hello'"
testSendSignalPeek : IO ()
testSendSignalPeek =
do putStrLn "--- Begin testSendSignalPeek ---"
cRef <- makeChannel
chan_SENDER <- makeSender cRef
chan_RECEIVER <- makeReceiver cRef
let sender = testSendSignalPeek_SENDER chan_SENDER
let receiver = testSendSignalPeek_RECEIVER chan_RECEIVER
pid_SENDER <- fork sender
sleep 1
pid_RECEIVER <- fork receiver
sleep 1
putStrLn "--- End testSendSignalPeek ---\n"
testSendSignalSpy_SENDER : (chan : Channel) -> IO ()
testSendSignalSpy_SENDER chan =
do sendAndSignal chan (prepare "Hello")
putStrLn "S1: Sent 'Hello' and signalled"
testSendSignalSpy_RECEIVER : (chan : Channel) -> IO ()
testSendSignalSpy_RECEIVER chan =
do rawMsg <- spy chan
case rawMsg of
Nothing => receiverError 1 "Spy got Nothing"
Just msg => case unsafeOpen msg String of
"Hello" => putStrLn "R1: Success."
_ => receiverError 1 "Spy didn't get 'Hello'"
testSendSignalSpy : IO ()
testSendSignalSpy =
do putStrLn "--- Begin testSendSignalSpy ---"
cRef <- makeChannel
chan_SENDER <- makeSender cRef
chan_RECEIVER <- makeReceiver cRef
let sender = testSendSignalSpy_SENDER chan_SENDER
let receiver = testSendSignalSpy_RECEIVER chan_RECEIVER
pid_SENDER <- fork sender
pid_RECEIVER <- fork receiver
sleep 1
putStrLn "--- End testSendSignalSpy ---\n"
testSendSignalSpy2_SENDER : (chan : Channel) -> IO ()
testSendSignalSpy2_SENDER chan =
do sleep 1
sendAndSignal chan (prepare "Hello")
putStrLn "S1: Sent 'Hello' and signalled"
testSendSignalSpy2_RECEIVER : (chan : Channel) -> IO ()
testSendSignalSpy2_RECEIVER chan =
do maybeMsg <- spy chan
case maybeMsg of
Nothing => receiverError 1 "Spy got Nothing"
Just msg => case unsafeOpen msg String of
"Hello" => putStrLn "R1: Success."
_ => receiverError 1 "Spy didn't get 'Hello'"
testSendSignalSpy2 : IO ()
testSendSignalSpy2 =
do putStrLn "--- Begin testSendSignalSpy2 ---"
cRef <- makeChannel
chan_SENDER <- makeSender cRef
chan_RECEIVER <- makeReceiver cRef
let sender = testSendSignalSpy2_SENDER chan_SENDER
let receiver = testSendSignalSpy2_RECEIVER chan_RECEIVER
pid_SENDER <- fork sender
pid_RECEIVER <- fork receiver
sleep 2
putStrLn "--- End testSendSignalSpy2 ---\n"
testSendSignalSpyTimedOut_RECEIVER : (chan : Channel) -> IO ()
testSendSignalSpyTimedOut_RECEIVER chan =
do timeoutRes <- spyTimeout chan 500000
case timeoutRes of
TimedOut => putStrLn "R1: Success."
Succeeded msg => receiverError 1 "Spy got a message from the void"
testSendSignalSpyTimedOut : IO ()
testSendSignalSpyTimedOut =
do putStrLn "--- Begin testSendSignalSpyTimedOut ---"
cRef <- makeChannel
chan_RECEIVER <- makeReceiver cRef
let receiver = testSendSignalSpyTimedOut_RECEIVER chan_RECEIVER
pid_RECEIVER <- fork receiver
sleep 1
putStrLn "--- End testSendSignalSpyTimedOut ---\n"
testSendSignalSpyTooSlow_SENDER : (chan : Channel) -> IO ()
testSendSignalSpyTooSlow_SENDER chan =
do sleep 1
sendAndSignal chan (prepare "I'm late! I'm late!")
putStrLn "S1: After 1 second, sent a message and signalled"
testSendSignalSpyTooSlow_RECEIVER : (chan : Channel) -> IO ()
testSendSignalSpyTooSlow_RECEIVER chan =
do timeoutRes <- spyTimeout chan 500000
case timeoutRes of
TimedOut => putStrLn "R1: Success."
Succeeded rawMsg => receiverError 1 "Got a message despite impatience"
testSendSignalSpyTooSlow : IO ()
testSendSignalSpyTooSlow =
do putStrLn "--- Begin testSendSignalSpyTooSlow ---"
cRef <- makeChannel
chan_SENDER <- makeSender cRef
chan_RECEIVER <- makeReceiver cRef
let sender = testSendSignalSpyTooSlow_SENDER chan_SENDER
let receiver = testSendSignalSpyTooSlow_RECEIVER chan_RECEIVER
pid_SENDER <- fork sender
pid_RECEIVER <- fork receiver
sleep 2
putStrLn "--- End testSendSignalSpyTooSlow ---\n"
runAll : IO ()
runAll = do testReceiveEmpty
testSendReceive
testReceiveReply
testSendSignalAwait
testSendSignalAwait2
testAwaitTimedOut
testSendSignalAwaitTimedOut
testSendSignalAwaitTooSlow
testSendSignalPeek
testSendSignalSpy
testSendSignalSpy2
testSendSignalSpyTimedOut
testSendSignalSpyTooSlow
main : IO ()
main = runAll
--}
|
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝ : α
g f✝ : α → β
inst✝³ : CommMonoid β
inst✝² : OrderedCancelAddCommMonoid α
inst✝¹ : ExistsAddOfLE α
inst✝ : LocallyFiniteOrder α
f : α → β
a b c : α
⊢ ∏ x in Ico a b, f (x + c) = ∏ x in Ico (a + c) (b + c), f x
[PROOFSTEP]
rw [← map_add_right_Ico, prod_map]
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝ : α
g f✝ : α → β
inst✝³ : CommMonoid β
inst✝² : OrderedCancelAddCommMonoid α
inst✝¹ : ExistsAddOfLE α
inst✝ : LocallyFiniteOrder α
f : α → β
a b c : α
⊢ ∏ x in Ico a b, f (x + c) = ∏ x in Ico a b, f (↑(addRightEmbedding c) x)
[PROOFSTEP]
rfl
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝ : α
g f✝ : α → β
inst✝³ : CommMonoid β
inst✝² : OrderedCancelAddCommMonoid α
inst✝¹ : ExistsAddOfLE α
inst✝ : LocallyFiniteOrder α
f : α → β
a b c : α
⊢ ∏ x in Ico a b, f (c + x) = ∏ x in Ico (a + c) (b + c), f x
[PROOFSTEP]
convert prod_Ico_add' f a b c using 2
[GOAL]
case h.e'_2.a
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝¹ : α
g f✝ : α → β
inst✝³ : CommMonoid β
inst✝² : OrderedCancelAddCommMonoid α
inst✝¹ : ExistsAddOfLE α
inst✝ : LocallyFiniteOrder α
f : α → β
a b c x✝ : α
a✝ : x✝ ∈ Ico a b
⊢ f (c + x✝) = f (x✝ + c)
[PROOFSTEP]
rw [add_comm]
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝ : α
g f✝ : α → β
inst✝ : CommMonoid β
a b : ℕ
hab : a ≤ b
f : ℕ → β
⊢ ∏ k in Ico a (b + 1), f k = (∏ k in Ico a b, f k) * f b
[PROOFSTEP]
rw [Nat.Ico_succ_right_eq_insert_Ico hab, prod_insert right_not_mem_Ico, mul_comm]
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝ : α
g f✝ : α → β
inst✝ : CommMonoid β
a b : ℕ
hab : a < b
f : ℕ → β
⊢ ∏ k in Ico a b, f k = f a * ∏ k in Ico (a + 1) b, f k
[PROOFSTEP]
have ha : a ∉ Ico (a + 1) b := by simp
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝ : α
g f✝ : α → β
inst✝ : CommMonoid β
a b : ℕ
hab : a < b
f : ℕ → β
⊢ ¬a ∈ Ico (a + 1) b
[PROOFSTEP]
simp
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝ : α
g f✝ : α → β
inst✝ : CommMonoid β
a b : ℕ
hab : a < b
f : ℕ → β
ha : ¬a ∈ Ico (a + 1) b
⊢ ∏ k in Ico a b, f k = f a * ∏ k in Ico (a + 1) b, f k
[PROOFSTEP]
rw [← prod_insert ha, Nat.Ico_insert_succ_left hab]
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
m n k : ℕ
hmn : m ≤ n
hnk : n ≤ k
⊢ (∏ i in Ioc m n, f i) * ∏ i in Ioc n k, f i = ∏ i in Ioc m k, f i
[PROOFSTEP]
rw [← Ioc_union_Ioc_eq_Ioc hmn hnk, prod_union]
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
m n k : ℕ
hmn : m ≤ n
hnk : n ≤ k
⊢ Disjoint (Ioc m n) (Ioc n k)
[PROOFSTEP]
apply disjoint_left.2 fun x hx h'x => _
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
m n k : ℕ
hmn : m ≤ n
hnk : n ≤ k
⊢ ∀ (x : ℕ), x ∈ Ioc m n → x ∈ Ioc n k → False
[PROOFSTEP]
intros x hx h'x
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
m n k : ℕ
hmn : m ≤ n
hnk : n ≤ k
x : ℕ
hx : x ∈ Ioc m n
h'x : x ∈ Ioc n k
⊢ False
[PROOFSTEP]
exact lt_irrefl _ ((mem_Ioc.1 h'x).1.trans_le (mem_Ioc.1 hx).2)
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝ : α
g f✝ : α → β
inst✝ : CommMonoid β
a b : ℕ
hab : a ≤ b
f : ℕ → β
⊢ ∏ k in Ioc a (b + 1), f k = (∏ k in Ioc a b, f k) * f (b + 1)
[PROOFSTEP]
rw [← prod_Ioc_consecutive _ hab (Nat.le_succ b), Nat.Ioc_succ_singleton, prod_singleton]
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝¹ : CommMonoid β
δ : Type u_1
inst✝ : CommGroup δ
f : ℕ → δ
m n : ℕ
h : m ≤ n
⊢ (∏ k in Ico m n, f k) * ∏ k in range m, f k = ∏ k in range n, f k
[PROOFSTEP]
rw [mul_comm]
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝¹ : CommMonoid β
δ : Type u_1
inst✝ : CommGroup δ
f : ℕ → δ
m n : ℕ
h : m ≤ n
⊢ (∏ k in range m, f k) * ∏ k in Ico m n, f k = ∏ k in range n, f k
[PROOFSTEP]
exact prod_range_mul_prod_Ico f h
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝¹ : CommMonoid β
δ : Type u_1
inst✝ : CommGroup δ
f : ℕ → δ
m n : ℕ
h : m ≤ n
⊢ ∏ k in Ico m n, f k = (∏ k in range n, f k) / ∏ k in range m, f k
[PROOFSTEP]
simpa only [div_eq_mul_inv] using prod_Ico_eq_mul_inv f h
[GOAL]
α✝ : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α✝
a : α✝
g f✝ : α✝ → β
inst✝¹ : CommMonoid β
α : Type u_1
inst✝ : CommGroup α
f : ℕ → α
n m : ℕ
hnm : n ≤ m
⊢ (∏ k in range m, f k) / ∏ k in range n, f k = ∏ k in filter (fun k => n ≤ k) (range m), f k
[PROOFSTEP]
rw [← prod_Ico_eq_div f hnm]
[GOAL]
α✝ : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α✝
a : α✝
g f✝ : α✝ → β
inst✝¹ : CommMonoid β
α : Type u_1
inst✝ : CommGroup α
f : ℕ → α
n m : ℕ
hnm : n ≤ m
⊢ ∏ k in Ico n m, f k = ∏ k in filter (fun k => n ≤ k) (range m), f k
[PROOFSTEP]
congr
[GOAL]
case e_s
α✝ : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α✝
a : α✝
g f✝ : α✝ → β
inst✝¹ : CommMonoid β
α : Type u_1
inst✝ : CommGroup α
f : ℕ → α
n m : ℕ
hnm : n ≤ m
⊢ Ico n m = filter (fun k => n ≤ k) (range m)
[PROOFSTEP]
apply Finset.ext
[GOAL]
case e_s.a
α✝ : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α✝
a : α✝
g f✝ : α✝ → β
inst✝¹ : CommMonoid β
α : Type u_1
inst✝ : CommGroup α
f : ℕ → α
n m : ℕ
hnm : n ≤ m
⊢ ∀ (a : ℕ), a ∈ Ico n m ↔ a ∈ filter (fun k => n ≤ k) (range m)
[PROOFSTEP]
simp only [mem_Ico, mem_filter, mem_range, *]
[GOAL]
case e_s.a
α✝ : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α✝
a : α✝
g f✝ : α✝ → β
inst✝¹ : CommMonoid β
α : Type u_1
inst✝ : CommGroup α
f : ℕ → α
n m : ℕ
hnm : n ≤ m
⊢ ∀ (a : ℕ), n ≤ a ∧ a < m ↔ a < m ∧ n ≤ a
[PROOFSTEP]
tauto
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝ : α
g f✝ : α → β
inst✝¹ : CommMonoid β
M : Type u_1
inst✝ : AddCommMonoid M
a b : ℕ
f : ℕ → ℕ → M
⊢ ∑ i in Ico a b, ∑ j in Ico i b, f i j = ∑ j in Ico a b, ∑ i in Ico a (j + 1), f i j
[PROOFSTEP]
rw [Finset.sum_sigma', Finset.sum_sigma']
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝ : α
g f✝ : α → β
inst✝¹ : CommMonoid β
M : Type u_1
inst✝ : AddCommMonoid M
a b : ℕ
f : ℕ → ℕ → M
⊢ ∑ x in Finset.sigma (Ico a b) fun i => Ico i b, f x.fst x.snd =
∑ x in Finset.sigma (Ico a b) fun j => Ico a (j + 1), f x.snd x.fst
[PROOFSTEP]
refine'
Finset.sum_bij' (fun (x : Σ _ : ℕ, ℕ) _ => (⟨x.2, x.1⟩ : Σ _ : ℕ, ℕ)) _ (fun _ _ => rfl)
(fun (x : Σ _ : ℕ, ℕ) _ => (⟨x.2, x.1⟩ : Σ _ : ℕ, ℕ)) _ (by (rintro ⟨⟩ _; rfl)) (by (rintro ⟨⟩ _; rfl))
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝ : α
g f✝ : α → β
inst✝¹ : CommMonoid β
M : Type u_1
inst✝ : AddCommMonoid M
a b : ℕ
f : ℕ → ℕ → M
⊢ ∀ (a_1 : (_ : ℕ) × ℕ) (ha : a_1 ∈ Finset.sigma (Ico a b) fun i => Ico i b),
(fun x x_1 => { fst := x.snd, snd := x.fst }) ((fun x x_1 => { fst := x.snd, snd := x.fst }) a_1 ha)
(_ : (fun x x_1 => { fst := x.snd, snd := x.fst }) a_1 ha ∈ ?m.28850) =
a_1
[PROOFSTEP]
rintro ⟨⟩ _
[GOAL]
case mk
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝ : α
g f✝ : α → β
inst✝¹ : CommMonoid β
M : Type u_1
inst✝ : AddCommMonoid M
a b : ℕ
f : ℕ → ℕ → M
fst✝ snd✝ : ℕ
ha✝ : { fst := fst✝, snd := snd✝ } ∈ Finset.sigma (Ico a b) fun i => Ico i b
⊢ (fun x x_1 => { fst := x.snd, snd := x.fst })
((fun x x_1 => { fst := x.snd, snd := x.fst }) { fst := fst✝, snd := snd✝ } ha✝)
(_ : (fun x x_1 => { fst := x.snd, snd := x.fst }) { fst := fst✝, snd := snd✝ } ha✝ ∈ ?m.28850) =
{ fst := fst✝, snd := snd✝ }
[PROOFSTEP]
rfl
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝ : α
g f✝ : α → β
inst✝¹ : CommMonoid β
M : Type u_1
inst✝ : AddCommMonoid M
a b : ℕ
f : ℕ → ℕ → M
⊢ ∀ (a_1 : (_ : ℕ) × ℕ) (ha : a_1 ∈ Finset.sigma (Ico a b) fun j => Ico a (j + 1)),
(fun x x_1 => { fst := x.snd, snd := x.fst }) ((fun x x_1 => { fst := x.snd, snd := x.fst }) a_1 ha)
(_ : (fun x x_1 => { fst := x.snd, snd := x.fst }) a_1 ha ∈ ?m.28849) =
a_1
[PROOFSTEP]
rintro ⟨⟩ _
[GOAL]
case mk
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝ : α
g f✝ : α → β
inst✝¹ : CommMonoid β
M : Type u_1
inst✝ : AddCommMonoid M
a b : ℕ
f : ℕ → ℕ → M
fst✝ snd✝ : ℕ
ha✝ : { fst := fst✝, snd := snd✝ } ∈ Finset.sigma (Ico a b) fun j => Ico a (j + 1)
⊢ (fun x x_1 => { fst := x.snd, snd := x.fst })
((fun x x_1 => { fst := x.snd, snd := x.fst }) { fst := fst✝, snd := snd✝ } ha✝)
(_ : (fun x x_1 => { fst := x.snd, snd := x.fst }) { fst := fst✝, snd := snd✝ } ha✝ ∈ ?m.28849) =
{ fst := fst✝, snd := snd✝ }
[PROOFSTEP]
rfl
[GOAL]
case refine'_1
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝ : α
g f✝ : α → β
inst✝¹ : CommMonoid β
M : Type u_1
inst✝ : AddCommMonoid M
a b : ℕ
f : ℕ → ℕ → M
⊢ ∀ (a_1 : (_ : ℕ) × ℕ) (ha : a_1 ∈ Finset.sigma (Ico a b) fun i => Ico i b),
(fun x x_1 => { fst := x.snd, snd := x.fst }) a_1 ha ∈ Finset.sigma (Ico a b) fun j => Ico a (j + 1)
[PROOFSTEP]
simp only [Finset.mem_Ico, Sigma.forall, Finset.mem_sigma]
[GOAL]
case refine'_2
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝ : α
g f✝ : α → β
inst✝¹ : CommMonoid β
M : Type u_1
inst✝ : AddCommMonoid M
a b : ℕ
f : ℕ → ℕ → M
⊢ ∀ (a_1 : (_ : ℕ) × ℕ) (ha : a_1 ∈ Finset.sigma (Ico a b) fun j => Ico a (j + 1)),
(fun x x_1 => { fst := x.snd, snd := x.fst }) a_1 ha ∈ Finset.sigma (Ico a b) fun i => Ico i b
[PROOFSTEP]
simp only [Finset.mem_Ico, Sigma.forall, Finset.mem_sigma]
[GOAL]
case refine'_1
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝ : α
g f✝ : α → β
inst✝¹ : CommMonoid β
M : Type u_1
inst✝ : AddCommMonoid M
a b : ℕ
f : ℕ → ℕ → M
⊢ ∀ (a_1 b_1 : ℕ), (a ≤ a_1 ∧ a_1 < b) ∧ a_1 ≤ b_1 ∧ b_1 < b → (a ≤ b_1 ∧ b_1 < b) ∧ a ≤ a_1 ∧ a_1 < b_1 + 1
[PROOFSTEP]
rintro a b ⟨⟨h₁, h₂⟩, ⟨h₃, h₄⟩⟩
[GOAL]
case refine'_2
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝ : α
g f✝ : α → β
inst✝¹ : CommMonoid β
M : Type u_1
inst✝ : AddCommMonoid M
a b : ℕ
f : ℕ → ℕ → M
⊢ ∀ (a_1 b_1 : ℕ), (a ≤ a_1 ∧ a_1 < b) ∧ a ≤ b_1 ∧ b_1 < a_1 + 1 → (a ≤ b_1 ∧ b_1 < b) ∧ b_1 ≤ a_1 ∧ a_1 < b
[PROOFSTEP]
rintro a b ⟨⟨h₁, h₂⟩, ⟨h₃, h₄⟩⟩
[GOAL]
case refine'_1.intro.intro.intro
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝¹ : α
g f✝ : α → β
inst✝¹ : CommMonoid β
M : Type u_1
inst✝ : AddCommMonoid M
a✝ b✝ : ℕ
f : ℕ → ℕ → M
a b : ℕ
h₁ : a✝ ≤ a
h₂ : a < b✝
h₃ : a ≤ b
h₄ : b < b✝
⊢ (a✝ ≤ b ∧ b < b✝) ∧ a✝ ≤ a ∧ a < b + 1
[PROOFSTEP]
refine' ⟨⟨_, _⟩, ⟨_, _⟩⟩
[GOAL]
case refine'_2.intro.intro.intro
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝¹ : α
g f✝ : α → β
inst✝¹ : CommMonoid β
M : Type u_1
inst✝ : AddCommMonoid M
a✝ b✝ : ℕ
f : ℕ → ℕ → M
a b : ℕ
h₁ : a✝ ≤ a
h₂ : a < b✝
h₃ : a✝ ≤ b
h₄ : b < a + 1
⊢ (a✝ ≤ b ∧ b < b✝) ∧ b ≤ a ∧ a < b✝
[PROOFSTEP]
refine' ⟨⟨_, _⟩, ⟨_, _⟩⟩
[GOAL]
case refine'_1.intro.intro.intro.refine'_1
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝¹ : α
g f✝ : α → β
inst✝¹ : CommMonoid β
M : Type u_1
inst✝ : AddCommMonoid M
a✝ b✝ : ℕ
f : ℕ → ℕ → M
a b : ℕ
h₁ : a✝ ≤ a
h₂ : a < b✝
h₃ : a ≤ b
h₄ : b < b✝
⊢ a✝ ≤ b
[PROOFSTEP]
linarith
[GOAL]
case refine'_1.intro.intro.intro.refine'_2
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝¹ : α
g f✝ : α → β
inst✝¹ : CommMonoid β
M : Type u_1
inst✝ : AddCommMonoid M
a✝ b✝ : ℕ
f : ℕ → ℕ → M
a b : ℕ
h₁ : a✝ ≤ a
h₂ : a < b✝
h₃ : a ≤ b
h₄ : b < b✝
⊢ b < b✝
[PROOFSTEP]
linarith
[GOAL]
case refine'_1.intro.intro.intro.refine'_3
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝¹ : α
g f✝ : α → β
inst✝¹ : CommMonoid β
M : Type u_1
inst✝ : AddCommMonoid M
a✝ b✝ : ℕ
f : ℕ → ℕ → M
a b : ℕ
h₁ : a✝ ≤ a
h₂ : a < b✝
h₃ : a ≤ b
h₄ : b < b✝
⊢ a✝ ≤ a
[PROOFSTEP]
linarith
[GOAL]
case refine'_1.intro.intro.intro.refine'_4
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝¹ : α
g f✝ : α → β
inst✝¹ : CommMonoid β
M : Type u_1
inst✝ : AddCommMonoid M
a✝ b✝ : ℕ
f : ℕ → ℕ → M
a b : ℕ
h₁ : a✝ ≤ a
h₂ : a < b✝
h₃ : a ≤ b
h₄ : b < b✝
⊢ a < b + 1
[PROOFSTEP]
linarith
[GOAL]
case refine'_2.intro.intro.intro.refine'_1
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝¹ : α
g f✝ : α → β
inst✝¹ : CommMonoid β
M : Type u_1
inst✝ : AddCommMonoid M
a✝ b✝ : ℕ
f : ℕ → ℕ → M
a b : ℕ
h₁ : a✝ ≤ a
h₂ : a < b✝
h₃ : a✝ ≤ b
h₄ : b < a + 1
⊢ a✝ ≤ b
[PROOFSTEP]
linarith
[GOAL]
case refine'_2.intro.intro.intro.refine'_2
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝¹ : α
g f✝ : α → β
inst✝¹ : CommMonoid β
M : Type u_1
inst✝ : AddCommMonoid M
a✝ b✝ : ℕ
f : ℕ → ℕ → M
a b : ℕ
h₁ : a✝ ≤ a
h₂ : a < b✝
h₃ : a✝ ≤ b
h₄ : b < a + 1
⊢ b < b✝
[PROOFSTEP]
linarith
[GOAL]
case refine'_2.intro.intro.intro.refine'_3
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝¹ : α
g f✝ : α → β
inst✝¹ : CommMonoid β
M : Type u_1
inst✝ : AddCommMonoid M
a✝ b✝ : ℕ
f : ℕ → ℕ → M
a b : ℕ
h₁ : a✝ ≤ a
h₂ : a < b✝
h₃ : a✝ ≤ b
h₄ : b < a + 1
⊢ b ≤ a
[PROOFSTEP]
linarith
[GOAL]
case refine'_2.intro.intro.intro.refine'_4
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a✝¹ : α
g f✝ : α → β
inst✝¹ : CommMonoid β
M : Type u_1
inst✝ : AddCommMonoid M
a✝ b✝ : ℕ
f : ℕ → ℕ → M
a b : ℕ
h₁ : a✝ ≤ a
h₂ : a < b✝
h₃ : a✝ ≤ b
h₄ : b < a + 1
⊢ a < b✝
[PROOFSTEP]
linarith
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
m n : ℕ
⊢ ∏ k in Ico m n, f k = ∏ k in range (n - m), f (m + k)
[PROOFSTEP]
by_cases h : m ≤ n
[GOAL]
case pos
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
m n : ℕ
h : m ≤ n
⊢ ∏ k in Ico m n, f k = ∏ k in range (n - m), f (m + k)
[PROOFSTEP]
rw [← Nat.Ico_zero_eq_range, prod_Ico_add, zero_add, tsub_add_cancel_of_le h]
[GOAL]
case neg
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
m n : ℕ
h : ¬m ≤ n
⊢ ∏ k in Ico m n, f k = ∏ k in range (n - m), f (m + k)
[PROOFSTEP]
replace h : n ≤ m := le_of_not_ge h
[GOAL]
case neg
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
m n : ℕ
h : n ≤ m
⊢ ∏ k in Ico m n, f k = ∏ k in range (n - m), f (m + k)
[PROOFSTEP]
rw [Ico_eq_empty_of_le h, tsub_eq_zero_iff_le.mpr h, range_zero, prod_empty, prod_empty]
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
k m n : ℕ
h : m ≤ n + 1
⊢ ∏ j in Ico k m, f (n - j) = ∏ j in Ico (n + 1 - m) (n + 1 - k), f j
[PROOFSTEP]
have : ∀ i < m, i ≤ n := by
intro i hi
exact (add_le_add_iff_right 1).1 (le_trans (Nat.lt_iff_add_one_le.1 hi) h)
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
k m n : ℕ
h : m ≤ n + 1
⊢ ∀ (i : ℕ), i < m → i ≤ n
[PROOFSTEP]
intro i hi
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
k m n : ℕ
h : m ≤ n + 1
i : ℕ
hi : i < m
⊢ i ≤ n
[PROOFSTEP]
exact (add_le_add_iff_right 1).1 (le_trans (Nat.lt_iff_add_one_le.1 hi) h)
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
k m n : ℕ
h : m ≤ n + 1
this : ∀ (i : ℕ), i < m → i ≤ n
⊢ ∏ j in Ico k m, f (n - j) = ∏ j in Ico (n + 1 - m) (n + 1 - k), f j
[PROOFSTEP]
cases' lt_or_le k m with hkm hkm
[GOAL]
case inl
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
k m n : ℕ
h : m ≤ n + 1
this : ∀ (i : ℕ), i < m → i ≤ n
hkm : k < m
⊢ ∏ j in Ico k m, f (n - j) = ∏ j in Ico (n + 1 - m) (n + 1 - k), f j
[PROOFSTEP]
rw [← Nat.Ico_image_const_sub_eq_Ico (this _ hkm)]
[GOAL]
case inl
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
k m n : ℕ
h : m ≤ n + 1
this : ∀ (i : ℕ), i < m → i ≤ n
hkm : k < m
⊢ ∏ j in Ico k m, f (n - j) = ∏ j in image (fun x => n - x) (Ico k m), f j
[PROOFSTEP]
refine' (prod_image _).symm
[GOAL]
case inl
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
k m n : ℕ
h : m ≤ n + 1
this : ∀ (i : ℕ), i < m → i ≤ n
hkm : k < m
⊢ ∀ (x : ℕ), x ∈ Ico k m → ∀ (y : ℕ), y ∈ Ico k m → n - x = n - y → x = y
[PROOFSTEP]
simp only [mem_Ico]
[GOAL]
case inl
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
k m n : ℕ
h : m ≤ n + 1
this : ∀ (i : ℕ), i < m → i ≤ n
hkm : k < m
⊢ ∀ (x : ℕ), k ≤ x ∧ x < m → ∀ (y : ℕ), k ≤ y ∧ y < m → n - x = n - y → x = y
[PROOFSTEP]
rintro i ⟨_, im⟩ j ⟨_, jm⟩ Hij
[GOAL]
case inl.intro.intro
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
k m n : ℕ
h : m ≤ n + 1
this : ∀ (i : ℕ), i < m → i ≤ n
hkm : k < m
i : ℕ
left✝¹ : k ≤ i
im : i < m
j : ℕ
left✝ : k ≤ j
jm : j < m
Hij : n - i = n - j
⊢ i = j
[PROOFSTEP]
rw [← tsub_tsub_cancel_of_le (this _ im), Hij, tsub_tsub_cancel_of_le (this _ jm)]
[GOAL]
case inr
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
k m n : ℕ
h : m ≤ n + 1
this : ∀ (i : ℕ), i < m → i ≤ n
hkm : m ≤ k
⊢ ∏ j in Ico k m, f (n - j) = ∏ j in Ico (n + 1 - m) (n + 1 - k), f j
[PROOFSTEP]
have : n + 1 - k ≤ n + 1 - m := by
rw [tsub_le_tsub_iff_left h]
exact hkm
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
k m n : ℕ
h : m ≤ n + 1
this : ∀ (i : ℕ), i < m → i ≤ n
hkm : m ≤ k
⊢ n + 1 - k ≤ n + 1 - m
[PROOFSTEP]
rw [tsub_le_tsub_iff_left h]
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
k m n : ℕ
h : m ≤ n + 1
this : ∀ (i : ℕ), i < m → i ≤ n
hkm : m ≤ k
⊢ m ≤ k
[PROOFSTEP]
exact hkm
[GOAL]
case inr
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
k m n : ℕ
h : m ≤ n + 1
this✝ : ∀ (i : ℕ), i < m → i ≤ n
hkm : m ≤ k
this : n + 1 - k ≤ n + 1 - m
⊢ ∏ j in Ico k m, f (n - j) = ∏ j in Ico (n + 1 - m) (n + 1 - k), f j
[PROOFSTEP]
simp only [ge_iff_le, hkm, Ico_eq_empty_of_le, prod_empty, tsub_le_iff_right, Ico_eq_empty_of_le this]
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
n : ℕ
⊢ ∏ j in range n, f (n - 1 - j) = ∏ j in range n, f j
[PROOFSTEP]
cases n
[GOAL]
case zero
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
⊢ ∏ j in range zero, f (zero - 1 - j) = ∏ j in range zero, f j
[PROOFSTEP]
simp
[GOAL]
case succ
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
n✝ : ℕ
⊢ ∏ j in range (succ n✝), f (succ n✝ - 1 - j) = ∏ j in range (succ n✝), f j
[PROOFSTEP]
simp only [← Nat.Ico_zero_eq_range, Nat.succ_sub_succ_eq_sub, tsub_zero]
[GOAL]
case succ
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
n✝ : ℕ
⊢ ∏ x in Ico 0 (succ n✝), f (n✝ - x) = ∏ x in Ico 0 (succ n✝), f x
[PROOFSTEP]
rw [prod_Ico_reflect _ _ le_rfl]
[GOAL]
case succ
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f✝ : α → β
inst✝ : CommMonoid β
f : ℕ → β
n✝ : ℕ
⊢ ∏ j in Ico (n✝ + 1 - (n✝ + 1)) (n✝ + 1 - 0), f j = ∏ x in Ico 0 (succ n✝), f x
[PROOFSTEP]
simp
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f : α → β
inst✝ : CommMonoid β
n : ℕ
⊢ ∏ x in Ico 1 (n + 1 + 1), x = (n + 1)!
[PROOFSTEP]
rw [prod_Ico_succ_top <| Nat.succ_le_succ <| Nat.zero_le n, Nat.factorial_succ, prod_Ico_id_eq_factorial n,
Nat.succ_eq_add_one, mul_comm]
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f : α → β
inst✝ : CommMonoid β
n : ℕ
⊢ ∏ x in range (n + 1), (x + 1) = (n + 1)!
[PROOFSTEP]
simp [Finset.range_succ, prod_range_add_one_eq_factorial n]
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f : α → β
inst✝ : CommMonoid β
n : ℕ
⊢ (∑ i in range n, i) * 2 = ∑ i in range n, i + ∑ i in range n, (n - 1 - i)
[PROOFSTEP]
rw [sum_range_reflect (fun i => i) n, mul_two]
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f : α → β
inst✝ : CommMonoid β
n : ℕ
⊢ ∑ i in range n, (n - 1) = n * (n - 1)
[PROOFSTEP]
rw [sum_const, card_range, Nat.nsmul_eq_mul]
[GOAL]
α : Type u
β : Type v
γ : Type w
s₂ s₁ s : Finset α
a : α
g f : α → β
inst✝ : CommMonoid β
n : ℕ
⊢ ∑ i in range n, i = n * (n - 1) / 2
[PROOFSTEP]
rw [← sum_range_id_mul_two n, Nat.mul_div_cancel _ zero_lt_two]
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
⊢ ∑ i in Ico m n, f i • g i =
f (n - 1) • ∑ i in range n, g i - f m • ∑ i in range m, g i -
∑ i in Ico m (n - 1), (f (i + 1) - f i) • ∑ i in range (i + 1), g i
[PROOFSTEP]
have h₁ : (∑ i in Ico (m + 1) n, f i • G i) = ∑ i in Ico m (n - 1), f (i + 1) • G (i + 1) :=
by
rw [← Nat.sub_add_cancel (Nat.one_le_of_lt hmn), ← sum_Ico_add']
simp only [ge_iff_le, tsub_le_iff_right, add_le_iff_nonpos_left, nonpos_iff_eq_zero, tsub_eq_zero_iff_le,
add_tsub_cancel_right]
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
⊢ ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
[PROOFSTEP]
rw [← Nat.sub_add_cancel (Nat.one_le_of_lt hmn), ← sum_Ico_add']
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
⊢ ∑ x in Ico m (n - 1), f (x + 1) • ∑ i in range (x + 1), g i =
∑ i in Ico m (n - 1 + 1 - 1), f (i + 1) • ∑ i in range (i + 1), g i
[PROOFSTEP]
simp only [ge_iff_le, tsub_le_iff_right, add_le_iff_nonpos_left, nonpos_iff_eq_zero, tsub_eq_zero_iff_le,
add_tsub_cancel_right]
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
h₁ : ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
⊢ ∑ i in Ico m n, f i • g i =
f (n - 1) • ∑ i in range n, g i - f m • ∑ i in range m, g i -
∑ i in Ico m (n - 1), (f (i + 1) - f i) • ∑ i in range (i + 1), g i
[PROOFSTEP]
have h₂ :
(∑ i in Ico (m + 1) n, f i • G (i + 1)) =
(∑ i in Ico m (n - 1), f i • G (i + 1)) + f (n - 1) • G n - f m • G (m + 1) :=
by
rw [← sum_Ico_sub_bot _ hmn, ← sum_Ico_succ_sub_top _ (Nat.le_pred_of_lt hmn), Nat.sub_add_cancel (pos_of_gt hmn),
sub_add_cancel]
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
h₁ : ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
⊢ ∑ i in Ico (m + 1) n, f i • ∑ i in range (i + 1), g i =
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i
[PROOFSTEP]
rw [← sum_Ico_sub_bot _ hmn, ← sum_Ico_succ_sub_top _ (Nat.le_pred_of_lt hmn), Nat.sub_add_cancel (pos_of_gt hmn),
sub_add_cancel]
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
h₁ : ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
h₂ :
∑ i in Ico (m + 1) n, f i • ∑ i in range (i + 1), g i =
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i
⊢ ∑ i in Ico m n, f i • g i =
f (n - 1) • ∑ i in range n, g i - f m • ∑ i in range m, g i -
∑ i in Ico m (n - 1), (f (i + 1) - f i) • ∑ i in range (i + 1), g i
[PROOFSTEP]
rw [sum_eq_sum_Ico_succ_bot hmn]
-- porting note: the following used to be done with `conv`
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
h₁ : ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
h₂ :
∑ i in Ico (m + 1) n, f i • ∑ i in range (i + 1), g i =
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i
⊢ f m • g m + ∑ k in Ico (m + 1) n, f k • g k =
f (n - 1) • ∑ i in range n, g i - f m • ∑ i in range m, g i -
∑ i in Ico m (n - 1), (f (i + 1) - f i) • ∑ i in range (i + 1), g i
[PROOFSTEP]
have h₃ :
(Finset.sum (Ico (m + 1) n) fun i => f i • g i) =
(Finset.sum (Ico (m + 1) n) fun i =>
f i • ((Finset.sum (Finset.range (i + 1)) g) - (Finset.sum (Finset.range i) g))) :=
by congr; funext; rw [← sum_range_succ_sub_sum g]
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
h₁ : ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
h₂ :
∑ i in Ico (m + 1) n, f i • ∑ i in range (i + 1), g i =
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i
⊢ ∑ i in Ico (m + 1) n, f i • g i = ∑ i in Ico (m + 1) n, f i • (Finset.sum (range (i + 1)) g - Finset.sum (range i) g)
[PROOFSTEP]
congr
[GOAL]
case e_f
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
h₁ : ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
h₂ :
∑ i in Ico (m + 1) n, f i • ∑ i in range (i + 1), g i =
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i
⊢ (fun i => f i • g i) = fun i => f i • (Finset.sum (range (i + 1)) g - Finset.sum (range i) g)
[PROOFSTEP]
funext
[GOAL]
case e_f.h
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
h₁ : ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
h₂ :
∑ i in Ico (m + 1) n, f i • ∑ i in range (i + 1), g i =
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i
x✝ : ℕ
⊢ f x✝ • g x✝ = f x✝ • (Finset.sum (range (x✝ + 1)) g - Finset.sum (range x✝) g)
[PROOFSTEP]
rw [← sum_range_succ_sub_sum g]
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
h₁ : ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
h₂ :
∑ i in Ico (m + 1) n, f i • ∑ i in range (i + 1), g i =
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i
h₃ :
∑ i in Ico (m + 1) n, f i • g i = ∑ i in Ico (m + 1) n, f i • (Finset.sum (range (i + 1)) g - Finset.sum (range i) g)
⊢ f m • g m + ∑ k in Ico (m + 1) n, f k • g k =
f (n - 1) • ∑ i in range n, g i - f m • ∑ i in range m, g i -
∑ i in Ico m (n - 1), (f (i + 1) - f i) • ∑ i in range (i + 1), g i
[PROOFSTEP]
rw [h₃]
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
h₁ : ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
h₂ :
∑ i in Ico (m + 1) n, f i • ∑ i in range (i + 1), g i =
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i
h₃ :
∑ i in Ico (m + 1) n, f i • g i = ∑ i in Ico (m + 1) n, f i • (Finset.sum (range (i + 1)) g - Finset.sum (range i) g)
⊢ f m • g m + ∑ i in Ico (m + 1) n, f i • (Finset.sum (range (i + 1)) g - Finset.sum (range i) g) =
f (n - 1) • ∑ i in range n, g i - f m • ∑ i in range m, g i -
∑ i in Ico m (n - 1), (f (i + 1) - f i) • ∑ i in range (i + 1), g i
[PROOFSTEP]
simp_rw [smul_sub, sum_sub_distrib, h₂, h₁]
-- porting note: the following used to be done with `conv`
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
h₁ : ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
h₂ :
∑ i in Ico (m + 1) n, f i • ∑ i in range (i + 1), g i =
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i
h₃ :
∑ i in Ico (m + 1) n, f i • g i = ∑ i in Ico (m + 1) n, f i • (Finset.sum (range (i + 1)) g - Finset.sum (range i) g)
⊢ f m • g m +
(∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i -
∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i) =
f (n - 1) • ∑ i in range n, g i - f m • ∑ i in range m, g i -
∑ i in Ico m (n - 1), (f (i + 1) - f i) • ∑ i in range (i + 1), g i
[PROOFSTEP]
have h₄ :
((((Finset.sum (Ico m (n - 1)) fun i => f i • Finset.sum (range (i + 1)) fun i => g i) +
f (n - 1) • Finset.sum (range n) fun i => g i) -
f m • Finset.sum (range (m + 1)) fun i => g i) -
Finset.sum (Ico m (n - 1)) fun i => f (i + 1) • Finset.sum (range (i + 1)) fun i => g i) =
f (n - 1) • (range n).sum g - f m • (range (m + 1)).sum g +
Finset.sum (Ico m (n - 1)) (fun i => f i • (range (i + 1)).sum g - f (i + 1) • (range (i + 1)).sum g) :=
by rw [← add_sub, add_comm, ← add_sub, ← sum_sub_distrib]
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
h₁ : ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
h₂ :
∑ i in Ico (m + 1) n, f i • ∑ i in range (i + 1), g i =
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i
h₃ :
∑ i in Ico (m + 1) n, f i • g i = ∑ i in Ico (m + 1) n, f i • (Finset.sum (range (i + 1)) g - Finset.sum (range i) g)
⊢ ∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i -
∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i =
f (n - 1) • Finset.sum (range n) g - f m • Finset.sum (range (m + 1)) g +
∑ i in Ico m (n - 1), (f i • Finset.sum (range (i + 1)) g - f (i + 1) • Finset.sum (range (i + 1)) g)
[PROOFSTEP]
rw [← add_sub, add_comm, ← add_sub, ← sum_sub_distrib]
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
h₁ : ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
h₂ :
∑ i in Ico (m + 1) n, f i • ∑ i in range (i + 1), g i =
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i
h₃ :
∑ i in Ico (m + 1) n, f i • g i = ∑ i in Ico (m + 1) n, f i • (Finset.sum (range (i + 1)) g - Finset.sum (range i) g)
h₄ :
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i -
∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i =
f (n - 1) • Finset.sum (range n) g - f m • Finset.sum (range (m + 1)) g +
∑ i in Ico m (n - 1), (f i • Finset.sum (range (i + 1)) g - f (i + 1) • Finset.sum (range (i + 1)) g)
⊢ f m • g m +
(∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i -
∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i) =
f (n - 1) • ∑ i in range n, g i - f m • ∑ i in range m, g i -
∑ i in Ico m (n - 1), (f (i + 1) - f i) • ∑ i in range (i + 1), g i
[PROOFSTEP]
rw [h₄]
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
h₁ : ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
h₂ :
∑ i in Ico (m + 1) n, f i • ∑ i in range (i + 1), g i =
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i
h₃ :
∑ i in Ico (m + 1) n, f i • g i = ∑ i in Ico (m + 1) n, f i • (Finset.sum (range (i + 1)) g - Finset.sum (range i) g)
h₄ :
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i -
∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i =
f (n - 1) • Finset.sum (range n) g - f m • Finset.sum (range (m + 1)) g +
∑ i in Ico m (n - 1), (f i • Finset.sum (range (i + 1)) g - f (i + 1) • Finset.sum (range (i + 1)) g)
⊢ f m • g m +
(f (n - 1) • Finset.sum (range n) g - f m • Finset.sum (range (m + 1)) g +
∑ i in Ico m (n - 1), (f i • Finset.sum (range (i + 1)) g - f (i + 1) • Finset.sum (range (i + 1)) g)) =
f (n - 1) • ∑ i in range n, g i - f m • ∑ i in range m, g i -
∑ i in Ico m (n - 1), (f (i + 1) - f i) • ∑ i in range (i + 1), g i
[PROOFSTEP]
have : ∀ i, f i • G (i + 1) - f (i + 1) • G (i + 1) = -((f (i + 1) - f i) • G (i + 1)) :=
by
intro i
rw [sub_smul]
abel
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
h₁ : ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
h₂ :
∑ i in Ico (m + 1) n, f i • ∑ i in range (i + 1), g i =
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i
h₃ :
∑ i in Ico (m + 1) n, f i • g i = ∑ i in Ico (m + 1) n, f i • (Finset.sum (range (i + 1)) g - Finset.sum (range i) g)
h₄ :
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i -
∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i =
f (n - 1) • Finset.sum (range n) g - f m • Finset.sum (range (m + 1)) g +
∑ i in Ico m (n - 1), (f i • Finset.sum (range (i + 1)) g - f (i + 1) • Finset.sum (range (i + 1)) g)
⊢ ∀ (i : ℕ),
f i • ∑ i in range (i + 1), g i - f (i + 1) • ∑ i in range (i + 1), g i =
-((f (i + 1) - f i) • ∑ i in range (i + 1), g i)
[PROOFSTEP]
intro i
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
h₁ : ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
h₂ :
∑ i in Ico (m + 1) n, f i • ∑ i in range (i + 1), g i =
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i
h₃ :
∑ i in Ico (m + 1) n, f i • g i = ∑ i in Ico (m + 1) n, f i • (Finset.sum (range (i + 1)) g - Finset.sum (range i) g)
h₄ :
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i -
∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i =
f (n - 1) • Finset.sum (range n) g - f m • Finset.sum (range (m + 1)) g +
∑ i in Ico m (n - 1), (f i • Finset.sum (range (i + 1)) g - f (i + 1) • Finset.sum (range (i + 1)) g)
i : ℕ
⊢ f i • ∑ i in range (i + 1), g i - f (i + 1) • ∑ i in range (i + 1), g i =
-((f (i + 1) - f i) • ∑ i in range (i + 1), g i)
[PROOFSTEP]
rw [sub_smul]
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
h₁ : ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
h₂ :
∑ i in Ico (m + 1) n, f i • ∑ i in range (i + 1), g i =
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i
h₃ :
∑ i in Ico (m + 1) n, f i • g i = ∑ i in Ico (m + 1) n, f i • (Finset.sum (range (i + 1)) g - Finset.sum (range i) g)
h₄ :
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i -
∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i =
f (n - 1) • Finset.sum (range n) g - f m • Finset.sum (range (m + 1)) g +
∑ i in Ico m (n - 1), (f i • Finset.sum (range (i + 1)) g - f (i + 1) • Finset.sum (range (i + 1)) g)
i : ℕ
⊢ f i • ∑ i in range (i + 1), g i - f (i + 1) • ∑ i in range (i + 1), g i =
-(f (i + 1) • ∑ i in range (i + 1), g i - f i • ∑ i in range (i + 1), g i)
[PROOFSTEP]
abel
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
h₁ : ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
h₂ :
∑ i in Ico (m + 1) n, f i • ∑ i in range (i + 1), g i =
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i
h₃ :
∑ i in Ico (m + 1) n, f i • g i = ∑ i in Ico (m + 1) n, f i • (Finset.sum (range (i + 1)) g - Finset.sum (range i) g)
h₄ :
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i -
∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i =
f (n - 1) • Finset.sum (range n) g - f m • Finset.sum (range (m + 1)) g +
∑ i in Ico m (n - 1), (f i • Finset.sum (range (i + 1)) g - f (i + 1) • Finset.sum (range (i + 1)) g)
i : ℕ
⊢ f i • ∑ i in range (i + 1), g i - f (i + 1) • ∑ i in range (i + 1), g i =
-(f (i + 1) • ∑ i in range (i + 1), g i - f i • ∑ i in range (i + 1), g i)
[PROOFSTEP]
abel
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
h₁ : ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
h₂ :
∑ i in Ico (m + 1) n, f i • ∑ i in range (i + 1), g i =
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i
h₃ :
∑ i in Ico (m + 1) n, f i • g i = ∑ i in Ico (m + 1) n, f i • (Finset.sum (range (i + 1)) g - Finset.sum (range i) g)
h₄ :
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i -
∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i =
f (n - 1) • Finset.sum (range n) g - f m • Finset.sum (range (m + 1)) g +
∑ i in Ico m (n - 1), (f i • Finset.sum (range (i + 1)) g - f (i + 1) • Finset.sum (range (i + 1)) g)
this :
∀ (i : ℕ),
f i • ∑ i in range (i + 1), g i - f (i + 1) • ∑ i in range (i + 1), g i =
-((f (i + 1) - f i) • ∑ i in range (i + 1), g i)
⊢ f m • g m +
(f (n - 1) • Finset.sum (range n) g - f m • Finset.sum (range (m + 1)) g +
∑ i in Ico m (n - 1), (f i • Finset.sum (range (i + 1)) g - f (i + 1) • Finset.sum (range (i + 1)) g)) =
f (n - 1) • ∑ i in range n, g i - f m • ∑ i in range m, g i -
∑ i in Ico m (n - 1), (f (i + 1) - f i) • ∑ i in range (i + 1), g i
[PROOFSTEP]
simp_rw [this, sum_neg_distrib, sum_range_succ, smul_add]
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
h₁ : ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
h₂ :
∑ i in Ico (m + 1) n, f i • ∑ i in range (i + 1), g i =
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i
h₃ :
∑ i in Ico (m + 1) n, f i • g i = ∑ i in Ico (m + 1) n, f i • (Finset.sum (range (i + 1)) g - Finset.sum (range i) g)
h₄ :
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i -
∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i =
f (n - 1) • Finset.sum (range n) g - f m • Finset.sum (range (m + 1)) g +
∑ i in Ico m (n - 1), (f i • Finset.sum (range (i + 1)) g - f (i + 1) • Finset.sum (range (i + 1)) g)
this :
∀ (i : ℕ),
f i • ∑ i in range (i + 1), g i - f (i + 1) • ∑ i in range (i + 1), g i =
-((f (i + 1) - f i) • ∑ i in range (i + 1), g i)
⊢ f m • g m +
(f (n - 1) • Finset.sum (range n) g - (f m • ∑ x in range m, g x + f m • g m) +
-∑ x in Ico m (n - 1), ((f (x + 1) - f x) • ∑ x in range x, g x + (f (x + 1) - f x) • g x)) =
f (n - 1) • ∑ x in range n, g x - f m • ∑ x in range m, g x -
∑ x in Ico m (n - 1), ((f (x + 1) - f x) • ∑ x in range x, g x + (f (x + 1) - f x) • g x)
[PROOFSTEP]
abel
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hmn : m < n
h₁ : ∑ i in Ico (m + 1) n, f i • ∑ i in range i, g i = ∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i
h₂ :
∑ i in Ico (m + 1) n, f i • ∑ i in range (i + 1), g i =
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i
h₃ :
∑ i in Ico (m + 1) n, f i • g i = ∑ i in Ico (m + 1) n, f i • (Finset.sum (range (i + 1)) g - Finset.sum (range i) g)
h₄ :
∑ i in Ico m (n - 1), f i • ∑ i in range (i + 1), g i + f (n - 1) • ∑ i in range n, g i -
f m • ∑ i in range (m + 1), g i -
∑ i in Ico m (n - 1), f (i + 1) • ∑ i in range (i + 1), g i =
f (n - 1) • Finset.sum (range n) g - f m • Finset.sum (range (m + 1)) g +
∑ i in Ico m (n - 1), (f i • Finset.sum (range (i + 1)) g - f (i + 1) • Finset.sum (range (i + 1)) g)
this :
∀ (i : ℕ),
f i • ∑ i in range (i + 1), g i - f (i + 1) • ∑ i in range (i + 1), g i =
-((f (i + 1) - f i) • ∑ i in range (i + 1), g i)
⊢ f m • g m +
(f (n - 1) • Finset.sum (range n) g - (f m • ∑ x in range m, g x + f m • g m) +
-∑ x in Ico m (n - 1), ((f (x + 1) - f x) • ∑ x in range x, g x + (f (x + 1) - f x) • g x)) =
f (n - 1) • ∑ x in range n, g x - f m • ∑ x in range m, g x -
∑ x in Ico m (n - 1), ((f (x + 1) - f x) • ∑ x in range x, g x + (f (x + 1) - f x) • g x)
[PROOFSTEP]
abel
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
⊢ ∑ i in range n, f i • g i =
f (n - 1) • ∑ i in range n, g i - ∑ i in range (n - 1), (f (i + 1) - f i) • ∑ i in range (i + 1), g i
[PROOFSTEP]
by_cases hn : n = 0
[GOAL]
case pos
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hn : n = 0
⊢ ∑ i in range n, f i • g i =
f (n - 1) • ∑ i in range n, g i - ∑ i in range (n - 1), (f (i + 1) - f i) • ∑ i in range (i + 1), g i
[PROOFSTEP]
simp [hn]
[GOAL]
case neg
R : Type u_1
M : Type u_2
inst✝² : Ring R
inst✝¹ : AddCommGroup M
inst✝ : Module R M
f : ℕ → R
g : ℕ → M
m n : ℕ
hn : ¬n = 0
⊢ ∑ i in range n, f i • g i =
f (n - 1) • ∑ i in range n, g i - ∑ i in range (n - 1), (f (i + 1) - f i) • ∑ i in range (i + 1), g i
[PROOFSTEP]
rw [range_eq_Ico, sum_Ico_by_parts f g (Nat.pos_of_ne_zero hn), sum_range_zero, smul_zero, sub_zero, range_eq_Ico]
|
State Before: n✝ : ℕ
R : Type ?u.34539
inst✝ : Ring R
n : ℕ
a b : ZMod n
⊢ ↑(a + b) = ↑(if ↑n ≤ ↑a + ↑b then a + b - ↑n else a + b) State After: case zero
n : ℕ
R : Type ?u.34539
inst✝ : Ring R
a b : ZMod Nat.zero
⊢ ↑(a + b) = ↑(if ↑Nat.zero ≤ ↑a + ↑b then a + b - ↑Nat.zero else a + b)
case succ
n : ℕ
R : Type ?u.34539
inst✝ : Ring R
n✝ : ℕ
a b : ZMod (Nat.succ n✝)
⊢ ↑(a + b) = ↑(if ↑(Nat.succ n✝) ≤ ↑a + ↑b then a + b - ↑(Nat.succ n✝) else a + b) Tactic: cases n State Before: case succ
n : ℕ
R : Type ?u.34539
inst✝ : Ring R
n✝ : ℕ
a b : ZMod (Nat.succ n✝)
⊢ ↑(a + b) = ↑(if ↑(Nat.succ n✝) ≤ ↑a + ↑b then a + b - ↑(Nat.succ n✝) else a + b) State After: case succ
n : ℕ
R : Type ?u.34539
inst✝ : Ring R
n✝ : ℕ
a b : ZMod (Nat.succ n✝)
⊢ ↑(a + b) = ↑(if ↑(Nat.succ n✝) ≤ ↑a + ↑b then a + b - ↑(Nat.succ n✝) else a + b) Tactic: simp only [Fin.val_add_eq_ite, ← Int.ofNat_add, ← Int.ofNat_succ, Int.ofNat_le] State Before: case succ
n : ℕ
R : Type ?u.34539
inst✝ : Ring R
n✝ : ℕ
a b : ZMod (Nat.succ n✝)
⊢ ↑(a + b) = ↑(if ↑(Nat.succ n✝) ≤ ↑a + ↑b then a + b - ↑(Nat.succ n✝) else a + b) State After: case succ.inl
n : ℕ
R : Type ?u.34539
inst✝ : Ring R
n✝ : ℕ
a b : ZMod (Nat.succ n✝)
h : ↑(Nat.succ n✝) ≤ ↑a + ↑b
⊢ ↑(a + b) = ↑(a + b - ↑(Nat.succ n✝))
case succ.inr
n : ℕ
R : Type ?u.34539
inst✝ : Ring R
n✝ : ℕ
a b : ZMod (Nat.succ n✝)
h : ¬↑(Nat.succ n✝) ≤ ↑a + ↑b
⊢ ↑(a + b) = ↑(a + b) Tactic: split_ifs with h State Before: case zero
n : ℕ
R : Type ?u.34539
inst✝ : Ring R
a b : ZMod Nat.zero
⊢ ↑(a + b) = ↑(if ↑Nat.zero ≤ ↑a + ↑b then a + b - ↑Nat.zero else a + b) State After: no goals Tactic: simp State Before: case succ.inl
n : ℕ
R : Type ?u.34539
inst✝ : Ring R
n✝ : ℕ
a b : ZMod (Nat.succ n✝)
h : ↑(Nat.succ n✝) ≤ ↑a + ↑b
⊢ ↑(a + b) = ↑(a + b - ↑(Nat.succ n✝)) State After: case succ.inl
n : ℕ
R : Type ?u.34539
inst✝ : Ring R
n✝ : ℕ
a b : ZMod (Nat.succ n✝)
h : ↑(Nat.succ n✝) ≤ ↑a + ↑b
⊢ ↑(a + b) = ↑(a + b - ↑(Nat.succ n✝)) Tactic: norm_cast State Before: case succ.inl
n : ℕ
R : Type ?u.34539
inst✝ : Ring R
n✝ : ℕ
a b : ZMod (Nat.succ n✝)
h : ↑(Nat.succ n✝) ≤ ↑a + ↑b
⊢ ↑(a + b) = ↑(a + b - ↑(Nat.succ n✝)) State After: case succ.inl.e_a
n : ℕ
R : Type ?u.34539
inst✝ : Ring R
n✝ : ℕ
a b : ZMod (Nat.succ n✝)
h : ↑(Nat.succ n✝) ≤ ↑a + ↑b
⊢ a + b = a + b - ↑(Nat.succ n✝) Tactic: congr State Before: case succ.inl.e_a
n : ℕ
R : Type ?u.34539
inst✝ : Ring R
n✝ : ℕ
a b : ZMod (Nat.succ n✝)
h : ↑(Nat.succ n✝) ≤ ↑a + ↑b
⊢ a + b = a + b - ↑(Nat.succ n✝) State After: no goals Tactic: simp State Before: case succ.inr
n : ℕ
R : Type ?u.34539
inst✝ : Ring R
n✝ : ℕ
a b : ZMod (Nat.succ n✝)
h : ¬↑(Nat.succ n✝) ≤ ↑a + ↑b
⊢ ↑(a + b) = ↑(a + b) State After: no goals Tactic: rfl
|
"""
ZeroInflatedPoisson(λ, p)
A *Zero-Inflated Poisson distribution* is a mixture distribution in which data arise from two processes. The first process is a Poisson distribution, with mean λ, that describes the number of independent events occurring within a unit time interval:
```math
P(X = k) = (1 - p) \\frac{\\lambda^k}{k!} e^{-\\lambda}, \\quad \\text{ for } k = 1,2,\\ldots.
```
Zeros may arise either from the Poisson process or from an additional Bernoulli process, in which the probability of observing an excess zero is p:
```math
P(X = 0) = p + (1 - p) e^{-\\lambda}
```
As p approaches 0, the distribution tends toward Poisson(λ).
```julia
ZeroInflatedPoisson()   # Zero-Inflated Poisson distribution with rate parameter 1 and zero-inflation probability 0
ZeroInflatedPoisson(λ)  # Zero-Inflated Poisson distribution with rate parameter λ and zero-inflation probability 0
params(d) # Get the parameters, i.e. (λ, p)
mean(d) # Get the mean of the mixture distribution
var(d) # Get the variance of the mixture distribution
```
External links:
* [Zero-inflated Poisson Regression on UCLA IDRE Statistical Consulting](https://stats.idre.ucla.edu/stata/dae/zero-inflated-poisson-regression/)
* [Zero-inflated model on Wikipedia](https://en.wikipedia.org/wiki/Zero-inflated_model)
* McElreath, R. (2020). Statistical Rethinking: A Bayesian Course with Examples in R and Stan (2nd ed.). Chapman and Hall/CRC. https://doi.org/10.1201/9780429029608
"""
struct ZeroInflatedPoisson{T<:Real} <: DiscreteUnivariateDistribution
λ::T
p::T
function ZeroInflatedPoisson{T}(λ::T, p::T) where {T <: Real}
return new{T}(λ, p)
end
end
function ZeroInflatedPoisson(λ::T, p::T; check_args = true) where {T <: Real}
if check_args
@check_args(Poisson, λ >= zero(λ))
@check_args(ZeroInflatedPoisson, zero(p) <= p <= one(p))
end
return ZeroInflatedPoisson{T}(λ, p)
end
ZeroInflatedPoisson(λ::Real, p::Real) = ZeroInflatedPoisson(promote(λ, p)...)
ZeroInflatedPoisson(λ::Integer, p::Integer) = ZeroInflatedPoisson(float(λ), float(p))
ZeroInflatedPoisson(λ::Real) = ZeroInflatedPoisson(λ, 0.0)
ZeroInflatedPoisson() = ZeroInflatedPoisson(1.0, 0.0, check_args = false)
@distr_support ZeroInflatedPoisson 0 (d.λ == zero(typeof(d.λ)) ? 0 : Inf)
### Statistics
mean(d::ZeroInflatedPoisson) = (1 - d.p) * d.λ
var(d::ZeroInflatedPoisson) = d.λ * (1 - d.p) * (1 + d.p * d.λ)
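# Sketch of where these formulas come from: writing X = (1 - Z) * Y with
# Z ~ Bernoulli(p) and Y ~ Poisson(λ) independent gives E[X] = (1 - p)λ and
# E[X²] = (1 - p)(λ + λ²), hence Var[X] = (1 - p)λ(1 + pλ), matching the above.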
#### Conversions
function convert(::Type{ZeroInflatedPoisson{T}}, λ::Real, p::Real) where {T<:Real}
return ZeroInflatedPoisson(T(λ), T(p))
end
function convert(::Type{ZeroInflatedPoisson{T}}, d::ZeroInflatedPoisson{S}) where {T <: Real, S <: Real}
return ZeroInflatedPoisson(T(d.λ), T(d.p), check_args = false)
end
#### Parameters
params(d::ZeroInflatedPoisson) = (d.λ, d.p,)
partype(::ZeroInflatedPoisson{T}) where {T} = T
rate(d::ZeroInflatedPoisson) = d.λ
excessprob(d::ZeroInflatedPoisson) = d.p
#### Evaluation
function logpdf(d::ZeroInflatedPoisson, y::Real)
lp = if iszero(y)
logaddexp(log(d.p), log1p(-d.p) - d.λ)
else
log1p(-d.p) + logpdf(Poisson(d.λ), y)
end
return lp
end
function cdf(d::ZeroInflatedPoisson, x::Real)
pd = Poisson(d.λ)
deflat_limit = -1.0 / expm1(d.λ)
if x < 0
out = 0.0
elseif d.p < deflat_limit
out = NaN
else
out = d.p + (1 - d.p) * cdf(pd, x)
end
return out
end
# quantile
function quantile(d::ZeroInflatedPoisson, q::Real)
    deflat_limit = -1.0 / expm1(d.λ)
    if d.p < deflat_limit
        out = NaN  # zero-deflated parameter region: the quantile is undefined
    elseif q <= d.p
        out = 0
    else
        qp = (q - d.p) / (1.0 - d.p)
        pd = Poisson(d.λ)
        out = quantile(pd, qp)  # q == 1 propagates through the Poisson quantile
    end
    return out
end
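# Reasoning sketch for the inversion above: the ZIP cdf is
# F(x) = p + (1 - p) F_Pois(x), so solving q = p + (1 - p) F_Pois(x) for the
# Poisson part gives qp = (q - p) / (1 - p); the ZIP quantile is then the
# Poisson quantile evaluated at qp.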
#### Fitting
struct ZeroInflatedPoissonStats <: SufficientStats
sx::Float64 # (weighted) sum of x
p0::Float64 # observed proportion of zeros
tw::Float64 # total sample weight
end
suffstats(::Type{<:ZeroInflatedPoisson}, x::AbstractArray{T}) where {T<:Integer} = ZeroInflatedPoissonStats(
sum(x),
mean(iszero, x),
length(x)
)
# weighted
function suffstats(::Type{<:ZeroInflatedPoisson}, x::AbstractArray{T}, w::AbstractArray{Float64}) where T<:Integer
n = length(x)
n == length(w) || throw(DimensionMismatch("Inconsistent array lengths."))
sx = 0.
tw = 0.
p0 = 0.
for i = 1 : n
@inbounds wi = w[i]
@inbounds sx += x[i] * wi
tw += wi
@inbounds p0i = (x[i] == 0) * wi
p0 += p0i
end
    return ZeroInflatedPoissonStats(sx, p0 / tw, tw)  # normalise the zero count to a weighted proportion
end
function fit_mle(::Type{<:ZeroInflatedPoisson}, ss::ZeroInflatedPoissonStats)
m = ss.sx / ss.tw
s = m / (1 - ss.p0)
λhat = lambertw(-s * exp(-s)) + s
phat = 1 - (m / λhat)
return ZeroInflatedPoisson(λhat, phat)
end
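#=
Sketch of the closed form above (the usual ZIP moment/zero-fraction equations):

    m  = (1 - p) λ                 (sample mean)
    p₀ = p + (1 - p) exp(-λ)       (observed fraction of zeros)

Eliminating p yields λ = s (1 - exp(-λ)) with s = m / (1 - p₀), whose solution
is λ = W(-s exp(-s)) + s in terms of the Lambert W function; `lambertw` is
assumed to come from the LambertW.jl package imported elsewhere in the module.
=#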
function fit_mle(::Type{<:ZeroInflatedPoisson}, x::AbstractArray{T}) where T<:Real
pstat = suffstats(ZeroInflatedPoisson, x)
return fit_mle(ZeroInflatedPoisson, pstat)
end
function fit_mle(::Type{<:ZeroInflatedPoisson}, x::AbstractArray{T}, w::AbstractArray{Float64}) where T<:Real
pstat = suffstats(ZeroInflatedPoisson, x, w)
return fit_mle(ZeroInflatedPoisson, pstat)
end
|
from pathlib import Path
import numpy
import pygame
from bafd.sprites import map as sprites
from bafd.utils.window import vsize
from ... import time
from . import spread
__folder__ = Path(__file__).parent
# Read csv of land/water tiles
landmap = numpy.fliplr(
numpy.genfromtxt(__folder__ / 'land.csv', delimiter=',')
)
class Map(pygame.Surface):
def __init__(self, size):
pygame.Surface.__init__(self, size)
self.tiles = numpy.ndarray(shape=landmap.shape, dtype=numpy.object_)
# Placeholder coord object
point = Coord((0, 0), mode="map")
# Iterate through all coords
for x, vals in enumerate(landmap):
for y, val in enumerate(vals):
point.map = (x, y)
if val == 1:
# If tile is land, style it according to influence maps
self.tiles[x, y] = Tile((x, y))
self.blit(self.tiles[x, y].surface, point.screen)
elif val == 0:
# If tile is water, just draw it
self.tiles[x, y] = sprites.water
self.blit(self.tiles[x, y], point.screen)
# Store neighbours
for x, row in enumerate(self.tiles):
for y, cell in enumerate(row):
# Skip non-tiles
if not isinstance(cell, Tile):
continue
                # Get nearby tiles (slice ends are exclusive, so use +2; clamp
                # at 0 so x-1 cannot wrap around to the end of the array)
                near = self.tiles[max(x - 1, 0):x + 2, max(y - 1, 0):y + 2].flatten()
                for neighbour in near:
                    if isinstance(neighbour, Tile) and neighbour is not cell:
                        # Store neighbouring Tile objects (water sprites are skipped)
                        cell.neighbours.append(neighbour)
# Set starting year
self.year = time.Year(-1500, length=0.1, map=self)
def advance_year(self):
# Iterate through tiles
for cell in self.tiles.flatten():
if isinstance(cell, Tile):
# Advance year on each cell
cell.advance_year()
# Reblit
self.blit(cell.surface, cell.coords.screen)
def on_click(self, pos):
i = Coord(pos, mode="screen").map
if hasattr(self.tiles[i], "on_click"):
self.tiles[i].on_click(pos)
class Tile:
    def __init__(self, coords, resources=None):
# Validate
if not isinstance(coords, Coord):
coords = Coord(coords, mode="map")
# Initialise surface
self.surface = sprites.land.copy()
# Store values given
self.coords = coords
        self.resources = resources if resources is not None else []  # avoid a shared mutable default
# Blank array to store neighbours in
self.neighbours = []
# Initialise an overlay for each culture
self.overlays = {
culture: getattr(sprites, culture).copy()
for culture in spread.cultures
}
        # Apply initial influence overlays
self.update_overlay()
@property
def demographics(self):
"""Demographic (cultural) makeup of this tile"""
return spread.demographics[self.coords.map]
@demographics.setter
def demographics(self, value):
# Validate
assert isinstance(value, numpy.ndarray)
assert value.dtype == self.demographics.dtype
# Set
spread.demographics[self.coords.map] = value
@property
def influence(self):
"""Cultural influence emitted by this tile"""
return self.demographics
def advance_year(self):
"""Progress by 1 year"""
# Recalculate demographics of each culture according to influence of neighbours
adj = numpy.array(
[cell.influence for cell in self.neighbours],
dtype=self.demographics.dtype
)
for culture in spread.cultures:
spread.demographics[culture][self.coords.map] += numpy.nanmean(adj[culture]) * numpy.random.choice([0.9, 1, 1.1])
# Re-normalise demographics
total = sum(spread.demographics[self.coords.map])
for culture in spread.cultures:
spread.demographics[culture][self.coords.map] /= total
# Update overlay
self.update_overlay()
def update_overlay(self):
# Reset tile
self.surface.blit(sprites.land, (0, 0))
# Overlay for each culture
for culture in spread.cultures:
# Set overlay opacity
            self.overlays[culture].set_alpha(int(self.demographics[culture] * 255))
# Merge
self.surface.blit(self.overlays[culture], (0, 0), special_flags=pygame.BLEND_ALPHA_SDL2)
def on_click(self, pos):
print(self.coords.map)
class Coord:
"""A class to handle seamless conversion between map and screen coordinates"""
def __init__(self, coord, mode="screen"):
# Validate
if mode not in ['map', 'screen']:
raise ValueError(f"Invalid value {mode} for coordinate mode")
# Set some default values
self._map = (0, 0)
self._screen = (0, 0)
# Supply value to setter methods
setattr(self, mode, coord)
def __repr__(self):
return f"<Coord object: map={self.map} screen={self.screen}>"
@property
def map(self):
"""Location of this coordinate on the map"""
return self._map
@map.setter
def map(self, value):
# Validate
assert isinstance(value, (list, tuple))
assert len(value) == 2
# Set value
self._map = value
# Calculate screen coords
x, y = value
w, h = vsize
# Elimination method
self._screen = (
int(numpy.floor(x*5 - y*5 + w/2 - 4)),
int(numpy.floor(y*4 + x*4 - h/2 - 30))
)
@property
def screen(self):
"""Location of this coordinate on the screen"""
return self._screen
@screen.setter
def screen(self, value):
# Validate
assert isinstance(value, (list, tuple))
assert len(value) == 2
# Set value
self._screen = value
# Calculate map coords
x, y = value
w, h = vsize
"""
x = X*5 - Y*5 + (w-8)/2
y = Y*4 + X*4 - (h+60)/2
X = x/5 - (w-8)/10 + Y
Y = y/4 + (h+60)/8 - X
X - Y = x/5 - (w-8)/10
Y + X = y/4 + (h+60)/8
X = x/10 - (w-8)/20 + y/8 + (h+60)/16
Y = y/8 + (h+60)/16 - x/10 + (w-8)/20
"""
self._map = (
int(numpy.floor(x/10 - (w-8)/20 + y/8 + (h+60)/16)),
int(numpy.floor(y/8 + (h+60)/16 - x/10 + (w-8)/20))
)
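
# A minimal round-trip sketch of the Coord conversions above (illustrative
# only; assumes `vsize` holds the virtual screen size imported at the top):
if __name__ == "__main__":
    c = Coord((3, 5), mode="map")
    c2 = Coord(c.screen, mode="screen")
    print(c.map, c.screen, c2.map)  # expect c2.map to recover c.map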
|
//=============================================================================================================
/**
* @file realtimesourceestimatewidget.cpp
* @author Christoph Dinh <[email protected]>;
* Juan Garcia-Prieto <[email protected]>;
* Lorenz Esch <[email protected]>
* @version dev
* @date February, 2013
*
* @section LICENSE
*
* Copyright (C) 2013, Christoph Dinh, Juan Garcia-Prieto, Lorenz Esch. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that
* the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of conditions and the
* following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
* the following disclaimer in the documentation and/or other materials provided with the distribution.
* * Neither the name of MNE-CPP authors nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*
* @brief Definition of the RealTimeSourceEstimateWidget Class.
*
*/
//*************************************************************************************************************
//=============================================================================================================
// INCLUDES
//=============================================================================================================
#include "realtimesourceestimatewidget.h"
#include <scMeas/realtimesourceestimate.h>
#include <disp/viewers/quickcontrolview.h>
#include <disp3D/engine/model/items/sourcedata/mnedatatreeitem.h>
#include <disp3D/viewers/sourceestimateview.h>
//*************************************************************************************************************
//=============================================================================================================
// QT INCLUDES
//=============================================================================================================
#include <QGridLayout>
#include <QVector3D>
//*************************************************************************************************************
//=============================================================================================================
// Eigen INCLUDES
//=============================================================================================================
#include <Eigen/Core>
//*************************************************************************************************************
//=============================================================================================================
// USED NAMESPACES
//=============================================================================================================
using namespace SCDISPLIB;
using namespace DISP3DLIB;
using namespace DISPLIB;
using namespace SCMEASLIB;
//*************************************************************************************************************
//=============================================================================================================
// DEFINE MEMBER METHODS
//=============================================================================================================
RealTimeSourceEstimateWidget::RealTimeSourceEstimateWidget(QSharedPointer<RealTimeSourceEstimate> &pRTSE, QWidget* parent)
: MeasurementWidget(parent)
, m_pRTSE(pRTSE)
, m_bInitialized(false)
, m_pRtItem(Q_NULLPTR)
, m_pSourceEstimateView(SourceEstimateView::SPtr::create())
{
m_pActionQuickControl = new QAction(QIcon(":/images/quickControl.png"), tr("Show quick control widget (F9)"),this);
m_pActionQuickControl->setShortcut(tr("F9"));
m_pActionQuickControl->setStatusTip(tr("Show quick control widget (F9)"));
connect(m_pActionQuickControl.data(), &QAction::triggered,
this, &RealTimeSourceEstimateWidget::showQuickControlView);
addDisplayAction(m_pActionQuickControl);
m_pActionQuickControl->setVisible(true);
QGridLayout *mainLayoutView = new QGridLayout;
mainLayoutView->addWidget(m_pSourceEstimateView.data(),0,0);
QList<QSharedPointer<QWidget> > lControlWidgets = m_pRTSE->getControlWidgets();
m_pSourceEstimateView->setQuickControlWidgets(lControlWidgets);
m_pQuickControlView = m_pSourceEstimateView->getQuickControl();
m_pQuickControlView->setWindowFlags(Qt::Window | Qt::CustomizeWindowHint | Qt::WindowStaysOnTopHint);
m_pQuickControlView->setDraggable(true);
m_pQuickControlView->setVisiblityHideOpacityClose(true);
mainLayoutView->setContentsMargins(0,0,0,0);
this->setLayout(mainLayoutView);
getData();
}
//*************************************************************************************************************
RealTimeSourceEstimateWidget::~RealTimeSourceEstimateWidget()
{
// Store Settings
if(!m_pRTSE->getName().isEmpty()) {
}
}
//*************************************************************************************************************
void RealTimeSourceEstimateWidget::update(SCMEASLIB::Measurement::SPtr)
{
getData();
}
//*************************************************************************************************************
void RealTimeSourceEstimateWidget::getData()
{
if(m_bInitialized) {
QList<MNESourceEstimate::SPtr> lMNEData = m_pRTSE->getValue();
// Add source estimate data
if(!lMNEData.isEmpty()) {
if(!m_pRtItem && m_pRTSE->getAnnotSet() && m_pRTSE->getSurfSet() && m_pRTSE->getFwdSolution()) {
//qDebug()<<"RealTimeSourceEstimateWidget::getData - Creating m_lRtItem list";
m_pRtItem = m_pSourceEstimateView->addData("Subject", "Data",
*lMNEData.first(),
*m_pRTSE->getFwdSolution(),
*m_pRTSE->getSurfSet(),
*m_pRTSE->getAnnotSet());
m_pRtItem->setLoopState(false);
m_pRtItem->setTimeInterval(17);
m_pRtItem->setThresholds(QVector3D(0.0,5,10));
m_pRtItem->setColormapType("Hot");
m_pRtItem->setVisualizationType("Annotation based");
m_pRtItem->setNumberAverages(17);
m_pRtItem->setAlpha(1.0);
m_pRtItem->setStreamingState(true);
m_pRtItem->setSFreq(m_pRTSE->getFiffInfo()->sfreq);
} else {
//qDebug()<<"RealTimeSourceEstimateWidget::getData - Working with m_lRtItem list";
if(m_pRtItem) {
m_pRtItem->addData(*lMNEData.first());
}
}
}
} else {
init();
}
}
//*************************************************************************************************************
void RealTimeSourceEstimateWidget::init()
{
m_bInitialized = true;
}
//*************************************************************************************************************
void RealTimeSourceEstimateWidget::showQuickControlView()
{
if(m_pQuickControlView) {
m_pQuickControlView->raise();
m_pQuickControlView->show();
}
}
|
Worth the wait, awesome car. Great looks. Exceptional power. Unmatched comfort. A must buy.
A place where you can find everything about any car, old, new or upcoming. CarDekho is really helpful for suggesting good cars.
This is a very beautiful car. I like this beauty, and I love the Hyundai company.
The Hyundai Santa Fe 2019 is an excellent car with good looks; it is a step forward. Kudos to Hyundai.
Q. Is Hyundai Palisade launched in India?
There is no official announcement from the brand's end regarding the launch of this car in India.
Q. What is the seating capacity and exact launch date?
As of now, there is no official update regarding its relaunch in India from the brand.
|
mutable struct brookstone_supertype_t <: LCMType
utime::Int64
newvariable::generic_variable_t
newfactor::generic_factor_t
img::image_t
end
@lcmtypesetup(brookstone_supertype_t)
|
module Arity where
open import OscarPrelude
record Arity : Set
where
constructor ⟨_⟩
field
arity : Nat
open Arity public
instance EqArity : Eq Arity
Eq._==_ EqArity _ = decEq₁ (cong arity) ∘ (_≟_ on arity $ _)
|
Require Import List.
Require Import EqNat.
Definition alist := list (nat * bool).
Fixpoint in_assignment n (a : alist) : Prop :=
match a with
| nil => False
| (h,_)::t => if beq_nat n h
then True
else in_assignment n t
end.
Lemma in_empty : forall a, in_assignment a nil -> False.
intros; compute in H; apply H.
Qed.
Fixpoint find_assignment n (a : alist) : in_assignment n a -> bool :=
  match a with
  | nil => fun pf => match (in_empty n) pf with end
  | (h, tv)::t =>
      (* Match on [beq_nat n h] (the same test, in the same argument order,
         as in [in_assignment]) with a dependent return type, so each branch
         is checked against the corresponding unfolding of the domain. *)
      match beq_nat n h as b
            return ((if b then True else in_assignment n t) -> bool)
      with
      | true => fun _ => tv
      | false => find_assignment n t
      end
  end.
|
lemma convergent_LIMSEQ_iff: "convergent X \<longleftrightarrow> X \<longlonglongrightarrow> lim X"
|
Formal statement is: lemma AE_count_space: "(AE x in count_space A. P x) \<longleftrightarrow> (\<forall>x\<in>A. P x)" Informal statement is: With respect to the counting measure on a set $A$, a property $P$ holds almost everywhere if and only if it holds for every $x \in A$.
|
lemma filtermap_at_right_shift: "filtermap (\<lambda>x. x - d) (at_right a) = at_right (a - d)" for a d :: "real"
|
Formal statement is: lemma emeasure_single_in_space: "emeasure M {x} \<noteq> 0 \<Longrightarrow> x \<in> space M" Informal statement is: If the measure of the singleton set $\{x\}$ is nonzero, then the point $x$ lies in the space of the measure.
|
State Before: R : Type u_1
S : Type u_2
T : Type ?u.717680
inst✝⁷ : CommRing R
inst✝⁶ : Ring S
inst✝⁵ : Algebra R S
A : Type ?u.717983
B✝ : Type ?u.717986
inst✝⁴ : CommRing A
inst✝³ : CommRing B✝
inst✝² : IsDomain B✝
inst✝¹ : Algebra A B✝
K : Type ?u.718408
inst✝ : Field K
B : PowerBasis R S
x : S
hx : B.gen ∈ adjoin R {x}
⊢ adjoin R {x} = ⊤ State After: R : Type u_1
S : Type u_2
T : Type ?u.717680
inst✝⁷ : CommRing R
inst✝⁶ : Ring S
inst✝⁵ : Algebra R S
A : Type ?u.717983
B✝ : Type ?u.717986
inst✝⁴ : CommRing A
inst✝³ : CommRing B✝
inst✝² : IsDomain B✝
inst✝¹ : Algebra A B✝
K : Type ?u.718408
inst✝ : Field K
B : PowerBasis R S
x : S
hx : B.gen ∈ adjoin R {x}
⊢ adjoin R {B.gen} ≤ adjoin R {x} Tactic: rw [_root_.eq_top_iff, ← B.adjoin_gen_eq_top] State Before: R : Type u_1
S : Type u_2
T : Type ?u.717680
inst✝⁷ : CommRing R
inst✝⁶ : Ring S
inst✝⁵ : Algebra R S
A : Type ?u.717983
B✝ : Type ?u.717986
inst✝⁴ : CommRing A
inst✝³ : CommRing B✝
inst✝² : IsDomain B✝
inst✝¹ : Algebra A B✝
K : Type ?u.718408
inst✝ : Field K
B : PowerBasis R S
x : S
hx : B.gen ∈ adjoin R {x}
⊢ adjoin R {B.gen} ≤ adjoin R {x} State After: R : Type u_1
S : Type u_2
T : Type ?u.717680
inst✝⁷ : CommRing R
inst✝⁶ : Ring S
inst✝⁵ : Algebra R S
A : Type ?u.717983
B✝ : Type ?u.717986
inst✝⁴ : CommRing A
inst✝³ : CommRing B✝
inst✝² : IsDomain B✝
inst✝¹ : Algebra A B✝
K : Type ?u.718408
inst✝ : Field K
B : PowerBasis R S
x : S
hx : B.gen ∈ adjoin R {x}
⊢ {B.gen} ⊆ ↑(adjoin R {x}) Tactic: refine' adjoin_le _ State Before: R : Type u_1
S : Type u_2
T : Type ?u.717680
inst✝⁷ : CommRing R
inst✝⁶ : Ring S
inst✝⁵ : Algebra R S
A : Type ?u.717983
B✝ : Type ?u.717986
inst✝⁴ : CommRing A
inst✝³ : CommRing B✝
inst✝² : IsDomain B✝
inst✝¹ : Algebra A B✝
K : Type ?u.718408
inst✝ : Field K
B : PowerBasis R S
x : S
hx : B.gen ∈ adjoin R {x}
⊢ {B.gen} ⊆ ↑(adjoin R {x}) State After: no goals Tactic: simp [hx]
|
I’d be track pants and a tee shirt.
Kunal Rawal at home and Dolce and Gabbana internationally.
Name the best styled male celebrities.
David Beckham, Saif Ali Khan.
Up West, who’s your style icon?
David Beckham and Ryan Gosling.
I guess skinny jeans for men.
Having to dress up for others. I prefer dressing for myself.
Is there too much pressure from the media to look your best?
Yes and no. I believe you should clean up your act when you’re in front of the media. That’s a ‘good’ pressure. Other than that, you should wear what you like.
What item do you go overboard shopping for?
Tee shirts, sunglasses and track pants. Actually, I overbuy everything – especially shoes.
Actually, you should first have your own sense of style. And then you can take guidance.
Don’t wear jeans that are tight.
On a scale of 1 to 10, how would you rate your personal style?
It would be at least 7/10 because I do make an effort when I step out.
Layering is always welcome. I love chukka boots. And there’s nothing cooler than a plain tee shirt and a leather jacket.
My accessories – sunglasses, watches and caps.
|
HOWLER Tutorials: What is HOWLER?
HOWLER is an acronym for Help Online With Library Education and Research. Our goal is to share information literacy strategies and knowledge with Madison College students. These tutorials serve both to reinforce and enhance our in-person library information literacy sessions and to give online and hybrid students access to information literacy best practices and concepts.
|
-- exercises in "Type-Driven Development with Idris"
-- chapter 3
import Data.Vect
-- check that all functions are total
%default total
--
-- section 3.2
--
-- exercise 3.2.1
my_length : List a -> Nat
my_length [] = 0
my_length (x :: xs) = 1 + my_length xs
-- exercise 3.2.2
my_reverse : List a -> List a
my_reverse [] = []
my_reverse (x :: xs) = my_reverse xs ++ [x]
-- exercise 3.2.3
my_map : (a -> b) -> List a -> List b
my_map f [] = []
my_map f (x :: xs) = f x :: my_map f xs
-- exercise 3.2.4
my_vect_map : (a -> b) -> Vect n a -> Vect n b
my_vect_map f [] = []
my_vect_map f (x :: xs) = f x :: my_vect_map f xs
--
-- section 3.3
-- see Matrix.idr
--
|
using DFTK: bzmesh_uniform, bzmesh_ir_wedge, ElementCoulomb, Vec3, Mat3
using DFTK: pymatgen_structure, load_lattice, standardize_atoms
using LinearAlgebra
using PyCall
using Test
include("testcases.jl")
@testset "bzmesh_uniform agrees with spglib" begin
spglib = pyimport_conda("spglib", "spglib")
function test_against_spglib(kgrid_size; kshift=[0, 0, 0])
kgrid_size = Vec3(kgrid_size)
identity = [reshape(Mat3{Int}(I), 1, 3, 3)]
is_shift = ifelse.(kshift .== 0, 0, 1)
_, grid = spglib.get_stabilized_reciprocal_mesh(kgrid_size, identity,
is_shift=is_shift)
kcoords_spglib = [(kshift .+ Vec3{Int}(grid[ik, :])) .// kgrid_size
for ik in 1:size(grid, 1)]
kcoords_spglib = DFTK.normalize_kpoint_coordinate.(kcoords_spglib)
sort!(kcoords_spglib)
kcoords, _ = bzmesh_uniform(kgrid_size, kshift=kshift)
sort!(kcoords)
@test kcoords == kcoords_spglib
end
test_against_spglib([ 2, 3, 2])
test_against_spglib([ 3, 3, 3])
test_against_spglib([ 3, 3, 3], kshift=[1//2, 0, 0])
test_against_spglib([ 2, 3, 4])
test_against_spglib([ 9, 11, 13])
end
@testset "bzmesh_ir_wedge is correct reduction" begin
function test_reduction(system, kgrid_size; supercell=[1, 1, 1], kshift=[0, 0, 0])
lattice = system.lattice
atoms = [ElementCoulomb(system.atnum) => system.positions]
if supercell != [1, 1, 1] # Make a supercell
pystruct = pymatgen_structure(lattice, atoms)
pystruct.make_supercell(supercell)
lattice = load_lattice(pystruct)
el = ElementCoulomb(system.atnum)
atoms = [el => [s.frac_coords for s in pystruct.sites]]
end
red_kcoords, _ = bzmesh_uniform(kgrid_size, kshift=kshift)
irred_kcoords, ksymops = bzmesh_ir_wedge(kgrid_size, DFTK.symmetry_operations(lattice, atoms); kshift=kshift)
# Try to reproduce all kcoords from irred_kcoords
all_kcoords = Vector{Vec3{Rational{Int}}}()
for (ik, k) in enumerate(irred_kcoords)
append!(all_kcoords, [S * k for (S, τ) in ksymops[ik]])
end
# Normalize the obtained k-Points and test for equality
red_kcoords = sort([mod.(k .* kgrid_size, kgrid_size) for k in red_kcoords])
all_kcoords = sort([mod.(k .* kgrid_size, kgrid_size) for k in all_kcoords])
@test all_kcoords == red_kcoords
end
test_reduction(silicon, [ 2, 3, 2])
test_reduction(silicon, [ 3, 3, 3])
test_reduction(silicon, [ 2, 3, 4])
test_reduction(silicon, [ 9, 11, 13])
test_reduction(silicon, [ 3, 3, 3], kshift=[1//2, 1//2, 1//2])
test_reduction(silicon, [ 3, 3, 3], kshift=[1//2, 0, 1//2])
test_reduction(silicon, [ 3, 3, 3], kshift=[0, 1//2, 0])
test_reduction(silicon, [ 1, 4, 4], supercell=[2, 1, 1])
test_reduction(silicon, [ 1, 16, 16], supercell=[4, 1, 1])
test_reduction(magnesium, [ 2, 3, 2])
test_reduction(magnesium, [ 3, 3, 3])
test_reduction(magnesium, [ 2, 3, 4])
test_reduction(magnesium, [ 9, 11, 13])
end
@testset "standardize_atoms" begin
# Test unperturbed structure
atoms = [ElementCoulomb(:Si) => silicon.positions]
slattice, satoms = standardize_atoms(silicon.lattice, atoms, primitive=true)
    @test length(satoms) == 1
    @test satoms[1][1] == ElementCoulomb(:Si)
    @test length(satoms[1][2]) == 2
    @test satoms[1][2][1] == ones(3) ./ 8
    @test satoms[1][2][2] == -ones(3) ./ 8
# Perturb structure
plattice = silicon.lattice .+ 1e-8rand(3, 3)
patoms = [ElementCoulomb(:Si) => [p + 1e-8rand(3) for p in silicon.positions]]
plattice, patoms = standardize_atoms(plattice, patoms, primitive=true)
# And check we get the usual silicon primitive cell back:
a = plattice[1, 2]
@test plattice == [0 a a; a 0 a; a a 0]
    @test length(patoms) == 1
    @test patoms[1][1] == ElementCoulomb(:Si)
    @test length(patoms[1][2]) == 2
    @test patoms[1][2][1] - patoms[1][2][2] == ones(3) ./ 4
end
|
#' @details
#' Provides access to arguments of nested functions. Sort of an alternative mechanism to passing `...` arguments, but with more features.
#' Provides access to a higher-level call's arguments (including `...` dots arguments) without explicitly passing them through the calling stack, and allows updating default values that are explicitly set throughout the calling stack (i.e., lower calls take precedence).
#' @keywords internal
"_PACKAGE"
|
########## This R code is meant to simulate data from a simple DAG.
### z
### x = f(c, z)
### y = f2(c)
### Edges: z -> x, c -> x, c -> y.
### z and c are from the set {1, 0}.
### The domain of x is {}.
### The domain of y is {}
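### A sketch of one assumed generative form (f and f2 were never specified in
### this file, so the logistic links below are illustrative only):
### c <- rbinom(n, 1, 0.5)
### z <- rbinom(n, 1, 0.5)
### x <- rbinom(n, 1, plogis(a0 + a1*c + a2*z))   # x = f(c, z)
### y <- rbinom(n, 1, plogis(b0 + b1*c))          # y = f2(c)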
|
The square of the complex square root of $z$ is $z$.
|
import cv2
import numpy as np
from scipy import ndimage
from skimage.segmentation import watershed
from skimage.feature import peak_local_max
from skimage.morphology import reconstruction
from lib.models.Colors import Color
class Channel:
def __init__(self, name=None, label=None, image=None, th=None):
self.name = name
self.label = label
self.th = th
self.image = image
####################################################################
################### LOADING AND SAVING FUNCTIONS ###################
####################################################################
def save(self, sample):
np.savez_compressed(f'samples/{sample}/{self.name}.npz', self.image_norm)
def load_images(self, im_type = 'image', img = None):
setattr(self, im_type, img)
return self
def dump_images(self):
'''Dumps all images of the channel
Returns
-------
Channel
'''
self.image = None
self.image_norm = None
self.image_cont = None
self.image_thre = None
return self
####################################################################
######################### IMAGE PROCESSING #########################
####################################################################
def apply_mask(self, mask, img = None):
if isinstance(img, np.ndarray): return np.where(mask, img, 0)
else: return np.where(mask, self.image, 0)
####################################################################
############################ ANALYSIS ##############################
####################################################################
def analyse(self, mask):
mask_positive = np.logical_and(mask, self.image >= self.th)
positive_pixels = self.image[mask_positive]
all_pixels = self.image[mask]
mean_positive = np.mean(positive_pixels)
area_positive = np.sum(mask_positive)
mean_all = np.mean(all_pixels)
area_all = np.sum(mask)
positive_fraction = float(area_positive)/float(area_all)
summary_dict = {
'Channel': self.name,
'Threshold': self.th,
'Positive Area' : area_positive,
'Positive Mean' : mean_positive,
'Total Area': area_all,
'Total Mean': mean_all,
            'Positive Fraction': positive_fraction
}
return summary_dict
def segment_fibers(self, mask):
img = np.array(self.image_thre * 255, dtype='uint8')
inverted = np.invert(img)
seed = np.copy(inverted)
seed = np.where(inverted > 0, inverted.max(), 0)
filled = reconstruction(seed, inverted, method='erosion')
filled = np.invert(np.array(filled, dtype='uint8'))
thresh = cv2.threshold(filled, 0, 255, cv2.THRESH_BINARY_INV)[1]
thresh = self.apply_mask(mask, img=thresh)
kernel = np.ones((4, 4), np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations = 1)
kernel = np.array([
[0,1,1,0],
[1,1,1,1],
[1,1,1,1],
[0,1,1,0]
], np.uint8)
erode = cv2.erode(opening, kernel, iterations=2)
D = ndimage.distance_transform_edt(erode)
localMax = peak_local_max(D, indices=False, min_distance=20, labels=erode)
markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
labels = watershed(-D, markers, mask=thresh)
return labels
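
# Illustrative usage sketch (the image, threshold, and mask below are made up;
# real pipelines construct Channel objects from acquired marker images):
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    ch = Channel(name="marker", label=1, image=rng.random((64, 64)), th=0.5)
    mask = np.ones((64, 64), dtype=bool)
    print(ch.analyse(mask))  # per-channel positive-area/mean summary dict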
|
From iris.program_logic Require Export weakestpre.
From iris.heap_lang Require Export lang.
From iris.proofmode Require Import tactics.
From iris.heap_lang Require Import proofmode notation.
From iris.algebra Require Import auth gset.
From iris.heap_lang.lib Require Export lock.
Set Default Proof Using "Type".
Import uPred.
Definition wait_loop: val :=
rec: "wait_loop" "x" "lk" :=
let: "o" := !(Fst "lk") in
if: "x" = "o"
then #() (* my turn *)
else "wait_loop" "x" "lk".
Definition newlock : val :=
λ: <>, ((* owner *) ref #0, (* next *) ref #0).
Definition acquire : val :=
rec: "acquire" "lk" :=
let: "n" := !(Snd "lk") in
if: CAS (Snd "lk") "n" ("n" + #1)
then wait_loop "n" "lk"
else "acquire" "lk".
Definition release : val :=
λ: "lk", (Fst "lk") <- !(Fst "lk") + #1.
(** The CMRAs we need. *)
Class tlockG Σ :=
tlock_G :> inG Σ (authR (prodUR (optionUR (exclR natC)) (gset_disjUR nat))).
Definition tlockΣ : gFunctors :=
#[ GFunctor (authR (prodUR (optionUR (exclR natC)) (gset_disjUR nat))) ].
Instance subG_tlockΣ {Σ} : subG tlockΣ Σ → tlockG Σ.
Proof. solve_inG. Qed.
Section proof.
Context `{!heapG Σ, !probG Σ, !tlockG Σ} (N : namespace).
Definition lock_inv (γ : gname) (lo ln : loc) (R : iProp Σ) : iProp Σ :=
(∃ o n : nat,
lo ↦ #o ∗ ln ↦ #n ∗
own γ (● (Excl' o, GSet (seq_set 0 n))) ∗
((own γ (◯ (Excl' o, GSet ∅)) ∗ R) ∨ own γ (◯ (ε, GSet {[ o ]}))))%I.
Definition is_lock (γ : gname) (lk : val) (R : iProp Σ) : iProp Σ :=
(∃ lo ln : loc,
⌜lk = (#lo, #ln)%V⌝ ∗ inv N (lock_inv γ lo ln R))%I.
Definition issued (γ : gname) (x : nat) : iProp Σ :=
own γ (◯ (ε, GSet {[ x ]}))%I.
Definition locked (γ : gname) : iProp Σ := (∃ o, own γ (◯ (Excl' o, GSet ∅)))%I.
Global Instance lock_inv_ne γ lo ln :
NonExpansive (lock_inv γ lo ln).
Proof. solve_proper. Qed.
Global Instance is_lock_ne γ lk : NonExpansive (is_lock γ lk).
Proof. solve_proper. Qed.
Global Instance is_lock_persistent γ lk R : Persistent (is_lock γ lk R).
Proof. apply _. Qed.
Global Instance locked_timeless γ : Timeless (locked γ).
Proof. apply _. Qed.
Lemma locked_exclusive (γ : gname) : locked γ -∗ locked γ -∗ False.
Proof.
iDestruct 1 as (o1) "H1". iDestruct 1 as (o2) "H2".
iDestruct (own_valid_2 with "H1 H2") as %[[] _].
Qed.
Lemma newlock_spec (R : iProp Σ) :
{{{ R }}} newlock #() {{{ lk γ, RET lk; is_lock γ lk R }}}.
Proof.
iIntros (Φ) "HR HΦ". rewrite -wp_fupd /newlock /=.
wp_seq. wp_alloc lo as "Hlo". wp_alloc ln as "Hln".
iMod (own_alloc (● (Excl' 0%nat, GSet ∅) ⋅ ◯ (Excl' 0%nat, GSet ∅))) as (γ) "[Hγ Hγ']".
{ by rewrite -auth_both_op. }
iMod (inv_alloc _ _ (lock_inv γ lo ln R) with "[-HΦ]").
{ iNext. rewrite /lock_inv.
iExists 0%nat, 0%nat. iFrame. iLeft. by iFrame. }
iModIntro. iApply ("HΦ" $! (#lo, #ln)%V γ). iExists lo, ln. eauto.
Qed.
Lemma wait_loop_spec γ lk x R :
{{{ is_lock γ lk R ∗ issued γ x }}} wait_loop #x lk {{{ RET #(); locked γ ∗ R }}}.
Proof.
iIntros (Φ) "[Hl Ht] HΦ". iDestruct "Hl" as (lo ln ->) "#Hinv".
iLöb as "IH". wp_rec. subst. wp_let. wp_proj. wp_bind (! _)%E.
iInv N as (o n) "(Hlo & Hln & Ha)" "Hclose".
wp_load. destruct (decide (x = o)) as [->|Hneq].
- iDestruct "Ha" as "[Hainv [[Ho HR] | Haown]]".
+ iMod ("Hclose" with "[Hlo Hln Hainv Ht]") as "_".
{ iNext. iExists o, n. iFrame. eauto. }
iModIntro. wp_let. wp_op. case_bool_decide; [|done].
wp_if.
iApply ("HΦ" with "[-]"). rewrite /locked. iFrame. eauto.
+ iDestruct (own_valid_2 with "Ht Haown") as % [_ ?%gset_disj_valid_op].
set_solver.
- iMod ("Hclose" with "[Hlo Hln Ha]").
{ iNext. iExists o, n. by iFrame. }
iModIntro. wp_let.
wp_op. case_bool_decide; [simplify_eq |].
wp_if. iApply ("IH" with "Ht"). iNext. by iExact "HΦ".
Qed.
Lemma acquire_spec γ lk R :
{{{ is_lock γ lk R }}} acquire lk {{{ RET #(); locked γ ∗ R }}}.
Proof.
iIntros (ϕ) "Hl HΦ". iDestruct "Hl" as (lo ln ->) "#Hinv".
iLöb as "IH". wp_rec. wp_bind (! _)%E. simplify_eq/=. wp_proj.
iInv N as (o n) "[Hlo [Hln Ha]]" "Hclose".
wp_load. iMod ("Hclose" with "[Hlo Hln Ha]") as "_".
{ iNext. iExists o, n. by iFrame. }
iModIntro. wp_let. wp_proj. wp_op.
wp_bind (CAS _ _ _).
iInv N as (o' n') "(>Hlo' & >Hln' & >Hauth & Haown)" "Hclose".
destruct (decide (#n' = #n))%V as [[= ->%Nat2Z.inj] | Hneq].
- wp_cas_suc.
iMod (own_update with "Hauth") as "[Hauth Hofull]".
{ eapply auth_update_alloc, prod_local_update_2.
eapply (gset_disj_alloc_empty_local_update _ {[ n ]}).
apply (seq_set_S_disjoint 0). }
rewrite -(seq_set_S_union_L 0).
iMod ("Hclose" with "[Hlo' Hln' Haown Hauth]") as "_".
{ iNext. iExists o', (S n).
rewrite Nat2Z.inj_succ -Z.add_1_r. by iFrame. }
iModIntro. wp_if.
iApply (wait_loop_spec γ (#lo, #ln) with "[-HΦ]").
+ iFrame. rewrite /is_lock; eauto 10.
+ by iNext.
- wp_cas_fail.
iMod ("Hclose" with "[Hlo' Hln' Hauth Haown]") as "_".
{ iNext. iExists o', n'. by iFrame. }
iModIntro. wp_if. by iApply "IH"; auto.
Qed.
Lemma release_spec γ lk R :
{{{ is_lock γ lk R ∗ locked γ ∗ R }}} release lk {{{ RET #(); True }}}.
Proof.
iIntros (Φ) "(Hl & Hγ & HR) HΦ". iDestruct "Hl" as (lo ln ->) "#Hinv".
iDestruct "Hγ" as (o) "Hγo".
wp_let. wp_proj. wp_proj. wp_bind (! _)%E.
iInv N as (o' n) "(>Hlo & >Hln & >Hauth & Haown)" "Hclose".
wp_load.
iDestruct (own_valid_2 with "Hauth Hγo") as
%[[<-%Excl_included%leibniz_equiv _]%prod_included _]%auth_valid_discrete_2.
iMod ("Hclose" with "[Hlo Hln Hauth Haown]") as "_".
{ iNext. iExists o, n. by iFrame. }
iModIntro. wp_op.
iInv N as (o' n') "(>Hlo & >Hln & >Hauth & Haown)" "Hclose".
wp_store.
iDestruct (own_valid_2 with "Hauth Hγo") as
%[[<-%Excl_included%leibniz_equiv _]%prod_included _]%auth_valid_discrete_2.
iDestruct "Haown" as "[[Hγo' _]|Haown]".
{ iDestruct (own_valid_2 with "Hγo Hγo'") as %[[] ?]. }
iMod (own_update_2 with "Hauth Hγo") as "[Hauth Hγo]".
{ apply auth_update, prod_local_update_1.
by apply option_local_update, (exclusive_local_update _ (Excl (S o))). }
iMod ("Hclose" with "[Hlo Hln Hauth Haown Hγo HR]") as "_"; last by iApply "HΦ".
iNext. iExists (S o), n'.
rewrite Nat2Z.inj_succ -Z.add_1_r. iFrame. iLeft. by iFrame.
Qed.
End proof.
Typeclasses Opaque is_lock issued locked.
Canonical Structure ticket_lock `{!heapG Σ, !probG Σ, !tlockG Σ} : lock Σ :=
{| lock.locked_exclusive := locked_exclusive; lock.newlock_spec := newlock_spec;
lock.acquire_spec := acquire_spec; lock.release_spec := release_spec |}.
|
State Before: Γ : Type u_1
R : Type u_2
inst✝¹ : OrderedCancelAddCommMonoid Γ
inst✝ : NonUnitalNonAssocSemiring R
a b : Γ
r s : R
⊢ ↑(single a) r * ↑(single b) s = ↑(single (a + b)) (r * s) State After: case coeff.h
Γ : Type u_1
R : Type u_2
inst✝¹ : OrderedCancelAddCommMonoid Γ
inst✝ : NonUnitalNonAssocSemiring R
a b : Γ
r s : R
x : Γ
⊢ coeff (↑(single a) r * ↑(single b) s) x = coeff (↑(single (a + b)) (r * s)) x Tactic: ext x State Before: case coeff.h
Γ : Type u_1
R : Type u_2
inst✝¹ : OrderedCancelAddCommMonoid Γ
inst✝ : NonUnitalNonAssocSemiring R
a b : Γ
r s : R
x : Γ
⊢ coeff (↑(single a) r * ↑(single b) s) x = coeff (↑(single (a + b)) (r * s)) x State After: case pos
Γ : Type u_1
R : Type u_2
inst✝¹ : OrderedCancelAddCommMonoid Γ
inst✝ : NonUnitalNonAssocSemiring R
a b : Γ
r s : R
x : Γ
h : x = a + b
⊢ coeff (↑(single a) r * ↑(single b) s) x = coeff (↑(single (a + b)) (r * s)) x
case neg
Γ : Type u_1
R : Type u_2
inst✝¹ : OrderedCancelAddCommMonoid Γ
inst✝ : NonUnitalNonAssocSemiring R
a b : Γ
r s : R
x : Γ
h : ¬x = a + b
⊢ coeff (↑(single a) r * ↑(single b) s) x = coeff (↑(single (a + b)) (r * s)) x Tactic: by_cases h : x = a + b State Before: case pos
Γ : Type u_1
R : Type u_2
inst✝¹ : OrderedCancelAddCommMonoid Γ
inst✝ : NonUnitalNonAssocSemiring R
a b : Γ
r s : R
x : Γ
h : x = a + b
⊢ coeff (↑(single a) r * ↑(single b) s) x = coeff (↑(single (a + b)) (r * s)) x State After: case pos
Γ : Type u_1
R : Type u_2
inst✝¹ : OrderedCancelAddCommMonoid Γ
inst✝ : NonUnitalNonAssocSemiring R
a b : Γ
r s : R
x : Γ
h : x = a + b
⊢ coeff (↑(single a) r) a * s = coeff (↑(single (a + b)) (r * s)) (a + b) Tactic: rw [h, mul_single_coeff_add] State Before: case pos
Γ : Type u_1
R : Type u_2
inst✝¹ : OrderedCancelAddCommMonoid Γ
inst✝ : NonUnitalNonAssocSemiring R
a b : Γ
r s : R
x : Γ
h : x = a + b
⊢ coeff (↑(single a) r) a * s = coeff (↑(single (a + b)) (r * s)) (a + b) State After: no goals Tactic: simp State Before: case neg
Γ : Type u_1
R : Type u_2
inst✝¹ : OrderedCancelAddCommMonoid Γ
inst✝ : NonUnitalNonAssocSemiring R
a b : Γ
r s : R
x : Γ
h : ¬x = a + b
⊢ coeff (↑(single a) r * ↑(single b) s) x = coeff (↑(single (a + b)) (r * s)) x State After: case neg
Γ : Type u_1
R : Type u_2
inst✝¹ : OrderedCancelAddCommMonoid Γ
inst✝ : NonUnitalNonAssocSemiring R
a b : Γ
r s : R
x : Γ
h : ¬x = a + b
⊢ ∀ (x_1 : Γ × Γ),
x_1 ∈ addAntidiagonal (_ : Set.IsPwo (support (↑(single a) r))) (_ : Set.IsPwo (support (↑(single b) s))) x →
coeff (↑(single a) r) x_1.fst * coeff (↑(single b) s) x_1.snd = 0 Tactic: rw [single_coeff_of_ne h, mul_coeff, sum_eq_zero] State Before: case neg
Γ : Type u_1
R : Type u_2
inst✝¹ : OrderedCancelAddCommMonoid Γ
inst✝ : NonUnitalNonAssocSemiring R
a b : Γ
r s : R
x : Γ
h : ¬x = a + b
⊢ ∀ (x_1 : Γ × Γ),
x_1 ∈ addAntidiagonal (_ : Set.IsPwo (support (↑(single a) r))) (_ : Set.IsPwo (support (↑(single b) s))) x →
coeff (↑(single a) r) x_1.fst * coeff (↑(single b) s) x_1.snd = 0 State After: case neg
Γ : Type u_1
R : Type u_2
inst✝¹ : OrderedCancelAddCommMonoid Γ
inst✝ : NonUnitalNonAssocSemiring R
a b : Γ
r s : R
x : Γ
h : ¬x = a + b
⊢ ∀ (x_1 : Γ × Γ),
x_1.fst ∈ support (↑(single a) r) ∧ x_1.snd ∈ support (↑(single b) s) ∧ x_1.fst + x_1.snd = x →
coeff (↑(single a) r) x_1.fst * coeff (↑(single b) s) x_1.snd = 0 Tactic: simp_rw [mem_addAntidiagonal] State Before: case neg
Γ : Type u_1
R : Type u_2
inst✝¹ : OrderedCancelAddCommMonoid Γ
inst✝ : NonUnitalNonAssocSemiring R
a b : Γ
r s : R
x : Γ
h : ¬x = a + b
⊢ ∀ (x_1 : Γ × Γ),
x_1.fst ∈ support (↑(single a) r) ∧ x_1.snd ∈ support (↑(single b) s) ∧ x_1.fst + x_1.snd = x →
coeff (↑(single a) r) x_1.fst * coeff (↑(single b) s) x_1.snd = 0 State After: case neg.mk.intro.intro
Γ : Type u_1
R : Type u_2
inst✝¹ : OrderedCancelAddCommMonoid Γ
inst✝ : NonUnitalNonAssocSemiring R
a b : Γ
r s : R
y z : Γ
hy : (y, z).fst ∈ support (↑(single a) r)
hz : (y, z).snd ∈ support (↑(single b) s)
h : ¬(y, z).fst + (y, z).snd = a + b
⊢ coeff (↑(single a) r) (y, z).fst * coeff (↑(single b) s) (y, z).snd = 0 Tactic: rintro ⟨y, z⟩ ⟨hy, hz, rfl⟩ State Before: case neg.mk.intro.intro
Γ : Type u_1
R : Type u_2
inst✝¹ : OrderedCancelAddCommMonoid Γ
inst✝ : NonUnitalNonAssocSemiring R
a b : Γ
r s : R
y z : Γ
hy : (y, z).fst ∈ support (↑(single a) r)
hz : (y, z).snd ∈ support (↑(single b) s)
h : ¬(y, z).fst + (y, z).snd = a + b
⊢ coeff (↑(single a) r) (y, z).fst * coeff (↑(single b) s) (y, z).snd = 0 State After: case neg.mk.intro.intro
Γ : Type u_1
R : Type u_2
inst✝¹ : OrderedCancelAddCommMonoid Γ
inst✝ : NonUnitalNonAssocSemiring R
a b : Γ
r s : R
y z : Γ
hy : (y, z).fst ∈ support (↑(single a) r)
hz : (y, z).snd ∈ support (↑(single b) s)
h : ¬a + b = a + b
⊢ coeff (↑(single a) r) (y, z).fst * coeff (↑(single b) s) (y, z).snd = 0 Tactic: rw [eq_of_mem_support_single hy, eq_of_mem_support_single hz] at h State Before: case neg.mk.intro.intro
Γ : Type u_1
R : Type u_2
inst✝¹ : OrderedCancelAddCommMonoid Γ
inst✝ : NonUnitalNonAssocSemiring R
a b : Γ
r s : R
y z : Γ
hy : (y, z).fst ∈ support (↑(single a) r)
hz : (y, z).snd ∈ support (↑(single b) s)
h : ¬a + b = a + b
⊢ coeff (↑(single a) r) (y, z).fst * coeff (↑(single b) s) (y, z).snd = 0 State After: no goals Tactic: exact (h rfl).elim
|
import Data.Primitives.Views
import System
%default total
data InfIO: Type where
Do: IO a -> (a -> Inf InfIO) -> InfIO
(>>=) : IO a -> (a -> Inf InfIO) -> InfIO
(>>=) = Do
data Fuel = Dry | More (Lazy Fuel)
partial
forever: Fuel
forever = More forever
run: Fuel -> InfIO -> IO()
run Dry y = putStrLn "Out of fuel"
run (More fuel) (Do action cont) =
do
res <- action
run fuel (cont res)
quiz : Stream Int -> (score: Nat) -> InfIO
quiz (num1 :: num2 :: nums) score =
do
putStrLn ("Score so far: " ++ show score)
putStr (show num1 ++ " * " ++ show num2 ++ "? ")
answer <- getLine
if cast answer == num1 * num2
then do putStrLn "Correct!"
quiz nums (score + 1)
else do putStrLn ("Wrong, the answer is " ++ show (num1 * num2))
quiz nums score
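-- randoms is a linear congruential generator; 1664525 and 1013904223 are the
-- well-known "Numerical Recipes" LCG constants.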
randoms : Int -> Stream Int
randoms seed =
let seed' = 1664525 * seed + 1013904223 in
(seed' `shiftR` 2) :: randoms seed'
arithInputs : Int -> Stream Int
arithInputs seed = map bound (randoms seed)
where
bound : Int -> Int
bound num with (divides num 12)
bound ((12 * div) + rem) | (DivBy prf) = rem + 1
partial
main : IO()
main =
do
seed <- time
run forever (quiz (arithInputs (fromInteger seed)) 0)
|