```python
import numpy as np
```
```python
a=np.array([1,-2,5,6,8,-9])
```
```python
np.amin(a)
```
-9
```python
import matplotlib.pyplot as plt
import sympy as sp
import numpy as np
from qiskit import *
from random import randint
def ansatz(ansatzList, theta=3.1415):
    """Build a two-qubit circuit from a list of gate labels such as 'H0', 'X1' or 'C10'."""
    q = QuantumRegister(2)
    c = ClassicalRegister(2)
    circuit = QuantumCircuit(q, c)
    for gate in ansatzList:
        if gate[0] == 'H':
            p = int(gate[1])
            circuit.h(q[p])
        if gate[0] == 'C':
            p0 = int(gate[1])
            p1 = int(gate[2])
            circuit.cx(q[p0], q[p1])
        if gate[0] == 'R':
            p = int(gate[1])
            circuit.rx(theta, q[p])
        if gate[0] == 'Y':
            p = int(gate[1])
            circuit.y(q[p])
        if gate[0] == 'Z':
            p = int(gate[1])
            circuit.z(q[p])
        if gate[0] == 'X':
            p = int(gate[1])
            circuit.x(q[p])
    circuit.measure(q, c)
    return circuit

def get_expectation(ansatzList, theta=3.1415):
    circuit = ansatz(ansatzList, theta)
    shots = 10000
    backend = BasicAer.get_backend('qasm_simulator')
    job = execute(circuit, backend, shots=shots)
    result = job.result()
    counts = result.get_counts()
    return counts
```
```python
lock=get_expectation(['H0','Z0','H1'],np.pi/64)
```
```python
ansatz(['H0','Z0','H1','C10']).draw('mpl')
```
```python
from qiskit.visualization import plot_histogram, plot_bloch_multivector
plot_histogram(get_expectation(['H0','Z0','H1','C10'],np.pi))
```
```python
!dir
```
Volume in drive C is OS
Volume Serial Number is A6A4-3F24
Directory of C:\Users\gerar\Documents\GitHub\QCHack2021Repo
04/10/2021 03:59 PM <DIR> .
04/10/2021 03:59 PM <DIR> ..
01/30/2021 09:56 AM 3,174 .gitignore
04/10/2021 03:27 PM <DIR> .ipynb_checkpoints
04/10/2021 03:30 PM <DIR> .vscode
01/30/2021 03:03 PM 6,709 ansatz.PNG
01/31/2021 11:06 AM 660 comparison.py
04/10/2021 12:48 PM 1,372 currentmatrix.png
01/31/2021 10:43 AM 369 game.py
04/10/2021 03:27 PM <DIR> gifs
04/10/2021 03:59 PM 9,391 histogram.png
04/10/2021 03:26 PM 2,873 jv.py
01/31/2021 11:20 AM 21,687 legacy.py
04/10/2021 03:26 PM 1,089 LICENSE
01/30/2021 03:00 PM 5,917 logo.png
01/31/2021 09:42 AM 17,478 main2.py
02/02/2021 06:50 PM 19,436 main_new.py
04/10/2021 03:27 PM <DIR> profiles
04/10/2021 03:26 PM 147 README.md
01/30/2021 03:59 PM 73 Test.md
04/10/2021 03:58 PM 15,880 Untitled.ipynb
04/10/2021 03:34 PM 4,711 vqeCalc.py
04/10/2021 03:27 PM <DIR> __pycache__
16 File(s) 110,966 bytes
7 Dir(s) 45,315,129,344 bytes free
```python
ansatz(['X0', 'Y0', 'Z0', 'H0', 'C01', 'X1', 'Y1', 'Z1', 'H1', 'C10']).draw('mpl')
```
```python
get_expectation(['X0', 'Y0', 'Z0', 'H0', 'C01', 'X1', 'Y1', 'Z1', 'H1', 'C10'])
```
```python
sum(lock.values())
```
10000
```python
get_expectation(['X0', 'H1'],np.pi)
```
{'01': 4968, '11': 5032}
```python
def ansatz(ansatzList, theta=3.1415):
    q = QuantumRegister(2)
    c = ClassicalRegister(2)
    circuit = QuantumCircuit(q, c)
    for gate in ansatzList:
        if gate[0] == 'H':
            p = int(gate[1])
            circuit.h(q[p])
        if gate[0] == 'C':
            p0 = int(gate[1])
            p1 = int(gate[2])
            circuit.cx(q[p0], q[p1])
        """
        if gate[0] == 'R':
            p = int(gate[1])
            circuit.rx(theta, q[p])
        """
        if gate[0] == 'Y':
            p = int(gate[1])
            circuit.y(q[p])
        if gate[0] == 'Z':
            p = int(gate[1])
            circuit.z(q[p])
        if gate[0] == 'X':
            p = int(gate[1])
            circuit.x(q[p])
    circuit.measure(q, c)
    return circuit

def get_expectation(ansatzList, theta=3.1415):
    circuit = ansatz(ansatzList, theta)
    shots = 10000
    backend = BasicAer.get_backend('qasm_simulator')
    job = execute(circuit, backend, shots=shots)
    result = job.result()
    counts = result.get_counts()
    return counts

def comparison(ansatzList, theta=3.1415):
    # Accept if the '11' outcome accounts for more than 40% of the shots.
    estimate = get_expectation(ansatzList, theta)
    total = sum(estimate.values())
    if '11' in estimate.keys():
        if (estimate['11']/total) > 0.4:
            return True
        else:
            return False
    else:
        return False
```
```python
get_expectation(['X0', 'H1'])
```
{'01': 4958, '11': 5042}
```python
comparison(['X0', 'H1'])
```
True
```python
estimate=get_expectation(['X0', 'H1'])
```
```python
estimate.keys()
```
dict_keys(['01', '11'])
```python
plot_histogram(get_expectation(['H0']),)
```
```python
from qiskit import QuantumCircuit, execute, Aer, IBMQ
from qiskit.compiler import transpile, assemble
from qiskit.tools.jupyter import *
from qiskit.visualization import *
import matplotlib.pyplot as plt
import numpy as np
from qiskit import IBMQ, BasicAer
from qiskit.providers.ibmq import *
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
# import basic plot tools
from qiskit.visualization import plot_histogram
n = 7 # number of qubits used to represent s
def wordToBV(s):
    # convert text to binary
    a_byte_array = bytearray(s, "utf8")
    byte_list = []
    for byte in a_byte_array:
        binary_representation = bin(byte)
        # chop off the "0b" prefix; binary has 2 extra characters for "0b",
        # so the slice starts at 9-n (= 2 for our 7-bit operation). This can
        # also truncate the binary to fit on a device with n qubits.
        byte_list.append(binary_representation[9-n:])
    print(byte_list)
    circuit_array = []
    length = len(byte_list)
    for i in range(length):
        s = byte_list[i]
        # build a Bernstein-Vazirani circuit for every letter:
        # we need a circuit with n qubits, plus one ancilla qubit,
        # and n classical bits to write the output to
        bv_circuit = QuantumCircuit(n+1, n)
        # put ancilla in state |->
        bv_circuit.h(n)
        bv_circuit.z(n)
        # apply Hadamard gates before querying the oracle
        for j in range(n):
            bv_circuit.h(j)
        # apply barrier
        bv_circuit.barrier()
        # apply the inner-product oracle
        s = s[::-1]  # reverse s to fit qiskit's qubit ordering
        for q in range(n):
            if s[q] == '0':
                bv_circuit.i(q)
            else:
                bv_circuit.cx(q, n)
        # apply barrier
        bv_circuit.barrier()
        # apply Hadamard gates after querying the oracle
        for j in range(n):
            bv_circuit.h(j)
        # measurement
        for j in range(n):
            bv_circuit.measure(j, j)
        circuit_array.append(bv_circuit)
    return circuit_array
circuit_to_run = wordToBV('Toronto')
#run the first letter on a simulator
backend = BasicAer.get_backend('qasm_simulator')
shots = 4096
results = execute(circuit_to_run[0], backend=backend, shots=shots).result()
answer = results.get_counts()
plot_histogram(answer)
```
```python
```
|
module Data.Sexp
import Data.List
import Data.String
%default total
public export
data Sexp : Type where
  SAtom : String -> Sexp
  SList : List Sexp -> Sexp

public export
interface ToSexp a where
  toSexp : a -> Sexp

public export
interface FromSexp a where
  fromSexp : Sexp -> Either String a

safeChar : Char -> Bool
safeChar c = (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c == '-'

isSafe : String -> Bool
isSafe "" = False
isSafe s = let l = prim__strLength s in
           if l > 20 then False else
           go s l 0 where
  go : String -> Int -> Int -> Bool
  go s l i = if i >= l then True
             else safeChar (assert_total $ strIndex s i) && (assert_total $ go s l (i+1))

escapeChar : Char -> List Char
escapeChar '\n' = ['\\', 'n']
escapeChar '\t' = ['\\', 't']
escapeChar c = ('\\'::'{'::(unpack $ cast $ cast {to=Int} c)) ++ ['}']

safeCharInQuotes : Char -> Bool
safeCharInQuotes c = (c >= ' ' && c <= '~') && (c /= '\\') && (c /= '"')

safeShow : String -> String
safeShow s = fastPack ('"' :: reverse ('"' :: safeShow' (unpack s) []))
  where
    safeShow' : List Char -> List Char -> List Char
    safeShow' [] acc = acc
    safeShow' (c::rest) acc =
      if safeCharInQuotes c
      then safeShow' rest (c::acc)
      else safeShow' rest (reverseOnto acc (escapeChar c))

showSep : String -> List String -> String
showSep sep xs = showSepGo True xs "" where
  showSepGo : Bool -> List String -> String -> String
  showSepGo first [] acc = acc
  showSepGo first (x::xs) acc = if first then showSepGo False xs (acc ++ x)
                                else showSepGo False xs (acc ++ " " ++ x)

export
Show Sexp where
  show (SAtom s) = if isSafe s then s else (safeShow s)
  show (SList xs) = "(" ++ (showSep " " (assert_total $ map show xs)) ++ ")"
export
unAtom : (errmsg : String) -> Sexp -> Either String String
unAtom _ (SAtom s) = Right s
unAtom m v = Left $ "expected an atom as " ++ m ++ ", got: " ++ show v
|
module MLPlots
using Reexport
@reexport using Plots
import Plots: Series, Plot, Subplot
using LearnBase
export
TracePlot
# ---------------------------------------------------------------------
# a helper class to track many variables at once over time
type TracePlot{I,T}
    indices::I
    plt::Plot{T}
    sp::Subplot{T}
    series::Vector{Series}
end

function TracePlot(n::Int = 1; maxn::Int = typemax(Int), sp = nothing, kw...)
    indices = if n > maxn
        # sample maxn
        shuffle(1:n)[1:maxn]
    else
        1:n
    end
    if sp == nothing
        plt = plot(length(indices); kw...)
        sp = plt[1]
    else
        plt = plot!(sp, length(indices); kw...)
    end
    TracePlot(indices, plt, sp, sp.series_list)
end

function Base.push!(tp::TracePlot, x::Number, y::AbstractVector)
    for (i,idx) in enumerate(tp.indices)
        push!(tp.series[i], x, y[idx])
    end
end

Base.push!(tp::TracePlot, x::Number, y::Number) = push!(tp, x, [y])

# ---------------------------------------------------------------------

# optional

function is_installed(name::AbstractString)
    try
        Pkg.installed(name) === nothing ? false : true
    catch
        false
    end
end

# using Requires
# @require OnlineAI include("OnlineAI/onlineai.jl")
# @require ROCAnalysis include("ROCAnalysis/roc.jl")

if is_installed("Transformations")
    include("optional/transformations.jl")
end
# if is_installed("OnlineAI")
#     include("optional/onlineai.jl")
# end
if is_installed("ROCAnalysis")
    include("optional/roc.jl")
end
end # module
|
applySD = function( f, method="slow", newnames=NULL ) {
cnames= c("id", "x")
if (is.null( newnames )) newnames=names(f)
nv = ncol(f)
names(f) = cnames[1:nv]
if (method=="slow") {
r = by( f, f$id, with, sd( x, na.rm=TRUE ))
out = data.frame( id=names(r), x=as.numeric(as.vector(r)) , stringsAsFactors=FALSE )
}
if (method=="compact_but_slow") {
rowindex = 1:nrow(f)
out = aggregate(rowindex ~ id, f, function(i) sd(f$x[i], na.rm=TRUE ), na.action=na.omit ) #arithmetic mean
}
names(out) = newnames[1:2]
return( out )
}
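# Example usage (hypothetical data): per-group standard deviation of x by id
# f <- data.frame(id = rep(c("a", "b"), each = 5), x = rnorm(10))
# applySD(f, method = "slow", newnames = c("id", "sd_x"))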
|
//=============================================================================
// Copyright (c) 2015-2018 glywk
// https://github.com/glywk
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//=============================================================================
#ifndef CPP_PROPETIES_TEST_LEXER_HPP
#define CPP_PROPETIES_TEST_LEXER_HPP
#include <cpp_properties/lexer.hpp>
#include <boost/config/warning_disable.hpp>
#include <boost/detail/lightweight_test.hpp>
#include <boost/spirit/include/phoenix_operator.hpp>
#include <boost/spirit/include/phoenix_statement.hpp>
#include <boost/spirit/include/phoenix_algorithm.hpp>
#include <boost/spirit/include/phoenix_core.hpp>
#include <iostream>
#include <list>
#include <map>
#include <string>
using std::list;
using std::string;
namespace lex = boost::spirit::lex;
namespace cp = cpp_properties;
using namespace cp::token;
struct expected_token {
    int id;
    string value;
    expected_token(string text, int token_id) : id(token_id), value(text) {}
};

class tokenizer {
    typedef lex::lexertl::token<char const*, boost::mpl::vector0<>, boost::mpl::true_> token_type;
    typedef lex::lexertl::actor_lexer<token_type> lexer_type;

    void load_token_name_id() {
        name[ID_SPACES] = "ID_SPACES";
        name[ID_CR] = "ID_CR";
        name[ID_LF] = "ID_LF";
        name[ID_EOL] = "ID_EOL";
        name[ID_LINE_BREAK_CR] = "ID_LINE_BREAK_CR";
        name[ID_LINE_BREAK_LF] = "ID_LINE_BREAK_LF";
        name[ID_LINE_BREAK_EOL] = "ID_LINE_BREAK_EOL";
        name[ID_COMMENT_SHARP] = "ID_COMMENT_SHARP";
        name[ID_COMMENT_EXCLAMATION] = "ID_COMMENT_EXCLAMATION";
        name[ID_COMMENT_CHARS] = "ID_COMMENT_CHARS";
        name[ID_COMMENT_SPACES] = "ID_COMMENT_SPACES";
        name[ID_COMMENT_CR] = "ID_COMMENT_CR";
        name[ID_COMMENT_LF] = "ID_COMMENT_LF";
        name[ID_COMMENT_EOL] = "ID_COMMENT_EOL";
        name[ID_KEY_CHARS] = "ID_KEY_CHARS";
        name[ID_KEY_ESCAPE_CHAR] = "ID_KEY_ESCAPE_CHAR";
        name[ID_KEY_UNICODE] = "ID_KEY_UNICODE";
        name[ID_KEY_BAD_UNICODE] = "ID_KEY_BAD_UNICODE";
        name[ID_KEY_CR] = "ID_KEY_CR";
        name[ID_KEY_LF] = "ID_KEY_LF";
        name[ID_KEY_EOL] = "ID_KEY_EOL";
        name[ID_KEY_LINE_BREAK_CR] = "ID_KEY_LINE_BREAK_CR";
        name[ID_KEY_LINE_BREAK_LF] = "ID_KEY_LINE_BREAK_LF";
        name[ID_KEY_LINE_BREAK_EOL] = "ID_KEY_LINE_BREAK_EOL";
        name[ID_SEPARATOR_COLON] = "ID_SEPARATOR_COLON";
        name[ID_SEPARATOR_EQUAL] = "ID_SEPARATOR_EQUAL";
        name[ID_SEPARATOR_SPACES] = "ID_SEPARATOR_SPACES";
        name[ID_SEPARATOR_CR] = "ID_SEPARATOR_CR";
        name[ID_SEPARATOR_LF] = "ID_SEPARATOR_LF";
        name[ID_SEPARATOR_EOL] = "ID_SEPARATOR_EOL";
        name[ID_SEPARATOR_LINE_BREAK_CR] = "ID_SEPARATOR_LINE_BREAK_CR";
        name[ID_SEPARATOR_LINE_BREAK_LF] = "ID_SEPARATOR_LINE_BREAK_LF";
        name[ID_SEPARATOR_LINE_BREAK_EOL] = "ID_SEPARATOR_LINE_BREAK_EOL";
        name[ID_VALUE_SPACES] = "ID_VALUE_SPACES";
        name[ID_VALUE_CHARS] = "ID_VALUE_CHARS";
        name[ID_VALUE_ESCAPE_CHAR] = "ID_VALUE_ESCAPE_CHAR";
        name[ID_VALUE_UNICODE] = "ID_VALUE_UNICODE";
        name[ID_VALUE_BAD_UNICODE] = "ID_VALUE_BAD_UNICODE";
        name[ID_VALUE_CR] = "ID_VALUE_CR";
        name[ID_VALUE_LF] = "ID_VALUE_LF";
        name[ID_VALUE_EOL] = "ID_VALUE_EOL";
        name[ID_VALUE_LINE_BREAK_CR] = "ID_VALUE_LINE_BREAK_CR";
        name[ID_VALUE_LINE_BREAK_LF] = "ID_VALUE_LINE_BREAK_LF";
        name[ID_VALUE_LINE_BREAK_EOL] = "ID_VALUE_LINE_BREAK_EOL";
    }

public:
    typedef expected_token value_type;

    tokenizer(const string &input) : text(input) {
        load_token_name_id();
    }
    tokenizer(const tokenizer &rhs) : text(rhs.text) {
        load_token_name_id();
    }

    // Compare the lexer's token stream for `text` against the expected tokens.
    bool as(const list<value_type>& rhs) {
        char const* first = text.c_str();
        char const* last = &first[text.size()];
        cp::cpp_properties_lexer<lexer_type> lexer;
        lexer_type::iterator_type iter = lexer.begin(first, last);
        lexer_type::iterator_type end = lexer.end();
        list<value_type>::const_iterator citer_expected = rhs.cbegin();
        list<value_type>::const_iterator cend_expected = rhs.cend();
        int i = 0;
        while (iter != end && token_is_valid(*iter) && citer_expected != cend_expected) {
            if (iter->id() != citer_expected->id) {
                std::cout << "state: " << iter->state() << ", "
                          << "id: expected=" << name[citer_expected->id] << ", actual=" << name[iter->id()] << ", "
                          << "string:>" << iter->value() << "<\n";
                return false;
            }
            if (iter->value() != citer_expected->value) {
                std::cout << "state: " << iter->state() << ", "
                          << "id: " << name[iter->id()] << ", "
                          << "string: expected=>" << citer_expected->value << "<, actual=>" << iter->value() << "<\n";
                return false;
            }
            ++i;
            ++iter;
            ++citer_expected;
        }
        if (iter == end) {
            std::cout << "*** Debug: " << i << " token(s) found\n";
            if (citer_expected != cend_expected) {
                std::cout << "missing expected tokens\n";
                return false;
            } else {
                return true;
            }
        } else {
            std::string rest(first, last);
            std::cout << "Lexical analysis failed\n" << "stopped at: \""
                      << rest << "\"\n";
        }
        return false;
    }

private:
    const string text;
    std::map<int, std::string> name;
};
bool test(const string& text, const list<tokenizer::value_type>& tokens) {
    return tokenizer(text).as(tokens);
}
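// Hypothetical usage sketch (the exact token stream depends on the lexer's
// rules; the token ids are those registered in load_token_name_id above):
//   bool ok = test("key=value", { expected_token("key", ID_KEY_CHARS),
//                                 expected_token("=", ID_SEPARATOR_EQUAL),
//                                 expected_token("value", ID_VALUE_CHARS) });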
#endif
|
# Realization of Non-Recursive Filters
*This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing. Please direct questions and suggestions to [[email protected]](mailto:[email protected]).*
## Fast Convolution
The straightforward convolution of two finite-length signals $x[k]$ and $h[k]$ is a numerically complex task. This has led to the development of various techniques with considerably lower complexity. The basic concept of the *fast convolution* is to exploit the correspondence between the convolution and the scalar multiplication in the frequency domain.
### Convolution of Finite-Length Signals
The convolution of a causal signal $x_L[k]$ of length $L$ with a causal impulse response $h_N[k]$ of length $N$ is given as
\begin{equation}
y[k] = x_L[k] * h_N[k] = \sum_{\kappa = 0}^{L-1} x_L[\kappa] \; h_N[k - \kappa] = \sum_{\kappa = 0}^{N-1} h_N[\kappa] \; x_L[k - \kappa]
\end{equation}
where $x_L[k] = 0$ for $k<0 \vee k \geq L$ and $h_N[k] = 0$ for $k<0 \vee k \geq N$. The resulting signal $y[k]$ is of finite length $M = N+L-1$. The computation of $y[k]$ for $k=0,1, \dots, M-1$ requires $M \cdot N$ multiplications and $M \cdot (N-1)$ additions. The computational complexity of the convolution is consequently [in the order of](https://en.wikipedia.org/wiki/Big_O_notation) $\mathcal{O}(M \cdot N)$. A discrete-time Fourier transformation (DTFT) of the above relation yields
\begin{equation}
Y(e^{j \Omega}) = X_L(e^{j \Omega}) \cdot H_N(e^{j \Omega})
\end{equation}
Setting aside the effort of the transformation itself, the computationally complex convolution is replaced by a scalar multiplication with respect to the frequency $\Omega$. However, $\Omega$ is a continuous frequency variable, which limits the numerical evaluation of this scalar multiplication. In practice, the DTFT is replaced by the discrete Fourier transformation (DFT). Two aspects have to be considered before a straightforward application of the DFT:
1. The DFTs $X_L[\mu]$ and $H_N[\mu]$ are of length $L$ and $N$ respectively and cannot be multiplied straightforwardly
2. For $N = L$, the multiplication of the two spectra $X_L[\mu]$ and $H_L[\mu]$ would result in the [periodic/circular convolution](https://en.wikipedia.org/wiki/Circular_convolution) $x_L[k] \circledast_L h_L[k]$ due to the periodicity of the DFT. Since we aim at realizing the linear convolution $x_L[k] * h_N[k]$ with the DFT, special care has to be taken to avoid cyclic effects.
### Linear Convolution by Periodic Convolution
The periodic convolution of the two signals $x_L[k]$ and $h_N[k]$ is defined as
\begin{equation}
x_L[k] \circledast_M h_N[k] = \sum_{\kappa=0}^{M-1} \tilde{x}_M[k - \kappa] \; \tilde{h}_M[\kappa]
\end{equation}
where the periodic continuations $\tilde{x}_M[k]$ of $x_L[k]$ and $\tilde{h}_M[k]$ of $h_N[k]$ with period $M$ are given as
\begin{align}
\tilde{x}_M[k] &= \sum_{m = -\infty}^{\infty} x_L[m \cdot M + k] \\
\tilde{h}_M[k] &= \sum_{m = -\infty}^{\infty} h_N[m \cdot M + k]
\end{align}
The result of the circular convolution has a periodicity of $M$.
To compute the linear convolution by the periodic convolution one has to take care that the result of the linear convolution fits into one period of the periodic convolution. Hence, the periodicity has to be chosen as $M \geq N+L-1$. This can be achieved by zero-padding of $x_L[k]$ and $h_N[k]$ to a total length of $M$
\begin{align}
x_M[k] &= \begin{cases}
x_L[k] & \mathrm{for} \; k=0, 1, \dots, L-1 \\
0 & \mathrm{for} \; k=L, L+1, \dots, M-1
\end{cases}
\\
h_M[k] &= \begin{cases}
h_N[k] & \mathrm{for} \; k=0, 1, \dots, N-1 \\
0 & \mathrm{for} \; k=N, N+1, \dots, M-1
\end{cases}
\end{align}
This results in the desired equality of linear and periodic convolution
\begin{equation}
x_L[k] * h_N[k] = x_M[k] \circledast_M h_M[k]
\end{equation}
for $k = 0,1,\dots, M-1$ with $M = N+L-1$.
#### Example - Linear by periodic convolution
The following example computes the linear, periodic and linear by periodic convolution of a rectangular signal $x[k] = \text{rect}_L[k]$ of length $L$ with a triangular signal $h[k] = \Lambda_N[k]$ of length $N$.
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sig

L = 32  # length of signal x[k]
N = 16  # length of signal h[k]
M = 16  # periodicity of periodic convolution


def periodic_summation(x, N):
    "Zero-padding to length N or periodic summation with period N."
    M = len(x)
    rows = int(np.ceil(M/N))
    if (M < int(N*rows)):
        x = np.pad(x, (0, int(N*rows-M)), 'constant')
    x = np.reshape(x, (rows, N))
    return np.sum(x, axis=0)


def periodic_convolve(x, y, P):
    "Periodic convolution of two signals x and y with period P."
    x = periodic_summation(x, P)
    h = periodic_summation(y, P)
    return np.array([np.dot(np.roll(x[::-1], k+1), h) for k in range(P)], float)


# generate signals
x = np.ones(L)
h = sig.triang(N)

# linear convolution
y1 = np.convolve(x, h, 'full')
# periodic convolution
y2 = periodic_convolve(x, h, M)
# linear convolution via periodic convolution
xp = np.append(x, np.zeros(N-1))
hp = np.append(h, np.zeros(L-1))
y3 = periodic_convolve(xp, hp, L+N-1)


def plot_signal(x):
    '''Plots the signals in stem plot.'''
    plt.figure(figsize=(10, 3))
    plt.stem(x)
    plt.xlabel(r'$k$')
    plt.ylabel(r'$y[k]$')
    plt.axis([0, N+L, 0, 1.1*x.max()])


# plot results
plot_signal(x)
plt.title('Signal $x[k]$')

plot_signal(y1)
plt.title('Linear convolution')

plot_signal(y2)
plt.title('Periodic convolution with period M = %d' % M)

plot_signal(y3)
plt.title('Linear convolution by periodic convolution');
```
**Exercise**
* Change the lengths `L`, `N` and `M` and check how the results for the different convolutions change
### The Fast Convolution
Using the above derived equality of the linear and periodic convolution one can express the linear convolution $y[k] = x_L[k] * h_N[k]$ by the DFT as
\begin{equation}
y[k] = \text{IDFT}_M \{ \; \text{DFT}_M\{ x_M[k] \} \cdot \text{DFT}_M\{ h_M[k] \} \; \}
\end{equation}
This operation requires three DFTs of length $M$ and $M$ complex multiplications. At first sight this does not seem to be an improvement, since one DFT/IDFT requires $M^2$ complex multiplications and $M \cdot (M-1)$ complex additions. The overall numerical complexity is hence in the order of $\mathcal{O}(M^2)$. The DFT can be realized efficiently by the [fast Fourier transformation](https://en.wikipedia.org/wiki/Fast_Fourier_transform) (FFT), which lowers the computational complexity to $\mathcal{O}(M \log_2 M)$. The resulting algorithm is known as *fast convolution* due to its computational efficiency.
The fast convolution algorithm is composed of the following steps:
1. Zero-padding of the two input signals $x_L[k]$ and $h_N[k]$ to at least a total length of $M \geq N+L-1$
2. Computation of the DFTs $X[\mu]$ and $H[\mu]$ using a FFT of length $M$
3. Multiplication of the spectra $Y[\mu] = X[\mu] \cdot H[\mu]$
4. Inverse DFT of $Y[\mu]$ using an inverse FFT of length $M$
The overall complexity depends on the particular implementation of the FFT. Many FFTs are most efficient for lengths which are a power of two. It therefore can make sense, in terms of computational complexity, to choose $M$ as a power of two instead of the shortest possible length $N+L-1$. For real-valued signals $x[k] \in \mathbb{R}$ and $h[k] \in \mathbb{R}$ the computational complexity can be reduced significantly by using a real-valued FFT.
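As an illustrative sketch of this strategy (using only `numpy`; the helper name `fast_convolve` is chosen here for illustration), the FFT length can be picked as the next power of two and the real-valued FFT used:
```python
import numpy as np

def fast_convolve(x, h):
    """Linear convolution via real-valued FFTs of power-of-two length."""
    L, N = len(x), len(h)
    M = 2**int(np.ceil(np.log2(N + L - 1)))    # next power of two >= N+L-1
    Y = np.fft.rfft(x, M) * np.fft.rfft(h, M)  # transforms zero-pad to length M
    return np.fft.irfft(Y, M)[:N + L - 1]      # discard the padded tail
```
For real-valued inputs, `np.allclose(fast_convolve(x, h), np.convolve(x, h))` should hold up to numerical precision.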
#### Example - Fast convolution
The implementation of the fast convolution algorithm is straightforward. Most implementations of the FFT include zero-padding to a given length $M$, e.g. in `numpy` by `numpy.fft.fft(x, M)`. In the following example an implementation of the fast convolution is shown. For illustration, the convolution of a rectangular signal $x[k] = \text{rect}_L[k]$ of length $L$ with a triangular signal $h[k] = \Lambda_N[k]$ of length $N$ is considered.
```python
L = 16  # length of signal x[k]
N = 16  # length of signal h[k]
M = N+L-1

# generate signals
x = np.ones(L)
h = sig.triang(N)

# linear convolution
y1 = np.convolve(x, h, 'full')
# fast convolution
y2 = np.fft.ifft(np.fft.fft(x, M) * np.fft.fft(h, M))

plt.figure(figsize=(10, 6))

plt.subplot(211)
plt.stem(y1)
plt.xlabel(r'$k$')
plt.ylabel(r'$y[k] = x_L[k] * h_N[k]$')
plt.title('Result of linear convolution')

plt.subplot(212)
plt.stem(np.real(y2))  # ifft returns a complex array; the result is real up to rounding
plt.xlabel(r'$k$')
plt.ylabel(r'$y[k] = x_L[k] * h_N[k]$')
plt.title('Result of fast convolution')
plt.tight_layout()
```
#### Example - Numerical complexity
It was already argued that the numerical complexity of the fast convolution is considerably lower due to the usage of the FFT. The gain with respect to the convolution is evaluated in the following. In order to measure the execution times for both algorithms the `timeit` module is used. The algorithms are evaluated for the convolution of two random signals $x_L[k]$ and $h_N[k]$ of length $L=N=2^n$ for $n=0, 1, \dots, 16$.
```python
import timeit
n = np.arange(17) # lengths = 2**n to evaluate
reps = 50 # number of repetitions for timeit
gain = np.zeros(len(n))
for N in n:
length = 2**N
# setup environment for timeit
tsetup = 'import numpy as np; from numpy.fft import rfft, irfft; \
x=np.random.randn(%d); h=np.random.randn(%d)' % (length, length)
# direct convolution
tc = timeit.timeit('np.convolve(x, x, mode="full")', setup=tsetup, number=reps)
# fast convolution
tf = timeit.timeit('irfft(rfft(x, %d) * rfft(h, %d))' % (2*length, 2*length), setup=tsetup, number=reps)
# speedup by using the fast convolution
gain[N] = tc/tf
# show the results
plt.figure(figsize = (15, 10))
plt.barh(n, gain, log=True)
plt.plot([1, 1], [-1, n[-1]+1], 'r-')
plt.yticks(n, 2**n)
plt.xlabel('Gain of fast convolution')
plt.ylabel('Length of signals')
plt.title('Comparison between direct/fast convolution')
plt.grid()
```
**Exercise**
* When is the fast convolution more efficient/faster than a direct convolution?
* Why is it slower below a given signal length?
* Is the trend of the gain as expected by the numerical complexity of the FFT?
Solution: The gain in execution time of a fast convolution over a direct implementation of the convolution for different signal lengths depends heavily on the particular implementation and hardware used. The fast convolution in this example is faster for two signals having a length equal to or larger than 1024 samples. Discarding the outliers and short lengths, the overall trend in the gain is approximately logarithmic, as predicted above.
**Copyright**
This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Sascha Spors, Digital Signal Processing - Lecture notes featuring computational examples*.
|
\subsection{Java}
\begin{verbatim}
import java.util.*;
import java.io.*;
import java.math.*;
public class Main {
    public static void main(String[] args) {
        InputStream inputStream = System.in;
        OutputStream outputStream = System.out;
        /*
        try {
            inputStream = new FileInputStream("b.in");
            //outputStream = new FileOutputStream("b.out");
        } catch (FileNotFoundException e) {
            System.err.println("File not found");
            return;
        }*/
        InputReader in = new InputReader(inputStream);
        PrintWriter out = new PrintWriter(outputStream);
        Solver solver = new Solver();
        solver.solve(in, out);
        out.close();
    }
}

class Solver {
    public void solve(InputReader in, PrintWriter out) {
    }
}

class InputReader {
    private BufferedReader reader;
    private StringTokenizer tokenizer;

    public InputReader(InputStream stream) {
        reader = new BufferedReader(new InputStreamReader(stream));
        tokenizer = null;
    }

    public String next() {
        while (tokenizer == null || !tokenizer.hasMoreTokens()) {
            try {
                tokenizer = new StringTokenizer(reader.readLine());
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
        return tokenizer.nextToken();
    }

    public long nextLong() {
        return Long.parseLong(next());
    }

    public int nextInt() {
        return Integer.parseInt(next());
    }

    public double nextDouble() {
        return Double.parseDouble(next());
    }
}
\end{verbatim}
|
Require Import Basics.
Require Import ListCtx.
Require Import Coq.Bool.Bool.
(* Define new ModuleType for id and ty, for later using them as key-value types of the
Key-Value Set. *)
Module Type ModuleTy <: ValModuleType.
Definition T := ty.
Definition equal : T -> T -> bool := ty_eq.
Definition eq_refl : forall t, equal t t = true.
Proof.
intros. apply ty_eq_refl.
Qed.
End ModuleTy.
(* Now, here is the Declarative Typing using a ListContext. *)
Module Type DeclarativeTyping
( mid : ModuleId )
( mty : ModuleTy )
( kvs : ListCtx.ListCtx mid mty ).
Import kvs.
(* Context *)
Notation "'Ø'" := (empty).
Notation "G '∷' x T" := (append G x T) (at level 29, left associativity).
Notation "G1 '∪' G2" := (mult G1 G2) (at level 40, left associativity).
Definition ctx : Type := kvs.T.
(* Context Split (see Figure 1-4) *)
Reserved Notation "G '≜' G1 '∘' G2" (at level 80).
Inductive split' : T -> T -> T -> Prop :=
| M_Empty : empty ≜ empty ∘ empty
| M_Un : forall G G1 G2 x P,
G ≜ G1 ∘ G2 -> (append G x (P qun)) ≜ (append G1 x (P qun)) ∘ (append G2 x (P qun))
| M_Lin1 : forall G G1 G2 x pp,
G ≜ G1 ∘ G2 -> (append G x pp) ≜ (append G1 x pp) ∘ G2
| M_Lin2 : forall G G1 G2 x pp,
G ≜ G1 ∘ G2 -> (append G x pp) ≜ G1 ∘ (append G2 x pp)
where "G '≜' G1 '∘' G2" := (split' G G1 G2).
Lemma split_to_empty : forall G G',
G ≜ G' ∘ Ø -> G = G'.
Proof.
intros G G'. generalize dependent G. induction G'.
- intros. inversion H. subst G. auto.
- intros. inversion H. apply IHG' in H5. rewrite -> H5. auto.
Qed.
(* Context Split Axioms, TODO *)
Fixpoint unr (G : T) : T :=
match G with
| empty => empty
| append G' x (ty_bool qun) => append (unr G') x (ty_bool qun)
| append G' x (ty_pair ti1 ti2 qun) => append (unr G') x (ty_pair ti1 ti2 qun)
| append G' x (ty_arrow ti1 ti2 qun) => append (unr G') x (ty_arrow ti1 ti2 qun)
| append G' x _ => unr G'
end.
Parameter split_id_l : forall G, G ≜ (unr G) ∘ G.
Parameter split_id_r : forall G, G ≜ G ∘ (unr G).
Parameter unr_members : forall G x P Q, contains (unr G) x (P Q) -> Q = qun.
Parameter esplit_id_l : forall G, exists G', G ≜ G' ∘ G.
Parameter esplit_id_r : forall G, exists G', G ≜ G ∘ G'.
Axiom split_comm : forall G G1 G2, G ≜ G1 ∘ G2 -> G ≜ G2 ∘ G1.
Axiom split_contains : forall G G1 G2 k v,
G ≜ G1 ∘ G2 ->
contains G2 k v ->
contains G k v.
(* Split to Union *)
Lemma split_to_union : forall G G1 G2,
G ≜ G1 ∘ G2 -> G = G1 ∪ (set_minus G2 G1).
Proof.
intros G G1. generalize dependent G. induction G1.
- intros G G2 H. inversion H; auto. simpl. rewrite -> H1. rewrite -> H3.
apply split_comm in H. apply split_to_empty in H. auto.
- intros G G2 H. inversion H.
+ rewrite -> set_minus_right_eliminate. apply IHG1 in H5. rewrite -> commut.
rewrite -> commut in H5. rewrite -> H5. apply equal_commut.
apply append_concat. auto.
+ assert (HH : ~ contains G2 k v). { admit. } (* wlg *)
apply set_minus_append_non_member with (G' := G1) in HH.
rewrite -> HH. apply IHG1 in H5. rewrite -> commut.
rewrite -> commut in H5. rewrite -> H5. apply equal_commut.
apply append_concat. auto.
+ admit.
Qed.
(* Relations between Quantifiers and Types *)
Reserved Notation "Q1 '<<' Q2" (at level 70). (* Q1 ⊑ Q2 *)
Inductive q_rel : q -> q -> Prop :=
| Q_Ref : forall Q, Q << Q
| Q_Axiom : qlin << qun
where "Q1 '<<' Q2" := (q_rel Q1 Q2).
Lemma q_rel_trans : forall Q Q' Q'', Q << Q' -> Q' << Q'' -> Q << Q''.
Proof.
induction Q; induction Q'; induction Q''; intros H H';
try inversion H; try inversion H'; try apply Q_Ref; try apply Q_Axiom.
Qed.
Inductive q_rel' : q -> ty -> Prop := (* q(T) *)
| Q_Rel_Type : forall Q Q' P, Q << Q' -> q_rel' Q (P Q').
Reserved Notation "Q '〔' G '〕'" (at level 30). (* q(Γ) *)
Inductive q_rel'' : q -> ctx -> Prop :=
| Q_Rel_Ctx_Empty : forall Q, Q 〔empty〕
| Q_Rel_Ctx_Update : forall Q G x T,
q_rel' Q T ->
Q 〔G〕 ->
Q 〔append G x T〕
where "Q '〔' G '〕'" := (q_rel'' Q G).
Lemma q_rel''_unr : forall G Q, Q 〔unr G〕.
Proof. Admitted.
Lemma q_rel''_concat_ctx : forall Q G1 G2,
Q 〔G1〕 ->
Q 〔G2〕 ->
Q 〔G1 ∪ G2〕.
Proof. Admitted.
Lemma q_rel''_concat_ctx' : forall G1 G2 Q,
Q 〔G1 ∪ G2〕 -> Q 〔G1〕 /\ Q 〔G2〕.
Proof. Admitted.
(* Declarative Typing [Figure 1-5] *)
Reserved Notation "G '|-' t '|' T" (at level 60). (* G ⊢ t : T *)
Inductive ctx_ty : ctx -> tm -> ty -> Prop :=
| T_Var : forall G1 G2 x T,
qun 〔G1 ∪ G2〕 ->
( (append G1 x T) ∪ G2 ) |- (tmvar x) | T
| T_Bool : forall G (Q : q) (B : b),
qun 〔G〕 ->
G |- (tmbool Q B) | ty_bool Q
| T_If : forall G G1 G2 t1 t2 t3 Q T,
G1 |- t1 | ty_bool Q ->
G2 |- t2 | T ->
G2 |- t3 | T ->
G ≜ G1 ∘ G2 ->
G |- tmif t1 t2 t3 | T
| T_Pair : forall G G1 G2 t1 t2 T1 T2 Q,
G1 |- t1 | T1 ->
G2 |- t2 | T2 ->
q_rel' Q T1 ->
q_rel' Q T2 ->
G ≜ G1 ∘ G2 ->
G |- tmpair Q t1 t2 | (T1 ** T2) Q
| T_Split : forall G G1 G2 t1 t2 T1 T2 T x y Q,
G1 |- t1 | (T1 ** T2) Q ->
append (append G2 x T1) y T2 |- t2 | T ->
G ≜ G1 ∘ G2 ->
G |- tmsplit t1 x y t2 | T
| T_Abs : forall Q G t2 T1 T2 x,
Q 〔G〕 ->
append G x T1 |- t2 | T2 ->
G |- tmabs Q x T1 t2 | (T1 --> T2) Q
| T_App : forall G G1 G2 t1 t2 T11 T12 Q,
G1 |- t1 | (T11 --> T12) Q ->
G2 |- t2 | T11 ->
G ≜ G1 ∘ G2 ->
G |- tmapp t1 t2 | T12
where "G '|-' t '|' T" := (ctx_ty G t T).
Hint Constructors ctx_ty.
(* Three Lemmas *)
Lemma exchange_lemma : forall t x1 x2 T1 T2 T G1 G2,
(append (append G1 x1 T1) x2 T2) ∪ G2 |- t | T ->
(append (append G1 x2 T2) x1 T1) ∪ G2 |- t | T.
Proof.
intros.
assert (H' : (append (append G1 x1 T1) x2 T2) ∪ G2 = (append (append G1 x2 T2) x1 T1) ∪ G2).
{ rewrite -> append_to_concat with (k := x2).
rewrite -> append_to_concat with (k := x1).
rewrite -> append_to_concat with (s' := append G1 x2 T2).
rewrite -> append_to_concat with (s' := G1).
apply exchange. }
rewrite <- H'. apply H.
Qed.
Lemma unrestricted_weakening : forall G t x T P,
G |- t | T ->
append G x (P qun) |- t | T.
Proof.
intros. generalize dependent G. generalize dependent T0. induction t; intros; inversion H; subst.
- rewrite -> append_to_concat. rewrite -> assoc. eapply T_Var. apply q_rel''_concat_ctx' in H2.
inversion H2 as [H2l H2r]. apply q_rel''_concat_ctx; try apply H2l. apply q_rel''_concat_ctx; try apply H2r.
eapply Q_Rel_Ctx_Update; try apply Q_Rel_Ctx_Empty. apply Q_Rel_Type. apply Q_Ref.
- apply T_Bool. apply Q_Rel_Ctx_Update; try apply H4. apply Q_Rel_Type. apply Q_Ref.
- eapply T_If.
+ apply IHt1 in H3. apply H3.
+ apply IHt2 in H5. apply H5.
+ apply IHt3 in H7. apply H7.
+ apply M_Un with (x := x) (P := P) in H8. apply H8.
- apply T_Pair with (G1 := append G1 x (P qun)) (G2 := append G2 x (P qun)); try apply H6; try apply H8.
+ apply IHt1 in H3. apply H3.
+ apply IHt2 in H4. apply H4.
+ apply M_Un with (x := x) (P := P) in H9. apply H9.
- eapply T_Split.
+ apply IHt1 in H6. apply H6.
+ assert ( H' : forall G x ti x' ti' x'' ti'', append (append (append G x ti ) x' ti') x'' ti'' =
G ∪ (append empty x ti) ∪ (append empty x' ti') ∪ (append empty x'' ti'') ).
{ intros. repeat rewrite <- append_to_concat. reflexivity. }
assert ( weak_set_exchange : forall A B C, A ∪ B ∪ C = A ∪ C ∪ B ).
{ intros. rewrite <- id_r. rewrite <- id_r with (m := A ∪ B ∪ C). apply exchange. }
rewrite -> H'. rewrite -> exchange. rewrite -> weak_set_exchange. repeat rewrite <- kvs.append_to_concat.
apply IHt2. apply H7.
+ apply M_Un with (x := x) (P := P) in H8. apply H8.
- apply T_Abs.
+ rewrite -> append_to_concat. apply q_rel''_concat_ctx; try apply H6. apply Q_Rel_Ctx_Update; try apply Q_Rel_Ctx_Empty.
apply Q_Rel_Type. destruct q; try apply Q_Axiom. apply Q_Ref.
+ assert ( H' : append (append G x (P qun)) i t = G ∪ (append empty x (P qun)) ∪ (append empty i t) ).
{ repeat rewrite <- append_to_concat. reflexivity. }
assert ( weak_set_exchange : forall A B C, A ∪ B ∪ C = A ∪ C ∪ B ).
{ intros. rewrite <- id_r. rewrite -> exchange. rewrite -> id_r. reflexivity. }
rewrite -> H'. rewrite -> weak_set_exchange. repeat rewrite <- append_to_concat. apply IHt. apply H7.
- eapply T_App.
+ apply IHt1 in H2. apply H2.
+ apply IHt2 in H4. apply H4.
+ apply M_Un with (x := x) (P := P) in H6. apply H6.
Qed.
Lemma unrestricted_weakening_union : forall G G' t T,
G |- t | T ->
qun 〔G'〕 ->
(G ∪ G') |- t | T.
Proof.
intros G. induction G'.
- intros t T H H'. rewrite -> id_r. auto.
- intros t T H H'. inversion H'. subst Q G0 x T0. remember H as HH.
clear HeqHH. apply IHG' in HH; auto. inversion H3. subst Q. inversion H0.
rewrite -> append_concat with (s' := G') (k := k) (v := P qun); auto.
apply unrestricted_weakening. auto.
Qed.
Lemma unrestricted_weakening_split : forall G G1 G2 t T,
G1 |- t | T ->
qun 〔G2〕 ->
G ≜ G1 ∘ G2 ->
G |- t | T.
Proof.
Admitted.
Lemma unrestricted_contraction : forall G t x1 x2 x3 T P,
append (append G x2 (P qun) ) x3 (P qun) |- t | T ->
append G x1 (P qun) |- rpi (rpi t x2 x1) x3 x1 | T.
Proof.
Admitted.
End DeclarativeTyping.
|
If the complement of a set $S$ is bounded, then $S$ is unbounded.
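A short justification, assuming the ambient space is $\mathbb{R}^n$ (or any unbounded metric space): if $S$ were bounded as well, then
\begin{equation*}
\mathbb{R}^n = S \cup S^{c}
\end{equation*}
would be a union of two bounded sets and hence bounded, a contradiction; so $S$ must be unbounded.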
|
/*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*
** **
** This file forms part of the Underworld geophysics modelling application. **
** **
** For full license and copyright information, please refer to the LICENSE.md file **
** located at the project root, or contact the authors. **
** **
**~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*/
#ifndef _writeMat_h
#define _writeMat_h
#include <petsc.h>
#include <petscmat.h>
#include <petscvec.h>
void bsscr_writeMat(Mat A, char name[], char message[]);
void bsscr_writeVec(Vec V, char name[], char message[]);
#endif
|
/-
Copyright (c) 2017 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Mario Carneiro
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.set_theory.cardinal_ordinal
import Mathlib.PostPort
universes u_1 u v u_2
namespace Mathlib
/-!
# Cofinality on ordinals, regular cardinals
-/
namespace order
/-- Cofinality of a reflexive order `≼`. This is the smallest cardinality
of a subset `S : set α` such that `∀ a, ∃ b ∈ S, a ≼ b`. -/
def cof {α : Type u_1} (r : α → α → Prop) [is_refl α r] : cardinal :=
cardinal.min sorry
fun (S : Subtype fun (S : set α) => ∀ (a : α), ∃ (b : α), ∃ (H : b ∈ S), r a b) =>
cardinal.mk ↥S
theorem cof_le {α : Type u_1} (r : α → α → Prop) [is_refl α r] {S : set α}
(h : ∀ (a : α), ∃ (b : α), ∃ (H : b ∈ S), r a b) : cof r ≤ cardinal.mk ↥S :=
sorry
theorem le_cof {α : Type u_1} {r : α → α → Prop} [is_refl α r] (c : cardinal) :
c ≤ cof r ↔ ∀ {S : set α}, (∀ (a : α), ∃ (b : α), ∃ (H : b ∈ S), r a b) → c ≤ cardinal.mk ↥S :=
sorry
end order
theorem rel_iso.cof.aux {α : Type u} {β : Type v} {r : α → α → Prop} {s : β → β → Prop}
[is_refl α r] [is_refl β s] (f : r ≃r s) :
cardinal.lift (order.cof r) ≤ cardinal.lift (order.cof s) :=
sorry
theorem rel_iso.cof {α : Type u} {β : Type v} {r : α → α → Prop} {s : β → β → Prop} [is_refl α r]
[is_refl β s] (f : r ≃r s) : cardinal.lift (order.cof r) = cardinal.lift (order.cof s) :=
le_antisymm sorry sorry
def strict_order.cof {α : Type u_1} (r : α → α → Prop) [h : is_irrefl α r] : cardinal :=
order.cof fun (x y : α) => ¬r y x
namespace ordinal
/-- Cofinality of an ordinal. This is the smallest cardinal of a
subset `S` of the ordinal which is unbounded, in the sense
`∀ a, ∃ b ∈ S, ¬(b > a)`. It is defined for all ordinals, but
`cof 0 = 0` and `cof (succ o) = 1`, so it is only really
interesting on limit ordinals (when it is an infinite cardinal). -/
def cof (o : ordinal) : cardinal := quot.lift_on o (fun (_x : Well_order) => sorry) sorry
theorem cof_type {α : Type u_1} (r : α → α → Prop) [is_well_order α r] :
cof (type r) = strict_order.cof r :=
rfl
theorem le_cof_type {α : Type u_1} {r : α → α → Prop} [is_well_order α r] {c : cardinal} :
c ≤ cof (type r) ↔
∀ (S : set α), (∀ (a : α), ∃ (b : α), ∃ (H : b ∈ S), ¬r b a) → c ≤ cardinal.mk ↥S :=
sorry
theorem cof_type_le {α : Type u_1} {r : α → α → Prop} [is_well_order α r] (S : set α)
(h : ∀ (a : α), ∃ (b : α), ∃ (H : b ∈ S), ¬r b a) : cof (type r) ≤ cardinal.mk ↥S :=
iff.mp le_cof_type (le_refl (cof (type r))) S h
theorem lt_cof_type {α : Type u_1} {r : α → α → Prop} [is_well_order α r] (S : set α)
(hl : cardinal.mk ↥S < cof (type r)) : ∃ (a : α), ∀ (b : α), b ∈ S → r b a :=
iff.mp not_forall_not
fun (h : ∀ (x : α), ¬∀ (b : α), b ∈ S → r b x) =>
not_le_of_lt hl (cof_type_le S fun (a : α) => iff.mp not_ball (h a))
theorem cof_eq {α : Type u_1} (r : α → α → Prop) [is_well_order α r] :
∃ (S : set α), (∀ (a : α), ∃ (b : α), ∃ (H : b ∈ S), ¬r b a) ∧ cardinal.mk ↥S = cof (type r) :=
sorry
theorem ord_cof_eq {α : Type u_1} (r : α → α → Prop) [is_well_order α r] :
∃ (S : set α),
(∀ (a : α), ∃ (b : α), ∃ (H : b ∈ S), ¬r b a) ∧
type (subrel r S) = cardinal.ord (cof (type r)) :=
sorry
theorem lift_cof (o : ordinal) : cardinal.lift (cof o) = cof (lift o) := sorry
theorem cof_le_card (o : ordinal) : cof o ≤ card o := sorry
theorem cof_ord_le (c : cardinal) : cof (cardinal.ord c) ≤ c := sorry
@[simp] theorem cof_zero : cof 0 = 0 := sorry
@[simp] theorem cof_eq_zero {o : ordinal} : cof o = 0 ↔ o = 0 := sorry
@[simp] theorem cof_succ (o : ordinal) : cof (succ o) = 1 := sorry
@[simp] theorem cof_eq_one_iff_is_succ {o : ordinal} : cof o = 1 ↔ ∃ (a : ordinal), o = succ a :=
sorry
@[simp] theorem cof_add (a : ordinal) (b : ordinal) : b ≠ 0 → cof (a + b) = cof b := sorry
@[simp] theorem cof_cof (o : ordinal) : cof (cardinal.ord (cof o)) = cof o := sorry
theorem omega_le_cof {o : ordinal} : cardinal.omega ≤ cof o ↔ is_limit o := sorry
@[simp] theorem cof_omega : cof omega = cardinal.omega :=
le_antisymm
(eq.mpr (id (Eq._oldrec (Eq.refl (cof omega ≤ cardinal.omega)) (Eq.symm card_omega)))
(cof_le_card omega))
(iff.mpr omega_le_cof omega_is_limit)
theorem cof_eq' {α : Type u_1} (r : α → α → Prop) [is_well_order α r] (h : is_limit (type r)) :
∃ (S : set α), (∀ (a : α), ∃ (b : α), ∃ (H : b ∈ S), r a b) ∧ cardinal.mk ↥S = cof (type r) :=
sorry
theorem cof_sup_le_lift {ι : Type u_1} (f : ι → ordinal) (H : ∀ (i : ι), f i < sup f) :
cof (sup f) ≤ cardinal.lift (cardinal.mk ι) :=
sorry
theorem cof_sup_le {ι : Type u} (f : ι → ordinal) (H : ∀ (i : ι), f i < sup f) :
cof (sup f) ≤ cardinal.mk ι :=
sorry
theorem cof_bsup_le_lift {o : ordinal} (f : (a : ordinal) → a < o → ordinal) :
(∀ (i : ordinal) (h : i < o), f i h < bsup o f) → cof (bsup o f) ≤ cardinal.lift (card o) :=
sorry
theorem cof_bsup_le {o : ordinal} (f : (a : ordinal) → a < o → ordinal) :
(∀ (i : ordinal) (h : i < o), f i h < bsup o f) → cof (bsup o f) ≤ card o :=
sorry
@[simp] theorem cof_univ : cof univ = cardinal.univ := sorry
theorem sup_lt_ord {ι : Type u} (f : ι → ordinal) {c : ordinal} (H1 : cardinal.mk ι < cof c)
(H2 : ∀ (i : ι), f i < c) : sup f < c :=
sorry
theorem sup_lt {ι : Type u} (f : ι → cardinal) {c : cardinal}
(H1 : cardinal.mk ι < cof (cardinal.ord c)) (H2 : ∀ (i : ι), f i < c) : cardinal.sup f < c :=
sorry
/-- If the union of s is unbounded and s is smaller than the cofinality, then s has an unbounded member -/
theorem unbounded_of_unbounded_sUnion {α : Type u_1} (r : α → α → Prop) [wo : is_well_order α r]
{s : set (set α)} (h₁ : unbounded r (⋃₀s)) (h₂ : cardinal.mk ↥s < strict_order.cof r) :
∃ (x : set α), ∃ (H : x ∈ s), unbounded r x :=
sorry
/-- If the union of s is unbounded and s is smaller than the cofinality, then s has an unbounded member -/
theorem unbounded_of_unbounded_Union {α : Type u} {β : Type u} (r : α → α → Prop)
[wo : is_well_order α r] (s : β → set α) (h₁ : unbounded r (set.Union fun (x : β) => s x))
(h₂ : cardinal.mk β < strict_order.cof r) : ∃ (x : β), unbounded r (s x) :=
sorry
/-- The infinite pigeonhole principle-/
theorem infinite_pigeonhole {β : Type u} {α : Type u} (f : β → α)
(h₁ : cardinal.omega ≤ cardinal.mk β)
(h₂ : cardinal.mk α < cof (cardinal.ord (cardinal.mk β))) :
∃ (a : α), cardinal.mk ↥(f ⁻¹' singleton a) = cardinal.mk β :=
sorry
/-- pigeonhole principle for a cardinality below the cardinality of the domain -/
theorem infinite_pigeonhole_card {β : Type u} {α : Type u} (f : β → α) (θ : cardinal)
(hθ : θ ≤ cardinal.mk β) (h₁ : cardinal.omega ≤ θ) (h₂ : cardinal.mk α < cof (cardinal.ord θ)) :
∃ (a : α), θ ≤ cardinal.mk ↥(f ⁻¹' singleton a) :=
sorry
theorem infinite_pigeonhole_set {β : Type u} {α : Type u} {s : set β} (f : ↥s → α) (θ : cardinal)
(hθ : θ ≤ cardinal.mk ↥s) (h₁ : cardinal.omega ≤ θ)
(h₂ : cardinal.mk α < cof (cardinal.ord θ)) :
∃ (a : α),
∃ (t : set β),
∃ (h : t ⊆ s),
θ ≤ cardinal.mk ↥t ∧ ∀ {x : β} (hx : x ∈ t), f { val := x, property := h hx } = a :=
sorry
end ordinal
namespace cardinal
/-- A cardinal is a limit if it is not zero or a successor
cardinal. Note that `ω` is a limit cardinal by this definition. -/
def is_limit (c : cardinal) := c ≠ 0 ∧ ∀ (x : cardinal), x < c → succ x < c
/-- A cardinal is a strong limit if it is not zero and it is
closed under powersets. Note that `ω` is a strong limit by this definition. -/
def is_strong_limit (c : cardinal) := c ≠ 0 ∧ ∀ (x : cardinal), x < c → bit0 1 ^ x < c
theorem is_strong_limit.is_limit {c : cardinal} (H : is_strong_limit c) : is_limit c :=
{ left := and.left H,
right :=
fun (x : cardinal) (h : x < c) =>
lt_of_le_of_lt (iff.mpr succ_le (cantor x)) (and.right H x h) }
/-- A cardinal is regular if it is infinite and it equals its own cofinality. -/
def is_regular (c : cardinal) := omega ≤ c ∧ ordinal.cof (ord c) = c
theorem cof_is_regular {o : ordinal} (h : ordinal.is_limit o) : is_regular (ordinal.cof o) :=
{ left := iff.mpr ordinal.omega_le_cof h, right := ordinal.cof_cof o }
theorem omega_is_regular : is_regular omega := sorry
theorem succ_is_regular {c : cardinal} (h : omega ≤ c) : is_regular (succ c) := sorry
theorem sup_lt_ord_of_is_regular {ι : Type u} (f : ι → ordinal) {c : cardinal} (hc : is_regular c)
(H1 : mk ι < c) (H2 : ∀ (i : ι), f i < ord c) : ordinal.sup f < ord c :=
ordinal.sup_lt_ord (fun (i : ι) => f i)
(eq.mpr (id (Eq._oldrec (Eq.refl (mk ι < ordinal.cof (ord c))) (and.right hc))) H1) H2
theorem sup_lt_of_is_regular {ι : Type u} (f : ι → cardinal) {c : cardinal} (hc : is_regular c)
(H1 : mk ι < c) (H2 : ∀ (i : ι), f i < c) : sup f < c :=
ordinal.sup_lt (fun (i : ι) => f i)
(eq.mpr (id (Eq._oldrec (Eq.refl (mk ι < ordinal.cof (ord c))) (and.right hc))) H1) H2
theorem sum_lt_of_is_regular {ι : Type u} (f : ι → cardinal) {c : cardinal} (hc : is_regular c)
(H1 : mk ι < c) (H2 : ∀ (i : ι), f i < c) : sum f < c :=
lt_of_le_of_lt (sum_le_sup f) (mul_lt_of_lt (and.left hc) H1 (sup_lt_of_is_regular f hc H1 H2))
/-- A cardinal is inaccessible if it is an
uncountable regular strong limit cardinal. -/
def is_inaccessible (c : cardinal) := omega < c ∧ is_regular c ∧ is_strong_limit c
theorem is_inaccessible.mk {c : cardinal} (h₁ : omega < c) (h₂ : c ≤ ordinal.cof (ord c))
(h₃ : ∀ (x : cardinal), x < c → bit0 1 ^ x < c) : is_inaccessible c :=
sorry
/- Lean's foundations prove the existence of ω many inaccessible
cardinals -/
theorem univ_inaccessible : is_inaccessible univ := sorry
theorem lt_power_cof {c : cardinal} : omega ≤ c → c < c ^ ordinal.cof (ord c) := sorry
theorem lt_cof_power {a : cardinal} {b : cardinal} (ha : omega ≤ a) (b1 : 1 < b) :
a < ordinal.cof (ord (b ^ a)) :=
sorry
end Mathlib
|
(* Victor B. F. Gomes, University of Cambridge
Martin Kleppmann, University of Cambridge
Dominic P. Mulligan, University of Cambridge
Alastair R. Beresford, University of Cambridge
*)
section\<open>Observed-Remove Set\<close>
text\<open>The ORSet is a well-known CRDT for implementing replicated sets, supporting two operations:
the \emph{insertion} and \emph{deletion} of an arbitrary element in the shared set.\<close>
theory
ORSet
imports
Network
begin
datatype ('id, 'a) operation = Add "'id" "'a" | Rem "'id set" "'a"
type_synonym ('id, 'a) state = "'a \<Rightarrow> 'id set"
definition op_elem :: "('id, 'a) operation \<Rightarrow> 'a" where
"op_elem oper \<equiv> case oper of Add i e \<Rightarrow> e | Rem is e \<Rightarrow> e"
definition interpret_op :: "('id, 'a) operation \<Rightarrow> ('id, 'a) state \<rightharpoonup> ('id, 'a) state" ("\<langle>_\<rangle>" [0] 1000) where
"interpret_op oper state \<equiv>
let before = state (op_elem oper);
after = case oper of Add i e \<Rightarrow> before \<union> {i} | Rem is e \<Rightarrow> before - is
in Some (state ((op_elem oper) := after))"
definition valid_behaviours :: "('id, 'a) state \<Rightarrow> 'id \<times> ('id, 'a) operation \<Rightarrow> bool" where
"valid_behaviours state msg \<equiv>
case msg of
(i, Add j e) \<Rightarrow> i = j |
(i, Rem is e) \<Rightarrow> is = state e"
locale orset = network_with_constrained_ops _ interpret_op "\<lambda>x. {}" valid_behaviours
lemma (in orset) add_add_commute:
shows "\<langle>Add i1 e1\<rangle> \<rhd> \<langle>Add i2 e2\<rangle> = \<langle>Add i2 e2\<rangle> \<rhd> \<langle>Add i1 e1\<rangle>"
by(auto simp add: interpret_op_def op_elem_def kleisli_def, fastforce)
lemma (in orset) add_rem_commute:
assumes "i \<notin> is"
shows "\<langle>Add i e1\<rangle> \<rhd> \<langle>Rem is e2\<rangle> = \<langle>Rem is e2\<rangle> \<rhd> \<langle>Add i e1\<rangle>"
using assms by(auto simp add: interpret_op_def kleisli_def op_elem_def, fastforce)
lemma (in orset) apply_operations_never_fails:
assumes "xs prefix of i"
shows "apply_operations xs \<noteq> None"
using assms proof(induction xs rule: rev_induct, clarsimp)
case (snoc x xs) thus ?case
proof (cases x)
case (Broadcast e) thus ?thesis
using snoc by force
next
case (Deliver e) thus ?thesis
using snoc by (clarsimp, metis interpret_op_def interp_msg_def bind.bind_lunit prefix_of_appendD)
qed
qed
lemma (in orset) add_id_valid:
assumes "xs prefix of j"
and "Deliver (i1, Add i2 e) \<in> set xs"
shows "i1 = i2"
proof -
have "\<exists>s. valid_behaviours s (i1, Add i2 e)"
using assms deliver_in_prefix_is_valid by blast
thus ?thesis
by(simp add: valid_behaviours_def)
qed
definition (in orset) added_ids :: "('id \<times> ('id, 'b) operation) event list \<Rightarrow> 'b \<Rightarrow> 'id list" where
"added_ids es p \<equiv> List.map_filter (\<lambda>x. case x of Deliver (i, Add j e) \<Rightarrow> if e = p then Some j else None | _ \<Rightarrow> None) es"
lemma (in orset) added_ids_Broadcast_collapse [simp]:
shows "added_ids ([Broadcast e]) e' = []"
by (auto simp: added_ids_def map_filter_append map_filter_def)
lemma (in orset) added_ids_Deliver_Rem_collapse [simp]:
shows "added_ids ([Deliver (i, Rem is e)]) e' = []"
by (auto simp: added_ids_def map_filter_append map_filter_def)
lemma (in orset) added_ids_Deliver_Add_diff_collapse [simp]:
shows "e \<noteq> e' \<Longrightarrow> added_ids ([Deliver (i, Add j e)]) e' = []"
by (auto simp: added_ids_def map_filter_append map_filter_def)
lemma (in orset) added_ids_Deliver_Add_same_collapse [simp]:
shows "added_ids ([Deliver (i, Add j e)]) e = [j]"
by (auto simp: added_ids_def map_filter_append map_filter_def)
lemma (in orset) added_id_not_in_set:
assumes "i1 \<notin> set (added_ids [Deliver (i, Add i2 e)] e)"
shows "i1 \<noteq> i2"
using assms by simp
lemma (in orset) apply_operations_added_ids:
assumes "es prefix of j"
and "apply_operations es = Some f"
shows "f x \<subseteq> set (added_ids es x)"
using assms proof (induct es arbitrary: f rule: rev_induct, force)
case (snoc x xs) thus ?case
proof (cases x, force)
case (Deliver e)
moreover obtain a b where "e = (a, b)" by force
ultimately show ?thesis
using snoc by(case_tac b; clarsimp simp: interp_msg_def split: bind_splits,
force split: if_split_asm simp add: op_elem_def interpret_op_def)
qed
qed
lemma (in orset) Deliver_added_ids:
assumes "xs prefix of j"
and "i \<in> set (added_ids xs e)"
shows "Deliver (i, Add i e) \<in> set xs"
using assms proof (induct xs rule: rev_induct, clarsimp)
case (snoc x xs) thus ?case
proof (cases x, force)
case (Deliver e')
moreover obtain a b where "e' = (a, b)" by force
ultimately show ?thesis
using snoc apply (case_tac b; clarsimp)
apply (metis added_ids_Deliver_Add_diff_collapse added_ids_Deliver_Add_same_collapse
empty_iff list.set(1) set_ConsD add_id_valid in_set_conv_decomp prefix_of_appendD)
apply force
done
qed
qed
lemma (in orset) Broadcast_Deliver_prefix_closed:
assumes "xs @ [Broadcast (r, Rem ix e)] prefix of j"
and "i \<in> ix"
shows "Deliver (i, Add i e) \<in> set xs"
proof -
obtain y where "apply_operations xs = Some y"
using assms broadcast_only_valid_msgs by blast
moreover hence "ix = y e"
by (metis (mono_tags, lifting) assms(1) broadcast_only_valid_msgs operation.case(2) option.simps(1)
valid_behaviours_def case_prodD)
ultimately show ?thesis
using assms Deliver_added_ids apply_operations_added_ids by blast
qed
lemma (in orset) Broadcast_Deliver_prefix_closed2:
assumes "xs prefix of j"
and "Broadcast (r, Rem ix e) \<in> set xs"
and "i \<in> ix"
shows "Deliver (i, Add i e) \<in> set xs"
using assms Broadcast_Deliver_prefix_closed by (induction xs rule: rev_induct; force)
lemma (in orset) concurrent_add_remove_independent_technical:
assumes "i \<in> is"
and "xs prefix of j"
and "(i, Add i e) \<in> set (node_deliver_messages xs)" and "(ir, Rem is e) \<in> set (node_deliver_messages xs)"
shows "hb (i, Add i e) (ir, Rem is e)"
proof -
obtain pre k where "pre@[Broadcast (ir, Rem is e)] prefix of k"
using assms delivery_has_a_cause events_before_exist prefix_msg_in_history by blast
moreover hence "Deliver (i, Add i e) \<in> set pre"
using Broadcast_Deliver_prefix_closed assms(1) by auto
ultimately show ?thesis
using hb.intros(2) events_in_local_order by blast
qed
lemma (in orset) Deliver_Add_same_id_same_message:
assumes "Deliver (i, Add i e1) \<in> set (history j)" and "Deliver (i, Add i e2) \<in> set (history j)"
shows "e1 = e2"
proof -
obtain pre1 pre2 k1 k2 where *: "pre1@[Broadcast (i, Add i e1)] prefix of k1" "pre2@[Broadcast (i, Add i e2)] prefix of k2"
using assms delivery_has_a_cause events_before_exist by meson
moreover hence "Broadcast (i, Add i e1) \<in> set (history k1)" "Broadcast (i, Add i e2) \<in> set (history k2)"
using node_histories.prefix_to_carriers node_histories_axioms by force+
ultimately show ?thesis
using msg_id_unique by fastforce
qed
lemma (in orset) ids_imply_messages_same:
assumes "i \<in> is"
and "xs prefix of j"
and "(i, Add i e1) \<in> set (node_deliver_messages xs)" and "(ir, Rem is e2) \<in> set (node_deliver_messages xs)"
shows "e1 = e2"
proof -
obtain pre k where "pre@[Broadcast (ir, Rem is e2)] prefix of k"
using assms delivery_has_a_cause events_before_exist prefix_msg_in_history by blast
moreover hence "Deliver (i, Add i e2) \<in> set pre"
using Broadcast_Deliver_prefix_closed assms(1) by blast
moreover have "Deliver (i, Add i e1) \<in> set (history j)"
using assms(2) assms(3) prefix_msg_in_history by blast
ultimately show ?thesis
by (metis fst_conv msg_id_unique network.delivery_has_a_cause network_axioms operation.inject(1)
prefix_elem_to_carriers prefix_of_appendD prod.inject)
qed
corollary (in orset) concurrent_add_remove_independent:
assumes "\<not> hb (i, Add i e1) (ir, Rem is e2)" and "\<not> hb (ir, Rem is e2) (i, Add i e1)"
and "xs prefix of j"
and "(i, Add i e1) \<in> set (node_deliver_messages xs)" and "(ir, Rem is e2) \<in> set (node_deliver_messages xs)"
shows "i \<notin> is"
using assms ids_imply_messages_same concurrent_add_remove_independent_technical by fastforce
lemma (in orset) rem_rem_commute:
shows "\<langle>Rem i1 e1\<rangle> \<rhd> \<langle>Rem i2 e2\<rangle> = \<langle>Rem i2 e2\<rangle> \<rhd> \<langle>Rem i1 e1\<rangle>"
by(unfold interpret_op_def op_elem_def kleisli_def, fastforce)
lemma (in orset) concurrent_operations_commute:
assumes "xs prefix of i"
shows "hb.concurrent_ops_commute (node_deliver_messages xs)"
proof -
{ fix a b x y
assume "(a, b) \<in> set (node_deliver_messages xs)"
"(x, y) \<in> set (node_deliver_messages xs)"
"hb.concurrent (a, b) (x, y)"
hence "interp_msg (a, b) \<rhd> interp_msg (x, y) = interp_msg (x, y) \<rhd> interp_msg (a, b)"
apply(unfold interp_msg_def, case_tac "b"; case_tac "y"; simp add: add_add_commute rem_rem_commute hb.concurrent_def)
apply (metis add_id_valid add_rem_commute assms concurrent_add_remove_independent hb.concurrentD1 hb.concurrentD2 prefix_contains_msg)+
done
} thus ?thesis
by(fastforce simp: hb.concurrent_ops_commute_def)
qed
sublocale sec: strong_eventual_consistency weak_hb hb interp_msg
"\<lambda>ops.\<exists>xs i. xs prefix of i \<and> node_deliver_messages xs = ops" "\<lambda>x.{}"
apply(standard; clarsimp simp add: hb_consistent_prefix node_deliver_messages_distinct
concurrent_operations_commute)
apply(metis (no_types, lifting) apply_operations_def bind.bind_lunit not_None_eq
hb.apply_operations_Snoc kleisli_def apply_operations_never_fails interp_msg_def)
using drop_last_message apply blast
done
end
end
|
------------------------------------------------------------------------
-- Raw monads
------------------------------------------------------------------------
-- Note that this module is not parametrised by an axiomatisation of
-- equality. This module is reexported from Monad.
{-# OPTIONS --without-K --safe #-}
module Monad.Raw where
open import Prelude
-- Raw monads.
record Raw-monad {d c} (M : Type d → Type c) : Type (lsuc d ⊔ c) where
  constructor mk

  infixl 6 _⟨$⟩_ _⊛_
  infixl 5 _>>=_ _>>_
  infixr 5 _=<<_

  field
    return : ∀ {A} → A → M A
    _>>=_  : ∀ {A B} → M A → (A → M B) → M B

  -- Variants of _>>=_.

  _>>_ : ∀ {A B} → M A → M B → M B
  x >> y = x >>= const y

  _=<<_ : ∀ {A B} → (A → M B) → M A → M B
  _=<<_ = flip _>>=_

  -- A map function.

  map : ∀ {A B} → (A → B) → M A → M B
  map f x = x >>= return ∘ f

  -- A synonym.

  _⟨$⟩_ : ∀ {A B} → (A → B) → M A → M B
  _⟨$⟩_ = map

  -- Applicative functor application.

  _⊛_ : ∀ {A B} → M (A → B) → M A → M B
  f ⊛ x = f >>= λ f → x >>= λ x → return (f x)

  -- The sequence function (for lists).

  sequence : ∀ {A} → List (M A) → M (List A)
  sequence []       = return []
  sequence (x ∷ xs) = _∷_ ⟨$⟩ x ⊛ sequence xs
open Raw-monad ⦃ … ⦄ public
-- Raw monad transformers.
record Raw-monad-transformer
         {d c₁ c₂} (F : (Type d → Type c₁) → (Type d → Type c₂)) :
         Type (lsuc (c₁ ⊔ d) ⊔ c₂) where
  constructor mk

  field
    transform : ∀ {M} ⦃ is-raw-monad : Raw-monad M ⦄ → Raw-monad (F M)
    liftʳ     : ∀ {M A} ⦃ is-raw-monad : Raw-monad M ⦄ → M A → F M A
open Raw-monad-transformer ⦃ … ⦄ public using (liftʳ)
|
\section{Religions}
Religion has always played a central role in the history of every civilization, and continues to do so, even if we, as individuals living in the modern age, mistakenly think otherwise. Religions shape the identities, morals and various other cultural aspects of even otherwise fully secular societies, and historically have even been the cause of political events and military conflicts.
Since World of Artograch is primarily intended to be played in a fantasy setting inspired by the Middle Ages and Antiquity, religion plays a very overt and visible role in people's lives - not to mention that in a fantasy setting where priests are also users of Clerical Magic, religion gets to be tied directly to magic - or at least, to Clerical Magic.
\subsection{Religious attributes}
Religions have the following attributes:
\begin{itemize}
\item \textbf{The religion's name}
\item \textbf{The religion's virtues:} These determine what kind of actions are approved by the deity or deities of the religion, thus letting the character regain favour in case they previously incurred the wrath of the deity or deities. A religion's virtues also directly determine which Schools of Magic are open to Clerical Magic spellcasters following the religion.
\item \textbf{The religion's hierarchy of sins:} These determine what kind of actions incur the wrath of the religion's deity or deities. Committing major sins results in a loss of divine favour, which has serious consequences for spellcasters of the Clerical variety - while gameplay-wise this has no effect on non-spellcasters, a player roleplaying an otherwise religious character should still feel guilt when committing sins.
\end{itemize}
\subsection{Religion on the Character Sheet}
\begin{itemize}
\item \textbf{Religion Name:} Which is \textit{"N/A"} if the character is an Atheist. Otherwise, it's the name of a defined religion.
\item \textbf{Divine Favour:} Which is \textit{"N/A"} if the character is an Atheist - otherwise an integer between 0 and 10, where higher numbers correspond to more saintly behaviour, and lower numbers to more sinful behaviour. Breaking the religion's rules causes the character's Divine Favour to plummet - the lower the value, the greater the sin has to be to lower it even further \Parentheses{see the religion's Hierarchy of Sins}. Reaching a Divine Favour of 0 should permanently strip a user of Clerical Magic of their powers.
\end{itemize}
%\begin{table}[]
\begin{tabular}{|c|c|}
\hline
\multicolumn{2}{|c|}{\textbf{Divine Favour}} \\ \hline
10 & Messianic \\ \hline
9 & Saintly \\ \hline
8 & Exemplary \\ \hline
7 & Virtuous \\ \hline
6 & Devout \\ \hline
5 & Lukewarm \\ \hline
4 & Lapsed \\ \hline
3 & Sinful \\ \hline
2 & Decadent \\ \hline
1 & Degenerate \\ \hline
0 & Damned \\ \hline
\end{tabular}
%\end{table}
|
The metal antimony was known to German chemist Andreas Libavius in 1615, who obtained it by adding iron to a molten mixture of antimony sulfide, salt and potassium tartrate. This procedure produced antimony with a crystalline or starred surface.
|
lemma homotopic_with_sym: "homotopic_with P X Y f g \<longleftrightarrow> homotopic_with P X Y g f"
|
using FreeTypeAbstraction, Colors, ColorVectorSpace, GeometryBasics
using Test
using FreeTypeAbstraction: boundingbox, Vec, glyph_rects, get_extent, FTFont, kerning, glyph_ink_size
using FreeType
face = FreeTypeAbstraction.findfont("hack"; additional_fonts=@__DIR__)
@test repr(face) == "FTFont (family = Hack, style = Regular)"
bb = boundingbox("asdasd", face, 64)
@test round.(Int, minimum(bb)) == Vec(4, -1)
@test round.(Int, widths(bb)) == Vec2(221, 50)
FA = FreeTypeAbstraction
FA.set_pixelsize(face, 64) # should be the default
img, extent = renderface(face, 'C', 64)
@test size(img) == (30, 49)
@test typeof(img) == Array{UInt8,2}
a = renderstring!(zeros(UInt8, 20, 100), "helgo", face, 10, 10, 10)
@test any(a[3:12, :] .!= 0)
@test all(a[vcat(1:2, 13:20), :] .== 0)
@test any(a[:, 11:40] .!= 0)
@test all(a[:, vcat(1:10, 41:100)] .== 0)
a = renderstring!(zeros(UInt8, 20, 100), "helgo", face, 10, 15, 70)
@test any(a[8:17, :] .!= 0)
@test all(a[vcat(1:7, 18:20), :] .== 0)
@test any(a[:, 71:100] .!= 0)
@test all(a[:, 1:70] .== 0)
a = renderstring!(
zeros(UInt8, 20, 100),
"helgo",
face,
10,
10,
50,
valign = :vtop,
)
@test all(a[1:10, :] .== 0)
@test any(a[11:20, :] .!= 0)
a = renderstring!(
zeros(UInt8, 20, 100),
"helgo",
face,
10,
10,
50,
valign = :vcenter,
)
@test all(a[vcat(1:5, 16:end), :] .== 0)
@test any(a[6:15, :] .!= 0)
a = renderstring!(
zeros(UInt8, 20, 100),
"helgo",
face,
10,
10,
50,
valign = :vbaseline,
)
@test all(a[vcat(1:2, 13:end), :] .== 0)
@test any(a[3:12, :] .!= 0)
a = renderstring!(
zeros(UInt8, 20, 100),
"helgo",
face,
10,
10,
50,
valign = :vbottom,
)
@test any(a[1:10, :] .!= 0)
@test all(a[11:20, :] .== 0)
a = renderstring!(
zeros(UInt8, 20, 100),
"helgo",
face,
10,
10,
50,
halign = :hleft,
)
@test all(a[:, 1:50] .== 0)
@test any(a[:, 51:100] .!= 0)
a = renderstring!(
zeros(UInt8, 20, 100),
"helgo",
face,
10,
10,
50,
halign = :hcenter,
)
@test all(a[:, vcat(1:35, 66:end)] .== 0)
@test any(a[:, 36:65] .!= 0)
a = renderstring!(
zeros(UInt8, 20, 100),
"helgo",
face,
10,
10,
50,
halign = :hright,
)
@test any(a[:, 1:50] .!= 0)
@test all(a[:, 51:100] .== 0)
a = renderstring!(
zeros(UInt8, 20, 100),
"helgo",
face,
10,
10,
50,
fcolor = 0x80,
)
@test maximum(a) <= 0x80
a = renderstring!(
zeros(UInt8, 20, 100),
"helgo",
face,
10,
10,
50,
fcolor = 0x80,
bcolor = 0x40,
)
@test any(a .== 0x40)
a = renderstring!(
fill(0x01, 20, 100),
"helgo",
face,
10,
10,
50,
bcolor = nothing,
)
@test !any(a .== 0x00)
a = renderstring!(zeros(Float32, 20, 100), "helgo", face, 10, 10, 50)
@test maximum(a) <= 1.0
a = renderstring!(zeros(Float64, 20, 100), "helgo", face, 10, 10, 50)
@test maximum(a) <= 1.0
renderstring!(zeros(Gray, 20, 100), "helgo", face, 10, 10, 50)
renderstring!(
zeros(Gray{Float64}, 20, 100),
"helgo",
face,
10,
10,
50,
fcolor = Gray(0.5),
)
renderstring!(
zeros(UInt8, 20, 100),
"helgo",
face,
10,
0,
0,
halign = :hcenter,
valign = :vcenter,
)
renderstring!(zeros(UInt8, 20, 100), "helgo", face, 10, 25, 80)
# Find fonts
# these fonts should be available on all platforms:
# debug travis... does it even have fonts?
fontpaths = FreeTypeAbstraction.fontpaths()
isempty(fontpaths) && println("OS doesn't have any font folder")
if Sys.islinux()
fonts = ["dejavu sans"]
# apple on gh-actions doesn't seem to have any fonts...
elseif Sys.isapple()
fonts = []
else # Windows has some more fonts installed by default
fonts = [
"Times New Roman",
"Arial",
"Comic Sans MS",
"Impact",
"Tahoma",
"Trebuchet MS",
"Verdana",
"Courier New",
]
end
@testset "finding fonts" begin
for font in fonts
@testset "finding $font" begin
@test findfont(font) != nothing
end
end
@testset "find in additional dir" begin
@test findfont("Hack") == nothing
@test findfont("Hack", additional_fonts = @__DIR__) != nothing
end
end
@testset "loading lots of fonts" begin
for i = 1:10
for font in fonts
@time findfont(font)
end
end
@test "No Error" == "No Error"
end
|
State Before: 𝕜 : Type u_2
E : Type u_1
F : Type ?u.172872
G : Type ?u.172875
inst✝² : LinearOrderedField 𝕜
inst✝¹ : AddCommGroup E
inst✝ : Module 𝕜 E
s : Set E
hs : Convex 𝕜 s
x✝ x : E
hx : x ∈ s
⊢ 1 • x ∈ s State After: no goals Tactic: rwa [one_smul]
|
# Import a module and register it with an Autoreload-style `arequire` so that
# it is automatically reloaded on change; falls back to a plain `import` if
# the module is already loaded.
macro aimport(mod)
    if isdefined(Main, mod)  # single-argument isdefined is gone in modern Julia
        quote
            import $mod
        end
    else
        esc(quote
            arequire(string($(QuoteNode(mod))))
            import $mod
        end)
    end
end

# Same as `@aimport`, but brings the module's exports into scope with `using`.
macro ausing(mod)
    if isdefined(Main, mod)
        quote
            using $mod
        end
    else
        esc(quote
            arequire(string($(QuoteNode(mod))))
            using $mod
        end)
    end
end
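# Usage sketch (assumes an Autoreload-style `arequire` is in scope, as used
# above; the module name is illustrative):
#
#   @aimport JSON    # import JSON and register it for automatic reloading
#   @ausing JSON     # same, but bring its exports into scope with `using`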
|
/-
Copyright (c) 2014 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Leonardo de Moura
-/
prelude
import Init.SizeOf
import Init.Data.Nat.Basic
universe u v
set_option codegen false
inductive Acc {α : Sort u} (r : α → α → Prop) : α → Prop where
| intro (x : α) (h : (y : α) → r y x → Acc r y) : Acc r x
abbrev Acc.ndrec.{u1, u2} {α : Sort u2} {r : α → α → Prop} {C : α → Sort u1}
(m : (x : α) → ((y : α) → r y x → Acc r y) → ((y : α) → (a : r y x) → C y) → C x)
{a : α} (n : Acc r a) : C a :=
Acc.rec (motive := fun α _ => C α) m n
abbrev Acc.ndrecOn.{u1, u2} {α : Sort u2} {r : α → α → Prop} {C : α → Sort u1}
{a : α} (n : Acc r a)
(m : (x : α) → ((y : α) → r y x → Acc r y) → ((y : α) → (a : r y x) → C y) → C x)
: C a :=
Acc.rec (motive := fun α _ => C α) m n
namespace Acc
variable {α : Sort u} {r : α → α → Prop}
def inv {x y : α} (h₁ : Acc r x) (h₂ : r y x) : Acc r y :=
Acc.recOn (motive := fun (x : α) _ => r y x → Acc r y)
h₁ (fun x₁ ac₁ ih h₂ => ac₁ y h₂) h₂
end Acc
inductive WellFounded {α : Sort u} (r : α → α → Prop) : Prop where
| intro (h : ∀ a, Acc r a) : WellFounded r
namespace WellFounded
def apply {α : Sort u} {r : α → α → Prop} (wf : WellFounded r) (a : α) : Acc r a :=
WellFounded.recOn (motive := fun x => (y : α) → Acc r y)
wf (fun p => p) a
section
variable {α : Sort u} {r : α → α → Prop} (hwf : WellFounded r)
theorem recursion {C : α → Sort v} (a : α) (h : ∀ x, (∀ y, r y x → C y) → C x) : C a := by
induction (apply hwf a) with
| intro x₁ ac₁ ih => exact h x₁ ih
theorem induction {C : α → Prop} (a : α) (h : ∀ x, (∀ y, r y x → C y) → C x) : C a :=
recursion hwf a h
variable {C : α → Sort v}
variable (F : ∀ x, (∀ y, r y x → C y) → C x)
def fixF (x : α) (a : Acc r x) : C x := by
induction a with
| intro x₁ ac₁ ih => exact F x₁ ih
def fixFEq (x : α) (acx : Acc r x) : fixF F x acx = F x (fun (y : α) (p : r y x) => fixF F y (Acc.inv acx p)) := by
induction acx with
| intro x r ih => exact rfl
end
variable {α : Sort u} {C : α → Sort v} {r : α → α → Prop}
-- Well-founded fixpoint
def fix (hwf : WellFounded r) (F : ∀ x, (∀ y, r y x → C y) → C x) (x : α) : C x :=
fixF F x (apply hwf x)
-- Well-founded fixpoint satisfies fixpoint equation
theorem fix_eq (hwf : WellFounded r) (F : ∀ x, (∀ y, r y x → C y) → C x) (x : α) :
fix hwf F x = F x (fun y h => fix hwf F y) :=
fixFEq F x (apply hwf x)
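/-
A usage sketch (not part of this file): `fix` together with `Nat.lt_wf`
(defined further below) yields well-founded definitions such as division
by repeated subtraction, where each recursive call is justified by a
proof of `m - k < m`:

  def div (m k : Nat) : Nat :=
    fix Nat.lt_wf (fun m ih => if h : 0 < k ∧ k ≤ m then ih (m - k) (sorry) + 1 else 0) m

Here `sorry` stands in for the arithmetic lemma `m - k < m`.
-/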
end WellFounded
open WellFounded
-- Empty relation is well-founded
def emptyWf {α : Sort u} : WellFounded (@emptyRelation α) := by
apply WellFounded.intro
intro a
apply Acc.intro a
intro b h
cases h
-- Subrelation of a well-founded relation is well-founded
namespace Subrelation
variable {α : Sort u} {r q : α → α → Prop}
def accessible {a : α} (h₁ : Subrelation q r) (ac : Acc r a) : Acc q a := by
induction ac with
| intro x ax ih =>
apply Acc.intro
intro y h
exact ih y (h₁ h)
def wf (h₁ : Subrelation q r) (h₂ : WellFounded r) : WellFounded q :=
⟨fun a => accessible @h₁ (apply h₂ a)⟩
end Subrelation
-- The inverse image of a well-founded relation is well-founded
namespace InvImage
variable {α : Sort u} {β : Sort v} {r : β → β → Prop}
private def accAux (f : α → β) {b : β} (ac : Acc r b) : (x : α) → f x = b → Acc (InvImage r f) x := by
induction ac with
| intro x acx ih =>
intro z e
apply Acc.intro
intro y lt
subst x
apply ih (f y) lt y rfl
def accessible {a : α} (f : α → β) (ac : Acc r (f a)) : Acc (InvImage r f) a :=
accAux f ac a rfl
def wf (f : α → β) (h : WellFounded r) : WellFounded (InvImage r f) :=
⟨fun a => accessible f (apply h (f a))⟩
end InvImage
def invImage (f : α → β) (h : WellFounded r) : WellFounded (InvImage r f) :=
InvImage.wf f h
-- The transitive closure of a well-founded relation is well-founded
namespace TC
variable {α : Sort u} {r : α → α → Prop}
def accessible {z : α} (ac : Acc r z) : Acc (TC r) z := by
induction ac with
| intro x acx ih =>
apply Acc.intro x
intro y rel
induction rel with
| base a b rab => exact ih a rab
| trans a b c rab rbc ih₁ ih₂ => apply Acc.inv (ih₂ acx ih) rab
def wf (h : WellFounded r) : WellFounded (TC r) :=
⟨fun a => accessible (apply h a)⟩
end TC
-- less-than is well-founded
def Nat.lt_wf : WellFounded Nat.lt := by
apply WellFounded.intro
intro n
induction n with
| zero =>
apply Acc.intro 0
intro _ h
apply absurd h (Nat.not_lt_zero _)
| succ n ih =>
apply Acc.intro (Nat.succ n)
intro m h
have : m = n ∨ m < n := Nat.eq_or_lt_of_le (Nat.le_of_succ_le_succ h)
match this with
| Or.inl e => subst e; assumption
| Or.inr e => exact Acc.inv ih e
def Measure {α : Sort u} : (α → Nat) → α → α → Prop :=
InvImage (fun a b => a < b)
def measure {α : Sort u} (f : α → Nat) : WellFounded (Measure f) :=
invImage f Nat.lt_wf
def SizeofMeasure (α : Sort u) [SizeOf α] : α → α → Prop :=
Measure sizeOf
def sizeofMeasure (α : Sort u) [SizeOf α] : WellFounded (SizeofMeasure α) :=
measure sizeOf
namespace Prod
open WellFounded
section
variable {α : Type u} {β : Type v}
variable (ra : α → α → Prop)
variable (rb : β → β → Prop)
-- Lexicographical order based on ra and rb
inductive Lex : α × β → α × β → Prop where
| left {a₁} (b₁) {a₂} (b₂) (h : ra a₁ a₂) : Lex (a₁, b₁) (a₂, b₂)
| right (a) {b₁ b₂} (h : rb b₁ b₂) : Lex (a, b₁) (a, b₂)
-- relational product based on ra and rb
inductive RProd : α × β → α × β → Prop where
| intro {a₁ b₁ a₂ b₂} (h₁ : ra a₁ a₂) (h₂ : rb b₁ b₂) : RProd (a₁, b₁) (a₂, b₂)
end
section
variable {α : Type u} {β : Type v}
variable {ra : α → α → Prop} {rb : β → β → Prop}
def lexAccessible (aca : (a : α) → Acc ra a) (acb : (b : β) → Acc rb b) (a : α) (b : β) : Acc (Lex ra rb) (a, b) := by
induction (aca a) generalizing b with
| intro xa aca iha =>
induction (acb b) with
| intro xb acb ihb =>
apply Acc.intro (xa, xb)
intro p lt
cases lt with
| left _ _ h => apply iha _ h
| right _ h => apply ihb _ h
-- The lexicographical order of well founded relations is well-founded
def lex (ha : WellFounded ra) (hb : WellFounded rb) : WellFounded (Lex ra rb) :=
⟨fun (a, b) => lexAccessible (WellFounded.apply ha) (WellFounded.apply hb) a b⟩
-- relational product is a Subrelation of the Lex
def RProdSubLex (a : α × β) (b : α × β) (h : RProd ra rb a b) : Lex ra rb a b := by
cases h with
| intro h₁ h₂ => exact Lex.left _ _ h₁
-- The relational product of well founded relations is well-founded
def rprod (ha : WellFounded ra) (hb : WellFounded rb) : WellFounded (RProd ra rb) := by
apply Subrelation.wf (r := Lex ra rb) (h₂ := lex ha hb)
intro a b h
exact RProdSubLex a b h
end
end Prod
namespace PSigma
section
variable {α : Sort u} {β : α → Sort v}
variable (r : α → α → Prop)
variable (s : ∀ a, β a → β a → Prop)
-- Lexicographical order based on r and s
inductive Lex : PSigma β → PSigma β → Prop where
| left : ∀ {a₁ : α} (b₁ : β a₁) {a₂ : α} (b₂ : β a₂), r a₁ a₂ → Lex ⟨a₁, b₁⟩ ⟨a₂, b₂⟩
| right : ∀ (a : α) {b₁ b₂ : β a}, s a b₁ b₂ → Lex ⟨a, b₁⟩ ⟨a, b₂⟩
end
section
variable {α : Sort u} {β : α → Sort v}
variable {r : α → α → Prop} {s : ∀ (a : α), β a → β a → Prop}
def lexAccessible {a} (aca : Acc r a) (acb : (a : α) → WellFounded (s a)) (b : β a) : Acc (Lex r s) ⟨a, b⟩ := by
induction aca with
| intro xa aca iha =>
induction (WellFounded.apply (acb xa) b) with
| intro xb acb ihb =>
apply Acc.intro
intro p lt
cases lt with
| left => apply iha; assumption
| right => apply ihb; assumption
-- The lexicographical order of well founded relations is well-founded
def lex (ha : WellFounded r) (hb : (x : α) → WellFounded (s x)) : WellFounded (Lex r s) :=
WellFounded.intro fun ⟨a, b⟩ => lexAccessible (WellFounded.apply ha a) hb b
end
section
variable {α : Sort u} {β : Sort v}
def lexNdep (r : α → α → Prop) (s : β → β → Prop) :=
Lex r (fun a => s)
def lexNdepWf {r : α → α → Prop} {s : β → β → Prop} (ha : WellFounded r) (hb : WellFounded s) : WellFounded (lexNdep r s) :=
WellFounded.intro fun ⟨a, b⟩ => lexAccessible (WellFounded.apply ha a) (fun x => hb) b
end
section
variable {α : Sort u} {β : Sort v}
-- Reverse lexicographical order based on r and s
inductive RevLex (r : α → α → Prop) (s : β → β → Prop) : @PSigma α (fun a => β) → @PSigma α (fun a => β) → Prop where
| left : {a₁ a₂ : α} → (b : β) → r a₁ a₂ → RevLex r s ⟨a₁, b⟩ ⟨a₂, b⟩
| right : (a₁ : α) → {b₁ : β} → (a₂ : α) → {b₂ : β} → s b₁ b₂ → RevLex r s ⟨a₁, b₁⟩ ⟨a₂, b₂⟩
end
section
open WellFounded
variable {α : Sort u} {β : Sort v}
variable {r : α → α → Prop} {s : β → β → Prop}
def revLexAccessible {b} (acb : Acc s b) (aca : (a : α) → Acc r a): (a : α) → Acc (RevLex r s) ⟨a, b⟩ := by
induction acb with
| intro xb acb ihb =>
intro a
induction (aca a) with
| intro xa aca iha =>
apply Acc.intro
intro p lt
cases lt with
| left => apply iha; assumption
| right => apply ihb; assumption
def revLex (ha : WellFounded r) (hb : WellFounded s) : WellFounded (RevLex r s) :=
WellFounded.intro fun ⟨a, b⟩ => revLexAccessible (apply hb b) (WellFounded.apply ha) a
end
section
def SkipLeft (α : Type u) {β : Type v} (s : β → β → Prop) : @PSigma α (fun a => β) → @PSigma α (fun a => β) → Prop :=
RevLex emptyRelation s
def skipLeft (α : Type u) {β : Type v} {s : β → β → Prop} (hb : WellFounded s) : WellFounded (SkipLeft α s) :=
revLex emptyWf hb
def mkSkipLeft {α : Type u} {β : Type v} {b₁ b₂ : β} {s : β → β → Prop} (a₁ a₂ : α) (h : s b₁ b₂) : SkipLeft α s ⟨a₁, b₁⟩ ⟨a₂, b₂⟩ :=
RevLex.right _ _ h
end
end PSigma
|
/-
Copyright (c) 2021 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import data.nat.interval
import data.pnat.defs
/-!
# Finite intervals of positive naturals
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file proves that `ℕ+` is a `locally_finite_order` and calculates the cardinality of its
intervals as finsets and fintypes.
-/
open finset pnat
instance : locally_finite_order ℕ+ := subtype.locally_finite_order _
namespace pnat
variables (a b : ℕ+)
lemma Icc_eq_finset_subtype : Icc a b = (Icc (a : ℕ) b).subtype (λ (n : ℕ), 0 < n) := rfl
lemma Ico_eq_finset_subtype : Ico a b = (Ico (a : ℕ) b).subtype (λ (n : ℕ), 0 < n) := rfl
lemma Ioc_eq_finset_subtype : Ioc a b = (Ioc (a : ℕ) b).subtype (λ (n : ℕ), 0 < n) := rfl
lemma Ioo_eq_finset_subtype : Ioo a b = (Ioo (a : ℕ) b).subtype (λ (n : ℕ), 0 < n) := rfl
lemma map_subtype_embedding_Icc : (Icc a b).map (function.embedding.subtype _) = Icc (a : ℕ) b :=
map_subtype_embedding_Icc _ _ _ (λ c _ x hx _ hc _, hc.trans_le hx)
lemma map_subtype_embedding_Ico : (Ico a b).map (function.embedding.subtype _) = Ico (a : ℕ) b :=
map_subtype_embedding_Ico _ _ _ (λ c _ x hx _ hc _, hc.trans_le hx)
lemma map_subtype_embedding_Ioc : (Ioc a b).map (function.embedding.subtype _) = Ioc (a : ℕ) b :=
map_subtype_embedding_Ioc _ _ _ (λ c _ x hx _ hc _, hc.trans_le hx)
lemma map_subtype_embedding_Ioo : (Ioo a b).map (function.embedding.subtype _) = Ioo (a : ℕ) b :=
map_subtype_embedding_Ioo _ _ _ (λ c _ x hx _ hc _, hc.trans_le hx)
@[simp] lemma card_Icc : (Icc a b).card = b + 1 - a :=
by rw [←nat.card_Icc, ←map_subtype_embedding_Icc, card_map]
@[simp] lemma card_Ico : (Ico a b).card = b - a :=
by rw [←nat.card_Ico, ←map_subtype_embedding_Ico, card_map]
@[simp] lemma card_Ioc : (Ioc a b).card = b - a :=
by rw [←nat.card_Ioc, ←map_subtype_embedding_Ioc, card_map]
@[simp] lemma card_Ioo : (Ioo a b).card = b - a - 1 :=
by rw [←nat.card_Ioo, ←map_subtype_embedding_Ioo, card_map]
@[simp] lemma card_fintype_Icc : fintype.card (set.Icc a b) = b + 1 - a :=
by rw [←card_Icc, fintype.card_of_finset]
@[simp] lemma card_fintype_Ico : fintype.card (set.Ico a b) = b - a :=
by rw [←card_Ico, fintype.card_of_finset]
@[simp] lemma card_fintype_Ioc : fintype.card (set.Ioc a b) = b - a :=
by rw [←card_Ioc, fintype.card_of_finset]
@[simp] lemma card_fintype_Ioo : fintype.card (set.Ioo a b) = b - a - 1 :=
by rw [←card_Ioo, fintype.card_of_finset]
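-- A sanity-check sketch (not part of the original file; Lean 3/mathlib
-- syntax): with `a = 1` and `b = 3`, `card_Icc` gives `3 + 1 - 1 = 3`:
--
-- example : (Icc (1 : ℕ+) 3).card = 3 := by simp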
end pnat
|
function scale = FindChromaticyScale(M, I)
%
% scale = FindChromaticyScale(M, I)
%
%
% Input:
%   -M: the target color; a 1 x n vector with one entry per color channel.
%   -I: the input color to be scaled; a 1 x n vector with the same
%       number of channels as M.
%
% Output:
%   -scale: a 1 x n vector of per-channel scaling factors such that the
%           chromaticity (normalized direction) of I .* scale matches
%           that of M as closely as possible in the least-squares sense.
%
% Copyright (C) 2016 Francesco Banterle
%
% This program is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program. If not, see <http://www.gnu.org/licenses/>.
%
l_m = length(M);
l_I = length(I);
if((l_m ~= l_I) || isempty(M) || isempty(I))
    error('FindChromaticyScale: inputs must be non-empty and have the same number of color channels.');
end
function err = residualFunction(p)
I_c = I .* p;
I_c_n = I_c / norm(I_c);
M_n = M / norm(M);
err = sum((I_c_n - M_n).^2);
end
opts = optimset('Display', 'none', 'TolFun', 1e-8, 'TolX', 1e-8);
scale = fminsearch(@residualFunction, ones(1, l_m), opts);
end
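% Example (hypothetical values): per-channel gains that make a neutral grey
% input match a warm reference white in chromaticity:
%
%   s = FindChromaticyScale([1.0, 0.9, 0.8], [0.5, 0.5, 0.5]);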
|
% Chapter 3 - Complex Iterative Maps.
% Program 3b - The Mandelbrot Set.
% Thanks to Steve Lord from The MathWorks for his help.
% Copyright Birkhauser 2013. Stephen Lynch.
% Vectorized program.
% Plot the Mandelbrot set in black and white (Figure 3.2).
Nmax = 50; scale = 0.005;
xmin = -2.4; xmax = 1.2;
ymin = -1.5; ymax = 1.5;
% Generate x and y coordinates and z complex values
[x,y]=meshgrid(xmin:scale:xmax,ymin:scale:ymax);
z = x+1i*y;
% Generate w accumulation matrix and k counting matrix
w = zeros(size(z));
k = zeros(size(z));
N = 0;
while N<Nmax && ~all(k(:))
w = w.^2+z;
N = N+1;
k(~k & abs(w)>4) = N;
end
k(k==0) = Nmax;
figure
s = pcolor(x, y, mod(k, 2));
colormap([0 0 0;1 1 1])
set(s,'edgecolor','none')
axis([xmin xmax -ymax ymax])
fsize=15;
set(gca,'XTick',xmin:0.4:xmax,'FontSize',fsize)
set(gca,'YTick',-ymax:0.5:ymax,'FontSize',fsize)
xlabel('Re z','FontSize',fsize)
ylabel('Im z','FontSize',fsize)
% End of Program 3b
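% To zoom into a region of the set, shrink the axis limits and the step
% size together before re-running, e.g. (illustrative values only):
%   xmin = -0.75; xmax = -0.73; ymin = 0.09; ymax = 0.11; scale = 2e-5;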
|
module RecursionSchemes
data Fix : (f : Type -> Type) -> Type where
Fx : (f (Fix f)) -> Fix f
data ListF : Type -> Type -> Type where
Nil : ListF a f
Cons : a -> f -> ListF a f
Functor (ListF a) where
map f [] = []
map f (Cons x xs) = Cons x (f xs)
List' : Type -> Type
List' a = Fix (ListF a)
nil : List' a
nil = Fx Nil
(::) : a -> List' a -> List' a
(::) x xs = Fx (Cons x xs)
cata : Functor f => (f a -> a) -> Fix f -> a
cata alg (Fx f) = (alg . map (cata alg)) f
ana : Functor f => (a -> f a) -> a -> Fix f
ana f a = (Fx . map (ana f) . f) a
hylo : Functor f => (f b -> b) -> (a -> f a) -> a -> b
hylo f g a = (cata f . ana g) a
-- Catamorphism examples
count : List' a -> Int
count xs = cata countF xs
where
countF : ListF a Int -> Int
countF [] = 0
countF (Cons _ c) = 1 + c
sum : List' Int -> Int
sum xs = cata sumF xs
where
sumF : ListF Int Int -> Int
sumF [] = 0
sumF (Cons x c) = x + c
example : List' Int
example = 1 :: 2 :: 3 :: 4 :: 5 :: nil
testCount : Int
testCount = count example
testSum : Int
testSum = sum example
-- Anamorphism examples
repeat : Int -> List' Int
repeat n = ana countF n
where
countF : Int -> ListF Int Int
countF n = if n <= 0 then Nil else Cons n (n - 1)
testRepeat : Int
testRepeat = sum (repeat 5)
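-- A hylomorphism sketch (not part of the original examples): factorial as
-- the product of the count-down list n, n-1, ..., 1, computed by `hylo`
-- without ever materialising the intermediate list.
prodF : ListF Int Int -> Int
prodF [] = 1
prodF (Cons x c) = x * c

factorial : Int -> Int
factorial n = hylo prodF factCoalg n
  where
    factCoalg : Int -> ListF Int Int
    factCoalg m = if m <= 0 then Nil else Cons m (m - 1)

testFactorial : Int
testFactorial = factorial 5 -- = 120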
|
= Livin' the Dream =
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 28 21:29:38 2016
@author: zackakil
"""
import pandas as pa
import numpy as np
from PrimeText import PrimeText
import matplotlib.pyplot as plt

pt = PrimeText()

# Load the YouTube comments (the CSV is not UTF-8, hence the explicit encoding).
ytData = pa.read_csv("utube.csv", encoding='ISO-8859-1')
comments = ytData['comment']

# Clean the raw comments and build an indexed word dictionary over them.
pt.cleanData(comments)
pt.assembleDictionary()
pt.indexDictionary()
pt.indexComments()

# Count how often each dictionary word occurs across the comments.
keyText = []
keyCount = []
for key in pt.indexedDictionary:  # was .iteritems(); the values were unused
    c = pt.countInRecords([key])
    keyText.append(key)
    keyCount.append(c)

# Plot the 100 most frequent words as a bar chart.
s1 = pa.Series(keyCount, index=keyText)
sortedS1 = s1.sort_values(ascending=False)[:100]
sortedS1.plot.bar()
plt.show()
|
%!TEX encoding = UTF-8 Unicode
\documentclass[letterpaper,9pt,english]{article}
\usepackage[utf8]{inputenc}
\usepackage[empty]{fullpage}
\usepackage{titlesec}
\usepackage{caption}
\usepackage{afterpage}
\usepackage[inline]{enumitem}
\usepackage[pdftex]{hyperref}
\usepackage{fancyhdr}
\usepackage[yyyymmdd]{datetime}
\usepackage{xcolor}
\usepackage{graphicx}
\renewcommand{\dateseparator}{--}
\newcommand\blfootnote[1]{%
\let\thefootnote\relax%
\footnotetext{#1}%
\let\thefootnote\svthefootnote%
}
\pagestyle{fancy}
\fancyhf{} % clear all header and footer fields
\fancyfoot{}
\renewcommand{\headrulewidth}{0in}
\renewcommand{\footrulewidth}{0in}
% Adjust margins
\addtolength{\oddsidemargin}{-0.55in}
\addtolength{\evensidemargin}{-0.5in}
\addtolength{\textheight}{1in}
\addtolength{\textwidth}{1in}
\addtolength{\topmargin}{-.5in}
\setlength{\tabcolsep}{0in}
% Sections formatting
\titleformat{\section}{
\vspace{-5pt}\scshape\raggedright\large
}{}{0em}{}[\color{black}\titlerule \vspace{-2pt}]
%-------------------------
% Custom commands
\newcommand{\resumeSubheading}[4]{
\vspace{2pt}\item
\begin{tabular*}{0.97\textwidth}{l@{\extracolsep{\fill}}r}
\textbf{#1} & \textit{\small #2} \\
#3 & \small #4 \\
\end{tabular*}\vspace{6pt}
}
\renewcommand{\labelitemii}{$\circ$}
% Ensure that generate pdf is machine readable/ATS parsable
\pdfgentounicode=1
%\renewcommand{\familydefault}{\sfdefault}
%-------------------------------------------
%%%%%%-CV-%%%%%%
\begin{document}
%----------HEADING-----------------
\begin{tabular*}{\textwidth\raggedright}{l@{\extracolsep{\fill}}r}
\textbf{\Large Enrico Stefanel} \small\textit{born 1998--04--29} (Italian and Canadian Citizenship) & Email: \href{mailto:[email protected]}{\texttt{[email protected]}}\\
\href{https://www.linkedin.com/in/enricostefanel/?locale=en_US}{\texttt{linkedin.com/in/enricostefanel}} & Mobile: \href{tel:+393450014810}{+39~345~001~4810}\\
\end{tabular*}
%-----------EXPERIENCE-----------------
\section{Experience}
\begin{itemize}[leftmargin=*]
\resumeSubheading
{Internship as \textit{Data Scientist}}{2021--10 \textrightarrow~2022--01}
{\href{https://www.dca.it}{Danieli Automation S.p.A.}, Buttrio (UD), ITA}{}\\
I experimented with Computer Vision techniques for the development and evaluation of a \textit{Multiclass Object Detection} system for the recognition of annotations on casting products, using \emph{state-of-the-art} models such as EfficientNet and YOLO.
\end{itemize}
%-----------EDUCATION-----------------
\section{Education}
\begin{itemize}[leftmargin=*]
\resumeSubheading
{Bachelor Degree Course in \href{https://www.uniud.it/en/education/offer/courses/scientific-area/mathematics-computer-sciences-multimedia-physics/bachelor-degree-courses-undergraduate-180-ects/internet-of-things-big-data-machine-learning?set_language=en}{\textit{Internet of Things, Big Data \& Web}}}{2017--10 \textrightarrow~Present}
{\href{https://www.uniud.it/en/uniud-international?set_language=en/}{University of Udine}, Udine, ITA}{ \iffalse [\dots/110] EQF 6 \fi }\\
The course focuses on the technologies, tools and methodologies that characterize the fields of Big Data and Machine Learning,
but it is also organized to give a solid theoretical basis in Mathematics, Statistics and, of course, Computer~Science.
Some of the courses I have attended, grouped by field of interest, are the following:
\begin{itemize}\vspace{-0.5em}
\setlength\itemsep{0em}
\item{Machine Learning, Data Science, Statistics and Databases;}
\item{Algorithms, Data Structures, (Object-Oriented) Programming and Software Engineering.}
\end{itemize}\vspace{-0.5em}
During my studies, I also served from 2019--01 to 2021--09 as the student representative in the university's \href{https://nuva.uniud.it/funzioni/il-personale/001-digitale-nucleo-di-valutazione-modifica.pdf}{\textit{Nucleus of evaluation}}. My main role was to analyze data from all courses of study, identifying those with problems of some kind.
\vspace{0.5em}
\resumeSubheading
{Scientific High School Diploma}{2012--09 \textrightarrow~2017--07}
{\href{http://www.malignani.ud.it/}{I.S.I.S.~Arturo Malignani}, Udine, ITA}{[77/100] EQF 4}\\
The \textit{Applied Science Course} I attended provides advanced skills relating to scientific-technological
culture, with particular reference to mathematics, physics, chemistry, biology, information technology and their applications.
I participated in the \href{http://olimpiadi.dm.unibo.it/}{National Mathematical Olympiad} finals,%
competing both individually and with my school's Math team. % (in 2014) and (in 2016)
\end{itemize}
%--------PROGRAMMING SKILLS------------
\section{Programming Skills}
%\textbf{Fields of Interest}{: Data Analysis, Machine Learning, \dots}
%\hfill
\textbf{Languages}{: Python, R, Java, SQL, \dots}
\hfill
\textbf{Technologies}{: Git, RaspberryPi, \dots}\vspace{0.5em}
\\
\textbf{Projects:}
\begin{itemize}\vspace{-0.5em}
\setlength\itemsep{0em}
\item{\textbf{FVG emergency rooms situation} (\href{https://enst.it/0Q3X}{\texttt{https://enst.it/0Q3X}}): data visualization project about the emergency room system in my home country during the 2021 Summer.}
\end{itemize}\vspace{-0.5em}
%--------OTHER COURSES------------
\section{Other Courses}
\begin{itemize}[leftmargin=*]
\item \textbf{Programming online courses}{: I have attended \href{https://www.kaggle.com/enstit/}{\textit{Kaggle.com}} Data Science free courses
including those on Data Visualization, Deep Learning, Computer Vision, Geospatial Analysis and Natural Language Processing,
and some of the \href{https://www.hackerrank.com/enstit/}{\textit{HackerRank.com}} Algorithms and Programming courses.}\vspace{-2pt}
\item \textbf{Fire-fighting course}{: I have attended a course about security on work and have obtained the
\textit{Certificate of technical competence for carrying out the task of fire-fighting officer}, including for high-risk situations.}\vspace{-2pt}
\item \textbf{\href{https://www.latex-project.org}{\LaTeX} course}{: I use \LaTeX{} daily for writing presentations, essays and letters. Even this document is written in \LaTeX{}.}
\end{itemize}
%--------LANGUAGES------------
\section{Languages}
\textbf{Italian}{: Mother tongue}
\hfill
\textbf{English}{: Upper-intermediate level (B2 self-rated)}
%--------HOBBIES------------
%\section{Hobbies and Lifestyle}
% \begin{itemize}[leftmargin=*]
% \item \textbf{Minimalist lifestyle}{: Since I was 18, \dots}\vspace{-2pt}
% \end{itemize}
%--------ATTACHMENTS------------
\section{Attachments}
\begin{enumerate}[label={[\arabic*]}]
\item{A photo of me}
%\item{A cover letter \dots}
\end{enumerate}
\afterpage{
\begin{figure}
\begin{center}
\includegraphics[height=0.4\textwidth]{enrico-bw.jpg}
\caption*{A photo of me\\\footnotesize Self-taken on 2021--10--10}
\end{center}
\end{figure}
\clearpage
}
%--------FOOTNOTES------------
\blfootnote{I hereby authorize the processing of my personal data according to the \href{http://data.europa.eu/eli/reg/2016/679/oj}{2016/679 European Union regulation (\textit{GDPR})}.}
\blfootnote{Dates are represented in \href{https://www.iso.org/iso-8601-date-and-time-format.html}{\texttt{ISO-8601}} notation (\textit{YYYY--MM--DD} format).}
\blfootnote{Last updated: \today}
\end{document}
|
Three diverse natural areas covering no less than 1200 km² are the green blessing of our surroundings. The Mecsek hills, the Zselic slopes and the riverbanks of the Danube and Dráva guarantee an abundant fauna and flora to discover. Sports and other outdoor activities can be found for every age and interest. Upon your arrival, Hunza will provide all the detailed information you need to reach the hidden treasures of the region.
The region also offers a rich historical heritage of arts, architecture and traditions. Pécs is undoubtedly one of the most vibrant cultural and artistic centres of Hungary, topped off with a Mediterranean atmosphere.
let you travel back in time.
Diverse terroirs and a wine tradition dating back to the Roman era make Hungarian wines well worth discovering. In the surroundings of Hunza, dry white wines are made in the Pécs wine region, while renowned reds are produced in and around Villány and Szekszárd. The gastronomical centre of Pécs brings all of this together in cozy bars, fine restaurants and many food and wine festivals.
Hungarians know what's good in life: thermal baths, sauna, massage and a good chat. This has been part of Hungarian culture for centuries. The many thermal springs have specific beneficial qualities depending on the depth from which the hot water rises and on its mineral content. Nowadays most thermal baths have a full wellness complex with all modern facilities and play areas for kids and youngsters.
|
function [ R ] = V2R( V )
%V2R converts a 1x3 angle-axis vector into a 3x3 rotation matrix
% Inputs -
% V - 1x3 vector of form [rx,ry,rz] where rx,ry,rz is an angle-axis
% representation of the angle where the unit vector representing the axis
% has been multiplied by the angle of rotation about it
%
% Outputs -
% R - a standard 3x3 transformation matrix
validateattributes(V, {'numeric'},{'size',[1,3]});
V = double(V(:));
s = norm(V);
if(s == 0)
R = eye(3);
return;
end
V = [V/s; s];
R = vrrotvec2mat(V);
end
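% Example: a 90 degree rotation about the z axis (axis [0,0,1] scaled by
% the angle pi/2); note that vrrotvec2mat is provided by the Simulink 3D
% Animation toolbox:
%
%   R = V2R([0, 0, pi/2]);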
|
(* Title: HOL/Auth/n_mutualEx_lemma_on_inv__3.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
theory n_mutualEx_lemma_on_inv__3 imports n_mutualEx_base
begin
section{*All lemmas on causal relation between inv__3 and some rule r*}
lemma n_TryVsinv__3:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_Try i)" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__3 p__Inv3 p__Inv4)"
shows "invHoldForRule Interp s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_Try i" apply fastforce done
from a2 obtain p__Inv3 p__Inv4 where a2:"p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__3 p__Inv3 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i=p__Inv3)\<or>(i~=p__Inv3\<and>i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule Interp s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv3)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule Interp s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv3\<and>i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule Interp s f r (invariants N)" by auto
}
ultimately show "invHoldForRule Interp s f r (invariants N)" by satx
qed
lemma n_CritVsinv__3:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_Crit i)" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__3 p__Inv3 p__Inv4)"
shows "invHoldForRule Interp s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_Crit i" apply fastforce done
from a2 obtain p__Inv3 p__Inv4 where a2:"p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__3 p__Inv3 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i=p__Inv3)\<or>(i~=p__Inv3\<and>i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule Interp s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv3)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Para (Ident ''n'') p__Inv4)) (Const E)) (eqn (IVar (Ident ''x'')) (Const true))))" in exI, auto) done
then have "invHoldForRule Interp s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv3\<and>i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule Interp s f r (invariants N)" by auto
}
ultimately show "invHoldForRule Interp s f r (invariants N)" by satx
qed
lemma n_ExitVsinv__3:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_Exit i)" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__3 p__Inv3 p__Inv4)"
shows "invHoldForRule Interp s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_Exit i" apply fastforce done
from a2 obtain p__Inv3 p__Inv4 where a2:"p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__3 p__Inv3 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i=p__Inv3)\<or>(i~=p__Inv3\<and>i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Para (Ident ''n'') p__Inv4)) (Const C)) (eqn (IVar (Para (Ident ''n'') p__Inv3)) (Const C))))" in exI, auto) done
then have "invHoldForRule Interp s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv3)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule Interp s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv3\<and>i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule Interp s f r (invariants N)" by auto
}
ultimately show "invHoldForRule Interp s f r (invariants N)" by satx
qed
lemma n_IdleVsinv__3:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_Idle i)" and
a2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__3 p__Inv3 p__Inv4)"
shows "invHoldForRule Interp s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_Idle i" apply fastforce done
from a2 obtain p__Inv3 p__Inv4 where a2:"p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__3 p__Inv3 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i=p__Inv3)\<or>(i~=p__Inv3\<and>i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule Interp s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv3)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule Interp s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv3\<and>i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule Interp s f r (invariants N)" by auto
}
ultimately show "invHoldForRule Interp s f r (invariants N)" by satx
qed
end
|
% absolute difference in orientation between a fly and the closest fly
% according to type
function [data,units] = compute_absthetadiff(trx,n,type)
flies = trx.exp2flies{n};
nflies = numel(flies);
data = cell(1,nflies);
for i1 = 1:nflies,
fly1 = flies(i1);
% fly closest to fly1 according to type
closestfly = trx(fly1).(['closestfly_',type]);
% orientation of fly1
theta_mm1 = trx(fly1).theta_mm;
% loop over all flies
for i2 = 1:nflies,
fly2 = flies(i2);
if i1 == i2, continue; end
% frames where this fly is closest
idx = find(closestfly == fly2);
if isempty(idx), continue; end
% orientation of fly2
off = trx(fly1).firstframe - trx(fly2).firstframe;
theta_mm2 = trx(fly2).theta_mm(off+idx);
% absolute difference in orientation
data{i1}(idx) = abs(modrange(theta_mm2 - theta_mm1(idx),-pi,pi));
end
end
units = parseunits('rad');
|
Require Export XR_Rminus.
Require Export XR_Rplus_opp_r.
Local Open Scope R_scope.
Implicit Type r : R.
Lemma Rminus_diag_eq : forall r1 r2, r1 = r2 -> r1 - r2 = R0.
Proof.
intros x y heq.
subst y.
unfold Rminus.
rewrite Rplus_opp_r.
reflexivity.
Qed.
|
% !TeX root = ../thesis.tex
% !TeX spellcheck = en_GB
% !TeX encoding = UTF-8
A path-dependent option is a type of exotic option in which the payoff depends not only on the price of the underlying asset at maturity, but also on the history of the underlying's price up to that point. Typical examples of popular exotic options are Asian options, lookback options, barrier options, and digital options.
In the Black-Scholes market model, it is not possible to find closed-form analytical formulae for the payoffs of most exotic options. This motivates the need for fast numerical algorithms to determine the fair value of such options. Such algorithms fall into a few broad categories: some exploit the convergence of prices computed in a discrete model to those of the continuous model as the time step shrinks; others numerically solve the pricing partial differential equation, or simulate using Monte Carlo methods.
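As a concrete illustration of the first approach, recall the standard parametrisation of the Cox-Ross-Rubinstein tree (treated in detail in Chapter \ref{cha:models}): with time step $\Delta t$, volatility $\sigma$ and riskless rate $r$, one takes
\[
u = e^{\sigma\sqrt{\Delta t}}, \qquad d = \frac{1}{u}, \qquad p = \frac{e^{r\Delta t} - d}{u - d},
\]
and the binomial prices computed with these up/down factors and risk-neutral probability $p$ converge to the Black-Scholes price as $\Delta t \to 0$.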
Under the Cox-Ross-Rubinstein model, Gaudenzi\footnote{\url{http://people.uniud.it/page/marcellino.gaudenzi}}, Zanette\footnote{\url{http://people.uniud.it/page/antonino.zanette}} and Lepellere\footnote{\url{https://www.researchgate.net/profile/Maria_Lepellere}} introduced the singular points method, a numerical method for pricing Asian and lookback options; a modification enables the pricing of cliquet options. In this method, at each node of the binomial tree of the underlying risky asset, the price is represented as a continuous function of the path-dependent parameter. An advantage of this method over pre-existing ones is its low computational and space complexity. It is convergent, and allows us to set \emph{a priori} upper and lower bounds on the error.
In this master's thesis, we present an exposition of the \emph{singular points} method and show how it may be used to price exotic options. We also explore the extensibility of the method to similar types of options, such as Asian options with a geometric rather than an arithmetic mean, and ask whether it can accommodate variable local volatilities and interest rates. We also determine the computational order of complexity of the method in the case of cliquet options.
\paragraph{Prerequisites}
The reader is expected to be familiar with basic Probability Theory and Stochastic Processes. In particular, (s)he should be comfortable with probability spaces and measures, filtrations, random variables, stochastic processes, martingales, Brownian motion, and elementary stochastic calculus. There is no strict requirement of prior knowledge of financial concepts, as we introduce the required terminology in the introductory chapters.
\paragraph{Note to the reader}
The theory part of the thesis borrows heavily from the book Introduction to Stochastic Calculus Applied to Finance by Damien Lamberton and Bernard Lapeyre \cite[]{Lamberton1996}, as well as from the lecture notes on Mathematical Finance authored by Prof. Fabio Antonelli. That course was offered in the Fall semester of 2014--15 at Università degli Studi dell'Aquila, Italy.
The chapters on the singular points method have been motivated by the series of papers published by Gaudenzi, Zanette and Lepellere on the same topic.
\paragraph{Structure of the thesis}
In Chapter \ref{cha:prologue}, we briefly discuss financial assets and financial markets. In Chapter \ref{cha:models}, we introduce the Cox-Ross-Rubinstein model and the Black-Scholes model. In chapters \ref{cha:asian} and \ref{cha:cliquet}, we see how the singular points method may be used to price Asian and cliquet options, respectively.
%%% Local Variables:
%%% mode: latex
%%% TeX-master: t
%%% End:
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <gbpLib.h>
#include <gbpMath.h>
#include <gbpCosmo_linear_theory.h>
#include <gsl/gsl_multimin.h>
typedef struct init_gbpCosmo2gbpCosmo_integrand_params_struct init_gbpCosmo2gbpCosmo_integrand_params;
struct init_gbpCosmo2gbpCosmo_integrand_params_struct {
double inv_s;
double z_source;
double z_target;
double R_1;
double R_2;
cosmo_info ** cosmo_source;
cosmo_info ** cosmo_target;
int n_int;
gsl_integration_workspace *wspace;
};
double init_gbpCosmo2gbpCosmo_integrand(double R, void *params_in) {
init_gbpCosmo2gbpCosmo_integrand_params *params = (init_gbpCosmo2gbpCosmo_integrand_params *)params_in;
double inv_s = params->inv_s;
double z_source = params->z_source;
double z_target = params->z_target;
cosmo_info ** cosmo_source = params->cosmo_source;
cosmo_info ** cosmo_target = params->cosmo_target;
return (pow(1. - (sigma_R(cosmo_source, inv_s * R, z_source, PSPEC_LINEAR_TF, PSPEC_ALL_MATTER)) /
(sigma_R(cosmo_target, R, z_target, PSPEC_LINEAR_TF, PSPEC_ALL_MATTER)),
2.) /
R);
}
double init_gbpCosmo2gbpCosmo_minimize_function(const gsl_vector *v_i, void *params_in) {
// Set variable integrand parameters
init_gbpCosmo2gbpCosmo_integrand_params *params = (init_gbpCosmo2gbpCosmo_integrand_params *)params_in;
params->inv_s = gsl_vector_get(v_i, 0);
params->z_target = gsl_vector_get(v_i, 1);
// Perform integral to minimize
gsl_function integrand;
double delta_i;
double abs_error;
integrand.function = init_gbpCosmo2gbpCosmo_integrand;
integrand.params = params_in;
gsl_integration_qag(&integrand, params->R_1, params->R_2, 0, 1e-3, params->n_int, GSL_INTEG_GAUSS61, params->wspace, &delta_i, &abs_error);
return (delta_i / take_ln(params->R_2 / params->R_1));
}
void init_gbpCosmo2gbpCosmo(cosmo_info ** cosmo_source,
cosmo_info ** cosmo_target,
double z_min,
double M_min,
double M_max,
gbpCosmo2gbpCosmo_info *gbpCosmo2gbpCosmo) {
SID_log("Initializing cosmology scaling...", SID_LOG_OPEN | SID_LOG_TIMER);
SID_set_verbosity(SID_SET_VERBOSITY_RELATIVE, -1);
// Store some info in the gbpCosmo2gbpCosmo_info structure
gbpCosmo2gbpCosmo->M_min = M_min;
gbpCosmo2gbpCosmo->M_max = M_max;
gbpCosmo2gbpCosmo->z_min = z_min;
gbpCosmo2gbpCosmo->cosmo_source = (*cosmo_source);
gbpCosmo2gbpCosmo->cosmo_target = (*cosmo_target);
// Perform minimization
// const gsl_multimin_fminimizer_type *T=gsl_multimin_fminimizer_nmsimplex2;
const gsl_multimin_fminimizer_type *T = gsl_multimin_fminimizer_nmsimplex;
gsl_multimin_fminimizer * s = NULL;
gsl_vector * ss, *x;
gsl_multimin_function minex_func;
// Starting point
x = gsl_vector_alloc(2);
gsl_vector_set(x, 0, 1.); // inv_s
gsl_vector_set(x, 1, z_min); // z_scaled
// Set initial step sizes to 1
ss = gsl_vector_alloc(2);
gsl_vector_set_all(ss, 1.0);
// Set parameters
init_gbpCosmo2gbpCosmo_integrand_params params;
params.cosmo_source = cosmo_source;
params.cosmo_target = cosmo_target;
params.z_source = z_min;
params.R_1 = R_of_M(M_min, *cosmo_source);
params.R_2 = R_of_M(M_max, *cosmo_source);
params.inv_s = gsl_vector_get(x, 0);
params.z_target = gsl_vector_get(x, 1);
params.n_int = 100;
params.wspace = gsl_integration_workspace_alloc(params.n_int);
// Initialize method
minex_func.n = 2;
minex_func.f = init_gbpCosmo2gbpCosmo_minimize_function;
minex_func.params = (void *)(&params);
s = gsl_multimin_fminimizer_alloc(T, 2);
gsl_multimin_fminimizer_set(s, &minex_func, x, ss);
// Perform minimization
double size;
int status;
size_t iter = 0;
size_t iter_max = 200;
do {
iter++;
status = gsl_multimin_fminimizer_iterate(s);
if(status)
SID_exit_error("Error encountered during minimisation in init_gbpCosmo2gbpCosmo() (status=%d).",
SID_ERROR_LOGIC, status);
size = gsl_multimin_fminimizer_size(s);
status = gsl_multimin_test_size(size, 1e-2);
} while(status == GSL_CONTINUE && iter <= iter_max);
if(status != GSL_SUCCESS)
SID_exit_error("Failed to converge during minimisation in init_gbpCosmo2gbpCosmo() (status=%d,iter=%d).",
SID_ERROR_LOGIC, status, iter);
// Finalize results
double Omega_M_source = ((double *)ADaPS_fetch(*cosmo_source, "Omega_M"))[0];
double H_Hubble_source = 1e2 * ((double *)ADaPS_fetch(*cosmo_source, "h_Hubble"))[0];
double Omega_M_target = ((double *)ADaPS_fetch(*cosmo_target, "Omega_M"))[0];
double H_Hubble_target = 1e2 * ((double *)ADaPS_fetch(*cosmo_target, "h_Hubble"))[0];
gbpCosmo2gbpCosmo->s_L = 1. / gsl_vector_get(s->x, 0);
gbpCosmo2gbpCosmo->s_M = (Omega_M_target * H_Hubble_target) / (Omega_M_source * H_Hubble_source) * pow((gbpCosmo2gbpCosmo->s_L), 3.);
gbpCosmo2gbpCosmo->z_min_scaled = gsl_vector_get(s->x, 1);
// Calculate growth factors needed for
// determining redshift mappings
gbpCosmo2gbpCosmo->D_prime_z_min = linear_growth_factor(z_min, cosmo_target);
gbpCosmo2gbpCosmo->D_z_scaled = linear_growth_factor(gbpCosmo2gbpCosmo->z_min_scaled, cosmo_source);
gbpCosmo2gbpCosmo->D_ratio = gbpCosmo2gbpCosmo->D_prime_z_min / gbpCosmo2gbpCosmo->D_z_scaled;
// Clean-up
gsl_vector_free(x);
gsl_vector_free(ss);
gsl_multimin_fminimizer_free(s);
gsl_integration_workspace_free(params.wspace);
SID_set_verbosity(SID_SET_VERBOSITY_DEFAULT);
SID_log("Done.", SID_LOG_CLOSE);
}
|
{-
Idris
-}
module Main
record Language where
constructor MkLanguage
name: String
-- intentionally convoluted to avoid inlining
Idris : IO Language
Idris = pure $ MkLanguage "Idris"
main : IO ()
main = putStrLn $ name !Idris
|
function cg_rc_test ( )
%*****************************************************************************80
%
%% CG_RC_TEST tests the CG_RC library.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 12 January 2013
%
% Author:
%
% John Burkardt
%
timestamp ( );
fprintf ( 1, '\n' );
fprintf ( 1, 'CG_RC_TEST:\n' );
fprintf ( 1, ' MATLAB version\n' );
fprintf ( 1, ' Test the CG_RC library.\n' );
cg_test01 ( );
cg_test02 ( );
%
% Terminate.
%
fprintf ( 1, '\n' );
fprintf ( 1, 'CG_RC_TEST:\n' );
fprintf ( 1, ' Normal end of execution.\n' );
fprintf ( 1, '\n' );
timestamp ( );
return
end
|
function [reg_c,rho_c,eta_c] = l_corner(rho,eta,reg_param,U,s,b,method,M)
%L_CORNER Locate the "corner" of the L-curve.
%
% [reg_c,rho_c,eta_c] =
% l_corner(rho,eta,reg_param)
% l_corner(rho,eta,reg_param,U,s,b,method,M)
% l_corner(rho,eta,reg_param,U,sm,b,method,M) , sm = [sigma,mu]
%
% Locates the "corner" of the L-curve in log-log scale.
%
% It is assumed that corresponding values of || A x - b ||, || L x ||,
% and the regularization parameter are stored in the arrays rho, eta,
% and reg_param, respectively (such as the output from routine l_curve).
%
% If nargin = 3, then no particular method is assumed, and if
% nargin = 2 then it is assumed that reg_param = 1:length(rho).
%
% If nargin >= 6, then the following methods are allowed:
% method = 'Tikh' : Tikhonov regularization
% method = 'tsvd' : truncated SVD or GSVD
% method = 'dsvd' : damped SVD or GSVD
% method = 'mtsvd' : modified TSVD,
% and if no method is specified, 'Tikh' is default. If the Spline Toolbox
% is not available, then only 'Tikh' and 'dsvd' can be used.
%
% An eighth argument M specifies an upper bound for eta, below which
% the corner should be found.
% Per Christian Hansen, IMM, July 26, 2007.
% Set default regularization method.
if (nargin <= 3)
method = 'none';
if (nargin==2), reg_param = (1:length(rho))'; end
else
if (nargin==6), method = 'Tikh'; end
end
% Set this logical variable to 1 (true) if the corner algorithm
% should always be used, even if the Spline Toolbox is available.
alwayscorner = 0;
% Set threshold for skipping very small singular values in the
% analysis of a discrete L-curve.
s_thr = eps; % Neglect singular values less than s_thr.
% Set default parameters for treatment of discrete L-curve.
deg = 2; % Degree of local smoothing polynomial.
q = 2; % Half-width of local smoothing interval.
order = 4; % Order of fitting 2-D spline curve.
% Initialization.
if (length(rho) < order)
error('Too few data points for L-curve analysis')
end
if (nargin > 3)
[p,ps] = size(s); [m,n] = size(U);
beta = U'*b;
if (m>n), b0 = b - U*beta; end
if (ps==2)
s = s(p:-1:1,1)./s(p:-1:1,2);
beta = beta(p:-1:1);
end
xi = beta./s;
end
% Restrict the analysis of the L-curve according to M (if specified).
if (nargin==8)
index = find(eta < M);
rho = rho(index); eta = eta(index); reg_param = reg_param(index);
end
if (strncmp(method,'Tikh',4) | strncmp(method,'tikh',4))
% The L-curve is differentiable; computation of curvature in
% log-log scale is easy.
% Compute g = - curvature of L-curve.
g = lcfun(reg_param,s,beta,xi);
% Locate the corner. If the curvature is negative everywhere,
% then define the leftmost point of the L-curve as the corner.
[gmin,gi] = min(g);
reg_c = fminbnd('lcfun',...
reg_param(min(gi+1,length(g))),reg_param(max(gi-1,1)),...
optimset('Display','off'),s,beta,xi); % Minimizer.
kappa_max = - lcfun(reg_c,s,beta,xi); % Maximum curvature.
if (kappa_max < 0)
lr = length(rho);
reg_c = reg_param(lr); rho_c = rho(lr); eta_c = eta(lr);
else
f = (s.^2)./(s.^2 + reg_c^2);
eta_c = norm(f.*xi);
rho_c = norm((1-f).*beta);
if (m>n), rho_c = sqrt(rho_c^2 + norm(b0)^2); end
end
elseif (strncmp(method,'tsvd',4) | strncmp(method,'tgsv',4) | ...
strncmp(method,'mtsv',4) | strncmp(method,'none',4))
% Use the adaptive pruning algorithm to find the corner, if the
% Spline Toolbox is not available.
if ~exist('splines','dir') | alwayscorner
%error('The Spline Toolbox is not available so l_corner cannot be used')
reg_c = corner(rho,eta);
rho_c = rho(reg_c);
eta_c = eta(reg_c);
return
end
% Otherwise use local smoothing followed by fitting a 2-D spline curve
% to the smoothed discrete L-curve. Restrict the analysis of the L-curve
% according to s_thr.
if (nargin > 3)
if (nargin==8) % In case the bound M is in action.
s = s(index,:);
end
index = find(s > s_thr);
rho = rho(index); eta = eta(index); reg_param = reg_param(index);
end
% Convert to logarithms.
lr = length(rho);
lrho = log(rho); leta = log(eta); slrho = lrho; sleta = leta;
% For all interior points k = q+1:length(rho)-q-1 on the discrete
% L-curve, perform local smoothing with a polynomial of degree deg
% to the points k-q:k+q.
v = (-q:q)'; A = zeros(2*q+1,deg+1); A(:,1) = ones(length(v),1);
for j = 2:deg+1, A(:,j) = A(:,j-1).*v; end
for k = q+1:lr-q-1
cr = A\lrho(k+v); slrho(k) = cr(1);
ce = A\leta(k+v); sleta(k) = ce(1);
end
% Fit a 2-D spline curve to the smoothed discrete L-curve.
sp = spmak((1:lr+order),[slrho';sleta']);
pp = ppbrk(sp2pp(sp),[4,lr+1]);
% Extract abscissa and ordinate splines and differentiate them.
% Compute as many function values as default in spleval.
P = spleval(pp); dpp = fnder(pp);
D = spleval(dpp); ddpp = fnder(pp,2);
DD = spleval(ddpp);
ppx = P(1,:); ppy = P(2,:);
dppx = D(1,:); dppy = D(2,:);
ddppx = DD(1,:); ddppy = DD(2,:);
% Compute the corner of the discretized spline curve via maximum curvature.
% No need to refine this corner, since the final regularization
% parameter is discrete anyway.
% Define curvature = 0 where both dppx and dppy are zero.
k1 = dppx.*ddppy - ddppx.*dppy;
k2 = (dppx.^2 + dppy.^2).^(1.5);
I_nz = find(k2 ~= 0);
kappa = zeros(1,length(dppx));
kappa(I_nz) = -k1(I_nz)./k2(I_nz);
[kmax,ikmax] = max(kappa);
x_corner = ppx(ikmax); y_corner = ppy(ikmax);
% Locate the point on the discrete L-curve which is closest to the
% corner of the spline curve. Prefer a point below and to the
% left of the corner. If the curvature is negative everywhere,
% then define the leftmost point of the L-curve as the corner.
if (kmax < 0)
reg_c = reg_param(lr); rho_c = rho(lr); eta_c = eta(lr);
else
index = find(lrho < x_corner & leta < y_corner);
if ~isempty(index)
[dummy,rpi] = min((lrho(index)-x_corner).^2 + (leta(index)-y_corner).^2);
rpi = index(rpi);
else
[dummy,rpi] = min((lrho-x_corner).^2 + (leta-y_corner).^2);
end
reg_c = reg_param(rpi); rho_c = rho(rpi); eta_c = eta(rpi);
end
elseif (strncmp(method,'dsvd',4) | strncmp(method,'dgsv',4))
% The L-curve is differentiable; computation of curvature in
% log-log scale is easy.
% Compute g = - curvature of L-curve.
g = lcfun(reg_param,s,beta,xi,1);
% Locate the corner. If the curvature is negative everywhere,
% then define the leftmost point of the L-curve as the corner.
[gmin,gi] = min(g);
reg_c = fminbnd('lcfun',...
reg_param(min(gi+1,length(g))),reg_param(max(gi-1,1)),...
optimset('Display','off'),s,beta,xi,1); % Minimizer.
kappa_max = - lcfun(reg_c,s,beta,xi,1); % Maximum curvature.
if (kappa_max < 0)
lr = length(rho);
reg_c = reg_param(lr); rho_c = rho(lr); eta_c = eta(lr);
else
f = s./(s + reg_c);
eta_c = norm(f.*xi);
rho_c = norm((1-f).*beta);
if (m>n), rho_c = sqrt(rho_c^2 + norm(b0)^2); end
end
else
error('Illegal method')
end
|
\section{Introduction}
%In connectomics, neuroscientists annotate neurons and their connectivity within 3D volumes to gain insight into the functional structure of the brain. Rapid progress in automatic sample preparation and electron microscopy (EM) acquisition techniques has made it possible to image large volumes of brain tissue at $\approx4\, nm$ per pixel to identify cells, synapses, and vesicles. For $40\, nm$ thick sections, a $1\, mm^3$ volume of brain contains $10^{15}$ voxels, or 1 petabyte of data. With so much data, manual annotation is infeasible, and automatic annotation methods are needed~\cite{jain2010,Liu2014,GALA2014,kaynig2015large}.
%Automatic annotation by segmentation and classification of brain tissue is challenging~\cite{isbi_challenge}. The state of the art uses supervised learning with convolutional neural networks~\cite{Ciresan:2012f}, or potentially even unsupervised learning~\cite{BogovicHJ13}. Typically, cell membranes are detected in 2D images, and the resulting region segmentation is grouped into geometrically-consistent cells across registered sections. Cells may also be segmented across registered sections in 3D directly. Using dynamic programming techniques~\cite{Masci:2013a} and a GPU cluster, these classifiers can segment $\approx1$ terabyte of data per hour ~\cite{kasthuri2015saturated}. This is sufficient to keep up with the 2D data capture process on state-of-the-art electron microscopes (though 3D registration is still an expensive offline operation).
In connectomics, neuroscientists annotate neurons and their connectivity within
3D volumes to gain insight into the functional structure of the brain. Rapid
progress in automatic sample preparation and electron microscopy (EM)
acquisition techniques has made it possible to image large volumes of brain
tissue at nanometer resolution. With a voxel size of
$4\times4\times40~\text{nm}^3$, a cubic millimeter volume is one petabyte of
data. With so much data, manual annotation is not feasible, and automatic
annotation methods are needed~\cite{jain2010,Liu2014,GALA2014,kaynig2015large}.
Automatic annotation by segmentation and classification of brain tissue is
challenging~\cite{isbi_challenge} and all available methods make errors, so
the results must be \emph{proofread} by humans. This crucial
task serves two purposes: 1) to correct errors in the segmentation, and 2) to
increase the body of labeled data from which to train better automatic
segmentation methods. Recent proofreading tools provide intuitive user
interfaces to browse segmentation data in 2D and 3D and to identify and manually
correct errors~\cite{markus_proofreading,raveler,mojo2,haehn_dojo_2014}. Many
kinds of errors exist, such as inaccurate boundaries, but the most common are
\emph{split errors}, where a single segment is labeled as two, and \emph{merge
errors}, where two segments are labeled as one
(Fig.~\ref{fig:merge_and_split_errors}). With user interaction, split errors can
be joined, and the missing boundary in a merge error can be defined with
manually-seeded watersheds~\cite{haehn_dojo_2014}. However, the visual
inspection to find errors takes the majority of the time, even with
semi-automatic correction tools~\cite{proofreading_bottleneck}.
\begin{figure}[t]
\begin{center}
\includegraphics[width=\linewidth]{gfx/merge_and_split_errors.pdf}
\end{center}
\vspace{-4mm}
\caption{The most common proofreading corrections are fixing split errors (red arrows) and merge errors (yellow arrow). A fixed segmentation matches the cell borders.}
\vspace{-4mm}
\label{fig:merge_and_split_errors}
\end{figure}
Our goal is to automatically detect potential split and merge errors to reduce visual
inspection time. Further, to reduce correction time, we propose
corrections to the user to accept or reject. We call this process \textit{guided
proofreading}.
We train a classifier for split error detection with a convolutional neural network
(CNN). This takes as input patches of membrane segmentation probabilities, cell
segmentation masks, and boundary masks, and outputs a split-probability score. As we
must process large data, this classifier only operates on cell boundaries, which
reduces computation over methods that analyze every pixel. For merge errors, we
invert and reuse the split classification network, and ask it to rate a
set of generated boundaries that hypothesize a split.
Possible erroneous regions are sorted by their score, and a candidate correction is generated for each
region. Then, a user works through this list of regions and corrections. In a
forced choice setting, the user either selects a correction or skips it to
advance to the next region. In an automatic setting, corrections whose error
probability exceeds a chosen threshold are applied first, after which the user takes over. Finally, to test
the limits of performance, we create an oracle which only accepts corrections
that improve the segmentation, based on knowledge of the ground truth. This is
guided proofreading with a perfect user.
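For concreteness, the forced-choice loop can be summarized by the following
schematic sketch (illustrative only, not our exact implementation;
\texttt{rank\_regions} and \texttt{propose\_correction} stand for the
classifiers described above):
\begin{verbatim}
for region in rank_regions(segmentation):     # sorted by error score
    correction = propose_correction(region)   # split or merge fix
    if user_accepts(region, correction):      # forced yes/no choice
        segmentation = apply_fix(correction, segmentation)
\end{verbatim}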
We evaluate these methods on multiple connectomics datasets. For the forced
choice setting, we perform a quantitative user study with 20 novice users who
have no previous experience of proofreading EM data. We ask participants to
proofread a small segmentation volume in a fixed time frame. In a
between-subjects design, we compare guided proofreading to the semi-automatic
\textit{focused proofreading} approach by Plaza~\cite{focused_proofreading}. In
addition, we compare against the manual interactive proofreading tool
\textit{Dojo} by Haehn~\etal~\cite{haehn_dojo_2014}. We also ask four domain
experts to use guided proofreading and focused proofreading for comparison.
This paper makes the following contributions.
%
First, we present a CNN-based boundary classifier for split errors, plus a merge
error classifier that inverts the split error classifier. The merge classifier proposes corrections directly,
which removes the need to manually draw the missing
edge. Both classifiers reduce segmentation error with little training data, which is expensive to collect for connectomics data.
%
Second, we develop a human-guided proofreading approach to correcting segmentation
volumes, and compare forced-choice interaction with
automatic and oracle proofreading.
%
Third, we present results of a quantitative user study assessing
guided proofreading. Our method is able to reduce segmentation
error faster than state-of-the-art semi-automatic tools for both novice and
expert users.
%
Fourth, we present the first connectomics proofreading benchmark, with image, label, and human interaction data, and evaluation code.
Guided proofreading is applicable to all existing automatic segmentation methods that produce a label map.
While we train models based on traditional CNN architectures, we propose an error correction approach with human interaction that works with many classifiers.
As such, we believe that our approach is a promising direction to proofread segmentations more efficiently and better tackle large volumes of connectomics imagery.
|
State Before: R : Type u_1
inst✝ : CommSemiring R
f : PowerSeries R
a b : R
⊢ ↑(rescale b) (↑(rescale a) f) = ↑(rescale (a * b)) f State After: case h
R : Type u_1
inst✝ : CommSemiring R
f : PowerSeries R
a b : R
n : ℕ
⊢ ↑(coeff R n) (↑(rescale b) (↑(rescale a) f)) = ↑(coeff R n) (↑(rescale (a * b)) f) Tactic: ext n State Before: case h
R : Type u_1
inst✝ : CommSemiring R
f : PowerSeries R
a b : R
n : ℕ
⊢ ↑(coeff R n) (↑(rescale b) (↑(rescale a) f)) = ↑(coeff R n) (↑(rescale (a * b)) f) State After: case h
R : Type u_1
inst✝ : CommSemiring R
f : PowerSeries R
a b : R
n : ℕ
⊢ b ^ n * (a ^ n * ↑(coeff R n) f) = (a * b) ^ n * ↑(coeff R n) f Tactic: simp_rw [coeff_rescale] State Before: case h
R : Type u_1
inst✝ : CommSemiring R
f : PowerSeries R
a b : R
n : ℕ
⊢ b ^ n * (a ^ n * ↑(coeff R n) f) = (a * b) ^ n * ↑(coeff R n) f State After: no goals Tactic: rw [mul_pow, mul_comm _ (b ^ n), mul_assoc]
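-- Assembled from the trace above into one Lean proof (a sketch; assumes
-- `variable {R : Type*} [CommSemiring R]` is in scope, lemma name illustrative):
theorem rescale_rescale (f : PowerSeries R) (a b : R) :
    rescale b (rescale a f) = rescale (a * b) f := by
  ext n
  simp_rw [coeff_rescale]
  rw [mul_pow, mul_comm _ (b ^ n), mul_assoc]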
|
function dirS = RevertDirEdit(topH)
% dirS = RevertDirEdit(topH);
%
% Returns all the editable fields in the dialog to their original values,
% and reverts the directory structure.
%
% DBR 6/99
% Revert the session and directory structures:
uiData = get(topH, 'UserData');
dirS = uiData.origDirS;
uiData.dirS = dirS;
set(topH, 'UserData', uiData)
% Revert the directory dialog fields
nDir = length(uiData.dirData);
for iField=1:nDir
editFlag = uiData.dirData(iField).edit;
if editFlag
evalStr = ['dirS.', uiData.dirData(iField).field];
set(uiData.dirData(iField).handle, 'string', mat2str(eval(evalStr)));
end
end
|
module kd_tree
!! Provides routines for a k-dimensional tree, a space-partitioning data structure used for organizing points in k-dimensional space. Note that nearest neighbor searches within k-d trees are not suitable for high-dimensional spaces and will likely perform no better than an exhaustive search. As a general rule of thumb, \(N \gg 2^k\). For an introduction to k-d trees, see "An introductory tutorial on kd-trees" (Andrew Moore, 1991).
type kdbox
!! Represents a hyperrectangle (box / n-orthotope)
real :: min, max
end type kdbox
type kdnode
!! Represents a node in the kdtree
private
integer :: split
!! The split dimension of this node
type(kdnode), pointer :: left, right
!! The left and right child nodes
type(kdbox), pointer :: orthotope => null()
!! The bounding hyperrectangle of this node
end type kdnode
type kdtree
!! Represents the kdtree itself
real, pointer :: set(:,:) => null()
!! The input (sorted, row-major) data set associated with this kd tree
type(kdnode), pointer :: root => null()
!! The root node of the kd tree
integer :: dim = 0
!! The rank of the data set's domain
integer :: num = 0
!! The rank of the data set's range
end type kdtree
contains
function build_Kd_Tree(S) result(kdt)
!! UX to build a balanced k-d tree from a set of sorted data points, \(S\).
!! @warning Note that \(S\) must be input as a *row-major* array, \(\texttt{S(1:dim, 1:num)}\),
!! NOT in Fortran's usual column-major notation.
use statistics, only: median
use assert, only: assert_x_is_ge_y
real, target :: S(:,:)
type(kdtree), pointer :: kdt
! Store data set and properties in kdtree structure
allocate(kdt)
kdt%set => S
kdt%dim = size(S,1)
kdt%num = size(S,2)
! The number of data points must be at least the number of dimensions.
call assert_x_is_ge_y(kdt%num, kdt%dim)
! The data set is bounded by a _dim_-orthotope. Find these bounds.
! The root's split dimension is chosen as the median of the (sorted) data set;
! the rest of the tree is then built recursively.
kdt%root => build(kdt)
end function build_Kd_Tree
recursive function build(tree) result (node)
type(kdtree), pointer :: tree
type(kdnode), pointer :: node
! Stub: the recursive median-split construction is not implemented yet;
! return a null pointer so the function result is at least well-defined.
node => null()
end function build
end module kd_tree
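A rough Python sketch of the intended balanced construction (illustrative only; the names and the (num, dim) layout are assumptions, not the Fortran API above):

```python
import numpy as np

def build_kd(points, depth=0):
    # points: (num, dim) array; returns a nested dict or None.
    if len(points) == 0:
        return None
    axis = depth % points.shape[1]       # cycle through split dimensions
    points = points[np.argsort(points[:, axis])]
    mid = len(points) // 2               # median index -> balanced tree
    return {"point": points[mid], "split": axis,
            "left":  build_kd(points[:mid], depth + 1),
            "right": build_kd(points[mid + 1:], depth + 1)}
```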
|
State Before: R : Type u
S : Type v
a✝ b c d : R
n m : ℕ
inst✝¹ : Ring R
inst✝ : Nontrivial R
a : R
⊢ degree (X - ↑C a) = 1 State After: no goals Tactic: rw [sub_eq_add_neg, ← map_neg C a, degree_X_add_C]
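-- Assembled into one Lean proof (a sketch; assumes `variable {R : Type*} [Ring R]
-- [Nontrivial R]` in scope, as in the trace):
theorem degree_X_sub_C (a : R) : degree (X - C a) = 1 := by
  rw [sub_eq_add_neg, ← map_neg C a, degree_X_add_C]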
|
Or emptied some dull opiate to the drains
|
[STATEMENT]
lemma cri_asym2: "x <\<^sub>p y \<longrightarrow> x \<noteq> y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x<\<^sub>py \<longrightarrow> x \<noteq> y
[PROOF STEP]
by (simp add: cri_less_def)
|
module Language.Elab.Deriving.Selectors
import public Language.Reflection
import public Language.Elab.Syntax
import public Language.Elab.Types
import public Util
%language ElabReflection
{-
This pattern comes up a lot
isNoSugar : UnelabMode -> Bool
isNoSugar (NoSugar _) = True
isNoSugar _ = False
isDefImp : PiInfo t -> Bool
isDefImp (DefImplicit _) = True
isDefImp _ = False
We should have a deriving or conversion for this sort of thing, where you
select the 'true' case and all others are false. e.g.
data Foo : Type where
Bif : Foo
Waf : Int -> Foo
deriveSelectors Foo
results in
isBif : Foo -> Bool
isBif Bif = True
isBif _ = False
isWaf : Foo -> Bool
isWaf (Waf _) = True
isWaf _ = False
-}
-- Is this name pollution? Should we prefer `isCon Waf`
-- e.g. isCon Waf isCon Bif
data Foo : Type where
Bif : Foo
Waf : Int -> Foo
(:::) : Bool -> Bool -> Foo -- not really doable with singular functions, is::: ?
select : TTImp -> Visibility -> Constructor -> Elab ()
select dty vis con = do
let n = mapName ("is" ++) con.name -- isWaf
c = iClaim MW vis [] $ mkTy n `( ~dty -> Bool)
expargs = filter (isExplicitPi . piInfo) con.args
c1 = patClause (iVar n `iApp` foldl (\xs,_ => `(~xs _) ) (iVar con.name) expargs)
`(True)
catchall = patClause `(~(iVar n) _) `(False)
d = iDef n [c1,catchall]
declare [c,d]
pure ()
||| Derives a selector for any non-operator constructor
||| Simply because I'm not sure how to let you type the letters: e.g. is:::
export
deriveSelectors : Visibility -> Name -> Elab ()
deriveSelectors vis tn = do
ti <- makeTypeInfo tn
let cons' = filter (not . isOpName . name) ti.cons
traverse_ (select ti.type vis) cons'
fetchRoot : TTImp -> TTImp
fetchRoot (IApp _ y _) = fetchRoot y
fetchRoot ty = ty
isCon : Name -> Constructor -> Elab Clause
isCon cn con = ?isCon_rhs
-- This is kind of tough, we don't have a good way to say
-- isFooCon (:::) isFooCon Waf at the same time.
-- If we take a Name, e.g.:
-- isFooCon `{{(:::)}} isFooCon `{{Waf}}
-- We don't have a good way to enforce it's valid to use
-- adding Maybe is a bit too much work for the user to deal with
deriveIsCon : Visibility -> Name -> Elab ()
deriveIsCon vis cn = do
ti <- makeTypeInfo cn
let n = mapName (\n => "is" ++ n ++ "Con") ti.name -- isFooCon, need to get Foo from cn
let c = iClaim MW vis [] $ mkTy `{{Foo}} `( Name -> Bool)
let catchall = patClause `(~(iVar n) _) `(False)
defs <- traverse (isCon ti.name) ti.cons
pure ()
%runElab deriveSelectors Private `{{Foo}}
-- %runElab deriveIsCon Private `{{Foo}}
foo1 : Foo -> Bool
foo1 x = isWaf x
foo2 : Foo -> Bool
foo2 x = isBif x
-- foo3 : Foo -> Bool
-- foo3 x = is::: x -- isn't generated
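For comparison, a rough Python analogue of `deriveSelectors` (illustrative only: runtime subclass reflection stands in for elaborator reflection):

```python
from dataclasses import dataclass

class Foo: pass

@dataclass
class Bif(Foo): pass

@dataclass
class Waf(Foo):
    x: int

def derive_selectors(base):
    # One is<Name> predicate per constructor (subclass), mirroring `select`.
    return {f"is{c.__name__}": (lambda c: lambda v: isinstance(v, c))(c)
            for c in base.__subclasses__()}

sel = derive_selectors(Foo)
assert sel["isWaf"](Waf(3)) and not sel["isBif"](Waf(3))
```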
|
theory M
imports TermsAndClauses Sig
begin
subsection{* Well-typed (well-formed) terms, clauses, literals and problems *}
context Signature begin
text{* The type of a term *}
fun tpOf where
"tpOf (Var x) = tpOfV x"
|
"tpOf (Fn f Tl) = resOf f"
(* Well-typed terms *)
fun wt where
"wt (Var x) \<longleftrightarrow> True"
|
"wt (Fn f Tl) \<longleftrightarrow>
wtFsym f \<and> list_all wt Tl \<and> arOf f = map tpOf Tl"
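(* For example, Fn f [Var x] is well-typed iff wtFsym f holds and
   arOf f = [tpOfV x]; variables themselves are always well-typed. *)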
(* Well-typed atoms (atomic formulas) *)
fun wtA where
"wtA (Eq T1 T2) \<longleftrightarrow> wt T1 \<and> wt T2 \<and> tpOf T1 = tpOf T2"
|
"wtA (Pr p Tl) \<longleftrightarrow>
wtPsym p \<and> list_all wt Tl \<and> parOf p = map tpOf Tl"
(* Well-typed literals *)
fun wtL where
"wtL (Pos a) \<longleftrightarrow> wtA a"
|
"wtL (Neg a) \<longleftrightarrow> wtA a"
(* Well-typed clauses *)
definition "wtC \<equiv> list_all wtL"
lemma wtC_append[simp]: "wtC (c1 @ c2) \<longleftrightarrow> wtC c1 \<and> wtC c2"
unfolding wtC_def by simp
(* Well-typed problems *)
definition "wtPB \<Phi> \<equiv> \<forall> c \<in> \<Phi>. wtC c"
lemma wtPB_Un[simp]: "wtPB (\<Phi>1 \<union> \<Phi>2) \<longleftrightarrow> wtPB \<Phi>1 \<and> wtPB \<Phi>2"
unfolding wtPB_def by auto
lemma wtPB_UN[simp]: "wtPB (\<Union> i \<in> I. \<Phi> i) \<longleftrightarrow> (\<forall> i \<in> I. wtPB (\<Phi> i))"
unfolding wtPB_def by auto
lemma wtPB_sappend[simp]:
assumes "wtPB \<Phi>1" and "wtPB \<Phi>2" shows "wtPB (\<Phi>1 @@ \<Phi>2)"
using assms unfolding wtPB_def sappend_def by auto
(* Well-typed substitutions *)
definition "wtSB \<pi> \<equiv> \<forall> x. wt (\<pi> x) \<and> tpOf (\<pi> x) = tpOfV x"
lemma wtSB_wt[simp]: "wtSB \<pi> \<Longrightarrow> wt (\<pi> x)"
unfolding wtSB_def by auto
lemma wtSB_tpOf[simp]: "wtSB \<pi> \<Longrightarrow> tpOf (\<pi> x) = tpOfV x"
unfolding wtSB_def by auto
lemma wt_tpOf_subst:
assumes "wtSB \<pi>" and "wt T"
shows "wt (subst \<pi> T) \<and> tpOf (subst \<pi> T) = tpOf T"
using assms apply(induct T) by (auto simp add: list_all_iff)
lemmas wt_subst[simp] = wt_tpOf_subst[THEN conjunct1]
lemmas tpOf_subst[simp] = wt_tpOf_subst[THEN conjunct2]
lemma wtSB_o:
assumes 1: "wtSB \<pi>1" and 2: "wtSB \<pi>2"
shows "wtSB (subst \<pi>1 o \<pi>2)"
using 2 unfolding wtSB_def using 1 by auto
(* Getting variable terms for given types: *)
definition "getTvars \<sigma>l \<equiv> map Var (getVars \<sigma>l)"
lemma length_getTvars[simp]: "length (getTvars \<sigma>l) = length \<sigma>l"
unfolding getTvars_def by auto
lemma wt_getTvars[simp]: "list_all wt (getTvars \<sigma>l)"
unfolding list_all_length getTvars_def by simp
lemma wt_nth_getTvars[simp]:
"i < length \<sigma>l \<Longrightarrow> wt (getTvars \<sigma>l ! i)"
unfolding getTvars_def by auto
lemma map_tpOf_getTvars[simp]: "map tpOf (getTvars \<sigma>l) = \<sigma>l"
unfolding getTvars_def unfolding list_eq_iff by auto
lemma tpOf_nth_getTvars[simp]:
"i < length \<sigma>l \<Longrightarrow> tpOf (getTvars \<sigma>l ! i) = \<sigma>l ! i"
unfolding getTvars_def by auto
end (* context Signature *)
subsection {* Structures *}
text{* We split a structure into a ``type structure'' that interprets the types
and the rest of the structure that interprets the function and relation symbols. *}
text{* Type structures: *}
locale Tstruct =
fixes intT :: "'tp \<Rightarrow> 'univ \<Rightarrow> bool"
assumes NE_intT: "NE (intT \<sigma>)"
text{* Environment: *}
type_synonym ('tp,'univ) env = "'tp \<Rightarrow> var \<Rightarrow> 'univ"
text{* Structures: *}
locale Struct = Signature wtFsym wtPsym arOf resOf parOf +
Tstruct intT
for wtFsym and wtPsym
and arOf :: "'fsym \<Rightarrow> 'tp list"
and resOf :: "'fsym \<Rightarrow> 'tp"
and parOf :: "'psym \<Rightarrow> 'tp list"
and intT :: "'tp \<Rightarrow> 'univ \<Rightarrow> bool"
+
fixes
intF :: "'fsym \<Rightarrow> 'univ list \<Rightarrow> 'univ"
and intP :: "'psym \<Rightarrow> 'univ list \<Rightarrow> bool"
assumes
intF: "\<lbrakk>wtFsym f; list_all2 intT (arOf f) al\<rbrakk> \<Longrightarrow> intT (resOf f) (intF f al)"
and
dummy: "intP = intP"
begin
text{* Well-typed environment: *}
definition "wtE \<xi> \<equiv> \<forall> x. intT (tpOfV x) (\<xi> x)"
lemma wtTE_intT[simp]: "wtE \<xi> \<Longrightarrow> intT (tpOfV x) (\<xi> x)"
unfolding wtE_def dom_def by auto
(* Picking an element from the domain of a given type: *)
definition "pickT \<sigma> \<equiv> SOME a. intT \<sigma> a"
lemma pickT[simp]: "intT \<sigma> (pickT \<sigma>)"
unfolding pickT_def apply(rule someI_ex) using NE_intT by auto
text{* Picking a well-typed environment: *}
definition
"pickE (xl::var list) al \<equiv>
SOME \<xi>. wtE \<xi> \<and> (\<forall> i < length xl. \<xi> (xl!i) = al!i)"
lemma ex_pickE:
assumes "length xl = length al"
and "distinct xl" and "\<And> i. i < length xl \<Longrightarrow> intT (tpOfV (xl!i)) (al!i)"
shows "\<exists> \<xi>. wtE \<xi> \<and> (\<forall> i < length xl. \<xi> (xl!i) = al!i)"
using assms proof(induct rule: list_induct2)
case Nil show ?case apply(rule exI[of _ "\<lambda> x. pickT (tpOfV x)"])
unfolding wtE_def by auto
next
case (Cons x xl a al)
then obtain \<xi> where 1: "wtE \<xi>" and 2: "\<forall> i < length xl. \<xi> (xl!i) = al!i" by force
def \<xi>' \<equiv> "\<lambda> x'. if x = x' then a else \<xi> x'"
show ?case proof(rule exI[of _ \<xi>'], unfold wtE_def, safe)
fix x' show "intT (tpOfV x') (\<xi>' x')"
using 1 Cons.prems(2)[of 0] unfolding \<xi>'_def by auto
next
fix i assume i: "i < length (x # xl)"
thus "\<xi>' ((x # xl) ! i) = (a # al) ! i"
proof(cases i)
case (Suc j) hence j: "j < length xl" using i by auto
have "\<not> x = (x # xl) ! i" using Suc i Cons.prems(1) by auto
thus ?thesis using Suc using j Cons.prems(1) Cons.hyps 2 unfolding \<xi>'_def by auto
qed(insert Cons.prems(1) Cons.hyps 2, unfold \<xi>'_def, simp)
qed
qed
lemma wtE_pickE_pickE:
assumes "length xl = length al"
and "distinct xl" and "\<And> i. i < length xl \<Longrightarrow> intT (tpOfV (xl!i)) (al!i)"
shows "wtE (pickE xl al) \<and> (\<forall> i. i < length xl \<longrightarrow> pickE xl al (xl!i) = al!i)"
proof-
let ?phi = "\<lambda> \<xi>. wtE \<xi> \<and> (\<forall> i < length xl. \<xi> (xl!i) = al!i)"
show ?thesis unfolding pickE_def apply(rule someI_ex[of ?phi])
using ex_pickE[OF assms] by simp
qed
lemmas wtE_pickE[simp] = wtE_pickE_pickE[THEN conjunct1]
lemma pickE[simp]:
assumes "length xl = length al"
and "distinct xl" and "\<And> i. i < length xl \<Longrightarrow> intT (tpOfV (xl!i)) (al!i)"
and "i < length xl"
shows "pickE xl al (xl!i) = al!i"
using assms wtE_pickE_pickE by auto
definition "pickAnyE \<equiv> pickE [] []"
lemma wtE_pickAnyE[simp]: "wtE pickAnyE"
unfolding pickAnyE_def by (rule wtE_pickE) auto
(* Interpretation of terms: *)
fun int where
"int \<xi> (Var x) = \<xi> x"
|
"int \<xi> (Fn f Tl) = intF f (map (int \<xi>) Tl)"
(* Satisfaction of atoms: *)
fun satA where
"satA \<xi> (Eq T1 T2) \<longleftrightarrow> int \<xi> T1 = int \<xi> T2"
|
"satA \<xi> (Pr p Tl) \<longleftrightarrow> intP p (map (int \<xi>) Tl)"
(* Satisfaction of literals: *)
fun satL where
"satL \<xi> (Pos a) \<longleftrightarrow> satA \<xi> a"
|
"satL \<xi> (Neg a) \<longleftrightarrow> \<not> satA \<xi> a"
(* Satisfaction of clauses: *)
definition "satC \<xi> \<equiv> list_ex (satL \<xi>)"
lemma satC_append[simp]: "satC \<xi> (c1 @ c2) \<longleftrightarrow> satC \<xi> c1 \<or> satC \<xi> c2"
unfolding satC_def by auto
lemma satC_iff_set: "satC \<xi> c \<longleftrightarrow> (\<exists> l \<in> set c. satL \<xi> l)"
unfolding satC_def Bex_set[symmetric] ..
(* satisfaction of problems *)
definition "satPB \<xi> \<Phi> \<equiv> \<forall> c \<in> \<Phi>. satC \<xi> c"
lemma satPB_UN[simp]: "satPB \<xi> (\<Union> i \<in> I. \<Phi> i) \<longleftrightarrow> (\<forall> i \<in> I. satPB \<xi> (\<Phi> i))"
unfolding satPB_def by auto
lemma satPB_sappend[simp]: "satPB \<xi> (\<Phi>1 @@ \<Phi>2) \<longleftrightarrow> satPB \<xi> \<Phi>1 \<or> satPB \<xi> \<Phi>2"
unfolding satPB_def sappend_def by (fastforce simp: satC_append)
definition "SAT \<Phi> \<equiv> \<forall> \<xi>. wtE \<xi> \<longrightarrow> satPB \<xi> \<Phi>"
lemma SAT_UN[simp]: "SAT (\<Union> i \<in> I. \<Phi> i) \<longleftrightarrow> (\<forall> i \<in> I. SAT (\<Phi> i))"
unfolding SAT_def by auto
text{* Soundness of typing w.r.t. interpretation: *}
lemma wt_int:
assumes wtE: "wtE \<xi>" and wt: "wt T"
shows "intT (tpOf T) (int \<xi> T)"
using wt apply(induct T) using wtE
by (auto intro!: intF simp add: list_all2_map_map)
lemma satA_cong:
assumes "\<And>x. x \<in> varsA at \<Longrightarrow> \<xi>1 x = \<xi>2 x"
shows "satA \<xi>1 at \<longleftrightarrow> satA \<xi>2 at"
using assms int_cong[of _ \<xi>1 \<xi>2]
apply(cases at) apply(fastforce intro!: int_cong[of _ \<xi>1 \<xi>2])
apply simp by (metis (hide_lams, mono_tags) map_eq_conv)
lemma satL_cong:
assumes "\<And> x. x \<in> varsL l \<Longrightarrow> \<xi>1 x = \<xi>2 x"
shows "satL \<xi>1 l \<longleftrightarrow> satL \<xi>2 l"
using assms satA_cong[of _ \<xi>1 \<xi>2] by (cases l, auto)
lemma satC_cong:
assumes "\<And> x. x \<in> varsC c \<Longrightarrow> \<xi>1 x = \<xi>2 x"
shows "satC \<xi>1 c \<longleftrightarrow> satC \<xi>2 c"
using assms satL_cong[of _ \<xi>1 \<xi>2] unfolding satC_def varsC_def
apply (induct c) by (fastforce intro!: satL_cong[of _ \<xi>1 \<xi>2])+
lemma satPB_cong:
assumes "\<And> x. x \<in> varsPB \<Phi> \<Longrightarrow> \<xi>1 x = \<xi>2 x"
shows "satPB \<xi>1 \<Phi> \<longleftrightarrow> satPB \<xi>2 \<Phi>"
by (force simp: satPB_def varsPB_def intro!: satC_cong ball_cong assms)
lemma int_o:
"int (int \<xi> o \<rho>) T = int \<xi> (subst \<rho> T)"
apply(induct T) apply simp_all unfolding list_all_iff o_def
using map_ext by (metis (lifting, no_types))
lemmas int_subst = int_o[symmetric]
lemma int_o_subst:
"int \<xi> o subst \<rho> = int (int \<xi> o \<rho>)"
apply(rule ext) apply(subst comp_def) unfolding int_o[symmetric] ..
lemma satA_o:
"satA (int \<xi> o \<rho>) at = satA \<xi> (substA \<rho> at)"
by (cases at, simp_all add: int_o_subst int_o[of \<xi> \<rho>])
lemmas satA_subst = satA_o[symmetric]
lemma satA_o_subst:
"satA \<xi> o substA \<rho> = satA (int \<xi> o \<rho>)"
apply(rule ext) apply(subst comp_def) unfolding satA_o[symmetric] ..
lemma satL_o:
"satL (int \<xi> o \<rho>) l = satL \<xi> (substL \<rho> l)"
using satA_o[of \<xi> \<rho>] by (cases l, simp_all)
lemmas satL_subst = satL_o[symmetric]
lemma satL_o_subst:
"satL \<xi> o substL \<rho> = satL (int \<xi> o \<rho>)"
apply(rule ext) apply(subst comp_def) unfolding satL_o[symmetric] ..
lemma satC_o:
"satC (int \<xi> o \<rho>) c = satC \<xi> (substC \<rho> c)"
using satL_o[of \<xi> \<rho>] unfolding satC_def substC_def by (induct c, auto)
lemmas satC_subst = satC_o[symmetric]
lemma satC_o_subst:
"satC \<xi> o substC \<rho> = satC (int \<xi> o \<rho>)"
apply(rule ext) apply(subst comp_def) unfolding satC_o[symmetric] ..
lemma satPB_o:
"satPB (int \<xi> o \<rho>) \<Phi> = satPB \<xi> (substPB \<rho> \<Phi>)"
using satC_o[of \<xi> \<rho>] unfolding satPB_def substPB_def by auto
lemmas satPB_subst = satPB_o[symmetric]
lemma satPB_o_subst:
"satPB \<xi> o substPB \<rho> = satPB (int \<xi> o \<rho>)"
apply(rule ext) apply(subst comp_def) unfolding satPB_o[symmetric] ..
lemma wtE_o:
assumes 1: "wtE \<xi>" and 2: "wtSB \<rho>"
shows "wtE (int \<xi> o \<rho>)"
unfolding wtE_def proof
fix x have 0: "tpOfV x = tpOf (\<rho> x)" using 2 by auto
show "intT (tpOfV x) ((int \<xi> \<circ> \<rho>) x)" apply(subst 0) unfolding comp_def
apply(rule wt_int[OF 1]) using 2 by auto
qed
(* fixme: unify compE and int \<xi> o \<rho>, since they are the same *)
definition "compE \<rho> \<xi> x \<equiv> int \<xi> (\<rho> x)"
lemma wtE_compE:
assumes "wtSB \<rho>" and "wtE \<xi>" shows "wtE (compE \<rho> \<xi>)"
unfolding wtE_def using assms wt_int
unfolding wtSB_def compE_def by fastforce
lemma compE_upd: "compE (\<rho> (x := T)) \<xi> = (compE \<rho> \<xi>) (x := int \<xi> T)"
unfolding compE_def[abs_def] by auto
end (* context Struct *)
context Signature begin
(* The function symbols of: *)
fun fsyms where
"fsyms (Var x) = {}"
|
"fsyms (Fn f Tl) = {f} \<union> (\<Union> set (map fsyms Tl))"
fun fsymsA where
"fsymsA (Eq T1 T2) = fsyms T1 \<union> fsyms T2"
|
"fsymsA (Pr p Tl) = \<Union> set (map fsyms Tl)"
fun fsymsL where
"fsymsL (Pos at) = fsymsA at"
|
"fsymsL (Neg at) = fsymsA at"
definition "fsymsC c = \<Union> set (map fsymsL c)"
definition "fsymsPB \<Phi> = \<Union> {fsymsC c | c. c \<in> \<Phi>}"
lemma fsyms_int_cong:
assumes S1: "Struct wtFsym wtPsym arOf resOf intT intF1 intP"
and S2: "Struct wtFsym wtPsym arOf resOf intT intF2 intP"
and 0: "\<And> f. f \<in> fsyms T \<Longrightarrow> intF1 f = intF2 f"
shows "Struct.int intF1 \<xi> T = Struct.int intF2 \<xi> T"
using 0 proof(induct T)
case (Fn f Tl)
hence 1: "map (Struct.int intF1 \<xi>) Tl = map (Struct.int intF2 \<xi>) Tl"
unfolding list_all_iff map_ext by auto
show ?case
using Fn Struct.int.simps[OF S1, of \<xi>] Struct.int.simps[OF S2, of \<xi>] apply simp
using 1 by metis
qed (auto simp: Struct.int.simps[OF S1, of \<xi>] Struct.int.simps[OF S2, of \<xi>])
lemma fsyms_satA_cong:
assumes S1: "Struct wtFsym wtPsym arOf resOf intT intF1 intP"
and S2: "Struct wtFsym wtPsym arOf resOf intT intF2 intP"
and 0: "\<And> f. f \<in> fsymsA at \<Longrightarrow> intF1 f = intF2 f"
shows "Struct.satA intF1 intP \<xi> at \<longleftrightarrow> Struct.satA intF2 intP \<xi> at"
using 0 fsyms_int_cong[OF S1 S2]
apply(cases at)
apply(fastforce intro!: fsyms_int_cong[OF S1 S2, of _ \<xi>]
simp: Struct.satA.simps[OF S1, of \<xi>] Struct.satA.simps[OF S2, of \<xi>])
apply (simp add: Struct.satA.simps[OF S1, of \<xi>] Struct.satA.simps[OF S2, of \<xi>])
by (metis (hide_lams, mono_tags) map_eq_conv)
lemma fsyms_satL_cong:
assumes S1: "Struct wtFsym wtPsym arOf resOf intT intF1 intP"
and S2: "Struct wtFsym wtPsym arOf resOf intT intF2 intP"
and 0: "\<And> f. f \<in> fsymsL l \<Longrightarrow> intF1 f = intF2 f"
shows "Struct.satL intF1 intP \<xi> l \<longleftrightarrow> Struct.satL intF2 intP \<xi> l"
using 0 fsyms_satA_cong[OF S1 S2]
by (cases l, auto simp: Struct.satL.simps[OF S1, of \<xi>] Struct.satL.simps[OF S2, of \<xi>])
lemma fsyms_satC_cong:
assumes S1: "Struct wtFsym wtPsym arOf resOf intT intF1 intP"
and S2: "Struct wtFsym wtPsym arOf resOf intT intF2 intP"
and 0: "\<And> f. f \<in> fsymsC c \<Longrightarrow> intF1 f = intF2 f"
shows "Struct.satC intF1 intP \<xi> c \<longleftrightarrow> Struct.satC intF2 intP \<xi> c"
using 0 fsyms_satL_cong[OF S1 S2]
unfolding Struct.satC_def[OF S1] Struct.satC_def[OF S2] fsymsC_def
apply (induct c) by (fastforce intro!: fsyms_satL_cong[OF S1 S2])+
lemma fsyms_satPB_cong:
assumes S1: "Struct wtFsym wtPsym arOf resOf intT intF1 intP"
and S2: "Struct wtFsym wtPsym arOf resOf intT intF2 intP"
and 0: "\<And> f. f \<in> fsymsPB \<Phi> \<Longrightarrow> intF1 f = intF2 f"
shows "Struct.satPB intF1 intP \<xi> \<Phi> \<longleftrightarrow> Struct.satPB intF2 intP \<xi> \<Phi>"
by (force simp: Struct.satPB_def[OF S1] Struct.satPB_def[OF S2] fsymsPB_def
intro!: fsyms_satC_cong[OF S1 S2] ball_cong 0)
lemma fsymsPB_Un[simp]: "fsymsPB (\<Phi>1 \<union> \<Phi>2) = fsymsPB \<Phi>1 \<union> fsymsPB \<Phi>2"
unfolding fsymsPB_def by auto
lemma fsymsC_append[simp]: "fsymsC (c1 @ c2) = fsymsC c1 \<union> fsymsC c2"
unfolding fsymsC_def by auto
lemma fsymsPB_sappend_incl[simp]:
"fsymsPB (\<Phi>1 @@ \<Phi>2) \<subseteq> fsymsPB \<Phi>1 \<union> fsymsPB \<Phi>2"
by (unfold fsymsPB_def sappend_def, fastforce)
lemma fsymsPB_sappend[simp]:
assumes 1: "\<Phi>1 \<noteq> {}" and 2: "\<Phi>2 \<noteq> {}"
shows "fsymsPB (\<Phi>1 @@ \<Phi>2) = fsymsPB \<Phi>1 \<union> fsymsPB \<Phi>2"
proof safe
fix x
{assume "x \<in> fsymsPB \<Phi>1"
then obtain c1 c2 where "x \<in> fsymsC c1" and "c1 \<in> \<Phi>1" and "c2 \<in> \<Phi>2"
using 2 unfolding fsymsPB_def by auto
thus "x \<in> fsymsPB (\<Phi>1 @@ \<Phi>2)" unfolding sappend_def fsymsPB_def by fastforce
}
{assume "x \<in> fsymsPB \<Phi>2"
then obtain c1 c2 where "x \<in> fsymsC c2" and "c1 \<in> \<Phi>1" and "c2 \<in> \<Phi>2"
using 1 unfolding fsymsPB_def by auto
thus "x \<in> fsymsPB (\<Phi>1 @@ \<Phi>2)" unfolding sappend_def fsymsPB_def by fastforce
}
qed(unfold fsymsPB_def sappend_def, fastforce)
lemma Struct_upd:
assumes "Struct wtFsym wtPsym arOf resOf intT intF intP"
and "\<And> al. list_all2 intT (arOf ef) al \<Longrightarrow> intT (resOf ef) (EF al)"
shows "Struct wtFsym wtPsym arOf resOf intT (intF (ef := EF)) intP"
apply default using assms
unfolding Struct_def Struct_axioms_def Tstruct_def by auto
end (* context Signature *)
subsection{* Problems *}
text{* A problem is a potentially infinitary formula in clausal form, i.e.,
a potentially infinite conjunction of clauses. *}
locale Problem = Signature wtFsym wtPsym arOf resOf parOf
for wtFsym wtPsym
and arOf :: "'fsym \<Rightarrow> 'tp list"
and resOf :: "'fsym \<Rightarrow> 'tp"
and parOf :: "'psym \<Rightarrow> 'tp list"
+
fixes \<Phi> :: "('fsym, 'psym) prob"
assumes wt_\<Phi>: "wtPB \<Phi>"
subsection{* Models of a problem *}
text{* Model of a problem: *}
locale Model = Problem + Struct +
assumes SAT: "SAT \<Phi>"
begin
lemma sat_\<Phi>: "wtE \<xi> \<Longrightarrow> satPB \<xi> \<Phi>"
using SAT unfolding SAT_def by auto
end
end
|
# Copyright (c) 2018-2021, Carnegie Mellon University
# See LICENSE for details
ImportAll(paradigms.vector);
Declare(ASPF);
Class(flipBot, PermClass, rec(
def := k -> rec(size := 2*k),
lambda := self >> let(k := self.params[1],
i := Ind(2*k),
Lambda(i, cond(leq(i, k-1), i, i + 1 - 2*imod(i,2))))
));
Class(ASPF, TaggedNonTerminal, rec(
_short_print := true,
abbrevs := [ (alg, tbasis, fbasis) -> Checked(
IsASPAlgebra(alg), IsASPTimeBasis(tbasis), IsASPFreqBasis(fbasis),
[alg, tbasis, fbasis]) ],
isReal := self >> self.params[3].dim() > 1,
dims := self >> let(n:=self.params[1].n, [n, n]),
print := (self, i, is) >> let(base_print := NonTerminal.print,
Print(base_print(self, i, is),
When(self.tags<>[], Print(".withTags(", self.tags, ")")))),
hashAs := self >> let(p := self.params,
t := ObjId(self)(p[1].hashAs(), p[2], p[3].hashAs()).withTags(self.getTags()),
When(self.transposed, t.transpose(), t)),
HashId := self >> [ # NOTE: why is this function necessary?
[ObjId(self.params[1]), self.params[1].n], self.params[2], ObjId(self.params[3]),
self.getTags(), self.transposed ],
norm := self >> CopyFields(self, rec(params :=
[self.params[1], self.params[2].norm(), self.params[3].norm()])),
_scale12 := (self, s1, s2) >> let(n := self.params[1].n, Cond(
n<=2, s1,
IsEvenInt(n), Diag(diagDirsum(fConst(TReal, 2, s1), fConst(TReal, n-2, s2))),
<# else #> Diag(diagDirsum(fConst(TReal, 1, s1), fConst(TReal, n-1, s2))))),
_scale34 := (self, s1, s2) >> let(n := self.params[1].n, Cond(IsEvenInt(n),
s2,
Diag(diagDirsum(fConst(TReal, n-1, s2), fConst(TReal, 1, s1))))),
terminate := self >> let(
A := self.params[1], T := self.params[2], F := self.params[3], s1 := F.scale1d,
n := EvalScalar(A.n),
res := CondPat(self,
[ASPF, XN_min_1, Time_TX, Freq_1, ...], s1 * DFT (n, A.rot),
[ASPF, XN_min_1, Time_TX, Freq_1H, ...], s1 * DFT2(n, A.rot),
[ASPF, XN_plus_1, Time_TX, Freq_1, ...], s1 * DFT3(n, A.rot),
[ASPF, XN_plus_1, Time_TX, Freq_1H, ...], s1 * DFT4(n, A.rot),
[ASPF, XN_skew, @, @(1, [Freq_1,Freq_1H]), ...], let(
aa := EvalScalar(A.a),
p := When(aa > 1/2, J(n), I(n)) * LIJ(n).transpose(),
p * s1 * Cond(ObjId(F)=Freq_1, BSkewDFT3(n, aa, A.rot), BSkewDFT4(n, aa, A.rot)) * T.toX(A)),
[ASPF, XN_min_1, ...],
DirectSum(List(A.rspectrum(), a -> F.from1X(a))) * BRDFT1(n, A.rot) * T.toX(A),
[ASPF, XN_min_1U, ...],
DirectSum(List(A.rspectrum(), a -> F.from1X(a))) * UBRDFT1(n, A.rot) * T.toX(A),
[ASPF, XN_plus_1, ...],
DirectSum(List(A.rspectrum(), a -> F.from1X(a))) * BRDFT3(n, 1/4, A.rot) * T.toX(A),
[ASPF, XN_skew, ...], let(aa := EvalScalar(A.a),
p := When(aa > 1/2, RC(J(n/2)), RC(I(n/2))) * RC(LIJ(n/2).transpose()),
DirectSum(List(A.rspectrum(), s -> F.from1X(s))) * p * BRDFT3(n, aa, A.rot) * T.toX(A)),
Error("not implemented")),
Cond(self.transposed, TerminateSPL(res).transpose(), TerminateSPL(res))),
transpose := self >> Cond(
ObjId(self.params[1]) = XN_min_1 and self.params[2] = Time_TX and self.params[3]=Freq_1(1),
self, # DFT
CopyFields(self, rec(
transposed := not self.transposed,
dimensions := Reversed(self.dimensions) ))),
conjTranspose := self >> Cond(
self.isReal(),
self.transpose(),
CopyFields(self, rec(
params := [self.params[1].conj(), self.params[2], self.params[3]],
transposed := not self.transposed,
dimensions := Reversed(self.dimensions) ))),
normalizedArithCost := self >> let(n := self.params[1].n,
nlogn := n * log(n) / log(2),
When(self.params[3].dim() = 1, floor(5*nlogn), floor(2.5*nlogn)))
));
Class(ASP, rec(
rot := 1,
__call__ := (self, rot) >> WithBases(self, rec(rot := rot, operations := PrintOps)),
print := self >> Print(self.__name__, "(", self.rot, ")"),
DFT := (self, n) >> ASPF(XN_min_1(n,self.rot), Time_TX, Freq_1(1)),
RDFT := (self, n) >> ASPF(XN_min_1(n,self.rot), Time_TX, Freq_E(1,1)),
URDFT := (self, n) >> ASPF(XN_min_1U(n,self.rot), Time_TX, Freq_E(1,1)),
DHT := (self, n) >> ASPF(XN_min_1(n,self.rot), Time_TX, Freq_H(1,1)),
UDHT := (self, n) >> ASPF(XN_min_1U(n,self.rot), Time_TX, Freq_H(1,1)),
BRDFT := (self, n) >> ASPF(XN_min_1(n,self.rot), Time_TX, Freq_T(1,1)),
MBRDFT := (self, n) >> ASPF(XN_min_1(n,self.rot), Time_SX, Freq_S(1,1)), # Murakami BRDFT variant
IRDFT := (self, n) >> ASPF(XN_min_1(n,self.rot), Time_TX, Freq_E(1,2)).transpose(),
IURDFT := (self, n) >> ASPF(XN_min_1U(n,self.rot), Time_TX, Freq_E(1,2)).transpose(),
DFT2 := (self, n) >> ASPF(XN_min_1(n,self.rot), Time_TX, Freq_1H(1)),
RDFT2 := (self, n) >> ASPF(XN_min_1(n,self.rot), Time_TX, Freq_EH(1,1)),
DHT2 := (self, n) >> ASPF(XN_min_1(n,self.rot), Time_TX, Freq_HH(1,1)),
BRDFT2 := (self, n) >> ASPF(XN_min_1(n,self.rot), Time_TX, Freq_TH(1,1)),
SBRDFT2:= (self, n) >> ASPF(XN_min_1(n,self.rot), Time_TX, Freq_THU(1,1)),
DFT3 := (self, n) >> ASPF(XN_plus_1(n,self.rot), Time_TX, Freq_1(1)),
RDFT3 := (self, n) >> ASPF(XN_plus_1(n,self.rot), Time_TX, Freq_E(1,1)),
DHT3 := (self, n) >> ASPF(XN_plus_1(n,self.rot), Time_TX, Freq_H(1,1)),
BRDFT3 := (self, n) >> ASPF(XN_plus_1(n,self.rot), Time_TX, Freq_T(1,1)),
MBRDFT3:= (self, n) >> ASPF(XN_plus_1(n,self.rot), Time_SX, Freq_S(1,1)), # Murakami BRDFT variant
BDFT := (self, n,a) >> ASPF(XN_skew(n, a,self.rot), Time_EX, Freq_1(1)),
rDFT := (self, n,a) >> ASPF(XN_skew(n, a,self.rot), Time_EX, Freq_E(1,1)),
rDFTII := (self, n,a) >> ASPF(XN_skew(n, a,self.rot), Time_EX, Freq_EH(1,1)),
rDHT := (self, n,a) >> ASPF(XN_skew(n, a,self.rot), Time_HX, Freq_H(1,1)),
rDHTII := (self, n,a) >> ASPF(XN_skew(n, a,self.rot), Time_HX, Freq_HH(1,1)),
brDFT := (self, n,a) >> ASPF(XN_skew(n, a,self.rot), Time_SX, Freq_E(1,1)),
brDFTII := (self, n,a) >> ASPF(XN_skew(n, a,self.rot), Time_SX, Freq_EH(1,1)),
brDHT := (self, n,a) >> ASPF(XN_skew(n, a,self.rot), Time_SX, Freq_H(1,1)),
brDHTII := (self, n,a) >> ASPF(XN_skew(n, a,self.rot), Time_SX, Freq_HH(1,1)),
bRDFT3 := (self, n,a) >> ASPF(XN_skew(n,a,self.rot), Time_SX, Freq_S(1,1)),
skewSS := (self, n,a) >> ASPF(XN_skew(n,a,self.rot), Time_SX, Freq_S(1,1)),
skewTT := (self, n,a) >> ASPF(XN_skew(n,a,self.rot), Time_TX, Freq_T(1,1)),
DFT4 := (self, n) >> ASPF(XN_plus_1(n,self.rot), Time_TX, Freq_1H(1)),
RDFT4 := (self, n) >> ASPF(XN_plus_1(n,self.rot), Time_TX, Freq_EH(1,1)),
DHT4 := (self, n) >> ASPF(XN_plus_1(n,self.rot), Time_TX, Freq_HH(1,1)),
BRDFT4 := (self, n,a) >> ASPF(XN_skew(n,a, self.rot), Time_TX, Freq_TH(1,1)),
SBRDFT4:= (self, n,a) >> ASPF(XN_skew(n,a, self.rot), Time_TX, Freq_THU(1,1)),
));
Class(TCodelet, Tagged_tSPL_Container);
_mid := F -> When(ObjId(F)=Freq_1, Freq_E, ObjId(F));
Freq_H.fixbot := k -> Prm(flipBot(k));
Freq_E.fixbot := k -> Diag(BHD(k, 1.0, -1.0));
Freq_1.fixbot := k -> I(2*k);
Freq_S.fixbot := k -> I(2*k);
Freq_T.fixbot := k -> I(2*k);
# NOTE: what is it for others?
# NOTE: explain fixbot
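# nsFiltered filters a list that may still be symbolic: concrete values are
# filtered elementwise, while symbolic lists are returned unfiltered.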
nsFiltered := (lst, func) -> Cond(
IsValue(lst), Filtered(lst.v, func),
IsSymbolic(lst), lst,
Filtered(lst, func)
);
ASPF.isType1 := self >> ObjId(self.params[1]) in [XN_min_1, XN_min_1U];
ASPF.isType3 := self >> ObjId(self.params[1]) in [XN_plus_1];
ASPF.isSkew := self >> ObjId(self.params[1]) in [XN_skew];
ASPF.size := self >> self.params[1].n;
ASPF_Breakdown_Rule := rec(
a := rec(
extraLeftTags := [],
extraRightTags := [],
inplaceTag := false,
maxRadix := -1,
),
apply := (t, C, N) -> C[1],
inplace := (self, x) >> When(self.getA("inplaceTag"), Inplace(x), x),
leftTags := (self, t) >> Concatenation(t.tags, self.getA("extraLeftTags", [])),
forTransposition := true,
);
Class(even, AutoFoldExp, rec(
ev := self >> let(a := self.args[1].ev(),
Cond(not IsInt(a), Error("even(<n>) works only with integer <n>"),
IsEvenInt(a), 1,
0)),
computeType := self >> TInt
));
Class(odd, AutoFoldExp, rec(
ev := self >> let(a := self.args[1].ev(),
Cond(not IsInt(a), Error("odd(<n>) works only with integer <n>"),
IsOddInt(a), 1,
0)),
computeType := self >> TInt
));
Class(firstEltRft, FuncClass, rec(
def := (n,f) -> Checked(IsIntSym(n), IsFunction(f), let(d:=f.domain(), r:=f.range(),
rec(n := n, N := 2*r-odd(n)))),
domain := self >> self.params[1],
range := self >> 2*self.params[2].range() - odd(self.params[1]),
lambda := self >> let(
i := Ind(self.domain()), n := self.params[1], f := self.params[2],
fst := 1+even(n), o := odd(n),
Lambda(i, cond(lt(i,fst), i, imod(i+o, 2) + 2*f.at(idiv(i+o,2)) - o))),
transpose := self >> self.__bases__[1](self.params[1], self.params[2].transpose())
));
Class(ASPF_CT1_DFT_Mat, TaggedNonTerminal, rec(
isAuxNonTerminal := true,
abbrevs := [ k -> Checked(IsPosIntSym(k), [k]) ],
dims := self >> [2*self.params[1], 2*self.params[1]],
isReal := self >> true,
terminate := self >> let(k := self.params[1],
mat := Mat([[1,1],[-E(4), E(4)]]),
res := L(2*k, 2) * RC(Tensor(I(k/2), mat) * MM(2,k/2)) * Diag(BHD(k,1.0,-1.0)),
Cond(self.transposed, res.transpose(), res)
)
));
NewRulesFor(ASPF_CT1_DFT_Mat, rec(
ASPF_CT1_DFT_Mat_terminate := rec(
applicable := t -> true,
freedoms := t -> [],
child := (t, fr) -> [],
apply := (self, t, C, Nonterms) >> t.terminate()
)
));
NewRulesFor(ASPF, rec(
# Type 1
#
ASPF_CT1_URFT := CopyFields(ASPF_Breakdown_Rule, rec(
applicable := t -> let(n := t.size(),
logic_and(t.isType1(), logic_and(eq(n mod 2, 0), logic_neg(isPrime(idiv(n,2)))))),
freedoms := (self, t) >> let(maxR := self.getA("maxRadix", -1),
[ nsFiltered(divisorsIntNonTriv(t.size()/2), x -> (maxR < 0) or (x <= maxR/2)) ]),
child := (self, t, fr) >> let(
ltags := self.leftTags(t), rtags := self.getA("extraRightTags", []),
A := t.params[1], F := t.params[3], T := t.params[2],
k := fr[1], Fmid := _mid(F), Tmid := Fmid.timeBasis(),
m := A.n / (2*k), j := Ind(m-1), aj := fdiv(j+1, 2*m),
C1 := ASPF(ObjId(A) (2*k, A.rot), Time_TX, F),
C2 := ASPF(XN_skew (2*k, aj, A.rot), Tmid, F),
C3 := ASPF(XN_min_1U(2*m, A.rot), T, Fmid(1,1)),
[ GT(C1.withTags(ltags), fId(Cols(C1)), fId(Rows(C1)), []),
C2.withTags(ltags),
GT(C3.withTags(rtags), GTVec, GTVec, [k]).withTags(t.tags),
InfoNt(j) ]
),
apply := (self, t, C, Nonterms) >> let(
A := t.params[1], F := t.params[3], j := Nonterms[4].params[1],
k := Cols(Nonterms[1])/2, m := Cols(Nonterms[3].params[1])/2,
When(F.dim()=1, Scat(Refl0_u(m, 2*k)), # if transform is complex
RC(Scat(Refl0_u(m, k)))) * # if it is real
DirectSum(
C[1],
IDirSum(j, F.fixbot(k) * C[2])) *
self.inplace(C[3])
)
)),
ASPF_CT1Prm_URFT := CopyFields(~.ASPF_CT1_URFT, rec(
child := (self, t, fr) >> let(
ltags := self.leftTags(t), rtags := self.getA("extraRightTags", []),
A := t.params[1], T := t.params[2], F := t.params[3],
k := fr[1], Fmid := _mid(F), Tmid := Fmid.timeBasis(),
m := div(A.n, 2*k), j := Ind(m-1), aj := fdiv(j+1, 2*m),
C1 := ASPF(ObjId(A) (2*k, A.rot), Time_TX, F),
C2 := ASPF(XN_skew (2*k, aj, A.rot), Tmid, F),
C3 := ASPF(XN_min_1U(2*m, A.rot), T, Fmid(1,1)),
[ GT(C1.withTags(ltags), fId(Cols(C1)), fId(Rows(C1)), []),
C2.withTags(ltags),
GT(C3.withTags(rtags), GTVec, GTPar, [k]).withTags(t.tags),
InfoNt(j) ]
),
apply := (self, t, C, Nonterms) >> let(
A := t.params[1], F := t.params[3], j := Nonterms[4].params[1],
N := t.params[1].n, k := div(Cols(C[1]), 2), tr := Tr(k, 2),
j := Nonterms[4].params[1], m := div(N, 2*k),
fixbot := t.params[3].fixbot(k),
When(F.dim()=1, Scat(Refl0_u(m, 2*k)), # if transform is complex
RC(Scat(Refl0_u(m, k)))) * # if it is real
DirectSum(
C[1] * tr,
IDirSum(j, F.fixbot(k) * C[2] * tr)) *
RC(Tr(k, m)) * C[3]
)
)),
ASPF_CT1_DFT := CopyFields(ASPF_Breakdown_Rule, rec(
applicable := t -> let(n := t.size(),
logic_and(t.isType1(), logic_and(eq(n mod 4, 0), logic_neg(isPrime(idiv(n,4)))))),
freedoms := (self, t) >> let(maxR := self.getA("maxRadix", -1),
[ nsFiltered(divisorsIntNonTriv(t.size()/4), x -> (maxR < 0) or (x <= maxR/2)) ]),
child := (self, t, fr) >> [let(
ltags := self.leftTags(t),
A := t.params[1], F := t.params[3], T := t.params[2],
k := 2*fr[1], Fmid := _mid(F), Tmid := Fmid.timeBasis(),
m := A.n / (2*k), j := Ind(m-1), aj := fdiv(j+1, 2*m),
mat := Mat([[1,1],[-E(4), E(4)]]),
C1 := ASPF(ObjId(A) (2*k, A.rot), Time_TX, F),
C2 := ASPF(XN_skew (2*k, aj, A.rot), Tmid, CopyFields(F, rec(scale2d:=1/2*F.scale2d))),
C3 := UDFT(2*m, A.rot),
P := When(F.dim()=1, Scat(Refl0_u(m, 2*k)), # if transform is complex
RC(Scat(Refl0_u(m, k)))), # if it is real
Pt := When(F.dim()=1, Prm(Refl0_u(m, 2*k)), # if transform is complex
RC(Prm(Refl0_u(m, k)))), # if it is real
self.inplace(
P *
DirectSum(
C1.withTags(ltags) * RC(L(k, 2)),
IDirSum(j, F.fixbot(k) * C2.withTags(ltags) * PushL(ASPF_CT1_DFT_Mat(k)))
) *
Pt
) *
RC(Tensor(I(k/2), C3) *
Tr(2*m,k/2))
)]
)),
ASPF_CT1Odd_RFT := CopyFields(~.ASPF_CT1_URFT, rec(
applicable := t -> let(n := t.size(),
logic_and(t.isType1(), logic_and(logic_neg(isPrime(n)), hasOddDivisors(n)))),
freedoms := (self, t) >> let(maxR := self.getA("maxRadix", -1),
[ nsFiltered(oddDivisorsIntNonTriv(t.size()), x -> (maxR < 0) or (n/x <= maxR/2)) ]),
# m is odd here
child := (self, t, fr) >> [let(
ltags := self.leftTags(t),
A := t.params[1], T := t.params[2], F := t.params[3], N := A.n,
m := fr[1], Fmid := _mid(F), Tmid := Fmid.timeBasis(), mf := (m-1)/2,
k := N / m, j := ind(mf,1), aj := fdiv(j+1, m), Nc := idiv(N+1, 2),
kc:= idiv(k+1,2),
C1 := ASPF(ObjId(A) ( k, A.rot), Time_TX, F).withTags(ltags),
C2 := ASPF(XN_skew (2*k, aj, A.rot), Tmid, F).withTags(ltags),
C3 := ASPF(XN_min_1 ( m, A.rot), T, Fmid(1,1)),
fst := 1 + even(k),
cpx := F.dim()=1,
# rp := fDirsum(fId(1+even(k)), fTensor(Refl0_odd(mf, k, 1), fId(2))),
# When(F.dim()=1, Scat(Refl0_odd(mf, 2*k, 0)), Scat(rp)) *
SUM(
Scat(Cond(cpx, HH(N,k,0,[m]), firstEltRft(k, HH(Nc,kc,0,[m])))) * C1 * Gath(HH(N, k, 0, [1])),
GT(F.fixbot(k) * C2, # reflect = N-2 in BH compensates for fAdd shift
HH(N, 2*k, k, [1, 2*k]),
Cond(cpx, BHH(N, 2*k, 1, [m,1], 2*N),
fCompose(HH(N, N-fst, fst, [1]), fTensor(BHH(Nc-1,k,0,[m,1], N-2), fId(2)))),
[mf])) *
self.inplace(
GT(C3, GTVec, GTVec, [k]).withTags(t.tags))
)]
)),
# ASPF_CT1Inp_URFT := CopyFields(~.ASPF_CT1_URFT, rec(
# child := (self, t, fr) >> let(
# A := t.params[1], T := t.params[2], F := t.params[3], r := A.rot,
# k := fr[1], Fmid := _mid(F), Tmid := Fmid.timeBasis(),
# m := div(A.n, 2*k), j := Ind(m-1), aj := fdiv(j+1, 2*m),
# [ ASPF(ObjId(A) (2*k, r), Time_TX, F).withTags([ANoRecurse()]),
# ASPF(XN_skew (2*k, aj, r), Tmid, F).withTags([ANoRecurse()]),
# ASPF(XN_min_1U(2*m, r), T, Fmid(1,1)),
# InfoNt(j) ]),
# apply := (self, t, C, Nonterms) >> let(
# N := t.params[1].n, k := div(Cols(C[1]), 2),
# j := Nonterms[4].params[1], m := div(N, 2*k),
# RC(Scat(Refl0_u(m, k))) *
# DirectSum(BB(C[1]*Tr(k,2)),
# IDirSum(j, BB(Diag(BHD(k,1,-1))*C[2]*Tr(k,2)*RC(MM(2,k/2))))) *
# RC(Gath(Refl0_u(m, k))) *
# RC(condIOS(k*m, m))*
# Tensor(I(k), C[3]) * Tr(2*m, k)
# )
# )),
# Type 3
#
ASPF_CT3_RFT := CopyFields(ASPF_Breakdown_Rule, rec(
aj := (j, a, m) -> fdiv(j+1/2, 2*m),
libApplicable := t -> logic_and(eq(imod(t.params[1].n, 2),0), logic_neg(isPrime(div(t.params[1].n,2)))),
applicable := t -> let(A := ObjId(t.params[1]), n := t.params[1].n,
A = XN_plus_1 and (IsSymbolic(n) or (n > 4 and IsEvenInt(n)))),
freedoms := (self, t) >> let(n := t.params[1].n, maxR := self.getA("maxRadix", -1),
[ When(IsSymbolic(n), divisorsIntNonTriv(div(n,2)),
Filtered(divisorsIntNonTriv(div(n,2)).ev(), x -> (maxR < 0) or (x <= maxR/2))) ]),
child := (self, t, fr) >> let(
A := t.params[1], T := t.params[2], F := t.params[3], r := A.rot,
k := fr[1], Fmid := _mid(F), Tmid := Fmid.timeBasis(), a := A.a,
m := div(A.n, 2*k), j := Ind(m), aj := self.aj(j, a, m),
C1 := ASPF(XN_skew(2*k, aj, r), Tmid, F),
C2 := ASPF(CopyFields(A, rec(n:=_unwrap(2*m))), T, Fmid(1,1)),
[ C1.withTags(t.tags), GT(C2, GTVec, GTVec, [k]).withTags(t.tags), InfoNt(j) ]),
apply := (self, t, C, Nonterms) >> let(
N := t.params[1].n, k := div(Cols(C[1]), 2),
j := Nonterms[3].params[1], m := div(N, 2*k),
When(t.params[3].dim()=1,
Scat(Refl1(m, 2*k)) * IDirSum(j, C[1]) * C[2],
RC(Scat(Refl1(m, k))) * IDirSum(j, Diag(BHD(k,1.0,-1.0))*C[1]) * C[2]))
)),
# Skew
#
ASPF_CTSkew_RFT := CopyFields(~.ASPF_CT3_RFT, rec(
aj := (j, a, m) -> fdiv(j+a, m),
applicable := t -> let(A := ObjId(t.params[1]), n := t.params[1].n,
A = XN_skew and (IsSymbolic(n) or (n > 4 and IsEvenInt(n)))),
apply := (self, t, C, Nonterms) >> let(
N := t.params[1].n, k := div(Cols(C[1]),2),
j := Nonterms[3].params[1], m := div(N, 2*k),
When(t.params[3].dim()=1,
Tr(m, 2*k) * IDirSum(j, C[1]) * C[2],
RC(Tr(m, k)) * IDirSum(j, C[1]) * C[2]))
))
));
NewRulesFor(ASPF, rec(
ASPF_CT1_URFT_LftInplace := CopyFields(ASPF_Breakdown_Rule, rec(
switch := false,
applicable := t -> let(A := ObjId(t.params[1]), n := t.params[1].n,
A in [XN_min_1, XN_min_1U] and
(IsSymbolic(n) or (n > 2 and IsEvenInt(n))) and
ObjId(t.params[3]) = Freq_1),
freedoms := (self, t) >> let(n := t.params[1].n, maxR := self.getA("maxRadix", -1),
[ Filtered(DivisorsIntNonTrivSym(div(n,2)), x -> (maxR < 0) or (x <= maxR/2)) ]),
child := ASPF_CT1_URFT.child,
apply := (self, t, C, Nonterms) >> let(
N := t.params[1].n, k := Cols(C[1])/2, j := Nonterms[4].params[1],
m := div(N, 2*k),
cplx := ObjId(t.params[3])=Freq_1,
jj := Ind( Int(j.range/2) ),
pp := When(IsOddInt(m), DirectSum(I(1), condM(m-1, (m-1)/2)),
condMp(m, m/2)),
# NOTE: add a GT_IJ' transform (ie with IJ' on either side), and an inplace rule for it
# also GT_IJ transforms (ie with IJ on either side), and an inplace rule for it
When(not cplx, Error("not implemented"), # RC(condKp(div(n,2), k))
Inplace(
DirectSum(
C[1],
When(IsOddInt(m), [], Data(j, V( (j.range-1)/2), C[2])),
When(jj.range = 0, [],
IDirSum(jj,
condM(4*k, 2)*L(4*k,2*k) *
DirectSum(Data(j, jj, C[2]), Data(j, j.range-1-jj, C[2])))))
^ (L(N, m) * Tensor(I(2*k), pp))) *
Grp(L(N, 2*k) * C[3])))
))
));
#
# Base cases
#
NewRulesFor(ASPF, rec(
ASPF_Base2 := rec(
requiredFirstTag := ANoTag,
forTransposition := true,
applicable := t -> t.params[1].n = 2,
apply := (t, C, Nonterms) -> t.terminate()
),
# This rule provides the base case for any complex transform (ie for any of the algebras)
# via the real transform.
#
# The trouble here, is that in different cases there are some 1d spectral components
# in the real transform already, and that makes permutations different.
#
ASPF_SmallCpx := rec(
requiredFirstTag := ANoTag,
forTransposition := true,
a := rec(maxSize := 512),
applicable := (self, t) >> ObjId(t.params[3]) = Freq_1 and t.params[1].n <= self.getA("maxRadix", -1),
children := t -> let(F := t.params[3], s := F.scale1d,
[[ ASPF(t.params[1], t.params[2], Freq_E(s, s)) ]]),
apply := (t, C, Nonterms) -> let(
A := t.params[1], n := EvalScalar(A.n),
bcK := nn -> K(nn, 2) * Tensor(I(nn/2), Mat([[1, E(4)], [1, -E(4)]])),
bcL := nn -> L(nn, 2) * Tensor(I(nn/2), Mat([[1, E(4)], [1, -E(4)]])),
# x^n-1
Cond(ObjId(A) = XN_min_1 and IsEvenInt(n),
When(n=2, I(2), DirectSum(I(1), Z(n/2, 1), I(n/2-1)) * DirectSum(I(2), bcK(n-2))) * C[1],
ObjId(A) = XN_min_1 and IsOddInt(n),
DirectSum(I(1), bcK(n-1)) * C[1],
# x^n+1
ObjId(A) = XN_plus_1 and IsEvenInt(n),
bcK(n) * C[1],
ObjId(A) = XN_plus_1 and IsOddInt(n),
DirectSum(I((n-1)/2), Z((n+1)/2, -1)) * DirectSum(bcK(n-1), I(1)) * C[1],
# skew
ObjId(A) = XN_skew, bcL(n) * C[1]
))
),
ASPF_NonSkew_Base_VecN := rec(
requiredFirstTag := [AVecReg, AVecRegCx],
forTransposition := false,
applicable := t -> let(v:=t.firstTag().v, alg := t.params[1],
ObjId(alg) in [XN_min_1, XN_min_1U, XN_plus_1] and
2 <= alg.n and alg.n <= 2*4*v
),
apply := (t, C, Nonterms) -> VectorizedMatSPL(t.firstTag().isa, t)
),
# need a separate rule because VectorizedMatSPL returns a non-transposeable result,
ASPF_NonSkew_Base_VecN_tr := rec(
requiredFirstTag := [AVecReg, AVecRegCx],
forTransposition := false,
transposed := true,
applicable := t -> let(v:=t.firstTag().v, alg := t.params[1],
ObjId(alg) in [XN_min_1, XN_min_1U, XN_plus_1] and
2 <= alg.n and alg.n <= 2*4*v
),
apply := (t, C, Nonterms) -> VectorizedMatSPL(t.firstTag().isa, t)
),
ASPF_BRDFT3_Base4 := rec(
requiredFirstTag := ANoTag,
forTransposition := true,
applicable := t -> t.hashAs() = ASP.bRDFT3(4,1/16), # skew version, but rule is probably invalid up to a permutation
apply := (t, C, Nonterms) -> let(
rot := t.params[1].rot, a := t.params[1].a, D := Dat1d(TReal, 2),
s := t.params[3].scale2d, dd := When(s=1, I(2), Diag(s, 1)),
Data(D, fPrecompute(FList(TReal, [2*s*cospi(rot*a), s*(4*cospi(rot*a)^2 - 1)])),
L(4,2) * VStack(
F(2) * dd * Mat([[1, 0, -1, 0 ],
[0, 0, 0, -nth(D,0)]]),
F(2) * Mat([[0, s, 0, nth(D,1)],
[0, 0, nth(D,0), 0 ]]))))
),
ASPF_URDFT_Base4 := rec(
requiredFirstTag := ANoTag,
forTransposition := true,
applicable := t -> t.hashAs() = ASP.URDFT(4),
apply := (t, C, Nonterms) -> let(rot := EvalScalar(t.params[1].rot) mod 4,
#NoPull
BB(
Cond(rot=1, I(4), Diag(1,1,1,-1)) *
Tensor(t.params[3].scale2d * F(2), I(2))))
),
ASPF_URDFT_Base4_Vec2 := rec(
requiredFirstTag := [AVecReg, AVecRegCx],
forTransposition := true,
applicable := t -> t.hashAs() = ASP.URDFT(4).withTags(t.tags) and t.firstTag().v = 2,
apply := (t, C, Nonterms) ->
When(EvalScalar(t.params[1].rot)=1,
VTensor(t.params[3].scale2d * F(2), 2),
DirectSum(VBase(I(2), 2), VDiag(FList(TReal, [1,-1]), 2)) *
VTensor(t.params[3].scale2d * F(2), 2))
),
ASPF_RDFT1_Base4 := rec(
requiredFirstTag := ANoTag,
forTransposition := true,
applicable := t -> t.hashAs() = ASP.RDFT(4),
apply := (t, C, Nonterms) -> let(rot := t.params[1].rot,
DirectSum(F(2), When(rot=1, I(2), Diag(1,-1))) *
Tensor(Diag(t.params[3].scale1d, t.params[3].scale2d) * F(2), I(2)))
),
ASPF_RDFT_toPRDFT := rec(
requiredFirstTag := ANoTag,
forTransposition := true,
a := rec(maxSize := false),
applicable := (self, t) >> let(n:=t.params[1].n,
not Is2Power(n) and
(self.getA("maxSize")=false or n <= self.getA("maxSize")) and
When(IsEvenInt(n), t.hashAs() in [ASP.RDFT(n), ASP.URDFT(n)],
t.hashAs() = ASP.RDFT(n))),
freedoms := (self, t) >> [],
child := (self, t, fr) >> [ PRDFT(EvalScalar(t.params[1].n), EvalScalar(t.params[1].rot)) ],
apply := (t, C, Nonterms) -> let(n := EvalScalar(t.params[1].n),
rdft := Perm_CCS(n) * C[1],
When(IsEvenInt(n) and t.hashAs() = ASP.URDFT(n),
DirectSum((1/2)*F(2), I(n-2)) * rdft,
rdft))
),
ASPF_RDFT1_Base4_Vec2 := rec(
requiredFirstTag := [AVecReg, AVecRegCx],
forTransposition := true,
applicable := t -> t.hashAs() = ASP.RDFT(4).withTags(t.tags) and t.firstTag().v = 2,
apply := (t, C, Nonterms) -> let(rot := t.params[1].rot, vt := TVect(TReal, 2),
DirectSum(VBlk( [[ vt.value([1, -1]), vt.value([1,1]) ]], 2) * _VVStack([VTensor(I(1), 2), VIxJ2(2)], 2),
When(rot=1, VBase(I(2), 2), VDiag(FList(TReal, [1,-1]), 2))) *
VTensor(Diag(t.params[3].scale1d, t.params[3].scale2d) * F(2), 2))
),
ASPF_RDFT1_toTRDFT := rec(
requiredFirstTag := [AVecReg, AVecRegCx],
forTransposition := true,
a := rec(maxSize := 512),
applicable := (self, t) >> let(n:=t.params[1].n,
t.hashAs() = ASP.RDFT(n).withTags(t.tags) and n <= self.getA("maxSize", 512)),
freedoms := (self, t) >> [],
child := (self, t, fr) >> [ TRDFT(t.params[1].n, t.params[1].rot).withTags(t.tags) ],
apply := (t, C, Nonterms) -> let(
n := t.params[1].n,
s := Double(t.params[3].scale1d),
s2 := Double(t.params[3].scale2d),
Cond(s=1 and s2=1, C[1],
IsEvenInt(n), Diag(diagDirsum(fConst(TReal, 2, s), fConst(TReal, n-2, s2))) * C[1],
IsOddInt(n), Diag(diagDirsum(fConst(TReal, 1, s), fConst(TReal, n-1, s2))) * C[1]
)
)
),
ASPF_rDFT_Base4 := rec(
requiredFirstTag := ANoTag,
forTransposition := true,
applicable := t -> t.hashAs() = ASP.rDFT(4,1/16),
apply := (t, C, Nonterms) -> let(
a := t.params[1].a,
rot := t.params[1].rot,
s := t.params[3].scale2d,
NoPull( #Diag(1,1,1,-1) *
Tensor(F(2), I(2)) *
DirectSum(s*I(2), RCDiag(fPrecompute(FList(TReal, [s*cospi(rot*a), s*sinpi(rot*a)])))) *
L(4,2)))
),
ASPF_rDFT_toSkewDFT := rec(
requiredFirstTag := [ANoTag, ATwidOnline],
forTransposition := true,
a := rec(maxSize := 512),
applicable := (self, t) >> let(n:=t.params[1].n,
IsEvenInt(n) and n >= 4 and n <= self.getA("maxSize", 512) and t.hashAs().setTags([]) = ASP.rDFT(n,1/16)),
freedoms := (self, t) >> [],
child := (self, t, fr) >> let(alg := t.params[1],
[ SkewDFT(alg.n/2, alg.a, alg.rot).withTags(t.getTags()) ]),
apply := (t, C, Nonterms) -> let(n:=t.params[1].n, s := t.params[3].scale2d,
RC(s * C[1]) * L(n, n/2))
),
ASPF_rDFT_BaseN := rec(
requiredFirstTag := ANoTag,
forTransposition := true,
a := rec(maxSize := 512),
applicable := (self, t) >> let(n:=t.params[1].n,
IsEvenInt(n) and n >= 4 and n <= self.getA("maxSize", 512) and t.hashAs() = ASP.rDFT(n,1/16)),
freedoms := (self, t) >> [],
child := (self, t, fr) >> [ DFT(EvalScalar(t.params[1].n/2), EvalScalar(t.params[1].rot)) ],
apply := (t, C, Nonterms) -> let(
n := t.params[1].n,
a := t.params[1].a,
rot := t.params[1].rot,
s := Double(t.params[3].scale2d), # NOTE: if Double() is not used, vector code uses _mm_set1_epi32(..) instead of _ps(..)
j := Ind(n-2),
exp := rot*a*(fdiv(4,n)),
twid := Lambda(j, cond(neq(imod(j, 2),0), s*sinpi(exp*idiv(j+2,2)), s*cospi(exp*idiv(j+2,2)))),
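# twid interleaves cosine/sine pairs: entry 2m is s*cospi(exp*(m+1)), entry 2m+1 is s*sinpi(exp*(m+1))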
#NoPull
(
RC(C[1]) *
# XXX NOTE: Buf below delays sucking in, terrible hack,
# due to VJamData performance problems, fix it right now
# NOTE: fPrecompute inside diagDirsum causes twiddles to be vpacked on the fly in the code
Buf(RCDiag(diagDirsum(fConst(TReal, 1, s), fConst(TReal, 1, 0.0), #FList(TReal, [s,0]),
fPrecompute(twid)))) *
L(n,n/2)))
),
ASPF_rDHT_BaseN := rec(
# requiredFirstTag := ANoTag,
forTransposition := true,
a := rec(maxSize := 512),
applicable := (self, t) >> let(n:=t.params[1].n,
IsEvenInt(n) and n >= 4 and n <= self.getA("maxSize", 512) and t.setTags([]).hashAs() = ASP.rDHT(n,1/16)),
freedoms := (self, t) >> [],
child := (self, t, fr) >> [ DFT(EvalScalar(t.params[1].n/2), EvalScalar(t.params[1].rot)) ],
apply := (t, C, Nonterms) -> let(
n := t.params[1].n,
a := t.params[1].a,
rot := t.params[1].rot,
s := t.params[3].scale2d,
j := Ind(n-2),
exp := rot*a*(fdiv(4,n)),
twid := Lambda(j, cond(neq(imod(j, 2),0), s*sinpi(exp*idiv(j+2,2)), s*cospi(exp*idiv(j+2,2)))),
NoPull(
Tensor(I(n/2), Diag(1,-1)) *
RC(C[1]) *
DirectSum(s*I(2), RCDiag(fPrecompute(twid))) *
L(n,n/2) * Diag(diagDirsum(fConst(TReal, n/2, 1), fConst(TReal, n/2, -1)))))
),
ASPF_DHT1_Base4 := rec(
# requiredFirstTag := ANoTag,
forTransposition := true,
applicable := t -> t.setTags([]).hashAs() = ASP.DHT(4),
apply := (t, C, Nonterms) -> let(rot := t.params[1].rot,
DirectSum(F(2), F(2)*When(rot=1, I(2), Diag(1,-1))) *
Tensor(Diag(t.params[3].scale1d, t.params[3].scale2d) * F(2), I(2)))
),
ASPF_UDHT1_Base4 := rec(
# requiredFirstTag := ANoTag,
forTransposition := true,
applicable := t -> t.setTags([]).hashAs() = ASP.UDHT(4),
apply := (t, C, Nonterms) -> let(rot := t.params[1].rot,
NoPull(
DirectSum(I(2), F(2)*When(rot=1, I(2), Diag(1,-1))) *
Tensor(Diag(t.params[3].scale1d, t.params[3].scale2d) * F(2), I(2))))
),
ASPF_rDHT_Base4 := rec(
requiredFirstTag := ANoTag,
forTransposition := true,
applicable := t -> t.hashAs() = ASP.rDHT(4,1/16),
apply := (t, C, Nonterms) -> let(
a := t.params[1].a,
rot := t.params[1].rot,
s := t.params[3].scale2d,
NoPull(Diag(1,-1,1,-1) *
Tensor(F(2), I(2)) *
DirectSum(s*I(2), RCDiag(fPrecompute(FList(TReal, [s*cospi(rot*a), s*sinpi(rot*a)])))) *
L(4,2)*Diag(1,1,-1,-1)))),
ASPF_rDFT_Base4_Vec2 := rec(
requiredFirstTag := [AVecReg, AVecRegCx],
forTransposition := true,
applicable := t -> t.hashAs() = ASP.rDFT(4,1/16).withTags(t.tags) and t.firstTag().v = 2,
freedoms := t -> [],
child := (t, fr) -> [TL(4,2,1,1).withTags(t.getTags())],
apply := (t, C, Nonterms) -> let(
a := t.params[1].a,
rot := t.params[1].rot,
s := t.params[3].scale2d,
#VDiag(FList(TReal, [1,1,1,-1]), 2) *
VTensor(F(2), 2) *
C[1] *
VRCDiag(fPrecompute(VData(FList(TReal, [s, s*cospi(rot*a), 0, s*sinpi(rot*a)]), 2)), 2))
),
ASPF_rDFT_Base_VecN_Drop := rec(
requiredFirstTag := [AVecReg, AVecRegCx],
forTransposition := true,
applicable := t -> IsEvenInt(t.params[1].n) and t.hashAs() = ASP.rDFT(t.params[1].n, 1/16).withTags(t.tags),
freedoms := t -> [],
child := (t, fr) -> [ t.withoutFirstTag() ],
apply := (t, C, Nonterms) -> C[1]
),
ASPF_rDFT_Base_VecN := rec(
requiredFirstTag := [AVecReg, AVecRegCx],
forTransposition := false,
applicable := t -> IsEvenInt(t.params[1].n) and t.hashAs() = ASP.rDFT(t.params[1].n, 1/16).withTags(t.tags),
freedoms := t -> [],
child := (t, fr) -> let(
n := t.params[1].n,
a := t.params[1].a,
rot := t.params[1].rot,
s := Double(t.params[3].scale2d), # NOTE: if Double() is not used, vector code uses _mm_set1_epi32(..) instead of _ps(..)
j := Ind(n/2-1),
exp := rot*a*(fdiv(4,n)),
twid := Lambda(j, s*omegapi(exp*(j+1))),
[ TConj(
TRC(DFT(n/2, rot) *
TDiag(fPrecompute(diagDirsum(fConst(TReal, 1, s), twid)))), # NOTE: fPrecompute inside diagDirsum causes twiddles to be vpacked on the fly in the code
fId(n), L(n, n/2)
).withTags(t.tags) ]
),
apply := (t, C, Nonterms) -> C[1]
),
ASPF_rDFT_Base_VecN_tr := rec(
requiredFirstTag := [AVecReg, AVecRegCx],
forTransposition := false,
transposed := true,
applicable := t -> IsEvenInt(t.params[1].n) and t.hashAs() = ASP.rDFT(t.params[1].n, 1/16).withTags(t.tags).transpose(),
freedoms := t -> [],
child := (t, fr) -> let(
n := t.params[1].n,
a := t.params[1].a,
rot := t.params[1].rot,
s := Double(t.params[3].scale2d), # NOTE: if Double() is not used, vector code uses _mm_set1_epi32(..) instead of _ps(..)
j := Ind(n/2-1),
exp := rot*a*(fdiv(4,n)),
twid := Lambda(j, s*omegapi(exp*(j+1))),
[ TConj(
TRC( TDiag(fPrecompute(diagDirsum(fConst(TReal, 1, s), twid))).conjTranspose() *
DFT(n/2, rot).conjTranspose()
), # NOTE: fPrecompute inside diagDirsum causes twiddles to be vpacked on the fly in the code
L(n, 2), fId(n)
).withTags(t.tags) ]
),
apply := (t, C, Nonterms) -> C[1]
),
ASPF_Cpx_rDFT_Base_VecN := rec(
requiredFirstTag := [AVecReg, AVecRegCx],
forTransposition := true,
applicable := t -> IsEvenInt(t.params[1].n) and
ObjId(t.params[1])=XN_skew and t.params[2]=Time_EX and
ObjId(t.params[3])=Freq_1,
freedoms := t -> [],
child := (t, fr) -> let(
n := t.params[1].n,
a := t.params[1].a,
rot := t.params[1].rot,
s := Double(t.params[3].scale1d), # NOTE: if Double() is not used, vector code uses _mm_set1_epi32(..) instead of _ps(..)
j := Ind(n/2-1),
exp := rot*a*(fdiv(4,n)),
twid := Lambda(j, s*omegapi(exp*(j+1))),
mat := Mat([[1, E(4)], [1, -E(4)]]),
[ GT(mat, GTVec, GTVec, [n/2]).withTags(t.tags) *
TConj(
TRC(DFT(n/2, rot) *
TDiag(fPrecompute(diagDirsum(fConst(TReal, 1, s), twid)))), # NOTE: fPrecompute inside diagDirsum causes twiddles to be vpacked on the fly in the code
L(n, 2), L(n, n/2)
).withTags(t.tags) ]
),
apply := (t, C, Nonterms) -> C[1]
),
# ASPF_rDFT_Base4_Vec2b := rec(
# requiredFirstTag := [AVecReg, AVecRegCx],
# forTransposition := true,
# applicable := t -> t.hashAs() = ASP.rDFT(4,1/16).withTags(t.tags) and t.firstTag().v = 2,
# apply := (t, C, Nonterms) -> let(
# a := t.params[1].a,
# rot := t.params[1].rot,
# s := t.params[3].scale2d,
# VTensor(F(2), 2) *
# DirectSum(VBase(I(2), 2), VBase(J(2), 2)*VDiag(FList(TReal, [1,-1]), 2)) *
# VTensor(Mat([[1,0],[0,1]]),2) *
# VRCDiag(fPrecompute(VData(FList(TReal, [s, s*sinpi(rot*a), 0, -s*cospi(rot*a)]), 2)), 2))
# ),
ASPF_RDFT3_Base4 := rec(
requiredFirstTag := ANoTag,
forTransposition := true,
applicable := t -> t.hashAs() = ASP.RDFT3(4),
apply := (t, C, Nonterms) -> let(
D := Dat1d(TReal, 2), d := Diag(1,-1), s := t.params[3].scale2d,
rot := t.params[1].rot, pm := s*sinpi(rot*1/2), # pm = +/- s
NoPull(
Data(D, fPrecompute(FList(TReal, [s*cospi(rot*1/4), s*sinpi(rot*1/4) ])),
L(4,2) *
DirectSum(F(2)*Diag(1,nth(D,0)), d*F(2)*Diag(1, nth(D,1))) *
Mat([[s, 0, 0, 0 ],
[0, 1, 0, -1 ],
[0, 0, pm, 0 ],
[0, 1, 0, 1 ]]))))
)
));
|
Set Automatic Coercions Import.
Require Import util c_util flow stability containers Morphisms.
Require EquivDec.
Set Implicit Arguments.
Open Local Scope CR_scope.
Record System: Type :=
{ Point: CSetoid
; Location: Set
; Location_eq_dec: EquivDec.EqDec Location eq
; State := Location * Point
; locations: ExhaustiveList Location
; NoDup_locations: NoDup locations
; initial: Location * Point -> Prop
(*
; invariant': morpher (@eq Location ==> @cs_eq Point ==> iff)%signature
*)
; invariant: Location * Point -> Prop
; invariant_initial: initial ⊆ invariant
; invariant_mor: Proper ((@eq _) ==> (@cs_eq _) ==> iff) (curry invariant)
(* hm, can't we just use a unary morphism with product setoid equality on State? *)
; invariant_stable: forall s, Stable (invariant s)
; flow: Location -> Flow Point
; guard: Location * Point -> Location -> Prop
; reset: Location -> Location -> Point -> Point
(* this separation of guard and reset seems to cause a problem:
the paper allows several different transitions from a given (l, x) to
some (l', x'); we only allow one! *)
}.
Existing Instance invariant_mor.
Hint Resolve Location_eq_dec locations: typeclass_instances.
Implicit Arguments Build_System [Point Location invariant initial].
Implicit Arguments initial [s].
Implicit Arguments invariant [[s]].
Section transitions_and_reachability.
Variable system: System.
Let State: Type := State system.
Definition location: State -> Location system := fst.
Definition point: State -> Point system := snd.
Definition can_flow l: relation (Point system) := fun p p' =>
exists d: Duration,
(forall t, 0 <= t -> t <= `d -> invariant (l, flow system l p t))%CR
/\ flow system l p (`d) [=] p'.
Definition cont_trans: relation State := fun s s' =>
location s = location s' /\
can_flow (location s) (point s) (point s').
Definition disc_trans: relation State := fun s s' =>
guard system s (location s') /\
reset system (location s) (location s') (point s) = point s' /\
invariant s /\ invariant s'.
Definition trans: relation State
:= fun s s' => disc_trans s s' \/ cont_trans s s'.
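(* A step is thus either a discrete jump (guard satisfied, point updated by reset,
   invariant holding at both ends) or a continuous flow within a single location. *)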
Notation "s ->_C s'" := (cont_trans s s') (at level 70).
Notation "s ->_D s'" := (disc_trans s s') (at level 70).
Notation "s ->_T s'" := (trans s s') (at level 90).
Lemma cont_trans_refl s: invariant s -> s ->_C s.
Proof with auto.
revert s.
intros [l p] H.
split...
exists NonNegCR_zero.
split. intros.
rewrite (curry_eq (@invariant system)),
(snd (CRle_def t 0)), flow_zero...
apply flow_zero.
Qed.
Hint Resolve cont_trans_refl.
Hint Resolve invariant_stable.
Lemma cont_trans_trans: Transitive cont_trans.
Proof with auto.
intros [l p] [l' p'] [l'' p''] [ll' [t [i f]]] [l'l'' [t' [i' f']]].
simpl location in *. simpl point in *. subst.
split...
exists (NonNegCR_plus t t').
destruct t as [t nt]. destruct t' as [t' nt'].
split.
simpl proj1_sig in *.
intros. simpl.
apply (DN_apply (CRle_dec t t0))...
intros [A | B]...
rename t0 into x.
rewrite
(curry_eq (@invariant system)),
(t11 t x),
(flow_additive (flow system l'') p t (x - t)),
f.
apply i'.
rewrite <- (Ropp_def CR_ring_theory t).
apply t2...
rewrite (t11 t t').
rewrite (Radd_assoc CR_ring_theory).
apply t2...
simpl. rewrite flow_additive, f...
Qed.
Hint Resolve cont_trans_trans.
Lemma cont_trans_preserves_location s s': s ->_C s' -> fst s = fst s'.
Proof. intros. destruct H; auto... Qed.
(* hm, the paper distinguishes between R^n and the
subset that is the continuous state space for the HS, and i
seem to recall that flowing could actually end up outside the
latter. i don't see any of this in our definition *)
Definition reachable (s: State): Prop :=
exists i: State, initial i /\ trans_refl_closure.R trans i s.
Definition unreachable (s: State): Prop := ~ reachable s.
Hint Unfold reachable.
Definition trans_kind (b: bool) := if b then disc_trans else cont_trans.
Hint Unfold trans_kind.
Definition reachable_alternating (s: State): Prop :=
exists i: State, initial i /\ alternate trans_kind i s.
Lemma reachable_invariant: reachable ⊆ invariant.
Proof with auto with real.
intros dst [src [src_init src_reach_dst]].
induction src_reach_dst as [ | s [l' p'] [l'' p''] R IH T].
apply invariant_initial...
destruct T as [[_ [_ [_ H]]] | [A [B [C D]]]]...
simpl in A. subst.
unfold In, predicate_container.
rewrite (curry_eq invariant).
rewrite <- D.
apply C...
apply (CRnonNeg_le_zero (`B))...
Qed.
Lemma alternating_reachable: forall s, reachable_alternating s <-> reachable s.
Proof with eauto.
split.
intros [s' [i [b t]]].
exists s'.
split... clear i.
induction t...
apply trans_refl_closure.step with y...
destruct b; [left | right]...
intros [x [H H0]].
exists x. split...
induction H0. exists true...
destruct IHR...
destruct H1; [exists true | exists false]; destruct x...
apply end_with_next with b...
apply end_with_next with b...
apply cont_trans_refl.
apply reachable_invariant.
eauto 20.
inversion_clear H2...
Qed.
End transitions_and_reachability.
Implicit Arguments reachable [[system]].
Implicit Arguments unreachable [[system]].
Hint Unfold cont_trans.
Hint Unfold can_flow.
|
module Formalization.SimplyTypedLambdaCalculus where
import Lvl
open import Numeral.Natural
open import Type as _ using (TYPE)
data Type (B : TYPE) : TYPE₁ where
Base : B → Type(B)
Function : Type(B) → Type(B) → Type(B)
data Term (B : TYPE) : TYPE₁ where
Apply : Term(B) → Term(B) → Term(B)
Abstract : Type(B) → Term(B) → Term(B)
Var : ℕ → Term(B)
Const : B → Term(B)
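-- Example: with de Bruijn indices, the identity function λ(x : β). x on a base
-- type β is written Abstract (Base β) (Var 0).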
module _ {B} where
data _⊢_::_ (Γ : Term(B) → Type(B) → TYPE) : Term(B) → Type(B) → TYPE₁ where
intro : ∀{a}{T} → Γ(a)(T) → (Γ ⊢ a :: T)
-- const : ∀{a}{T} → Γ(a)(T) → (Γ ⊢ a :: T)
abstr : ∀{body}{A B} → (Γ ⊢ body :: B) → (Γ ⊢ Abstract A body :: Function A B)
apply : ∀{f x}{A B} → (Γ ⊢ f :: Function A B) → (Γ ⊢ x :: A) → (Γ ⊢ Apply f x :: B)
{-
A,B ::= Base | A ⟶ B
t ::= k | t t | λ t | Const b
b = true | false
Γ ⊢ Const b : Base
v ::= Const b | λ t
(⊢ t : Base) → ∃ v t ⟶* v
(⊢ t : A) → ∃ v t ⟶* v
Red(A)(t) definition "to be reducible"
Red(Base)(t) = ∃v(t ⟶* v)
Red(A→B)(t) = ∀u. Red(A)(u) → Red(B)(t u)
(t ⟶ t') → Red(A)(t') → Red(A)(t)
• A = Base:
∃v(t* ⟶* v) → ∃v(t ⟶* v)
t → t' ⟶* v
• A = B→C:
∀u. Red(B)(u) → Red(C)(t' u)
to show ∀u. Red(B)(u) → Red(C)(t u)
and just use induction on the first thing mentioned
Red(C)(t' u)
to get
Red(C)(t u)
data ⟶β : Set where
red1 : t ⟶β u
red2 : (λ t) u ⟶β substitute t u
red3 : (t ⟶β t') → ((t u) ⟶β (t' u))
red4 : (u ⟶β u') → (t u ⟶β t u')
⟶β-confluent : Confluent (_⟶β_)
module CallByName where
data ⟶β : Set where
red2 : (λ t) u ⟶β substitute t u
red3 : (t ⟶β t') → ((t u) ⟶β (t' u))
substitute-preservation : (Γ ⊢ ((λ u) u' : A)) → (Γ ⊢ (substitute u u' : A))
β⟶-preservation : (Γ ⊢ (t : A)) → (t ⟶β t') → (Γ ⊢ (t' : A))
Red(Γ)(σ)
Γ = () | Γ.A -- context
σ : ℕ → Term
∀k. Γ ⊢ k : A
Represent variable as a context
σ is a formal definition of "reducible" substitution
Example:
Γ.A ⊢ 0 : A
Γ.A.B ⊢ 1 : A
σ-substitution?:
Red(Γ)(σ) ∧ (Γ ⊢ t:A) → Red(A)(t σ)
Proof by induction on t
• t = Ref k
Red(A)(k σ) = Red(A)(σ(k))
• t = t₀ t₁
t_σ = (t₀ σ) (t₁ σ)
Γ ⊢ t₀ : B → A
Γ ⊢ t₁ : B
by induction hypothesis:
Red(B→A)(t₀ σ)
Red(B)(t₁ σ)
so Red(A)(t₀ σ)(t₁ σ)
• t = λ u
A = B → C
Γ.B ⊢ u : C
To show: Red(B→C)((λ u) σ)
it means: ∀ u', Red(B)(u'), show Red(C)((λu) σ u')
Define:
(_,_) : (σ : ℕ → Term) (u' : Term) : ℕ → Term
(σ,u')(0) = u'
(σ,u')(𝐒 n) = σ(n)
Claim: (λu) σ u' ⟶ u(σ,u')
Γ.B ⊢ (k : T) → Red(T)((σ,u') k)
k=0: Γ.B ⊢ 0 : B
k=n+1: (Γ ⊢ n : T) → (Γ.B ⊢ n+1 : T)
Summary:
This is all to prove ∃v(t ⟶* v) if ⊢ t : Base, which is Red(A)(t)?
Direct proof for application.
Generalize the statement to: (⊢ t : A) → Red(A)(t)
and then to: (Γ ⊢ t:A) → Red(Γ)(σ) → Red(A)(t σ)
Something else: Krivine abstract machine
How to evaluate λ-terms without substitution (defined before LISP eval with substitution)
Term: k | tt | λ t
Closure: t, ρ -- note: first mention of closure in this field? (denoted u, v, …)
Environment: list of closures
Stack: list of closures in stack order (denoted S)
3 components: Term t, Env ρ, Stack S → Term, Env, Stack
t₁ | ρ₁ | S₁ | t₂ | ρ₂ | S₂
λ t | ρ | u : S | t | (ρ,u) | S
t₀ t₁ | ρ | S | t₀ | ρ | (t₁,ρ) : S
0 | ρ,(t,ρ') | S | t | ρ' | S
k+1 | ρ,(t,ρ') | S | k | ρ | S
Those are the transformations/reductions of the machine.
Simplified description of how functional programs are evaluated.
-}
|
Formal statement is: lemma islimpt_approachable_le: "x islimpt S \<longleftrightarrow> (\<forall>e>0. \<exists>x'\<in> S. x' \<noteq> x \<and> dist x' x \<le> e)" for x :: "'a::metric_space" Informal statement is: A point $x$ is a limit point of a set $S$ if and only if for every $\epsilon > 0$, there exists a point $x' \in S$ such that $x' \neq x$ and $d(x', x) \leq \epsilon$.
|
[STATEMENT]
lemma mset_tree_empty: "mset_tree t = {#} \<longleftrightarrow> t = Leaf"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (mset_tree t = {#}) = (t = \<langle>\<rangle>)
[PROOF STEP]
by(cases t) auto
|
module IdrisWeb.DB.SQLite.SQLiteNew
import Effects
import IdrisWeb.DB.SQLite.SQLiteCodes
%link C "sqlite3api.o"
%include C "sqlite3api.h"
%lib C "sqlite3"
%access public
data ConnectionPtr = ConnPtr Ptr
data StmtPtr = PSPtr Ptr
data DBVal = DBInt Int
| DBText String
| DBFloat Float
| DBNull
-- Type synonym for a table
ResultSet : Type
ResultSet = List (List DBVal)
DBName : Type
DBName = String
QueryString : Type
QueryString = String
Column : Type
Column = Int
ArgPos : Type
ArgPos = Int
data BindError = BE ArgPos SQLiteCode
{- Connection-stage resources -}
data SQLiteConnected : Type where
SQLConnection : ConnectionPtr -> SQLiteConnected
{- PreparedStatement resources -}
data BindStep = Binding | Bound
data SQLitePSSuccess : BindStep -> Type where
-- We record potential bind failures within the resource,
-- and branch on the finishBind step. This prevents us from
-- having to branch on every bind, which would be impractical.
SQLitePS : ConnectionPtr -> StmtPtr -> SQLitePSSuccess a
SQLiteBindFail : ConnectionPtr -> StmtPtr -> BindError -> SQLitePSSuccess a
data SQLitePSFail : Type where
PSFail : ConnectionPtr -> SQLitePSFail
data SQLiteFinishBindFail : Type where
SQLiteFBFail : ConnectionPtr -> StmtPtr -> SQLiteFinishBindFail
{- Executing Resources -}
-- Tag used to indicate whether another row may be fetched
data ExecutionResult = ValidRow
| InvalidRow
data SQLiteExecuting : ExecutionResult -> Type where
SQLiteE : ConnectionPtr -> StmtPtr -> SQLiteExecuting a
data QueryError = ConnectionError SQLiteCode
| BindingError BindError
| StatementError SQLiteCode
| ExecError String
| InternalError
instance Show QueryError where
show (ConnectionError code) = "Error connecting to database, code: " ++ (show code)
show (BindingError (BE ap code)) = "Error binding variable, pos: " ++ (show ap) ++ ", code: " ++ (show code)
show (StatementError code) = "Error creating prepared statement, code: " ++ (show code)
show (ExecError err) = err
show (InternalError) = "Internal Error."
data Sqlite : Effect where
-- Opens a connection to the database
OpenDB : DBName -> Sqlite () (Either () SQLiteConnected) (Either QueryError ())
-- Closes the database handle
CloseDB : Sqlite (SQLiteConnected) () ()
-- Prepares a statement, given a basic query string
PrepareStatement : QueryString -> Sqlite (SQLiteConnected) (Either (SQLitePSFail) (SQLitePSSuccess Binding))
(Either QueryError ())
-- Binds arguments to the given argument position
BindInt : ArgPos -> Int -> Sqlite (SQLitePSSuccess Binding) (SQLitePSSuccess Binding) ()
BindFloat : ArgPos -> Float -> Sqlite (SQLitePSSuccess Binding) (SQLitePSSuccess Binding) ()
BindText : ArgPos -> String -> Int -> Sqlite (SQLitePSSuccess Binding) (SQLitePSSuccess Binding) ()
BindNull : ArgPos -> Sqlite (SQLitePSSuccess Binding) (SQLitePSSuccess Binding) ()
-- Checks to see whether all the binds were successful, if not then fails with the bind error
FinishBind : Sqlite (SQLitePSSuccess Binding) (Either SQLiteFinishBindFail (SQLitePSSuccess Bound))
(Maybe QueryError)
-- Executes the statement, and fetches the first row
ExecuteStatement : Sqlite (SQLitePSSuccess Bound) (Either (SQLiteExecuting InvalidRow)
(SQLiteExecuting ValidRow)) StepResult
RowStep : Sqlite (SQLiteExecuting ValidRow) (Either (SQLiteExecuting InvalidRow)
(SQLiteExecuting ValidRow)) StepResult
-- We need two separate operations, but this is entirely non-user-facing due to
-- if_left in the reset wrapper function
ResetFromEnd : Sqlite (SQLiteExecuting InvalidRow)
(Either (SQLiteExecuting InvalidRow)
(SQLiteExecuting ValidRow)) StepResult
Reset : Sqlite (SQLiteExecuting ValidRow) (Either (SQLiteExecuting InvalidRow)
(SQLiteExecuting ValidRow)) StepResult
-- Column access functions
GetColumnName : Column -> Sqlite (SQLiteExecuting ValidRow) (SQLiteExecuting ValidRow) String
GetColumnDataSize : Column -> Sqlite (SQLiteExecuting ValidRow) (SQLiteExecuting ValidRow) Int
GetColumnText : Column -> Sqlite (SQLiteExecuting ValidRow) (SQLiteExecuting ValidRow) String
GetColumnInt : Column -> Sqlite (SQLiteExecuting ValidRow) (SQLiteExecuting ValidRow) Int
-- Finalisation Functions
FinaliseValid : Sqlite (SQLiteExecuting ValidRow) (SQLiteConnected) ()
FinaliseInvalid : Sqlite (SQLiteExecuting InvalidRow) (SQLiteConnected) ()
-- Cleanup functions to handle error states
CleanupPSFail : Sqlite (SQLitePSFail) () ()
CleanupBindFail : Sqlite (SQLiteFinishBindFail) () ()
private
foreignGetError : ConnectionPtr -> IO Int
foreignGetError (ConnPtr ptr) = mkForeign (FFun "idr_errcode" [FPtr] FInt) ptr
private
foreignNextRow : ConnectionPtr -> IO StepResult
foreignNextRow (ConnPtr ptr) =
map stepResult (mkForeign (FFun "sqlite3_step_idr" [FPtr] FInt) ptr)
private
foreignFinalise : ConnectionPtr -> IO ()
foreignFinalise (ConnPtr c) = do mkForeign (FFun "sqlite3_finalize_idr" [FPtr] FInt) c
return ()
private
foreignClose : ConnectionPtr -> IO ()
foreignClose (ConnPtr c) = do mkForeign (FFun "sqlite3_close_idr" [FPtr] FInt) c
return ()
-- That's the painful bit done, since exception branching will allow us to not have to do
-- the ugliness of pass-through handlers
instance Handler Sqlite IO where
handle () (OpenDB file) k = do
ff <- mkForeign (FFun "sqlite3_open_idr" [FString] FPtr) file
is_null <- nullPtr ff
if (not is_null) then k (Right (SQLConnection (ConnPtr ff))) (Right ())
else k (Left ()) (Left (ConnectionError sqlite_ERROR))
handle (SQLConnection (ConnPtr conn) ) CloseDB k = do
mkForeign (FFun "sqlite3_close_idr" [FPtr] FInt) conn
k () ()
handle (SQLConnection (ConnPtr conn)) (PrepareStatement str) k = do
res <- mkForeign (FFun "sqlite3_prepare_idr" [FPtr, FString] FPtr) conn str
is_null <- nullPtr res
if (not is_null) then k (Right (SQLitePS (ConnPtr conn) (PSPtr res))) (Right ())
else do err <- foreignGetError (ConnPtr conn)
k (Left (PSFail (ConnPtr conn))) (Left (StatementError err))
handle (SQLitePS (ConnPtr conn) (PSPtr res)) (BindInt pos i) k = do
res <- mkForeign (FFun "sqlite3_bind_int_idr" [FPtr, FInt, FInt] FPtr) conn pos i
is_null <- nullPtr res
if (not is_null) then k (SQLitePS (ConnPtr conn) (PSPtr res)) ()
else do err <- foreignGetError (ConnPtr conn)
-- putStrLn $ "BindInt error: " ++ (show err)
k (SQLiteBindFail (ConnPtr conn) (PSPtr res) (BE pos err)) ()
handle (SQLitePS (ConnPtr conn) (PSPtr res)) (BindFloat pos f) k = do
res <- mkForeign (FFun "sqlite3_bind_float_idr" [FPtr, FInt, FFloat] FPtr) conn pos f
is_null <- nullPtr res
if (not is_null) then k (SQLitePS (ConnPtr conn) (PSPtr res)) ()
else do err <- foreignGetError (ConnPtr conn)
k (SQLiteBindFail (ConnPtr conn) (PSPtr res) (BE pos err)) ()
handle (SQLitePS (ConnPtr conn) (PSPtr res)) (BindText pos str str_len) k = do
res <- mkForeign (FFun "sqlite3_bind_text_idr" [FPtr, FString, FInt, FInt] FPtr) conn str pos str_len
is_null <- nullPtr res
if (not is_null) then k (SQLitePS (ConnPtr conn) (PSPtr res)) ()
else do err <- foreignGetError (ConnPtr conn)
-- putStrLn $ "BindStr error: " ++ (show err)
k (SQLiteBindFail (ConnPtr conn) (PSPtr res) (BE pos err)) ()
handle (SQLitePS (ConnPtr conn) (PSPtr res)) (BindNull pos) k = do
res <- mkForeign (FFun "sqlite3_bind_null_idr" [FPtr, FInt] FPtr) conn pos
is_null <- nullPtr res
if (not is_null) then k (SQLitePS (ConnPtr conn) (PSPtr res)) ()
else do err <- foreignGetError (ConnPtr conn)
k (SQLiteBindFail (ConnPtr conn) (PSPtr res) (BE pos err)) ()
-- Ok, I lied, we have to do *some* pass-throughs. But they're not terrible.
handle (SQLiteBindFail conn ps be) (BindInt _ _) k = k (SQLiteBindFail conn ps be) ()
handle (SQLiteBindFail conn ps be) (BindText _ _ _) k = k (SQLiteBindFail conn ps be) ()
handle (SQLiteBindFail conn ps be) (BindFloat _ _) k = k (SQLiteBindFail conn ps be) ()
handle (SQLiteBindFail conn ps be) (BindNull _) k = k (SQLiteBindFail conn ps be) ()
-- Finishing binding, reporting any bind errors if they occurred
handle (SQLitePS c p) (FinishBind) k =
k (Right (SQLitePS c p)) Nothing
handle (SQLiteBindFail c ps be) (FinishBind) k =
k (Left (SQLiteFBFail c ps)) (Just (BindingError be))
handle (SQLitePS (ConnPtr c) (PSPtr p)) (ExecuteStatement) k = do
step <- foreignNextRow (ConnPtr c)
case step of
StepComplete => k (Right (SQLiteE (ConnPtr c) (PSPtr p))) step
StepFail => k (Left (SQLiteE (ConnPtr c) (PSPtr p))) step
NoMoreRows => k (Left (SQLiteE (ConnPtr c) (PSPtr p))) step
handle (SQLiteE (ConnPtr c) (PSPtr p)) (RowStep) k = do
step <- foreignNextRow (ConnPtr c)
case step of
StepComplete => k (Right (SQLiteE (ConnPtr c) (PSPtr p))) step
StepFail => k (Left (SQLiteE (ConnPtr c) (PSPtr p))) step
NoMoreRows => k (Left (SQLiteE (ConnPtr c) (PSPtr p))) step
-- Getting values from the current row
handle (SQLiteE (ConnPtr c) (PSPtr p)) (GetColumnName i) k = do
res <- mkForeign (FFun "sqlite3_column_name_idr" [FPtr, FInt] FString) c i
k (SQLiteE (ConnPtr c) (PSPtr p)) res
handle (SQLiteE (ConnPtr c) (PSPtr p)) (GetColumnDataSize i) k = do
res <- mkForeign (FFun "sqlite3_column_bytes_idr" [FPtr, FInt] FInt) c i
k (SQLiteE (ConnPtr c) (PSPtr p)) res
handle (SQLiteE (ConnPtr c) (PSPtr p)) (GetColumnInt i) k = do
res <- mkForeign (FFun "sqlite3_column_int_idr" [FPtr, FInt] FInt) c i
k (SQLiteE (ConnPtr c) (PSPtr p)) res
handle (SQLiteE (ConnPtr c) (PSPtr p)) (GetColumnText i) k = do
res <- mkForeign (FFun "sqlite3_column_text_idr" [FPtr, FInt] FString) c i
k (SQLiteE (ConnPtr c) (PSPtr p)) res
-- Resetting our position
handle (SQLiteE (ConnPtr c) (PSPtr p)) (Reset) k = do
mkForeign (FFun "sqlite3_reset_idr" [FPtr] FInt) c
step <- foreignNextRow (ConnPtr c)
case step of
StepComplete => k (Right (SQLiteE (ConnPtr c) (PSPtr p))) step
StepFail => k (Left (SQLiteE (ConnPtr c) (PSPtr p))) step
NoMoreRows => k (Left (SQLiteE (ConnPtr c) (PSPtr p))) step
handle (SQLiteE (ConnPtr c) (PSPtr p)) (ResetFromEnd) k = do
mkForeign (FFun "sqlite3_reset_idr" [FPtr] FInt) c
step <- foreignNextRow (ConnPtr c)
case step of
StepComplete => k (Right (SQLiteE (ConnPtr c) (PSPtr p))) step
StepFail => k (Left (SQLiteE (ConnPtr c) (PSPtr p))) step
NoMoreRows => k (Left (SQLiteE (ConnPtr c) (PSPtr p))) step
-- Finalising the SQL Statement
handle (SQLiteE c p) (FinaliseValid) k = do
foreignFinalise c
k (SQLConnection c) ()
handle (SQLiteE c p) (FinaliseInvalid) k = do
foreignFinalise c
k (SQLConnection c) ()
handle (PSFail c) CleanupPSFail k = do
foreignClose c
k () ()
handle (SQLiteFBFail c p) CleanupBindFail k = do
foreignFinalise c
foreignClose c
k () ()
SQLITE : Type -> EFFECT
SQLITE t = MkEff t Sqlite
{- User-facing functions -}
openDB : DBName -> EffM IO [SQLITE ()] [SQLITE (Either () SQLiteConnected)]
(Either QueryError ())
openDB name = (OpenDB name)
closeDB : EffM IO [SQLITE (SQLiteConnected)] [SQLITE ()] ()
closeDB = CloseDB
prepareStatement : QueryString -> EffM IO [SQLITE SQLiteConnected]
[SQLITE (Either SQLitePSFail
(SQLitePSSuccess Binding))]
(Either QueryError ())
prepareStatement stmt = (PrepareStatement stmt)
bindInt : ArgPos -> Int -> Eff IO [SQLITE (SQLitePSSuccess Binding)] ()
bindInt pos i = (BindInt pos i)
bindFloat : ArgPos -> Float -> Eff IO [SQLITE (SQLitePSSuccess Binding)] ()
bindFloat pos f = (BindFloat pos f)
bindText : ArgPos -> String -> Eff IO [SQLITE (SQLitePSSuccess Binding)] ()
bindText pos str = (BindText pos str str_len)
where natToInt : Nat -> Int
natToInt Z = 0
natToInt (S k) = 1 + (natToInt k)
str_len : Int
str_len = natToInt (length str)
bindNull : ArgPos -> Eff IO [SQLITE (SQLitePSSuccess Binding)] ()
bindNull pos = (BindNull pos)
finishBind : EffM IO [SQLITE (SQLitePSSuccess Binding)]
[SQLITE (Either SQLiteFinishBindFail (SQLitePSSuccess Bound))]
(Maybe QueryError)
finishBind = FinishBind
nextRow : EffM IO [SQLITE (SQLiteExecuting ValidRow)]
[SQLITE (Either (SQLiteExecuting InvalidRow)
(SQLiteExecuting ValidRow))] StepResult
nextRow = RowStep
reset : EffM IO [SQLITE (Either (SQLiteExecuting InvalidRow) (SQLiteExecuting ValidRow))]
[SQLITE (Either (SQLiteExecuting InvalidRow)
(SQLiteExecuting ValidRow))] StepResult
reset = if_left then ResetFromEnd else Reset
getColumnName : Column -> Eff IO [SQLITE (SQLiteExecuting ValidRow)] String
getColumnName col = (GetColumnName col)
getColumnText: Column -> Eff IO [SQLITE (SQLiteExecuting ValidRow)] String
getColumnText col = (GetColumnText col)
getColumnInt : Column -> Eff IO [SQLITE (SQLiteExecuting ValidRow)] Int
getColumnInt col = (GetColumnInt col)
getColumnDataSize : Column -> Eff IO [SQLITE (SQLiteExecuting ValidRow)] Int
getColumnDataSize col = (GetColumnDataSize col)
finaliseValid : EffM IO [SQLITE (SQLiteExecuting ValidRow)] [SQLITE (SQLiteConnected)] ()
finaliseValid = FinaliseValid
finaliseInvalid : EffM IO [SQLITE (SQLiteExecuting InvalidRow)] [SQLITE (SQLiteConnected)] ()
finaliseInvalid = FinaliseInvalid
--isOne : (a : Type) -> Either a b
finalise : EffM IO [SQLITE (Either (SQLiteExecuting InvalidRow) (SQLiteExecuting ValidRow))]
[SQLITE (SQLiteConnected)] ()
finalise = if_valid then finaliseValid else finaliseInvalid
cleanupPSFail : EffM IO [SQLITE (SQLitePSFail)] [SQLITE ()] ()
cleanupPSFail = CleanupPSFail
cleanupBindFail : EffM IO [SQLITE (SQLiteFinishBindFail)] [SQLITE ()] ()
cleanupBindFail = CleanupBindFail
-- Just makes it a tad nicer to write
executeStatement : EffM IO [SQLITE (SQLitePSSuccess Bound)]
[SQLITE (Either (SQLiteExecuting InvalidRow)
(SQLiteExecuting ValidRow))] StepResult
executeStatement = ExecuteStatement
getQueryError : Either QueryError b -> QueryError
getQueryError (Left qe) = qe
getQueryError _ = InternalError
multiBind' : List (Int, DBVal) -> Eff IO [SQLITE (SQLitePSSuccess Binding)] ()
multiBind' [] = Effects.pure ()
multiBind' ((pos, (DBInt i)) :: xs) = do bindInt pos i
multiBind' xs
multiBind' ((pos, (DBFloat f)) :: xs) = do bindFloat pos f
multiBind' xs
multiBind' ((pos, (DBText t)) :: xs) = do bindText pos t
                                          multiBind' xs
multiBind' ((pos, DBNull) :: xs) = do bindNull pos
                                      multiBind' xs
-- Binds multiple values within a query
multiBind : List (Int, DBVal) ->
EffM IO [SQLITE (SQLitePSSuccess Binding)]
[SQLITE (Either (SQLiteFinishBindFail) (SQLitePSSuccess Bound))]
(Maybe QueryError)
multiBind vals = do
multiBind' vals
finishBind
getRowCount' : StepResult -> EffM IO [SQLITE (Either (SQLiteExecuting InvalidRow) (SQLiteExecuting ValidRow))]
[SQLITE ()]
(Either QueryError Int)
getRowCount' id_res = do
if_valid then do
last_insert_id <- getColumnInt 0
finaliseValid
closeDB
return $ Right last_insert_id
else do finaliseInvalid
closeDB
case id_res of
    NoMoreRows => return $ Left (ExecError "Unable to get row count")
    StepFail => return $ Left (ExecError "Error whilst getting row count")
    _ => return $ Left InternalError -- other step results should not occur here
getBindError : Maybe QueryError -> QueryError
getBindError (Just (BindingError be)) = (BindingError be)
getBindError _ = InternalError
getRowCount : EffM IO [SQLITE (SQLiteConnected)] [SQLITE ()] (Either QueryError Int)
getRowCount = do
let insert_id_sql = "SELECT last_insert_rowid()"
sql_prep_res <- prepareStatement insert_id_sql
if_valid then do
bind_res_2 <- finishBind
if_valid then do
exec_res <- executeStatement
getRowCount' exec_res
else do
let be = getBindError bind_res_2
cleanupBindFail
return $ Left be
else do
cleanupPSFail
return $ Left (getQueryError sql_prep_res)
executeInsert : String ->
String ->
List (Int, DBVal) ->
Eff IO [SQLITE ()] (Either QueryError Int)
executeInsert db_name query bind_vals = do
db_res <- openDB db_name
if_valid then do
ps_res <- prepareStatement query
if_valid then do
bind_res <- multiBind bind_vals
if_valid then do
er_1 <- executeStatement
finalise
case er_1 of
StepFail => do closeDB
return $ Left (ExecError "Error inserting")
Unstarted => do closeDB
return $ Left (ExecError "Internal error: 'Unstarted' after execution")
_ => getRowCount
else do
let be = getBindError bind_res
cleanupBindFail
return $ Left be
else do
cleanupPSFail
return $ Left (getQueryError ps_res)
else
return $ Left (getQueryError db_res)
-- Helper functions for selection from a DB
collectResults : (Eff IO [SQLITE (SQLiteExecuting ValidRow)] (List DBVal)) ->
EffM IO [SQLITE (Either (SQLiteExecuting InvalidRow)
(SQLiteExecuting ValidRow))]
[SQLITE (SQLiteExecuting InvalidRow)] ResultSet
collectResults fn = do
if_valid then do
results <- fn
step_res <- nextRow
xs <- collectResults fn
return $ results :: xs
else return []
-- Convenience function to abstract around some of the boilerplate code.
-- Takes in the DB name, query, a list of (position, variable value) tuples,
-- a function to process the returned data,
executeSelect : String ->
String ->
List (Int, DBVal) ->
(Eff IO [SQLITE (SQLiteExecuting ValidRow)] (List DBVal)) ->
Eff IO [SQLITE ()] (Either QueryError ResultSet)
executeSelect db_name q bind_vals fn = do
conn_res <- openDB db_name
if_valid then do
ps_res <- prepareStatement q
if_valid then do
bind_res <- multiBind bind_vals
if_valid then do
executeStatement
res <- collectResults fn
finaliseInvalid
closeDB
return $ Right res
else do
let be = getBindError bind_res
cleanupBindFail
return $ Left be
else do
cleanupPSFail
return $ Left (getQueryError ps_res)
else
return $ Left (getQueryError conn_res)
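-- Example (a sketch; the `people` database, table and column names are hypothetical):
-- collect one text column per row, then run a parameterised SELECT with it.
getName : Eff IO [SQLITE (SQLiteExecuting ValidRow)] (List DBVal)
getName = do name <- getColumnText 0
             return [DBText name]
selectNames : Eff IO [SQLITE ()] (Either QueryError ResultSet)
selectNames = executeSelect "people.db" "SELECT name FROM people WHERE age > ?;" [(1, DBInt 18)] getName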
-- Helper function for when there's no binding needed to the PS
-- noBinds : EffM IO [SQLITE (
|
theory Chapter16_1_Type
imports DeBruijnEnvironment
begin
datatype type =
Tyvar var
| Arrow type type
| Unit
| Prod type type
| Void
| Sum type type
| Rec type
primrec type_insert :: "var => type => type"
where "type_insert n (Tyvar v) = Tyvar (incr n v)"
| "type_insert n (Arrow t1 t2) = Arrow (type_insert n t1) (type_insert n t2)"
| "type_insert n Unit = Unit"
| "type_insert n (Prod t1 t2) = Prod (type_insert n t1) (type_insert n t2)"
| "type_insert n Void = Void"
| "type_insert n (Sum t1 t2) = Sum (type_insert n t1) (type_insert n t2)"
| "type_insert n (Rec t) = Rec (type_insert (next n) t)"
primrec type_subst :: "type => var => type => type"
where "type_subst e' n (Tyvar v) = (if v = n then e' else Tyvar (subr n v))"
| "type_subst e' n (Arrow t1 t2) = Arrow (type_subst e' n t1) (type_subst e' n t2)"
| "type_subst e' n Unit = Unit"
| "type_subst e' n (Prod t1 t2) = Prod (type_subst e' n t1) (type_subst e' n t2)"
| "type_subst e' n Void = Void"
| "type_subst e' n (Sum t1 t2) = Sum (type_subst e' n t1) (type_subst e' n t2)"
| "type_subst e' n (Rec t) = Rec (type_subst (type_insert first e') (next n) t)"
end
|
-- Membership in unions and intersections of families
-- ==================================================
import data.set
open set
variables {I U : Type}
variables {A : I → set U}
variable {x : U}
-- ----------------------------------------------------
-- Ex. 1. Prove that
-- (x ∈ ⋃ i, A i) ↔ (∃ i, x ∈ A i)
-- ----------------------------------------------------
-- 1st proof
example :
(x ∈ ⋃ i, A i) ↔ (∃ i, x ∈ A i) :=
-- by library_search
mem_Union
-- 2nd proof
example :
(x ∈ ⋃ i, A i) ↔ (∃ i, x ∈ A i) :=
by simp
-- ----------------------------------------------------
-- Ex. 2. Prove that
-- (x ∈ ⋂ i, A i) ↔ (∀ i, x ∈ A i)
-- ----------------------------------------------------
-- 1st proof
example :
(x ∈ ⋂ i, A i) ↔ (∀ i, x ∈ A i) :=
-- by library_search
mem_Inter
-- 2nd proof
example :
(x ∈ ⋂ i, A i) ↔ (∀ i, x ∈ A i) :=
by simp
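-- ----------------------------------------------------
-- Ex. 3 (sketch). A direct consequence of Ex. 1:
-- exhibiting a witness i with x ∈ A i puts x in
-- the union.
-- ----------------------------------------------------

example (i : I) (h : x ∈ A i) :
  x ∈ ⋃ i, A i :=
mem_Union.mpr ⟨i, h⟩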
|
If $a$ and $b$ are real numbers, then $a + b$ is a real number.
|
module FileIOEffect where
open import Effect
import IO.Primitive as IO
open import Data.String using (String)
open import Data.Bool using (Bool ; if_then_else_ ; false ; true)
open import Data.Unit using (⊤ ; tt)
open import Category.Monad using (RawMonad)
open import Level using (zero)
open import Data.List using (List ; _∷_ ; [])
open import Data.List.All using (All ; lookup ; _∷_ ; [])
open import Data.List.Any using (here ; there)
open import Relation.Binary.PropositionalEquality using (_≡_ ; refl)
open import Membership-equality using (_∈_)
open import Data.Product using (Σ ; _,_ ; _×_)
data FileIOState : Set where
opened closed : FileIOState
data FileHandle : Set where
FH : String → FileHandle
data FileIOEff : Effect zero where
`openFile : String → FileIOEff Bool ⊤ λ ok → if ok then FileHandle else ⊤
`closeFile : FileIOEff ⊤ FileHandle λ h → ⊤
-- Should we really use this now?
FileIO : EFFECT zero
FileIO = mkEff ⊤ FileIOEff
IOMonad : RawMonad IO.IO
IOMonad = record { return = IO.return ; _>>=_ = IO._>>=_ {zero} {zero}}
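-- Open a file, and close it only if the open succeeded: the resource type of
-- `openFile (FileHandle on true, ⊤ on false) forces the case split below.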
myOpClose : ∀ {m} → String → EffM m ⊤ (FileIO ∷ []) λ _ → (FileIO ∷ [])
myOpClose file = effect (here refl) (`openFile file) >>= λ
{ true → effect (here refl) `closeFile
; false → return tt}
main : IO.IO ⊤
main = run IO.IO (record { return = IO.return ; _>>=_ = IO._>>=_ }) (myOpClose ".gitignore") myEnv
where
FileIOHandler : Handler IO.IO FileIOEff
FileIOHandler v (`openFile x) k = k true (FH x)
FileIOHandler (FH x) `closeFile k = k tt tt
myEnv : Env IO.IO (FileIO ∷ [])
myEnv = (FileIOHandler , tt) ∷ []
|
State Before: R✝ : Type u
A✝ : Type v
B : Type w
inst✝⁷ : CommSemiring R✝
inst✝⁶ : Semiring A✝
inst✝⁵ : Algebra R✝ A✝
inst✝⁴ : Semiring B
inst✝³ : Algebra R✝ B
S✝ : Subalgebra R✝ A✝
α : Type ?u.1950480
β : Type ?u.1950483
R : Type u_1
A : Type u_2
inst✝² : CommSemiring R
inst✝¹ : CommSemiring A
inst✝ : Algebra R A
S : Subalgebra R A
⊢ RingHom.rangeS (algebraMap { x // x ∈ S } A) = S.toSubsemiring State After: no goals Tactic: rw [algebraMap_eq, Algebra.id.map_eq_id, RingHom.id_comp, ← toSubsemiring_subtype,
Subsemiring.rangeS_subtype]
|
"""
num_chunks(::Val{B})
Determine the number and type of chunks needed to store `B` bits.
"""
function num_chunks(::Val{B}) where {B}
if B ≤ 0
throw(ArgumentError("`B` must be positive!"))
elseif B ≤ 8
return 1, UInt8
elseif B ≤ 16
return 1, UInt16
elseif B ≤ 32
return 1, UInt32
else
return (B - 1) ÷ 64 + 1, UInt64
end
end
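# For example: num_chunks(Val(5)) == (1, UInt8) and num_chunks(Val(70)) == (2, UInt64).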
"""
    check_bitstring_typeparams(::Val{B}, ::Val{N}, ::Type{T})
Check if the number of bits `B` is consistent with the number of chunks `N` and the
chunk type `T`. Throw an error if not.
"""
function check_bitstring_typeparams(::Val{B}, ::Val{N}, ::Type{UInt64}) where {B,N}
if B > N * 64
error("$B bits do not fit into $N 64-bit chunks")
elseif B ≤ (N - 1) * 64
error("$B bits fit into $(N - 1) 64-bit chunks, but $N chunks were provided")
end
end
function check_bitstring_typeparams(::Val{B}, ::Val{1}, ::Type{T}) where {B,T}
if B > sizeof(T) * 8
error("$B bits do not fit into a $(sizeof(T) * 8)-bit chunk")
end
end
function check_bitstring_typeparams(::Val{B}, ::Val{1}, ::Type{UInt64}) where {B}
if B > 64
error("$B bits do not fit into a 64-bit chunk")
end
end
function check_bitstring_typeparams(::Val{B}, ::Val{N}, ::Type{T}) where {B,N,T}
error("Only `UInt64` is supported for multi-bit chunks")
end
"""
BitString{B,N,T<:Unsigned}
Type for storing bitstrings of static size. Holds `B` bits in `N` chunks, where each chunk is
of type `T`.
`N` is chosen automatically to accommodate `B` bits as efficiently as possible.
# Constructors
* `BitString{B,N,T}(::SVector{N,T})`: unsafe constructor. Does not check for ghost bits.
* `BitString{B,N,T}(i::T)`: as above, but sets `i` as the rightmost chunk.
* `BitString{B}(::Integer)`: Convert integer to `BitString`. Integer is truncated to the
correct number of bits.
"""
struct BitString{B,N,T<:Unsigned}
chunks::SVector{N,T}
# This constructor is only to be used internally. It doesn't check for ghost bits.
function BitString{B,N,T}(s::SVector{N,T}) where {B,N,T}
check_bitstring_typeparams(Val(B), Val(N), T)
return new{B,N,T}(s)
end
function BitString{B,N,T}(i::T) where {B,N,T<:Unsigned}
check_bitstring_typeparams(Val(B), Val(N), T)
        return new{B,N,T}(setindex(zero(SVector{N,T}), i, N))
end
end
###
### Basic properties.
###
"""
num_chunks(::Type{<:BitString})
num_chunks(s::BitString)
Number of chunks in bitstring. Equivalent to `length(chunks(s))`.
"""
num_chunks(::Type{<:BitString{<:Any,N}}) where {N} = N
"""
chunk_type(::Type{<:BitString})
chunk_type(s::BitString)
Type of unsigned integer used to store the chunks.
"""
chunk_type(::Type{<:BitString{<:Any,<:Any,T}}) where {T} = T
"""
num_bits(::Type{<:BitString})
num_bits(s::BitString)
Total number of bits stored in bitstring.
"""
num_bits(::Type{<:BitString{B}}) where {B} = B
"""
top_chunk_bits(::Type{<:BitString})
top_chunk_bits(s::BitString)
Number of bits stored in top chunk. Equivalent to `chunk_bits(s, 1)`.
"""
function top_chunk_bits(::Type{<:BitString{B}}) where B
return B % 64 == 0 ? 64 : B % 64
end
for f in (:num_chunks, :chunk_type, :num_bits, :top_chunk_bits)
@eval $f(s::BitString) = $f(typeof(s))
end
"""
chunks(s::BitString)
`SVector` that stores the chunks of `s`.
"""
chunks(s::BitString) = s.chunks
"""
    chunk_bits(::Type{<:BitString}, i)
    chunk_bits(s, i)
Number of bits in the `i`-th chunk of `s`.
"""
chunk_bits(s, i) = chunk_bits(typeof(s), i)
chunk_bits(::Type{<:BitString{B,1}}, _) where {B} = B
function chunk_bits(::Type{S}, i) where {S<:BitString}
return ifelse(i == 1, top_chunk_bits(S), 64)
end
function ghost_bit_mask(::Type{S}) where S<:BitString
T = chunk_type(S)
unused_bits = sizeof(T) * 8 - top_chunk_bits(S)
return ~zero(T) >>> unused_bits
end
"""
remove_ghost_bits(s::BitString)
Remove set bits outside data field if any are present.
See also: [`has_ghost_bits`](@ref).
"""
function remove_ghost_bits(s::S) where {S<:BitString}
mask = ghost_bit_mask(S)
return S(setindex(s.chunks, s.chunks[1] & mask, 1))
end
@inline function remove_ghost_bits(s::S) where {S<:BitString{<:Any,1}}
mask = ghost_bit_mask(S)
return S(chunks(s) .& mask)
end
"""
has_ghost_bits(s::BitString)
Check for bits outside data field.
See also: [`remove_ghost_bits`](@ref).
"""
function has_ghost_bits(s::S) where {S<:BitString}
top = first(chunks(s))
mask = ~zero(UInt64) << top_chunk_bits(S)
return top & mask > 0
end
###
### Alternative/useful constructors. These are not super efficient, but they are safe.
###
function BitString{B}(i::Union{Int128,Int64,Int32,Int16,Int8}) where {B}
return remove_ghost_bits(BitString{B}(unsigned(i)))
end
function BitString{B}(i::Union{UInt64,UInt32,UInt16,UInt8}) where {B}
N, T = num_chunks(Val(B))
s = setindex(zero(SVector{N,T}), T(i), N)
return remove_ghost_bits(BitString{B,N,T}(s))
end
function BitString{B}(i::UInt128) where {B}
N, T = num_chunks(Val(B))
left = i >>> 0x40 % T # left will only be used if T == UInt64 and N > 1
right = i % T
s = ntuple(Val(N)) do i
i == N ? right : i == N - 1 ? left : zero(T)
end
return remove_ghost_bits(BitString{B,N,T}(SVector{N,T}(s)))
end
function BitString{B}(i::BigInt) where {B}
N, T = num_chunks(Val(B))
s = zero(SVector{N,T})
j = N
while i ≠ 0
chunk = i & typemax(T) % T
i >>>= 64 # Can use 64 here, as only 1-chunk addresses can be smaller
s = setindex(s, chunk, j)
j -= 1
end
return remove_ghost_bits(BitString{B,N,T}(s))
end
function Base.zero(S::Type{<:BitString{B}}) where {B}
N, T = num_chunks(Val(B))
BitString{B,N,T}(zero(SVector{N,T}))
end
Base.zero(s::BitString) = zero(typeof(s))
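# Example: a 70-bit value is split across two UInt64 chunks, so
# BitString{70}(UInt128(1) << 64) stores the chunks (0x1, 0x0), i.e. the value 2^64.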
function Base.show(io::IO, s::BitString{B,N}) where {B,N}
str = join(map(i -> repr(i)[3:end], s.chunks), '_')
print(io, "BitString{$B}(big\"0x", str, "\")")
end
Base.bitstring(s::BitString{B}) where {B} = join(bitstring.(s.chunks))[(end - B + 1):end]
###
### Operations on BitStrings
###
for op in (:⊻, :&, :|)
@eval (Base.$op)(l::S, r::S) where S<:BitString = S($op.(l.chunks, r.chunks))
end
Base.:~(s::S) where S<:BitString = remove_ghost_bits(S(.~(s.chunks)))
Base.count_ones(s::BitString) = sum(count_ones, s.chunks)
Base.count_zeros(s::BitString) = num_bits(s) - count_ones(s)
function _trailing(f, s::BitString)
result = 0
i = 0
    # Idea: if a whole chunk consists of the same digit, you have to look at the next one.
# This gets compiled away if N=1
for i in num_chunks(s):-1:1
r = f(s.chunks[i])
result += r
r == chunk_bits(s, i) || break
end
# If top chunk occupies the whole integer, result will always be smaller or equal to B.
if f ≢ trailing_ones && top_chunk_bits(s) ≠ 64
return min(num_bits(s), result)
else
return result
end
end
function _leading(f, s::BitString)
N = sizeof(chunk_type(s)) * 8
# First chunk is a special case - we have to ignore the empty space before the string.
result = min(f(s.chunks[1] << (N - top_chunk_bits(s))), top_chunk_bits(s))
# This gets compiled away if N=1
if num_chunks(s) > 1 && result == top_chunk_bits(s)
for i in 2:num_chunks(s)
r = f(s.chunks[i])
result += r
r == 64 || break
end
end
return result
end
Base.trailing_ones(s::BitString) = _trailing(trailing_ones, s)
Base.trailing_zeros(s::BitString) = _trailing(trailing_zeros, s)
Base.leading_ones(s::BitString) = _leading(leading_ones, s)
Base.leading_zeros(s::BitString) = _leading(leading_zeros, s)
@generated function _right_shift(s::S, k) where {S<:BitString}
N = num_chunks(S)
quote
$(Expr(:meta, :inline))
# equivalent to d, r = divrem(k, 64)
d = k >>> 0x6
r = k & 63
ri = 64 - r
mask = ~zero(UInt64) >>> ri # 2^r-1 # 0b0...01...1 with `r` 1s
c = chunks(s)
@nif $(N + 1) l -> (d < l) l -> (
S(SVector((@ntuple l - 1 k -> zero(UInt64))... ,c[1] >>> r,
(@ntuple $N-l q -> (c[q + 1] >>> r | ((c[q] & mask) << ri)))...
))
) l -> (
return zero(S)
)
end
end
function _left_shift(s::S, k) where {S<:BitString}
result = zeros(MVector{num_chunks(S),UInt64})
# d, r = divrem(k, 64)
d = k >>> 0x6
r = k & 63
shift = s.chunks .<< (r % UInt64)
carry = s.chunks .>>> ((64 - r) % UInt64)
for i in d + 1:length(result)
@inbounds result[i - d] = shift[i] | get(carry, i + 1, zero(UInt64))
end
# This bit removes ghost bits.
result[1] &= ghost_bit_mask(S)
return S(SVector(result))
end
Base.:>>(s::BitString, k) = k ≥ 0 ? _right_shift(s, k) : _left_shift(s, -k)
Base.:<<(s::BitString, k) = k > 0 ? _left_shift(s, k) : _right_shift(s, -k)
Base.:>>>(s::BitString, k) = s >> k
# remove ghost bits must be applied to both because k might be negative.
Base.:>>(s::S, k) where S<:BitString{<:Any,1} = remove_ghost_bits(S(s.chunks .>> k))
Base.:>>(s::S, k::Unsigned) where S<:BitString{<:Any,1} = S(s.chunks .>> k)
Base.:<<(s::S, k) where S<:BitString{<:Any,1} = remove_ghost_bits(S(s.chunks .<< k))
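# Example: shifting right by a whole chunk moves the high chunk down (d = 1, r = 0),
# so (BitString{70}(UInt128(1) << 64)) >> 64 == BitString{70}(1).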
function Base.isless(s1::B, s2::B) where {B<:BitString}
for i in 1:num_chunks(B)
if chunks(s1)[i] ≠ chunks(s2)[i]
return chunks(s1)[i] < chunks(s2)[i]
end
end
return false
end
Base.isodd(s::BitString) = isodd(chunks(s)[end])
Base.iseven(s::BitString) = iseven(chunks(s)[end])
# For compatibility. Changing any of the hashes will slightly change results and make the
# tests fail.
Base.hash(b::BitString{<:Any,1}, h::UInt) = hash(b.chunks[1], h)
Base.hash(b::BitString, h::UInt) = hash(b.chunks.data, h)
"""
partial_left_shift(bs::BitString, i, j)
Shift a part of the bitstring left by one place with boundaries `i < j`.
In a `BoseFS` bitstring, it moves a particle at offset `i` to the position at
offset `j`.
See also: [`excitation`](@ref), [`partial_right_shift`](@ref).
"""
function partial_left_shift(chunk::T, i, j) where {T<:Unsigned}
# Mask of one spanning from i to j
mask = (T(1) << T(j - i + 1) - T(1)) << T(i)
# Shift the part of the string that needs to be shifted, ensure a one is added at the end
# swap shift to move in other direction
#println(bitstring(mask))
shifted_part = ((chunk & mask) << 0x1) & mask
# Leave the rest intact
intact_part = chunk & ~mask
return shifted_part | intact_part | T(1) << T(i)
end
"""
partial_right_shift(bs::BitString, i, j)
Shift a part of the bitstring right by one place with boundaries `i < j`.
In a `BoseFS` bitstring, it moves a particle at offset `j` to the position at
offset `i`.
See also: [`partial_left_shift`](@ref), [`excitation`](@ref).
"""
function partial_right_shift(chunk::T, i, j) where {T<:Unsigned}
# Mask of one spanning from i to j
mask = (T(1) << T(j - i + 1) - T(1)) << T(i)
# Shift the part of the string that needs to be shifted, ensure a one is added at the end
# swap shift to move in other direction
shifted_part = ((chunk & mask) >> 0x1) & mask
# Leave the rest intact
intact_part = chunk & ~mask
#println(lpad("↑" * " "^j, length(bitstring(chunk))))
return shifted_part | intact_part | T(1) << T(j)
end
function partial_left_shift(bs::S, i, j) where {S<:BitString{<:Any,1}}
return S(partial_left_shift(bs.chunks[1], i, j))
end
function partial_right_shift(bs::S, i, j) where {S<:BitString{<:Any,1}}
return S(partial_right_shift(bs.chunks[1], i, j))
end
function partial_left_shift(bs::S, i, j) where {N,S<:BitString{<:Any,N}}
result = MVector(bs.chunks)
lo_idx = N - (i >>> 0x6)
hi_idx = N - (j >>> 0x6)
lo_off = i & 63
hi_off = j & 63
@inbounds if hi_idx == lo_idx
result[hi_idx] = partial_left_shift(result[hi_idx], lo_off, hi_off)
else
# Top part first.
chunk = result[hi_idx]
chunk = partial_left_shift(chunk, 0, hi_off)
# Carry bit.
chunk &= -UInt(1) << 0x1
chunk |= result[hi_idx + 1] >> 63
result[hi_idx] = chunk
idx = hi_idx + 1
while idx < lo_idx
chunk = result[idx]
chunk <<= 0x1
chunk |= result[idx + 1] >> 63
result[idx] = chunk
idx += 1
end
# Bottom part.
chunk = result[lo_idx]
chunk = partial_left_shift(chunk, lo_off, 64)
result[lo_idx] = chunk
end
return S(SVector(result))
end
function partial_right_shift(bs::S, i, j) where {N,S<:BitString{<:Any,N}}
result = MVector(bs.chunks)
lo_idx = N - (i >>> 0x6)
hi_idx = N - (j >>> 0x6)
lo_off = i & 63
hi_off = j & 63
@inbounds if hi_idx == lo_idx
result[hi_idx] = partial_right_shift(result[hi_idx], lo_off, hi_off)
else
# Bottom first
chunk = result[lo_idx]
chunk = partial_right_shift(chunk, lo_off, 64)
# Carry bit.
chunk &= -UInt(1) >> 0x1
chunk |= result[lo_idx - 1] << 63
result[lo_idx] = chunk
idx = lo_idx - 1
while idx > hi_idx
chunk = result[idx]
chunk >>= 0x1
chunk |= result[idx - 1] << 63
result[idx] = chunk
idx -= 1
end
# Top part.
chunk = result[hi_idx]
chunk = partial_right_shift(chunk, 0, hi_off)
result[hi_idx] = chunk
end
return S(SVector(result))
end
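# NOTE: unimplemented; presumably intended to build a mask of ones spanning
# offsets i to j, matching the chunk-level masks used in the partial shifts
# above (an assumption about intent).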
function bit_mask(bs::BitString, i, j)
end
|
import os, sys, json
import numpy as np
from collections import defaultdict
dataset = sys.argv[1]
### Paths
# Location of raw dataset (HDF5 format)
dataset_folder = "./datasets"
# Output folder
output_folder = "workflow_output"
dataset_output_folder = os.path.join(output_folder, dataset)
# Param file
param_file = os.path.join(output_folder, dataset, dataset + '.params.txt')
### Parameters
with open(param_file) as f:
    params = json.load(f)
###
print(params.keys())
print(params["falconn_knn_graph_trunc_py"])
n = params["num_points"]
partition = []
with open(params["kahip_output"], "r") as f:
    for line in f:
        partition.append(line.strip())
if len(partition) != n:
    raise Exception("wrong length")
counter = defaultdict(int)
for x in partition:
    counter[x] += 1
print(counter)
knn_graph = np.load(params["falconn_knn_graph_trunc_py"])
print(knn_graph.shape)
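# Two partition-quality metrics follow:
#  - the fraction of KNN-graph edges whose endpoints fall in different parts, and
#  - the average size of the part containing a point (raw and normalized by n).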
cut_size = 0
total_p2 = 0
for i in range(knn_graph.shape[0]):
    total_p2 += counter[partition[i]]
    for j in range(knn_graph.shape[1]):
        if partition[i] != partition[knn_graph[i][j]]:
            cut_size += 1
print(float(cut_size) / float(knn_graph.shape[0] * knn_graph.shape[1]))
print(float(total_p2) / float(knn_graph.shape[0]), float(total_p2) / (float(knn_graph.shape[0])**2))
|
Happy Halloween: Smiley’s Dog Costumes!
Our first event came up unexpectedly: the Bridge and Tunnel Brewery was just a five-minute walk from our apartment, but we had no costume! I quickly cut an avocado out of Dollar Tree cardboard paper and it was a success! Smiley won some toys and we got a growler for our fun last-minute costume.
You may have noticed the dog pictures on my Instagram, or read the two posts I wrote about our rescue Smiley – you may have even read the viral Dodo article about his rescue story! Our unexpected adoption (it only took two weeks of fostering to realize that Smiley belonged with us forever) changed all our plans. All those crazy Halloween parties we wanted to go to turned into us going to dog-friendly pub events, the dog parade and tonight we’ll be going to the big NYC Halloween Parade with Smiley in tow.
We ended up going to another last-minute event, the Tompkins Square dog parade, so Smiley wore his same avocadog costume and got outshined by popes and other crazy, intricate, creative costumes! It was a lot of fun and we’re definitely going again next year with a better costume. Although most dogs wore hot dog and delivery costumes, and they rocked them, as a crafter I was inspired to do something a little more creative!
Tonight, the parade is mainly for people, but Smiley will hopefully steal the show – as long as he actually wears the costume I made him and it doesn’t fall apart. Stay tuned for photos of tonight’s shenanigans.
Two days ago we sent a $300 adoption fee to Pound Hounds Res-Q, the place that pulled Smiley from Brooklyn’s high kill shelter, NYACC. We had originally planned on fostering Smiley, the cute 6 year old pit mix who was abandoned because his former owners got pregnant. We were in no position to get a dog – we don’t make enough money and live in a small apartment, but we couldn’t let Smiley die.
We ended up adopting Smiley after just two weeks of fostering, mostly because we fell in love with our foster boy and also because of Pound Hounds Res-Q. In addition to the fee, we also gave a small donation because of everything the rescue has done for us. Donating to rescues helps them save more dogs so I encourage everyone to do the same!
The amazing thing about fostering, is that it’s practically free and saves lives! Shelters such as the NYACC become overcrowded and put animals on the kill list very quickly. They barely get a chance to get adopted! If you foster a dog, a rescue will cover the vet bills and any necessary training. Then you help the dog get decompressed, preferably crate trained and then you help them find a new forever home.
Fostering is a great way to help animals without making a lifetime commitment, that many people can’t do. If we hadn’t ended up adopting Smiley, we would have likely kept fostering, because it has been so rewarding. Let me tell you a bit about it!
Smiley went from the NYACC to the vet to get neutered, then a special service was hired to bring him to us. We had never met him or even seen him in real life! Smiley was friendly and curious but he wouldn’t look up at me or make eye contact. He also didn’t respond to commands, although we were told that he knew to sit, stay and come.
When I brought him inside, he explored the place, sniffing every corner and demanding to know what was behind every door. It took over an hour to get him to slow down and rest on his doggy bed. He lay there for a while until I tried to put his harness on for a walk. He wouldn’t let me put it on and bared his teeth, so I backed off and let him sleep. A few hours later, after my husband came home, he let us put the harness on without any fuss.
On his first walk with us, he pulled like crazy, giving us rope burn. He was so strong and wouldn’t listen at all outside. We immediately ordered a front pulling harness, but got dragged around painfully for three whole days. We also watched some videos on how to get a dogs attention on walks and we tried to implement them, which only half worked.
The first evening while we watched TV, we saw him watching us from his bed. It was the first time he looked at us. Not surprising after being handed off from one person to another for ten days. He was scared and confused.
We crated him that first night as instructed by the rescue. People want to adopt crate trained dogs and we had to try our best, although we didn’t like the idea of him being in a small cage. He barked a bit but quickly went to sleep. The next morning he woke up wagging his tail at us – it was progress!
The second day he acted like a spoiled child, pushing his boundaries. He would jump on the sofa, demand treats and pull me even harder outside. I may have had one or two breakdowns that day because I couldn’t connect with him. I couldn’t see anyone adopting a dog that was this crazy and I also didn’t know how long I could spend with him, but I didn’t want to disappoint him like humans had in the past.
That night he barked more in his crate, and I was stressed knowing that we would have to leave him alone for up to six hours the next day. Everyone reassured me that he would be okay, as long as he wore a cone – he was recently neutered and could rip his stitches.
When we came home after our trip – one we couldn’t cancel, although we had wanted to – we found his cone bent out of shape; he was practically hanging by it because a piece had gotten stuck in his crate. It was around his neck so tight that he coughed when I cut it off. He seemed fine otherwise and extra friendly, but I was traumatized by the experience. I was too scared to put him back in the crate while he had his cone on, so we let him sleep in our bedroom on his bed.
When he woke up that Sunday (we had gotten him in the afternoon on Thursday) he was a completely different dog. He was so calm at home, looked at us, asked us for pats and actually listened when we gave him commands. That day we took him to a beer festival because we didn’t want to leave him home alone.
We had been told that he shouldn’t be around other dogs or kids, so we were extra careful. But he was calm around kids, accepting treats gently, and he wagged his tail when he saw other dogs. We let him sniff a few and it seemed fine; we were starting to doubt everything that the kill shelter had said about him. We found a quiet spot at the chaotic festival and he sat with us, observing. People came over to meet him and he was so friendly and loving to everyone. No one believed that we had just rescued him.
The next day the front-clip harness arrived, and the moment we put it on him, he stopped pulling on walks. He still got distracted outside but he was so much better at walking calmly. We let him sniff more dogs and discovered that he was super friendly but couldn’t tell the difference between dogs that wanted to play and dogs that wanted to fight.
The following week he started to feel at home. We let him sit with us on the couch but still kept him out of bed. He behaved better every day. Except the one time he jumped on the bed when I screamed because I saw a spider – but that’s because I screamed and I assume that he was trying to protect me.
During the week we also let him play with some dogs while leashed. He seemed to get along with everyone, ignoring the dogs who were aggressive and he backed away when an angry cat jumped out at him. He was clearly a good gentle boy!
That second weekend we had him, we took him to an amazing day care, Petbuddy Services, for a trial day. We were nervous to see how he would get along with other dogs but it went really well! After a day of playing with dogs, he was even calmer on walks and less jumpy when he met new dogs.
The day he spent at doggy daycare, we went out but came home before picking Smiley up. Our home felt so empty without him even though he had only been there for ten days! That’s when we decided that we’d be keeping him. But we wanted to wait in case there were any issues with our landlord, although those were unlikely.
On Wednesday we took him to the vet again, and even though the rescue knew we were 99% likely to adopt him, Pound Hounds Res-Q paid his bill. Later that evening, we officially paid his adoption fee and he became a part of our family. The next day, I let him play off leash with a bunch of friendly dogs at the park and he did so well, people didn’t believe me that he was a new rescue.
Since getting Smiley there has been more stress in my life – we need to figure out where he will be while we’re on holiday. We need to worry about him getting sick, hurting himself or feeling lonely when he’s alone at home. I also have a companion at home now, a smiling face that makes me happier and I can’t walk him down the street without him getting compliments left and right. Oh, he also gets me out of the house more and breathing that fresh New York air! I’ve even met a few local dog owners that are quickly becoming friends.
Of course I’m already worrying about Smiley dying one day, but that’s just how my brain works. He has already made my life so much better and more worthwhile. As much as I think that everyone should adopt a dog, or five, immediately, I understand that not everyone is in the position to do so.
Foster if you can’t adopt! Donate to rescues if you want to help but can’t foster. Dogs, and animals in general, bring so much happiness and unconditional love to our hectic lives. They deserve our help and love; they should all feel safe and happy – the same way they make us feel. Please consider donating to Pound Hounds Res-Q today; without them we wouldn’t have this beautiful dog as a part of our family!
We’ve been to Tonsai twice now – once in December and the second time in July. December was hot, dry, lively and exciting! In July it was dead: everything was closed and the weather was miserable! Tonsai is a great place that’s still pretty empty compared to Phi Phi Island and even neighboring beaches.
While it’s the perfect getaway during high (and dry) season, it’s really not enjoyable during the low (monsoon) season – unless you enjoy being one of 10 tourists in a hard-to-get-to spot with only two available restaurants that mainly serve fried food. If you come any other time, you can enjoy a long tail boat ride to the beach and a choice of many restaurants, food vendors and bars; there are lots of people hanging out on the beach or at the hostels, but there are also thieving, mischievous monkeys!
We’re Fostering a Dog Saved from the Kill Shelter!
Smiley was pulled from the NYACC by Pound Hounds Res-Q. We are considering adopting him ourselves because he’s such a good boy. The Dodo wrote about him a week ago with the hopes that his story will inspire others to adopt, foster and rescue dogs in need!
Author: olenakagui · Posted on October 23, 2018 · Categories: Animals and Pets, New York · Tags: dog saved, foster, fostering, kill shelter, Pound Hounds, Pound Hounds Resq, rescue, rescue dog, Smiley
The first time we visited Thailand was during the dry winter months, when it’s hot, overpopulated with tourists, and low tide can ruin boat rides, kayaking and other water adventures. The second time we came in the middle of July, which is right when monsoon season is wreaking havoc on the island and keeping all those selfie-taking tourists away – but clearly not all of them.
A week before we arrived, the famous football camp boys had gotten stuck in a flooded cave and a ferry had sunk, drowning many on board. We didn’t know about this when we booked, and it was quite scary to be there. We got lucky and the weather ended up clearing up, but all ferry and boat rides were rough, scary and puke-inducing.
It’s prime mushroom picking season but it’s quickly cooling down. You might be seeing mushrooms all around you, even in city parks and on the side of highways! There’s still some time to go out and forage before winter chases us indoors for Netflix, hot chocolate and hibernation. However, most mushrooms take a lot of experience to identify, which can be scary and discouraging.
It is also extremely dangerous to eat anything that you’re not certain about. There are many YouTube videos and Facebook groups, but they are not always a reliable way to be sure that you will be safe. Even after you read this article, go talk to experienced mushroom pickers, join a mycological society and always be overly careful.
It is not legal to pick mushrooms everywhere. Ask a police officer, park ranger or at the info center where you can pick them. If you go anywhere else, you risk getting a hefty fine. In NYC it can be up to $250!
Wild mushrooms are not like the ones you buy at the store. Some people might experience an allergy to a specific type even though they are not allergic to others. Some edible mushrooms have skins that certain people might react to with a stomach ache. Although I am lucky to be allergy free and have tolerated everything I’ve tried so far, I’m aware that I might one day eat a perfectly good edible mushroom that makes me feel sick.
Let’s get down to business: there are mushrooms growing everywhere around New York right now, and they happen to taste delicious! The giant puffball (tiny ones are good too) is really easy to identify, grows to be huge so it’s satisfying to find, and it’s a great way to begin your mushroom picking lifestyle.
Puffballs come in round shapes and in various sizes. They don’t have any gills or stems; they grow right out of the ground. The regular kind is small, round, white/grayish/yellowish with tiny bumps, and they get dark and dry when they are ready to release their spores. When they are dry, you can stomp on them and they will puff out dark-colored spores, which is why they are called puffballs.
Giant puffballs don’t taste any better than small ones; arguably they taste worse. But one big puffball can feed a family of three for a day. They can grow to be larger than your head! In their prime they are pure white on the outside and inside.
If they are any other color, or have any markings, be cautious.
Unlike the hen-of-the-woods that I wrote about earlier this week, giant puffballs do have a very poisonous lookalike, but it’s extremely easy to differentiate them! Once you pick a puffball, cut it down the middle from top to bottom; an edible puffball will have firm, purely white flesh without any markings. It should look like sliced mozzarella!
If the mushroom is off-white, it’s an edible kind that’s past its prime. Don’t eat it! There are several stages of a puffball going bad. First, the inside will get yellower but will remain firm. Then it’ll get wetter and darker, at which point the outside might be getting yellower too. Finally, it dries up and releases dark gray/black spores.
Even when a puffball is yellow, it’s technically not poisonous. The only type of puffball that is poisonous, is one with black lines that look like intricate designs. These will be on the outside and on the inside. These puffballs are very poisonous and should NOT be consumed.
Puffballs of all sizes grow in the forest, alongside roads, in the middle of a green grassy lawn – they can really grow anywhere! Just make sure to cut every single mushroom in half, and throw away any puffball that isn’t pure white on the inside. The rule of thumb with mushrooms is: if there is any doubt, leave it behind.
If you thought that the hen-of-the-woods had many recipes, just wait until you bring home a haul of puffballs. You can stir fry them, cook them in the oven and, my absolute favorite, pretend they are pancakes!
All you need to do is cut 3/4-inch slices, fry them in the oven or in a pan with some coconut oil and cinnamon, and serve with maple syrup. If you cook them long enough you can barely taste the difference – they are soft on the inside and crispy on the outside. Delicious. Here are some other recipes.
Just a quick reminder, some people don’t react well to the skin of puffball mushrooms (or any mushroom skins). Some mushrooms have tough skin that’s supposed to protect the mushroom from being eaten, this skin can be hard to digest and some people are more sensitive than others.
It’s very similar to the skin on certain nuts; if you’ve ever pooped out an intact almond, you know what I’m talking about. Personally, I eat the skin, but please be aware that you might have a reaction to it! You can always start by eating a small piece with the skin on, wait a few hours and see how you feel. It can be scary to feel sick after eating a wild mushroom and not know why.
Do you have any stories to share about puffballs? What’s your record find (size or amount)? What’s your favorite recipe? Share your puffball stories with us in the comments below!
I find these mushrooms the most rewarding to pick, because even a small one is enough as a side dish to any meal and the bigger ones can feed an entire family! Every time I’ve looked for hens, I found at least one and usually had 2-4 kilos (4-9 pounds) in just 30 minutes of searching. Of course, you can also find a single mushroom that weighs 13+ kilos (30+ pounds).
Let’s get down to business: there are mushrooms growing everywhere around New York right now, and they happen to taste delicious! The hen-of-the-woods (maitake) doesn’t have any poisonous look-alikes, grows to be huge so it’s satisfying to find, and it’s a great way to begin your mushroom picking lifestyle.
The hen-of-the-woods looks a little like the fluffy feathers of a hen, hence its name. Here are some photos of how they can look at different stages of growth.
Don’t confuse the hen-of-the-woods with the chicken-of-the-woods. Chickens can grow higher up on the tree, they are orange and they grow in layers. I have heard a lot about these mushrooms and I’m told they’re delicious; however, I haven’t found any yet and I don’t know how to tell them apart from the many other orange mushrooms that grow on trees! Stick with hens at the beginning – you can’t go wrong with them and there are plenty to go around!
Hen-of-the-woods mushrooms typically grow on oak trees, so if you see acorns on the ground you’re in the right spot. If you look at the roots of an oak, you’ll notice that they have a clumpy shape with many little lumps, which is very similar to the hen mushroom. From a distance, you might confuse a hen mushroom with some dried leaves. Always go check it out, and take a look around the entire tree. They tend to grow on bigger, wider oaks and there can be several clusters of them around a single tree!
Hen-of-the-woods are either off-white, kind of beige/grayish, or more brownish, especially around their rounded edges. They always grow in clumps, so from a distance you could confuse them with clustered mushrooms – remember that even though it looks like a cluster, it’s one big mushroom that spreads out and looks fluffy. If you cut it at the stem and you see many individual mushrooms, it’s not a hen. If you see any gills (pictured below) it is NOT a hen.
Hens are smooth, fluffy looking, single-stemmed mushrooms.
Remember, the picture above is NOT a hen-of-the-woods. It’s an example of gills, which hens do NOT have.
Sometimes, the hen-of-the-woods grows on trees other than oaks. If you want to be extra safe, you can stick to only eating the ones that you find on oak trees, although like I already said, there isn’t any dangerous look-alike. Hens are very unique looking.
Don’t forget to bring some common sense with you to the forest. If there are too many bugs on the hen, if it looks dry or off-color, then it’s probably not good for eating. While most insects are pretty safe to eat if cooked correctly, you should stay away from unintentional entomophagy. Plus, insects can cause allergic reactions in people who are also allergic to shellfish.
Another common sense move is to avoid picking mushrooms in forests where there’s a mark on the tree, or a colorful rope tied around an area. These could mark a protected area, a sick tree, some sort of pest, pollution, disease, etc. If it looks questionable, don’t go there.
Same goes for roadside mushrooms. Think about the pollution that they are exposed to. Would you like some car exhaust with your mushrooms? I don’t think so. Don’t pick anything that looks unclean (not including natural forest dirt) or could be polluted. Similarly to road exhaust, some places could be using pesticides or other chemicals in the area. Although most of us are already exposed to them from the food we buy in supermarkets, the fewer chemicals we consume, the better.
When you find a hen and bring it home, watch a video on how to clean it correctly. It has many layers, and the thick white stem isn’t as yummy as the rest, so you want to cut that off. If possible, clean it outside or in a large tub to avoid clogging your drain with forest debris. If you see any holes, cut into them and remove any insects, spiders or slugs.
How do you cook a hen?
There are so many ways to cook these mushrooms! Chop them into tiny pieces and stir fry them with other veggies, or, if you’re not vegetarian, with some meat, lard or eggs to make the perfect mushroomy omelette.
The nutritional value of hens varies depending on the website, but everyone agrees that they have very little fat or protein and lots of vitamin D. Some websites claim that they have very few carbs, others claim that they are 70% carbs. Almost everyone agrees that they are a healthy addition to a balanced diet!
I tend to use too much olive oil and caramelized onions to make a fatty, crispy, scrumptious meal, but there are much healthier alternatives. You can cook them in the oven with coconut oil, you can steam them, boil them, grill them, probably even air fry them. Whatever you choose to do, make sure to cook them well, as hens can be a bit chewy and hard on the stomach if undercooked, although this is true for mushrooms in general. You can find various recipes here.
If you didn’t manage to find any, or didn’t want to risk picking the wrong mushroom, you can still enjoy eating wild hen-of-the-woods. Check out your local farmers markets from August until November and I guarantee that you’ll find some hens – generally for $1 per pound!
Do you enjoy picking mushrooms? Have you tried hen-of-the-woods? Share your stories, tips, recipes and favorite mushroom picking spots in the comments below!
Silkworms are popular insects in China because they are native to the region. In school, instead of watching a caterpillar turn into a butterfly, people watch silkworms turn into moths!
Soi Cowboy is the crazy street in Thailand known for strip clubs and prostitutes, but there are no ping-pong shows here! We visited the street just to people watch and it was quite a show. We saw women aggressively dragging single men into the clubs and offering all sorts of services. They also had great happy hour deals, which I enjoyed, and it was an interesting place to visit. Of course, you couldn’t film inside and I only went in quickly to use the bathroom!
[STATEMENT]
lemma le_sup_equiv2: "(a \<le> b) = (a \<squnion> b = b)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (a \<le> b) = (a \<squnion> b = b)
[PROOF STEP]
by (rule sup1_dual.le_sup_equiv)
#include <boost/test/unit_test.hpp>
#include <boost/test/test_case_template.hpp>
#include <boost/mpl/list.hpp>
#include <boost/filesystem.hpp>
#include <boost/filesystem/path.hpp>
#include <string>
#include <time.h>
#include <math.h>
#include <fstream>
#include <iostream>
#include <cstdio>
#include <am/tc/BTree.h>
#include <am/range/AmIterator.h>
#define DIR_PREFIX "./tmp/am_tc_Db_"
using namespace izenelib::am::tc;
using namespace izenelib::am;
using namespace std;
namespace bfs = boost::filesystem;
using namespace boost::unit_test;
BOOST_AUTO_TEST_SUITE(tc_Db_test)
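// Summary: this test inserts three keys ("1", "2", "3") with value vectors
// of growing length, then traverses the B-tree four ways: forward from the
// beginning, forward starting at key "2", reverse starting at key "3", and
// reverse from the last key, printing each key/value pair to stdout.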
BOOST_AUTO_TEST_CASE(BTreeIterator)
{
bfs::path db_dir(DIR_PREFIX);
boost::filesystem::remove_all(db_dir);
bfs::create_directories(db_dir);
std::string db_dir_str = db_dir.string();
typedef std::vector<std::pair<int, std::string> > ValueType;
typedef BTree<std::string, ValueType> BTreeType;
BTreeType h(db_dir_str+"/BTreeIterator_test");
BOOST_CHECK(h.open());
{
std::string key("1");
ValueType value;
value.push_back(std::make_pair(1, "a"));
BOOST_CHECK(h.insert(key, value));
BOOST_CHECK(h.size() == 1);
}
{
std::string key("2");
ValueType value;
value.push_back(std::make_pair(1, "a"));
value.push_back(std::make_pair(2, "b"));
BOOST_CHECK(h.insert(key, value));
BOOST_CHECK(h.size() == 2);
}
{
std::string key("3");
ValueType value;
value.push_back(std::make_pair(1, "a"));
value.push_back(std::make_pair(2, "b"));
value.push_back(std::make_pair(3, "c"));
BOOST_CHECK(h.insert(key, value));
BOOST_CHECK(h.size() == 3);
}
std::cout<<"Forward Iterator for TC from beginning"<<std::endl;
typedef AMIterator<BTreeType> AMIteratorType;
AMIteratorType iter(h);
AMIteratorType end;
for(; iter != end; ++iter)
{
const std::string& k = iter->first;
const ValueType& v = iter->second;
for (ValueType::const_iterator vit = v.begin();
vit != v.end(); ++vit)
{
std::cout << k << " " << vit->first << " " << vit->second << std::endl;
}
}
std::cout<<"Forward Iterator for TC with start"<<std::endl;
AMIteratorType iter2(h, std::string("2"));
for(; iter2 != end; ++iter2)
{
const std::string& k = iter2->first;
const ValueType& v = iter2->second;
for (ValueType::const_iterator vit = v.begin();
vit != v.end(); ++vit)
{
std::cout << k << " " << vit->first << " " << vit->second << std::endl;
}
}
typedef AMReverseIterator<BTreeType > AMRIteratorType;
AMRIteratorType iter3(h,std::string("3"));
AMRIteratorType end2;
std::cout<<"Reverse Iterator for TC with start"<<std::endl;
for(; iter3 != end2; ++iter3)
{
const std::string& k = iter3->first;
const ValueType& v = iter3->second;
for (ValueType::const_iterator vit = v.begin();
vit != v.end(); ++vit)
{
std::cout << k << " " << vit->first << " " << vit->second << std::endl;
}
}
AMRIteratorType iter4(h);
std::cout<<"Reverse Iterator for TC"<<std::endl;
for(; iter4 != end2; ++iter4)
{
const std::string& k = iter4->first;
const ValueType& v = iter4->second;
for (ValueType::const_iterator vit = v.begin();
vit != v.end(); ++vit)
{
std::cout << k << " " << vit->first << " " << vit->second << std::endl;
}
}
}
BOOST_AUTO_TEST_SUITE_END() // tc_Db_test
(* Title: ZF/Bin.thy
Author: Lawrence C Paulson, Cambridge University Computer Laboratory
Copyright 1994 University of Cambridge
The sign Pls stands for an infinite string of leading 0's.
The sign Min stands for an infinite string of leading 1's.
A number can have multiple representations, namely leading 0's with sign
Pls and leading 1's with sign Min. See twos-compl.ML/int_of_binary for
the numerical interpretation.
The representation expects that (m mod 2) is 0 or 1, even if m is negative;
For instance, -5 div 2 = -3 and -5 mod 2 = 1; thus -5 = (-3)*2 + 1
*)
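(* Illustration (not part of the original theory): by integ_of_BIT below,
   integ_of(w BIT b) = b + 2 * integ_of(w), so
     integ_of(Pls BIT 1 BIT 0) = 0 + 2*(1 + 2*0) = 2
     integ_of(Min BIT 0)       = 0 + 2*(-1)      = -2
   matching the numeral translations for #2 and #-2 further down. *)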
section\<open>Arithmetic on Binary Integers\<close>
theory Bin
imports Int Datatype
begin
consts bin :: i
datatype
"bin" = Pls
| Min
| Bit ("w \<in> bin", "b \<in> bool") (infixl \<open>BIT\<close> 90)
consts
integ_of :: "i\<Rightarrow>i"
NCons :: "[i,i]\<Rightarrow>i"
bin_succ :: "i\<Rightarrow>i"
bin_pred :: "i\<Rightarrow>i"
bin_minus :: "i\<Rightarrow>i"
bin_adder :: "i\<Rightarrow>i"
bin_mult :: "[i,i]\<Rightarrow>i"
primrec
integ_of_Pls: "integ_of (Pls) = $# 0"
integ_of_Min: "integ_of (Min) = $-($#1)"
integ_of_BIT: "integ_of (w BIT b) = $#b $+ integ_of(w) $+ integ_of(w)"
(** recall that cond(1,b,c)=b and cond(0,b,c)=c **)
primrec (*NCons adds a bit, suppressing leading 0s and 1s*)
NCons_Pls: "NCons (Pls,b) = cond(b,Pls BIT b,Pls)"
NCons_Min: "NCons (Min,b) = cond(b,Min,Min BIT b)"
NCons_BIT: "NCons (w BIT c,b) = w BIT c BIT b"
primrec (*successor. If a BIT, can change a 0 to a 1 without recursion.*)
bin_succ_Pls: "bin_succ (Pls) = Pls BIT 1"
bin_succ_Min: "bin_succ (Min) = Pls"
bin_succ_BIT: "bin_succ (w BIT b) = cond(b, bin_succ(w) BIT 0, NCons(w,1))"
primrec (*predecessor*)
bin_pred_Pls: "bin_pred (Pls) = Min"
bin_pred_Min: "bin_pred (Min) = Min BIT 0"
bin_pred_BIT: "bin_pred (w BIT b) = cond(b, NCons(w,0), bin_pred(w) BIT 1)"
primrec (*unary negation*)
bin_minus_Pls:
"bin_minus (Pls) = Pls"
bin_minus_Min:
"bin_minus (Min) = Pls BIT 1"
bin_minus_BIT:
"bin_minus (w BIT b) = cond(b, bin_pred(NCons(bin_minus(w),0)),
bin_minus(w) BIT 0)"
primrec (*sum*)
bin_adder_Pls:
"bin_adder (Pls) = (\<lambda>w\<in>bin. w)"
bin_adder_Min:
"bin_adder (Min) = (\<lambda>w\<in>bin. bin_pred(w))"
bin_adder_BIT:
"bin_adder (v BIT x) =
(\<lambda>w\<in>bin.
bin_case (v BIT x, bin_pred(v BIT x),
\<lambda>w y. NCons(bin_adder (v) ` cond(x and y, bin_succ(w), w),
x xor y),
w))"
(*The bin_case above replaces the following mutually recursive function:
primrec
"adding (v,x,Pls) = v BIT x"
"adding (v,x,Min) = bin_pred(v BIT x)"
"adding (v,x,w BIT y) = NCons(bin_adder (v, cond(x and y, bin_succ(w), w)),
x xor y)"
*)
definition
bin_add :: "[i,i]\<Rightarrow>i" where
"bin_add(v,w) \<equiv> bin_adder(v)`w"
primrec
bin_mult_Pls:
"bin_mult (Pls,w) = Pls"
bin_mult_Min:
"bin_mult (Min,w) = bin_minus(w)"
bin_mult_BIT:
"bin_mult (v BIT b,w) = cond(b, bin_add(NCons(bin_mult(v,w),0),w),
NCons(bin_mult(v,w),0))"
syntax
"_Int0" :: i (\<open>#' 0\<close>)
"_Int1" :: i (\<open>#' 1\<close>)
"_Int2" :: i (\<open>#' 2\<close>)
"_Neg_Int1" :: i (\<open>#-' 1\<close>)
"_Neg_Int2" :: i (\<open>#-' 2\<close>)
translations
"#0" \<rightleftharpoons> "CONST integ_of(CONST Pls)"
"#1" \<rightleftharpoons> "CONST integ_of(CONST Pls BIT 1)"
"#2" \<rightleftharpoons> "CONST integ_of(CONST Pls BIT 1 BIT 0)"
"#-1" \<rightleftharpoons> "CONST integ_of(CONST Min)"
"#-2" \<rightleftharpoons> "CONST integ_of(CONST Min BIT 0)"
syntax
"_Int" :: "num_token \<Rightarrow> i" (\<open>#_\<close> 1000)
"_Neg_Int" :: "num_token \<Rightarrow> i" (\<open>#-_\<close> 1000)
ML_file \<open>Tools/numeral_syntax.ML\<close>
declare bin.intros [simp,TC]
lemma NCons_Pls_0: "NCons(Pls,0) = Pls"
by simp
lemma NCons_Pls_1: "NCons(Pls,1) = Pls BIT 1"
by simp
lemma NCons_Min_0: "NCons(Min,0) = Min BIT 0"
by simp
lemma NCons_Min_1: "NCons(Min,1) = Min"
by simp
lemma NCons_BIT: "NCons(w BIT x,b) = w BIT x BIT b"
by (simp add: bin.case_eqns)
lemmas NCons_simps [simp] =
NCons_Pls_0 NCons_Pls_1 NCons_Min_0 NCons_Min_1 NCons_BIT
(** Type checking **)
lemma integ_of_type [TC]: "w \<in> bin \<Longrightarrow> integ_of(w) \<in> int"
apply (induct_tac "w")
apply (simp_all add: bool_into_nat)
done
lemma NCons_type [TC]: "\<lbrakk>w \<in> bin; b \<in> bool\<rbrakk> \<Longrightarrow> NCons(w,b) \<in> bin"
by (induct_tac "w", auto)
lemma bin_succ_type [TC]: "w \<in> bin \<Longrightarrow> bin_succ(w) \<in> bin"
by (induct_tac "w", auto)
lemma bin_pred_type [TC]: "w \<in> bin \<Longrightarrow> bin_pred(w) \<in> bin"
by (induct_tac "w", auto)
lemma bin_minus_type [TC]: "w \<in> bin \<Longrightarrow> bin_minus(w) \<in> bin"
by (induct_tac "w", auto)
(*This proof is complicated by the mutual recursion*)
lemma bin_add_type [rule_format]:
"v \<in> bin \<Longrightarrow> \<forall>w\<in>bin. bin_add(v,w) \<in> bin"
unfolding bin_add_def
apply (induct_tac "v")
apply (rule_tac [3] ballI)
apply (rename_tac [3] "w'")
apply (induct_tac [3] "w'")
apply (simp_all add: NCons_type)
done
declare bin_add_type [TC]
lemma bin_mult_type [TC]: "\<lbrakk>v \<in> bin; w \<in> bin\<rbrakk> \<Longrightarrow> bin_mult(v,w) \<in> bin"
by (induct_tac "v", auto)
subsubsection\<open>The Carry and Borrow Functions,
\<^term>\<open>bin_succ\<close> and \<^term>\<open>bin_pred\<close>\<close>
(*NCons preserves the integer value of its argument*)
lemma integ_of_NCons [simp]:
"\<lbrakk>w \<in> bin; b \<in> bool\<rbrakk> \<Longrightarrow> integ_of(NCons(w,b)) = integ_of(w BIT b)"
apply (erule bin.cases)
apply (auto elim!: boolE)
done
lemma integ_of_succ [simp]:
"w \<in> bin \<Longrightarrow> integ_of(bin_succ(w)) = $#1 $+ integ_of(w)"
apply (erule bin.induct)
apply (auto simp add: zadd_ac elim!: boolE)
done
lemma integ_of_pred [simp]:
"w \<in> bin \<Longrightarrow> integ_of(bin_pred(w)) = $- ($#1) $+ integ_of(w)"
apply (erule bin.induct)
apply (auto simp add: zadd_ac elim!: boolE)
done
subsubsection\<open>\<^term>\<open>bin_minus\<close>: Unary Negation of Binary Integers\<close>
lemma integ_of_minus: "w \<in> bin \<Longrightarrow> integ_of(bin_minus(w)) = $- integ_of(w)"
apply (erule bin.induct)
apply (auto simp add: zadd_ac zminus_zadd_distrib elim!: boolE)
done
subsubsection\<open>\<^term>\<open>bin_add\<close>: Binary Addition\<close>
lemma bin_add_Pls [simp]: "w \<in> bin \<Longrightarrow> bin_add(Pls,w) = w"
by (unfold bin_add_def, simp)
lemma bin_add_Pls_right: "w \<in> bin \<Longrightarrow> bin_add(w,Pls) = w"
unfolding bin_add_def
apply (erule bin.induct, auto)
done
lemma bin_add_Min [simp]: "w \<in> bin \<Longrightarrow> bin_add(Min,w) = bin_pred(w)"
by (unfold bin_add_def, simp)
lemma bin_add_Min_right: "w \<in> bin \<Longrightarrow> bin_add(w,Min) = bin_pred(w)"
unfolding bin_add_def
apply (erule bin.induct, auto)
done
lemma bin_add_BIT_Pls [simp]: "bin_add(v BIT x,Pls) = v BIT x"
by (unfold bin_add_def, simp)
lemma bin_add_BIT_Min [simp]: "bin_add(v BIT x,Min) = bin_pred(v BIT x)"
by (unfold bin_add_def, simp)
lemma bin_add_BIT_BIT [simp]:
"\<lbrakk>w \<in> bin; y \<in> bool\<rbrakk>
\<Longrightarrow> bin_add(v BIT x, w BIT y) =
NCons(bin_add(v, cond(x and y, bin_succ(w), w)), x xor y)"
by (unfold bin_add_def, simp)
lemma integ_of_add [rule_format]:
"v \<in> bin \<Longrightarrow>
\<forall>w\<in>bin. integ_of(bin_add(v,w)) = integ_of(v) $+ integ_of(w)"
apply (erule bin.induct, simp, simp)
apply (rule ballI)
apply (induct_tac "wa")
apply (auto simp add: zadd_ac elim!: boolE)
done
(*Subtraction*)
lemma diff_integ_of_eq:
"\<lbrakk>v \<in> bin; w \<in> bin\<rbrakk>
\<Longrightarrow> integ_of(v) $- integ_of(w) = integ_of(bin_add (v, bin_minus(w)))"
unfolding zdiff_def
apply (simp add: integ_of_add integ_of_minus)
done
subsubsection\<open>\<^term>\<open>bin_mult\<close>: Binary Multiplication\<close>
lemma integ_of_mult:
"\<lbrakk>v \<in> bin; w \<in> bin\<rbrakk>
\<Longrightarrow> integ_of(bin_mult(v,w)) = integ_of(v) $* integ_of(w)"
apply (induct_tac "v", simp)
apply (simp add: integ_of_minus)
apply (auto simp add: zadd_ac integ_of_add zadd_zmult_distrib elim!: boolE)
done
subsection\<open>Computations\<close>
(** extra rules for bin_succ, bin_pred **)
lemma bin_succ_1: "bin_succ(w BIT 1) = bin_succ(w) BIT 0"
by simp
lemma bin_succ_0: "bin_succ(w BIT 0) = NCons(w,1)"
by simp
lemma bin_pred_1: "bin_pred(w BIT 1) = NCons(w,0)"
by simp
lemma bin_pred_0: "bin_pred(w BIT 0) = bin_pred(w) BIT 1"
by simp
(** extra rules for bin_minus **)
lemma bin_minus_1: "bin_minus(w BIT 1) = bin_pred(NCons(bin_minus(w), 0))"
by simp
lemma bin_minus_0: "bin_minus(w BIT 0) = bin_minus(w) BIT 0"
by simp
(** extra rules for bin_add **)
lemma bin_add_BIT_11: "w \<in> bin \<Longrightarrow> bin_add(v BIT 1, w BIT 1) =
NCons(bin_add(v, bin_succ(w)), 0)"
by simp
lemma bin_add_BIT_10: "w \<in> bin \<Longrightarrow> bin_add(v BIT 1, w BIT 0) =
NCons(bin_add(v,w), 1)"
by simp
lemma bin_add_BIT_0: "\<lbrakk>w \<in> bin; y \<in> bool\<rbrakk>
\<Longrightarrow> bin_add(v BIT 0, w BIT y) = NCons(bin_add(v,w), y)"
by simp
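(* Illustration (not part of the original theory): computing 1 + 1 = 2
   with the rules above:
   bin_add(Pls BIT 1, Pls BIT 1)
     = NCons(bin_add(Pls, bin_succ(Pls)), 0)   by bin_add_BIT_11
     = NCons(Pls BIT 1, 0)                     by bin_succ_Pls, bin_add_Pls
     = Pls BIT 1 BIT 0                         by NCons_BIT, i.e. #2 *)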
(** extra rules for bin_mult **)
lemma bin_mult_1: "bin_mult(v BIT 1, w) = bin_add(NCons(bin_mult(v,w),0), w)"
by simp
lemma bin_mult_0: "bin_mult(v BIT 0, w) = NCons(bin_mult(v,w),0)"
by simp
(** Simplification rules with integer constants **)
lemma int_of_0: "$#0 = #0"
by simp
lemma int_of_succ: "$# succ(n) = #1 $+ $#n"
by (simp add: int_of_add [symmetric] natify_succ)
lemma zminus_0 [simp]: "$- #0 = #0"
by simp
lemma zadd_0_intify [simp]: "#0 $+ z = intify(z)"
by simp
lemma zadd_0_right_intify [simp]: "z $+ #0 = intify(z)"
by simp
lemma zmult_1_intify [simp]: "#1 $* z = intify(z)"
by simp
lemma zmult_1_right_intify [simp]: "z $* #1 = intify(z)"
by (subst zmult_commute, simp)
lemma zmult_0 [simp]: "#0 $* z = #0"
by simp
lemma zmult_0_right [simp]: "z $* #0 = #0"
by (subst zmult_commute, simp)
lemma zmult_minus1 [simp]: "#-1 $* z = $-z"
by (simp add: zcompare_rls)
lemma zmult_minus1_right [simp]: "z $* #-1 = $-z"
apply (subst zmult_commute)
apply (rule zmult_minus1)
done
subsection\<open>Simplification Rules for Comparison of Binary Numbers\<close>
text\<open>Thanks to Norbert Voelker\<close>
(** Equals (=) **)
lemma eq_integ_of_eq:
"\<lbrakk>v \<in> bin; w \<in> bin\<rbrakk>
\<Longrightarrow> ((integ_of(v)) = integ_of(w)) \<longleftrightarrow>
iszero (integ_of (bin_add (v, bin_minus(w))))"
unfolding iszero_def
apply (simp add: zcompare_rls integ_of_add integ_of_minus)
done
lemma iszero_integ_of_Pls: "iszero (integ_of(Pls))"
by (unfold iszero_def, simp)
lemma nonzero_integ_of_Min: "\<not> iszero (integ_of(Min))"
unfolding iszero_def
apply (simp add: zminus_equation)
done
lemma iszero_integ_of_BIT:
"\<lbrakk>w \<in> bin; x \<in> bool\<rbrakk>
\<Longrightarrow> iszero (integ_of (w BIT x)) \<longleftrightarrow> (x=0 \<and> iszero (integ_of(w)))"
apply (unfold iszero_def, simp)
apply (subgoal_tac "integ_of (w) \<in> int")
apply typecheck
apply (drule int_cases)
apply (safe elim!: boolE)
apply (simp_all (asm_lr) add: zcompare_rls zminus_zadd_distrib [symmetric]
int_of_add [symmetric])
done
lemma iszero_integ_of_0:
"w \<in> bin \<Longrightarrow> iszero (integ_of (w BIT 0)) \<longleftrightarrow> iszero (integ_of(w))"
by (simp only: iszero_integ_of_BIT, blast)
lemma iszero_integ_of_1: "w \<in> bin \<Longrightarrow> \<not> iszero (integ_of (w BIT 1))"
by (simp only: iszero_integ_of_BIT, blast)
(** Less-than (<) **)
lemma less_integ_of_eq_neg:
"\<lbrakk>v \<in> bin; w \<in> bin\<rbrakk>
\<Longrightarrow> integ_of(v) $< integ_of(w)
\<longleftrightarrow> znegative (integ_of (bin_add (v, bin_minus(w))))"
unfolding zless_def zdiff_def
apply (simp add: integ_of_minus integ_of_add)
done
lemma not_neg_integ_of_Pls: "\<not> znegative (integ_of(Pls))"
by simp
lemma neg_integ_of_Min: "znegative (integ_of(Min))"
by simp
lemma neg_integ_of_BIT:
"\<lbrakk>w \<in> bin; x \<in> bool\<rbrakk>
\<Longrightarrow> znegative (integ_of (w BIT x)) \<longleftrightarrow> znegative (integ_of(w))"
apply simp
apply (subgoal_tac "integ_of (w) \<in> int")
apply typecheck
apply (drule int_cases)
apply (auto elim!: boolE simp add: int_of_add [symmetric] zcompare_rls)
apply (simp_all add: zminus_zadd_distrib [symmetric] zdiff_def
int_of_add [symmetric])
apply (subgoal_tac "$#1 $- $# succ (succ (n #+ n)) = $- $# succ (n #+ n) ")
apply (simp add: zdiff_def)
apply (simp add: equation_zminus int_of_diff [symmetric])
done
(** Less-than-or-equals (<=) **)
lemma le_integ_of_eq_not_less:
"(integ_of(x) $\<le> (integ_of(w))) \<longleftrightarrow> \<not> (integ_of(w) $< (integ_of(x)))"
by (simp add: not_zless_iff_zle [THEN iff_sym])
(*Delete the original rewrites, with their clumsy conditional expressions*)
declare bin_succ_BIT [simp del]
bin_pred_BIT [simp del]
bin_minus_BIT [simp del]
NCons_Pls [simp del]
NCons_Min [simp del]
bin_adder_BIT [simp del]
bin_mult_BIT [simp del]
(*Hide the binary representation of integer constants*)
declare integ_of_Pls [simp del] integ_of_Min [simp del] integ_of_BIT [simp del]
lemmas bin_arith_extra_simps =
integ_of_add [symmetric]
integ_of_minus [symmetric]
integ_of_mult [symmetric]
bin_succ_1 bin_succ_0
bin_pred_1 bin_pred_0
bin_minus_1 bin_minus_0
bin_add_Pls_right bin_add_Min_right
bin_add_BIT_0 bin_add_BIT_10 bin_add_BIT_11
diff_integ_of_eq
bin_mult_1 bin_mult_0 NCons_simps
(*For making a minimal simpset, one must include these default simprules
of thy. Also include simp_thms, or at least (\<not>False)=True*)
lemmas bin_arith_simps =
bin_pred_Pls bin_pred_Min
bin_succ_Pls bin_succ_Min
bin_add_Pls bin_add_Min
bin_minus_Pls bin_minus_Min
bin_mult_Pls bin_mult_Min
bin_arith_extra_simps
(*Simplification of relational operations*)
lemmas bin_rel_simps =
eq_integ_of_eq iszero_integ_of_Pls nonzero_integ_of_Min
iszero_integ_of_0 iszero_integ_of_1
less_integ_of_eq_neg
not_neg_integ_of_Pls neg_integ_of_Min neg_integ_of_BIT
le_integ_of_eq_not_less
declare bin_arith_simps [simp]
declare bin_rel_simps [simp]
(** Simplification of arithmetic when nested to the right **)
lemma add_integ_of_left [simp]:
"\<lbrakk>v \<in> bin; w \<in> bin\<rbrakk>
\<Longrightarrow> integ_of(v) $+ (integ_of(w) $+ z) = (integ_of(bin_add(v,w)) $+ z)"
by (simp add: zadd_assoc [symmetric])
lemma mult_integ_of_left [simp]:
"\<lbrakk>v \<in> bin; w \<in> bin\<rbrakk>
\<Longrightarrow> integ_of(v) $* (integ_of(w) $* z) = (integ_of(bin_mult(v,w)) $* z)"
by (simp add: zmult_assoc [symmetric])
lemma add_integ_of_diff1 [simp]:
"\<lbrakk>v \<in> bin; w \<in> bin\<rbrakk>
\<Longrightarrow> integ_of(v) $+ (integ_of(w) $- c) = integ_of(bin_add(v,w)) $- (c)"
unfolding zdiff_def
apply (rule add_integ_of_left, auto)
done
lemma add_integ_of_diff2 [simp]:
"\<lbrakk>v \<in> bin; w \<in> bin\<rbrakk>
\<Longrightarrow> integ_of(v) $+ (c $- integ_of(w)) =
integ_of (bin_add (v, bin_minus(w))) $+ (c)"
apply (subst diff_integ_of_eq [symmetric])
apply (simp_all add: zdiff_def zadd_ac)
done
(** More for integer constants **)
declare int_of_0 [simp] int_of_succ [simp]
lemma zdiff0 [simp]: "#0 $- x = $-x"
by (simp add: zdiff_def)
lemma zdiff0_right [simp]: "x $- #0 = intify(x)"
by (simp add: zdiff_def)
lemma zdiff_self [simp]: "x $- x = #0"
by (simp add: zdiff_def)
lemma znegative_iff_zless_0: "k \<in> int \<Longrightarrow> znegative(k) \<longleftrightarrow> k $< #0"
by (simp add: zless_def)
lemma zero_zless_imp_znegative_zminus: "\<lbrakk>#0 $< k; k \<in> int\<rbrakk> \<Longrightarrow> znegative($-k)"
by (simp add: zless_def)
lemma zero_zle_int_of [simp]: "#0 $\<le> $# n"
by (simp add: not_zless_iff_zle [THEN iff_sym] znegative_iff_zless_0 [THEN iff_sym])
lemma nat_of_0 [simp]: "nat_of(#0) = 0"
by (simp only: natify_0 int_of_0 [symmetric] nat_of_int_of)
lemma nat_le_int0_lemma: "\<lbrakk>z $\<le> $#0; z \<in> int\<rbrakk> \<Longrightarrow> nat_of(z) = 0"
by (auto simp add: znegative_iff_zless_0 [THEN iff_sym] zle_def zneg_nat_of)
lemma nat_le_int0: "z $\<le> $#0 \<Longrightarrow> nat_of(z) = 0"
apply (subgoal_tac "nat_of (intify (z)) = 0")
apply (rule_tac [2] nat_le_int0_lemma, auto)
done
lemma int_of_eq_0_imp_natify_eq_0: "$# n = #0 \<Longrightarrow> natify(n) = 0"
by (rule not_znegative_imp_zero, auto)
lemma nat_of_zminus_int_of: "nat_of($- $# n) = 0"
by (simp add: nat_of_def int_of_def raw_nat_of zminus image_intrel_int)
lemma int_of_nat_of: "#0 $\<le> z \<Longrightarrow> $# nat_of(z) = intify(z)"
apply (rule not_zneg_nat_of_intify)
apply (simp add: znegative_iff_zless_0 not_zless_iff_zle)
done
declare int_of_nat_of [simp] nat_of_zminus_int_of [simp]
lemma int_of_nat_of_if: "$# nat_of(z) = (if #0 $\<le> z then intify(z) else #0)"
by (simp add: int_of_nat_of znegative_iff_zless_0 not_zle_iff_zless)
lemma zless_nat_iff_int_zless: "\<lbrakk>m \<in> nat; z \<in> int\<rbrakk> \<Longrightarrow> (m < nat_of(z)) \<longleftrightarrow> ($#m $< z)"
apply (case_tac "znegative (z) ")
apply (erule_tac [2] not_zneg_nat_of [THEN subst])
apply (auto dest: zless_trans dest!: zero_zle_int_of [THEN zle_zless_trans]
simp add: znegative_iff_zless_0)
done
(** nat_of and zless **)
(*An alternative condition is @{term"$#0 \<subseteq> w"} *)
lemma zless_nat_conj_lemma: "$#0 $< z \<Longrightarrow> (nat_of(w) < nat_of(z)) \<longleftrightarrow> (w $< z)"
apply (rule iff_trans)
apply (rule zless_int_of [THEN iff_sym])
apply (auto simp add: int_of_nat_of_if simp del: zless_int_of)
apply (auto elim: zless_asym simp add: not_zle_iff_zless)
apply (blast intro: zless_zle_trans)
done
lemma zless_nat_conj: "(nat_of(w) < nat_of(z)) \<longleftrightarrow> ($#0 $< z \<and> w $< z)"
apply (case_tac "$#0 $< z")
apply (auto simp add: zless_nat_conj_lemma nat_le_int0 not_zless_iff_zle)
done
(*This simprule cannot be added unless we can find a way to make eq_integ_of_eq
unconditional!
[The condition "True" is a hack to prevent looping.
Conditional rewrite rules are tried after unconditional ones, so a rule
like eq_nat_number_of will be tried first to eliminate #mm=#nn.]
lemma integ_of_reorient [simp]:
"True \<Longrightarrow> (integ_of(w) = x) \<longleftrightarrow> (x = integ_of(w))"
by auto
*)
lemma integ_of_minus_reorient [simp]:
"(integ_of(w) = $- x) \<longleftrightarrow> ($- x = integ_of(w))"
by auto
lemma integ_of_add_reorient [simp]:
"(integ_of(w) = x $+ y) \<longleftrightarrow> (x $+ y = integ_of(w))"
by auto
lemma integ_of_diff_reorient [simp]:
"(integ_of(w) = x $- y) \<longleftrightarrow> (x $- y = integ_of(w))"
by auto
lemma integ_of_mult_reorient [simp]:
"(integ_of(w) = x $* y) \<longleftrightarrow> (x $* y = integ_of(w))"
by auto
(** To simplify inequalities involving integer negation and literals,
such as -x = #3
**)
lemmas [simp] =
zminus_equation [where y = "integ_of(w)"]
equation_zminus [where x = "integ_of(w)"]
for w
lemmas [iff] =
zminus_zless [where y = "integ_of(w)"]
zless_zminus [where x = "integ_of(w)"]
for w
lemmas [iff] =
zminus_zle [where y = "integ_of(w)"]
zle_zminus [where x = "integ_of(w)"]
for w
lemmas [simp] =
Let_def [where s = "integ_of(w)"] for w
(*** Simprocs for numeric literals ***)
(** Combining of literal coefficients in sums of products **)
lemma zless_iff_zdiff_zless_0: "(x $< y) \<longleftrightarrow> (x$-y $< #0)"
by (simp add: zcompare_rls)
lemma eq_iff_zdiff_eq_0: "\<lbrakk>x \<in> int; y \<in> int\<rbrakk> \<Longrightarrow> (x = y) \<longleftrightarrow> (x$-y = #0)"
by (simp add: zcompare_rls)
lemma zle_iff_zdiff_zle_0: "(x $\<le> y) \<longleftrightarrow> (x$-y $\<le> #0)"
by (simp add: zcompare_rls)
(** For combine_numerals **)
lemma left_zadd_zmult_distrib: "i$*u $+ (j$*u $+ k) = (i$+j)$*u $+ k"
by (simp add: zadd_zmult_distrib zadd_ac)
(** For cancel_numerals **)
lemma eq_add_iff1: "(i$*u $+ m = j$*u $+ n) \<longleftrightarrow> ((i$-j)$*u $+ m = intify(n))"
apply (simp add: zdiff_def zadd_zmult_distrib)
apply (simp add: zcompare_rls)
apply (simp add: zadd_ac)
done
lemma eq_add_iff2: "(i$*u $+ m = j$*u $+ n) \<longleftrightarrow> (intify(m) = (j$-i)$*u $+ n)"
apply (simp add: zdiff_def zadd_zmult_distrib)
apply (simp add: zcompare_rls)
apply (simp add: zadd_ac)
done
context fixes n :: i
begin
lemmas rel_iff_rel_0_rls =
zless_iff_zdiff_zless_0 [where y = "u $+ v"]
eq_iff_zdiff_eq_0 [where y = "u $+ v"]
zle_iff_zdiff_zle_0 [where y = "u $+ v"]
zless_iff_zdiff_zless_0 [where y = n]
eq_iff_zdiff_eq_0 [where y = n]
zle_iff_zdiff_zle_0 [where y = n]
for u v
lemma less_add_iff1: "(i$*u $+ m $< j$*u $+ n) \<longleftrightarrow> ((i$-j)$*u $+ m $< n)"
apply (simp add: zdiff_def zadd_zmult_distrib zadd_ac rel_iff_rel_0_rls)
done
lemma less_add_iff2: "(i$*u $+ m $< j$*u $+ n) \<longleftrightarrow> (m $< (j$-i)$*u $+ n)"
apply (simp add: zdiff_def zadd_zmult_distrib zadd_ac rel_iff_rel_0_rls)
done
end
lemma le_add_iff1: "(i$*u $+ m $\<le> j$*u $+ n) \<longleftrightarrow> ((i$-j)$*u $+ m $\<le> n)"
apply (simp add: zdiff_def zadd_zmult_distrib)
apply (simp add: zcompare_rls)
apply (simp add: zadd_ac)
done
lemma le_add_iff2: "(i$*u $+ m $\<le> j$*u $+ n) \<longleftrightarrow> (m $\<le> (j$-i)$*u $+ n)"
apply (simp add: zdiff_def zadd_zmult_distrib)
apply (simp add: zcompare_rls)
apply (simp add: zadd_ac)
done
ML_file \<open>int_arith.ML\<close>
subsection \<open>examples:\<close>
text \<open>\<open>combine_numerals_prod\<close> (products of separate literals)\<close>
lemma "#5 $* x $* #3 = y" apply simp oops
schematic_goal "y2 $+ ?x42 = y $+ y2" apply simp oops
lemma "oo : int \<Longrightarrow> l $+ (l $+ #2) $+ oo = oo" apply simp oops
lemma "#9$*x $+ y = x$*#23 $+ z" apply simp oops
lemma "y $+ x = x $+ z" apply simp oops
lemma "x : int \<Longrightarrow> x $+ y $+ z = x $+ z" apply simp oops
lemma "x : int \<Longrightarrow> y $+ (z $+ x) = z $+ x" apply simp oops
lemma "z : int \<Longrightarrow> x $+ y $+ z = (z $+ y) $+ (x $+ w)" apply simp oops
lemma "z : int \<Longrightarrow> x$*y $+ z = (z $+ y) $+ (y$*x $+ w)" apply simp oops
lemma "#-3 $* x $+ y $\<le> x $* #2 $+ z" apply simp oops
lemma "y $+ x $\<le> x $+ z" apply simp oops
lemma "x $+ y $+ z $\<le> x $+ z" apply simp oops
lemma "y $+ (z $+ x) $< z $+ x" apply simp oops
lemma "x $+ y $+ z $< (z $+ y) $+ (x $+ w)" apply simp oops
lemma "x$*y $+ z $< (z $+ y) $+ (y$*x $+ w)" apply simp oops
lemma "l $+ #2 $+ #2 $+ #2 $+ (l $+ #2) $+ (oo $+ #2) = uu" apply simp oops
lemma "u : int \<Longrightarrow> #2 $* u = u" apply simp oops
lemma "(i $+ j $+ #12 $+ k) $- #15 = y" apply simp oops
lemma "(i $+ j $+ #12 $+ k) $- #5 = y" apply simp oops
lemma "y $- b $< b" apply simp oops
lemma "y $- (#3 $* b $+ c) $< b $- #2 $* c" apply simp oops
lemma "(#2 $* x $- (u $* v) $+ y) $- v $* #3 $* u = w" apply simp oops
lemma "(#2 $* x $* u $* v $+ (u $* v) $* #4 $+ y) $- v $* u $* #4 = w" apply simp oops
lemma "(#2 $* x $* u $* v $+ (u $* v) $* #4 $+ y) $- v $* u = w" apply simp oops
lemma "u $* v $- (x $* u $* v $+ (u $* v) $* #4 $+ y) = w" apply simp oops
lemma "(i $+ j $+ #12 $+ k) = u $+ #15 $+ y" apply simp oops
lemma "(i $+ j $* #2 $+ #12 $+ k) = j $+ #5 $+ y" apply simp oops
lemma "#2 $* y $+ #3 $* z $+ #6 $* w $+ #2 $* y $+ #3 $* z $+ #2 $* u = #2 $* y' $+ #3 $* z' $+ #6 $* w' $+ #2 $* y' $+ #3 $* z' $+ u $+ vv" apply simp oops
lemma "a $+ $-(b$+c) $+ b = d" apply simp oops
lemma "a $+ $-(b$+c) $- b = d" apply simp oops
text \<open>negative numerals\<close>
lemma "(i $+ j $+ #-2 $+ k) $- (u $+ #5 $+ y) = zz" apply simp oops
lemma "(i $+ j $+ #-3 $+ k) $< u $+ #5 $+ y" apply simp oops
lemma "(i $+ j $+ #3 $+ k) $< u $+ #-6 $+ y" apply simp oops
lemma "(i $+ j $+ #-12 $+ k) $- #15 = y" apply simp oops
lemma "(i $+ j $+ #12 $+ k) $- #-15 = y" apply simp oops
lemma "(i $+ j $+ #-12 $+ k) $- #-15 = y" apply simp oops
text \<open>Multiplying separated numerals\<close>
lemma "#6 $* ($# x $* #2) = uu" apply simp oops
lemma "#4 $* ($# x $* $# x) $* (#2 $* $# x) = uu" apply simp oops
end
(*
Author: Norbert Schirmer
Maintainer: Norbert Schirmer, norbert.schirmer at web de
License: LGPL
*)
(* Title: HoarePartialDef.thy
Author: Norbert Schirmer, TU Muenchen
Copyright (C) 2004-2008 Norbert Schirmer
Some rights reserved, TU Muenchen
This library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
This library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
USA
*)
section \<open>Hoare Logic for Partial Correctness\<close>
theory HoarePartialDef imports Semantic begin
type_synonym ('s,'p) quadruple = "('s assn \<times> 'p \<times> 's assn \<times> 's assn)"
subsection \<open>Validity of Hoare Tuples: \<open>\<Gamma>,\<Theta>\<Turnstile>\<^bsub>/F\<^esub> P c Q,A\<close>\<close>
definition
valid :: "[('s,'p,'f) body,'f set,'s assn,('s,'p,'f) com,'s assn,'s assn] => bool"
("_\<Turnstile>\<^bsub>'/_\<^esub>/ _ _ _,_" [61,60,1000, 20, 1000,1000] 60)
where
"\<Gamma>\<Turnstile>\<^bsub>/F\<^esub> P c Q,A \<equiv> \<forall>s t. \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t \<longrightarrow> s \<in> Normal ` P \<longrightarrow> t \<notin> Fault ` F
\<longrightarrow> t \<in> Normal ` Q \<union> Abrupt ` A"
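(* Informal reading: \<Gamma>\<Turnstile>\<^bsub>/F\<^esub> P c Q,A is partial correctness modulo the
   fault set F. Every terminating execution of c from a Normal state
   satisfying P that does not end in a fault from F must end in a Normal
   state satisfying Q or, on abrupt termination, in a state satisfying A.
   Non-terminating executions are not constrained. *)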
definition
cvalid::
"[('s,'p,'f) body,('s,'p) quadruple set,'f set,
's assn,('s,'p,'f) com,'s assn,'s assn] =>bool"
("_,_\<Turnstile>\<^bsub>'/_\<^esub>/ _ _ _,_" [61,60,60,1000, 20, 1000,1000] 60)
where
"\<Gamma>,\<Theta>\<Turnstile>\<^bsub>/F\<^esub> P c Q,A \<equiv> (\<forall>(P,p,Q,A)\<in>\<Theta>. \<Gamma>\<Turnstile>\<^bsub>/F\<^esub> P (Call p) Q,A) \<longrightarrow> \<Gamma> \<Turnstile>\<^bsub>/F\<^esub> P c Q,A"
definition
nvalid :: "[('s,'p,'f) body,nat,'f set,
's assn,('s,'p,'f) com,'s assn,'s assn] => bool"
("_\<Turnstile>_:\<^bsub>'/_\<^esub>/ _ _ _,_" [61,60,60,1000, 20, 1000,1000] 60)
where
"\<Gamma>\<Turnstile>n:\<^bsub>/F\<^esub> P c Q,A \<equiv> \<forall>s t. \<Gamma>\<turnstile>\<langle>c,s \<rangle> =n\<Rightarrow> t \<longrightarrow> s \<in> Normal ` P \<longrightarrow> t \<notin> Fault ` F
\<longrightarrow> t \<in> Normal ` Q \<union> Abrupt ` A"
definition
cnvalid::
"[('s,'p,'f) body,('s,'p) quadruple set,nat,'f set,
's assn,('s,'p,'f) com,'s assn,'s assn] \<Rightarrow> bool"
("_,_\<Turnstile>_:\<^bsub>'/_\<^esub>/ _ _ _,_" [61,60,60,60,1000, 20, 1000,1000] 60)
where
"\<Gamma>,\<Theta>\<Turnstile>n:\<^bsub>/F\<^esub> P c Q,A \<equiv> (\<forall>(P,p,Q,A)\<in>\<Theta>. \<Gamma>\<Turnstile>n:\<^bsub>/F\<^esub> P (Call p) Q,A) \<longrightarrow> \<Gamma> \<Turnstile>n:\<^bsub>/F\<^esub> P c Q,A"
notation (ASCII)
valid ("_|='/_/ _ _ _,_" [61,60,1000, 20, 1000,1000] 60) and
cvalid ("_,_|='/_/ _ _ _,_" [61,60,60,1000, 20, 1000,1000] 60) and
nvalid ("_|=_:'/_/ _ _ _,_" [61,60,60,1000, 20, 1000,1000] 60) and
cnvalid ("_,_|=_:'/_/ _ _ _,_" [61,60,60,60,1000, 20, 1000,1000] 60)
subsection \<open>Properties of Validity\<close>
lemma valid_iff_nvalid: "\<Gamma>\<Turnstile>\<^bsub>/F\<^esub> P c Q,A = (\<forall>n. \<Gamma>\<Turnstile>n:\<^bsub>/F\<^esub> P c Q,A)"
apply (simp only: valid_def nvalid_def exec_iff_execn )
apply (blast dest: exec_final_notin_to_execn)
done
lemma cnvalid_to_cvalid: "(\<forall>n. \<Gamma>,\<Theta>\<Turnstile>n:\<^bsub>/F\<^esub> P c Q,A) \<Longrightarrow> \<Gamma>,\<Theta>\<Turnstile>\<^bsub>/F\<^esub> P c Q,A"
apply (unfold cvalid_def cnvalid_def valid_iff_nvalid [THEN eq_reflection])
apply fast
done
lemma nvalidI:
"\<lbrakk>\<And>s t. \<lbrakk>\<Gamma>\<turnstile>\<langle>c,Normal s \<rangle> =n\<Rightarrow> t;s \<in> P; t\<notin> Fault ` F\<rbrakk> \<Longrightarrow> t \<in> Normal ` Q \<union> Abrupt ` A\<rbrakk>
\<Longrightarrow> \<Gamma>\<Turnstile>n:\<^bsub>/F\<^esub> P c Q,A"
by (auto simp add: nvalid_def)
lemma validI:
"\<lbrakk>\<And>s t. \<lbrakk>\<Gamma>\<turnstile>\<langle>c,Normal s \<rangle> \<Rightarrow> t;s \<in> P; t\<notin>Fault ` F\<rbrakk> \<Longrightarrow> t \<in> Normal ` Q \<union> Abrupt ` A\<rbrakk>
\<Longrightarrow> \<Gamma>\<Turnstile>\<^bsub>/F\<^esub> P c Q,A"
by (auto simp add: valid_def)
lemma cvalidI:
"\<lbrakk>\<And>s t. \<lbrakk>\<forall>(P,p,Q,A)\<in>\<Theta>. \<Gamma>\<Turnstile>\<^bsub>/F\<^esub> P (Call p) Q,A;\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> \<Rightarrow> t;s \<in> P;t\<notin>Fault ` F\<rbrakk>
\<Longrightarrow> t \<in> Normal ` Q \<union> Abrupt ` A\<rbrakk>
\<Longrightarrow> \<Gamma>,\<Theta>\<Turnstile>\<^bsub>/F\<^esub> P c Q,A"
by (auto simp add: cvalid_def valid_def)
lemma cvalidD:
"\<lbrakk>\<Gamma>,\<Theta>\<Turnstile>\<^bsub>/F\<^esub> P c Q,A;\<forall>(P,p,Q,A)\<in>\<Theta>. \<Gamma>\<Turnstile>\<^bsub>/F\<^esub> P (Call p) Q,A;\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> \<Rightarrow> t;s \<in> P;t\<notin>Fault ` F\<rbrakk>
\<Longrightarrow> t \<in> Normal ` Q \<union> Abrupt ` A"
by (auto simp add: cvalid_def valid_def)
lemma cnvalidI:
"\<lbrakk>\<And>s t. \<lbrakk>\<forall>(P,p,Q,A)\<in>\<Theta>. \<Gamma>\<Turnstile>n:\<^bsub>/F\<^esub> P (Call p) Q,A;
\<Gamma>\<turnstile>\<langle>c,Normal s \<rangle> =n\<Rightarrow> t;s \<in> P;t\<notin>Fault ` F\<rbrakk>
\<Longrightarrow> t \<in> Normal ` Q \<union> Abrupt ` A\<rbrakk>
\<Longrightarrow> \<Gamma>,\<Theta>\<Turnstile>n:\<^bsub>/F\<^esub> P c Q,A"
by (auto simp add: cnvalid_def nvalid_def)
lemma cnvalidD:
"\<lbrakk>\<Gamma>,\<Theta>\<Turnstile>n:\<^bsub>/F\<^esub> P c Q,A;\<forall>(P,p,Q,A)\<in>\<Theta>. \<Gamma>\<Turnstile>n:\<^bsub>/F\<^esub> P (Call p) Q,A;
\<Gamma>\<turnstile>\<langle>c,Normal s \<rangle> =n\<Rightarrow> t;s \<in> P;
t\<notin>Fault ` F\<rbrakk>
\<Longrightarrow> t \<in> Normal ` Q \<union> Abrupt ` A"
by (auto simp add: cnvalid_def nvalid_def)
lemma nvalid_augment_Faults:
assumes validn:"\<Gamma>\<Turnstile>n:\<^bsub>/F\<^esub> P c Q,A"
assumes F': "F \<subseteq> F'"
shows "\<Gamma>\<Turnstile>n:\<^bsub>/F'\<^esub> P c Q,A"
proof (rule nvalidI)
fix s t
assume exec: "\<Gamma>\<turnstile>\<langle>c,Normal s \<rangle> =n\<Rightarrow> t"
assume P: "s \<in> P"
assume F: "t \<notin> Fault ` F'"
with F' have "t \<notin> Fault ` F"
by blast
with exec P validn
show "t \<in> Normal ` Q \<union> Abrupt ` A"
by (auto simp add: nvalid_def)
qed
lemma valid_augment_Faults:
assumes validn:"\<Gamma>\<Turnstile>\<^bsub>/F\<^esub> P c Q,A"
assumes F': "F \<subseteq> F'"
shows "\<Gamma>\<Turnstile>\<^bsub>/F'\<^esub> P c Q,A"
proof (rule validI)
fix s t
assume exec: "\<Gamma>\<turnstile>\<langle>c,Normal s \<rangle> \<Rightarrow> t"
assume P: "s \<in> P"
assume F: "t \<notin> Fault ` F'"
with F' have "t \<notin> Fault ` F"
by blast
with exec P validn
show "t \<in> Normal ` Q \<union> Abrupt ` A"
by (auto simp add: valid_def)
qed
lemma nvalid_to_nvalid_strip:
assumes validn:"\<Gamma>\<Turnstile>n:\<^bsub>/F\<^esub> P c Q,A"
assumes F': "F' \<subseteq> -F"
shows "strip F' \<Gamma>\<Turnstile>n:\<^bsub>/F\<^esub> P c Q,A"
proof (rule nvalidI)
fix s t
assume exec_strip: "strip F' \<Gamma>\<turnstile>\<langle>c,Normal s \<rangle> =n\<Rightarrow> t"
assume P: "s \<in> P"
assume F: "t \<notin> Fault ` F"
from exec_strip obtain t' where
exec: "\<Gamma>\<turnstile>\<langle>c,Normal s \<rangle> =n\<Rightarrow> t'" and
t': "t' \<in> Fault ` (-F') \<longrightarrow> t'=t" "\<not> isFault t' \<longrightarrow> t'=t"
by (blast dest: execn_strip_to_execn)
show "t \<in> Normal ` Q \<union> Abrupt ` A"
proof (cases "t' \<in> Fault ` F")
case True
with t' F F' have False
by blast
thus ?thesis ..
next
case False
with exec P validn
have *: "t' \<in> Normal ` Q \<union> Abrupt ` A"
by (auto simp add: nvalid_def)
with t' have "t'=t"
by auto
with * show ?thesis
by simp
qed
qed
lemma valid_to_valid_strip:
assumes valid:"\<Gamma>\<Turnstile>\<^bsub>/F\<^esub> P c Q,A"
assumes F': "F' \<subseteq> -F"
shows "strip F' \<Gamma>\<Turnstile>\<^bsub>/F\<^esub> P c Q,A"
proof (rule validI)
fix s t
assume exec_strip: "strip F' \<Gamma>\<turnstile>\<langle>c,Normal s \<rangle> \<Rightarrow> t"
assume P: "s \<in> P"
assume F: "t \<notin> Fault ` F"
from exec_strip obtain t' where
exec: "\<Gamma>\<turnstile>\<langle>c,Normal s \<rangle> \<Rightarrow> t'" and
t': "t' \<in> Fault ` (-F') \<longrightarrow> t'=t" "\<not> isFault t' \<longrightarrow> t'=t"
by (blast dest: exec_strip_to_exec)
show "t \<in> Normal ` Q \<union> Abrupt ` A"
proof (cases "t' \<in> Fault ` F")
case True
with t' F F' have False
by blast
thus ?thesis ..
next
case False
with exec P valid
have *: "t' \<in> Normal ` Q \<union> Abrupt ` A"
by (auto simp add: valid_def)
with t' have "t'=t"
by auto
with * show ?thesis
by simp
qed
qed
subsection \<open>The Hoare Rules: \<open>\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P c Q,A\<close>\<close>
lemma mono_WeakenContext: "A \<subseteq> B \<Longrightarrow>
(\<lambda>(P, c, Q, A'). (\<Gamma>, \<Theta>, F, P, c, Q, A') \<in> A) x \<longrightarrow>
(\<lambda>(P, c, Q, A'). (\<Gamma>, \<Theta>, F, P, c, Q, A') \<in> B) x"
apply blast
done
inductive "hoarep"::"[('s,'p,'f) body,('s,'p) quadruple set,'f set,
's assn,('s,'p,'f) com, 's assn,'s assn] => bool"
("(3_,_/\<turnstile>\<^bsub>'/_ \<^esub>(_/ (_)/ _,/_))" [60,60,60,1000,20,1000,1000]60)
for \<Gamma>::"('s,'p,'f) body"
where
Skip: "\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> Q Skip Q,A"
| Basic: "\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> {s. f s \<in> Q} (Basic f) Q,A"
| Spec: "\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> {s. (\<forall>t. (s,t) \<in> r \<longrightarrow> t \<in> Q) \<and> (\<exists>t. (s,t) \<in> r)} (Spec r) Q,A"
| Seq: "\<lbrakk>\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P c\<^sub>1 R,A; \<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> R c\<^sub>2 Q,A\<rbrakk>
\<Longrightarrow>
\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P (Seq c\<^sub>1 c\<^sub>2) Q,A"
| Cond: "\<lbrakk>\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> (P \<inter> b) c\<^sub>1 Q,A; \<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> (P \<inter> - b) c\<^sub>2 Q,A\<rbrakk>
\<Longrightarrow>
\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P (Cond b c\<^sub>1 c\<^sub>2) Q,A"
| While: "\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> (P \<inter> b) c P,A
\<Longrightarrow>
\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P (While b c) (P \<inter> - b),A"
| Guard: "\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> (g \<inter> P) c Q,A
\<Longrightarrow>
\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> (g \<inter> P) (Guard f g c) Q,A"
| Guarantee: "\<lbrakk>f \<in> F; \<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> (g \<inter> P) c Q,A\<rbrakk>
\<Longrightarrow>
\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P (Guard f g c) Q,A"
| CallRec:
"\<lbrakk>(P,p,Q,A) \<in> Specs;
\<forall>(P,p,Q,A) \<in> Specs. p \<in> dom \<Gamma> \<and> \<Gamma>,\<Theta>\<union>Specs\<turnstile>\<^bsub>/F\<^esub> P (the (\<Gamma> p)) Q,A \<rbrakk>
\<Longrightarrow> \<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P (Call p) Q,A"
| DynCom:
"\<forall>s \<in> P. \<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P (c s) Q,A
\<Longrightarrow>
\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P (DynCom c) Q,A"
| Throw: "\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> A Throw Q,A"
| Catch: "\<lbrakk>\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P c\<^sub>1 Q,R; \<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> R c\<^sub>2 Q,A\<rbrakk> \<Longrightarrow> \<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P Catch c\<^sub>1 c\<^sub>2 Q,A"
| Conseq: "\<forall>s \<in> P. \<exists>P' Q' A'. \<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P' c Q',A' \<and> s \<in> P' \<and> Q' \<subseteq> Q \<and> A' \<subseteq> A
\<Longrightarrow> \<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P c Q,A"
| Asm: "\<lbrakk>(P,p,Q,A) \<in> \<Theta>\<rbrakk>
\<Longrightarrow>
\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P (Call p) Q,A"
| ExFalso: "\<lbrakk>\<forall>n. \<Gamma>,\<Theta>\<Turnstile>n:\<^bsub>/F\<^esub> P c Q,A; \<not> \<Gamma>\<Turnstile>\<^bsub>/F\<^esub> P c Q,A\<rbrakk> \<Longrightarrow> \<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P c Q,A"
\<comment> \<open>This is a hack rule that enables us to derive completeness for
an arbitrary context \<open>\<Theta>\<close>, from completeness for an empty context.\<close>
text \<open>This does not work because of rule ExFalso; the context \<open>\<Theta>\<close> is to blame.
A weaker version with an empty context can be derived from soundness
and completeness later on.\<close>
lemma hoare_strip_\<Gamma>:
assumes deriv: "\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P p Q,A"
shows "strip (-F) \<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P p Q,A"
using deriv
proof induct
case Skip thus ?case by (iprover intro: hoarep.Skip)
next
case Basic thus ?case by (iprover intro: hoarep.Basic)
next
case Spec thus ?case by (iprover intro: hoarep.Spec)
next
case Seq thus ?case by (iprover intro: hoarep.Seq)
next
case Cond thus ?case by (iprover intro: hoarep.Cond)
next
case While thus ?case by (iprover intro: hoarep.While)
next
case Guard thus ?case by (iprover intro: hoarep.Guard)
(*next
case CallSpec thus ?case by (iprover intro: hoarep.CallSpec)
next
case (CallRec A Abr Abr' Init P Post Pre Procs Q R Result Return Z \<Gamma> \<Theta> init p
result return )
from CallRec.hyps
have "\<forall>p\<in>Procs. \<forall>Z. (strip \<Gamma>),\<Theta> \<union>
(\<Union>\<^bsub>p\<in>Procs\<^esub>
\<Union>\<^bsub>Z\<^esub> {(Pre p Z, Call (Init p) p (Return p) (Result p),
Post p Z, Abr p Z)})\<turnstile>
(Pre p Z) (the (\<Gamma> p)) (R p Z),(Abr' p Z)" by blast
hence "\<forall>p\<in>Procs. \<forall>Z. (strip \<Gamma>),\<Theta> \<union>
(\<Union>\<^bsub>p\<in>Procs\<^esub>
\<Union>\<^bsub>Z\<^esub> {(Pre p Z, Call (Init p) p (Return p) (Result p),
Post p Z, Abr p Z)})\<turnstile>
(Pre p Z) (the ((strip \<Gamma>) p)) (R p Z),(Abr' p Z)"
by (auto intro: hoarep.StripI)
then show ?case
apply -
apply (rule hoarep.CallRec)
apply (assumption | simp only:dom_strip)+
done*)
next
case DynCom
thus ?case
by - (rule hoarep.DynCom,best elim!: ballE exE)
next
case Throw thus ?case by (iprover intro: hoarep.Throw)
next
case Catch thus ?case by (iprover intro: hoarep.Catch)
(*next
case CONSEQ thus ?case apply (auto intro: hoarep.CONSEQ)*)
next
case Asm thus ?case by (iprover intro: hoarep.Asm)
next
case ExFalso
thus ?case
oops
lemma hoare_augment_context:
assumes deriv: "\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P p Q,A"
shows "\<And>\<Theta>'. \<Theta> \<subseteq> \<Theta>' \<Longrightarrow> \<Gamma>,\<Theta>'\<turnstile>\<^bsub>/F\<^esub> P p Q,A"
using deriv
proof (induct)
case (CallRec P p Q A Specs \<Theta> F \<Theta>')
from CallRec.prems
have "\<Theta>\<union>Specs
\<subseteq> \<Theta>'\<union>Specs"
by blast
with CallRec.hyps (2)
have "\<forall>(P,p,Q,A)\<in>Specs. p \<in> dom \<Gamma> \<and> \<Gamma>,\<Theta>'\<union>Specs \<turnstile>\<^bsub>/F\<^esub> P (the (\<Gamma> p)) Q,A"
by fastforce
with CallRec show ?case by - (rule hoarep.CallRec)
next
case DynCom thus ?case by (blast intro: hoarep.DynCom)
next
case (Conseq P \<Theta> F c Q A \<Theta>')
from Conseq
have "\<forall>s \<in> P.
(\<exists>P' Q' A'. \<Gamma>,\<Theta>' \<turnstile>\<^bsub>/F\<^esub> P' c Q',A' \<and> s \<in> P' \<and> Q' \<subseteq> Q \<and> A' \<subseteq> A)"
by blast
with Conseq show ?case by - (rule hoarep.Conseq)
next
case (ExFalso \<Theta> F P c Q A \<Theta>')
have valid_ctxt: "\<forall>n. \<Gamma>,\<Theta>\<Turnstile>n:\<^bsub>/F\<^esub> P c Q,A" "\<Theta> \<subseteq> \<Theta>'" by fact+
hence "\<forall>n. \<Gamma>,\<Theta>'\<Turnstile>n:\<^bsub>/F\<^esub> P c Q,A"
by (simp add: cnvalid_def) blast
moreover have invalid: "\<not> \<Gamma>\<Turnstile>\<^bsub>/F\<^esub> P c Q,A" by fact
ultimately show ?case
by (rule hoarep.ExFalso)
qed (blast intro: hoarep.intros)+
subsection \<open>Some Derived Rules\<close>
lemma Conseq': "\<forall>s. s \<in> P \<longrightarrow>
(\<exists>P' Q' A'.
(\<forall> Z. \<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> (P' Z) c (Q' Z),(A' Z)) \<and>
(\<exists>Z. s \<in> P' Z \<and> (Q' Z \<subseteq> Q) \<and> (A' Z \<subseteq> A)))
\<Longrightarrow>
\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P c Q,A"
apply (rule Conseq)
apply (rule ballI)
apply (erule_tac x=s in allE)
apply (clarify)
apply (rule_tac x="P' Z" in exI)
apply (rule_tac x="Q' Z" in exI)
apply (rule_tac x="A' Z" in exI)
apply blast
done
lemma conseq:"\<lbrakk>\<forall>Z. \<Gamma>,\<Theta> \<turnstile>\<^bsub>/F\<^esub> (P' Z) c (Q' Z),(A' Z);
\<forall>s. s \<in> P \<longrightarrow> (\<exists> Z. s\<in>P' Z \<and> (Q' Z \<subseteq> Q) \<and> (A' Z \<subseteq> A))\<rbrakk>
\<Longrightarrow>
\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P c Q,A"
by (rule Conseq) blast
theorem conseqPrePost [trans]:
"\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P' c Q',A' \<Longrightarrow> P \<subseteq> P' \<Longrightarrow> Q' \<subseteq> Q \<Longrightarrow> A' \<subseteq> A \<Longrightarrow> \<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P c Q,A"
by (rule conseq [where ?P'="\<lambda>Z. P'" and ?Q'="\<lambda>Z. Q'"]) auto
lemma conseqPre [trans]: "\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P' c Q,A \<Longrightarrow> P \<subseteq> P' \<Longrightarrow> \<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P c Q,A"
by (rule conseq) auto
lemma conseqPost [trans]: "\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P c Q',A' \<Longrightarrow> Q' \<subseteq> Q \<Longrightarrow> A' \<subseteq> A
\<Longrightarrow> \<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> P c Q,A"
by (rule conseq) auto
lemma CallRec':
"\<lbrakk>p\<in>Procs; Procs \<subseteq> dom \<Gamma>;
\<forall>p\<in>Procs.
\<forall>Z. \<Gamma>,\<Theta> \<union> (\<Union>p\<in>Procs. \<Union>Z. {((P p Z),p,Q p Z,A p Z)})
\<turnstile>\<^bsub>/F\<^esub> (P p Z) (the (\<Gamma> p)) (Q p Z),(A p Z)\<rbrakk>
\<Longrightarrow>
\<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> (P p Z) (Call p) (Q p Z),(A p Z)"
apply (rule CallRec [where Specs="\<Union>p\<in>Procs. \<Union>Z. {((P p Z),p,Q p Z,A p Z)}"])
apply blast
apply blast
done
end
|
O for a draught of vintage ! that hath been
|
= Sarnia =
|
# James Rekow
intraCohortInteraction = function(chrt, M, N, lambda = 0, threshold = 10 ^ (-6),
                                  maxSteps = 10 ^ 4, tStep = 10 ^ (-2), intTime = 1,
                                  interSmplMult = 0.01, conGraph = NULL){
  # ARGS: chrt - input cohort in the form list(list(abd, gr, imat)), where abundances have
  #              already been integrated
  #       M - number of samples in the cohort
  #       N - number of taxa per sample (accepted for interface consistency; not used
  #           directly in this function)
  #       lambda - rate of exponential random variable describing time between interactions
  #       threshold, maxSteps, tStep - integration controls passed through to eulerIntegrate
  #       intTime - time over which interactions are to be simulated
  #       interSmplMult - the fraction of a sample's abundances that get transmitted
  #                       upon contact with another sample (transmission is bi-directional
  #                       and does not deplete the abundance of the sample from which it is
  #                       transmitted)
  #       conGraph - if a graph describing the connectivity of samples in the cohort is
  #                  supplied, then samples can interact if and only if their corresponding
  #                  vertices are connected by an edge
  #
  # RETURNS: abdList - list of interacted and re-integrated abundances from chrt
  #
  # NOTE: exactly one interaction occurs during each iteration of the loop. If cumTime is less
  # than intTime but the next wait time makes cumTime greater than intTime then the interaction
  # corresponding to that wait time does not occur.
library(igraph)
source("eulerIntegrate.r")
## store the indices of all interacting pairs of samples and count how many such pairs there are
# if conGraph is not specified, all samples interact with all other samples
  # (i.e. they form a complete graph)
if(is.null(conGraph)){
smplPairs = combn(M, 2)
numPairs = choose(M, 2)
} # end if
# if conGraph is specified, use edges in conGraph for smplPairs
if(!is.null(conGraph)){
smplPairs = t(ends(conGraph, E(conGraph)))
numPairs = ncol(smplPairs)
} # end if
# precompute coefficient for use in interaction step
tempSmplMult = 1 - interSmplMult
# track cumulative time and terminate interaction step once cumulative time exceeds intTime
cumTime = rexp(n = 1, rate = lambda)
# interact samples
while(cumTime < intTime){
iPairIx = sample(numPairs, 1)
iPair = smplPairs[ , iPairIx]
# select the abundance vectors of the interacting samples
abd1 = chrt[[iPair[1]]][[1]]
abd2 = chrt[[iPair[2]]][[1]]
# compute the weighted sum
    tempSum = interSmplMult * (abd1 + abd2)
# update sample abundances in the cohort
chrt[[iPair[1]]][[1]] = tempSmplMult * abd1 + tempSum
chrt[[iPair[2]]][[1]] = tempSmplMult * abd2 + tempSum
# re-integrate samples that have interacted
for(ii in iPair){
chrt[[ii]][[1]] = eulerIntegrate(chrt[[ii]], threshold = threshold, maxSteps = maxSteps,
tStep = tStep)
} # end for
cumTime = cumTime + rexp(n = 1, rate = lambda)
} # end while loop
abdList = lapply(chrt, "[[", 1)
return(abdList)
} # end intraCohortInteraction function
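## Example usage (a sketch, not from the original script): a toy cohort of
## M = 3 samples, each list(abd, gr, imat) with hypothetical values, assuming
## eulerIntegrate.r is on the source path.
# toyChrt = lapply(1:3, function(i) list(runif(5), runif(5), matrix(0, 5, 5)))
# abdList = intraCohortInteraction(toyChrt, M = 3, N = 5, lambda = 2, intTime = 1)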
|
{-# LANGUAGE FlexibleContexts, FlexibleInstances, NamedFieldPuns #-}
module School.FileIO.Source
( Source(..)
, pool
, source
) where
import Conduit ((.|), ConduitM, MonadResource, mapMC, nullC, sourceFileBS, takeCE)
import Control.Monad ((>=>), unless)
import Control.Monad.Except (MonadError)
import Data.ByteString (ByteString)
import Numeric.LinearAlgebra (Matrix, R)
import School.FileIO.BinConversion (binConversion)
import School.FileIO.ConduitHeader (conduitHeader)
import School.FileIO.FileHeader (FileHeader(..))
import School.FileIO.FilePath (FilePath)
import School.Types.DataType (DataType(..), getSize)
import School.Types.Decoding (binToDouble, binToListInt, binToMatrixDouble)
import School.Types.Error (Error)
import School.Types.LiftResult (LiftResult(..))
class Source a where
decodeByteString :: FileHeader -> ByteString -> Either Error a
pool :: (Monad m)
=> Int
-> (ByteString -> m a)
-> ConduitM ByteString a m ()
pool chunkSize transformer = loop where
loop = do
takeCE chunkSize .| mapMC transformer
isEmpty <- nullC
unless isEmpty loop
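-- Note: 'source' below passes 'pool' the byte size of one full matrix
-- (getSize dataType * cols * rows), so that each taken chunk is intended to
-- decode to a single value of type 'a'.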
source :: (LiftResult m, MonadError Error m, MonadResource m, Source a)
=> FileHeader
-> FilePath
-> ConduitM () a m ()
source header@FileHeader { dataType, cols, rows } path =
let size = getSize dataType * cols * rows
byteSource = sourceFileBS path
cHeader = conduitHeader header
trans = liftResult . decodeByteString header
in byteSource .| cHeader .| pool size trans
instance Source (Matrix R) where
decodeByteString FileHeader{ dataType, cols, rows } =
binToMatrixDouble dataType rows cols
instance Source Double where
decodeByteString FileHeader{ dataType } =
binConversion dataType DBL64B >=> binToDouble
instance Source [Int] where
decodeByteString FileHeader{ dataType } = binToListInt dataType
|
how_many_dalmatians <- function( n ) {
if( n == 101 ) "101 DALMATIONS!!!"
else if( n > 79 ) "Woah that's a lot of dogs!"
else if( n > 11 ) "More than a handful!"
else "Hardly any"
}
|
program five
  implicit none
  integer :: i, n, x(999), y(999), j
  write(*,*) "enter n"
  read(*,*) n
  do i = 1, n
     read(*,*) x(i)
  end do
  call numper(x, n, y, j)
  ! print the even entries found by numper, then their count
  do i = 1, j
     write(*,*) y(i)
  end do
  write(*,*) j
end program
subroutine numper(x, n, y, j)
  ! collect the even entries of x(1:n) into y and return their count in j
  implicit none
  integer, intent(in) :: n
  integer, intent(in) :: x(n)
  integer, intent(out) :: y(n), j
  integer :: i
  y = 0
  j = 0
  do i = 1, n
     if (mod(x(i), 2) == 0) then
        j = j + 1
        y(j) = x(i)
     end if
  end do
end subroutine numper
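! Example (sketch): for n = 3 and x = (1, 2, 4), the program prints 2 and 4,
! then the count 2.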
|
import numpy as np
import matplotlib.pyplot as plt
import all_paths as ap
import engformat as ef
def create():
data = np.loadtxt(ap.MODULE_DATA_PATH + 'basic_raw_data.csv', skiprows=1, delimiter=',').T
x = data[0]
y = data[1]
ps = np.polyfit(x, y, deg=2)
y_fit = ps[0] * x ** 2 + ps[1] * x + ps[2]
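    # equivalently, since np.polyfit returns highest-degree-first coefficients:
    # y_fit = np.polyval(ps, x)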
bf, subplot = plt.subplots()
    for i in range(len(x)):
        # label only the first point so the legend shows a single 'Raw data' entry
        subplot.plot(x[i], y[i], 'o', c='b', alpha=0.5, label='Raw data' if i == 0 else None)
subplot.plot(x, y_fit, c='r', label='Fitted')
subplot.axvspan(0.5, 1.5, color='orange', alpha=1)
cline = ef.create_custom_legend_patch(label='Critical zone', c='orange', alpha=0.3)
ef.revamp_legend(subplot, loc='upper left', add_handles=[cline])
plt.show()
if __name__ == '__main__':
create()
|
# Copyright (C) 2021 ServiceNow, Inc.
""" Preprocessing functions running on df str column inputs
(optimized for df computation)
These functions will **fail** if the string column contains null values
"""
from unidecode import unidecode, unidecode_expect_nonascii, unidecode_expect_ascii
import re
import numpy as np
import string
import enchant
import nrcan_p2.data_processing.preprocessing_str as preprocessing_str
EN_DICT = enchant.Dict("en_CA")
SPECIAL_CASES = {
'ofthe': 'of the',
'inthe': 'in the',
'forthe': 'for the',
'onthe': 'on the'
}
SPECIAL_CASES_2 = {
'ofthe': 'of the',
'inthe': 'in the',
'forthe': 'for the',
'onthe': 'on the',
"andiron": "and iron"
}
def rm_dbl_space(dfcol):
""" Reduce multiple whitespace to single space (do not touch newlines) """
return dfcol.str.replace(r'[^\S\n]+', ' ', regex=True)
def rm_cid(dfcol):
""" Remove (cid:X) where X is a number """
return dfcol.str.replace(r'\(cid:[0-9]+\)', '', regex=True)
def strip_space(dfcol):
""" Strip spaces (not newline) """
return dfcol.str.strip(r' ')
def rm_dbl_punct(dfcol):
""" Remove doubled punctuation characters (the same character, repeated)
Except for period, which we allow to exist 3 or more times.
"""
# everything except .
s = r"([!\-\"#$%&\'()*+,/:;<=>?@[\\\]^_`{|}~])\1+"
# period
ss = r"([.])\1{3,}"
return dfcol.str.replace(s, r'\1', regex=True).str.replace(ss, r'\1', regex=True)
def rm_word_all_punct(dfcol):
""" Remove words that are entirely punctuation """
punct = re.escape(string.punctuation)
ss = f"((?<=\s)|^)([{punct}]+)((?=\s)|$)"
return dfcol.str.replace(ss, r'', regex=True)
def convert_to_ascii(dfcol):
    """ Convert non-ascii characters to their ascii equivalent if it exists """
    # failures in the unidecode function replace characters with [?], which we must replace
    return dfcol.apply(unidecode_expect_ascii).str.replace(r'\[\?\]', ' ', regex=True)
def lower(dfcol):
""" Lowercase """
return dfcol.str.lower()
def rm_newline(dfcol):
""" Remove newlines (also re-removing any double spaces)"""
return rm_dbl_space(dfcol.str.replace(r"\n", " ", regex=True))
def rm_newline_except_end(dfcol):
""" Remove newlines, except those at string end
This function is useful for debugging.
"""
return rm_dbl_space(dfcol.str.replace(r"\n\s*(?!$)", " ", regex=True))
def rm_nonprintable(dfcol):
""" Remove all non printable characters """
remove_printables_str = f'[^{re.escape(string.printable)}]'
return rm_dbl_space(dfcol.str.replace(remove_printables_str, ' ', regex=True))
def rm_punct(dfcol):
    """ Replace all punctuation (and underscores) with spaces """
    return rm_dbl_space(dfcol.str.replace(r'[^\w\s]|_', ' ', regex=True))
def rm_mid_word_punct(dfcol):
    """ Aggressively remove punctuation mid-word """
    punct = re.escape(string.punctuation)
    mid_text_illegal_punct = re.escape('!"#$%&\()*+,/:;<=>?@[\\]^_`{|}~')  # all punctuation except - ' and .
    rstr = f'(?<=[a-zA-Z0-9]|[{punct}])[{mid_text_illegal_punct}]+(?=[a-zA-Z0-9]|[{punct}])'
    rstr2 = f'((?<=[a-zA-Z0-9][{punct}])[{punct}]+(?=[a-zA-Z0-9]))|((?<=[a-zA-Z0-9])[{punct}]+(?=[{punct}][a-zA-Z0-9]))'
    col = dfcol.str.replace(rstr, '', regex=True)
    col = col.str.replace(rstr2, '', regex=True)
    return col
def rm_punct_mid_punct(dfcol):
""" Remove punctuation that is bordered by other punctuation (even if there's a space in between)
The case this removes is the .,", which it converts to ."
"""
punct = re.escape(string.punctuation)
ss = f"(?<=[{punct}])([{punct}]+)(?=[{punct}])"
return rm_dbl_space(dfcol.str.replace(ss, r'', regex=True))
def rm_mid_word_punct(dfcol):
    """ Remove illegal punctuation mid-word (letters-only variant).
        NOTE: this redefinition shadows the earlier rm_mid_word_punct above.
    """
    punct = re.escape(string.punctuation)
    mid_text_illegal_punct = re.escape('!"#$%&\()*+,/:;<=>?@[\\]^_`{|}~')  # all punctuation except - ' and .
    rstr = f'(?<=[a-zA-Z]|[{punct}])[{mid_text_illegal_punct}]+(?=[a-zA-Z]|[{punct}])'
    col = dfcol.str.replace(rstr, '', regex=True)
    return col
def rm_email(dfcol):
return rm_dbl_space(dfcol.str.replace(r'([\w.\-]+@[\w\-.]+[.]([\w\-.]+)?[\w])', ' ', regex=True))
def rm_doi(dfcol):
# remove url before running this one
return rm_dbl_space(dfcol.str.replace(r'(((doi.?:?\s?)|(doi\.org/)|(https://doi\.org/))\s*)(10\.([A-Za-z0-9.\/-]+)?[A-Za-z0-9\/])', ' ', regex=True))
def rm_url(dfcol):
# run this after removing newline hyphenation (urls often break across pages)
return rm_dbl_space(dfcol.str.replace(
r'\b((http(s)?|ftp):\/\/)?(www\.)?(([-a-zA-Z0-9@:%_\+~#=]+\.){1,256})[a-z]{2,6}\b(([-a-zA-Z0-9@:%_\+~#?&//=.]+[-a-zA-Z0-9@%_\+~#?&//=])|[-a-zA-Z0-9@%_\+~#?&//=])?', ' ', regex=True))
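# e.g. rm_url turns "see https://example.com/page for details" into "see for details"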
def rm_phonenumber(dfcol):
    def replace_func(matchobj):
        g0 = matchobj.group(0)
        # keep short local forms such as "555-1234" (often codes, not phone numbers)
        if re.search(r'^\D?\s?\d{3,4}[- ]\d{4}$', g0.strip()):
            return g0
        # keep plain digit runs with no separators
        if not re.search(r"[-+ ]", g0.strip()):
            return g0
        # keep matches that start with a dash (likely numeric ranges)
        if g0.strip()[0] == "-":
            return g0
        else:
            return ' '
    return rm_dbl_space(dfcol.str.replace(r'(?:(\b|\s|^))(?<![-])(\+?\d{1,2} ?)?1?[-. ]?((\(\d{3}\))|(\d{3}))?[ .-]?\d{3}[ .-]?\d{4}(?:(\b|\s|$))', replace_func, regex=True))
def rm_8d_code_no_dash(dfcol):
    # NOTE: the replacement string was missing; ' ' is assumed, matching the
    # other rm_* helpers in this module
    return rm_dbl_space(dfcol.str.replace(r'([0-9A-Z]+[A-Z][0-9A-Z]+[0-9][0-9A-Z]{4,})|([0-9A-Z]+[0-9][0-9A-Z]+[A-Z][0-9A-Z]{4,})', ' ', regex=True))
def rm_beg_end_word_punct(dfcol):
""" Remove illegal punctuation from the beginning and end of words
Only ").,?!;: may exist at the end of a word
Only "( may exist at the beginning
Note that "word" here means letter (numbers will be unaffected).
Multiple such punctuation will be removed.
"""
punct = re.escape(string.punctuation)
beg_illegal_punct = re.escape('!.,#$%&)\*+,/:;<=>-?@\'[\\]^_`{|}~')
end_illegal_punct = re.escape('#$%&(\*+/<=>@[\\]\'-^_`{|}~')
rstr2 = f'((?=\s|^)[{beg_illegal_punct}]+(?=[a-zA-Z]|[{punct}])|(?<=[a-zA-Z]|[{punct}])[{end_illegal_punct}]+(?=\s|$))'
return dfcol.str.replace(rstr2, '', regex=True)
def sep_brackets(dfcol):
return rm_dbl_space(dfcol.str.replace(r'(\([^\)]+\))', ' \\1 ', regex=True))
def rm_mid_num_punct(dfcol):
    mid_num_punct = re.escape("""!"#$%&'()*+,-/:;<=>?@[\]^_`{|}~""")
    mid_num_punct_r = f'(((?<=[0-9])(\+\-|\-\+)(?=[0-9]))|((?<=[0-9])[{mid_num_punct}](?=[0-9])))'
    mid_num_punct2 = re.escape(""",""")
    # fix: this pattern previously reused mid_num_punct, leaving mid_num_punct2
    # unused; the comma set is assumed to be the intended character class here
    mid_num_punct2_r = f'(((?<=[0-9])(\+\-|\-\+)(?=[0-9]))|((?<=[0-9])[{mid_num_punct2}](?=[0-9])))'
    col = rm_dbl_space(dfcol.str.replace(mid_num_punct_r, ' \\1 ', regex=True))
    return rm_dbl_space(col.str.replace(mid_num_punct2_r, '\\1 ', regex=True))
def rm_triple_chars(dfcol):
return rm_dbl_space(dfcol.str.replace(r'((.)\2{2,})', r'\2', regex=True))
def rm_non_textual_punct(dfcol):
""" Aggressively remove almost all punctuation (except .,?:;- ) """
text_punct = '.,?:;-' #this also removes + <> $ %
nontext_punct = [char for char in string.punctuation if char not in text_punct]
nontext_punct = re.escape(''.join(nontext_punct))
return dfcol.str.replace(f'[{nontext_punct}]', '', regex=True)
def rm_newline_hyphenation(dfcol):
""" Remove hyphens at the end of lines (continuation character) and merge the text """
return dfcol.str.replace('([a-z])(-\s*\n\s*)([a-z])', r'\1\3', regex=True)
def merge_words(dfcol, en_dict=EN_DICT):
""" Merge a words with extra whitespace """
res = dfcol.str.split().apply(lambda x: compute_best_joining(x, en_dict))
res = res.str.join(' ')
return res
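# e.g. (sketch; requires pandas and the enchant "en_CA" dictionary):
#   merge_words(pd.Series(["th e on ly w ay"]))  ->  pd.Series(["the only way"])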
def merge_words_2(dfcol, en_dict=EN_DICT, special_cases=SPECIAL_CASES_2):
    """ Merge words that were split by extra whitespace, using the extended
        special-case table (fix: special_cases was previously accepted but
        never passed through)
    """
    res = dfcol.str.split().apply(lambda x: compute_best_joining(x, en_dict, special_cases))
    res = res.str.join(' ')
    return res
def merge_words_bkwd(dfcol, en_dict=EN_DICT):
""" Merge a words with extra whitespace (backward algorithm) """
res = dfcol.str.split().apply(lambda x: compute_best_joining_bkwd_recursive(x, en_dict))
res = res.str.join(' ')
return res
def compute_best_joining_recursive(s_split, en_dict, special_cases=SPECIAL_CASES):
""" Merge a set of words with extra whitespace (recursive version)
e.g 'th e on ly w ay' -> 'the only way'
"""
if len(s_split) == 0:
return []
if len(s_split) == 1:
return [s_split[0]]
# compute the longest legal word combo starting with word_0
new_first_word = s_split[0]
i = 1
    # consider the first word to always be legal (just in case nothing else is)
legal_first_words = [(s_split[0], 0)]
while i < len(s_split) and len(new_first_word) < 20:
new_first_word += s_split[i]
if en_dict.check(new_first_word):
legal_first_words.append((new_first_word, i))
if new_first_word in special_cases:
found_item = [legal_tuple for legal_tuple in legal_first_words if legal_tuple[0] == special_cases[new_first_word].split()[0]]
if len(found_item) > 0:
found_item = found_item[0]
else:
# something weird happened (E.g. o nt he -> on the)
found_item = (special_cases[new_first_word], i)
legal_first_words.append(found_item)
i += 1
# [(a, 0), (and, 1)]
longest_legal_word, index_of_last_captured = legal_first_words[-1]
# repeat for the next segment
    subsequent_legal_list = compute_best_joining_recursive(s_split[index_of_last_captured + 1:], en_dict, special_cases)
retval = [longest_legal_word] + subsequent_legal_list
return retval
def compute_best_joining(s_split, en_dict, special_cases=SPECIAL_CASES):
""" Merge a set of words with extra whitespace (iterative version)
e.g 'th e on ly w ay' -> 'the only way'
"""
retval = []
while len(s_split) >= 1:
if len(s_split) == 1:
retval.append(s_split[0])
break
        # special_cases is now taken from the function argument (default
        # SPECIAL_CASES) instead of being re-built on every loop iteration
# compute the longest legal word combo starting with word_0
new_first_word = s_split[0]
i = 1
        # consider the first word to always be legal (just in case nothing else is)
legal_first_words = [(s_split[0], 0)]
while i < len(s_split) and len(new_first_word) < 20:
new_first_word += s_split[i]
if en_dict.check(new_first_word):
legal_first_words.append((new_first_word, i))
if new_first_word in special_cases:
found_item = [legal_tuple for legal_tuple in legal_first_words if legal_tuple[0] == special_cases[new_first_word].split()[0]]
if len(found_item) > 0:
found_item = found_item[0]
else:
# something weird happened (E.g. o nt he -> on the)
found_item = (special_cases[new_first_word], i)
legal_first_words.append(found_item)
i += 1
# [(a, 0), (and, 1)]
longest_legal_word, index_of_last_captured = legal_first_words[-1]
# repeat for the next segment
s_split = s_split[index_of_last_captured + 1:]
retval.append(longest_legal_word)
return retval
def compute_best_joining_bkwd_recursive(s_split, en_dict):
""" Merge a set of words with extra whitespace (recursive version, backward algorithm)
e.g 'th e on ly w ay' -> 'the only way'
"""
if len(s_split) == 0:
return []
if len(s_split) == 1:
return [s_split[0]]
    # compute the longest legal word combo ending with the last word
    new_first_word = s_split[-1]
    i = len(s_split) - 2
    # consider the last word to always be legal (just in case nothing else is)
    legal_first_words = [(s_split[-1], len(s_split)-1)]
while i >= 0 and len(new_first_word) < 20:
#old_new_first_word = new_first_word
new_first_word = s_split[i] + new_first_word
if en_dict.check(new_first_word):
legal_first_words.append((new_first_word, i))
i -=1
# [(a, 0), (and, 1)]
longest_legal_word, index_of_last_captured = legal_first_words[-1]
# repeat for the next segment
    subsequent_legal_list = compute_best_joining_bkwd_recursive(s_split[0:index_of_last_captured], en_dict)
retval = subsequent_legal_list + [longest_legal_word]
return retval
def rm_stopwords_spacy(dfcol):
    """ Remove spaCy stopwords, word by word.
        NOTE: shadowed by the preprocessing_str-based redefinition below.
    """
    from spacy.lang.en.stop_words import STOP_WORDS
    col = dfcol
    for word in STOP_WORDS:
        # rf-string keeps \b as a regex word boundary; the original plain
        # f-string turned it into a literal backspace character
        col = col.str.replace(rf'\b{word}\b', '', regex=True)
    return col
def tokenize_spacy_lg(dfcol):
return dfcol.apply(preprocessing_str.tokenize_spacy_lg)
def rm_stopwords_spacy(dfcol):
return dfcol.apply(preprocessing_str.rm_stopwords_spacy)
def rm_slash(dfcol):
return rm_dbl_space(dfcol.str.replace(r'/', ' ', regex=True))
def rm_hyphen(dfcol):
return rm_dbl_space(dfcol.str.replace(r'[-]', ' ', regex=True))
def add_space_to_bracket(dfcol):
    col = dfcol.str.replace(r'\(', ' ( ', regex=True)
    col = col.str.replace(r'\)', ' ) ', regex=True)
    return rm_dbl_space(col)
def squish_punct(dfcol):
    """ Collapse runs of two or more punctuation characters, keeping a small
        whitelist of legitimate combinations. (Fix: several whitelist items
        were missing separating commas and were silently concatenated.)
    """
    def replacement_func(matchobj):
        g0 = matchobj.group(0)
        if len(g0) < 5:
            if g0 in [".,", "(\")", "\")", ",\"", ";\"", "\",", "\".", "),", ").",
                      ".)", "%;", "%.", "%,", ".\"", "\";", "):", "?)", "%:"]:
                return g0
            if g0 in ["(?)", "(%)"]:
                return g0
        if len(g0.split()) == 0:
            return ""
        m0 = g0.split()[0]
        if m0[-1] in [".", ",", ";"]:
            return m0[-1]
        return g0[0]
    punct = re.escape(string.punctuation)
    ms = f"([{punct}]{{2,}})"
    return rm_dbl_space(dfcol.str.replace(ms, replacement_func, regex=True))
def squish_spaced_punct_no_bracket(dfcol):
    """ Collapse runs of 3+ space-separated punctuation marks into the first mark """
    def replacement_func(matchobj):
        g2 = matchobj.group(1)
        return g2[0] + " "
    ms = r"(([!\"\#\$%\&'\*\+,\-\./:;<=>\?@\[\\\]\^_`\{\|\}\~]\s+){3,})"
    return rm_dbl_space(dfcol.str.replace(ms, replacement_func, regex=True))
def add_space_to_various_punct(dfcol):
    """ Surround various punctuation characters with spaces """
    return rm_dbl_space(dfcol.str.replace(r"([+=\[\]\(\)\/\-*:])", ' \\1 ', regex=True))
def rm_deg(dfcol):
    """ Remove tokens containing a degree marker, e.g. coordinates like 45deg30N """
    return rm_dbl_space(dfcol.str.replace(r"\S*[0-9]deg[0-9NSEW]\S*", " ", regex=True))
|
module Hamming
import Data.Vect
public export
data Nucleotide = A | C | G | T
public export
implementation Eq Nucleotide where
  -- a sample completion of the exercise stub (one of several possible solutions)
  A == A = True
  C == C = True
  G == G = True
  T == T = True
  _ == _ = False
export
hamming_distance : Eq a => Vect n a -> Vect n a -> Nat
hamming_distance [] [] = 0
hamming_distance (x :: xs) (y :: ys) = (if x == y then 0 else 1) + hamming_distance xs ys
export
version : String
version = "1.0.0"
|
\title{Enumeration of the Building Game}
\author{
Daniel Johnson \& Govind Menon\\
Division of Applied Mathematics\\
Brown University
}
\date{\today}
\documentclass[12pt]{article}
\usepackage{graphicx,amsmath,mathtools,bbm,amsthm,enumerate}
\usepackage{mathrsfs}
%\usepackage[subnum]{cases}
%\usepackage[titletoc,toc,title]{appendix}
\newtheorem{mythm}{Theorem}
\newtheorem{mylem}{Lemma}
\newtheorem{mycor}{Corollary}
\newtheorem{mydef}{Definition}
\newcommand{\colorA}{white}
\newcommand{\colorB}{black}
\newcommand{\colorAsm}{w}
\newcommand{\colorBsm}{b}
\newcommand{\poly}{$\mathscr{P}$}
\newcommand{\faceset}{F\left(\mathscr{P}\right)}
\newcommand{\spc}{ }
\newcommand{\xj}{$x^j$}
\newcommand{\xk}{$x^k$}
\newcommand{\Sjk}{$S_{jk}$}
\newcommand{\Skj}{$S_{kj}$}
\DeclareMathOperator{\diag}{diag}
\begin{document}
\maketitle
\begin{abstract}
The Building Game is a sequential coloring process on polyhedra. We enumerate the Building Game state space for all polyhedra in the Platonic, Archimedean, and Catalan solid classes of up to 30 faces. By putting a probability distribution on each step of the Building Game process, a distribution is induced on the entire state space. With the help of a finite group theoretic identity, we find the explicit form of these distributions. Finally, we examine the properties of the resulting distributions.
\end{abstract}
\section{Introduction}
The Building Game (BG) was first considered by Zlotnick~\cite{Zlotnick1994} as a model for the assembly of polyhedral viral capsids. We formalize the idea as a sequential coloring process that progresses from a polyhedron \poly\spc with each face colored \colorA, through a number of intermediate states each having a mix of \colorA\spc and \colorB\spc faces, and ending with all of the faces colored \colorB.
\begin{mydef}
A Building Game \textbf{intermediate} $x$ is a function from the faces of \poly\spc, $F\left(\mathscr{P}\right)$, to a color in $\left\{\colorA,\colorB\right\}$ such that the set $\left\{f_m \in F\left(\mathscr{P}\right) : x\left(f_m\right) = \colorB\right\}$ is edge connected, together with the equivalence relation $x \sim x'$ whenever there is an element $g$ of \poly's rotation group $G$ satisfying $x(f_m) = x'(g.f_m)$ for every $f_m \in F(\mathscr{P})$.
\end{mydef}
For ease of exposition, we use the notational shorthand $\left(x\right)_m$ for $x\left(f_m\right)$ and $x = g.x'$ when $x(f_m) = x'(g.f_m)$ for every $f_m \in F(\mathscr{P})$. Additionally, we denote the intermediate satisfying $\left(x\right)_m = \colorA$ for all $f_m \in \faceset$ as $x^\colorAsm$ and similarly $x^\colorBsm$ is the intermediate with $\left(x\right)_m = \colorB$ for all $f_m \in \faceset$. The function counting the number of \colorB\spc faces an intermediate has is denoted $h\left(x\right) \doteq |\left\{f_m \in \faceset : \left(x\right)_m = \colorB\right\}|$.
\begin{mydef}
Two intermediates $x^j$ and $x^k$ are \textbf{connected} ($x^j \leftrightarrow x^k$) if $\left(x^j\right)_m = \left(x^k\right)_m$ for all $f_m \in \faceset$ except for exactly one face $f_n$ that has $\left(x^j\right)_n \neq \left(x^k\right)_n$.
\end{mydef}
\begin{mydef}
A Building Game \textbf{pathway} is a sequence of intermediates $x^{p_0}, x^{p_1}, x^{p_2}, \dots, x^{p_N}$ such that $x^{p_0} = x^\colorAsm$, $x^{p_N} = x^\colorBsm$, $x^{p_i}$ is connected to $x^{p_{i+1}}$ and $h\left(x^{p_i}\right) = i$.
\end{mydef}
In this way it is useful to think of intermediates as connected if it is possible to color one face of the first intermediate to get the second, and of a pathway as a sequence of these connections between $x^\colorAsm$ and $x^\colorBsm$. Figure~\ref{fig:DodecBG} shows a Building Game pathway for the dodecahedron using Schlegel diagrams. The pathway has 13 intermediates since there must be exactly one intermediate $x^{p_i}$ satisfying $h\left(x^{p_i}\right) = i$ for each $i = 0,1,2,\dots,12$.
\begin{figure}[ht]
\caption{One Building Game pathway on the dodecahedron.}
\label{fig:DodecBG}
\end{figure}
With many pairs of connected intermediates, we organize these relations in a graph.
\begin{mydef}
The Building Game \textbf{state space} for a polyhedron \poly\spc is a graph in which the nodes are \poly's intermediates and a graph edge exists between two intermediates if and only if they are connected.
\end{mydef}
When the intermediates are partitioned by their value of $h$, it is natural to arrange the state space as a tiered graph according to this partition. Figure~\ref{fig:CubeSS} shows the Building Game state space for the cube. As seen, each tier has intermediates with the same number of \colorB faces and connections thus exist with intermediates that are either in the tier directly above or below them. We can also see that there are three distinct pathways contained in the state space.
\begin{figure}[ht]
\caption{The Building Game state space of the cube.}
\label{fig:CubeSS}
\end{figure}
Interestingly, it is not the case that the recoloring of each face of \xj\spc results in a distinct intermediate.
\begin{mydef}
The number of different faces $\left|\left\{f_m \in \faceset : x^j + e^m \in \left[x^k\right]\right\}\right|$ of \xj\spc that can be colored to form \xk\spc is called the \textbf{degeneracy number} \Sjk.
\end{mydef}
It is important to note that in general the degeneracy number is not symmetric, i.e. \Sjk$\neq$\Skj\spc for some connections \xj\spc$\leftrightarrow$\spc\xk\spc in the state space. Figures~\ref{fig:DodecBG} and~\ref{fig:CubeSS} show the forward and backward degeneracy numbers for each connection.
\subsection{Related Work}
--Like polyominos on polyhedra
\subsection{Applications}
--Viral capsid assembly
--Self-assembly of molecular cages
--Self assembly for manufacturing purposes
%~\cite{Endres2005}
\subsection{Paper Overview}
-- Summary of subsequent sections
\section{Enumerative Results}
As we consider polyhedra with more and more faces, there is a combinatorial explosion in the number of intermediates in the state space. While the 6-faced cube state space has only 8 nodes and 9 edges, the 20-faced icosahedron state space has 2,649 nodes and 17,241 edges, and the 26-faced truncated cuboctahedron state space has 1,525,605 nodes and 17,672,377 edges. Figure~\ref{fig:bgtable} details state space sizes of all polyhedra in the Platonic, Archimedean, and Catalan solid classes of up to 32 faces.
Also something about pathway statistics.
\begin{figure}[ht]
\scalebox{0.6}{
%{\footnotesize
\begin{tabular}{ l | c | c | c | c || r | r | r}
Polyhedra Name & Class & F$\left(\mathscr{P}\right)$ & E$\left(\mathscr{P}\right)$ & V$\left(\mathscr{P}\right)$ & Intermediates & Connections & Pathways \\
\hline
Tetrahedron & P & 4 & 6 & 4 & 5 & 4 & 1\\
Cube & P & 6 & 12 & 8 & 9 & 10 & 3\\
Octahedron & P & 8 & 12 & 6 & 15 & 22 & 14\\
Dodecahedron & P & 12 & 30 & 20 & 74 & 264 & 17,696 \\
Icosahedron & P & 20 & 30 & 12 & 2,650 & 17,242 & 57,396,146,640\\
Truncated Tetrahedron & A & 8 & 18 & 12 & 29 & 65 & 402\\
Cuboctahedron & A & 14 & 24 & 12 & 341 & 1,636 & 10,170,968\\
Truncated Cube & A & 14 & 36 & 24 & 500 & 2,731 & 101,443,338 \\
Truncated Octahedron & A & 14 & 36 & 24 & 556 & 3,071 & 68,106,377\\
Rhombicuboctahedron & A & 26 & 48 & 24 & 638,851 & 6,459,804 & 16,494,392,631,838,879,380\\
Truncated Cuboctahedron & A & 26 & 72 & 48 & 1,525,605 & 17,672,377 & ? \\
Icosidodecahedron & A & 32 & 60 & 30 & ? & ? & ?\\
Truncated Dodecahedron & A & 32 & 90 & 60 & ? & ? & ? \\
Truncated Icosahedron & A & 32 & 90 & 60 & ? & ? & ?\\
Triakis Tetrahedron & C & 12 & 18 & 8 & 99 & 319 & 38,938\\
Rhombic Dodecahedron & C & 12 & 24 & 14 & 128 & 494 & 76,936\\
Triakis Octahedron & C & 24 & 36 & 14 & 12,749 & 81,297 & 169,402,670,046,670\\
Tetrakis Hexahedron & C & 24 & 36 & 14 & 50,768 & 394,278 & 4,253,948,297,210,346\\
Deltoidal Icositetrahedron & C & 24 & 48 & 26 & 209,676 & 1,989,549 & ? \\
Pentagonal Icositetrahedron & C & 24 & 60 & 38 & 345,939 & 3,544,988 & 2,828,128,000,716,774,492\\
Rhombic Triacontahedron & C & 30 & 60 & 32 & ? & ? & 5,266,831,101,345,821,968\\
\hline
\end{tabular}
}
\caption{Table of polyhedra in the Platonic (P), Archimedean (A), and Catalan (C) solid classes of up to 32 faces and their Building Game state space statistics.}
\label{fig:bgtable}
\end{figure}
\begin{figure}[ht]
\scalebox{0.6}{
%{\footnotesize
\begin{tabular}{ l | c | c | c | c || r | r | r}
Polyhedra Name & Class & F$\left(\mathscr{P}\right)$ & E$\left(\mathscr{P}\right)$ & V$\left(\mathscr{P}\right)$ & Intermediates & Connections & Pathways \\
\hline
Tetrahedron & P & 4 & 6 & 4 & 5 & 4 & 1\\
Cube & P & 6 & 12 & 8 & 8 & 8 & 2\\
Octahedron & P & 8 & 12 & 6 & 12 & 12 & 14 \\
Dodecahedron & P & 12 & 30 & 20 & 53 & 156 & 2166\\
Icosahedron & P & 20 & 30 & 12 & 468 & 1984 & 105999738\\
Truncated Tetrahedron & A & 8 & 18 & 12 & 22 & 42 & 174\\
Cuboctahedron & A & 14 & 24 & 12 & 137 & 470 & 477776\\
Truncated Cube & A & 14 & 36 & 24 & 248 & 1002 & 5232294\\
Truncated Octahedron & A & 14 & 36 & 24 & 343 & 1466 & 5704138\\
Rhombicuboctahedron & A & 26 & 48 & 24 & 70836 & 462149 & 48399693494788840\\
Truncated Cuboctahedron & A & 26 & 72 & 48 & ? & ? & ?\\
Icosidodecahedron & A & 32 & 60 & 30 & ? & ? & ?\\
Truncated Dodecahedron & A & 32 & 90 & 60 & ? & ? & ?\\
Truncated Icosahedron & A & 32 & 90 & 60 & ? & ? & ?\\
Triakis Tetrahedron & C & 12 & 18 & 8 & 49 & 116 & 5012\\
Rhombic Dodecahedron & C & 12 & 24 & 14 & 68 & 196 & 6258\\
Triakis Octahedron & C & 24 & 36 & 14 & 667 & 2383 & 15255459\\
Tetrakis Hexahedron & C & 24 & 36 & 14 & 4220 & 21079 & 5854799360107\\
Deltoidal Icositetrahedron & C & 24 & 48 & 26 & ? & ? & ?\\
Pentagonal Icositetrahedron & C & 24 & 60 & 38 & 95127 & 654537 & 5607231936129109\\
Rhombic Triacontahedron & C & 30 & 60 & 32 & 97368 & 697623 & 6889989896241902854\\
\hline
\end{tabular}
}
\caption{Table of polyhedra in the Platonic (P), Archimedean (A), and Catalan (C) solid classes of up to 32 faces and their Building Game state space shellability statistics.}
\label{fig:bgtable_shell}
\end{figure}
\subsection{Bounds and Asymptotics}
Have upper, but what about lower?
\subsection{Methods}
\section{The Building Game as a Stochastic Process}
\label{sec:Prob}
Since the Building Game is a sequential process with several choices at each step, it is natural to consider it as a stochastic process. By putting a distribution on all possible faces that can be colored \colorB\spc at each step of the Building game, a distribution on the space of pathways is implicitly defined. Thus, for a choice of this transition rule, we can ask questions about the likelihood of the different pathways.
--Math and graphical results about putting a distribution on pathways
\subsection{Forward and Backward Transitions}
If we allow faces be changed both from \colorA\spc to \colorB\spc and from \colorB\spc to \colorA, the process consists of transitions from intermediate to intermediate along state space connections. By specifying a distribution on these transitions, it will induce a stationary measure on the state space.
We define the Markov process $X_t$ by the transition rate matrix $Q$, with the heuristic that the rate of transition to an intermediate \xk\spc from an intermediate \xj\spc should be proportional to the number of faces of \xj\spc that can be colored to reach \xk. For this reason, we include the degeneracy number \Sjk\spc as a factor in the transition rate matrix. Furthermore, we model the process after an energetic model in which each intermediate has an energy and, to transition between intermediates, an energy barrier $E_{jk} = E_{kj}$ must be overcome.
%\begin{align}
%\label{eq:TransitionProbability}
% P_{jk} = \frac{1}{z_j}S_{jk}\rh
%\end{align}
\begin{align}
\label{eq:TransitionRate}
Q_{jk} &= S_{jk}e^{-\beta\left(E_{jk} - E_j\right)} \\
Q_{jj} &= -z_j
\end{align}
Here, $z_j \doteq \sum_{\ell: \ell \neq j} S_{j\ell}e^{-\beta\left(E_{j\ell} - E_j\right)}$ is the rate at which the process leaves \xj.
\begin{mythm}
\label{thm:StatDist}
If the transition rate matrix $Q$ can be decomposed as $Q = DC$ where $D$ is diagonal with each entry of the diagonal positive and $C$ is a non-negative symmetric matrix with $C_{jk} > 0$ if and only if $x^j$ and $x^k$ are connected, then $X_t$ has the unique stationary distribution $\pi = \diag\left(D^{-1}\right)$.
\end{mythm}
\begin{proof}
First, we show $Q$ and $\pi$ satisfy detailed balance.
\begin{align}
\pi_jQ_{jk} &= \left(\frac{1}{D_{jj}}\right)\left(D_{jj}C_{jk}\right) \\
&= C_{jk} \\
&= C_{kj} \\
&= \left(\frac{1}{D_{kk}}\right)\left(D_{kk}C_{kj}\right) \\
&= \pi_kQ_{kj}
\end{align}
-- Prove aperiodicity
-- Prove positive recurrence
\end{proof}
In order to use theorem~\ref{thm:StatDist} to find the stationary distribution for the transition rates in equation~\ref{eq:TransitionRate}, we must be able to decompose the degeneracy number \Sjk\spc to fit the template of $C$ and $D$. In the following section we derive group theoretic identities to show that this is possible.
\subsection{Hitting Times}
\begin{align}
\tau^{A}_{j} &\doteq \inf\left\{t \geq 0 : X_t \in A, X_0 = x^j\right\}
\end{align}
\begin{align}
\nu^{A}_{j} &\doteq \inf\left\{n \geq 0 : Y_n \in A, Y_0 = x^j\right\}
\end{align}
For $j \not\in A$.
\begin{align}
E\left[\tau^{A}_{j}\right] &= E\left[E\left[\tau^{A}_{j} | Y_1 \right]\right] \\
&= E\left[ Exp\left(z_j\right) + \tau^{A}_{Y_1} \right] \\
&= \frac{1}{z_j} + E\left[\sum_{k}\tau^{A}_{Y_1}\mathbbm{1}_{Y_1 = k}\right] \\
&= \frac{1}{z_j} + \sum_{k: k\neq j}E\left[\tau^{A}_{k}\right] P\left(Y_1 = k\right) \\
&= \frac{1}{z_j}\left(1 + \sum_{k: k\neq j}q_{jk}E\left[\tau^{A}_{k}\right]\right) \\
\sum_{k}q_{jk}E\left[\tau^{A}_{k}\right] &= 1 \\
\end{align}
For $j \in A$.
\begin{align}
E\left[\tau^{A}_{j}\right] &= 0 \\
\end{align}
As a linear system:
\begin{align}
\left(\diag\left(\mathbbm{1}_A\right) - \diag\left(\mathbbm{1}_{A^c}\right)Q\right)E\left[\tau^{A}\right] =\mathbbm{1}_{A^c}\\
\end{align}
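For example, if a single intermediate $x^j \not\in A$ can only jump into $A$, the $j$th row of this system reduces to $z_jE\left[\tau^{A}_{j}\right] = 1$, recovering the mean $\frac{1}{z_j}$ of the exponential holding time at $x^j$.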
\begin{align}
\psi_j^A\left(t\right) &\doteq P\left(\tau^A_j \leq t\right) \\
\psi_j^A\left(0\right) &= \mathbbm{1}_{j\in A} \\
\frac{d\psi_j^A}{dt}\left(t\right) &= 0 \quad \forall j \in A
\end{align}
For $j \not\in A$.
\begin{align}
\psi_j^A\left(t\right) &\doteq P\left(\tau^A_j \leq t\right) \\
&= \sum_k P\left(\tau^A_j \leq t | Y_1 = x^k\right) P\left(Y_1 = x^k\right) \\
&= \frac{1}{z_j}\sum_{k: k \neq j} q_{jk} P\left(Exp\left(z_j\right) + \tau^A_k \leq t\right) \\
&= \frac{1}{z_j}\sum_{k: k \neq j} q_{jk} \int^t_0 P\left(\tau^A_k \leq t - s\right) z_j e^{-z_j s} ds \\
&= \sum_{k: k \neq j} q_{jk} \int^t_0\psi^A_k\left(t-s\right)e^{-z_j s} ds \\
&= \sum_{k: k \neq j} q_{jk} \int^t_0\psi^A_k\left(r\right)e^{-z_j\left(t-r\right)} dr \\
e^{z_jt}\psi^A_j\left(t\right) &= \sum_{k: k \neq j} q_{jk} \int^t_0 e^{z_jr}\psi^A_k\left(r\right) dr \\
e^{z_jt}\frac{d\psi^A_j}{dt} + z_j e^{z_j t} \psi^A_j\left(t\right) &= \sum_{k: k \neq j} q_{jk} e^{z_jt}\psi^A_k\left(t\right) \\
\frac{d\psi^A_j}{dt} &= \sum_{k} q_{jk} \psi^A_k\left(t\right)
\end{align}
Combining both cases, we get the linear system and solution.
\begin{align}
\frac{d\psi^A}{dt} &= \diag\left(\mathbbm{1}_{A^c}\right)Q\psi^A \\
\psi^A\left(0\right) &= \mathbbm{1}_{A} \\
\psi^A\left(t\right) &= e^{\diag\left(\mathbbm{1}_{A^c}\right)Qt} \mathbbm{1}_{A} \\
\end{align}
This is the solution for the CDF of the stopping time $\tau^A$, but we can also compute the PDF explicitly for $t > 0$.
\begin{align}
p\left(\tau^A = t\right) &= \frac{d\psi^A}{dt} \\
&= \diag\left(\mathbbm{1}_{A^c}\right)Q\psi^A
\end{align}
\section{A Finite Geometric Result}
Since we define Building Game intermediates as rotationally unique from each other, it is useful to think about the problem in the context of $\mathscr{P}$'s rotational symmetry group $G \doteq G\left(\mathscr{P}\right)$ and group actions. For an intermediate $x^j$, the number of symmetries $r_j$ is the order of the stabilizer subgroup $G_{x^j} \doteq \left\{g \in G : g.x^j = x^j\right\}$ of $G$ that fixes $x^j$. Suppose $x^j$ and $x^k$ are connected in the state space and $\varphi$ is one of the $S_{jk}$ faces that, when added to $x^j$, forms $x^k$. We say $x^j + \varphi = x^k$. The degeneracy number $S_{jk}$ can then be expressed as the order of the orbit $\left(G_{x^j}\right).\varphi$ of $\varphi$ with respect to $x^j$'s stabilizer subgroup. Analogously, we define the reverse degeneracy number as $S_{kj} \doteq \left|\left(G_{x^k}\right).\varphi\right|$
\begin{mylem}
\label{lem:I}
For Building Game intermediates $x^j$ and $x^k$ connected in the state space and a face $f_m \in \faceset$ satisfying $x^j + e^m = x^k$, the stabilizer subgroup $G_{x^j,e^m}$ that fixes both $x^j$ and $e^m$ is the same stabilizer subgroup $G_{x^k,e^m}$ that fixes $x^k$ and $e^m$.
\end{mylem}
\begin{proof}
\begin{align}
G_{x^j,e^m} &\doteq \left\{g \in G | g.x^j = x^j, g.e^m = e^m \right\} \\
&= \left\{g \in G | g.\left(x^k - e^m\right) = x^k - e^m, g.e^m = e^m \right\} \\
&= \left\{g \in G | g.x^k = x^k, g.e^m = e^m \right\} \\
&\doteq G_{x^k,e^m}
\end{align}
\end{proof}
\begin{mythm}
\label{thm:J}
For two Building Game intermediates $x^j$ and $x^k$ that are connected in the BG state space, $r_kS_{jk} = r_jS_{kj}$.
\end{mythm}
\begin{proof}
Let $e^m$ be a face such that $x^k = x^j + e^m$. Then, by the orbit-stabilizer theorem, Lagrange's Theorem and lemma~\ref{lem:I} we have the following~\cite{Rotman1995}.
\begin{align}
\frac{r_j}{S_{jk}} &\doteq \frac{\left|G_{x^j}\right|}{\left|\left(G_{x^j}\right).e^m\right|} \\
&= \left[G_{x^j} : \left(G_{x^j}\right).e^m \right] \\
&= \left|G_{x^j,e^m}\right| \\
&= \left|G_{x^k,e^m}\right| \\
&= \left[G_{x^k} : \left(G_{x^k}\right).e^m \right] \\
&= \frac{\left|G_{x^k}\right|}{\left|\left(G_{x^k}\right).e^m\right|} \\
&\doteq \frac{r_k}{S_{kj}}
\end{align}
The result $r_kS_{jk} = r_jS_{kj}$ follows.
\end{proof}
\section{Stationarity}
\begin{mythm}
\label{thm:E}
The Markov process $X_t$ defined by the transition rate matrix $Q$ in equation~\ref{eq:TransitionRate} admits the unique stationary distribution $\frac{1}{zr_j}e^{-\beta E_j}$ where $z \doteq \sum_\ell \frac{1}{r_\ell}e^{-\beta E_\ell}$ is the partition function.
\end{mythm}
\begin{proof}
We take $C_{jk} \doteq \frac{S_{jk}}{zr_j}e^{-\beta E_{jk}}$ and notice that it is symmetric by theorem~\ref{thm:J}. With $D_{jj} \doteq zr_je^{\beta E_j}$ we have our partition.
\begin{align}
Q_{jk} &= S_{jk}e^{-\beta\left(E_{jk} - E_j\right)} \\
&= \left(zr_je^{\beta E_j}\right) \left(\frac{S_{jk}}{zr_j}e^{-\beta E_{jk}}\right) \\
&= D_{jj}C_{jk}
\end{align}
Thus, by theorem~\ref{thm:StatDist}, $\pi_j = \frac{1}{D_{jj}} = \frac{1}{zr_j}e^{-\beta E_j}$.
\end{proof}
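As an illustration, in the special case where every intermediate has the same energy $E_j \equiv E$, the Boltzmann factors cancel and the stationary distribution reduces to $\pi_j \propto \frac{1}{r_j}$, so less symmetric intermediates carry more stationary mass.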
\section{Discussion}
\subsection{Nonenumerative Approaches}
\section*{Acknowledgments}\label{ackowledgements}
Supported by NSF grants DMS 07-48482 and EFRI 10-22638
\section{Potential Citations (temp)}
~\cite{Coxeter1963}
~\cite{Grunbaum2003}
~\cite{Cromwell1997}
~\cite{Ziegler1995}
~\cite{Gidas1995}
~\cite{Eden1961}
~\cite{Grayson2012}
\bibliographystyle{plain}
%\nocite{*}
\bibliography{Master}
%\appendix
%\section{Proof of Theorem ???}
%\section{Proof of Theorem ???}
\end{document}
%%%%%%%%%%%%%%%%%%%% CCCCUUUUTTTT %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{A Curious Observation}
As part of the exploration of the nature of the degeneracy numbers $S_{jk}$ to find a stationary distribution, an interesting phenomenon was noticed. Given a closed path $x^{k_0} \to x^{k_1} \to \cdots \to x^{k_n}$ through the state space, starting at $x^{k_0}$ and ending back at $x^{k_n} \doteq x^{k_0}$, the identity given in equation~\ref{eq:DegenIdent} was observed to hold independent of the particular closed path.
\begin{align}
\prod_{i=1}^n \frac{S_{k_i,k_{i-1}}}{S_{k_{i-1},k_i}} = 1
\label{eq:DegenIdent}
\end{align}
At first this relation between ratios of degeneracy numbers along the path was noticed in the cube's state space, where it can easily be seen by examining figure~\ref{fig:CubeSS}. To test this conjecture more fully, it was subsequently verified on our library of state spaces for other polyhedra. This exploration led to the discovery of a more general relation between intermediates that is described below.
In an attempt to get to the heart of the relation in equation~\ref{eq:DegenIdent}, we compiled a list of different geometric and combinatorial statistics for each state space connection $x_j \leftrightarrow x_k$. These statistics included the number of faces in $x_j$ and $x_k$, the orders $r_j$ and $r_k$ of the rotation groups of $x_j$ and $x_k$, and a few others. With the ansatz that there may be a multiplicative relation between the degeneracy number $S_{jk}$ and some of these $d$ statistics $Z_{jk}^{\left(i\right)} \in \mathbbm{Z}^d$ for each connection, the following linear regression model was used.
\begin{align}
\log\left(S_{jk}\right) = \sum_{i=1}^d \beta^{\left(i\right)}\log\left(Z_{jk}^{\left(i\right)}\right)
\label{eq:LinReg}
\end{align}
The regression coefficients $\boldsymbol{\beta} \in \mathbbm{R}^d$ that minimize the least-squares residual were computed, and the relation turned out to be perfect in the sense that equation~\ref{eq:LinReg} held exactly for all connections tested. The three non-zero entries in $\boldsymbol{\beta}$ corresponded to $r_j$, $r_k$ and $S_{kj}$. This result meant that $\log\left(S_{jk}\right) = \log\left(S_{kj}\right) + \log\left(r_{j}\right) - \log\left(r_{k}\right)$, or
\begin{align}
S_{jk} = \frac{r_jS_{kj}}{r_k}
\label{eq:RSConjecture}
\end{align}
for all connections we tested. This leads to the obvious conjecture that equation~\ref{eq:RSConjecture} holds for any connection in any Building Game state space.
In fact, if this is the case, it would explain why equation~\ref{eq:DegenIdent} holds.
\begin{align}
\prod_{i=1}^n \frac{S_{k_i,k_{i-1}}}{S_{k_{i-1},k_i}} &= \prod_{i=1}^n \frac{r_{k_i}}{r_{k_{i-1}}} \\
&= \frac{r_{k_n}}{r_{k_{0}}} \\
&= \frac{r_{k_0}}{r_{k_{0}}} \\
&= 1
\end{align}
%%%%%%%
The process begins with each face of a polyhedron \poly\spc all colored the same color, say \colorA. A face is then chosen and its color is changed to a second color, \colorB. From there, at each step a \colorA\spc face that is edge-adjacent to a \colorB\spc face is chosen and recolored \colorB. The process continues until all of \poly's faces are \colorB. We show one instance of the Building Game process for the dodecahedron in figure~\ref{fig:DodecBG}.
Each possible coloring of the polyhedron from the Building Game is referred to as an \textbf{intermediate}. Since we assume each face of the same color is otherwise indistinguishable, there is a rotational equivalence class on intermediates such that two intermediates are equivalent if the first is a rotation of the second.
\begin{figure}[ht]
\caption{One instance of the Building Game on the dodecahedron.}
\label{fig:DodecBG}
\end{figure}
We define the \textbf{state space} of the Building Game to be a graph in which the nodes are all of the possible intermediates allowed by the BG. Connections exist between two intermediates \xj\spc and \xk\spc if it is possible to color a single face of \xj\spc to form \xk. Interestingly, it is not the case that the recoloring of each face of \xj\spc maps to a distinct intermediate. Thus, the number of different faces of \xj\spc that can be colored to form \xk\spc is called the \textbf{degeneracy number} \Sjk. It is important to note that in general the degeneracy number is not symmetric, i.e. \Sjk$\neq$\Skj\spc for some connections \xj\spc$\leftrightarrow$\spc\xk\spc in the state space.
Since the state space can be partitioned by the number of \colorB\spc faces each intermediate has, it is natural to view the state space as a tiered graph in which intermediates in each tier have the same number of \colorB\spc faces. Organized this way, intermediates can only connect to those in the tier above or below them. A \textbf{pathway} in the state space is a sequence of intermediates $x^{k_0} \to x^{k_1}\to \cdots \to x^{k_F}$ connected in the state space such that $x^{k_0}$ is the intermediate with all \colorA\spc faces, $x^{k_F}$ is the intermediate with all \colorB\spc faces, and $x^{k_m}$ has $m$ \colorB\spc faces.
%%%%%%%%
To find the stationary measure of this process, we check if it is possible for our transition measure to satisfy the detailed balance equation $\pi_jP_{jk} = \pi_kP_{kj}$. As seen in equation~\ref{eq:DBTry}, for detailed balance to be satisfied, we must find a way to separate the ratio $\frac{S_{jk}}{S_{kj}}$ into a part depending only on $j$ terms and a part depending only on $k$ terms.
\begin{align}
\frac{\pi_k}{\pi_j} &= \frac{P_{jk}}{P_{kj}} \\
&= \frac{S_{jk}\rho_{jk}z_k}{S_{kj}\rho_{kj}z_j} \\
&= \frac{z_k}{z_j}\frac{S_{jk}}{S_{kj}} \label{eq:DBTry}
\end{align}
While detailed balance is not a necessary condition for the existence of a stationary distribution, along with positive recurrence of the process, it is sufficient and would provide the exact form of the necessarily unique stationary distribution.
%%%%%%%%%%
As stated in section~\ref{sec:Prob}, if we can find a distribution $\pi$ such that the detailed balance equation $\pi_jP_{j,k} = \pi_kP_{k,j}$ is satisfied, we know that $\pi$ is the unique stationary distribution under $P_{jk}$. With theorem~\ref{thm:J} we find that $\frac{S_{jk}}{S_{kj}} = \frac{r_j}{r_k}$ is indeed separable as conjectured in equation~\ref{eq:DBTry}. This allows us to find the stationary distribution.
\begin{mythm}
\label{thm:E}
The Markov chain $X_t$ defined by the transition rule $P_{jk}$ in equation~\ref{eq:TransitionProbability} admits the unique stationary distribution $\pi_j = \frac{1}{z}\left(\frac{z_j}{r_j}\right)$ where $z \doteq \sum_i \frac{z_i}{r_i}$ is the partition function.
\end{mythm}
\begin{proof}
It suffices to show detailed balance and that each state is positively recurrent. By our definitions of $\pi$ and $P$ along with Theorem~\ref{thm:J}, we find detailed balance.
\begin{align}
\pi_jP_{jk} &= \frac{z_j}{z r_j}\frac{1}{z_j}S_{jk}\rho_{jk} \\
&= \frac{1}{z}\left(\frac{S_{jk}}{r_{j}}\right)\rho_{jk} \\
&= \frac{1}{z}\left(\frac{S_{kj}}{r_k}\right)\rho_{kj} \\
&= \frac{z_k}{z r_k}\frac{1}{z_k}S_{kj}\rho_{kj} \\
&= \pi_kP_{kj}
\end{align}
Clearly $X_t$ is positive recurrent since it is a finite irreducible Markov chain on a connected state space in which there exist forward and backward transitions between each connected node with positive probabilities.
\end{proof}
|
Install py-pde library
```python
%pip install py-pde
```
Requirement already satisfied: py-pde in /Users/sebastian/opt/anaconda3/envs/neuralOperator37/lib/python3.7/site-packages (0.17.1)
Requirement already satisfied: matplotlib>=3.1.0 in /Users/sebastian/opt/anaconda3/envs/neuralOperator37/lib/python3.7/site-packages (from py-pde) (3.5.1)
Requirement already satisfied: sympy>=1.5.0 in /Users/sebastian/opt/anaconda3/envs/neuralOperator37/lib/python3.7/site-packages (from py-pde) (1.9)
Requirement already satisfied: numpy>=1.18.0 in /Users/sebastian/opt/anaconda3/envs/neuralOperator37/lib/python3.7/site-packages (from py-pde) (1.21.5)
Requirement already satisfied: numba>=0.50.0 in /Users/sebastian/opt/anaconda3/envs/neuralOperator37/lib/python3.7/site-packages (from py-pde) (0.55.1)
Requirement already satisfied: scipy>=1.4.0 in /Users/sebastian/opt/anaconda3/envs/neuralOperator37/lib/python3.7/site-packages (from py-pde) (1.7.3)
Requirement already satisfied: cycler>=0.10 in /Users/sebastian/opt/anaconda3/envs/neuralOperator37/lib/python3.7/site-packages (from matplotlib>=3.1.0->py-pde) (0.11.0)
Requirement already satisfied: pillow>=6.2.0 in /Users/sebastian/opt/anaconda3/envs/neuralOperator37/lib/python3.7/site-packages (from matplotlib>=3.1.0->py-pde) (9.0.1)
Requirement already satisfied: pyparsing>=2.2.1 in /Users/sebastian/opt/anaconda3/envs/neuralOperator37/lib/python3.7/site-packages (from matplotlib>=3.1.0->py-pde) (3.0.7)
Requirement already satisfied: fonttools>=4.22.0 in /Users/sebastian/opt/anaconda3/envs/neuralOperator37/lib/python3.7/site-packages (from matplotlib>=3.1.0->py-pde) (4.29.1)
Requirement already satisfied: packaging>=20.0 in /Users/sebastian/opt/anaconda3/envs/neuralOperator37/lib/python3.7/site-packages (from matplotlib>=3.1.0->py-pde) (21.3)
Requirement already satisfied: python-dateutil>=2.7 in /Users/sebastian/opt/anaconda3/envs/neuralOperator37/lib/python3.7/site-packages (from matplotlib>=3.1.0->py-pde) (2.8.2)
Requirement already satisfied: kiwisolver>=1.0.1 in /Users/sebastian/opt/anaconda3/envs/neuralOperator37/lib/python3.7/site-packages (from matplotlib>=3.1.0->py-pde) (1.3.2)
Requirement already satisfied: setuptools in /Users/sebastian/opt/anaconda3/envs/neuralOperator37/lib/python3.7/site-packages (from numba>=0.50.0->py-pde) (58.0.4)
Requirement already satisfied: llvmlite<0.39,>=0.38.0rc1 in /Users/sebastian/opt/anaconda3/envs/neuralOperator37/lib/python3.7/site-packages (from numba>=0.50.0->py-pde) (0.38.0)
Requirement already satisfied: six>=1.5 in /Users/sebastian/opt/anaconda3/envs/neuralOperator37/lib/python3.7/site-packages (from python-dateutil>=2.7->matplotlib>=3.1.0->py-pde) (1.16.0)
Requirement already satisfied: mpmath>=0.19 in /Users/sebastian/opt/anaconda3/envs/neuralOperator37/lib/python3.7/site-packages (from sympy>=1.5.0->py-pde) (1.2.1)
Note: you may need to restart the kernel to use updated packages.
Generate dataset
```python
from pde import CartesianGrid, ScalarField, solve_poisson_equation
import torch
import numpy as np
num_param_steps = 64
field_values = np.linspace(0,10,num_param_steps)
grid_size = 64
training_input = torch.zeros(num_param_steps, grid_size,2)
training_output = torch.zeros(num_param_steps, grid_size,1)
grid = CartesianGrid([[0, 1]], grid_size, periodic=False)
for index, val in enumerate(field_values):
field = ScalarField(grid, val)
result = solve_poisson_equation(field, bc=[{"value": 0}, {"derivative": 1}])
training_input[index,:,0] = torch.tensor(field.data)
training_input[index,:,1] = torch.linspace(0,1, grid_size)
training_output[index,:,0] = torch.tensor(result.data)
```
```python
import math as m
radius = 3.0
area = m.pi * m.pow(radius, 2)
print(area)
```
28.274333882308138
```python
import torch
!python --version
!python -m site --user-site
#!pip list -v
torch.__version__
```
Python 3.7.11
/Users/sebastian/.local/lib/python3.7/site-packages
'1.10.2'
Model definitions copied from https://github.com/zongyi-li/fourier_neural_operator
```python
import torch.nn as nn
import torch.nn.functional as F
################################################################
# 1d fourier layer
################################################################
class SpectralConv1d(nn.Module):
def __init__(self, in_channels, out_channels, modes1):
super(SpectralConv1d, self).__init__()
"""
1D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = modes1 #Number of Fourier modes to multiply, at most floor(N/2) + 1
self.scale = (1 / (in_channels*out_channels))
self.weights1 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, dtype=torch.cfloat))
# Complex multiplication
def compl_mul1d(self, input, weights):
# (batch, in_channel, x ), (in_channel, out_channel, x) -> (batch, out_channel, x)
return torch.einsum("bix,iox->box", input, weights)
def forward(self, x):
batchsize = x.shape[0]
#Compute Fourier coefficients up to factor of e^(- something constant)
x_ft = torch.fft.rfft(x)
# Multiply relevant Fourier modes
out_ft = torch.zeros(batchsize, self.out_channels, x.size(-1)//2 + 1, device=x.device, dtype=torch.cfloat)
out_ft[:, :, :self.modes1] = self.compl_mul1d(x_ft[:, :, :self.modes1], self.weights1)
#Return to physical space
x = torch.fft.irfft(out_ft, n=x.size(-1))
return x
class FNO1d(nn.Module):
def __init__(self, modes, width):
super(FNO1d, self).__init__()
"""
The overall network. It contains 4 layers of the Fourier layer.
1. Lift the input to the desired channel dimension by self.fc0 .
2. 4 layers of the integral operators u' = (W + K)(u).
W defined by self.w; K defined by self.conv .
3. Project from the channel space to the output space by self.fc1 and self.fc2 .
input: the solution of the initial condition and location (a(x), x)
input shape: (batchsize, x=s, c=2)
output: the solution of a later timestep
output shape: (batchsize, x=s, c=1)
"""
self.modes1 = modes
self.width = width
self.padding = 2 # pad the domain if input is non-periodic
self.fc0 = nn.Linear(2, self.width) # input channel is 2: (a(x), x)
self.conv0 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv1 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv2 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv3 = SpectralConv1d(self.width, self.width, self.modes1)
self.w0 = nn.Conv1d(self.width, self.width, 1)
self.w1 = nn.Conv1d(self.width, self.width, 1)
self.w2 = nn.Conv1d(self.width, self.width, 1)
self.w3 = nn.Conv1d(self.width, self.width, 1)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, 1)
def forward(self, x):
x = self.fc0(x)
x = x.permute(0, 2, 1)
# x = F.pad(x, [0,self.padding]) # pad the domain if input is non-periodic
x1 = self.conv0(x)
x2 = self.w0(x)
x = F.gelu(x1) + x2
x1 = self.conv1(x)
x2 = self.w1(x)
x = F.gelu(x1) + x2
x1 = self.conv2(x)
x2 = self.w2(x)
x = F.gelu(x1) + x2
x1 = self.conv3(x)
x2 = self.w3(x)
x = F.gelu(x1) + x2
# x = x[..., :-self.padding] # pad the domain if input is non-periodic
x = x.permute(0, 2, 1)
x = self.fc1(x)
x = F.gelu(x)
x = self.fc2(x)
return x
```
```python
modes = 32
width = 16
epochs = 5000
learning_rate = 1e-4
batch_size = 64
model = FNO1d(modes, width) #.to('cuda')
optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr = 1e-3, epochs=epochs, steps_per_epoch= num_param_steps // batch_size)
dataloader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(training_input, training_output), batch_size=batch_size, shuffle=True)
for ep in range(epochs):
for input, output in dataloader:
#input, output = input.cuda(), output.cuda()
optimizer.zero_grad()
pred_output = model(input)
loss = torch.nn.functional.mse_loss(pred_output, output)
loss.backward()
optimizer.step()
scheduler.step()
print("\r",'loss:' + str(loss.detach().cpu().numpy()), end = "")
```
loss:nan51985
Check output
```python
import matplotlib.pyplot as plt
grid_start = 0
grid_end = 1
test_grid_size = 64
field_val = 2
test_field = ScalarField(CartesianGrid([[grid_start, grid_end]], test_grid_size, periodic=False), field_val)
test_result = solve_poisson_equation(test_field, bc=[{"value": 0}, {"derivative": 1}])
model_input = torch.tensor(test_field.data, dtype= torch.float).reshape(1,test_grid_size,1)
model_input = torch.cat((model_input.reshape(1,test_grid_size,1), torch.linspace(grid_start,grid_end, test_grid_size).reshape(1,test_grid_size,1)), dim = -1)
#model_input = model_input.to('cuda')
model_result = model(model_input)
plt.figure()
plt.plot(test_result.data)
plt.plot(model_result.detach().cpu().flatten().numpy())
```
```python
```
|
The derivative of a polynomial $a_0 + a_1 x + \cdots + a_n x^n$ is $a_1 + 2 a_2 x + \cdots + n a_n x^{n-1}$.
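A minimal sketch of this rule in code (the coefficient ordering $a_0, a_1, \dots, a_n$ and the helper name are assumptions for illustration):
```python
def poly_derivative(coeffs):
    # coeffs[k] is a_k; d/dx of a_k x^k is k * a_k x^(k-1)
    return [k * a for k, a in enumerate(coeffs)][1:]

# 3 + x + 4x^2 + 2x^3  ->  1 + 8x + 6x^2
print(poly_derivative([3, 1, 4, 2]))  # [1, 8, 6]
```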
|
(***************************************************************)
(* Coq code for PCAP *)
(* *)
(* List of Type *)
(* *)
(* *)
(***************************************************************)
Require Import Le.
From AIM Require Export Coqlib.
Section ListT.
Variable A : Type.
Set Implicit Arguments.
Inductive listT : Type :=
| nilT : listT
| consT : A -> listT -> listT.
Infix "::" := consT
(at level 60, right associativity) : listt_scope.
Open Scope listt_scope.
(*************************)
(** Discrimination *)
(*************************)
Lemma nilT_consT : forall (a : A) (m : listT), nilT <> a :: m.
Proof.
intros; discriminate.
Qed.
(*************************)
(** Concatenation *)
(*************************)
Fixpoint appT (l m : listT) {struct l} : listT:=
match l with
| nilT => m
| a :: l1 => a :: appT l1 m
end.
Infix "++" := appT (right associativity, at level 60) : listt_scope.
Lemma appT_nilT_end : forall l : listT, l = l ++ nilT.
Proof.
induction l; simpl in |- *; auto.
rewrite <- IHl; auto.
Qed.
Hint Resolve appT_nilT_end.
Ltac now_show c := change c in |- *.
Lemma appT_ass : forall l m n : listT, (l ++ m) ++ n = l ++ m ++ n.
Proof.
intros. induction l; simpl in |- *; auto.
now_show (a :: (l ++ m) ++ n = a :: l ++ m ++ n).
rewrite <- IHl; auto.
Qed.
Hint Resolve appT_ass.
Lemma ass_appT : forall l m n:listT, l ++ m ++ n = (l ++ m) ++ n.
Proof.
auto.
Qed.
Hint Resolve ass_appT.
Lemma appT_comm_consT : forall (x y:listT) (a:A), a :: x ++ y = (a :: x) ++ y.
Proof.
auto.
Qed.
Lemma appT_eq_nilT : forall x y:listT, x ++ y = nilT -> x = nilT /\ y = nilT.
Proof.
destruct x as [| a l]; [ destruct y as [| a l] | destruct y as [| a0 l0] ];
simpl in |- *; auto.
intros H; discriminate H.
intros; discriminate H.
Qed.
Lemma appT_consT_not_nilT : forall (x y:listT) (a:A), nilT <> x ++ a :: y.
Proof.
unfold not in |- *.
destruct x as [| a l]; simpl in |- *; intros.
discriminate H.
discriminate H.
Qed.
Lemma appT_eq_unit :
forall (x y:listT) (a:A),
x ++ y = a :: nilT -> x = nilT /\ y = a :: nilT \/ x = a :: nilT /\ y = nilT.
Proof.
destruct x as [| a l]; [ destruct y as [| a l] | destruct y as [| a0 l0] ];
simpl in |- *.
intros a H; discriminate H.
left; split; auto.
right; split; auto.
generalize H.
generalize (appT_nilT_end l); intros E.
rewrite <- E; auto.
intros.
injection H.
intro.
cut (nilT = l ++ a0 :: l0); auto.
intro.
generalize (appT_consT_not_nilT _ _ _ H1); intro.
elim H2.
Qed.
Lemma appT_inj_tail :
forall (x y:listT) (a b:A), x ++ a :: nilT = y ++ b :: nilT -> x = y /\ a = b.
Proof.
induction x as [| x l IHl];
[ destruct y as [| a l] | destruct y as [| a l0] ];
simpl in |- *; auto.
intros a b H.
injection H.
auto.
intros a0 b H.
injection H; intros.
generalize (appT_consT_not_nilT _ _ _ H0); destruct 1.
intros a b H.
injection H; intros.
cut (nilT = l ++ a :: nilT); auto.
intro.
generalize (appT_consT_not_nilT _ _ _ H2); destruct 1.
intros a0 b H.
injection H; intros.
destruct (IHl l0 a0 b H0).
split; auto.
rewrite <- H1; rewrite <- H2; reflexivity.
Qed.
(*************************)
(** Head and tail *)
(*************************)
Definition head (l : listT) :=
match l return optionT A with
| nilT => NoneT
| x :: _ => SomeT x
end.
Definition tail (l:listT) : listT:=
match l with
| nilT => nilT
| a :: m => m
end.
(****************************************)
(** Length of lists *)
(****************************************)
Fixpoint lengthT (l:listT) : nat :=
match l with
| nilT => 0
| _ :: m => S (lengthT m)
end.
(******************************)
(** LengthT order of lists *)
(******************************)
Section lengthT_order.
Definition lel (l m:listT) := lengthT l <= lengthT m.
Variables a b : A.
Variables l m n : listT.
Lemma lel_refl : lel l l.
Proof.
unfold lel in |- *; auto with arith.
Qed.
Lemma lel_trans : lel l m -> lel m n -> lel l n.
Proof.
unfold lel in |- *; intros.
now_show (lengthT l <= lengthT n).
apply le_trans with (lengthT m); auto with arith.
Qed.
Lemma lel_consT_consT : lel l m -> lel (a :: l) (b :: m).
Proof.
unfold lel in |- *; simpl in |- *; auto with arith.
Qed.
Lemma lel_consT : lel l m -> lel l (b :: m).
Proof.
unfold lel in |- *; simpl in |- *; auto with arith.
Qed.
Lemma lel_tail : lel (a :: l) (b :: m) -> lel l m.
Proof.
unfold lel in |- *; simpl in |- *; auto with arith.
Qed.
Lemma lel_nilT : forall l':listT, lel l' nilT -> nilT = l'.
Proof.
intro l'; elim l'; auto with arith.
intros a' y H H0.
now_show (nilT = a' :: y).
absurd (S (lengthT y) <= 0); auto with arith.
Qed.
End lengthT_order.
Hint Resolve lel_refl lel_consT_consT lel_consT lel_nilT lel_nilT nilT_consT.
(*********************************)
(** The [In] predicate *)
(*********************************)
Fixpoint In (a:A) (l:listT) {struct l} : Prop :=
match l with
| nilT => False
| b :: m => b = a \/ In a m
end.
Lemma in_eq : forall (a:A) (l:listT), In a (a :: l).
Proof.
simpl in |- *; auto.
Qed.
Hint Resolve in_eq.
Lemma in_consT : forall (a b:A) (l:listT), In b l -> In b (a :: l).
Proof.
simpl in |- *; auto.
Qed.
Hint Resolve in_consT.
Lemma in_nilT : forall a:A, ~ In a nilT.
Proof.
unfold not in |- *; intros a H; inversion_clear H.
Qed.
Lemma in_inv : forall (a b:A) (l:listT), In b (a :: l) -> a = b \/ In b l.
Proof.
intros a b l H; inversion_clear H; auto.
Qed.
Lemma In_dec :
(forall x y:A, {x = y} + {x <> y}) ->
forall (a:A) (l:listT), {In a l} + {~ In a l}.
Proof.
intro H.
induction l as [| a0 l IHl].
right; apply in_nilT.
destruct (H a0 a); simpl in |- *; auto.
destruct IHl; simpl in |- *; auto.
right; unfold not in |- *; intros [Hc1| Hc2]; auto.
Qed.
Lemma in_appT_or : forall (l m:listT) (a:A), In a (l ++ m) -> In a l \/ In a m.
Proof.
intros l m a.
elim l; simpl in |- *; auto.
intros a0 y H H0.
now_show ((a0 = a \/ In a y) \/ In a m).
elim H0; auto.
intro H1.
now_show ((a0 = a \/ In a y) \/ In a m).
elim (H H1); auto.
Qed.
Hint Immediate in_appT_or.
Lemma in_or_appT : forall (l m:listT) (a:A), In a l \/ In a m -> In a (l ++ m).
Proof.
intros l m a.
elim l; simpl in |- *; intro H.
now_show (In a m).
elim H; auto; intro H0.
now_show (In a m).
elim H0. (* subProof completed *)
intros y H0 H1.
now_show (H = a \/ In a (y ++ m)).
elim H1; auto 4.
intro H2.
now_show (H = a \/ In a (y ++ m)).
elim H2; auto.
Qed.
Hint Resolve in_or_appT.
(***************************)
(** Set inclusion on listT *)
(***************************)
Definition incl (l m:listT) := forall a:A, In a l -> In a m.
Hint Unfold incl.
Lemma incl_refl : forall l:listT, incl l l.
Proof.
auto.
Qed.
Hint Resolve incl_refl.
Lemma incl_tl : forall (a:A) (l m:listT), incl l m -> incl l (a :: m).
Proof.
auto.
Qed.
Hint Immediate incl_tl.
Lemma incl_tran : forall l m n:listT, incl l m -> incl m n -> incl l n.
Proof.
auto.
Qed.
Lemma incl_appTl : forall l m n:listT, incl l n -> incl l (n ++ m).
Proof.
auto.
Qed.
Hint Immediate incl_appTl.
Lemma incl_appTr : forall l m n:listT, incl l n -> incl l (m ++ n).
Proof.
auto.
Qed.
Hint Immediate incl_appTr.
Lemma incl_consT :
forall (a:A) (l m:listT), In a m -> incl l m -> incl (a :: l) m.
Proof.
unfold incl in |- *; simpl in |- *; intros a l m H H0 a0 H1.
now_show (In a0 m).
elim H1.
now_show (a = a0 -> In a0 m).
elim H1; auto; intro H2.
now_show (a = a0 -> In a0 m).
elim H2; auto. (* solves subgoal *)
now_show (In a0 l -> In a0 m).
auto.
Qed.
Hint Resolve incl_consT.
Lemma incl_appT : forall l m n:listT, incl l n -> incl m n -> incl (l ++ m) n.
Proof.
unfold incl in |- *; simpl in |- *; intros l m n H H0 a H1.
now_show (In a n).
elim (in_appT_or _ _ _ H1); auto.
Qed.
Hint Resolve incl_appT.
(**************************)
(** Nth element of a listT*)
(**************************)
Fixpoint nth (n:nat) (l:listT) (default:A) {struct l} : A :=
match n, l with
| O, x :: l' => x
| O, other => default
| S m, nilT => default
| S m, x :: t => nth m t default
end.
Fixpoint nth_ok (n:nat) (l:listT) (default:A) {struct l} : bool :=
match n, l with
| O, x :: l' => true
| O, other => false
| S m, nilT => false
| S m, x :: t => nth_ok m t default
end.
Lemma nth_in_or_default :
forall (n:nat) (l:listT) (d:A), {In (nth n l d) l} + {nth n l d = d}.
(* Realizer nth_ok. Program_all. *)
Proof.
intros n l d; generalize n; induction l; intro n0.
right; case n0; trivial.
case n0; simpl in |- *.
auto.
intro n1; elim (IHl n1); auto.
Qed.
Lemma nth_S_consT :
forall (n:nat) (l:listT) (d a:A),
In (nth n l d) l -> In (nth (S n) (a :: l) d) (a :: l).
Proof.
simpl in |- *; auto.
Qed.
(********************************)
(** Decidable equality on lists *)
(********************************)
Lemma list_eq_dec :
(forall x y:A, {x = y} + {x <> y}) -> forall x y:listT, {x = y} + {x <> y}.
Proof.
intro H.
induction x as [| a l IHl]; destruct y as [| a0 l0]; auto.
destruct (H a a0) as [e| e].
destruct (IHl l0) as [e'| e'].
left; rewrite e; rewrite e'; trivial.
right; red in |- *; intro.
apply e'; injection H0; trivial.
right; red in |- *; intro.
apply e; injection H0; trivial.
Qed.
(*************************)
(** Reverse *)
(*************************)
Fixpoint rev (l:listT) : listT:=
match l with
| nilT => nilT
| x :: l' => rev l' ++ x :: nilT
end.
Lemma distr_rev : forall x y:listT, rev (x ++ y) = rev y ++ rev x.
Proof.
induction x as [| a l IHl].
destruct y as [| a l].
simpl in |- *.
auto.
simpl in |- *.
apply appT_nilT_end; auto.
intro y.
simpl in |- *.
rewrite (IHl y).
apply (appT_ass (rev y) (rev l) (a :: nilT)).
Qed.
Remark rev_unit : forall (l:listT) (a:A), rev (l ++ a :: nilT) = a :: rev l.
Proof.
intros.
apply (distr_rev l (a :: nilT)); simpl in |- *; auto.
Qed.
Lemma rev_involutive : forall l:listT, rev (rev l) = l.
Proof.
induction l as [| a l IHl].
simpl in |- *; auto.
simpl in |- *.
rewrite (rev_unit (rev l) a).
rewrite IHl; auto.
Qed.
(*********************************************)
(** Reverse Induction Principle on Lists *)
(*********************************************)
Section Reverse_Induction.
Unset Implicit Arguments.
Remark rev_list_ind :
forall P:listT-> Prop,
P nilT ->
(forall (a:A) (l:listT), P (rev l) -> P (rev (a :: l))) ->
forall l:listT, P (rev l).
Proof.
induction l; auto.
Qed.
Set Implicit Arguments.
Lemma rev_ind :
forall P:listT-> Prop,
P nilT ->
(forall (x:A) (l:listT), P l -> P (l ++ x :: nilT)) -> forall l:listT, P l.
Proof.
intros.
generalize (rev_involutive l).
intros E; rewrite <- E.
apply (rev_list_ind P).
auto.
simpl in |- *.
intros.
apply (H0 a (rev l0)).
auto.
Qed.
End Reverse_Induction.
End ListT.
Arguments nilT {A}.
Hint Resolve nilT_consT appT_nilT_end ass_appT appT_ass: datatypes v62.
Hint Resolve appT_comm_consT appT_consT_not_nilT: datatypes v62.
Hint Immediate appT_eq_nilT: datatypes v62.
Hint Resolve appT_eq_unit appT_inj_tail: datatypes v62.
Hint Resolve lel_refl lel_consT_consT lel_consT lel_nilT lel_nilT nilT_consT:
datatypes v62.
Hint Resolve in_eq in_consT in_inv in_nilT in_appT_or in_or_appT: datatypes v62.
Hint Resolve incl_refl incl_tl incl_tran incl_appTl incl_appTr incl_consT
incl_appT: datatypes v62.
(** Exporting listTnotations *)
Infix "::" := consT (at level 60, right associativity) : listt_scope.
Infix "++" := appT (right associativity, at level 60) : listt_scope.
Open Scope listt_scope.
(** Declare Scope list_scope with key listT*)
Delimit Scope listt_scope with listT.
Bind Scope listt_scope with listT.
|
__Chapter 12 - Implementing a Multilayer Artificial Neural Network from Scratch__
1. [Import](#Import)
1. [Modeling complex functions with artificial neural networks](#Modeling-complex-functions-with-artificial-neural-networks)
1. [Activating a neural network via forward propagation](#Activating-a-neural-network-via-forward-propagation)
1. [Classifying handwritten digits](#Classifying-handwritten-digits)
1. [Implementing a multilayer perceptron](#Implementing-a-multilayer-perceptron)
1. [Homegrown implementation](#Homegrown-implementation)
1. [Training an artificial neural network](#Training-an-artificial-neural-network)
1. [Logistic cost function refresher](#Logistic-cost-function-refresher)
1. [Computing the logistic cost function](#Computing-the-logistic-cost-function)
1. [Developing intuition for backpropagation](#Developing-your-intuition-for-backpropagation)
1. [Training neural networks via backpropagation](#Training-neural-networks-via-backpropagation)
# Import
<a id = 'Import'></a>
```python
# standard library and settings
import os
import sys
import importlib
import itertools
from io import StringIO
import warnings
warnings.simplefilter("ignore")
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:95% !important; }</style>"))
# data extensions and settings
import numpy as np
np.set_printoptions(threshold=np.inf, suppress=True)
import pandas as pd
pd.set_option("display.max_rows", 500)
pd.options.display.float_format = "{:,.6f}".format
# modeling extensions
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.cluster import KMeans, DBSCAN, AgglomerativeClustering
from sklearn.datasets import load_boston, load_wine, load_iris, load_breast_cancer, make_blobs, make_moons
from sklearn.decomposition import PCA, LatentDirichletAllocation
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier, ExtraTreesClassifier, IsolationForest
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer, HashingVectorizer
from sklearn.feature_selection import f_classif, f_regression, VarianceThreshold, SelectFromModel, SelectKBest
from sklearn.linear_model import Lasso, Ridge, ElasticNet, LinearRegression, LogisticRegression, SGDRegressor
from sklearn.metrics import precision_score, recall_score, f1_score, explained_variance_score, mean_squared_log_error, mean_absolute_error, median_absolute_error, mean_squared_error, r2_score, confusion_matrix, roc_curve, accuracy_score, roc_auc_score, homogeneity_score, completeness_score, classification_report, silhouette_samples
from sklearn.model_selection import KFold, train_test_split, GridSearchCV, StratifiedKFold, cross_val_score, RandomizedSearchCV
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.pipeline import make_pipeline, Pipeline, FeatureUnion
from sklearn.preprocessing import StandardScaler, RobustScaler, PolynomialFeatures, OrdinalEncoder, LabelEncoder, OneHotEncoder, KBinsDiscretizer, QuantileTransformer, PowerTransformer, MinMaxScaler
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import sklearn.utils as utils
# visualization extensions and settings
import seaborn as sns
import matplotlib.pyplot as plt
# custom extensions and settings
sys.path.append("/home/mlmachine") if "/home/mlmachine" not in sys.path else None
sys.path.append("/home/prettierplot") if "/home/prettierplot" not in sys.path else None
import mlmachine as mlm
from prettierplot.plotter import PrettierPlot
import prettierplot.style as style
# magic functions
%matplotlib inline
```
<style>.container { width:95% !important; }</style>
# Modeling complex functions with artificial neural networks
A fully connected network, also known as a multilayer perceptron (MLP), has one input layer of neurons, one hidden layer and one output layer. The units in the hidden layer are fully connected to the input layer, and the output layer is fully connected to the hidden layer. If more than one hidden layer is present then the MLP is considered to be a deep artificial neural network.
Each neuron, or activation unit, can be identified by its position amongst the other activation units and the layer in which it appears - $a_i^l$ is the $i$th neuron in the $l$th layer. For simplicity, this walkthrough will use the $l$ values of $in, h, out$ to describe the input, hidden and output layers. So $a_i^{out}$ is the $i$th activation unit of the output layer. The input and hidden layers each have a bias unit, $a_0^{in}$ and $a_0^{h}$, and both are set to one. This means the input layer is just the input values plus the bias unit:
$$
a^{in}
=
\begin{bmatrix} a_0^{in} \\ a_1^{in} \\ \vdots \\ a_m^{in} \end{bmatrix}
=
\begin{bmatrix} 1 \\ x_1^{in} \\ \vdots \\ x_m^{in} \end{bmatrix}
$$
Each activation unit in layer $l$ is connected to all of the units in layer $l$ + 1 by a weight coefficient. As an example, the connection between the $k$th unit in layer $l$ and the $j$th unit in layer $l$ + 1 is written as $w_{k,j}^l$. So the weight matrix that connects the input layer to the hidden layer is $\mathbf{W}^{h}$, and the weight matrix that connects the hidden layer to the output layer is $\mathbf{W}^{out}$. The weight matrix that connects the input and hidden layers, for example, is $\mathbf{W}^h \in \mathbb{R}^{m \times d}$, where $d$ is the number of hidden units and $m$ is the number of input units (including the bias unit).
Having one unit in the output layer is sufficient for a binary classification task, but having more than one enables multiclass classification through a one-hot vector representation of the multiclass labels:
$$
0
=
\begin{bmatrix} 1 \\ 0 \\ 0 \end{bmatrix}
1
=
\begin{bmatrix} 0 \\ 1 \\ 0 \end{bmatrix}
2
=
\begin{bmatrix} 0 \\ 0 \\ 1 \end{bmatrix}
$$
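A minimal sketch of this encoding (hypothetical labels, not part of the original text):
```python
import numpy as np

def onehot(y, n_classes):
    # each row is the one-hot vector for one integer label
    out = np.zeros((y.shape[0], n_classes))
    out[np.arange(y.shape[0]), y] = 1.0
    return out

print(onehot(np.array([0, 1, 2]), 3))
# [[1. 0. 0.]
#  [0. 1. 0.]
#  [0. 0. 1.]]
```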
<a id = 'Modeling-complex-functions-with-artificial-neural-networks'></a>
## Activating a neural network via forward propagation
The MLP learning procedure in three steps:
1. Starting at the input layer, forward propagate the patterns of the training data through the network to generate an output
2. Using the output, calculate the error to be minimized using a cost function
3. Backpropagate the error, find its derivative with respect to each weight in the network, then update the model
In feedforward networks, each layer serves as the input to the next layer without any loops. This contrasts with recurrent neural networks. The three steps above are repeated for multiple epochs to learn the best weights, and then forward propagation is used to calculate the network output and apply a threshold function to obtain the predicted class labels represented in the one-hot format above. Describing each step in more detail:
The first activation unit in the hidden layer $a_1^{h}$ is connected to all units in the input layer, and is calculated by:
$$
z_1^h = a_0^{in}w_{0,1}^h + a_1^{in}w_{1,1}^h + ... a_m^{in}w_{m,1}^h
$$
$$
a_1^h = \phi\big(z_1^h\big)
$$
$z_1^h$ is the net input and $\phi(\cdot)$ is the activation function that acts on $z_1^h$. This activation function needs to be differentiable so that the weights connecting the neurons can be learned with a gradient-based approach. Non-linear activation functions are also possible and are used to solve complex problems like image classification. One familiar non-linear activation function is the sigmoid function, which arose in the context of logistic regression:
$$
\phi(z) = \frac{1}{1 + e^{-z}}
$$
This is an S-shaped curve that maps the input $z$ onto a logistic distribution that ranges from 0 to 1 and crosses the y-axis at $\phi(z) = 0.5$ when $z = 0$. Given this, we can think of each neuron as a logistic regression unit that returns values in the range 0 to 1. To describe this activation function in linear algebra notation:
$$
\begin{equation}
\textbf{z}^{h} = \textbf{a}^{in}\textbf{W}^h
\\
\textbf{a}^h = \phi\big(\textbf{z}^h\big)
\end{equation}
$$
$\textbf{a}^{in}$ is the 1 x $m$ dimensional feature vector for a sample $\textbf{x}^{in}$, plus the bias unit. $\textbf{W}^{h}$ is the $m$ x $d$ dimensional weight matrix where $d$ is the number of units in the hidden layer. Through matrix-vector multiplication, we obtain a 1 x $d$ dimensional net input vector $\textbf{z}^h$ to be used to calculate the activation $\textbf{a}^{h}$ ($\textbf{a}^{h} \in \mathbb{R}^{1 \times d}$). This computation can be generalized to all $n$ samples in the training set by:
$$
\textbf{Z}^{h} = \textbf{A}^{in}\textbf{W}^{h}
$$
In this representation, $\textbf{A}^{in}$ is an $n$ x $m$ matrix, and the matrix-matrix multiplication results in an $n$ x $d$ dimensional net input matrix $\textbf{Z}^{h}$. Lastly, apply the activation function $\phi(\cdot)$ to each value in the net input matrix to get the $n$ x $d$ dimensional matrix $\textbf{A}^{h}$ for the next layer, which in this case is the output layer:
$$
\textbf{A}^h = \phi\big(\textbf{Z}^{h}\big)
$$
Just as above, we can write the net input of the output layer in vectorized form for multiple samples:
$$
\textbf{Z}^{out} = \textbf{A}^{h}\textbf{W}^{out}
$$
In this last step, we multiply the $n$ x $d$ dimensional matrix $\textbf{A}^{h}$ by the $d$ x $t$ matrix $\textbf{W}^{out}$ (where $t$ is the number of output units) to obtain the $n$ x $t$ dimensional matrix $\textbf{Z}^{out}$, where the columns in this matrix represent the outputs for each sample. The last step is to apply the sigmoid activation function to obtain the real-valued output of the network:
$$
\textbf{A}^{out} = \phi\big(\textbf{Z}^{out}\big), \textbf{A}^{out} \in \mathbb{R}^{n \times t}
$$
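The shape bookkeeping above can be verified with a small numpy sketch (the dimensions below are arbitrary illustrative choices, not values from the text):
```python
import numpy as np

n, m, d, t = 5, 4, 3, 2               # samples, input units, hidden units, output units
rng = np.random.RandomState(0)
A_in = rng.rand(n, m)                 # input activations
W_h, W_out = rng.rand(m, d), rng.rand(d, t)

sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
A_h = sigmoid(A_in @ W_h)             # net input then activation: (n, d)
A_out = sigmoid(A_h @ W_out)          # (n, t)
print(A_h.shape, A_out.shape)         # (5, 3) (5, 2)
```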
<a id = 'Activating-a-neural-network-via-forward-propagation'></a>
# Classifying handwritten digits
Implement and train our first MLP to classify handwritten digits from the Modified National Institute of Standards and Technology (MNIST) dataset. It consists of handwritten digits from 250 people - half high school students and half Census Bureau employees.
<a id = 'Classifying-handwritten-digits'></a>
```python
# Load data and print dimensions
df_train = pd.read_csv("s3://tdp-ml-datasets/kaggle-mnist//train.csv", sep=",")
df_test = pd.read_csv("s3://tdp-ml-datasets/kaggle-mnist//test.csv", sep=",")
print("Training data dimensions: {}".format(df_train.shape))
print("Test data dimensions: {}".format(df_test.shape))
# separate
df_train_label = df_train["label"]
df_train = df_train.drop(labels="label", axis=1)
```
Training data dimensions: (42000, 785)
Test data dimensions: (28000, 784)
```python
# train/test split
X_train, X_test, y_train, y_test = train_test_split(
df_train, df_train_label, test_size=0.2
)
```
```python
# visualize sample digits
fig, ax = plt.subplots(nrows=2, ncols=5, sharex=True, sharey=True)
ax = ax.flatten()
for i in range(10):
img = np.array(X_train[y_train == i].iloc[0]).reshape(28, 28)
ax[i].imshow(img, cmap="Greys")
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.show()
```
```python
# visualize multiple samples of the same digit
fig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True)
ax = ax.flatten()
for i in range(25):
img = np.array(X_train[y_train == 7].iloc[i]).reshape(28, 28)
ax[i].imshow(img, cmap="Greys")
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.show()
```
# Implementing a multilayer perceptron
<a id = 'Implementing-a-multilayer-perceptron'></a>
## Homegrown implementation
<a id = 'Homegrown-implementation'></a>
```python
# multilayer perceptron custom class
class NeuralNetMLP:
"""
Info:
Description:
Feedforward neural network / Multilayer perceptron classifier
Parameters:
n_hidden : int, default=30
Number of hidden units
l2 : float, default=0.
Lambda value for L2-regularization
No regularization if l2 = 0.
epochs : int (default : 100)
Number of passes through training set
eta : float, default=0.001
Learning rate
shuffle : bool, default=True
Shuffle data after every epoch if True to prevent cycles
minibatch_size : int, default=1
# of training samples per mini batch
seed : int, default=None
Random seed for weight initialization and shuffling
Attributes:
eval_ : dict
Dictionary that collects cost, training accuracy and validation
accuracy for each epoch during training
"""
def __init__(
self,
n_hidden=30,
l2=0.0,
epochs=100,
eta=0.001,
shuffle=True,
minibatch_size=1,
seed=None,
):
self.random = np.random.RandomState(seed)
self.n_hidden = n_hidden
self.l2 = l2
self.epochs = epochs
self.eta = eta
self.shuffle = shuffle
self.minibatch_size = minibatch_size
def _onehot(self, y, n_classes):
"""
Info:
Description:
Encode labels into one-hot representation
Parameters:
y : numpy array
Target values
n_classes : int
Number of unique classes
Returns:
onehot : array, shape = (n_samples, n_labels)
"""
onehot = np.zeros((n_classes, y.shape[0]))
for idx, val in enumerate(y.astype(int)):
onehot[val, idx] = 1.0
return onehot.T
def _sigmoid(self, z):
"""
Info:
Description:
Compute logistic function (sigmoid)
Parameters:
z : float
net input
"""
return 1.0 / (1.0 + np.exp(-np.clip(z, -250, 250)))
def _forward(self, X):
"""
Info:
Description:
Compute forward propagation step
Parameters:
X : array
Input data
"""
# step 1: net input of hidden layer
# [n_samples, n_features] dot [n_features, n_hidden] -> [n_samples, n_hidden]
z_h = np.dot(X, self.w_h) + self.b_h
# step 2: activation of hidden layer
a_h = self._sigmoid(z_h)
# step 3: net input of output layer
# [n_samples, n_hidden] dot [n_hidden, n_classlabels] -> [n_samples, n_classlabels]
z_out = np.dot(a_h, self.w_out) + self.b_out
# step 4: activation of output layer
a_out = self._sigmoid(z_out)
return z_h, a_h, z_out, a_out
def _compute_cost(self, y_enc, output):
"""
Info:
Description:
Compute cost function
Parameters:
y_enc : array, shape = (n_samples, n_labels)
one-hot encoded class labels
output : array, shape = (n_samples, n_output_units)
Activation of the output layer (forward propagation)
Returns:
cost : float
Regularized cost of epoch
"""
L2_term = self.l2 * (np.sum(self.w_h ** 2.0) + np.sum(self.w_out ** 2.0))
term1 = -y_enc * (np.log(output))
term2 = (1.0 - y_enc) * np.log(1.0 - output)
cost = np.sum(term1 - term2) + L2_term
return cost
def predict(self, X):
"""
Info:
Description:
Predict class labels
Parameters:
X : array, shape = [n_samples, n_features]
Returns:
y_pred : array, shape = [n_samples]
Predicted class labels
"""
z_h, a_h, z_out, a_out = self._forward(X)
y_pred = np.argmax(z_out, axis=1)
return y_pred
def fit(self, X_train, y_train, X_valid, y_valid):
"""
Info:
Description:
Learn weights from training data
Parameters:
X_train : array, shape = [n_samples, n_features]
Input layer with original features
y_train : array, shape = [n_samples]
Correct target labels
X_valid : array, shape = [n_samples, n_features]
Sample features for validation during training
y_valid : array, shape = [n_samples]
Sample labels
"""
n_output = np.unique(y_train).shape[0] # number of class labels
n_features = X_train.shape[1]
#######################
# weight initialization
# weights for input to hidden
self.b_h = np.zeros(self.n_hidden)
self.w_h = self.random.normal(
loc=0.0, scale=0.1, size=(n_features, self.n_hidden)
)
# weights for hidden to output
self.b_out = np.zeros(n_output)
self.w_out = self.random.normal(
loc=0.0, scale=0.1, size=(self.n_hidden, n_output)
)
epoch_strlen = len(str(self.epochs)) # for progr. format
self.eval_ = {"cost": [], "train_acc": [], "valid_acc": []}
y_train_enc = self._onehot(y_train, n_output)
# iterate over training epochs
for i in range(self.epochs):
# iterate over mini batches
indices = np.arange(X_train.shape[0])
if self.shuffle:
self.random.shuffle(indices)
for start_idx in range(
0, indices.shape[0] - self.minibatch_size + 1, self.minibatch_size
):
batch_idx = indices[start_idx : start_idx + self.minibatch_size]
# forward propagation
z_h, a_h, z_out, a_out = self._forward(X_train[batch_idx])
#######################
# backpropagation
# [n_samples, n_classlabels]
sigma_out = a_out - y_train_enc[batch_idx]
# [n_samples, n_hidden]
sigmoid_derivative_h = a_h * (1.0 - a_h)
# [n_samples, n_classlabels] dot [n_classlabels, n_hidden] -> [n_samples, n_hidden]
sigma_h = np.dot(sigma_out, self.w_out.T) * sigmoid_derivative_h
# [n_features, n_samples] dot [n_samples, n_hidden] -> [n_features, n_hidden]
grad_w_h = np.dot(X_train[batch_idx].T, sigma_h)
grad_b_h = np.sum(sigma_h, axis=0)
# [n_hidden, n_samples] dot [n_samples, n_classlabels] -> [n_hidden, n_classlabels]
grad_w_out = np.dot(a_h.T, sigma_out)
grad_b_out = np.sum(sigma_out, axis=0)
# regularization and weight updates
delta_w_h = grad_w_h + self.l2 * self.w_h
delta_b_h = grad_b_h # bias is not regularized
self.w_h -= self.eta * delta_w_h
self.b_h -= self.eta * delta_b_h
delta_w_out = grad_w_out + self.l2 * self.w_out
delta_b_out = grad_b_out # bias is not regularized
self.w_out -= self.eta * delta_w_out
self.b_out -= self.eta * delta_b_out
#######################
# evaluation
z_h, a_h, z_out, a_out = self._forward(X_train)
cost = self._compute_cost(y_enc=y_train_enc, output=a_out)
y_train_pred = self.predict(X_train)
y_valid_pred = self.predict(X_valid)
train_acc = (np.sum(y_train == y_train_pred)).astype(float) / X_train.shape[0]
valid_acc = (np.sum(y_valid == y_valid_pred)).astype(float) / X_valid.shape[0]
sys.stderr.write(
"\r%0*d/%d | Cost: %.2f | Train/Valid Accuracy: %.2f%%/%.2f%%"
% (
epoch_strlen,
i + 1,
self.epochs,
cost,
train_acc * 100,
valid_acc * 100,
)
)
sys.stderr.flush()
self.eval_["cost"].append(cost)
self.eval_["train_acc"].append(train_acc)
self.eval_["valid_acc"].append(valid_acc)
return self
```
```python
# initialize a 784-100-10 MLP - a neural network with 784 input units,
# 100 hidden units and 10 output units
nn = NeuralNetMLP(
n_hidden=100,
l2=0.01,
epochs=50,
eta=0.0005,
minibatch_size=100,
shuffle=True,
seed=1,
)
# train neural network using first 30,000 training samples
# and use the remainder for validation
nn.fit(
X_train=X_train.values[:30000],
y_train=y_train.values[:30000],
X_valid=X_train.values[30000:],
y_valid=y_train.values[30000:],
)
```
50/50 | Cost: 12305.40 | Train/Valid Accuracy: 94.21%/92.83%
<__main__.NeuralNetMLP at 0x7f1c865875c0>
```python
# plot cost over the 50 training epochs
p = PrettierPlot()
ax = p.make_canvas(
title="Cost reduction over time", x_label="Epochs", y_label="Cost", y_shift=0.8
)
p.line(
x=np.arange(nn.epochs),
y=np.array(nn.eval_["cost"]),
linecolor=style.style_hex_mid[0],
x_ticks=np.arange(0, 51, 5),
bbox=(1.2, 0.9),
marker_on=False,
ax=ax,
)
```
> Remarks - The cost decreased substantially over the early epochs before slowing afterwards. Even at the end of the run there is still gradual improvement, and there would likely be more if training were allowed to continue for additional epochs.
```python
# plot training and validation accuracy over the 50 epochs
p = PrettierPlot()
ax = p.make_canvas(
title="Accuracy improvement over time - train vs. test",
x_label="Epochs",
y_label="Accuracy",
y_shift=0.7,
)
p.multi_line(
x=np.arange(nn.epochs),
y=np.c_[nn.eval_["train_acc"], nn.eval_["valid_acc"]],
label=["Training", "Validation"],
x_units="f",
y_units="fff",
bbox=(1.2, 0.9),
marker_on=False,
ax=ax,
)
```
> Remarks - The plot shows an increasing gap between the training and validation accuracy as training progresses, a sign of mild overfitting.
```python
# calculate the prediction accuracy on test
y_testPred = nn.predict(X_test)
acc = np.sum(y_test == y_testPred).astype(float) / X_test.shape[0]
print("Test accuracy: {:.2f}%".format(acc * 100))
```
Test accuracy: 93.17%
> Remarks - Despite overfitting on the training data, this model performed pretty well by achieving roughly 93 percent accuracy on the unseen test data. To further refine the model, we can adjust the number of hidden units, the regularization parameter, and the learning rate, among other things.
```python
# review some of the misclassified images
miscl_img = X_test[y_test != y_testPred][:25]
actual_label = y_test[y_test != y_testPred][:25]
predicted_label = y_testPred[y_test != y_testPred][:25]
fig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True)
ax = ax.flatten()
for i in range(25):
img = np.array(miscl_img.iloc[i]).reshape(28, 28)
ax[i].imshow(img, cmap="Greys", interpolation="nearest")
ax[i].set_title(
"%d) t: %d p: %d" % (i + 1, actual_label.iloc[i], predicted_label[i]),
fontsize=10,
)
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.show()
```
# Training an artificial neural network
<a id = 'Training-an-artificial-neural-network'></a>
## Logistic cost function refresher
With linear regression, we seek to minimize the sum of squared errors to learn the weights of the model. We did this by minimizing this cost function:
$$
J(\textbf{w}) = \sum_i\frac{1}{2}\big(\phi(z^i) - y^i\big)^2
$$
With the logistic cost function, we want to maximize likelihood. Assuming the samples in our dataset are independent of one another, likelihood $L$ is defined as:
$$
L(\textbf{w}) = P(\textbf{y}\vert\textbf{x};\textbf{w}) = \prod^n_{i=1}P(y^i\vert\textbf{x}^i;\textbf{w}) = \prod^n_{i=1}\big(\phi\big(z^i\big)\big)^{y^i}\big(1-\phi\big(z^i\big)\big)^{1-y^i}
$$
The crux of this function is the logistic sigmoid function:
$$
\phi(z) = \frac{1}{1+e^{-z}}
$$
where $z$ is the net input, which is the linear combination of weights and sample feature values: $z = w_0x_0 + w_1x_1 + ... + w_mx_m$
In practice, it is more straightforward to maximize the (natural) log of the likelihood function, which is called the log-likelihood function:
$$
l(\textbf{w}) = \mbox{log}L(\textbf{w}) = \sum^n_{i=1}\bigg[y^i\mbox{log}\big(\phi\big(z^i\big)\big) + \big(1-y^i\big)\mbox{log}\big(1-\phi(z^i)\big)\bigg]
$$
Applying the log function reduces the potential for numerical underflow, which can occur if the likelihoods are very small. Further, converting the product of factors into a summation of terms makes it easier to obtain the derivative of this function. We can either maximize the log-likelihood using gradient ascent or negate it and minimize it as a cost function. The cost function can be written as:
$$
J(\textbf{w}) = \sum^n_{i=1}\bigg[-y^i\mbox{log}\big(\phi\big(z^i\big)\big) - \big(1-y^i\big)\mbox{log}\big(1-\phi(z^i)\big)\bigg]
$$
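The numerical-underflow motivation mentioned above is easy to demonstrate (a sketch with arbitrary probabilities):
```python
import numpy as np

p = np.full(1000, 0.01)      # 1,000 small likelihood factors
print(np.prod(p))            # 0.0 -- the raw product underflows
print(np.sum(np.log(p)))     # about -4605.2 -- the log-likelihood stays finite
```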
To better understand this function, we can look at the cost that we calculate for one training sample:
$$
J(\phi(z),y;\textbf{w}) = -y\mbox{log}\big(\phi\big(z\big)\big) - \big(1-y\big)\mbox{log}\big(1-\phi(z)\big)
$$
The first term becomes zero if $y$ = 0 and the second term becomes zero if $y$ = 1, so this cost function can be reshaped as:
$$
J(\phi(z),y;\textbf{w}) =
\left\{
\begin{array}{ll}
-\mbox{log}\big(\phi\big(z\big)\big) & \mbox{if } y = 1 \\
-\mbox{log}\big(1-\phi(z)\big) &\mbox{if } y = 0
\end{array}
\right.
$$
<a id = 'Logistic-cost-function-refresher'></a>
```python
# plot cost function for y = 0 and y = 1
def sigmoid(z):
return 1.0 / (1.0 + np.exp(-z))
def cost_1(z):
return -np.log(sigmoid(z))
def cost_0(z):
return -np.log(1 - sigmoid(z))
z = np.arange(-10, 10, 0.1)
phi_z = sigmoid(z)
c1 = [cost_1(x) for x in z]
c0 = [cost_0(x) for x in z]
c = np.c_[c1, c0]
# plot
p = PrettierPlot()
ax = p.make_canvas(title="", x_label="$\phi(z)$", y_label="J(w)", y_shift=0.9)
p.multi_line(
x=phi_z,
y=c,
label=["J(w) if y = 1", "J(w) if y = 0"],
x_units="fff",
y_units="f",
bbox=(1.2, 0.9),
marker_on=False,
ax=ax,
)
plt.ylim(0, 5)
```
> Remarks - The key takeaway is seeing how the cost J(w) increases drastically for each curve as the prediction moves further away from the truth.
## Computing the logistic cost function
$$
J(\textbf{w}) = -\bigg[\sum^n_{i=1}y^i\mbox{log}\big(a^i\big) + \big(1-y^i\big)\mbox{log}\big(1-a^i\big)\bigg]
$$
where $a^i$ is the sigmoid activation function of the $i$th sample in the dataset, which is computed in the forward propagation step: $a^i = \phi(z^i)$. The $i$ superscript refers to a specific training sample index, not a neural net layer.
We can add a regularization term to reduce the degree of overfitting:
$$
J(\textbf{w}) = -\bigg[\sum^n_{i=1}y^i\mbox{log}\big(a^i\big) + \big(1-y^i\big)\mbox{log}\big(1-a^i\big)\bigg] + \frac{\lambda}{2}\Vert\textbf{w}\Vert^2_2
$$
This MLP is built to perform multiclass classification, and returns an output vector of $t$ elements that gets compared to the 1 x $t$ dimensional target vector in the one-hot encoding representation. As an example, the activation of the third layer and the target class 2 for a particular sample may look like:
$$
a^{out}
=
\begin{bmatrix} 0.1 \\ 0.9 \\ \vdots \\ 0.3 \end{bmatrix}
, y =
\begin{bmatrix} 0 \\ 1 \\ \vdots \\ 0 \end{bmatrix}
$$
Because of this, we need to generalize the logistic cost function to all $t$ activation units in our network. So it becomes the following:
$$
J(\textbf{W}) = -\bigg[\sum^n_{i=1}\sum^t_{j=1}y^i_j\mbox{log}\big(a^i_j\big) + \big(1-y^i_j\big)\mbox{log}\big(1-a^i_j\big)\bigg]
$$
The $i$ again refers to the index of a specific training sample. Here is the full function with the regularization term added:
$$
J(\textbf{W}) = -\bigg[\sum^n_{i=1}\sum^t_{j=1}y^i_j\mbox{log}\big(a^i_j\big) + \big(1-y^i_j\big)\mbox{log}\big(1-a^i_j\big)\bigg] + \frac{\lambda}{2}\sum_{l=1}^{L-1}\sum_{i=1}^{\mu_l}\sum_{j=1}^{\mu_{l+1}}(w^l_{j,i})^2
$$
This generalized regularization term sums the squares of all weights in each layer $l$ (the bias units are excluded). $\mu_l$ refers to the number of units in layer $l$. To minimize this cost function, we need to calculate the partial derivative of the cost with respect to each weight in every layer of the network:
$$
\frac{\partial}{\partial(w^l_{j,i})}J(\textbf{W})
$$
It's important to note that $\textbf{W}$ consists of multiple matrices. In an MLP with one hidden layer, we have the weight matrix $\textbf{W}^h$ that connects the input layer to the hidden layer, and $\textbf{W}^{out}$ that connects the hidden layer to the output layer. Stacked together, these matrices form a three dimensional tensor.
For $\textbf{W}^h$, the rows represent the features, and the columns represent the hidden units. One particular value represents the weight that a certain hidden unit has for a certain feature. For $\textbf{W}^{out}$, the rows represent the hidden units, and the columns represent the output units. The matrices in this tensor do not necessarily have the same shape; they only do if the MLP is initialized with the same number of hidden units, output units and input features.
<a id = 'Computing-the-logistic-cost-function'></a>
## Developing intuition for backpropagation
Backpropagation is one of the most widely used algorithms for efficiently training neural networks. It is a very computationally efficient approach to compute the partial derivatives of a complex cost function in MLPs. The goal is to use the derivatives to learn the weight coefficients. The challenging aspect of weight determination in an MLP is that we are typically dealing with a very large number of weight coefficients in a high-dimensional feature space. Further, the error surface of a neural network cost function is not convex or smooth with respect to the parameters, unlike single layer networks such as Adaline or logistic regression. There will be many local minima that need to be overcome in order to find the global minimum of the cost function.
This calls forth the calculus concept of the chain rule, which is a useful technique for finding the derivative of a complex, nested function like $f(g(x))$:
$$
\frac{d}{dx}\bigg(f\big(g(x)\big)\bigg) = \frac{df}{dg} \cdot \frac{dg}{dx}
$$
The chain rule can be applied to an arbitrarily long function composition. Suppose we have five different functions $f(x)$, $g(x)$, $h(x)$, $u(x)$ and $v(x)$, and let $F$ be the function composition $F(x) = f(g(h(u(v(x)))))$. The derivative of this function is:
$$
\frac{dF}{dx} = \frac{d}{dx}F(x) = \frac{d}{dx}f(g(h(u(v(x))))) = \frac{df}{dg} \cdot \frac{dg}{dh} \cdot \frac{dh}{du} \cdot \frac{du}{dv} \cdot \frac{dv}{dx}
$$
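As a quick numerical illustration of the chain rule (the toy functions here are assumptions for the sketch, not from the text):
```python
import numpy as np

# F(x) = f(g(x)) with f(u) = sin(u) and g(x) = x**2,
# so the chain rule gives dF/dx = cos(x**2) * 2x
x = 1.3
analytic = np.cos(x**2) * 2 * x
eps = 1e-6
numeric = (np.sin((x + eps)**2) - np.sin((x - eps)**2)) / (2 * eps)
print(analytic, numeric)  # the two values agree to ~1e-9
```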
Automatic differentiation is a technique for solving these kinds of problems efficiently. It comes in two modes, forward and reverse. Backpropagation is a special case of reverse-mode automatic differentiation. The key point is that applying the chain rule in forward mode would be expensive, since we would need to multiply large matrices for each layer (Jacobians) that are eventually multiplied by a vector to obtain the output. The trick of reverse mode is that we start from right to left: we multiply a matrix by a vector, which yields another vector that is multiplied by the next matrix, and so on. Matrix-vector multiplication is much cheaper computationally than matrix-matrix multiplication, which is what makes backpropagation so popular for training neural networks.
<a id = 'Developing-your-intuition-for-backpropagation'></a>
## Training neural networks via backpropagation
Earlier we showed how to calculate the cost as the difference between the activation of the last layer and the actual target class label. To get there, we need to apply forward propagation in order to obtain the activation of the output layer:
$$
\textbf{Z}^{h} = \textbf{A}^{in}\textbf{W}^{h} \ \mbox{(net input of the hidden layer)}
\\
\textbf{A}^{h} = \phi(\textbf{Z}^{h}) \ \mbox{(activation of the hidden layer)}
\\
\textbf{Z}^{out} = \textbf{A}^{h}\textbf{W}^{out} \ \mbox{(net input of the output layer)}
\\
\textbf{A}^{out} = \phi(\textbf{Z}^{out}) \ \mbox{(activation of the output layer)}
$$
Following that calculation, backpropagation is used to update the weights in the MLP model. The error is propagated from right to left, starting with the error vector of the output layer:
$$
\mathbf{\delta^{out}} = \mathbf{a^{out}} - \textbf{y}
$$
$\textbf{y}$ is the vector of the true class labels. In the homegrown implementation, this is the `sigma_out` variable. Next, calculate the error term of the hidden layer:
$$
\mathbf{\delta^{h}} = \mathbf{\delta^{out}}\big(\mathbf{W^{out}}\big)^T \odot \frac{\partial\phi(z^h)}{\partial z^h}
$$
The rightmost term is just the derivative of the sigmoid activation function, which is:
$$
\frac{\partial\phi(z^h)}{\partial z^h} = \big(a^h \odot (1-a^h)\big)
$$
The symbol $\odot$ stands for element-wise multiplication: the multiplication of elements in separate matrices that share the same position (when using two equally sized matrices). Element-wise multiplication can also occur between a vector and a matrix, as long as the dimensions agree.
After that, compute the $\delta^h$ layer error matrix (the value `sigma_h` in the homegrown implementation):
$$
\delta^h = \delta^{out}\big(\textbf{W}^{out}\big)^T \odot \bigg(a^h \odot \big(1 - a^h\big)\bigg)
$$
The $\delta^h$ term was calculated by taking the transpose $\big(\textbf{W}^{out}\big)^T$ of the $h$ x $t$ dimensional matrix $\textbf{W}^{out}$. The value $t$ is the number of output class labels and $h$ is the number of hidden units. Multiplying the $n$ x $t$ dimensional $\mathbf{\delta}^{out}$ by $\big(\textbf{W}^{out}\big)^T$ results in an $n$ x $h$ dimensional matrix that is multiplied elementwise by the sigmoid function derivative, also of $n$ x $h$ dimension, to ultimately obtain $\delta^h$.
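These shapes can be checked directly (illustrative random values; `sigma_out` stands in for $a^{out} - y$):
```python
import numpy as np

n, h, t = 5, 3, 2                        # samples, hidden units, output units
rng = np.random.RandomState(1)
a_h = rng.rand(n, h)                     # hidden activations
w_out = rng.rand(h, t)
sigma_out = rng.rand(n, t)               # output-layer error, shape (n, t)

sigma_h = (sigma_out @ w_out.T) * (a_h * (1.0 - a_h))
print(sigma_h.shape)                     # (5, 3) -- i.e. (n, h)
```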
Now that we have all of the $\delta$ terms, we can write the partial derivatives of the cost function as follows:
$$
\frac{\partial}{\partial w_{i,j}^{out}}\mathbf{J(W)} = a_j^h\delta_i^{out}
\\
\frac{\partial}{\partial w_{i,j}^{h}}\mathbf{J(W)} = a_j^{in}\delta_i^{h}
$$
Next, we need to accumulate the partial derivative of every node in each layer with the error of the node in the next layer. This requires that we compute $\Delta_{i,j}^l$ for every sample in the training set. The vectorized implementation is as follows:
$$
\Delta^h = \Delta^h + \big(\textbf{A}^{in}\big)^T \delta^h
\\
\Delta^{out} = \Delta^{out} + \big(\textbf{A}^{h}\big)^T \delta^{out}
$$
And after accumulating the partial derivatives, we can add the regularization term:
$$
\Delta^l := \Delta^l + \lambda^l \ \mbox{(except for the bias term)}
$$
The expressions immediately above correspond to the variables `delta_w_h`, `delta_b_h`, `delta_w_out` and `delta_b_out`. Lastly, after computing the gradients, we can update the weights by taking a step in the direction opposite the gradient for each layer $l$:
$$
\textbf{W}^l := \textbf{W}^l - \eta\Delta^l
$$
<a id = 'Training-neural-networks-via-backpropagation'></a>
|
State Before: α : Type u_1
β : Type u_2
γ : Type ?u.6251
f✝ : α ↪ β
s✝ : Finset α
f : α ↪ β
s : Finset α
p : (a : β) → a ∈ map f s → Prop
h : ∀ (x : α) (H : x ∈ s), p (↑f x) (_ : ↑f x ∈ map f s)
x : β
hx : x ∈ map f s
⊢ p x hx State After: case intro.intro
α : Type u_1
β : Type u_2
γ : Type ?u.6251
f✝ : α ↪ β
s✝ : Finset α
f : α ↪ β
s : Finset α
p : (a : β) → a ∈ map f s → Prop
h : ∀ (x : α) (H : x ∈ s), p (↑f x) (_ : ↑f x ∈ map f s)
y : α
hy : y ∈ s
hx : ↑f y ∈ map f s
⊢ p (↑f y) hx Tactic: obtain ⟨y, hy, rfl⟩ := mem_map.1 hx State Before: case intro.intro
α : Type u_1
β : Type u_2
γ : Type ?u.6251
f✝ : α ↪ β
s✝ : Finset α
f : α ↪ β
s : Finset α
p : (a : β) → a ∈ map f s → Prop
h : ∀ (x : α) (H : x ∈ s), p (↑f x) (_ : ↑f x ∈ map f s)
y : α
hy : y ∈ s
hx : ↑f y ∈ map f s
⊢ p (↑f y) hx State After: no goals Tactic: exact h _ hy
|
r=0.65
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7ps3c/media/images/d7ps3c-014/svc:tesseract/full/full/0.65/default.jpg Accept:application/hocr+xml
|
-- The sole purpose of this module is to ease compilation of everything.
module Everything where
import Generic
import Structures
import Instances
import FinMap
import CheckInsert
import GetTypes
import FreeTheorems
import BFF
import Bidir
import LiftGet
import Precond
import Examples
import BFFPlug
|
/* -*- mode: c++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/*
Copyright (C) 2015 Klaus Spanderen
This file is part of QuantLib, a free-software/open-source library
for financial quantitative analysts and developers - http://quantlib.org/
QuantLib is free software: you can redistribute it and/or modify it
under the terms of the QuantLib license. You should have received a
copy of the license along with this program; if not, please email
<[email protected]>. The license is also available online at
<http://quantlib.org/license.shtml>.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the license for more details.
*/
/*! \file gridmodellocalvolsurface.cpp
*/
#include <ql/time/calendars/nullcalendar.hpp>
#include <ql/termstructures/volatility/equityfx/fixedlocalvolsurface.hpp>
#include <ql/termstructures/volatility/equityfx/gridmodellocalvolsurface.hpp>
#include <boost/bind.hpp>
#include <algorithm>
namespace QuantLib {
GridModelLocalVolSurface::GridModelLocalVolSurface(
const Date& referenceDate,
const std::vector<Date>& dates,
const std::vector<ext::shared_ptr<std::vector<Real> > >& strikes,
const DayCounter& dayCounter,
Extrapolation lowerExtrapolation,
Extrapolation upperExtrapolation)
: LocalVolTermStructure(
referenceDate, NullCalendar(), Following, dayCounter),
CalibratedModel(dates.size()*strikes.front()->size()),
referenceDate_(referenceDate),
times_(dates.size()),
strikes_(strikes),
dayCounter_(dayCounter),
lowerExtrapolation_(lowerExtrapolation),
upperExtrapolation_(upperExtrapolation) {
for (Size i=1; i < strikes_.size(); ++i) {
QL_REQUIRE(strikes_[i]->size() == strikes_.front()->size(),
"strike vectors must have the same dimension");
}
std::fill(arguments_.begin(), arguments_.end(),
ConstantParameter(1.0, PositiveConstraint()));
for (Size i=0; i < dates.size(); ++i) {
times_[i] = dayCounter.yearFraction(referenceDate_, dates[i]);
}
generateArguments();
}
void GridModelLocalVolSurface::update() {
LocalVolTermStructure::update();
CalibratedModel::update();
}
Date GridModelLocalVolSurface::maxDate() const {
return localVol_->maxDate();
}
Time GridModelLocalVolSurface::maxTime() const {
return localVol_->maxTime();
}
Real GridModelLocalVolSurface::minStrike() const {
return localVol_->minStrike();
}
Real GridModelLocalVolSurface::maxStrike() const {
return localVol_->maxStrike();
}
Volatility GridModelLocalVolSurface::localVolImpl(Time t, Real strike)
const {
return localVol_->localVol(t, strike, true);
}
void GridModelLocalVolSurface::generateArguments() {
const ext::shared_ptr<Matrix> localVolMatrix(
new Matrix(strikes_.front()->size(), times_.size()));
std::transform(arguments_.begin(), arguments_.end(),
localVolMatrix->begin(),
boost::bind(&Parameter::operator(), _1, 0.0));
localVol_ = ext::make_shared<FixedLocalVolSurface>(
referenceDate_,
times_,
strikes_,
localVolMatrix,
dayCounter_,
lowerExtrapolation_,
upperExtrapolation_);
}
}
|
theory Assertion imports
"State"
begin
no_notation FForm ("F _")
no_notation EExp ("E _")
(*evalF defines the semantics of assertions written in first-order logic*)
primrec evalF :: "state => fform => bool" where
"evalF (f,WTrue) = (True)" |
"evalF (f,WFalse) = (False)" |
"evalF (f,e1 [=] e2) = (case (evalE (f,e1), evalE (f,e2)) of
(RR (r1),RR (r2)) => ((r1::real) = r2) |
(SS (r1),SS (r2)) => ((r1::string) = r2) |
(BB (r1),BB (r2)) => ((r1::bool) = r2) |
(_,_) => False)" |
"evalF (f,e1 [<] e2) = (case (evalE (f,e1), evalE (f,e2)) of
(RR (r1),RR (r2)) => ((r1::real) < r2) |
(_,_) => False)" |
"evalF (f,e1 [>] e2) = (case (evalE (f,e1), evalE (f,e2)) of
(RR (r1),RR (r2)) => (r1::real) > r2 |
(_,_) => False)" |
"evalF (f,[~] form1) = (~ (evalF (f,form1)))" |
"evalF (f,form1 [&] form2) = ((evalF (f,form1)) & (evalF (f,form2)))" |
"evalF (f,form1 [|] form2) = ((evalF (f,form1)) | (evalF (f,form2)))" |
"evalF (f,form1 [-->] form2) = ((evalF (f,form1)) --> (evalF (f,form2)))" |
"evalF (f,form1 [<->] form2) = ((evalF (f,form1)) \<longleftrightarrow> (evalF (f,form2)))" |
"evalF (f,WALL x form1)= (ALL (v::real). (evalF((%a. %i. (if (a=x) then (RR (v)) else (f(a, i)))), form1)))" |
"evalF (f,WEX x form1)= (EX (v::real). evalF((%a. %i. (if (a=x) then (RR (v)) else f(a, i))), form1))"
definition evalFP :: "cstate => fform => now => bool" where
"evalFP(f,P,c) == ALL s. inList(s,f(c)) --> evalF(s,P)"
(*ievalF defines the semantics of assertions written in interval logic and duration calculus*)
consts ievalF :: "cstate => fform => now => now => bool"
axiomatization where
chop_eval: "ievalF (f, P[^]Q, c, d) = (EX k s1 s2. s1@s2=f(k) & ievalF (%t. if t=k then s1 else f(t), P, c, k)
& ievalF (%t. if t=k then s2 else f(t), Q, k, d))" and
chop_sep: "ievalF (f, P, c, d) = (ALL k s1 s2. s1@s2=f(k) --> ievalF (%t. if t=k then s1 else f(t), P, c, k)
& ievalF (%t. if t=k then s2 else f(t), P, k, d))" and
pf_eval: "ievalF (f, pf (P), c, d) = (c=d & (EX s. inList(s, f(c))) & evalF (s, P))" and
high_eval: "ievalF (f, high P, c, d) = ((ALL (k::real). (c<k & k<d) --> evalFP (f, P, k)))" and
chop_interval: "(ALL t. (c=d --> f(c)=g(c)) & (c<=t & t<=d --> f(t)=g(t))) ==> ievalF(f,P,c,d)=ievalF(g,P,c,d)"
lemma chop_eval1: "(EX k. ievalF (f, P, c, k) & ievalF (f, Q, k, d)) ==> ievalF (f, P[^]Q, c, d)"
apply (simp add: chop_eval,auto)
apply (cut_tac x=k in exI,auto)
apply (cut_tac x="f(k)" in exI,auto)
apply (subgoal_tac "f = (%t. if t = k then f(k) else f(t))",auto)
apply (subgoal_tac "(ALL ka s1 s2. s1@s2=f(ka) --> ievalF (%t. if t=ka then s1 else f(t), Q, k, ka)
& ievalF (%t. if t=ka then s2 else f(t), Q, ka, d))")
apply (subgoal_tac "ievalF(%t. if t = k then f(k) else f(t), Q, k, k) &
ievalF(%t. if t = k then [] else f(t), Q, k, d)")
apply blast
apply (erule allE)+
apply blast
apply (cut_tac f=f and P=Q and c=k and d=d in chop_sep,auto)
done
(*The following axioms define the evaluation of formulas in a fragment of first-order interval logic.*)
axiomatization where
True_eval : "ievalF (f,WTrue, c, d) = (True)" and
False_eval : "ievalF (f,WFalse,c,d) = (False)" and
L_eval : "ievalF (f, (l [=] Real L), c, d) = (d-c = L)" and
(*Equal_eval : "ievalF (f,e1 [=] e2,c,d) = evalFP(f,e1 [=] e2,c)" and
Less_eval : "ievalF (f,e1 [<] e2,c,d) = evalFP(f,e1 [<] e2,c)" and
Great_eval: "ievalF (f,e1 [>] e2,c,d) = evalFP(f,e1 [>] e2,c)" and*)
Not_eval: "ievalF (f,[~] form1,c,d) = (~ (ievalF (f,form1,c,d)))" and
And_eval: "ievalF (f,form1 [&] form2,c,d) = ((ievalF (f,form1,c,d)) & (ievalF (f,form2,c,d)))" and
Or_eval: "ievalF (f,F' [|] G,c,d) = ((ievalF (f,F',c,d)) | (ievalF (f,G,c,d)))" and
Imply_eval: "ievalF (f,form1 [-->] form2,c,d) = ((ievalF (f,form1,c,d)) --> (ievalF (f,form2,c,d)))" and
Equiv_eval: "ievalF (f,form1 [<->] form2,c,d) = ((ievalF (f,form1,c,d)) \<longleftrightarrow> (ievalF (f,form2,c,d)))" and
ALL_eval: "ievalF (f,WALL x form1,c,d)= (ALL (v::real). ievalF((%t. List.map(%s. %y i. if y=x & i=R then RR(v) else s(y,i),f(t))), form1, c, d))" and
EX_eval: "ievalF (f,WEX x form1,c,d)= (EX (v::real). ievalF((%t. List.map(%s. %y i. if y=x & i=R then RR(v) else s(y,i),f(t))), form1, c, d))"
(*The following axioms define the semantics of the closure of formulas.*)
axiomatization where
close_fact1: "ALL t. (t>=b & t<c --> evalF (f, p)) --> (evalF (f, close(p)))" and
close_fact2: "ALL t. (t>=b & t<c --> evalF (f, p)) --> (evalF (f, close([~]p)))" and
close_fact3: "evalF (s,p) ==> evalF (s,close(p))"
end
|
Require Import List.
Import ListNotations.
Require Import Coq.Init.Nat.
Require Import String.
Open Scope string_scope.
Inductive Var : Set :=
| variable : string -> Var.
Scheme Equality for Var.
Coercion variable : string >-> Var.
Definition Var_equality (var1 var2 : Var) : bool :=
match var1,var2 with
| variable x, variable y => eqb x y
end.
Compute Var_equality "aur" "aur".
(*The vector is implemented on top of List*)
Definition Vector := list nat.
Definition add_first (vect : Vector) (valoare : nat): Vector := valoare :: vect.
Notation "Add( V , Val )" := (add_first V Val) (at level 50).
Definition length (vect : Vector) : nat := List.length vect.
Notation "Size( V )" := (length V) (at level 50).
Definition last_el (vect : Vector) : nat := List.last vect 0.
Notation "Last( V )" := (last_el V ) (at level 50).
Definition remove_ls (vect : Vector) : Vector := List.removelast vect.
Notation "RemoveLast( V )" := (remove_ls V) (at level 50).
Definition index (vect : Vector) (valoare : nat) : nat := List.nth valoare vect 0.
Notation " V [* I *]" := (index V I) (at level 40).
Check [1;2;3].
Compute Size( [1;32;11;9] ).
Compute Last( [1;22;13;91] ).
Definition Vector1 := [1;32;11;9].
Definition Vector2 := Add( Vector1 , 100 ).
Compute Vector2.
Compute Vector2 [* 3 *].
Compute RemoveLast( [1;32;11;9] ).
(* Environment *)
Inductive Value :=
| undef : Value
| structure : Value
| nat_val : nat -> Value
| bool_val : bool -> Value
| string_val: string -> Value.
Coercion nat_val : nat >-> Value.
Coercion bool_val : bool >-> Value.
Coercion string_val : string >-> Value.
Scheme Equality for Value.
Definition Env := Var -> Value.
Definition env1 : Env :=
fun x =>
if (Var_eq_dec x "variable")
then nat_val 10
else if(Var_eq_dec x "sir")
then string_val "mesaj"
else if(Var_eq_dec x "boolean")
then bool_val true
else undef.
Definition empty_env : Env := fun var => undef.
Inductive Struct :=
|structura : Var -> Struct.
Coercion structura : Var >-> Struct.
Definition is_declared (x : Var) (env : Env) :=
negb (Value_beq (env x) undef).
Compute env1 "variable".
Compute env1 "not_a_variable".
Compute is_declared "variable" env1.
Compute is_declared "ms" env1.
Compute env1 "variable".
Definition conv_nat (x : Value) : nat :=
match x with
| structure => 999
| undef => 999
| bool_val n => 999
| string_val n => 999
| nat_val n' => n'
end.
Compute conv_nat (env1 "variable").
Definition conv_str (x : Value) : string :=
match x with
| undef => "!"
| structure => "!"
| bool_val n => "!"
| string_val n => n
| nat_val n' => "!"
end.
Compute conv_str (env1 "sir").
Definition conv_bool (x : Value) : bool :=
match x with
| undef => false
| structure => false
| bool_val n => n
| string_val n => false
| nat_val n' => false
end.
Compute conv_bool (env1 "boolean").
Definition update (env : Env)
(x : Var) (v : Value) : Env :=
fun y =>
if (Var_eq_dec y x)
then v
else (env y).
Notation "S [ V /' X ]" := (update S X V) (at level 0).
(*ArithExp*)
Inductive ArithExp :=
| avar : Var -> ArithExp
| anum : Value -> ArithExp
| aplus : ArithExp -> ArithExp -> ArithExp
| aminus : ArithExp -> ArithExp -> ArithExp
| amul : ArithExp -> ArithExp -> ArithExp
| adiv : ArithExp -> ArithExp -> ArithExp
| aperc : ArithExp -> ArithExp -> ArithExp.
Coercion avar : Var >-> ArithExp.
Coercion anum : Value >-> ArithExp.
Notation "A -' B" := (aminus A B) (at level 50).
Notation "A +' B" := (aplus A B) (at level 50).
Notation "A *' B" := (amul A B) (at level 40).
Notation "A /' B" := (adiv A B) (at level 40, left associativity).
Notation "A %' B" := (aperc A B) (at level 40).
Fixpoint AEval (a : ArithExp) (env : Env) : Value :=
match a with
| avar var => env var
| anum n' => n'
| aplus a1 a2 => conv_nat(AEval a1 env) + conv_nat(AEval a2 env)
| amul a1 a2 => conv_nat(AEval a1 env) * conv_nat(AEval a2 env)
| aminus a1 a2 =>if( leb (conv_nat(AEval a1 env)) (conv_nat(AEval a2 env)))
then 0
else conv_nat(AEval a1 env) - conv_nat(AEval a2 env)
| adiv a1 a2 => conv_nat(AEval a1 env) / conv_nat(AEval a2 env)
| aperc a1 a2 => (Nat.modulo (conv_nat(AEval a1 env)) (conv_nat(AEval a2 env)))
end.
Compute AEval (16/'4) (env1).
Compute AEval "variable" env1.
Compute AEval ("variable" /' 2)(env1).
(*BoolExp*)
Inductive BoolExp :=
| btrue : BoolExp
| bfalse : BoolExp
| bgt : ArithExp -> ArithExp -> BoolExp
| bge : ArithExp -> ArithExp -> BoolExp
| beq : ArithExp -> ArithExp -> BoolExp
| blt : ArithExp -> ArithExp -> BoolExp
| ble : ArithExp -> ArithExp -> BoolExp
| bnot : BoolExp -> BoolExp
| band : BoolExp -> BoolExp -> BoolExp
| bxor : BoolExp -> BoolExp -> BoolExp
| bor : BoolExp -> BoolExp -> BoolExp.
Notation "A >' B" := (bgt A B)(at level 60).
Notation "A >=' B" := (bge A B)(at level 60).
Notation "A ==' B" := (beq A B)(at level 60).
Notation "A <' B" := (blt A B)(at level 60).
Notation "A <=' B" := (ble A B)(at level 60).
Notation "! A" := (bnot A)(at level 60).
Notation "A &' B" := (band A B)(at level 60).
Notation "A |' B" := (bor A B)(at level 60).
Notation "A ^' B" := (bxor A B)(at level 60).
Fixpoint BEval (b : BoolExp) (env : Env) : Value :=
match b with
| btrue => true
| bfalse => false
| bnot b' => negb (conv_bool(BEval b' env))
| bor b1 b2 => orb (conv_bool(BEval b1 env)) (conv_bool(BEval b2 env))
| bxor b1 b2 => xorb (conv_bool(BEval b1 env)) (conv_bool(BEval b2 env))
| band b1 b2 => andb (conv_bool(BEval b1 env)) (conv_bool(BEval b2 env))
| beq a1 a2 => Nat.eqb (conv_nat(AEval a1 env)) (conv_nat(AEval a2 env))
| blt a1 a2 => ltb (conv_nat(AEval a1 env)) (conv_nat(AEval a2 env))
| ble a1 a2 => leb (conv_nat(AEval a1 env)) (conv_nat(AEval a2 env))
| bgt a1 a2 => negb(leb (conv_nat(AEval a1 env)) (conv_nat(AEval a2 env)))
| bge a1 a2 => negb(ltb (conv_nat(AEval a1 env)) (conv_nat(AEval a2 env)))
end.
Compute BEval (!("x" <' 100)) env1.
Compute AEval "variable" env1.
Compute BEval ("variable" <=' 9) env1.
(*StringExp*)
Inductive StringExp :=
| str_val : string ->StringExp
| str_comp : StringExp -> StringExp -> StringExp
| str_concat : StringExp -> StringExp -> StringExp
| str_len : StringExp -> StringExp.
Coercion str_val : string >-> StringExp.
Notation "strcompare( Str1 , Str2 )" := (str_comp Str1 Str2) (at level 70).
Notation "strconcat( Str1 , Str2 )" := (str_concat Str1 Str2) (at level 50).
Notation "strlength( Str1 )" := (str_len Str1) (at level 50).
Check strcompare("abcd" , "abcd").
Fixpoint SEval (s : StringExp) (env : Env) : Value :=
match s with
| str_val str => str
| str_comp s1 s2 => String.eqb (conv_str(SEval s1 env)) (conv_str(SEval s2 env))
| str_concat s1 s2 => String.append (conv_str(SEval s1 env)) (conv_str(SEval s2 env))
| str_len s => String.length (conv_str(SEval s env))
end.
Compute SEval (strlength(strconcat("aer", "aer"))) env1.
(*Stmt*)
Inductive Stmt :=
(*Declarations and assignments*)
| Nat_decl : Var -> Stmt
| Bool_decl : Var -> Stmt
| String_decl : Var -> Stmt
| Vector_decl : Vector -> Stmt
| nat_assignment : Var -> ArithExp -> Stmt
| string_assignment : Var -> StringExp -> Stmt
| bool_assignment : Var -> BoolExp -> Stmt
| sequence : Stmt -> Stmt -> Stmt
(*The structure is implemented at the Stmt level - via notation*)
| structSTMT : Var -> Stmt -> Stmt
| Struct_init : Var -> Stmt
| Struct_decl : Var -> Var -> Stmt
(*Switch*)
| caseSTMT : ArithExp -> Stmt -> Stmt
| switchSTMT : ArithExp -> Stmt -> Stmt
(*I/O*)
| printSTMT : string -> Stmt
| scanSTMT : Var -> string -> Stmt
(*Repetitive and conditional statements*)
| ifthen : BoolExp -> Stmt ->Stmt
| ifthenelse : BoolExp -> Stmt -> Stmt -> Stmt
| while_ : BoolExp -> Stmt -> Stmt.
Inductive Function :=
| call : Stmt -> ArithExp -> Function
| f_return : Value -> Function.
Notation " C ([ A ]) " := (call C A)(at level 80).
Notation "'Return' # R " := (f_return R)(at level 80).
Check Return # 2.
Inductive Format :=
| func_form : Function -> Format
| stmt_form : Stmt -> Format
| func : Function -> Stmt -> Function -> Format
| form_divider : Format -> Format -> Format.
Notation "F1 '$' F2" := (form_divider F1 F2) (at level 99).
Coercion stmt_form : Stmt >-> Format.
Coercion func_form : Function >-> Format.
Notation "X ::= A" := (nat_assignment X A) (at level 80).
Notation "S1 ;; S2" := (sequence S1 S2) (at level 90).
Notation "-nat- V" := (Nat_decl V) (at level 70).
Notation "-bool- V" := (Bool_decl V) (at level 70).
Notation "-string- V" := (String_decl V) (at level 70).
Notation " S --> V " := (Struct_decl S V) (at level 70).
Notation "-vector- V" := (Vector_decl V) (at level 70).
Notation "[ X ] ::= A" := (bool_assignment X A) (at level 80).
Notation "'struct' ( NUME ){ S }" := (structSTMT NUME S) (at level 40).
Notation "copy_string( X , A )" := (string_assignment X A) (at level 80).
Notation " 'print' \\ S " := (printSTMT S) (at level 70).
Notation " 'scan' \\ V \\ S " := (scanSTMT V S) (at level 70).
Notation "'If' ( B ) 'Then' S1 'Else' S2 " := (ifthenelse B S1 S2) (at level 92).
Notation "'If' ( B ) 'IfThen' S1 " := (ifthen B S1) (at level 92).
Notation "'while' ( Cond ) { S }" := (while_ Cond S)(at level 97).
Notation "'for_' ( I ; Cond ; Pas ) { S }" := (I ;; while_ Cond (S ;; Pas))(at level 97).
Notation "'switch' ( A ) { S } " := (switchSTMT A S) (at level 90).
Notation " 'case' ( A ) [ S ] " := (caseSTMT A S) (at level 90).
Notation " % Call 'begin_' S R 'end_' % " := (func Call S R)(at level 97).
Fixpoint STMTeval (s : Stmt) (env : Env) (gas : nat) : Env :=
match gas with
| 0 => env
| S gas' => match s with
| Nat_decl var => update env var 0
| Bool_decl var => update env var false
| Vector_decl vector => env
| Struct_init var => update env var structure
| Struct_decl st v => STMTeval (-nat- "x.diametru" ;; -bool- "x.plin" ;; -string- "x.denumire" ;; Struct_init v ;; Struct_init st ) env gas'
| String_decl var => update env var ""
| nat_assignment var exp => update env var (AEval exp env)
| string_assignment var str => update env var (SEval str env)
| bool_assignment var b => update env var (BEval b env)
| sequence S1 S2 => STMTeval S2 (STMTeval S1 env gas') gas'
| ifthen cond S1 => if(conv_bool(BEval cond env))
then STMTeval S1 env gas'
else env
| ifthenelse cond S1 S2 => if(conv_bool(BEval cond env))
then STMTeval S1 env gas'
else STMTeval S2 env gas'
| while_ cond S1 => if(conv_bool(BEval cond env))
then STMTeval (S1 ;; (while_ cond S1)) env gas'
else env
| caseSTMT A X => if(Nat.eqb (conv_nat(env "switch")) (conv_nat(AEval A env)) )
then STMTeval X env gas'
else env
| switchSTMT A S1 => STMTeval S1 (STMTeval (-nat- "switch" ;; "switch" ::= conv_nat(AEval A env)) env gas') gas'
| printSTMT str => STMTeval (-string- "print" ;; copy_string("print", str)) env gas'
| scanSTMT var str => STMTeval (copy_string(var, str)) env gas'
| structSTMT nume S2 => STMTeval S2 (STMTeval (Struct_init nume ) env gas') gas'
end
end.
Definition dec_ex := (STMTeval ( -bool- "bool" ;;
-string- "sir" ;;
-nat- "numar"
) empty_env 100).
Compute dec_ex "numar".
Compute dec_ex "bool".
Compute dec_ex "sir".
Compute dec_ex "not_declared".
Definition decl_ex := (STMTeval ( -bool- "bool" ;;
-string- "sir" ;;
-nat- "numar" ;;
-vector-[11;23] ;;
"numar" ::= 12 ;;
copy_string("sirul2" , "avion") ;;
["bul"] ::= bfalse ;;
If(btrue |' bfalse)
Then ["boool"] ::= btrue
Else ["boool"] ::= bfalse
) empty_env 100).
Compute decl_ex "numar".
Compute decl_ex "bool".
Compute decl_ex "sir".
Compute decl_ex "sirul2".
Compute decl_ex "bul".
Compute decl_ex "boool".
Definition while_ex := (STMTeval ( -nat- "nr" ;;
"nr" ::= 3 ;;
while("nr" <=' 5)
{
"nr" ::= 104
}
) empty_env 100).
Compute while_ex "nr".
Definition for_ex := (STMTeval ( -nat- "i" ;;
-nat- "sum" ;;
for_ ( "i" ::= 2 ; "i" <=' 4 ; "i" ::= "i" +' 1 ) {
"sum" ::= "sum" +' "i"
}
) empty_env 100).
Compute for_ex "sum".
Definition switch_ex := (STMTeval ( -nat- "x" ;;
switch (6)
{
case(2)
["x" ::= 1] ;;
case(4)
["x" ::= 7] ;;
case(2)
["x" ::= 9]
}
) empty_env 100).
Compute switch_ex "x".
Definition print_scan_ex := (STMTeval ( -string- "var" ;;
print \\ "mesaj afisat" ;;
scan \\ "var" \\ "mesaj citit"
) empty_env 100).
Compute print_scan_ex "var".
Compute print_scan_ex "print".
Definition struct_ex := (STMTeval (
struct ("cerc"){
-nat- "diametru" ;;
-string- "denumire" ;;
-bool- "plin"
} ;;
"cerc"-->"x" ;;
["x.plin"] ::= btrue
) empty_env 100).
Compute struct_ex "cerc".
Compute struct_ex "x".
Compute struct_ex "x.plin".
Compute struct_ex "x.denumire".
Definition ex5 :=
-bool-"global_var"
$
-vector-[]
$
% -nat- "func" ([ 2 ])
begin_
for_ ("i" ::=2 ; "i" <=' 6 ; "i" ::= "i" +' 1) {
"x" ::= "x" +' "i"
}
Return # 1
end_
%
$
% -bool- "bool_func" ([ true ])
begin_
If("n"<='9)
Then "i" ::= 2
Else "i" ::= 1
Return # 1
end_
%
.
|
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝² : Zero β
inst✝¹ : DecidableEq α
inst✝ : DecidableEq β
tf : TotalFunction α β
⊢ ∀ (a : α), a ∈ zeroDefaultSupp tf ↔ apply (zeroDefault tf) a ≠ 0
[PROOFSTEP]
intro a
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝² : Zero β
inst✝¹ : DecidableEq α
inst✝ : DecidableEq β
tf : TotalFunction α β
a : α
⊢ a ∈ zeroDefaultSupp tf ↔ apply (zeroDefault tf) a ≠ 0
[PROOFSTEP]
rcases tf with ⟨A, y⟩
[GOAL]
case withDefault
α : Type u
β : Type v
γ : Sort w
inst✝² : Zero β
inst✝¹ : DecidableEq α
inst✝ : DecidableEq β
a : α
A : List ((_ : α) × β)
y : β
⊢ a ∈ zeroDefaultSupp (withDefault A y) ↔ apply (zeroDefault (withDefault A y)) a ≠ 0
[PROOFSTEP]
simp only [apply, zeroDefaultSupp, List.mem_map, List.mem_filter, exists_and_right, List.mem_toFinset, exists_eq_right,
Sigma.exists, Ne.def, zeroDefault]
[GOAL]
case withDefault
α : Type u
β : Type v
γ : Sort w
inst✝² : Zero β
inst✝¹ : DecidableEq α
inst✝ : DecidableEq β
a : α
A : List ((_ : α) × β)
y : β
⊢ (∃ x, { fst := a, snd := x } ∈ List.dedupKeys A ∧ (decide ¬x = 0) = true) ↔ ¬Option.getD (List.dlookup a A) 0 = 0
[PROOFSTEP]
constructor
[GOAL]
case withDefault.mp
α : Type u
β : Type v
γ : Sort w
inst✝² : Zero β
inst✝¹ : DecidableEq α
inst✝ : DecidableEq β
a : α
A : List ((_ : α) × β)
y : β
⊢ (∃ x, { fst := a, snd := x } ∈ List.dedupKeys A ∧ (decide ¬x = 0) = true) → ¬Option.getD (List.dlookup a A) 0 = 0
[PROOFSTEP]
rintro ⟨od, hval, hod⟩
[GOAL]
case withDefault.mp.intro.intro
α : Type u
β : Type v
γ : Sort w
inst✝² : Zero β
inst✝¹ : DecidableEq α
inst✝ : DecidableEq β
a : α
A : List ((_ : α) × β)
y od : β
hval : { fst := a, snd := od } ∈ List.dedupKeys A
hod : (decide ¬od = 0) = true
⊢ ¬Option.getD (List.dlookup a A) 0 = 0
[PROOFSTEP]
have := List.mem_dlookup (List.nodupKeys_dedupKeys A) hval
[GOAL]
case withDefault.mp.intro.intro
α : Type u
β : Type v
γ : Sort w
inst✝² : Zero β
inst✝¹ : DecidableEq α
inst✝ : DecidableEq β
a : α
A : List ((_ : α) × β)
y od : β
hval : { fst := a, snd := od } ∈ List.dedupKeys A
hod : (decide ¬od = 0) = true
this : od ∈ List.dlookup a (List.dedupKeys A)
⊢ ¬Option.getD (List.dlookup a A) 0 = 0
[PROOFSTEP]
rw [(_ : List.dlookup a A = od)]
[GOAL]
case withDefault.mp.intro.intro
α : Type u
β : Type v
γ : Sort w
inst✝² : Zero β
inst✝¹ : DecidableEq α
inst✝ : DecidableEq β
a : α
A : List ((_ : α) × β)
y od : β
hval : { fst := a, snd := od } ∈ List.dedupKeys A
hod : (decide ¬od = 0) = true
this : od ∈ List.dlookup a (List.dedupKeys A)
⊢ ¬Option.getD (some od) 0 = 0
[PROOFSTEP]
simpa using hod
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝² : Zero β
inst✝¹ : DecidableEq α
inst✝ : DecidableEq β
a : α
A : List ((_ : α) × β)
y od : β
hval : { fst := a, snd := od } ∈ List.dedupKeys A
hod : (decide ¬od = 0) = true
this : od ∈ List.dlookup a (List.dedupKeys A)
⊢ List.dlookup a A = some od
[PROOFSTEP]
simpa [List.dlookup_dedupKeys, WithTop.some_eq_coe]
[GOAL]
case withDefault.mpr
α : Type u
β : Type v
γ : Sort w
inst✝² : Zero β
inst✝¹ : DecidableEq α
inst✝ : DecidableEq β
a : α
A : List ((_ : α) × β)
y : β
⊢ ¬Option.getD (List.dlookup a A) 0 = 0 → ∃ x, { fst := a, snd := x } ∈ List.dedupKeys A ∧ (decide ¬x = 0) = true
[PROOFSTEP]
intro h
[GOAL]
case withDefault.mpr
α : Type u
β : Type v
γ : Sort w
inst✝² : Zero β
inst✝¹ : DecidableEq α
inst✝ : DecidableEq β
a : α
A : List ((_ : α) × β)
y : β
h : ¬Option.getD (List.dlookup a A) 0 = 0
⊢ ∃ x, { fst := a, snd := x } ∈ List.dedupKeys A ∧ (decide ¬x = 0) = true
[PROOFSTEP]
use(A.dlookup a).getD (0 : β)
[GOAL]
case h
α : Type u
β : Type v
γ : Sort w
inst✝² : Zero β
inst✝¹ : DecidableEq α
inst✝ : DecidableEq β
a : α
A : List ((_ : α) × β)
y : β
h : ¬Option.getD (List.dlookup a A) 0 = 0
⊢ { fst := a, snd := Option.getD (List.dlookup a A) 0 } ∈ List.dedupKeys A ∧
(decide ¬Option.getD (List.dlookup a A) 0 = 0) = true
[PROOFSTEP]
rw [← List.dlookup_dedupKeys] at h ⊢
[GOAL]
case h
α : Type u
β : Type v
γ : Sort w
inst✝² : Zero β
inst✝¹ : DecidableEq α
inst✝ : DecidableEq β
a : α
A : List ((_ : α) × β)
y : β
h : ¬Option.getD (List.dlookup a (List.dedupKeys A)) 0 = 0
⊢ { fst := a, snd := Option.getD (List.dlookup a (List.dedupKeys A)) 0 } ∈ List.dedupKeys A ∧
(decide ¬Option.getD (List.dlookup a (List.dedupKeys A)) 0 = 0) = true
[PROOFSTEP]
simp only [h, ← List.mem_dlookup_iff A.nodupKeys_dedupKeys, and_true_iff, not_false_iff, Option.mem_def]
[GOAL]
case h
α : Type u
β : Type v
γ : Sort w
inst✝² : Zero β
inst✝¹ : DecidableEq α
inst✝ : DecidableEq β
a : α
A : List ((_ : α) × β)
y : β
h : ¬Option.getD (List.dlookup a (List.dedupKeys A)) 0 = 0
⊢ List.dlookup a (List.dedupKeys A) = some (Option.getD (List.dlookup a (List.dedupKeys A)) 0)
[PROOFSTEP]
cases haA : List.dlookup a A.dedupKeys
[GOAL]
case h.none
α : Type u
β : Type v
γ : Sort w
inst✝² : Zero β
inst✝¹ : DecidableEq α
inst✝ : DecidableEq β
a : α
A : List ((_ : α) × β)
y : β
h : ¬Option.getD (List.dlookup a (List.dedupKeys A)) 0 = 0
haA : List.dlookup a (List.dedupKeys A) = none
⊢ none = some (Option.getD none 0)
[PROOFSTEP]
simp [haA] at h
[GOAL]
case h.some
α : Type u
β : Type v
γ : Sort w
inst✝² : Zero β
inst✝¹ : DecidableEq α
inst✝ : DecidableEq β
a : α
A : List ((_ : α) × β)
y : β
h : ¬Option.getD (List.dlookup a (List.dedupKeys A)) 0 = 0
val✝ : β
haA : List.dlookup a (List.dedupKeys A) = some val✝
⊢ some val✝ = some (Option.getD (some val✝) 0)
[PROOFSTEP]
simp
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs : List (α × α)
x y z : α
⊢ applyId ((y, z) :: xs) x = if y = x then z else applyId xs x
[PROOFSTEP]
simp only [List.applyId, List.dlookup, eq_rec_constant, Prod.toSigma, List.map]
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs : List (α × α)
x y z : α
⊢ Option.getD (if h : y = x then some z else List.dlookup x (List.map (fun p => { fst := p.fst, snd := p.snd }) xs)) x =
if y = x then z else Option.getD (List.dlookup x (List.map (fun p => { fst := p.fst, snd := p.snd }) xs)) x
[PROOFSTEP]
split_ifs
[GOAL]
case pos
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs : List (α × α)
x y z : α
h✝ : y = x
⊢ Option.getD (some z) x = z
[PROOFSTEP]
rfl
[GOAL]
case neg
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs : List (α × α)
x y z : α
h✝ : ¬y = x
⊢ Option.getD (List.dlookup x (List.map (fun p => { fst := p.fst, snd := p.snd }) xs)) x =
Option.getD (List.dlookup x (List.map (fun p => { fst := p.fst, snd := p.snd }) xs)) x
[PROOFSTEP]
rfl
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : List.length xs = List.length ys
x y : α
i : ℕ
h₂ : List.get? xs i = some x
⊢ applyId (List.zip xs ys) x = y ↔ List.get? ys i = some y
[PROOFSTEP]
induction xs generalizing ys i
[GOAL]
case nil
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x y : α
ys : List α
h₀ : List.Nodup []
h₁ : List.length [] = List.length ys
i : ℕ
h₂ : List.get? [] i = some x
⊢ applyId (List.zip [] ys) x = y ↔ List.get? ys i = some y
case cons
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x y head✝ : α
tail✝ : List α
tail_ih✝ :
∀ {ys : List α},
List.Nodup tail✝ →
List.length tail✝ = List.length ys →
∀ (i : ℕ), List.get? tail✝ i = some x → (applyId (List.zip tail✝ ys) x = y ↔ List.get? ys i = some y)
ys : List α
h₀ : List.Nodup (head✝ :: tail✝)
h₁ : List.length (head✝ :: tail✝) = List.length ys
i : ℕ
h₂ : List.get? (head✝ :: tail✝) i = some x
⊢ applyId (List.zip (head✝ :: tail✝) ys) x = y ↔ List.get? ys i = some y
[PROOFSTEP]
case nil => cases h₂
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x y : α
ys : List α
h₀ : List.Nodup []
h₁ : List.length [] = List.length ys
i : ℕ
h₂ : List.get? [] i = some x
⊢ applyId (List.zip [] ys) x = y ↔ List.get? ys i = some y
[PROOFSTEP]
case nil => cases h₂
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x y : α
ys : List α
h₀ : List.Nodup []
h₁ : List.length [] = List.length ys
i : ℕ
h₂ : List.get? [] i = some x
⊢ applyId (List.zip [] ys) x = y ↔ List.get? ys i = some y
[PROOFSTEP]
cases h₂
[GOAL]
case cons
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x y head✝ : α
tail✝ : List α
tail_ih✝ :
∀ {ys : List α},
List.Nodup tail✝ →
List.length tail✝ = List.length ys →
∀ (i : ℕ), List.get? tail✝ i = some x → (applyId (List.zip tail✝ ys) x = y ↔ List.get? ys i = some y)
ys : List α
h₀ : List.Nodup (head✝ :: tail✝)
h₁ : List.length (head✝ :: tail✝) = List.length ys
i : ℕ
h₂ : List.get? (head✝ :: tail✝) i = some x
⊢ applyId (List.zip (head✝ :: tail✝) ys) x = y ↔ List.get? ys i = some y
[PROOFSTEP]
case cons x' xs xs_ih =>
cases i
· injection h₂ with h₀; subst h₀
cases ys
· cases h₁
·
-- porting note: `open List` no longer makes `zip_cons_cons` visible
simp only [List.applyId, Prod.toSigma,
  Option.getD_some, List.get?, List.dlookup_cons_eq, List.zip_cons_cons, List.map, Option.some_inj]
· cases ys
· cases h₁
· cases' h₀ with _ _ h₀ h₁
simp only [List.get?, List.zip_cons_cons, List.applyId_cons] at h₂ ⊢
rw [if_neg]
· apply xs_ih <;> solve_by_elim [Nat.succ.inj]
· apply h₀; apply List.get?_mem h₂
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x y x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.length xs = List.length ys →
∀ (i : ℕ), List.get? xs i = some x → (applyId (List.zip xs ys) x = y ↔ List.get? ys i = some y)
ys : List α
h₀ : List.Nodup (x' :: xs)
h₁ : List.length (x' :: xs) = List.length ys
i : ℕ
h₂ : List.get? (x' :: xs) i = some x
⊢ applyId (List.zip (x' :: xs) ys) x = y ↔ List.get? ys i = some y
[PROOFSTEP]
case cons x' xs xs_ih =>
cases i
· injection h₂ with h₀; subst h₀
cases ys
· cases h₁
·
-- porting note: `open List` no longer makes `zip_cons_cons` visible
simp only [List.applyId, Prod.toSigma,
  Option.getD_some, List.get?, List.dlookup_cons_eq, List.zip_cons_cons, List.map, Option.some_inj]
· cases ys
· cases h₁
· cases' h₀ with _ _ h₀ h₁
simp only [List.get?, List.zip_cons_cons, List.applyId_cons] at h₂ ⊢
rw [if_neg]
· apply xs_ih <;> solve_by_elim [Nat.succ.inj]
· apply h₀; apply List.get?_mem h₂
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x y x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.length xs = List.length ys →
∀ (i : ℕ), List.get? xs i = some x → (applyId (List.zip xs ys) x = y ↔ List.get? ys i = some y)
ys : List α
h₀ : List.Nodup (x' :: xs)
h₁ : List.length (x' :: xs) = List.length ys
i : ℕ
h₂ : List.get? (x' :: xs) i = some x
⊢ applyId (List.zip (x' :: xs) ys) x = y ↔ List.get? ys i = some y
[PROOFSTEP]
cases i
[GOAL]
case zero
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x y x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.length xs = List.length ys →
∀ (i : ℕ), List.get? xs i = some x → (applyId (List.zip xs ys) x = y ↔ List.get? ys i = some y)
ys : List α
h₀ : List.Nodup (x' :: xs)
h₁ : List.length (x' :: xs) = List.length ys
h₂ : List.get? (x' :: xs) Nat.zero = some x
⊢ applyId (List.zip (x' :: xs) ys) x = y ↔ List.get? ys Nat.zero = some y
[PROOFSTEP]
injection h₂ with h₀
[GOAL]
case zero
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x y x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.length xs = List.length ys →
∀ (i : ℕ), List.get? xs i = some x → (applyId (List.zip xs ys) x = y ↔ List.get? ys i = some y)
ys : List α
h₀✝ : List.Nodup (x' :: xs)
h₁ : List.length (x' :: xs) = List.length ys
h₀ : x' = x
⊢ applyId (List.zip (x' :: xs) ys) x = y ↔ List.get? ys Nat.zero = some y
[PROOFSTEP]
subst h₀
[GOAL]
case zero
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
y x' : α
xs ys : List α
h₀ : List.Nodup (x' :: xs)
h₁ : List.length (x' :: xs) = List.length ys
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.length xs = List.length ys →
∀ (i : ℕ), List.get? xs i = some x' → (applyId (List.zip xs ys) x' = y ↔ List.get? ys i = some y)
⊢ applyId (List.zip (x' :: xs) ys) x' = y ↔ List.get? ys Nat.zero = some y
[PROOFSTEP]
cases ys
[GOAL]
case zero.nil
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
y x' : α
xs : List α
h₀ : List.Nodup (x' :: xs)
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.length xs = List.length ys →
∀ (i : ℕ), List.get? xs i = some x' → (applyId (List.zip xs ys) x' = y ↔ List.get? ys i = some y)
h₁ : List.length (x' :: xs) = List.length []
⊢ applyId (List.zip (x' :: xs) []) x' = y ↔ List.get? [] Nat.zero = some y
[PROOFSTEP]
cases h₁
[GOAL]
case zero.cons
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
y x' : α
xs : List α
h₀ : List.Nodup (x' :: xs)
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.length xs = List.length ys →
∀ (i : ℕ), List.get? xs i = some x' → (applyId (List.zip xs ys) x' = y ↔ List.get? ys i = some y)
head✝ : α
tail✝ : List α
h₁ : List.length (x' :: xs) = List.length (head✝ :: tail✝)
⊢ applyId (List.zip (x' :: xs) (head✝ :: tail✝)) x' = y ↔ List.get? (head✝ :: tail✝) Nat.zero = some y
[PROOFSTEP]
simp only [List.applyId, Prod.toSigma, Option.getD_some, List.get?, List.dlookup_cons_eq, List.zip_cons_cons, List.map,
Option.some_inj]
[GOAL]
case succ
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x y x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.length xs = List.length ys →
∀ (i : ℕ), List.get? xs i = some x → (applyId (List.zip xs ys) x = y ↔ List.get? ys i = some y)
ys : List α
h₀ : List.Nodup (x' :: xs)
h₁ : List.length (x' :: xs) = List.length ys
n✝ : ℕ
h₂ : List.get? (x' :: xs) (Nat.succ n✝) = some x
⊢ applyId (List.zip (x' :: xs) ys) x = y ↔ List.get? ys (Nat.succ n✝) = some y
[PROOFSTEP]
cases ys
[GOAL]
case succ.nil
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x y x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.length xs = List.length ys →
∀ (i : ℕ), List.get? xs i = some x → (applyId (List.zip xs ys) x = y ↔ List.get? ys i = some y)
h₀ : List.Nodup (x' :: xs)
n✝ : ℕ
h₂ : List.get? (x' :: xs) (Nat.succ n✝) = some x
h₁ : List.length (x' :: xs) = List.length []
⊢ applyId (List.zip (x' :: xs) []) x = y ↔ List.get? [] (Nat.succ n✝) = some y
[PROOFSTEP]
cases h₁
[GOAL]
case succ.cons
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x y x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.length xs = List.length ys →
∀ (i : ℕ), List.get? xs i = some x → (applyId (List.zip xs ys) x = y ↔ List.get? ys i = some y)
h₀ : List.Nodup (x' :: xs)
n✝ : ℕ
h₂ : List.get? (x' :: xs) (Nat.succ n✝) = some x
head✝ : α
tail✝ : List α
h₁ : List.length (x' :: xs) = List.length (head✝ :: tail✝)
⊢ applyId (List.zip (x' :: xs) (head✝ :: tail✝)) x = y ↔ List.get? (head✝ :: tail✝) (Nat.succ n✝) = some y
[PROOFSTEP]
cases' h₀ with _ _ h₀ h₁
[GOAL]
case succ.cons.cons
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x y x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.length xs = List.length ys →
∀ (i : ℕ), List.get? xs i = some x → (applyId (List.zip xs ys) x = y ↔ List.get? ys i = some y)
n✝ : ℕ
h₂ : List.get? (x' :: xs) (Nat.succ n✝) = some x
head✝ : α
tail✝ : List α
h₁✝ : List.length (x' :: xs) = List.length (head✝ :: tail✝)
h₁ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
⊢ applyId (List.zip (x' :: xs) (head✝ :: tail✝)) x = y ↔ List.get? (head✝ :: tail✝) (Nat.succ n✝) = some y
[PROOFSTEP]
simp only [List.get?, List.zip_cons_cons, List.applyId_cons] at h₂ ⊢
[GOAL]
case succ.cons.cons
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x y x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.length xs = List.length ys →
∀ (i : ℕ), List.get? xs i = some x → (applyId (List.zip xs ys) x = y ↔ List.get? ys i = some y)
n✝ : ℕ
h₂ : List.get? xs n✝ = some x
head✝ : α
tail✝ : List α
h₁✝ : List.length (x' :: xs) = List.length (head✝ :: tail✝)
h₁ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
⊢ (if x' = x then head✝ else applyId (List.zip xs tail✝) x) = y ↔ List.get? tail✝ n✝ = some y
[PROOFSTEP]
rw [if_neg]
[GOAL]
case succ.cons.cons
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x y x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.length xs = List.length ys →
∀ (i : ℕ), List.get? xs i = some x → (applyId (List.zip xs ys) x = y ↔ List.get? ys i = some y)
n✝ : ℕ
h₂ : List.get? xs n✝ = some x
head✝ : α
tail✝ : List α
h₁✝ : List.length (x' :: xs) = List.length (head✝ :: tail✝)
h₁ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
⊢ applyId (List.zip xs tail✝) x = y ↔ List.get? tail✝ n✝ = some y
[PROOFSTEP]
apply xs_ih
[GOAL]
case succ.cons.cons.h₀
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x y x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.length xs = List.length ys →
∀ (i : ℕ), List.get? xs i = some x → (applyId (List.zip xs ys) x = y ↔ List.get? ys i = some y)
n✝ : ℕ
h₂ : List.get? xs n✝ = some x
head✝ : α
tail✝ : List α
h₁✝ : List.length (x' :: xs) = List.length (head✝ :: tail✝)
h₁ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
⊢ List.Nodup xs
[PROOFSTEP]
solve_by_elim [Nat.succ.inj]
[GOAL]
case succ.cons.cons.h₁
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x y x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.length xs = List.length ys →
∀ (i : ℕ), List.get? xs i = some x → (applyId (List.zip xs ys) x = y ↔ List.get? ys i = some y)
n✝ : ℕ
h₂ : List.get? xs n✝ = some x
head✝ : α
tail✝ : List α
h₁✝ : List.length (x' :: xs) = List.length (head✝ :: tail✝)
h₁ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
⊢ List.length xs = List.length tail✝
[PROOFSTEP]
solve_by_elim [Nat.succ.inj]
[GOAL]
case succ.cons.cons.h₂
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x y x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.length xs = List.length ys →
∀ (i : ℕ), List.get? xs i = some x → (applyId (List.zip xs ys) x = y ↔ List.get? ys i = some y)
n✝ : ℕ
h₂ : List.get? xs n✝ = some x
head✝ : α
tail✝ : List α
h₁✝ : List.length (x' :: xs) = List.length (head✝ :: tail✝)
h₁ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
⊢ List.get? xs n✝ = some x
[PROOFSTEP]
solve_by_elim [Nat.succ.inj]
[GOAL]
case succ.cons.cons.hnc
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x y x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.length xs = List.length ys →
∀ (i : ℕ), List.get? xs i = some x → (applyId (List.zip xs ys) x = y ↔ List.get? ys i = some y)
n✝ : ℕ
h₂ : List.get? xs n✝ = some x
head✝ : α
tail✝ : List α
h₁✝ : List.length (x' :: xs) = List.length (head✝ :: tail✝)
h₁ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
⊢ ¬x' = x
[PROOFSTEP]
apply h₀
[GOAL]
case succ.cons.cons.hnc.a
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x y x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.length xs = List.length ys →
∀ (i : ℕ), List.get? xs i = some x → (applyId (List.zip xs ys) x = y ↔ List.get? ys i = some y)
n✝ : ℕ
h₂ : List.get? xs n✝ = some x
head✝ : α
tail✝ : List α
h₁✝ : List.length (x' :: xs) = List.length (head✝ :: tail✝)
h₁ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
⊢ x ∈ xs
[PROOFSTEP]
apply List.get?_mem h₂
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x : α
⊢ applyId (List.zip xs ys) x ∈ ys ↔ x ∈ xs
[PROOFSTEP]
simp only [List.applyId]
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x : α
⊢ Option.getD (List.dlookup x (List.map Prod.toSigma (List.zip xs ys))) x ∈ ys ↔ x ∈ xs
[PROOFSTEP]
cases h₃ : List.dlookup x (List.map Prod.toSigma (xs.zip ys)) with
| none =>
dsimp [Option.getD]
rw [h₁.mem_iff]
| some val =>
have h₂ : ys.Nodup := h₁.nodup_iff.1 h₀
replace h₁ : xs.length = ys.length := h₁.length_eq
dsimp
induction xs generalizing ys with
| nil => contradiction
| cons x' xs xs_ih =>
cases' ys with y ys
· cases h₃
dsimp [List.dlookup] at h₃ ; split_ifs at h₃ with h
· rw [Option.some_inj] at h₃
subst x'; subst val
simp only [List.mem_cons, true_or_iff, eq_self_iff_true]
· cases' h₀ with _ _ h₀ h₅
cases' h₂ with _ _ h₂ h₄
have h₆ := Nat.succ.inj h₁
specialize xs_ih h₅ h₃ h₄ h₆
simp only [Ne.symm h, xs_ih, List.mem_cons, false_or_iff]
suffices : val ∈ ys; tauto
erw [← Option.mem_def, List.mem_dlookup_iff] at h₃
simp only [Prod.toSigma, List.mem_map, heq_iff_eq, Prod.exists] at h₃
rcases h₃ with ⟨a, b, h₃, h₄, h₅⟩
apply (List.mem_zip h₃).2
simp only [List.NodupKeys, List.keys, comp, Prod.fst_toSigma, List.map_map]
rwa [List.map_fst_zip _ _ (le_of_eq h₆)]
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x : α
x✝ : Option α
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = x✝
⊢ Option.getD x✝ x ∈ ys ↔ x ∈ xs
[PROOFSTEP]
cases h₃ : List.dlookup x (List.map Prod.toSigma (xs.zip ys)) with
| none =>
dsimp [Option.getD]
rw [h₁.mem_iff]
| some val =>
have h₂ : ys.Nodup := h₁.nodup_iff.1 h₀
replace h₁ : xs.length = ys.length := h₁.length_eq
dsimp
induction xs generalizing ys with
| nil => contradiction
| cons x' xs xs_ih =>
cases' ys with y ys
· cases h₃
dsimp [List.dlookup] at h₃ ; split_ifs at h₃ with h
· rw [Option.some_inj] at h₃
subst x'; subst val
simp only [List.mem_cons, true_or_iff, eq_self_iff_true]
· cases' h₀ with _ _ h₀ h₅
cases' h₂ with _ _ h₂ h₄
have h₆ := Nat.succ.inj h₁
specialize xs_ih h₅ h₃ h₄ h₆
simp only [Ne.symm h, xs_ih, List.mem_cons, false_or_iff]
suffices : val ∈ ys; tauto
erw [← Option.mem_def, List.mem_dlookup_iff] at h₃
simp only [Prod.toSigma, List.mem_map, heq_iff_eq, Prod.exists] at h₃
rcases h₃ with ⟨a, b, h₃, h₄, h₅⟩
apply (List.mem_zip h₃).2
simp only [List.NodupKeys, List.keys, comp, Prod.fst_toSigma, List.map_map]
rwa [List.map_fst_zip _ _ (le_of_eq h₆)]
[GOAL]
case none
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x : α
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = none
⊢ Option.getD none x ∈ ys ↔ x ∈ xs
[PROOFSTEP]
| none =>
dsimp [Option.getD]
rw [h₁.mem_iff]
[GOAL]
case none
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x : α
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = none
⊢ Option.getD none x ∈ ys ↔ x ∈ xs
[PROOFSTEP]
dsimp [Option.getD]
[GOAL]
case none
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x : α
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = none
⊢ x ∈ ys ↔ x ∈ xs
[PROOFSTEP]
rw [h₁.mem_iff]
[GOAL]
case some
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x val : α
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val
⊢ Option.getD (some val) x ∈ ys ↔ x ∈ xs
[PROOFSTEP]
| some val =>
have h₂ : ys.Nodup := h₁.nodup_iff.1 h₀
replace h₁ : xs.length = ys.length := h₁.length_eq
dsimp
induction xs generalizing ys with
| nil => contradiction
| cons x' xs xs_ih =>
cases' ys with y ys
· cases h₃
dsimp [List.dlookup] at h₃ ; split_ifs at h₃ with h
· rw [Option.some_inj] at h₃
subst x'; subst val
simp only [List.mem_cons, true_or_iff, eq_self_iff_true]
· cases' h₀ with _ _ h₀ h₅
cases' h₂ with _ _ h₂ h₄
have h₆ := Nat.succ.inj h₁
specialize xs_ih h₅ h₃ h₄ h₆
simp only [Ne.symm h, xs_ih, List.mem_cons, false_or_iff]
suffices : val ∈ ys; tauto
erw [← Option.mem_def, List.mem_dlookup_iff] at h₃
simp only [Prod.toSigma, List.mem_map, heq_iff_eq, Prod.exists] at h₃
rcases h₃ with ⟨a, b, h₃, h₄, h₅⟩
apply (List.mem_zip h₃).2
simp only [List.NodupKeys, List.keys, comp, Prod.fst_toSigma, List.map_map]
rwa [List.map_fst_zip _ _ (le_of_eq h₆)]
[GOAL]
case some
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x val : α
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val
⊢ Option.getD (some val) x ∈ ys ↔ x ∈ xs
[PROOFSTEP]
have h₂ : ys.Nodup := h₁.nodup_iff.1 h₀
[GOAL]
case some
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x val : α
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val
h₂ : List.Nodup ys
⊢ Option.getD (some val) x ∈ ys ↔ x ∈ xs
[PROOFSTEP]
replace h₁ : xs.length = ys.length := h₁.length_eq
[GOAL]
case some
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
x val : α
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val
h₂ : List.Nodup ys
h₁ : List.length xs = List.length ys
⊢ Option.getD (some val) x ∈ ys ↔ x ∈ xs
[PROOFSTEP]
dsimp
[GOAL]
case some
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
x val : α
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val
h₂ : List.Nodup ys
h₁ : List.length xs = List.length ys
⊢ val ∈ ys ↔ x ∈ xs
[PROOFSTEP]
induction xs generalizing ys with
| nil => contradiction
| cons x' xs xs_ih =>
cases' ys with y ys
· cases h₃
dsimp [List.dlookup] at h₃ ; split_ifs at h₃ with h
· rw [Option.some_inj] at h₃
subst x'; subst val
simp only [List.mem_cons, true_or_iff, eq_self_iff_true]
· cases' h₀ with _ _ h₀ h₅
cases' h₂ with _ _ h₂ h₄
have h₆ := Nat.succ.inj h₁
specialize xs_ih h₅ h₃ h₄ h₆
simp only [Ne.symm h, xs_ih, List.mem_cons, false_or_iff]
suffices : val ∈ ys; tauto
erw [← Option.mem_def, List.mem_dlookup_iff] at h₃
simp only [Prod.toSigma, List.mem_map, heq_iff_eq, Prod.exists] at h₃
rcases h₃ with ⟨a, b, h₃, h₄, h₅⟩
apply (List.mem_zip h₃).2
simp only [List.NodupKeys, List.keys, comp, Prod.fst_toSigma, List.map_map]
rwa [List.map_fst_zip _ _ (le_of_eq h₆)]
[GOAL]
case some
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
x val : α
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val
h₂ : List.Nodup ys
h₁ : List.length xs = List.length ys
⊢ val ∈ ys ↔ x ∈ xs
[PROOFSTEP]
induction xs generalizing ys with
| nil => contradiction
| cons x' xs xs_ih =>
cases' ys with y ys
· cases h₃
dsimp [List.dlookup] at h₃ ; split_ifs at h₃ with h
· rw [Option.some_inj] at h₃
subst x'; subst val
simp only [List.mem_cons, true_or_iff, eq_self_iff_true]
· cases' h₀ with _ _ h₀ h₅
cases' h₂ with _ _ h₂ h₄
have h₆ := Nat.succ.inj h₁
specialize xs_ih h₅ h₃ h₄ h₆
simp only [Ne.symm h, xs_ih, List.mem_cons, false_or_iff]
suffices : val ∈ ys; tauto
erw [← Option.mem_def, List.mem_dlookup_iff] at h₃
simp only [Prod.toSigma, List.mem_map, heq_iff_eq, Prod.exists] at h₃
rcases h₃ with ⟨a, b, h₃, h₄, h₅⟩
apply (List.mem_zip h₃).2
simp only [List.NodupKeys, List.keys, comp, Prod.fst_toSigma, List.map_map]
rwa [List.map_fst_zip _ _ (le_of_eq h₆)]
[GOAL]
case some.nil
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val : α
ys : List α
h₀ : List.Nodup []
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip [] ys)) = some val
h₂ : List.Nodup ys
h₁ : List.length [] = List.length ys
⊢ val ∈ ys ↔ x ∈ []
[PROOFSTEP]
| nil => contradiction
[GOAL]
case some.nil
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val : α
ys : List α
h₀ : List.Nodup []
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip [] ys)) = some val
h₂ : List.Nodup ys
h₁ : List.length [] = List.length ys
⊢ val ∈ ys ↔ x ∈ []
[PROOFSTEP]
contradiction
[GOAL]
case some.cons
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val →
List.Nodup ys → List.length xs = List.length ys → (val ∈ ys ↔ x ∈ xs)
ys : List α
h₀ : List.Nodup (x' :: xs)
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip (x' :: xs) ys)) = some val
h₂ : List.Nodup ys
h₁ : List.length (x' :: xs) = List.length ys
⊢ val ∈ ys ↔ x ∈ x' :: xs
[PROOFSTEP]
| cons x' xs xs_ih =>
cases' ys with y ys
· cases h₃
dsimp [List.dlookup] at h₃ ; split_ifs at h₃ with h
· rw [Option.some_inj] at h₃
subst x'; subst val
simp only [List.mem_cons, true_or_iff, eq_self_iff_true]
· cases' h₀ with _ _ h₀ h₅
cases' h₂ with _ _ h₂ h₄
have h₆ := Nat.succ.inj h₁
specialize xs_ih h₅ h₃ h₄ h₆
simp only [Ne.symm h, xs_ih, List.mem_cons, false_or_iff]
suffices : val ∈ ys; tauto
erw [← Option.mem_def, List.mem_dlookup_iff] at h₃
simp only [Prod.toSigma, List.mem_map, heq_iff_eq, Prod.exists] at h₃
rcases h₃ with ⟨a, b, h₃, h₄, h₅⟩
apply (List.mem_zip h₃).2
simp only [List.NodupKeys, List.keys, comp, Prod.fst_toSigma, List.map_map]
rwa [List.map_fst_zip _ _ (le_of_eq h₆)]
[GOAL]
case some.cons
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val →
List.Nodup ys → List.length xs = List.length ys → (val ∈ ys ↔ x ∈ xs)
ys : List α
h₀ : List.Nodup (x' :: xs)
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip (x' :: xs) ys)) = some val
h₂ : List.Nodup ys
h₁ : List.length (x' :: xs) = List.length ys
⊢ val ∈ ys ↔ x ∈ x' :: xs
[PROOFSTEP]
cases' ys with y ys
[GOAL]
case some.cons.nil
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val →
List.Nodup ys → List.length xs = List.length ys → (val ∈ ys ↔ x ∈ xs)
h₀ : List.Nodup (x' :: xs)
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip (x' :: xs) [])) = some val
h₂ : List.Nodup []
h₁ : List.length (x' :: xs) = List.length []
⊢ val ∈ [] ↔ x ∈ x' :: xs
[PROOFSTEP]
cases h₃
[GOAL]
case some.cons.cons
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val →
List.Nodup ys → List.length xs = List.length ys → (val ∈ ys ↔ x ∈ xs)
h₀ : List.Nodup (x' :: xs)
y : α
ys : List α
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip (x' :: xs) (y :: ys))) = some val
h₂ : List.Nodup (y :: ys)
h₁ : List.length (x' :: xs) = List.length (y :: ys)
⊢ val ∈ y :: ys ↔ x ∈ x' :: xs
[PROOFSTEP]
dsimp [List.dlookup] at h₃
[GOAL]
case some.cons.cons
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val →
List.Nodup ys → List.length xs = List.length ys → (val ∈ ys ↔ x ∈ xs)
h₀ : List.Nodup (x' :: xs)
y : α
ys : List α
h₃ : (if h : x' = x then some (h ▸ y) else List.dlookup x (List.map Prod.toSigma (List.zip xs ys))) = some val
h₂ : List.Nodup (y :: ys)
h₁ : List.length (x' :: xs) = List.length (y :: ys)
⊢ val ∈ y :: ys ↔ x ∈ x' :: xs
[PROOFSTEP]
split_ifs at h₃ with h
[GOAL]
case pos
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val →
List.Nodup ys → List.length xs = List.length ys → (val ∈ ys ↔ x ∈ xs)
h₀ : List.Nodup (x' :: xs)
y : α
ys : List α
h₂ : List.Nodup (y :: ys)
h₁ : List.length (x' :: xs) = List.length (y :: ys)
h : x' = x
h₃ : some (h ▸ y) = some val
⊢ val ∈ y :: ys ↔ x ∈ x' :: xs
[PROOFSTEP]
rw [Option.some_inj] at h₃
[GOAL]
case pos
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val →
List.Nodup ys → List.length xs = List.length ys → (val ∈ ys ↔ x ∈ xs)
h₀ : List.Nodup (x' :: xs)
y : α
ys : List α
h₂ : List.Nodup (y :: ys)
h₁ : List.length (x' :: xs) = List.length (y :: ys)
h : x' = x
h₃ : h ▸ y = val
⊢ val ∈ y :: ys ↔ x ∈ x' :: xs
[PROOFSTEP]
subst x'
[GOAL]
case pos
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val →
List.Nodup ys → List.length xs = List.length ys → (val ∈ ys ↔ x ∈ xs)
y : α
ys : List α
h₂ : List.Nodup (y :: ys)
h₀ : List.Nodup (x :: xs)
h₁ : List.length (x :: xs) = List.length (y :: ys)
h₃ : (_ : x = x) ▸ y = val
⊢ val ∈ y :: ys ↔ x ∈ x :: xs
[PROOFSTEP]
subst val
[GOAL]
case pos
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x : α
xs : List α
y : α
ys : List α
h₂ : List.Nodup (y :: ys)
h₀ : List.Nodup (x :: xs)
h₁ : List.length (x :: xs) = List.length (y :: ys)
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some ((_ : x = x) ▸ y) →
List.Nodup ys → List.length xs = List.length ys → ((_ : x = x) ▸ y ∈ ys ↔ x ∈ xs)
⊢ (_ : x = x) ▸ y ∈ y :: ys ↔ x ∈ x :: xs
[PROOFSTEP]
simp only [List.mem_cons, true_or_iff, eq_self_iff_true]
[GOAL]
case neg
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val →
List.Nodup ys → List.length xs = List.length ys → (val ∈ ys ↔ x ∈ xs)
h₀ : List.Nodup (x' :: xs)
y : α
ys : List α
h₂ : List.Nodup (y :: ys)
h₁ : List.length (x' :: xs) = List.length (y :: ys)
h : ¬x' = x
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val
⊢ val ∈ y :: ys ↔ x ∈ x' :: xs
[PROOFSTEP]
cases' h₀ with _ _ h₀ h₅
[GOAL]
case neg.cons
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val →
List.Nodup ys → List.length xs = List.length ys → (val ∈ ys ↔ x ∈ xs)
y : α
ys : List α
h₂ : List.Nodup (y :: ys)
h₁ : List.length (x' :: xs) = List.length (y :: ys)
h : ¬x' = x
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val
h₅ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
⊢ val ∈ y :: ys ↔ x ∈ x' :: xs
[PROOFSTEP]
cases' h₂ with _ _ h₂ h₄
[GOAL]
case neg.cons.cons
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val →
List.Nodup ys → List.length xs = List.length ys → (val ∈ ys ↔ x ∈ xs)
y : α
ys : List α
h₁ : List.length (x' :: xs) = List.length (y :: ys)
h : ¬x' = x
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val
h₅ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
h₄ : List.Pairwise (fun x x_1 => x ≠ x_1) ys
h₂ : ∀ (a' : α), a' ∈ ys → y ≠ a'
⊢ val ∈ y :: ys ↔ x ∈ x' :: xs
[PROOFSTEP]
have h₆ := Nat.succ.inj h₁
[GOAL]
case neg.cons.cons
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
xs_ih :
∀ {ys : List α},
List.Nodup xs →
List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val →
List.Nodup ys → List.length xs = List.length ys → (val ∈ ys ↔ x ∈ xs)
y : α
ys : List α
h₁ : List.length (x' :: xs) = List.length (y :: ys)
h : ¬x' = x
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val
h₅ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
h₄ : List.Pairwise (fun x x_1 => x ≠ x_1) ys
h₂ : ∀ (a' : α), a' ∈ ys → y ≠ a'
h₆ : List.length xs = List.length ys
⊢ val ∈ y :: ys ↔ x ∈ x' :: xs
[PROOFSTEP]
specialize xs_ih h₅ h₃ h₄ h₆
[GOAL]
case neg.cons.cons
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
y : α
ys : List α
h₁ : List.length (x' :: xs) = List.length (y :: ys)
h : ¬x' = x
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val
h₅ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
h₄ : List.Pairwise (fun x x_1 => x ≠ x_1) ys
h₂ : ∀ (a' : α), a' ∈ ys → y ≠ a'
h₆ : List.length xs = List.length ys
xs_ih : val ∈ ys ↔ x ∈ xs
⊢ val ∈ y :: ys ↔ x ∈ x' :: xs
[PROOFSTEP]
simp only [Ne.symm h, xs_ih, List.mem_cons, false_or_iff]
[GOAL]
case neg.cons.cons
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
y : α
ys : List α
h₁ : List.length (x' :: xs) = List.length (y :: ys)
h : ¬x' = x
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val
h₅ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
h₄ : List.Pairwise (fun x x_1 => x ≠ x_1) ys
h₂ : ∀ (a' : α), a' ∈ ys → y ≠ a'
h₆ : List.length xs = List.length ys
xs_ih : val ∈ ys ↔ x ∈ xs
⊢ val = y ∨ x ∈ xs ↔ x ∈ xs
[PROOFSTEP]
suffices : val ∈ ys
[GOAL]
case neg.cons.cons
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
y : α
ys : List α
h₁ : List.length (x' :: xs) = List.length (y :: ys)
h : ¬x' = x
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val
h₅ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
h₄ : List.Pairwise (fun x x_1 => x ≠ x_1) ys
h₂ : ∀ (a' : α), a' ∈ ys → y ≠ a'
h₆ : List.length xs = List.length ys
xs_ih : val ∈ ys ↔ x ∈ xs
this : val ∈ ys
⊢ val = y ∨ x ∈ xs ↔ x ∈ xs
case this
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
y : α
ys : List α
h₁ : List.length (x' :: xs) = List.length (y :: ys)
h : ¬x' = x
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val
h₅ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
h₄ : List.Pairwise (fun x x_1 => x ≠ x_1) ys
h₂ : ∀ (a' : α), a' ∈ ys → y ≠ a'
h₆ : List.length xs = List.length ys
xs_ih : val ∈ ys ↔ x ∈ xs
⊢ val ∈ ys
[PROOFSTEP]
tauto
[GOAL]
case this
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
y : α
ys : List α
h₁ : List.length (x' :: xs) = List.length (y :: ys)
h : ¬x' = x
h₃ : List.dlookup x (List.map Prod.toSigma (List.zip xs ys)) = some val
h₅ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
h₄ : List.Pairwise (fun x x_1 => x ≠ x_1) ys
h₂ : ∀ (a' : α), a' ∈ ys → y ≠ a'
h₆ : List.length xs = List.length ys
xs_ih : val ∈ ys ↔ x ∈ xs
⊢ val ∈ ys
[PROOFSTEP]
erw [← Option.mem_def, List.mem_dlookup_iff] at h₃
[GOAL]
case this
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
y : α
ys : List α
h₁ : List.length (x' :: xs) = List.length (y :: ys)
h : ¬x' = x
h₃ : { fst := x, snd := val } ∈ List.map Prod.toSigma (List.zip xs ys)
h₅ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
h₄ : List.Pairwise (fun x x_1 => x ≠ x_1) ys
h₂ : ∀ (a' : α), a' ∈ ys → y ≠ a'
h₆ : List.length xs = List.length ys
xs_ih : val ∈ ys ↔ x ∈ xs
⊢ val ∈ ys
case this
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
y : α
ys : List α
h₁ : List.length (x' :: xs) = List.length (y :: ys)
h : ¬x' = x
h₃ : val ∈ List.dlookup x (List.map Prod.toSigma (List.zip xs ys))
h₅ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
h₄ : List.Pairwise (fun x x_1 => x ≠ x_1) ys
h₂ : ∀ (a' : α), a' ∈ ys → y ≠ a'
h₆ : List.length xs = List.length ys
xs_ih : val ∈ ys ↔ x ∈ xs
⊢ List.NodupKeys (List.map Prod.toSigma (List.zip xs ys))
[PROOFSTEP]
simp only [Prod.toSigma, List.mem_map, heq_iff_eq, Prod.exists] at h₃
[GOAL]
case this
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
y : α
ys : List α
h₁ : List.length (x' :: xs) = List.length (y :: ys)
h : ¬x' = x
h₅ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
h₄ : List.Pairwise (fun x x_1 => x ≠ x_1) ys
h₂ : ∀ (a' : α), a' ∈ ys → y ≠ a'
h₆ : List.length xs = List.length ys
xs_ih : val ∈ ys ↔ x ∈ xs
h₃ : ∃ a b, (a, b) ∈ List.zip xs ys ∧ { fst := a, snd := b } = { fst := x, snd := val }
⊢ val ∈ ys
case this
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
y : α
ys : List α
h₁ : List.length (x' :: xs) = List.length (y :: ys)
h : ¬x' = x
h₃ : val ∈ List.dlookup x (List.map Prod.toSigma (List.zip xs ys))
h₅ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
h₄ : List.Pairwise (fun x x_1 => x ≠ x_1) ys
h₂ : ∀ (a' : α), a' ∈ ys → y ≠ a'
h₆ : List.length xs = List.length ys
xs_ih : val ∈ ys ↔ x ∈ xs
⊢ List.NodupKeys (List.map Prod.toSigma (List.zip xs ys))
[PROOFSTEP]
rcases h₃ with ⟨a, b, h₃, h₄, h₅⟩
[GOAL]
case this.intro.intro.intro.refl
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
y : α
ys : List α
h₁ : List.length (x' :: xs) = List.length (y :: ys)
h : ¬x' = x
h₅ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
h₄ : List.Pairwise (fun x x_1 => x ≠ x_1) ys
h₂ : ∀ (a' : α), a' ∈ ys → y ≠ a'
h₆ : List.length xs = List.length ys
xs_ih : val ∈ ys ↔ x ∈ xs
h₃ : (x, val) ∈ List.zip xs ys
⊢ val ∈ ys
case this
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
y : α
ys : List α
h₁ : List.length (x' :: xs) = List.length (y :: ys)
h : ¬x' = x
h₃ : val ∈ List.dlookup x (List.map Prod.toSigma (List.zip xs ys))
h₅ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
h₄ : List.Pairwise (fun x x_1 => x ≠ x_1) ys
h₂ : ∀ (a' : α), a' ∈ ys → y ≠ a'
h₆ : List.length xs = List.length ys
xs_ih : val ∈ ys ↔ x ∈ xs
⊢ List.NodupKeys (List.map Prod.toSigma (List.zip xs ys))
[PROOFSTEP]
apply (List.mem_zip h₃).2
[GOAL]
case this
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
y : α
ys : List α
h₁ : List.length (x' :: xs) = List.length (y :: ys)
h : ¬x' = x
h₃ : val ∈ List.dlookup x (List.map Prod.toSigma (List.zip xs ys))
h₅ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
h₄ : List.Pairwise (fun x x_1 => x ≠ x_1) ys
h₂ : ∀ (a' : α), a' ∈ ys → y ≠ a'
h₆ : List.length xs = List.length ys
xs_ih : val ∈ ys ↔ x ∈ xs
⊢ List.NodupKeys (List.map Prod.toSigma (List.zip xs ys))
[PROOFSTEP]
simp only [List.NodupKeys, List.keys, comp, Prod.fst_toSigma, List.map_map]
[GOAL]
case this
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
x val x' : α
xs : List α
y : α
ys : List α
h₁ : List.length (x' :: xs) = List.length (y :: ys)
h : ¬x' = x
h₃ : val ∈ List.dlookup x (List.map Prod.toSigma (List.zip xs ys))
h₅ : List.Pairwise (fun x x_1 => x ≠ x_1) xs
h₀ : ∀ (a' : α), a' ∈ xs → x' ≠ a'
h₄ : List.Pairwise (fun x x_1 => x ≠ x_1) ys
h₂ : ∀ (a' : α), a' ∈ ys → y ≠ a'
h₆ : List.length xs = List.length ys
xs_ih : val ∈ ys ↔ x ∈ xs
⊢ List.Nodup (List.map (fun x => x.fst) (List.zip xs ys))
[PROOFSTEP]
rwa [List.map_fst_zip _ _ (le_of_eq h₆)]
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
x : α
⊢ ¬x ∈ xs → applyId (List.zip xs ys) x = x
[PROOFSTEP]
intro h
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
x : α
h : ¬x ∈ xs
⊢ applyId (List.zip xs ys) x = x
[PROOFSTEP]
dsimp [List.applyId]
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
x : α
h : ¬x ∈ xs
⊢ Option.getD (List.dlookup x (List.map Prod.toSigma (List.zip xs ys))) x = x
[PROOFSTEP]
rw [List.dlookup_eq_none.2]
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
x : α
h : ¬x ∈ xs
⊢ Option.getD none x = x
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
x : α
h : ¬x ∈ xs
⊢ ¬x ∈ List.keys (List.map Prod.toSigma (List.zip xs ys))
[PROOFSTEP]
rfl
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
x : α
h : ¬x ∈ xs
⊢ ¬x ∈ List.keys (List.map Prod.toSigma (List.zip xs ys))
[PROOFSTEP]
simp only [List.keys, not_exists, Prod.toSigma, exists_and_right, exists_eq_right, List.mem_map, Function.comp_apply,
List.map_map, Prod.exists]
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
x : α
h : ¬x ∈ xs
⊢ ∀ (x_1 : α), ¬(x, x_1) ∈ List.zip xs ys
[PROOFSTEP]
intro y hy
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
x : α
h : ¬x ∈ xs
y : α
hy : (x, y) ∈ List.zip xs ys
⊢ False
[PROOFSTEP]
exact h (List.mem_zip hy).1
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
⊢ Injective (applyId (List.zip xs ys))
[PROOFSTEP]
intro x y h
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
h : applyId (List.zip xs ys) x = applyId (List.zip xs ys) y
⊢ x = y
[PROOFSTEP]
by_cases hx : x ∈ xs
[GOAL]
case pos
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
h : applyId (List.zip xs ys) x = applyId (List.zip xs ys) y
hx : x ∈ xs
⊢ x = y
[PROOFSTEP]
by_cases hy : y ∈ xs
[GOAL]
case neg
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
h : applyId (List.zip xs ys) x = applyId (List.zip xs ys) y
hx : ¬x ∈ xs
⊢ x = y
[PROOFSTEP]
by_cases hy : y ∈ xs
[GOAL]
case pos
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
h : applyId (List.zip xs ys) x = applyId (List.zip xs ys) y
hx : x ∈ xs
hy : y ∈ xs
⊢ x = y
[PROOFSTEP]
rw [List.mem_iff_get?] at hx hy
[GOAL]
case pos
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
h : applyId (List.zip xs ys) x = applyId (List.zip xs ys) y
hx : ∃ n, List.get? xs n = some x
hy : ∃ n, List.get? xs n = some y
⊢ x = y
[PROOFSTEP]
cases' hx with i hx
[GOAL]
case pos.intro
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
h : applyId (List.zip xs ys) x = applyId (List.zip xs ys) y
hy : ∃ n, List.get? xs n = some y
i : ℕ
hx : List.get? xs i = some x
⊢ x = y
[PROOFSTEP]
cases' hy with j hy
[GOAL]
case pos.intro.intro
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
h : applyId (List.zip xs ys) x = applyId (List.zip xs ys) y
i : ℕ
hx : List.get? xs i = some x
j : ℕ
hy : List.get? xs j = some y
⊢ x = y
[PROOFSTEP]
suffices some x = some y by injection this
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
h : applyId (List.zip xs ys) x = applyId (List.zip xs ys) y
i : ℕ
hx : List.get? xs i = some x
j : ℕ
hy : List.get? xs j = some y
this : some x = some y
⊢ x = y
[PROOFSTEP]
injection this
[GOAL]
case pos.intro.intro
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
h : applyId (List.zip xs ys) x = applyId (List.zip xs ys) y
i : ℕ
hx : List.get? xs i = some x
j : ℕ
hy : List.get? xs j = some y
⊢ some x = some y
[PROOFSTEP]
have h₂ := h₁.length_eq
[GOAL]
case pos.intro.intro
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
h : applyId (List.zip xs ys) x = applyId (List.zip xs ys) y
i : ℕ
hx : List.get? xs i = some x
j : ℕ
hy : List.get? xs j = some y
h₂ : List.length xs = List.length ys
⊢ some x = some y
[PROOFSTEP]
rw [List.applyId_zip_eq h₀ h₂ _ _ _ hx] at h
[GOAL]
case pos.intro.intro
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
i : ℕ
h : List.get? ys i = some (applyId (List.zip xs ys) y)
hx : List.get? xs i = some x
j : ℕ
hy : List.get? xs j = some y
h₂ : List.length xs = List.length ys
⊢ some x = some y
[PROOFSTEP]
rw [← hx, ← hy]
[GOAL]
case pos.intro.intro
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
i : ℕ
h : List.get? ys i = some (applyId (List.zip xs ys) y)
hx : List.get? xs i = some x
j : ℕ
hy : List.get? xs j = some y
h₂ : List.length xs = List.length ys
⊢ List.get? xs i = List.get? xs j
[PROOFSTEP]
congr
[GOAL]
case pos.intro.intro.e_a
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
i : ℕ
h : List.get? ys i = some (applyId (List.zip xs ys) y)
hx : List.get? xs i = some x
j : ℕ
hy : List.get? xs j = some y
h₂ : List.length xs = List.length ys
⊢ i = j
[PROOFSTEP]
apply List.get?_injective _ (h₁.nodup_iff.1 h₀)
[GOAL]
case pos.intro.intro.e_a
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
i : ℕ
h : List.get? ys i = some (applyId (List.zip xs ys) y)
hx : List.get? xs i = some x
j : ℕ
hy : List.get? xs j = some y
h₂ : List.length xs = List.length ys
⊢ List.get? ys i = List.get? ys j
[PROOFSTEP]
symm
[GOAL]
case pos.intro.intro.e_a
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
i : ℕ
h : List.get? ys i = some (applyId (List.zip xs ys) y)
hx : List.get? xs i = some x
j : ℕ
hy : List.get? xs j = some y
h₂ : List.length xs = List.length ys
⊢ List.get? ys j = List.get? ys i
[PROOFSTEP]
rw [h]
[GOAL]
case pos.intro.intro.e_a
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
i : ℕ
h : List.get? ys i = some (applyId (List.zip xs ys) y)
hx : List.get? xs i = some x
j : ℕ
hy : List.get? xs j = some y
h₂ : List.length xs = List.length ys
⊢ List.get? ys j = some (applyId (List.zip xs ys) y)
[PROOFSTEP]
rw [← List.applyId_zip_eq]
[GOAL]
case pos.intro.intro.e_a.h₀
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
i : ℕ
h : List.get? ys i = some (applyId (List.zip xs ys) y)
hx : List.get? xs i = some x
j : ℕ
hy : List.get? xs j = some y
h₂ : List.length xs = List.length ys
⊢ List.Nodup xs
[PROOFSTEP]
assumption
[GOAL]
case pos.intro.intro.e_a.h₁
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
i : ℕ
h : List.get? ys i = some (applyId (List.zip xs ys) y)
hx : List.get? xs i = some x
j : ℕ
hy : List.get? xs j = some y
h₂ : List.length xs = List.length ys
⊢ List.length xs = List.length ys
[PROOFSTEP]
assumption
[GOAL]
case pos.intro.intro.e_a.h₂
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
i : ℕ
h : List.get? ys i = some (applyId (List.zip xs ys) y)
hx : List.get? xs i = some x
j : ℕ
hy : List.get? xs j = some y
h₂ : List.length xs = List.length ys
⊢ List.get? xs j = some y
[PROOFSTEP]
assumption
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
i : ℕ
h : List.get? ys i = some (applyId (List.zip xs ys) y)
hx : List.get? xs i = some x
j : ℕ
hy : List.get? xs j = some y
h₂ : List.length xs = List.length ys
⊢ i < List.length ys
[PROOFSTEP]
rw [← h₁.length_eq]
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
i : ℕ
h : List.get? ys i = some (applyId (List.zip xs ys) y)
hx : List.get? xs i = some x
j : ℕ
hy : List.get? xs j = some y
h₂ : List.length xs = List.length ys
⊢ i < List.length xs
[PROOFSTEP]
rw [List.get?_eq_some] at hx
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
i : ℕ
h : List.get? ys i = some (applyId (List.zip xs ys) y)
hx : ∃ h, List.get xs { val := i, isLt := h } = x
j : ℕ
hy : List.get? xs j = some y
h₂ : List.length xs = List.length ys
⊢ i < List.length xs
[PROOFSTEP]
cases' hx with hx hx'
[GOAL]
case intro
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
i : ℕ
h : List.get? ys i = some (applyId (List.zip xs ys) y)
j : ℕ
hy : List.get? xs j = some y
h₂ : List.length xs = List.length ys
hx : i < List.length xs
hx' : List.get xs { val := i, isLt := hx } = x
⊢ i < List.length xs
[PROOFSTEP]
exact hx
[GOAL]
case neg
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
h : applyId (List.zip xs ys) x = applyId (List.zip xs ys) y
hx : x ∈ xs
hy : ¬y ∈ xs
⊢ x = y
[PROOFSTEP]
rw [← applyId_mem_iff h₀ h₁] at hx hy
[GOAL]
case neg
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
h : applyId (List.zip xs ys) x = applyId (List.zip xs ys) y
hx : applyId (List.zip xs ys) x ∈ ys
hy : ¬applyId (List.zip xs ys) y ∈ ys
⊢ x = y
[PROOFSTEP]
rw [h] at hx
[GOAL]
case neg
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
h : applyId (List.zip xs ys) x = applyId (List.zip xs ys) y
hx : applyId (List.zip xs ys) y ∈ ys
hy : ¬applyId (List.zip xs ys) y ∈ ys
⊢ x = y
[PROOFSTEP]
contradiction
[GOAL]
case pos
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
h : applyId (List.zip xs ys) x = applyId (List.zip xs ys) y
hx : ¬x ∈ xs
hy : y ∈ xs
⊢ x = y
[PROOFSTEP]
rw [← applyId_mem_iff h₀ h₁] at hx hy
[GOAL]
case pos
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
h : applyId (List.zip xs ys) x = applyId (List.zip xs ys) y
hx : ¬applyId (List.zip xs ys) x ∈ ys
hy : applyId (List.zip xs ys) y ∈ ys
⊢ x = y
[PROOFSTEP]
rw [h] at hx
[GOAL]
case pos
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
h : applyId (List.zip xs ys) x = applyId (List.zip xs ys) y
hx : ¬applyId (List.zip xs ys) y ∈ ys
hy : applyId (List.zip xs ys) y ∈ ys
⊢ x = y
[PROOFSTEP]
contradiction
[GOAL]
case neg
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
h : applyId (List.zip xs ys) x = applyId (List.zip xs ys) y
hx : ¬x ∈ xs
hy : ¬y ∈ xs
⊢ x = y
[PROOFSTEP]
rwa [List.applyId_eq_self, List.applyId_eq_self] at h
[GOAL]
case neg.a
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
h : x = applyId (List.zip xs ys) y
hx : ¬x ∈ xs
hy : ¬y ∈ xs
⊢ ¬y ∈ xs
[PROOFSTEP]
assumption
[GOAL]
case neg.a
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs ys : List α
h₀ : List.Nodup xs
h₁ : xs ~ ys
x y : α
h : applyId (List.zip xs ys) x = applyId (List.zip xs ys) y
hx : ¬x ∈ xs
hy : ¬y ∈ xs
⊢ ¬x ∈ xs
[PROOFSTEP]
assumption
[GOAL]
α : Type u
β : Type v
γ : Sort w
x✝ : ℕ
n : ℕ := x✝
h : 0 < n
⊢ 1 < 2
[PROOFSTEP]
decide
[GOAL]
α✝ : Type u
β : Type v
γ : Sort w
α : Type
inst✝ : DecidableEq α
xs : List ((_ : α) × α)
h₀✝ : List.map Sigma.fst xs ~ List.map Sigma.snd xs
h₁✝ : List.Nodup (List.map Sigma.snd xs)
xs' ys' : List α
h₀ : xs' ~ ys'
h₁ : List.Nodup ys'
h₃ : List.length xs' ≤ List.length ys'
h₄ : List.length ys' ≤ List.length xs'
⊢ List.map Sigma.fst (List.map Prod.toSigma (List.zip xs' ys')) ~
List.map Sigma.snd (List.map Prod.toSigma (List.zip xs' ys'))
[PROOFSTEP]
simp only [comp, List.map_fst_zip, List.map_snd_zip, *, Prod.fst_toSigma, Prod.snd_toSigma, List.map_map]
[GOAL]
α✝ : Type u
β : Type v
γ : Sort w
α : Type
inst✝ : DecidableEq α
xs : List ((_ : α) × α)
h₀✝ : List.map Sigma.fst xs ~ List.map Sigma.snd xs
h₁✝ : List.Nodup (List.map Sigma.snd xs)
xs' ys' : List α
h₀ : xs' ~ ys'
h₁ : List.Nodup ys'
h₃ : List.length xs' ≤ List.length ys'
h₄ : List.length ys' ≤ List.length xs'
⊢ List.Nodup (List.map Sigma.snd (List.map Prod.toSigma (List.zip xs' ys')))
[PROOFSTEP]
simp only [comp, List.map_snd_zip, *, Prod.snd_toSigma, List.map_map]
[GOAL]
α : Type u
β : Type v
γ : Sort w
xs ys : List α
h : xs ~ ys
h' : List.Nodup ys
h₀ : List.length xs ≤ List.length ys
h₁ : List.length ys ≤ List.length xs
⊢ List.map Sigma.fst (List.toFinmap' (List.zip xs ys)) ~ List.map Sigma.snd (List.toFinmap' (List.zip xs ys))
[PROOFSTEP]
simp only [List.toFinmap', comp, List.map_fst_zip, List.map_snd_zip, *, Prod.fst_toSigma, Prod.snd_toSigma,
List.map_map]
[GOAL]
α : Type u
β : Type v
γ : Sort w
xs ys : List α
h : xs ~ ys
h' : List.Nodup ys
h₀ : List.length xs ≤ List.length ys
h₁ : List.length ys ≤ List.length xs
⊢ List.Nodup (List.map Sigma.snd (List.toFinmap' (List.zip xs ys)))
[PROOFSTEP]
simp only [List.toFinmap', comp, List.map_snd_zip, *, Prod.snd_toSigma, List.map_map]
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
f : InjectiveFunction α
⊢ Injective (apply f)
[PROOFSTEP]
cases' f with xs hperm hnodup
[GOAL]
case mapToSelf
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs : List ((_ : α) × α)
hperm : List.map Sigma.fst xs ~ List.map Sigma.snd xs
hnodup : List.Nodup (List.map Sigma.snd xs)
⊢ Injective (apply (mapToSelf xs hperm hnodup))
[PROOFSTEP]
generalize h₀ : List.map Sigma.fst xs = xs₀
[GOAL]
case mapToSelf
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs : List ((_ : α) × α)
hperm : List.map Sigma.fst xs ~ List.map Sigma.snd xs
hnodup : List.Nodup (List.map Sigma.snd xs)
xs₀ : List α
h₀ : List.map Sigma.fst xs = xs₀
⊢ Injective (apply (mapToSelf xs hperm hnodup))
[PROOFSTEP]
generalize h₁ : xs.map (@id ((Σ _ : α, α) → α) <| @Sigma.snd α fun _ : α => α) = xs₁
[GOAL]
case mapToSelf
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs : List ((_ : α) × α)
hperm : List.map Sigma.fst xs ~ List.map Sigma.snd xs
hnodup : List.Nodup (List.map Sigma.snd xs)
xs₀ : List α
h₀ : List.map Sigma.fst xs = xs₀
xs₁ : List α
h₁ : List.map (id Sigma.snd) xs = xs₁
⊢ Injective (apply (mapToSelf xs hperm hnodup))
[PROOFSTEP]
dsimp [id] at h₁
[GOAL]
case mapToSelf
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs : List ((_ : α) × α)
hperm : List.map Sigma.fst xs ~ List.map Sigma.snd xs
hnodup : List.Nodup (List.map Sigma.snd xs)
xs₀ : List α
h₀ : List.map Sigma.fst xs = xs₀
xs₁ : List α
h₁ : List.map Sigma.snd xs = xs₁
⊢ Injective (apply (mapToSelf xs hperm hnodup))
[PROOFSTEP]
have hxs : xs = TotalFunction.List.toFinmap' (xs₀.zip xs₁) :=
by
rw [← h₀, ← h₁, List.toFinmap']; clear h₀ h₁ xs₀ xs₁ hperm hnodup
induction xs
case nil => simp only [List.zip_nil_right, List.map_nil]
case cons xs_hd xs_tl
xs_ih =>
simp only [true_and_iff, Prod.toSigma, eq_self_iff_true, Sigma.eta, List.zip_cons_cons, List.map, List.cons_inj]
exact xs_ih
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs : List ((_ : α) × α)
hperm : List.map Sigma.fst xs ~ List.map Sigma.snd xs
hnodup : List.Nodup (List.map Sigma.snd xs)
xs₀ : List α
h₀ : List.map Sigma.fst xs = xs₀
xs₁ : List α
h₁ : List.map Sigma.snd xs = xs₁
⊢ xs = List.toFinmap' (List.zip xs₀ xs₁)
[PROOFSTEP]
rw [← h₀, ← h₁, List.toFinmap']
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs : List ((_ : α) × α)
hperm : List.map Sigma.fst xs ~ List.map Sigma.snd xs
hnodup : List.Nodup (List.map Sigma.snd xs)
xs₀ : List α
h₀ : List.map Sigma.fst xs = xs₀
xs₁ : List α
h₁ : List.map Sigma.snd xs = xs₁
⊢ xs = List.map Prod.toSigma (List.zip (List.map Sigma.fst xs) (List.map Sigma.snd xs))
[PROOFSTEP]
clear h₀ h₁ xs₀ xs₁ hperm hnodup
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs : List ((_ : α) × α)
⊢ xs = List.map Prod.toSigma (List.zip (List.map Sigma.fst xs) (List.map Sigma.snd xs))
[PROOFSTEP]
induction xs
[GOAL]
case nil
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
⊢ [] = List.map Prod.toSigma (List.zip (List.map Sigma.fst []) (List.map Sigma.snd []))
case cons
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
head✝ : (_ : α) × α
tail✝ : List ((_ : α) × α)
tail_ih✝ : tail✝ = List.map Prod.toSigma (List.zip (List.map Sigma.fst tail✝) (List.map Sigma.snd tail✝))
⊢ head✝ :: tail✝ =
List.map Prod.toSigma (List.zip (List.map Sigma.fst (head✝ :: tail✝)) (List.map Sigma.snd (head✝ :: tail✝)))
[PROOFSTEP]
case nil => simp only [List.zip_nil_right, List.map_nil]
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
⊢ [] = List.map Prod.toSigma (List.zip (List.map Sigma.fst []) (List.map Sigma.snd []))
[PROOFSTEP]
case nil => simp only [List.zip_nil_right, List.map_nil]
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
⊢ [] = List.map Prod.toSigma (List.zip (List.map Sigma.fst []) (List.map Sigma.snd []))
[PROOFSTEP]
simp only [List.zip_nil_right, List.map_nil]
[GOAL]
case cons
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
head✝ : (_ : α) × α
tail✝ : List ((_ : α) × α)
tail_ih✝ : tail✝ = List.map Prod.toSigma (List.zip (List.map Sigma.fst tail✝) (List.map Sigma.snd tail✝))
⊢ head✝ :: tail✝ =
List.map Prod.toSigma (List.zip (List.map Sigma.fst (head✝ :: tail✝)) (List.map Sigma.snd (head✝ :: tail✝)))
[PROOFSTEP]
case cons xs_hd xs_tl
xs_ih =>
simp only [true_and_iff, Prod.toSigma, eq_self_iff_true, Sigma.eta, List.zip_cons_cons, List.map, List.cons_inj]
exact xs_ih
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs_hd : (_ : α) × α
xs_tl : List ((_ : α) × α)
xs_ih : xs_tl = List.map Prod.toSigma (List.zip (List.map Sigma.fst xs_tl) (List.map Sigma.snd xs_tl))
⊢ xs_hd :: xs_tl =
List.map Prod.toSigma (List.zip (List.map Sigma.fst (xs_hd :: xs_tl)) (List.map Sigma.snd (xs_hd :: xs_tl)))
[PROOFSTEP]
case cons xs_hd xs_tl
xs_ih =>
simp only [true_and_iff, Prod.toSigma, eq_self_iff_true, Sigma.eta, List.zip_cons_cons, List.map, List.cons_inj]
exact xs_ih
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs_hd : (_ : α) × α
xs_tl : List ((_ : α) × α)
xs_ih : xs_tl = List.map Prod.toSigma (List.zip (List.map Sigma.fst xs_tl) (List.map Sigma.snd xs_tl))
⊢ xs_hd :: xs_tl =
List.map Prod.toSigma (List.zip (List.map Sigma.fst (xs_hd :: xs_tl)) (List.map Sigma.snd (xs_hd :: xs_tl)))
[PROOFSTEP]
simp only [true_and_iff, Prod.toSigma, eq_self_iff_true, Sigma.eta, List.zip_cons_cons, List.map, List.cons_inj]
[GOAL]
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs_hd : (_ : α) × α
xs_tl : List ((_ : α) × α)
xs_ih : xs_tl = List.map Prod.toSigma (List.zip (List.map Sigma.fst xs_tl) (List.map Sigma.snd xs_tl))
⊢ xs_tl =
List.map (fun p => { fst := p.fst, snd := p.snd })
(List.zipWith Prod.mk (List.map Sigma.fst xs_tl) (List.map Sigma.snd xs_tl))
[PROOFSTEP]
exact xs_ih
[GOAL]
case mapToSelf
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs : List ((_ : α) × α)
hperm : List.map Sigma.fst xs ~ List.map Sigma.snd xs
hnodup : List.Nodup (List.map Sigma.snd xs)
xs₀ : List α
h₀ : List.map Sigma.fst xs = xs₀
xs₁ : List α
h₁ : List.map Sigma.snd xs = xs₁
hxs : xs = List.toFinmap' (List.zip xs₀ xs₁)
⊢ Injective (apply (mapToSelf xs hperm hnodup))
[PROOFSTEP]
revert hperm hnodup
[GOAL]
case mapToSelf
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs : List ((_ : α) × α)
xs₀ : List α
h₀ : List.map Sigma.fst xs = xs₀
xs₁ : List α
h₁ : List.map Sigma.snd xs = xs₁
hxs : xs = List.toFinmap' (List.zip xs₀ xs₁)
⊢ ∀ (hperm : List.map Sigma.fst xs ~ List.map Sigma.snd xs) (hnodup : List.Nodup (List.map Sigma.snd xs)),
Injective (apply (mapToSelf xs hperm hnodup))
[PROOFSTEP]
rw [hxs]
[GOAL]
case mapToSelf
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs : List ((_ : α) × α)
xs₀ : List α
h₀ : List.map Sigma.fst xs = xs₀
xs₁ : List α
h₁ : List.map Sigma.snd xs = xs₁
hxs : xs = List.toFinmap' (List.zip xs₀ xs₁)
⊢ ∀
(hperm :
List.map Sigma.fst (List.toFinmap' (List.zip xs₀ xs₁)) ~ List.map Sigma.snd (List.toFinmap' (List.zip xs₀ xs₁)))
(hnodup : List.Nodup (List.map Sigma.snd (List.toFinmap' (List.zip xs₀ xs₁)))),
Injective (apply (mapToSelf (List.toFinmap' (List.zip xs₀ xs₁)) hperm hnodup))
[PROOFSTEP]
intros hperm hnodup
[GOAL]
case mapToSelf
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs : List ((_ : α) × α)
xs₀ : List α
h₀ : List.map Sigma.fst xs = xs₀
xs₁ : List α
h₁ : List.map Sigma.snd xs = xs₁
hxs : xs = List.toFinmap' (List.zip xs₀ xs₁)
hperm : List.map Sigma.fst (List.toFinmap' (List.zip xs₀ xs₁)) ~ List.map Sigma.snd (List.toFinmap' (List.zip xs₀ xs₁))
hnodup : List.Nodup (List.map Sigma.snd (List.toFinmap' (List.zip xs₀ xs₁)))
⊢ Injective (apply (mapToSelf (List.toFinmap' (List.zip xs₀ xs₁)) hperm hnodup))
[PROOFSTEP]
apply InjectiveFunction.applyId_injective
[GOAL]
case mapToSelf.h₀
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs : List ((_ : α) × α)
xs₀ : List α
h₀ : List.map Sigma.fst xs = xs₀
xs₁ : List α
h₁ : List.map Sigma.snd xs = xs₁
hxs : xs = List.toFinmap' (List.zip xs₀ xs₁)
hperm : List.map Sigma.fst (List.toFinmap' (List.zip xs₀ xs₁)) ~ List.map Sigma.snd (List.toFinmap' (List.zip xs₀ xs₁))
hnodup : List.Nodup (List.map Sigma.snd (List.toFinmap' (List.zip xs₀ xs₁)))
⊢ List.Nodup xs₀
[PROOFSTEP]
rwa [← h₀, hxs, hperm.nodup_iff]
[GOAL]
case mapToSelf.h₁
α : Type u
β : Type v
γ : Sort w
inst✝ : DecidableEq α
xs : List ((_ : α) × α)
xs₀ : List α
h₀ : List.map Sigma.fst xs = xs₀
xs₁ : List α
h₁ : List.map Sigma.snd xs = xs₁
hxs : xs = List.toFinmap' (List.zip xs₀ xs₁)
hperm : List.map Sigma.fst (List.toFinmap' (List.zip xs₀ xs₁)) ~ List.map Sigma.snd (List.toFinmap' (List.zip xs₀ xs₁))
hnodup : List.Nodup (List.map Sigma.snd (List.toFinmap' (List.zip xs₀ xs₁)))
⊢ xs₀ ~ xs₁
[PROOFSTEP]
rwa [← hxs, h₀, h₁] at hperm
|
import B3_VanillaOptions_MC_BlackScholes as B3MC
import B3_VanillaOptionsinBlackScholesWorld as B3BS
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
def delta_call_BSM(spot, strike, maturity, rate, vol, dividend=None):
    if dividend is None:
        dividend = 0
    d1 = (np.log(spot/strike) + (rate - dividend + 0.5*vol**2)*(maturity))/(vol*np.sqrt(maturity))
    Nd1 = norm.cdf(d1)
    return np.exp(-dividend*maturity)*Nd1

def _dN(x):
    return np.exp((-x**2)/2)/(np.sqrt(2*np.pi))

def gamma_call_BSM(spot, strike, maturity, rate, vol, dividend=None):
    if dividend is None:
        dividend = 0
    d1 = (np.log(spot/strike) + (rate - dividend + 0.5*vol**2)*(maturity))/(vol*np.sqrt(maturity))
    dN = _dN(d1)
    return (dN/(spot*vol*np.sqrt(maturity)))*np.exp(-dividend*maturity)

def vega_call_BSM(spot, strike, maturity, rate, vol, dividend=None):
    if dividend is None:
        dividend = 0
    d1 = (np.log(spot/strike) + (rate - dividend + 0.5*vol**2)*(maturity))/(vol*np.sqrt(maturity))
    dN = _dN(d1)
    return np.exp(-dividend*maturity)*spot*np.sqrt(maturity)*dN

def theta_call_BSM(spot, strike, maturity, rate, vol, dividend=None):
    if dividend is None:
        dividend = 0
    d1 = (np.log(spot/strike) + (rate - dividend + 0.5*vol**2)*(maturity))/(vol*np.sqrt(maturity))
    d2 = d1 - vol*np.sqrt(maturity)
    dN = _dN(d1)
    return -np.exp(-dividend*maturity)*spot*dN*vol/(2*np.sqrt(maturity)) + \
        dividend*np.exp(-dividend*maturity)*spot*norm.cdf(d1) - \
        rate*strike*np.exp(-rate*maturity)*norm.cdf(d2)

def rho_call_BSM(spot, strike, maturity, rate, vol, dividend=None):
    if dividend is None:
        dividend = 0
    d1 = (np.log(spot/strike) + (rate - dividend + 0.5*vol**2)*(maturity))/(vol*np.sqrt(maturity))
    d2 = d1 - vol*np.sqrt(maturity)
    return strike*maturity*np.exp(-rate*maturity)*norm.cdf(d2)

def delta_call_finite_difference(spot, strike, maturity, rate, vol, dividend=None):
    if dividend is None:
        dividend = 0
    e = 0.0001  ##this works better than using 0.0001*spot
    call_up = B3BS.europeanCallOptionPrice(spot + e, strike, maturity, rate, dividend, vol)
    call = B3BS.europeanCallOptionPrice(spot, strike, maturity, rate, dividend, vol)
    return (call_up - call)/e

def gamma_call_finite_difference(spot, strike, maturity, rate, vol, dividend=None):
    if dividend is None:
        dividend = 0
    e = 0.0001
    call_up = B3BS.europeanCallOptionPrice(spot + e, strike, maturity, rate, dividend, vol)
    call_dn = B3BS.europeanCallOptionPrice(spot - e, strike, maturity, rate, dividend, vol)
    call = B3BS.europeanCallOptionPrice(spot, strike, maturity, rate, dividend, vol)
    return (call_up - 2*call + call_dn)/e**2

def vega_call_finite_difference(spot, strike, maturity, rate, vol, dividend=None):
    if dividend is None:
        dividend = 0
    e = 0.0001  ##this works better than using 0.0001*spot
    call_up = B3BS.europeanCallOptionPrice(spot, strike, maturity, rate, dividend, vol + e)
    call = B3BS.europeanCallOptionPrice(spot, strike, maturity, rate, dividend, vol)
    return (call_up - call)/e

def rho_call_finite_difference(spot, strike, maturity, rate, vol, dividend=None):
    if dividend is None:
        dividend = 0
    e = 0.0001  ##this works better than using 0.0001*spot
    call_up = B3BS.europeanCallOptionPrice(spot, strike, maturity, rate + e, dividend, vol)
    call = B3BS.europeanCallOptionPrice(spot, strike, maturity, rate, dividend, vol)
    return (call_up - call)/e
def theta_call_finite_difference(spot, strike, maturity, rate, vol, dividend=None):
    if dividend is None:
        dividend = 0
    e = 0.0001  ##this works better than using 0.0001*spot
    call_up = B3BS.europeanCallOptionPrice(spot, strike, maturity + e, rate, dividend, vol)
    call = B3BS.europeanCallOptionPrice(spot, strike, maturity, rate, dividend, vol)
    ##theta measures sensitivity to the passage of time (-dC/dT), so the forward
    ##difference in maturity is negated to match theta_call_BSM's sign convention
    return -(call_up - call)/e
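
# A quick sanity check (illustrative parameter values; assumes the B3BS module
# imported above is available). The analytic and finite-difference deltas
# should agree to several decimal places for a vanilla at-the-money call.
if __name__ == "__main__":
    spot, strike, maturity, rate, vol = 100.0, 100.0, 1.0, 0.05, 0.2
    print("delta (BSM):", delta_call_BSM(spot, strike, maturity, rate, vol))
    print("delta (FD): ", delta_call_finite_difference(spot, strike, maturity, rate, vol))
    print("gamma (BSM):", gamma_call_BSM(spot, strike, maturity, rate, vol))
    print("vega  (BSM):", vega_call_BSM(spot, strike, maturity, rate, vol))
    print("theta (BSM):", theta_call_BSM(spot, strike, maturity, rate, vol))
    print("rho   (BSM):", rho_call_BSM(spot, strike, maturity, rate, vol))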
|
No tolls were collected along SR 878, in line with the road's original plans, until MDX's initial roll-out of open road tolling from late 2009 to mid-2010 on its road network. Tolling along the Snapper Creek Expressway began on July 17, 2010. The move to toll the Snapper Creek Expressway angered local residents, but was tempered by MDX's move to investigate toll rebates. Initially, tolls were $0.25 for SunPass users, with a $0.15 surcharge for motorists using the toll-by-plate system. The toll-by-plate rate increased by ten cents on July 1, 2013, to $0.50 per toll gantry passed, while the SunPass rate was unaffected.
|
module NatOrder
import NatUtils
%default total
%access public export
|||New type for <= on Nat
LEQ : (a : Nat) -> (b : Nat) -> Type
LEQ a b = (k : Nat ** ((a + k) = b))
|||New type for >= on Nat
GEQ : (a : Nat) -> (b : Nat) -> Type
GEQ a b = LEQ b a
|||New type for < on Nat
LNEQ : (a : Nat) -> (b : Nat) -> Type
LNEQ a b = (LEQ a b, Not (a = b))
|||New type for > on Nat
GNEQ : (a : Nat) -> (b : Nat) -> Type
GNEQ a b = LNEQ b a
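--A minimal usage sketch (illustrative, not part of the original module):
--LEQ carries an explicit difference witness, e.g. 1 <= 2 with witness k = 1
leqOneTwoExample : LEQ 1 2
leqOneTwoExample = (1 ** Refl)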
|||Proof that 0 is the smallest natural number
LEQZero : {n : Nat} -> LEQ Z n
LEQZero {n} = (n ** Refl)
|||Proof that 0 is lesser than every successor
LNEQZeroSucc : {n : Nat} -> LNEQ Z (S n)
LNEQZeroSucc {n} = (((S n) ** Refl), ZIsNotS)
|||Proof that a <= b implies (S a) <= (S b)
LEQSucc : {a : Nat} -> {b : Nat} -> (LEQ a b) -> (LEQ (S a) (S b))
LEQSucc {a} {b} (k ** proofEq) = (k ** cong proofEq)
|||Proof that a < b implies (S a) < (S b)
LNEQSucc : {a : Nat} -> {b : Nat} -> (LNEQ a b) -> (LNEQ (S a) (S b))
LNEQSucc {a} {b} ((k ** proofEq), proofNotEq) = ((k ** cong proofEq), proofNotEqSucc proofNotEq) where
  proofNotEqSucc : (Not (a = b)) -> (Not (S a = S b))
  proofNotEqSucc proofNotEq proofEq = proofNotEq (predEqual proofEq)
|||Proof that (S a) <= (S b) implies a <= b
LEQPred : {a : Nat} -> {b : Nat} -> (LEQ (S a) (S b)) -> (LEQ a b)
LEQPred {a} {b} (k ** proofEq) = (k ** predEqual proofEq)
|||Proof that (S a) < (S b) implies a < b
LNEQPred : {a : Nat} -> {b : Nat} -> (LNEQ (S a) (S b)) -> (LNEQ a b)
LNEQPred {a} {b} ((k ** proofEq), proofNotEq) = ((k ** predEqual proofEq), proofNotEqPred proofNotEq) where
  proofNotEqPred : (Not (S a = S b)) -> (Not (a = b))
  proofNotEqPred proofNotEq proofEq = proofNotEq (cong proofEq)
|||Proof that !(a <= b) implies !((S a) <= (S b))
notLEQSucc : {a : Nat} -> {b : Nat} -> (Not (LEQ a b)) -> (Not (LEQ (S a) (S b)))
notLEQSucc {a} {b} proofNotLEQ proofLEQ = proofNotLEQ (LEQPred proofLEQ)
|||Proof that !((S a) <= (S b)) implies !(a <= b)
notLEQPred : {a : Nat} -> {b : Nat} -> (Not (LEQ (S a) (S b))) -> (Not (LEQ a b))
notLEQPred {a} {b} proofNotLEQ proofLEQ = proofNotLEQ (LEQSucc proofLEQ)
|||Proof that !(a < b) implies !((S a) < (S b))
notLNEQSucc : {a : Nat} -> {b : Nat} -> (Not (LNEQ a b)) -> (Not (LNEQ (S a) (S b)))
notLNEQSucc {a} {b} proofNotLNEQ proofLNEQ = proofNotLNEQ (LNEQPred proofLNEQ)
|||Proof that !((S a) < (S b)) implies !(a < b)
notLNEQPred : {a : Nat} -> {b : Nat} -> (Not (LNEQ (S a) (S b))) -> (Not (LNEQ a b))
notLNEQPred {a} {b} proofNotLNEQ proofLNEQ = proofNotLNEQ (LNEQSucc proofLNEQ)
|||Proof that !(a <= b) implies b < a
notLEQImpliesGNEQ : {a : Nat} -> {b : Nat} -> (Not (LEQ a b)) -> (GNEQ a b)
notLEQImpliesGNEQ {a = Z} {b} proofNotLEQ = void (proofNotLEQ (b ** Refl))
notLEQImpliesGNEQ {a = (S n)} {b = Z} _ = ((S n ** Refl), ZIsNotS)
notLEQImpliesGNEQ {a = (S n)} {b = (S m)} proofNotLEQ = (LEQSucc proofGEQ, notEqualSucc proofNotEq) where
  proofGEQ = fst (notLEQImpliesGNEQ {a = n} {b = m} (notLEQPred proofNotLEQ))
  proofNotEq = snd (notLEQImpliesGNEQ {a = n} {b = m} (notLEQPred proofNotLEQ))
|||Proof that !(a < b) implies b <= a
notLNEQImpliesGEQ : {a : Nat} -> {b : Nat} -> (Not (LNEQ a b)) -> (GEQ a b)
notLNEQImpliesGEQ {a} {b = Z} _ = (a ** Refl)
notLNEQImpliesGEQ {a = Z} {b = (S m)} proofNotLNEQ = void (proofNotLNEQ (((S m) ** Refl), ZIsNotS))
notLNEQImpliesGEQ {a = (S n)} {b = (S m)} proofNotLNEQ = (LEQSucc proofGEQ) where
  proofGEQ = notLNEQImpliesGEQ {a = n} {b = m} (notLNEQPred proofNotLNEQ)
|||decides if a <= b
isLEQ : (a : Nat) -> (b : Nat) -> Dec (LEQ a b)
isLEQ Z b = Yes (b ** Refl)
isLEQ (S a) Z = No (\(k ** proofEq) => (SIsNotZ proofEq))
isLEQ (S a) (S b) with (isLEQ a b)
  isLEQ (S a) (S b) | (Yes proofLEQ) = Yes (LEQSucc proofLEQ)
  isLEQ (S a) (S b) | (No contra) = No (\(k ** proofEq) => (contra (k ** (predEqual proofEq))))
|||Proof that a <= b implies a = b or a < b
leqImpliesEqOrLNEQ : {a : Nat} -> {b : Nat} -> (LEQ a b) -> Either (a = b) (LNEQ a b)
leqImpliesEqOrLNEQ {a} {b} (k ** proofEq) = case k of
  Z => Left (rewrite (plusCommutative Z a) in proofEq)
  (S n) => Right (((S n) ** proofEq), nonZeroSumNotEqual proofEq SIsNotZ)
|||Proof that all successors are larger than 0
succNotLEQzero : {n : Nat} -> (Not (LEQ (S n) Z))
succNotLEQzero {n} = \(k ** proofEq) => (SIsNotZ proofEq)
|||Proof that LEQ is reflexive
leqRefl : {n : Nat} -> LEQ n n
leqRefl {n} = (Z ** plusZeroRightNeutral n)
|||Proof that (a + k = b) and (b + l = a) implies (k = 0) and (l = 0)
leqAntiSymmetricIndirect : {a : Nat} -> {b : Nat} ->
  (proofLEQLeft : (LEQ a b)) -> (proofLEQRight : (LEQ b a)) ->
  ((fst proofLEQLeft) = Z, (fst proofLEQRight) = Z)
leqAntiSymmetricIndirect {a} {b} (k ** proofEqLeft) (l ** proofEqRight) =
  sumZeroImpliesZero (plusLeftCancel a (k + l) Z inductionStep) where
    inductionStep = rewrite (plusAssociative a Z (k + l)) in
                    rewrite (plusZeroRightNeutral a) in
                    rewrite (plusAssociative a k l) in
                    trans (cong {f = (\n => n + l)} proofEqLeft) proofEqRight
|||Proof that LEQ is antisymmetric
leqAntiSymmetric : {a : Nat} -> {b : Nat} -> (LEQ a b) -> (LEQ b a) -> (a = b)
leqAntiSymmetric {a} {b} (k ** proofEqLeft) (l ** proofEqRight) =
  rewrite (plusCommutative Z a) in
  rewrite (sym (fst (leqAntiSymmetricIndirect {a} {b} (k ** proofEqLeft) (l ** proofEqRight)))) in proofEqLeft
|||Proof that LEQ is transitive
leqTransitive : {a : Nat} -> {b : Nat} -> {c : Nat} -> (LEQ a b) -> (LEQ b c) -> (LEQ a c)
leqTransitive {a} {b} {c} (k ** proofEqLeft) (l ** proofEqRight) = ((k + l) ** proofEq) where
  proofEq = rewrite (plusAssociative a k l) in
            trans (cong {f = (\n => n + l)} proofEqLeft) (proofEqRight)
|||Proof that a <= b implies a <= b + c
leqPlusRight : {a : Nat} -> {b : Nat} -> (c : Nat) -> (LEQ a b) -> (LEQ a (b + c))
leqPlusRight {a} {b} c (k ** proofEq) = ((k + c) ** rewrite (plusAssociative a k c) in (cong {f = (\n => (n + c))} proofEq))
|||Proof that a + c <= b implies a <= b
ltePlusLeft : {a : Nat} -> {b : Nat} -> {c : Nat} -> (LEQ (a + c) b) -> (LEQ a b)
ltePlusLeft {a} {b} {c} (k ** proofEq) = ((c + k) ** rewrite (plusAssociative a c k) in proofEq)
|||Proof that a <= b implies (c + a) <= (c + b)
leqPlusConstantLeft : {a : Nat} -> {b : Nat} -> (c : Nat) -> (LEQ a b) -> (LEQ (c + a) (c + b))
leqPlusConstantLeft {a} {b} c (k ** proofEq) = (k ** proofFinalEq) where
  proofFinalEq = rewrite (sym (plusAssociative c a k)) in (cong {f = (\n => c + n)} proofEq)
|||Proof that a <= b implies (a + c) <= (b + c)
leqPlusConstantRight : {a : Nat} -> {b : Nat} -> (c : Nat) -> (LEQ a b) -> (LEQ (a + c) (b + c))
leqPlusConstantRight {a} {b} c proofLEQ = rewrite (plusCommutative a c) in
  rewrite (plusCommutative b c) in
  (leqPlusConstantLeft c proofLEQ)
|||Proof that (c + a) <= (c + b) implies a <= b
leqMinusConstantLeft : {a : Nat} -> {b : Nat} -> {c : Nat} -> (LEQ (c + a) (c + b)) -> (LEQ a b)
leqMinusConstantLeft {a} {b} {c} (k ** proofEq) = (k ** proofFinalEq) where
  proofFinalEq = (plusLeftCancel c (a + k) b (rewrite (plusAssociative c a k) in proofEq))
|||Proof that (a + c) <= (b + c) implies a <= b
leqMinusConstantRight : {a : Nat} -> {b : Nat} -> {c : Nat} -> (LEQ (a + c) (b + c)) -> (LEQ a b)
leqMinusConstantRight {a} {b} {c} proofLEQ = leqMinusConstantLeft {a} {b} {c} proofFinalEq where
  proofFinalEq = rewrite (plusCommutative c a) in
                 rewrite (plusCommutative c b) in proofLEQ
|||Proof that if a <= b, and c <= d, then (a + c) <= (b + d)
leqPlusIsLEQ : {a : Nat} -> {b : Nat} -> {c : Nat} -> {d : Nat} ->
  (LEQ a b) -> (LEQ c d) -> (LEQ (a + c) (b + d))
leqPlusIsLEQ {a = a0} {b = b0} {c = c0} {d = d0} proofLeftLEQ proofRightLEQ =
  leqTransitive (leqPlusConstantRight {a = a0} {b = b0} c0 proofLeftLEQ) (leqPlusConstantLeft {a = c0} {b = d0} b0 proofRightLEQ)
|||Proof that a <= b implies (c * a) <= (c * b)
leqMultConstantLeft : {a : Nat} -> {b : Nat} -> (c : Nat) -> (LEQ a b) -> (LEQ (c * a) (c * b))
leqMultConstantLeft {a} {b} c (k ** proofEq) = ((c * k) ** proofFinalEq) where
  proofFinalEq = rewrite (sym (multDistributesOverPlusRight c a k)) in
                 cong {f = (\n => c * n)} proofEq
|||Proof that a <= b implies (a * c) <= (b * c)
leqMultConstantRight : {a : Nat} -> {b : Nat} -> (c : Nat) -> (LEQ a b) -> (LEQ (a * c) (b * c))
leqMultConstantRight {a} {b} c (k ** proofEq) = ((k * c) ** proofFinalEq) where
  proofFinalEq = rewrite (sym (multDistributesOverPlusLeft a k c)) in
                 cong {f = (\n => n * c)} proofEq
|||Proof that if a <= b, and c <= d, then (a * c) <= (b * d)
leqMultIsLEQ : {a : Nat} -> {b : Nat} -> {c : Nat} -> {d : Nat} ->
  (LEQ a b) -> (LEQ c d) -> (LEQ (a * c) (b * d))
leqMultIsLEQ {a = a0} {b = b0} {c = c0} {d = d0} proofLeftLEQ proofRightLEQ =
  leqTransitive (leqMultConstantRight {a = a0} {b = b0} c0 proofLeftLEQ) (leqMultConstantLeft {a = c0} {b = d0} b0 proofRightLEQ)
|||Proof that (c * a) <= (c * b) and c != 0 implies a <= b
leqDivConstantLeft : {a : Nat} -> {b : Nat} -> {c : Nat} -> (Not (c = Z)) ->
  (LEQ (c * a) (c * b)) -> (LEQ a b)
--To be proved
|||Proof that (a * c) <= (b * c) and c != 0 implies a <= b
leqDivConstantRight : {a : Nat} -> {b : Nat} -> {c : Nat} -> (Not (c = Z)) ->
  (LEQ (a * c) (b * c)) -> (LEQ a b)
--To be proved
------------------------------------------------------------------------------------------------------
|||Convert from LEQ to LTE
leqToLTE : {a : Nat} -> {b : Nat} -> (LEQ a b) -> (LTE a b)
leqToLTE {a = Z} {b} _ = LTEZero
leqToLTE {a = S m} {b = Z} proofLEQ = void(succNotLEQzero proofLEQ)
leqToLTE {a = S m} {b = S n} (k ** proofEq) = LTESucc (leqToLTE {a = m} {b = n} (k ** predEqual proofEq))
|||Convert from LTE to LEQ
lteToLEQ : {a : Nat} -> {b : Nat} -> (LTE a b) -> (LEQ a b)
lteToLEQ {a = Z} {b} _ = LEQZero
lteToLEQ {a = S m} {b = Z} proofLTE = void(succNotLTEzero proofLTE)
lteToLEQ {a = S m} {b = S n} (LTESucc proofLTE) = LEQSucc (lteToLEQ {a = m} {b = n} proofLTE)
-------------------------------------------------------------------------------------------------------
|
function distmsr=dtiFrenetDistance(curves1, curves2, samegroupflag)
%Computes distance between curves using frenet framework: curve matching
%method from Bakircioglu et al., HBM 6:329-333 (1998)
%Usage:
%distmsr = dtiFrenetDistance(curves1, curves2, 0)  %two distinct fiber groups
%distmsr = dtiFrenetDistance(curves1, curves1, 1)  %one group compared with itself
%ER 2008 04/2008 SCSNL
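%Example (illustrative; assumes each input is a 3 x nNodes x nFibers array of
%resampled fiber coordinates):
% fg1 = rand(3, 50, 10); fg2 = rand(3, 50, 12);
% D = dtiFrenetDistance(fg1, fg2, 0); %D is a 10 x 12 distance matrix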
%1. Checks
if ~((samegroupflag==0)||(samegroupflag==1))
display('Same group flag should be 1 or zero');
return;
end
if ~isequal(size(curves1,2), size(curves2,2))
display('Curves must be resampled to the same number of nodes');
return
end
if (samegroupflag==1)&&~isequal(size(curves1,3), size(curves2,3))
display('Are you sure your curves1 and curves2 are equivalent? curves2 will be ignored');
end
nfibers1=size(curves1, 3);
%actually if same group flag == 1 then curves2 is ignored
%2. Compute frenet representations for the curves
for i=1:nfibers1
[T(:, :, i),N(:, :, i),B(:, :,i ),v1(:, i),k1(:, i),t1(:, i)] = frenetAll(curves1(1, :, i),curves1(2, :, i),curves1(3, :, i));
end
if samegroupflag==0
nfibers2=size(curves2, 3);
for i=1:nfibers2
[T(:, :, i),N(:, :, i),B(:, :,i ),v2(:, i),k2(:, i),t2(:, i)] = frenetAll(curves2(1, :, i),curves2(2, :, i),curves2(3, :, i));
end
else
nfibers2=size(curves1, 3);
end
distmsr=zeros(nfibers1, nfibers2);
%CASE WHERE THE GROUPS OF FIBERS ARE NOT EQUIVALENT
if (samegroupflag==0)
display('2 different fiber groups');
for i=1:nfibers1
for j=1:nfibers2
distmsr(i, j)=sum(3.*(k1(:, i).*v1(:, i)-k2(:, j).*v2(:, j)).^2+(t1(:, i).*v1(:, i)-t2(:, j).*v2(:, j)).^2);
end
end
elseif (samegroupflag==1)
display('2 equivalent fiber groups');
%A SHORTCUT-CASE, where the two fibergroups supplied are the same.
for i=1:nfibers1
for j=i:nfibers2
distmsr(i, j)=sum(3.*(k1(:, i).*v1(:, i)-k1(:, j).*v1(:, j)).^2+(t1(:, i).*v1(:, i)-t1(:, j).*v1(:, j)).^2);
end
end
distmsr=distmsr+distmsr'- diag(diag(distmsr)); %To make a full matrix
end
|
lemma ksimplex_0: "n = 0 \<Longrightarrow> s = {(\<lambda>x. p)}"
|
#'@title calcKristCaa
#'
#'@description Calculate air resistance (\code{Caa}) (dimensionless) using the
#'Kristensen method.
#'
#'@param shipType Ship type (vector of strings, see \code{\link{calcShipType}}).
#'Must align with \code{tankerBulkCarrierGCargoShipTypes} and
#' \code{containerShipTypes} groupings
#'@param dwt Ship maximum deadweight tonnage (vector of numericals, tonnage)
#'@param tankerBulkCarrierGCargoShipTypes Ship types specified in input
#'\code{shipTypes} to be modeled as tankers, bulk carriers and general cargo
#'vessels (vector of strings)
#'@param containerShipTypes Ship types specified in input \code{shipTypes} to be
#'modeled as container ships (vector of strings)
#'
#' @details
#' Models the effect of realistic hull roughness on resistance, which is not
#' captured in the frictional and residual resistance coefficients from tank
#' towing operations.
#'
#' This method requires ship types to be grouped. Use the
#' \code{tankerBulkCarrierGCargoShipTypes} and \code{containerShipTypes} grouping
#' parameters to provide these ship type groupings. Any ship types not included
#' in these groupings will be considered miscellaneous vessels.
#'
#' NOTE: within the container ship section of this calculation, estimations are
#' made of the ships TEU:\itemize{
#' \item Feeder: TEU = ((dwt/15.19)^(1/0.9814))
#' \item Panamax: TEU = ((dwt/28.81)^(1/0.902))
#' \item PostPanamax: TEU = ((dwt/37)^(1/0.875))
#' }
#' These come from: H.O. Kristensen (2016), "Revision of statistical analysis and determination of
#' regression formulas for main dimensions of container ships based on data from Clarkson".
#' Kristensen's SHIP DESMO model also uses another dwt/TEU relation for post-Panamax container ships
#' with breadth > 49 m. That equation has no inverse mapping dwt to TEU, so all post-Panamax ships are
#' assumed to follow the same dwt/TEU relation for all breadths, described above (see the helper
#' sketch after the function body).
#'
#' @return \code{Caa} (vector of numericals, dimensionless)
#'
#' @references
#'Kristensen, H. O. and Lutzen, M. 2013. "Prediction of Resistance and Propulsion
#'Power of Ships."
#'
#'Kristensen, H. O. 2016. "Revision of statistical analysis and determination of
#'regression formulas for main dimensions of container ships based on data from
#'Clarkson."
#'
#'\href{https://gitlab.gbar.dtu.dk/oceanwave3d/Ship-Desmo}{Kristensen, H. O.
#'"Ship-Desmo-Tool." https://gitlab.gbar.dtu.dk/oceanwave3d/Ship-Desmo}
#'
#'@family Kristensen Calculations
#'@family Resistance Calculations
#'
#' @examples
#' calcKristCaa(c("bulk.carrier","container.ship","other.tanker"),c(70000,191144,20000))
#' calcKristCaa(c("bulk.carrier","container.ship","other.tanker"),c(70000,191144,20000),
#' tankerBulkCarrierGCargoShipTypes=c("bulk.carrier","other.tanker"))
#'
#' @export
calcKristCaa<-function(shipType,dwt,
tankerBulkCarrierGCargoShipTypes=c("tanker","general.cargo","chemical.tanker","liquified.gas.tanker","oil.tanker","other.tanker","bulk.carrier"),
containerShipTypes=c("container.ship")
){
Caa<- ifelse(shipType%in%tankerBulkCarrierGCargoShipTypes,
ifelse(#case 1:"Small","Handysize","Handymax",
dwt <= 55000,0.07,
ifelse(#case 2:"Panamax","Aframax","Suezmax"
dwt <= 200000,0.05,
#case 3: "VLCC","VLBC"
0.04
)#end case 3
)# end case 2
# end case 1
#end non container ship group
,ifelse(shipType %in% containerShipTypes,#start container ship group
ifelse( #case 1 Feeder
dwt <= 35000, pmax(0.28*((dwt/15.19)^(1/0.9814))^-0.126,0.09),
ifelse(#case 2 Panamax
dwt <= 60000, pmax(0.28*((dwt/28.81)^(1/0.902))^-0.126,0.09),
pmax(0.28*((dwt/37)^(1/0.875))^-0.126,0.09) #PostPanamax
)# end case 2
),
NA# end case 1
#end container ship group
)
)
return(Caa/1000)
}
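
# Illustrative helper (a sketch, not part of the original package API): the
# dwt -> TEU relations listed in the @details section above, with the same dwt
# breakpoints used for the Feeder/Panamax/PostPanamax cases in calcKristCaa.
dwtToTEU <- function(dwt) {
  ifelse(dwt <= 35000, (dwt/15.19)^(1/0.9814),
         ifelse(dwt <= 60000, (dwt/28.81)^(1/0.902),
                (dwt/37)^(1/0.875)))
}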
|
!========================================================================
!
! T o m o f a s t - x
! -----------------------
!
! Authors: Vitaliy Ogarko, Jeremie Giraud, Roland Martin.
!
! (c) 2021 The University of Western Australia.
!
! The full text of the license is available in file "LICENSE".
!
!========================================================================
!==========================================================================
! A class to work with 3D vectors.
!
! Vitaliy Ogarko, UWA, CET, Australia, 2015.
!==========================================================================
module vector
implicit none
private
public :: operator(+)
public :: operator(-)
public :: operator(*)
public :: assignment(=)
integer, parameter :: CUSTOM_REAL = 8
!-----------------------------------------------
! A main class (floating point vectors).
type, public :: t_vector
real(kind=CUSTOM_REAL) :: x, y, z
contains
private
procedure, public, pass :: cross_product => vector_cross_product
procedure, public, pass :: dot_product => vector_dot_product
procedure, public, pass :: get_norm => vector_get_norm
end type t_vector
interface t_vector
module procedure vector_constructor
end interface t_vector
interface operator(+)
module procedure vector_add
end interface
interface operator(-)
module procedure vector_subtract
end interface
interface operator(*)
module procedure vector_mult
end interface
interface assignment(=)
module procedure vector_assign
end interface
!-----------------------------------
! A class for integer vectors.
type, public :: t_ivector
integer :: x, y, z
end type t_ivector
interface t_ivector
module procedure ivector_constructor
end interface t_ivector
contains
!=======================================================================
! Constructor for t_ivector type.
!=======================================================================
function ivector_constructor(i, j, k) result(res)
integer, intent(in) :: i, j, k
type(t_ivector) :: res
res%x = i
res%y = j
res%z = k
end function ivector_constructor
!=======================================================================
! Constructor for t_vector type.
!=======================================================================
function vector_constructor(x, y, z) result(res)
real(kind=CUSTOM_REAL), intent(in) :: x, y, z
type(t_vector) :: res
res%x = x
res%y = y
res%z = z
end function vector_constructor
!=======================================================================
! Returns cross-product between vectors.
!=======================================================================
pure function vector_cross_product(this, vec) result(res)
class(t_vector), intent(in) :: this
type(t_vector), intent(in) :: vec
type(t_vector) :: res
res%x = this%y * vec%z - this%z * vec%y
res%y = this%z * vec%x - this%x * vec%z
res%z = this%x * vec%y - this%y * vec%x
end function vector_cross_product
!=======================================================================
! Returns dot-product between vectors.
!=======================================================================
pure function vector_dot_product(this, vec) result(res)
class(t_vector), intent(in) :: this
type(t_vector), intent(in) :: vec
real(kind=CUSTOM_REAL) :: res
res = this%x * vec%x + this%y * vec%y + this%z * vec%z
end function vector_dot_product
!=======================================================================
! Returns the norm of a vector.
!=======================================================================
pure function vector_get_norm(this) result(res)
class(t_vector), intent(in) :: this
real(kind=CUSTOM_REAL) :: res
res = sqrt(this%x**2 + this%y**2 + this%z**2)
end function vector_get_norm
!=======================================================================
! Returns the sum of two vectors.
!=======================================================================
pure function vector_add(v1, v2) result(res)
type(t_vector), intent(in) :: v1, v2
type(t_vector) :: res
res%x = v1%x + v2%x
res%y = v1%y + v2%y
res%z = v1%z + v2%z
end function vector_add
!=======================================================================
! Returns the subtraction of two vectors.
!=======================================================================
pure function vector_subtract(v1, v2) result(res)
type(t_vector), intent(in) :: v1, v2
type(t_vector) :: res
res%x = v1%x - v2%x
res%y = v1%y - v2%y
res%z = v1%z - v2%z
end function vector_subtract
!=======================================================================
! Assign a vector to a scalar.
!=======================================================================
subroutine vector_assign(lhs, rhs)
type(t_vector), intent(out) :: lhs
real(kind=CUSTOM_REAL), intent(in) :: rhs
lhs%x = rhs
lhs%y = rhs
lhs%z = rhs
end subroutine vector_assign
!=======================================================================
! Returns a scalar times vector.
!=======================================================================
pure function vector_mult(const, vec) result(res)
real(kind=CUSTOM_REAL), intent(in) :: const
type(t_vector), intent(in) :: vec
type(t_vector) :: res
res%x = const * vec%x
res%y = const * vec%y
res%z = const * vec%z
end function vector_mult
end module vector
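
! A minimal usage sketch (illustrative; not part of the original source):
! exercises the constructor, the overloaded operators and the type-bound
! procedures defined in the module above.
program vector_demo
  use vector
  implicit none
  type(t_vector) :: a, b, c

  a = t_vector(1.d0, 0.d0, 0.d0)
  b = t_vector(0.d0, 1.d0, 0.d0)

  c = a%cross_product(b)                  ! expect (0, 0, 1)
  print *, 'cross =', c%x, c%y, c%z
  print *, 'dot   =', a%dot_product(b)    ! expect 0
  c = a + b
  print *, 'norm  =', c%get_norm()        ! expect sqrt(2) ~ 1.414
end program vector_demo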
|
Formal statement is: lemma connected_component_trans: "connected_component S x y \<Longrightarrow> connected_component S y z \<Longrightarrow> connected_component S x z" Informal statement is: If $x$ and $y$ are in the same connected component of $S$, and $y$ and $z$ are in the same connected component of $S$, then $x$ and $z$ are in the same connected component of $S$.
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: work_lda *)
(* prefix:
lda_c_ml1_params *params;
assert(p->params != NULL);
params = (lda_c_ml1_params * )(p->params);
*)
C := 6.187335:
b := [2.763169, 1.757515, 1.741397, 0.568985, 1.572202, 1.885389]:
malpha := z -> params_a_fc*((1 + z)^params_a_q + (1 - z)^params_a_q):
mbeta := z -> (1 + z)^(1/3)*(1 - z)^(1/3)/((1 + z)^(1/3) + (1 - z)^(1/3)):
k := (rs, z) -> C*malpha(z)*mbeta(z)*RS_FACTOR/rs:
Q := (rs, z) ->
- b[1]/(1 + b[2]*k(rs, z))
+ b[3]*log(1 + b[4]/k(rs, z))/k(rs, z)
+ b[5]/k(rs, z)
- b[6]/k(rs, z)^2:
f := (rs, zeta) -> 1/2*(RS_FACTOR/rs)^3 * (1 - zeta^2)/4 * Q(rs, zeta):
|
module Test.HelloWorld
import Test.Assertions
import System
import HelloWorld
-- Runs each test action in order and returns the number of failing tests.
collect : {default 0 acc : Nat} -> (List (IO Bool)) -> IO Nat
collect {acc} [] = pure acc
collect {acc} (test :: tests) = do
  bool <- test
  case bool of
    True => collect {acc=acc} tests
    False => collect {acc=S acc} tests
export
runTests : IO ()
runTests = do
  count <- collect
    [ assertEquals hello "Hello, World!"
    , assertEquals version "1.0.0"
    ]
  case count of
    Z => exitSuccess
    _ => exitFailure
|
#' Parse and examine further GBIF occurrence issues on a dataset.
#'
#' @export
#'
#' @param .data Output from a call to [occ_search()], [occ_data()], or
#' [occ_download_import()], but only if `return="all"` or `return="data"`;
#' otherwise the function stops with an error. The data from
#' `occ_download_import` is just a regular data.frame, so you can pass a
#' data.frame to this function, but it will fail if certain columns are
#' missing.
#' @param ... Named issue codes to keep (e.g. cdround), or to
#' remove (e.g. -cdround).
#' @param mutate (character) One of:
#' - `split` Split issues into new columns.
#' - `expand` Expand issue abbreviated codes into descriptive names.
#' for downloads datasets, this is not super useful since the
#' issues come to you as expanded already.
#' - `split_expand` Split into new columns, and expand issue names.
#'
#' For split and split_expand, values in cells become y ("yes") or n ("no")
#'
#' @references
#' <http://gbif.github.io/gbif-api/apidocs/org/gbif/api/vocabulary/OccurrenceIssue.html>
#'
#' @details See also the vignette **Cleaning data using GBIF issues**
#'
#' Note that you can also query based on issues, e.g.,
#' `occ_search(taxonKey=1, issue='DEPTH_UNLIKELY')`. However, I imagine
#' it's more likely that you want to search for occurrences based on a
#' taxonomic name, or geographic area, not based on issues, so it makes sense
#' to pull data down, then clean as needed using this function.
#'
#' This function only affects the `data` element in the `gbif` class that is
#' returned from a call to [occ_search()]. Maybe in a future version
#' we will remove the associated records from the `hierarchy` and `media`
#' elements as they are removed from the `data` element.
#'
#' You'll notice that we sort columns to make it easier to glimpse the important
#' parts of your data, namely taxonomic name, taxon key, latitude and longitude,
#' and the issues. The columns are unchanged otherwise.
#'
#' @examples \dontrun{
#' # what do issues mean, can print whole table
#' head(gbif_issues())
#' # or just occurrence related issues
#' gbif_issues()[which(gbif_issues()$type %in% c("occurrence")),]
#' # or search for matches
#' iss <- c('cdround','cudc','gass84','txmathi')
#' gbif_issues()[ gbif_issues()$code %in% iss, ]
#'
#' # compare out data to after occ_issues use
#' (out <- occ_search(limit=100))
#' out %>% occ_issues(cdround)
#'
#' # occ_data
#' (out <- occ_data(limit=100))
#' out %>% occ_issues(cdround)
#'
#' # Parsing output by issue
#' (res <- occ_data(
#' geometry='POLYGON((30.1 10.1,40 40,20 40,10 20,30.1 10.1))',
#' limit = 600))
#'
#' ## or parse issues in various ways
#' ### include only rows with cdround issue
#' gg <- res %>% occ_issues(cdround)
#' NROW(res$data)
#' NROW(gg$data)
#' head(res$data)[,c(1:5)]
#' head(gg$data)[,c(1:5)]
#'
#' ### remove data rows with certain issue classes
#' res %>% occ_issues(-cdround, -cudc)
#'
#' ### split issues into separate columns
#' res %>% occ_issues(mutate = "split")
#' res %>% occ_issues(-cudc, -mdatunl, mutate = "split")
#' res %>% occ_issues(gass84, mutate = "split")
#'
#' ### expand issues to more descriptive names
#' res %>% occ_issues(mutate = "expand")
#'
#' ### split and expand
#' res %>% occ_issues(mutate = "split_expand")
#'
#' ### split, expand, and remove an issue class
#' res %>% occ_issues(-cdround, mutate = "split_expand")
#'
#' ## Or you can use occ_issues without %>%
#' occ_issues(res, -cdround, mutate = "split_expand")
#'
#' # from GBIF downloaded data via occ_download_* functions
#' res <- occ_download_get(key="0000066-140928181241064", overwrite=TRUE)
#' x <- occ_download_import(res)
#' occ_issues(x, -txmathi)
#' occ_issues(x, txmathi)
#' occ_issues(x, gass84)
#' occ_issues(x, zerocd)
#' occ_issues(x, gass84, txmathi)
#' occ_issues(x, mutate = "split")
#' occ_issues(x, -gass84, mutate = "split")
#' occ_issues(x, mutate = "expand")
#' occ_issues(x, mutate = "split_expand")
#'
#' # occ_search/occ_data with many inputs gives slightly different output
#' # format than normal
#' xyz <- occ_data(taxonKey = c(9362842, 2492483, 2435099), limit = 300)
#' xyz
#' length(xyz) # length 3
#' names(xyz) # matches taxonKey values passed in
#' occ_issues(xyz, -gass84)
#' occ_issues(xyz, -cdround)
#' occ_issues(xyz, -cdround, -gass84)
#' }
occ_issues <- function(.data, ..., mutate = NULL) {
assert(.data, c("gbif", "gbif_data", "data.frame", "tbl_df"))
check_issues(type = "occurrence", ...)
handle_issues(.data, is_occ = TRUE, ..., mutate = mutate)
}
|
State Before: F : Type ?u.33624
ι : Type ?u.33627
α : Type u_1
β : Type ?u.33633
inst✝ : DivisionRing α
n : ℤ
⊢ ↑(↑n)⁻¹ = (↑n)⁻¹ State After: case ofNat
F : Type ?u.33624
ι : Type ?u.33627
α : Type u_1
β : Type ?u.33633
inst✝ : DivisionRing α
n : ℕ
⊢ ↑(↑(Int.ofNat n))⁻¹ = (↑(Int.ofNat n))⁻¹
case negSucc
F : Type ?u.33624
ι : Type ?u.33627
α : Type u_1
β : Type ?u.33633
inst✝ : DivisionRing α
n : ℕ
⊢ ↑(↑(Int.negSucc n))⁻¹ = (↑(Int.negSucc n))⁻¹ Tactic: cases' n with n n State Before: case ofNat
F : Type ?u.33624
ι : Type ?u.33627
α : Type u_1
β : Type ?u.33633
inst✝ : DivisionRing α
n : ℕ
⊢ ↑(↑(Int.ofNat n))⁻¹ = (↑(Int.ofNat n))⁻¹ State After: no goals Tactic: simp [ofInt_eq_cast, cast_inv_nat] State Before: case negSucc
F : Type ?u.33624
ι : Type ?u.33627
α : Type u_1
β : Type ?u.33633
inst✝ : DivisionRing α
n : ℕ
⊢ ↑(↑(Int.negSucc n))⁻¹ = (↑(Int.negSucc n))⁻¹ State After: no goals Tactic: simp only [ofInt_eq_cast, Int.cast_negSucc, ← Nat.cast_succ, cast_neg, inv_neg, cast_inv_nat]
|
# ***Introduction to Radar Using Python and MATLAB***
## Andy Harrison - Copyright (C) 2019 Artech House
<br/>
# Rounded Nose Cone Radar Cross Section
***
Referring to Section 7.4.1.6, for cones with rounded nose tips, as shown in Figure 7.12, the physical optics approximation with axial incidence is given by (Equation 7.55)
$$
\sigma = \pi b^2\Bigg[ 1 - \frac{\sin\big(2kb(1-\sin\alpha)\big)}{kb\cos^2\alpha} + \frac{1 + \cos^4\alpha}{4(kb)^2\cos^4\alpha} - \frac{\cos\big(2kb(1-\sin\alpha)\big)}{2(kb)^2\cos^2\alpha} \Bigg] \hspace{0.5in} \text{(m}^2\text{)},
$$
where $b$ is the radius of the rounded nose tip. For incident angles other than axial, but less than the cone angle ($\theta_i \le \alpha$), the radar cross section is (Equation 7.56)
$$
\sigma = \pi b^2 \frac{1 + \theta_i^2}{4(kb)^2} \Big[A_1 + A_2 \cos\big(2kb\cos\theta_i (1 - \sin\alpha)\big) + A_3\sin\big(2kb\cos\theta_i(1-\sin\alpha)\big)\Big]\hspace{0.25in} \text{(m}^2\text{)},
$$
where
\begin{align}
A_1 = \; &2 + 2\alpha^2 - 2\theta_i^2 + \alpha^4 - \alpha^2\theta_i^2 + 0.5\theta_i^4 + 2\alpha^4\theta_i^2 + 4(kb)^2 - 2(kb)^2\theta_i^2 \nonumber \\[5pt]
&- 8(kb)^3\theta_i^2 + (kb)^2\theta_i^4 + 6(kb)^2\alpha^2\theta_i^2 + 8(kb)^3\theta_i^4 + 13(kb)^4\theta_i^4, \\ \nonumber \\
A_2 = &-2 -2\alpha^2 + 2\theta_i^2 + \alpha^2\theta_i^2 - 0.5\theta_i^4 - 6(kb)^2\theta_i^2 + 8(kb)^4\theta_i^3 + 3(kb)^2\theta_i^4, \\ \nonumber \\
A_3 = &-4\big(1 + \alpha^2 - 0.5\theta_i^2 + 3(kb\theta_i)^2\big)\big(kb - kb\theta_i^2 - (kb\theta_i)^2\big) - 4(kb\theta_i)^3.\nonumber
\end{align}
***
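As a quick reference (a sketch of Equation 7.55 only, independent of the `Libs` routine used below; it assumes the free-space wavenumber $k = 2\pi f / c$), the axial-incidence case can be evaluated directly
```python
from scipy.constants import c, pi
from numpy import sin, cos

def axial_rcs(frequency, cone_half_angle, nose_radius):
    """Physical optics RCS (m^2) of a rounded nose cone at axial incidence (Eq. 7.55)."""
    kb = 2.0 * pi * frequency / c * nose_radius
    sa, ca = sin(cone_half_angle), cos(cone_half_angle)
    return pi * nose_radius ** 2 * (1.0
                                    - sin(2.0 * kb * (1.0 - sa)) / (kb * ca ** 2)
                                    + (1.0 + ca ** 4) / (4.0 * kb ** 2 * ca ** 4)
                                    - cos(2.0 * kb * (1.0 - sa)) / (2.0 * kb ** 2 * ca ** 2))
```
***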
Begin by getting the library path
```python
import lib_path
```
Set the operating frequency (Hz), the cone half angle (radians), and the nose radius (m)
```python
from numpy import radians
frequency = 1e9
cone_half_angle = radians(20.0)
nose_radius = 1.4
```
Set the incident angles using the `linspace` routine from `numpy`
```python
from numpy import linspace
incident_angle = linspace(0, cone_half_angle, 1801)
```
Calculate the radar cross section (m^2) for the rounded nose cone
```python
from Libs.rcs.rounded_nose_cone import radar_cross_section
from numpy import array
rcs = array([radar_cross_section(frequency, cone_half_angle, nose_radius, ia) for ia in incident_angle])
```
Display the radar cross section (m^2) for the rounded nose cone using the routines from `matplotlib`
```python
from matplotlib import pyplot as plt
from numpy import log10, degrees
# Set the figure size
plt.rcParams["figure.figsize"] = (15, 10)
# Display the results
plt.plot(degrees(incident_angle), 10.0 * log10(abs(rcs)))
# Set the plot title and labels
plt.title('RCS vs Incident Angle', size=14)
plt.ylabel('RCS (dBsm)', size=12)
plt.xlabel('Incident Angle (deg)', size=12)
# Set the tick label size
plt.tick_params(labelsize=12)
# Turn on the grid
plt.grid(linestyle=':', linewidth=0.5)
```
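As a final sanity check (not from the text), the first element of `rcs` corresponds to axial incidence ($\theta_i = 0$) and can be printed directly:
```python
# Axial-incidence RCS from the computed pattern; illustrative check only
print('Axial RCS: {:.2f} dBsm'.format(10.0 * log10(abs(rcs[0]))))
```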
```python
```
|
[STATEMENT]
lemma matrix_to_iarray_nth:
"matrix_to_iarray A !! to_nat i !! to_nat j = A $ i $ j"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. matrix_to_iarray A !! mod_type_class.to_nat i !! mod_type_class.to_nat j = A $ i $ j
[PROOF STEP]
unfolding matrix_to_iarray_def o_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. IArray (map (\<lambda>x. vec_to_iarray (A $ mod_type_class.from_nat x)) [0..<CARD('c)]) !! mod_type_class.to_nat i !! mod_type_class.to_nat j = A $ i $ j
[PROOF STEP]
using vec_to_iarray_nth'
[PROOF STATE]
proof (prove)
using this:
vec_to_iarray ?A !! mod_type_class.to_nat ?i = ?A $ ?i
goal (1 subgoal):
1. IArray (map (\<lambda>x. vec_to_iarray (A $ mod_type_class.from_nat x)) [0..<CARD('c)]) !! mod_type_class.to_nat i !! mod_type_class.to_nat j = A $ i $ j
[PROOF STEP]
by auto
|
import tactic
import data.set.basic
/--
An ideal of R consists of a nonempty subset of R which is closed under addition, additive inverses,
and multiplication by elements of R.
-/
@[nolint has_inhabited_instance]
structure myideal (R : Type) [comm_ring R] :=
(iset : set R)
(not_empty : iset.nonempty)
(r_mul_mem' {x r} : x ∈ iset → r * x ∈ iset)
(add_mem' {x y} : x ∈ iset → y ∈ iset → (x + y) ∈ iset)
(neg_mem' {x} : x ∈ iset → -x ∈ iset)
attribute [ext] myideal
namespace myideal
variables {R : Type} [comm_ring R] (I : myideal R)
instance : has_mem R (myideal R) :=
{ mem := λ x i, x ∈ i.iset }
instance : has_coe (myideal R) (set R) :=
{coe := λ x, x.iset}
instance : has_subset (myideal R) :=
{ subset := λ x y, x.iset ⊆ y.iset }
theorem add_mem {x y : R}: x ∈ I → y ∈ I → x + y ∈ I := by apply add_mem'
theorem neg_mem {x : R} : x ∈ I → -x ∈ I := by apply neg_mem'
theorem r_mul_mem {x r : R} : x ∈ I → r * x ∈ I := by apply r_mul_mem'
end myideal
variables {R : Type} [comm_ring R]
/--
An integral domain has no zero divisors.
-/
def is_integral_domain (R: Type) [comm_ring R]: Prop :=
∀ (x y : R), x * y = 0 → x = 0 ∨ y = 0
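-- Illustrative example (a sketch, not from the source): ℤ satisfies this
-- predicate, using mathlib's `mul_eq_zero`.
example : is_integral_domain ℤ :=
λ x y h, mul_eq_zero.mp h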
/--
A principal ideal is one of the form aR for some a ∈ R.
-/
def principal_ideal (x : R) : myideal R :=
{
iset := { i : R | ∃(v:R), i = x * v},
not_empty := begin
rw set.nonempty_def,
use x,
use 1,
rw mul_one,
end,
r_mul_mem' := begin
intro i,
intro j,
intro h,
cases h,
use (j * h_w),
rw h_h,
ring,
end,
add_mem' := begin
intros i j hi hj,
cases hi,
cases hj,
use hi_w + hj_w,
rw mul_add,
rw hi_h, rw hj_h,
end,
neg_mem' := begin
intros i hi,
cases hi,
use -hi_w,
rw hi_h,
ring,
end
}
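-- Illustrative example (a sketch, not from the source): 6 lies in the
-- principal ideal generated by 2 in ℤ, witnessed by 6 = 2 * 3.
example : (6 : ℤ) ∈ principal_ideal (2 : ℤ) :=
begin
  use 3,
  norm_num,
end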
/--
An integral domain R is a principal ideal domain (PID) iff every ideal of R is principal.
-/
def is_pid (R: Type) [comm_ring R]: Prop :=
is_integral_domain R ∧ ∀(I : myideal R), ∃ (x : R), I = principal_ideal x
--1 ∈ I → I = R
lemma one_mem_ideal_R {I : myideal R} : (1:R) ∈ I → coe I = {x : R | true} :=
begin
intro h,
ext,
split,
intro, triv,
intro h2,
rw ←mul_one x,
apply myideal.r_mul_mem,
exact h,
end
lemma zero_mem_ideal {I : myideal R} : (0:R) ∈ I :=
begin
have h := myideal.not_empty I,
rw set.nonempty at h,
cases h with x hx,
have h2 : x + (-x) = 0,
ring,
rw ←h2,
apply myideal.add_mem I,
exact hx,
apply myideal.neg_mem I,
exact hx,
end
/--
The sum of two ideals is also an ideal.
-/
def sum_ideal (I J : myideal R) : myideal R :=
{ iset := {r : R | ∃ (i ∈ I) (j ∈ J), r = i + j},
not_empty := begin
have h1 := myideal.not_empty I,
have h2 := myideal.not_empty J,
rw set.nonempty at h1 h2 ⊢,
cases h1,
cases h2,
use h1_w + h2_w,
use h1_w, split, exact h1_h,
use h2_w, split, exact h2_h,
refl,
end,
r_mul_mem' := begin
intros x r h,
cases h with i h2,
cases h2 with hi h2,
cases h2 with j h2,
cases h2 with hj h2,
rw h2,
rw mul_add,
use r * i,
split,
apply myideal.r_mul_mem,
exact hi,
use r * j,
split,
apply myideal.r_mul_mem,
exact hj,
refl,
end,
add_mem' := begin
intros x y hxm hym,
cases hxm with xi hxi,
cases hxi with hxi h2,
cases h2 with xj hxj,
cases hxj with hxj hx,
cases hym with yi hyi,
cases hyi with hyi h2,
cases h2 with yj hyj,
cases hyj with hyj hy,
use xi + yi,
split,
apply myideal.add_mem, exact hxi, exact hyi,
use xj + yj,
split,
apply myideal.add_mem, exact hxj, exact hyj,
rw hy,
rw hx,
ring,
end,
neg_mem' := begin
intros x h,
cases h with i hi,
cases hi with hi h2,
cases h2 with j h2,
cases h2 with hj h2,
use -i,
split,
apply myideal.neg_mem,
exact hi,
use -j,
split,
apply myideal.neg_mem,
exact hj,
rw h2, ring,
end
}
notation I ` + ` J := sum_ideal I J
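-- Illustrative example (a sketch, not from the source): 5 = 2 + 3 lies in
-- the sum of the principal ideals generated by 2 and 3 in ℤ.
example : (5 : ℤ) ∈ sum_ideal (principal_ideal 2) (principal_ideal 3) :=
begin
  use 2,
  split,
  { use 1, norm_num },
  use 3,
  split,
  { use 1, norm_num },
  norm_num,
end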
/--
An element r of R is irreducible iff it is not a unit and
for all factorisations x * y = r, x or y is a unit.
-/
def irreducible (r : R) : Prop :=
¬is_unit r ∧ ∀(x y : R), x * y = r → is_unit x ∨ is_unit y
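-- Illustrative example (a sketch, not from the source): a unit such as 1
-- is never irreducible under this definition.
example : ¬ irreducible (1 : ℤ) :=
λ h, h.1 is_unit_one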
lemma r_prod_unit_r_unit (r a : R) (hpu : is_unit (r * a)) :
is_unit r :=
begin
have h2:= is_unit.exists_right_inv hpu,
cases h2,
rw is_unit_iff_exists_inv',
use a * h2_w, rw mul_comm, rw ← mul_assoc, exact h2_h,
end
lemma unit_mul_irr_is_irr (r a : R) (hirr :irreducible r) (hu: is_unit a) :
irreducible (a * r) :=
begin
split,
{
by_contra,
cases hirr,
apply hirr_left,
apply r_prod_unit_r_unit r a,
rw mul_comm,
exact h
},
{
intros x y h,
have h2 : ∃ (b : R), r = (b * x) * y,
rw is_unit_iff_exists_inv at hu,
cases hu with c h3,
use c, rw mul_assoc, rw h,
ring_nf, rw mul_assoc, rw h3, rw mul_one,
cases hirr,
cases h2 with b hb,
specialize hirr_right (b * x) y,
rw eq_comm at hb,
have h3 := hirr_right hb,
cases h3,
left,
rw mul_comm at h3,
apply r_prod_unit_r_unit x b,
exact h3,
right,
exact h3,
}
end
/--
For a and b in R, a divides b iff there is a c ∈ R such that b = a * c.
-/
def divisible (a b: R) : Prop :=
∃ (c : R), b = a * c
notation a ` \ ` b := divisible a b
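-- Illustrative example (a sketch, not from the source): 2 divides 6 in ℤ,
-- witnessed by 6 = 2 * 3, written with the notation just introduced.
example : ((2 : ℤ) \ 6) :=
begin
  use 3,
  norm_num,
end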
/--
Two elements a and b are associates iff b = a * c for some unit c.
-/
def associates (a b : R) : Prop :=
∃ (c : R), is_unit c ∧ b = a * c
notation a ` ~ ` b := associates a b
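-- Illustrative example (a sketch, not from the source): 2 and -2 are
-- associates in ℤ via the unit -1.
example : ((2 : ℤ) ~ (-2 : ℤ)) :=
begin
  use -1,
  split,
  { exact is_unit_one.neg },
  { norm_num },
end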
lemma assoc_sym (a b : R) : a ~ b → b ~ a:=
begin
intro h,
cases h with u h2,
cases h2 with hunit h3,
rw is_unit_iff_exists_inv at hunit,
cases hunit with uinv huinv,
use uinv,
split,
rw is_unit_iff_exists_inv,
use u,
rw mul_comm,
exact huinv,
rw h3,
rw mul_assoc,
rw huinv,
rw mul_one,
end
lemma symm_divisible_associates_int_domain (hint : is_integral_domain R) (a b : R) : a \ b → b \ a → a ~ b :=
begin
intros h1 h2,
cases h1 with x hx,
cases h2 with y hy,
by_cases a ≠ 0,
{
rw hx at hy,
apply_fun λ x, x + (-a) at hy,
rw add_neg_self at hy,
rw ←mul_one (-a) at hy,
rw neg_mul_comm a 1 at hy,
rw mul_assoc at hy,
rw ←mul_add at hy,
specialize hint a (x * y + (-1)),
have h2 := hint (eq.symm hy),
cases h2,
exfalso, apply h, exact h2,
apply_fun λ x, x + 1 at h2,
rw zero_add at h2,
rw add_assoc at h2,
rw neg_add_self at h2,
rw add_zero at h2,
use x,
split,
rw is_unit_iff_exists_inv,
use y,
exact h2,
exact hx,
},
{
use 1,
split,
exact is_unit_one,
rw not_ne_iff at h,
rw h at hx,
rw hx,
rw h,
rw zero_mul,
rw zero_mul,
}
end
lemma generators_associate_if_ideals_eq (a b : R) (hint : is_integral_domain R):
principal_ideal a = principal_ideal b → a ~ b :=
begin
intro h,
have h1 : a ∈ principal_ideal a,
use 1, rw mul_one,
have h2 : b ∈ principal_ideal b,
use 1, rw mul_one,
rw h at h1,
rw ←h at h2,
apply symm_divisible_associates_int_domain,
exact hint,
exact h2,
exact h1,
end
/--
In a dividing sequence, each term is divisible by the next term.
-/
def dividing_sequence (f : ℕ → R) : Prop :=
∀ (n : ℕ), f (n + 1) \ f n
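-- Illustrative example (a sketch, not from the source): a constant
-- sequence is a dividing sequence, since each term divides itself.
example : dividing_sequence (λ n, (6 : ℤ)) :=
begin
  intro n,
  use 1,
  norm_num,
end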
/--
A unique factorisation domain (UFD) is an integral domain such that:
all infinite dividing sequences 'stabilise' (past some n ∈ ℕ, all terms are associate), and
all irreducible elements are prime.
-/
def is_ufd (R : Type) [comm_ring R] : Prop :=
is_integral_domain R ∧ (∀(f : ℕ → R), dividing_sequence f →
∃ (m : ℕ), ∀(q : ℕ), m ≤ q → f q ~ f (q + 1) ) ∧
(∀ (p: R), irreducible p →∀ (a b: R), p \ (a*b) → p \ a ∨ p \ b )
/--
In an ascending ideal chain, each ideal is contained in the next one.
-/
def asc_ideal_chain (i : ℕ → myideal R) : Prop :=
∀ (n : ℕ), i n ⊆ i (n + 1)
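-- Illustrative example (a sketch, not from the source): a constant chain
-- of ideals is trivially ascending.
example (I : myideal R) : asc_ideal_chain (λ n, I) :=
begin
  intro n,
  exact set.subset.refl I.iset,
end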
lemma asc_ideal_chain_add (i : ℕ → myideal R) :
asc_ideal_chain i → ∀(n : ℕ), ∀ (m : ℕ), i n ⊆ i (m+n) :=
begin
intros h n m,
induction m,
rw zero_add, refl,
specialize h (m_n + n),
rw nat.succ_eq_add_one,
change (i n).iset ⊆ (i (m_n + n)).iset at m_ih,
change (i (m_n + n)).iset ⊆ (i (m_n + n + 1)).iset at h,
change (i n).iset ⊆ (i (m_n + 1 + n)).iset,
apply set.subset.trans,
exact m_ih,
nth_rewrite_rhs 1 add_comm,
rw add_assoc,
nth_rewrite_rhs 0 add_comm,
exact h,
end
lemma asc_ideal_chain_ind (i : ℕ → myideal R) :
asc_ideal_chain i ↔ ∀(n : ℕ), ∀ (m : ℕ), n ≤ m → i n ⊆ i m :=
begin
split,
{
intros h n m h2,
have h3 : ∃ (r: ℕ), n + r = m,
use m - n,
linarith,
cases h3 with s hs,
rw ←hs,
rw add_comm,
apply asc_ideal_chain_add,
exact h,
},
{
intros h n,
specialize h n (n+1),
apply h,
norm_num,
}
end
theorem pid_is_noetherian (R : Type) [comm_ring R] (hpid : is_pid R)
(i :ℕ → myideal R) (hinc : asc_ideal_chain i) :
∃(r : ℕ), ∀(s : ℕ ), r ≤ s → i s = i (s + 1)
:=
begin
cases hpid with hint hpid,
let S := set.Union (λ (x : ℕ), myideal.iset (i x)),
let si := myideal.mk S,
let sii : myideal R,
apply si,
{
rw set.nonempty,
let i0 := i 0,
have hne := myideal.not_empty i0,
rw set.nonempty at hne,
cases hne with x0 hx0,
use x0,
rw set.mem_Union,
use 0, exact hx0,
},
{
intros x r h,
rw set.mem_Union at h ⊢,
cases h,
use h_w,
apply myideal.r_mul_mem',
exact h_h,
},{
intros x y h1 h2,
rw set.mem_Union at h1 h2 ⊢,
cases h1 with i1 hi1,
cases h2 with i2 hi2,
by_cases i1 ≤ i2,
{
have h3 := ((asc_ideal_chain_ind i).mp) hinc,
specialize h3 i1 i2,
have h4 := h3 h,
use i2,
apply myideal.add_mem',
apply set.mem_of_subset_of_mem h4,
exact hi1,
exact hi2,
},
{
have hbt : i2 ≤ i1,
rw le_iff_lt_or_eq,
left,
push_neg at h,
exact h,
have h3 := ((asc_ideal_chain_ind i).mp) hinc,
specialize h3 i2 i1,
have h4 := h3 hbt,
use i1,
apply myideal.add_mem',
exact hi1,
apply set.mem_of_subset_of_mem h4,
exact hi2,
}
},{
intros x h,
rw set.mem_Union at h ⊢,
cases h with b hb,
use b,
apply myideal.neg_mem',
exact hb,
},
specialize hpid sii,
cases hpid with a ha,
have hasi : a ∈ sii,
rw ha,
change ∃(v:R), a = a * v,
use 1,
rw mul_one,
change a ∈ sii.iset at hasi,
rw set.mem_Union at hasi,
cases hasi with q hq,
use q,
intro s,
intro hsq,
have hisq : (i q).iset = S,
{
ext,
split,
intro h,
rw set.mem_Union,
use q, exact h,
intro h,
change x ∈ sii.iset at h,
rw ha at h,
cases h,
rw mul_comm at h_h,
rw h_h,
apply myideal.r_mul_mem',
exact hq,
},
apply myideal.ext,
apply set.subset.antisymm,
specialize hinc s,
exact hinc,
apply @set.subset.trans _ (i (s + 1)).iset (i q).iset,
rw hisq,
exact set.subset_Union (λ (x : ℕ), myideal.iset (i x)) (s + 1),
rw asc_ideal_chain_ind at hinc,
specialize hinc q s,
apply hinc,
exact hsq,
end
theorem pid_irreducible_is_prime (hpid : is_pid R) (p : R) (hirr : irreducible p) :
∀ (a b : R), p \ (a * b) → p \ a ∨ p \ b :=
begin
cases hpid with hint hpid,
intros a b h,
let I := sum_ideal (principal_ideal a) (principal_ideal p),
specialize hpid I,
cases hpid with d hd,
have hpi: p ∈ I,
use 0,
split,
exact zero_mem_ideal,
use p,
split,
use 1,
rw mul_one,
rw zero_add,
rw hd at hpi,
cases hpi with r hdr,
cases hirr,
specialize hirr_right d r,
have hut := hirr_right (eq_comm.mpr hdr),
cases hut,
{
right,
have hone : (1:R) ∈ I,
rw is_unit_iff_exists_inv at hut,
rw hd,
cases hut with di hdi,
use di,
rw hdi,
cases hone with u h2,
cases h2 with hu h2,
cases h2 with v h2,
cases h2 with hv h2,
cases hu with s hs,
cases hv with t ht,
rw hs at h2,
rw ht at h2,
apply_fun λ x, b*x at h2,
rw mul_one at h2,
rw mul_add at h2,
rw h2,
rw ← mul_assoc,
rw mul_comm b a,
cases h with q hq,
rw hq,
use q * s + b * t,
ring,
},{
have hai : a ∈ I,
use a,
split,
use 1,
rw mul_one,
use 0,
split,
exact zero_mem_ideal,
rw add_zero,
rw hd at hai,
cases hai with e he,
rw is_unit_iff_exists_inv at hut,
cases hut with ri hri,
apply_fun λ x, x * ri at hdr,
rw mul_assoc at hdr,
rw hri at hdr,
rw mul_one at hdr,
rw ←hdr at he,
left,
use (ri * e),
rw ← mul_assoc,
exact he,
}
end
theorem pid_is_ufd (R : Type) [hc : comm_ring R] (hpid : is_pid R): is_ufd R:=
begin
split,
exact hpid.left,
split,
{
intro f,
intro h,
let i := λ(x : ℕ), principal_ideal (f x),
have hinc : asc_ideal_chain i,
intro n,
change principal_ideal (f n) ⊆ principal_ideal (f (n+1)),
specialize h n,
cases h with y hy,
rw hy,
intros x h2,
cases h2 with z hz,
use y * z,
rw ← mul_assoc,
exact hz,
have hnoet := pid_is_noetherian R hpid,
specialize hnoet i,
have hstab := hnoet hinc,
cases hstab with m hm,
use m,
intro q,
specialize hm q,
intro hmq,
have hiqs := hm hmq,
apply generators_associate_if_ideals_eq,
exact hpid.left,
exact hiqs,
},
{
exact pid_irreducible_is_prime hpid,
}
end
#lint
|