\section{Voice}
Voice is the most common feature describing the predicate. It describes the relationship between the action expressed by the predicate and the participants of the action (the subject, objects, etc.).
There are three voices in Novoslovnica:
\begin{itemize}
\item Active
\item Reflexive
\item Passive
\end{itemize}
\begin{figure}
\includegraphics[width=\linewidth]{./sources/voices.png}
\caption{Voices in Novoslovnica}
\label{fig:voices}
\end{figure}
\textbf{Active voice}
The active voice describes a sentence where the subject performs the action stated by the verb.
\underline{Examples:}
\textit{Moǐ brat sę zova Ivan.} - My brother's name is Ivan.
\textit{Glědaǐ u prozorec!} - Look at the window!
\textbf{Reflexive voice}
The reflexive voice describes a sentence where the subject plays both the role of the actor and the role of the object.
\underline{Examples:}
\textit{Ja sę učim govoriti anĝliǐskym jazykom.} - I learn how to speak English.
\textit{On sę glědaje v zòrcadlo.} - He is looking at himself in the mirror.
\textbf{Passive voice}
The passive voice describes a sentence where the subject undergoes the action stated by the verb.
\underline{Examples:}
\textit{Ryba je byla zjědena kotom.} - The fish was eaten by a cat.
\textit{Moǐ prijatelj je byl prijęt do råboty.} - My friend was accepted for the job.
-- Andreas, 2018-06-19, issue #3130
-- Do not treat .(p) as projection pattern, but always as dot pattern.
record R : Set₁ where
  field f : Set
open R
-- Shouldn't pass.
mkR : (A : Set) → R
mkR A .(f) = A
Set Warnings "-notation-overridden".
Require Import Category.Lib.
Require Export Category.Theory.Natural.Transformation.
Require Import Category.Structure.Closed.
Require Import Category.Construction.Opposite.
Require Import Category.Construction.Product.
Generalizable All Variables.
Set Primitive Projections.
Set Universe Polymorphism.
Unset Transparent Obligations.
Program Definition InternalHomFunctor `(C : Category)
{E : @Cartesian C} {O : @Closed C _} : C^op ∏ C ⟶ C := {|
fobj := fun p => @exponent_obj C E O (fst p) (snd p);
fmap := fun x y f => _
|}.
Next Obligation.
exact (curry (h0 ∘ eval ∘ (second h))).
Defined.
Next Obligation.
unfold InternalHomFunctor_obligation_1.
proper; simpl.
rewrites.
reflexivity.
Qed.
Next Obligation. unfold second; simpl; cat. Qed.
Next Obligation.
unfold InternalHomFunctor_obligation_1; simpl.
rewrite <- !comp_assoc.
rewrite curry_comp.
symmetry.
rewrite curry_comp.
rewrite <- comp_assoc.
apply compose_respects.
reflexivity.
symmetry.
rewrite curry_comp_l.
rewrite <- !comp_assoc.
rewrite <- first_second.
rewrite !comp_assoc.
rewrite ump_exponents.
rewrite <- !comp_assoc.
rewrite second_comp.
reflexivity.
Qed.
Notation "a ≈> b":= (InternalHomFunctor _ (a, b))
(at level 89) : category_scope.
Notation "a ≈{ C }≈> b":= (InternalHomFunctor C (a, b))
(at level 89) : category_scope.
Voyage is set in 1865. President Barbicane of the 'Gun Club' decides to build an enormous cannon in Baltimore to shoot a shell, capable of supporting human life, towards the moon in the hopes of a successful landing. Voyage's protagonist, Michel Ardan, volunteers to travel in the aluminium shell. After the game's brief introduction in the shell, Ardan lands on the moon and discovers the Selenites, as well as a complex ecosystem of lunar plants. The main accessible areas in the game are the moon's surface and the underground Selenite civilization.
function a = householder ( n, x )
%*****************************************************************************80
%
%% HOUSEHOLDER constructs a HOUSEHOLDER matrix.
%
% Discussion:
%
% A Householder matrix is also called an elementary reflector.
%
% Formula:
%
% A = I - ( 2 * X * X' ) / ( X' * X )
%
% Example:
%
% N = 5, X = ( 1, 1, 1, 0, -1 )
%
% 1/2 -1/2 -1/2 0 1/2
% -1/2 1/2 -1/2 0 1/2
% -1/2 -1/2 1/2 0 1/2
% 0 0 0 1 0
% 1/2 1/2 1/2 0 1/2
%
% Properties:
%
% A is symmetric: A' = A.
%
% Because A is symmetric, it is normal.
%
% Because A is normal, it is diagonalizable.
%
% A is orthogonal: A' * A = A * A' = I.
%
% inverse ( A ) = A.
%
% det ( A ) = -1.
%
% A is unimodular.
%
% If Y and Z are nonzero vectors of equal length, and
% X = ( Y - Z ) / NORM(Y-Z),
% then
% A * Y = Z.
%
% A represents a reflection through the plane which
% is perpendicular to the vector X. In particular, A*X = -X.
%
% LAMBDA(1) = -1;
% LAMBDA(2:N) = +1.
%
% If X is the vector used to define H, then X is the eigenvector
% associated with the eigenvalue -1.
%
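%  Usage check (a sketch; it assumes this file is on the MATLAB path and
%  illustrates the reflection property A*X = -X stated above):
%
%    x = [ 1, 1, 1, 0, -1 ];
%    A = householder ( 5, x );
%    A * x'    % expected: ( -1, -1, -1, 0, 1 )'
%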
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 23 October 2007
%
% Author:
%
% John Burkardt
%
% Reference:
%
% Gene Golub, Charles Van Loan,
% Matrix Computations, second edition,
% Johns Hopkins University Press, Baltimore, Maryland, 1989.
%
% Pete Stewart,
% Introduction to Matrix Computations,
% Academic Press, 1973.
%
% James Wilkinson,
% The Algebraic Eigenvalue Problem,
% Oxford University Press, 1965.
%
% Parameters:
%
% Input, integer N, the order of A.
%
% Input, real X(N), the vector that defines the
% Householder matrix.
%
% Output, real A(N,N), the matrix.
%
  a = zeros ( n, n );

  for i = 1 : n
    a(i,i) = 1.0;
  end

  xdot = x(1:n) * x(1:n)';

  if ( 0.0 < xdot )
    for i = 1 : n
      for j = 1 : n
        a(i,j) = a(i,j) - 2.0 * x(i) * x(j) / xdot;
      end
    end
  end

  return
end
(* Property from Case-Analysis for Rippling and Inductive Proof,
Moa Johansson, Lucas Dixon and Alan Bundy, ITP 2010.
This Isabelle theory is produced using the TIP tool offered at the following website:
https://github.com/tip-org/tools
This file was originally provided as part of TIP benchmark at the following website:
https://github.com/tip-org/benchmarks
Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
to make it compatible with Isabelle2017.*)
theory TIP_prop_34
imports "../../Test_Base"
begin
datatype Nat = Z | S "Nat"
fun x :: "Nat => Nat => bool" where
"x (Z) (Z) = True"
| "x (Z) (S z2) = False"
| "x (S x2) (Z) = False"
| "x (S x2) (S y2) = x x2 y2"
fun min :: "Nat => Nat => Nat" where
"min (Z) z = Z"
| "min (S z2) (Z) = Z"
| "min (S z2) (S y1) = S (min z2 y1)"
fun t2 :: "Nat => Nat => bool" where
"t2 (Z) z = True"
| "t2 (S z2) (Z) = False"
| "t2 (S z2) (S x2) = t2 z2 x2"
theorem property0 :
"((x (min a b) b) = (t2 b a))"
oops
end
import numpy as np
from tabulate import tabulate
import sympy
import scipy.integrate as integrate
from mpmath import chebyt, chop, taylor
x = sympy.symbols('x')
def printMatrix(A, B):
    selected = None
    for row in range(len(B)):
        print("(", end='')
        for col in range(len(A[row])):
            print("\t{1:10.2f}{0}".format(" " if (selected is None
                  or selected != (row, col)) else "*", A[row][col]), end='')
        print("\t) * (\tX{0}) = (\t{1:10.2f})".format(row + 1, B[row][0]))

# matrix norm with p = infinity (maximum absolute row sum)
def infnorm(A):
    return max(np.sum(np.abs(A), axis=1))

# inner product (L w_j, w_i)
def scalar(i, j):
    x = sympy.symbols('x')
    wj = sympy.jacobi(j, 1, 1, x) * (1 - np.power(x, 2))
    d = sympy.lambdify(x, sympy.diff(wj, x, 1))
    d2 = sympy.lambdify(x, sympy.diff(wj, x, 2))
    l = integrate.quad(lambda x: (-1 * d2(x) / (x - 3) + (1 + x / 2) * d(x)
        + sympy.exp(x / 2) * sympy.jacobi(j, 1, 1, x) * (1 - np.power(x, 2))) * sympy.jacobi(i, 1, 1, x) * (1 - np.power(x, 2)), -1, 1)[0]
    return l

def Lu(wj):
    x = sympy.symbols('x')
    d = sympy.lambdify(x, sympy.diff(wj, x, 1))
    d2 = sympy.lambdify(x, sympy.diff(wj, x, 2))
    return -1 * d2(x) / (x - 3) + (1 + x / 2) * d(x) + sympy.exp(x / 2) * wj

def f(x):
    return 2 - x

def solution(x0, C):  # assemble the approximate solution
    result = 0
    for i in range(len(C)):
        result += C[i] * sympy.lambdify(x, w[i])(x0)
    return result

# print results for several values of n
def printresult(Cond):
    headers = ["n", "mu(A)", "y^n(-0.5)", "y^n(0)", "y^n(0.5)", "y*(x) - y^n(x)"]
    print(tabulate(Cond, headers, tablefmt='grid'))

# print the result for a single n
def printSolon(A, B, mu, C, n):
    print("Augmented matrix:")
    printMatrix(A, B)
    print("Condition number of the matrix A:")
    print(mu)
    print("Expansion coefficients C:")
    print(C)

# Galerkin method
def galerkin(n, w):
    A = np.eye(n)
    B = np.ones((n, 1))
    for i in range(n):
        B[i] = integrate.quad(lambda x: (2 - x) * sympy.jacobi(i, 1, 1, x) * (1 - np.power(x, 2)), -1, 1)[0]
        for j in range(n):
            A[i][j] = scalar(i, j)
    C = np.linalg.solve(A, B)
    mu = infnorm(A) * infnorm(np.linalg.inv(A))
    return A, B, C, mu

# Collocation method
def collocation(n, w):
    t = sorted(
        np.squeeze(np.asarray(np.roots(chop(taylor(lambda x: chebyt(n, x), 0, n))[::-1]))))  # roots of the Chebyshev polynomial
    A = np.eye(n)
    B = np.ones((n, 1))
    for i in range(n):
        B[i] = f(t[i])
        for j in range(n):
            A[i][j] = sympy.lambdify(x, Lu(w[j]))(t[i])
    C = np.linalg.solve(A, B)
    mu = infnorm(A) * infnorm(np.linalg.inv(A))
    return A, B, C, mu

print("Projection methods for solving a boundary value problem for a second-order ordinary differential equation")
print("Variant 3")
v = int(input("Enter the number of coordinate functions, or press 0 to keep the values from 3 to 10: "))
CondGalerkin = []
CondColloc = []
if v == 0:
    for n in range(3, 11):
        w = []  # build the family of orthogonal functions (here: Jacobi polynomials)
        for i in range(n):
            w.append(sympy.jacobi(i, 1, 1, x) * (1 - np.power(x, 2)))
        A, B, C, mu = galerkin(n, w)
        A1, B1, C1, mu1 = collocation(n, w)
        CondGalerkin.append([n, mu, solution(-0.5, C), solution(0, C), solution(0.5, C), np.abs(solution(-0.5, C) - solution(-0.5, C1))])
        CondColloc.append([n, mu1, solution(-0.5, C1), solution(0, C1), solution(0.5, C1), np.abs(solution(-0.5, C) - solution(-0.5, C1))])
    print("Galerkin method:")
    printresult(CondGalerkin)
    print("Collocation method:")
    printresult(CondColloc)
else:
    n = v
    w = []
    for i in range(n):
        w.append(sympy.jacobi(i, 1, 1, x) * (1 - np.power(x, 2)))
    A, B, C, mu = galerkin(n, w)
    A1, B1, C1, mu1 = collocation(n, w)
    print("Galerkin method:")
    printSolon(A, B, mu, C, n)
    print("Collocation method:")
    printSolon(A1, B1, mu1, C1, n)
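# Usage note (a sketch of the expected behavior, inferred from the code above):
# entering 0 at the prompt prints, for n = 3..10 and for both methods, the
# condition number mu(A), the approximate solution y^n(x) at x = -0.5, 0, 0.5,
# and the discrepancy between the two methods' solutions at x = -0.5.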
\chapter{\label{common-top}Common knowledge, part 2 (topology)}
In this chapter I describe the basics of the theory known as \emph{general
topology}. Starting with the next chapter I will describe generalizations
of the customary objects of general topology described in this chapter.
The reason why I've written this chapter is to show the reader the
kinds of objects which I generalize below in this book. For example,
funcoids are a generalization of proximity spaces, and funcoids are
a generalization of pretopologies. To understand the intuitive meaning
of funcoids one first needs to know what proximities and what
pretopologies are.
Having said that, customary topology is \emph{not} used in my definitions
and proofs below. It is just to feed your intuition.
\section{Metric spaces}
The theory of topological spaces, started immediately with the definition,
would be completely non-intuitive for the reader. It is the reason
why I first describe metric spaces and show that metric spaces give
rise to a topology (see below). Topological spaces are then understandable
as a generalization of the topologies induced by metric spaces.
\emph{Metric spaces} are a formal way to express the notion of \emph{distance}.
For example, there is the distance $|x-y|$ between real numbers $x$
and $y$, the distance between points of a plane, etc.
\begin{defn}
\index{space!metric}\index{distance}A \emph{metric space} is a set
$U$ together with a function $d:U\times U\rightarrow\mathbb{R}$
(\emph{distance} or \emph{metric}) such that for every $x,y,z\in U$:
\begin{enumerate}
\item $d(x,y)\ge0$;
\item $d(x,y)=0\Leftrightarrow x=y$;
\item $d(x,y)=d(y,x)$ (\emph{symmetry});
\item \index{inequality!triangle}$d(x,z)\le d(x,y)+d(y,z)$ (\emph{triangle
inequality}).
\end{enumerate}
\end{defn}
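A simple example to feed intuition: the \emph{discrete metric} on an
arbitrary set $U$, defined by $d(x,y)=0$ if $x=y$ and $d(x,y)=1$ otherwise,
satisfies all four axioms; for the triangle inequality note that its right
side can be zero only when $x=y=z$.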
\begin{xca}
Show that the Euclidean space $\mathbb{R}^{n}$ (with the standard distance)
is a metric space for every $n\in\mathbb{N}$.\end{xca}
\begin{defn}
\index{ball!open}The \emph{open ball} of \emph{radius} $r>0$ centered
at a point $a\in U$ is the set
\[
B_{r}(a)=\setcond{x\in U}{d(a,x)<r}.
\]
\end{defn}
\begin{defn}
\index{ball!closed}The \emph{closed ball} of \emph{radius} $r>0$ centered
at a point $a\in U$ is the set
\[
B_{r}[a]=\setcond{x\in U}{d(a,x)\le r}.
\]
\end{defn}
One example of the use of metric spaces: the \emph{limit} of a sequence~$x$ in a metric space
can be defined as a point~$y$ in this space such that
\[ \forall \epsilon > 0 \exists N\in\mathbb{N} \forall n>N: d(x_n,y) < \epsilon. \]
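For example, in $\mathbb{R}$ with the standard distance, the sequence
$x_n=1/n$ has the limit $0$: given $\epsilon>0$, take any $N>1/\epsilon$;
then $d(x_n,0)=1/n<\epsilon$ for every $n>N$.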
\subsection{Open and closed sets}
\begin{defn}
\index{set!open!in metric space}A set $A$ in a metric space is called
\emph{open} when $\forall a\in A\exists r>0:B_{r}(a)\subseteq A$.
\end{defn}
\begin{defn}
\index{set!closed!in metric space}A set $A$ in a metric space is
closed when its complement $U\setminus A$ is open.
\end{defn}
\begin{xca}
Show that closed intervals on the real line are closed sets and open intervals are open sets.
\end{xca}
\begin{xca}
Show that open balls are open and closed balls are closed.
\end{xca}
\begin{defn}
\index{closure!in metric space}Closure $\cl(A)$ of a set $A$ in
a metric space is the set of points $y$ such that
\[
\forall\epsilon>0\exists a\in A:d(y,a)<\epsilon.
\]
\end{defn}
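For example, in $\mathbb{R}$ with the standard distance $\cl((0,1))=[0,1]$:
there are points of $(0,1)$ arbitrarily close to $0$ and to $1$, while every
point outside $[0,1]$ is at a positive distance from the whole interval.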
\begin{prop}
$\cl(A)\supseteq A$.\end{prop}
\begin{proof}
It follows from $d(a,a)=0<\epsilon$.\end{proof}
\begin{xca}
Prove $\cl(A\cup B)=\cl(A)\cup\cl(B)$ for every subsets $A$ and
$B$ of a metric space.
\end{xca}
\section{Pretopological spaces}
A \emph{pretopological space} can be defined in two equivalent ways:
by a \emph{neighborhood system} or by a \emph{preclosure operator}. To be
more clear, I will call only the first (the neighborhood system) a
\emph{pretopological space}, and call the second a \emph{preclosure space}.
\begin{defn}
\index{space!pre-topological}\index{pretopology}\emph{Pretopological
space} is a set $U$ together with a filter $\Delta(x)$ on \emph{$U$}
for every $x\in U$, such that $\uparrow^{U}\{x\}\sqsubseteq\Delta(x)$.
$\Delta$~is called a \emph{pretopology} on $U$.
Elements of~$\up\Delta(x)$ are called \emph{neighborhoods} of point~$x$.
\end{defn}
\begin{defn}
\index{preclosure}\emph{Preclosure} on a set $U$ is a unary operation
$\cl$ on $\subsets U$ such that for every $A,B\in\subsets U$:
\begin{enumerate}
\item $\cl(\emptyset)=\emptyset$;
\item $\cl(A)\supseteq A$;
\item $\cl(A\cup B)=\cl(A)\cup\cl(B)$.
\end{enumerate}
\index{space!preclosure}I call a preclosure together with a set $U$
a \emph{preclosure space}.
\end{defn}
\begin{thm}
\label{pretop-bij}Small pretopological spaces and small preclosure
spaces bijectively correspond to each other by the formulas:
\begin{gather}
\cl(A)=\setcond{x\in U}{A\in\corestar\Delta(x)};\label{pt-cl}\\
\up\Delta(x)=\setcond{A\in\subsets U}{x\notin\cl(U\setminus A)}.\label{pt-neigh}
\end{gather}
\end{thm}
\begin{proof}
First let's prove that $\cl$ defined by formula (\ref{pt-cl}) is
really a preclosure.
$\cl(\emptyset)=\emptyset$ is obvious. If $x\in A$ then $A\in\corestar\Delta(x)$
and so $\cl(A)\supseteq A$. $\cl(A\cup B)=\setcond{x\in U}{A\cup B\in\corestar\Delta(x)}=\setcond{x\in U}{A\in\corestar\Delta(x)\lor B\in\corestar\Delta(x)}=\cl(A)\cup\cl(B)$.
So, it is really a preclosure.
Next let's prove that $\Delta$ defined by formula (\ref{pt-neigh})
is a pretopology. That $\up\Delta(x)$ is an upper set is obvious.
Let $A,B\in\up\Delta(x)$. Then $x\notin\cl(U\setminus A)\land x\notin\cl(U\setminus B)$;
$x\notin\cl(U\setminus A)\cup\cl(U\setminus B)=\cl((U\setminus A)\cup(U\setminus B))=\cl(U\setminus(A\cap B))$;
$A\cap B\in\up\Delta(x)$. We have proved that $\Delta(x)$ is a filter
object.
Let's prove $\uparrow^{U}\{x\}\sqsubseteq\Delta(x)$. If $A\in\up\Delta(x)$
then $x\notin\cl(U\setminus A)$ and consequently $x\notin U\setminus A$;
$x\in A$; $A\in\up\uparrow^{U}\{x\}$. So $\uparrow^{U}\{x\}\sqsubseteq\Delta(x)$
and thus $\Delta$ is a pretopology.
It is left to prove that the functions defined by the above formulas
are mutually inverse.
Let $\cl_{0}$ be a preclosure, let $\Delta$ be the pretopology induced
by $\cl_{0}$ by the formula (\ref{pt-neigh}), let $\cl_{1}$ be
the preclosure induced by $\Delta$ by the formula (\ref{pt-cl}).
Let's prove $\cl_{1}=\cl_{0}$. Really,
\begin{align*}
x\in\cl_{1}(A) & \Leftrightarrow\\
\Delta(x)\nasymp\uparrow^{U}A & \Leftrightarrow\\
\forall X\in\up\Delta(x):X\cap A\ne\emptyset & \Leftrightarrow\\
\forall X\in\subsets U:(x\notin\cl_{0}(U\setminus X)\Rightarrow X\cap A\ne\emptyset) & \Leftrightarrow\\
\forall X'\in\subsets U:(x\notin\cl_{0}(X')\Rightarrow A\setminus X'\ne\emptyset) & \Leftrightarrow\\
\forall X'\in\subsets U:(A\setminus X'=\emptyset\Rightarrow x\in\cl_{0}(X')) & \Leftrightarrow\\
\forall X'\in\subsets U:(A\subseteq X'\Rightarrow x\in\cl_{0}(X')) & \Leftrightarrow\\
x\in\cl_{0}(A).
\end{align*}
So $\cl_{1}(A)=\cl_{0}(A)$.
Let now $\Delta_{0}$ be a pretopology, let $\cl$ be the closure
induced by $\Delta_{0}$ by the formula (\ref{pt-cl}), let $\Delta_{1}$
be the pretopology induced by $\cl$ by the formula (\ref{pt-neigh}).
Really
\begin{align*}
A\in\up\Delta_{1}(x) & \Leftrightarrow\\
x\notin\cl(U\setminus A) & \Leftrightarrow\\
\Delta_{0}(x)\asymp\uparrow^{U}(U\setminus A) & \Leftrightarrow\text{(proposition \ref{bool-compl})}\\
\uparrow^{U}A\sqsupseteq\Delta_{0}(x) & \Leftrightarrow\\
A\in\up\Delta_{0}(x).
\end{align*}
So $\Delta_{1}(x)=\Delta_{0}(x)$.
That these functions are mutually inverse, is now proved.
\end{proof}
\subsection{Pretopology induced by a metric}
\index{pre-topology!induced by metric}Every metric space induces
a pretopology by the formula:
\[
\Delta(x)=\bigsqcap^{\mathscr{F}U}\setcond{B_{r}(x)}{r\in\mathbb{R},r>0}.
\]
\begin{xca}
Show that it is a pretopology.\end{xca}
\begin{prop}
The preclosure corresponding to this pretopology is the same as the
preclosure of the metric space.\end{prop}
\begin{proof}
I denote the preclosure of the metric space as $\cl_{M}$ and the
preclosure corresponding to our pretopology as $\cl_{P}$. We need
to show $\cl_{P}=\cl_{M}$. Really:
\begin{align*}
\cl_{P}(A) & =\\
\setcond{x\in U}{A\in\corestar\Delta(x)} & =\\
\setcond{x\in U}{\forall\epsilon>0:B_{\epsilon}(x)\nasymp A} & =\\
\setcond{y\in U}{\forall\epsilon>0\exists a\in A:d(y,a)<\epsilon} & =\\
\cl_{M}(A)
\end{align*}
for every set $A\in\subsets U$.
\end{proof}
\section{\label{sec-top}Topological spaces}
\begin{prop}
For the set of open sets of a metric space $(U,d)$ it holds:
\begin{enumerate}
\item Union of any (possibly infinite) number of open sets is an open set.
\item Intersection of a finite number of open sets is an open set.
\item $U$ is an open set.
\end{enumerate}
\end{prop}
\begin{proof}
Let $S$ be a set of open sets. Let $a\in\bigcup S$. Then there exists
$A\in S$ such that $a\in A$. Because $A$ is open we have $B_{r}(a)\subseteq A$
for some $r>0$. Consequently $B_{r}(a)\subseteq\bigcup S$ that is
$\bigcup S$ is open.
Let $A_{0},\dots,A_{n}$ be open sets. Let $a\in A_{0}\cap\dots\cap A_{n}$
for some $n\in\mathbb{N}$. Then there exist $r_{i}$ such that $B_{r_{i}}(a)\subseteq A_{i}$.
So $B_{r}(a)\subseteq A_{0}\cap\dots\cap A_{n}$ for $r=\min\{r_{0},\dots,r_{n}\}$
that is $A_{0}\cap\dots\cap A_{n}$ is open.
That $U$ is an open set is obvious.
\end{proof}
The above proposition suggests the following definition:
\begin{defn}
\index{topology}A \emph{topology} on a set $U$ is a collection~$\mathcal{O}$
(called the set of \emph{open sets}) of subsets of~$U$ such that:\index{set!open}
\begin{enumerate}
\item Union of any (possibly infinite) number of open sets is an open set.
\item Intersection of a finite number of open sets is an open set.
\item $U$ is an open set.
\end{enumerate}
\index{space!topological}The pair $(U,\mathcal{O})$ is called a
\emph{topological space}.\end{defn}
\begin{rem}
From the above it is clear that every metric induces a topology.\end{rem}
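Two degenerate examples of topologies on an arbitrary set $U$: the
\emph{discrete} topology $\mathcal{O}=\subsets U$ and the \emph{indiscrete}
topology $\mathcal{O}=\{\emptyset,U\}$; both obviously satisfy the above
axioms.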
\begin{prop}
Empty set is always open.\end{prop}
\begin{proof}
Empty set is union of an empty set.\end{proof}
\begin{defn}
\index{set!closed}A \emph{closed set} is a complement of an open
set.
\end{defn}
A topology can be equivalently expressed in terms of closed sets:
A \emph{topology} on a set $U$ is a collection (called the set of
\emph{closed sets}) of subsets of $U$ such that:
\begin{enumerate}
\item Intersection of any (possibly infinite) number of closed sets is a
closed set.
\item Union of a finite number of closed sets is a closed set.
\item $\emptyset$ is a closed set.\end{enumerate}
\begin{xca}
Show that the definitions using open and closed sets are equivalent.
\end{xca}
\subsection{Relationships between pretopologies and topologies}
\subsubsection{Topological space induced by preclosure space}
\index{space!topological!induced by preclosure}Having a preclosure
space $(U,\cl)$ we define a topological space whose closed sets are
such sets $A\in\subsets U$ that $\cl(A)=A$.
\begin{prop}
This really defines a topology.\end{prop}
\begin{proof}
Let $S$ be a set of closed sets. First, we need to prove that $\bigcap S$
is a closed set. We have $\cl\left(\bigcap S\right)\subseteq A$ for
every $A\in S$. Thus $\cl\left(\bigcap S\right)\subseteq\bigcap S$
and consequently $\cl\left(\bigcap S\right)=\bigcap S$. So $\bigcap S$
is a closed set.
Let now $A_{0},\dots,A_{n}$ be closed sets, then
\[
\cl(A_{0}\cup\dots\cup A_{n})=\cl(A_{0})\cup\dots\cup\cl(A_{n})=A_{0}\cup\dots\cup A_{n}
\]
that is $A_{0}\cup\dots\cup A_{n}$ is a closed set.
That $\emptyset$ is a closed set is obvious.
\end{proof}
Having a pretopological space $(U,\Delta)$ we define a topological
space whose open sets are
\[
\setcond{X\in\subsets U}{\forall x\in X:X\in\up\Delta(x)}.
\]
\begin{prop}
This really defines a topology.\end{prop}
\begin{proof}
Let set $S\subseteq\setcond{X\in\subsets U}{\forall x\in X:X\in\up\Delta(x)}$.
Then $\forall X\in S\forall x\in X:X\in\up\Delta(x)$. Thus
\[
\forall x\in\bigcup S\exists X\in S:X\in\up\Delta(x)
\]
and so $\forall x\in\bigcup S:\bigcup S\in\up\Delta(x)$. So $\bigcup S$
is an open set.
Let now $A_{0},\dots,A_{n}\in\setcond{X\in\subsets U}{\forall x\in X:X\in\up\Delta(x)}$
for $n\in\mathbb{N}$. Then $\forall x\in A_{i}:A_{i}\in\up\Delta(x)$
and so
\[
\forall x\in A_{0}\cap\dots\cap A_{n}:A_{i}\in\up\Delta(x);
\]
thus $\forall x\in A_{0}\cap\dots\cap A_{n}:A_{0}\cap\dots\cap A_{n}\in\up\Delta(x)$.
So $A_{0}\cap\dots\cap A_{n}\in\setcond{X\in\subsets U}{\forall x\in X:X\in\up\Delta(x)}$.
That $U$ is an open set is obvious.\end{proof}
\begin{prop}\label{top-two}
Topology $\tau$ defined by a pretopology and topology $\rho$ defined
by the corresponding preclosure, are the same.\end{prop}
\begin{proof}
Let $A\in\subsets U$.
$A\text{ is \ensuremath{\rho}-closed}\Leftrightarrow\cl(A)=A\Leftrightarrow\cl(A)\subseteq A\Leftrightarrow\forall x\in U:(A\in\corestar\Delta(x)\Rightarrow x\in A)$;
\begin{align*}
A\text{ is \ensuremath{\tau}-open} & \Leftrightarrow\\
\forall x\in A:A\in\up\Delta(x) & \Leftrightarrow\\
\forall x\in U:(x\in A\Rightarrow A\in\up\Delta(x)) & \Leftrightarrow\\
\forall x\in U:(x\notin U\setminus A\Rightarrow U\setminus A\notin\corestar\Delta(x)).
\end{align*}
So $\rho$-closed and $\tau$-open sets are complements of each other. It
follows $\rho=\tau$.
\end{proof}
\subsubsection{Preclosure space induced by topological space}
\index{space!preclosure!induced by topology}We define a preclosure
and a pretopology induced by a topology and then show these two are
equivalent.
Having a topological space we define a preclosure space by the formula
\[
\cl(A)=\bigcap\setcond{X\in\subsets U}{X\text{ is a closed set},X\supseteq A}.
\]
\begin{prop}
It is really a preclosure.\end{prop}
\begin{proof}
$\cl(\emptyset)=\emptyset$ because $\emptyset$ is a closed set.
$\cl(A)\supseteq A$ is obvious.
\begin{align*}
\cl(A\cup B) & =\\
\bigcap\setcond{X\in\subsets U}{X\text{ is a closed set},X\supseteq A\cup B} & =\\
\bigcap\setcond{X_{1}\cup X_{2}}{X_{1},X_{2}\in\subsets U\text{ are closed sets},X_{1}\supseteq A,X_{2}\supseteq B} & =\\
\bigcap\setcond{X_{1}\in\subsets U}{X_{1}\text{ is a closed set},X_{1}\supseteq A}\cup\bigcap\setcond{X_{2}\in\subsets U}{X_{2}\text{ is a closed set},X_{2}\supseteq B} & =\\
\cl(A)\cup\cl(B).
\end{align*}
Thus $\cl$ is a preclosure.
\end{proof}
Alternatively: $\Delta(x)=\bigsqcap^{\mathscr{F}U}\setcond{X\in\mathcal{O}}{x\in X}$.
It is trivially a pretopology (using the fact that $U\in\mathcal{O}$).
\begin{prop}
The preclosure and the pretopology defined in this section above correspond
to each other (by the formulas from theorem \ref{pretop-bij}).\end{prop}
\begin{proof}
We need to prove $\cl(A)=\setcond{x\in U}{\Delta(x)\nasymp\uparrow^{U}A}$,
that is
\[
\bigcap\setcond{X\in\subsets U}{X\text{ is a closed set},X\supseteq A}=\setcond{x\in U}{\bigsqcap^{\mathscr{F}U}\setcond{X\in\mathcal{O}}{x\in X}\nasymp\uparrow^{U}A}.
\]
Equivalently transforming it, we get:
\begin{align*}
\bigcap\setcond{X\in\subsets U}{X\text{ is a closed set},X\supseteq A} & =\setcond{x\in U}{\forall X\in\mathcal{O}:(x\in X\Rightarrow\uparrow^{U}X\nasymp\uparrow^{U}A)};\\
\bigcap\setcond{X\in\subsets U}{X\text{ is a closed set},X\supseteq A} & =\setcond{x\in U}{\forall X\in\mathcal{O}:(x\in X\Rightarrow X\nasymp A)}.
\end{align*}
We have
\begin{align*}
x\in\bigcap\setcond{X\in\subsets U}{X\text{ is a closed set},X\supseteq A} & \Leftrightarrow\\
\forall X\in\subsets U:(X\text{ is a closed set}\land X\supseteq A\Rightarrow x\in X) & \Leftrightarrow\\
\forall X'\in\mathcal{O}:(U\setminus X'\supseteq A\Rightarrow x\in U\setminus X') & \Leftrightarrow\\
\forall X'\in\mathcal{O}:(X'\asymp A\Rightarrow x\notin X') & \Leftrightarrow\\
\forall X\in\mathcal{O}:(x\in X\Rightarrow X\nasymp A).
\end{align*}
So our equivalence holds.\end{proof}
\begin{prop}
If $\tau$ is the topology induced by pretopology $\pi$, in turn
induced by topology $\rho$, then $\tau=\rho$.\end{prop}
\begin{proof}
The set of closed sets of $\tau$ is
\begin{align*}
\setcond{A\in\subsets U}{\cl_{\pi}(A)=A} & =\\
\setcond{A\in\subsets U}{\bigcap\setcond{X\in\subsets U}{X\text{ is a closed set in }\rho,X\supseteq A}=A} & =\\
\setcond{A\in\subsets U}{A\text{ is a closed set in }\rho}
\end{align*}
(taken into account that intersecting closed sets is a closed set).\end{proof}
\begin{defn}
\index{closure!Kuratowski}Idempotent closures are called \emph{Kuratowski
closures}.\end{defn}
\begin{thm}
The above defined correspondence between topologies and pretopologies,
restricted to Kuratowski closures, is a bijection.\end{thm}
\begin{proof}
Taking into account the above proposition, it's enough to prove that:
If $\tau$ is the preclosure induced by topology $\pi$, in turn
induced by a Kuratowski closure $\rho$, then $\tau=\rho$.
\begin{align*}
\cl_{\tau}(A) & =\\
\bigcap\setcond{X\in\subsets U}{X\text{ is a closed set in }\pi,X\supseteq A} & =\\
\bigcap\setcond{X\in\subsets U}{\cl_{\rho}(X)=X,X\supseteq A} & =\\
\bigcap\setcond{\cl_{\rho}(X)}{X\in\subsets U,\cl_{\rho}(X)=X,X\supseteq\cl_{\rho}(A)} & =\\
\bigcap\setcond{\cl_{\rho}(\cl_{\rho}(X))}{X=A} & =\\
\cl_{\rho}(\cl_{\rho}(A)) & =\\
\cl_{\rho}(A).
\end{align*}
\end{proof}
\subsubsection{Topology induced by a metric}
\begin{defn}
Every metric space induces a topology in this way: A set $X$ is open
iff
\[
\forall x\in X\exists\epsilon>0:B_{\epsilon}(x)\subseteq X.
\]
\end{defn}
\begin{xca}
Prove that it is really a topology and that this topology is the same as
the topology induced by the pretopology, in turn induced by our metric
space.
\end{xca}
\section{\label{sec-prox}Proximity spaces}
Let $(U,d)$ be a metric space. We define the \emph{distance} between
sets $A,B\in\subsets U$ by the formula
\[
d(A,B)=\inf\setcond{d(a,b)}{a\in A,b\in B}.
\]
(Here ``$\inf$'' denotes infimum on the real line.)
\begin{defn}
Sets $A,B\in\subsets U$ are \emph{near} (denoted $A\mathrel\delta B$)
iff $d(A,B)=0$.
\end{defn}
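For example, on the real line the disjoint intervals $(0,1)$ and $(1,2)$
are near, since $d((0,1),(1,2))=0$. So nearness is strictly weaker than
having a common point.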
$\delta$ defined in this way (for a metric space) is an example of
proximity as defined below.
\begin{defn}
\label{prox}\index{proximity}\index{space!proximity}A \emph{proximity
space} is a set $(U,\delta)$ conforming to the following axioms (for
every $A,B,C\in\subsets U$):
\begin{enumerate}
\item $A\cap B\ne\emptyset\Rightarrow A\mathrel\delta B$;
\item if $A\mathrel\delta B$ then $A\ne\emptyset$ and $B\ne\emptyset$;
\item $A\mathrel\delta B\Rightarrow B\mathrel\delta A$ (\emph{symmetry});
\item $(A\cup B)\mathrel\delta C\Leftrightarrow A\mathrel\delta C\lor B\mathrel\delta C$;
\item $C\mathrel\delta(A\cup B)\Leftrightarrow C\mathrel\delta A\lor C\mathrel\delta B$;
\item \label{prox-last}$A\mathrel{\bar{\delta}}B$ implies existence of
$P,Q\in\subsets U$ with $A\mathrel{\bar{\delta}}P$, $B\mathrel{\bar{\delta}}Q$
and $P\cup Q=U$.
\end{enumerate}
\end{defn}
\begin{xca}
Show that proximity generated by a metric space is really a proximity
(conforms to the above axioms).\end{xca}
\begin{defn}
\index{quasi-proximity}\emph{Quasi-proximity} is defined as the above
but without the symmetry axiom.
\end{defn}
\begin{defn}
Closure is generated by a proximity by the following formula:
\[
\cl(A)=\setcond{a\in U}{\{a\}\mathrel\delta A}.
\]
\end{defn}
\begin{prop}
Every closure generated by a proximity is a Kuratowski closure.\end{prop}
\begin{proof}
First prove it is a preclosure. $\cl(\emptyset)=\emptyset$ is obvious.
$\cl(A)\supseteq A$ is obvious.
\begin{align*}
\cl(A\cup B) & =\\
\setcond{a\in U}{\{a\}\mathrel\delta A\cup B} & =\\
\setcond{a\in U}{\{a\}\mathrel\delta A\lor\{a\}\mathrel\delta B} & =\\
\setcond{a\in U}{\{a\}\mathrel\delta A}\cup\setcond{a\in U}{\{a\}\mathrel\delta B} & =\\
\cl(A)\cup\cl(B).
\end{align*}
It remains to prove that $\cl$ is idempotent, that is $\cl(\cl(A))=\cl(A)$.
It is enough to show $\cl(\cl(A))\subseteq\cl(A)$, that is, that if $x\notin\cl(A)$
then $x\notin\cl(\cl(A))$.
If $x\notin\cl(A)$ then $\{x\}\mathrel{\bar{\delta}}A$. So there
are $P,Q\in\subsets U$ such that $\{x\}\mathrel{\bar{\delta}}P$,
$A\mathrel{\bar{\delta}}Q$, $P\cup Q=U$. Then $U\setminus Q\subseteq P$,
so $\{x\}\mathrel{\bar{\delta}}U\setminus Q$ and hence $x\in Q$.
Hence $U\setminus\cl(A)\subseteq Q$, and so $\cl(A)\subseteq U\setminus Q\subseteq P$.
Consequently $\{x\}\mathrel{\bar{\delta}}\cl(A)$ and hence $x\notin\cl(\cl(A))$.
\end{proof}
\section{Definition of uniform spaces}
Here I will present the traditional definition of uniform spaces.
Below in the chapter about reloids I will present a shortened and
more algebraic (however a little less elementary) definition of uniform
spaces.
\begin{defn}
A \emph{uniform space} is a pair $(U,D)$ of a set~$U$ and a filter~$D\in\mathfrak{F}(U\times U)$
(called the \emph{uniformity} or the set of \emph{entourages}) such that:
\begin{enumerate}
\item If $F\in D$ then $\id_{U}\subseteq F$.
\item If $F\in D$ then there exists $G\in D$ such that $G\circ G\subseteq F$.
\item If $F\in D$ then $F^{-1}\in D$.\end{enumerate}
\end{defn}
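For example, every metric space $(U,d)$ induces a uniform space: take $D$
to be the filter on $U\times U$ generated by the sets
$\setcond{(x,y)\in U\times U}{d(x,y)<\epsilon}$ for $\epsilon>0$. The first
axiom holds because $d(x,x)=0$, the third by symmetry of $d$, and the second
follows from the triangle inequality by taking for $G$ the entourage
corresponding to $\epsilon/2$.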
import tactic.tauto
@[derive decidable_eq]
inductive mynat
| zero : mynat
| succ (n : mynat) : mynat
namespace mynat
instance : has_zero mynat := ⟨mynat.zero⟩
theorem mynat_zero_eq_zero : mynat.zero = 0 := rfl
def one : mynat := succ 0
instance : has_one mynat := ⟨mynat.one⟩
theorem one_eq_succ_zero : 1 = succ 0 := rfl
lemma zero_ne_succ (m : mynat) : (0 : mynat) ≠ succ m := λ h, by cases h
lemma succ_inj {m n : mynat} (h : succ m = succ n) : m = n := by cases h; refl
end mynat
attribute [symm] ne.symm
namespace mynat
-- definition of "addition on the natural numbers"
def add : mynat → mynat → mynat
| m 0 := m
| m (succ n) := succ (add m n)
instance : has_add mynat := ⟨mynat.add⟩
-- numerals now work
example : mynat := 37
lemma add_zero (m : mynat) : m + 0 = m := rfl
lemma add_succ (m n : mynat) : m + succ n = succ (m + n) := rfl
-- end of definition of "addition on the natural numbers"
end mynat
namespace mynat
def mul : mynat → mynat → mynat
| m zero := zero
| m (succ n) := mul m n + m
instance : has_mul mynat := ⟨mul⟩
-- notation a * b := mul a b
example : (1 : mynat) * 1 = 1 :=
begin
refl
end
lemma mul_zero (m : mynat) : m * 0 = 0 := rfl
lemma mul_succ (m n : mynat) : m * (succ n) = m * n + m := rfl
def pow : mynat → mynat → mynat
| m zero := one
| m (succ n) := pow m n * m
instance : has_pow mynat mynat := ⟨pow⟩
-- notation a ^ b := pow a b
example : (1 : mynat) ^ (1 : mynat) = 1 :=
begin
refl
end
lemma pow_zero (m : mynat) : m ^ (0 : mynat) = 1 := rfl
lemma pow_succ (m n : mynat) : m ^ (succ n) = m ^ n * m := rfl
end mynat
------------------------------------------------------------------------
namespace mynat
lemma example1 (x y z : mynat) : x + y + z = x + y + z :=
begin
refl,
end
lemma example1' (x y z : mynat) : x + y + z = x + y + z := rfl
lemma example2 (x y : mynat) (h : y = x + 7) : 2 * y = 2 * (x + 7) := by rw h
lemma example2' (x y : mynat) (h : y = x + 7) : 2 * y = 2 * (x + 7) :=
begin
rw h,
end
lemma example2'' (x y : mynat) (h : y = x + 7) : 2 * y = 2 * (x + 7) :=
have h_two_mul : 2 * y = 2 * y, from rfl,
show 2 * y = 2 * (x + 7), from (by rw h)
lemma example3 (a b : mynat) (h : succ a = b) : succ(succ(a)) = succ(b) :=
begin
rw h,
end
lemma zero_add (n : mynat) : 0 + n = n :=
begin
induction n,
{
rw mynat_zero_eq_zero,
rw add_zero,
},
{
rw add_succ,
rw n_ih,
}
end
lemma add_assoc (a b c : mynat) : (a + b) + c = a + (b + c) :=
begin
induction c,
{
rw mynat_zero_eq_zero,
rw add_zero (a + b),
rw add_zero b,
},
{
rw add_succ (a + b),
rw add_succ b,
rw add_succ a,
rw c_ih,
}
end
lemma succ_add (a b : mynat) : succ a + b = succ (a + b) :=
begin
induction b,
{
rw mynat_zero_eq_zero,
rw add_zero a,
rw add_zero (succ a),
},
{
rw add_succ a,
rw add_succ (succ a),
rw b_ih,
}
end
lemma add_comm (a b : mynat) : a + b = b + a :=
begin
induction b,
{
rw mynat_zero_eq_zero,
rw add_zero a,
induction a,
{
rw mynat_zero_eq_zero,
rw add_zero 0,
},
{
rw add_succ 0,
rw ← a_ih,
}
},
{
rw succ_add b_n,
rw add_succ a,
rw b_ih,
}
end
theorem succ_eq_add_one (n : mynat) : succ n = n + 1 :=
begin
rw one_eq_succ_zero,
rw add_succ n,
rw add_zero n,
end
lemma add_right_comm (a b c : mynat) : a + b + c = a + c + b :=
begin
induction c,
{
rw mynat_zero_eq_zero,
rw add_zero (a + b),
rw add_zero a,
},
{
rw add_succ (a + b),
rw add_succ a,
rw succ_add,
rw c_ih,
}
end
-- The proof in tactic mode
lemma zero_mul (m : mynat) : 0 * m = 0 :=
begin
induction m,
{
-- Q0: Why do I need this extra line compared to http://wwwf.imperial.ac.uk/~buzzard/xena/natural_number_game/?world=3&level=1
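-- (Probable answer: here `induction m` states the base case using the
-- constructor `mynat.zero`, while `mul_zero` speaks about the numeral `0`,
-- so `mynat_zero_eq_zero` is needed to bridge the two; the online game
-- presumably sets things up so that the base case is displayed as `0`.)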
rw mynat_zero_eq_zero,
rw mul_zero 0,
},
{
rw mul_succ 0 m_n,
rw add_zero (0 * m_n),
rw m_ih,
}
end
-- The proof of a forall version of the lemma
-- ported from https://leanprover-community.github.io/mathlib_docs/core/init/data/nat/lemmas.html#nat.zero_mul
-- Q1: Why does it need an extra refl compared to the original proof?
lemma zero_mul_forall : ∀ (m : mynat), 0 * m = 0
| 0 := rfl
| (succ m) := begin
rw [mul_succ, zero_mul_forall],
refl
end
lemma zero_mul_forall_match : ∀ (m : mynat), 0 * m = 0
| zero :=
calc zero * zero
= 0 * 0 : by rw mynat_zero_eq_zero
... = 0 : by rw mul_zero 0
| n@(succ m_n) :=
calc 0 * (succ m_n)
= 0 * m_n + 0 : by rw mul_succ
... = 0 * m_n : by rw add_zero (0 * m_n)
... = 0 : by rw zero_mul_forall_match m_n
lemma zero_mul_forall_match_term : ∀ (m : mynat), 0 * m = 0
| zero :=
calc zero * zero
= 0 * 0 : by rw mynat_zero_eq_zero
... = 0 : by rw mul_zero 0
| n@(succ m_n) :=
calc 0 * (succ m_n)
= 0 * m_n + 0 : mul_succ 0 m_n
... = 0 * m_n : add_zero (0 * m_n)
... = 0 : by rw zero_mul_forall_match m_n
-- Q2: how can I refer to the lemma itself in the match proof at <marker>
lemma zero_mul_match (m : mynat) : 0 * m = 0 :=
match m with
| zero :=
calc zero * zero
= 0 * 0 : by rw mynat_zero_eq_zero
... = 0 : by rw mul_zero 0
| n@(succ m_n) :=
calc 0 * (succ m_n)
= 0 * m_n + 0 : by rw mul_succ
... = 0 * m_n : by rw add_zero (0 * m_n)
... = 0 : by sorry -- <marker>
end
lemma zero_mul_induction_zero : 0 * zero = 0 := rfl
lemma zero_mul_induction_m_n : ∀ (n : mynat), 0 * n = 0 → 0 * n.succ = 0 :=
λ m_n h, add_zero (0 * m_n) ▸ mul_succ 0 m_n ▸ h
-- Q3: Why is `add_zero` no longer needed?
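-- (Probable answer: `add_zero` holds by `rfl`, since `add m 0` reduces to `m`
-- by the first defining equation of `add`; hence `0 * m_n + 0` and `0 * m_n`
-- are definitionally equal, and `▸` already succeeds without that rewrite.)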
lemma zero_mul_induction_m_n' : ∀ (n : mynat), 0 * n = 0 → 0 * n.succ = 0 :=
λ m_n h, mul_succ 0 m_n ▸ h
lemma zero_mul_rec (m : mynat) : 0 * m = 0 :=
mynat.rec_on m zero_mul_induction_zero zero_mul_induction_m_n
lemma zero_mul_rec' (m : mynat) : 0 * m = 0 :=
mynat.rec_on m rfl (λ m_n h, mul_succ 0 m_n ▸ h)
-- lemma zero_mul_rec'' (m : mynat) : 0 * m = 0 :=
-- m.rec_on zero_mul_induction_zero zero_mul_induction_m_n
-- https://leanprover.zulipchat.com/#narrow/stream/113489-new-members/topic/Natural.20Numbers.20Game/near/199964443
lemma mul_one (m : mynat) : m * 1 = m :=
begin
induction m,
{
rw one_eq_succ_zero,
rw mul_succ,
rw mul_zero,
refl
},
{
rw one_eq_succ_zero,
rw mul_succ,
rw mul_zero,
rw zero_add,
}
end
lemma one_mul (m : mynat) : 1 * m = m :=
begin
induction m,
{
rw mynat_zero_eq_zero,
rw mul_zero,
},
{
rw mul_succ,
rw succ_eq_add_one,
rw m_ih
}
end
lemma mul_add (t a b : mynat) : t * (a + b) = t * a + t * b :=
begin
induction b,
{
rw [mynat_zero_eq_zero, add_zero, mul_zero, add_zero],
},
{
rw mul_succ,
rw add_succ,
rw mul_succ,
rw b_ih,
rw add_assoc,
}
end
lemma mul_assoc (a b c : mynat) : (a * b) * c = a * (b * c) :=
begin
induction c,
{
rw mynat_zero_eq_zero,
rw mul_zero (a * b),
rw mul_zero b,
rw mul_zero a,
},
{
rw mul_succ (a * b),
rw mul_succ b,
rw c_ih,
rw mul_add a,
}
end
lemma succ_mul (a b : mynat) : succ a * b = a * b + b :=
begin
induction b,
{
rw mynat_zero_eq_zero,
rw mul_zero,
rw mul_zero,
rw add_zero,
},
{
rw mul_succ,
rw mul_succ,
rw add_succ,
rw add_succ,
rw b_ih,
rw add_assoc,
rw add_comm b_n a,
rw ←add_assoc,
}
end
lemma succ_mul' (a b : mynat) : succ a * b = a * b + b :=
begin
induction b,
{
rw mynat_zero_eq_zero,
rw mul_zero,
rw mul_zero,
rw add_zero,
},
{
rw mul_succ,
rw mul_succ,
rw add_succ,
rw add_succ,
rw b_ih,
rw add_right_comm,
}
end
lemma add_mul (a b t : mynat) : (a + b) * t = a * t + b * t :=
begin
induction t,
{
rw mynat_zero_eq_zero,
rw [mul_zero, mul_zero, mul_zero, add_zero],
},
{
rw [mul_succ, mul_succ, mul_succ],
rw t_ih,
rw add_right_comm,
rw add_comm (b * t_n) b,
rw ←add_assoc(a * t_n) a b,
rw add_assoc (a * t_n + a) b (b * t_n),
}
end
lemma mul_comm (a b : mynat) : a * b = b * a :=
begin
induction b,
{
rw mynat_zero_eq_zero,
rw [mul_zero, zero_mul],
},
{
rw [mul_succ, succ_mul, b_ih],
}
end
lemma mul_left_comm (a b c : mynat) : a * (b * c) = b * (a * c) :=
begin
rw ←mul_assoc b a c,
rw mul_comm b a,
rw mul_assoc a b c
end
lemma zero_pow_zero : (0 : mynat) ^ (0 : mynat) = 1 :=
begin
rw pow_zero,
end
lemma zero_pow_succ (m : mynat) : (0 : mynat) ^ (succ m) = 0 :=
begin
rw [pow_succ, mul_zero],
end
lemma pow_one (a : mynat) : a ^ (1 : mynat) = a :=
by rw [one_eq_succ_zero, pow_succ, pow_zero, one_mul]
lemma one_pow (m : mynat) : (1 : mynat) ^ m = 1 :=
begin
induction m with n h,
{
rw mynat_zero_eq_zero,
rw pow_zero,
},
{
rw pow_succ,
rw h,
refl,
}
end
lemma pow_add (a m n : mynat) : a ^ (m + n) = a ^ m * a ^ n :=
begin
induction n with k h,
{
rw mynat_zero_eq_zero,
rw pow_zero,
rw add_zero,
rw mul_one,
},
{
rw add_succ,
rw [pow_succ, pow_succ],
rw h,
rw mul_assoc (a ^ m) (a ^ k) a,
}
end
lemma mul_pow (a b n : mynat) : (a * b) ^ n = a ^ n * b ^ n :=
begin
induction n with k h,
{
rw mynat_zero_eq_zero,
rw [pow_zero, pow_zero, pow_zero],
refl,
},
{
rw [pow_succ, pow_succ, pow_succ],
rw h,
rw mul_assoc (a ^ k) (b ^ k) (a * b),
rw mul_left_comm (b ^ k) a b,
rw mul_assoc (a ^ k) a (b ^ k * b),
}
end
lemma pow_pow (a m n : mynat) : (a ^ m) ^ n = a ^ (m * n) :=
begin
induction n with k h,
{
rw mynat_zero_eq_zero,
rw [pow_zero, mul_zero, pow_zero],
},
{
rw [pow_succ, mul_succ],
rw h,
rw pow_add a (m * k) m,
}
end
lemma two_eq_succ_one : 2 = succ 1 := rfl
lemma add_squared (a b : mynat) :
(a + b) ^ (2 : mynat) = a ^ (2 : mynat) + b ^ (2 : mynat) + 2 * a * b :=
begin
rw two_eq_succ_one,
rw one_eq_succ_zero,
rw [pow_succ, pow_succ, pow_succ, pow_succ, pow_succ, pow_succ],
rw [pow_zero, pow_zero, pow_zero],
rw [one_mul, one_mul, one_mul],
rw mul_add,
rw add_mul,
rw add_mul,
rw mul_comm b a,
rw succ_mul,
rw ←one_eq_succ_zero,
rw one_mul,
rw add_mul,
rw add_assoc (a * a) (a * b) (a * b + b * b),
rw add_assoc (a * a) (b * b) (a * b + a * b),
rw ←add_assoc (b * b) (a * b) (a * b),
rw add_comm (b * b) (a * b),
rw add_assoc (a * b) (b * b) (a * b),
rw add_comm (b * b) (a * b),
end -- 28 rewrites
-- https://leanprover.zulipchat.com/#narrow/stream/113489-new-members/topic/natural.20number.20game.20questions/near/196864644
lemma add_squared' (a b : mynat) :
(a + b) ^ (2 : mynat) = a ^ (2 : mynat) + b ^ (2 : mynat) + 2 * a * b :=
begin
rw two_eq_succ_one,
rw pow_succ,
rw pow_succ,
rw pow_succ,
rw pow_one,
rw pow_one,
rw pow_one,
rw add_mul,
rw mul_add,
rw mul_add,
rw succ_mul,
rw one_mul,
rw add_mul,
rw mul_comm b a,
rw add_assoc,
rw add_assoc,
rw add_comm (b * b),
rw add_assoc,
end
-- Adapted from https://leanprover.zulipchat.com/#narrow/stream/113489-new-members/topic/natural.20number.20game.20questions/near/196867894
lemma add_squared'' (a b : mynat) :
(a + b) ^ (2 : mynat) = a ^ (2 : mynat) + b ^ (2 : mynat) + 2 * a * b :=
begin
have two_mul: ∀ x : mynat, (2:mynat) * x = x + x := λx, by rw [two_eq_succ_one, succ_mul 1 x, one_mul],
have pow_two: ∀ x : mynat, x ^ (2:mynat) = x * x := λx, by rw [two_eq_succ_one, pow_succ, pow_one],
rw [pow_two, pow_two, pow_two],
rw [add_mul, two_mul, add_right_comm, add_mul],
rw [← add_assoc, ← mul_add, mul_add b, mul_comm b, add_assoc]
end
example (P Q : Type) (p : P) (h : P → Q) : Q :=
begin
exact h(p),
end
example : mynat → mynat :=
begin
intro n,
exact 3*n+2,
end
example (P Q R S T U: Type)
(p : P)
(h : P → Q)
(i : Q → R)
(j : Q → T)
(k : S → T)
(l : T → U)
: U :=
begin
have q : Q := h(p),
have t : T := j(q),
exact l(t),
end
example (P Q R S T U: Type)
(p : P)
(h : P → Q)
(i : Q → R)
(j : Q → T)
(k : S → T)
(l : T → U)
: U :=
begin
apply l,
apply j,
apply h,
exact p,
end
example (P Q : Type) : P → (Q → P) :=
begin
intros p q,
exact p,
end
example (P Q R : Type) : (P → (Q → R)) → ((P → Q) → (P → R)) :=
begin
intro pqr,
intro pq,
intro p,
have q : Q := pq(p),
exact pqr p q,
end
example (P Q F : Type) : (P → Q) → ((Q → F) → (P → F)) :=
begin
intro pq,
intro qf,
intro p,
have q : Q := pq p,
exact qf q,
end
example (P Q : Type) : (P → Q) → ((Q → empty) → (P → empty)) :=
begin
intro pq,
intro qe,
intro p,
apply qe,
apply pq,
exact p,
end
example (A B C D E F G H I J K L : Type)
(f1 : A → B) (f2 : B → E) (f3 : E → D) (f4 : D → A) (f5 : E → F)
(f6 : F → C) (f7 : B → C) (f8 : F → G) (f9 : G → J) (f10 : I → J)
(f11 : J → I) (f12 : I → H) (f13 : E → H) (f14 : H → K) (f15 : I → L)
: A → L :=
begin
intro a,
apply f15,
apply f11,
have e : E := f2 (f1 a),
have j : J := f9 (f8 (f5 e)),
exact j,
end
example (P Q : Prop) (p : P) (h : P → Q) : Q :=
begin
exact h(p),
end
lemma imp_self (P : Prop) : P → P :=
begin
intro p,
exact p,
end
lemma maze (P Q R S T U: Prop)
(p : P)
(h : P → Q)
(i : Q → R)
(j : Q → T)
(k : S → T)
(l : T → U)
: U :=
begin
apply l,
have q : Q := h p,
exact j q,
end
example (P Q : Prop) : P → (Q → P) :=
begin
intros p q,
exact p,
end
example (P Q R : Prop) : (P → (Q → R)) → ((P → Q) → (P → R)) :=
begin
intros pqr pq p,
have q : Q := pq p,
exact pqr p q,
end
lemma imp_trans (P Q R : Prop) : (P → Q) → ((Q → R) → (P → R)) :=
begin
intros hpq hqr p,
exact hqr (hpq p),
end
lemma contrapositive (P Q : Prop) : (P → Q) → (¬ Q → ¬ P) :=
begin
repeat {rw not_iff_imp_false},
intro hpq,
intro nq,
intro p,
exact nq (hpq p),
end
example (A B C D E F G H I J K L : Prop)
(f1 : A → B) (f2 : B → E) (f3 : E → D) (f4 : D → A) (f5 : E → F)
(f6 : F → C) (f7 : B → C) (f8 : F → G) (f9 : G → J) (f10 : I → J)
(f11 : J → I) (f12 : I → H) (f13 : E → H) (f14 : H → K) (f15 : I → L)
: A → L :=
begin
intro a,
apply f15,
apply f11,
exact f9 (f8 (f5 (f2 (f1 a)))),
end
example (A B C D E F G H I J K L : Prop)
(f1 : A → B) (f2 : B → E) (f3 : E → D) (f4 : D → A) (f5 : E → F)
(f6 : F → C) (f7 : B → C) (f8 : F → G) (f9 : G → J) (f10 : I → J)
(f11 : J → I) (f12 : I → H) (f13 : E → H) (f14 : H → K) (f15 : I → L)
: A → L :=
begin
cc
end
example (P Q : Prop) (p : P) (q : Q) : P ∧ Q :=
begin
split,
exact p,
exact q,
end
lemma and_symm (P Q : Prop) : P ∧ Q → Q ∧ P :=
begin
intro hpq,
cases hpq with p q,
split,
exact q,
exact p,
end
lemma and_trans (P Q R : Prop) : P ∧ Q → Q ∧ R → P ∧ R :=
begin
intro hpq,
intro hqr,
cases hpq with p q,
split,
{
exact p,
},
{
cases hqr with q' r,
exact r,
}
end
lemma iff_trans (P Q R : Prop) : (P ↔ Q) → (Q ↔ R) → (P ↔ R) :=
begin
intro hpq,
intro hqr,
split,
{
intro p,
cases hpq with pq qp,
cases hqr with qr rq,
exact qr (pq p),
},
{
intro r,
cases hpq,
cases hqr,
apply hpq_mpr,
apply hqr_mpr,
exact r,
}
end
lemma iff_trans' (P Q R : Prop) : (P ↔ Q) → (Q ↔ R) → (P ↔ R) :=
begin
intros hpq hqr,
split,
{
intro p,
exact hqr.1 (hpq.1 p),
},
{
intro r,
exact hpq.2 (hqr.2 r),
}
end
lemma iff_trans'' (P Q R : Prop) : (P ↔ Q) → (Q ↔ R) → (P ↔ R) :=
begin
cc
end
lemma iff_trans''' (P Q R : Prop) : (P ↔ Q) → (Q ↔ R) → (P ↔ R) :=
begin
intros hpq hqr,
rw hpq,
exact hqr,
end
example (P Q : Prop) : Q → (P ∨ Q) :=
begin
intro q,
right,
exact q,
end
lemma or_symm (P Q : Prop) : P ∨ Q → Q ∨ P :=
begin
intro hpq,
cases hpq with p q,
{
right,
exact p,
},
{
left,
exact q,
}
end
lemma and_or_distrib_left (P Q R : Prop) : P ∧ (Q ∨ R) ↔ (P ∧ Q) ∨ (P ∧ R) :=
begin
split,
{
intro pnqr,
cases pnqr with p qr,
cases qr with q r,
{
left,
split,
exact p,
exact q,
},
{
right,
split,
exact p,
exact r,
}
},
{
intro pqnpr,
cases pqnpr with pq pr,
{
cases pq with p q,
split,
exact p,
left,
exact q,
},
{
cases pr with p r,
split,
exact p,
right,
exact r,
}
}
end
lemma contra (P Q : Prop) : (P ∧ ¬ P) → Q := by tauto
lemma contra' (P Q : Prop) : (P ∧ ¬ P) → Q :=
begin
intro h,
repeat {rw not_iff_imp_false at h},
cases h with p np,
exfalso,
exact np p,
end
lemma contra'' (P Q : Prop) : (P ∧ ¬ P) → Q :=
begin
intro h,
cases h with p np,
exfalso,
exact np p,
end
local attribute [instance, priority 10] classical.prop_decidable -- we are mathematicians
lemma contrapositive2 (P Q : Prop) : (¬ Q → ¬ P) → (P → Q) :=
begin
by_cases p : P; by_cases q : Q,
repeat {cc},
end
lemma contrapositive2' (P Q : Prop) : (¬ Q → ¬ P) → (P → Q) :=
begin
tauto,
end
/-
Dark mode for http://wwwf.imperial.ac.uk/~buzzard/xena/natural_number_game/:
body, button, .accordion__button,.accordion__panel {
background-color: #202020 !important;
color: #cdcdcd;
}
.Resizer {
background-color: #535353 !important;
}
/*
Press F12, choose Console, copy-paste the following and hit Enter:
monaco.editor.setTheme('vs-dark');
*/
-/
end mynat
-- examples in "Type-Driven Development with Idris"
-- chapter 9
import Data.Vect
-- check that all functions are total
%default total
--
-- section 9.1
--
maryInVector : Elem "Mary" ["Peter", "Paul", "Mary"]
maryInVector = There (There Here)

removeElem2 : (value : a) -> (xs : Vect (S n) a) ->
              (prf : Elem value xs) ->
              Vect n a
removeElem2 value (value :: ys) Here = ys
removeElem2 {n = Z} value (y :: []) (There later) = absurd later
removeElem2 {n = (S k)} value (y :: ys) (There later)
    = y :: removeElem2 value ys later

removeElem_auto : (value : a) -> (xs : Vect (S n) a) ->
                  {auto prf : Elem value xs} ->
                  Vect n a
removeElem_auto value xs {prf} = removeElem2 value xs prf

removeElem : (value : a) -> (xs : Vect (S n) a) ->
             {auto prf : Elem value xs} ->
             Vect n a
removeElem value (value :: ys) {prf = Here} = ys
removeElem {n = Z} value (y :: []) {prf = There later} = absurd later
removeElem {n = (S k)} value (y :: ys) {prf = There later}
    = y :: removeElem value ys
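-- Usage sketch (an assumed REPL interaction): the auto-implicit Elem proof
-- is found by proof search, e.g.
--   removeElem "Paul" ["Peter", "Paul", "Mary"]
-- evaluates to ["Peter", "Mary"]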
--
-- section 9.2
-- see Hangman.idr
--
(*
* Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
*
* SPDX-License-Identifier: BSD-2-Clause
*)
theory ArchArraysMemInstance
imports ArraysMemInstance
begin
(* Showing arrays are in mem_type requires maximum sizes for objects,
and maximum counts for elements *)
class array_outer_max_size = mem_type +
assumes array_outer_max_size_ax: "size_of TYPE('a::c_type) < 2 ^ 26"
class array_max_count = finite +
assumes array_max_count_ax: "CARD ('a) <= 2 ^ 20"
instance array :: (array_outer_max_size, array_max_count) mem_type
apply intro_classes
apply simp
apply (subgoal_tac "addr_card = 2 ^ (addr_bitsize - 26) * 2 ^ 26")
apply (erule ssubst)
apply (rule less_le_trans[where y = "card (UNIV::'b set) * 2 ^ 26"])
apply (rule mult_less_mono2)
apply (rule array_outer_max_size_ax)
apply simp
apply (rule mult_le_mono1)
apply (rule le_trans[where j = "2 ^ 20"])
apply (rule array_max_count_ax)
apply simp
apply simp
apply (simp add: addr_card)
done
class array_inner_max_size = array_outer_max_size +
assumes array_inner_max_size_ax: "size_of TYPE('a::c_type) < 2 ^ 6"
instance array :: (array_inner_max_size, array_max_count) array_outer_max_size
apply intro_classes
apply simp
apply (rule order_less_le_trans)
apply (rule mult_le_less_imp_less)
apply (rule array_max_count_ax)
apply (rule array_inner_max_size_ax)
apply simp
apply simp
apply simp
done
instance word :: (len8) array_outer_max_size
apply intro_classes
apply(simp add: size_of_def)
apply(subgoal_tac "len_of TYPE('a) \<le> 128")
apply simp
apply(rule len8_width)
done
instance word :: (len8) array_inner_max_size
apply intro_classes
apply(simp add: size_of_def)
apply(subgoal_tac "len_of TYPE('a) \<le> 128")
apply simp
apply(rule len8_width)
done
instance ptr :: (c_type) array_outer_max_size
apply intro_classes
apply (simp add: size_of_def)
done
instance ptr :: (c_type) array_inner_max_size
apply intro_classes
apply (simp add: size_of_def)
done
class lt19 = finite +
assumes lt19_ax: "CARD ('a) < 2 ^ 19"
class lt18 = lt19 +
assumes lt18_ax: "CARD ('a) < 2 ^ 18"
class lt17 = lt18 +
assumes lt17_ax: "CARD ('a) < 2 ^ 17"
class lt16 = lt17 +
assumes lt16_ax: "CARD ('a) < 2 ^ 16"
class lt15 = lt16 +
assumes lt15_ax: "CARD ('a) < 2 ^ 15"
class lt14 = lt15 +
assumes lt14_ax: "CARD ('a) < 2 ^ 14"
class lt13 = lt14 +
assumes lt13_ax: "CARD ('a) < 2 ^ 13"
class lt12 = lt13 +
assumes lt12_ax: "CARD ('a) < 2 ^ 12"
class lt11 = lt12 +
assumes lt11_ax: "CARD ('a) < 2 ^ 11"
class lt10 = lt11 +
assumes lt10_ax: "CARD ('a) < 2 ^ 10"
class lt9 = lt10 +
assumes lt9_ax: "CARD ('a) < 2 ^ 9"
class lt8 = lt9 +
assumes lt8_ax: "CARD ('a) < 2 ^ 8"
class lt7 = lt8 +
assumes lt7_ax: "CARD ('a) < 2 ^ 7"
class lt6 = lt7 +
assumes lt6_ax: "CARD ('a) < 2 ^ 6"
class lt5 = lt6 +
assumes lt5_ax: "CARD ('a) < 2 ^ 5"
class lt4 = lt5 +
assumes lt4_ax: "CARD ('a) < 2 ^ 4"
class lt3 = lt4 +
assumes lt3_ax: "CARD ('a) < 2 ^ 3"
class lt2 = lt3 +
assumes lt2_ax: "CARD ('a) < 2 ^ 2"
class lt1 = lt2 +
assumes lt1_ax: "CARD ('a) < 2 ^ 1"
instance bit0 :: (lt19) array_max_count
using lt19_ax[where 'a='a] by intro_classes simp
instance bit1 :: (lt19) array_max_count
using lt19_ax[where 'a='a] by intro_classes simp
instance bit0 :: (lt18) lt19
using lt18_ax[where 'a='a] by intro_classes simp
instance bit1 :: (lt18) lt19
using lt18_ax[where 'a='a] by intro_classes simp
instance bit0 :: (lt17) lt18
using lt17_ax[where 'a='a] by intro_classes simp
instance bit1 :: (lt17) lt18
using lt17_ax[where 'a='a] by intro_classes simp
instance bit0 :: (lt16) lt17
using lt16_ax[where 'a='a] by intro_classes simp
instance bit1 :: (lt16) lt17
using lt16_ax[where 'a='a] by intro_classes simp
instance bit0 :: (lt15) lt16
using lt15_ax[where 'a='a] by intro_classes simp
instance bit1 :: (lt15) lt16
using lt15_ax[where 'a='a] by intro_classes simp
instance bit0 :: (lt14) lt15
using lt14_ax[where 'a='a] by intro_classes simp
instance bit1 :: (lt14) lt15
using lt14_ax[where 'a='a] by intro_classes simp
instance bit0 :: (lt13) lt14
using lt13_ax[where 'a='a] by intro_classes simp
instance bit1 :: (lt13) lt14
using lt13_ax[where 'a='a] by intro_classes simp
instance bit0 :: (lt12) lt13
using lt12_ax[where 'a='a] by intro_classes simp
instance bit1 :: (lt12) lt13
using lt12_ax[where 'a='a] by intro_classes simp
instance bit0 :: (lt11) lt12
using lt11_ax[where 'a='a] by intro_classes simp
instance bit1 :: (lt11) lt12
using lt11_ax[where 'a='a] by intro_classes simp
instance bit0 :: (lt10) lt11
using lt10_ax[where 'a='a] by intro_classes simp
instance bit1 :: (lt10) lt11
using lt10_ax[where 'a='a] by intro_classes simp
instance bit0 :: (lt9) lt10
using lt9_ax[where 'a='a] by intro_classes simp
instance bit1 :: (lt9) lt10
using lt9_ax[where 'a='a] by intro_classes simp
instance bit0 :: (lt8) lt9
using lt8_ax[where 'a='a] by intro_classes simp
instance bit1 :: (lt8) lt9
using lt8_ax[where 'a='a] by intro_classes simp
instance bit0 :: (lt7) lt8
using lt7_ax[where 'a='a] by intro_classes simp
instance bit1 :: (lt7) lt8
using lt7_ax[where 'a='a] by intro_classes simp
instance bit0 :: (lt6) lt7
using lt6_ax[where 'a='a] by intro_classes simp
instance bit1 :: (lt6) lt7
using lt6_ax[where 'a='a] by intro_classes simp
instance bit0 :: (lt5) lt6
using lt5_ax[where 'a='a] by intro_classes simp
instance bit1 :: (lt5) lt6
using lt5_ax[where 'a='a] by intro_classes simp
instance bit0 :: (lt4) lt5
using lt4_ax[where 'a='a] by intro_classes simp
instance bit1 :: (lt4) lt5
using lt4_ax[where 'a='a] by intro_classes simp
instance bit0 :: (lt3) lt4
using lt3_ax[where 'a='a] by intro_classes simp
instance bit1 :: (lt3) lt4
using lt3_ax[where 'a='a] by intro_classes simp
instance bit0 :: (lt2) lt3
using lt2_ax[where 'a='a] by intro_classes simp
instance bit1 :: (lt2) lt3
using lt2_ax[where 'a='a] by intro_classes simp
instance bit0 :: (lt1) lt2
using lt1_ax[where 'a='a] by intro_classes simp
instance bit1 :: (lt1) lt2
using lt1_ax[where 'a='a] by intro_classes simp
instance num1 :: lt1
by (intro_classes, simp_all)
(* don't understand why this also seems to be necessary *)
instance num1 :: array_max_count
by (intro_classes, simp)
(* introduce hackish handling of 8192 type by making a copy of the type
under a constructor, and then manually showing that it is an instance of
array_max_count *)
datatype array_max_count_ty = array_max_count_ty "1048576"
(* ML c-parser code also needs to know at which array size to use this type *)
ML \<open>
structure ArchArrayMaxCount = struct
val array_max_count = 1048576
end
\<close>
lemma univ_array_max_count_ty:
"(UNIV::array_max_count_ty set) = image array_max_count_ty (UNIV::1048576 set)"
apply (simp add: set_eq_iff image_iff)
apply (rule_tac allI)
apply (rule_tac array_max_count_ty.induct)
apply simp
done
instance "array_max_count_ty" :: finite
apply intro_classes
apply (simp add: univ_array_max_count_ty)
done
lemma card_array_max_count_ty[simp]: "CARD(array_max_count_ty) = CARD(1048576)"
apply (simp add: univ_array_max_count_ty card_image inj_on_def)
done
instance "array_max_count_ty" :: array_max_count
by intro_classes simp
end
If two paths are homotopic in a subset $s$ of a topological space $t$, then they are homotopic in $t$.
# This function assumes that u is a symmetric polynomial in
# x[1],...,x[n] and returns a polynomial in c[1],...,c[n]
# that agrees with u after replacing the variables c[i] by
# the elementary symmetric functions in x[1],...,x[n].
# If the optional argument p_ is supplied, then the calculation
# is done mod p_.
newton_rewrite := proc(u,n,x,c,p_)
local p,f,i,v,w,a,m,k,cx,cxp,vars;
p := `if`(nargs > 4,p_,0);
f := expand(mul(t+x[i],i=1..n));
for i from 1 to n do
cx[i] := coeff(f,t,n-i);
od:
cxp := proc(i,j)
option remember;
local u;
if j = 0 then
return 1;
else
u := expand(cx[i]*cxp(i,j-1));
if p > 0 then u := mods(u,p); fi;
return u;
fi;
end:
v := u;
w := 0;
vars := plex(seq(x[i],i=1..n));
while v <> 0 do
a,m := Groebner[LeadingTerm](v,vars);
k := [seq(degree(m,x[i]),i=1..n),0];
w := w + a * mul(c[i]^(k[i]-k[i+1]),i=1..n);
v := expand(v - a * mul(cxp(i,k[i]-k[i+1]),i=1..n));
if nargs > 4 then v := modp(v,p_); fi;
od:
return w;
end:
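# Usage sketch (relying on Newton's identity p_2 = e_1^2 - 2*e_2):
#   newton_rewrite(x[1]^2 + x[2]^2, 2, x, c);
# should return c[1]^2 - 2*c[2]: substituting the elementary symmetric
# functions c[1] = x[1]+x[2], c[2] = x[1]*x[2] recovers the input polynomial.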
orbit_sum := proc(m,n,x)
local S,R,v,C,c,r,i;
S := combinat[permute](n);
R := map(s -> [seq(x[i]=x[s[i]],i=1..n)],S);
v := add(subs(r,m),r in R);
C := {coeffs(v,{seq(x[i],i=1..n)})};
c := igcd(op(C));
v := expand(v/c);
return v;
end:
#-----------------------------------------------------------------------------------------------------------------------------------------------------------
#------------------------------ Forest risk mapping models: Fig 2-4 model projection code - 05/11/21 ---------------------------------------
#-----------------------------------------------------------------------------------------------------------------------------------------------------------
# Author: William Anderegg ([email protected]), University of Utah
library(rworldmap)
library(MASS)
library(SDMTools)
library(raster)
library(RNetCDF)
library(scales)
library(car)
library(RColorBrewer)
library(betareg)
library(pgirmess)
library(geosphere)
library(ape)
#---------------------------------- Pull in FIA mort data -------------------------------------
dir <- ""
fianew <- read.csv(paste(dir, "FIA-TerraClim-Wide-v17-04-14-2021.csv", sep = ""), header = T)
fialonga <- read.csv(paste(dir, "FIA-TerraClim-Long-1990.1999-v17-04-14-2021.csv", sep = ""), header = T)
fialongb <- read.csv(paste(dir, "FIA-TerraClim-Long-2000.2009-v17-04-14-2021.csv", sep = ""), header = T)
fialongc <- read.csv(paste(dir, "FIA-TerraClim-Long-2010.2019-v17-04-14-2021.csv", sep = ""), header = T)
FTpred_ins <- read.csv(paste(dir, "Insect_ForTypToPredict_04-21-2021.csv", sep = ""), header = T)
FTpred_drt <- read.csv(paste(dir, "Drought_ForTypToPredict_04-21-2021.csv", sep = ""), header = T)
# FTs that meet the minimum 20 mortality threshold and cross-validated AUC>0.6
# Processing projection and a useful logical function
proj2 <- CRS("+proj=longlat +datum=WGS84 +ellps=WGS84")
"%!in%" <- function(x, y) !("%in%"(x, y))
# Get functional trait data from Trugman et al. (2020) PNAS
p50a <- open.nc(paste(dir, "CWM_P50_025Deg.nc", sep = ""))
p50m <- var.get.nc(p50a, "CWM_P50")
dim(p50m)
p50r <- var.get.nc(p50a, "mean_range_within_site")
dim(p50r)
lat.tr <- var.get.nc(p50a, "lat")
dim(lat.tr)
lon.tr <- var.get.nc(p50a, "lon")
dim(lon.tr)
# Screen FIA data to remove fire, human disturbance, timber cutting, CONDPROP>0.3
fianew1 <- fianew[which(fianew[, 8] >= 0.3 & fianew[, 59] != "True" & fianew[, 63] != "True" & fianew[, 71] != "True" & fianew[, 18] > 1), ]
# Extract trait data for all FIA CONDs
traitdata <- array(dim = c(102453, 2))
for (i in 1:102453) {
lati <- fianew1[i, 1]
loni <- fianew1[i, 2]
latcell <- which.min(abs(lati - lat.tr))
loncell <- which.min(abs(loni - lon.tr))
traitdata[i, 1] <- p50m[latcell, loncell]
traitdata[i, 2] <- p50r[latcell, loncell]
}
traitlong <- array(dim = c(498410, 2))
for (i in 1:498410) {
lati <- fialonga[i, 1]
loni <- fialonga[i, 2]
latcell <- which.min(abs(lati - lat.tr))
loncell <- which.min(abs(loni - lon.tr))
traitlong[i, 1] <- p50m[latcell, loncell]
traitlong[i, 2] <- p50r[latcell, loncell]
}
#------------------------------------------- Processing FIA data --------------------------------------------
# For Obs, build a clean data-frame with dependent variable, predictor variables, FORTYP, lat/lon
fianew1[is.na(fianew1[, 23]) == "TRUE", 23] <- 0 # Impute 0s where no mort was measured
fianew1[is.na(fianew1[, 27]) == "TRUE", 27] <- 0 # Impute 0s where no mort was measured for insect mort
fianew1[which(fianew1[, 151] < -16), 151] <- -16 # Set lower PDSI bounds
fianew1[which(fianew1[, 151] > 16), 151] <- 16 # Set upper PDSI bounds
fianew1[which(fianew1[, 152] < -16), 152] <- -16 # Set lower PDSI bounds
fianew1[which(fianew1[, 152] > 16), 152] <- 16 # Set upper PDSI bounds
fianew1[which(fianew1[, 153] < -16), 153] <- -16 # Set lower PDSI bounds
fianew1[which(fianew1[, 153] > 16), 153] <- 16 # Set upper PDSI bounds
# Combine all COND data and variables into a dataframe with columns: MortDrt, MortIns, 18 clim vars, 2 age vars, 2 trait vars, BA change, remeasurement period, lat, lon, FORTYP, BAlive
fia.newm <- as.data.frame(cbind(
(fianew1[, 23] / fianew1[, 18]) / (fianew1[, 15] - fianew1[, 14]), fianew1[, 27] * (fianew1[, 23] / fianew1[, 18]) / (fianew1[, 15] - fianew1[, 14]),
scale(fianew1[, 127]), scale(fianew1[, 128]), scale(fianew1[, 129]), scale(fianew1[, 139]), scale(fianew1[, 140]), scale(fianew1[, 141]),
scale(fianew1[, 151]), scale(fianew1[, 152]), scale(fianew1[, 153]), scale(fianew1[, 163]), scale(fianew1[, 164]), scale(fianew1[, 165]),
scale(fianew1[, 175]), scale(fianew1[, 176]), scale(fianew1[, 177]), scale(fianew1[, 187]), scale(fianew1[, 188]), scale(fianew1[, 189]),
scale(fianew1[, 3]^2), scale(fianew1[, 3]),
scale(traitdata[, 1]), scale(traitdata[, 2]),
(fianew1[, 19] - fianew1[, 18]), (fianew1[, 15] - fianew1[, 14]),
fianew1[, 1], fianew1[, 2], fianew1[, 4], fianew1[, 18]
))
# Second, screen out NaN values that will mess up the model
fia2 <- fia.newm[which(fia.newm[, 1] != "NaN" & fia.newm[, 5] != "NaN" & fia.newm[, 3] != "NaN" & fia.newm[, 9] != "NaN" & fia.newm[, 29] != "NaN" & fia.newm[, 21] != "NaN" & fia.newm[, 23] != "NaN" & fia.newm[, 1] < 1.01 & is.na(fia.newm[, 2]) == "FALSE"), ]
# Third, scale up to 0.25x0.25 degree for model fitting
dummy <- raster(ncols = 244, nrows = 100, xmn = -126, xmx = -65, ymn = 25, ymx = 50, crs = proj2) # 0.25 degree dummy raster
fia.g1 <- array(dim = c(0, 30))
for (i in 1:112) {
ftype <- unique(fia2[, 29])[i]
d1 <- data.frame(fia2[which(fia2[, 29] == ftype), ])
v1 <- rasterize(d1[, c(28, 27)], dummy, d1[, 1], fun = mean, background = NA, mask = FALSE, na.rm = T)
v1i <- rasterize(d1[, c(28, 27)], dummy, d1[, 2], fun = mean, background = NA, mask = FALSE, na.rm = T)
v1b <- rasterToPoints(v1)
v1bi <- (as.matrix(as.vector(v1i)))[is.na(as.matrix(as.vector(v1i))) == "FALSE"]
stack <- array(dim = c(length(v1bi), 0))
for (j in 1:24) {
varh <- rasterize(d1[, c(28, 27)], dummy, d1[, (j + 2)], fun = mean, background = NA, mask = FALSE, na.rm = T)
var1 <- (as.matrix(as.vector(varh)))[is.na(as.matrix(as.vector(varh))) == "FALSE"]
stack <- cbind(stack, var1)
}
ba <- rasterize(d1[, c(28, 27)], dummy, d1[, 30], fun = sum, background = NA, mask = FALSE, na.rm = T)
ba0 <- (as.matrix(as.vector(ba)))[is.na(as.matrix(as.vector(ba))) == "FALSE"]
fia.out <- cbind(v1b[, 3], v1bi, stack, v1b[, 1:2], rep(ftype, length(v1bi)), ba0)
fia.g1 <- rbind(fia.g1, fia.out)
}
#------------------------------------------- Fit mortality functions --------------------------------------------------
# GLM control parameters - bump up maxit to help with convergence
gm.ctl <- glm.control(epsilon = 1e-8, maxit = 500, trace = FALSE)
mod_insect_proj <- function(fitdata, minthres, ftypcol, preddata) {
colnames(preddata) <- c("V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V13")
colnames(fitdata) <- c("V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10")
mod1 <- array(dim = c(0, 7))
for (i in 1:112) {
ftype <- unique(fitdata[, ftypcol])[i]
non_zero <- ifelse(fitdata[which(fitdata[, ftypcol] == ftype), 1] > 0, 1, 0)
d1 <- data.frame(fitdata[which(fitdata[, ftypcol] == ftype), ], non_zero) # Historical mortality data
preddata1 <- data.frame(preddata[which(preddata[, 10] == ftype), ]) # Projection data
# Set up the output matrix
drt.out <- array(dim = c(dim(preddata1)[1], 7))
drt.out[, 1] <- ftype # FORTYPCD
drt.out[, 2] <- preddata1[, 12] # lat
drt.out[, 3] <- preddata1[, 11] # lon
drt.out[, 7] <- preddata1[, 13] # Summed BAlive0 for the FORTYP
# If FORTYP has non-meaningful historical model, set future projections == mean historical
if (ftype %!in% FTpred_ins[, 1]) drt.out[, 6] <- mean(d1[, 1], na.rm = T) # Predicted mort = mean historical where Nmort<minthres
if (ftype %!in% FTpred_ins[, 1]) mod1 <- rbind(mod1, drt.out)
    if (ftype %!in% FTpred_ins[, 1]) print(paste("FORTYP prediction skipped due to inadequate historical model: ", ftype, sep = ""))
if (ftype %!in% FTpred_ins[, 1]) next
# If FORTYP has Nmort<minthres, set future projections == mean historical
if (sum(d1[, 11], na.rm = T) < minthres) drt.out[, 6] <- mean(d1[, 1], na.rm = T) # Pred mort = mean hist where Nmort<minthres
if (sum(d1[, 11], na.rm = T) < minthres) mod1 <- rbind(mod1, drt.out)
    if (sum(d1[, 11], na.rm = T) < minthres) print(paste("FORTYP prediction skipped due to Nmort<minthres: ", ftype, sep = ""))
if (sum(d1[, 11], na.rm = T) < minthres) next
# For FORTYPs with adequate historical models
if (sum(non_zero, na.rm = T) > 0) d1 <- d1[which(d1[, 1] < quantile(d1[, 1], 0.995)), ]
if (dim(d1)[1] == 0) d1 <- data.frame(fitdata[which(fitdata[, ftypcol] == ftype), ], non_zero)
# Construct historical mortality model
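    # (Note added) Hurdle structure: the logistic GLM m1 models P(mortality > 0) and
    # the beta regression m2 models the mortality fraction given that it is non-zero;
    # the expected mortality (pred4 below) is the product of the two predictions.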
m1 <- glm(non_zero ~ V2 + V3 + V4 + V5 + V6 + V7 + V8, data = d1, family = binomial(link = logit), control = gm.ctl)
m2 <- betareg(V1 ~ V2 + V3 + V4 + V5 + V6 + V7 + V8, data = subset(d1, non_zero == 1))
# Predict with future climate projections
pred1 <- predict(m1, newdata = preddata1[, 2:8], se = TRUE, type = "response")
pred2 <- predict(m2, newdata = preddata1[, 2:8], se = TRUE, type = "response")
pred1a <- ifelse(pred1$fit >= 0.5, 1, 0)
pred3 <- pred2 * pred1a
pred4 <- pred2 * pred1$fit
# Store model output
drt.out[, 5] <- pred3 # Fitted mort frac
drt.out[, 6] <- pred4 # Predicted (expected value) mort frac
mod1 <- rbind(mod1, drt.out)
}
print(paste("FORTYPs projected: ", length(unique(mod1[which(mod1[, 5] != "NA"), 1])), sep = ""))
print(paste("FORTYPs completed: ", i, sep = ""))
return(mod1)
}
#----------------- FIAlong climate data preprocessing function
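# (Note added) prepclim3 clamps PDSI to [-16, 16] and z-scores the future
# climate columns with the historical means/SDs, so projections are on the same
# scale() basis as the model-fitting data.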
prepclim3 <- function(histclim, climdata, histcols, futcols) {
histclim[which(histclim[, 25] < -16), 25] <- -16
histclim[which(histclim[, 25] > 16), 25] <- 16
histclim[which(histclim[, 26] < -16), 26] <- -16
histclim[which(histclim[, 26] > 16), 26] <- 16
print(histclim[1, histcols])
print(climdata[1, futcols])
# Get historical mean and SD for drought models
hist1 <- array(dim = c(6, 2))
for (i in 1:6) {
hist1[i, 1] <- mean(histclim[, histcols[i]], na.rm = T)
hist1[i, 2] <- sd(histclim[, histcols[i]], na.rm = T)
}
fia.futa <- as.data.frame(cbind(rep(1, 498410), (climdata[, futcols[1]] - hist1[1, 1]) / hist1[1, 2], (climdata[, futcols[2]] - hist1[2, 1]) / hist1[2, 2], (climdata[, futcols[3]] - hist1[3, 1]) / hist1[3, 2], (climdata[, futcols[4]] - hist1[4, 1]) / hist1[4, 2], (climdata[, futcols[5]] - hist1[5, 1]) / hist1[5, 2], (climdata[, futcols[6]] - hist1[6, 1]) / hist1[6, 2], scale(fialonga[, 3]), scale(traitlong[, 2]), fialonga[, 7], fialonga[, 1], fialonga[, 2], fialonga[, 4]))
finalclim <- fia.futa[which(fia.futa[, 1] != "NaN" & fia.futa[, 5] != "NaN" & fia.futa[, 3] != "NaN" & fia.futa[, 7] != "NaN" & fia.futa[, 10] != "NaN" & fia.futa[, 8] != "NaN" & is.na(fia.futa[, 2]) == "FALSE"), ]
return(finalclim)
}
#----------------- Upscaling function
# Function to rasterize and weight model mortality projections by observed/current BA
weight.proj1 <- function(raster1, grid, mortcol) {
mort.bafi <- rasterize(raster1[which(raster1[, 1] == unique(raster1[, 1])[1]), c(2, 3)], grid, raster1[which(raster1[, 1] == unique(raster1[, 1])[1]), 7], fun = mean, background = NA, mask = FALSE, na.rm = TRUE)
mort.predfi <- rasterize(raster1[which(raster1[, 1] == unique(raster1[, 1])[1]), c(2, 3)], grid, raster1[which(raster1[, 1] == unique(raster1[, 1])[1]), mortcol], fun = mean, background = NA, mask = FALSE, na.rm = TRUE)
for (i in 2:length(unique(raster1[, 1]))) {
mort.predfi <- addLayer(mort.predfi, rasterize(raster1[which(raster1[, 1] == unique(raster1[, 1])[i]), c(2, 3)], grid, raster1[which(raster1[, 1] == unique(raster1[, 1])[i]), mortcol], fun = mean, background = NA, mask = FALSE, na.rm = TRUE))
mort.bafi <- addLayer(mort.bafi, rasterize(raster1[which(raster1[, 1] == unique(raster1[, 1])[i]), c(2, 3)], grid, raster1[which(raster1[, 1] == unique(raster1[, 1])[i]), 7], fun = mean, background = NA, mask = FALSE, na.rm = TRUE))
}
mort.predwi <- weighted.mean(mort.predfi, mort.bafi, na.rm = T)
return(mort.predwi)
}
#-------------------------------------------- Do future climate projections: INSECT MODELS
models1 <- read.csv(paste(dir, "models.csv", sep = ""), header = T)
# Correct columns to use for historical and future climate CSVs: INSECT MODEL
histcols <- c(19, 23, 25, 30, 32, 34)
futcols <- c(11, 5, 13, 21, 8, 16)
# Set up model and date matrices to loop over
models2 <- unique(models1[, 2])
datest <- c(2010, 2020, 2030, 2040, 2050, 2060, 2070, 2080, 2090)
dateend <- c(2019, 2029, 2039, 2049, 2059, 2069, 2079, 2089, 2099)
scenario <- c(rep("ssp585", 9))
member <- models1[c(1, 5, 9, 13, 17, 21), 4]
# Loop over models and decades
for (i in 1:6) {
for (j in 1:9) {
# Read in future climate projection
climatedata <- read.csv(paste("https://carbonplan.blob.core.windows.net/carbonplan-scratch/forests/quantile-mapping-v3/FIA-CMIP6-Long-", models2[i], ".", scenario[j], ".", member[i], "-", datest[j], ".", dateend[j], "-v18-05-03-2021.csv", sep = ""))
# Do mortality projection
mortproj1 <- mod_insect_proj(fia.g1[, c(2, 5, 6, 11, 13, 15, 20, 22, 24, 29)], 20, 10, prepclim3(fialonga, climatedata, histcols, futcols))
mortproj2 <- mod_insect_proj(fia.g1[, c(2, 5, 6, 11, 13, 15, 20, 22, 24, 29)], 20, 10, prepclim3(fialongb, climatedata, histcols, futcols))
mortproj3 <- mod_insect_proj(fia.g1[, c(2, 5, 6, 11, 13, 15, 20, 22, 24, 29)], 20, 10, prepclim3(fialongc, climatedata, histcols, futcols))
mortproj.all <- mortproj1
mortproj.all[, 6] <- rowMeans(cbind(mortproj1[, 6], mortproj2[, 6], mortproj3[, 6])) # Average across 3 baseline decades (1990-2019)
# Scale up to 0.25 degree & Write raster output
mort.insects.proj <- weight.proj1(mortproj.all, dummy, 6)
writeRaster(mort.insects.proj, "directoryhere", format = "GTiff")
}
}
# Repeat same process for other SSPs and historical runs
#------------------------------------------------ DROUGHT MODELS
mod_drought_proj <- function(fitdata, minthres, ftypcol, preddata) {
colnames(preddata) <- c("V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V13")
colnames(fitdata) <- c("V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10")
mod1 <- array(dim = c(0, 7))
for (i in 1:112) {
ftype <- unique(fitdata[, ftypcol])[i]
non_zero <- ifelse(fitdata[which(fitdata[, ftypcol] == ftype), 1] > 0, 1, 0)
d1 <- data.frame(fitdata[which(fitdata[, ftypcol] == ftype), ], non_zero) # Historical mortality data
preddata1 <- data.frame(preddata[which(preddata[, 10] == ftype), ]) # Projection data
# Set up the output matrix
drt.out <- array(dim = c(dim(preddata1)[1], 7))
drt.out[, 1] <- ftype # FORTYPCD
drt.out[, 2] <- preddata1[, 12] # lat
drt.out[, 3] <- preddata1[, 11] # lon
drt.out[, 7] <- preddata1[, 13] # Biomass for the FORTYP
# If FORTYP has non-meaningful historical model, set future projections == mean historical
if (ftype %!in% FTpred_drt[, 1]) drt.out[, 6] <- mean(d1[, 1], na.rm = T) # Predicted mort = mean historical where Nmort<minthres
if (ftype %!in% FTpred_drt[, 1]) mod1 <- rbind(mod1, drt.out)
    if (ftype %!in% FTpred_drt[, 1]) print(paste("FORTYP prediction skipped due to inadequate historical model: ", ftype, sep = ""))
if (ftype %!in% FTpred_drt[, 1]) next
# If FORTYP has Nmort<minthres, set future projections == mean historical
if (sum(d1[, 11], na.rm = T) < minthres) drt.out[, 6] <- mean(d1[, 1], na.rm = T) # Pred mort = mean hist where Nmort<minthres
if (sum(d1[, 11], na.rm = T) < minthres) mod1 <- rbind(mod1, drt.out)
    if (sum(d1[, 11], na.rm = T) < minthres) print(paste("FORTYP prediction skipped due to Nmort<minthres: ", ftype, sep = ""))
if (sum(d1[, 11], na.rm = T) < minthres) next
# For FORTYPs with adequate historical models
if (sum(non_zero, na.rm = T) > 0) d1 <- d1[which(d1[, 1] < quantile(d1[, 1], 0.995)), ]
if (dim(d1)[1] == 0) d1 <- data.frame(fitdata[which(fitdata[, ftypcol] == ftype), ], non_zero)
# Construct historical mortality model
m1 <- glm(non_zero ~ V2 + V3 + V4 + V5 + V6 + V7 + V8 + V9, data = d1, family = binomial(link = logit), control = gm.ctl)
m2 <- betareg(V1 ~ V2 + V3 + V4 + V5 + V6 + V7 + V8 + V9, data = subset(d1, non_zero == 1))
# Predict with future climate projections
pred1 <- predict(m1, newdata = preddata1[, 2:9], se = TRUE, type = "response")
pred2 <- predict(m2, newdata = preddata1[, 2:9], se = TRUE, type = "response")
pred1a <- ifelse(pred1$fit >= 0.5, 1, 0)
pred3 <- pred2 * pred1a
pred4 <- pred2 * pred1$fit
# Store model output
drt.out[, 5] <- pred3 # Fitted mort frac
drt.out[, 6] <- pred4 # Predicted (expected value) mort frac
mod1 <- rbind(mod1, drt.out)
}
print(paste("FORTYPs projected: ", length(unique(mod1[which(mod1[, 5] != "NA"), 1])), sep = ""))
print(paste("FORTYPs completed: ", i, sep = ""))
return(mod1)
}
#--------------------- Do future projections
# Correct columns to use for historical and future climate CSVs: DROUGHT MODEL
histcols1a <- c(19, 24, 25, 30, 31, 35)
futcols1a <- c(11, 19, 13, 21, 15, 9)
# Set up model and date matrices to loop over
models2 <- unique(models1[, 2])
datest <- c(2010, 2020, 2030, 2040, 2050, 2060, 2070, 2080, 2090)
dateend <- c(2019, 2029, 2039, 2049, 2059, 2069, 2079, 2089, 2099)
scenario <- c(rep("ssp585", 9))
member <- models1[c(1, 5, 9, 13, 17, 21), 4]
# Loop over models and decades
for (i in 1:6) {
for (j in 1:9) {
# Read in future climate projection
climatedata <- read.csv(paste("https://carbonplan.blob.core.windows.net/carbonplan-scratch/forests/quantile-mapping-v3/FIA-CMIP6-Long-", models2[i], ".", scenario[j], ".", member[i], "-", datest[j], ".", dateend[j], "-v18-05-03-2021.csv", sep = ""))
# Do mortality projection
mortproj1 <- mod_drought_proj(fia.g1[, c(1, 5, 7, 11, 13, 17, 18, 22, 24, 29)], 20, 10, prepclim3(fialonga, climatedata, histcols1a, futcols1a))
mortproj2 <- mod_drought_proj(fia.g1[, c(1, 5, 7, 11, 13, 17, 18, 22, 24, 29)], 20, 10, prepclim3(fialongb, climatedata, histcols1a, futcols1a))
mortproj3 <- mod_drought_proj(fia.g1[, c(1, 5, 7, 11, 13, 17, 18, 22, 24, 29)], 20, 10, prepclim3(fialongc, climatedata, histcols1a, futcols1a))
mortproj.all <- mortproj1
mortproj.all[, 6] <- rowMeans(cbind(mortproj1[, 6], mortproj2[, 6], mortproj3[, 6])) # Average across 3 baseline decades (1990-2019)
# Scale up to 0.25 degree & Write raster output
mort.drought.proj <- weight.proj1(mortproj.all, dummy, 6)
writeRaster(mort.drought.proj, "directoryhere", format = "GTiff")
}
}
# Repeat same process for other SSPs and historical runs
|
(* Author: Dmitriy Traytel *)
header {* Normalization of M2L Formulas *}
(*<*)
theory M2L_Normalization
imports M2L
begin
(*>*)
fun nNot where
"nNot (FNot \<phi>) = \<phi>"
| "nNot (FAnd \<phi>1 \<phi>2) = FOr (nNot \<phi>1) (nNot \<phi>2)"
| "nNot (FOr \<phi>1 \<phi>2) = FAnd (nNot \<phi>1) (nNot \<phi>2)"
| "nNot \<phi> = FNot \<phi>"
primrec norm where
"norm (FQ a m) = FQ a m"
| "norm (FLess m n) = FLess m n"
| "norm (FIn m M) = FIn m M"
| "norm (FOr \<phi> \<psi>) = FOr (norm \<phi>) (norm \<psi>)"
| "norm (FAnd \<phi> \<psi>) = FAnd (norm \<phi>) (norm \<psi>)"
| "norm (FNot \<phi>) = nNot (norm \<phi>)"
| "norm (FExists \<phi>) = FExists (norm \<phi>)"
| "norm (FEXISTS \<phi>) = FEXISTS (norm \<phi>)"
context formula
begin
lemma satisfies_nNot[simp]: "satisfies (w, I) (nNot \<phi>) = satisfies (w,I) (FNot \<phi>)"
by (induct \<phi> rule: nNot.induct) auto
lemma FOV_nNot[simp]: "FOV (nNot \<phi>) = FOV (FNot \<phi>)"
by (induct \<phi> rule: nNot.induct) auto
lemma SOV_nNot[simp]: "SOV (nNot \<phi>) = SOV (FNot \<phi>)"
by (induct \<phi> rule: nNot.induct) auto
lemma pre_wf_formula_nNot[simp]: "pre_wf_formula n (nNot \<phi>) = pre_wf_formula n (FNot \<phi>)"
by (induct \<phi> rule: nNot.induct) auto
lemma FOV_norm[simp]: "FOV (norm \<phi>) = FOV \<phi>"
by (induct \<phi>) auto
lemma SOV_norm[simp]: "SOV (norm \<phi>) = SOV \<phi>"
by (induct \<phi>) auto
lemma pre_wf_formula_norm[simp]: "pre_wf_formula n (norm \<phi>) = pre_wf_formula n \<phi>"
by (induct \<phi> arbitrary: n) auto
lemma satisfies_norm[simp]: "satisfies (w, I) (norm \<phi>) = satisfies (w, I) \<phi>"
by (induct \<phi> arbitrary: I) auto
lemma lang\<^sub>M\<^sub>2\<^sub>L_norm[simp]: "lang\<^sub>M\<^sub>2\<^sub>L n (norm \<phi>) = lang\<^sub>M\<^sub>2\<^sub>L n \<phi>"
unfolding lang\<^sub>M\<^sub>2\<^sub>L_def by auto
end
(*<*)
end
(*>*)
|
Why Indian SEO Companies Fare Better in the International Outsourcing Market?
Search engine optimization (SEO) is the strategy of improving the visibility of your website in a search engine's results pages (SERPs), thereby boosting the traffic to that website.
Nowadays there may be thousands of web pages on an identical theme, product, service, or piece of information. Reaching one's market is thus becoming more and more difficult and competitive for firms relying on web business.
To survive and gain ground in this cut-throat competition, you necessarily have to submit to the rules of the search engines, master them, and apply them across your website portfolio.
Learning these rules of the game is no child's play, though, and more and more companies are opting for professionals to provide these services.
Here I'll briefly discuss why Indian SEO companies fare better in this competition, and what their future prospects are.
Indian SEO service providers have a history as long as outsourcing itself, and India presently handles the most significant chunk of the world's outsourcing business. If you are looking for SEO services then you can check out this link: SEO Outsourcing India The Key to Succeed Online!
Proficiency in English - One of the key SEO techniques to boost website ranking is article writing and content management. Strategic and repeated use of keywords and search phrases is important, without diminishing the relevance of the overall content. |
Formal statement is: lemma unit_imp_no_prime_divisors: assumes "is_unit x" "prime_elem p" shows "\<not>p dvd x" Informal statement is: If $x$ is a unit and $p$ is a prime element, then $p$ does not divide $x$. |
########################################################################
# Synchronous Vertical Federated Logistic Regression for Adult dataset
########################################################################
using VerFedLogistic
using Printf
using SparseArrays
# load data
filename = "adult"
Xtrain, Ytrain, Xtest, Ytest = load_data(filename)
# config
config = Dict{String, Union{Int64, Float64, String}}()
config["num_classes"] = 2
config["num_clients"] = 3
config["num_epoches"] = 40
config["batch_size"] = 2837
config["learning_rate"] = 0.5
config["local_model"] = "mlp"
# vertically split data
Xtrain_split, Xtest_split = split_data(Xtrain, Xtest, config["num_clients"])
# initialize server
server = Server(Ytrain, Ytest, config)
# initialize clients
clients = Vector{Client}(undef, config["num_clients"])
for id = 1:config["num_clients"]
c = Client(id, Xtrain_split[id], Xtest_split[id], config)
clients[id] = c
# connect with server
connect(server, c)
end
# training
startT = time()
vertical_lr_train(server, clients)
endT = time()
@printf "training time: %.2f secs \n" endT - startT
# evaluation
evaluation(server, clients)
|
Formal statement is: lemma setdist_triangle: "setdist S T \<le> setdist S {a} + setdist {a} T" Informal statement is: The distance between two sets is less than or equal to the sum of the distances between the first set and a point and the point and the second set. |
# Copyright (c) 2018-2021, Carnegie Mellon University
# See LICENSE for details
Declare(DPBench);
#F DPBench(rec(experiment1 := opts1, ...), <dpopts>)
#F
#F d := DPBench(rec(default := SpiralDefaults), rec())
#F
#F DPBench Interface:
#F
#F .build(transforms, opts, bopts, name, dpopts),
#F
#F .generateCfiles := [true, false], default = true
#F .matrixVerify := [true, false], default = false
#F .fftwVerify := [true, false], default = false
#F .quickVerify := [true, false], default = false
#F
#F .fileTransform(exp, t, opts), .c filenames: override in a subclass
#F .funcTransform(exp, t, opts), function names: override in a subclass
#F .txtFileName(exp, runMethod), timing file name
#F
#F .runAll() run all transforms from every experiment's opts.benchTransforms
#F .resumeAll() same as .runAll() but try to reload hash from disk
#F
#F .run(transforms) run given transforms in all experiments
#F .resume(transforms) same as .run() but try to reload hash from disk
#F
#F .runRandomAll(), .runRandom() run a random ruletree
#F .runExhaustiveAll(), .runExhaustive() run all ruletrees
#F
#F .generateCode(transforms, exp)
#F .generateProductionCode(transforms, exp) same as .generateCode() but will use opts.production()
#F
#F .entries(transforms, exp)
#F .times(transforms, exp)
#F .alltimes(transforms)
#F .speedup(transforms, baselineExp)
#F
#F .flopcyc(transforms) # FLoating point Operations Per Cycle
#F .mflops(transforms, mhz)
#F .scaledTimes(transforms, scaleFunc)
#F
#F getResult(i) returns "rec(t, opts, rt)" for the <i>th experiment run by runAll()
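#F
#F Example (a sketch, not from the original documentation):
#F   d := DPBench(rec(default := SpiralDefaults), rec());
#F   d.run([DFT(8)]);               # dynamic-programming search for DFT(8)
#F   d.times([DFT(8)], "default");  # best measured cycle counts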
Class(DPBench, rec(
##
## Private methods
##
_checkExp := exp ->
Cond(not IsRec(exp), Error("Experiments record <exp> must be a record"),
NumRecFields(exp) < 1, Error("Experiments record is empty"),
not ForAll(UserRecFields(exp), f -> IsRec(exp.(f))),
Error("Each entry must be a valid Spiral options record (ie. SpiralDefaults)"),
exp),
_startHashFile := (self, hfile, exp, d) >> PrintTo(hfile,
"<# DPBench experiment '", exp, "'\n",
" # Started ", d[2], " ", d[3], " ", d[1], " ", d[4], ":", d[5], "\n",
" # Transforms: ", Cond(IsBound(self.transforms), self.transforms, ""), "#> \n\n",
"ImportAll(spiral); Import(paradigms.common, paradigms.smp, platforms.sse, platforms.avx, paradigms.vector, nontransforms.ol); \n",
"ImportAll(platforms.scalar); \n",
"ImportAll(paradigms.vector); \n\n",
"hash := HashTableDP(); \n"
),
_loadHash := meth(self, hfile, opts)
local b, bkdowns, ns, result;
# Create a package that contains mappings from breakdown names to the corresponding
# objects. This is needed because global names map to global breakdown rule objects
# which might have different settings from the ones in opts.breakdownRules
# Note: package is essentially a namespace that is always on top of imports
    # so imports within a hash file will be superseded by it
bkdowns := ConcatList(UserRecFields(opts.breakdownRules), f->opts.breakdownRules.(f));
ns := tab();
for b in bkdowns do ns.(b.name) := b; od;
result := READ(hfile, ns);
if result = false or not IsBound(ns.hash) then return false;
else return ns.hash;
fi;
end,
_saveHash := meth(self, hfile, exp, date, hash)
local bucket, e;
var.print := var.printFull;
self._startHashFile(hfile, exp, date);
for bucket in hash.entries do
for e in bucket do
if e.data<>[] then
AppendTo(hfile, "HashAdd(hash, ", e.key, ", [", e.data[1], "]);\n");
fi;
od;
od;
var.print := var.printShort;
end,
# merging two hash files by taking fastest entries, returns resulting hash
_mergeHashes := meth(self, src_file1, src_file2, opts)
local h1, h2;
h1 := self._loadHash(src_file1, opts);
h2 := self._loadHash(src_file2, opts);
Checked(h1<>false and h2<>false,
HashWalk(h1, function(key, data)
local d;
d := HashLookup(h2, key);
if d=false or (d[1].measured>data[1].measured and data[1].measured>0) then
HashAdd(h2, key, data);
fi;
end));
return h2;
end,
_reloadAllHashes := meth(self)
local hash, exp, e;
for e in UserRecFields(self.exp) do
exp := self.exp.(e);
hash := self._loadHash(exp.hashFile, exp);
if (self.verbosity>0) then
PrintLine(When(hash=false, "Could not load ", "Loaded "), e, " (", exp.hashFile, ")");
fi;
if hash <> false then
exp.hashTable := hash;
fi;
od;
end,
_generateCode := meth(self, transforms, exp, opts)
local entries, e, c, r, t;
for t in transforms do
e := self.entries([HashAsSPL(t)], exp)[1];
if e = false then Error("Transform ", t, " not found in hashTable for experiment '", exp, "'"); fi;
r := ApplyRuleTreeSPL(e.ruletree, t, opts);
c := CodeRuleTree(r, opts);
PrintLine(t, " -> ", self.prodFileTransform(exp, t, opts));
PrintTo(self.prodFileTransform(exp, t, opts), PrintCode(self.prodFuncTransform(exp, t, opts), c, opts));
od;
end,
##
## Public methods
##
__call__ := meth(self, experiments, dpopts)
local e, exp;
self._checkExp(experiments);
exp:=rec();
for e in UserRecFields(experiments) do
exp.(e) := CopyFields(experiments.(e));
if not IsBound(exp.(e).hashTable) then
exp.(e).hashTable := HashTableDP(); fi;
if not IsBound(exp.(e).hashFile) then
exp.(e).hashFile := Concat(e, ".hash"); fi;
od;
return WithBases(self,
rec(dpopts:=dpopts, ran:=false, exp:=exp, transforms:=[], verbosity:=1, callbacks:=[]));
end,
resume := meth(self, transforms)
if not ForAll(UserRecFields(self.exp), e -> ForAll(self.entries(transforms, e), e->e<>false))
then self._reloadAllHashes();
fi;
self.run(transforms);
end,
generateCfiles := true,
measureFinal := true,
_fileRoot := meth(self, exp, t, opts)
if IsBound(opts.vector) and IsBound(opts.vector.conf) and IsBound(opts.vector.conf.functionNameRoot) then
return opts.vector.conf.functionNameRoot;
else
return Concat(exp, "_", Drop(CodeletName(CodeletShape(t)), 1));
fi;
end,
_freq := meth(self, opts)
if IsBound(opts.vector) and IsBound(opts.vector.conf) and IsBound(opts.vector.conf.target) and IsBound(opts.vector.conf.target.freq) then
return opts.vector.conf.target.freq;
elif IsBound(LocalConfig.cpuinfo.freq) then
return LocalConfig.cpuinfo.freq;
else
return 1000;
fi;
end,
fileTimer := (self, exp, t, opts) >> Concat(self._fileRoot(exp, t, opts), ".timer"),
fileVerifierf := (self, exp, t, opts) >> Concat(self._fileRoot(exp, t, opts), ".verifier"),
fileStub := (self, exp, t, opts) >> Concat(self._fileRoot(exp, t, opts), ".h"),
fileTransform := (self, exp, t, opts) >> Concat(self._fileRoot(exp, t, opts), ".c"),
funcTransform := (self, exp, t, opts) >> self._fileRoot(exp, t, opts),
prodFileTransform := (self, exp, t, opts) >> self.fileTransform(exp, t, opts), # used in generateProductionCode
prodFuncTransform := (self, exp, t, opts) >> self.funcTransform(exp, t, opts),
txtFileName := (exp, runMethod) -> Concat(exp, ".", SubString(runMethod, 5), ".txt"),
verify := meth(self, opts, ruletree, code)
local mat;
mat := When(ruletree.node.isReal() or opts.dataType = "complex" or opts.generateComplexCode,
MatSPL(ruletree.node),
RCMatCyc(MatSPL(ruletree.node)));
return VerifyMatrixCode(code, mat, opts);
end,
#NOTE: Slightly hacked in. Check for opts.profile being bound etc. Look at VerifyMatrixRuleTree.
verifyfftw := (self, opts, code) >> opts.profile.verifyfftw(code, opts),
verifyquick := (self, opts, code) >> opts.profile.verifyquick(code, opts),
resumeAll := meth(self)
local e;
for e in UserRecFields(self.exp) do
if (self.verbosity>0) then
PrintLine("Resuming ", e);
fi;
self.resume(self.exp.(e).benchTransforms);
od;
end,
_runAll := meth(self, runMethod)
local e;
for e in UserRecFields(self.exp) do
PrintLine("Running ", e);
self.(runMethod)(self.exp.(e).benchTransforms);
od;
end,
allTrees := (self) >> let(exp := self.exp.(UserRecFields(self.exp)[1]),
List(exp.benchTransforms, t ->
ApplyRuleTreeSPL( HashLookup(exp.hashTable, HashAsSPL(t))[1].ruletree,
t, exp))),
runAll := (self) >> self._runAll("run"),
runExhaustiveAll := (self) >> self._runAll("runExhaustive"),
runRandomAll := (self) >> self._runAll("runRandom"),
runRandomSaveAll := (self) >> self._runAll("runRandomSave"),
pickRandomSaveAll := (self) >> self._runAll("pickRandomSave"),
run := (self, transforms) >> self._run(transforms, "_runDP", true),
runExhaustive := (self, transforms) >> self._run(transforms, "_runExhaustive", true),
runRandom := (self, transforms) >> self._run(transforms, "_runRandom", false),
runRandomSave := (self, transforms) >> self._run(transforms, "_runRandomSave", true),
pickRandomSave := meth(self, transforms)
local generateCfiles;
#NOTE: once generateCfiles quits measuring things, this can be removed
generateCfiles := self.generateCfiles;
self.generateCfiles := false;
self._run(transforms, "_pickRandomSave", true);
self.generateCfiles := generateCfiles;
end,
# Find best using DP
_runDP := (self, e, t, opts) >> TimedAction(DP(t, self.dpopts, opts)),
# Find best using an exhaustive search
outputExhaustive := false,
_runExhaustive := meth(self, e, t, opts)
local r, searchTime, mincycles, mintree, rt, c, compiletime, cm, measuretime;
r := AllRuleTrees(t, opts);
searchTime := 0;
mincycles := 10^100;
for rt in r do
[c, compiletime] := TimedAction(CodeRuleTreeOpts(rt, opts));
[cm, measuretime] := TimedAction(CMeasure(c, opts));
if self.outputExhaustive then _seqPerfStatsGflops(Concat(e, ".Exhaustive-all.txt"), t, self._freq(opts), self.artcost(rt), cm, compiletime+measuretime); fi;
if (cm < mincycles) then
mincycles := cm; mintree := Copy(rt);
fi;
searchTime:=searchTime+compiletime+measuretime;
od;
HashDelete(opts.hashTable,t);
HashAdd(opts.hashTable, t, [rec(ruletree:=mintree, measured:=mincycles)]);
return([mintree, searchTime, c, mincycles]);
end,
# Run a Random ruletree. Useful for quick, dirty, non-comprehensive tests.
_runRandom := meth(self, e, t, opts)
local r, c, rrtime, codetime, runtime, cycles;
[r, rrtime] := TimedAction(RandomRuleTree(t, opts));
[c, codetime] := TimedAction(CodeRuleTreeOpts(r, opts));
[cycles, runtime]:= TimedAction(CMeasure(c, opts));
return([r, (rrtime+codetime+runtime), c, cycles]);
end,
# Run random search and save result in hash.
_runRandomSave := meth(self, e, t, opts)
local r, c, rrtime, codetime, runtime, cycles;
[r, rrtime] := TimedAction(RandomRuleTree(t, opts));
[c, codetime] := TimedAction(CodeRuleTreeOpts(r, opts));
[cycles, runtime]:= TimedAction(CMeasure(c, opts));
HashDelete(opts.hashTable, t);
HashAdd(opts.hashTable, t, [rec(ruletree:=r, measured:=runtime)]);
return([r, (rrtime+codetime+runtime), c, cycles]);
end,
# Pick random and save result in hash (NOT measured).
_pickRandomSave := meth(self, e, t, opts)
local r, searchtime;
[r, searchtime] := TimedAction(RandomRuleTree(t, opts));
HashDelete(opts.hashTable,t);
HashAdd(opts.hashTable,t,[rec(ruletree:=r)]);
return([r, searchtime, false, -1]);
end,
_run := meth(self, transforms, runMethod, useHash)
local code, t, e, outf, res, opts, cycles, hentry, date, i, searchTime, acc, optsForFile, f, ruletree, randomRes;
Constraint(ForAll(transforms, IsSPL));
for f in self.callbacks do
f(self);
od;
for e in UserRecFields(self.exp) do
opts := self.exp.(e);
date := Date();
for t in transforms do
code := false;
t := SumsUnification(t, opts);
if useHash then
# For run methods that use hash tables
hentry := HashLookup(opts.hashTable, HashAsSPL(t));
if hentry = false or hentry = [] then
res := self.(runMethod)(e, HashAsSPL(t), opts);
if res[1] = [] then
Error("DP did not find any ruletrees for <t> (", t, ")");
fi;
self._saveHash(opts.hashFile, e, date, opts.hashTable);
hentry := HashLookup(opts.hashTable, HashAsSPL(t))[1];
hentry.searchTime := res[2];
searchTime := res[2];
if Length(res) >= 3 then
# _runDP doesn't return code
code := res[3];
fi;
else
hentry := hentry[1];
searchTime := -1;
fi;
hentry.spectree := ApplyRuleTreeSPL(hentry.ruletree, t, opts);
#NOTE: exhaustive search will not update cycles
cycles := When(IsBound(hentry.measured), hentry.measured, 0);
if not t in self.transforms then
Add(self.transforms, t);
fi;
ruletree := hentry.spectree;
else
# For run methods that don't use hash tables
randomRes := self.(runMethod)(e, HashAsSPL(t), opts);
ruletree := ApplyRuleTreeSPL(randomRes[1], t, opts);
searchTime := randomRes[2];
code := randomRes[3];
cycles := randomRes[4];
fi;
#HACK: It's a pain to do this in a cleaner way
if runMethod = "_pickRandomSave" then
return;
fi;
if self.generateCfiles then
#NOTE: Should also output stub.h as <filename.h>
compiler.CMEASURE_CURRENT_TREE := ruletree;
compiler.CMEASURE_LAST_CODE := false;
if (code=false or HashAsSPL(t)<>t) then
code := CodeRuleTree(ruletree, opts);
fi;
compiler.CMEASURE_LAST_CODE := code;
if (self.measureFinal and (runMethod <> "_runRandom"))then
cycles := CMeasure(code, opts);
fi;
if useHash then
hentry.measured := cycles;
fi;
opts.fileinfo := rec(
cycles := cycles,
flops := self.artcost(ruletree),
file := self.fileTransform(e,t,opts),
algorithm := ruletree
);
PrintTo(self.fileTransform(e,t,opts), PrintCode(self.funcTransform(e,t,opts), code, opts));
Unbind(opts.fileinfo);
fi;
if self.matrixVerify or self.fftwVerify or self.quickVerify then
if code=false then
code := CodeRuleTree(ruletree, opts);
fi;
if self.matrixVerify then
acc := self.verify(opts, ruletree, code);
elif self.fftwVerify then
acc := self.verifyfftw(opts, code);
else
acc := self.verifyquick(opts, code);
fi;
if IsBound(opts.outputVecStatistics) and opts.outputVecStatistics then
_seqPerfStatsGflopsAccCount(self.txtFileName(e, runMethod), t,
[self.artcost(ruletree), code.countedArithCost(opts.vector.isa.countrec)*opts.vector.vlen], cycles, searchTime, acc,
code.countOps(opts.vector.isa.countrec), opts.vector.isa.countrec);
else
_seqPerfStatsGflopsAcc(self.txtFileName(e, runMethod), t,
[self.artcost(ruletree), self.countedArithCost(code, opts)], cycles, searchTime, acc);
fi;
else
if (opts.verbosity>-1) then
if IsBound(opts.outputVecStatistics) and opts.outputVecStatistics then
_seqPerfStatsGflopsCount(self.txtFileName(e, runMethod), t,
[self.artcost(ruletree), code.countedArithCost(opts.vector.isa.countrec)*opts.vector.vlen], cycles, searchTime,
code.countOps(opts.vector.isa.countrec), opts.vector.isa.countrec);
else
_seqPerfStatsGflops(self.txtFileName(e, runMethod), t, self._freq(opts),
[self.artcost(ruletree), self.countedArithCost(code, opts)], cycles, searchTime);
fi;
fi;
fi;
od;
od;
self.ran := true;
end,
generateCode := (self, transforms, exp) >> self._generateCode(transforms, exp, self.exp.(exp)),
generateProductionCode := (self, transforms, exp) >> self._generateCode(transforms, exp, self.exp.(exp).production()),
generateAllCode := self >> DoForAll(UserRecFields(self.exp), exp ->
self._generateCode(self.exp.(exp).benchTransforms, exp, self.exp.(exp))),
generateAllProductionCode := self >> DoForAll(UserRecFields(self.exp), exp ->
self._generateCode(self.exp.(exp).benchTransforms, exp, self.exp.(exp).production())),
entries := (self, transforms, exp) >>
When(not IsBound(self.exp.(exp)), Error("No such experiment '",exp, "'"),
Map(transforms,
x -> let(lookup := MultiHashLookup(Concatenation([self.exp.(exp).hashTable], self.exp.(exp).baseHashes), x),
When(lookup = false, false,
#Error("Transform '", x, "' not found in the '", exp, "' table"),
lookup[1])))),
times := (self, transforms, exp) >> Map(self.entries(transforms, exp), x->x.measured),
alltimes := (self, transforms) >>
let(tr := When(IsList(transforms), transforms, [transforms]),
Map(UserRecFields(self.exp), e -> Concatenation([e], self.times(tr, e)))),
speedup := (self, transforms, baselineExp) >>
let(b := self.times(transforms, baselineExp),
Map(self.alltimes(transforms),
times -> Map([1..Length(times)],
i -> When(not IsInt(times[i]), times[i], times[i] / b[i-1])))),
# FLoating point Operations Per Cycle
flopcyc := (self, transforms) >>
    self.scaledTimes(transforms, e -> self.acost(e) / e.measured),
mflops := (self, transforms, mhz) >>
    self.scaledTimes(transforms, e -> self.acost(e) * mhz / e.measured),
scaledTimes := (self, transforms, scaleFunc) >>
let(tr := When(IsList(transforms), transforms, [transforms]),
Map(UserRecFields(self.exp),
e -> Concatenation([e], Map(self.entries(tr, e), scaleFunc)))),
# Use NonTerminal.normalizedArithCost() if it is there, otherwise return 0
acost := entry -> let(
t := entry.ruletree.node,
When(IsBound(t.normalizedArithCost), t.normalizedArithCost(), 0)
),
artcost := ruletree -> let(
t := ruletree.node,
When(IsBound(t.normalizedArithCost), t.normalizedArithCost(), 0)
),
countedArithCost := (self, c, opts) >>
When(IsRec(c) and
IsBound(c.countedArithCost) and
IsBound(opts.vector) and IsBound(opts.vector.isa) and IsBound(opts.vector.isa.countrec) and IsBound(opts.vector.vlen),
c.countedArithCost(opts.vector.isa.countrec)*opts.vector.vlen, 0),
matrixVerify := false,
setMatrixVerify := self >> CopyFields(self, rec(matrixVerify := true)),
fftwVerify := false,
quickVerify := false,
getOpts := self >> self.exp.(UserRecFields(self.exp)[1]),
_getResult := meth(arg)
local self, t, exp, lookup, rt;
self := arg[1];
exp := UserRecFields(self.exp)[1];
t := When(Length(arg) >= 2, self.exp.(exp).benchTransforms[arg[2]], self.exp.(exp).benchTransforms[1]);
lookup := MultiHashLookup(Concatenation([ self.exp.(exp).hashTable ], self.exp.(exp).baseHashes), HashAsSPL(t));
rt := When(lookup = false, false, lookup[1].ruletree);
return rec(opts := self.exp.(exp), t := t, rt:= rt);
end,
getResult := meth(arg)
local self, l, rt, opts, c;
self := arg[1];
l := When(Length(arg)>1, self._getResult(arg[2]), self._getResult());
rt := l.rt;
opts := l.opts;
c := CodeRuleTree(rt, opts);
return CopyFields(l, rec(c := c, opcount := c.countOps(opts.vector.isa.countrec)));
end,
build := function(arg)
local transforms, opts, bopts, name, dpr;
transforms := When(IsList(arg[1]), arg[1], [arg[1]]);
opts := When(Length(arg) >= 2, arg[2], SpiralDefaults);
opts.benchTransforms := transforms;
bopts := When(Length(arg) >= 3, arg[3], rec());
name := When(Length(arg) >= 4, arg[4], "spiral");
dpr := When(Length(arg) >= 5, arg[5], rec(verbosity := 0, timeBaseCases:=true));
return CopyFields(DPBench(rec((name) := opts), dpr), bopts);
end
));
# Class(NewDPBench, DPBench, rec(
# measureNtimes := 10,
# remeasure := (self, c, opts) >> List([1..self.measureNtimes],
# i -> CMeasure(c, opts)),
# runHooks := [
# meth(self, t, c, hentry, opts)
# hentry.remeasure := self.remeasure(c, opts);
# PrintLine("remeasure : ", hentry.remeasure);
# end
# ]
# ));
#opts := SpiralDefaults; opts2 := CopyFields(opts, rec(declareConstants := true));
#d := NewDPBench(rec(default:=opts,pullconst:=opts2), rec(timeBaseCases := false, verbosity := 0));
sampleDPBench := DPBench(rec(default := SpiralDefaults), rec(timeBaseCases := false, verbosity := 0));
sampleDPBench.transforms := [DFT(2), DFT(3), DFT(4)];
|
C @(#)reduce.f 20.3 2/13/96
subroutine reduce
include 'ipfinc/parametr.inc'
include 'ipfinc/smallp.inc'
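C     Reading of this routine (comments added, not original): drop
C     redundant slack variables (basis entries with index j > n) from
C     the current basis of the small LP, compacting the inverse matrix
C     inv and the basis arrays xbasis/ybasis, and decrementing
C     size/numslk accordingly.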
double precision si
marki = 0
markk = 0
if (numslk.eq.0) go to 80
it = size
do 60 k = 1,it
10 if (size.le.1) go to 70
j = xbasis(k)
if (j.le.n) go to 60
i = j - n
si = s(i)
if (si * xr(k).lt.0.0.or.si.eq.0.0.and.xr(k).ne.0.0) go to 60
if (k.eq.size) go to 30
do 20 l = 1,size
20 inv(k,l) = inv(size,l)
j = xbasis(size)
xbasis(k) = j
if (j.le.n) inbase(j) = k
30 slack(i) = xr(k)
xr(k) = xr(size)
if (neginv.eq.size) neginv = k
l = iseff(i)
iseff(i) = 0
if (l.eq.size) go to 50
do 40 kk = 1,size
40 inv(kk,l) = inv(kk,size)
yr(l) = yr(size)
ybasis(l) = ybasis(size)
i = ybasis(size)
iseff(i) = l
50 xbasis(size) = 0
ybasis(size) = 0
size = size - 1
size1 = size1 - 1
numslk = numslk - 1
go to 10
60 continue
70 if (size.lt.2.and.xbasis(1).gt.n) mark = 1
if (neginv.eq.0.and.markk.eq.0) go to 80
j = 0
if (neginv.ne.0) j = xbasis(neginv)
if (j.gt.n) markk = neginv
if (markk.eq.0) go to 80
marki = xbasis(markk) - n
80 return
end
|
[STATEMENT]
lemma clearjunk0_map_of_SomeD:
assumes a1: "fmlookup xs t = Some c" and "c \<noteq> 0"
shows "t \<in> fmdom' (clearjunk0 xs)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. t \<in> fmdom' (clearjunk0 xs)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
fmlookup xs t = Some c
c \<noteq> (0::'a)
goal (1 subgoal):
1. t \<in> fmdom' (clearjunk0 xs)
[PROOF STEP]
by (auto simp: clearjunk0_def fmdom'I) |
import LTS.defs
variable {M : LTS}
set_option pp.beta true
@[reducible]
def init_state (s : M.S) [has_coe M.S (formula M)] : formula M := ↑s
@[reducible]
def holds_over_transition (s : M.S) [has_coe M.S (formula M)] : formula M :=
◾(↑s ⇒ formula.next ↑s)
@[reducible]
def not_init (s : M.S) [has_coe M.S (formula M)] : formula M := !↑s
@[reducible]
def transitions_safe (s : M.S) [has_coe M.S (formula M)] : formula M :=
◾(!↑s ⇒ ! formula.next ↑s)
namespace absent
def globally (P : formula M) : formula M :=
◾ (!P)
def before (P R : formula M) : formula M :=
(◆R) ⇒ ((!P) U R)
def after (P Q : formula M) : formula M :=
◾(Q ⇒ ◾!P)
def between (P Q R : formula M) : formula M :=
◾((Q & ◆ R) ⇒ (!P U R))
def after_until (P Q R: formula M) : formula M :=
◾((Q & ◆ R) ⇒ (!P W R))
def strong_between (P Q R : formula M) : formula M :=
◾((Q & !R & ◆ R) ⇒ (!P U R))
def strong_after_until (P Q R: formula M) : formula M :=
◾((Q & !R) ⇒ ((!P) W R))
end absent
namespace exist
def globally (P : formula M) : formula M :=
◆P
def before (P R : formula M) : formula M :=
(!R) W (P & !R)
def after (P Q : formula M) : formula M :=
(◾(!Q)) ⅋ ◆(Q & ◆ P)
def between (P Q R : formula M) : formula M :=
◾((Q & !R) ⇒ ((!R) W (P & !R)))
def after_until (P Q R: formula M) : formula M :=
◾((Q & !R) ⇒ (!R U (P & !R)))
end exist
namespace universal
def globally (P : formula M) : formula M :=
◾ P
def before (P R : formula M) : formula M :=
(◆R) ⇒ (P U R)
def after (P Q : formula M) : formula M :=
◾(Q ⇒ ◾P)
def between (P Q R : formula M) : formula M :=
◾((Q & !R & ◆R) ⇒ (P U R))
def after_until (P Q R: formula M) : formula M :=
◾((Q & !R) ⇒ (P W R))
end universal
namespace precedes
def globally (S P : formula M) : formula M :=
(!P) W S
def before (S P R : formula M) : formula M :=
(◆R) ⇒ ((!P) U (S ⅋ R))
def after (S P Q : formula M) : formula M :=
(◾!Q) ⅋ ◆(Q & ((!P) W S))
def between (S P Q R : formula M) : formula M :=
◾((Q & (!R) & ◆R) ⇒ (!P U (S ⅋ R)))
def after_until (S P Q R: formula M) : formula M :=
◾((Q & !R) ⇒ ((!P) W (S ⅋ R)))
end precedes
namespace responds
def globally (P S : formula M) : formula M :=
◾(P ⇒ ◆S)
def before (S P R : formula M) : formula M :=
(◆R) ⇒ ((P ⇒ (!R U (S & !R))) U R)
def after (S P Q : formula M) : formula M :=
◾(Q ⇒ ◾(P ⇒ ◆S))
def between (S P Q R : formula M) : formula M :=
◾((Q & !R & ◆R) ⇒ (P ⇒ (!R U (S & !R))) U R)
def after_until (S P Q R: formula M) : formula M :=
◾((Q & !R) ⇒ ((P ⇒ (!R U (S & !R))) W R))
end responds
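-- A sketch (added example): instantiating the response pattern. This formula
-- states that, globally, every P-state is eventually followed by an S-state.
def response_example (P S : formula M) : formula M := responds.globally P S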
lemma sat_em (P : formula M) (π : path M) : sat P π → ¬sat ( !P) π := by {intros, rw sat, tidy}
|
@debug "Loading DataFrames support into Gadfly"
using .DataFrames
function meltdata(U::AbstractDataFrame, colgroups::Vector{Col.GroupedColumn})
um, un = size(U)
# Figure out the size of the new melted matrix
allcolumns = Set{Symbol}(propertynames(U))
vm = um
colidxs = [colgroup.columns===nothing ? collect(allcolumns) : colgroup.columns for colgroup in colgroups]
vm *= prod(length.(colidxs))
grouped_columns = reduce(vcat, colidxs)
ungrouped_columns = setdiff(allcolumns, grouped_columns)
vn = length(colgroups) + length(ungrouped_columns)
V = AbstractArray[]
vnames = Symbol[]
colmap = Dict{Any, Int}()
eltypd = Dict{Symbol, DataType}(k=>v for (k,v) in zip(propertynames(U), eltype.(eachcol(U))))
# allocate vectors for grouped columns
for (j, (colgroup, colidx)) in enumerate(zip(colgroups, colidxs))
eltyp = promote_type(getindex.([eltypd], colidx)...)
push!(V, eltyp == Vector ? Array{eltyp}(undef, vm) : Array{Union{Nothing,eltyp}}(undef, vm))
name = gensym()
push!(vnames, name)
colmap[colgroup] = j
end
# allocate vectors for ungrouped columns
for (j, col) in enumerate(ungrouped_columns)
push!(V, Array{eltypd[col]}(undef, vm))
colmap[col] = j + length(colgroups)
push!(vnames, col)
end
# Indicator columns for each colgroup
col_indicators = Array{Symbol}(undef, vm, length(colgroups))
row_indicators = Array{Int}(undef, vm, length(colgroups))
vi = 1
for ui in 1:um
for colidx in product(colidxs...)
# copy grouped columns
for (vj, uj) in enumerate(colidx)
V[vj][vi] = U[ui, uj]
col_indicators[vi, vj] = uj
row_indicators[vi, vj] = ui
end
# copy ungrouped columns
for (vj, uj) in enumerate(ungrouped_columns)
V[vj + length(colgroups)][vi] = U[ui, uj]
end
vi += 1
end
end
df = DataFrame(; collect(zip(vnames, V))...)
return MeltedData(U, df, row_indicators, col_indicators, colmap)
end
evalmapping(source::MeltedData{T}, arg::Col.GroupedColumnValue) where T<:AbstractDataFrame =
source.melted_data[:,source.colmap[Col.GroupedColumn(arg.columns)]]
evalmapping(source::AbstractDataFrame, arg::Symbol) = source[:,arg]
evalmapping(source::AbstractDataFrame, arg::AbstractString) = evalmapping(source, Symbol(arg))
evalmapping(source::AbstractDataFrame, arg::Integer) = source[:,arg]
evalmapping(source::AbstractDataFrame, arg::Expr) = with(source, arg)
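# A hypothetical usage sketch (added, not from the package):
# U = DataFrame(t=1:3, a=rand(3), b=rand(3))
# md = meltdata(U, [Col.GroupedColumn([:a, :b])])  # melt :a/:b into one column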
|
lemma real_le_affinity: "0 < m \<Longrightarrow> y \<le> m * x + c \<longleftrightarrow> inverse m * y + - (c / m) \<le> x" for m :: "'a::linordered_field" |
library(rmarkdown)
library(knitr)
rmdfiles <- Sys.glob("~/repos/dnamalci/tests/*.rmd")
sapply(rmdfiles, function(x) render(x, output_format="all",
output_dir = "~/repos/dnamalci"))
# sapply(rmdfiles, purl)
|
[STATEMENT]
lemma extend_mono: "F \<le> G ==> extend h F \<le> extend h G"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. F \<le> G \<Longrightarrow> extend h F \<le> extend h G
[PROOF STEP]
by (force simp add: component_eq_subset) |
[STATEMENT]
lemma (in quorum) quorum_non_empty: "Q \<in> Quorum \<Longrightarrow> Q \<noteq> {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Q \<in> Quorum \<Longrightarrow> Q \<noteq> {}
[PROOF STEP]
by (auto dest: qintersect) |
Formal statement is: lemma eventually_at_infinity: "eventually P at_infinity \<longleftrightarrow> (\<exists>b. \<forall>x. b \<le> norm x \<longrightarrow> P x)" Informal statement is: A predicate $P$ holds eventually at infinity if and only if there exists a real number $b$ such that $P$ holds for all $x$ with $\|x\| \geq b$. |
#!/usr/bin/env python
import sys
import argparse
import pandas as pd
import numpy as np
from scipy.optimize import fsolve
# We don't know n or k; we do know x, the number of UMIs that collide between
# genes, suppI_g, the support of I for each gene, and suppI_c, the support of
# I for the cell.
def solve(n, *data):
x, suppI_g, suppI_c = data
s = 0
for i in suppI_g:
s += i/(n-i)
return suppI_c - (n-suppI_c)*s - x
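# Note (added): fsolve() below finds the root n of
#   f(n) = suppI_c - (n - suppI_c) * sum_g suppI_g / (n - suppI_g) - x,
# i.e. the UMI pool size consistent with the observed collision count x.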
def estNk(ubug):
Nest = []
umipercell = ubug.groupby("bcs")[["umi"]].count()
umipercellpergene = ubug.groupby(["bcs", "gene"])[["umi"]].count()
counter = 0
for cellwewant in umipercell.umi.nlargest(50).keys():
if counter%10==0:
print(counter)
cellbug = ubug[ubug["bcs"] == cellwewant]
suppI_g = cellbug.groupby(["bcs", "gene"])["umi"].nunique().values
suppI_c = cellbug["umi"].nunique()
x = cellbug.groupby(["bcs", "umi"])["gene"].nunique()
x = x[x>1].shape[0]
sol = fsolve(solve, 250000, args=(x, suppI_g, suppI_c))
Nest.append(int(sol[0]))
counter += 1
return Nest
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="estimate N, number of umis from which we are sampling.")
parser.add_argument("--b", help="bug file in text format")
parser.add_argument("outdir", type=str, help="output directory for Nest.txt")
args = parser.parse_args()
print("Loading bug file..")
bug = pd.read_csv(args.b, header=None, names=["bcs", "umi", "gene", "mul"], sep="\t")
ubug = bug[bug["gene"].map(lambda l: "," not in str(l))]
Nest = estNk(ubug)
Nest = ",".join(map(str, Nest))
with open(args.outdir + "Nest.txt", "w") as f:
f.write("{}".format(Nest))
|
module sorting_with_proof
import Data.Vect
import Data.Fin
||| Vectors of Natural numbers
total
NatVect : Nat -> Type
NatVect n = Vect n Nat
||| The type Fin n -> Fin n
total
F_Fin : Nat -> Type
F_Fin n = (Fin n) -> (Fin n)
||| Identity function
total
Idt : (t : Type) -> t -> t
Idt s a = a
||| The type of permutation/bijection of {1,...,n}.
||| Notice that for finite sets a one-sided inverse is enough.
||| Also, I have a strong feeling that, using univalence, the last part of the
||| definition is equivalent to saying that (g . f) and Idt are equal.
total
Perm : Nat -> Type
Perm n = ( f : F_Fin n ** ( g : F_Fin n ** ((a : (Fin n)) -> ((g (f a)) = a))))
||| Second definition of perm. I have a strong feeling that, using univalence,
||| this is equivalent to the first definition.
total
Perm2 : Nat -> Type
Perm2 n = ( f : F_Fin n ** ( g : F_Fin n ** ((g . f) = (Idt (Fin n)))))
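||| A minimal inhabitant of Perm, as a sanity check (added example, a sketch):
||| the identity permutation, with the identity as its own left inverse.
total
idPerm : (n : Nat) -> Perm n
idPerm n = (Idt (Fin n) ** (Idt (Fin n) ** (\a => Refl)))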
data Finite : Nat -> Type where
FinZ : (k : Nat) -> (Finite (S k))
FinS : (k : Nat) -> (Finite k) -> (Finite (S k))
total
Finite_to_Fin : (k : Nat) -> (Finite k) -> (Fin k)
Finite_to_Fin Z a impossible
Finite_to_Fin (S k) (FinZ k) = FZ
Finite_to_Fin (S k) (FinS k nm) = FS (Finite_to_Fin k nm)
total
Fin_to_Finite : (k : Nat) -> (Fin k) -> (Finite k)
Fin_to_Finite Z a impossible
Fin_to_Finite (S k) FZ = FinZ k
Fin_to_Finite (S k) (FS l) = FinS k (Fin_to_Finite k l)
||| Predecessor function for finite
total
predFinite : (k : Nat) -> (Finite k) -> (Finite k)
predFinite Z a impossible
predFinite (S k) (FinZ k) = FinZ k
predFinite (S (S k)) (FinS (S k) (FinZ k)) = FinZ (S k)
predFinite (S (S k)) (FinS (S k) l) = FinS (S k) (predFinite (S k) l)
||| Predecessor function for fin
total
predFin : (n : Nat) -> (Fin n) -> (Fin n)
predFin n a = Finite_to_Fin n ( predFinite n (Fin_to_Finite n a))
||| Type of proofs that a vector is sorted in increasing order.
||| Note that the predecessor function takes care of the first index
total
SortProof : (n : Nat) -> (NatVect n) -> (Fin n) -> Type
SortProof Z v a impossible
SortProof (S k) v l = LTE (Vect.index (pred l) v) (Vect.index l v)
||| Type of the sorted vectors.
SortedVect : Type
SortedVect = (n : Nat ** (v : (NatVect n) ** ((k : Fin n) -> (SortProof n v k))))
|
(* Author: Bernhard Stöckl *)
theory IKKBZ_Examples
imports IKKBZ_Optimality
begin
section \<open>Examples of Applying IKKBZ\<close>
subsection \<open>Computing Contributing Selectivity without Lists\<close>
context directed_tree
begin
definition contr_sel :: "'a selectivity \<Rightarrow> 'a \<Rightarrow> real" where
"contr_sel sel y = (if \<exists>x. x \<rightarrow>\<^bsub>T\<^esub> y then sel (THE x. x \<rightarrow>\<^bsub>T\<^esub> y) y else 1)"
definition tree_sel :: "'a selectivity \<Rightarrow> bool" where
"tree_sel sel = (\<forall>x y. \<not>(x \<rightarrow>\<^bsub>T\<^esub> y \<or> y \<rightarrow>\<^bsub>T\<^esub> x) \<longrightarrow> sel x y = 1)"
lemma contr_sel_gt0: "sel_reasonable sf \<Longrightarrow> contr_sel sf x > 0"
unfolding contr_sel_def sel_reasonable_def by simp
lemma contr_sel_le1: "sel_reasonable sf \<Longrightarrow> contr_sel sf x \<le> 1"
unfolding contr_sel_def sel_reasonable_def by simp
lemma nempty_if_not_fwd_conc: "\<not>forward_arcs (y#xs) \<Longrightarrow> xs \<noteq> []"
by auto
lemma len_gt1_if_not_fwd_conc: "\<not>forward_arcs (y#xs) \<Longrightarrow> length (y#xs) > 1"
by auto
lemma two_elems_if_not_fwd_conc: "\<not>forward_arcs (y#xs) \<Longrightarrow> \<exists>a b cs. a # b # cs = y#xs"
by (metis forward_arcs.cases forward_arcs.simps(2))
lemma hd_reach_all_if_nfwd_app_fwd:
"\<lbrakk>\<not>forward_arcs (y#xs); forward_arcs (y#ys@xs); x \<in> set (y#ys@xs)\<rbrakk>
\<Longrightarrow> hd (rev (y#ys@xs)) \<rightarrow>\<^sup>*\<^bsub>T\<^esub> x"
using hd_reach_all_forward'[of "rev (y#ys@xs)"] len_gt1_if_not_fwd_conc forward_arcs_alt by auto
lemma hd_not_y_if_if_nfwd_app_fwd:
assumes "\<not>forward_arcs (y#xs)" and "forward_arcs (y#ys@xs)"
shows "hd (rev (y#ys@xs)) \<noteq> y"
proof -
obtain a where a_def: "a \<in> set (ys@xs)" "a \<rightarrow>\<^bsub>T\<^esub> y"
by (metis assms Nil_is_append_conv forward_arcs.simps(3) neq_Nil_conv)
then have "hd (rev (y#ys@xs)) \<rightarrow>\<^sup>*\<^bsub>T\<^esub> a" using hd_reach_all_if_nfwd_app_fwd[OF assms] by simp
then show ?thesis
using a_def(2) reachable1_not_reverse
by (metis loopfree.adj_not_same reachable_adjI reachable_neq_reachable1)
qed
lemma hd_reach1_y_if_nfwd_app_fwd:
"\<lbrakk>\<not>forward_arcs (y#xs); forward_arcs (y#ys@xs)\<rbrakk> \<Longrightarrow> hd (rev (y#ys@xs)) \<rightarrow>\<^sup>+\<^bsub>T\<^esub> y"
using hd_not_y_if_if_nfwd_app_fwd hd_reach_all_if_nfwd_app_fwd by auto
lemma not_fwd_if_skip1:
"\<lbrakk>\<not> forward_arcs (y#x#x'#xs); forward_arcs (x#x'#xs)\<rbrakk> \<Longrightarrow> \<not> forward_arcs (y#x'#xs)"
by auto
lemma fwd_arcs_conc_nlast_elem:
assumes "forward_arcs xs" and "y \<in> set xs" and "y \<noteq> last xs"
shows "forward_arcs (y#xs)"
proof -
obtain as bs where as_def: "as @ y # bs = xs" "bs \<noteq> []"
using split_list_not_last[OF assms(2,3)] by blast
then have "forward_arcs (y#bs)" using assms(1) forward_arcs_split by blast
then obtain x where x_def: "x \<in> set bs" "x \<rightarrow>\<^bsub>T\<^esub> y"
using as_def(2) by (force intro: list.exhaust)
then have "x \<in> set xs" using as_def(1) by auto
then show ?thesis using assms(1) x_def(2) forward_arcs.elims(3) by blast
qed
lemma fwd_app_nhead_elem: "\<lbrakk>forward xs; y \<in> set xs; y \<noteq> hd xs\<rbrakk> \<Longrightarrow> forward (xs@[y])"
using fwd_arcs_conc_nlast_elem forward_arcs_alt by (simp add: last_rev)
lemma hd_last_not_fwd_arcs: "\<not>forward_arcs (x#xs@[x])"
proof
assume asm: "forward_arcs (x#xs@[x])"
then obtain y where y_def: "y \<in> set (xs@[x])" "y \<rightarrow>\<^bsub>T\<^esub> x"
by (metis append_is_Nil_conv forward_arcs.simps(3) no_back_arcs.cases)
then have hd_in_verts: "hd (rev (xs @ [x])) \<in> verts T" by auto
have "forward_arcs (xs@[x])" using asm forward_arcs_split[of "[x]" "xs@[x]"] by simp
then have "x \<rightarrow>\<^sup>*\<^bsub>T\<^esub> y" using hd_reach_all_forward[OF hd_in_verts] y_def forward_arcs_alt by simp
then show False using y_def(2) reachable1_not_reverse by auto
qed
lemma hd_not_fwd_arcs: "\<not>forward_arcs (ys@x#xs@[x])"
using hd_last_not_fwd_arcs forward_arcs_split by blast
lemma hd_last_not_fwd: "\<not>forward (x#xs@[x])"
using hd_last_not_fwd_arcs forward_arcs_alt by simp
lemma hd_not_fwd: "\<not>forward (x#xs@[x]@ys)"
using hd_not_fwd_arcs forward_arcs_alt by simp
lemma y_not_dom_if_nfwd_app_fwd:
"\<lbrakk>\<not>forward_arcs (y#xs); forward_arcs (y#ys@xs); x \<in> set xs\<rbrakk> \<Longrightarrow> \<not> x \<rightarrow>\<^bsub>T\<^esub> y"
using forward_arcs_split[of "y#ys" xs] two_elems_if_not_fwd_conc by force
lemma not_y_dom_if_nfwd_app_fwd:
"\<lbrakk>\<not>forward_arcs (y#xs); forward_arcs (y#ys@xs); x \<in> set xs\<rbrakk> \<Longrightarrow> \<not> y \<rightarrow>\<^bsub>T\<^esub> x"
by (smt (verit, ccfv_threshold) append_is_Nil_conv forward_arcs_alt' forward_arcs_split
forward_cons fwd_app_nhead_elem hd_append hd_reach1_y_if_nfwd_app_fwd
hd_reachable1_from_outside' list.distinct(1) reachable1_not_reverse reachable_adjI
reachable_neq_reachable1 rev.simps(2) rev_append set_rev split_list)
lemma list_sel_aux'1_if_tree_sel_nfwd:
"\<lbrakk>tree_sel sel; \<not>forward_arcs (y#xs); forward_arcs (y#ys@xs)\<rbrakk>
\<Longrightarrow> list_sel_aux' sel xs y = 1"
proof(induction xs arbitrary: ys rule: forward_arcs.induct)
case (2 x)
then show ?case using not_y_dom_if_nfwd_app_fwd[OF 2(2,3)] by (auto simp: tree_sel_def)
next
case (3 x x' xs)
then have "forward_arcs (x # x' # xs)"
using forward_arcs_split[of "y#ys" "x#x'#xs"] by simp
then have "\<not> forward_arcs (y # x' # xs)" using not_fwd_if_skip1 "3.prems"(2) by blast
moreover have "forward_arcs (y # (ys@[x]) @ x' # xs)" using 3 by simp
ultimately have "list_sel_aux' sel (x' # xs) y = 1" using "3.IH"[OF "3.prems"(1)] by blast
then show ?case
using "3.prems"(1) y_not_dom_if_nfwd_app_fwd[OF "3.prems"(2,3)]
not_y_dom_if_nfwd_app_fwd[OF "3.prems"(2,3)]
by (simp add: tree_sel_def)
qed(simp)
lemma contr_sel_eq_list_sel_aux'_if_tree_sel:
"\<lbrakk>tree_sel sel; distinct (y#xs); forward_arcs (y#xs); xs \<noteq> []\<rbrakk>
\<Longrightarrow> contr_sel sel y = list_sel_aux' sel xs y"
proof(induction xs rule: forward_arcs.induct)
case (2 x)
then have "x \<rightarrow>\<^bsub>T\<^esub> y" by simp
then have "(THE x. x \<rightarrow>\<^bsub>T\<^esub> y) = x" using two_in_arcs_contr by blast
then show ?case using \<open>x \<rightarrow>\<^bsub>T\<^esub> y\<close> unfolding contr_sel_def by auto
next
case (3 x x' xs)
then show ?case
proof(cases "x \<rightarrow>\<^bsub>T\<^esub> y")
case True
then have "(THE x. x \<rightarrow>\<^bsub>T\<^esub> y) = x" using two_in_arcs_contr by blast
then have contr_sel: "contr_sel sel y = sel x y" using True unfolding contr_sel_def by auto
have "\<not>forward_arcs (y#x'#xs)" using True "3.prems"(2) two_in_arcs_contr by auto
then have "list_sel_aux' sel (x'#xs) y = 1"
using list_sel_aux'1_if_tree_sel_nfwd[of sel y "x'#xs" "[x]"] "3.prems"(1,3) by auto
then show ?thesis using contr_sel by simp
next
case False
have "\<not>y \<rightarrow>\<^bsub>T\<^esub> x"
using "3.prems"(2,3) forward_arcs_alt' no_back_arc_if_fwd_dstct
by (metis distinct_rev list.set_intros(1) rev.simps(2) set_rev)
then have "sel x y = 1" using "3.prems"(1) False unfolding tree_sel_def by blast
then show ?thesis using 3 False by simp
qed
qed(simp)
corollary contr_sel_eq_list_sel_aux'_if_tree_sel':
"\<lbrakk>tree_sel sel; distinct (xs@[y]); forward (xs@[y]); xs \<noteq> []\<rbrakk>
\<Longrightarrow> contr_sel sel y = list_sel_aux' sel (rev xs) y"
by (simp add: contr_sel_eq_list_sel_aux'_if_tree_sel forward_arcs_alt)
corollary contr_sel_eq_list_sel_aux'_if_tree_sel'':
"\<lbrakk>tree_sel sel; distinct (xs@[y]); forward (xs@[y]); xs \<noteq> []\<rbrakk>
\<Longrightarrow> contr_sel sel y = list_sel_aux' sel xs y"
by (simp add: contr_sel_eq_list_sel_aux'_if_tree_sel' mset_x_eq_list_sel_aux'_eq[of "rev xs"])
lemma contr_sel_root[simp]: "contr_sel sel root = 1"
by (auto simp: contr_sel_def dest: dominated_not_root)
lemma contr_sel_notvert[simp]: "v \<notin> verts T \<Longrightarrow> contr_sel sel v = 1"
by (auto simp: contr_sel_def)
lemma hd_reach_all_forward_verts:
"\<lbrakk>forward xs; set xs = verts T; v \<in> verts T\<rbrakk> \<Longrightarrow> hd xs \<rightarrow>\<^sup>*\<^bsub>T\<^esub> v"
using hd_reach_all_forward list.set_sel(1)[of xs] by force
lemma hd_eq_root_if_forward_verts: "\<lbrakk>forward xs; set xs = verts T\<rbrakk> \<Longrightarrow> hd xs = root"
using hd_reach_all_forward_verts root_if_all_reach by simp
lemma contr_sel_eq_ldeep_s_if_tree_dst_fwd_verts:
assumes "tree_sel sel" and "distinct xs" and "forward xs" and "set xs = verts T"
shows "contr_sel sel y = ldeep_s sel (rev xs) y"
proof -
have hd_root: "hd xs = root" using hd_eq_root_if_forward_verts assms(3,4) by blast
consider "y \<in> set xs" "y = root" | "y \<in> set xs" "y \<noteq> root" | "y \<notin> set xs" by blast
then show ?thesis
proof(cases)
case 1
then show ?thesis using hd_root ldeep_s_revhd1_if_distinct assms(2) by auto
next
case 2
then obtain as bs where as_def: "as @ y # bs = xs" using split_list[of y] by fastforce
then have "forward (as@[y])" using assms(3) forward_split[of "as@[y]"] by auto
moreover have "distinct (as@[y])" using assms(2) as_def by auto
moreover have "as \<noteq> []" using 2 hd_root as_def by fastforce
ultimately have "contr_sel sel y = list_sel_aux' sel (rev as) y"
using contr_sel_eq_list_sel_aux'_if_tree_sel'[OF assms(1)] by blast
then show ?thesis using as_def distinct_ldeep_s_eq_aux'[of "rev xs"] assms(2) by auto
next
case 3
then have "contr_sel sel y = 1" using assms(4) by simp
then show ?thesis using 3 ldeep_s_1_if_nelem set_rev by fastforce
qed
qed
corollary contr_sel_eq_ldeep_s_if_tree_dst_fwd_verts':
"\<lbrakk>tree_sel sel; distinct xs; forward xs; set xs = verts T\<rbrakk>
\<Longrightarrow> contr_sel sel = ldeep_s sel (rev xs)"
using contr_sel_eq_ldeep_s_if_tree_dst_fwd_verts by blast
lemma add_leaf_forward_arcs_preserv:
"\<lbrakk>a \<notin> arcs T; u \<in> verts T; v \<notin> verts T; forward_arcs xs\<rbrakk>
\<Longrightarrow> directed_tree.forward_arcs \<lparr>verts = verts T \<union> {v}, arcs = arcs T \<union> {a},
tail = (tail T)(a := u), head = (head T)(a := v)\<rparr> xs"
proof(induction xs rule: forward_arcs.induct)
case 1
then show ?case using directed_tree.forward_arcs.simps(1) add_leaf_dir_tree by fast
next
case (2 x)
then show ?case using directed_tree.forward_arcs.simps(2) add_leaf_dir_tree by fast
next
case (3 x y xs)
let ?T = "\<lparr>verts = verts T \<union> {v}, arcs = arcs T \<union> {a},
tail = (tail T)(a := u), head = (head T)(a := v)\<rparr>"
interpret T: directed_tree ?T root using add_leaf_dir_tree[OF "3.prems"(1-3)] by blast
have "T.forward_arcs (y # xs)" using 3 by fastforce
then show ?case
using T.forward_arcs.simps(3)[of x y xs] add_leaf_dom_preserv "3.prems"(1,4) by fastforce
qed
end
subsection \<open>Contributing Selectivity Satisfies ASI Property\<close>
context finite_directed_tree
begin
lemma dst_fwd_arcs_all_verts_ex: "\<exists>xs. forward_arcs xs \<and> distinct xs \<and> set xs = verts T"
using finite_verts proof(induction rule: finite_directed_tree_induct)
case (single_vert t h root)
then show ?case using directed_tree.forward_arcs.simps(2)[OF dir_tree_single] by fastforce
next
case (add_leaf T' V A t h u root a v)
define T where "T \<equiv> \<lparr>verts = V \<union> {v}, arcs = A \<union> {a}, tail = t(a := u), head = h(a := v)\<rparr>"
interpret T': directed_tree T' root using add_leaf.hyps(3) by blast
interpret T: directed_tree T root using add_leaf.hyps(1,4-6) T'.add_leaf_dir_tree T_def by simp
obtain xs where xs_def: "T'.forward_arcs xs" "distinct xs" "set xs = verts T'"
using add_leaf.IH by blast
then have "T.forward_arcs xs"
using T'.add_leaf_forward_arcs_preserv add_leaf.hyps(1,4,5,6) T_def by simp
moreover have "\<exists>y\<in>set xs. y \<rightarrow>\<^bsub>T\<^esub> v"
using add_leaf.hyps(1,4) T_def xs_def(3) unfolding arcs_ends_def arc_to_ends_def by force
ultimately have "T.forward_arcs (v#xs)" using T.forward_arcs.elims(3) by blast
then show ?case using xs_def(2,3) add_leaf.hyps(1,5) T_def by auto
qed
lemma dst_fwd_all_verts_ex: "\<exists>xs. forward xs \<and> distinct xs \<and> set xs = verts T"
using dst_fwd_arcs_all_verts_ex forward_arcs_alt'[symmetric] by auto
lemma c_list_asi_if_tree_sel:
fixes sf cf h r
defines "rank \<equiv> (\<lambda>l. (ldeep_T (contr_sel sf) cf l - 1) / c_list (contr_sel sf) cf h r l)"
assumes "tree_sel sf"
and "sel_reasonable sf"
and "\<forall>x. cf x > 0"
and "\<forall>x. h x > 0"
shows "asi rank r (c_list (contr_sel sf) cf h r)"
using c_list_asi assms contr_sel_eq_ldeep_s_if_tree_dst_fwd_verts' dst_fwd_all_verts_ex
by fastforce
end
context tree_query_graph
begin
abbreviation sel_r :: "'a \<Rightarrow> 'a \<Rightarrow> real" where
"sel_r r \<equiv> directed_tree.contr_sel (dir_tree_r r) match_sel"
text \<open>
 Since cf is only required to be positive on the verts of G, we map all other arguments to 1.
\<close>
definition cf' :: "'a \<Rightarrow> real" where
"cf' x = (if x \<in> verts G then cf x else 1)"
definition c_list_r :: "('a \<Rightarrow> real) \<Rightarrow> 'a \<Rightarrow> 'a list \<Rightarrow> real" where
"c_list_r h r = c_list (sel_r r) cf' h r"
definition rank_r :: "('a \<Rightarrow> real) \<Rightarrow> 'a \<Rightarrow> 'a list \<Rightarrow> real" where
"rank_r h r xs = (ldeep_T (sel_r r) cf' xs - 1) / c_list_r h r xs"
lemma dom_in_dir_tree_r:
assumes "r \<in> verts G" and "x \<rightarrow>\<^bsub>G\<^esub> y"
shows "x \<rightarrow>\<^bsub>dir_tree_r r\<^esub> y \<or> y \<rightarrow>\<^bsub>dir_tree_r r\<^esub> x"
proof -
obtain e1 where e1_def: "e1 \<in> arcs G" "tail G e1 = x" "head G e1 = y"
using assms(2) unfolding arcs_ends_def arc_to_ends_def by blast
then show ?thesis
proof(cases "e1 \<in> arcs (dir_tree_r r)")
case True
moreover have "tail (dir_tree_r r) e1 = x"
using e1_def(2) tail_dir_tree_r_eq[OF assms(1)] by blast
moreover have "head (dir_tree_r r) e1 = y"
using e1_def(3) head_dir_tree_r_eq[OF assms(1)] by blast
ultimately show ?thesis using e1_def(1) unfolding arcs_ends_def arc_to_ends_def by blast
next
case False
then obtain e2 where e2_def: "e2 \<in> arcs (dir_tree_r r)" "tail G e2 = y" "head G e2 = x"
using arcs_compl_un_eq_arcs[OF assms(1)] e1_def by force
have "tail (dir_tree_r r) e2 = y"
using e2_def(2) tail_dir_tree_r_eq[OF assms(1)] by blast
moreover have "head (dir_tree_r r) e2 = x"
using e2_def(3) head_dir_tree_r_eq[OF assms(1)] by blast
ultimately show ?thesis using e2_def(1) unfolding arcs_ends_def arc_to_ends_def by blast
qed
qed
lemma dom_in_dir_tree_r_iff_aux:
"r \<in> verts G \<Longrightarrow> (x \<rightarrow>\<^bsub>dir_tree_r r\<^esub> y \<or> y \<rightarrow>\<^bsub>dir_tree_r r\<^esub> x) \<longleftrightarrow> (x \<rightarrow>\<^bsub>G\<^esub> y \<or> y \<rightarrow>\<^bsub>G\<^esub> x)"
using dir_tree_r_dom_in_G dom_in_dir_tree_r by blast
lemma dom_in_dir_tree_r_iff:
"r \<in> verts G \<Longrightarrow> (x \<rightarrow>\<^bsub>dir_tree_r r\<^esub> y \<or> y \<rightarrow>\<^bsub>dir_tree_r r\<^esub> x) \<longleftrightarrow> x \<rightarrow>\<^bsub>G\<^esub> y"
using dom_in_dir_tree_r_iff_aux dominates_sym by blast
lemma dir_tree_sel[intro]: "r \<in> verts G \<Longrightarrow> directed_tree.tree_sel (dir_tree_r r) match_sel"
unfolding directed_tree.tree_sel_def[OF directed_tree_r]
using match_sel1_if_no_arc dom_in_dir_tree_r_iff by blast
lemma pos_cards'[intro!]: "\<forall>x. cf' x > 0"
unfolding cf'_def using pos_cards by simp
theorem c_list_asi: "\<lbrakk>r \<in> verts G; \<forall>x. h x > 0\<rbrakk> \<Longrightarrow> asi (rank_r h r) r (c_list_r h r)"
using finite_directed_tree.c_list_asi_if_tree_sel[OF fin_directed_tree_r]
unfolding c_list_r_def rank_r_def by blast
subsection \<open>Applying IKKBZ\<close>
lemma cf'_simp: "x \<in> verts G \<Longrightarrow> cf' x = cf x"
unfolding cf'_def by simp
lemma ldeep_T_cf'_eq: "set xs \<subseteq> verts G \<Longrightarrow> ldeep_T sf cf' xs = ldeep_T sf cf xs"
using ldeep_T_eq_if_cf_eq[of xs] cf'_simp by blast
lemma clist_cf'_eq: "set xs \<subseteq> verts G \<Longrightarrow> c_list sf cf' h r xs = c_list sf cf h r xs"
by (simp add: clist_eq_if_cf_eq ldeep_T_cf'_eq)
lemma card_cf'_eq: "matching_rels t \<Longrightarrow> card cf' f t = card cf f t"
by (induction cf' f t rule: card.induct) (auto simp: matching_rels_def cf'_simp)
lemma c_IKKBZ_cf'_eq: "matching_rels t \<Longrightarrow> c_IKKBZ h cf' sf t = c_IKKBZ h cf sf t"
by (induction h cf' sf t rule: c_IKKBZ.induct) (auto simp: card_cf'_eq cf'_simp matching_rels_def)
lemma c_IKKBZ_cf'_eq': "valid_tree t \<Longrightarrow> c_IKKBZ h cf' sf t = c_IKKBZ h cf sf t"
by (simp add: c_IKKBZ_cf'_eq matching_rels_def valid_tree_def)
lemma c_out_cf'_eq: "matching_rels t \<Longrightarrow> c_out cf' sf t = c_out cf sf t"
by (induction cf' sf t rule: c_out.induct) (auto simp: card_cf'_eq cf'_simp matching_rels_def)
lemma c_out_cf'_eq': "valid_tree t \<Longrightarrow> c_out cf' sf t = c_out cf sf t"
by (simp add: c_out_cf'_eq matching_rels_def valid_tree_def)
lemma joinTree_card'_pos[intro]: "pos_rel_cards cf' t"
by (induction t) (auto simp: pos_cards' pos_rel_cards_def)
lemma match_reasonable_cards'[intro]: "reasonable_cards cf' match_sel t"
using pos_sel_reason_impl_reason by blast
lemma sel_r_gt0: "r \<in> verts G \<Longrightarrow> sel_r r x > 0"
using directed_tree.contr_sel_gt0[OF directed_tree_r] by blast
lemma sel_r_le1: "r \<in> verts G \<Longrightarrow> sel_r r x \<le> 1"
using directed_tree.contr_sel_le1[OF directed_tree_r] by blast
lemma sel_r_eq_ldeep_s_if_dst_fwd_verts:
"\<lbrakk>r \<in> verts G; distinct xs; directed_tree.forward (dir_tree_r r) xs; set xs = verts G\<rbrakk>
\<Longrightarrow> sel_r r = ldeep_s match_sel (rev xs)"
using directed_tree.contr_sel_eq_ldeep_s_if_tree_dst_fwd_verts'[OF directed_tree_r]
verts_dir_tree_r_eq
by blast
lemma sel_r_eq_ldeep_s_if_valid_fwd:
"\<lbrakk>r \<in> verts G; valid_tree t; directed_tree.forward (dir_tree_r r) (inorder t)\<rbrakk>
\<Longrightarrow> sel_r r = ldeep_s match_sel (revorder t)"
unfolding valid_tree_def distinct_relations_def inorder_eq_set[symmetric] revorder_eq_rev_inorder
using sel_r_eq_ldeep_s_if_dst_fwd_verts by blast
lemma sel_r_eq_ldeep_s_if_valid_no_cross:
"\<lbrakk>valid_tree t; no_cross_products t; left_deep t\<rbrakk>
\<Longrightarrow> sel_r (first_node t) = ldeep_s match_sel (revorder t)"
using sel_r_eq_ldeep_s_if_valid_fwd forward_if_ldeep_no_cross'
valid_tree_def first_node_in_verts_if_valid
by blast
lemma c_list_ldeep_s_eq_c_list_r_if_valid_no_cross:
"\<lbrakk>valid_tree t; no_cross_products t; left_deep t\<rbrakk>
\<Longrightarrow> c_list (ldeep_s match_sel (revorder t)) cf' h (first_node t) xs
= c_list_r h (first_node t) xs"
using sel_r_eq_ldeep_s_if_valid_no_cross c_list_r_def by simp
lemma c_IKKBZ_list_correct_if_simple_h:
assumes "valid_tree t" and "no_cross_products t" and "left_deep t"
shows "c_list_r (\<lambda>x. h x (cf' x)) (first_node t) (revorder t) = c_IKKBZ h cf match_sel t"
proof -
have "(\<lambda>t. c_IKKBZ h cf' match_sel t) t
= c_list (ldeep_s match_sel (revorder t)) cf' (\<lambda>x. h x (cf' x)) (first_node t) (revorder t)"
using c_IKKBZ_eq_c_list assms(1,3) valid_tree_def by fast
then show ?thesis
using c_list_ldeep_s_eq_c_list_r_if_valid_no_cross assms by (simp add: c_IKKBZ_cf'_eq')
qed
end
subsubsection \<open>Applying IKKBZ on Simple Cost Functions\<close>
text \<open>
 For simple cost functions like @{term c_nlj} and @{term c_hj} that, unlike @{term c_out}, do not
 depend on the contributing selectivities, the h function does not change. Therefore, we can
 apply IKKBZ directly using @{term c_IKKBZ} and @{term c_list} (below, h is instantiated as the
 identity for nested-loop joins and as the constant 1.2 for hash joins).
\<close>
context cmp_tree_query_graph
begin
context
fixes h :: "'a \<Rightarrow> real \<Rightarrow> real"
assumes h_pos: "\<forall>x. h x (cf' x) > 0"
begin
theorem ikkbz_query_graph_if_simple_h:
defines "cost \<equiv> c_IKKBZ h cf match_sel"
defines "h' \<equiv> (\<lambda>x. h x (cf' x))"
shows "ikkbz_query_graph bfs sel cf G cmp cost (c_list_r h') (rank_r h')"
unfolding ikkbz_query_graph_def ikkbz_query_graph_axioms_def assms
by (auto simp: cmp_tree_query_graph_axioms c_list_asi c_IKKBZ_list_correct_if_simple_h h_pos)
interpretation ikkbz_query_graph bfs sel cf G cmp
"c_IKKBZ h cf match_sel" "c_list_r (\<lambda>x. h x (cf' x))" "rank_r (\<lambda>x. h x (cf' x))"
by (fact ikkbz_query_graph_if_simple_h)
corollary ikkbz_simple_h_nempty: "ikkbz \<noteq> []"
by (rule ikkbz_nempty)
corollary ikkbz_simple_h_valid_tree: "valid_tree (create_ldeep ikkbz)"
by (rule ikkbz_valid_tree)
corollary ikkbz_simple_h_no_cross:
"no_cross_products (create_ldeep ikkbz)"
by (rule ikkbz_no_cross)
theorem ikkbz_simple_h_optimal:
"\<lbrakk>valid_tree t; no_cross_products t; left_deep t\<rbrakk>
\<Longrightarrow> c_IKKBZ h cf match_sel (create_ldeep ikkbz) \<le> c_IKKBZ h cf match_sel t"
by (rule ikkbz_optimal_tree)
abbreviation ikkbz_simple_h :: "'a list" where
"ikkbz_simple_h \<equiv> ikkbz"
end
text \<open>
We can now apply these results directly to valid cost functions like @{term c_nlj} and
@{term c_hj}.
\<close>
lemma id_cf'_gt0: "\<forall>x. id (cf' x) > 0"
by auto
corollary ikkbz_nempty_nlj: "ikkbz_simple_h (\<lambda>_. id) \<noteq> []"
using ikkbz_simple_h_nempty[of "\<lambda>_. id", OF id_cf'_gt0] by blast
corollary ikkbz_valid_tree_nlj: "valid_tree (create_ldeep (ikkbz_simple_h (\<lambda>_. id)))"
using ikkbz_simple_h_valid_tree[of "\<lambda>_. id", OF id_cf'_gt0] by blast
corollary ikkbz_no_cross_nlj: "no_cross_products (create_ldeep (ikkbz_simple_h (\<lambda>_. id)))"
using ikkbz_simple_h_no_cross[of "\<lambda>_. id", OF id_cf'_gt0] by blast
corollary ikkbz_optimal_nlj:
"\<lbrakk>valid_tree t; no_cross_products t; left_deep t\<rbrakk>
\<Longrightarrow> c_nlj cf match_sel (create_ldeep (ikkbz_simple_h (\<lambda>_. id))) \<le> c_nlj cf match_sel t"
using ikkbz_simple_h_optimal[of "\<lambda>_. id", OF id_cf'_gt0] ikkbz_nempty_nlj
by (fastforce simp: c_nlj_IKKBZ create_ldeep_ldeep)
corollary ikkbz_nempty_hj: "ikkbz_simple_h (\<lambda>_ _. 1.2) \<noteq> []"
using ikkbz_simple_h_nempty by force
corollary ikkbz_valid_tree_hj: "valid_tree (create_ldeep (ikkbz_simple_h (\<lambda>_ _. 1.2)))"
using ikkbz_simple_h_valid_tree by force
corollary ikkbz_no_cross_hj: "no_cross_products (create_ldeep (ikkbz_simple_h (\<lambda>_ _. 1.2)))"
using ikkbz_simple_h_no_cross by force
corollary ikkbz_optimal_hj:
"\<lbrakk>valid_tree t; no_cross_products t; left_deep t\<rbrakk>
\<Longrightarrow> c_hj cf match_sel (create_ldeep (ikkbz_simple_h (\<lambda>_ _. 1.2))) \<le> c_hj cf match_sel t"
using ikkbz_simple_h_optimal[of "\<lambda>_ _. 1.2"] ikkbz_nempty_hj
by (fastforce simp: c_hj_IKKBZ create_ldeep_ldeep)
end
subsubsection \<open>Applying IKKBZ on C\_out\<close>
text \<open>
 Since @{term c_out} uses the contributing selectivity as part of its h, we cannot use the general
 approach we used for the "simple" cost functions. Instead, we show the applicability directly.
\<close>
context tree_query_graph
begin
definition c_out_list_r :: "'a \<Rightarrow> 'a list \<Rightarrow> real" where
"c_out_list_r r = c_list_r (\<lambda>a. sel_r r a * cf' a) r"
definition c_out_rank_r :: "'a \<Rightarrow> 'a list \<Rightarrow> real" where
"c_out_rank_r r = rank_r (\<lambda>a. sel_r r a * cf' a) r"
lemma c_out_eq_c_list_cf':
fixes t
defines "xs \<equiv> revorder t"
defines "h \<equiv> (\<lambda>a. ldeep_s match_sel xs a * cf' a)"
assumes "distinct_relations t" and "left_deep t"
shows "c_list (ldeep_s match_sel xs) cf' h (first_node t) xs = c_out cf' match_sel t"
using c_out_eq_c_list assms by blast
lemma c_out_list_correct_cf':
fixes t
defines "h \<equiv> (\<lambda>a. sel_r (first_node t) a * cf' a)"
assumes "valid_tree t" and "no_cross_products t" and "left_deep t"
shows "c_list_r h (first_node t) (revorder t) = c_out cf' match_sel t"
using c_out_eq_c_list_cf' assms sel_r_eq_ldeep_s_if_valid_no_cross
by (fastforce simp: valid_tree_def c_list_ldeep_s_eq_c_list_r_if_valid_no_cross)
lemma c_out_list_correct_cf:
fixes t
defines "h \<equiv> (\<lambda>a. sel_r (first_node t) a * cf' a)"
assumes "valid_tree t" and "no_cross_products t" and "left_deep t"
shows "c_list_r h (first_node t) (revorder t) = c_out cf match_sel t"
using c_out_list_correct_cf' c_out_cf'_eq' assms by simp
lemma c_out_list_correct:
"\<lbrakk>valid_tree t; no_cross_products t; left_deep t\<rbrakk>
\<Longrightarrow> c_out_list_r (first_node t) (revorder t) = c_out cf match_sel t"
using c_out_list_correct_cf c_out_list_r_def by simp
lemma c_out_h_gt0: "r \<in> verts G \<Longrightarrow> (\<lambda>a. sel_r r a * cf' a) x > 0"
using sel_r_gt0 by (simp add: pos_cards')
lemma c_out_r_asi: "r \<in> verts G \<Longrightarrow> asi (c_out_rank_r r) r (c_out_list_r r)"
using c_out_h_gt0 by (simp add: c_list_asi c_out_list_r_def c_out_rank_r_def)
end
context cmp_tree_query_graph
begin
theorem ikkbz_query_graph_c_out:
"ikkbz_query_graph bfs sel cf G cmp (c_out cf match_sel) c_out_list_r c_out_rank_r"
unfolding ikkbz_query_graph_def ikkbz_query_graph_axioms_def
by (auto simp: cmp_tree_query_graph_axioms c_out_r_asi c_out_list_correct)
interpretation QG\<^sub>o\<^sub>u\<^sub>t:
ikkbz_query_graph bfs sel cf G cmp "c_out cf match_sel" c_out_list_r c_out_rank_r
by (rule ikkbz_query_graph_c_out)
corollary ikkbz_nempty_cout: "QG\<^sub>o\<^sub>u\<^sub>t.ikkbz \<noteq> []"
using QG\<^sub>o\<^sub>u\<^sub>t.ikkbz_nempty .
corollary ikkbz_valid_tree_cout: "valid_tree (create_ldeep QG\<^sub>o\<^sub>u\<^sub>t.ikkbz)"
using QG\<^sub>o\<^sub>u\<^sub>t.ikkbz_valid_tree .
corollary ikkbz_no_cross_cout: "no_cross_products (create_ldeep QG\<^sub>o\<^sub>u\<^sub>t.ikkbz)"
using QG\<^sub>o\<^sub>u\<^sub>t.ikkbz_no_cross .
corollary ikkbz_optimal_cout:
"\<lbrakk>valid_tree t; no_cross_products t; left_deep t\<rbrakk>
\<Longrightarrow> c_out cf match_sel (create_ldeep QG\<^sub>o\<^sub>u\<^sub>t.ikkbz) \<le> c_out cf match_sel t"
using QG\<^sub>o\<^sub>u\<^sub>t.ikkbz_optimal_tree .
end
subsection \<open>Instantiating Comparators with Linorders\<close>
(* possible cmp definition based on 'a::linorder *)
locale alin_tree_query_graph = tree_query_graph bfs sel cf G
for bfs sel and cf :: "'a :: linorder \<Rightarrow> real" and G
begin
lift_definition cmp :: "('a list\<times>'b) comparator" is
"(\<lambda>x y. if hd (fst x) < hd (fst y) then Less
else if hd (fst x) > hd (fst y) then Greater else Equiv)"
by(unfold_locales) (auto split: if_splits)
lemma cmp_hd_eq_if_equiv: "compare cmp (v1,e1) (v2,e2) = Equiv \<Longrightarrow> hd v1 = hd v2"
by(auto simp: cmp.rep_eq split: if_splits)
lemma cmp_sets_not_dsjnt_if_equiv:
"\<lbrakk>v1 \<noteq> []; v2 \<noteq> []; compare cmp (v1,e1) (v2,e2) = Equiv\<rbrakk> \<Longrightarrow> set v1 \<inter> set v2 \<noteq> {}"
using cmp_hd_eq_if_equiv disjoint_iff_not_equal hd_in_set[of v1] by auto
lemma cmp_tree_qg: "cmp_tree_query_graph bfs sel cf G cmp"
by standard (simp add: cmp_sets_not_dsjnt_if_equiv)
interpretation cmp_tree_query_graph bfs sel cf G cmp
by (rule cmp_tree_qg)
(* The results are now usable: *)
thm ikkbz_optimal_hj ikkbz_optimal_cout
end
(* possible cmp definition based on 'b::linorder *)
locale blin_tree_query_graph = tree_query_graph bfs sel cf G
for bfs and sel :: "'b :: linorder \<Rightarrow> real" and cf G
begin
lift_definition cmp :: "('a list\<times>'b) comparator" is
"(\<lambda>x y. if snd x < snd y then Less
else if snd x > snd y then Greater else Equiv)"
by(unfold_locales) (auto split: if_splits)
lemma cmp_arcs_eq_if_equiv: "compare cmp (v1,e1) (v2,e2) = Equiv \<Longrightarrow> e1 = e2"
by(auto simp: cmp.rep_eq split: if_splits)
lemma cmp_tree_qg: "cmp_tree_query_graph bfs sel cf G cmp"
by standard (simp add: cmp_arcs_eq_if_equiv)
interpretation cmp_tree_query_graph bfs sel cf G cmp
by (rule cmp_tree_qg)
(* The results are now usable: *)
thm ikkbz_optimal_hj ikkbz_optimal_cout
end
end |
[GOAL]
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
m : MeasurableSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : T2Space Y
μ ν : Measure X
inst✝ : IsOpenPosMeasure μ
s U F : Set X
x : X
hU : IsOpen U
⊢ ↑↑μ U = 0 ↔ U = ∅
[PROOFSTEP]
simpa only [not_lt, nonpos_iff_eq_zero, not_nonempty_iff_eq_empty] using not_congr (hU.measure_pos_iff μ)
[GOAL]
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
m : MeasurableSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : T2Space Y
μ ν : Measure X
inst✝ : IsOpenPosMeasure μ
s U F : Set X
x : X
hU : IsOpen U
h : U = ∅
⊢ ↑↑μ U = 0
[PROOFSTEP]
simp [h]
[GOAL]
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
m : MeasurableSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : T2Space Y
μ ν : Measure X
inst✝ : IsOpenPosMeasure μ
s U F : Set X
x : X
hU : IsOpen U
⊢ U =ᶠ[ae μ] ∅ ↔ U = ∅
[PROOFSTEP]
rw [ae_eq_empty, hU.measure_zero_iff_eq_empty]
[GOAL]
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
m : MeasurableSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : T2Space Y
μ ν : Measure X
inst✝ : IsOpenPosMeasure μ
s U F : Set X
x : X
hF : IsClosed F
⊢ F =ᶠ[ae μ] univ ↔ F = univ
[PROOFSTEP]
refine' ⟨fun h ↦ _, fun h ↦ by rw [h]⟩
[GOAL]
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
m : MeasurableSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : T2Space Y
μ ν : Measure X
inst✝ : IsOpenPosMeasure μ
s U F : Set X
x : X
hF : IsClosed F
h : F = univ
⊢ F =ᶠ[ae μ] univ
[PROOFSTEP]
rw [h]
[GOAL]
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
m : MeasurableSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : T2Space Y
μ ν : Measure X
inst✝ : IsOpenPosMeasure μ
s U F : Set X
x : X
hF : IsClosed F
h : F =ᶠ[ae μ] univ
⊢ F = univ
[PROOFSTEP]
rwa [ae_eq_univ, hF.isOpen_compl.measure_eq_zero_iff μ, compl_empty_iff] at h
[GOAL]
X : Type u_1
Y : Type u_2
inst✝⁵ : TopologicalSpace X
m : MeasurableSpace X
inst✝⁴ : TopologicalSpace Y
inst✝³ : T2Space Y
μ ν : Measure X
inst✝² : IsOpenPosMeasure μ
s U F : Set X
x : X
inst✝¹ : OpensMeasurableSpace X
inst✝ : IsFiniteMeasure μ
hF : IsClosed F
⊢ ↑↑μ F = ↑↑μ univ ↔ F = univ
[PROOFSTEP]
rw [← ae_eq_univ_iff_measure_eq hF.measurableSet.nullMeasurableSet, hF.ae_eq_univ_iff_eq]
[GOAL]
X : Type u_1
Y : Type u_2
inst✝⁵ : TopologicalSpace X
m : MeasurableSpace X
inst✝⁴ : TopologicalSpace Y
inst✝³ : T2Space Y
μ ν : Measure X
inst✝² : IsOpenPosMeasure μ
s U F : Set X
x : X
inst✝¹ : OpensMeasurableSpace X
inst✝ : IsProbabilityMeasure μ
hF : IsClosed F
⊢ ↑↑μ F = 1 ↔ F = univ
[PROOFSTEP]
rw [← measure_univ (μ := μ), hF.measure_eq_univ_iff_eq]
[GOAL]
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
m : MeasurableSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : T2Space Y
μ ν : Measure X
inst✝ : IsOpenPosMeasure μ
s U F : Set X
x : X
f g : X → Y
h : f =ᶠ[ae (restrict μ U)] g
hU : IsOpen U
hf : ContinuousOn f U
hg : ContinuousOn g U
⊢ EqOn f g U
[PROOFSTEP]
replace h := ae_imp_of_ae_restrict h
[GOAL]
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
m : MeasurableSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : T2Space Y
μ ν : Measure X
inst✝ : IsOpenPosMeasure μ
s U F : Set X
x : X
f g : X → Y
hU : IsOpen U
hf : ContinuousOn f U
hg : ContinuousOn g U
h : ∀ᵐ (x : X) ∂μ, x ∈ U → f x = g x
⊢ EqOn f g U
[PROOFSTEP]
simp only [EventuallyEq, ae_iff, not_imp] at h
[GOAL]
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
m : MeasurableSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : T2Space Y
μ ν : Measure X
inst✝ : IsOpenPosMeasure μ
s U F : Set X
x : X
f g : X → Y
hU : IsOpen U
hf : ContinuousOn f U
hg : ContinuousOn g U
h : ↑↑μ {a | a ∈ U ∧ ¬f a = g a} = 0
⊢ EqOn f g U
[PROOFSTEP]
have : IsOpen (U ∩ {a | f a ≠ g a}) :=
by
refine' isOpen_iff_mem_nhds.mpr fun a ha => inter_mem (hU.mem_nhds ha.1) _
rcases ha with ⟨ha : a ∈ U, ha' : (f a, g a) ∈ (diagonal Y)ᶜ⟩
exact
(hf.continuousAt (hU.mem_nhds ha)).prod_mk_nhds (hg.continuousAt (hU.mem_nhds ha))
(isClosed_diagonal.isOpen_compl.mem_nhds ha')
[GOAL]
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
m : MeasurableSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : T2Space Y
μ ν : Measure X
inst✝ : IsOpenPosMeasure μ
s U F : Set X
x : X
f g : X → Y
hU : IsOpen U
hf : ContinuousOn f U
hg : ContinuousOn g U
h : ↑↑μ {a | a ∈ U ∧ ¬f a = g a} = 0
⊢ IsOpen (U ∩ {a | f a ≠ g a})
[PROOFSTEP]
refine' isOpen_iff_mem_nhds.mpr fun a ha => inter_mem (hU.mem_nhds ha.1) _
[GOAL]
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
m : MeasurableSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : T2Space Y
μ ν : Measure X
inst✝ : IsOpenPosMeasure μ
s U F : Set X
x : X
f g : X → Y
hU : IsOpen U
hf : ContinuousOn f U
hg : ContinuousOn g U
h : ↑↑μ {a | a ∈ U ∧ ¬f a = g a} = 0
a : X
ha : a ∈ U ∩ {a | f a ≠ g a}
⊢ {a | f a ≠ g a} ∈ 𝓝 a
[PROOFSTEP]
rcases ha with ⟨ha : a ∈ U, ha' : (f a, g a) ∈ (diagonal Y)ᶜ⟩
[GOAL]
case intro
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
m : MeasurableSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : T2Space Y
μ ν : Measure X
inst✝ : IsOpenPosMeasure μ
s U F : Set X
x : X
f g : X → Y
hU : IsOpen U
hf : ContinuousOn f U
hg : ContinuousOn g U
h : ↑↑μ {a | a ∈ U ∧ ¬f a = g a} = 0
a : X
ha : a ∈ U
ha' : (f a, g a) ∈ (diagonal Y)ᶜ
⊢ {a | f a ≠ g a} ∈ 𝓝 a
[PROOFSTEP]
exact
(hf.continuousAt (hU.mem_nhds ha)).prod_mk_nhds (hg.continuousAt (hU.mem_nhds ha))
(isClosed_diagonal.isOpen_compl.mem_nhds ha')
[GOAL]
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
m : MeasurableSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : T2Space Y
μ ν : Measure X
inst✝ : IsOpenPosMeasure μ
s U F : Set X
x : X
f g : X → Y
hU : IsOpen U
hf : ContinuousOn f U
hg : ContinuousOn g U
h : ↑↑μ {a | a ∈ U ∧ ¬f a = g a} = 0
this : IsOpen (U ∩ {a | f a ≠ g a})
⊢ EqOn f g U
[PROOFSTEP]
replace := (this.eq_empty_of_measure_zero h).le
[GOAL]
X : Type u_1
Y : Type u_2
inst✝³ : TopologicalSpace X
m : MeasurableSpace X
inst✝² : TopologicalSpace Y
inst✝¹ : T2Space Y
μ ν : Measure X
inst✝ : IsOpenPosMeasure μ
s U F : Set X
x : X
f g : X → Y
hU : IsOpen U
hf : ContinuousOn f U
hg : ContinuousOn g U
h : ↑↑μ {a | a ∈ U ∧ ¬f a = g a} = 0
this : U ∩ {a | f a ≠ g a} ≤ ∅
⊢ EqOn f g U
[PROOFSTEP]
exact fun x hx => Classical.not_not.1 fun h => this ⟨hx, h⟩
[GOAL]
X : Type u_1
Y : Type u_2
inst✝⁷ : TopologicalSpace X
m : MeasurableSpace X
inst✝⁶ : TopologicalSpace Y
inst✝⁵ : T2Space Y
μ ν : Measure X
inst✝⁴ : IsOpenPosMeasure μ
s U F : Set X
x : X
inst✝³ : OpensMeasurableSpace X
Z : Type u_3
inst✝² : TopologicalSpace Z
inst✝¹ : MeasurableSpace Z
inst✝ : BorelSpace Z
f : X → Z
hf : Continuous f
hf_surj : Surjective f
⊢ IsOpenPosMeasure (map f μ)
[PROOFSTEP]
refine' ⟨fun U hUo hUne => _⟩
[GOAL]
X : Type u_1
Y : Type u_2
inst✝⁷ : TopologicalSpace X
m : MeasurableSpace X
inst✝⁶ : TopologicalSpace Y
inst✝⁵ : T2Space Y
μ ν : Measure X
inst✝⁴ : IsOpenPosMeasure μ
s U✝ F : Set X
x : X
inst✝³ : OpensMeasurableSpace X
Z : Type u_3
inst✝² : TopologicalSpace Z
inst✝¹ : MeasurableSpace Z
inst✝ : BorelSpace Z
f : X → Z
hf : Continuous f
hf_surj : Surjective f
U : Set Z
hUo : IsOpen U
hUne : Set.Nonempty U
⊢ ↑↑(map f μ) U ≠ 0
[PROOFSTEP]
rw [Measure.map_apply hf.measurable hUo.measurableSet]
[GOAL]
X : Type u_1
Y : Type u_2
inst✝⁷ : TopologicalSpace X
m : MeasurableSpace X
inst✝⁶ : TopologicalSpace Y
inst✝⁵ : T2Space Y
μ ν : Measure X
inst✝⁴ : IsOpenPosMeasure μ
s U✝ F : Set X
x : X
inst✝³ : OpensMeasurableSpace X
Z : Type u_3
inst✝² : TopologicalSpace Z
inst✝¹ : MeasurableSpace Z
inst✝ : BorelSpace Z
f : X → Z
hf : Continuous f
hf_surj : Surjective f
U : Set Z
hUo : IsOpen U
hUne : Set.Nonempty U
⊢ ↑↑μ (f ⁻¹' U) ≠ 0
[PROOFSTEP]
exact (hUo.preimage hf).measure_ne_zero μ (hf_surj.nonempty_preimage.mpr hUne)
[GOAL]
X✝ : Type u_1
inst✝⁴ : PseudoMetricSpace X✝
m✝ : MeasurableSpace X✝
μ✝ : Measure X✝
inst✝³ : IsOpenPosMeasure μ✝
X : Type u_2
inst✝² : MetricSpace X
m : MeasurableSpace X
μ : Measure X
inst✝¹ : IsOpenPosMeasure μ
inst✝ : NoAtoms μ
x : X
r : ℝ
⊢ 0 < ↑↑μ (closedBall x r) ↔ 0 < r
[PROOFSTEP]
refine' ⟨fun h ↦ _, measure_closedBall_pos μ x⟩
[GOAL]
X✝ : Type u_1
inst✝⁴ : PseudoMetricSpace X✝
m✝ : MeasurableSpace X✝
μ✝ : Measure X✝
inst✝³ : IsOpenPosMeasure μ✝
X : Type u_2
inst✝² : MetricSpace X
m : MeasurableSpace X
μ : Measure X
inst✝¹ : IsOpenPosMeasure μ
inst✝ : NoAtoms μ
x : X
r : ℝ
h : 0 < ↑↑μ (closedBall x r)
⊢ 0 < r
[PROOFSTEP]
contrapose! h
[GOAL]
X✝ : Type u_1
inst✝⁴ : PseudoMetricSpace X✝
m✝ : MeasurableSpace X✝
μ✝ : Measure X✝
inst✝³ : IsOpenPosMeasure μ✝
X : Type u_2
inst✝² : MetricSpace X
m : MeasurableSpace X
μ : Measure X
inst✝¹ : IsOpenPosMeasure μ
inst✝ : NoAtoms μ
x : X
r : ℝ
h : r ≤ 0
⊢ ↑↑μ (closedBall x r) ≤ 0
[PROOFSTEP]
rw [(subsingleton_closedBall x h).measure_zero μ]
|
lemma uniformly_continuous_on_cmul[continuous_intros]: fixes f :: "'a::metric_space \<Rightarrow> 'b::real_normed_vector" assumes "uniformly_continuous_on s f" shows "uniformly_continuous_on s (\<lambda>x. c *\<^sub>R f(x))" |
In the early 18th century, a tribal confederacy known as the Six Nations of the Iroquois, headquartered in New York, ruled the Indian (Native American) tribes of Pennsylvania, including those who lived near what would become Lock Haven. Indian settlements in the area included three Munsee villages on the 325-acre (1.32 km2) Great Island in the West Branch Susquehanna River at the mouth of Bald Eagle Creek. Four Indian trails, the Great Island Path, the Great Shamokin Path, the Bald Eagle Creek Path, and the Sinnemahoning Path, crossed the island, and a fifth, Logan's Path, met Bald Eagle Creek Path a few miles upstream near the mouth of Fishing Creek. During the French and Indian War (1754–63), colonial militiamen on the Kittanning Expedition destroyed Munsee property on the Great Island and along the West Branch. By 1763, the Munsee had abandoned their island villages and other villages in the area.
|
\documentclass{article}
\usepackage{amsmath, amsthm, amssymb, algorithm2e}
\newcommand{\prob}{\mathbf{P}}
%\title{GenMut}
\begin{document}
%\maketitle
\section*{MBSS\_MutBnd.pl and MBSS\_MutBndCfg.pl}
\subsection*{Aim}
MBSS\_MutBnd.pl and MBSS\_MutBndCfg.pl are Perl scripts that generate a new bnd file (and, in the case of MBSS\_MutBndCfg.pl, a new cfg file as well). These new files contain external variables that control the under- or overexpression of nodes; MBSS\_MutBndCfg.pl thus prepares both the bnd and the cfg file for simulating mutants. By default, the external variables controlling mutations are set to 0. To simulate the mutant of a particular gene, the corresponding external variable needs to be set to 1.
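For instance, to simulate a knock-out of a (hypothetical) node ``N'', one would set the corresponding underexpression variable in the generated cfg file (assuming the usual MaBoSS cfg assignment syntax):
\begin{verbatim}
$Low_N = 1;
$High_N = 0;
\end{verbatim}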
\subsection*{How to run the script}
The scripts are run from the command line. The list of nodes on which mutations can be applied needs to be provided, together with the bnd file (and, for MBSS\_MutBndCfg.pl, the cfg file).
\begin{verbatim}
MBSS_MutBnd.pl <file.bnd> "<node_list>"
\end{verbatim}
\begin{verbatim}
MBSS_MutBndCfg.pl <file.bnd> <file.cfg> "<node_list>"
\end{verbatim}
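For example, for a model consisting of ``model.bnd'' and ``model.cfg'' with two (hypothetical) nodes ``p53'' and ``Mdm2'', one would run:
\begin{verbatim}
MBSS_MutBndCfg.pl model.bnd model.cfg "p53 Mdm2"
\end{verbatim}
This produces ``model\_mut.bnd'' and ``model\_mut.cfg'' containing the external variables \$High\_p53, \$Low\_p53, \$High\_Mdm2 and \$Low\_Mdm2, all set to 0 by default (see the Outputs section below).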
\subsection*{Outputs}
The generated files have the ``\_mut'' suffix, e.g., ``file\_mut.bnd'' and ``file\_mut.cfg''. The name of an external variable that controls overexpression of a node has the prefix ``\$High\_'' (e.g., node ``N'' has the associated external variable ``\$High\_N''). For underexpression, the prefix is ``\$Low\_'' (e.g., ``\$Low\_N'').
The list of nodes for which external variables are added is given on the command line. The standard output lists the nodes that have been found in the bnd file. If these variables are set to 1 (in the cfg file), the state of the node is forced, regardless of the initial condition. This is realized by an almost instantaneous transition, with a maximum rate given by the ``max\_rate'' node variable in the new bnd file. The underexpression variable takes priority over the overexpression variable.
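As an illustration, the rewritten logic of a node ``N'' could take the following shape (a minimal sketch assuming ternary rate expressions; the exact code emitted by the script may differ):
\begin{verbatim}
Node N {
  // $Low_N takes priority: it forces N to 0 regardless of $High_N
  rate_up   = $Low_N ? 0.0 : ($High_N ? @max_rate : (original rate_up));
  rate_down = $Low_N ? @max_rate : ($High_N ? 0.0 : (original rate_down));
}
\end{verbatim}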
NB: ``max\_rate'' is the maximum rate representable in C++, divided by the number of nodes. If the user employs a comparably large rate elsewhere, a MaBoSS run could produce wrong results, because adding such large rates may overflow the maximum C++ number.
\end{document}
|
theory flash82Rev imports flashPub
begin
section{*Main definitions*}
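(* Each lemma below establishes that a single protocol rule preserves the parameterised
   invariant inv82. Informally, one of three disjuncts is proved in every case: the rule
   itself re-establishes the invariant (?P1), the rule leaves the invariant's variables
   untouched (?P2), or an auxiliary formula from the invariant set is exhibited that
   implies it (?P3). *)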
lemma NI_FAckVsInv82:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_FAck ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma NI_InvVsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Inv iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_InvAck_1VsInv82:
(*Rule2VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iInv1 \<le> N" and a4:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv82 iInv1 ) (NI_InvAck_1 iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by(cut_tac a1 a2 a3 a4, auto)
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_InvAck_1_HomeVsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_InvAck_1_Home iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_InvAck_2VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_InvAck_2 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_Local_GetX_GetXVsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_GetX_GetX iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_Nak1VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_GetX_Nak1 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_Nak2VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_GetX_Nak2 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_Nak3VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_GetX_Nak3 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX1VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_GetX_PutX1 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P3 s"
apply( cut_tac a1 a2 b1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''procCmd'' iInv1) ) ( Const NODE_None )) ( eqn ( IVar ( Para ''UniMsg_Cmd'' iInv1) ) ( Const UNI_GetX )) ) ) " in exI,auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX2VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_GetX_PutX2 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P3 s"
apply( cut_tac a1 a2 b1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''procCmd'' iInv1) ) ( Const NODE_None )) ( eqn ( IVar ( Para ''UniMsg_Cmd'' iInv1) ) ( Const UNI_GetX )) ) ) " in exI,auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX3VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_GetX_PutX3 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P3 s"
apply( cut_tac a1 a2 b1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''procCmd'' iInv1) ) ( Const NODE_None )) ( eqn ( IVar ( Para ''UniMsg_Cmd'' iInv1) ) ( Const UNI_GetX )) ) ) " in exI,auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX4VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_GetX_PutX4 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P3 s"
apply( cut_tac a1 a2 b1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''procCmd'' iInv1) ) ( Const NODE_None )) ( eqn ( IVar ( Para ''UniMsg_Cmd'' iInv1) ) ( Const UNI_GetX )) ) ) " in exI,auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX5VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_GetX_PutX5 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P3 s"
apply( cut_tac a1 a2 b1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''procCmd'' iInv1) ) ( Const NODE_None )) ( eqn ( IVar ( Para ''UniMsg_Cmd'' iInv1) ) ( Const UNI_GetX )) ) ) " in exI,auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX6VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_GetX_PutX6 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P3 s"
apply( cut_tac a1 a2 b1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''procCmd'' iInv1) ) ( Const NODE_None )) ( eqn ( IVar ( Para ''UniMsg_Cmd'' iInv1) ) ( Const UNI_GetX )) ) ) " in exI,auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX7VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_GetX_PutX7 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P3 s"
apply( cut_tac a1 a2 b1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''procCmd'' iInv1) ) ( Const NODE_None )) ( eqn ( IVar ( Para ''UniMsg_Cmd'' iInv1) ) ( Const UNI_GetX )) ) ) " in exI,auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX8VsInv82:
(*Rule2VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iInv1 \<le> N" and a4:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_GetX_PutX8 N iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1\<and>(iRule2~=iInv1 )) \<or>((iRule1~=iInv1 )\<and>iRule2=iInv1) \<or>((iRule1~=iInv1 )\<and>(iRule2~=iInv1 )) "
by( cut_tac a1 a2 a3 a4 , auto)
moreover
{assume b1:"(iRule1=iInv1\<and>(iRule2~=iInv1 ))"
have "?P3 s"
apply( cut_tac a1 a2 a3 a4 b1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''procCmd'' iInv1) ) ( Const NODE_None )) ( eqn ( IVar ( Para ''UniMsg_Cmd'' iInv1) ) ( Const UNI_GetX )) ) ) " in exI,auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 )\<and>iRule2=iInv1)"
have "?P2 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 )\<and>(iRule2~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX8_homeVsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_GetX_PutX8_home N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P3 s"
apply( cut_tac a1 a2 b1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''procCmd'' iInv1) ) ( Const NODE_None )) ( eqn ( IVar ( Para ''UniMsg_Cmd'' iInv1) ) ( Const UNI_GetX )) ) ) " in exI,auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX9VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_GetX_PutX9 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P3 s"
apply( cut_tac a1 a2 b1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''procCmd'' iInv1) ) ( Const NODE_None )) ( eqn ( IVar ( Para ''UniMsg_Cmd'' iInv1) ) ( Const UNI_GetX )) ) ) " in exI,auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX10VsInv82:
(*Rule2VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iInv1 \<le> N" and a4:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_GetX_PutX10 N iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1\<and>(iRule2~=iInv1 )) \<or>((iRule1~=iInv1 )\<and>iRule2=iInv1) \<or>((iRule1~=iInv1 )\<and>(iRule2~=iInv1 )) "
by( cut_tac a1 a2 a3 a4 , auto)
moreover
{assume b1:"(iRule1=iInv1\<and>(iRule2~=iInv1 ))"
have "?P3 s"
apply( cut_tac a1 a2 a3 a4 b1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''procCmd'' iInv1) ) ( Const NODE_None )) ( eqn ( IVar ( Para ''UniMsg_Cmd'' iInv1) ) ( Const UNI_GetX )) ) ) " in exI,auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 )\<and>iRule2=iInv1)"
have "?P2 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 )\<and>(iRule2~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX10_homeVsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_GetX_PutX10_home N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P3 s"
apply( cut_tac a1 a2 b1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''procCmd'' iInv1) ) ( Const NODE_None )) ( eqn ( IVar ( Para ''UniMsg_Cmd'' iInv1) ) ( Const UNI_GetX )) ) ) " in exI,auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX11VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_GetX_PutX11 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P3 s"
apply( cut_tac a1 a2 b1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''procCmd'' iInv1) ) ( Const NODE_None )) ( eqn ( IVar ( Para ''UniMsg_Cmd'' iInv1) ) ( Const UNI_GetX )) ) ) " in exI,auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_Get_GetVsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_Get_Get iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_Local_Get_Nak1VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_Get_Nak1 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_Get_Nak2VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_Get_Nak2 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_Get_Nak3VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_Get_Nak3 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_Get_Put1VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_Get_Put1 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_Get_Put2VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_Get_Put2 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_Get_Put3VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_Get_Put3 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_PutVsInv82:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_Put ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma NI_Local_PutXAcksDoneVsInv82:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Local_PutXAcksDone ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma NI_NakVsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Nak iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Nak_ClearVsInv82:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Nak_Clear ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma NI_Nak_HomeVsInv82:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Nak_Home ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma NI_Remote_GetX_NakVsInv82:
(*Rule2VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iInv1 \<le> N" and a4:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Remote_GetX_Nak iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1\<and>(iRule2~=iInv1 )) \<or>((iRule1~=iInv1 )\<and>iRule2=iInv1) \<or>((iRule1~=iInv1 )\<and>(iRule2~=iInv1 )) "
by( cut_tac a1 a2 a3 a4 , auto)
moreover
{assume b1:"(iRule1=iInv1\<and>(iRule2~=iInv1 ))"
have "?P1 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 )\<and>iRule2=iInv1)"
have "?P2 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 )\<and>(iRule2~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Remote_GetX_Nak_HomeVsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Remote_GetX_Nak_Home iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_Remote_GetX_PutXVsInv82:
(*Rule2VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iInv1 \<le> N" and a4:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Remote_GetX_PutX iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1\<and>(iRule2~=iInv1 )) \<or>((iRule1~=iInv1 )\<and>iRule2=iInv1) \<or>((iRule1~=iInv1 )\<and>(iRule2~=iInv1 )) "
by( cut_tac a1 a2 a3 a4 , auto)
moreover
{assume b1:"(iRule1=iInv1\<and>(iRule2~=iInv1 ))"
have "?P3 s"
apply( cut_tac a1 a2 a3 a4 b1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''procCmd'' iInv1) ) ( Const NODE_None )) ( eqn ( IVar ( Para ''UniMsg_Cmd'' iInv1) ) ( Const UNI_GetX )) ) ) " in exI,auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 )\<and>iRule2=iInv1)"
have "?P2 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 )\<and>(iRule2~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Remote_GetX_PutX_HomeVsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Remote_GetX_PutX_Home iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_Remote_Get_Nak1VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Remote_Get_Nak1 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_Remote_Get_Nak2VsInv82:
(*Rule2VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iInv1 \<le> N" and a4:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Remote_Get_Nak2 iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1\<and>(iRule2~=iInv1 )) \<or>((iRule1~=iInv1 )\<and>iRule2=iInv1) \<or>((iRule1~=iInv1 )\<and>(iRule2~=iInv1 )) "
by( cut_tac a1 a2 a3 a4 , auto)
moreover
{assume b1:"(iRule1=iInv1\<and>(iRule2~=iInv1 ))"
have "?P1 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 )\<and>iRule2=iInv1)"
have "?P2 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 )\<and>(iRule2~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Remote_Get_Put1VsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Remote_Get_Put1 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_Remote_Get_Put2VsInv82:
(*Rule2VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iInv1 \<le> N" and a4:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Remote_Get_Put2 iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1\<and>(iRule2~=iInv1 )) \<or>((iRule1~=iInv1 )\<and>iRule2=iInv1) \<or>((iRule1~=iInv1 )\<and>(iRule2~=iInv1 )) "
by( cut_tac a1 a2 a3 a4 , auto)
moreover
{assume b1:"(iRule1=iInv1\<and>(iRule2~=iInv1 ))"
have "?P1 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 )\<and>iRule2=iInv1)"
have "?P2 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 )\<and>(iRule2~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Remote_PutVsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Remote_Put iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Remote_PutXVsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Remote_PutX iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_ReplaceVsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Replace iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_ReplaceHomeVsInv82:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_ReplaceHome ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma NI_ReplaceHomeShrVldVsInv82:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_ReplaceHomeShrVld ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma NI_ReplaceShrVldVsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_ReplaceShrVld iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_ShWbVsInv82:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_ShWb N ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma NI_WbVsInv82:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (NI_Wb ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma PI_Local_GetX_GetX1VsInv82:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (PI_Local_GetX_GetX1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma PI_Local_GetX_GetX2VsInv82:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (PI_Local_GetX_GetX2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma PI_Local_GetX_PutX1VsInv82:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (PI_Local_GetX_PutX1 N ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma PI_Local_GetX_PutX2VsInv82:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (PI_Local_GetX_PutX2 N ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma PI_Local_GetX_PutX3VsInv82:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (PI_Local_GetX_PutX3 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma PI_Local_GetX_PutX4VsInv82:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (PI_Local_GetX_PutX4 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma PI_Local_Get_GetVsInv82:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (PI_Local_Get_Get ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma PI_Local_Get_PutVsInv82:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (PI_Local_Get_Put ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma PI_Local_PutXVsInv82:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (PI_Local_PutX ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma PI_Local_ReplaceVsInv82:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (PI_Local_Replace ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma PI_Remote_GetVsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (PI_Remote_Get iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma PI_Remote_GetXVsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (PI_Remote_GetX iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma PI_Remote_PutXVsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (PI_Remote_PutX iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma PI_Remote_ReplaceVsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (PI_Remote_Replace iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma StoreVsInv82:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (Store iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma StoreHomeVsInv82:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv82 iInv1 ) (StoreHome ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
end
|
module Logic.Identity where
open import Logic.Equivalence
open import Logic.Base
infix 20 _≡_ _≢_
data _≡_ {A : Set}(x : A) : A -> Set where
refl : x ≡ x
subst : {A : Set}(P : A -> Set){x y : A} -> x ≡ y -> P y -> P x
subst P {x} .{x} refl px = px
sym : {A : Set}{x y : A} -> x ≡ y -> y ≡ x
sym {A} refl = refl
trans : {A : Set}{x y z : A} -> x ≡ y -> y ≡ z -> x ≡ z
trans {A} refl xz = xz
cong : {A B : Set}(f : A -> B){x y : A} -> x ≡ y -> f x ≡ f y
cong {A} f refl = refl
cong2 : {A B C : Set}(f : A -> B -> C){x z : A}{y w : B} -> x ≡ z -> y ≡ w -> f x y ≡ f z w
cong2 {A}{B} f refl refl = refl
Equiv : {A : Set} -> Equivalence A
Equiv = record
{ _==_ = _≡_
; refl = \x -> refl
; sym = \x y -> sym
; trans = \x y z -> trans
}
_≢_ : {A : Set} -> A -> A -> Set
x ≢ y = ¬ (x ≡ y)
sym≢ : {A : Set}{x y : A} -> x ≢ y -> y ≢ x
sym≢ np p = np (sym p)
|
module Formalization.PredicateLogic.Signature where
import Lvl
open import Numeral.Natural
open import Type
-- A signature consists of a countable family of constant/function and relation symbols.
-- `Prop(n)` should be interpreted as the indices for relations of arity `n`.
-- `Obj(n)` should be interpreted as the indices for functions of arity `n` (constants if `n = 0`).
record Signature : Typeω where
constructor intro
field
{ℓₚ} : Lvl.Level
Prop : ℕ → Type{ℓₚ}
{ℓₒ} : Lvl.Level
Obj : ℕ → Type{ℓₒ}
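-- A concrete illustration (my own example, not part of this module): a
-- signature for ordered rings could take `Prop 2` to be a two-element index
-- type for the relations {=, ≤}, `Obj 0` an index type for the constants
-- {0, 1}, `Obj 2` an index type for the operations {+, ⋅}, and the empty
-- type at every other arity.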
|
[STATEMENT]
lemma decseqD: "decseq f \<Longrightarrow> i \<le> j \<Longrightarrow> f j \<le> f i"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>decseq f; i \<le> j\<rbrakk> \<Longrightarrow> f j \<le> f i
[PROOF STEP]
by (auto simp: decseq_def) |
[STATEMENT]
lemma acute_chara:
assumes "Bet A B A'" and
"B \<noteq> A'"
shows "Acute A B C \<longleftrightarrow> A B C LtA A' B C"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (Acute A B C) = (A B C LtA A' B C)
[PROOF STEP]
using acute_chara_1 acute_chara_2 assms(1) assms(2)
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>Bet ?A ?B ?A'; ?B \<noteq> ?A'; Acute ?A ?B ?C\<rbrakk> \<Longrightarrow> ?A ?B ?C LtA ?A' ?B ?C
\<lbrakk>Bet ?A ?B ?A'; ?A ?B ?C LtA ?A' ?B ?C\<rbrakk> \<Longrightarrow> Acute ?A ?B ?C
Bet A B A'
B \<noteq> A'
goal (1 subgoal):
1. (Acute A B C) = (A B C LtA A' B C)
[PROOF STEP]
by blast |
FUNCTION:NAME
:BEGIN
-- @@stderr --
dtrace: script 'test/unittest/drops/drp.DTRACEDROP_PRINCIPAL.end.d' matched 2 probes
dtrace: [DTRACEDROP_PRINCIPAL] 1 drop on CPU #
|
import pandas as pd
from astroquery.simbad import Simbad
import os
import pathlib as pl
import numpy as np
import json
root_dir = os.getcwd()
ipac_dir = root_dir + '/data/ipac/'
table_dir = root_dir + '/data/mine/tables/'
alias_file = root_dir + '/data/mine/aliases.json'
pl.Path(ipac_dir).mkdir(parents=True, exist_ok=True)
pl.Path(table_dir).mkdir(parents=True, exist_ok=True)
def canonicalize_galaxies():
    # Maps each canonical SIMBAD name to its list of known aliases.
    downloaded_names = dict()
    for table_csv in filter(lambda x: x.endswith("csv"), os.listdir(table_dir)):
        df = pd.read_csv(table_dir + table_csv)
        for index, name in enumerate(df['name']):
            print('Processing ', index + 1, ' / ', len(df['name']), ' in file: ', table_csv)
            found_key = False
            for key, value in downloaded_names.items():
                if name in value:
                    # Known alias: rewrite it to the canonical key. Use .loc here,
                    # since chained indexing (.iloc[index, ]['name'] = ...) assigns
                    # to a copy and silently changes nothing.
                    df.loc[index, 'name'] = key
                    found_key = True
                    break
                elif name == key:
                    found_key = True
                    break
            if not found_key:
                ids = Simbad.query_objectids(name)
                downloaded_names[name] = list(np.asarray(ids.as_array(), dtype=str)) if ids is not None else []
        # Write the updated table back into the tables directory rather than
        # the current working directory.
        df.to_csv(table_dir + table_csv, index=False)
with open(alias_file, 'w') as fp:
json.dump(downloaded_names, fp)
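# A minimal, hypothetical entry point for the script above (assuming the CSV
# tables with a 'name' column already exist under data/mine/tables/):
if __name__ == '__main__':
    # Rewrites every alias in the tables to its canonical SIMBAD name and
    # dumps the alias map gathered along the way to data/mine/aliases.json.
    canonicalize_galaxies()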
|
'''
y = b + a1 * x1 : simple linear regression
y = b + a1*x1 + a2*x2 + ... : multiple linear regression
'''
import numpy as np
from sklearn.linear_model import LinearRegression
np.random.seed(1216)
X1 = np.random.rand(100,1)
print('X1 =', X1[:5])
X2 = np.random.rand(100,1)
print('X2 =', X2[:5])
y = 3 + 4 * X1 + 5 * X2 + np.random.randn(100,1) # target = the value we want to recover
print('y =', y[:5])
X = np.c_[X1,X2] # data
print('X = ', X[:5])
lin_reg = LinearRegression() # create the LinearRegression object
lin_reg.fit(X,y) # model fitting (training)
print('y-intercept =', lin_reg.intercept_, 'coefficients =', lin_reg.coef_) |
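A short follow-up sketch (not part of the original script) showing how the fitted model above can be used for prediction; the sample X_new is a hypothetical input of my own choosing:
X_new = np.array([[0.5, 0.5]])  # one new (x1, x2) sample
print('prediction for (0.5, 0.5) =', lin_reg.predict(X_new))
# With 100 noisy samples, the recovered intercept and coefficients only
# approximate the generating values (3, 4 and 5).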
Formal statement is: lemma borel_measurable_lebesgue_preimage_borel: fixes f :: "'a::euclidean_space \<Rightarrow> 'b::euclidean_space" shows "f \<in> borel_measurable lebesgue \<longleftrightarrow> (\<forall>T. T \<in> sets borel \<longrightarrow> {x. f x \<in> T} \<in> sets lebesgue)" Informal statement is: A function $f$ is Lebesgue measurable if and only if the preimage of every Borel set is a Lebesgue measurable set. |
theorem Cauchy_theorem_global: assumes S: "open S" and holf: "f holomorphic_on S" and vpg: "valid_path \<gamma>" and loop: "pathfinish \<gamma> = pathstart \<gamma>" and pas: "path_image \<gamma> \<subseteq> S" and zero: "\<And>w. w \<notin> S \<Longrightarrow> winding_number \<gamma> w = 0" shows "(f has_contour_integral 0) \<gamma>" |
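Informal restatement of the theorem above (my paraphrase, in the Formal/Informal style used elsewhere in this collection): if $f$ is holomorphic on an open set $S$, $\gamma$ is a closed valid path whose image lies in $S$, and the winding number of $\gamma$ about every point outside $S$ is zero, then $\oint_\gamma f(z)\,dz = 0$.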
The Euclidean relation holds for two polynomials $x$ and $y$ if and only if $x = qy + r$ and $r = 0$ or $deg(r) < deg(y)$. |
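Spelled out in LaTeX (a restatement of the line above, with $q$ the quotient and $r$ the remainder): $x = q\,y + r \;\wedge\; \big(r = 0 \,\vee\, \deg r < \deg y\big)$.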
(* Title: HOL/Auth/Guard/Proto.thy
Author: Frederic Blanqui, University of Cambridge Computer Laboratory
Copyright 2002 University of Cambridge
*)
section\<open>Other Protocol-Independent Results\<close>
theory Proto imports Guard_Public begin
subsection\<open>protocols\<close>
type_synonym rule = "event set * event"
abbreviation
msg' :: "rule => msg" where
"msg' R == msg (snd R)"
type_synonym proto = "rule set"
definition wdef :: "proto => bool" where
"wdef p == ALL R k. R:p --> Number k:parts {msg' R}
--> Number k:parts (msg`(fst R))"
subsection\<open>substitutions\<close>
record subs =
agent :: "agent => agent"
nonce :: "nat => nat"
nb :: "nat => msg"
key :: "key => key"
primrec apm :: "subs => msg => msg" where
"apm s (Agent A) = Agent (agent s A)"
| "apm s (Nonce n) = Nonce (nonce s n)"
| "apm s (Number n) = nb s n"
| "apm s (Key K) = Key (key s K)"
| "apm s (Hash X) = Hash (apm s X)"
| "apm s (Crypt K X) = (
if (EX A. K = pubK A) then Crypt (pubK (agent s (agt K))) (apm s X)
else if (EX A. K = priK A) then Crypt (priK (agent s (agt K))) (apm s X)
else Crypt (key s K) (apm s X))"
| "apm s \<lbrace>X,Y\<rbrace> = \<lbrace>apm s X, apm s Y\<rbrace>"
lemma apm_parts: "X:parts {Y} ==> apm s X:parts {apm s Y}"
apply (erule parts.induct, simp_all, blast)
apply (erule parts.Fst)
apply (erule parts.Snd)
by (erule parts.Body)+
lemma Nonce_apm [rule_format]: "Nonce n:parts {apm s X} ==>
(ALL k. Number k:parts {X} --> Nonce n ~:parts {nb s k}) -->
(EX k. Nonce k:parts {X} & nonce s k = n)"
by (induct X, simp_all, blast)
lemma wdef_Nonce: "[| Nonce n:parts {apm s X}; R:p; msg' R = X; wdef p;
Nonce n ~:parts (apm s `(msg `(fst R))) |] ==>
(EX k. Nonce k:parts {X} & nonce s k = n)"
apply (erule Nonce_apm, unfold wdef_def)
apply (drule_tac x=R in spec, drule_tac x=k in spec, clarsimp)
apply (drule_tac x=x in bspec, simp)
apply (drule_tac Y="msg x" and s=s in apm_parts, simp)
by (blast dest: parts_parts)
primrec ap :: "subs => event => event" where
"ap s (Says A B X) = Says (agent s A) (agent s B) (apm s X)"
| "ap s (Gets A X) = Gets (agent s A) (apm s X)"
| "ap s (Notes A X) = Notes (agent s A) (apm s X)"
abbreviation
ap' :: "subs => rule => event" where
"ap' s R == ap s (snd R)"
abbreviation
apm' :: "subs => rule => msg" where
"apm' s R == apm s (msg' R)"
abbreviation
priK' :: "subs => agent => key" where
"priK' s A == priK (agent s A)"
abbreviation
pubK' :: "subs => agent => key" where
"pubK' s A == pubK (agent s A)"
subsection\<open>nonces generated by a rule\<close>
definition newn :: "rule => nat set" where
"newn R == {n. Nonce n:parts {msg (snd R)} & Nonce n ~:parts (msg`(fst R))}"
lemma newn_parts: "n:newn R ==> Nonce (nonce s n):parts {apm' s R}"
by (auto simp: newn_def dest: apm_parts)
subsection\<open>traces generated by a protocol\<close>
definition ok :: "event list => rule => subs => bool" where
"ok evs R s == ((ALL x. x:fst R --> ap s x:set evs)
& (ALL n. n:newn R --> Nonce (nonce s n) ~:used evs))"
inductive_set
tr :: "proto => event list set"
for p :: proto
where
Nil [intro]: "[]:tr p"
| Fake [intro]: "[| evsf:tr p; X:synth (analz (spies evsf)) |]
==> Says Spy B X # evsf:tr p"
| Proto [intro]: "[| evs:tr p; R:p; ok evs R s |] ==> ap' s R # evs:tr p"
subsection\<open>general properties\<close>
lemma one_step_tr [iff]: "one_step (tr p)"
apply (unfold one_step_def, clarify)
by (ind_cases "ev # evs:tr p" for ev evs, auto)
definition has_only_Says' :: "proto => bool" where
"has_only_Says' p == ALL R. R:p --> is_Says (snd R)"
lemma has_only_Says'D: "[| R:p; has_only_Says' p |]
==> (EX A B X. snd R = Says A B X)"
by (unfold has_only_Says'_def is_Says_def, blast)
lemma has_only_Says_tr [simp]: "has_only_Says' p ==> has_only_Says (tr p)"
apply (unfold has_only_Says_def)
apply (rule allI, rule allI, rule impI)
apply (erule tr.induct)
apply (auto simp: has_only_Says'_def ok_def)
by (drule_tac x=a in spec, auto simp: is_Says_def)
lemma has_only_Says'_in_trD: "[| has_only_Says' p; list @ ev # evs1 \<in> tr p |]
==> (EX A B X. ev = Says A B X)"
by (drule has_only_Says_tr, auto)
lemma ok_not_used: "[| Nonce n ~:used evs; ok evs R s;
ALL x. x:fst R --> is_Says x |] ==> Nonce n ~:parts (apm s `(msg `(fst R)))"
apply (unfold ok_def, clarsimp)
apply (drule_tac x=x in spec, drule_tac x=x in spec)
by (auto simp: is_Says_def dest: Says_imp_spies not_used_not_spied parts_parts)
lemma ok_is_Says: "[| evs' @ ev # evs:tr p; ok evs R s; has_only_Says' p;
R:p; x:fst R |] ==> is_Says x"
apply (unfold ok_def is_Says_def, clarify)
apply (drule_tac x=x in spec, simp)
apply (subgoal_tac "one_step (tr p)")
apply (drule trunc, simp, drule one_step_Cons, simp)
apply (drule has_only_SaysD, simp+)
by (clarify, case_tac x, auto)
subsection\<open>types\<close>
type_synonym keyfun = "rule => subs => nat => event list => key set"
type_synonym secfun = "rule => nat => subs => key set => msg"
subsection\<open>introduction of a fresh guarded nonce\<close>
definition fresh :: "proto => rule => subs => nat => key set => event list
=> bool" where
"fresh p R s n Ks evs == (EX evs1 evs2. evs = evs2 @ ap' s R # evs1
& Nonce n ~:used evs1 & R:p & ok evs1 R s & Nonce n:parts {apm' s R}
& apm' s R:guard n Ks)"
lemma freshD: "fresh p R s n Ks evs ==> (EX evs1 evs2.
evs = evs2 @ ap' s R # evs1 & Nonce n ~:used evs1 & R:p & ok evs1 R s
& Nonce n:parts {apm' s R} & apm' s R:guard n Ks)"
by (unfold fresh_def, blast)
lemma freshI [intro]: "[| Nonce n ~:used evs1; R:p; Nonce n:parts {apm' s R};
ok evs1 R s; apm' s R:guard n Ks |]
==> fresh p R s n Ks (list @ ap' s R # evs1)"
by (unfold fresh_def, blast)
lemma freshI': "[| Nonce n ~:used evs1; (l,r):p;
Nonce n:parts {apm s (msg r)}; ok evs1 (l,r) s; apm s (msg r):guard n Ks |]
==> fresh p (l,r) s n Ks (evs2 @ ap s r # evs1)"
by (drule freshI, simp+)
lemma fresh_used: "[| fresh p R' s' n Ks evs; has_only_Says' p |]
==> Nonce n:used evs"
apply (unfold fresh_def, clarify)
apply (drule has_only_Says'D)
by (auto intro: parts_used_app)
lemma fresh_newn: "[| evs' @ ap' s R # evs:tr p; wdef p; has_only_Says' p;
Nonce n ~:used evs; R:p; ok evs R s; Nonce n:parts {apm' s R} |]
==> EX k. k:newn R & nonce s k = n"
apply (drule wdef_Nonce, simp+)
apply (frule ok_not_used, simp+)
apply (clarify, erule ok_is_Says, simp+)
apply (clarify, rule_tac x=k in exI, simp add: newn_def)
apply (clarify, drule_tac Y="msg x" and s=s in apm_parts)
apply (drule ok_not_used, simp+)
by (clarify, erule ok_is_Says, simp_all)
lemma fresh_rule: "[| evs' @ ev # evs:tr p; wdef p; Nonce n ~:used evs;
Nonce n:parts {msg ev} |] ==> EX R s. R:p & ap' s R = ev"
apply (drule trunc, simp, ind_cases "ev # evs:tr p", simp)
by (drule_tac x=X in in_sub, drule parts_sub, simp, simp, blast+)
lemma fresh_ruleD: "[| fresh p R' s' n Ks evs; keys R' s' n evs <= Ks; wdef p;
has_only_Says' p; evs:tr p; ALL R k s. nonce s k = n --> Nonce n:used evs -->
R:p --> k:newn R --> Nonce n:parts {apm' s R} --> apm' s R:guard n Ks -->
apm' s R:parts (spies evs) --> keys R s n evs <= Ks --> P |] ==> P"
apply (frule fresh_used, simp)
apply (unfold fresh_def, clarify)
apply (drule_tac x=R' in spec)
apply (drule fresh_newn, simp+, clarify)
apply (drule_tac x=k in spec)
apply (drule_tac x=s' in spec)
apply (subgoal_tac "apm' s' R':parts (spies (evs2 @ ap' s' R' # evs1))")
apply (case_tac R', drule has_only_Says'D, simp, clarsimp)
apply (case_tac R', drule has_only_Says'D, simp, clarsimp)
apply (rule_tac Y="apm s' X" in parts_parts, blast)
by (rule parts.Inj, rule Says_imp_spies, simp, blast)
subsection\<open>safe keys\<close>
definition safe :: "key set => msg set => bool" where
"safe Ks G == ALL K. K:Ks --> Key K ~:analz G"
lemma safeD [dest]: "[| safe Ks G; K:Ks |] ==> Key K ~:analz G"
by (unfold safe_def, blast)
lemma safe_insert: "safe Ks (insert X G) ==> safe Ks G"
by (unfold safe_def, blast)
lemma Guard_safe: "[| Guard n Ks G; safe Ks G |] ==> Nonce n ~:analz G"
by (blast dest: Guard_invKey)
subsection\<open>guardedness preservation\<close>
definition preserv :: "proto => keyfun => nat => key set => bool" where
"preserv p keys n Ks == (ALL evs R' s' R s. evs:tr p -->
Guard n Ks (spies evs) --> safe Ks (spies evs) --> fresh p R' s' n Ks evs -->
keys R' s' n evs <= Ks --> R:p --> ok evs R s --> apm' s R:guard n Ks)"
lemma preservD: "[| preserv p keys n Ks; evs:tr p; Guard n Ks (spies evs);
safe Ks (spies evs); fresh p R' s' n Ks evs; R:p; ok evs R s;
keys R' s' n evs <= Ks |] ==> apm' s R:guard n Ks"
by (unfold preserv_def, blast)
lemma preservD': "[| preserv p keys n Ks; evs:tr p; Guard n Ks (spies evs);
safe Ks (spies evs); fresh p R' s' n Ks evs; (l,Says A B X):p;
ok evs (l,Says A B X) s; keys R' s' n evs <= Ks |] ==> apm s X:guard n Ks"
by (drule preservD, simp+)
subsection\<open>monotonic keyfun\<close>
definition monoton :: "proto => keyfun => bool" where
"monoton p keys == ALL R' s' n ev evs. ev # evs:tr p -->
keys R' s' n evs <= keys R' s' n (ev # evs)"
lemma monotonD [dest]: "[| keys R' s' n (ev # evs) <= Ks; monoton p keys;
ev # evs:tr p |] ==> keys R' s' n evs <= Ks"
by (unfold monoton_def, blast)
subsection\<open>guardedness theorem\<close>
lemma Guard_tr [rule_format]: "[| evs:tr p; has_only_Says' p;
preserv p keys n Ks; monoton p keys; Guard n Ks (initState Spy) |] ==>
safe Ks (spies evs) --> fresh p R' s' n Ks evs --> keys R' s' n evs <= Ks -->
Guard n Ks (spies evs)"
apply (erule tr.induct)
(* Nil *)
apply simp
(* Fake *)
apply (clarify, drule freshD, clarsimp)
apply (case_tac evs2)
(* evs2 = [] *)
apply (frule has_only_Says'D, simp)
apply (clarsimp, blast)
(* evs2 = aa # list *)
apply (clarsimp, rule conjI)
apply (blast dest: safe_insert)
(* X:guard n Ks *)
apply (rule in_synth_Guard, simp, rule Guard_analz)
apply (blast dest: safe_insert)
apply (drule safe_insert, simp add: safe_def)
(* Proto *)
apply (clarify, drule freshD, clarify)
apply (case_tac evs2)
(* evs2 = [] *)
apply (frule has_only_Says'D, simp)
apply (frule_tac R=R' in has_only_Says'D, simp)
apply (case_tac R', clarsimp, blast)
(* evs2 = ab # list *)
apply (frule has_only_Says'D, simp)
apply (clarsimp, rule conjI)
apply (drule Proto, simp+, blast dest: safe_insert)
(* apm s X:guard n Ks *)
apply (frule Proto, simp+)
apply (erule preservD', simp+)
apply (blast dest: safe_insert)
apply (blast dest: safe_insert)
by (blast, simp, simp, blast)
subsection\<open>useful properties for guardedness\<close>
lemma newn_neq_used: "[| Nonce n:used evs; ok evs R s; k:newn R |]
==> n ~= nonce s k"
by (auto simp: ok_def)
lemma ok_Guard: "[| ok evs R s; Guard n Ks (spies evs); x:fst R; is_Says x |]
==> apm s (msg x):parts (spies evs) & apm s (msg x):guard n Ks"
apply (unfold ok_def is_Says_def, clarify)
apply (drule_tac x="Says A B X" in spec, simp)
by (drule Says_imp_spies, auto intro: parts_parts)
lemma ok_parts_not_new: "[| Y:parts (spies evs); Nonce (nonce s n):parts {Y};
ok evs R s |] ==> n ~:newn R"
by (auto simp: ok_def dest: not_used_not_spied parts_parts)
subsection\<open>unicity\<close>
definition uniq :: "proto => secfun => bool" where
"uniq p secret == ALL evs R R' n n' Ks s s'. R:p --> R':p -->
n:newn R --> n':newn R' --> nonce s n = nonce s' n' -->
Nonce (nonce s n):parts {apm' s R} --> Nonce (nonce s n):parts {apm' s' R'} -->
apm' s R:guard (nonce s n) Ks --> apm' s' R':guard (nonce s n) Ks -->
evs:tr p --> Nonce (nonce s n) ~:analz (spies evs) -->
secret R n s Ks:parts (spies evs) --> secret R' n' s' Ks:parts (spies evs) -->
secret R n s Ks = secret R' n' s' Ks"
lemma uniqD: "[| uniq p secret; evs: tr p; R:p; R':p; n:newn R; n':newn R';
nonce s n = nonce s' n'; Nonce (nonce s n) ~:analz (spies evs);
Nonce (nonce s n):parts {apm' s R}; Nonce (nonce s n):parts {apm' s' R'};
secret R n s Ks:parts (spies evs); secret R' n' s' Ks:parts (spies evs);
apm' s R:guard (nonce s n) Ks; apm' s' R':guard (nonce s n) Ks |] ==>
secret R n s Ks = secret R' n' s' Ks"
by (unfold uniq_def, blast)
definition ord :: "proto => (rule => rule => bool) => bool" where
"ord p inff == ALL R R'. R:p --> R':p --> ~ inff R R' --> inff R' R"
lemma ordD: "[| ord p inff; ~ inff R R'; R:p; R':p |] ==> inff R' R"
by (unfold ord_def, blast)
definition uniq' :: "proto => (rule => rule => bool) => secfun => bool" where
"uniq' p inff secret == ALL evs R R' n n' Ks s s'. R:p --> R':p -->
inff R R' --> n:newn R --> n':newn R' --> nonce s n = nonce s' n' -->
Nonce (nonce s n):parts {apm' s R} --> Nonce (nonce s n):parts {apm' s' R'} -->
apm' s R:guard (nonce s n) Ks --> apm' s' R':guard (nonce s n) Ks -->
evs:tr p --> Nonce (nonce s n) ~:analz (spies evs) -->
secret R n s Ks:parts (spies evs) --> secret R' n' s' Ks:parts (spies evs) -->
secret R n s Ks = secret R' n' s' Ks"
lemma uniq'D: "[| uniq' p inff secret; evs: tr p; inff R R'; R:p; R':p; n:newn R;
n':newn R'; nonce s n = nonce s' n'; Nonce (nonce s n) ~:analz (spies evs);
Nonce (nonce s n):parts {apm' s R}; Nonce (nonce s n):parts {apm' s' R'};
secret R n s Ks:parts (spies evs); secret R' n' s' Ks:parts (spies evs);
apm' s R:guard (nonce s n) Ks; apm' s' R':guard (nonce s n) Ks |] ==>
secret R n s Ks = secret R' n' s' Ks"
by (unfold uniq'_def, blast)
lemma uniq'_imp_uniq: "[| uniq' p inff secret; ord p inff |] ==> uniq p secret"
apply (unfold uniq_def)
apply (rule allI)+
apply (case_tac "inff R R'")
apply (blast dest: uniq'D)
by (auto dest: ordD uniq'D intro: sym)
subsection\<open>Needham-Schroeder-Lowe\<close>
definition a :: agent where "a == Friend 0"
definition b :: agent where "b == Friend 1"
definition a' :: agent where "a' == Friend 2"
definition b' :: agent where "b' == Friend 3"
definition Na :: nat where "Na == 0"
definition Nb :: nat where "Nb == 1"
abbreviation
ns1 :: rule where
"ns1 == ({}, Says a b (Crypt (pubK b) \<lbrace>Nonce Na, Agent a\<rbrace>))"
abbreviation
ns2 :: rule where
"ns2 == ({Says a' b (Crypt (pubK b) \<lbrace>Nonce Na, Agent a\<rbrace>)},
Says b a (Crypt (pubK a) \<lbrace>Nonce Na, Nonce Nb, Agent b\<rbrace>))"
abbreviation
ns3 :: rule where
"ns3 == ({Says a b (Crypt (pubK b) \<lbrace>Nonce Na, Agent a\<rbrace>),
Says b' a (Crypt (pubK a) \<lbrace>Nonce Na, Nonce Nb, Agent b\<rbrace>)},
Says a b (Crypt (pubK b) (Nonce Nb)))"
inductive_set ns :: proto where
[iff]: "ns1:ns"
| [iff]: "ns2:ns"
| [iff]: "ns3:ns"
abbreviation (input)
ns3a :: event where
"ns3a == Says a b (Crypt (pubK b) \<lbrace>Nonce Na, Agent a\<rbrace>)"
abbreviation (input)
ns3b :: event where
"ns3b == Says b' a (Crypt (pubK a) \<lbrace>Nonce Na, Nonce Nb, Agent b\<rbrace>)"
definition keys :: "keyfun" where
"keys R' s' n evs == {priK' s' a, priK' s' b}"
lemma "monoton ns keys"
by (simp add: keys_def monoton_def)
definition secret :: "secfun" where
"secret R n s Ks ==
(if R=ns1 then apm s (Crypt (pubK b) \<lbrace>Nonce Na, Agent a\<rbrace>)
else if R=ns2 then apm s (Crypt (pubK a) \<lbrace>Nonce Na, Nonce Nb, Agent b\<rbrace>)
else Number 0)"
definition inf :: "rule => rule => bool" where
"inf R R' == (R=ns1 | (R=ns2 & R'~=ns1) | (R=ns3 & R'=ns3))"
lemma inf_is_ord [iff]: "ord ns inf"
apply (unfold ord_def inf_def)
apply (rule allI)+
apply (rule impI)
apply (simp add: split_paired_all)
by (rule impI, erule ns.cases, simp_all)+
subsection\<open>general properties\<close>
lemma ns_has_only_Says' [iff]: "has_only_Says' ns"
apply (unfold has_only_Says'_def)
apply (rule allI, rule impI)
apply (simp add: split_paired_all)
by (erule ns.cases, auto)
lemma newn_ns1 [iff]: "newn ns1 = {Na}"
by (simp add: newn_def)
lemma newn_ns2 [iff]: "newn ns2 = {Nb}"
by (auto simp: newn_def Na_def Nb_def)
lemma newn_ns3 [iff]: "newn ns3 = {}"
by (auto simp: newn_def)
lemma ns_wdef [iff]: "wdef ns"
by (auto simp: wdef_def elim: ns.cases)
subsection\<open>guardedness for NSL\<close>
lemma "uniq ns secret ==> preserv ns keys n Ks"
apply (unfold preserv_def)
apply (rule allI)+
apply (rule impI, rule impI, rule impI, rule impI, rule impI)
apply (erule fresh_ruleD, simp, simp, simp, simp)
apply (rule allI)+
apply (rule impI, rule impI, rule impI)
apply (simp add: split_paired_all)
apply (erule ns.cases)
(* fresh with NS1 *)
apply (rule impI, rule impI, rule impI, rule impI, rule impI, rule impI)
apply (erule ns.cases)
(* NS1 *)
apply clarsimp
apply (frule newn_neq_used, simp, simp)
apply (rule No_Nonce, simp)
(* NS2 *)
apply clarsimp
apply (frule newn_neq_used, simp, simp)
apply (case_tac "nonce sa Na = nonce s Na")
apply (frule Guard_safe, simp)
apply (frule Crypt_guard_invKey, simp)
apply (frule ok_Guard, simp, simp, simp, clarsimp)
apply (frule_tac K="pubK' s b" in Crypt_guard_invKey, simp)
apply (frule_tac R=ns1 and R'=ns1 and Ks=Ks and s=sa and s'=s in uniqD, simp+)
apply (simp add: secret_def, simp add: secret_def, force, force)
apply (simp add: secret_def keys_def, blast)
apply (rule No_Nonce, simp)
(* NS3 *)
apply clarsimp
apply (case_tac "nonce sa Na = nonce s Nb")
apply (frule Guard_safe, simp)
apply (frule Crypt_guard_invKey, simp)
apply (frule_tac x=ns3b in ok_Guard, simp, simp, simp, clarsimp)
apply (frule_tac K="pubK' s a" in Crypt_guard_invKey, simp)
apply (frule_tac R=ns1 and R'=ns2 and Ks=Ks and s=sa and s'=s in uniqD, simp+)
apply (simp add: secret_def, simp add: secret_def, force, force)
apply (simp add: secret_def, rule No_Nonce, simp)
(* fresh with NS2 *)
apply (rule impI, rule impI, rule impI, rule impI, rule impI, rule impI)
apply (erule ns.cases)
(* NS1 *)
apply clarsimp
apply (frule newn_neq_used, simp, simp)
apply (rule No_Nonce, simp)
(* NS2 *)
apply clarsimp
apply (frule newn_neq_used, simp, simp)
apply (case_tac "nonce sa Nb = nonce s Na")
apply (frule Guard_safe, simp)
apply (frule Crypt_guard_invKey, simp)
apply (frule ok_Guard, simp, simp, simp, clarsimp)
apply (frule_tac K="pubK' s b" in Crypt_guard_invKey, simp)
apply (frule_tac R=ns2 and R'=ns1 and Ks=Ks and s=sa and s'=s in uniqD, simp+)
apply (simp add: secret_def, simp add: secret_def, force, force)
apply (simp add: secret_def, rule No_Nonce, simp)
(* NS3 *)
apply clarsimp
apply (case_tac "nonce sa Nb = nonce s Nb")
apply (frule Guard_safe, simp)
apply (frule Crypt_guard_invKey, simp)
apply (frule_tac x=ns3b in ok_Guard, simp, simp, simp, clarsimp)
apply (frule_tac K="pubK' s a" in Crypt_guard_invKey, simp)
apply (frule_tac R=ns2 and R'=ns2 and Ks=Ks and s=sa and s'=s in uniqD, simp+)
apply (simp add: secret_def, simp add: secret_def, force, force)
apply (simp add: secret_def keys_def, blast)
apply (rule No_Nonce, simp)
(* fresh with NS3 *)
by simp
subsection\<open>unicity for NSL\<close>
lemma "uniq' ns inf secret"
apply (unfold uniq'_def)
apply (rule allI)+
apply (simp add: split_paired_all)
apply (rule impI, erule ns.cases)
(* R = ns1 *)
apply (rule impI, erule ns.cases)
(* R' = ns1 *)
apply (rule impI, rule impI, rule impI, rule impI)
apply (rule impI, rule impI, rule impI, rule impI)
apply (rule impI, erule tr.induct)
(* Nil *)
apply (simp add: secret_def)
(* Fake *)
apply (clarify, simp add: secret_def)
apply (drule notin_analz_insert)
apply (drule Crypt_insert_synth, simp, simp, simp)
apply (drule Crypt_insert_synth, simp, simp, simp, simp)
(* Proto *)
apply (erule_tac P="ok evsa R sa" in rev_mp)
apply (simp add: split_paired_all)
apply (erule ns.cases)
(* ns1 *)
apply (clarify, simp add: secret_def)
apply (erule disjE, erule disjE, clarsimp)
apply (drule ok_parts_not_new, simp, simp, simp)
apply (clarify, drule ok_parts_not_new, simp, simp, simp)
(* ns2 *)
apply (simp add: secret_def)
(* ns3 *)
apply (simp add: secret_def)
(* R' = ns2 *)
apply (rule impI, rule impI, rule impI, rule impI)
apply (rule impI, rule impI, rule impI, rule impI)
apply (rule impI, erule tr.induct)
(* Nil *)
apply (simp add: secret_def)
(* Fake *)
apply (clarify, simp add: secret_def)
apply (drule notin_analz_insert)
apply (drule Crypt_insert_synth, simp, simp, simp)
apply (drule_tac n="nonce s' Nb" in Crypt_insert_synth, simp, simp, simp, simp)
(* Proto *)
apply (erule_tac P="ok evsa R sa" in rev_mp)
apply (simp add: split_paired_all)
apply (erule ns.cases)
(* ns1 *)
apply (clarify, simp add: secret_def)
apply (drule_tac s=sa and n=Na in ok_parts_not_new, simp, simp, simp)
(* ns2 *)
apply (clarify, simp add: secret_def)
apply (drule_tac s=sa and n=Nb in ok_parts_not_new, simp, simp, simp)
(* ns3 *)
apply (simp add: secret_def)
(* R' = ns3 *)
apply simp
(* R = ns2 *)
apply (rule impI, erule ns.cases)
(* R' = ns1 *)
apply (simp only: inf_def, blast)
(* R' = ns2 *)
apply (rule impI, rule impI, rule impI, rule impI)
apply (rule impI, rule impI, rule impI, rule impI)
apply (rule impI, erule tr.induct)
(* Nil *)
apply (simp add: secret_def)
(* Fake *)
apply (clarify, simp add: secret_def)
apply (drule notin_analz_insert)
apply (drule_tac n="nonce s' Nb" in Crypt_insert_synth, simp, simp, simp)
apply (drule_tac n="nonce s' Nb" in Crypt_insert_synth, simp, simp, simp, simp)
(* Proto *)
apply (erule_tac P="ok evsa R sa" in rev_mp)
apply (simp add: split_paired_all)
apply (erule ns.cases)
(* ns1 *)
apply (simp add: secret_def)
(* ns2 *)
apply (clarify, simp add: secret_def)
apply (erule disjE, erule disjE, clarsimp, clarsimp)
apply (drule_tac s=sa and n=Nb in ok_parts_not_new, simp, simp, simp)
apply (erule disjE, clarsimp)
apply (drule_tac s=sa and n=Nb in ok_parts_not_new, simp, simp, simp)
by (simp_all add: secret_def)
end
|
The brand Excel Next strongly represents the great dynamism of Excel Group. The concept of the brand Next is to offer a comfortable stay at the best available price, taking advantage of a location which is very close to strategic points of interest.
Having both the lowest price and an innovative service is what makes Excel Next original.
Excel Milano 3 Next is composed of two recently constructed buildings inside the Humanitas Institute area, and offers 59 comfortable and essential rooms. It is located in Fizzonasco di Pieve Emanuele, in the south of Milan, and is a convenient solution for those looking for a stay close to Humanitas Hospital, as well as for those who need to go to the Assago Forum.
Thanks to the synergy with Excel Milano 3, Excel Milano 3 Next offers a high-quality service, with bed and bath linen, staff and cleaning services of a 4-star hotel.
Excel Milano 3 guests can join the new Sporting Milano 3 by requesting the entrance card at reception, where they will receive all the information.
Our Excel corporate centre is available for meetings and events. |
-- Andreas, 2014-09-07
open import Common.Equality
data ⊥ : Set where
record ⊤ : Set where
-- Agda allows us to prove that A ≠ (A → B)
test : {A B : Set} → A ≡ (A → B) → ⊥
test ()
-- Agda allows us to prove that A ≠ (B → A)
test' : {A B : Set} → A ≡ (B → A) → ⊥
test' ()
-- But ⊤ is isomorphic to ⊤ → ⊤, which under univalence
-- isomorphism-as-equality contradicts both test and test'.
test'' : (⊤ ≡ (⊤ → ⊤)) → ⊥
test'' = test'
there : ⊤ → (⊤ → ⊤)
there _ _ = _
back : (⊤ → ⊤) → ⊤
back _ = _
|
module Sigma
import Basic
import Unit
import Void
infixr 0 #
public export
data Sigma : (a : Type) -> (p : a -> Type) -> Type where
(#) : (x : a) -> p x -> Sigma a p
public export
Pair : Type -> Type -> Type
Pair a b = Sigma a (const b)
public export
pr1 : Sigma a p -> a
pr1 (x # _) = x
public export
pr2 : (s : Sigma a p) -> p (pr1 s)
pr2 (_ # y) = y
public export
SigmaInduction : (q : Sigma a p -> Type)
-> ((x : a) -> (y : p x) -> q (x # y))
-> (s : Sigma a p) -> q s
SigmaInduction _ f (x # y) = f x y
public export
uncurry : (q : Sigma a p -> Type)
-> ((x : a) -> (y : p x) -> q (x # y))
-> (s : Sigma a p) -> q s
uncurry = SigmaInduction
public export
curry : (q : Sigma a p -> Type)
-> ((s : Sigma a p) -> q s)
-> ((x : a) -> (y : p x) -> q (x # y))
curry _ f x y = f (x # y)
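-- The dependent curry/uncurry pair above witnesses an equivalence (stated
-- informally, my own remark): ((s : Sigma a p) -> q s) is interderivable
-- with ((x : a) -> (y : p x) -> q (x # y)).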
|
Formal statement is: lemma compact_nest: fixes F :: "'a::linorder \<Rightarrow> 'b::heine_borel set" assumes F: "\<And>n. compact(F n)" "\<And>n. F n \<noteq> {}" and mono: "\<And>m n. m \<le> n \<Longrightarrow> F n \<subseteq> F m" shows "\<Inter>(range F) \<noteq> {}" Informal statement is: If $F_n$ is a sequence of nonempty compact sets such that $F_n \subseteq F_m$ whenever $n \leq m$, then $\bigcap_{n=1}^\infty F_n$ is nonempty. |
module Numeral.PositiveInteger.Oper where
open import Numeral.PositiveInteger
infixl 10010 _+_
infixl 10020 _⋅_
infixl 10030 _^_
-- Addition
_+_ : ℕ₊ → ℕ₊ → ℕ₊
x + 𝟏 = 𝐒(x)
x + 𝐒(y) = 𝐒(x + y)
-- Multiplication
_⋅_ : ℕ₊ → ℕ₊ → ℕ₊
x ⋅ 𝟏 = x
x ⋅ 𝐒(y) = x + (x ⋅ y)
-- Exponentiation
_^_ : ℕ₊ → ℕ₊ → ℕ₊
x ^ 𝟏 = x
x ^ 𝐒(y) = x ⋅ (x ^ y)
-- Factorial
_! : ℕ₊ → ℕ₊
𝟏 ! = 𝟏
𝐒(x) ! = 𝐒(x) ⋅ (x !)
open import Data.Option
open import Data.Option.Functions
-- Truncated subtraction
_−₀_ : ℕ₊ → ℕ₊ → Option(ℕ₊)
𝟏 −₀ _ = None
𝐒(x) −₀ 𝟏 = Some x
𝐒(x) −₀ 𝐒(y) = x −₀ y
open import Data.Boolean
open import Type
_≤?_ : ℕ₊ → ℕ₊ → Bool
𝟏 ≤? _ = 𝑇
𝐒(x) ≤? 𝟏 = 𝐹
𝐒(x) ≤? 𝐒(y) = x ≤? y
|
/* -----------------------------------------------------------------------------
* Copyright 2021 Jonathan Haigh
* SPDX-License-Identifier: MIT
* ---------------------------------------------------------------------------*/
#ifndef SQ_INCLUDE_GUARD_system_linux_SqFieldSchemaImpl_h_
#define SQ_INCLUDE_GUARD_system_linux_SqFieldSchemaImpl_h_
#include "core/typeutil.h"
#include "system/SqFieldSchema.gen.h"
#include "system/schema.h"
#include <gsl/gsl>
namespace sq::system::linux {
class SqFieldSchemaImpl : public SqFieldSchema<SqFieldSchemaImpl> {
public:
explicit SqFieldSchemaImpl(const FieldSchema &field_schema);
SQ_ND Result get_name() const;
SQ_ND Result get_doc() const;
SQ_ND Result get_params() const;
SQ_ND Result get_return_type() const;
SQ_ND Result get_return_list() const;
SQ_ND Result get_null() const;
SQ_ND Primitive to_primitive() const override;
private:
gsl::not_null<const FieldSchema *> field_schema_;
};
} // namespace sq::system::linux
#endif // SQ_INCLUDE_GUARD_system_linux_SqFieldSchemaImpl_h_
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: mgga_exc *)
(* prefix:
mgga_c_vsxc_params *params;
assert(p->params != NULL);
params = (mgga_c_vsxc_params * )(p->params);
*)
$define lda_c_pw_params
$define lda_c_pw_modified_params
$include "lda_c_pw.mpl"
$include "gvt4.mpl"
vsxc_comp := (rs, z, spin, xs, ts) ->
+ lda_stoll_par(f_pw, rs, z, 1)
* gtv4(params_a_alpha_ss, params_a_dss, xs, 2*(ts - K_FACTOR_C))
* Fermi_D(xs, ts):
(* The parallel and perpendicular components of the energy *)
vsxc_fpar := (rs, z, xs0, xs1, ts0, ts1) ->
+ vsxc_comp(rs, z, 1, xs0, ts0)
+ vsxc_comp(rs, -z, -1, xs1, ts1):
vsxc_fperp := (rs, z, xs0, xs1, ts0, ts1) ->
+ lda_stoll_perp(f_pw, rs, z)
* gtv4(params_a_alpha_ab, params_a_dab, sqrt(xs0^2 + xs1^2), 2*(ts0 + ts1 - 2*K_FACTOR_C)):
vsxc_f := (rs, z, xs0, xs1, ts0, ts1) ->
+ vsxc_fpar (rs, z, xs0, xs1, ts0, ts1)
+ vsxc_fperp(rs, z, xs0, xs1, ts0, ts1):
f := (rs, z, xt, xs0, xs1, us0, us1, ts0, ts1) ->
vsxc_f(rs, z, xs0, xs1, ts0, ts1):
|
module Prelude.Char where
open import Prelude.Bool
postulate
Char : Set
{-# BUILTIN CHAR Char #-}
private
primitive
primCharEquality : (c c' : Char) -> Bool
postulate
eof : Char
{-# COMPILED_EPIC eof () -> Int = foreign Int "eof" () #-}
charEq : Char -> Char -> Bool
charEq = primCharEquality
|
[STATEMENT]
lemma semIntAll_prSubstAll:
assumes "wlsSEM SEM"
shows "prSubstAll (semInt SEM) (semIntAbs SEM) SEM"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. prSubstAll (semInt SEM) (semIntAbs SEM) SEM
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
wlsSEM SEM
goal (1 subgoal):
1. prSubstAll (semInt SEM) (semIntAbs SEM) SEM
[PROOF STEP]
unfolding prSubstAll_def
[PROOF STATE]
proof (prove)
using this:
wlsSEM SEM
goal (1 subgoal):
1. prSubst (semInt SEM) SEM \<and> prSubstAbs (semInt SEM) (semIntAbs SEM) SEM
[PROOF STEP]
by(simp add: semInt_prSubst semIntAbs_prSubstAbs) |
(* Title: HOL/Auth/n_german_lemma_on_inv__36.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_german Protocol Case Study*}
theory n_german_lemma_on_inv__36 imports n_german_base
begin
section{*All lemmas on causal relation between inv__36 and some rule r*}
lemma n_RecvReqSVsinv__36:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvReqS N i" apply fastforce done
from a2 obtain p__Inv1 p__Inv2 where a2:"p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv1)\<or>(i~=p__Inv1\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv1) ''Cmd'')) (Const GntE)) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv2)) (Const true))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv1)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv1) ''Cmd'')) (Const GntE)) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv2)) (Const true))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv1\<and>i~=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv1) ''Cmd'')) (Const GntE)) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv2)) (Const true))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvReqEVsinv__36:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE N i)" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvReqE N i" apply fastforce done
from a2 obtain p__Inv1 p__Inv2 where a2:"p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv1)\<or>(i~=p__Inv1\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv1) ''Cmd'')) (Const GntE)) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv2)) (Const true))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv1)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv1) ''Cmd'')) (Const GntE)) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv2)) (Const true))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv1\<and>i~=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv1) ''Cmd'')) (Const GntE)) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv2)) (Const true))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInv__part__0Vsinv__36:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInv__part__0 i" apply fastforce done
from a2 obtain p__Inv1 p__Inv2 where a2:"p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv1)\<or>(i~=p__Inv1\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv1)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv1\<and>i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInv__part__1Vsinv__36:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInv__part__1 i" apply fastforce done
from a2 obtain p__Inv1 p__Inv2 where a2:"p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv1)\<or>(i~=p__Inv1\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv1)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv1\<and>i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInvAckVsinv__36:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvAck i" apply fastforce done
from a2 obtain p__Inv1 p__Inv2 where a2:"p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv1)\<or>(i~=p__Inv1\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv1)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv1\<and>i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntSVsinv__36:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntS i" apply fastforce done
from a2 obtain p__Inv1 p__Inv2 where a2:"p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv1)\<or>(i~=p__Inv1\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv1)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv1\<and>i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntEVsinv__36:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntE N i" apply fastforce done
from a2 obtain p__Inv1 p__Inv2 where a2:"p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv1)\<or>(i~=p__Inv1\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv1)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Para (Ident ''InvSet'') p__Inv2)) (Const true)) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv2)) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv1\<and>i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntSVsinv__36:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntS i" apply fastforce done
from a2 obtain p__Inv1 p__Inv2 where a2:"p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv1)\<or>(i~=p__Inv1\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv1)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv1\<and>i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntEVsinv__36:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntE i" apply fastforce done
from a2 obtain p__Inv1 p__Inv2 where a2:"p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv1)\<or>(i~=p__Inv1\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv1)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv1\<and>i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendReqE__part__1Vsinv__36:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_StoreVsinv__36:
assumes a1: "\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvInvAckVsinv__36:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvInvAck i" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqE__part__0Vsinv__36:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqSVsinv__36:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqS i" and
a2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__36 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
-- {-# OPTIONS -v tc.term.exlam:100 -v extendedlambda:100 -v int2abs.reifyterm:100 -v tc.with:100 -v tc.mod.apply:100 #-}
module Issue778b (Param : Set) where
open import Issue778M Param
data D : (Nat → Nat) → Set where
d : D pred → D pred
-- Ulf, 2013-11-11: With the fix to issue 59 that inlines with functions,
-- this no longer termination checks. The problem is having a termination
-- path going through a with-expression (the variable x in this case).
{-# TERMINATING #-}
test : (f : Nat → Nat) → D f → Nat
test .pred (d x) = bla
where bla : Nat
bla with (d x) -- Andreas, 2014-11-06 "with x" has been outlawed.
... | (d (d y)) = test pred y
|
/-
Copyright (c) 2020 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import data.quaternion
import analysis.normed_space.inner_product
/-!
# Quaternions as a normed algebra
In this file we define the following structures on the space `ℍ := ℍ[ℝ]` of quaternions:
* inner product space;
* normed ring;
* normed space over `ℝ`.
## Notation
The following notation is available with `open_locale quaternion`:
* `ℍ` : quaternions
## Tags
quaternion, normed ring, normed space, normed algebra
-/
localized "notation `ℍ` := quaternion ℝ" in quaternion
open_locale real_inner_product_space
noncomputable theory
namespace quaternion
instance : has_inner ℝ ℍ := ⟨λ a b, (a * b.conj).re⟩
lemma inner_self (a : ℍ) : ⟪a, a⟫ = norm_sq a := rfl
lemma inner_def (a b : ℍ) : ⟪a, b⟫ = (a * b.conj).re := rfl
instance : inner_product_space ℝ ℍ :=
inner_product_space.of_core
{ inner := has_inner.inner,
conj_sym := λ x y, by simp [inner_def, mul_comm],
nonneg_re := λ x, norm_sq_nonneg,
definite := λ x, norm_sq_eq_zero.1,
add_left := λ x y z, by simp only [inner_def, add_mul, add_re],
smul_left := λ x y r, by simp [inner_def] }
lemma norm_sq_eq_norm_sq (a : ℍ) : norm_sq a = ∥a∥ * ∥a∥ :=
by rw [← inner_self, real_inner_self_eq_norm_sq]
instance : norm_one_class ℍ :=
⟨by rw [norm_eq_sqrt_real_inner, inner_self, norm_sq.map_one, real.sqrt_one]⟩
@[simp] lemma norm_mul (a b : ℍ) : ∥a * b∥ = ∥a∥ * ∥b∥ :=
begin
simp only [norm_eq_sqrt_real_inner, inner_self, norm_sq.map_mul],
exact real.sqrt_mul norm_sq_nonneg _
end
@[simp, norm_cast] lemma norm_coe (a : ℝ) : ∥(a : ℍ)∥ = ∥a∥ :=
by rw [norm_eq_sqrt_real_inner, inner_self, norm_sq_coe, real.sqrt_sq_eq_abs, real.norm_eq_abs]
noncomputable instance : normed_ring ℍ :=
{ dist_eq := λ _ _, rfl,
norm_mul := λ a b, (norm_mul a b).le }
noncomputable instance : normed_algebra ℝ ℍ :=
{ norm_algebra_map_eq := norm_coe,
to_algebra := quaternion.algebra }
instance : has_coe ℂ ℍ := ⟨λ z, ⟨z.re, z.im, 0, 0⟩⟩
@[simp, norm_cast] lemma coe_complex_add (z w : ℂ) : ↑(z + w) = (z + w : ℍ) := by ext; simp
@[simp, norm_cast] lemma coe_complex_mul (z w : ℂ) : ↑(z * w) = (z * w : ℍ) := by ext; simp
@[simp, norm_cast] lemma coe_complex_zero : ((0 : ℂ) : ℍ) = 0 := rfl
@[simp, norm_cast] lemma coe_complex_one : ((1 : ℂ) : ℍ) = 1 := rfl
@[simp, norm_cast] lemma coe_real_complex_mul (r : ℝ) (z : ℂ) : (r • z : ℍ) = ↑r * ↑z :=
by ext; simp
@[simp, norm_cast] lemma coe_complex_coe (r : ℝ) : ((r : ℂ) : ℍ) = r := rfl
/-- Coercion `ℂ →ₐ[ℝ] ℍ` as an algebra homomorphism. -/
def of_complex : ℂ →ₐ[ℝ] ℍ :=
{ to_fun := coe,
map_one' := rfl,
map_zero' := rfl,
map_add' := coe_complex_add,
map_mul' := coe_complex_mul,
commutes' := λ x, rfl }
@[simp] lemma coe_of_complex : ⇑of_complex = coe := rfl
end quaternion
|
/-
Copyright (c) 2021 Kyle Miller. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kyle Miller
-/
import combinatorics.simple_graph.basic
/-!
# Graph connectivity
In a simple graph,
* A *walk* is a finite sequence of adjacent vertices, and can be
thought of equally well as a sequence of directed edges.
* A *trail* is a walk whose edges each appear no more than once.
* A *path* is a trail whose vertices appear no more than once.
* A *cycle* is a nonempty trail whose first and last vertices are the
same and whose vertices except for the first appear no more than once.
**Warning:** graph theorists mean something different by "path" than
do homotopy theorists. A "walk" in graph theory is a "path" in
homotopy theory. Another warning: some graph theorists use "path" and
"simple path" for "walk" and "path."
Some definitions and theorems have inspiration from multigraph
counterparts in [Chou1994].
## Main definitions
* `simple_graph.walk`
* `simple_graph.is_trail`, `simple_graph.is_path`, and `simple_graph.is_cycle`.
* `simple_graph.path`
## Tags
walks, trails, paths, circuits, cycles
-/
universes u
namespace simple_graph
variables {V : Type u} (G : simple_graph V)
/-- A walk is a sequence of adjacent vertices. For vertices `u v : V`,
the type `walk u v` consists of all walks starting at `u` and ending at `v`.
We say that a walk *visits* the vertices it contains. The set of vertices a
walk visits is `simple_graph.walk.support`. -/
@[derive decidable_eq]
inductive walk : V → V → Type u
| nil {u : V} : walk u u
| cons {u v w : V} (h : G.adj u v) (p : walk v w) : walk u w
attribute [refl] walk.nil
instance walk.inhabited (v : V) : inhabited (G.walk v v) := ⟨by refl⟩
namespace walk
variables {G}
lemma exists_eq_cons_of_ne : Π {u v : V} (hne : u ≠ v) (p : G.walk u v),
∃ (w : V) (h : G.adj u w) (p' : G.walk w v), p = cons h p'
| _ _ hne nil := (hne rfl).elim
| _ _ _ (cons h p') := ⟨_, h, p', rfl⟩
/-- The length of a walk is the number of edges along it. -/
def length : Π {u v : V}, G.walk u v → ℕ
| _ _ nil := 0
| _ _ (cons _ q) := q.length.succ
/-- The concatenation of two compatible walks. -/
@[trans]
def append : Π {u v w : V}, G.walk u v → G.walk v w → G.walk u w
| _ _ _ nil q := q
| _ _ _ (cons h p) q := cons h (p.append q)
/-- The concatenation of the reverse of the first walk with the second walk. -/
protected def reverse_aux : Π {u v w : V}, G.walk u v → G.walk u w → G.walk v w
| _ _ _ nil q := q
| _ _ _ (cons h p) q := reverse_aux p (cons (G.symm h) q)
/-- The walk in reverse. -/
@[symm]
def reverse {u v : V} (w : G.walk u v) : G.walk v u := w.reverse_aux nil
/-- Get the `n`th vertex from a walk, where `n` is generally expected to be
between `0` and `p.length`, inclusive.
If `n` is greater than or equal to `p.length`, the result is the path's endpoint. -/
def get_vert : Π {u v : V} (p : G.walk u v) (n : ℕ), V
| u v nil _ := u
| u v (cons _ _) 0 := u
| u v (cons _ q) (n+1) := q.get_vert n
@[simp] lemma cons_append {u v w x : V} (h : G.adj u v) (p : G.walk v w) (q : G.walk w x) :
(cons h p).append q = cons h (p.append q) := rfl
@[simp] lemma cons_nil_append {u v w : V} (h : G.adj u v) (p : G.walk v w) :
(cons h nil).append p = cons h p := rfl
@[simp] lemma append_nil : Π {u v : V} (p : G.walk u v), p.append nil = p
| _ _ nil := rfl
| _ _ (cons h p) := by rw [cons_append, append_nil]
@[simp] lemma nil_append {u v : V} (p : G.walk u v) : nil.append p = p := rfl
lemma append_assoc : Π {u v w x : V} (p : G.walk u v) (q : G.walk v w) (r : G.walk w x),
p.append (q.append r) = (p.append q).append r
| _ _ _ _ nil _ _ := rfl
| _ _ _ _ (cons h p') q r := by { dunfold append, rw append_assoc, }
@[simp] lemma reverse_nil {u : V} : (nil : G.walk u u).reverse = nil := rfl
lemma reverse_singleton {u v : V} (h : G.adj u v) :
(cons h nil).reverse = cons (G.symm h) nil := rfl
@[simp] lemma cons_reverse_aux {u v w x : V} (p : G.walk u v) (q : G.walk w x) (h : G.adj w u) :
(cons h p).reverse_aux q = p.reverse_aux (cons (G.symm h) q) := rfl
@[simp] protected lemma append_reverse_aux : Π {u v w x : V}
(p : G.walk u v) (q : G.walk v w) (r : G.walk u x),
(p.append q).reverse_aux r = q.reverse_aux (p.reverse_aux r)
| _ _ _ _ nil _ _ := rfl
| _ _ _ _ (cons h p') q r := append_reverse_aux p' q (cons (G.symm h) r)
@[simp] protected lemma reverse_aux_append : Π {u v w x : V}
(p : G.walk u v) (q : G.walk u w) (r : G.walk w x),
(p.reverse_aux q).append r = p.reverse_aux (q.append r)
| _ _ _ _ nil _ _ := rfl
| _ _ _ _ (cons h p') q r := by simp [reverse_aux_append p' (cons (G.symm h) q) r]
protected lemma reverse_aux_eq_reverse_append {u v w : V} (p : G.walk u v) (q : G.walk u w) :
p.reverse_aux q = p.reverse.append q :=
by simp [reverse]
@[simp] lemma reverse_cons {u v w : V} (h : G.adj u v) (p : G.walk v w) :
(cons h p).reverse = p.reverse.append (cons (G.symm h) nil) :=
by simp [reverse]
@[simp] lemma reverse_append {u v w : V} (p : G.walk u v) (q : G.walk v w) :
(p.append q).reverse = q.reverse.append p.reverse :=
by simp [reverse]
@[simp] lemma reverse_reverse : Π {u v : V} (p : G.walk u v), p.reverse.reverse = p
| _ _ nil := rfl
| _ _ (cons h p) := by simp [reverse_reverse]
@[simp] lemma length_nil {u : V} : (nil : G.walk u u).length = 0 := rfl
@[simp] lemma length_cons {u v w : V} (h : G.adj u v) (p : G.walk v w) :
(cons h p).length = p.length + 1 := rfl
@[simp] lemma length_append : Π {u v w : V} (p : G.walk u v) (q : G.walk v w),
(p.append q).length = p.length + q.length
| _ _ _ nil _ := by simp
| _ _ _ (cons _ _) _ := by simp [length_append, add_left_comm, add_comm]
@[simp] protected lemma length_reverse_aux : Π {u v w : V} (p : G.walk u v) (q : G.walk u w),
(p.reverse_aux q).length = p.length + q.length
| _ _ _ nil _ := by simp!
| _ _ _ (cons _ _) _ := by simp [length_reverse_aux, nat.add_succ, nat.succ_add]
@[simp] lemma length_reverse {u v : V} (p : G.walk u v) : p.reverse.length = p.length :=
by simp [reverse]
/-- The `support` of a walk is the list of vertices it visits in order. -/
def support : Π {u v : V}, G.walk u v → list V
| u v nil := [u]
| u v (cons h p) := u :: p.support
/-- The `edges` of a walk is the list of edges it visits in order. -/
def edges : Π {u v : V}, G.walk u v → list (sym2 V)
| u v nil := []
| u v (@cons _ _ _ x _ h p) := ⟦(u, x)⟧ :: p.edges
@[simp] lemma support_nil {u : V} : (nil : G.walk u u).support = [u] := rfl
@[simp] lemma support_cons {u v w : V} (h : G.adj u v) (p : G.walk v w) :
(cons h p).support = u :: p.support := rfl
lemma support_append {u v w : V} (p : G.walk u v) (p' : G.walk v w) :
(p.append p').support = p.support ++ p'.support.tail :=
by induction p; cases p'; simp [*]
@[simp]
lemma support_reverse {u v : V} (p : G.walk u v) : p.reverse.support = p.support.reverse :=
by induction p; simp [support_append, *]
lemma support_ne_nil {u v : V} (p : G.walk u v) : p.support ≠ [] :=
by cases p; simp
lemma tail_support_append {u v w : V} (p : G.walk u v) (p' : G.walk v w) :
(p.append p').support.tail = p.support.tail ++ p'.support.tail :=
by rw [support_append, list.tail_append_of_ne_nil _ _ (support_ne_nil _)]
lemma support_eq_cons {u v : V} (p : G.walk u v) : p.support = u :: p.support.tail :=
by cases p; simp
@[simp] lemma start_mem_support {u v : V} (p : G.walk u v) : u ∈ p.support :=
by cases p; simp
@[simp] lemma end_mem_support {u v : V} (p : G.walk u v) : v ∈ p.support :=
by induction p; simp [*]
lemma mem_support_iff {u v w : V} (p : G.walk u v) :
w ∈ p.support ↔ w = u ∨ w ∈ p.support.tail :=
by cases p; simp
@[simp]
lemma mem_tail_support_append_iff {t u v w : V} (p : G.walk u v) (p' : G.walk v w) :
t ∈ (p.append p').support.tail ↔ t ∈ p.support.tail ∨ t ∈ p'.support.tail :=
by rw [tail_support_append, list.mem_append]
@[simp] lemma end_mem_tail_support_of_ne {u v : V} (h : u ≠ v) (p : G.walk u v) :
v ∈ p.support.tail :=
by { obtain ⟨_, _, _, rfl⟩ := exists_eq_cons_of_ne h p, simp }
@[simp]
lemma mem_support_append_iff {t u v w : V} (p : G.walk u v) (p' : G.walk v w) :
t ∈ (p.append p').support ↔ t ∈ p.support ∨ t ∈ p'.support :=
begin
simp only [mem_support_iff, mem_tail_support_append_iff],
by_cases h : t = v; by_cases h' : t = u;
subst_vars;
try { have := ne.symm h' };
simp [*],
end
lemma coe_support {u v : V} (p : G.walk u v) :
(p.support : multiset V) = {u} + p.support.tail :=
by cases p; refl
lemma coe_support_append {u v w : V} (p : G.walk u v) (p' : G.walk v w) :
((p.append p').support : multiset V) = {u} + p.support.tail + p'.support.tail :=
by rw [support_append, ←multiset.coe_add, coe_support]
lemma coe_support_append' [decidable_eq V] {u v w : V} (p : G.walk u v) (p' : G.walk v w) :
((p.append p').support : multiset V) = p.support + p'.support - {v} :=
begin
rw [support_append, ←multiset.coe_add],
simp only [coe_support],
rw add_comm {v},
simp only [← add_assoc, add_tsub_cancel_right],
end
lemma chain_adj_support_aux : Π {u v w : V} (h : G.adj u v) (p : G.walk v w),
list.chain G.adj u p.support
| _ _ _ h nil := list.chain.cons h list.chain.nil
| _ _ _ h (cons h' p) := list.chain.cons h (chain_adj_support_aux h' p)
lemma chain_adj_support : Π {u v : V} (p : G.walk u v), list.chain' G.adj p.support
| _ _ nil := list.chain.nil
| _ _ (cons h p) := chain_adj_support_aux h p
/-- Every edge in a walk's edge list is an edge of the graph.
It is written in this form to avoid unsightly coercions. -/
lemma edges_subset_edge_set : Π {u v : V} (p : G.walk u v) {e : sym2 V}
(h : e ∈ p.edges), e ∈ G.edge_set
| _ _ (cons h' p') e h := by rcases h with ⟨rfl, h⟩; solve_by_elim
@[simp] lemma edges_nil {u : V} : (nil : G.walk u u).edges = [] := rfl
@[simp] lemma edges_cons {u v w : V} (h : G.adj u v) (p : G.walk v w) :
(cons h p).edges = ⟦(u, v)⟧ :: p.edges := rfl
@[simp] lemma edges_append {u v w : V} (p : G.walk u v) (p' : G.walk v w) :
(p.append p').edges = p.edges ++ p'.edges :=
by induction p; simp [*]
@[simp] lemma edges_reverse {u v : V} (p : G.walk u v) : p.reverse.edges = p.edges.reverse :=
by induction p; simp [*, sym2.eq_swap]
@[simp] lemma length_support {u v : V} (p : G.walk u v) : p.support.length = p.length + 1 :=
by induction p; simp *
@[simp] lemma length_edges {u v : V} (p : G.walk u v) : p.edges.length = p.length :=
by induction p; simp *
lemma mem_support_of_mem_edges : Π {t u v w : V} (p : G.walk v w) (he : ⟦(t, u)⟧ ∈ p.edges),
t ∈ p.support
| t u v w (cons h p') he := begin
simp only [support_cons, edges_cons, list.mem_cons_iff, quotient.eq] at he ⊢,
rcases he with ((he|he)|he),
{ exact or.inl rfl },
{ exact or.inr (start_mem_support _) },
{ exact or.inr (mem_support_of_mem_edges _ he), }
end
lemma edges_nodup_of_support_nodup {u v : V} {p : G.walk u v} (h : p.support.nodup) :
p.edges.nodup :=
begin
induction p,
{ simp, },
{ simp only [edges_cons, support_cons, list.nodup_cons] at h ⊢,
exact ⟨λ h', h.1 (mem_support_of_mem_edges p_p h'), p_ih h.2⟩, }
end
/-- A *trail* is a walk with no repeating edges. -/
structure is_trail {u v : V} (p : G.walk u v) : Prop :=
(edges_nodup : p.edges.nodup)
/-- A *path* is a walk with no repeating vertices.
Use `simple_graph.walk.is_path.mk'` for a simpler constructor. -/
structure is_path {u v : V} (p : G.walk u v) extends to_trail : is_trail p : Prop :=
(support_nodup : p.support.nodup)
/-- A *circuit* at `u : V` is a nonempty trail beginning and ending at `u`. -/
structure is_circuit {u : V} (p : G.walk u u) extends to_trail : is_trail p : Prop :=
(ne_nil : p ≠ nil)
/-- A *cycle* at `u : V` is a circuit at `u` whose only repeating vertex
is `u` (which appears exactly twice). -/
structure is_cycle [decidable_eq V] {u : V} (p : G.walk u u)
extends to_circuit : is_circuit p : Prop :=
(support_nodup : p.support.tail.nodup)
lemma is_trail_def {u v : V} (p : G.walk u v) : p.is_trail ↔ p.edges.nodup :=
⟨is_trail.edges_nodup, λ h, ⟨h⟩⟩
lemma is_path.mk' {u v : V} {p : G.walk u v} (h : p.support.nodup) : is_path p :=
⟨⟨edges_nodup_of_support_nodup h⟩, h⟩
lemma is_path_def {u v : V} (p : G.walk u v) : p.is_path ↔ p.support.nodup :=
⟨is_path.support_nodup, is_path.mk'⟩
lemma is_cycle_def [decidable_eq V] {u : V} (p : G.walk u u) :
p.is_cycle ↔ is_trail p ∧ p ≠ nil ∧ p.support.tail.nodup :=
iff.intro (λ h, ⟨h.1.1, h.1.2, h.2⟩) (λ h, ⟨⟨h.1, h.2.1⟩, h.2.2⟩)
@[simp] lemma is_trail.nil {u : V} : (nil : G.walk u u).is_trail :=
⟨by simp [edges]⟩
lemma is_trail.of_cons {u v w : V} {h : G.adj u v} {p : G.walk v w} :
(cons h p).is_trail → p.is_trail :=
by simp [is_trail_def]
@[simp] lemma cons_is_trail_iff {u v w : V} (h : G.adj u v) (p : G.walk v w) :
(cons h p).is_trail ↔ p.is_trail ∧ ⟦(u, v)⟧ ∉ p.edges :=
by simp [is_trail_def, and_comm]
lemma is_trail.reverse {u v : V} (p : G.walk u v) (h : p.is_trail) : p.reverse.is_trail :=
by simpa [is_trail_def] using h
@[simp] lemma reverse_is_trail_iff {u v : V} (p : G.walk u v) : p.reverse.is_trail ↔ p.is_trail :=
by split; { intro h, convert h.reverse _, try { rw reverse_reverse } }
lemma is_trail.of_append_left {u v w : V} {p : G.walk u v} {q : G.walk v w}
(h : (p.append q).is_trail) : p.is_trail :=
by { rw [is_trail_def, edges_append, list.nodup_append] at h, exact ⟨h.1⟩ }
lemma is_trail.of_append_right {u v w : V} {p : G.walk u v} {q : G.walk v w}
(h : (p.append q).is_trail) : q.is_trail :=
by { rw [is_trail_def, edges_append, list.nodup_append] at h, exact ⟨h.2.1⟩ }
lemma is_trail.count_edges_le_one [decidable_eq V] {u v : V}
{p : G.walk u v} (h : p.is_trail) (e : sym2 V) : p.edges.count e ≤ 1 :=
list.nodup_iff_count_le_one.mp h.edges_nodup e
lemma is_trail.count_edges_eq_one [decidable_eq V] {u v : V}
{p : G.walk u v} (h : p.is_trail) {e : sym2 V} (he : e ∈ p.edges) :
p.edges.count e = 1 :=
list.count_eq_one_of_mem h.edges_nodup he
@[simp] lemma is_path.nil {u : V} : (nil : G.walk u u).is_path :=
by { fsplit; simp }
lemma is_path.of_cons {u v w : V} {h : G.adj u v} {p : G.walk v w} :
(cons h p).is_path → p.is_path :=
by simp [is_path_def]
@[simp] lemma cons_is_path_iff {u v w : V} (h : G.adj u v) (p : G.walk v w) :
(cons h p).is_path ↔ p.is_path ∧ u ∉ p.support :=
by split; simp [is_path_def] { contextual := tt }
lemma is_path.reverse {u v : V} {p : G.walk u v} (h : p.is_path) : p.reverse.is_path :=
by simpa [is_path_def] using h
@[simp] lemma is_path_reverse_iff {u v : V} (p : G.walk u v) : p.reverse.is_path ↔ p.is_path :=
by split; intro h; convert h.reverse; simp
lemma is_path.of_append_left {u v w : V} {p : G.walk u v} {q : G.walk v w} :
(p.append q).is_path → p.is_path :=
by { simp only [is_path_def, support_append], exact list.nodup_of_nodup_append_left }
lemma is_path.of_append_right {u v w : V} {p : G.walk u v} {q : G.walk v w}
(h : (p.append q).is_path) : q.is_path :=
begin
rw ←is_path_reverse_iff at h ⊢,
rw reverse_append at h,
apply h.of_append_left,
end
end walk
end simple_graph
|
If $f$ converges to $l$ and $f$ is eventually real, then $l$ is real. |
\documentclass[12pt]{article}
\usepackage{geometry}
\geometry{margin=1in}
\geometry{a4paper}
\usepackage{textcomp}
\usepackage{booktabs}
\usepackage{array}
\usepackage{paralist}
\usepackage{verbatim}
\usepackage{subfigure}
\usepackage{graphicx,caption}
\usepackage{placeins}
\usepackage{lipsum}
\usepackage{xcolor}
\usepackage{dcolumn}
\usepackage{sectsty}
\allsectionsfont{\sffamily\mdseries\upshape}
\usepackage{gensymb,amsmath,mathtools,amssymb}
\usepackage{flafter}
%\usepackage{parskip}
\usepackage[utf8]{inputenc}
\usepackage[english]{babel}
\usepackage{tocbibind}
\usepackage[toc,page]{appendix}
\captionsetup{width=\linewidth}
\usepackage{bm}
\usepackage{url}
\usepackage{pdflscape}
\newcommand{\half}{\frac{1}{2}}
\graphicspath{{./figs/}}
\title{Initial Sizing of 2020 ICLR hybrid rocket}
\author{Devansh Agrawal}
%\date{}
\begin{document}
\maketitle
\section{Introduction}
This document provides an overview of the sizing analysis and results for the 10k SRAD hybrid entry by Imperial College London Rocketry.
This is a working document, and as more information is fed into the system, the parameters may be updated.
\section{Flight performance requirement}
The first challenge was to estimate the performance requirements of the hybrid rocket to be able to attain the target altitude with some margin. A separate document details the analysis, but a summary is provided here.
We assume the rocket follows a bang-off control scheme: the rocket thrusts at its main engine's maximum thrust, $F_{max}$~newtons, for $t_{burn}$~seconds and then coasts for the remaining part of the journey. In this scenario, the final altitude of the rocket, $h_f = \hat h_f c^2/g$, is entirely dependent on four non-dimensional parameters\footnote{Assumes an exponential atmosphere, a constant drag coefficient, and perfectly vertical flight}:
\begin{enumerate}
\item The thrust to initial weight ratio, $\hat F = (F_{max})/(m_0 g)$
\item A drag parameter defined as\footnote{Note: This parameter is like the drag to weight ratio (except it uses $c$ as the velocity and $m_0$ as the mass)}, $ x \equiv (\half \rho c^2 c_d A)/(m_0 g)$
\item Propellant mass fraction, $MR \equiv m_p/m_0$
\item Atmosphere parameter, $\hat \beta = \beta c^2/g$
\end{enumerate}
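As a quick sanity check of this flight model, the thrust-then-coast trajectory can be integrated directly. The following Python sketch uses illustrative, assumed numbers (masses, exhaust speed, drag area), not the final vehicle values:

\begin{verbatim}
import numpy as np

g, beta = 9.81, 1/8500.0         # gravity [m/s^2], inverse scale height [1/m]
m0, mp = 28.0, 6.1               # initial and propellant mass [kg] (assumed)
F, c = 1000.0, 1800.0            # thrust [N] and exhaust speed [m/s] (assumed)
Cd, A, rho0 = 0.6, 0.018, 1.225  # drag coeff., area [m^2], sea-level density (assumed)

t_burn = mp * c / F              # burn until the propellant is exhausted
t, h, v, m, dt = 0.0, 0.0, 0.0, m0, 0.01

while v >= 0.0:                  # integrate until apogee
    thrust = F if t < t_burn else 0.0
    mdot = F / c if t < t_burn else 0.0
    drag = 0.5 * rho0 * np.exp(-beta * h) * Cd * A * v * abs(v)
    v += ((thrust - drag) / m - g) * dt
    h += v * dt
    m -= mdot * dt
    t += dt

print("apogee ~ %.0f m after %.1f s" % (h, t))
\end{verbatim}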
Plugging in suitable parameters, we find the required propellant mass fraction as a function of the thrust to weight ratio, as in figure~\ref{fig:PMF}.
%\FloatBarrier
\begin{figure}[htbp]
\centering
\includegraphics[width=0.8\linewidth]{perf_req.eps}
\caption{Required thrust to weight ratio as a function of the propellant mass fraction}
\label{fig:PMF}
\end{figure}
%FloatBarrier
From this, we can see that we need a propellant mass fraction of approximately 17\%. The fact that the lines are near vertical above $T/W>2$ suggests that a slow, low-thrust burn is roughly equivalent to a fast, high-thrust burn. Therefore, since we have the ability to control our burn during the flight, a long, slow burn allows us to turn off the thrust closer to apogee, and with greater certainty of success.
\textbf{For design purposes, and to give ourselves some flexibility, we can therefore design our rocket to have a $T/W>2$ and a propellant mass fraction of at least 20\%. }
Detailed analysis with accurate drag coefficients, the mach dependence, and optimal control should be performed next to ensure these performance targets are sufficient and robust to future design changes.
\section{Vehicle Architecture}
The chosen system breakdown of the rocket is:
\begin{enumerate}
\item Payload
\begin{itemize}
\item 4~kg payload
\item 4.5~kg allocated, to allow for mounting
\end{itemize}
\item Avionics
\begin{itemize}
\item Includes all flight computers and sensing equipment, switchboards and mounting hardware
\item Includes interface wiring to other components with electronics
\item Does not include mass of electronics for other subsystems
\item Allocated 2~kg, based on previous IREC reports.
\end{itemize}
\item Recovery System
\begin{itemize}
\item Includes main and drogue parachute, parachute lines and deployment mechanism mass
\item includes mass of black powder and associated electronics.
\item Allocated 3~kg, based on previous IREC reports.
\end{itemize}
\item Main Engine
\begin{itemize}
\item Includes oxidiser tank, fuel tank
\item ox mass, fuel mass,
\item valves assembly with electronics (allocated 1~kg), and nozzle assembly (allocated 1~kg)
\item Detailed engine sizing was not performed, and needs to be thoroughly verified.
\end{itemize}
\item Boosters
\begin{itemize}
\item Since the $T/W$ of the main engine is around 2, it is not enough to clear the launch rail with the required speed. As such, solid boosters are to be used to launch the rocket, sized to provide $T/W = 10$ for the duration needed to clear the launch rails.
\item Includes motor dry mass, but not mounting structural mass
\end{itemize}
\item Structures
\begin{itemize}
\item Includes nose cone (allocated 0.5 kg)
\item fins (1.2~kg)
\item body tube with internal bulkheads and couplers (4~kg)
\item booster mounting structure (0.3~kg)
\item overall structural mass allowed is 6~kg.
\end{itemize}
\end{enumerate}
\section{Sizing approach}
GPkit\footnote{\url{https://gpkit.readthedocs.io/en/latest/}} was used to perform the sizing study. GPkit allows the user to define variables describing the vehicle, the constraints relating the variables (either due to physics, performance requirements or due to design requirements), and an objective function to optimise. It will then perform a global optimisation and return the optimised parameters of the design. GPkit also allows for easy compartmentalisation, by allowing the user to define these variables within classes, and thus separating the different parts of the design.
I have created a basic framework that should be general enough to allow more detail to be added into the model, as we develop it.
At the top level, a \texttt{rocket} class is defined. The six components above are created, each defined in a separate python file. These classes inherit from \texttt{gpkit.Model} which allows gpkit to interpret the variables and the constraints, and exposes a \texttt{solve} method to perform the optimisation.
For ease of visualisation and interpretation, a jupyter notebook instantiates the \texttt{rocket} and calls the signomial solver, \texttt{localsolve}. Unfortunately, due to the structure of the rocket model, the globally optimal geometric programming solver cannot be used; a local signomial solver must be called instead. That said, in most scenarios, this solver is sufficiently robust to return a good, viable solution.
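To illustrate the pattern, a minimal GPkit model might look like the following. This is a sketch with made-up masses and a toy constraint set, not the actual model in the repository:

\begin{verbatim}
from gpkit import Variable, Model

# Illustrative variables only -- the real model lives in the python files
m_dry  = Variable("m_{dry}", "kg", "dry mass")
m_prop = Variable("m_{prop}", "kg", "propellant mass")
m_tot  = Variable("m_{tot}", "kg", "total lift-off mass")
PMF    = Variable("PMF", 0.20, "-", "required propellant mass fraction")

constraints = [m_tot >= m_dry + m_prop,
               m_prop >= PMF * m_tot,
               m_dry >= Variable("m_{fixed}", 12, "kg", "fixed masses")]

model = Model(m_tot, constraints)  # objective: minimise total mass
sol = model.solve()                # use model.localsolve() for signomial models
print(sol.table())
\end{verbatim}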
The jupyter notebook also has result printing code blocks to allow for easy debugging of models.
Note, when there are changes to the python files where the relationships are described, jupyter must re-import the classes. The best way to ensure this is accurately done is by clicking the \texttt{Restart \& Run-All} button.
The most important relationships used in this sizing are listed at the end of the document, but the most up-to-date relationships are only available in the python files.
\section{Sizing results}
The solve took 5 GP solves, and 1.54 seconds.
\textbf{Total rocket mass: 27.89 kg}
The results are more easily interpreted in the form of a diagram, on the last page.
\begin{landscape}
\begin{verbatim}
SORTED BY LINEAGE, SENSITIVITY (solved on 05-11 19:18) NOTE: ENGINE IS POORLY SIZED.
+----------------+---------------------+----------+--------+--------+---------------------------------------+
| key | lineage | value | unit | sens | Description |
+----------------+---------------------+----------+--------+--------+---------------------------------------+
| PMF | Rocket | 0.220 | - | 0.814 | Propellant Mass Fraction required |
| v_{launch} | Rocket | 30.000 | m/s | 0.125 | Velocity off launch rail |
| L_{launch} | Rocket | 5.200 | m | -0.029 | Length of launch rail |
| g | Rocket | 9.810 | m/s^2 | 0.011 | Acceleration due to gravity |
| a_{launch} | Rocket | 86.538 | m/s^2 | * | Acceleration off launch rail |
| m | Rocket | 27.891 | kg | * | Mass of Rocket |
| TW_{main, min} | Rocket | 2.500 | - | 0.000 | Main engine thrust to take off weight |
| min_a | Rocket | 86.538 | m/s^2 | * | minimum launch acceleration |
| m | Rocket/Avionics | 1.000 | kg | 0.069 | Mass of Avionics |
| DMF | Rocket/Boosters | 0.700 | - | 0.157 | Dry mass fraction of boosters |
| c | Rocket/Boosters | 2000.000 | m/s | -0.067 | boosters exhaust speed |
| m | Rocket/Boosters | 0.975 | kg | * | Mass of Boosters |
| m_{prop} | Rocket/Boosters | 0.292 | kg | * | Propellant mass of boosters |
| m_{dry} | Rocket/Boosters | 0.682 | kg | * | Dry mass of boosters |
| t_{burn} | Rocket/Boosters | 0.347 | s | * | Booster burn time |
| F | Rocket/Boosters | 1687.261 | N | * | Boosters cumulative thrust |
| m | Rocket/Payload | 4.000 | kg | 0.275 | Mass of Payload |
| m | Rocket/Recovery | 3.500 | kg | 0.241 | Mass of Recovery |
| \rho_{ox, tank}| Rocket/SimpleEngine | 8000.000 | kg/m^3 | 0.391 | Density of ox tank, steel |
| Tank P | Rocket/SimpleEngine | 60.000 | bar | 0.391 | Max Ox Tank pressure |
| SF | Rocket/SimpleEngine | 3.000 | - | 0.391 | Wall thickness safety factor |
| \sigma_{max} | Rocket/SimpleEngine | 585.000 | MPa | -0.391 | Max stress of tank, steel |
| rho_{ox} | Rocket/SimpleEngine | 650.000 | kg/m^3 | -0.282 | density of liquid ox *ROUGH* |
| rho_{wax} | Rocket/SimpleEngine | 900.000 | kg/m^3 | -0.109 | Density of fuel |
| m_{valves} | Rocket/SimpleEngine | 1.000 | kg | 0.069 | Mass of valves and plumbing |
| m_{nozzle} | Rocket/SimpleEngine | 1.000 | kg | 0.069 | Mass of nozzle assembly |
| OF | Rocket/SimpleEngine | 7.500 | - | -0.063 | Ox to fuel ratio |
| F | Rocket/SimpleEngine | 1000.000 | N | -0.040 | Engine thrust |
| m | Rocket/SimpleEngine | 13.816 | kg | * | Mass of Engine |
| m_{grain tank} | Rocket/SimpleEngine | 1.580 | kg | * | Mass of combustion chamber |
| m_{ox tank} | Rocket/SimpleEngine | 4.101 | kg | * | Mass of ox tank |
| m_{prop} | Rocket/SimpleEngine | 6.136 | kg | * | Mass of Propellant |
| m_{dry} | Rocket/SimpleEngine | 7.680 | kg | * | Dry mass of engine |
| m_{ox} | Rocket/SimpleEngine | 5.414 | kg | * | ox mass |
| m_{fuel} | Rocket/SimpleEngine | 0.722 | kg | * | fuel mass |
| d_ox | Rocket/SimpleEngine | 6.000 | in | 0.000 | Diameter of ox tank |
| t_{wall} | Rocket/SimpleEngine | 2.345 | mm | * | Wall Thickness of ox tank |
| L_{ox} | Rocket/SimpleEngine | 0.457 | m | * | Length of ox tank |
| v_{fuel} | Rocket/SimpleEngine | 802.096 | cm^3 | * | Volume of fuel |
| L_{grain} | Rocket/SimpleEngine | 0.176 | m | * | Length of the grain |
| V_{ox} | Rocket/SimpleEngine | 8329.458 | cm^3 | * | Volume of ox tank |
| A_{grain} | Rocket/SimpleEngine | 45.581 | cm^2 | * | cross section area of grain |
| m | Rocket/Structures | 4.600 | kg | 0.317 | Mass of Structures |
+----------------+---------------------+----------+--------+--------+---------------------------------------+
\end{verbatim}
Note that the sensitivity is the logarithmic sensitivity, i.e., $\text{sensitivity} = \frac{d \log(\text{cost})}{d\log(\text{var})}$. A positive number indicates that increasing the variable will increase the cost; for example, the sensitivity of 0.814 to PMF means that a 1\% increase in the required propellant mass fraction raises the optimised total mass by roughly 0.8\%. The star indicates a zero sensitivity, since these are variables that gpkit has solved for: they sit at the minimum of the cost function and thus have zero sensitivity, just as a function has zero gradient with respect to a variable at an optimum.
\end{landscape}
\section{Next steps}
\begin{itemize}
\item Verify structural mass allocations
\item Verify stability requirements - ie, ensure fins are large enough
\item Perform detailed drag accounting
\item Perform detailed controls analysis
\item more accurate tank sizing needed, especially considering manufacturability, source-ability, cost.
\item tank ullage not accounted for
\item very simplified thrust curve needs to be improved
\end{itemize}
Design modification if the engine performance is poorer than expected: bigger boosters. Therefore, the booster mounts need to be flexible enough to allow different booster designs. Could look into dropping boosters after their work is done, but this is complicated.
%\bibliographystyle{unsrt}
%\bibliography{biblio}
\section{Sizing relationships used}
\emph{NOTE: the values in this section are probably wrong, the method should be roughly accurate. Consult the github for the values used.}
Most constraints are fairly straightforward. Here are the key ones
\begin{itemize}
\item m = sum of mass of components
\item propellant mass fraction $>$ 20\%
\item Launch requirements:
\begin{itemize}
\item v off launch rail $>$ 30m/s
\item launch accel = (booster thrust + main engine thrust - mg)/m
\item launch accel $>$ min accel = (launch v)$^2$/(2 launch rail L)
\item booster burn time such that burn out occurs at 5 m/s
\end{itemize}
\item Components:
\begin{itemize}
\item Payload: m = 4.5 kg
\item Avionics: m = 2 kg
\item Recovery: m = 3 kg
\item Engine:
\begin{itemize}
\item OF = 6
\item m fuel, m ox based on m prop and OF
\item d = 150 mm
\item P tank $<$ 60 bar
\item wall thickness based on hoop stress and safety factor of 5, assumes Al-7075 due to high yield strength (double of Al 6061), sealing and welding to be determined more thoroughly
\item ox is fully liquid, at critical density of 490 kg/m3 $\rightarrow$ determine length of ox tank
\item mass of ox tank based on cylinder material thickness, end caps not accounted for
\item grain tank is similarly sized, assumes the grain is only occupied in half the cross sectional area (needs to be refined), and same wall thickness as ox tank. No liner material considered. Carbon overwrap of tank tube would save lots of mass if possible.
\item \emph{regression rates, motor dynamics, etc not accounted for}
\item m valves = 1 kg
\item m nozzle = 1 kg
\item assumed F= 1000 N
\item c = 1800 m/s (needs to be verified)
\end{itemize}
\item Boosters:
\begin{itemize}
\item propellant mass such that total impulse can be delivered
\item dry mass fraction is 70\%. Needs to be refined by picking a motor, assumed c = 2000m/s
\end{itemize}
\item Structures:
\begin{itemize}
\item m = sum of components
\item m fins = 1.2 kg
\item m nose cone = 0.5 kg
\item m tube = 4 kg
\item m booster struc = 0.3 kg
\end{itemize}
\end{itemize}
\end{itemize}
\FloatBarrier
\begin{figure}[htbp]
\centering
\includegraphics[width=\linewidth]{sizing_result_Nov_5.eps}
\caption{Summary of Sizing results}
\label{fig:sizing_result}
\end{figure}
%FloatBarrier
\end{document}
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: gga_exc *)
(* prefix:
gga_x_vmt84_params *params;
assert(p->params != NULL);
params = (gga_x_vmt84_params * )(p->params);
*)
$include "gga_x_vmt.mpl"
vmt84_f0 := s -> (1 - exp(-params_a_alpha*s^4))/s^2 - 1 + exp(-params_a_alpha*s^4):
vmt84_f := x -> vmt_f(x) + vmt84_f0(X2S*x):
f := (rs, zeta, xt, xs0, xs1) -> gga_exchange(vmt84_f, rs, zeta, xs0, xs1):
|
# 17. Random Forest and Gradient Boosted Trees Classifier
[](https://colab.research.google.com/github/rhennig/EMA6938/blob/main/Notebooks/17.RandomForest.ipynb)
Previously, we used a Decision Tree Classifier to learn the fcc, bcc, and hcp crystal structures of 47 elements with Scikit-learn. Now, we will train a random forest and a gradient boosted trees model.
Let's first load the required libraries.
```python
# Install the mendeleev and pymatgen packages using pip in the current Jupyter kernel
# To use them, you may need to restart the kernel
import sys
!{sys.executable} -m pip install mendeleev
!{sys.executable} -m pip install pymatgen
import pymatgen as pymat
from pymatgen.core.periodic_table import Element
import mendeleev as mendel
import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
```
Requirement already satisfied: mendeleev in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (0.9.0)
Requirement already satisfied: numpy<2.0.0,>=1.19.5 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from mendeleev) (1.22.2)
Requirement already satisfied: pyfiglet<0.9,>=0.8.post1 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from mendeleev) (0.8.post1)
Requirement already satisfied: colorama<0.5.0,>=0.4.4 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from mendeleev) (0.4.4)
Requirement already satisfied: pandas>=0.25.0 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from mendeleev) (1.3.5)
Requirement already satisfied: SQLAlchemy<2.0.0,>=1.3.23 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from mendeleev) (1.4.31)
Requirement already satisfied: six<2.0.0,>=1.15.0 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from mendeleev) (1.15.0)
Requirement already satisfied: Pygments<3.0.0,>=2.8.0 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from mendeleev) (2.10.0)
Requirement already satisfied: python-dateutil>=2.7.3 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pandas>=0.25.0->mendeleev) (2.8.2)
Requirement already satisfied: pytz>=2017.3 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pandas>=0.25.0->mendeleev) (2021.3)
Requirement already satisfied: greenlet!=0.4.17 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from SQLAlchemy<2.0.0,>=1.3.23->mendeleev) (1.1.2)
Requirement already satisfied: pymatgen in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (2022.1.9)
Requirement already satisfied: pybtex in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pymatgen) (0.24.0)
Requirement already satisfied: sympy in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pymatgen) (1.9)
Requirement already satisfied: pandas in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pymatgen) (1.3.5)
Requirement already satisfied: networkx>=2.2 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pymatgen) (2.6.3)
Requirement already satisfied: uncertainties>=3.1.4 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pymatgen) (3.1.6)
Requirement already satisfied: scipy>=1.5.0 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pymatgen) (1.7.3)
Requirement already satisfied: tqdm in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pymatgen) (4.62.3)
Requirement already satisfied: ruamel.yaml>=0.15.6 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pymatgen) (0.17.20)
Requirement already satisfied: spglib>=1.9.9.44 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pymatgen) (1.16.3)
Requirement already satisfied: matplotlib>=1.5 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pymatgen) (3.4.3)
Requirement already satisfied: palettable>=3.1.1 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pymatgen) (3.3.0)
Requirement already satisfied: plotly>=4.5.0 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pymatgen) (5.5.0)
Requirement already satisfied: tabulate in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pymatgen) (0.8.9)
Requirement already satisfied: monty>=3.0.2 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pymatgen) (2022.1.12.1)
Requirement already satisfied: Cython>=0.29.23 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pymatgen) (0.29.26)
Requirement already satisfied: requests in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pymatgen) (2.27.1)
Requirement already satisfied: numpy>=1.20.1 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pymatgen) (1.22.2)
Requirement already satisfied: pillow>=6.2.0 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from matplotlib>=1.5->pymatgen) (8.3.2)
Requirement already satisfied: pyparsing>=2.2.1 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from matplotlib>=1.5->pymatgen) (2.4.7)
Requirement already satisfied: kiwisolver>=1.0.1 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from matplotlib>=1.5->pymatgen) (1.3.2)
Requirement already satisfied: cycler>=0.10 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from matplotlib>=1.5->pymatgen) (0.11.0)
Requirement already satisfied: python-dateutil>=2.7 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from matplotlib>=1.5->pymatgen) (2.8.2)
Requirement already satisfied: tenacity>=6.2.0 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from plotly>=4.5.0->pymatgen) (8.0.1)
Requirement already satisfied: six in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from plotly>=4.5.0->pymatgen) (1.15.0)
Requirement already satisfied: ruamel.yaml.clib>=0.2.6 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from ruamel.yaml>=0.15.6->pymatgen) (0.2.6)
Requirement already satisfied: future in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from uncertainties>=3.1.4->pymatgen) (0.18.2)
Requirement already satisfied: pytz>=2017.3 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pandas->pymatgen) (2021.3)
Requirement already satisfied: latexcodec>=1.0.4 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pybtex->pymatgen) (2.0.1)
Requirement already satisfied: PyYAML>=3.01 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from pybtex->pymatgen) (5.4.1)
Requirement already satisfied: idna<4,>=2.5 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from requests->pymatgen) (3.1)
Requirement already satisfied: certifi>=2017.4.17 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from requests->pymatgen) (2021.10.8)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from requests->pymatgen) (1.26.7)
Requirement already satisfied: charset-normalizer~=2.0.0 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from requests->pymatgen) (2.0.9)
Requirement already satisfied: mpmath>=0.19 in /Users/rhennig/opt/anaconda3/envs/tf2/lib/python3.8/site-packages (from sympy->pymatgen) (1.2.1)
### Getting the dataset
We select 47 elements that occur in the fcc, hcp, and bcc structures. The elements listed were chosen because querying them for these properties yields a dataset with no unknown values, and because they represent the three most common crystallographic structures.
We then query both Pymatgen and Mendeleev to get a complete set of properties per element. We will use this data to create the features from which the model will train and test.
```python
fcc_elements = ["Ag", "Al", "Au", "Cu", "Ir", "Ni", "Pb", "Pd", "Pt", "Rh", "Th", "Yb"]
bcc_elements = ["Ba", "Ca", "Cr", "Cs", "Eu", "Fe", "Li", "Mn", "Mo", "Na", "Nb", "Rb", "Ta", "V", "W" ]
hcp_elements = ["Be", "Cd", "Co", "Dy", "Er", "Gd", "Hf", "Ho", "Lu", "Mg", "Re",
"Ru", "Sc", "Tb", "Ti", "Tl", "Tm", "Y", "Zn", "Zr"]
elements = fcc_elements + bcc_elements + hcp_elements
random.Random(1).shuffle(elements)
querable_mendeleev = ["atomic_number", "atomic_volume", "boiling_point", "en_ghosh", "evaporation_heat", "heat_of_formation",
"melting_point", "specific_heat"]
querable_pymatgen = ["atomic_mass", "atomic_radius", "electrical_resistivity","molar_volume", "bulk_modulus", "youngs_modulus",
"average_ionic_radius", "density_of_solid", "coefficient_of_linear_thermal_expansion"]
querable_values = querable_mendeleev + querable_pymatgen
```
We will use the database queries to populate a pandas dataframe.
```python
all_values = [] # Values for Attributes
all_labels = [] # Crystal structure labels (0 = fcc, 1 = bcc, 2 = hcp)
for item in elements:
element_values = []
# This section queries Mendeleev
element_object = mendel.element(item)
for i in querable_mendeleev:
element_values.append(getattr(element_object,i))
# This section queries Pymatgen
element_object = Element(item)
for i in querable_pymatgen:
element_values.append(getattr(element_object,i))
all_values.append(element_values) # All lists are appended to another list, creating a List of Lists
if (item in fcc_elements):
all_labels.append(0) # The crystal structure labels are assigned here
elif (item in bcc_elements):
all_labels.append(1) # The crystal structure labels are assigned here
elif (item in hcp_elements):
all_labels.append(2) # The crystal structure labels are assigned here
# Pandas Dataframe
df = pd.DataFrame(all_values, columns=querable_values)
# We will patch some of the values that are not available in the datasets.
# Value for the CTE of Cesium
index_Cs = df.index[df['atomic_number'] == 55]
df.iloc[index_Cs, df.columns.get_loc("coefficient_of_linear_thermal_expansion")] = 0.000097
# Value from: David R. Lide (ed), CRC Handbook of Chemistry and Physics, 84th Edition. CRC Press. Boca Raton, Florida, 2003
# Value for the CTE of Rubidium
index_Rb = df.index[df['atomic_number'] == 37]
df.iloc[index_Rb, df.columns.get_loc("coefficient_of_linear_thermal_expansion")] = 0.000090
# Value from: https://www.azom.com/article.aspx?ArticleID=1834
# Value for the Evaporation Heat of Ruthenium
index_Ru = df.index[df['atomic_number'] == 44]
df.iloc[index_Ru, df.columns.get_loc("evaporation_heat")] = 595 # kJ/mol
# Value from: https://www.webelements.com/ruthenium/thermochemistry.html
# Value for the Bulk Modulus of Zirconium
index_Zr = df.index[df['atomic_number'] == 40]
df.iloc[index_Zr, df.columns.get_loc("bulk_modulus")] = 94 # GPa
# Value from: https://materialsproject.org/materials/mp-131/
df.head(n=10)
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>atomic_number</th>
<th>atomic_volume</th>
<th>boiling_point</th>
<th>en_ghosh</th>
<th>evaporation_heat</th>
<th>heat_of_formation</th>
<th>melting_point</th>
<th>specific_heat</th>
<th>atomic_mass</th>
<th>atomic_radius</th>
<th>electrical_resistivity</th>
<th>molar_volume</th>
<th>bulk_modulus</th>
<th>youngs_modulus</th>
<th>average_ionic_radius</th>
<th>density_of_solid</th>
<th>coefficient_of_linear_thermal_expansion</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>27</td>
<td>6.70</td>
<td>3143.0</td>
<td>0.143236</td>
<td>389.1</td>
<td>426.7</td>
<td>1768.00</td>
<td>0.456</td>
<td>58.933195</td>
<td>1.35</td>
<td>6.000000e-08</td>
<td>6.67</td>
<td>180.0</td>
<td>209.0</td>
<td>0.768333</td>
<td>8900.0</td>
<td>0.000013</td>
</tr>
<tr>
<th>1</th>
<td>69</td>
<td>18.10</td>
<td>2220.0</td>
<td>0.216724</td>
<td>232.0</td>
<td>232.2</td>
<td>1818.00</td>
<td>0.160</td>
<td>168.934210</td>
<td>1.75</td>
<td>6.760000e-07</td>
<td>19.10</td>
<td>45.0</td>
<td>74.0</td>
<td>1.095000</td>
<td>9321.0</td>
<td>0.000013</td>
</tr>
<tr>
<th>2</th>
<td>39</td>
<td>19.80</td>
<td>3611.0</td>
<td>0.121699</td>
<td>367.0</td>
<td>424.7</td>
<td>1795.00</td>
<td>0.284</td>
<td>88.905850</td>
<td>1.80</td>
<td>6.000000e-07</td>
<td>19.88</td>
<td>41.0</td>
<td>64.0</td>
<td>1.040000</td>
<td>4472.0</td>
<td>0.000011</td>
</tr>
<tr>
<th>3</th>
<td>75</td>
<td>8.85</td>
<td>5900.0</td>
<td>0.243516</td>
<td>704.0</td>
<td>774.0</td>
<td>3453.00</td>
<td>0.138</td>
<td>186.207000</td>
<td>1.35</td>
<td>1.800000e-07</td>
<td>8.86</td>
<td>370.0</td>
<td>463.0</td>
<td>0.712500</td>
<td>21020.0</td>
<td>0.000006</td>
</tr>
<tr>
<th>4</th>
<td>28</td>
<td>6.60</td>
<td>3005.0</td>
<td>0.147207</td>
<td>378.6</td>
<td>430.1</td>
<td>1726.00</td>
<td>0.443</td>
<td>58.693400</td>
<td>1.35</td>
<td>7.200000e-08</td>
<td>6.59</td>
<td>180.0</td>
<td>200.0</td>
<td>0.740000</td>
<td>8908.0</td>
<td>0.000013</td>
</tr>
<tr>
<th>5</th>
<td>67</td>
<td>18.70</td>
<td>2968.0</td>
<td>0.207795</td>
<td>301.0</td>
<td>300.6</td>
<td>1747.00</td>
<td>0.164</td>
<td>164.930320</td>
<td>1.75</td>
<td>8.140000e-07</td>
<td>18.74</td>
<td>40.0</td>
<td>65.0</td>
<td>1.041000</td>
<td>8795.0</td>
<td>0.000011</td>
</tr>
<tr>
<th>6</th>
<td>79</td>
<td>10.20</td>
<td>3080.0</td>
<td>0.261370</td>
<td>340.0</td>
<td>368.2</td>
<td>1337.58</td>
<td>0.129</td>
<td>196.966569</td>
<td>1.35</td>
<td>2.200000e-08</td>
<td>10.21</td>
<td>220.0</td>
<td>78.0</td>
<td>1.070000</td>
<td>19300.0</td>
<td>0.000014</td>
</tr>
<tr>
<th>7</th>
<td>21</td>
<td>15.00</td>
<td>3104.0</td>
<td>0.119383</td>
<td>332.7</td>
<td>377.8</td>
<td>1814.00</td>
<td>0.556</td>
<td>44.955912</td>
<td>1.60</td>
<td>5.500000e-07</td>
<td>15.00</td>
<td>57.0</td>
<td>74.0</td>
<td>0.885000</td>
<td>2985.0</td>
<td>0.000010</td>
</tr>
<tr>
<th>8</th>
<td>45</td>
<td>8.30</td>
<td>4000.0</td>
<td>0.140838</td>
<td>494.0</td>
<td>556.0</td>
<td>2239.00</td>
<td>0.244</td>
<td>102.905500</td>
<td>1.35</td>
<td>4.300000e-08</td>
<td>8.28</td>
<td>380.0</td>
<td>275.0</td>
<td>0.745000</td>
<td>12450.0</td>
<td>0.000008</td>
</tr>
<tr>
<th>9</th>
<td>74</td>
<td>9.53</td>
<td>5930.0</td>
<td>0.239050</td>
<td>824.0</td>
<td>851.0</td>
<td>3680.00</td>
<td>0.133</td>
<td>183.840000</td>
<td>1.35</td>
<td>5.400000e-08</td>
<td>9.47</td>
<td>310.0</td>
<td>411.0</td>
<td>0.766667</td>
<td>19250.0</td>
<td>0.000005</td>
</tr>
</tbody>
</table>
</div>
### Processing and Organizing Data
We normalize the data and randomly split it into training and testing sets.
##### SETS
We have 47 elements for which the crystal structure is known and we will use 40 of these as a training set and the remaining 7 as testing set.
##### NORMALIZATION
We will again use the Standard Score Normalization, which subtracts the mean of the feature and divide by its standard deviation.
$$
\frac{X - \mu}{\sigma}
$$
While our model might converge without feature normalization, the resultant model would be difficult to train and would be dependent on the choice of units used in the input.
```python
# SETS
all_values = [list(df.iloc[x]) for x in range(len(all_values))]
# List of lists are turned into Numpy arrays to facilitate calculations in steps to follow
# (Normalization).
all_values = np.array(all_values, dtype = float)
print("Shape of Values:", all_values.shape)
all_labels = np.array(all_labels, dtype = int)
print("Shape of Labels:", all_labels.shape)
# Training Set
train_values = all_values[:40, :]
train_labels = all_labels[:40]
# Testing Set
test_values = all_values[-7:, :]
test_labels = all_labels[-7:]
# NORMALIZATION
mean = np.nanmean(train_values, axis = 0) # mean
std = np.nanstd(train_values, axis = 0) # standard deviation
train_values = (train_values - mean) / std # input scaling
test_values = (test_values - mean) / std # input scaling
print(train_values[0]) # print a sample entry from the training set
print(train_labels[0])
```
Shape of Values: (47, 17)
Shape of Labels: (47,)
[-0.80084167 -0.75983551 -0.00894162 -0.40732945 0.15599373 0.16654528
0.09455406 0.02631292 -0.82400017 -0.80570946 -0.67799461 -0.75661221
0.70972845 0.6516648 -0.77257498 0.11409173 -0.3075323 ]
2
### Creating the Random Forest Model
For this classification, we will use a random forest.
```python
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn import tree
# Create random forest classifier object
model = RandomForestClassifier()
# Train random forest classifier
model.fit(train_values, train_labels)
```
RandomForestClassifier()
### Validation
We calculate the accuracy score on the training and the testing sets.
```python
#Predict the response for training and testing dataset
train_pred = model.predict(train_values)
test_pred = model.predict(test_values)
# Model Accuracy for training and testing set, how often is the classifier correct?
print('Training accuracy = %.3f ' % accuracy_score(train_labels, train_pred))
print('Testing accuracy = %.3f ' % accuracy_score(test_labels, test_pred))
```
Training accuracy = 1.000
Testing accuracy = 0.429
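The notebook's title also promises a gradient boosted trees model. As a sketch (the hyperparameter values below are illustrative assumptions, not tuned settings), scikit-learn's `GradientBoostingClassifier` can be trained and scored on the same split:

```python
from sklearn.ensemble import GradientBoostingClassifier

# Gradient boosted trees: shallow trees are fit sequentially, each one
# correcting the errors of the ensemble built so far.
gbt_model = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1,
                                       max_depth=3, random_state=0)
gbt_model.fit(train_values, train_labels)

print('GBT training accuracy = %.3f ' % accuracy_score(train_labels, gbt_model.predict(train_values)))
print('GBT testing accuracy = %.3f ' % accuracy_score(test_labels, gbt_model.predict(test_values)))
```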
### Visualize the random forest model
Since the random forest consists of many decision trees, we can visualize the individual decision trees.
```python
label_names = ('fcc', 'bcc', 'hcp')
fig = plt.figure(figsize=(25,20))
# Select an individual decision tree, here 0.
_ = tree.plot_tree(model.estimators_[0], feature_names=querable_values,
class_names = label_names, filled=True, impurity=True, rounded=True)
```
```python
train_predictions = model.predict(train_values)
test_predictions = model.predict(test_values)
print("train_labels = ", train_labels)
print("test_labels = ", test_labels)
all_labels = np.hstack((train_labels, test_labels))
all_predictions = np.hstack((train_predictions, test_predictions))
predicted_labels = []
true_labels = []
for i in range(all_predictions.shape[0]):
if (all_predictions[i] == 0):
predicted_labels.append("FCC")
if (all_labels[i] == 0):
true_labels.append("FCC")
if (all_predictions[i] == 1):
predicted_labels.append("BCC")
if (all_labels[i] == 1):
true_labels.append("BCC")
if (all_predictions[i] == 2):
predicted_labels.append("HCP")
if (all_labels[i] == 2):
true_labels.append("HCP")
predicted_labels = np.array(predicted_labels).reshape((-1, 1))
true_labels = np.array(true_labels).reshape((-1, 1))
headings = ["Atomic number", "True crystal structure", "Predicted crystal structure"]
atomic_number_array = np.array(df.iloc[:, 0]).reshape((-1, 1))
plot_table = np.concatenate((atomic_number_array, true_labels, predicted_labels), axis=1)
plot_df = pd.DataFrame(plot_table, columns=headings)
```
train_labels = [2 2 2 2 0 2 0 2 0 1 2 2 2 0 1 2 1 1 1 1 1 2 0 0 1 2 1 1 2 1 1 0 2 2 0 2 0
1 1 2]
test_labels = [2 2 0 1 0 2 0]
```python
plot_df
```
|    | Atomic number | True crystal structure | Predicted crystal structure |
|----|---------------|------------------------|-----------------------------|
| 0  | 27 | HCP | HCP |
| 1  | 69 | HCP | HCP |
| 2  | 39 | HCP | HCP |
| 3  | 75 | HCP | HCP |
| 4  | 28 | FCC | FCC |
| 5  | 67 | HCP | HCP |
| 6  | 79 | FCC | FCC |
| 7  | 21 | HCP | HCP |
| 8  | 45 | FCC | FCC |
| 9  | 74 | BCC | BCC |
| 10 | 64 | HCP | HCP |
| 11 | 65 | HCP | HCP |
| 12 | 72 | HCP | HCP |
| 13 | 70 | FCC | FCC |
| 14 | 55 | BCC | BCC |
| 15 | 30 | HCP | HCP |
| 16 | 56 | BCC | BCC |
| 17 | 25 | BCC | BCC |
| 18 | 26 | BCC | BCC |
| 19 | 42 | BCC | BCC |
| 20 | 11 | BCC | BCC |
| 21 | 71 | HCP | HCP |
| 22 | 90 | FCC | FCC |
| 23 | 29 | FCC | FCC |
| 24 | 3  | BCC | BCC |
| 25 | 81 | HCP | HCP |
| 26 | 23 | BCC | BCC |
| 27 | 37 | BCC | BCC |
| 28 | 40 | HCP | HCP |
| 29 | 24 | BCC | BCC |
| 30 | 41 | BCC | BCC |
| 31 | 47 | FCC | FCC |
| 32 | 4  | HCP | HCP |
| 33 | 44 | HCP | HCP |
| 34 | 13 | FCC | FCC |
| 35 | 22 | HCP | HCP |
| 36 | 82 | FCC | FCC |
| 37 | 20 | BCC | BCC |
| 38 | 73 | BCC | BCC |
| 39 | 66 | HCP | HCP |
| 40 | 48 | HCP | HCP |
| 41 | 68 | HCP | HCP |
| 42 | 46 | FCC | HCP |
| 43 | 63 | BCC | BCC |
| 44 | 77 | FCC | HCP |
| 45 | 12 | HCP | BCC |
| 46 | 78 | FCC | HCP |
### Questions:
#### Hyperparameter optimization
1. We can select the `criterion` parameter to measure the quality of a split. For classification trees the default value is `'gini'` (with `'entropy'` as a common alternative); `'squared_error'` is the default only for the regressor variants.
2. When the algorithm performs a split, the main goal is to decrease impurity as much as possible. The more the impurity decreases, the more informative power that split gains. As the tree gets deeper, the amount of impurity decrease becomes lower. We can use this to prevent the tree from doing further splits. The hyperparameter for this task is `min_impurity_decrease`. Its default is zero. Try changing it to see the difference.
3. If the algorithm keeps splitting nodes, the model will probably overfit. The `min_samples_split` parameter can be used to control this: it sets the minimum number of samples a node must contain to be considered for splitting (a float value is interpreted as a fraction of the training samples). Try setting it to 0.3, so a node needs to hold at least 30% of the samples to be split further.
4. Another hyperparameter to control the depth of a tree is `max_depth`. It does not make any calculations regarding impurity or sample ratio. The model stops splitting when `max_depth` is reached. Note that `max_depth` is less flexible than `min_impurity_decrease`.
5. Another hyperparameter is `min_samples_leaf`. It indicates the minimum number of samples required to be at a leaf node.
6. We can also limit the number of leaf nodes using the `max_leaf_nodes` parameter, which grows the tree in a best-first fashion until `max_leaf_nodes` is reached. The best split is decided based on impurity decrease.
7. Another important hyperparameter of decision trees is `max_features` which is the number of features to consider when looking for the best split. If not specified, the model considers all of the features. There is only 1 feature in our dataset.
To change the hyperparameters:
`model = RandomForestClassifier(hyperparameter=value)`
Change the `max_depth` and `min_samples_split` to see how this affects the training and prediction error.
```python
# Create random forest classifier object with stricter regularization
model = RandomForestClassifier(max_depth=3, min_samples_split=0.5)
# Train random forest classifier
model.fit(train_values, train_labels)
#Predict the response for training and testing dataset
train_pred = model.predict(train_values)
test_pred = model.predict(test_values)
# Model Accuracy for training and testing set, how often is the classifier correct?
print('Training accuracy = %.3f ' % accuracy_score(train_labels, train_pred))
print('Testing accuracy = %.3f ' % accuracy_score(test_labels, test_pred))
label_names = ('fcc', 'bcc', 'hcp')
fig = plt.figure(figsize=(25,20))
_ = tree.plot_tree(model.estimators_[0], feature_names=querable_values, class_names = label_names, filled=True)
```
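Rather than adjusting one hyperparameter at a time, we can also search over a small grid of combinations with cross-validation. The cell below is a minimal sketch of this using scikit-learn's `GridSearchCV`; the grid values here are arbitrary illustrative choices, not tuned recommendations.
```python
from sklearn.model_selection import GridSearchCV

# Illustrative grid; the specific values are arbitrary choices
param_grid = {
    'max_depth': [2, 3, 5, None],
    'min_samples_split': [2, 0.1, 0.3],
    'min_samples_leaf': [1, 2, 4],
}
# 3-fold cross-validation keeps the folds reasonably sized on this small dataset
search = GridSearchCV(RandomForestClassifier(random_state=0), param_grid,
                      cv=3, scoring='accuracy')
search.fit(train_values, train_labels)
print('Best parameters:', search.best_params_)
print('Best CV accuracy = %.3f' % search.best_score_)
print('Testing accuracy = %.3f' % accuracy_score(test_labels, search.predict(test_values)))
```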
### Creating the Gradient Boosted Trees Model
Next, we will test gradient boosted trees for this classification.
```python
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import accuracy_score
from sklearn import tree
# Create gradient boosting classifier object
model = GradientBoostingClassifier()
# Train gradient boosting classifier
model.fit(train_values, train_labels)
```
GradientBoostingClassifier()
### Validation
We calculate the accuracy score on the training and the testing sets.
```python
#Predict the response for training and testing dataset
train_pred = model.predict(train_values)
test_pred = model.predict(test_values)
# Model Accuracy for training and testing set, how often is the classifier correct?
print('Training accuracy = %.3f ' % accuracy_score(train_labels, train_pred))
print('Testing accuracy = %.3f ' % accuracy_score(test_labels, test_pred))
```
Training accuracy = 1.000
Testing accuracy = 0.571
As we can see, for this dataset all the tree-based models perform comparably: they fit the training data (almost) perfectly while reaching only moderate accuracy on the small test set.
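With only 47 samples, a single train/test split gives a noisy performance estimate. A sturdier comparison is k-fold cross-validation over the full dataset; the sketch below assumes the train and test arrays from the earlier cells (2-D features, 1-D labels) and simply stacks them back together.
```python
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier

# Recombine the splits so every sample is used in cross-validation
all_values = np.vstack((train_values, test_values))
all_labels = np.hstack((train_labels, test_labels))

models = {
    'decision tree': DecisionTreeClassifier(random_state=0),
    'random forest': RandomForestClassifier(random_state=0),
    'gradient boosting': GradientBoostingClassifier(random_state=0),
}
for name, clf in models.items():
    scores = cross_val_score(clf, all_values, all_labels, cv=5)
    print('%s: mean accuracy = %.3f +/- %.3f' % (name, scores.mean(), scores.std()))
```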
|
/-
Copyright (c) 2018 Kevin Buzzard, Patrick Massot. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kevin Buzzard, Patrick Massot
This file is to a certain extent based on `quotient_module.lean` by Johannes Hölzl.
-/
import group_theory.coset
import group_theory.congruence
/-!
# Quotients of groups by normal subgroups
This files develops the basic theory of quotients of groups by normal subgroups. In particular it
proves Noether's first and second isomorphism theorems.
## Main definitions
* `mk'`: the canonical group homomorphism `G →* G/N` given a normal subgroup `N` of `G`.
* `lift φ`: the group homomorphism `G/N →* H` given a group homomorphism `φ : G →* H` such that
`N ⊆ ker φ`.
* `map f`: the group homomorphism `G/N →* H/M` given a group homomorphism `f : G →* H` such that
`N ⊆ f⁻¹(M)`.
## Main statements
* `quotient_ker_equiv_range`: Noether's first isomorphism theorem, an explicit isomorphism
`G/ker φ → range φ` for every group homomorphism `φ : G →* H`.
* `quotient_inf_equiv_prod_normal_quotient`: Noether's second isomorphism theorem, an explicit
isomorphism between `H/(H ∩ N)` and `(HN)/N` given a subgroup `H` and a normal subgroup `N` of a
group `G`.
* `quotient_group.quotient_quotient_equiv_quotient`: Noether's third isomorphism theorem,
the canonical isomorphism between `(G / N) / (M / N)` and `G / M`, where `N ≤ M`.
## Tags
isomorphism theorems, quotient groups
-/
universes u v
namespace quotient_group
variables {G : Type u} [group G] (N : subgroup G) [nN : N.normal] {H : Type v} [group H]
include nN
/-- The congruence relation generated by a normal subgroup. -/
@[to_additive "The additive congruence relation generated by a normal additive subgroup."]
protected def con : con G :=
{ to_setoid := left_rel N,
mul' := λ a b c d (hab : a⁻¹ * b ∈ N) (hcd : c⁻¹ * d ∈ N),
calc (a * c)⁻¹ * (b * d) = c⁻¹ * (a⁻¹ * b) * c⁻¹⁻¹ * (c⁻¹ * d) :
by simp only [mul_inv_rev, mul_assoc, inv_mul_cancel_left]
... ∈ N : N.mul_mem (nN.conj_mem _ hab _) hcd }
@[to_additive quotient_add_group.add_group]
instance quotient.group : group (G ⧸ N) :=
(quotient_group.con N).group
/-- The group homomorphism from `G` to `G/N`. -/
@[to_additive quotient_add_group.mk' "The additive group homomorphism from `G` to `G/N`."]
def mk' : G →* G ⧸ N := monoid_hom.mk' (quotient_group.mk) (λ _ _, rfl)
@[simp, to_additive]
lemma coe_mk' : (mk' N : G → G ⧸ N) = coe := rfl
@[simp, to_additive]
lemma mk'_apply (x : G) : mk' N x = x := rfl
/-- Two `monoid_hom`s from a quotient group are equal if their compositions with
`quotient_group.mk'` are equal.
See note [partially-applied ext lemmas]. -/
@[ext, to_additive /-" Two `add_monoid_hom`s from an additive quotient group are equal if their
compositions with `add_quotient_group.mk'` are equal.
See note [partially-applied ext lemmas]. "-/]
lemma monoid_hom_ext ⦃f g : G ⧸ N →* H⦄ (h : f.comp (mk' N) = g.comp (mk' N)) : f = g :=
monoid_hom.ext $ λ x, quotient_group.induction_on x $ (monoid_hom.congr_fun h : _)
@[simp, to_additive quotient_add_group.eq_zero_iff]
lemma eq_one_iff {N : subgroup G} [nN : N.normal] (x : G) : (x : G ⧸ N) = 1 ↔ x ∈ N :=
begin
refine quotient_group.eq.trans _,
rw [mul_one, subgroup.inv_mem_iff],
end
@[simp, to_additive quotient_add_group.ker_mk]
lemma ker_mk :
monoid_hom.ker (quotient_group.mk' N : G →* G ⧸ N) = N :=
subgroup.ext eq_one_iff
@[to_additive quotient_add_group.eq_iff_sub_mem]
lemma eq_iff_div_mem {N : subgroup G} [nN : N.normal] {x y : G} :
  (x : G ⧸ N) = y ↔ x / y ∈ N :=
begin
  refine eq_comm.trans (quotient_group.eq.trans _),
  rw [nN.mem_comm_iff, div_eq_mul_inv],
end
-- for commutative groups we don't need normality assumption
omit nN
@[to_additive quotient_add_group.add_comm_group]
instance {G : Type*} [comm_group G] (N : subgroup G) : comm_group (G ⧸ N) :=
{ mul_comm := λ a b, quotient.induction_on₂' a b
(λ a b, congr_arg mk (mul_comm a b)),
.. @quotient_group.quotient.group _ _ N N.normal_of_comm }
include nN
local notation ` Q ` := G ⧸ N
@[simp, to_additive quotient_add_group.coe_zero]
lemma coe_one : ((1 : G) : Q) = 1 := rfl
@[simp, to_additive quotient_add_group.coe_add]
lemma coe_mul (a b : G) : ((a * b : G) : Q) = a * b := rfl
@[simp, to_additive quotient_add_group.coe_neg]
lemma coe_inv (a : G) : ((a⁻¹ : G) : Q) = a⁻¹ := rfl
@[simp, to_additive quotient_add_group.coe_sub]
lemma coe_div (a b : G) : ((a / b : G) : Q) = a / b := rfl
@[simp, to_additive quotient_add_group.coe_nsmul]
lemma coe_pow (a : G) (n : ℕ) : ((a ^ n : G) : Q) = a ^ n := rfl
@[simp, to_additive quotient_add_group.coe_zsmul]
lemma coe_zpow (a : G) (n : ℤ) : ((a ^ n : G) : Q) = a ^ n := rfl
/-- A group homomorphism `φ : G →* H` with `N ⊆ ker(φ)` descends (i.e. `lift`s) to a
group homomorphism `G/N →* H`. -/
@[to_additive quotient_add_group.lift "An `add_group` homomorphism `φ : G →+ H` with `N ⊆ ker(φ)`
descends (i.e. `lift`s) to a group homomorphism `G/N →* H`."]
def lift (φ : G →* H) (HN : ∀x∈N, φ x = 1) : Q →* H :=
(quotient_group.con N).lift φ $ λ x y (h : x⁻¹ * y ∈ N),
calc φ x = φ (y * (x⁻¹ * y)⁻¹) : by rw [mul_inv_rev, inv_inv, mul_inv_cancel_left]
... = φ y : by rw [φ.map_mul, HN _ (N.inv_mem h), mul_one]
@[simp, to_additive quotient_add_group.lift_mk]
lemma lift_mk {φ : G →* H} (HN : ∀x∈N, φ x = 1) (g : G) :
lift N φ HN (g : Q) = φ g := rfl
@[simp, to_additive quotient_add_group.lift_mk']
lemma lift_mk' {φ : G →* H} (HN : ∀x∈N, φ x = 1) (g : G) :
lift N φ HN (mk g : Q) = φ g := rfl
@[simp, to_additive quotient_add_group.lift_quot_mk]
lemma lift_quot_mk {φ : G →* H} (HN : ∀x∈N, φ x = 1) (g : G) :
lift N φ HN (quot.mk _ g : Q) = φ g := rfl
/-- A group homomorphism `f : G →* H` induces a map `G/N →* H/M` if `N ⊆ f⁻¹(M)`. -/
@[to_additive quotient_add_group.map "An `add_group` homomorphism `f : G →+ H` induces a map
`G/N →+ H/M` if `N ⊆ f⁻¹(M)`."]
def map (M : subgroup H) [M.normal] (f : G →* H) (h : N ≤ M.comap f) :
G ⧸ N →* H ⧸ M :=
begin
refine quotient_group.lift N ((mk' M).comp f) _,
assume x hx,
refine quotient_group.eq.2 _,
rw [mul_one, subgroup.inv_mem_iff],
exact h hx,
end
@[simp, to_additive quotient_add_group.map_coe] lemma map_coe
(M : subgroup H) [M.normal] (f : G →* H) (h : N ≤ M.comap f) (x : G) :
map N M f h ↑x = ↑(f x) :=
lift_mk' _ _ x
@[to_additive quotient_add_group.map_mk'] lemma map_mk'
(M : subgroup H) [M.normal] (f : G →* H) (h : N ≤ M.comap f) (x : G) :
map N M f h (mk' _ x) = ↑(f x) :=
quotient_group.lift_mk' _ _ x
omit nN
variables (φ : G →* H)
open function monoid_hom
/-- The induced map from the quotient by the kernel to the codomain. -/
@[to_additive quotient_add_group.ker_lift "The induced map from the quotient by the kernel to the
codomain."]
def ker_lift : G ⧸ ker φ →* H :=
lift _ φ $ λ g, φ.mem_ker.mp
@[simp, to_additive quotient_add_group.ker_lift_mk]
lemma ker_lift_mk (g : G) : (ker_lift φ) g = φ g :=
lift_mk _ _ _
@[simp, to_additive quotient_add_group.ker_lift_mk']
lemma ker_lift_mk' (g : G) : (ker_lift φ) (mk g) = φ g :=
lift_mk' _ _ _
@[to_additive quotient_add_group.ker_lift_injective]
lemma ker_lift_injective : injective (ker_lift φ) :=
assume a b, quotient.induction_on₂' a b $
assume a b (h : φ a = φ b), quotient.sound' $
show a⁻¹ * b ∈ ker φ, by rw [mem_ker,
φ.map_mul, ← h, φ.map_inv, inv_mul_self]
-- Note that `ker φ` isn't definitionally `ker (φ.range_restrict)`
-- so there is a bit of annoying code duplication here
/-- The induced map from the quotient by the kernel to the range. -/
@[to_additive quotient_add_group.range_ker_lift "The induced map from the quotient by the kernel to
the range."]
def range_ker_lift : G ⧸ ker φ →* φ.range :=
lift _ φ.range_restrict $ λ g hg, (mem_ker _).mp $ by rwa range_restrict_ker
@[to_additive quotient_add_group.range_ker_lift_injective]
lemma range_ker_lift_injective : injective (range_ker_lift φ) :=
assume a b, quotient.induction_on₂' a b $
assume a b (h : φ.range_restrict a = φ.range_restrict b), quotient.sound' $
show a⁻¹ * b ∈ ker φ, by rw [←range_restrict_ker, mem_ker,
φ.range_restrict.map_mul, ← h, φ.range_restrict.map_inv, inv_mul_self]
@[to_additive quotient_add_group.range_ker_lift_surjective]
lemma range_ker_lift_surjective : surjective (range_ker_lift φ) :=
begin
rintro ⟨_, g, rfl⟩,
use mk g,
refl,
end
/-- **Noether's first isomorphism theorem** (a definition): the canonical isomorphism between
`G/(ker φ)` to `range φ`. -/
@[to_additive quotient_add_group.quotient_ker_equiv_range "The first isomorphism theorem
(a definition): the canonical isomorphism between `G/(ker φ)` to `range φ`."]
noncomputable def quotient_ker_equiv_range : G ⧸ ker φ ≃* range φ :=
mul_equiv.of_bijective (range_ker_lift φ) ⟨range_ker_lift_injective φ, range_ker_lift_surjective φ⟩
/-- The canonical isomorphism `G/(ker φ) ≃* H` induced by a homomorphism `φ : G →* H`
with a right inverse `ψ : H → G`. -/
@[to_additive quotient_add_group.quotient_ker_equiv_of_right_inverse "The canonical isomorphism
`G/(ker φ) ≃+ H` induced by a homomorphism `φ : G →+ H` with a right inverse `ψ : H → G`.",
simps]
def quotient_ker_equiv_of_right_inverse (ψ : H → G) (hφ : function.right_inverse ψ φ) :
G ⧸ ker φ ≃* H :=
{ to_fun := ker_lift φ,
inv_fun := mk ∘ ψ,
left_inv := λ x, ker_lift_injective φ (by rw [function.comp_app, ker_lift_mk', hφ]),
right_inv := hφ,
.. ker_lift φ }
/-- The canonical isomorphism `G/⊥ ≃* G`. -/
@[to_additive quotient_add_group.quotient_bot "The canonical isomorphism `G/⊥ ≃+ G`.", simps]
def quotient_bot : G ⧸ (⊥ : subgroup G) ≃* G :=
quotient_ker_equiv_of_right_inverse (monoid_hom.id G) id (λ x, rfl)
/-- The canonical isomorphism `G/(ker φ) ≃* H` induced by a surjection `φ : G →* H`.
For a `computable` version, see `quotient_group.quotient_ker_equiv_of_right_inverse`.
-/
@[to_additive quotient_add_group.quotient_ker_equiv_of_surjective "The canonical isomorphism
`G/(ker φ) ≃+ H` induced by a surjection `φ : G →+ H`.
For a `computable` version, see `quotient_add_group.quotient_ker_equiv_of_right_inverse`."]
noncomputable def quotient_ker_equiv_of_surjective (hφ : function.surjective φ) :
G ⧸ (ker φ) ≃* H :=
quotient_ker_equiv_of_right_inverse φ _ hφ.has_right_inverse.some_spec
/-- If two normal subgroups `M` and `N` of `G` are the same, their quotient groups are
isomorphic. -/
@[to_additive "If two normal subgroups `M` and `N` of `G` are the same, their quotient groups are
isomorphic."]
def equiv_quotient_of_eq {M N : subgroup G} [M.normal] [N.normal] (h : M = N) :
G ⧸ M ≃* G ⧸ N :=
{ to_fun := (lift M (mk' N) (λ m hm, quotient_group.eq.mpr (by simpa [← h] using M.inv_mem hm))),
inv_fun := (lift N (mk' M) (λ n hn, quotient_group.eq.mpr (by simpa [← h] using N.inv_mem hn))),
left_inv := λ x, x.induction_on' $ by { intro, refl },
right_inv := λ x, x.induction_on' $ by { intro, refl },
map_mul' := λ x y, by rw monoid_hom.map_mul }
@[simp, to_additive]
lemma equiv_quotient_of_eq_mk {M N : subgroup G} [M.normal] [N.normal] (h : M = N) (x : G) :
quotient_group.equiv_quotient_of_eq h (quotient_group.mk x) = (quotient_group.mk x) :=
rfl
/-- Let `A', A, B', B` be subgroups of `G`. If `A' ≤ B'` and `A ≤ B`,
then there is a map `A / (A' ⊓ A) →* B / (B' ⊓ B)` induced by the inclusions. -/
@[to_additive "Let `A', A, B', B` be subgroups of `G`. If `A' ≤ B'` and `A ≤ B`,
then there is a map `A / (A' ⊓ A) →+ B / (B' ⊓ B)` induced by the inclusions."]
def quotient_map_subgroup_of_of_le {A' A B' B : subgroup G}
[hAN : (A'.subgroup_of A).normal] [hBN : (B'.subgroup_of B).normal]
(h' : A' ≤ B') (h : A ≤ B) :
A ⧸ (A'.subgroup_of A) →* B ⧸ (B'.subgroup_of B) :=
map _ _ (subgroup.inclusion h) $
by simp [subgroup.subgroup_of, subgroup.comap_comap]; exact subgroup.comap_mono h'
@[simp, to_additive]
lemma quotient_map_subgroup_of_of_le_coe {A' A B' B : subgroup G}
[hAN : (A'.subgroup_of A).normal] [hBN : (B'.subgroup_of B).normal]
(h' : A' ≤ B') (h : A ≤ B) (x : A) :
quotient_map_subgroup_of_of_le h' h x = ↑(subgroup.inclusion h x : B) := rfl
/-- Let `A', A, B', B` be subgroups of `G`.
If `A' = B'` and `A = B`, then the quotients `A / (A' ⊓ A)` and `B / (B' ⊓ B)` are isomorphic.
Applying this equiv is nicer than rewriting along the equalities, since the type of
`(A'.subgroup_of A : subgroup A)` depends on `A`.
-/
@[to_additive "Let `A', A, B', B` be subgroups of `G`.
If `A' = B'` and `A = B`, then the quotients `A / (A' ⊓ A)` and `B / (B' ⊓ B)` are isomorphic.
Applying this equiv is nicer than rewriting along the equalities, since the type of
`(A'.add_subgroup_of A : add_subgroup A)` depends on `A`.
"]
def equiv_quotient_subgroup_of_of_eq {A' A B' B : subgroup G}
[hAN : (A'.subgroup_of A).normal] [hBN : (B'.subgroup_of B).normal]
(h' : A' = B') (h : A = B) :
A ⧸ (A'.subgroup_of A) ≃* B ⧸ (B'.subgroup_of B) :=
monoid_hom.to_mul_equiv
(quotient_map_subgroup_of_of_le h'.le h.le) (quotient_map_subgroup_of_of_le h'.ge h.ge)
(by { ext ⟨x, hx⟩, refl })
(by { ext ⟨x, hx⟩, refl })
section snd_isomorphism_thm
open _root_.subgroup
/-- **Noether's second isomorphism theorem**: given two subgroups `H` and `N` of a group `G`, where
`N` is normal, defines an isomorphism between `H/(H ∩ N)` and `(HN)/N`. -/
@[to_additive "The second isomorphism theorem: given two subgroups `H` and `N` of a group `G`,
where `N` is normal, defines an isomorphism between `H/(H ∩ N)` and `(H + N)/N`"]
noncomputable def quotient_inf_equiv_prod_normal_quotient (H N : subgroup G) [N.normal] :
H ⧸ ((H ⊓ N).comap H.subtype) ≃* _ ⧸ (N.comap (H ⊔ N).subtype) :=
/- φ is the natural homomorphism H →* (HN)/N. -/
let φ : H →* _ ⧸ (N.comap (H ⊔ N).subtype) :=
(mk' $ N.comap (H ⊔ N).subtype).comp (inclusion le_sup_left) in
have φ_surjective : function.surjective φ := λ x, x.induction_on' $
begin
rintro ⟨y, (hy : y ∈ ↑(H ⊔ N))⟩, rw mul_normal H N at hy,
rcases hy with ⟨h, n, hh, hn, rfl⟩,
use [h, hh], apply quotient.eq.mpr, change h⁻¹ * (h * n) ∈ N,
rwa [←mul_assoc, inv_mul_self, one_mul],
end,
(equiv_quotient_of_eq (by simp [comap_comap, ←comap_ker])).trans
(quotient_ker_equiv_of_surjective φ φ_surjective)
end snd_isomorphism_thm
section third_iso_thm
variables (M : subgroup G) [nM : M.normal]
include nM nN
@[to_additive quotient_add_group.map_normal]
instance map_normal : (M.map (quotient_group.mk' N)).normal :=
{ conj_mem := begin
rintro _ ⟨x, hx, rfl⟩ y,
refine induction_on' y (λ y, ⟨y * x * y⁻¹, subgroup.normal.conj_mem nM x hx y, _⟩),
simp only [mk'_apply, coe_mul, coe_inv]
end }
variables (h : N ≤ M)
/-- The map from the third isomorphism theorem for groups: `(G / N) / (M / N) → G / M`. -/
@[to_additive quotient_add_group.quotient_quotient_equiv_quotient_aux
"The map from the third isomorphism theorem for additive groups: `(A / N) / (M / N) → A / M`."]
def quotient_quotient_equiv_quotient_aux :
(G ⧸ N) ⧸ (M.map (mk' N)) →* G ⧸ M :=
lift (M.map (mk' N))
(map N M (monoid_hom.id G) h)
(by { rintro _ ⟨x, hx, rfl⟩, rw map_mk' N M _ _ x,
exact (quotient_group.eq_one_iff _).mpr hx })
@[simp, to_additive quotient_add_group.quotient_quotient_equiv_quotient_aux_coe]
lemma quotient_quotient_equiv_quotient_aux_coe (x : G ⧸ N) :
quotient_quotient_equiv_quotient_aux N M h x = quotient_group.map N M (monoid_hom.id G) h x :=
quotient_group.lift_mk' _ _ x
@[to_additive quotient_add_group.quotient_quotient_equiv_quotient_aux_coe_coe]
lemma quotient_quotient_equiv_quotient_aux_coe_coe (x : G) :
quotient_quotient_equiv_quotient_aux N M h (x : G ⧸ N) =
x :=
quotient_group.lift_mk' _ _ x
/-- **Noether's third isomorphism theorem** for groups: `(G / N) / (M / N) ≃ G / M`. -/
@[to_additive quotient_add_group.quotient_quotient_equiv_quotient
"**Noether's third isomorphism theorem** for additive groups: `(A / N) / (M / N) ≃ A / M`."]
def quotient_quotient_equiv_quotient :
(G ⧸ N) ⧸ (M.map (quotient_group.mk' N)) ≃* G ⧸ M :=
monoid_hom.to_mul_equiv
(quotient_quotient_equiv_quotient_aux N M h)
(quotient_group.map _ _ (quotient_group.mk' N) (subgroup.le_comap_map _ _))
(by { ext, simp })
(by { ext, simp })
end third_iso_thm
section trivial
@[to_additive] lemma subsingleton_quotient_top :
subsingleton (G ⧸ (⊤ : subgroup G)) :=
trunc.subsingleton
/-- If the quotient by a subgroup gives a singleton then the subgroup is the whole group. -/
@[to_additive] lemma subgroup_eq_top_of_subsingleton (H : subgroup G)
(h : subsingleton (G ⧸ H)) : H = ⊤ :=
top_unique $ λ x _,
have this : 1⁻¹ * x ∈ H := quotient_group.eq.1 (subsingleton.elim _ _),
by rwa [one_inv, one_mul] at this
end trivial
@[to_additive quotient_add_group.comap_comap_center]
lemma comap_comap_center {H₁ : subgroup G} [H₁.normal] {H₂ : subgroup (G ⧸ H₁)} [H₂.normal] :
(((subgroup.center ((G ⧸ H₁) ⧸ H₂))).comap (mk' H₂)).comap (mk' H₁) =
(subgroup.center (G ⧸ H₂.comap (mk' H₁))).comap (mk' (H₂.comap (mk' H₁))) :=
begin
ext x,
simp only [mk'_apply, subgroup.mem_comap, subgroup.mem_center_iff, forall_coe],
apply forall_congr,
change ∀ (y : G), (↑↑(y * x) = ↑↑(x * y) ↔ ↑(y * x) = ↑(x * y)),
intro y,
repeat { rw [eq_iff_div_mem] },
simp,
end
end quotient_group
|
[STATEMENT]
lemma rejects_subset: "\<lbrakk>rejects \<F> S M; N \<subseteq> M\<rbrakk> \<Longrightarrow> rejects \<F> S N"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>rejects \<F> S M; N \<subseteq> M\<rbrakk> \<Longrightarrow> rejects \<F> S N
[PROOF STEP]
by (fastforce simp add: rejects_def comparables_def) |
Require Import List.
Require Import NPeano EqNat Compare_dec.
(* determine whether given list of numbers is in decreasing order *)
Fixpoint decreasing_order (ns : list nat) : bool :=
match ns with
| nil => true
| a :: ns' => andb (match ns' with
| nil => true
| b :: _ => ltb b a
end)
(decreasing_order ns')
end.
Fixpoint last_is_1 (ns : list nat) : bool :=
match ns with
| nil => false
| 1 :: nil => true
| _ :: nil => false
| n :: ns' => last_is_1 ns'
end.
Definition coinlist := list nat.
Definition repr := list nat.
Definition N := 4.
Definition C : coinlist := 25 :: 6 :: 5 :: 1 :: nil.
Eval compute in decreasing_order C.
(* this should follow from decreasing_order and last_is_1
Definition no_zeroes (C:coinlist) := forallb (fun c:nat => ltb 0 c) C.
*)
Fixpoint repr_value (C : coinlist) (V : repr) : nat := (* inner product V . C *)
match C, V with
| nil, nil => 0
| c :: C', v :: V' => (c*v) + repr_value C' V'
| _, _ => 0
end.
Eval compute in (beq_nat 38 (repr_value (25 :: 10 :: 5 :: 1 :: nil) (1 :: 1 :: 0 :: 3 :: nil))).
Fixpoint repr_size (A : repr) : nat :=
match A with
| nil => 0
| a :: A' => a + repr_size A'
end.
Eval compute in (beq_nat 5 (repr_size (1 :: 1 :: 0 :: 3 :: nil))).
Fixpoint repr_lt (U V : repr) : bool :=
match U, V with
| nil, nil => false
| u :: U', v :: V' =>
orb (ltb u v) (andb (beq_nat u v) (repr_lt U' V'))
| nil, _ => true
| _, nil => false
end.
Fixpoint repr_le (U V : repr) : bool :=
match U, V with
| nil, nil => true
| u :: U', v :: V' =>
orb (ltb u v) (andb (beq_nat u v) (repr_le U' V'))
| nil, _ => true
| _, nil => false
end.
Definition repr_gt (U V : repr) : bool := repr_lt V U.
Definition repr_ge (U V : repr) : bool := repr_le V U.
Eval compute in (repr_lt (1 :: 1 :: 0 :: 3 :: nil) (1 :: 3 :: 0 :: 0 :: nil)).
Eval compute in (repr_lt (1 :: 1 :: 0 :: 3 :: nil) (1 :: 1 :: 0 :: 3 :: nil)).
Eval compute in (repr_le (1 :: 1 :: 0 :: 3 :: nil) (1 :: 1 :: 0 :: 3 :: nil)).
Eval compute in (repr_le (1 :: 1 :: 0 :: 3 :: nil) (1 :: 3 :: 0 :: 0 :: nil)).
Eval compute in negb (repr_lt (1 :: 1 :: nil) (1 :: 1 :: nil)).
Eval compute in negb (repr_lt (3 :: 1 :: nil) (1 :: 1 :: nil)).
Fixpoint make_list (k:nat) (v:nat) :=
match k with
| 0 => nil
| S k' => v :: make_list k' v
end.
(* comp : is the first better than the second *)
Fixpoint best_of (comp : repr -> repr -> bool) (candidate : repr) (Rs : list repr) : repr :=
match Rs with
| nil => candidate
| r :: Rs' => best_of comp (if (comp r candidate) then r else candidate) Rs'
end.
Eval compute in 11 / 3.
Eval compute in 11 mod 3.
Fixpoint range (n:nat) : list nat :=
match n with
| 0 => nil
| S n' => n' :: range n'
end.
Fixpoint range_from (start num : nat) : list nat :=
match num with
| 0 => nil
| S m => start :: range_from (S start) m
end.
Eval compute in (range 5).
Eval compute in (range_from 10 5).
Fixpoint cons_each (x:nat) (V:list repr) :=
match V with
| nil => nil
| v :: V' => (cons x v) :: cons_each x V'
end.
Fixpoint all_reprs_iterate
(all_reprs : coinlist -> nat -> list repr) (C':coinlist) (c:nat) (v:nat) (X : list nat) : list repr :=
match X with
| nil => nil
| x :: X' =>
(* x .. c *)
app
(cons_each x (all_reprs C' (v - (x * c))))
(all_reprs_iterate all_reprs C' c v X')
end.
Fixpoint all_reprs (C : coinlist) (v : nat) {struct C} : list repr :=
match C with
| nil => nil
| c :: nil => (v :: nil) :: nil
| c :: C' => let max_of_c := v / c in
let count_of_c_opts := range (S max_of_c) in
(* all_reprs_iterate all_reprs C' c v *)
(fix all_reprs_iterate (X : list nat) : list repr :=
match X with
| nil => nil
| x :: X' =>
(* x .. c *)
app
(cons_each x (all_reprs C' (v - (c * x))))
(all_reprs_iterate X')
end)
count_of_c_opts
end.
Eval compute in (all_reprs C 17).
(* new is "more [or equally] minimal" than cur if:
- size(new) < size(cur), or
   - size(new)=size(cur) and cur <= new [lexicographically] *)
Definition more_minimal (new : repr) (cur : repr) : bool :=
orb (ltb (repr_size new) (repr_size cur))
(andb (beq_nat (repr_size new) (repr_size cur))
(repr_le cur new)).
Fixpoint make_repr_all_ones (n:nat) (v:nat) : repr :=
match n with
| 0 => nil
| 1 => v :: nil
| S n' => 0 :: make_repr_all_ones n' v
end.
(* brute force computations of the minimal and greedy representations *)
Definition minimal_bf C v := best_of more_minimal (make_repr_all_ones (length C) v) (all_reprs C v).
Definition greedy_bf C v := best_of repr_ge (make_repr_all_ones (length C) v) (all_reprs C v).
Fixpoint greedy (C:coinlist) (v:nat) : repr :=
match C with
| nil => nil
| c :: C' => let q := v / c in
let r := v mod c in
q :: greedy C' r
end.
Eval compute in (make_repr_all_ones 4 17).
Eval compute in (more_minimal (0 :: 1 :: 1 :: 2 :: nil) (0 :: 0 :: 1 :: 12 :: nil)).
Eval compute in (more_minimal (1 :: 2 :: 1 :: 0 :: nil) (0 :: 1 :: 1 :: 2 :: nil) ).
Eval compute in
let v := 83 in
(minimal_bf C v ,
greedy_bf C v,
greedy C v).
(* =================================================== *)
(* Pearson's algorithm to find smallest counterexample *)
Definition targetCvals (C:coinlist) : coinlist :=
map (fun c => c - 1) C.
Eval compute in targetCvals (25 :: 10 :: 5 :: 1 :: nil).
Definition greedy_multi (C:coinlist) (V : list nat) : list repr :=
map (greedy C) V.
Eval compute in (greedy_multi C (targetCvals C)).
Definition zero_out : repr -> repr :=
map (fun x => 0).
Fixpoint generate_possible_ce (R : repr) (i : nat) : repr :=
match R, i with
| x :: R', 0 => x+1 :: zero_out R'
| x :: R', S i' => x :: generate_possible_ce R' i'
| _, _ => R
end.
Definition generate_possible_ces_from (R : repr) (i j : nat) : list repr :=
map (generate_possible_ce R) (range_from i (j - i)).
Eval compute in generate_possible_ce (2 :: 1 :: 3 :: 1 :: 2 :: 7 :: nil) 3.
Eval compute in generate_possible_ces_from (2 :: 1 :: 3 :: 1 :: 2 :: 7 :: nil) 1 4.
Fixpoint app_all (Rs : list (list repr)) : list repr :=
match Rs with
| nil => nil
| x :: Rs' => app x (app_all Rs')
end.
Definition generate_min_reprs_to_check (Gs : list repr) : list repr :=
app_all (map (fun G => generate_possible_ces_from G 1 N) Gs).
Eval compute in generate_min_reprs_to_check (greedy_multi C (targetCvals C)).
(* Note: this predicate is specialized to the global coin system C defined above. *)
Definition is_min_lt_greedy_repr (R : repr) : bool :=
  ltb (repr_size R) (repr_size (greedy C (repr_value C R))).
Fixpoint findp (A:Type) (f: A -> bool) (As : list A) : option A :=
match As with
| nil => None
| a :: As' => if (f a) then Some a else findp A f As'
end.
Definition find_counterexample (C:coinlist) : option nat :=
match
findp _ is_min_lt_greedy_repr (generate_min_reprs_to_check (greedy_multi C (targetCvals C)))
with
| None => None
| Some R => Some (repr_value C R)
end.
Eval compute in (find_counterexample C).
Eval compute in
let v := 10 in
(minimal_bf C v ,
greedy_bf C v,
greedy C v). |
#' Utah's Municipalities
#'
#' Polygons containing municipal boundaries from gis.utah.gov.
#'
#' @format An sf type polygon shapefile
"muni_poly"
|
Formal statement is: lemma filterlim_at_bot_mirror: "(LIM x at_bot. f x :> F) \<longleftrightarrow> (LIM x at_top. f (-x::real) :> F)" Informal statement is: $f$ tends to the filter $F$ at $-\infty$ if and only if $x \mapsto f(-x)$ tends to $F$ at $+\infty$. |
theory T156
imports Main
begin
lemma "(
(\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) &
(\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(meet(x, y), z) = meet(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) &
(\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) &
(\<forall> x::nat. invo(invo(x)) = x)
) \<longrightarrow>
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(meet(x, y), z) = join(undr(x, z), undr(y, z)))
"
nitpick[card nat=4,timeout=86400]
oops
end |
import data.real.basic
open_locale classical
/-
Theoretical negations.
This file is for people interested in logic who want to fully understand
negations.
Here we don't use `contrapose` or `push_neg`. The goal is to prove lemmas
that are used by those tactics. Of course we can use
`exfalso`, `by_contradiction` and `by_cases`.
If this doesn't sound like fun then skip ahead to the next file.
-/
section negation_prop
variables P Q : Prop
-- 0055
example : (P → Q) ↔ (¬ Q → ¬ P) :=
begin
split,
{
intros h1 hnQ hP,
exact hnQ (h1 hP),
},
{
intros h1 hP,
by_contradiction hnQ,
exact h1 hnQ hP,
}
end
-- 0056
lemma non_imp (P Q : Prop) : ¬ (P → Q) ↔ P ∧ ¬ Q :=
begin
sorry
end
-- In the next one, let's use the axiom
-- propext {P Q : Prop} : (P ↔ Q) → P = Q
-- 0057
example (P : Prop) : ¬ P ↔ P = false :=
begin
sorry
end
end negation_prop
section negation_quantifiers
variables (X : Type) (P : X → Prop)
-- 0058
example : ¬ (∀ x, P x) ↔ ∃ x, ¬ P x :=
begin
sorry
end
-- 0059
example : ¬ (∃ x, P x) ↔ ∀ x, ¬ P x :=
begin
sorry
end
-- 0060
example (P : ℝ → Prop) : ¬ (∃ ε > 0, P ε) ↔ ∀ ε > 0, ¬ P ε :=
begin
sorry
end
-- 0061
example (P : ℝ → Prop) : ¬ (∀ x > 0, P x) ↔ ∃ x > 0, ¬ P x :=
begin
sorry
end
end negation_quantifiers
|
[STATEMENT]
lemma nat_of_fatom_bij: "bij_betw nat_of_fatom ground_fatoms UNIV"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bij_betw nat_of_fatom ground_fatoms UNIV
[PROOF STEP]
using nat_of_hatom_bij bij_betw_trans hatom_of_fatom_bij hatom_of_nat_bij
[PROOF STATE]
proof (prove)
using this:
bij nat_of_hatom
\<lbrakk>bij_betw ?f ?A ?B; bij_betw ?g ?B ?C\<rbrakk> \<Longrightarrow> bij_betw (?g \<circ> ?f) ?A ?C
bij_betw hatom_of_fatom ground_fatoms UNIV
bij hatom_of_nat
goal (1 subgoal):
1. bij_betw nat_of_fatom ground_fatoms UNIV
[PROOF STEP]
unfolding nat_of_fatom_def comp_def
[PROOF STATE]
proof (prove)
using this:
bij nat_of_hatom
\<lbrakk>bij_betw ?f ?A ?B; bij_betw ?g ?B ?C\<rbrakk> \<Longrightarrow> bij_betw (\<lambda>x. ?g (?f x)) ?A ?C
bij_betw hatom_of_fatom ground_fatoms UNIV
bij hatom_of_nat
goal (1 subgoal):
1. bij_betw (\<lambda>t. nat_of_hatom (hatom_of_fatom t)) ground_fatoms UNIV
[PROOF STEP]
by blast |
Dozens attend CBFD Open House to learn about fire prevention and safety - and to ride the fire trucks!
Riding on a fire engine is a fantasy for most kids, but last night it became a reality for those visiting Columbia Borough Fire Department's Open House. Before climbing aboard the trucks, however, children collected several fire safety related items to fill the backpacks they received there. They also got close-up looks inside an ambulance, police car, and a QRS vehicle. Sparky the Fire Dog greeted guests, many of whom posed with him for photos. Although activities were designed to be fun, education remained at the forefront to teach children (and adults) about fire prevention and safety. Several kids even braved entering a fire safety trailer filled with smoke to learn how to escape a fire. Free food and refreshments were also served at the event. |
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.DStructures.Meta.Isomorphism where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.Univalence
open import Cubical.Functions.FunExtEquiv
open import Cubical.Homotopy.Base
open import Cubical.Data.Sigma
open import Cubical.Relation.Binary
open import Cubical.Algebra.Group
open import Cubical.Structures.LeftAction
open import Cubical.DStructures.Base
open import Cubical.DStructures.Meta.Properties
open import Cubical.DStructures.Structures.Constant
open import Cubical.DStructures.Structures.Type
open import Cubical.DStructures.Structures.Group
private
variable
ℓ ℓ' ℓ'' ℓ₁ ℓ₁' ℓ₁'' ℓ₂ ℓA ℓA' ℓ≅A ℓ≅A' ℓB ℓB' ℓ≅B ℓ≅B' ℓC ℓ≅C ℓ≅ᴰ ℓ≅ᴰ' : Level
open URGStr
open URGStrᴰ
----------------------------------------------
-- Pseudo-isomorphisms between URG structures
-- are relational isos of the underlying rel.
----------------------------------------------
𝒮-PIso : {A : Type ℓA} (𝒮-A : URGStr A ℓ≅A)
{A' : Type ℓA'} (𝒮-A' : URGStr A' ℓ≅A')
→ Type (ℓ-max (ℓ-max ℓA ℓA') (ℓ-max ℓ≅A ℓ≅A'))
𝒮-PIso 𝒮-A 𝒮-A' = RelIso (URGStr._≅_ 𝒮-A) (URGStr._≅_ 𝒮-A')
----------------------------------------------
-- Since the relations are univalent,
-- a rel. iso induces an iso of the underlying
-- types.
----------------------------------------------
𝒮-PIso→Iso : {A : Type ℓA} (𝒮-A : URGStr A ℓ≅A)
{A' : Type ℓA'} (𝒮-A' : URGStr A' ℓ≅A')
(ℱ : 𝒮-PIso 𝒮-A 𝒮-A')
→ Iso A A'
𝒮-PIso→Iso 𝒮-A 𝒮-A' ℱ
= RelIso→Iso (_≅_ 𝒮-A) (_≅_ 𝒮-A') (uni 𝒮-A) (uni 𝒮-A') ℱ
----------------------------------------------
-- From a DURG structure, extract the
-- relational family over the base type
----------------------------------------------
𝒮ᴰ→relFamily : {A : Type ℓA} {𝒮-A : URGStr A ℓ≅A}
{B : A → Type ℓB} (𝒮ᴰ-B : URGStrᴰ 𝒮-A B ℓ≅B)
→ RelFamily A ℓB ℓ≅B
-- define the type family, just B
𝒮ᴰ→relFamily {B = B} 𝒮ᴰ-B .fst = B
-- the binary relation is the displayed relation over ρ a
𝒮ᴰ→relFamily {𝒮-A = 𝒮-A} {B = B} 𝒮ᴰ-B .snd {a = a} b b'
= 𝒮ᴰ-B ._≅ᴰ⟨_⟩_ b (𝒮-A .ρ a) b'
--------------------------------------------------------------
-- the type of relational isos between a DURG structure
-- and the pulled back relational family of another
--
-- ℱ will in applications always be an isomorphism,
-- but that's not needed for this definition.
--------------------------------------------------------------
𝒮ᴰ-♭PIso : {A : Type ℓA} {𝒮-A : URGStr A ℓ≅A}
{A' : Type ℓA'} {𝒮-A' : URGStr A' ℓ≅A'}
(ℱ : A → A')
{B : A → Type ℓB} (𝒮ᴰ-B : URGStrᴰ 𝒮-A B ℓ≅B)
{B' : A' → Type ℓB'} (𝒮ᴰ-B' : URGStrᴰ 𝒮-A' B' ℓ≅B')
→ Type (ℓ-max ℓA (ℓ-max (ℓ-max ℓB ℓB') (ℓ-max ℓ≅B ℓ≅B')))
𝒮ᴰ-♭PIso ℱ 𝒮ᴰ-B 𝒮ᴰ-B'
= ♭RelFiberIsoOver ℱ (𝒮ᴰ→relFamily 𝒮ᴰ-B) (𝒮ᴰ→relFamily 𝒮ᴰ-B')
---------------------------------------------------------
-- Given
-- - an isomorphism ℱ of underlying types A, A', and
-- - an 𝒮ᴰ-bPIso over ℱ
-- produce an iso of the underlying total spaces
---------------------------------------------------------
𝒮ᴰ-♭PIso-Over→TotalIso : {A : Type ℓA} {𝒮-A : URGStr A ℓ≅A}
{A' : Type ℓA'} {𝒮-A' : URGStr A' ℓ≅A'}
(ℱ : Iso A A')
{B : A → Type ℓB} (𝒮ᴰ-B : URGStrᴰ 𝒮-A B ℓ≅B)
{B' : A' → Type ℓB'} (𝒮ᴰ-B' : URGStrᴰ 𝒮-A' B' ℓ≅B')
(𝒢 : 𝒮ᴰ-♭PIso (Iso.fun ℱ) 𝒮ᴰ-B 𝒮ᴰ-B')
→ Iso (Σ A B) (Σ A' B')
𝒮ᴰ-♭PIso-Over→TotalIso ℱ 𝒮ᴰ-B 𝒮ᴰ-B' 𝒢
= RelFiberIsoOver→Iso ℱ
(𝒮ᴰ→relFamily 𝒮ᴰ-B) (𝒮ᴰ-B .uniᴰ)
(𝒮ᴰ→relFamily 𝒮ᴰ-B') (𝒮ᴰ-B' .uniᴰ)
𝒢
|
```python
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
plt.rcParams['font.size'] = 13
plt.rcParams['axes.spines.right'] = False
plt.rcParams['ytick.right'] = False
plt.rcParams['axes.spines.top'] = False
plt.rcParams['xtick.top'] = False
```
### A linear mapping from input to output space
Linear regression assumes that some recorded output values $y_i$ for $i=1,...,N_\mathrm{samples}$ depend linearly on some input values $\mathbf{x}_i$, and that any deviations are due to noise. Given $\mathbf{x}$-$y$ data, your task is then to rediscover the form of this linear mapping, which in the simplest case corresponds to fitting a line to the data. The only question is how to select the best line.
```python
# Generate example data
# Initialize
sigma = 0.5 # noise std
nSamples = 21 # number of samples
x = np.linspace(0, 5, nSamples) # input values
lineFun = lambda w0, w1, x: w0 + w1*x # linear mapping
# Generate y-data
w0 = 1 # intercept
w1 = 0.5 # slope
y = lineFun(w0, w1, x)
y += np.random.randn(nSamples)*sigma
# Plot our generated data
plt.figure(figsize=(7.5, 3))
plt.plot(x, y, 'ko')
plt.xlabel('x')
plt.ylabel('y');
```
### Least squares approach
One approach for evaluating how well a line fits is to sum up the squared distances from the line to each $y$-value, and to compare this value for various lines. A lower value indicates a better fit and a higher value a worse fit. Usually, this sum is further divided by the number of samples, so as to get a mean squared error (MSE).
```python
# Calculate the MSE for a test line
w0Test = 1.0
w1Test = 0.5
yHat = lineFun(w0Test, w1Test, x) # Model predictions, the predicted line
mseFun = lambda y, yHat: np.mean((y-yHat)**2) # MSE function
mseTest = mseFun(y, yHat) # MSE value for our test line
# Plot the test line (blue), the distace to each y_i (red) and the y-values (black)
plt.figure(figsize=(7.5, 3))
for xi, yi, yHati in zip(x, y, yHat):
plt.stem([xi], [yi], 'r', bottom=yHati)
plt.plot(x, yHat, 'b-')
plt.plot(x, y, 'ko')
plt.xlabel('x')
plt.ylabel('y');
plt.title('MSE: ' + '%1.3f' % mseTest);
```
From the example above we learn that each line (a unique combination of $w_0$ and $w_1$) obtains a slightly different MSE value. Our task is therefore to find the $w_0$ and $w_1$ combination with the lowest value. Naively, we can try to do this by simply testing various combinations and plotting the MSE value as a function of both $w_0$ and $w_1$.
```python
# Get w0 and w1 combinations over a grid
nGrid = 21
W0, W1 = np.meshgrid(np.linspace(w0-2, w0+2, nGrid), np.linspace(w1-1, w1+1, nGrid))
# Get the MSE for each combination
mseVals = np.zeros([nGrid, nGrid])
for i in range(nGrid):
for j in range(nGrid):
yHat = lineFun(W0[i, j], W1[i, j], x)
mseVals[i, j] = mseFun(y, yHat)
# Plot the surface
fig = plt.figure(figsize=(15, 5))
ax = plt.subplot(1, 2, 1, projection='3d')
ax.plot_surface(W0, W1, mseVals, cmap=cm.coolwarm, linewidth=0, antialiased=False)
ax.set_xlabel('$w_0$')
ax.set_ylabel('$w_1$')
ax.set_zlabel('MSE')
ax = plt.subplot(1, 2, 2)
ax.contourf(W0, W1, mseVals, 50, cmap=cm.coolwarm)
ax.set_xlabel('$w_0$')
ax.set_ylabel('$w_1$');
```
The take-home message so far is thus that each line (unique combination of $w_0$ and $w_1$) corresponds to one point on an MSE surface, and that the best line in an MSE sense is at the bottom of the surface, where the MSE minimum is located.
```python
# Test various w0 and w1 values to see how the corresponding line fits at various locations on MSE surface
w0Test = 0.5
w1Test = 1.0
# Evaluate the MSE value for our current test line
yHat = lineFun(w0Test, w1Test, x)
mseFun = lambda y, yHat: np.mean((y-yHat)**2)
mseTest = mseFun(y, yHat)
# Plot
fig = plt.figure(figsize=(15, 5))
ax = plt.subplot(1, 2, 1)
for xi, yi, yHati in zip(x, y, yHat):
ax.stem([xi], [yi], 'r', bottom=yHati)
ax.plot(x, yHat, 'b-')
ax.plot(x, y, 'ko')
ax.set_xlabel('x')
ax.set_ylabel('y');
ax.set_title('MSE: ' + '%1.3f' % mseTest);
ax = plt.subplot(1, 2, 2)
ax.contourf(W0, W1, mseVals, 50, cmap=cm.coolwarm)
ax.set_xlabel('$w_0$')
ax.set_ylabel('$w_1$');
ax.plot(w0Test, w1Test, 'ko', ms=10);
```
### How to find the minimum in practice
Before moving on we need a more general description of our optimization problem, and we obtain it by describing the linear mapping using matrix notation. The line equation used above ($\hat{y}_i = w_0+w_1 x_i$) can then be written as:
\begin{equation}
\hat{y}_i = \mathbf{x}_i^T \mathbf{w}, \quad \text{where}: \mathbf{x}_i^T = [1, x_i] \; \text{and} \; \mathbf{w}^T = [w_0, w_1],
\end{equation}
and subsequently, it is possible to write the predictions for all samples as $\hat{\mathbf{y}} = \mathbf{X}\mathbf{w}$, where $\mathbf{x}_1^T$, ..., $\mathbf{x}_{N_\mathrm{samples}}^T$ make up the rows in $\mathbf{X}$. Similarly, the MSE can also be expressed with matrix notation as:
\begin{equation}
MSE = \frac{1}{N_\mathrm{samples}} (\mathbf{y}-\mathbf{X}\mathbf{w})^T(\mathbf{y}-\mathbf{X}\mathbf{w})
\end{equation}
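As a quick sanity check, the matrix form can be verified numerically against the elementwise MSE from the earlier cells (a small sketch, reusing the `x`, `y`, `w0Test`, `w1Test`, and `mseFun` names defined above):
```python
# Verify that the matrix form of the MSE matches the elementwise version
X = np.vstack([np.ones(nSamples), x]).T          # rows are [1, x_i]
w = np.array([w0Test, w1Test])
residual = y - np.dot(X, w)
mseMatrix = np.dot(residual, residual) / nSamples
print(np.isclose(mseMatrix, mseFun(y, np.dot(X, w))))  # expect True
```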
Now, let's assume that we start with an initial guess ($\tilde{w}$) for both $w_0$ and $w_1$, and then we will try to iteratively improve it. This does, however, require a way of choosing how to iteratively change $\tilde{w}$, but we can use the gradient for this. The gradient points in the direction in which a function increases the fastest. So, by moving in the opposite direction we should effectively find new values for $w_0$ and $w_1$ that correspond to a lower MSE value. We thus move on to find the gradient by differentiating the MSE function with respect to $\mathbf{w}$:
\begin{equation}
\frac{d\,MSE}{d\mathbf{w}} = \frac{-2}{N_\mathrm{samples}} \mathbf{X}^T(\mathbf{y}-\mathbf{X}\mathbf{w})
\end{equation}
The idea of moving in the opposite direction to the gradient is fundamental to all gradient based optimization techniques, and the simplest idea of just taking small steps in the opposite direction is called gradient descent.
```python
# Plot the MSE surface first
fig = plt.figure(figsize=(7.5, 5))
plt.contourf(W0, W1, mseVals, 50, cmap=cm.coolwarm)
plt.xlabel('$w_0$')
plt.ylabel('$w_1$');
# Gradient descent
eta = 0.5e-2 # step length
wTilde = np.array([2.5, 1.25]) # initial guess
X = np.vstack([np.ones(nSamples), x]).T # create the X matrix where each row is one sample
plt.plot(wTilde[0], wTilde[1], 'o', ms=6, c=[1, 1, 1]) # Plot our initial location on the MSE surface
for i in range(10):
gradient = -2./nSamples * np.dot(X.T, y-np.dot(X, wTilde)) # Calculate the gradient
wTilde -= eta*gradient # Move in the opposite direction of the gradient
plt.plot(wTilde[0], wTilde[1], 'o', ms=6, c=[1, 1, 1]) # Plot our new location on the MSE surface
```
Gradient descent is a stupidly simple approach, but it often converges quite slowly (as seen above). Luckily, we don't actually have to use it to find our MSE optimal line. We can actually take a shortcut by remembering that the gradient is zero at the minimum. Thus, we can solve for the optimal $w_0$ and $w_1$ values by setting the gradient to zero:
\begin{align}
\frac{-2}{N_\mathrm{samples}} \mathbf{X}^T(\mathbf{y}-\mathbf{X}\mathbf{w}) &= 0 \\
\mathbf{X}^T \mathbf{y} &= \mathbf{X}^T \mathbf{X}\mathbf{w}\\
(\mathbf{X}^T \mathbf{X})^{-1} \mathbf{X}^T \mathbf{y} &= \mathbf{w}
\end{align}
where the last line now corresponds to the classical expression for solving a linear regression problem.
```python
# Optimal solution
wOpt = np.dot(np.dot(np.linalg.inv(np.dot(X.T, X)), X.T), y)
yHat = lineFun(wOpt[0], wOpt[1], x)
mseFun = lambda y, yHat: np.mean((y-yHat)**2)
mseTest = mseFun(y, yHat)
fig = plt.figure(figsize=(15, 5))
ax = plt.subplot(1, 2, 1)
for xi, yi, yHati in zip(x, y, yHat):
ax.stem([xi], [yi], 'r', bottom=yHati)
ax.plot(x, yHat, 'b-')
ax.plot(x, y, 'ko')
ax.set_xlabel('x')
ax.set_ylabel('y');
ax.set_title('MSE: ' + '%1.3f' % mseTest);
ax = plt.subplot(1, 2, 2)
ax.contourf(W0, W1, mseVals, 50, cmap=cm.coolwarm)
ax.set_xlabel('$w_0$')
ax.set_ylabel('$w_1$');
ax.plot(wOpt[0], wOpt[1], 'o', ms=6, c=[1, 1, 1]);
```
### What if x is multi-dimensional?
The general solution derived above works even if $\mathbf{x}_i$ is multi-dimensional. The only difference is that we now try to fit a plane or a hyper-plane to the data instead of just a line.
```python
# Initialize
w0 = 0.5 # True parameter values for w0, w1, and w2
w1 = 0.5
w2 = 0.5
sigma = 1. # noise std
nGrid = 11 # x-grid resolution
X1, X2 = np.meshgrid(np.linspace(-5, 5, nGrid), np.linspace(-5, 5, nGrid)) # x1 ans x2 values on a grid
planeFun = lambda w0, w1, w2, x1, x2: w0 + w1*x1 + w2*x2 # linear mapping
# Generate noisy y-data
y = planeFun(w0, w1, w2, X1, X2).ravel()
y += np.random.randn(nGrid**2)*sigma
# Find the least squares solution
X = np.vstack([np.ones(nGrid**2), X1.ravel(), X2.ravel()]).T
wOpt = np.dot(np.dot(np.linalg.inv(np.dot(X.T, X)), X.T), y)
yHat = planeFun(wOpt[0], wOpt[1], wOpt[2], X1, X2)
# Plot the found least squares plane and the data points
fig = plt.figure(figsize=(7.5, 5))
ax = fig.add_subplot(111, projection='3d')
ax.plot(X1.ravel(), X2.ravel(), y, 'ko')
ax.plot_surface(X1, X2, yHat, cmap=cm.coolwarm, linewidth=0, antialiased=False, alpha=0.5)
ax.set_xlabel('x_1')
ax.set_ylabel('x_2')
ax.set_zlabel('y');
```
### Maximum likelihood, a second approach
Another approach to finding an optimal line would be to assume that each observed $y$-value consists of two parts, signal and noise, such that
\begin{equation}
y_i = \mathbf{x}_i^T \mathbf{w} + \epsilon_i
\end{equation}
where $\epsilon$ is a normally distributed random noise term with zero mean. If the noise is identically distributed across samples and all data points (samples) are independent, then we can score various mappings (unique $\mathbf{w}$ vectors) by how likely it is that each particular mapping would have generated the observed data. That is, the probability of observing any single data point for the mapping $\mathbf{w}$ is
\begin{equation}
P(y_i|\mathbf{w}) = \frac{1}{\sqrt{2 \pi \sigma^2}} \exp \left( -\frac{(y_i - \mathbf{x}_i^T \mathbf{w})^2}{2 \sigma^2} \right),
\end{equation}
and subsequently, the likelihood for observing all independent data points are:
\begin{equation}
l(\mathbf{y}|\mathbf{w}) = \prod_i^{N_\mathrm{samples}} \frac{1}{\sqrt{2 \pi \sigma^2}} \exp \left( -\frac{(y_i - \mathbf{x}_i^T \mathbf{w})^2}{2 \sigma^2} \right).
\end{equation}
It is however a bit cumbersome to work with likelihoods, but luckily we can work with the simpler log-likelihood instead. Taking logs of the likelihood thus gives:
\begin{align}
ll(\mathbf{y}|\mathbf{w}) &= \sum_i^{N_\mathrm{samples}} \ -\frac{(y_i - \mathbf{x}_i^T \mathbf{w})^2}{2 \sigma^2} - N_\mathrm{samples} \log( \sqrt{2 \pi \sigma^2} ) \\
ll(\mathbf{y}|\mathbf{w}) &= \frac{-1}{2 \sigma^2} (\mathbf{y}-\mathbf{X}\mathbf{w})^T(\mathbf{y}-\mathbf{X}\mathbf{w}) - N_\mathrm{samples} \log( \sqrt{2 \pi \sigma^2} )
\end{align}
The log-likelihood represents a function which we would like to maximize, in contrast to the MSE, as we want to find the $\mathbf{w}$ that is most likely to have generated the data. However, the gradient is zero at both a maximum and a minimum, and we can thus find the $\mathbf{w}$ at the maximum by setting the gradient of the log-likelihood function to zero.
\begin{align}
\frac{d\,ll(\mathbf{y}|\mathbf{w})}{d\mathbf{w}} = \frac{1}{\sigma^2} \mathbf{X}^T(\mathbf{y}-\mathbf{X}\mathbf{w}) &= 0 \\
\mathbf{X}^T \mathbf{y} &= \mathbf{X}^T \mathbf{X}\mathbf{w}\\
(\mathbf{X}^T \mathbf{X})^{-1} \mathbf{X}^T \mathbf{y} &= \mathbf{w}
\end{align}
Amazingly, we can now see that the maximum likelihood solution is the same as the least squares solution.
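Since both criteria lead to the same normal equations, we can also double-check the closed-form solution numerically against an independent least-squares solver (a small sketch, reusing the `X` and `y` arrays from the preceding cells):
```python
# Compare the normal-equations solution with numpy's least squares solver
wNormalEq = np.dot(np.dot(np.linalg.inv(np.dot(X.T, X)), X.T), y)
wLstsq = np.linalg.lstsq(X, y, rcond=None)[0]
print(np.allclose(wNormalEq, wLstsq))  # expect True
```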
### Orthogonal projections, a third approach
If we go back and look at how we described our linear mapping in matrix form
\begin{equation}
\hat{\mathbf{y}} = \mathbf{X} \mathbf{w}
\end{equation}
we notice that all possible prediction vectors ($\hat{\mathbf{y}}$) live in the column space of $\mathbf{X}$, that is, in the space spanned by the columns in $\mathbf{X}$. The intuitive explanation for this is that all prediction vectors are constructed as a linear combination of the columns in $\mathbf{X}$, and thus these must lie in the column space of $\mathbf{X}$. This also means that if $\mathbf{y}$ is not in the column space of $\mathbf{X}$, then we cannot describe it perfectly as a linear combination of the columns in $\mathbf{X}$. However, we can search for an orthogonal projection of $\mathbf{y}$ onto the column space, and this will represent the $\hat{\mathbf{y}}$ that is closest (euclidean norm) to $\mathbf{y}$, that is, our best possible approximation. These ideas are illustrated below, where the dashed black lines represent the columns of $\mathbf{X}$, the gray plane the column space, the black line $\mathbf{y}$, the blue line our orthogonal projection of $\mathbf{y}$, and the red line the error $\mathbf{y}-\hat{\mathbf{y}}$
```python
# Example X and y selected to get a decent figure
X = np.array([[1, 1, 1],[3, 1, 0.5]]).T
y = np.array([7, 14, 1])
wOpt = np.dot(np.dot(np.linalg.inv(np.dot(X.T, X)), X.T), y)
u = np.dot(X, wOpt)
# The columns space
nGrid = 11
X1 = np.zeros([nGrid, nGrid])
X2 = np.zeros([nGrid, nGrid])
X3 = np.zeros([nGrid, nGrid])
W1, W2 = np.meshgrid(np.linspace(-5, 5, nGrid), np.linspace(-5, 5, nGrid))
for i in range(nGrid):
for j in range(nGrid):
X1[i, j] = W1[i, j]*X[0, 0] + W2[i, j]*X[0, 1]
X2[i, j] = W1[i, j]*X[1, 0] + W2[i, j]*X[1, 1]
X3[i, j] = W1[i, j]*X[2, 0] + W2[i, j]*X[2, 1]
# Plot columns vectors, the column space, y, and the orthogonal projection of y onto the column space
fig = plt.figure(figsize=(10, 7.5))
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X1, X2, X3, color='gray', linewidth=0, antialiased=False, alpha=0.5)
scaling = 5
ax.plot([0, scaling*X[0, 0]], [0, scaling*X[1, 0]], [0, scaling*X[2, 0]], 'k--', lw=2)
ax.plot([0, scaling*X[0, 1]], [0, scaling*X[1, 1]], [0, scaling*X[2, 1]], 'k--', lw=2)
ax.plot([0, y[0]], [0, y[1]], [0, y[2]], 'k-', lw=2)
ax.plot([0, u[0]], [0, u[1]], [0, u[2]], 'b-', lw=2)
ax.plot([y[0], u[0]], [y[1], u[1]], [y[2], u[2]], 'r-', lw=2)
ax.grid(False)
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_zlabel('$x_3$');
```
The question is then how to find the orthogonal projection of $\mathbf{y}$. We do, however, know that the inner product between two vectors is zero if they are orthogonal. The error $\mathbf{y}-\hat{\mathbf{y}}$ should thus be orthogonal to all columns in $\mathbf{X}$. If we now write this in mathematical terms, we get:
\begin{align}
<\mathbf{X}, (\mathbf{y} - \hat{\mathbf{y}})> = <\mathbf{X}, (\mathbf{y} - \mathbf{X} \mathbf{w})> &= 0, \\
\mathbf{X}^T(\mathbf{y}-\mathbf{X}\mathbf{w}) &= 0, \\
\mathbf{X}^T \mathbf{y} &= \mathbf{X}^T \mathbf{X}\mathbf{w}, \\
(\mathbf{X}^T \mathbf{X})^{-1} \mathbf{X}^T \mathbf{y} &= \mathbf{w},
\end{align}
and amazingly see that this approach also yields the exact same solution as the original least squares approach.
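The projection view also suggests a direct numerical check: the projection (hat) matrix $\mathbf{P} = \mathbf{X}(\mathbf{X}^T\mathbf{X})^{-1}\mathbf{X}^T$ maps $\mathbf{y}$ onto the column space, and the resulting residual should be orthogonal to every column of $\mathbf{X}$ (a small sketch, reusing the 3-sample `X`, `y`, and `u` from the code cell above):
```python
# Projection (hat) matrix and orthogonality of the residual
P = np.dot(np.dot(X, np.linalg.inv(np.dot(X.T, X))), X.T)
yHat = np.dot(P, y)
print(np.allclose(yHat, u))                   # matches the plotted projection
print(np.allclose(np.dot(X.T, y - yHat), 0))  # residual is orthogonal to columns of X
```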
|
{-# OPTIONS --sized-types #-}
open import FRP.JS.Bool using ( Bool ; true ; false ) renaming ( _≟_ to _≟b_ )
open import FRP.JS.Nat using ( ℕ )
open import FRP.JS.Float using ( ℝ ) renaming ( _≟_ to _≟n_ )
open import FRP.JS.String using ( String ) renaming ( _≟_ to _≟s_ )
open import FRP.JS.Array using ( Array ) renaming ( lookup? to alookup? ; _≟[_]_ to _≟a[_]_ )
open import FRP.JS.Object using ( Object ) renaming ( lookup? to olookup? ; _≟[_]_ to _≟o[_]_ )
open import FRP.JS.Maybe using ( Maybe ; just ; nothing )
open import FRP.JS.Size using ( Size ; ↑_ )
module FRP.JS.JSON where
data JSON : {σ : Size} → Set where
null : ∀ {σ} → JSON {σ}
string : ∀ {σ} → String → JSON {σ}
float : ∀ {σ} → ℝ → JSON {σ}
bool : ∀ {σ} → Bool → JSON {σ}
array : ∀ {σ} → Array (JSON {σ}) → JSON {↑ σ}
object : ∀ {σ} → Object (JSON {σ}) → JSON {↑ σ}
{-# COMPILED_JS JSON function(x,v) {
if (x === null) { return v.null(null); }
else if (x.constructor === String) { return v.string(null,x); }
else if (x.constructor === Number) { return v.float(null,x); }
else if (x.constructor === Boolean) { return v.bool(null,x); }
else if (x.constructor === Array) { return v.array(null,x); }
else { return v.object(null,x); }
} #-}
{-# COMPILED_JS null function() { return null; } #-}
{-# COMPILED_JS string function() { return function(x) { return x; }; } #-}
{-# COMPILED_JS float function() { return function(x) { return x; }; } #-}
{-# COMPILED_JS bool function() { return function(x) { return x; }; } #-}
{-# COMPILED_JS array function() { return function(x) { return x; }; } #-}
{-# COMPILED_JS object function() { return function(x) { return x; }; } #-}
postulate
show : JSON → String
parse : String → Maybe JSON
{-# COMPILED_JS show JSON.stringify #-}
{-# COMPILED_JS parse require("agda.box").handle(JSON.parse) #-}
Key : Bool → Set
Key true = String
Key false = ℕ
lookup? : ∀ {σ} → Maybe (JSON {↑ σ}) → ∀ {b} → Key b → Maybe (JSON {σ})
lookup? (just (object js)) {true} k = olookup? js k
lookup? (just (array js)) {false} i = alookup? js i
lookup? _ _ = nothing
_≟_ : ∀ {σ τ} → JSON {σ} → JSON {τ} → Bool
null ≟ null = true
string s ≟ string t = s ≟s t
float m ≟ float n = m ≟n n
bool b ≟ bool c = b ≟b c
array js ≟ array ks = js ≟a[ _≟_ ] ks
object js ≟ object ks = js ≟o[ _≟_ ] ks
_ ≟ _ = false
|
= = = Synthesis of aziridines = = =
|
// Copyright (C) 2008-2018 Lorenzo Caminiti
// Distributed under the Boost Software License, Version 1.0 (see accompanying
// file LICENSE_1_0.txt or a copy at http://www.boost.org/LICENSE_1_0.txt).
// See: http://www.boost.org/doc/libs/release/libs/contract/doc/html/index.html
#include "no_lambdas.hpp"
#include <boost/bind.hpp>
#include <cassert>
//[no_lambdas_cpp
iarray::iarray(unsigned max, unsigned count) :
boost::contract::constructor_precondition<iarray>(boost::bind(
&iarray::constructor_precondition, max, count)),
values_(new int[max]), // Member initializations can be here.
capacity_(max)
{
boost::contract::old_ptr<int> old_instances;
boost::contract::check c = boost::contract::constructor(this)
.old(boost::bind(&iarray::constructor_old, boost::ref(old_instances)))
.postcondition(boost::bind(
&iarray::constructor_postcondition,
this,
boost::cref(max),
boost::cref(count),
boost::cref(old_instances)
))
;
for(unsigned i = 0; i < count; ++i) values_[i] = int();
size_ = count;
++instances_;
}
iarray::~iarray() {
boost::contract::old_ptr<int> old_instances;
boost::contract::check c = boost::contract::destructor(this)
.old(boost::bind(&iarray::destructor_old, this,
boost::ref(old_instances)))
.postcondition(boost::bind(&iarray::destructor_postcondition,
boost::cref(old_instances)))
;
delete[] values_;
--instances_;
}
void iarray::push_back(int value, boost::contract::virtual_* v) {
boost::contract::old_ptr<unsigned> old_size;
boost::contract::check c = boost::contract::public_function(v, this)
.precondition(boost::bind(&iarray::push_back_precondition, this))
.old(boost::bind(&iarray::push_back_old, this, boost::cref(v),
boost::ref(old_size)))
.postcondition(boost::bind(&iarray::push_back_postcondition, this,
boost::cref(old_size)))
;
values_[size_++] = value;
}
unsigned iarray::capacity() const {
// Check invariants.
boost::contract::check c = boost::contract::public_function(this);
return capacity_;
}
unsigned iarray::size() const {
// Check invariants.
boost::contract::check c = boost::contract::public_function(this);
return size_;
}
int iarray::instances() {
// Check static invariants.
boost::contract::check c = boost::contract::public_function<iarray>();
return instances_;
}
int iarray::instances_ = 0;
//]
int main() {
iarray a(3, 2);
assert(a.capacity() == 3);
assert(a.size() == 2);
a.push_back(-123);
assert(a.size() == 3);
return 0;
}
|
import tactic -- hide
/-
## Basic definition
Below we have one possible notion of being a subgroup. We will want to prove that
this definition matches the more natural one, and we will do so in this and the next levels.
On the left you will see a tab with theorems that you can use in your proofs. In this level
you will need to use `nonempty_of_subgroup` and `mul_inv_of_subgroup`, which follow
directly from the definition of subgroup and are in fact the way that we will be able
to access the definition.
Throughout, you will find the `group` tactic very useful; it works like the powerful `ring`
tactic, but for equalities involving elements of a group.
You will need to type inverses, which are written using a superscript "-1". You type it as
`\-1`, and you will see the `-1` appear as a superscript.
-/
variables {G : Type} [group G] {H : set G} -- hide
@[class] -- hide
def subgroup (X : set G) := X.nonempty ∧ (∀ x y, x ∈ X → y ∈ X → x * y⁻¹ ∈ X)
/- Axiom: subgroup (X : set G)
X.nonempty ∧ (∀ x y, x ∈ X → y ∈ X → x * y⁻¹ ∈ X)
-/
lemma nonempty_of_subgroup (X : set G) [h : subgroup X] : ∃ x, x ∈ X
:= h.1 -- hide
/- Axiom : nonempty_of_subgroup (X : set G) [subgroup X]
∃ x, x ∈ X
-/
lemma mul_inv_of_subgroup {X : set G} [h : subgroup X] {x y : G} (hx : x ∈ X) (hy : y ∈ X) : x * y⁻¹ ∈ X
:= h.2 x y hx hy -- hide
/- Axiom : mul_inv_of_subgroup {X : set G} [h : subgroup X] {x y : G}
(hx : x ∈ X) (hy : y ∈ X) :
x * y⁻¹ ∈ X
-/
/- Lemma:
If $H\leq G$, then $1 \in H$.
-/
lemma subgroup.one_mem [h : subgroup H]: (1 : G) ∈ H :=
begin
cases h.1 with x hx,
have h2 := h.2 x x hx hx,
rw show (1 : G) = x * x⁻¹, by group,
assumption,
end
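/-
A hedged follow-up sketch (not part of the original level): once `one_mem` is
available, closure under inverses follows from the same defining property.
-/
lemma subgroup.inv_mem [h : subgroup H] {x : G} (hx : x ∈ H) : x⁻¹ ∈ H :=
begin
  have h1 : (1 : G) ∈ H := subgroup.one_mem,
  -- specialising the defining property to 1 * x⁻¹ ∈ H
  have h2 := mul_inv_of_subgroup h1 hx,
  rwa one_mul at h2,
end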
|
lemma AE_finite_all: assumes f: "finite S" shows "(AE x in M. \<forall>i\<in>S. P i x) \<longleftrightarrow> (\<forall>i\<in>S. AE x in M. P i x)" |
[GOAL]
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
α : Type u_5
β : Type u_6
a : α
⊢ Function.Injective (mk a)
[PROOFSTEP]
intro b₁ b₂ h
[GOAL]
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
α : Type u_5
β : Type u_6
a : α
b₁ b₂ : β
h : (a, b₁) = (a, b₂)
⊢ b₁ = b₂
[PROOFSTEP]
simpa only [true_and, Prod.mk.inj_iff, eq_self_iff_true] using h
[GOAL]
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
α : Type u_5
β : Type u_6
b : β
⊢ Function.Injective fun a => (a, b)
[PROOFSTEP]
intro b₁ b₂ h
[GOAL]
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
α : Type u_5
β : Type u_6
b : β
b₁ b₂ : α
h : (fun a => (a, b)) b₁ = (fun a => (a, b)) b₂
⊢ b₁ = b₂
[PROOFSTEP]
simpa only [and_true, eq_self_iff_true, mk.inj_iff] using h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
p q : α × β
⊢ p = q ↔ p.fst = q.fst ∧ p.snd = q.snd
[PROOFSTEP]
rw [← @mk.eta _ _ p, ← @mk.eta _ _ q, mk.inj_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
p₁ : α
p₂ : β
q₁ : α
q₂ : β
⊢ (p₁, p₂) = (q₁, q₂) ↔ (p₁, p₂).fst = (q₁, q₂).fst ∧ (p₁, p₂).snd = (q₁, q₂).snd
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
a : α
b : β
x : α
⊢ (a, b).fst = x ↔ (a, b) = (x, (a, b).snd)
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
a : α
b x : β
⊢ (a, b).snd = x ↔ (a, b) = ((a, b).fst, x)
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → α → Prop
s✝ : β → β → Prop
x y : α × β
r : α → α → Prop
s : β → β → Prop
p q : α × β
h : Prod.Lex r s p q
⊢ r p.fst q.fst ∨ p.fst = q.fst ∧ s p.snd q.snd
[PROOFSTEP]
cases h
[GOAL]
case left
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → α → Prop
s✝ : β → β → Prop
x y : α × β
r : α → α → Prop
s : β → β → Prop
a₁✝ : α
b₁✝ : β
a₂✝ : α
b₂✝ : β
h✝ : r a₁✝ a₂✝
⊢ r (a₁✝, b₁✝).fst (a₂✝, b₂✝).fst ∨ (a₁✝, b₁✝).fst = (a₂✝, b₂✝).fst ∧ s (a₁✝, b₁✝).snd (a₂✝, b₂✝).snd
[PROOFSTEP]
simp [*]
[GOAL]
case right
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → α → Prop
s✝ : β → β → Prop
x y : α × β
r : α → α → Prop
s : β → β → Prop
a✝ : α
b₁✝ b₂✝ : β
h✝ : s b₁✝ b₂✝
⊢ r (a✝, b₁✝).fst (a✝, b₂✝).fst ∨ (a✝, b₁✝).fst = (a✝, b₂✝).fst ∧ s (a✝, b₁✝).snd (a✝, b₂✝).snd
[PROOFSTEP]
simp [*]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → α → Prop
s✝ : β → β → Prop
x y : α × β
r : α → α → Prop
s : β → β → Prop
p q : α × β
h✝ : r p.fst q.fst ∨ p.fst = q.fst ∧ s p.snd q.snd
a : α
b : β
c : α
d : β
e : (a, b).fst = (c, d).fst
h : s (a, b).snd (c, d).snd
⊢ Prod.Lex r s (a, b) (c, d)
[PROOFSTEP]
subst e
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → α → Prop
s✝ : β → β → Prop
x y : α × β
r : α → α → Prop
s : β → β → Prop
p q : α × β
h✝ : r p.fst q.fst ∨ p.fst = q.fst ∧ s p.snd q.snd
a : α
b d : β
h : s (a, b).snd ((a, b).fst, d).snd
⊢ Prod.Lex r s (a, b) ((a, b).fst, d)
[PROOFSTEP]
exact Lex.right _ h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
x y : α × β
inst✝¹ : IsIrrefl α r
inst✝ : IsIrrefl β s
⊢ ∀ (a : α × β), ¬Prod.Lex r s a a
[PROOFSTEP]
rintro ⟨i, a⟩ (⟨_, _, h⟩ | ⟨_, h⟩)
[GOAL]
case mk.left
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
x y : α × β
inst✝¹ : IsIrrefl α r
inst✝ : IsIrrefl β s
i : α
a : β
h : r i i
⊢ False
[PROOFSTEP]
exact irrefl _ h
[GOAL]
case mk.right
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
x y : α × β
inst✝¹ : IsIrrefl α r
inst✝ : IsIrrefl β s
i : α
a : β
h : s a a
⊢ False
[PROOFSTEP]
exact irrefl _ h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → α → Prop
s✝ : β → β → Prop
x y : α × β
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsTrichotomous α r
inst✝ : IsTotal β s
x✝¹ x✝ : α × β
i : α
a : β
j : α
b : β
⊢ Prod.Lex r s (i, a) (j, b) ∨ Prod.Lex r s (j, b) (i, a)
[PROOFSTEP]
obtain hij | rfl | hji := trichotomous_of r i j
[GOAL]
case inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → α → Prop
s✝ : β → β → Prop
x y : α × β
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsTrichotomous α r
inst✝ : IsTotal β s
x✝¹ x✝ : α × β
i : α
a : β
j : α
b : β
hij : r i j
⊢ Prod.Lex r s (i, a) (j, b) ∨ Prod.Lex r s (j, b) (i, a)
[PROOFSTEP]
exact Or.inl (.left _ _ hij)
[GOAL]
case inr.inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → α → Prop
s✝ : β → β → Prop
x y : α × β
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsTrichotomous α r
inst✝ : IsTotal β s
x✝¹ x✝ : α × β
i : α
a b : β
⊢ Prod.Lex r s (i, a) (i, b) ∨ Prod.Lex r s (i, b) (i, a)
[PROOFSTEP]
exact (total_of s a b).imp (.right _) (.right _)
[GOAL]
case inr.inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r✝ : α → α → Prop
s✝ : β → β → Prop
x y : α × β
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsTrichotomous α r
inst✝ : IsTotal β s
x✝¹ x✝ : α × β
i : α
a : β
j : α
b : β
hji : r j i
⊢ Prod.Lex r s (i, a) (j, b) ∨ Prod.Lex r s (j, b) (i, a)
[PROOFSTEP]
exact Or.inr (.left _ _ hji)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
x y : α × β
inst✝¹ : _root_.IsTrichotomous α r
inst✝ : _root_.IsTrichotomous β s
x✝¹ x✝ : α × β
i : α
a : β
j : α
b : β
⊢ Prod.Lex r s (i, a) (j, b) ∨ (i, a) = (j, b) ∨ Prod.Lex r s (j, b) (i, a)
[PROOFSTEP]
obtain hij | rfl | hji := trichotomous_of r i j
[GOAL]
case inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
x y : α × β
inst✝¹ : _root_.IsTrichotomous α r
inst✝ : _root_.IsTrichotomous β s
x✝¹ x✝ : α × β
i : α
a : β
j : α
b : β
hij : r i j
⊢ Prod.Lex r s (i, a) (j, b) ∨ (i, a) = (j, b) ∨ Prod.Lex r s (j, b) (i, a)
case inr.inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
x y : α × β
inst✝¹ : _root_.IsTrichotomous α r
inst✝ : _root_.IsTrichotomous β s
x✝¹ x✝ : α × β
i : α
a b : β
⊢ Prod.Lex r s (i, a) (i, b) ∨ (i, a) = (i, b) ∨ Prod.Lex r s (i, b) (i, a)
case inr.inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
x y : α × β
inst✝¹ : _root_.IsTrichotomous α r
inst✝ : _root_.IsTrichotomous β s
x✝¹ x✝ : α × β
i : α
a : β
j : α
b : β
hji : r j i
⊢ Prod.Lex r s (i, a) (j, b) ∨ (i, a) = (j, b) ∨ Prod.Lex r s (j, b) (i, a)
[PROOFSTEP]
{exact Or.inl (Lex.left _ _ hij)
}
[GOAL]
case inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
x y : α × β
inst✝¹ : _root_.IsTrichotomous α r
inst✝ : _root_.IsTrichotomous β s
x✝¹ x✝ : α × β
i : α
a : β
j : α
b : β
hij : r i j
⊢ Prod.Lex r s (i, a) (j, b) ∨ (i, a) = (j, b) ∨ Prod.Lex r s (j, b) (i, a)
[PROOFSTEP]
exact Or.inl (Lex.left _ _ hij)
[GOAL]
case inr.inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
x y : α × β
inst✝¹ : _root_.IsTrichotomous α r
inst✝ : _root_.IsTrichotomous β s
x✝¹ x✝ : α × β
i : α
a b : β
⊢ Prod.Lex r s (i, a) (i, b) ∨ (i, a) = (i, b) ∨ Prod.Lex r s (i, b) (i, a)
case inr.inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
x y : α × β
inst✝¹ : _root_.IsTrichotomous α r
inst✝ : _root_.IsTrichotomous β s
x✝¹ x✝ : α × β
i : α
a : β
j : α
b : β
hji : r j i
⊢ Prod.Lex r s (i, a) (j, b) ∨ (i, a) = (j, b) ∨ Prod.Lex r s (j, b) (i, a)
[PROOFSTEP]
{exact (trichotomous_of (s) a b).imp3 (Lex.right _) (congr_arg _) (Lex.right _)
}
[GOAL]
case inr.inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
x y : α × β
inst✝¹ : _root_.IsTrichotomous α r
inst✝ : _root_.IsTrichotomous β s
x✝¹ x✝ : α × β
i : α
a b : β
⊢ Prod.Lex r s (i, a) (i, b) ∨ (i, a) = (i, b) ∨ Prod.Lex r s (i, b) (i, a)
[PROOFSTEP]
exact (trichotomous_of (s) a b).imp3 (Lex.right _) (congr_arg _) (Lex.right _)
[GOAL]
case inr.inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
x y : α × β
inst✝¹ : _root_.IsTrichotomous α r
inst✝ : _root_.IsTrichotomous β s
x✝¹ x✝ : α × β
i : α
a : β
j : α
b : β
hji : r j i
⊢ Prod.Lex r s (i, a) (j, b) ∨ (i, a) = (j, b) ∨ Prod.Lex r s (j, b) (i, a)
[PROOFSTEP]
{exact Or.inr (Or.inr $ Lex.left _ _ hji)
}
[GOAL]
case inr.inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
x y : α × β
inst✝¹ : _root_.IsTrichotomous α r
inst✝ : _root_.IsTrichotomous β s
x✝¹ x✝ : α × β
i : α
a : β
j : α
b : β
hji : r j i
⊢ Prod.Lex r s (i, a) (j, b) ∨ (i, a) = (j, b) ∨ Prod.Lex r s (j, b) (i, a)
[PROOFSTEP]
exact Or.inr (Or.inr $ Lex.left _ _ hji)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
f : α → γ
g : β → δ
f₁ : α → β
g₁ : γ → δ
f₂ : β → α
g₂ : δ → γ
hf : LeftInverse f₁ f₂
hg : LeftInverse g₁ g₂
a : β × δ
⊢ map f₁ g₁ (map f₂ g₂ a) = a
[PROOFSTEP]
rw [Prod.map_map, hf.comp_eq_id, hg.comp_eq_id, map_id, id]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Nonempty α
inst✝ : Nonempty β
f : α → γ
g : β → δ
h : Injective (map f g)
a₁ a₂ : α
ha : f a₁ = f a₂
⊢ a₁ = a₂
[PROOFSTEP]
inhabit β
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Nonempty α
inst✝ : Nonempty β
f : α → γ
g : β → δ
h : Injective (map f g)
a₁ a₂ : α
ha : f a₁ = f a₂
inhabited_h : Inhabited β
⊢ a₁ = a₂
[PROOFSTEP]
injection @h (a₁, default) (a₂, default) (congr_arg (fun c : γ => Prod.mk c (g default)) ha : _)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Nonempty α
inst✝ : Nonempty β
f : α → γ
g : β → δ
h : Injective (map f g)
b₁ b₂ : β
hb : g b₁ = g b₂
⊢ b₁ = b₂
[PROOFSTEP]
inhabit α
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Nonempty α
inst✝ : Nonempty β
f : α → γ
g : β → δ
h : Injective (map f g)
b₁ b₂ : β
hb : g b₁ = g b₂
inhabited_h : Inhabited α
⊢ b₁ = b₂
[PROOFSTEP]
injection @h (default, b₁) (default, b₂) (congr_arg (Prod.mk (f default)) hb : _)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Nonempty γ
inst✝ : Nonempty δ
f : α → γ
g : β → δ
h : Surjective (map f g)
c : γ
⊢ ∃ a, f a = c
[PROOFSTEP]
inhabit δ
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Nonempty γ
inst✝ : Nonempty δ
f : α → γ
g : β → δ
h : Surjective (map f g)
c : γ
inhabited_h : Inhabited δ
⊢ ∃ a, f a = c
[PROOFSTEP]
obtain ⟨⟨a, b⟩, h⟩ := h (c, default)
[GOAL]
case intro.mk
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Nonempty γ
inst✝ : Nonempty δ
f : α → γ
g : β → δ
h✝ : Surjective (map f g)
c : γ
inhabited_h : Inhabited δ
a : α
b : β
h : map f g (a, b) = (c, default)
⊢ ∃ a, f a = c
[PROOFSTEP]
exact ⟨a, congr_arg Prod.fst h⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Nonempty γ
inst✝ : Nonempty δ
f : α → γ
g : β → δ
h : Surjective (map f g)
d : δ
⊢ ∃ a, g a = d
[PROOFSTEP]
inhabit γ
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Nonempty γ
inst✝ : Nonempty δ
f : α → γ
g : β → δ
h : Surjective (map f g)
d : δ
inhabited_h : Inhabited γ
⊢ ∃ a, g a = d
[PROOFSTEP]
obtain ⟨⟨a, b⟩, h⟩ := h (default, d)
[GOAL]
case intro.mk
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Nonempty γ
inst✝ : Nonempty δ
f : α → γ
g : β → δ
h✝ : Surjective (map f g)
d : δ
inhabited_h : Inhabited γ
a : α
b : β
h : map f g (a, b) = (default, d)
⊢ ∃ a, g a = d
[PROOFSTEP]
exact ⟨b, congr_arg Prod.snd h⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Nonempty α
inst✝ : Nonempty β
f : α → γ
g : β → δ
⊢ Bijective (map f g) ↔ Bijective f ∧ Bijective g
[PROOFSTEP]
haveI := Nonempty.map f ‹_›
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Nonempty α
inst✝ : Nonempty β
f : α → γ
g : β → δ
this : Nonempty γ
⊢ Bijective (map f g) ↔ Bijective f ∧ Bijective g
[PROOFSTEP]
haveI := Nonempty.map g ‹_›
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Nonempty α
inst✝ : Nonempty β
f : α → γ
g : β → δ
this✝ : Nonempty γ
this : Nonempty δ
⊢ Bijective (map f g) ↔ Bijective f ∧ Bijective g
[PROOFSTEP]
exact (map_injective.and map_surjective).trans (and_and_and_comm)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Nonempty β
inst✝ : Nonempty δ
f₁ : α → β
g₁ : γ → δ
f₂ : β → α
g₂ : δ → γ
h : LeftInverse (map f₁ g₁) (map f₂ g₂)
b : β
⊢ f₁ (f₂ b) = b
[PROOFSTEP]
inhabit δ
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Nonempty β
inst✝ : Nonempty δ
f₁ : α → β
g₁ : γ → δ
f₂ : β → α
g₂ : δ → γ
h : LeftInverse (map f₁ g₁) (map f₂ g₂)
b : β
inhabited_h : Inhabited δ
⊢ f₁ (f₂ b) = b
[PROOFSTEP]
exact congr_arg Prod.fst (h (b, default))
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Nonempty β
inst✝ : Nonempty δ
f₁ : α → β
g₁ : γ → δ
f₂ : β → α
g₂ : δ → γ
h : LeftInverse (map f₁ g₁) (map f₂ g₂)
d : δ
⊢ g₁ (g₂ d) = d
[PROOFSTEP]
inhabit β
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Nonempty β
inst✝ : Nonempty δ
f₁ : α → β
g₁ : γ → δ
f₂ : β → α
g₂ : δ → γ
h : LeftInverse (map f₁ g₁) (map f₂ g₂)
d : δ
inhabited_h : Inhabited β
⊢ g₁ (g₂ d) = d
[PROOFSTEP]
exact congr_arg Prod.snd (h (default, d))
|
[STATEMENT]
lemma aligned_sorted_separators: "aligned l (Node ts t) u \<Longrightarrow> sorted_less (l#(separators ts)@[u])"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. aligned l (Node ts t) u \<Longrightarrow> sorted_less (l # separators ts @ [u])
[PROOF STEP]
by (smt (verit, ccfv_threshold) aligned_sorted_inorder separators_in_inorder sorted_inorder_separators sorted_lems(2) sorted_wrt.simps(2) sorted_wrt_append subset_eq) |
import Data.Vect
%default total
-- variable
x : Int
x = 5
-- function1 incr
incr : Int -> Int
incr x = x + 1
-- function2 add
add : Int -> Int -> Int
add x y = x + y
-- adt sum type
data TrafficLight = Red | Yellow | Green
-- function adt show
show : TrafficLight -> String
show Red = "R"
show Yellow = "Y"
show Green = "G"
-- mylist
data MyList : Type -> Type where
Empty : MyList t
Cons : (elem : t) -> MyList t -> MyList t
-- variable xs vect
xs : Vect 3 Int
xs = [1,2,3]
-- vtake
vtake : (n:Nat) -> Vect (n+m) t -> Vect n t
vtake Z xs = []
vtake (S k) (x :: xs) = x :: vtake k xs
-- vtake to string
elems : String
elems = show ( vtake 2 [1,2,3,4] )
-- vappend
vappend : Vect n t -> Vect m t -> Vect (n+m) t
vappend [] ys = ys
vappend (x :: xs) ys = x :: vappend xs ys
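-- A hedged usage sketch (not in the original file): the combined length is
-- tracked in the type, so the definition below checks as a Vect 5 Int
ys : Vect 5 Int
ys = vappend xs [4,5]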
|
# ifndef LANGIL_H
# define LANGIL_H
///////////////////////////////////////////////////////
////// GILLESPIE CLASS
//////
////// R. Perez-Carrasco
////// Created: 16 Nov '13
///////////////////////////////////////////////////////
/* Class for simulating the master equation through the langil algorithm. It defines three classes:
- langil: in charge of the integration
- species: contains a name, a number, and methods to change the number
- reaction: contains a name, an associated stoichiometry, and a pointer to a propensity-computing function
*/
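/* A hedged usage sketch (hypothetical driver code, not part of the original
source; the GetN() accessor on species is an assumption):

double birth(v_species& x) { return 10.0; }               // constant production
double death(v_species& x) { return 0.1 * x[0]->GetN(); } // assumes a GetN() accessor

langil sim("traj.dat", 100);              // output file and system volume
sim.AddSpecies("A", 0);                   // one molecular species
sim.Add_Reaction("birth", { +1 }, birth);
sim.Add_Reaction("death", { -1 }, death);
sim.SetRunType(GILLESPIE);                // exact stochastic simulation
sim.SetWriteState(TIMESTEP, 0.1);         // record every 0.1 time units
sim.Run(1000.);
*/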
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <string>
#include <fstream>
#include <iostream>
#include <unordered_map>
#include <vector>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <iomanip>
//#include <omp.h>
#include "species.h"
#include "cell_cycle.h"
#define VERBOSE 1
#define NON_VERBOSE 0
#define TIMESTEPMULT 3
#define TIMESTEP 0
#define ALL 1
#define NOWRITE 2
#define LASTPOINT 4
#define GILLESPIE 0
#define MACROSCOPIC 2
#define CLEEULER 1
#define CLEMILSTEIN 3
#define ADIABATIC true
#define NONADIABATIC false
#define GEOMETRIC true
#define NONGEOMETRIC false
#define DETERMINISTIC_TIME true
#define STOCHASTIC_TIME false
#define EXPONENTIAL_REACTION 0
#define GEOMETRIC_REACTION 1
#define DETERMINISTIC_REACTION 2
#define ADIABATIC_REACTION 3
using namespace std;
struct StructDivisionState{
double time;
double species0before;
double species1before;
double species0after;
double species1after;
};
class reaction; // defined after langil
class species; // defined after reaction
class expressionzone; // defined in a separate file
class langil{
friend class expressionzone;
// friend class reaction; // reaction have access to the state of the system
public:
langil(string outputfilename, double vol=100, int seed=-1);
void SetState(string aname, float anumber); //Set Initial conditions
double GetState(string a); // get the state of the selected species
void SetTime(double time); //Set current time
void AddSpecies(string aname, float anum); // Add a new molecular species
void AddSpeciesTimeTracking(string aname, float anum, double lifetime); // Add a molecular species with a timer
void AddCellCycleSpecies(); // add a cell cycle to the system
void addCellPhase(double duration, int type_phase); // add a cell phase to the cell cycle
void PrintSummarySpecies();
string StateString();
void Add_Reaction(string name,vector<int> stoich,double (*prop_f)(v_species&),int typeofrec = EXPONENTIAL_REACTION); // Add a reaction to the system overloaded to create directly
void UpdateTime(); // Update times for the deterministic events
void SetRunType(int rt,double dt=0);
double GillespieStep(double timelimit = -1); // Advance one step of the Gillespie algorithm and return the time of the step. If timelimit is set, the reaction is not executed when it would occur later than timelimit
double LangevinEulerStep(double timelimit = -1); // Make an Euler-Maruyama integration step
double LangevinMilsteinStep(double timelimit = -1); // Make an SDE integration step with the Milstein algorithm (not tested)
double MacroStep(double timlimit = -1); // Make an Euler (deterministic) step
double MacroAdiabaticStep(double dtt); // Make an Euler step for the adiabatic species
double Run(double time, bool verbose=true); // React until the simulation reaches the given time
double RunTimes(int N,double time, bool verbose=true); // Run Run() N times
double RunTransition(bool (*prop_f)(v_species&), bool (*prop_g)(v_species&), int loops, double maxT); // Run transition between two points, if loops=1 it also records the transition trajectory
void WriteState(); // Write state in file
void WriteTempState(); // Write state in file
void WritePrevrecState(); // Write previous recorded state in file
void SetWriteState(int flag, double tl=0);// Switch between different states:
double (langil::*MakeStep)(double); // pointer to the actual integration chosen
// TIMESTEP: write the state every tl time lapse
// ALL: write after each reaction takes place
// NOWRITE: don't write anything in output file
double SetLangevinTimeStep(double dx); // set dt to a characteristic time that will increase with the size of the system
void Add_TimeAction(double (*actionfunc)(double));
void RunTimeAction();
void setPhaseDuration(int phase, double duration, int type_phase);
void Set_AdiabaticReaction(string aname);
void Set_GeometricReaction(string aname);
void Set_Boundary_Behaviour(void (*b_b)(v_species&, v_species&)); // Boundary behaviour
void Set_StoreDivisionTimes();// activate the storage of division times in DivisionTimes vector
void (*Boundary_Behaviour)(v_species&, v_species&); // boundary behaviour function
vector<StructDivisionState> Get_HistoryDivision();
void Reset_HistoryDivision();
void SetDivisionHistory(int b,int a);
protected:
v_species x; //value of each species. shared_ptr is used since the vector can contain species and also children of that class (i.e. different classes)
vector<reaction> r;//vector with all the reactions
double time; // current time of simulation
gsl_rng * rng; // allocator for the rng generator
vector<double> rnd; // rnd numbers
vector<double> xtemp; // temporal vector for integrations
v_species xtemp0,xtemp1; // temporal vector for integrations
double pro,pro0; // auxiliary variables for the program
int sto; // auxiliary value for stoichiometry
double totalprop,cumprop, detprop; // sum of the propensities
double nexttau; // time of the next step
vector<reaction>::iterator nextreaction; // reaction selected to react
vector<int>* nextstoichiometry;
double Omega; // volume of the system to change between concentrations and absolute numbers
double signal; // variable through which an external signal can modulate the time course
// may be changed through time actions with Add_TimeAction
int celldivided; // flag to track possible celldivision
int geostoichiometry; // dummy variable to set random geometric stoichiometries
bool record; // Set record of the trajectory
ofstream trajfile; // File for output traj
string outputfilename; // File for output name
int fwrite; // flag for write state when integrating
double timelapse; // dt for recording if TIMESTEP
double nextrectime; // t for recording if TIMESTEP
double totaltime; // total integration time for the trajectory
double dt; // timestep used in integrations for macroscopic and langevin (if negative, it is used by Langevin as a prediction for dx)
int cellcyclespeciesidx; // index of the species vector that tracks cell cycle phases
cell_cycle cell; // cell_cycle object to manage cell cycle events
bool fStoreDivisionTimes; // flag to store Division Times
vector<StructDivisionState> DivisionHistoryVector; // Division history vector. Each component is a vector
StructDivisionState divisionstate;
vector<double(*)(double)> actionfuncvec; // vector to time action functions
};
////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
class reaction{//Class containing the info of each reaction. Each reaction added
// through the method Add_Reaction(reaction) will be stored as an element in a vector
// by langil class. The pointer x, points out to the state of the langil system and
// is managed automatically by langil when a reaction is added to a langil instance
// It also has the property "adiabatic", meaning that it is a fast variable and can always be simulated
// from its deterministic behaviour
friend class langil;
public:
reaction(string aname,vector<int> asto,double (*aprop_f)(v_species&)=NULL,int reacttype= EXPONENTIAL_REACTION,
vector<reaction>* x0=NULL){
name=aname;
stoichiometry=asto;
prop_f=aprop_f;
reactiontype=reacttype;
}
double GetPropensity(){
return prop_f(*x);
}
double GetPropensity(v_species& x0){
return prop_f(x0);
}
string GetName(){
return name;
}
void SetPropensity(double (*p_f)(v_species&)){
prop_f=p_f;
}
void SetState(v_species& x0){
x=&x0;
}
vector<int>* GetStoichiometry(){
return &stoichiometry;
}
bool IsAdiabatic(){
return (reactiontype == ADIABATIC_REACTION);
}
bool IsNotAdiabatic(){
return (reactiontype != ADIABATIC_REACTION);
}
bool IsGeometric(){
return (reactiontype == GEOMETRIC_REACTION);
}
bool IsNotGeometric(){
return (reactiontype != GEOMETRIC_REACTION);
}
bool IsNotDetermTime(){
return (reactiontype != (DETERMINISTIC_REACTION));
}
bool IsDetermTime(){
return (reactiontype == (DETERMINISTIC_REACTION));
}
void SetAdiabatic(){
reactiontype = ADIABATIC_REACTION;
}
void SetGeometric(){
reactiontype = GEOMETRIC_REACTION;
}
void SetDeterministicTime(){
reactiontype = DETERMINISTIC_REACTION;
}
protected:
string name; // name of the reaction
vector<int> stoichiometry; // stoichiometry
double (*prop_f)(v_species&); // propensity function
v_species* x; //value of each species. shared_ptr is used since the vector can contain species and also children of that class (i.e. different classes)
int reactiontype; // can be any of the macros EXPONENTIAL_REACTION, DETERMINISITIC_REACTION, etc.
// of the system
};
#endif
|
Formal statement is: lemma big_small_mult: "f1 \<in> L F (g1) \<Longrightarrow> f2 \<in> l F (g2) \<Longrightarrow> (\<lambda>x. f1 x * f2 x) \<in> l F (\<lambda>x. g1 x * g2 x)" Informal statement is: If $f_1$ is big-O of $g_1$ and $f_2$ is little-o of $g_2$, then $f_1 f_2$ is little-o of $g_1 g_2$. |
module PermConsProperties
import Data.Vect
import congruence
import PermCons
%access public export
%default total
||| a :: (perm xs) = (includePerm perm) (a :: xs)
PermC_prp_1 : (n : Nat) -> (a : Nat) -> (xs : (Vect n Nat)) ->
(perm : (PermC n)) ->
( (a :: (applyPerm n Nat perm xs)) =
(applyPerm (S n) Nat (includePerm n perm) (a :: xs)))
PermC_prp_1 Z a Nil perm = Refl
PermC_prp_1 (S Z) a [x] perm = Refl
PermC_prp_1 (S (S k)) a xs (Idt (S (S k))) = Refl
PermC_prp_1 (S (S k)) a xs (Swap (S (S k)) FZ) = Refl
PermC_prp_1 (S (S k)) a xs (Swap (S (S k)) (FS l)) = Refl
PermC_prp_1 (S (S k)) a xs (CPerm (S (S k)) f g) = let
f1 = applyPerm (S (S k)) Nat f
f2 = applyPerm (S (S (S k))) Nat (includePerm (S (S k)) f)
g1 = applyPerm (S (S k)) Nat g
g2 = applyPerm (S (S (S k))) Nat (includePerm (S (S k)) g)
pf1 = PermC_prp_1 (S (S k)) a xs g -- a :: (g1 xs) = (includePerm g1) (a :: xs)
pf2 = congruence (Vect (S (S (S k))) Nat) (Vect (S (S (S k))) Nat)
(a :: (g1 xs)) (g2 (a :: xs)) f2 pf1 -- f2 (a :: g1 xs) = f2 (g2 (a :: xs))
v1 = g1 xs
pf3 = PermC_prp_1 (S (S k)) a v1 f -- a :: (f1 v1) = f2 (a :: v1)
in
(trans pf3 pf2)
PermC_prp_2 : (n : Nat) -> (u : (Vect n Nat)) -> ((applyPerm n Nat (Idt n) u) = u)
PermC_prp_2 Z u = Refl
PermC_prp_2 (S Z) u = Refl
PermC_prp_2 (S (S k)) u = Refl
|
module Flexidisc.Record.Transformation
import Flexidisc.Record
import Flexidisc.RecordContent
import Flexidisc.OrdList.Disjoint
import Flexidisc.OrdList.Nub
import public Flexidisc.Transformation.Type
import public Flexidisc.Transformation.TransHeader
import public Control.Monad.Identity
%default total
%access export
||| A list of transformations of tagged values.
||| Its purpose is to apply several transformations to different fields
||| simultaneously.
data TransformationM : (m : Type -> Type) -> (k : Type) ->
(header : TransHeader k) -> Type where
Trans : (o : Ord k) =>
(values : MapValuesM m k o header) ->
(nubProof : Nub header) ->
TransformationM m k (T header)
public export
Transformation : (k : Type) -> (header : TransHeader k) -> Type
Transformation = TransformationM id
||| Monomorphic `id` to help inference
transM : (m : Type -> Type) -> TransformationM m k header -> TransformationM m k header
transM _ = id
||| Monomorphic `id` to help inference
trans : TransformationM Identity k header -> TransformationM Identity k header
trans = id
||| The empty record
Nil : Ord k => TransformationM m k []
Nil = Trans [] []
||| Insert a new transformation function in a `Transformation`
(::) : (DecEq k, Ord k) => TaggedValue k' (s -> m t) -> TransformationM m k header ->
{default SoTrue fresh : IsFresh k' header} ->
TransformationM m k ((k', s :-> t) :: header)
(::) x (Trans xs isnub) {fresh} =
Trans (insert x xs) (freshInsert (getProof fresh) isnub)
transPreservesFresh : Ord k => (xs : OrdList k o MapValue) -> (y : Fresh l (toSource xs)) -> Fresh l (toTarget xs)
transPreservesFresh [] y = y
transPreservesFresh ((k, s :-> t) :: xs) (f :: fresh) = f :: transPreservesFresh xs fresh
transPreservesNub : (header : OrdList k o MapValue) -> Nub (toSource header) -> Nub (toTarget header)
transPreservesNub [] xs = xs
transPreservesNub ((l, s :-> t) :: xs) (y::ys) = transPreservesFresh xs y :: transPreservesNub xs ys
||| Map all the field of a record
mapRecord : (trans : TransformationM m k mapper) ->
(xs : Record k (toSource mapper)) ->
RecordM m k (toTarget mapper)
mapRecord (Trans trans nubT) (Rec xs nubXS) {mapper = T mapper} =
Rec (mapRecord trans xs) (transPreservesNub mapper nubXS)
||| Map all the field of a record, extract the effect
traverseRecord : Applicative m =>
(trans : TransformationM m k mapper) ->
(xs : Record k (toSource mapper)) ->
m (Record k (toTarget mapper))
traverseRecord (Trans trans nubT) (Rec xs nubXS) {mapper = T mapper} =
map (flip Rec (transPreservesNub mapper nubXS)) (traverseRecord trans xs)
||| Map all the field of a record
traverseRecord' : (trans : Transformation k mapper) ->
(xs : Record k (toSource mapper)) ->
Record k (toTarget mapper)
traverseRecord' = mapRecord
||| Create a record of Maybe type, with the values of the initial record,
||| if defined, or with `Nothing` if the field is not defined.
optional : DecEq k =>
(xs : Record k header) ->
{auto prf : HereOrNot post header} ->
{default SoTrue postNub : IsNub post} ->
RecordM Maybe k post
optional (Rec xs nubXS) {prf = HN prf} {postNub} =
Rec (optional xs prf) (getProof postNub)
||| Change the effect
hoist : (f: {a : _} -> m a -> n a) -> (xs : RecordM m k header) -> RecordM n k header
hoist f (Rec xs nubXS) = Rec (hoist f xs) nubXS
||| Perform a `Record` transformation under the `Identity` Monad
withIdentity : (RecordM Identity k pre -> RecordM Identity k post) ->
Record k pre -> Record k post
withIdentity f = hoist runIdentity . f . hoist Id
||| lift fields of a Record
lift : (f: {a : _} -> a -> m a) -> (xs : Record k header) -> RecordM m k header
lift = hoist
||| extract an effect from a record
sequence : Applicative m => (xs : RecordM m k header) -> m (Record k header)
sequence (Rec xs nubXS) = flip Rec nubXS <$> sequence xs
||| embed the effect in the values, directly into the `Record`
unlift : RecordM m k header -> Record k (map m header)
unlift (Rec values nubProof) = Rec (unlift values) (mapValuesPreservesNub nubProof)
||| Map a subset of a record
patchRecord : (DecEq k, Applicative m) =>
(trans : TransformationM m k mapper) ->
(xs : Record k header) ->
{auto prf : Sub (toSource mapper) header} ->
m (Record k (patch (toTarget mapper) header))
patchRecord (Trans trans nubT) (Rec xs nubXS) {prf = S prf} {mapper = T mapper} = let
nubProof = disjointNub diffIsDisjoint
(isNubFromSub diffIsSub nubXS)
(transPreservesNub mapper (isNubFromSub prf nubXS))
in map (flip Rec nubProof) (patchRecord trans xs prf)
||| Map a subset of a record
patchRecord' : DecEq k =>
(trans : Transformation k mapper) ->
(xs : Record k header) ->
{auto prf : Sub (toSource mapper) header} ->
Record k (patch (toTarget mapper) header)
patchRecord' (Trans trans nubT) (Rec xs nubXS) {prf = S prf} {mapper = T mapper} = let
nubProof = disjointNub diffIsDisjoint
(isNubFromSub diffIsSub nubXS)
(transPreservesNub mapper (isNubFromSub prf nubXS))
in (flip Rec nubProof) (patchRecord' trans xs prf)
-- Foldmap
public export
data RecordFunc : (required : Header k) ->
(optional : Header k) ->
(result : Type) -> Type where
Func : (Record k required -> RecordM Maybe k opt -> a) ->
RecordFunc required opt a
||| Apply a function on a known set of data
foldRecord : (Ord k, DecEq k) =>
RecordFunc required opt a ->
Record k header ->
{auto optNub : IsNub opt} ->
{auto decomp : Decomp required opt header} ->
a
foldRecord (Func f) xs {decomp = D sub op} {optNub} =
f (project xs) (optional xs {postNub = optNub})
||| Apply a modifier Record to a record
(<**>) : RecordM (endoM m n) k header -> RecordM m k header -> RecordM n k header
(<**>) (Rec fs _) (Rec xs nubXS) = Rec (fs <**> xs) nubXS
-- Operators
keepIf : (Alternative m, Applicative m) => (a -> Bool) -> a -> m a
keepIf f x = map (const x) (guard (f x))
is : (Eq a, Alternative m, Applicative m) => a -> a -> m a
is expected = keepIf (== expected)
isnot : (Eq a, Alternative m, Applicative m) => a -> a -> m a
isnot expected = keepIf (/= expected)
|
{-# OPTIONS --cubical --safe #-}
open import Prelude
open import Relation.Binary
module LexPerm where
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj27eqsynthconj4 : forall (lv0 : natural), (@eq natural (lv0) (plus lv0 Zero)).
Admitted.
QuickChick conj27eqsynthconj4.
|
Q: Why/how did you get into the animal health industry?
DB: For over 20 years I worked in the human drug development and medical communications industries, managing dozens of products and development programs from Phase I through Phase IV. While I enjoyed what I was doing, I wanted to do something that also paired with my love for animals and my degree in zoology. I was fortunate to be able to leverage my years of experience in drug development and investor relations to co-found KindredBio, a company dedicated to developing cutting-edge therapeutics for cats, dogs, and horses. As the parent of frogs, hermit crabs, mice, rats, bunnies, hamsters, and a dog as a child (not all at the same time), my mother is not surprised by my success or that of KindredBio!
Q: Who has been a career inspiration to you?
DB: I met my co-founder and KindredBio CEO, Dr. Richard Chin, when we worked together at Elan Pharmaceuticals. Right away, I felt connected to him and his business philosophies. As a Harvard-trained physician and former Rhodes Scholar, with a track record of almost a dozen drug approvals, I knew I could learn from him and that my clinical operations expertise and management skills would benefit him as well. When we left Elan, we kept in touch and I always knew we would work together again. Then, in 2012, we began to talk about how we could pair our decades of experience and love of animals to start a veterinary biopharmaceutical company. That’s when we started KindredBio and never looked back. I feel very fortunate to be in partnership with Richard because our diverse skill sets and management styles really complement one another.
Q: People think of pets as part of their family, how does that fit in with what you do?
DB: Our furry companions have truly become members of the family - proven by Americans spending $700 million each year on Valentine’s Day gifts for our beloved pets. The evolution of the pet into a family member has happened over a relatively short time. In my lifetime, I have seen dogs move from the yard, to the dog house, to a sequestered room behind a gate, and now, 40% of pets sleep in bed with their pet parents! In 2016, pet owners in the U.S. spent over $66 billion on their pets, an increase of over 10% from the year before. There is a critical need, and willingness to pay, for innovative medicines for our pets. We found that there are few companies dedicated to developing such therapies for companion animals, with a market in dire need, which is why we founded KindredBio.
DB: Our mission is to bring our pets the same kinds of innovative, safe, and effective medicines that our human family members enjoy. Our core strategy is to leverage the billions of dollars that have been invested in human drug development by modifying, improving, and repurposing pre-existing drugs and pursuing biological targets that have already proven to be safe and effective in humans.
We have developed a team of veterinarians, scientists, and operational experts who love animals and want to develop therapeutics that have been appropriately studied and, eventually, approved by FDA for use in pets. The passion that we have for pet wellness is infectious throughout the organization.
Q: Why did you decide to headquarter the company in Silicon Valley?
DB: We love the energy that comes with working in an innovative hub of technology like Silicon Valley. The Bay Area is an epicenter of biotechnology, and Richard, who spent years as the head of Clinical Research for biotherapeutics at Genentech, has recruited a world-class team of scientists and protein engineers to develop our cutting-edge biologics for cats, dogs, and horses. Because of the talent in the area, we have put together an incredible team that is innovating in lockstep with human breakthroughs, such as those in immunotherapy. Importantly, the energy of the valley, along with the great weather and access to outdoor pursuits, is what allows us to attract top talent to our organization.
Q: What were some of the challenges you faced as a woman raising money on Wall Street?
DB: I grew up in biotech and pharma in California, and was fortunate that I did not feel limited by a glass ceiling in my career trajectory. It was quite apparent to me as we began our testing-the-waters meetings and eventual IPO roadshow that there were far fewer female decision-makers on Wall Street. There were entire days on the roadshow when I would not see a single woman at the table. I have always felt that, regardless of gender, it is critical to know your business and industry better than anyone else in the room. Because we had a very strong business plan that I knew inside and out, I did not feel a need to alter my pitch because of my gender. The fact that my passion for animals and KindredBio shines through my pitch, perhaps more because of my delivery as a woman, is only an asset. I am happy to say that, in our nearly four years as a publicly held company, I do meet more and more female investors at the table who are decision-makers.
Q: Tell me about the drugs you have in development and how they help animals?
DB: We are currently anticipating FDA approval and launch of Zimeta™ (dipyrone injection), a novel, non-steroidal anti-inflammatory for the control of fever in horses, and Mirataz™ (mirtazapine 2% topical ointment) for the management of weight loss in cats. It says so much about our team that we have two drugs under review by FDA less than five years after founding the company. In addition to those products, we have approximately 20 products in development for a variety of diseases for cats, dogs, and horses. In the future, we will be helping animals with autoimmune diseases, cancer, and metabolic disorders, to name a few.
Q: What is your advice for companies who are fundraising for their businesses?
DB: Tout your brain and your heart. Telling your story and conveying your vision is a huge part of connecting with a potential investor. Your heart will show how much you believe in what you are doing. Investors will see that. It’s equally important to show your knowledge of what you are selling. Investors want to see that you know what you are talking about – from the industry and competitors to the product and the customer. Do your homework. Combine your passion and knowledge with drive and persistence, and you are well on your way.
Q: What is the best advice you've ever received?
DB: While my Mom didn’t verbalize advice as much as she led by example, she taught me to follow my passion, regardless of where society pushed me (or even where she thought I should be heading). She raised two girls as a single parent and worked two jobs at once, as a special education teacher and a waitress. She also got her Master’s degree before I graduated high school and found time to attend the Academy of Dramatic Arts to fulfill her creative needs. As a child of the 70’s and 80’s, she was a phenomenal role model, who taught me that a woman could be anything she set out to be. She supported my every whim as a child, as long as it was something I was passionate about. I saw the way she loved teaching and nurturing children with learning challenges, and the fulfillment she received in return, which showed me how to have a rewarding career, driven by passion.
Q: What hobbies or interests do you enjoy when you aren't working?
DB: My husband, Lon, and I enjoy traveling, music, and theater in our spare time. We have had the opportunity to support theatrical productions on and off Broadway. In addition to adventure travel, we find the dozens of concerts we attend each year to be a great way to be in the moment and relax. We have recently become horse enthusiasts and owners of a grand prix show jumper, Wasco, as well as parents to a border collie, Betty, and two cats, Gladys and Glover.
function y = in2cm(x)
%IN2CM converts inches to centimeters
%
% Usage: y = in2cm(x)
%
% Input parameters:
% x - input / inches
%
% Output parameters:
% y - output / cm
%
% See also: in2px, cm2px, cm2in, px2in, px2cm
%*****************************************************************************
% The MIT License (MIT) *
% *
% Copyright (c) 2010-2019 SFS Toolbox Developers *
% *
% Permission is hereby granted, free of charge, to any person obtaining a *
% copy of this software and associated documentation files (the "Software"), *
% to deal in the Software without restriction, including without limitation *
% the rights to use, copy, modify, merge, publish, distribute, sublicense, *
% and/or sell copies of the Software, and to permit persons to whom the *
% Software is furnished to do so, subject to the following conditions: *
% *
% The above copyright notice and this permission notice shall be included in *
% all copies or substantial portions of the Software. *
% *
% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *
% THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *
% FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *
% DEALINGS IN THE SOFTWARE. *
% *
% The SFS Toolbox allows to simulate and investigate sound field synthesis *
% methods like wave field synthesis or higher order ambisonics. *
% *
% https://sfs.readthedocs.io [email protected] *
%*****************************************************************************
%% ===== Checking of input parameters ====================================
nargmin = 1;
nargmax = 1;
narginchk(nargmin,nargmax);
%% ===== Computation =====================================================
y = x * 2.54;
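% Example (added for illustration): in2cm(1) returns 2.54, since one inch
% is exactly 2.54 centimeters.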
|
%CREATECLIQUETREE Takes in a list of factors F, Evidence and returns a
%clique tree after calling ComputeInitialPotentials at the end.
%
% P = CREATECLIQUETREE(F, Evidence) Takes a list of factors and creates a clique
% tree. The value of the cliques should be initialized to
% the initial potential.
% It returns a clique tree that has the following fields:
% - .edges: Contains indices of the nodes that have edges between them.
% - .cliqueList: Contains the list of factors used to build the Clique
% tree.
%
% Copyright (C) Daphne Koller, Stanford University, 2012
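% Example (hypothetical two-factor network, added for illustration; not part
% of the original assignment code):
%   F = struct('var', {[1], [1 2]}, 'card', {[2], [2 2]}, ...
%              'val', {[0.6 0.4], [0.5 0.5 0.5 0.5]});
%   P = CreateCliqueTree(F, []);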
function P = CreateCliqueTree(F, Evidence)
C.nodes = {};
V = unique([F(:).var]);
% Setting up the cardinality for the variables since we only get a list
% of factors.
C.card = zeros(1, length(V));
for i = 1 : length(V),
for j = 1 : length(F)
if (~isempty(find(F(j).var == i)))
C.card(i) = F(j).card(find(F(j).var == i));
break;
end
end
end
C.factorList = F;
% Setting up the adjacency matrix.
edges = zeros(length(V));
for i = 1:length(F)
for j = 1:length(F(i).var)
for k = 1:length(F(i).var)
edges(F(i).var(j), F(i).var(k)) = 1;
end
end
end
cliquesConsidered = 0;
while cliquesConsidered < length(V)
% Using Min-Neighbors where you prefer to eliminate the variable that has
% the smallest number of edges connected to it.
% Every time you enter the loop, you look at the state of the graph and
% pick the variable to be eliminated.
bestClique = 0;
bestScore = inf;
for i=1:size(edges,1)
score = sum(edges(i,:));
if score > 0 && score < bestScore
bestScore = score;
bestClique = i;
end
end
cliquesConsidered = cliquesConsidered + 1;
[F, C, edges] = EliminateVar(F, C, edges, bestClique);
end
% Pruning the tree.
C = PruneTree(C);
% We are incorporating the effect of evidence in our factor list.
for j = 1:length(Evidence),
if (Evidence(j) > 0),
C.factorList = ObserveEvidence(C.factorList, [j, Evidence(j)]);
end;
end;
% Assume that C now has correct cardinality, variables, nodes and edges.
% Here we make the function call to assign factors to cliques and compute the
% initial potentials for clusters.
P = ComputeInitialPotentials(C);
return
|
Require Import
FJ.Base
FJ.Syntax
FJ.Semantics.AuxiliarDefinitions
FJ.Typing
FJ.Tactics
FJ.Typechecker.ExpTypechecker.
Definition MethodOkDec : forall (n : nat) CT C (M : Method), {{ M | MethodOk CT C M}}.
refine (fun n CT C M =>
match MapsToDec C CT with
| !! => ??
| [|| CD ||] =>
match ExpHasTypeDec n CT (mkGamma ((mkFormalArg this C) :: (margs M)))
(mbody M) with
| ?? => ??
| Found _ D _ =>
match valid_overrideDec CT n (mname M) (cextends CD)
(mkMethodType (map ftype (margs M)) (mtype M)) with
| No => ??
| Yes =>
match BoundedSubtypeDec CT n D (mtype M) with
| Yes => Found _ M _
| No => ??
end
end
end
end) ; simpl in * ; try map_solver ; eauto.
eapply T_Method ;eauto.
eapply BoundedSubtypeSound ; eauto.
Defined. |
module WhereTheWaterFlows
using StaticArrays, Requires
export waterflows, fill_dem, catchments,
plotarea
const I11 = CartesianIndex(1,1)
const I22 = CartesianIndex(2,2)
#const NaNflow = -99
const NOFLOW = 5 # direction number indicating no flow. Use a constant to better keep track.
"""
Direction numbers. E.g. dirnums[1,1] will return the number
corresponding to the direction top-left.
Note, I use the convention that the x-axis corresponds to the row of
the matrix and the y-axis the columns. To print them in this "normal"
coordinate system use `showme`
"""
const dirnums = SMatrix{3,3}(reverse([ 7 8 9
4 NOFLOW 6
1 2 3]',
dims=2))
# "As dirnums but with no-flow encoded as NaNflow."
# const dirnums_nan = SMatrix{3,3}(reverse([ 7 8 9
# 4 NaNflow 6
# 1 2 3]',
# dims=2))
"Translation from dirnums to CartesianIndex"
const cartesian = SMatrix{3,3}(reverse(permutedims([CartesianIndex(-1,1) CartesianIndex(0,1) CartesianIndex(1,1)
CartesianIndex(-1,0) CartesianIndex(0,0) CartesianIndex(1,0)
CartesianIndex(-1,-1) CartesianIndex(0,-1) CartesianIndex(1,-1)]),
dims=2))
"""
Show an array with its axes oriented such that they correspond
to x and y direction.
"""
showme(ar) = (display(reverse(ar',dims=1)); println(" "))
"Translate a CartesianIndex, giving the offset, into a direction number"
ind2dir(ind::CartesianIndex) = dirnums[ind + I22]
# ind2dir_nan(ind::CartesianIndex) = dirnums_nan[ind + I22]
"Translate a D8 direction number into a CartesianIndex"
dir2ind(dir) = cartesian[dir]
# "Translate a D8 direction number (with NaNflow at center) into a CartesianIndex"
# dir2ind_nan(dir) = dir==0 ? CartesianIndex(0,0) : dir2ind(dir)
"Translate a D8 direction number into a 2D vector."
dir2vec(dir) = [dir2ind(dir).I...]
"""
Tests whether a cell `J` with flowdir `dirJ` flows into cell `I`.
"""
flowsinto(J::CartesianIndex, dirJ, I::CartesianIndex) = ind2dir(I-J) == dirJ
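# For example (a check derived from the direction tables above; not in the
# original source): direction 9 encodes the (+1,+1) offset, so
#     flowsinto(CartesianIndex(1,1), 9, CartesianIndex(2,2)) == true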
"Return CartesianIndices corresponding to the 8 neighbors and the point itself"
iterate_D9(I, Iend, I1=I11) = max(I1, I-I1):min(Iend, I+I1)
function iterate_D9(I, ar::AbstractMatrix)
R = CartesianIndices(size(ar))
I1, Iend = first(R), last(R)
return max(I1, I-I1):min(Iend, I+I1)
end
"""
on_outer_boundary(ar, I::CartesianIndex)
Check whether on outer boundary of ar
"""
function on_outer_boundary(ar, I::CartesianIndex)
i,j = I.I
iend, jend = size(ar)
if i==1 || i==iend || j==1 || j==jend
return true
else
return false
end
end
"""
d8dir_feature(dem, bnd_as_pits)
D8 directions of a DEM and drainage features.
Elevations with NaN map to dir==NOFLOW, i.e. just like pits.
However, they are not treated in the pits-filling function `drainpits`.
The argument `bnd_as_pits` determines whether neighboring cells
flow out of the domain or into NaN-cells (`bnd_as_pits==true`)
or not.
Return
- dir - direction, encoded as `dirnums`
- nout - number of outflow cells of a cell (0 or 1)
- nin - number of inflow cells of a cell (0-8)
- pits - location of pits as a `Vector{CartesianIndex{2}}` (sorted)
"""
function d8dir_feature(dem, bnd_as_pits)
# outputs
diro = zeros(Int8, size(dem))
nout = falses(size(dem))
nin = zeros(Int8, size(dem))
pits = CartesianIndex{2}[]
R = CartesianIndices(size(dem))
Iend = last(R)
# get dir for all points
for I in R
# make pits on boundary if bnd_as_pits is set
if bnd_as_pits && on_outer_boundary(dem,I)
# make it a pit
diro[I] = NOFLOW
continue
end
ele = dem[I] # keeps track of lowest elevation of all 9 cells
dir = NOFLOW
if isnan(ele)
# just mark as NOFLOW
else
for J in iterate_D9(I, Iend)
I==J && continue
ele2 = dem[J]
if isnan(ele2)
if bnd_as_pits
# flow into first found NaN-cell
dir = ind2dir(J-I)
break
else
# ignore NaN-Cell
continue
end
elseif ele > ele2
# lower elevation found, adjust dir
ele = ele2
dir = ind2dir(J-I)
end
end
end
diro[I] = dir
end
# flow features
for I in R
for J in iterate_D9(I, Iend)
J==I && continue
nin[I] += flowsinto(J, diro[J], I)
end
if diro[I]==NOFLOW
nout[I] = false
if !isnan(dem[I])
push!(pits, I)
elseif nin[I]>0
# mark NaN points only as pits if something is flowing into them
push!(pits, I)
end
else
nout[I] = true
end
end
return diro, nout, nin, pits
end
"""
waterflows(dem, cellarea=ones(size(dem));
maxitr=1000, calc_streamlength=true, drain_pits=true, bnd_as_pits=false)
Does the water flow routing according the D8 algorithm.
kwargs:
- drain_pits -- whether to route through pits
- maxiter -- maximum iterations of the algorithm
- calc_streamlength -- whether to calculate stream length
- bnd_as_pits -- whether the domain boundary an NaNs should be pits,
i.e. adjacent cells can drain into them,
or whether to ignore them
TODO: if bnd_as_pits, route water along boundary edges first. This would probably
substantially reduce the number of catchments, as currently every boundary point
is a pit and thus a catchment (if bnd_as_pits==true).
Returns
- area -- upslope area
- stream-length -- length of stream to the farthest source
- dir -- flow direction at each location
- nout -- whether the point has outflow. I.e. nout[I]==0 --> I is a pit
- nin -- number of inflow cells
- pits -- location of pits as Vector{CartesianIndex{2}}
- c -- catchment map
- bnds -- boundaries between catchments. The boundary to the exterior/NaNs is not in here.
"""
function waterflows(dem, cellarea=ones(size(dem));
maxitr=1000, calc_streamlength=true, drain_pits=true, bnd_as_pits=false)
area, slen, dir, nout, nin, pits = _waterflows(d8dir_feature(dem, bnd_as_pits)..., cellarea; maxitr=maxitr, calc_streamlength=calc_streamlength)
c, bnds = catchments(dir, pits, bnd_as_pits)
if drain_pits
dir, nin, nout, pits, c, bnds = drainpits(dem, dir, nin, nout, pits, (c,bnds))
area, slen = _waterflows(dir, nout, nin, pits)
end
#area[isnan.(dem)] .= NaN
return area, slen, dir, nout, nin, pits, c, bnds
end
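# A minimal usage sketch (synthetic DEM, added for illustration; not part of
# the original source):
#
#     dem = [3.0 2.0 3.0;
#            2.0 1.0 2.0;
#            3.0 2.0 3.0]
#     area, slen, dir, nout, nin, pits, c, bnds = waterflows(dem; bnd_as_pits=true)
#     showme(area)   # upslope area, oriented along the x/y axes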
# this function does the actual routing
function _waterflows(dir, nout, nin, pits, cellarea=ones(size(dir)); maxitr=1000, calc_streamlength=true)
area = copy(cellarea)
tmp = convert(Matrix{Int}, nin)
tmp2 = calc_streamlength ? copy(tmp) : tmp
for counter = 1:maxitr
n = 0
for R in CartesianIndices(size(dir))
# Consider only points with less than `counter` upstream points
if tmp[R]==0
tmp[R] = -counter # done with it
if calc_streamlength
tmp2[R] = -counter # done with it
end
d = dir[R]
if d!=NOFLOW
receiver = R + dir2ind(d)
area[receiver] += area[R]
tmp2[receiver] -= 1
tmp2[receiver] < 0 && error("This should not happen!")
end
n +=1
end
end
if n==0
#@show counter
break
end
if counter==maxitr
error("Maximum number of iterations reached in `_waterflows`: $counter")
end
copyto!(tmp, tmp2)
end
return area, -tmp, dir, nout, nin, pits
end
"""
catchments(dir, pits, bnd_as_pits)
Calculate catchments from
- dir
- pits
Return: catchments Matrix{Int}. Value==0 corresponds to NaNs in the DEM
which are not pits (i.e. where no water flows into).
"""
function catchments(dir, pits, bnd_as_pits)
c = zeros(Int, size(dir))
np = length(pits)
# recursively traverse the drainage tree in up-flow direction,
# starting at all pits
for (n, pit) in enumerate(pits)
_catchments!(n, c, dir, pit)
end
return c, make_boundaries(c, 1:np)
end
function _catchments!(n, c, dir, ij)
c[ij] = n
# proc upstream points
for IJ in iterate_D9(ij, c)
ij==IJ && continue
if flowsinto(IJ, dir[IJ], ij)
_catchments!(n, c, dir, IJ)
end
end
end
"""
make_boundaries(catchments, colors, bnd_as_pits)
Make vectors of boundary points. Note that points along the
edge of the domain as well as points bordering NaN-cells with no
inflow do not count as boundaries.
TODO: this is brute force...
"""
function make_boundaries(catchments, colors)
bnds = [CartesianIndex[] for c in colors]
for R in CartesianIndices(size(catchments))
c = catchments[R]
c==0 && continue # don't find boundaries for c==0 (NaNs with no inflow)
bnd = bnds[c]
for I in iterate_D9(R, catchments)
co = catchments[I]
if co!=c && co!=0
push!(bnd, R)
break
end
end
end
bnds
end
"""
Checks that all points in `bnds[color]` are indeed on the
boundary; removes them otherwise.
"""
function _prune_boundary!(bnds, catchments::Matrix, color)
del = Int[]
# loop over all boundary cells
for (i,P) in enumerate(bnds[color])
keep = false
# check cells around it
for PP in iterate_D9(P, catchments)
# if one is of different color, keep it.
co = catchments[PP]
if co!=color && co!=0
keep = true
break
end
end
if !keep
push!(del, i)
end
end
deleteat!(bnds[color], del)
return nothing
end
"""
drainpits(dem, dir, nin, nout, pits, (c, bnds)=catchments(dir, pits);
maxitr=100)
Return an updated direction field which drains (interior) pits.
This is done by reversing the flow connecting the lowest boundary point
to the pits.
There needs to be a decision on how to treat the outer boundary and boundaries
to NaN-cells. Possibilities:
- do not treat boundary cells as pits but do treat pits at the boundary as terminal.
- treat boundary cells as pits, i.e. flow reaching such a cell will vanish. Again
such cells would be terminal.
What to do if there are no terminal pits in the DEM? Fill to the uppermost pit? Or take
the lowermost as terminal?
Returns new dir, nin, nout, pits (sorted), c.
TODO: this is the performance bottleneck.
"""
function drainpits(dem, dir, nin, nout, pits, (c, bnds)=catchments(dir, pits);
maxitr=100)
dir_ = copy(dir)
dir2 = copy(dir)
nin_ = copy(nin)
nout_ = copy(nout)
pits_ = copy(pits)
c_ = copy(c)
bnds_ = deepcopy(bnds)
Iend = CartesianIndex(size(dem))
pits_to_keep = trues(length(pits_))
no_drainage_across_boundary = false
# iterate until all interior pits are removed
for i=1:maxitr
n_removed = 0
for (color, P) in enumerate(pits_)
copyto!(dir2, dir_) # TODO hack...
# Already removed pit, skip
P==CartesianIndex(-1,-1) && continue
# Don't process pits on the DEM boundary, because there water disappears.
(P.I[1]==1 || P.I[2]==1 || P.I[1]==size(dir,1) || P.I[2]==size(dir,2)) && continue
# Don't process pits which are NaNs, because there water disappears.
# See also bnd_as_pits.
isnan(dem[P]) && continue
# If there are no more boundaries left, stop. This should only occur when
# bnd_as_pits==false and when there are only interior pits.
if all(isempty.(bnds_))
no_drainage_across_boundary = true # set flag to warn later
break
end
## Debug plotting
# cls = [c=>sum(c_.==c) for c=1:6]
# @show color, size(bnds_[color])
# @show cls
# imshow(Array(c_'), origin="lower")
# if color==1
# colorbar()
# end
# readline(stdin)
# if color==6
# imshow(Array(c_'), origin="lower")
# end
# find point on catchment boundary with minimum elevation
Imin = bnds_[color][findmin(getindex.(Ref(dem), bnds_[color]))[2]]
@assert c_[Imin]==c_[P] # Something is amiss if the found minimum is the pit!
# make the outflow and find the next catchment
min_ = Inf
target = Imin
for J in iterate_D9(Imin, dem)
Imin==J && continue # do not look at the point itself
c_[J] == color && continue # J is in Imin's catchment
if dem[J] < min_
min_ = dem[J]
target = J
end
end
if target==Imin
# this means that point is on a NaN boundary -> don't process
continue
end
# do the flow across the boundary
_flow_from_to!(Imin, target, dir_, nin_, nout_)
# reverse directions on path going from Imin to P
if Imin!=P
P1 = Imin
while true
# get downstream point
P2 = dir2ind(dir2[P1]) + P1 # note usage of `dir2`
# The need for dir2 could be removed by first
# traversing the flow path without modifying it.
# And then do the modification in a second traversal.
if P2==P1
# With dir2, this should be fixed now.
println("This should not happen!")
@show color, Imin, P, P1
break
end
# reverse
_flow_from_to!(P2, P1, dir_, nin_, nout_)
P2==P && break # reached the pit
P1 = P2
end
end
# remove from list of pits
pits_to_keep[color] = false
pits_[color] = CartesianIndex(-1,-1)
# update catchments
othercolor = c_[target]
c_[c_.==color] .= othercolor
append!(bnds_[othercolor], bnds_[color])
empty!(bnds_[color])
_prune_boundary!(bnds_, c_, othercolor)
            n_removed += 1
end
n_removed==0 && break
if i==maxitr
error("Maximum number of iterations reached in `drainpits`: $i")
end
end
if no_drainage_across_boundary
@warn """Water cannot leave this DEM! Instead it drains into a random interior pit.
Consider setting `bnd_as_pits=true`."""
end
# remove removed pits
pits_ = pits_[pits_.!=Ref(CartesianIndex(-1, -1))]
# redo colors
cols = Dict([c=>i for (i,c) in enumerate(unique(c_))])
for i in eachindex(c_)
c_[i] = cols[c_[i]]
end
return dir_, nin_, nout_, sort(pits_), c_, bnds_
end
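# Minimal usage sketch (hedged): how `dir`, `nin`, `nout` and `pits` are
# produced depends on the flow-routing step run before this one, so the
# variable names below are illustrative assumptions only.
#
#     dir2, nin2, nout2, pits2, c2, bnds2 = drainpits(dem, dir, nin, nout, pits)
#
# Afterwards every interior pit has been removed by reversing the flow path
# from its catchment's lowest boundary cell back to the pit, and `c2` holds
# the re-numbered catchment color of each cell.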
"""
Update dir, nin, and nout such that flow at P1 is now
from P1 to P2.
It can potentially modify `dir, nin, nout` at three locations
- P1: dir, nout, nin
- P2: nin
- if allow_reversion==true, then P2's dir, nout can also be modified.
- P3 (previous receiver cell of P1): nin
"""
function _flow_from_to!(P1, P2, dir, nin, nout, allow_reversion=false)
# already right
ind2dir(P2-P1)==dir[P1] && return nothing
# otherwise update
P3 = P1 + dir2ind(dir[P1]) # previous receiver cell
if P3!=P1
nin[P3] -= 1
end
dir[P1] = ind2dir(P2-P1)
nin[P2] += 1
nout[P1] = true # definitely not a pit anymore
# if flow from P2 was into P1, then make P2 a pit
# (otherwise dir will be inconsistent)
if (dir2ind(dir[P2]) + P2 == P1)
if allow_reversion
dir[P2] = ind2dir(CartesianIndex(0,0))
nout[P2] = false
nin[P1] -= 1
else
error("Flow direction in P2 would need to be reversed but not allowed (set option `allow_reversion`).")
end
end
return nothing
end
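# Worked micro-example (hedged, assuming the D8-style `ind2dir`/`dir2ind`
# encoding used elsewhere in this module). For horizontally adjacent cells
#
#     P1, P2 = CartesianIndex(2, 2), CartesianIndex(2, 3)
#     _flow_from_to!(P1, P2, dir, nin, nout)
#
# the call decrements `nin` at P1's previous receiver (if it had one), sets
# `dir[P1]` to the direction code of P2 - P1, increments `nin[P2]` and marks
# `nout[P1] = true`. If P2 previously drained into P1, the call errors unless
# `allow_reversion=true`, in which case P2 is turned into a pit instead.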
"""
fill_dem(dem, pits, dir)
Fill the pits (aka sinks) of a DEM (apply this after applying "drainpits",
which is the default). Returns the filled DEM.
Note that this is not needed as a pre-processing step for using the `upstream` area.
This uses a tree traversal to fill the DEM. The traversal is depth-first (as it
is simpler to implement), which may lead to a stack overflow on a large DEM.
"""
function fill_dem(dem, pits, dir; small=0.0)
dem = copy(dem)
for pit in pits
        _fill_ij!(-Inf, dem, pit, dir, small) # start below any elevation; 0.0 would mis-fill negative-elevation pits
end
return dem
end
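# Minimal sketch of the intended call order (hedged -- the variables feeding
# `drainpits` come from an upstream flow-routing step and are illustrative):
#
#     dir2, nin2, nout2, pits2, c2, bnds2 = drainpits(dem, dir, nin, nout, pits)
#     demf = fill_dem(dem, pits2, dir2)                # fill flats exactly
#     demf = fill_dem(dem, pits2, dir2, small=1e-6)    # or impose a tiny gradient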
function _fill_ij!(ele, dem, ij, dir, small)
if ele > dem[ij]
dem[ij] = ele
ele += small
else
ele = dem[ij]
end
# proc upstream points
for IJ in iterate_D9(ij, dem)
ij==IJ && continue
if flowsinto(IJ, dir[IJ], ij)
_fill_ij!(ele, dem, IJ, dir, small)
end
end
end
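# Hedged alternative sketch: the recursion in `_fill_ij!` is depth-first and
# can overflow the call stack on a very large DEM (see the `fill_dem`
# docstring). The variant below keeps an explicit work-list instead; it is
# not wired into `fill_dem` -- swap it in there if stack depth becomes an issue.
function _fill_ij_iterative!(ele0, dem, ij0, dir, small)
    stack = [(float(ele0), ij0)]
    while !isempty(stack)
        ele, ij = pop!(stack)
        if ele > dem[ij]
            dem[ij] = ele
            ele += small
        else
            ele = dem[ij]
        end
        # queue upstream points (cells whose flow enters `ij`)
        for IJ in iterate_D9(ij, dem)
            ij==IJ && continue
            if flowsinto(IJ, dir[IJ], ij)
                push!(stack, (ele, IJ))
            end
        end
    end
    return nothing
end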
## Plotting
function __init__()
@require PyPlot="d330b81b-6aea-500a-939a-2ce795aea3ee" include("plotting.jl")
end
end # module
|
You should not receive this vaccine if you have ever had a life-threatening allergic reaction to any vaccine containing Japanese encephalitis virus.
What is Japanese encephalitis virus vaccine (SA14-14-2)?
Japanese encephalitis is a serious disease caused by a virus. It is the leading cause of viral encephalitis (inflammation of the brain) in Asia. The infection often causes only mild symptoms, but prolonged swelling of the brain can cause permanent brain damage or death.
Japanese encephalitis virus is carried and spread by mosquitoes.
The Japanese encephalitis SA14-14-2 vaccine is used to help prevent this disease in adults and adolescents who are at least 17 years old.
This vaccine is recommended for people who live in or travel to areas where Japanese encephalitis is known to exist, or where an epidemic has recently occurred.
You should receive the vaccine and booster dose at least 1 week prior to your arrival in an area where you may be exposed to the virus.
Not everyone who travels to Asia needs to receive a Japanese encephalitis vaccine. Follow your doctor's instructions or the recommendations of the Centers for Disease Control and Prevention (CDC).
This vaccine is also recommended for people who work in a research laboratory and may be exposed to Japanese encephalitis virus through needle-stick accidents or inhalation of viral droplets in the air.
Like any vaccine, the Japanese encephalitis SA14-14-2 vaccine may not provide protection from disease in every person.
a weak immune system caused by disease or by taking certain medicines or receiving cancer treatments.
Vaccines may be harmful to an unborn baby and generally should not be given to a pregnant woman. However, not vaccinating the mother could be more harmful to the baby if the mother becomes infected with a disease that this vaccine could prevent. Your doctor will decide whether you should receive this vaccine, especially if you have a high risk of infection with Japanese encephalitis virus.
Do not receive this vaccine without telling your doctor if you are breast-feeding a baby.
The Japanese encephalitis SA14-14-2 vaccine is given in a series of 2 shots. The shots are usually 28 days apart. Your individual booster schedule may be different from these guidelines. Follow your doctor's instructions or the schedule recommended by the health department of the state you live in.
In addition to receiving the Japanese encephalitis vaccine, use protective clothing, insect repellents, and mosquito netting around your bed to further prevent mosquito bites that could infect you with the Japanese encephalitis virus.
Becoming infected with Japanese encephalitis is much more dangerous to your health than receiving this vaccine. However, like any medicine, this vaccine can cause side effects, but the risk of serious side effects is extremely low.
Get emergency medical help if you have any of these signs of an allergic reaction: hives; difficulty breathing; dizziness, weakness, fast heart rate; swelling of your face, lips, tongue, or throat.
pain, redness, tenderness, or a hard lump where the shot was given.
What other drugs will affect Japanese encephalitis virus vaccine?
This list is not complete. Other drugs may interact with Japanese encephalitis virus vaccine, including prescription and over-the-counter medicines, vitamins, and herbal products. Not all possible interactions are listed in this medication guide.
Copyright 1996-2018 Cerner Multum, Inc. Version: 2.03. Revision date: 1/23/2014. |
import basic_definitions.sub_module
import Tools.tools
import linear_algebra.matrix
import group_theory.group_action
import init.algebra.functions
--set_option trace.simplify.rewrite true --- to be reworked, perhaps a little!
open_locale big_operators
namespace general
universes u v w w'
variables (G : Type u) (R : Type v) (X : Type w) [group G] [ring R] [mul_action G X]
/--
Permutation representation: let `•` be an action of `G` on `X`. Let `R` be a ring and
let `M = X → R` be the free module over `R` with basis `X`. For `v : X → R` and `g ∈ G` the formula
`ρ g v := λ x, v (g⁻¹ • x)` defines a permutation representation.
-/
def rho (g : G) : (X → R) → (X → R) := λ v x, v (g⁻¹ • x)
def rho_linear (g : G) : (X → R) →ₗ[R] (X → R) := {
to_fun := rho G R X g,
add := by { intros, exact rfl},
smul := by {intros, exact rfl}
}
@[simp]lemma rho_apply (g : G)(v : X → R)(x : X) : rho G R X g v x = v (g⁻¹ • x) := rfl
@[simp]lemma rho_mul (σ τ : G) : rho G R X (σ * τ) = rho G R X σ ∘ rho G R X τ := begin
ext v x, rw rho_apply, rw mul_inv_rev, rw mul_smul, exact rfl,
end
@[simp]lemma rho_one : rho G R X (1 : G) = id := begin
ext x v, rw rho_apply, rw one_inv, rw one_smul, exact rfl,
end
@[simp]lemma rho_right_inv (g : G) : (rho G R X g : (X → R) → (X → R)) ∘ (rho G R X g⁻¹) = id := begin
rw ← rho_mul, rw mul_inv_self, rw rho_one,
end
@[simp]lemma rho_left_inv (g : G) : ( rho G R X (g⁻¹ ) :
(X → R) → (X → R) ) ∘ (rho G R X g : (X → R) → (X → R)) = id :=
begin
rw ← rho_mul, rw inv_mul_self, rw rho_one,
end
def Perm : group_representation G R (X → R) := {
to_fun := rho_linear G R X,
map_one' :=
begin
unfold rho_linear,congr,
exact rho_one G R X,
end,
map_mul' :=
begin
unfold rho_linear, intros, congr,
exact rho_mul G R X _ _,
end
}
variables (g : G) (x y : X → R)
example (g : G) (x y : X → R) (r : R) : true := begin
let ρ := @Perm G R X,
have f : ρ 1 = 1,
rw ρ.map_one,
have : ρ g (x+y) = ρ g x +ρ g y,
rw (ρ g).map_add,
trivial,
end
end general
namespace finite_action
open classical_basis general
universes u v w w'
variables {G : Type u} (R : Type v) (X : Type w) [fintype X] [decidable_eq X][group G] [comm_ring R] [mul_action G X]
/-!
Goal: study the representation further.
-/
@[simp]theorem action_on_basis (g : G)(x : X) : rho G R X g (ε x) = ε (g • x) := begin
funext y, simp,
unfold ε,
split_ifs,
{exact rfl},
{rw [h, ← mul_smul,mul_inv_self,one_smul] at h_1, trivial},
{rw [← h_1, ← mul_smul,inv_mul_self,one_smul] at h, trivial},
{exact rfl},
end
@[simp]theorem action_on_basis_apply (g : G) (x y : X) : rho G R X g (ε x) y = if g • x = y then 1 else 0 :=
begin simp, exact rfl,
-- rw action_on_basis, exact rfl,
end
@[simp]theorem trace (g : G) : ∑ (x : X), rho G R X g (ε x) x = fintype.card {x : X | g • x = x } :=
begin simp, exact rfl,
--have r : (λ (x : X), rho G R X g (ε x) x) = λ x,if g • x = x then 1 else 0,
-- funext, rw action_on_basis_apply,
--rw r,
--rw finset.sum_boole,simp, exact rfl, --- filter ?
end
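/-!
Reading of `trace` above in standard notation (a restatement, not new API):
for the permutation representation `ρ = Perm G R X` the character at `g` is
`χ(g) = tr (ρ g) = ∑ x, (ρ g) (ε x) x = card {x : X | g • x = x}`,
i.e. the trace of a permutation matrix counts the fixed points of `g` on `X`.
-/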
variables (g : G)
@[simp]lemma Perm_ext (g : G) : rho G R X g = (Perm G R X) g := rfl
end finite_action |
[STATEMENT]
lemma in_set_spmf: "x \<in> set_spmf p \<longleftrightarrow> Some x \<in> set_pmf p"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (x \<in> set_spmf p) = (Some x \<in> set_pmf p)
[PROOF STEP]
by(simp add: set_spmf_def) |
c is A i is 65
i is 1601 c is A
c is A s is 65
s is 1601 c is A
i is 1601 s is 1601
s is 1601 i is 1601
i is 4294967295 ll is ffffffff
ll is 8589934591 i is ffffffff
c is A ll is 41
ll is 1ffffff41 c is A
|