section \<open>Multitape Turing Machines\<close>
theory Multitape_TM
imports
TM_Common
begin
text \<open>Turing machines can be either defined via a datatype or via a locale.
We use TMs with left endmarker and dedicated accepting and rejecting state from
which no further transitions are allowed. Deterministic TMs can be partial.
Having multiple tapes, tape positions, directions, etc. is modelled via functions
of type @{typ "'k \<Rightarrow> 'whatever"} for some finite index type @{typ "'k :: finite"}.
The input will always be provided on the first tape, indexed by @{term "0 :: 'k :: zero"}.\<close>
datatype ('q,'a,'k)mttm = MTTM
(Q_tm: "'q set") (* Q - states *)
"'a set" (* Sigma - input alphabet *)
(\<Gamma>_tm: "'a set") (* Gamma - tape alphabet *)
'a (* blank *)
'a (* left endmarker *)
"('q \<times> ('k \<Rightarrow> 'a) \<times> 'q \<times> ('k \<Rightarrow> 'a) \<times> ('k \<Rightarrow> dir)) set" (* transitions \<delta> *)
'q (* start state *)
'q (* accept state *)
'q (* reject state *)
datatype ('a,'q,'k) mt_config = Config\<^sub>M
(mt_state: 'q) (* state *)
"'k \<Rightarrow> nat \<Rightarrow> 'a" (* k tape contents *)
(mt_pos: "'k \<Rightarrow> nat") (* k tape positions *)
locale multitape_tm =
fixes
Q :: "'q set" and
\<Sigma> :: "'a set" and
\<Gamma> :: "'a set" and
blank :: 'a and
LE :: 'a and
\<delta> :: "('q \<times> ('k \<Rightarrow> 'a) \<times> 'q \<times> ('k \<Rightarrow> 'a) \<times> ('k :: {finite,zero} \<Rightarrow> dir)) set" and
s :: 'q and
t :: 'q and
r :: 'q
assumes
fin_Q: "finite Q" and
fin_\<Gamma>: "finite \<Gamma>" and
\<Sigma>_sub_\<Gamma>: "\<Sigma> \<subseteq> \<Gamma>" and
sQ: "s \<in> Q" and
tQ: "t \<in> Q" and
rQ: "r \<in> Q" and
blank: "blank \<in> \<Gamma>" "blank \<notin> \<Sigma>" and
LE: "LE \<in> \<Gamma>" "LE \<notin> \<Sigma>" and
tr: "t \<noteq> r" and
\<delta>_set: "\<delta> \<subseteq> (Q - {t,r}) \<times> (UNIV \<rightarrow> \<Gamma>) \<times> Q \<times> (UNIV \<rightarrow> \<Gamma>) \<times> (UNIV \<rightarrow> UNIV)" and
\<delta>LE: "(q, a, q', a', d) \<in> \<delta> \<Longrightarrow> a k = LE \<Longrightarrow> a' k = LE \<and> d k \<in> {dir.N,dir.R}"
begin
lemma \<delta>: assumes "(q,a,q',b,d) \<in> \<delta>"
shows "q \<in> Q" "a k \<in> \<Gamma>" "q' \<in> Q" "b k \<in> \<Gamma>"
using assms \<delta>_set by auto
lemma fin_\<Sigma>: "finite \<Sigma>"
using fin_\<Gamma> \<Sigma>_sub_\<Gamma> by (metis finite_subset)
lemma fin_\<delta>: "finite \<delta>"
by (intro finite_subset[OF \<delta>_set] finite_cartesian_product fin_funcsetI, insert fin_Q fin_\<Gamma>, auto)
lemmas tm = sQ \<Sigma>_sub_\<Gamma> blank(1) LE(1)
fun valid_config :: "('a, 'q, 'k) mt_config \<Rightarrow> bool" where
"valid_config (Config\<^sub>M q w n) = (q \<in> Q \<and> (\<forall> k. range (w k) \<subseteq> \<Gamma>) \<and> (\<forall> k. w k 0 = LE))"
definition init_config :: "'a list \<Rightarrow> ('a,'q,'k)mt_config" where
"init_config w = (Config\<^sub>M s (\<lambda> k n. if n = 0 then LE else if k = 0 \<and> n \<le> length w then w ! (n-1) else blank) (\<lambda> _. 0))"
lemma valid_init_config: "set w \<subseteq> \<Sigma> \<Longrightarrow> valid_config (init_config w)"
unfolding init_config_def valid_config.simps using tm by (force simp: set_conv_nth)
inductive_set step :: "('a, 'q, 'k) mt_config rel" where
step: "(q, (\<lambda> k. ts k (n k)), q', a, dir) \<in> \<delta> \<Longrightarrow>
(Config\<^sub>M q ts n, Config\<^sub>M q' (\<lambda> k. (ts k)(n k := a k)) (\<lambda> k. go_dir (dir k) (n k))) \<in> step"
lemma valid_step: assumes step: "(\<alpha>,\<beta>) \<in> step"
and val: "valid_config \<alpha>"
shows "valid_config \<beta>"
using step
proof (cases rule: step.cases)
case (step q ts n q' a dir)
from \<delta>[OF step(3)] val \<delta>LE step(3)
show ?thesis unfolding step(1-2) by fastforce
qed
definition Lang :: "'a list set" where
"Lang = {w . set w \<subseteq> \<Sigma> \<and> (\<exists> w' n. (init_config w, Config\<^sub>M t w' n) \<in> step^*)}"
definition deterministic where
"deterministic = (\<forall> q a p1 b1 d1 p2 b2 d2. (q,a,p1,b1,d1) \<in> \<delta> \<longrightarrow> (q,a,p2,b2,d2) \<in> \<delta> \<longrightarrow> (p1,b1,d1) = (p2,b2,d2))"
definition upper_time_bound :: "(nat \<Rightarrow> nat) \<Rightarrow> bool" where
"upper_time_bound f = (\<forall> w c n. set w \<subseteq> \<Sigma> \<longrightarrow> (init_config w, c) \<in> step^^n \<longrightarrow> n \<le> f (length w))"
end
fun valid_mttm :: "('q,'a,'k :: {finite,zero})mttm \<Rightarrow> bool" where
"valid_mttm (MTTM Q \<Sigma> \<Gamma> bl le \<delta> s t r) = multitape_tm Q \<Sigma> \<Gamma> bl le \<delta> s t r"
fun Lang_mttm :: "('q,'a,'k :: {finite,zero})mttm \<Rightarrow> 'a list set" where
"Lang_mttm (MTTM Q \<Sigma> \<Gamma> bl le \<delta> s t r) = multitape_tm.Lang \<Sigma> bl le \<delta> s t"
fun det_mttm :: "('q,'a,'k :: {finite,zero})mttm \<Rightarrow> bool" where
"det_mttm (MTTM Q \<Sigma> \<Gamma> bl le \<delta> s t r) = multitape_tm.deterministic \<delta>"
fun upperb_time_mttm :: "('q,'a,'k :: {finite, zero})mttm \<Rightarrow> (nat \<Rightarrow> nat) \<Rightarrow> bool" where
"upperb_time_mttm (MTTM Q \<Sigma> \<Gamma> bl le \<delta> s t r) f = multitape_tm.upper_time_bound \<Sigma> bl le \<delta> s f"
end
|
If $r \neq 0$, then $y = x/r$ if and only if $r \cdot y = x$.
|
module Compiler.Erlang.ModuleOpts
import Data.List
import Data.Strings
import Core.Name
import Compiler.Erlang.Name
%default total
public export
record ModuleOpts where
constructor MkModuleOpts
ns : Namespace
exportFunName : Maybe Name
inlineSize : Maybe Nat
defaultModuleOpts : Namespace -> ModuleOpts
defaultModuleOpts ns = MkModuleOpts ns Nothing Nothing
data Flag
= SetExportFunName String
| SetInlineSize Nat
flagToOpts : Flag -> ModuleOpts -> ModuleOpts
flagToOpts (SetExportFunName exportFunName) opts = record { exportFunName = Just (NS (ns opts) (UN exportFunName)) } opts
flagToOpts (SetInlineSize inlineSize) opts = record { inlineSize = Just inlineSize } opts
flagsToOpts : Namespace -> List Flag -> ModuleOpts
flagsToOpts ns flags = flagsToOpts' flags (defaultModuleOpts ns)
where
flagsToOpts' : List Flag -> ModuleOpts -> ModuleOpts
flagsToOpts' [] opts = opts
flagsToOpts' (flag :: flags) opts = flagsToOpts' flags (flagToOpts flag opts)
stringToFlags : List String -> List Flag
stringToFlags ds = mapMaybe parseFlag (map (\d => assert_total (words d)) ds) -- TODO: Remove `assert_total` when `words` is total
where
parseFlag : List String -> Maybe Flag
parseFlag ["export", exportFunName] = Just $ SetExportFunName exportFunName
parseFlag ["inline", inlineSize] = Just $ SetInlineSize (integerToNat (cast inlineSize))
parseFlag _ = Nothing
export
parseModuleOpts : Namespace -> List String -> ModuleOpts
parseModuleOpts ns str = flagsToOpts ns (stringToFlags str)
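-- Illustrative directive strings (the names are hypothetical): `stringToFlags ["export main"]`
-- yields a `SetExportFunName "main"` flag, `stringToFlags ["inline 10"]` yields
-- `SetInlineSize 10`, and unrecognised directives are dropped by `mapMaybe`.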
|
[STATEMENT]
lemma meet_glb: "z \<le> x \<and> z \<le> y \<Longrightarrow> z \<le> x \<sqinter> y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. z \<le> x \<and> z \<le> y \<Longrightarrow> z \<le> x \<sqinter> y
[PROOF STEP]
by simp
|
# MS-E2121 Exercise session 9
### Problem 9.1: Uncapacitated Facility Location (UFL)
**a)**
Let $N = \{1,\dots,n\}$ be a set of potential facilities and $M = \{1,\dots,m\}$ a set of clients. Let $y_j = 1$ if facility $j$ is opened, and $y_j = 0$ otherwise. Moreover, let $x_{ij}$ be the fraction of client $i$'s demand satisfied from facility $j$. The UFL can be formulated as the mixed-integer problem (MIP):
$$\begin{align}
\text{(UFL-W)} : \quad &\min_{x,y} \sum_{j\in N} f_jy_j + \sum_{i\in M}\sum_{j\in N} c_{ij}x_{ij} \\
&\text{s.t.} \\
&\quad \sum_{j\in N}x_{ij} = 1, &\forall i \in M,\\
&\quad \sum_{i\in M}x_{ij} \leq my_j, &\forall j \in N,\\
&\quad x_{ij} \geq 0, &\forall i \in M, \forall j \in N,\\
&\quad y_j \in \{0,1\}, &\forall j\in N,
\end{align}$$
where $f_j$ is the cost of opening facility $j$, and $c_{ij}$ is the cost of satisfying client $i$'s demand from facility $j$. Consider an instance of the UFL with opening costs $f=(4,3,4,4,7)$ and client costs
$$\begin{align*}
(c_{ij}) = \left(
\begin{array}{ccccc}
12 & 13 & 6 & 0 & 1 \\
8 & 4 & 9 & 1 & 2 \\
2 & 6 & 6 & 0 & 1 \\
3 & 5 & 2 & 1 & 8 \\
8 & 0 & 5 & 10 & 8 \\
2 & 0 & 3 & 4 & 1
\end{array}
\right)
\end{align*}$$
Implement the model and solve the problem with Julia using JuMP.
**b)**
An alternative formulation of the UFL is of the form
$$\begin{align}
\text{(UFL-S)} : \quad &\min_{x,y} \sum_{j\in N}f_jy_j + \sum_{i\in M}\sum_{j\in N}c_{ij}x_{ij}\\
&\text{s.t.} \\
&\quad \sum_{j\in N}x_{ij} = 1, &\forall i \in M,\\
&\quad x_{ij} \leq y_j, &\forall i\in M, \forall j \in N,\\
&\quad x_{ij} \geq 0, &\forall i \in M, \forall j \in N,\\
&\quad y_j \in \{0,1\}, &\forall j\in N.
\end{align}$$
Linear programming (LP) relaxations of these problems can be obtained by relaxing the binary constraints $y_j\in \{0,1\}$ to $0 \leq y_j \leq 1$ for all $j \in N$. For the same instance as in part (a), solve the LP relaxations of UFL-W and UFL-S with Julia using JuMP, and compare the optimal costs of the LP relaxations against the optimal integer cost obtained in part (a).
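A note before solving (this observation is not part of the original exercise statement): summing the UFL-S constraints $x_{ij} \leq y_j$ over $i \in M$ gives $\sum_{i\in M} x_{ij} \leq m y_j$, so every point that is feasible for the LP relaxation of UFL-S is also feasible for the LP relaxation of UFL-W, while the two formulations have the same integer-feasible solutions. Hence
$$z_{LP}^{\text{UFL-W}} \;\leq\; z_{LP}^{\text{UFL-S}} \;\leq\; z_{IP},$$
i.e., the UFL-S relaxation gives a bound that is at least as tight as the UFL-W relaxation.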
```julia
using JuMP, Cbc
```
Write down the problem data
```julia
f = [4 3 4 4 7] # Facility opening costs
c = [12 13 6 0 1; 8 4 9 1 2; 2 6 6 0 1; 3 5 2 1 8; 8 0 5 10 8; 2 0 3 4 1] # Cost of satisfying demand
(m, n) = size(c)
M = 1:m # Set of clients
N = 1:n; # Set of facilities
```
Implement the problem in JuMP
```julia
ufl_w = Model(Cbc.Optimizer)
@variable(ufl_w, x[M,N] >= 0) # Fraction of demand (client i) satisfied by facility j
@variable(ufl_w, y[N], Bin) # Facility location
# Minimize total cost
@objective(ufl_w, Min, sum(f[j]*y[j] for j in N) + sum(c[i,j]*x[i,j] for i in M, j in N))
# For each client, the demand must be fulfilled
@constraint(ufl_w, demand[i in M], sum(x[i,j] for j in N) == 1)
# A big-M style constraint stating that facility j can't send out anything if y[j]==0
@constraint(ufl_w, supply[j in N], sum(x[i,j] for i in M) <= m*y[j])
optimize!(ufl_w)
```
```julia
println("UFL-W MILP:")
println("Optimal value $(objective_value(ufl_w))")
println("with y = $(value.(y).data)")
```
```julia
ufl_w_rel = Model(Cbc.Optimizer)
@variable(ufl_w_rel, x[M,N] >= 0) # Fraction of demand (client i) satisfied by facility j
@variable(ufl_w_rel, 0<=y[N]<=1) # Facility location
# Minimize total cost
@objective(ufl_w_rel, Min, sum(f[j]*y[j] for j in N) + sum(c[i,j]*x[i,j] for i in M, j in N))
# For each client, the demand must be fulfilled
@constraint(ufl_w_rel, demand[i in M], sum(x[i,j] for j in N) == 1)
# A big-M style constraint stating that facility j can't send out anything if y[j]==0
@constraint(ufl_w_rel, supply[j in N], sum(x[i,j] for i in M) <= m*y[j])
optimize!(ufl_w_rel)
```
```julia
println("UFL-W LP:")
println("Optimal value $(objective_value(ufl_w_rel))")
println("with y = $(value.(y).data)")
```
```julia
ufl_s_rel = Model(Cbc.Optimizer)
@variable(ufl_s_rel, x[M,N] >= 0)
@variable(ufl_s_rel, 0<=y[N]<=1)
@objective(ufl_s_rel, Min, sum(f[j]*y[j] for j in N) + sum(c[i,j]*x[i,j] for i in M, j in N))
@constraint(ufl_s_rel, demand[i in M], sum(x[i,j] for j in N) == 1)
# The difference between the models is that UFL-S has m constraints telling that nothing can be sent to client i from facility j if y[j]==0
# In UFL-W, there is a single constraint telling that nothing can be sent from facility j if y[j]==0
@constraint(ufl_s_rel, supply[i in M, j in N], x[i,j] <= y[j])
optimize!(ufl_s_rel)
```
```julia
println("UFL-S LP:")
println("Optimal value $(objective_value(ufl_s_rel))")
println("with y = $(value.(y).data)")
```
#### Branching
We see that the UFL-S relaxation produces an integer solution, meaning that we have an integer optimal solution and no branching needs to be done. However, if we used UFL-W instead, we would need to do B&B or something else to obtain the integer optimum. In the UFL-W LP relaxation solution (0, 1/3, 0, 2/3, 0), we have two fractional variables $y_2$ and $y_4$, and we can branch on one of them. Let's choose $y_2$ and see what happens if we set it to 0 or 1.
```julia
ufl_w_rel_y2_0 = Model(Cbc.Optimizer)
@variable(ufl_w_rel_y2_0, x[M,N] >= 0)
@variable(ufl_w_rel_y2_0, 0<=y[N]<=1)
@objective(ufl_w_rel_y2_0, Min, sum(f[j]*y[j] for j in N) + sum(c[i,j]*x[i,j] for i in M, j in N))
@constraint(ufl_w_rel_y2_0, demand[i in M], sum(x[i,j] for j in N) == 1)
@constraint(ufl_w_rel_y2_0, supply[j in N], sum(x[i,j] for i in M) <= m*y[j])
@constraint(ufl_w_rel_y2_0, y[2] == 0)
optimize!(ufl_w_rel_y2_0)
```
```julia
ufl_w_rel_y2_1 = Model(Cbc.Optimizer)
@variable(ufl_w_rel_y2_1, x[M,N] >= 0)
@variable(ufl_w_rel_y2_1, 0<=y[N]<=1)
@objective(ufl_w_rel_y2_1, Min, sum(f[j]*y[j] for j in N) + sum(c[i,j]*x[i,j] for i in M, j in N))
@constraint(ufl_w_rel_y2_1, demand[i in M], sum(x[i,j] for j in N) == 1)
@constraint(ufl_w_rel_y2_1, supply[j in N], sum(x[i,j] for i in M) <= m*y[j])
@constraint(ufl_w_rel_y2_1, y[2] == 1)
optimize!(ufl_w_rel_y2_1)
```
```julia
println("UFL-W LP with y2=0:")
println("Optimal value $(objective_value(ufl_w_rel_y2_0))")
println("with y = $(value.(ufl_w_rel_y2_0[:y]).data)")
println()
println("UFL-W LP with y2=1:")
println("Optimal value $(objective_value(ufl_w_rel_y2_1))")
println("with y = $(value.(ufl_w_rel_y2_1[:y]).data)")
```
Both branches have fractional solutions, and more branching is thus needed. You can practice that in the next exercise.
### Problem 9.3: Solving Branch & Bound (B&B) graphically
*You can do this with pen and paper if you want to do it graphically, or solve the problems using JuMP instead if you don't feel like drawing.*
Consider the following integer programming problem $IP$:
$$\begin{matrix}
\text{max} &x_{1} &+&2x_{2} & \\
\text{s.t.}&-3x_{1} &+&4x_{2} &\le 4 \\
&3x_{1} &+&2x_{2} &\le 11 \\
&2x_{1} &-&x_{2} &\le 5 \\
&x_{1}, &x_{2} & \text{integer} &\\
\end{matrix}$$
Plot (or draw) the feasible region of the linear programming (LP) relaxation of the problem $IP$, then solve the problems using the figure. Recall that the LP relaxation of $IP$ is obtained by replacing the integrality constraints $x_1,x_2\in \mathbb{Z}_+$ by linear nonnegativity $x_1,x_2\geq 0$ and including (possible) upper bounds corresponding to the upper bounds of the integer variables ($x_1,x_2\leq 1$ for binary variables).
(a) What is the optimal cost $z_{LP}$ of the LP relaxation of the problem $IP$? What is the optimal cost $z$ of the problem $IP$?
(b) Draw the border of the convex hull of the feasible solutions of the problem $IP$. Recall that the convex hull represents the *ideal* formulation for the problem $IP$.
(c) Solve the problem $IP$ by LP-relaxation based Branch \& Bound (B\&B). You can solve the LP relaxations at each node of the B\&B tree graphically. Start the B\&B procedure without any primal bound.
Check your solutions using JuMP. Make sure to point out the optimal solutions in the figure, as well as giving their numerical values.
```julia
# TODO: add your code here
```
```julia
# TODO: add your code here
```
```julia
# TODO: add your code here
```
```julia
# TODO: add your code here
```
```julia
# TODO: add your code here
```
```julia
# TODO: add your code here
```
```julia
# TODO: add your code here
```
```julia
# TODO: add your code here
```
```julia
# TODO: add your code here
```
```julia
```
|
# Using an analogous interface to qutip's mesolve
```python
import qutip as q
from spylind import spylind as spl
import sympy as sm
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
%load_ext autoreload
%autoreload 2
```
No pretty printing stuff (probably because no Pandas)
# Qubit dynamics
## Single uniformly driven qubit
### Qutip version
```python
#def qubit_integrate(epsilon, delta, g1, g2, solver):
epsilon = 0.0 * 2 * np.pi # cavity frequency
delta = 1.0 * 2 * np.pi # atom frequency
g2 = 0.15
g1 = 0.0
# initial state
psi0 = q.basis(2,0)
H = epsilon / 2.0 * q.sigmaz() + delta / 2.0 * q.sigmax()
# collapse operators
c_ops = []
if g1 > 0.0:
c_ops.append(np.sqrt(g1) * q.sigmam())
if g2 > 0.0:
c_ops.append(np.sqrt(g2) * q.sigmaz())
e_ops = [q.sigmax(), q.sigmay(), q.sigmaz()]
tlist = np.linspace(0,2,100)
output = q.mesolve(H, psi0, tlist, c_ops, e_ops)
```
```python
plt.plot(np.array(output.expect).T )
```
```python
#output = spl.mesolve([(sm.symbols('H_0'),H) ], psi0, tlist, c_ops, e_ops)
output = spl.mesolve([H] , psi0, tlist=tlist, c_ops = c_ops, e_ops=e_ops )
```
makeMESymb enter
Munch({'tSym': t, 'dimSyms': [], 'prop_state_syms': [\rho_{0|0}, \rho_{1|1}, \rho_{0|1}], 'stationary_state_syms': [], 'driving_syms': [], 'state_dep_syms': []})
state dependent functions should have signature [t, \rho_{0|0}, \rho_{1|1}, \rho_{0|1}]
Not integrating first step (it's just the initial state)
```python
plt.plot(tlist, output)
plt.legend()
```
### Inhomogeneously broadened ensemble of qubits
```python
output = spl.mesolve([H] , psi0, tlist=tlist, c_ops = c_ops, e_ops=e_ops )
```
False
```python
psi0.data.todense()
```
matrix([[1.+0.j],
[0.+0.j]])
## Time dependent driving of qubit
```python
#def qubit_integrate(epsilon, delta, g1, g2, solver):
epsilon = 0.0 * 2 * np.pi # cavity frequency
g2 = 0.15
g1 = 0.0
# initial state
psi0 = q.basis(2,0)
H0 = [epsilon / 2.0 * q.sigmaz(), lambda t,args: 1 ]
H1 = [q.sigmax(), lambda t, args: 5*np.sqrt(t) ]
# collapse operators
c_ops = []
if g1 > 0.0:
c_ops.append(np.sqrt(g1) * q.sigmam())
if g2 > 0.0:
c_ops.append(np.sqrt(g2) * q.sigmaz())
e_ops = [q.sigmax(), q.sigmay(), q.sigmaz()]
tlist = np.linspace(0,4,200)
output = q.mesolve([H0,H1], psi0, tlist, c_ops, e_ops)
```
```python
plt.plot(np.array(output.expect).T)
```
```python
Esym = sm.symbols('E', real=True)
H0 = epsilon / 2.0 * q.sigmaz()
H1 = [Esym, q.sigmax()]
output = spl.mesolve([H0,H1], psi0, tlist, c_ops, e_ops, t_dep_fL = {Esym: lambda t: 5*np.sqrt(t)}, max_step_size=0.01)
```
No pretty printing stuff (probably because no Pandas)
Munch({'tSym': t, 'dimSyms': [], 'prop_state_syms': [\rho_{0|0}, \rho_{1|1}, \rho_{0|1}], 'stationary_state_syms': [], 'driving_syms': [E], 'state_dep_syms': []})
state dependent functions should have signature [t, \rho_{0|0}, \rho_{1|1}, \rho_{0|1}, E]
Not integrating first step (it's just the initial state)
```python
plt.plot(tlist, output)
plt.legend()
```
```python
```
|
% Initialization code for running Garmin serial
% input model for xPC
Ts=1/75; % sample time in seconds
|
open import Coinduction using ( β )
open import Data.Product using ( _,_ )
open import Relation.Binary using ( Poset )
open import Relation.Binary.PropositionalEquality using ( _β‘_ ; refl ; sym ; trans ; cong ; substβ ) renaming ( setoid to β‘-setoid )
open import System.IO.Transducers using ( _β_ ; inp ; out ; done ; β¦_β§ ; _β_ ; _β²_ ; _β«_ )
open import System.IO.Transducers.Session using ( Session )
open import System.IO.Transducers.Trace using ( Trace ; [] ; _β·_ ; _β_ )
open import System.IO.Transducers.Properties.Category using ( β«-semantics )
import Relation.Binary.PartialOrderReasoning
module System.IO.Transducers.Properties.TwoCategory where
-- The category is poset-enriched, with order inherited from prefix order on traces.
-- Reflexivity
β-refl : β {S} (as : Trace S) β (as β as)
β-refl [] = []
β-refl (a β· as) = (a β· β-refl as)
β‘-impl-β : β {S} {as bs : Trace S} β (as β‘ bs) β (as β bs)
β‘-impl-β refl = β-refl _
β²-refl : β {S T} (f : Trace S β Trace T) β (f β² f)
β²-refl f as = β-refl (f as)
β-impl-β² : β {S T} {f g : Trace S β Trace T} β (f β g) β (f β² g)
β-impl-β² fβg as = β‘-impl-β (fβg as)
-- Transitivity
β-trans : β {S} {as bs cs : Trace S} β (as β bs) β (bs β cs) β (as β cs)
β-trans [] bs = []
β-trans (a β· as) (.a β· bs) = (a β· β-trans as bs)
β²-trans : β {S T} {f g h : Trace S β Trace T} β (f β² g) β (g β² h) β (f β² h)
β²-trans fβ²g gβ²h as = β-trans (fβ²g as) (gβ²h as)
-- Antisymmetry
β-antisym : β {S} {as bs : Trace S} β (as β bs) β (bs β as) β (as β‘ bs)
β-antisym [] [] = refl
β-antisym (a β· as) (.a β· bs) = cong (_β·_ a) (β-antisym as bs)
β²-antisym : β {S T} {f g : Trace S β Trace T} β (f β² g) β (g β² f) β (f β g)
β²-antisym fβ²g gβ²f as = β-antisym (fβ²g as) (gβ²f as)
-- β and β² form posets
β-poset : Session β Poset _ _ _
β-poset S = record
{ Carrier = Trace S
; _β_ = _β‘_
; _β€_ = _β_
; isPartialOrder = record
{ antisym = β-antisym
; isPreorder = record
{ reflexive = β‘-impl-β
; trans = β-trans
; βΌ-resp-β = ((Ξ» bsβ‘cs β substβ _β_ refl bsβ‘cs) , (Ξ» asβ‘bs β substβ _β_ asβ‘bs refl))
; isEquivalence = Relation.Binary.Setoid.isEquivalence (β‘-setoid (Trace S))
}
}
}
β²-poset : Session β Session β Poset _ _ _
β²-poset S T = record
{ Carrier = (Trace S β Trace T)
; _β_ = _β_
; _β€_ = _β²_
; isPartialOrder = record
{ antisym = β²-antisym
; isPreorder = record
{ reflexive = β-impl-β²
; trans = β²-trans
; βΌ-resp-β = (Ξ» PβQ Pβ²R as β substβ _β_ refl (PβQ as) (Pβ²R as)) , Ξ» QβR Qβ²P as β substβ _β_ (QβR as) refl (Qβ²P as)
; isEquivalence = record
{ refl = Ξ» as β refl
; sym = Ξ» PβQ as β sym (PβQ as)
; trans = Ξ» PβQ QβR as β trans (PβQ as) (QβR as)
}
}
}
}
-- Inequational reasoning
module β-Reasoning {S} where
open Relation.Binary.PartialOrderReasoning (β-poset S) public renaming ( _β€β¨_β©_ to _ββ¨_β©_ ; _ββ¨_β©_ to _β‘β¨_β©_ )
module β²-Reasoning {S T} where
open Relation.Binary.PartialOrderReasoning (β²-poset S T) public renaming ( _β€β¨_β©_ to _β²β¨_β©_ ; _ββ¨_β©_ to _ββ¨_β©_ )
open β-Reasoning
-- Processes are β-monotone
P-monotone : β {S T as bs} β (P : S β T) β (as β bs) β (β¦ P β§ as β β¦ P β§ bs)
P-monotone (inp F) [] = []
P-monotone (inp F) (a β· asβbs) = P-monotone (β F a) asβbs
P-monotone (out b P) asβbs = b β· P-monotone P asβbs
P-monotone done asβbs = asβbs
-- Composition is β²-monotone
β«-monotone : β {S T U} (Pβ Pβ : S β T) (Qβ Qβ : T β U) β
(β¦ Pβ β§ β² β¦ Pβ β§) β (β¦ Qβ β§ β² β¦ Qβ β§) β
(β¦ Pβ β« Qβ β§ β² β¦ Pβ β« Qβ β§)
β«-monotone Pβ Pβ Qβ Qβ Pββ²Pβ Qββ²Qβ as =
begin
β¦ Pβ β« Qβ β§ as
β‘β¨ β«-semantics Pβ Qβ as β©
β¦ Qβ β§ (β¦ Pβ β§ as)
ββ¨ P-monotone Qβ (Pββ²Pβ as) β©
β¦ Qβ β§ (β¦ Pβ β§ as)
ββ¨ Qββ²Qβ (β¦ Pβ β§ as) β©
β¦ Qβ β§ (β¦ Pβ β§ as)
β‘β¨ sym (β«-semantics Pβ Qβ as) β©
β¦ Pβ β« Qβ β§ as
β
|
Formal statement is: lemma has_contour_integral_trivial_iff [simp]: "(f has_contour_integral i) (linepath a a) \<longleftrightarrow> i=0" Informal statement is: A function $f$ has contour integral $i$ along the degenerate path from $a$ to $a$ if and only if $i = 0$.
|
\documentclass[conference]{IEEEtran}
\IEEEoverridecommandlockouts
\renewcommand\IEEEkeywordsname{Keywords}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage[american]{babel}
\usepackage{graphicx}
\usepackage{float}
\usepackage{amsmath, amssymb, exscale}
\usepackage{bera}
\usepackage[bookmarks=false]{hyperref}
\usepackage{xcolor}
\def\BibTeX{{\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08em
T\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}}
\makeatletter
\let\old@ps@headings\ps@headings
\let\old@ps@IEEEtitlepagestyle\ps@IEEEtitlepagestyle
\def\confheader#1{%
% for all pages except the first
\def\ps@headings{%
\old@ps@headings%
\def\@oddhead{\strut\hfill#1\hfill\strut}%
\def\@evenhead{\strut\hfill#1\hfill\strut}%
}%
% for the first page
\def\ps@IEEEtitlepagestyle{%
\old@ps@IEEEtitlepagestyle%
\def\@oddhead{\strut\hfill#1\hfill\strut}%
\def\@evenhead{\strut\hfill#1\hfill\strut}%
}%
\ps@headings%
}
\makeatother
\begin{document}
\title{Robust De-anonymization of Large Datasets}
\author{\IEEEauthorblockN{Rui Fernandes (up202103071)}
\IEEEauthorblockA{Department of Computer Science\\Faculty of Sciences of the University of Porto}
}
\maketitle
\begin{abstract}
Privacy preservation is a major concern when it comes to data analysis. When a dataset is released
to third parties, privacy-preserving techniques are often necessary to reduce the possibility of
identifying sensitive information about individuals. Usually, the data owner modifies the data in a
way that the modified data can guarantee privacy while retaining sufficient utility. This process
is usually referred to as privacy-preserving data publishing.
In this report, I provide an overview of a statistical de-anonymization attack against
high-dimensional micro-data and its application in de-anonymizing the Netflix Prize dataset.\\
\end{abstract}
\begin{IEEEkeywords}
Data Anonymization, Privacy Preservation, Privacy-Preserving Data Publishing
\end{IEEEkeywords}
\section{Introduction}
A common reason for publishing anonymized micro-data\footnote{Micro-data are sets of records
containing information on individuals.} is \textit{collaborative filtering}, \textit{i.e.},
predicting a consumer's future choices based on his past behavior using the knowledge of what
similar consumers did.
However, the privacy risks associated with publishing micro-data are well known. Even if personal
identifiers have been removed, an adversary can use background knowledge and cross-correlation with
other databases to re-identify individual data records. Famous attacks include the de-anonymization
of a Massachusetts hospital discharge database by joining it with a public voter database, in which
the combination of both was used to determine the values of medical attributes for each person who
appears in both databases \cite{sweeney_2021}.
\section{Problem}
In October 2006, Netflix, the world's largest online movie rental service, announced the
\$1-million Netflix Prize, a machine learning and data mining competition for movie rating
prediction \cite{AndifYou17:online}. To win the prize, a contestant would have to design a system
that is more accurate than the company's recommendation system by at least 10\%. To support the
research, a dataset was made available consisting of about 100 million movie ratings for 17,770
movies given by 480,189 users. Each rating consists of four entries: user, movie, date of grade,
grade. Users and movies are represented with integer IDs, while ratings range from 1 to 5.
This seemingly innocuous dataset actually has real privacy implications which will be addressed in
the following sections.
\section{De-anonymization of the Netflix Prize Dataset}
Narayanan and Shmatikov \cite{4531148} have shown that it is possible to de-anonymize the Netflix
Prize Dataset using the algorithm described in Section \ref{sec:alg}.
In this dataset, there are no attributes that can be used directly for de-anonymization. Indeed,
there are hundreds of records with the same value for a certain attribute. However, knowledge that
a particular individual has a certain attribute value reveals some information since attribute
values and even the mere fact that a given attribute is non-null vary from record to record.
Formally, we sample a record $r$ randomly from a database $D$ and give auxiliary information
related to $r$ to the adversary. Given this auxiliary information and an anonymized sample
$\hat{D}$ of $D$, his goal is to reconstruct the attribute values of the entire record $r$.
A possible source of background knowledge is the Internet Movie Database -- IMDb. Netflix
subscribers that also use IMDb are expected to have a high correlation between their private
Netflix ratings and their public IMDb ratings. In many cases, even a few movies that are rated by a
subscriber in both services would be sufficient to identify his record in the Netflix Prize dataset
with enough statistical confidence to rule out the possibility of a false match.
\subsection{Algorithm} \label{sec:alg}
The algorithm used to de-anonymize the Netflix Prize dataset has three main components:
\begin{itemize}
\item A \textbf{scoring function} $Score$ which assigns a numerical score to each record in
$\hat{D}$ based on how well it matches the adversary's auxiliary information $aux$. This function
gives higher weights to statistically rare attributes, which captures the intuitive notion that
statistically rare attributes help de-anonymization much more than the knowledge of a common
attribute;
\item A \textbf{matching criterion} which is the algorithm applied by the adversary to the set
of scores to determine if there's a match. The score of a candidate record is determined by the
least similar attribute between itself and the adversary's auxiliary information. To improve
the robustness of the algorithm, the matching criterion requires the top score to be significantly
higher than the second-best score. This measures how much the first record "stands out" from
other candidate records;
\item A \textbf{record selection} which selects a "best-guess" record, or a probability
distribution over the candidate records.
\end{itemize}
The algorithm takes as input a sample $\hat{D}$ of a database $D$ and auxiliary information $aux
= Aux(r), \ r \leftarrow D$, and outputs either a record $r' \in \hat{D}$ or a set of records and a
probability distribution over those records. It works as follows:
\begin{enumerate}
\item Compute $Score(aux, r'), \ \forall \ r' \in \hat{D}$;
\item Apply the matching criterion to the resulting set of scores;
\item If a "best-guess" is required, output $r' \in \hat{D}$ with the highest score. If a
probability distribution over the candidate records is required, compute and output a
probability distribution based on the scores.
\end{enumerate}
A more mathematical approach is provided by the authors in the original paper.
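As an illustration of the matching criterion described above, the requirement that the top score
``stand out'' can be made precise through the eccentricity of the set of scores $S$ used by the
authors,
\begin{equation*}
\mathrm{ecc}(S) = \frac{\max(S) - \max_2(S)}{\sigma(S)},
\end{equation*}
where $\max_2(S)$ denotes the second-highest score and $\sigma(S)$ the standard deviation of the
scores; a match is reported only when the eccentricity exceeds a fixed threshold.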
It is important to note that this algorithm may fail in two different scenarios: when an incorrect
record is assigned the highest score and, on the other hand, when the correct record does not have
a significantly higher score than the second-highest one.
\subsection{Results}
Using this algorithm, it was possible to infer that very little auxiliary information is needed to
de-anonymize an average subscriber record from the Netflix Prize dataset.
In fact, 99\% of the Netflix subscribers were shown to be uniquely identifiable by a limited
knowledge of no more than 8 movie ratings (2 of which may be wrong) with their corresponding rating
dates. This emphasizes the relevance of background information for de-anonymization and
re-identification.
Furthermore, a considerable privacy breach occurs even without any date, especially when the
auxiliary information consists of movies that are not blockbusters, considering the fact that, as
previously stated, statistically rare attributes help de-anonymization much more than knowledge of
a common attribute.
It is also important to note that \textit{partial de-anonymization} may still pose a serious
threat, considering the fact that there are many things the adversary might know about his target,
such as the approximate number of movies rated, that can be used together with human inspection to
complete the de-anonymization. In some cases, knowing the number of movies the target has rated,
even if with a 50\% error, can more than double the probability of complete de-anonymization.
Finally, even if it is hard to collect such information for a large number of subscribers,
\textit{targeted de-anonymization} still presents a serious threat to privacy.
\section{Critical Analysis}
The utility of a data source lies in its ability to disclose data, and privacy aspects have the
potential to hurt utility. Indeed, utility and privacy may be competing goals. The central question
concerning privacy and utility of data is: "Can a higher level of privacy be achieved while
maintaining utility?". Another important question is how to design micro-data sanitization
algorithms that provide both privacy and utility. This is particularly relevant in the field of
Privacy Preserving Data Mining, whose goal is to extract relevant knowledge from large amounts of
data and provide accurate results while preventing sensitive information from disclosure.
\section{Conclusions}
It has been demonstrated that with very limited background knowledge, even if imprecise, it is
possible to de-anonymize movie viewing records released in the Netflix Prize dataset. It is also
worth noting that anonymization operations such as generalization and suppression do not ensure
privacy, and in any case fail on high-dimensional data\footnote{High-dimensional data refers to a
dataset in which the number of features $p$ is larger than the number of observations $N$, often
written as $p \gg N$.}. For most records, simply knowing which columns are non-null reveals as much
information as knowing the specific values of these columns.
\bibliographystyle{IEEEtran}
\bibliography{refs}
\end{document}
|
*----------------------------------------------------------------------*
subroutine vec_from_da(ffda,idxvec,vec,len)
*----------------------------------------------------------------------*
* get a vector of length len from disc
* lblk is the block-size of the DA file (unit luda)
* idxvec is the number of the vector, in case of several vectors
* in the same file; the vectors are always aligned such that they
* start at the beginning of a block
*----------------------------------------------------------------------*
implicit none
include 'stdunit.h'
include 'def_filinf.h'
integer ::
& ntest = 00
type(filinf), intent(in) ::
& ffda
integer, intent(in) ::
& len, idxvec
real(8), intent(out) ::
& vec(len)
integer ::
& lblk, luda,
& nrecs, len_rest, irec, irecst, irecnd, idxst, idxnd, len_rd
real(8) ::
& xnrm
real(8), external ::
& ddot
luda = ffda%unit
lblk = ffda%reclen
if (luda.lt.0)
& call quit(1,'vec_from_da',
& 'file is not open: '//trim(ffda%name))
if (lblk.eq.0)
& call quit(1,'vec_from_da',
& 'record length of file is zero: '//trim(ffda%name))
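* the vector occupies nrecs records of size lblk; a partial final record
* still counts as one full record, and len_rest is the number of elements
* actually stored in that last record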
nrecs = len/lblk
len_rest = mod(len,lblk)
if (len_rest.gt.0) nrecs = nrecs+1
if (len_rest.eq.0) len_rest=lblk
irecst = (idxvec-1)*nrecs+1
irecnd = (idxvec-1)*nrecs+nrecs
len_rd = lblk
idxst = 1
do irec = irecst, irecnd
if (irec.eq.irecnd) len_rd = len_rest
idxnd = idxst-1 + len_rd
read(luda,rec=irec) vec(idxst:idxnd)
idxst = idxnd+1
end do
if (ntest.eq.100) then
write(lulog,*) 'read ',nrecs,' blocks'
write(lulog,*) 'length of vector: ',len
xnrm = sqrt(ddot(len,vec,1,vec,1))
write(lulog,*) 'norm of vector: ',xnrm
end if
return
end
|
{-# OPTIONS --without-K --safe #-}
--------------------------------------------------------------------------------
-- A simple reflection based solver for categories.
--
-- Based off 'Tactic.MonoidSolver' from 'agda-stdlib'
--------------------------------------------------------------------------------
open import Categories.Category
module Categories.Tactic.Category where
open import Level
open import Function using (_β¨_β©_)
open import Data.Bool as Bool using (Bool; _β¨_; if_then_else_)
open import Data.Maybe as Maybe using (Maybe; just; nothing; maybe)
open import Data.List as List using (List; _β·_; [])
open import Data.Product as Product using (_Γ_; _,_)
open import Agda.Builtin.Reflection
open import Reflection.Argument
open import Reflection.Term using (getName; _β―β
β·β_)
open import Reflection.TypeChecking.Monad.Syntax
module _ {o β e} (π : Category o β e) where
open Category π
open HomReasoning
open Equiv
private
variable
A B C : Obj
f g : A β B
--------------------------------------------------------------------------------
-- An 'Expr' reifies the parentheses/identity morphisms of some series of
-- compositions of morphisms into a data structure. In fact, this is also
-- a category!
--------------------------------------------------------------------------------
data Expr : Obj β Obj β Set (o β β) where
_ββ²_ : β {A B C} β Expr B C β Expr A B β Expr A C
idβ² : β {A} β Expr A A
[_β] : β {A B} β A β B β Expr A B
-- Embed a morphism in 'Expr' back into 'π' without normalizing.
[_β] : Expr A B β A β B
[ f ββ² g β] = [ f β] β [ g β]
[ idβ² β] = id
[ [ f β] β] = f
-- Convert an 'Expr' back into a morphism, while normalizing
--
-- This actually embeds the morphism into the category of copresheaves
-- on π, which obeys the category laws up to beta-eta equality.
-- This lets us normalize away all the associations/identity morphisms.
embed : Expr B C β A β B β A β C
embed (f ββ² g) h = embed f (embed g h)
embed idβ² h = h
embed [ f β] h = f β h
preserves-ββ² : β (f : Expr B C) β (h : A β B) β embed f id β h β embed f h
preserves-ββ² idβ² f = identityΛ‘
preserves-ββ² [ x β] f = β-resp-βΛ‘ identityΚ³
preserves-ββ² (f ββ² g) h = begin
embed (f ββ² g) id β h β‘β¨β©
embed f (embed g id) β h βΛβ¨ preserves-ββ² f (embed g id) β©ββ¨refl β©
(embed f id β embed g id) β h ββ¨ assoc β©
embed f id β embed g id β h ββ¨ reflβ©ββ¨ preserves-ββ² g h β©
embed f id β embed g h ββ¨ preserves-ββ² f (embed g h) β©
embed (f ββ² g) h β
preserves-β : β (f : Expr A B) β embed f id β [ f β]
preserves-β idβ² = refl
preserves-β [ x β] = identityΚ³
preserves-β (f ββ² g) = begin
embed (f ββ² g) id βΛβ¨ preserves-ββ² f (embed g id) β©
embed f id β embed g id ββ¨ preserves-β f β©ββ¨ preserves-β g β©
[ f β] β [ g β] β‘β¨β©
[ f ββ² g β] β
--------------------------------------------------------------------------------
-- Reflection Helpers
--------------------------------------------------------------------------------
_==_ = primQNameEquality
{-# INLINE _==_ #-}
getArgs : Term β Maybe (Term Γ Term)
getArgs (def _ xs) = go xs
where
go : List (Arg Term) β Maybe (Term Γ Term)
go (vArg x β· vArg y β· []) = just (x , y)
go (x β· xs) = go xs
go _ = nothing
getArgs _ = nothing
--------------------------------------------------------------------------------
-- Getting Category Names
--------------------------------------------------------------------------------
record CategoryNames : Set where
field
is-β : Name β Bool
is-id : Name β Bool
buildMatcher : Name β Maybe Name β Name β Bool
buildMatcher n nothing x = n == x
buildMatcher n (just m) x = n == x β¨ m == x
findCategoryNames : Term β TC CategoryNames
findCategoryNames cat = do
β-altName β normalise (def (quote Category._β_) (3 β―β
β·β cat β¨β·β© []))
id-altName β normalise (def (quote Category.id) (3 β―β
β·β cat β¨β·β© []))
returnTC record
{ is-β = buildMatcher (quote Category._β_) (getName β-altName)
; is-id = buildMatcher (quote Category.id) (getName id-altName)
}
--------------------------------------------------------------------------------
-- Constructing an Expr
--------------------------------------------------------------------------------
β³idβ³ : Term
β³idβ³ = quote idβ² β¨ con β© []
β³[_β]β³ : Term β Term
β³[ t β]β³ = quote [_β] β¨ con β© (t β¨β·β© [])
module _ (names : CategoryNames) where
open CategoryNames names
mutual
β³ββ³ : List (Arg Term) β Term
β³ββ³ (x β¨β·β© y β¨β·β© xs) = quote _ββ²_ β¨ con β© buildExpr x β¨β·β© buildExpr y β¨β·β© []
β³ββ³ (x β· xs) = β³ββ³ xs
β³ββ³ _ = unknown
buildExpr : Term β Term
buildExpr t@(def n xs) =
if (is-β n)
then β³ββ³ xs
else if (is-id n)
then β³idβ³
else
β³[ t β]β³
buildExpr t@(con n xs) =
if (is-β n)
then β³ββ³ xs
else if (is-id n)
then β³idβ³
else
β³[ t β]β³
buildExpr t = β³[ t β]β³
--------------------------------------------------------------------------------
-- Constructing the Solution
--------------------------------------------------------------------------------
constructSoln : Term β CategoryNames β Term β Term β Term
constructSoln cat names lhs rhs =
quote Category.Equiv.trans β¨ def β© 3 β―β
β·β cat β¨β·β©
(quote Category.Equiv.sym β¨ def β© 3 β―β
β·β cat β¨β·β©
(quote preserves-β β¨ def β© 3 β―β
β·β cat β¨β·β© buildExpr names lhs β¨β·β© []) β¨β·β© [])
β¨β·β©
(quote preserves-β β¨ def β© 3 β―β
β·β cat β¨β·β© buildExpr names rhs β¨β·β© [])
β¨β·β© []
solve-macro : Term β Term β TC _
solve-macro mon hole = do
holeβ² β inferType hole >>= normalise
names β findCategoryNames mon
just (lhs , rhs) β returnTC (getArgs holeβ²)
where nothing β typeError (termErr holeβ² β· [])
let soln = constructSoln mon names lhs rhs
unify hole soln
macro
solve : Term β Term β TC _
solve = solve-macro
|
function f = subsasgn(f, index, val)
%SUBSASGN Chebfun SUBSASGN.
% ( )
% F(X) = VAL assigns the values VAL at locations specified by X to the
% CHEBFUN F. SIZE(X, 1) should be equal to LENGTH(VAL) and SIZE(X, 2) should
% be the number of columns in F. SUBSASGN introduces new breakpoints
% in F at points in X that were not originally in F.DOMAIN. See DEFINEPOINT
% for further details.
%
% .
% CHEBFUN properties are restricted, so F.PROP = VAL has no effect.
%
% {}
% F{A, B} = G redefines the CHEBFUN F in the interval [A, B] using G. See
% CHEBFUN/DEFINEINTERVAL for further details.
%
% See also SUBSREF, DEFINEPOINT, DEFINEINTERVAL.
% Copyright 2017 by The University of Oxford and The Chebfun Developers.
% See http://www.chebfun.org/ for Chebfun information.
% TODO: Error checking. Particularly integer indices and quasimatrix indexing.
% TODO: Document for array-valued CHEBFUN objects and quasimatrices.
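% Example (illustrative): for a CHEBFUN f on [0, pi], the assignment f(pi/2) = 2
% goes through the '()' branch below and calls DEFINEPOINT, which introduces a
% breakpoint at pi/2 whose value is 2.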
idx = index(1).subs;
switch index(1).type
case '.'
% [TODO]: Restrict access to this.
f = builtin('subsasgn', f, index, val);
case '()'
if ( f(1).isTransposed && numel(idx) >= 2 )
idx(1:2) = idx([2 1]);
end
if ( ischar(idx{1}) && strcmp(idx{1}, ':') )
% Assign a column:
f = assignColumns(f, idx{2}, val);
else
% Define a point value:
f = definePoint(f, idx{1}, val);
end
case '{}'
% Define an interval:
f = defineInterval(f, [idx{:}], val);
otherwise
error('CHEBFUN:CHEBFUN:subsasgn:UnexpectedType',...
['??? Unexpected index.type of ' index(1).type]);
end
end
|
[Open in Colab](https://colab.research.google.com/github/28left/22jupyter/blob/main/activity_22_5_01_colab.ipynb)
```python
!pip install git+https://github.com/28left/cyllene.git
```
Collecting git+https://github.com/28left/cyllene.git
Cloning https://github.com/28left/cyllene.git to /tmp/pip-req-build-ytxfw2s1
Running command git clone -q https://github.com/28left/cyllene.git /tmp/pip-req-build-ytxfw2s1
Installing build dependencies ... done
Getting requirements to build wheel ... done
Preparing wheel metadata ... done
Requirement already satisfied: sympy in /usr/local/lib/python3.7/dist-packages (from cyllene==0.2) (1.7.1)
Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from cyllene==0.2) (1.19.5)
Requirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (from cyllene==0.2) (3.2.2)
Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.7/dist-packages (from sympy->cyllene==0.2) (1.2.1)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->cyllene==0.2) (1.3.1)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->cyllene==0.2) (2.8.1)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib->cyllene==0.2) (0.10.0)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->cyllene==0.2) (2.4.7)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.1->matplotlib->cyllene==0.2) (1.15.0)
Building wheels for collected packages: cyllene
Building wheel for cyllene (PEP 517) ... done
Created wheel for cyllene: filename=cyllene-0.2-cp37-none-any.whl size=27527 sha256=d21451174b843beac45ba734c3fe5e7ce9c26593d878e838a739b5e6ebb450c3
Stored in directory: /tmp/pip-ephem-wheel-cache-5q12d4bt/wheels/3e/18/73/4a45f074bb2820233eda690ac5f87b9c9f140d5442aa3fde8c
Successfully built cyllene
Installing collected packages: cyllene
Successfully installed cyllene-0.2
```python
!pip install "ipython>=7"
```
Collecting ipython>=7
Downloading https://files.pythonhosted.org/packages/c9/b1/82cbe2b856386f44f37fdae54d9b425813bd86fe33385c9d658d64826098/ipython-7.22.0-py3-none-any.whl (785kB)
|████████████████████████████████| 788kB 8.7MB/s
Requirement already satisfied: decorator in /usr/local/lib/python3.7/dist-packages (from ipython>=7) (4.4.2)
Requirement already satisfied: pickleshare in /usr/local/lib/python3.7/dist-packages (from ipython>=7) (0.7.5)
Requirement already satisfied: traitlets>=4.2 in /usr/local/lib/python3.7/dist-packages (from ipython>=7) (5.0.5)
Requirement already satisfied: pexpect>4.3; sys_platform != "win32" in /usr/local/lib/python3.7/dist-packages (from ipython>=7) (4.8.0)
Requirement already satisfied: jedi>=0.16 in /usr/local/lib/python3.7/dist-packages (from ipython>=7) (0.18.0)
Requirement already satisfied: pygments in /usr/local/lib/python3.7/dist-packages (from ipython>=7) (2.6.1)
Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.7/dist-packages (from ipython>=7) (54.2.0)
Collecting prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0
Downloading https://files.pythonhosted.org/packages/eb/e6/4b4ca4fa94462d4560ba2f4e62e62108ab07be2e16a92e594e43b12d3300/prompt_toolkit-3.0.18-py3-none-any.whl (367kB)
|████████████████████████████████| 368kB 17.0MB/s
Requirement already satisfied: backcall in /usr/local/lib/python3.7/dist-packages (from ipython>=7) (0.2.0)
Requirement already satisfied: ipython-genutils in /usr/local/lib/python3.7/dist-packages (from traitlets>=4.2->ipython>=7) (0.2.0)
Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.7/dist-packages (from pexpect>4.3; sys_platform != "win32"->ipython>=7) (0.7.0)
Requirement already satisfied: parso<0.9.0,>=0.8.0 in /usr/local/lib/python3.7/dist-packages (from jedi>=0.16->ipython>=7) (0.8.2)
Requirement already satisfied: wcwidth in /usr/local/lib/python3.7/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython>=7) (0.2.5)
ERROR: jupyter-console 5.2.0 has requirement prompt-toolkit<2.0.0,>=1.0.0, but you'll have prompt-toolkit 3.0.18 which is incompatible.
ERROR: google-colab 1.0.0 has requirement ipython~=5.5.0, but you'll have ipython 7.22.0 which is incompatible.
Installing collected packages: prompt-toolkit, ipython
Found existing installation: prompt-toolkit 1.0.18
Uninstalling prompt-toolkit-1.0.18:
Successfully uninstalled prompt-toolkit-1.0.18
Found existing installation: ipython 5.5.0
Uninstalling ipython-5.5.0:
Successfully uninstalled ipython-5.5.0
Successfully installed ipython-7.22.0 prompt-toolkit-3.0.18
```python
import cyllene
from cyllene import *
import sympy as sp
from sympy import plot
f = expression('x+2')
g = expression('x^2')
G_g = plot(g, (x, -3, 3), show=False)
line = plot(4, (x, -5, 5), line_color='red',show=False)
G_g.extend(line)
G_h = plot(g, (x, 0, 3), show=False)
line = plot(4, (x, 0, 4), line_color='red',show=False)
G_h.extend(line)
f_exp = function('2^x')
P_1 = cyllene.pp.ExpressionProblem('1',
'Compute the following values of $f(x) = 2^x$:',
5,
['$f(-2)$', '$f(-1)$', '$f(0)$', '$f(1)$', '$f(2)$'],
'numerical',
[f_exp(-2), f_exp(-1), f_exp(0), f_exp(1), f_exp(2)]
)
ProbStack.add(P_1)
P_2 = cyllene.pp.TrueFalse('2', "The function $f(x) = 2^x$ is one-to-one", True)
ProbStack.add(P_2)
f_log = function('log(x,2)')
P_3 = cyllene.pp.ExpressionProblem('3',
'Compute the following values of $f(x) = \\log_2(x)$:',
3,
['$f(2)$', '$f(1)$', '$f(1/2)$'],
'numerical',
[f_log(2), f_log(1), sp.N(f_log(1/2))]
)
ProbStack.add(P_3)
```
# Activity 5 - Exponential and Logarithmic Functions
## Part 1 - Review of one-to-one functions and inverse functions
### Introduction
Recall what is meant by a one-to-one function. Given a function, we say that it is one-to-one if every element in the range of the function corresponds to only one element in the domain of a function. That is, given a function $f(x)$,
> if $f(x_1) = f(x_2)$, then $x_1 = x_2$.
(If the output was the same, then the input must have been the same.)
Consider the following three functions; two of them are one-to-one functions. Which one is NOT a one-to-one function?
> 1. $f(x) = x + 2$ with domain $x \in (-\infty, \infty )$
> 2. $g(x) = x^2$ with domain $x \in (-\infty, \infty )$
> 3. $h(x) = x^2$ with domain $x \in [0, \infty)$
In the past, you may have sketched the functions and employed the "horizontal line test" to determine whether or not the function was one-to-one. That is, if a horizontal line intersected the graph in more than one point, then the function was not one-to-one.
Upon sketching the first function, $y=f(x)$, we see it is a line with slope $m=1$.
Any horizontal line will cross this sketch in exactly one spot.
In fact, given any value in the range of the function, for example $y=10$, we can calculate the corresponding $x$ value that gets mapped to it; that is, if $x+2=10$, then $x = 10-2 = 8$. [Note $f(8) = 10$]
```python
plot(f, (x,-12,12));
```
On first glance, it may appear that the next two functions are the same; however, it is important to remember that associated with any functional definition is the domain of the function. Even though $g(x)=x^2$ and $h(x)=x^2$, the domains are different, thus they are really two different functions. The function $g(x)$ defined on all real numbers is NOT a one-to-one function.
A counter example would be $g(-2) = (-2)^2 = 4$ and $g(2) = 2^2 = 4$. Given an output, $y=4$, we have two inputs from the domain, $x_1= -2$ and $x_2=2$ that get mapped to $y=4$. Sketch the parabola $y=x^2$ from $x=-3$ to $x=3$ and verify that the horizontal line, $y=4$, intersects the graph at two points, $(-2, 4)$ and $(2, 4)$. Thus the function $g(x)$ as defined is NOT a one-to-one function.
```python
G_g.show();
```
However, if you restrict your domain to just $x \ge 0$, the "sketch" of the function will only be the right half of the parabola you sketched. The horizontal line $y=4$ will only cross the graph in one place. In fact, any horizontal line $y=k$ where $k>0$ will pass through the right half of parabola exactly once. The third function is a one-to-one function.
```python
G_h.show();
```
### The consequence of a one-to-one determination: The existence of an inverse function!
If a function is one-to-one, then an inverse *function* exists. For $f(x) = x + 2$ with domain $x \in (-\infty, \infty )$, we may write out the inverse function by writing $y = x + 2$ and solving for $x$, that is, $x = y-2$, which represents the inverse function, written $f^{-1}(y) = y-2$. Employing the forward function, $f(8) = 8+2 = 10$. Then, the inverse, $f^{-1}(10) = 10-2 = 8$, takes us back!
For the function $h(x) = x^2$ with domain $x \in [0, \infty)$, we can do a similar process to determine the inverse function. We let $y = x^2$ and solve for $x$ to get $x = \sqrt{y}$ and $x = -\sqrt{y}$. However, since we restricted the domain of $h(x)$ to be $x \ge 0$, we need only consider the positive square root and $h^{-1}(y) = \sqrt{y}$.
Notice that for the second function from the Introduction, $g(x)$, since the domain was not restricted, the inverse process is not a "functional" process; for example, if $y=4$, going in reverse, there are two $x$-values associated with it, $x=-\sqrt{4}=-2$ and $x=\sqrt{4}=2$ (the horizontal line test).
In short, given $y = f(x)$, then $x = f^{-1}(y)$. Also note that the domain of the forward function $f(x)$ is the range of the inverse function, and the range of $f(x)$ is the domain of the inverse function.
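As a quick symbolic check (this cell is an illustration added here and not part of the original activity), sympy can recover the inverse rule by solving $y = f(x)$ for $x$:
```python
import sympy as sp

x, y = sp.symbols('x y')

# forward function f(x) = x + 2: solve y = x + 2 for x to obtain the inverse rule
inverse_rule = sp.solve(sp.Eq(y, x + 2), x)[0]
print(inverse_rule)              # y - 2, i.e. f^{-1}(y) = y - 2
print(inverse_rule.subs(y, 10))  # 8, matching f(8) = 10
```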
### A note on notation.
Given $f(x) = x+2$, in lieu of writing $f^{-1}(y) = y-2$, since the variable $y$ represents the input and $y-2$ is the rule (what to do with the input), often the inverse is also written with the dummy variable $x$, that is, $f^{-1}(x) = x-2$. However, the "$x$" in $f(x)$ and the "$x$" in $f^{-1}(x)$ represent two different quantities, most often with different units.
### Explore the outputs of a basic exponential function
Consider the function $f(x) = 2^x$ defined on $(-\infty, \infty)$
## Exercise
```python
%problem 1
```
### Problem 1
Compute the following values of $f(x) = 2^x$:
**(1)** $f(-2)$
**(2)** $f(-1)$
**(3)** $f(0)$
**(4)** $f(1)$
**(5)** $f(2)$
*Enter the answer(s) in the cell below.*
```python
%%answer Problem 1
(1): 1/4
(2): 1/2
(3): 1
(4): 2
(5): 4
```
You entered: <br>✅ **(1)** $\displaystyle \frac{1}{4}$ (*Correct*) <br>✅ **(2)** $\displaystyle \frac{1}{2}$ (*Correct*) <br>✅ **(3)** $\displaystyle 1$ (*Correct*) <br>✅ **(4)** $\displaystyle 2$ (*Correct*) <br>✅ **(5)** $\displaystyle 4$ (*Correct*) <br>
## Exercise
Below is a plot of the function $f(x) = 2^x$. Use the horizontal line test to determine if the function is one-to-one.
```python
plot(f_exp(x), (x,-3,3));
%problem 2
```
```python
%%answer Problem 2
T
```
✅ **Correct!**
Since the function $f(x) = 2^x$ is one-to-one, we know that a unique inverse function exists. We can't just write $y = 2^x$ and "solve for $x$", we need new notation to depict this new function.
> If $f(x) = 2^x$, then $f^{-1}(y) = \log_2(y)$.
In the graph above, consider the horizontal line through $y=4$ on the vertical axis and a vertical line through $x=2$ on the horizontal axis, and note that they intersect the curve at the point $(2, 4)$. This may be interpreted as $2^2=4$ or as $\log_2(4) = 2$
Using this idea, confirm the following values of $\log_2(y)$:
```python
%problem 3
```
### Problem 3
Compute the following values of $f(x) = \log_2(x)$:
**(1)** $\quad$$f(2)$
**(2)** $\quad$$f(1)$
**(3)** $\quad$$f(1/2)$
*Enter the answer(s) in the cell below.*
### Closing Considerations
>1. If $y = b^x$, where the base $b>0$ and $b \neq 1$, then $x = \log_b (y)$; that is, given $y = f(x)$, then $x = f^{-1}(y)$.
>2. Note that the domain (valid input values) of $f(x) = 2^x$ is $(-\infty, \infty)$, but the range (outputs) of $f(x) = 2^x > 0$.
This implies the domain of $f^{-1}(x) = \log_2(x)$ is $x>0$ (See the note on notation above).
In general, the only valid input value into $\log_b(x)$ is a value $x>0$ (e.g. the domain of the function $\log_b(x)$ is $x>0$)
>3. Since $2^0 = 1$, and in general $b^0 = 1$, we have $\log_b(1) = 0$ regardless of base $b$.
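A brief sympy check of these closing facts (added for illustration; not part of the graded problems):
```python
import sympy as sp

x = sp.symbols('x', real=True)

# log_b(1) = 0 for any valid base b, shown here for b = 2 and b = 10
print(sp.log(1, 2), sp.log(1, 10))   # 0 0
# 2^x = 4 has the unique real solution x = 2, i.e. log_2(4) = 2
print(sp.solve(sp.Eq(2**x, 4), x))   # [2]
```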
|
#!/usr/bin/python3
"""ProFET is a program for feature extraction from fasta files.
4 main options:
1. Extract features from fasta files(s) (Requires data)
2. Train a classifier (Requires training data)
3. Extract classifier's performance (Requires trained model and testing set)
4. Predict new data sets (Needs a trained model and input data set)
Training data sets can be in one of the two formats:
a. Each class in a separate file. For example:
b. All data in the same file, but the class can be extracted from the sequence identifier.
For example:
"""
import argparse
import pickle
import json
import sys
import time
import pandas as pd
import numpy as np
import pprint as pp
from Bio.SeqIO.FastaIO import SimpleFastaParser
from multiprocessing import Pool, Manager
from os.path import basename, exists
from functools import lru_cache as memoized
#Internal imports
from FeatureGen import Get_Protein_Feat #To generate features
from Model_trainer import trainClassifier, load_data
from GetPredictorPerf import get_scores
# from IPython.core.debugger import Tracer #TO REMOVE!!!
# import warnings
# import traceback
# def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
# traceback.print_stack()
# log = file if hasattr(file,'write') else sys.stderr
# log.write(warnings.formatwarning(message, category, filename, lineno, line))
# warnings.showwarnings = warn_with_traceback
# warnings.simplefilter("always")
# np.seterr(all='raise')
def get_params():
'''Parse arguments'''
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', '-r', dest='dataset', action='append',
type=str, help='The path to the data set (fasta file/s); may serve as a training set, testing set, new set, or input for feature extraction')
#parser.add_argument('--resultsDir', '-rs', dest='resultsDir', type=str,
#help='The path to directory to write the results files')
parser.add_argument('--extractfeat', '-f', dest='extractfeatures',
action='store_true', default=False, help='Extract features')
parser.add_argument('--performance', '-p', dest='performance',
action='store_true', default=False, help='Print performance of the model on a given data (test) set')
parser.add_argument('--classformat', '-c', dest='classformat',
type=str, default='file', help='Defines the class of each '\
'sequence, by one of the next options: \'dir\', \'file\', or \'id\'.')
parser.add_argument('--outputmodel', '-o', dest='outputmodel',
default=None, type=str, help='Filename for saving the model')
parser.add_argument('--inputmodel', '-i', dest='inputmodel',
default=None, type=str, help='Filename for loading a previously trained model')
parser.add_argument('--outputfile', '-s', dest='outputfile',
default=None, type=str, help='Filename for saving the '\
'output (features, or predicted labels)')
parser.add_argument('--classifier', '-t', dest='classifiertype',
default='forest', help='The type of the classifier model. '\
'Can be one of the next options: forest')
results = parser.parse_args()
return results
@memoized(maxsize=32)
def get_label_from_filename(filename):
'''Extract the label according to filename
Remove path and extensions
For example:
'../Mamm_Organellas/Mammal_peroxisome9.features.csv' -> 'Mammal_peroxisome9'
'''
return basename(filename).split('.')[0]
def write_csv(features_dict, out_file=None):
'''If out_file is not given, return features as a pandas.dataFrame object
'''
#Since different sequences can have different set of features(!) get the union of features:
feature_names = set().union(*[tuple(val.keys()) for val in features_dict.values()])
if out_file:
f = open(out_file,'w')
else:
csv_str = ''
header = sorted(feature_names)
#Print feature names
if out_file:
f.write('accession\t' + '\t'.join(header) + '\n')
else:
csv_str += 'accession\t' + '\t'.join(header) + '\n'
values_line_fmt = '%s\t' * len(header) + '%s\n'
for acc, features in features_dict.items():
#If feature doesn't exists, put a 0
values_line = acc + '\t' + '\t'.join([str(features.get(f_name, 0)) for f_name in header]) + '\n'
if out_file:
f.write(values_line)
else:
csv_str += values_line
if not out_file: return load_data(csv_str)
def write_features_to_file(features_dict, out_file):
'''Write in json format'''
with open(out_file, 'w') as f:
f.write(json.dumps(features_dict, sort_keys=True, indent=4))
def load_features_file(features_file):
'''Load json format file'''
with open(features_file) as f:
return json.load(f)
def update_dict_with_features(d, seq_id, seq, classname=None):
'''Extract features and update the dict
    A worker function to be executed in parallel with multiprocessing.
'''
features = Get_Protein_Feat(seq)
if classname:
features.update({'classname': classname})
d[seq_id] = features
@memoized(maxsize=32)
def extract_features(sequences_file, classformat=None, force=False):
'''Given a fasta file with sequences, and an output file,
Extract features, write it to file, and return the features as a dict
of dicts
    Uses multiprocessing to accelerate feature extraction.
'''
features_dict_file = sequences_file + '_%s.pkl' % classformat
if exists(features_dict_file) and not force:
fh = open(features_dict_file, 'rb')
features = pickle.load(fh)
fh.close()
return features
pool = Pool()
manager = Manager()
features_dict = manager.dict()
if classformat == 'file':
classname = get_label_from_filename(sequences_file)
else:
classname = None
with open(sequences_file) as f:
for record in SimpleFastaParser(f):
p = pool.apply_async(update_dict_with_features, args=(features_dict, record[0], record[1], classname))
print('.', end='')
sys.stdout.flush()
pool.close()
pool.join()
with open(features_dict_file, 'wb') as out_fh:
features_dict = dict(features_dict)
pickle.dump(features_dict, out_fh, protocol=pickle.HIGHEST_PROTOCOL)
return features_dict
def extract_datasets_features(datasets, classformat, output_file=None):
    '''For each file in the dataset extract features and write them to a per-file CSV, or to a single combined file if output_file is given
'''
if output_file and exists(output_file):
#Returns filename, maybe change to dataframe object
return output_file
all_features_dict = {}
for filename in datasets:
print('Generating features for file:', filename)
features_dict = extract_features(filename, classformat)
all_features_dict.update(features_dict)
if not output_file:
write_csv(features_dict, filename + '.features.csv')
print('Done')
if output_file:
print('Writing all features to:', output_file)
write_csv(all_features_dict, output_file)
return all_features_dict
def train_model(dataset_files, classformat, outputmodel, classifiertype):
    '''Given a set of files with fasta sequences, a class format (e.g., file),
filename to save model and required model type (e.g., forest)
Train the model and save it to the given file
'''
output_features_file='ProFET_features.csv'
features_dict = extract_datasets_features(dataset_files, classformat, output_features_file)
#features_df = load_data(output_features_file)
print('Learning %s model' % classifiertype)
model, label_encoder, scaler, feature_names = trainClassifier(output_features_file, classifiertype, kbest=0, alpha=False, optimalFlag=False, normFlag=True)
#Save model and additional data to file
pickle.dump((model, label_encoder, scaler, feature_names), open(outputmodel,'wb'), protocol=pickle.HIGHEST_PROTOCOL)
print('Done')
def load_model_from_file(filename):
'''Given a pickle filename return model, label encoder and a scaler'''
#return pickle.load(open(filename, 'rb'))
fh = open(filename, 'rb')
model_obj = pickle.load(fh)
fh.close()
return model_obj
def remove_nan_vals(array):
'''Remove rows with nan values and return the new array'''
row_len = array.shape[1]
while np.isnan(array.min()):
ind = array.argmin()
array = np.delete(array,(int(ind/row_len)), axis=0)
return array
def class_data(inputmodel_file, dataset, outfile=None):
    '''Given a file with a trained model and a dataset (can be multiple files), print for
each sequence the accession and the predicted label.
If outfile is given, print to this file instead of STDOUT
'''
#Load model
model, label_encoder, scaler, model_feature_names = load_model_from_file(inputmodel_file)
if outfile:
f = open(outfile, 'w')
else:
f = sys.stdout
for sequences_file in dataset:
res = write_csv(extract_features(sequences_file, classformat=None))
features = res[0]
accessions = res[1]
data_feature_names = res[-1]
features = match_features(model_feature_names, features, data_feature_names)
#Remove nan values
features = remove_nan_vals(features)
#Predict
scaled_features = scaler.transform(features)
labels_idx = model.predict(scaled_features)
labels = label_encoder.inverse_transform(labels_idx)
#Tracer()() #TO REMOVE!!!
for acc, label in zip(accessions.values, labels):
f.write('%s\t%s\n' % (acc, label))
if outfile:
f.close()
def match_features(main_features_list, features, minor_features_list):
    '''Given the model's feature name list and a features DataFrame,
    remove features unknown to the model and add zero-filled columns for
    model features missing from the data'''
main_features_set = set(main_features_list)
minor_features_set = set(minor_features_list)
#Remove spare features in data
for feature_name in minor_features_set - main_features_set:
idx = np.where(minor_features_list==feature_name)[0][0]
#Remove a column
features = np.delete(features, idx, axis=1)
        #Remove name from list to keep indexing consistency
#minor_features_list.remove(feature_name)
minor_features_list = np.delete(minor_features_list, idx)
#raise 'Data contains more features than the model!'
additional_features = main_features_set - minor_features_set
#Find missing features in data
mask = np.in1d(main_features_list, list(additional_features), assume_unique=True)
#Find the indexes
idxs = sorted(np.where(mask))
for idx in idxs:
#idx = np.where(main_features_list==feature_name)[0][0]
#Add zeros for missing feature (column idx)
features = np.insert(features, idx, 0, axis=1)
return features
def get_classifier_performance(inputmodel, dataset, classformat):
    '''Given a trained model file, a labelled dataset and the class format,
    compute and return the performance scores of the model on that dataset.
    '''
#Load Model
model, model_label_encoder, model_scaler, model_feature_names = load_model_from_file(inputmodel)
true_labels = np.array([])
predicted_labels = np.array([])
for sequences_file in dataset:
        #Extract data set features and true labels
features, part_true_labels, label_encoder, feature_names = write_csv(extract_features(sequences_file, classformat=classformat))
true_labels = np.append(true_labels, model_label_encoder.transform(label_encoder.inverse_transform(part_true_labels)))
features = match_features(model_feature_names, features, feature_names)
#Predict labels
scaled_features = model_scaler.transform(features)
labels_encoded = model.predict(scaled_features)
#labels = label_encoder.inverse_transform(labels_encoded)
predicted_labels = np.append(predicted_labels, labels_encoded)
#Generate scores
results = get_scores(predicted_labels, true_labels, verbose=False)
return results
def main():
results = get_params()
dataset = results.dataset
extractfeatures = results.extractfeatures
classformat = results.classformat
outputmodel = results.outputmodel
inputmodel = results.inputmodel
outputfile = results.outputfile
classifiertype = results.classifiertype
performance = results.performance
#Four options for program: (i) Extract features, (ii) Learn model, or given a model:
    # (iii) Classify new data, (iv) Extract performance
#(i) Extract features
if extractfeatures:
if not dataset or not outputfile:
print('Extracting features requires a training dataset and '\
                  'an output file to be given')
exit()
else:
print('Only extracting Features')
features = extract_datasets_features(dataset, classformat=None, output_file=outputfile)
#(ii) Learn model
elif outputmodel:
if not dataset:
            print('Training a model requires a training dataset to be '\
'given')
exit()
else:
train_model(dataset, classformat, outputmodel, classifiertype)
#or given a model:
elif inputmodel:
# (iii) Classify new data
if dataset and not performance:
class_data(inputmodel, dataset, outputfile)
        #(iv) Extract performance
else:
results = get_classifier_performance(inputmodel, dataset, classformat)
pp.pprint(results)
else:
print('Got wrong combination of parameters, please refer '\
'to help or README')
exit()
if __name__=="__main__":
main()
# classformat = 'file'
# inputmodel = 'chap_model'
# model, label_encoder, scaler, model_feature_names = load_model_from_file(inputmodel)
# sequences_file = '../Chap/test/negative.fasta'
# res = write_csv(extract_features(sequences_file, classformat=None))
# features = res[0]
# accessions = res[1]
# data_feature_names = res[-1]
# features = match_features(model_feature_names, features, data_feature_names)
# #Predict
# scaled_features = scaler.transform(features)
# labels_idx = model.predict(scaled_features)
# labels = label_encoder.inverse_transform(labels_idx)
# features1, labels1, label_encoder1, feature_names1 = write_csv(extract_features(sequences_file1, classformat=classformat))
# # sequences_file2 = '../Mamm_Organellas/Mammal_melanosome_0.9.fasta'
# # #features2, labels2, label_encoder2, feature_names2 = write_csv(extract_features(sequences_file, classformat=classformat))
# sequences_file2 = '../Chap/test/positive.fasta'
# dataset = [sequences_file1, sequences_file2]
# results = get_classifier_performance(inputmodel, dataset, classformat)
#Example of execution:
#python3 ProFET.py --dataset ../Chap/train/positive.fasta --dataset ../Chap/train/negative.fasta --classformat file --outputmodel test_model
#python3 ProFET.py --dataset ../Chap/test/positive.fasta --dataset ../Chap/test/negative.fasta --inputmodel chap_model --performance
|
module Day14
using AdventOfCode2019
struct Reaction
result::Pair{String,Int}
recipe::Array{Pair{String,Int},1}
end
function day14(input::String = readInput(joinpath(@__DIR__, "..", "data", "day14.txt")))
recipes = _parseInput(input)
ingredients = _empty_ingredient_dict(recipes, 1)
nORE = _amount_of_ore!(ingredients, recipes)
nFuel = _max_fuel(1_000_000_000_000, recipes)
return [nORE, nFuel]
end
function _parseInput(input::String)
lines = split(strip(input), "\n")
recipes = Dict{String,Reaction}()
for line in lines
comp = findall(r"\d+\s+[A-Z]+", line)
last = split(line[comp[end]])
result = Pair(last[2], parse(Int, last[1]))
recipeRanges = comp[1:end-1]
recipe = [Pair(elem[2], parse(Int, elem[1])) for elem in [split(line[r]) for r in recipeRanges]]
reaction = Reaction(result, recipe)
recipes[reaction.result.first] = reaction
end
return recipes
end
function _empty_ingredient_dict(recipes::Dict{String,Reaction}, init_fuel=0)
d = Dict{String,Int}()
d["ORE"] = 0
for (key, _) in recipes
d[key] = 0
end
d["FUEL"] = init_fuel
return d
end
function _amount_of_ore!(ingredients::Dict{String,Int}, recipes::Dict{String,Reaction})
ing = Array{Pair{String,Int},1}()
for (name, amount) in ingredients
if name == "ORE"
continue
end
if amount > 0
push!(ing, Pair(name, amount))
ingredients[name] = 0
end
end
if length(ing) == 0
return ingredients["ORE"]
end
for (name, amount) in ing
if name == "ORE"
continue
end
recipe = recipes[name]
factor = 1
if recipe.result[2] < amount
factor = ceil(Int, amount/recipe.result[2])
end
ingredients[name] += amount - factor * recipe.result[2]
for (n, a) in recipe.recipe
ingredients[n] += factor * a
end
end
return _amount_of_ore!(ingredients, recipes)
end
function _max_fuel(availableORE::Int, recipes::Dict{String,Reaction})
u = 1
while true
nORE = _amount_of_ore!(_empty_ingredient_dict(recipes, u), recipes)
if nORE > availableORE
break
end
u *= 2
end
u == 1 && return 0
    le = u ÷ 2
while u - le > 1
        m = (le + u) ÷ 2
nORE = _amount_of_ore!(_empty_ingredient_dict(recipes, m), recipes)
if nORE > availableORE
u = m
else
le = m
end
end
return le
end
end # module
|
(* File: Akra_Bazzi_Asymptotics.thy
Author: Manuel Eberl <[email protected]>
Proofs for the four(ish) asymptotic inequalities required for proving the
Akra Bazzi theorem with variation functions in the recursive calls.
*)
section \<open>Asymptotic bounds\<close>
theory Akra_Bazzi_Asymptotics
imports
Complex_Main
Akra_Bazzi_Library
"HOL-Library.Landau_Symbols"
begin
locale akra_bazzi_asymptotics_bep =
fixes b e p hb :: real
assumes bep: "b > 0" "b < 1" "e > 0" "hb > 0"
begin
context
begin
text \<open>
Functions that are negligible w.r.t. @{term "ln (b*x) powr (e/2 + 1)"}.
\<close>
private abbreviation (input) negl :: "(real \<Rightarrow> real) \<Rightarrow> bool" where
"negl f \<equiv> f \<in> o(\<lambda>x. ln (b*x) powr (-(e/2 + 1)))"
private lemma neglD: "negl f \<Longrightarrow> c > 0 \<Longrightarrow> eventually (\<lambda>x. \<bar>f x\<bar> \<le> c / ln (b*x) powr (e/2+1)) at_top"
by (drule (1) landau_o.smallD, subst (asm) powr_minus) (simp add: field_simps)
private lemma negl_mult: "negl f \<Longrightarrow> negl g \<Longrightarrow> negl (\<lambda>x. f x * g x)"
by (erule landau_o.small_1_mult, rule landau_o.small_imp_big, erule landau_o.small_trans)
(insert bep, simp)
private lemma ev4:
assumes g: "negl g"
shows "eventually (\<lambda>x. ln (b*x) powr (-e/2) - ln x powr (-e/2) \<ge> g x) at_top"
proof (rule smallo_imp_le_real)
define h1 where [abs_def]:
"h1 x = (1 + ln b/ln x) powr (-e/2) - 1 + e/2 * (ln b/ln x)" for x
define h2 where [abs_def]:
"h2 x = ln x powr (- e / 2) * ((1 + ln b / ln x) powr (- e / 2) - 1)" for x
from bep have "((\<lambda>x. ln b / ln x) \<longlongrightarrow> 0) at_top"
by (simp add: tendsto_0_smallo_1)
note one_plus_x_powr_Taylor2_bigo[OF this, of "-e/2"]
also have "(\<lambda>x. (1 + ln b / ln x) powr (- e / 2) - 1 - - e / 2 * (ln b / ln x)) = h1"
by (simp add: h1_def)
finally have "h1 \<in> o(\<lambda>x. 1 / ln x)"
by (rule landau_o.big_small_trans) (insert bep, simp add: power2_eq_square)
with bep have "(\<lambda>x. h1 x - e/2 * (ln b / ln x)) \<in> \<Theta>(\<lambda>x. 1 / ln x)" by simp
also have "(\<lambda>x. h1 x - e/2 * (ln b/ln x)) = (\<lambda>x. (1 + ln b/ ln x) powr (-e/2) - 1)"
by (rule ext) (simp add: h1_def)
finally have "h2 \<in> \<Theta>(\<lambda>x. ln x powr (-e/2) * (1 / ln x))" unfolding h2_def
by (intro landau_theta.mult) simp_all
also have "(\<lambda>x. ln x powr (-e/2) * (1 / ln x)) \<in> \<Theta>(\<lambda>x. ln x powr (-(e/2+1)))" by simp
also from g bep have "(\<lambda>x. ln x powr (-(e/2+1))) \<in> \<omega>(g)" by (simp add: smallomega_iff_smallo)
finally have "g \<in> o(h2)" by (simp add: smallomega_iff_smallo)
also have "eventually (\<lambda>x. h2 x = ln (b*x) powr (-e/2) - ln x powr (-e/2)) at_top"
using eventually_gt_at_top[of "1::real"] eventually_gt_at_top[of "1/b"]
by eventually_elim (insert bep, simp add: field_simps powr_diff [symmetric] h2_def
ln_mult [symmetric] powr_divide del: ln_mult)
hence "h2 \<in> \<Theta>(\<lambda>x. ln (b*x) powr (-e/2) - ln x powr (-e/2))" by (rule bigthetaI_cong)
finally show "g \<in> o(\<lambda>x. ln (b * x) powr (- e / 2) - ln x powr (- e / 2))" .
next
show "eventually (\<lambda>x. ln (b*x) powr (-e/2) - ln x powr (-e/2) \<ge> 0) at_top"
using eventually_gt_at_top[of "1/b"] eventually_gt_at_top[of "1::real"]
by eventually_elim (insert bep, auto intro!: powr_mono2' simp: field_simps simp del: ln_mult)
qed
private lemma ev1:
"negl (\<lambda>x. (1 + c * inverse b * ln x powr (-(1+e))) powr p - 1)"
proof-
from bep have "((\<lambda>x. c * inverse b * ln x powr (-(1+e))) \<longlongrightarrow> 0) at_top"
by (simp add: tendsto_0_smallo_1)
have "(\<lambda>x. (1 + c * inverse b * ln x powr (-(1+e))) powr p - 1)
\<in> O(\<lambda>x. c * inverse b * ln x powr - (1 + e))"
using bep by (intro one_plus_x_powr_Taylor1_bigo) (simp add: tendsto_0_smallo_1)
also from bep have "negl (\<lambda>x. c * inverse b * ln x powr - (1 + e))" by simp
finally show ?thesis .
qed
private lemma ev2_aux:
defines "f \<equiv> \<lambda>x. (1 + 1/ln (b*x) * ln (1 + hb / b * ln x powr (-1-e))) powr (-e/2)"
obtains h where "eventually (\<lambda>x. f x \<ge> 1 + h x) at_top" "h \<in> o(\<lambda>x. 1 / ln x)"
proof (rule that[of "\<lambda>x. f x - 1"])
define g where [abs_def]: "g x = 1/ln (b*x) * ln (1 + hb / b * ln x powr (-1-e))" for x
have lim: "((\<lambda>x. ln (1 + hb / b * ln x powr (- 1 - e))) \<longlongrightarrow> 0) at_top"
by (rule tendsto_eq_rhs[OF tendsto_ln[OF tendsto_add[OF tendsto_const, of _ 0]]])
(insert bep, simp_all add: tendsto_0_smallo_1)
hence lim': "(g \<longlongrightarrow> 0) at_top" unfolding g_def
by (intro tendsto_mult_zero) (insert bep, simp add: tendsto_0_smallo_1)
from one_plus_x_powr_Taylor2_bigo[OF this, of "-e/2"]
have "(\<lambda>x. (1 + g x) powr (-e/2) - 1 - - e/2 * g x) \<in> O(\<lambda>x. (g x)\<^sup>2)" .
also from lim' have "(\<lambda>x. g x ^ 2) \<in> o(\<lambda>x. g x * 1)" unfolding power2_eq_square
by (intro landau_o.big_small_mult smalloI_tendsto) simp_all
also have "o(\<lambda>x. g x * 1) = o(g)" by simp
also have "(\<lambda>x. (1 + g x) powr (-e/2) - 1 - - e/2 * g x) = (\<lambda>x. f x - 1 + e/2 * g x)"
by (simp add: f_def g_def)
finally have A: "(\<lambda>x. f x - 1 + e / 2 * g x) \<in> O(g)" by (rule landau_o.small_imp_big)
hence "(\<lambda>x. f x - 1 + e/2 * g x - e/2 * g x) \<in> O(g)"
by (rule sum_in_bigo) (insert bep, simp)
also have "(\<lambda>x. f x - 1 + e/2 * g x - e/2 * g x) = (\<lambda>x. f x - 1)" by simp
finally have "(\<lambda>x. f x - 1) \<in> O(g)" .
also from bep lim have "g \<in> o(\<lambda>x. 1 / ln x)" unfolding g_def
by (auto intro!: smallo_1_tendsto_0)
finally show "(\<lambda>x. f x - 1) \<in> o(\<lambda>x. 1 / ln x)" .
qed simp_all
private lemma ev2:
defines "f \<equiv> \<lambda>x. ln (b * x + hb * x / ln x powr (1 + e)) powr (-e/2)"
obtains h where
"negl h"
"eventually (\<lambda>x. f x \<ge> ln (b * x) powr (-e/2) + h x) at_top"
"eventually (\<lambda>x. \<bar>ln (b * x) powr (-e/2) + h x\<bar> < 1) at_top"
proof -
define f'
where "f' x = (1 + 1 / ln (b*x) * ln (1 + hb / b * ln x powr (-1-e))) powr (-e/2)" for x
from ev2_aux obtain g where g: "eventually (\<lambda>x. 1 + g x \<le> f' x) at_top" "g \<in> o(\<lambda>x. 1 / ln x)"
unfolding f'_def .
define h where [abs_def]: "h x = ln (b*x) powr (-e/2) * g x" for x
show ?thesis
proof (rule that[of h])
from bep g show "negl h" unfolding h_def
by (auto simp: powr_diff elim: landau_o.small_big_trans)
next
from g(2) have "g \<in> o(\<lambda>x. 1)" by (rule landau_o.small_big_trans) simp
with bep have "eventually (\<lambda>x. \<bar>ln (b*x) powr (-e/2) * (1 + g x)\<bar> < 1) at_top"
by (intro smallo_imp_abs_less_real) simp_all
thus "eventually (\<lambda>x. \<bar>ln (b*x) powr (-e/2) + h x\<bar> < 1) at_top"
by (simp add: algebra_simps h_def)
next
from eventually_gt_at_top[of "1/b"] and g(1)
show "eventually (\<lambda>x. f x \<ge> ln (b*x) powr (-e/2) + h x) at_top"
proof eventually_elim
case (elim x)
from bep have "b * x + hb * x / ln x powr (1 + e) = b*x * (1 + hb / b * ln x powr (-1 - e))"
by (simp add: field_simps powr_diff powr_add powr_minus)
also from elim(1) bep
have "ln \<dots> = ln (b*x) * (1 + 1/ln (b*x) * ln (1 + hb / b * ln x powr (-1-e)))"
by (subst ln_mult) (simp_all add: add_pos_nonneg field_simps)
also from elim(1) bep have "\<dots> powr (-e/2) = ln (b*x) powr (-e/2) * f' x"
by (subst powr_mult) (simp_all add: field_simps f'_def)
also from elim have "\<dots> \<ge> ln (b*x) powr (-e/2) * (1 + g x)"
by (intro mult_left_mono) simp_all
finally show "f x \<ge> ln (b*x) powr (-e/2) + h x"
by (simp add: f_def h_def algebra_simps)
qed
qed
qed
private lemma ev21:
obtains g where
"negl g"
"eventually (\<lambda>x. 1 + ln (b * x + hb * x / ln x powr (1 + e)) powr (-e/2) \<ge>
1 + ln (b * x) powr (-e/2) + g x) at_top"
"eventually (\<lambda>x. 1 + ln (b * x) powr (-e/2) + g x > 0) at_top"
proof-
from ev2 guess g . note g = this
from g(3) have "eventually (\<lambda>x. 1 + ln (b * x) powr (-e/2) + g x > 0) at_top"
by eventually_elim simp
with g(1,2) show ?thesis by (intro that[of g]) simp_all
qed
private lemma ev22:
obtains g where
"negl g"
"eventually (\<lambda>x. 1 - ln (b * x + hb * x / ln x powr (1 + e)) powr (-e/2) \<le>
1 - ln (b * x) powr (-e/2) - g x) at_top"
"eventually (\<lambda>x. 1 - ln (b * x) powr (-e/2) - g x > 0) at_top"
proof-
from ev2 guess g . note g = this
from g(2) have "eventually (\<lambda>x. 1 - ln (b * x + hb * x / ln x powr (1 + e)) powr (-e/2) \<le>
1 - ln (b * x) powr (-e/2) - g x) at_top"
by eventually_elim simp
moreover from g(3) have "eventually (\<lambda>x. 1 - ln (b * x) powr (-e/2) - g x > 0) at_top"
by eventually_elim simp
ultimately show ?thesis using g(1) by (intro that[of g]) simp_all
qed
lemma asymptotics1:
shows "eventually (\<lambda>x.
(1 + c * inverse b * ln x powr -(1+e)) powr p *
(1 + ln (b * x + hb * x / ln x powr (1 + e)) powr (- e / 2)) \<ge>
1 + (ln x powr (-e/2))) at_top"
proof-
let ?f = "\<lambda>x. (1 + c * inverse b * ln x powr -(1+e)) powr p"
let ?g = "\<lambda>x. 1 + ln (b * x + hb * x / ln x powr (1 + e)) powr (- e / 2)"
define f where [abs_def]: "f x = 1 - ?f x" for x
from ev1[of c] have "negl f" unfolding f_def
by (subst landau_o.small.uminus_in_iff [symmetric]) simp
from landau_o.smallD[OF this zero_less_one]
have f: "eventually (\<lambda>x. f x \<le> ln (b*x) powr -(e/2+1)) at_top"
by eventually_elim (simp add: f_def)
from ev21 guess g . note g = this
define h where [abs_def]: "h x = -g x + f x + f x * ln (b*x) powr (-e/2) + f x * g x" for x
have A: "eventually (\<lambda>x. ?f x * ?g x \<ge> 1 + ln (b*x) powr (-e/2) - h x) at_top"
using g(2,3) f
proof eventually_elim
case (elim x)
let ?t = "ln (b*x) powr (-e/2)"
have "1 + ?t - h x = (1 - f x) * (1 + ln (b*x) powr (-e/2) + g x)"
by (simp add: algebra_simps h_def)
also from elim have "?f x * ?g x \<ge> (1 - f x) * (1 + ln (b*x) powr (-e/2) + g x)"
by (intro mult_mono[OF _ elim(1)]) (simp_all add: algebra_simps f_def)
finally show "?f x * ?g x \<ge> 1 + ln (b*x) powr (-e/2) - h x" .
qed
from bep \<open>negl f\<close> g(1) have "negl h" unfolding h_def
by (fastforce intro!: sum_in_smallo landau_o.small.mult simp: powr_diff
intro: landau_o.small_trans)+
from ev4[OF this] A show ?thesis by eventually_elim simp
qed
lemma asymptotics2:
shows "eventually (\<lambda>x.
(1 + c * inverse b * ln x powr -(1+e)) powr p *
(1 - ln (b * x + hb * x / ln x powr (1 + e)) powr (- e / 2)) \<le>
1 - (ln x powr (-e/2))) at_top"
proof-
let ?f = "\<lambda>x. (1 + c * inverse b * ln x powr -(1+e)) powr p"
let ?g = "\<lambda>x. 1 - ln (b * x + hb * x / ln x powr (1 + e)) powr (- e / 2)"
define f where [abs_def]: "f x = 1 - ?f x" for x
from ev1[of c] have "negl f" unfolding f_def
by (subst landau_o.small.uminus_in_iff [symmetric]) simp
from landau_o.smallD[OF this zero_less_one]
have f: "eventually (\<lambda>x. f x \<le> ln (b*x) powr -(e/2+1)) at_top"
by eventually_elim (simp add: f_def)
from ev22 guess g . note g = this
define h where [abs_def]: "h x = -g x - f x + f x * ln (b*x) powr (-e/2) + f x * g x" for x
have "((\<lambda>x. ln (b * x + hb * x / ln x powr (1 + e)) powr - (e / 2)) \<longlongrightarrow> 0) at_top"
apply (insert bep, intro tendsto_neg_powr, simp)
apply (rule filterlim_compose[OF ln_at_top])
apply (rule filterlim_at_top_smallomega_1, simp)
using eventually_gt_at_top[of "max 1 (1/b)"]
apply (auto elim!: eventually_mono intro!: add_pos_nonneg simp: field_simps)
done
hence ev_g: "eventually (\<lambda>x. \<bar>1 - ?g x\<bar> < 1) at_top"
by (intro smallo_imp_abs_less_real smalloI_tendsto) simp_all
have A: "eventually (\<lambda>x. ?f x * ?g x \<le> 1 - ln (b*x) powr (-e/2) + h x) at_top"
using g(2,3) ev_g f
proof eventually_elim
case (elim x)
let ?t = "ln (b*x) powr (-e/2)"
from elim have "?f x * ?g x \<le> (1 - f x) * (1 - ln (b*x) powr (-e/2) - g x)"
by (intro mult_mono) (simp_all add: f_def)
also have "... = 1 - ?t + h x" by (simp add: algebra_simps h_def)
finally show "?f x * ?g x \<le> 1 - ln (b*x) powr (-e/2) + h x" .
qed
from bep \<open>negl f\<close> g(1) have "negl h" unfolding h_def
by (fastforce intro!: sum_in_smallo landau_o.small.mult simp: powr_diff
intro: landau_o.small_trans)+
from ev4[OF this] A show ?thesis by eventually_elim simp
qed
lemma asymptotics3: "eventually (\<lambda>x. (1 + (ln x powr (-e/2))) / 2 \<le> 1) at_top"
(is "eventually (\<lambda>x. ?f x \<le> 1) _")
proof (rule eventually_mp[OF always_eventually], clarify)
from bep have "(?f \<longlongrightarrow> 1/2) at_top"
by (force intro: tendsto_eq_intros tendsto_neg_powr ln_at_top)
hence "\<And>e. e>0 \<Longrightarrow> eventually (\<lambda>x. \<bar>?f x - 0.5\<bar> < e) at_top"
by (subst (asm) tendsto_iff) (simp add: dist_real_def)
from this[of "0.5"] show "eventually (\<lambda>x. \<bar>?f x - 0.5\<bar> < 0.5) at_top" by simp
fix x assume "\<bar>?f x - 0.5\<bar> < 0.5"
thus "?f x \<le> 1" by simp
qed
lemma asymptotics5: "eventually (\<lambda>x. ln (b*x - hb*x*ln x powr -(1+e)) powr (-e/2) < 1) at_top"
proof-
from bep have "((\<lambda>x. b - hb * ln x powr -(1+e)) \<longlongrightarrow> b - 0) at_top"
by (intro tendsto_intros tendsto_mult_right_zero tendsto_neg_powr ln_at_top) simp_all
hence "LIM x at_top. (b - hb * ln x powr -(1+e)) * x :> at_top"
by (rule filterlim_tendsto_pos_mult_at_top[OF _ _ filterlim_ident], insert bep) simp_all
also have "(\<lambda>x. (b - hb * ln x powr -(1+e)) * x) = (\<lambda>x. b*x - hb*x*ln x powr -(1+e))"
by (intro ext) (simp add: algebra_simps)
finally have "filterlim ... at_top at_top" .
with bep have "((\<lambda>x. ln (b*x - hb*x*ln x powr -(1+e)) powr -(e/2)) \<longlongrightarrow> 0) at_top"
by (intro tendsto_neg_powr filterlim_compose[OF ln_at_top]) simp_all
hence "eventually (\<lambda>x. \<bar>ln (b*x - hb*x*ln x powr -(1+e)) powr (-e/2)\<bar> < 1) at_top"
by (subst (asm) tendsto_iff) (simp add: dist_real_def)
thus ?thesis by simp
qed
lemma asymptotics6: "eventually (\<lambda>x. hb / ln x powr (1 + e) < b/2) at_top"
and asymptotics7: "eventually (\<lambda>x. hb / ln x powr (1 + e) < (1 - b) / 2) at_top"
and asymptotics8: "eventually (\<lambda>x. x*(1 - b - hb / ln x powr (1 + e)) > 1) at_top"
proof-
from bep have A: "(\<lambda>x. hb / ln x powr (1 + e)) \<in> o(\<lambda>_. 1)" by simp
from bep have B: "b/3 > 0" and C: "(1 - b)/3 > 0" by simp_all
from landau_o.smallD[OF A B] show "eventually (\<lambda>x. hb / ln x powr (1+e) < b/2) at_top"
by eventually_elim (insert bep, simp)
from landau_o.smallD[OF A C] show "eventually (\<lambda>x. hb / ln x powr (1 + e) < (1 - b)/2) at_top"
by eventually_elim (insert bep, simp)
from bep have "(\<lambda>x. hb / ln x powr (1 + e)) \<in> o(\<lambda>_. 1)" "(1 - b) / 2 > 0" by simp_all
from landau_o.smallD[OF this] eventually_gt_at_top[of "1::real"]
have A: "eventually (\<lambda>x. 1 - b - hb / ln x powr (1 + e) > 0) at_top"
by eventually_elim (insert bep, simp add: field_simps)
from bep have "(\<lambda>x. x * (1 - b - hb / ln x powr (1+e))) \<in> \<omega>(\<lambda>_. 1)" "(0::real) < 2" by simp_all
from landau_omega.smallD[OF this] A eventually_gt_at_top[of "0::real"]
show "eventually (\<lambda>x. x*(1 - b - hb / ln x powr (1 + e)) > 1) at_top"
by eventually_elim (simp_all add: abs_mult)
qed
end
end
definition "akra_bazzi_asymptotic1 b hb e p x \<longleftrightarrow>
(1 - hb * inverse b * ln x powr -(1+e)) powr p * (1 + ln (b*x + hb*x/ln x powr (1+e)) powr (-e/2))
\<ge> 1 + (ln x powr (-e/2) :: real)"
definition "akra_bazzi_asymptotic1' b hb e p x \<longleftrightarrow>
(1 + hb * inverse b * ln x powr -(1+e)) powr p * (1 + ln (b*x + hb*x/ln x powr (1+e)) powr (-e/2))
\<ge> 1 + (ln x powr (-e/2) :: real)"
definition "akra_bazzi_asymptotic2 b hb e p x \<longleftrightarrow>
(1 + hb * inverse b * ln x powr -(1+e)) powr p * (1 - ln (b*x + hb*x/ln x powr (1+e)) powr (-e/2))
\<le> 1 - ln x powr (-e/2 :: real)"
definition "akra_bazzi_asymptotic2' b hb e p x \<longleftrightarrow>
(1 - hb * inverse b * ln x powr -(1+e)) powr p * (1 - ln (b*x + hb*x/ln x powr (1+e)) powr (-e/2))
\<le> 1 - ln x powr (-e/2 :: real)"
definition "akra_bazzi_asymptotic3 e x \<longleftrightarrow> (1 + (ln x powr (-e/2))) / 2 \<le> (1::real)"
definition "akra_bazzi_asymptotic4 e x \<longleftrightarrow> (1 - (ln x powr (-e/2))) * 2 \<ge> (1::real)"
definition "akra_bazzi_asymptotic5 b hb e x \<longleftrightarrow>
ln (b*x - hb*x*ln x powr -(1+e)) powr (-e/2::real) < 1"
definition "akra_bazzi_asymptotic6 b hb e x \<longleftrightarrow> hb / ln x powr (1 + e :: real) < b/2"
definition "akra_bazzi_asymptotic7 b hb e x \<longleftrightarrow> hb / ln x powr (1 + e :: real) < (1 - b) / 2"
definition "akra_bazzi_asymptotic8 b hb e x \<longleftrightarrow> x*(1 - b - hb / ln x powr (1 + e :: real)) > 1"
definition "akra_bazzi_asymptotics b hb e p x \<longleftrightarrow>
akra_bazzi_asymptotic1 b hb e p x \<and> akra_bazzi_asymptotic1' b hb e p x \<and>
akra_bazzi_asymptotic2 b hb e p x \<and> akra_bazzi_asymptotic2' b hb e p x \<and>
akra_bazzi_asymptotic3 e x \<and> akra_bazzi_asymptotic4 e x \<and> akra_bazzi_asymptotic5 b hb e x \<and>
akra_bazzi_asymptotic6 b hb e x \<and> akra_bazzi_asymptotic7 b hb e x \<and>
akra_bazzi_asymptotic8 b hb e x"
lemmas akra_bazzi_asymptotic_defs =
akra_bazzi_asymptotic1_def akra_bazzi_asymptotic1'_def
akra_bazzi_asymptotic2_def akra_bazzi_asymptotic2'_def akra_bazzi_asymptotic3_def
akra_bazzi_asymptotic4_def akra_bazzi_asymptotic5_def akra_bazzi_asymptotic6_def
akra_bazzi_asymptotic7_def akra_bazzi_asymptotic8_def akra_bazzi_asymptotics_def
lemma akra_bazzi_asymptotics:
assumes "\<And>b. b \<in> set bs \<Longrightarrow> b \<in> {0<..<1}"
assumes "hb > 0" "e > 0"
shows "eventually (\<lambda>x. \<forall>b\<in>set bs. akra_bazzi_asymptotics b hb e p x) at_top"
proof (intro eventually_ball_finite ballI)
fix b assume "b \<in> set bs"
with assms interpret akra_bazzi_asymptotics_bep b e p hb by unfold_locales auto
show "eventually (\<lambda>x. akra_bazzi_asymptotics b hb e p x) at_top"
unfolding akra_bazzi_asymptotic_defs
using asymptotics1[of "-c" for c] asymptotics2[of "-c" for c]
by (intro eventually_conj asymptotics1 asymptotics2 asymptotics3
asymptotics4 asymptotics5 asymptotics6 asymptotics7 asymptotics8) simp_all
qed simp
end
|
#pragma once
#include "arcana/finally_scope.h"
#include "arcana/functional/inplace_function.h"
#include "arcana/sentry.h"
#include "arcana/threading/affinity.h"
#include <algorithm>
#include <tuple>
#include <vector>
#include <gsl/gsl>
namespace arcana
{
using ticket_seed = int64_t;
using ticket = gsl::final_action<
stdext::inplace_function<void(), sizeof(std::aligned_storage_t<sizeof(ticket_seed) + sizeof(void*)>)>>;
using ticket_scope = finally_scope<ticket>;
/*
An event routing class used to dispatch events to multiple listeners.
Each router class can only handle a certain fixed set of event types defined by EventTs.
*/
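    /*
        Minimal usage sketch (illustrative only; the int event type, the lambda
        and handle() are hypothetical, not part of this header):

            router<int> events;
            ticket registration =
                events.add_listener<int>([](const int& value) { handle(value); });
            events.fire(42);

        The returned ticket unregisters the listener when it is destroyed, so
        holding it (e.g. in a ticket_scope) controls the listener's lifetime.
    */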
template<typename... EventTs>
class router
{
public:
static constexpr size_t LISTENER_SIZE = 4 * sizeof(int64_t);
template<typename EventT>
using listener_function = stdext::inplace_function<void(const EventT&), LISTENER_SIZE>;
/*
Sends an event synchronously to all listeners.
*/
template<typename EventT>
void fire(const EventT& evt)
{
GSL_CONTRACT_CHECK("thread affinity", m_affinity.check());
using event = std::decay_t<EventT>;
auto& listeners = std::get<listener_group<event>>(m_listeners);
{
auto guard = std::get<sentry<event>>(m_sentries).take();
for (listener<event>& listener : listeners)
{
if (listener.valid)
{
listener.callback(evt);
}
}
}
// if we're no longer iterating the listeners list in this stack
// remove all the unregistered listeners and add the pending ones
if (!std::get<sentry<event>>(m_sentries).is_active())
{
listeners.erase(std::remove_if(listeners.begin(),
listeners.end(),
[](const listener<event>& l) { return !l.valid; }),
listeners.end());
// move the pending listeners to the real list
// and clear the pending list
auto& pending = std::get<listener_group<event>>(m_pending);
std::move(pending.begin(), pending.end(), std::back_inserter(listeners));
pending.clear();
}
}
/*
Adds an event listener.
*/
template<typename EventT, typename T>
ticket add_listener(T&& listener)
{
auto id = internal_add_listener<EventT>(std::forward<T>(listener));
return ticket{ [id, this] { internal_remove_listener<EventT>(id); } };
}
/*
        Sets the router's thread affinity. Once this is set, the methods
on this instance will need to be called by that thread.
*/
void set_affinity(const affinity& aff)
{
m_affinity = aff;
}
private:
/*
Adds an event listener.
*/
template<typename EventT, typename T>
ticket_seed internal_add_listener(T&& listener)
{
GSL_CONTRACT_CHECK("thread affinity", m_affinity.check());
using event = std::decay_t<EventT>;
// if we're currently firing an event in that group we need to wait until we're done before
// adding the listener to the list
auto& listeners = std::get<sentry<event>>(m_sentries).is_active()
? std::get<listener_group<event>>(m_pending)
: std::get<listener_group<event>>(m_listeners);
auto id = m_nextId++;
listeners.emplace_back(std::forward<T>(listener), id);
return id;
}
/*
Removes an event listener by id.
*/
template<typename EventT>
void internal_remove_listener(const ticket_seed& id)
{
GSL_CONTRACT_CHECK("thread affinity", m_affinity.check());
using event = std::decay_t<EventT>;
auto& listeners = std::get<listener_group<event>>(m_listeners);
auto found = std::find_if(listeners.begin(), listeners.end(), [id](const listener<event>& listener) {
return listener.id == id;
});
if (found == listeners.end())
{
assert(false && "removing item that isn't there");
return;
}
// don't modify the collection while iterating, just disable the listener
if (std::get<sentry<event>>(m_sentries).is_active())
{
found->valid = false;
}
else
{
listeners.erase(found);
}
}
template<typename EventT>
struct listener
{
using callback_t = listener_function<EventT>;
callback_t callback;
ticket_seed id;
bool valid;
listener(callback_t&& callback, const ticket_seed& id)
: callback{ std::move(callback) }
, id{ id }
, valid{ true }
{}
listener(const callback_t& callback, const ticket_seed& id)
: callback{ callback }
, id{ id }
, valid{ true }
{}
};
template<typename EventT>
using listener_group = std::vector<listener<EventT>>;
std::tuple<listener_group<EventTs>...> m_listeners;
std::tuple<sentry<EventTs>...> m_sentries;
std::tuple<listener_group<EventTs>...> m_pending;
affinity m_affinity;
        ticket_seed m_nextId = 0;
};
}
|
lemma homotopy_equivalent_space_sym: "X homotopy_equivalent_space Y \<longleftrightarrow> Y homotopy_equivalent_space X"
|
# Notes on Saturation Vapor Pressure #
There are a large number of expressions for the saturation vapor pressure in the literature, and many of these, even recent ones, seem to reference previous studies in a haphazard way. So how much do these differ, is there a standard, and by what criteria should one judge them? These are big questions, and I won't answer them comprehensively here, but perhaps a bit of insight can be shared.
The first thing to note is that there is a community that concerns itself with this question. They call themselves the International Association for the Properties of Water and Steam (IAPWS), and mostly concern themselves with the behavior of water at high temperature. The approach of the IAPWS is to develop an empirical equation of state for water, in the form of a specification of its Helmholtz free energy, from which all other properties can be derived. The standard reference for the IAPWS equation of state is the publication by Wagner and Pru{\ss} (Thermodynamic Properties of Ordinary Water), published in 2002, which describes the approved IAPWS-95 formulation. Minor corrections have since been made to this, which as best I can tell are relevant at high temperatures. By working with an equation of state, all properties of water, from the specific heats to the gas constants to the phase-change enthalpies, can be derived consistently. The disadvantage of this approach is that the equation is derived by positing an analytic form that is then fit to a very wide and diverse abundance of existing data. The resultant equation is described by an ideal part, which involves a summation of nine terms and thirteen coefficients, and a residual part, with more than 50 terms and over 200 constants.
For the case of the saturation vapor pressure over water, Wagner and Pru{\ss} suggest a much simpler equation that is described in terms of only six coefficients. First, below I compare the relative error to the IAPWS standard as formulated and distributed in the iapws python package (version 1.4). There has been some discussion on the web of its implementation, but its similarity with the Wagner and Pru{\ss} formulation gives me confidence.
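For reference, the six-coefficient form mentioned above (as implemented in the `wagner-pruss` branch of `eslf` below, with the critical-point values $T_\mathrm{c} = 647.096$ K and $P_\mathrm{c} = 22.064$ MPa) can be written as
\begin{equation}
e_\mathrm{s}(T) = P_\mathrm{c} \exp\left[\frac{T_\mathrm{c}}{T}\left(a_1\vartheta + a_2\vartheta^{1.5} + a_3\vartheta^{3} + a_4\vartheta^{3.5} + a_5\vartheta^{4} + a_6\vartheta^{7.5}\right)\right], \qquad \vartheta = 1 - \frac{T}{T_\mathrm{c}},
\end{equation}
with the coefficients $a_1,\dots,a_6$ as given in the code.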
```python
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import interpolate, optimize
plot_dir = '/Users/m219063/Research/Projects/Thermodynamics/plots/'
%matplotlib inline
```
```python
gravity = 9.8076
cpd = 1006.
Rd = 287.05
Rv = 461.53 # IAPWS97 at 273.15
cpv = 1865.01 # ''
lv0 = 2500.93e3 # IAPWS97 at 273.15
lf0 = 333.42e3 #''
cl = 4179.57 # IAPWS97 at 305 and P=0.1 MPa (chosen to give a good fit for es over ice)
ci = 1905.43 # IAPWS97 at 247.065 and P=0.1 MPa (chosen to give a good fit for es over ice)
eps1 = Rd/Rv
eps2 = Rv/Rd -1.
P0 = 100000. # Standard Pressure
T0 = 273.15 # Standard Temperature
PvC = 22.064e6 # Critical pressure of water vapor
TvC = 647.096 # Critical temperature of water vapor
TvT = 273.16 # Triple point temperature of water
PvT = 611.655
lvT = lv0 + (cpv-cl)*(TvT-T0)
lfT = lf0 + (cpv-ci)*(TvT-T0)
lsT = lvT + lfT
es_default = 'sonntag'
def thermo_input(x, xtype='none'):
import numpy as np
x = np.asarray(x).flatten()
scalar_input = False
if x.ndim == 0:
x = x[None] # Makes x 1D
scalar_input = True
if (xtype == 'Kelvin' and x.max() < 100 ): x = x+273.15
if (xtype == 'Celcius'and x.max() > 100 ): x = x-273.15
if (xtype == 'Pascal' and x.max() < 1200): x = x*100.
if (xtype == 'kg/kg' and x.max() > 1.0) : x = x/1000.
if (xtype == 'meter' and x.max() < 10.0): print('Warning: input should be in meters, max value less than 10, not corrected')
return x, scalar_input
def eslf(T, formula=es_default):
""" Returns the saturation vapour pressure [Pa] over liquid given
    the temperature. Temperatures can be in Celsius or Kelvin.
    Formulas supported are
    - Goff-Gratch (1994 Smithsonian Tables)
    - Sonntag (1994)
    - Flatau
    - Hardy (1998)
    - Romps (2017)
    - Murphy-Koop
    - Bolton
    - Wagner and Pruss (WP, 2002)
    - an analytic form with constant specific heats ('standard-analytic')
    The default is set by es_default (currently 'sonntag').
>>> eslf(273.16)
611.657
"""
import numpy as np
x, scalar_input = thermo_input(T, 'Kelvin')
if formula == "flatau":
if (np.min(x) > 100): x = x-273.16
        x = np.maximum(x, -80.)
c_es= np.asarray([0.6105851e+03, 0.4440316e+02, 0.1430341e+01, 0.2641412e-01,
0.2995057e-03,0.2031998e-05,0.6936113e-08,0.2564861e-11,-0.3704404e-13])
es = np.polyval(c_es[::-1],x)
elif formula == "bolton":
if (np.min(x) > 100): x = x-273.15
es = 611.2*np.exp((17.67*x)/(243.5+x))
elif formula == "sonntag":
xx = -6096.9385/x + 16.635794 - 2.711193e-2*x + 1.673952e-5*x*x + 2.433502 * np.log(x)
es = 100.*np.exp(xx)
elif formula =='goff-gratch':
x1 = 273.16/x
x2 = 373.16/x
xl = np.log10(1013.246 ) - 7.90298*(x2 - 1) + 5.02808*np.log10(x2) - 1.3816e-7*(10**(11.344*(1.-1./x2)) - 1.0) + 8.1328e-3 * (10**(-3.49149*(x2-1)) - 1.0)
es =10**(xl+2) # plus 2 converts from hPa to Pa
elif formula == 'wagner-pruss':
vt = 1.-x/TvC
es = PvC * np.exp(TvC/x * (-7.85951783*vt + 1.84408259*vt**1.5 - 11.7866497*vt**3 + 22.6807411*vt**3.5 - 15.9618719*vt**4 + 1.80122502*vt**7.5))
elif formula == 'hardy98':
y = -2.8365744e+3/(x*x) - 6.028076559e+3/x + 19.54263612 - 2.737830188e-2*x + 1.6261698e-5*x**2 + 7.0229056e-10*x**3 - 1.8680009e-13*x**4 + 2.7150305 * np.log(x)
es = np.exp(y)
elif formula == 'romps':
Rr = 461.
cvl_r = 4119
cvv_r = 1418
cpv_r = cvv_r + Rr
es = 611.65 * (x/TvT) **((cpv_r-cvl_r)/Rr) * np.exp((2.37403e6 - (cvv_r-cvl_r)*TvT)*(1/TvT - 1/x)/Rr)
elif formula == "murphy-koop":
es = np.exp(54.842763 - 6763.22/x - 4.210*np.log(x) + 0.000367*x + np.tanh(0.0415*(x - 218.8)) * (53.878 - 1331.22/x - 9.44523 * np.log(x) + 0.014025*x))
elif formula == "standard-analytic":
c1 = (cpv-cl)/Rv
c2 = lvT/(Rv*TvT) - c1
es = PvT * np.exp(c2*(1.-TvT/x)) * (x/TvT)**c1
else:
exit("formula not supported")
es = np.maximum(es,0)
if scalar_input:
return np.squeeze(es)
return es
def esif(T, formula=es_default):
""" Returns the saturation vapour pressure [Pa] over ice given
    the temperature. Temperatures can be in Celsius or Kelvin.
    Formulas supported are Sonntag (default, via es_default), Goff-Gratch,
    Wagner et al. (2011), Murphy-Koop, Romps (2017) and an analytic form
    with constant specific heats.
    >>> esif(273.15)
    611.2
    """
import numpy as np
x, scalar_input = thermo_input(T, 'Kelvin')
if formula == "sonntag":
es = 100 * np.exp(24.7219 - 6024.5282/x + 0.010613868*x - 0.000013198825*x**2 - 0.49382577*np.log(x))
elif formula == "goff-gratch":
x1 = 273.16/x
xi = np.log10( 6.1071) - 9.09718*(x1 - 1) - 3.56654*np.log10(x1) + 0.876793*(1 - 1./x1)
es = 10**(xi+2)
elif formula == "wagner-pruss": #(actually wagner et al, 2011)
a1 = -0.212144006e+2
a2 = 0.273203819e+2
a3 = -0.610598130e+1
b1 = 0.333333333e-2
b2 = 0.120666667e+1
b3 = 0.170333333e+1
        theta = x/TvT
es = PvT * np.exp((a1*theta**b1 + a2 * theta**b2 + a3 * theta**b3)/theta)
elif formula == "murphy-koop":
es = np.exp(9.550426 - 5723.265/x + 3.53068 * np.log(x) - 0.00728332*x)
elif formula == "romps":
Rr = 461.
cvv_r = 1418.
cvs_r = 1861.
cpv_r = cvv_r + Rr
es = 611.65 * (x/TvT) **((cpv_r-cvs_r)/Rr) * np.exp((2.37403e6 + 0.33373e6 - (cvv_r-cvs_r)*TvT)*(1/TvT - 1/x)/Rr)
elif formula == "standard-analytic":
c1 = (cpv-ci)/Rv
c2 = lsT/(Rv*TvT) - c1
es = PvT * np.exp(c2*(1.-TvT/x)) * (x/TvT)**c1
else:
exit("formula not supported")
es = np.maximum(es,0)
if scalar_input:
return np.squeeze(es)
return es
def esilf(T,formula=es_default):
import numpy as np
return np.minimum(esif(T,formula),eslf(T,formula))
def es(T,formula=es_default,state='liq'):
import numpy as np
x, scalar_input = thermo_input(T, 'Kelvin')
if (state == 'liq'):
return eslf(x,formula)
if (state == 'ice'):
return esif(x,formula)
if (state == 'mxd'):
return esilf(x,formula)
def des(T,formula=es_default,state='liq'):
import numpy as np
x, scalar_input = thermo_input(T, 'Kelvin')
dx = 0.01; xp = x+dx/2; xm = x-dx/2
return (es(xp,formula,state)-es(xm,formula,state))/dx
def dlnesdlnT(T,formula=es_default,state='liq'):
import numpy as np
x, scalar_input = thermo_input(T, 'Kelvin')
dx = 0.01; xp = x+dx/2; xm = x-dx/2
return ((es(xp,formula,state)-es(xm,formula,state))/es(x,formula,state) * (x/dx))
def phase_change_enthalpy(Tx,fusion=False):
""" Returns the enthlapy [J/g] of vaporization (default) of water vapor or
(if fusion=True) the fusion anthalpy. Input temperature can be in degC or Kelvin
>>> phase_change_enthalpy(273.15)
2500.8e3
"""
import numpy as np
TC, scalar_input = thermo_input(Tx, 'Celcius')
TK, scalar_input = thermo_input(Tx, 'Kelvin')
if (fusion):
el = lfT + (cl-ci)*(TK-TvT)
else:
el = lvT + (cpv-cl)*(TK-TvT)
if scalar_input:
return np.squeeze(el)
return el
```
## Some basic properties of water from the IAPWS routines ##
```python
import iapws
print ('Using IAPWS Version %s\n'%(iapws.__version__,))
T = np.arange(183.15,313.15)
ci_iapws = np.full(len(T),np.nan)
cl_iapws = np.full(len(T),np.nan)
for i,Tx in enumerate(T):
if (Tx < 283): ci_iapws[i] = iapws._iapws._Ice(Tx, 0.1)['cp']*1000 / ci
if (Tx > 263): cl_iapws[i] = iapws._iapws._Liquid(Tx, 0.1)['cp']*1000 / cl
fig = plt.figure(figsize=(4,3))
ax1 = plt.subplot(1,1,1)
ax1.set_xlabel('$T$ / K')
ax1.set_ylabel('$c_\mathrm{i}$ / %5.2f, $c_\mathrm{l}$ / %5.2f'%(ci,cl))
ax1.set_xticks([185,247.07,273.15,305.00])
plt.scatter([247.065],[1.])
plt.scatter([305.000],[1.])
plt.plot(T,ci_iapws)
plt.plot(T,cl_iapws)
sns.set_context("paper", font_scale=1.2)
sns.despine(offset=10)
plt.tight_layout()
fig.savefig(plot_dir+'cp-Tdependance.pdf')
TK = np.arange(273.15,315.15,0.01)
es_iapws = np.zeros(len(TK))
for i, x in enumerate(TK):
es_iapws[i] = iapws.iapws97._PSat_T(x) *1.e6 #Temperature, [K]; Returns:Pressure, [MPa]
```
## Behavior of saturation vapor pressure above the triple point ##
This comparison of relative error suggests that the Wagner-Pru{\ss}, Murphy and Koop, Hardy, and Sonntag formulations lie closest to the IAPWS-97 reference. Romps (2017) and Bolton (1980) are similarly accurate and may have advantages. Hardy is interesting as it appears in a technical document and is rarely mentioned in the subsequent literature, but is used by Vaisala in the calibration of their sondes.
```python
state = 'liq'
fig = plt.figure(figsize=(10,5))
ax1 = plt.subplot(1,1,1)
ax1.set_xlabel('$T$ / K')
ax1.set_ylabel('$e_{\mathrm{s,x}}/e_{\mathrm{s,ref}} - 1$')
ax1.set_yscale('log')
es_ref = es_iapws
es_w = es(TK,formula="wagner-pruss",state=state)
es_r = es(TK,formula='romps',state=state)
es_g = es(TK,formula='goff-gratch',state=state)
es_m = es(TK,formula='murphy-koop',state=state)
es_s = es(TK,formula='sonntag',state=state)
es_b = es(TK,formula='bolton',state=state)
es_f = es(TK,formula='flatau',state=state)
es_h = es(TK,formula='hardy98',state=state)
es_a = es(TK,formula='standard-analytic',state=state)
plt.plot(TK,np.abs(es_h/es_ref-1),c='tab:blue',ls='solid',label='Hardy (1998)')
plt.plot(TK,np.abs(es_f/es_ref-1),c='tab:orange',label='Flatau (1992)')
plt.plot(TK,np.abs(es_g/es_ref-1),c='tab:green',label='Goff-Gratch (1957)')
plt.plot(TK,np.abs(es_b/es_ref-1),c='tab:red',ls='dotted',label='Bolton (1980)')
plt.plot(TK,np.abs(es_r/es_ref-1),c='tab:purple',label='Romps (2017)')
plt.plot(TK,np.abs(es_s/es_ref-1),c='tab:grey',label='Sonntag (1990)')
plt.plot(TK,np.abs(es_m/es_ref-1),c='tab:pink',label='Murphy-Koop (2005)')
plt.plot(TK,np.abs(es_w/es_ref-1),c='tab:brown',label='Wagner-Pruss (2002)')
plt.plot(TK,np.abs(es_a/es_ref-1),c='tab:purple',ls='dotted',label='Analytic')
plt.legend(loc="lower right",ncol=3)
sns.set_context("paper", font_scale=1.2)
sns.despine(offset=10)
fig.savefig(plot_dir+'es_l-error.pdf')
```
## Extension to temperatures below the triple point ##
To extend over the entire temperature range a different reference is required; for this any of the Hardy, Sonntag, Murphy-Koop and Wagner-Pru{\ss} formulations could suffice. We choose Wagner-Pru{\ss} because Wagner's group is responsible for the standard, and has also developed the IAPWS standard for saturation vapor pressure over ice. Below the results are plotted with respect to this standard over a much larger temperature range.
It is not clear how accurate Wagner and Pru{\ss} is when extended well beyond the IAPWS range; the grouping of errors of similar magnitude from the Bolton, Flatau and Goff-Gratch formulations might be taken as indicative of a low-temperature bias in the Wagner-Pru{\ss} formulation. I doubt that this is the case, as the poor performance of all these formulations in the higher temperature range, and the simplicity of their forms, make it unlikely. The agreement of the Murphy-Koop formulation with these simpler formulations at low temperature may be indicative of Murphy and Koop's focus on saturation over ice rather than liquid.
```python
state = 'liq'
fig = plt.figure(figsize=(10,5))
ax1 = plt.subplot(1,1,1)
ax1.set_xlabel('$T$ / K')
ax1.set_ylabel('$e_{\mathrm{s,x}}/e_{\mathrm{s,ref}} - 1$')
ax1.set_yscale('log')
TK = np.arange(180,320,0.5)
es_w = es(TK,formula="wagner-pruss",state=state)
es_r = es(TK,formula='romps',state=state)
es_g = es(TK,formula='goff-gratch',state=state)
es_m = es(TK,formula='murphy-koop',state=state)
es_s = es(TK,formula='sonntag',state=state)
es_b = es(TK,formula='bolton',state=state)
es_f = es(TK,formula='flatau',state=state)
es_h = es(TK,formula='hardy98',state=state)
es_a = es(TK,formula='standard-analytic',state=state)
es_ref = es_w
plt.plot(TK,np.abs(es_h/es_ref-1),c='tab:blue',ls='solid',label='Hardy (1998)')
plt.plot(TK,np.abs(es_f/es_ref-1),c='tab:orange',label='Flatau (1992)')
plt.plot(TK,np.abs(es_g/es_ref-1),c='tab:green',label='Goff-Gratch (1957)')
plt.plot(TK,np.abs(es_b/es_ref-1),c='tab:red',ls='dotted',label='Bolton (1980)')
plt.plot(TK,np.abs(es_r/es_ref-1),c='tab:purple',label='Romps (2017)')
plt.plot(TK,np.abs(es_s/es_ref-1),c='tab:grey',label='Sonntag (1990)')
plt.plot(TK,np.abs(es_m/es_ref-1),c='tab:pink',label='Murphy-Koop (2005)')
plt.plot(TK,np.abs(es_a/es_ref-1),c='tab:purple',ls='dotted',label='Analytic')
#plt.plot(TK,np.abs(es_w/es_ref-1),c='tab:olive',label='Wagner-Pruss (2002)')
plt.legend(loc="lower left",ncol=2)
sns.set_context("paper", font_scale=1.2)
sns.despine(offset=10)
fig.savefig(plot_dir+'es_lsc-error.pdf')
```
## Saturation vapor pressure over ice ##
A subset of the formulations also postulate forms for the saturation vapor pressure over ice. As the reference for this quantity we use Wagner et al. (2011), as this has been adopted as the IAPWS standard. Here it seems that Murphy and Koop's (2005) formulation behaves very well in comparison to Wagner et al., but Sonntag is also quite adequate, particularly at lower temperatures ($T<273.15$ K) where it is likely to be applied.
```python
state = 'ice'
fig = plt.figure(figsize=(10,5))
ax1 = plt.subplot(1,1,1)
ax1.set_xlabel('$T$ / K')
ax1.set_ylabel('$e_{\mathrm{s,x}}/e_{\mathrm{s,ref}} - 1$')
ax1.set_yscale('log')
TK = np.arange(180,320,0.5)
es_w = es(TK,formula="wagner-pruss",state=state)
es_r = es(TK,formula='romps',state=state)
es_g = es(TK,formula='goff-gratch',state=state)
es_m = es(TK,formula='murphy-koop',state=state)
es_s = es(TK,formula='sonntag',state=state)
es_a = es(TK,formula='standard-analytic',state=state)
es_ref = es_w
plt.plot(TK,np.abs(es_g/es_ref-1),c='tab:green',label='Goff-Gratch (1957)')
plt.plot(TK,np.abs(es_r/es_ref-1),c='tab:purple',label='Romps (2017)')
plt.plot(TK,np.abs(es_s/es_ref-1),c='tab:grey',label='Sonntag (1990)')
plt.plot(TK,np.abs(es_m/es_ref-1),c='tab:pink',label='Murphy-Koop (2005)')
plt.plot(TK,np.abs(es_a/es_ref-1),c='tab:purple',ls='dotted',label='Analytic')
#plt.plot(TK,np.abs(es_w/es_ref-1),c='tab:olive',label='Wagner-Pruss (2002)')
plt.legend(loc="lower left",ncol=2)
sns.set_context("paper", font_scale=1.2)
sns.despine(offset=10)
fig.savefig(plot_dir+'es_i-error.pdf')
```
## Clausius-Clapeyron ##
Often overlooked is that many conceptual models are built on the application of the Clausius-Clapeyron equation,
\begin{equation}
\frac{\mathrm{d} \ln e_\mathrm{s}}{\mathrm{d} \ln T} \left(\frac{\ell_\mathrm{v}}{R_\mathrm{v} T}\right)^{-1} = 1
\end{equation}
with the assumption that the vaporization enthalpy, $\ell_\mathrm{v}$, that appears in this equation is linear in temperature following Kirchhoff's relation. This is equivalent to assuming that the specific heats are independent of temperature, an idealization which is, unfortunately, just that, an idealization.
Because of this it is interesting to compare this expression, evaluated from the above formulations of the saturation vapor pressure (through their numerical derivatives), with independent expressions of $\ell_\mathrm{v}$ based on the assumption of constant specific heats, as sketched below.
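As a reminder of where the `standard-analytic` expression in the code above comes from: with constant specific heats, Kirchhoff's relation gives $\ell_\mathrm{v}(T) = \ell_\mathrm{v}(T_\mathrm{t}) + (c_{p\mathrm{v}} - c_\mathrm{l})(T - T_\mathrm{t})$, and integrating Clausius-Clapeyron from the triple point $(T_\mathrm{t}, P_\mathrm{t})$ yields
\begin{equation}
e_\mathrm{s}(T) = P_\mathrm{t} \left(\frac{T}{T_\mathrm{t}}\right)^{c_1} \exp\left[c_2\left(1 - \frac{T_\mathrm{t}}{T}\right)\right], \qquad c_1 = \frac{c_{p\mathrm{v}} - c_\mathrm{l}}{R_\mathrm{v}}, \quad c_2 = \frac{\ell_\mathrm{v}(T_\mathrm{t})}{R_\mathrm{v} T_\mathrm{t}} - c_1,
\end{equation}
which is exactly the `standard-analytic` branch of `eslf` above; for ice the same form holds with $c_\mathrm{l}$ replaced by $c_\mathrm{i}$ and $\ell_\mathrm{v}$ by the sublimation enthalpy.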
This comparison is shown below for ice and liquid saturation. The analytic expression, which has larger errors for $e_\mathrm{s}$, is constructed to satisfy this relationship and is exact to the precision of the numerical calculations. The various formulations using more accurate expressions for $e_\mathrm{s}$, which implicitly do not assume constant specific heats, are similarly accurate, with the exception of Goff-Gratch, and of Romps for ice. Hardy is only shown for water. For ice, Sonntag does not behave well for $T>290$ K, but it is not likely to be used at these temperatures. Note that Romps would be exact had we adopted his modified specific heats.
Based on the above my recommendation is to use the formulations by Wagner's group, unless one is interested in very low temperatures ($T<180$ K), in which case the formulation of Murphy and Koop may be desirable. For liquid-only processes Hardy might be a good choice; it is less well known but is used by Vaisala for its sondes. There may be advantages to using Sonntag if there is interest in both liquid and ice, as it might allow more efficient implementations; in my tests all formulations were within 30% of one another.
Another alternative would be to use the analytic approach: either using Romps' formulae if getting the saturation vapor pressure as close to measurements as possible is preferred, or using the analytic formula with the correct (at the standard temperature and pressure) specific heats and gas constants.
```python
state = 'liq'
fig = plt.figure(figsize=(10,10))
ax1 = plt.subplot(2,1,1)
ax1.set_ylabel('$|\mathrm{CC}_\mathrm{liq} - 1|$')
ax1.set_yscale('log')
ax1.set_xticklabels([])
TK = np.arange(180,320,0.5)
lv = phase_change_enthalpy(TK)
if (state == 'ice'): lv += phase_change_enthalpy(TK,fusion=True)
y = lv/(Rv * TK)
cc_w = dlnesdlnT(TK,formula="wagner-pruss",state=state) / y
cc_r = dlnesdlnT(TK,formula='romps',state=state) /y
cc_g = dlnesdlnT(TK,formula='goff-gratch',state=state) /y
cc_m = dlnesdlnT(TK,formula='murphy-koop',state=state) /y
cc_s = dlnesdlnT(TK,formula='sonntag',state=state) /y
cc_h = dlnesdlnT(TK,formula='hardy98',state=state) /y
cc_a = dlnesdlnT(TK,formula='standard-analytic',state=state) /y
plt.plot(TK,np.abs(cc_h/1 -1.),c='tab:blue',label='Hardy (1998)')
plt.plot(TK,np.abs(cc_g/1 -1.),c='tab:green',label='Goff-Gratch (1957)')
plt.plot(TK,np.abs(cc_r/1 -1.),c='tab:purple',label='Romps (2017)')
plt.plot(TK,np.abs(cc_s/1 -1.),c='tab:grey',label='Sonntag (1990)')
plt.plot(TK,np.abs(cc_m/1 -1.),c='tab:pink',label='Murphy-Koop (2005)')
plt.plot(TK,np.abs(cc_w/1 -1.),c='tab:olive',label='Wagner-Pruss (2002)')
plt.plot(TK,np.abs(cc_a/1 -1.),c='tab:purple',ls='dotted',label='Analytic')
plt.legend(loc="lower left",ncol=1)
state = 'ice'
TK = np.arange(180,320,0.5)
lv = phase_change_enthalpy(TK)
if (state == 'ice'): lv = phase_change_enthalpy(TK,fusion=True) + phase_change_enthalpy(TK)
y = lv/(Rv * TK)
cc_w = dlnesdlnT(TK,formula="wagner-pruss",state=state) / y
cc_r = dlnesdlnT(TK,formula='romps',state=state) /y
cc_g = dlnesdlnT(TK,formula='goff-gratch',state=state) /y
cc_m = dlnesdlnT(TK,formula='murphy-koop',state=state) /y
cc_s = dlnesdlnT(TK,formula='sonntag',state=state) /y
cc_a = dlnesdlnT(TK,formula='standard-analytic',state=state) /y
ax2 = plt.subplot(2,1,2)
ax2.set_xlabel('$T$ / K')
ax2.set_ylabel('$|\mathrm{CC}_\mathrm{ice} - 1|$')
ax2.set_yscale('log')
plt.plot(TK,np.abs(cc_g/1 -1.),c='tab:green',label='Goff-Gratch (1957)')
plt.plot(TK,np.abs(cc_r/1 -1.),c='tab:purple',label='Romps (2017)')
plt.plot(TK,np.abs(cc_s/1 -1.),c='tab:grey',label='Sonntag (1990)')
plt.plot(TK,np.abs(cc_m/1 -1.),c='tab:pink',label='Murphy-Koop (2005)')
plt.plot(TK,np.abs(cc_w/1 -1.),c='tab:olive',label='Wagner-Pruss (2002)')
plt.plot(TK,np.abs(cc_a/1. -1.),c='tab:purple',ls='dotted',label='Analytic')
sns.set_context("paper", font_scale=1.2)
sns.despine(offset=10)
fig.savefig(plot_dir+'cc-error.pdf')
```
## Optimizing analytic fits for saturation vapor pressure ##
Romps suggests modifying the specific heats of liquid and ice and the gas constant of vapor to arrive at an optimal fit for the saturation vapor pressure using the analytic form. One can do almost as well by modifying only the specific heat of the condensate phases. Here we show how the maximum error in the fit depends on the specific heat of the condensate phases as compared to the reference, and how we arrive at our optimal fit by only shifting the condensate-phase specific heats to values that they in any case adopt within the range of temperatures spanned by the atmosphere. This justifies the default choice for saturation vapor pressure and the specific heats used in aes_thermo.py.
```python
fig = plt.figure(figsize=(10,5))
cl_1 = (iapws._iapws._Liquid(265, 0.1)['cp'])*1000.
cl_2 = (iapws._iapws._Liquid(305, 0.1)['cp'])*1000
ci_1 = (iapws._iapws._Ice(193, 0.01)['cp'])*1000.
ci_2 = (iapws._iapws._Ice(273, 0.10)['cp'])*1000
cls = np.arange(cl_2,cl_1)
err = np.zeros(len(cls))
ax1 = plt.subplot(1,2,1)
ax1.set_xlabel('$c_\mathrm{liq}$ / Jkg$^{-1}$K$^{-1}$')
ax1.set_ylabel('$(e_{\mathrm{s,x}}/e_{\mathrm{s,ref}} - 1)_\mathrm{max}$ / %')
ax1.set_yscale('log')
state = 'liq'
TK = np.arange(260,300,0.5)
es_ref = es(TK,formula="wagner-pruss",state=state)
for i,cx in enumerate(cls):
c1 = (cpv-cx)/Rv
c2 = lvT/(Rv*TvT) - c1
es_a = PvT * np.exp(c2*(1.-TvT/TK)) * (TK/TvT)**c1
err[i] = np.max(np.abs(es_a/es_ref -1.))*100.
ax1.plot(cls,err,c='tab:purple',ls='dotted',label='Analytic $c_\mathrm{liq}$ for $T\in$ (260K,305K)')
ax1.legend(loc="upper left",ncol=2)
cis = np.arange(ci_1,ci_2)
err = np.zeros(len(cis))
ax2 = plt.subplot(1,2,2)
ax2.set_xlabel('$c_\mathrm{ice}$ / Jkg$^{-1}$K$^{-1}$')
ax2.set_ylabel('$(e_{\mathrm{s,x}}/e_{\mathrm{s,ref}} - 1)_\mathrm{max}$ / %')
ax2.set_yscale('log')
state = 'ice'
TK = np.arange(180,273,0.5)
es_ref = es(TK,formula="wagner-pruss",state=state)
for i,cx in enumerate(cis):
c1 = (cpv-cx)/Rv
c2 = lsT/(Rv*TvT) - c1
es_a = PvT * np.exp(c2*(1.-TvT/TK)) * (TK/TvT)**c1
err[i] = np.max(np.abs(es_a/es_ref -1.))*100.
ax2.plot(cis,err,c='tab:purple',ls='dotted',label='Analytic $c_\mathrm{ice}$ for $T\in$ (193K,273K)')
ax2.legend(loc="upper right",ncol=2)
sns.set_context("paper", font_scale=1.2)
sns.despine(offset=10)
fig.savefig(plot_dir+'es-analytic-fits.pdf')
Tfit = 305
print ('Taking fit for $c_\mathrm{liq}=$ %3.2f J/(kg K) at $T=$ %3.2f K'%(iapws._iapws._Liquid(Tfit, 0.1)['cp']*1000.,Tfit))
Tfit = 247.065
print ('Taking fit for $c_\mathrm{ice}=$ %3.2f J/(kg K) at $T=$ %3.2f K'%(iapws._iapws._Ice(Tfit, 0.1)['cp']*1000.,Tfit))
```
## RCEMIP comparison ##
During RCEMIP (Wing et al.) different models reported different RH, differing both in how it was calculated and in whether it was calculated relative to liquid or ice. In this analysis we create a python implementation of the initial RCEMIP sounding and then, for the given state, estimate the RH using different formulae and different assumptions regarding the reference condensate (liquid/ice). We also show the difference associated with a 1 K change in temperature.
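To set the scale of that last comparison, a rough Clausius-Clapeyron estimate shows that 1 K corresponds to roughly a 7 % change in saturation vapor pressure, and hence in RH at fixed q and p. The numbers below are illustrative round values, not the constants from aes_thermo.
```python
L_vap     = 2.5e6   # J/kg, approximate enthalpy of vaporization
Rv_approx = 461.5   # J/(kg K), approximate gas constant of water vapor
T_ref     = 273.15  # K
# Clausius-Clapeyron: d(ln es)/dT = L / (Rv * T**2)
print('approximate change in es per K: %.1f %%' % (100 * L_vap / (Rv_approx * T_ref**2)))
```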
```python
def rcemip_on_z(z,SST):
# function [T,q,p] = rcemip_on_z(z,SST)
#
# Inputs:
# z: array of heights (low to high, m)
# SST: sea surface temperature (K)
#
# Outputs:
T = np.zeros(len(z)) # temperature (K)
q = np.zeros(len(z)) # specific humidity (g/g)
p = np.zeros(len(z)) # pressure (Pa)
## Constants
g = 9.79764 #m/s^2
Rd = 287.04 #J/kgK
## Parameters
p0 = 101480 #Pa surface pressure
qt = 10**(-11) #g/g specific humidity at tropopause
zq1 = 4000 #m
zq2 = 7500 #m
zt = 15000 #m tropopause height
gamma = 0.0067 #K/m lapse rate
## Scratch
Tv = np.zeros(len(z)) # temperature (K)
if SST == 295:
q0 = 0.01200; #g/g specific humidity at surface (adjusted from 300K value so RH near surface approx 80%)
elif SST == 300:
q0 = 0.01865; #g/g specific humidity at surface
elif SST == 305:
q0 = 0.02400 #g/g specific humidity at surface (adjusted from 300K value so RH near surface approx 80%)
T0 = SST - 0 #surface air temperature adjusted to be 0K less than SST
## Virtual Temperature at surface and tropopause
Tv0 = T0*(1 + 0.608*q0) #virtual temperature at surface
Tvt = Tv0 - gamma*zt #virtual temperature at tropopause z=zt
## Pressure
pt = p0*(Tvt/Tv0)**(g/(Rd*gamma)); #pressure at tropopause z=zt
p = p0*((Tv0-gamma*z)/Tv0)**(g/(Rd*gamma)) #0 <= z <= zt
p[z>zt] = pt*np.exp(-g*(z[z>zt]-zt)/(Rd*Tvt)) #z > zt
## Specific humidity
q = q0*np.exp(-z/zq1)*np.exp(-(z/zq2)**2)
q[z>zt] = qt #z > zt
## Temperature
#Virtual Temperature
Tv = Tv0 - gamma*z #0 <= z <= zt
Tv[z>zt] = Tvt #z > zt
#Absolute Temperature at all heights
T = Tv/(1 + 0.608*q)
return T, q, p
z = np.arange(0,17000,100)
T, q , p = rcemip_on_z(z,300)
```
```python
def get_rh (T,q,p,formula='wagner-pruss',state='liq'):
es_w = es(T,formula=formula,state=state)
x = es_w * eps1/(p-es_w)
return 100.*q*(1+x)/x
fig = plt.figure(figsize=(4,5))
ax1 = plt.subplot(1,1,1)
ax1.set_ylabel('$z$ / km')
ax1.set_xlabel('RH / %')
ax1.set_ylim(0,14.5)
ax1.set_yticks([0,4,8,12])
plt.plot(get_rh(T,q,p,state='mxd'),z/1000.,label = 'Wagner Pruss (ice/liq)')
plt.plot(get_rh(T+1,q,p,state='mxd'),z/1000.,label = 'Wagner Pruss (ice/liq) + 1 K')
plt.plot(get_rh(T,q,p,state='ice'),z/1000.,label = 'Wagner Pruss (ice)')
plt.plot(get_rh(T,q,p,formula='romps',state='mxd'),z/1000.,label = 'Romps (ice/liq)')
plt.plot(get_rh(T,q,p),z/1000.,label = 'Wagner Pruss (liq)')
plt.plot(get_rh(T,q,p,formula='flatau'),z/1000.,label = 'Flatau (liq)')
plt.legend(loc="lower left",ncol=1)
sns.set_context("paper")
sns.despine(offset=10)
plt.tight_layout()
fig.savefig(plot_dir+'RCEMIP-RHerror.pdf')
```
## Credit ##
Jiawei Bao, Geet George, and Hauke Schulz are thanked for their comments on these notes and for identifying some errors in earlier versions.
|
If $f$ and $g$ are power series with $g$ having a lower degree than $f$, and $g$ has a nonzero constant term, then the power series $f/g$ has a radius of convergence at least as large as the minimum of the radii of convergence of $f$ and $g$.
|
State Before: C : Type uβ
instβΒΉ : Category C
D : Type uβ
instβ : Category D
π’ : Set C
β’ IsDetecting (Set.op π’) β IsCodetecting π’ State After: case refine'_1
C : Type uβ
instβΒΉ : Category C
D : Type uβ
instβ : Category D
π’ : Set C
hπ’ : IsDetecting (Set.op π’)
X Y : C
f : X βΆ Y
hf : β (G : C), G β π’ β β (h : X βΆ G), β! h', f β« h' = h
β’ IsIso f
case refine'_2
C : Type uβ
instβΒΉ : Category C
D : Type uβ
instβ : Category D
π’ : Set C
hπ’ : IsCodetecting π’
X Y : Cα΅α΅
f : X βΆ Y
hf : β (G : Cα΅α΅), G β Set.op π’ β β (h : G βΆ Y), β! h', h' β« f = h
β’ IsIso f Tactic: refine' β¨fun hπ’ X Y f hf => _, fun hπ’ X Y f hf => _β© State Before: case refine'_1
C : Type uβ
instβΒΉ : Category C
D : Type uβ
instβ : Category D
π’ : Set C
hπ’ : IsDetecting (Set.op π’)
X Y : C
f : X βΆ Y
hf : β (G : C), G β π’ β β (h : X βΆ G), β! h', f β« h' = h
β’ IsIso f State After: case refine'_1
C : Type uβ
instβΒΉ : Category C
D : Type uβ
instβ : Category D
π’ : Set C
hπ’ : IsDetecting (Set.op π’)
X Y : C
f : X βΆ Y
hf : β (G : C), G β π’ β β (h : X βΆ G), β! h', f β« h' = h
G : Cα΅α΅
hG : G β Set.op π’
h : G βΆ X.op
β’ β! h', h' β« f.op = h Tactic: refine' (isIso_op_iff _).1 (hπ’ _ fun G hG h => _) State Before: case refine'_1
C : Type uβ
instβΒΉ : Category C
D : Type uβ
instβ : Category D
π’ : Set C
hπ’ : IsDetecting (Set.op π’)
X Y : C
f : X βΆ Y
hf : β (G : C), G β π’ β β (h : X βΆ G), β! h', f β« h' = h
G : Cα΅α΅
hG : G β Set.op π’
h : G βΆ X.op
β’ β! h', h' β« f.op = h State After: case refine'_1.intro.intro
C : Type uβ
instβΒΉ : Category C
D : Type uβ
instβ : Category D
π’ : Set C
hπ’ : IsDetecting (Set.op π’)
X Y : C
f : X βΆ Y
hf : β (G : C), G β π’ β β (h : X βΆ G), β! h', f β« h' = h
G : Cα΅α΅
hG : G β Set.op π’
h : G βΆ X.op
t : Y βΆ G.unop
ht : f β« t = h.unop
ht' : β (y : Y βΆ G.unop), (fun h' => f β« h' = h.unop) y β y = t
β’ β! h', h' β« f.op = h Tactic: obtain β¨t, ht, ht'β© := hf (unop G) (Set.mem_op.1 hG) h.unop State Before: case refine'_1.intro.intro
C : Type uβ
instβΒΉ : Category C
D : Type uβ
instβ : Category D
π’ : Set C
hπ’ : IsDetecting (Set.op π’)
X Y : C
f : X βΆ Y
hf : β (G : C), G β π’ β β (h : X βΆ G), β! h', f β« h' = h
G : Cα΅α΅
hG : G β Set.op π’
h : G βΆ X.op
t : Y βΆ G.unop
ht : f β« t = h.unop
ht' : β (y : Y βΆ G.unop), (fun h' => f β« h' = h.unop) y β y = t
β’ β! h', h' β« f.op = h State After: no goals Tactic: exact
β¨t.op, Quiver.Hom.unop_inj ht, fun y hy => Quiver.Hom.unop_inj (ht' _ (Quiver.Hom.op_inj hy))β© State Before: case refine'_2
C : Type uβ
instβΒΉ : Category C
D : Type uβ
instβ : Category D
π’ : Set C
hπ’ : IsCodetecting π’
X Y : Cα΅α΅
f : X βΆ Y
hf : β (G : Cα΅α΅), G β Set.op π’ β β (h : G βΆ Y), β! h', h' β« f = h
β’ IsIso f State After: case refine'_2
C : Type uβ
instβΒΉ : Category C
D : Type uβ
instβ : Category D
π’ : Set C
hπ’ : IsCodetecting π’
X Y : Cα΅α΅
f : X βΆ Y
hf : β (G : Cα΅α΅), G β Set.op π’ β β (h : G βΆ Y), β! h', h' β« f = h
G : C
hG : G β π’
h : Y.unop βΆ G
β’ β! h', f.unop β« h' = h Tactic: refine' (isIso_unop_iff _).1 (hπ’ _ fun G hG h => _) State Before: case refine'_2
C : Type uβ
instβΒΉ : Category C
D : Type uβ
instβ : Category D
π’ : Set C
hπ’ : IsCodetecting π’
X Y : Cα΅α΅
f : X βΆ Y
hf : β (G : Cα΅α΅), G β Set.op π’ β β (h : G βΆ Y), β! h', h' β« f = h
G : C
hG : G β π’
h : Y.unop βΆ G
β’ β! h', f.unop β« h' = h State After: case refine'_2.intro.intro
C : Type uβ
instβΒΉ : Category C
D : Type uβ
instβ : Category D
π’ : Set C
hπ’ : IsCodetecting π’
X Y : Cα΅α΅
f : X βΆ Y
hf : β (G : Cα΅α΅), G β Set.op π’ β β (h : G βΆ Y), β! h', h' β« f = h
G : C
hG : G β π’
h : Y.unop βΆ G
t : G.op βΆ X
ht : t β« f = h.op
ht' : β (y : G.op βΆ X), (fun h' => h' β« f = h.op) y β y = t
β’ β! h', f.unop β« h' = h Tactic: obtain β¨t, ht, ht'β© := hf (op G) (Set.op_mem_op.2 hG) h.op State Before: case refine'_2.intro.intro
C : Type uβ
instβΒΉ : Category C
D : Type uβ
instβ : Category D
π’ : Set C
hπ’ : IsCodetecting π’
X Y : Cα΅α΅
f : X βΆ Y
hf : β (G : Cα΅α΅), G β Set.op π’ β β (h : G βΆ Y), β! h', h' β« f = h
G : C
hG : G β π’
h : Y.unop βΆ G
t : G.op βΆ X
ht : t β« f = h.op
ht' : β (y : G.op βΆ X), (fun h' => h' β« f = h.op) y β y = t
β’ β! h', f.unop β« h' = h State After: case refine'_2.intro.intro
C : Type uβ
instβΒΉ : Category C
D : Type uβ
instβ : Category D
π’ : Set C
hπ’ : IsCodetecting π’
X Y : Cα΅α΅
f : X βΆ Y
hf : β (G : Cα΅α΅), G β Set.op π’ β β (h : G βΆ Y), β! h', h' β« f = h
G : C
hG : G β π’
h : Y.unop βΆ G
t : G.op βΆ X
ht : t β« f = h.op
ht' : β (y : G.op βΆ X), (fun h' => h' β« f = h.op) y β y = t
y : X.unop βΆ G
hy : (fun h' => f.unop β« h' = h) y
β’ (fun h' => h' β« f = h.op) y.op Tactic: refine' β¨t.unop, Quiver.Hom.op_inj ht, fun y hy => Quiver.Hom.op_inj (ht' _ _)β© State Before: case refine'_2.intro.intro
C : Type uβ
instβΒΉ : Category C
D : Type uβ
instβ : Category D
π’ : Set C
hπ’ : IsCodetecting π’
X Y : Cα΅α΅
f : X βΆ Y
hf : β (G : Cα΅α΅), G β Set.op π’ β β (h : G βΆ Y), β! h', h' β« f = h
G : C
hG : G β π’
h : Y.unop βΆ G
t : G.op βΆ X
ht : t β« f = h.op
ht' : β (y : G.op βΆ X), (fun h' => h' β« f = h.op) y β y = t
y : X.unop βΆ G
hy : (fun h' => f.unop β« h' = h) y
β’ (fun h' => h' β« f = h.op) y.op State After: no goals Tactic: exact Quiver.Hom.unop_inj (by simpa only using hy) State Before: C : Type uβ
instβΒΉ : Category C
D : Type uβ
instβ : Category D
π’ : Set C
hπ’ : IsCodetecting π’
X Y : Cα΅α΅
f : X βΆ Y
hf : β (G : Cα΅α΅), G β Set.op π’ β β (h : G βΆ Y), β! h', h' β« f = h
G : C
hG : G β π’
h : Y.unop βΆ G
t : G.op βΆ X
ht : t β« f = h.op
ht' : β (y : G.op βΆ X), (fun h' => h' β« f = h.op) y β y = t
y : X.unop βΆ G
hy : (fun h' => f.unop β« h' = h) y
β’ (y.op β« f).unop = h.op.unop State After: no goals Tactic: simpa only using hy
|
C Copyright(C) 1999-2020 National Technology & Engineering Solutions
C of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
C NTESS, the U.S. Government retains certain rights in this software.
C
C See packages/seacas/LICENSE for details
SUBROUTINE CLOSEP (MP, N15, X, Y, IPOINT, COOR, LINKP, JJ)
C***********************************************************************
C     SUBROUTINE CLOSEP = FINDS THE CLOSEST EXISTING POINT TO THE MOUSE
C***********************************************************************
C SUBROUTINE CALLED BY:
C INPUT = INPUTS MESH DEFINITIONS FROM THE LIGHT TABLE
C***********************************************************************
C VARIABLES USED:
C X = THE X LOCATION IN USER COORDINATES
C Y = THE Y LOCATION IN USER COORDINATES
C POINT = ARRAY OF VALUES DEFINING A POINT
C (I, 1) = THE NUMBER OF THE POINT
C (I, 2) = THE X COORDINATE OF THE POINT
C (I, 3) = THE Y COORDINATE OF THE POINT
C (I, 4) = THE BOUNDARY FLAG OF THE POINT
C        JJ     = THE NUMBER OF THE CLOSEST POINT FOUND
C        N15    = THE NUMBER OF POINTS IN THE DATABASE
C***********************************************************************
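C     EXAMPLE (ILLUSTRATIVE ONLY, NOT PART OF THE ORIGINAL SOURCE):
C     GIVEN A MOUSE LOCATION (XM, YM) IN USER COORDINATES, THE CALL
C        CALL CLOSEP (MP, N15, XM, YM, IPOINT, COOR, LINKP, JJ)
C     RETURNS IN JJ THE ID OF THE EXISTING POINT CLOSEST TO (XM, YM)
C***********************************************************************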
DIMENSION IPOINT (MP), COOR (2, MP), LINKP (2, MP)
LOGICAL ADDLNK
ADDLNK = .FALSE.
DMIN = 100000.
DO 100 I = 1, N15
CALL LTSORT (MP, LINKP, I, IPNTR, ADDLNK)
IF (IPNTR .GT. 0) THEN
DIST = SQRT (((COOR (1, IPNTR) - X) **2) +
& ((COOR (2, IPNTR) - Y) **2))
IF (DIST .LT. DMIN) THEN
DMIN = DIST
JJ = IPOINT (IPNTR)
ENDIF
ENDIF
100 CONTINUE
RETURN
END
|
Formal statement is: lemma power2_csqrt[simp,algebra]: "(csqrt z)\<^sup>2 = z" Informal statement is: The square of the complex square root of $z$ is $z$.
|
Barcelona's Clínic hospital has become the first in Spain to adopt 5G technology so that surgeons can carry out operations at a distance in real time.
The low latency of 5G communications networks means that operations can take place remotely without any delays.
The aim of the project is to connect surgeons from around the world without them needing to be present in the operating theater.
Presented on Tuesday, the 'Remote surgery' project will have to wait until 5G technology becomes widely available, which is expected from 2020.
Meanwhile, as part of the Mobile World Congress in Barcelona later this month, surgeons from the Clínic hospital will remotely carry out an operation that will be streamed on 5G.
|
/-
Copyright (c) 2018 Johannes HΓΆlzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes HΓΆlzl, Mitchell Rowett, Scott Morrison, Johan Commelin, Mario Carneiro,
Michael Howes
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.group_theory.subgroup
import Mathlib.deprecated.submonoid
import Mathlib.PostPort
universes u_3 l u_1 u_2 u_4
namespace Mathlib
/-- `s` is an additive subgroup: a set containing 0 and closed under addition and negation. -/
class is_add_subgroup {A : Type u_3} [add_group A] (s : set A) extends is_add_submonoid s where
neg_mem : β {a : A}, a β s β -a β s
/-- `s` is a subgroup: a set containing 1 and closed under multiplication and inverse. -/
class is_subgroup {G : Type u_1} [group G] (s : set G) extends is_submonoid s where
inv_mem : β {a : G}, a β s β aβ»ΒΉ β s
theorem is_subgroup.div_mem {G : Type u_1} [group G] {s : set G} [is_subgroup s] {x : G} {y : G}
(hx : x β s) (hy : y β s) : x / y β s :=
sorry
theorem additive.is_add_subgroup {G : Type u_1} [group G] (s : set G) [is_subgroup s] :
is_add_subgroup s :=
is_add_subgroup.mk is_subgroup.inv_mem
theorem additive.is_add_subgroup_iff {G : Type u_1} [group G] {s : set G} :
is_add_subgroup s β is_subgroup s :=
sorry
theorem multiplicative.is_subgroup {A : Type u_3} [add_group A] (s : set A) [is_add_subgroup s] :
is_subgroup s :=
is_subgroup.mk is_add_subgroup.neg_mem
theorem multiplicative.is_subgroup_iff {A : Type u_3} [add_group A] {s : set A} :
is_subgroup s β is_add_subgroup s :=
sorry
/-- The group structure on a subgroup coerced to a type. -/
def subtype.group {G : Type u_1} [group G] {s : set G} [is_subgroup s] : group β₯s :=
group.mk monoid.mul sorry monoid.one sorry sorry
(fun (x : β₯s) => { val := βxβ»ΒΉ, property := sorry })
(fun (x y : β₯s) => { val := βx / βy, property := sorry }) sorry
/-- The commutative group structure on a commutative subgroup coerced to a type. -/
def subtype.comm_group {G : Type u_1} [comm_group G] {s : set G} [is_subgroup s] : comm_group β₯s :=
comm_group.mk group.mul sorry group.one sorry sorry group.inv group.div sorry sorry
@[simp] theorem is_subgroup.coe_inv {G : Type u_1} [group G] {s : set G} [is_subgroup s] (a : β₯s) :
β(aβ»ΒΉ) = (βaβ»ΒΉ) :=
rfl
@[simp] theorem is_subgroup.coe_gpow {G : Type u_1} [group G] {s : set G} [is_subgroup s] (a : β₯s)
(n : β€) : β(a ^ n) = βa ^ n :=
sorry
@[simp] theorem is_add_subgroup.gsmul_coe {A : Type u_3} [add_group A] {s : set A}
[is_add_subgroup s] (a : β₯s) (n : β€) : β(n β’β€ a) = n β’β€ βa :=
sorry
theorem is_add_subgroup.of_add_neg {G : Type u_1} [add_group G] (s : set G) (one_mem : 0 β s)
(div_mem : β {a b : G}, a β s β b β s β a + -b β s) : is_add_subgroup s :=
sorry
theorem is_add_subgroup.of_sub {A : Type u_3} [add_group A] (s : set A) (zero_mem : 0 β s)
(sub_mem : β {a b : A}, a β s β b β s β a - b β s) : is_add_subgroup s :=
sorry
protected instance is_add_subgroup.inter {G : Type u_1} [add_group G] (sβ : set G) (sβ : set G)
[is_add_subgroup sβ] [is_add_subgroup sβ] : is_add_subgroup (sβ β© sβ) :=
is_add_subgroup.mk
fun (x : G) (hx : x β sβ β© sβ) =>
{ left := is_add_subgroup.neg_mem (and.left hx),
right := is_add_subgroup.neg_mem (and.right hx) }
protected instance is_add_subgroup.Inter {G : Type u_1} [add_group G] {ΞΉ : Sort u_2} (s : ΞΉ β set G)
[h : β (y : ΞΉ), is_add_subgroup (s y)] : is_add_subgroup (set.Inter s) :=
is_add_subgroup.mk
fun (x : G) (h_1 : x β set.Inter s) =>
iff.mpr set.mem_Inter fun (y : ΞΉ) => is_add_subgroup.neg_mem (iff.mp set.mem_Inter h_1 y)
theorem is_add_subgroup_Union_of_directed {G : Type u_1} [add_group G] {ΞΉ : Type u_2}
[hΞΉ : Nonempty ΞΉ] (s : ΞΉ β set G) [β (i : ΞΉ), is_add_subgroup (s i)]
(directed : β (i j : ΞΉ), β (k : ΞΉ), s i β s k β§ s j β s k) :
is_add_subgroup (set.Union fun (i : ΞΉ) => s i) :=
sorry
def gpowers {G : Type u_1} [group G] (x : G) : set G := set.range (pow x)
def gmultiples {A : Type u_3} [add_group A] (x : A) : set A := set.range fun (i : β€) => i β’β€ x
protected instance gpowers.is_subgroup {G : Type u_1} [group G] (x : G) : is_subgroup (gpowers x) :=
is_subgroup.mk fun (xβ : G) (_x : xβ β gpowers x) => sorry
protected instance gmultiples.is_add_subgroup {A : Type u_3} [add_group A] (x : A) :
is_add_subgroup (gmultiples x) :=
iff.mp multiplicative.is_subgroup_iff (gpowers.is_subgroup x)
theorem is_subgroup.gpow_mem {G : Type u_1} [group G] {a : G} {s : set G} [is_subgroup s]
(h : a β s) {i : β€} : a ^ i β s :=
int.cases_on i (fun (i : β) => idRhs (a ^ i β s) (is_submonoid.pow_mem h))
fun (i : β) => idRhs (a ^ Nat.succ iβ»ΒΉ β s) (is_subgroup.inv_mem (is_submonoid.pow_mem h))
theorem is_add_subgroup.gsmul_mem {A : Type u_3} [add_group A] {a : A} {s : set A}
[is_add_subgroup s] : a β s β β {i : β€}, i β’β€ a β s :=
is_subgroup.gpow_mem
theorem gpowers_subset {G : Type u_1} [group G] {a : G} {s : set G} [is_subgroup s] (h : a β s) :
gpowers a β s :=
sorry
theorem gmultiples_subset {A : Type u_3} [add_group A] {a : A} {s : set A} [is_add_subgroup s]
(h : a β s) : gmultiples a β s :=
gpowers_subset h
theorem mem_gpowers {G : Type u_1} [group G] {a : G} : a β gpowers a := sorry
theorem mem_gmultiples {A : Type u_3} [add_group A] {a : A} : a β gmultiples a := sorry
namespace is_subgroup
theorem inv_mem_iff {G : Type u_1} {a : G} [group G] (s : set G) [is_subgroup s] :
aβ»ΒΉ β s β a β s :=
sorry
theorem Mathlib.is_add_subgroup.add_mem_cancel_right {G : Type u_1} {a : G} {b : G} [add_group G]
(s : set G) [is_add_subgroup s] (h : a β s) : b + a β s β b β s :=
sorry
theorem Mathlib.is_add_subgroup.add_mem_cancel_left {G : Type u_1} {a : G} {b : G} [add_group G]
(s : set G) [is_add_subgroup s] (h : a β s) : a + b β s β b β s :=
sorry
end is_subgroup
class normal_add_subgroup {A : Type u_3} [add_group A] (s : set A) extends is_add_subgroup s where
normal : β (n : A), n β s β β (g : A), g + n + -g β s
class normal_subgroup {G : Type u_1} [group G] (s : set G) extends is_subgroup s where
normal : β (n : G), n β s β β (g : G), g * n * (gβ»ΒΉ) β s
theorem normal_add_subgroup_of_add_comm_group {G : Type u_1} [add_comm_group G] (s : set G)
[hs : is_add_subgroup s] : normal_add_subgroup s :=
sorry
theorem additive.normal_add_subgroup {G : Type u_1} [group G] (s : set G) [normal_subgroup s] :
normal_add_subgroup s :=
normal_add_subgroup.mk normal_subgroup.normal
theorem additive.normal_add_subgroup_iff {G : Type u_1} [group G] {s : set G} :
normal_add_subgroup s β normal_subgroup s :=
sorry
theorem multiplicative.normal_subgroup {A : Type u_3} [add_group A] (s : set A)
[normal_add_subgroup s] : normal_subgroup s :=
normal_subgroup.mk normal_add_subgroup.normal
theorem multiplicative.normal_subgroup_iff {A : Type u_3} [add_group A] {s : set A} :
normal_subgroup s β normal_add_subgroup s :=
sorry
namespace is_subgroup
-- Normal subgroup properties
theorem mem_norm_comm {G : Type u_1} [group G] {s : set G} [normal_subgroup s] {a : G} {b : G}
(hab : a * b β s) : b * a β s :=
sorry
theorem Mathlib.is_add_subgroup.mem_norm_comm_iff {G : Type u_1} [add_group G] {s : set G}
[normal_add_subgroup s] {a : G} {b : G} : a + b β s β b + a β s :=
{ mp := is_add_subgroup.mem_norm_comm, mpr := is_add_subgroup.mem_norm_comm }
/-- The trivial subgroup -/
def trivial (G : Type u_1) [group G] : set G := singleton 1
@[simp] theorem Mathlib.is_add_subgroup.mem_trivial {G : Type u_1} [add_group G] {g : G} :
g β is_add_subgroup.trivial G β g = 0 :=
set.mem_singleton_iff
protected instance Mathlib.is_add_subgroup.trivial_normal {G : Type u_1} [add_group G] :
normal_add_subgroup (is_add_subgroup.trivial G) :=
sorry
theorem Mathlib.is_add_subgroup.eq_trivial_iff {G : Type u_1} [add_group G] {s : set G}
[is_add_subgroup s] : s = is_add_subgroup.trivial G β β (x : G), x β s β x = 0 :=
sorry
protected instance univ_subgroup {G : Type u_1} [group G] : normal_subgroup set.univ :=
normal_subgroup.mk
(eq.mpr
(id
(Eq.trans
(forall_congr_eq
fun (n : G) =>
Eq.trans
(imp_congr_eq
(propext ((fun {Ξ± : Type u_1} (x : Ξ±) => iff_true_intro (set.mem_univ x)) n))
(Eq.trans
(forall_congr_eq
fun (g : G) =>
propext
((fun {Ξ± : Type u_1} (x : Ξ±) => iff_true_intro (set.mem_univ x))
(g * n * (gβ»ΒΉ))))
(propext (forall_const G))))
(propext (forall_prop_of_true True.intro)))
(propext (forall_const G))))
trivial)
def Mathlib.is_add_subgroup.add_center (G : Type u_1) [add_group G] : set G :=
set_of fun (z : G) => β (g : G), g + z = z + g
theorem mem_center {G : Type u_1} [group G] {a : G} : a β center G β β (g : G), g * a = a * g :=
iff.rfl
protected instance center_normal {G : Type u_1} [group G] : normal_subgroup (center G) := sorry
def Mathlib.is_add_subgroup.add_normalizer {G : Type u_1} [add_group G] (s : set G) : set G :=
set_of fun (g : G) => β (n : G), n β s β g + n + -g β s
protected instance Mathlib.is_add_subgroup.normalizer_is_add_subgroup {G : Type u_1} [add_group G]
(s : set G) : is_add_subgroup (is_add_subgroup.add_normalizer s) :=
sorry
theorem Mathlib.is_add_subgroup.subset_add_normalizer {G : Type u_1} [add_group G] (s : set G)
[is_add_subgroup s] : s β is_add_subgroup.add_normalizer s :=
sorry
/-- Every subgroup is a normal subgroup of its normalizer -/
protected instance Mathlib.is_add_subgroup.add_normal_in_add_normalizer {G : Type u_1} [add_group G]
(s : set G) [is_add_subgroup s] : normal_add_subgroup (subtype.val β»ΒΉ' s) :=
normal_add_subgroup.mk
fun (a : β₯(is_add_subgroup.add_normalizer s)) (ha : a β subtype.val β»ΒΉ' s)
(_x : β₯(is_add_subgroup.add_normalizer s)) => sorry
end is_subgroup
-- Homomorphism subgroups
namespace is_group_hom
def ker {G : Type u_1} {H : Type u_2} [group H] (f : G β H) : set G := f β»ΒΉ' is_subgroup.trivial H
theorem Mathlib.is_add_group_hom.mem_ker {G : Type u_1} {H : Type u_2} [add_group H] (f : G β H)
{x : G} : x β is_add_group_hom.ker f β f x = 0 :=
is_add_subgroup.mem_trivial
theorem Mathlib.is_add_group_hom.zero_ker_neg {G : Type u_1} {H : Type u_2} [add_group G]
[add_group H] (f : G β H) [is_add_group_hom f] {a : G} {b : G} (h : f (a + -b) = 0) :
f a = f b :=
sorry
theorem one_ker_inv' {G : Type u_1} {H : Type u_2} [group G] [group H] (f : G β H) [is_group_hom f]
{a : G} {b : G} (h : f (aβ»ΒΉ * b) = 1) : f a = f b :=
sorry
theorem Mathlib.is_add_group_hom.neg_ker_zero {G : Type u_1} {H : Type u_2} [add_group G]
[add_group H] (f : G β H) [is_add_group_hom f] {a : G} {b : G} (h : f a = f b) :
f (a + -b) = 0 :=
sorry
theorem inv_ker_one' {G : Type u_1} {H : Type u_2} [group G] [group H] (f : G β H) [is_group_hom f]
{a : G} {b : G} (h : f a = f b) : f (aβ»ΒΉ * b) = 1 :=
sorry
theorem Mathlib.is_add_group_hom.zero_iff_ker_neg {G : Type u_1} {H : Type u_2} [add_group G]
[add_group H] (f : G β H) [is_add_group_hom f] (a : G) (b : G) : f a = f b β f (a + -b) = 0 :=
{ mp := is_add_group_hom.neg_ker_zero f, mpr := is_add_group_hom.zero_ker_neg f }
theorem one_iff_ker_inv' {G : Type u_1} {H : Type u_2} [group G] [group H] (f : G β H)
[is_group_hom f] (a : G) (b : G) : f a = f b β f (aβ»ΒΉ * b) = 1 :=
{ mp := inv_ker_one' f, mpr := one_ker_inv' f }
theorem inv_iff_ker {G : Type u_1} {H : Type u_2} [group G] [group H] (f : G β H)
[w : is_group_hom f] (a : G) (b : G) : f a = f b β a * (bβ»ΒΉ) β ker f :=
eq.mpr (id (Eq._oldrec (Eq.refl (f a = f b β a * (bβ»ΒΉ) β ker f)) (propext (mem_ker f))))
(one_iff_ker_inv f a b)
theorem inv_iff_ker' {G : Type u_1} {H : Type u_2} [group G] [group H] (f : G β H)
[w : is_group_hom f] (a : G) (b : G) : f a = f b β aβ»ΒΉ * b β ker f :=
eq.mpr (id (Eq._oldrec (Eq.refl (f a = f b β aβ»ΒΉ * b β ker f)) (propext (mem_ker f))))
(one_iff_ker_inv' f a b)
protected instance Mathlib.is_add_group_hom.image_add_subgroup {G : Type u_1} {H : Type u_2}
[add_group G] [add_group H] (f : G β H) [is_add_group_hom f] (s : set G) [is_add_subgroup s] :
is_add_subgroup (f '' s) :=
is_add_subgroup.mk fun (a : H) (_x : a β f '' s) => sorry
protected instance range_subgroup {G : Type u_1} {H : Type u_2} [group G] [group H] (f : G β H)
[is_group_hom f] : is_subgroup (set.range f) :=
set.image_univ βΈ is_group_hom.image_subgroup f set.univ
protected instance preimage {G : Type u_1} {H : Type u_2} [group G] [group H] (f : G β H)
[is_group_hom f] (s : set H) [is_subgroup s] : is_subgroup (f β»ΒΉ' s) :=
is_subgroup.mk
(eq.mpr
(id
(Eq.trans
(forall_congr_eq
fun (a : G) =>
Eq.trans
(imp_congr_ctx_eq (propext set.mem_preimage)
fun (_h : f a β s) =>
Eq.trans
(Eq.trans (propext set.mem_preimage)
((fun (αΎ° αΎ°_1 : H) (e_2 : αΎ° = αΎ°_1) (αΎ°_2 αΎ°_3 : set H) (e_3 : αΎ°_2 = αΎ°_3) =>
congr (congr_arg has_mem.mem e_2) e_3)
(f (aβ»ΒΉ)) (f aβ»ΒΉ) (map_inv f a) s s (Eq.refl s)))
(propext
((fun [c : is_subgroup s] {a : H} (αΎ° : a β s) =>
iff_true_intro (is_subgroup.inv_mem αΎ°))
(iff.mpr (iff_true_intro _h) True.intro))))
(propext forall_true_iff))
(propext (forall_const G))))
trivial)
protected instance preimage_normal {G : Type u_1} {H : Type u_2} [group G] [group H] (f : G β H)
[is_group_hom f] (s : set H) [normal_subgroup s] : normal_subgroup (f β»ΒΉ' s) :=
sorry
protected instance normal_subgroup_ker {G : Type u_1} {H : Type u_2} [group G] [group H] (f : G β H)
[is_group_hom f] : normal_subgroup (ker f) :=
is_group_hom.preimage_normal f (is_subgroup.trivial H)
theorem Mathlib.is_add_group_hom.injective_of_trivial_ker {G : Type u_1} {H : Type u_2}
[add_group G] [add_group H] (f : G β H) [is_add_group_hom f]
(h : is_add_group_hom.ker f = is_add_subgroup.trivial G) : function.injective f :=
sorry
theorem trivial_ker_of_injective {G : Type u_1} {H : Type u_2} [group G] [group H] (f : G β H)
[is_group_hom f] (h : function.injective f) : ker f = is_subgroup.trivial G :=
sorry
theorem Mathlib.is_add_group_hom.injective_iff_trivial_ker {G : Type u_1} {H : Type u_2}
[add_group G] [add_group H] (f : G β H) [is_add_group_hom f] :
function.injective f β is_add_group_hom.ker f = is_add_subgroup.trivial G :=
{ mp := is_add_group_hom.trivial_ker_of_injective f,
mpr := is_add_group_hom.injective_of_trivial_ker f }
theorem Mathlib.is_add_group_hom.trivial_ker_iff_eq_zero {G : Type u_1} {H : Type u_2} [add_group G]
[add_group H] (f : G β H) [is_add_group_hom f] :
is_add_group_hom.ker f = is_add_subgroup.trivial G β β (x : G), f x = 0 β x = 0 :=
sorry
end is_group_hom
protected instance subtype_val.is_add_group_hom {G : Type u_1} [add_group G] {s : set G}
[is_add_subgroup s] : is_add_group_hom subtype.val :=
is_add_group_hom.mk
protected instance coe.is_add_group_hom {G : Type u_1} [add_group G] {s : set G}
[is_add_subgroup s] : is_add_group_hom coe :=
is_add_group_hom.mk
protected instance subtype_mk.is_group_hom {G : Type u_1} {H : Type u_2} [group G] [group H]
{s : set G} [is_subgroup s] (f : H β G) [is_group_hom f] (h : β (x : H), f x β s) :
is_group_hom fun (x : H) => { val := f x, property := h x } :=
is_group_hom.mk
protected instance set_inclusion.is_group_hom {G : Type u_1} [group G] {s : set G} {t : set G}
[is_subgroup s] [is_subgroup t] (h : s β t) : is_group_hom (set.inclusion h) :=
subtype_mk.is_group_hom (fun (x : β₯s) => βx) fun (x : β₯s) => set.inclusion._proof_1 h x
/-- `subtype.val : set.range f β H` as a monoid homomorphism, when `f` is a monoid homomorphism. -/
def monoid_hom.range_subtype_val {G : Type u_1} {H : Type u_2} [monoid G] [monoid H] (f : G β* H) :
β₯(set.range βf) β* H :=
monoid_hom.of subtype.val
/-- `set.range_factorization f : G β set.range f` as a monoid homomorphism, when `f` is a monoid
homomorphism. -/
def add_monoid_hom.range_factorization {G : Type u_1} {H : Type u_2} [add_monoid G] [add_monoid H]
(f : G β+ H) : G β+ β₯(set.range βf) :=
add_monoid_hom.mk (set.range_factorization βf) sorry sorry
namespace add_group
inductive in_closure {A : Type u_3} [add_group A] (s : set A) : A β Prop where
| basic : β {a : A}, a β s β in_closure s a
| zero : in_closure s 0
| neg : β {a : A}, in_closure s a β in_closure s (-a)
| add : β {a b : A}, in_closure s a β in_closure s b β in_closure s (a + b)
end add_group
namespace group
inductive in_closure {G : Type u_1} [group G] (s : set G) : G β Prop where
| basic : β {a : G}, a β s β in_closure s a
| one : in_closure s 1
| inv : β {a : G}, in_closure s a β in_closure s (aβ»ΒΉ)
| mul : β {a b : G}, in_closure s a β in_closure s b β in_closure s (a * b)
/-- `group.closure s` is the subgroup closed over `s`, i.e. the smallest subgroup containing `s`. -/
def Mathlib.add_group.closure {G : Type u_1} [add_group G] (s : set G) : set G :=
set_of fun (a : G) => add_group.in_closure s a
theorem Mathlib.add_group.mem_closure {G : Type u_1} [add_group G] {s : set G} {a : G} :
a β s β a β add_group.closure s :=
add_group.in_closure.basic
protected instance closure.is_subgroup {G : Type u_1} [group G] (s : set G) :
is_subgroup (closure s) :=
is_subgroup.mk fun (a : G) => in_closure.inv
theorem Mathlib.add_group.subset_closure {G : Type u_1} [add_group G] {s : set G} :
s β add_group.closure s :=
fun (a : G) => add_group.mem_closure
theorem Mathlib.add_group.closure_subset {G : Type u_1} [add_group G] {s : set G} {t : set G}
[is_add_subgroup t] (h : s β t) : add_group.closure s β t :=
sorry
theorem Mathlib.add_group.closure_subset_iff {G : Type u_1} [add_group G] (s : set G) (t : set G)
[is_add_subgroup t] : add_group.closure s β t β s β t :=
{ mp := fun (h : add_group.closure s β t) (b : G) (ha : b β s) => h (add_group.mem_closure ha),
mpr := fun (h : s β t) (b : G) (ha : b β add_group.closure s) => add_group.closure_subset h ha }
theorem Mathlib.add_group.closure_mono {G : Type u_1} [add_group G] {s : set G} {t : set G}
(h : s β t) : add_group.closure s β add_group.closure t :=
add_group.closure_subset (set.subset.trans h add_group.subset_closure)
@[simp] theorem closure_subgroup {G : Type u_1} [group G] (s : set G) [is_subgroup s] :
closure s = s :=
set.subset.antisymm (closure_subset (set.subset.refl s)) subset_closure
theorem Mathlib.add_group.exists_list_of_mem_closure {G : Type u_1} [add_group G] {s : set G}
{a : G} (h : a β add_group.closure s) :
β (l : List G), (β (x : G), x β l β x β s β¨ -x β s) β§ list.sum l = a :=
sorry
theorem Mathlib.add_group.image_closure {G : Type u_1} {H : Type u_2} [add_group G] [add_group H]
(f : G β H) [is_add_group_hom f] (s : set G) :
f '' add_group.closure s = add_group.closure (f '' s) :=
sorry
theorem Mathlib.add_group.mclosure_subset {G : Type u_1} [add_group G] {s : set G} :
add_monoid.closure s β add_group.closure s :=
add_monoid.closure_subset add_group.subset_closure
theorem Mathlib.add_group.mclosure_neg_subset {G : Type u_1} [add_group G] {s : set G} :
add_monoid.closure (Neg.neg β»ΒΉ' s) β add_group.closure s :=
add_monoid.closure_subset
fun (x : G) (hx : x β Neg.neg β»ΒΉ' s) =>
neg_neg x βΈ is_add_subgroup.neg_mem (add_group.subset_closure hx)
theorem Mathlib.add_group.closure_eq_mclosure {G : Type u_1} [add_group G] {s : set G} :
add_group.closure s = add_monoid.closure (s βͺ Neg.neg β»ΒΉ' s) :=
sorry
theorem Mathlib.add_group.mem_closure_union_iff {G : Type u_1} [add_comm_group G] {s : set G}
{t : set G} {x : G} :
x β add_group.closure (s βͺ t) β
β (y : G),
β (H : y β add_group.closure s), β (z : G), β (H : z β add_group.closure t), y + z = x :=
sorry
theorem gpowers_eq_closure {G : Type u_1} [group G] {a : G} : gpowers a = closure (singleton a) :=
sorry
end group
namespace is_subgroup
theorem Mathlib.is_add_subgroup.trivial_eq_closure {G : Type u_1} [add_group G] :
is_add_subgroup.trivial G = add_group.closure β
:=
sorry
end is_subgroup
/-The normal closure of a set s is the subgroup closure of all the conjugates of
elements of s. It is the smallest normal subgroup containing s. -/
namespace group
theorem conjugates_subset {G : Type u_1} [group G] {t : set G} [normal_subgroup t] {a : G}
(h : a β t) : conjugates a β t :=
sorry
theorem conjugates_of_set_subset' {G : Type u_1} [group G] {s : set G} {t : set G}
[normal_subgroup t] (h : s β t) : conjugates_of_set s β t :=
set.bUnion_subset fun (x : G) (H : x β s) => conjugates_subset (h H)
/-- The normal closure of a set s is the subgroup closure of all the conjugates of
elements of s. It is the smallest normal subgroup containing s. -/
def normal_closure {G : Type u_1} [group G] (s : set G) : set G := closure (conjugates_of_set s)
theorem conjugates_of_set_subset_normal_closure {G : Type u_1} {s : set G} [group G] :
conjugates_of_set s β normal_closure s :=
subset_closure
theorem subset_normal_closure {G : Type u_1} {s : set G} [group G] : s β normal_closure s :=
set.subset.trans subset_conjugates_of_set conjugates_of_set_subset_normal_closure
/-- The normal closure of a set is a subgroup. -/
protected instance normal_closure.is_subgroup {G : Type u_1} [group G] (s : set G) :
is_subgroup (normal_closure s) :=
closure.is_subgroup (conjugates_of_set s)
/-- The normal closure of s is a normal subgroup. -/
protected instance normal_closure.is_normal {G : Type u_1} {s : set G} [group G] :
normal_subgroup (normal_closure s) :=
sorry
/-- The normal closure of s is the smallest normal subgroup containing s. -/
theorem normal_closure_subset {G : Type u_1} [group G] {s : set G} {t : set G} [normal_subgroup t]
(h : s β t) : normal_closure s β t :=
sorry
theorem normal_closure_subset_iff {G : Type u_1} [group G] {s : set G} {t : set G}
[normal_subgroup t] : s β t β normal_closure s β t :=
{ mp := normal_closure_subset, mpr := set.subset.trans subset_normal_closure }
theorem normal_closure_mono {G : Type u_1} [group G] {s : set G} {t : set G} :
s β t β normal_closure s β normal_closure t :=
fun (h : s β t) => normal_closure_subset (set.subset.trans h subset_normal_closure)
end group
class simple_group (G : Type u_4) [group G] where
simple : β (N : set G) [_inst_1_1 : normal_subgroup N], N = is_subgroup.trivial G β¨ N = set.univ
class simple_add_group (A : Type u_4) [add_group A] where
simple :
β (N : set A) [_inst_1_1 : normal_add_subgroup N], N = is_add_subgroup.trivial A β¨ N = set.univ
theorem additive.simple_add_group_iff {G : Type u_1} [group G] :
simple_add_group (additive G) β simple_group G :=
sorry
protected instance additive.simple_add_group {G : Type u_1} [group G] [simple_group G] :
simple_add_group (additive G) :=
iff.mpr additive.simple_add_group_iff _inst_2
theorem multiplicative.simple_group_iff {A : Type u_3} [add_group A] :
simple_group (multiplicative A) β simple_add_group A :=
sorry
protected instance multiplicative.simple_group {A : Type u_3} [add_group A] [simple_add_group A] :
simple_group (multiplicative A) :=
iff.mpr multiplicative.simple_group_iff _inst_2
theorem simple_add_group_of_surjective {G : Type u_1} {H : Type u_2} [add_group G] [add_group H]
[simple_add_group G] (f : G β H) [is_add_group_hom f] (hf : function.surjective f) :
simple_add_group H :=
sorry
/-- Create a bundled subgroup from a set `s` and `[is_subgroup s]`. -/
def subgroup.of {G : Type u_1} [group G] (s : set G) [h : is_subgroup s] : subgroup G :=
subgroup.mk s sorry sorry is_subgroup.inv_mem
protected instance subgroup.is_subgroup {G : Type u_1} [group G] (K : subgroup G) :
is_subgroup βK :=
is_subgroup.mk (subgroup.inv_mem' K)
protected instance subgroup.of_normal {G : Type u_1} [group G] (s : set G) [h : is_subgroup s]
[n : normal_subgroup s] : subgroup.normal (subgroup.of s) :=
subgroup.normal.mk normal_subgroup.normal
end Mathlib
|
lemma discrete_subset_disconnected: fixes S :: "'a::topological_space set" fixes t :: "'b::real_normed_vector set" assumes conf: "continuous_on S f" and no: "\<And>x. x \<in> S \<Longrightarrow> \<exists>e>0. \<forall>y. y \<in> S \<and> f y \<noteq> f x \<longrightarrow> e \<le> norm (f y - f x)" shows "f ` S \<subseteq> {y. connected_component_set (f ` S) y = {y}}"
|
module Data.VectorSpace
import Data.Vect
infixr 9 *^
infixl 9 ^/
infixl 6 ^+^, ^-^
infix 7 `dot`
public export
interface VectorSpace v where
||| Vector with no magnitude (unit for addition).
zeroVector : v
||| Multiplication by a scalar.
(*^) : Double -> v -> v
||| Division by a scalar.
(^/) : v -> Double -> v
||| Vector addition
(^+^) : v -> v -> v
||| Vector subtraction
(^-^) : v -> v -> v
||| Vector negation. Addition with a negated vector should
||| be the same as subtraction.
negateVector : v -> v
negateVector v = zeroVector ^-^ v
||| Dot product (also known as scalar or inner product).
||| For two vectors, mathematically represented as a = a1,a2,...,an and b = b1,b2,...,bn,
||| the dot product is a . b = a1*b1 + a2*b2 + ... + an*bn.
dot : v -> v -> Double
||| Vector's norm (also known as magnitude).
||| For a vector represented mathematically
||| as a = a1,a2,...,an, the norm is the square root of a1^2 + a2^2 + ... + an^2.
norm : v -> Double
norm v = sqrt $ dot v v
||| Return a vector with the same origin and orientation (angle),
||| but such that the norm is one (the unit for multiplication by a scalar).
normalize : v -> v
normalize v = let n = norm v in if n == 0 then v else v ^/ n
--------------------------------------------------------------------------------
-- Implementations
--------------------------------------------------------------------------------
public export
VectorSpace Double where
zeroVector = 0.0
(^-^) = (-)
(^+^) = (+)
(^/) = (/)
(*^) = (*)
dot = (*)
public export
{n : _} -> VectorSpace (Vect n Double) where
zeroVector = replicate n 0.0
(^-^) = zipWith (-)
(^+^) = zipWith (+)
v ^/ s = map (/ s) v
s *^ v = map (* s) v
dot a b = sum $ zipWith (*) a b
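-- A minimal usage sketch (ours, not part of the interface above): with the
-- Vect implementation, the vector [3,0,4] has norm 5, so its normalization
-- has norm 1.
exampleNorm : Double
exampleNorm = norm (normalize (the (Vect 3 Double) [3.0, 0.0, 4.0]))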
|
Set Implicit Arguments.
Require Import LibLN.
Implicit Types x : var.
Implicit Types X : var.
(* ********************************************************************** *)
(** * Description of the Language *)
(** Representation of types *)
Inductive Mode := Sub | Sup.
Definition flip (m : Mode) : Mode :=
match m with
| Sub => Sup
| Sup => Sub
end.
Inductive typ : Set :=
| typ_btm : typ
| typ_top : typ
| typ_int : typ
| typ_arrow : typ -> typ -> typ
| typ_bvar : nat -> typ
| typ_fvar : var -> typ
| typ_all : Mode -> typ -> typ -> typ.
(** Representation of pre-terms *)
Inductive trm : Set :=
| trm_unit : trm
| trm_bvar : nat -> trm
| trm_fvar : var -> trm
| trm_nval : nat -> trm
| trm_nsucc: trm -> trm
| trm_nind : trm -> trm -> trm -> trm
| trm_abs : typ -> trm -> trm
| trm_app : trm -> trm -> trm
| trm_tabs : Mode -> typ -> trm -> trm
| trm_tapp : trm -> typ -> trm.
(** Opening up a type binder occurring in a type *)
Fixpoint open_tt_rec (K : nat) (U : typ) (T : typ) {struct T} : typ :=
match T with
| typ_btm => typ_btm
| typ_top => typ_top
| typ_int => typ_int
| typ_bvar J => If K = J then U else (typ_bvar J)
| typ_fvar X => typ_fvar X
| typ_arrow T1 T2 => typ_arrow (open_tt_rec K U T1) (open_tt_rec K U T2)
| typ_all m T1 T2 => typ_all m (open_tt_rec K U T1)(open_tt_rec (S K) U T2)
end.
Definition open_tt T U := open_tt_rec 0 U T.
(** Opening up a type binder occurring in a term *)
Fixpoint open_te_rec (K : nat) (U : typ) (e : trm) {struct e} : trm :=
match e with
| trm_unit => trm_unit
| trm_bvar i => trm_bvar i
| trm_fvar x => trm_fvar x
| trm_nval v => trm_nval v
| trm_nsucc t => trm_nsucc (open_te_rec K U t)
| trm_nind t1 t2 t3 => trm_nind (open_te_rec K U t1) (open_te_rec K U t2) (open_te_rec K U t3)
| trm_abs V e1 => trm_abs (open_tt_rec K U V) (open_te_rec K U e1)
| trm_app e1 e2 => trm_app (open_te_rec K U e1) (open_te_rec K U e2)
| trm_tabs m T e => trm_tabs m (open_tt_rec K U T) (open_te_rec (S K) U e)
| trm_tapp e1 V => trm_tapp (open_te_rec K U e1) (open_tt_rec K U V)
end.
Definition open_te t U := open_te_rec 0 U t.
(** Opening up a term binder occurring in a term *)
Fixpoint open_ee_rec (k : nat) (f : trm) (e : trm) {struct e} : trm :=
match e with
| trm_unit => trm_unit
| trm_bvar i => If k = i then f else (trm_bvar i)
| trm_fvar x => trm_fvar x
| trm_nval v => trm_nval v
| trm_nsucc s => trm_nsucc (open_ee_rec k f s)
| trm_nind i z s => trm_nind (open_ee_rec k f i) (open_ee_rec k f z) (open_ee_rec k f s)
| trm_abs V e1 => trm_abs V (open_ee_rec (S k) f e1)
| trm_app e1 e2 => trm_app (open_ee_rec k f e1) (open_ee_rec k f e2)
| trm_tabs m T t => trm_tabs m T (open_ee_rec k f t)
| trm_tapp t T => trm_tapp (open_ee_rec k f t) T
end.
Definition open_ee t u := open_ee_rec 0 u t.
(** Notation for opening up binders with type or term variables *)
Notation "T 'open_tt_var' X" := (open_tt T (typ_fvar X)) (at level 67).
Notation "t 'open_te_var' X" := (open_te t (typ_fvar X)) (at level 67).
Notation "t 'open_ee_var' x" := (open_ee t (trm_fvar x)) (at level 67).
Inductive bind : Set :=
| bind_tRel : Mode -> typ -> bind
| bind_typ : typ -> bind.
Notation "X !: m <>: T" := (X ~ bind_tRel m T)
(at level 23, left associativity) : env_scope.
Notation "x ~: T" := (x ~ bind_typ T)
(at level 23, left associativity) : env_scope.
Definition env := LibEnv.env bind.
Definition mode_to_sub m := match m with
| Sub => typ_top
| Sup => typ_btm end.
(** Types as locally closed pre-types *)
Inductive type : typ -> Prop :=
| type_top :
type typ_top
| type_int :
type typ_int
| type_btm :
type typ_btm
| type_var : forall X,
type (typ_fvar X)
| type_arrow : forall T1 T2,
type T1 ->
type T2 ->
type (typ_arrow T1 T2)
| type_all : forall m L T1 T2,
type T1 ->
(forall X, X \notin L -> type (T2 open_tt_var X)) ->
type (typ_all m T1 T2).
(** Terms as locally closed pre-terms *)
Inductive term : trm -> Prop :=
| term_unit : term trm_unit
| term_var : forall x,
term (trm_fvar x)
| term_nval : forall v,
term (trm_nval v)
| term_nsucc : forall t,
term t -> term (trm_nsucc t)
| term_nind : forall t1 t2 t3,
term t1 ->
term t2 ->
term t3 ->
term (trm_nind t1 t2 t3)
| term_abs : forall L V e1,
type V ->
(forall x, x \notin L -> term (e1 open_ee_var x)) ->
term (trm_abs V e1)
| term_app : forall e1 e2,
term e1 ->
term e2 ->
term (trm_app e1 e2)
| term_tabs : forall m T L e1, type T ->
(forall X, X \notin L -> term (e1 open_te_var X)) ->
term (trm_tabs m T e1)
| term_tapp : forall e1 V,
term e1 ->
type V ->
term (trm_tapp e1 V).
(** Bindings map term variables to types, and keep type variables in the environment.
[x ~: T] is a typing assumption *)
(** Well-formedness of a pre-type T in an environment E:
all the type variables of T must be bound via a
subtyping relation in E. This predicate implies
that T is a type *)
Inductive wft : env -> typ -> Prop :=
| wft_top : forall E,
wft E typ_top
| wft_var : forall m T1 E X,
binds X (bind_tRel m T1) E ->
wft E (typ_fvar X)
| wft_int : forall E,
wft E typ_int
| wft_btm :forall E,
wft E typ_btm
| wft_arrow : forall E T1 T2,
wft E T1 ->
wft E T2 ->
wft E (typ_arrow T1 T2)
| wft_all : forall L E T m Tn,
wft E Tn ->
(forall X, X \notin L ->
wft (E & X !: m <>: Tn) (T open_tt_var X)) ->
wft E (typ_all m Tn T).
Inductive okt : env -> Prop :=
| okt_empty :
okt empty
| okt_tvr : forall E X m T,
okt E -> wft E T -> X # E -> okt (E & X !: m <>: T)
| okt_typ : forall E x T,
okt E -> wft E T -> x # E -> okt (E & x ~: T).
Definition BindPosVariance m1 m2 :=
match m1 with
| Sup => m2
| Sub => flip m2 end.
Definition SelectBound m1:=
match m1 with
| Sup => typ_btm
| Sub => typ_top end.
Inductive R : env -> Mode -> typ -> typ -> Prop :=
| SInt : forall E m, okt E -> R E m typ_int typ_int
| STB1 : forall E A m, okt E ->wft E A ->R E m A (mode_to_sub m)
| STB2 : forall E A m, okt E -> wft E A ->
R E m (mode_to_sub (flip m)) A
| SFun : forall E A B C D m, R E (flip m) A C -> R E m B D -> R E m (typ_arrow A B) (typ_arrow C D)
| SVarRefl : forall E X m, okt E -> wft E (typ_fvar X) -> R E m (typ_fvar X) (typ_fvar X)
| SVarBnd : forall E X m T F, binds X (bind_tRel m T) E -> wft E T ->
R E m T F -> R E m (typ_fvar X) F
| SVarBndFl : forall E X m T F, binds X (bind_tRel m T) E -> wft E T ->
R E m T F -> R E (flip m) F (typ_fvar X)
(* | SVarTrns : forall E X m T1 T2 , R E m T1 (typ_fvar X) -> R E m (typ_fvar X) T2 -> R E m T1 T2 *)
| SAll : forall E L m1 m2 T1 T2 T3 T4, (*wft E T1 -> wft E T2 ->*)
R E (BindPosVariance m1 m2) T1 T2 ->
(forall X, X \notin L ->
R (E&(X !: m1 <>: (SelectBound m1))) m2 (T3 open_tt_var X) (T4 open_tt_var X))->
R E m2 (typ_all m1 T1 T3) (typ_all m1 T2 T4).
(** Generalized Subtyping Judgements *)
(* Inductive R : Mode -> typ -> typ -> Prop :=
| SInt : forall m, R m typ_int typ_int
| STop1 : forall A, R Sub A typ_top
| STop2 : forall A, R Sup typ_top A
| SBot1 : forall A, R Sup A typ_btm
| SBot2 : forall A, R Sub typ_btm A
| SFun : forall A B C D m, R (flip m) A C -> R m B D -> R m (typ_arrow A B) (typ_arrow C D). *)
Inductive typing : env -> trm -> typ -> Prop :=
| typing_unit : forall E, okt E ->typing E trm_unit typ_top
| typing_var : forall E x T,
okt E ->
binds x (bind_typ T) E ->
typing E (trm_fvar x) T
| typing_nval : forall E v,
okt E ->
typing E (trm_nval v) typ_int
| typing_nsucc : forall E t,
okt E ->
typing E t typ_int ->
typing E (trm_nsucc t) typ_int
| typing_nind : forall E V t1 t2 t3,
okt E ->
typing E t1 typ_int ->
typing E t2 V ->
typing E t3 (typ_arrow V V) ->
typing E (trm_nind t1 t2 t3) V
| typing_abs : forall L E V e1 T1,
(forall x, x \notin L ->
typing (E & x ~: V) (e1 open_ee_var x) T1) ->
typing E (trm_abs V e1) (typ_arrow V T1)
| typing_app : forall T1 E e1 e2 T2,
typing E e1 (typ_arrow T1 T2) ->
typing E e2 T1 ->
typing E (trm_app e1 e2) T2
| typing_tabs : forall L m Tk E e T,
(forall X, X \notin L ->
typing (E & X!: m <>: Tk) (e open_te_var X) (T open_tt_var X)) ->
typing E (trm_tabs m Tk e) (typ_all m Tk T)
| typing_tapp : forall m E Tb T1 T2 e,
typing E e (typ_all m Tb T1) ->
R E m T2 Tb ->
typing E (trm_tapp e T2) (open_tt T1 T2)
| typing_sub : forall S E e T,
typing E e S ->
R E Sub S T ->
wft E T ->
typing E e T.
(** Values *)
Inductive value : trm -> Prop :=
| value_unit : value trm_unit
| value_abs : forall V e1, term (trm_abs V e1) ->
value (trm_abs V e1)
| value_ival : forall v, value (trm_nval v)
| value_tabs : forall m T e, term (trm_tabs m T e) ->
value (trm_tabs m T e).
(** One-step reduction *)
Inductive red : trm -> trm -> Prop :=
| red_app_1 : forall e1 e1' e2,
term e2 ->
red e1 e1' ->
red (trm_app e1 e2) (trm_app e1' e2)
| red_app_2 : forall e1 e2 e2',
value e1 ->
red e2 e2' ->
red (trm_app e1 e2) (trm_app e1 e2')
| red_abs : forall V e1 v2,
term (trm_abs V e1) ->
value v2 ->
red (trm_app (trm_abs V e1) v2) (open_ee e1 v2)
| red_succ_can : forall v,
red (trm_nsucc (trm_nval v)) (trm_nval (S v))
| red_succ_red : forall t t',
red t t'->
red (trm_nsucc t) (trm_nsucc t')
| red_ind_nred : forall t1 t1' t2 t3,
red t1 t1' ->
term t2 ->
term t3 ->
red (trm_nind t1 t2 t3) (trm_nind t1' t2 t3)
| red_ind_icase : forall t1 t2 t3 t3',
value t1 ->
term t2 ->
red t3 t3' ->
red (trm_nind t1 t2 t3) (trm_nind t1 t2 t3')
| red_ind_izero : forall t2 t3,
term t2 ->
value t3 ->
red (trm_nind (trm_nval 0) t2 t3) t2
| red_ind_isucc : forall k t2 t3,
term t2 ->
value t3 ->
red (trm_nind (trm_nval (S k)) t2 t3) (trm_app t3 (trm_nind (trm_nval k) t2 t3))
| red_tabs : forall m T e V,
term (trm_tabs m T e) ->
type V ->
red (trm_tapp (trm_tabs m T e) V) (open_te e V)
| red_tapp : forall e1 e1' V,
type V ->
red e1 e1' ->
red (trm_tapp e1 V) (trm_tapp e1' V).
(** Our goal is to prove preservation and progress *)
Definition preservation := forall E e e' T,
typing E e T ->
red e e' ->
typing E e' T.
Definition progress := forall e T,
typing empty e T ->
value e
\/ exists e', red e e'.
(** * Additional Definitions Used in the Proofs *)
(** Computing free type variables in a type *)
Fixpoint fv_tt (T : typ) {struct T} : vars :=
match T with
| typ_fvar X => \{X}
| typ_arrow T1 T2 => (fv_tt T1) \u (fv_tt T2)
| typ_all _ T1 T2 => (fv_tt T1) \u (fv_tt T2)
| _ => \{}
end.
(** Computing free type variables in a term *)
Fixpoint fv_te (e : trm) {struct e} : vars :=
match e with
| trm_nsucc t => fv_te t
| trm_nind t1 t2 t3 => (fv_te t1) \u (fv_te t2) \u (fv_te t3)
| trm_abs V e1 => (fv_tt V) \u (fv_te e1)
| trm_app e1 e2 => (fv_te e1) \u (fv_te e2)
| trm_tabs m T e1 => (fv_tt T) \u (fv_te e1)
| trm_tapp e1 V => (fv_tt V) \u (fv_te e1)
| _ => \{}
end.
(** Computing free term variables in a term *)
Fixpoint fv_ee (e : trm) {struct e} : vars :=
match e with
| trm_unit => \{}
| trm_bvar i => \{}
| trm_fvar x => \{x}
| trm_nval _ => \{}
| trm_nsucc t => fv_ee t
| trm_nind t1 t2 t3 => (fv_ee t1) \u (fv_ee t2) \u (fv_ee t3)
| trm_abs V e1 => (fv_ee e1)
| trm_app e1 e2 => (fv_ee e1) \u (fv_ee e2)
| trm_tabs m T t => fv_ee t
| trm_tapp t1 _ => (fv_ee t1)
end.
(** Substitution for free type variables in types. *)
Fixpoint subst_tt (Z : var) (U : typ) (T : typ) {struct T} : typ :=
match T with
| typ_top => typ_top
| typ_int => typ_int
| typ_btm => typ_btm
| typ_bvar J => typ_bvar J
| typ_fvar X => If X = Z then U else (typ_fvar X)
| typ_arrow T1 T2 => typ_arrow (subst_tt Z U T1) (subst_tt Z U T2)
| typ_all m T1 T2 => typ_all m (subst_tt Z U T1) (subst_tt Z U T2)
end.
(** Substitution for free type variables in terms. *)
Fixpoint subst_te (Z : var) (U : typ) (e : trm) {struct e} : trm :=
match e with
| trm_unit => trm_unit
| trm_bvar i => trm_bvar i
| trm_fvar x => trm_fvar x
| trm_nval v => trm_nval v
| trm_nsucc t => trm_nsucc (subst_te Z U t)
| trm_nind t1 t2 t3 => trm_nind (subst_te Z U t1) (subst_te Z U t2) (subst_te Z U t3)
| trm_abs V e1 => trm_abs (subst_tt Z U V) (subst_te Z U e1)
| trm_app e1 e2 => trm_app (subst_te Z U e1) (subst_te Z U e2)
| trm_tabs m T e => trm_tabs m (subst_tt Z U T) (subst_te Z U e)
| trm_tapp e1 V => trm_tapp (subst_te Z U e1) (subst_tt Z U V)
end.
(** Substitution for free term variables in terms. *)
Fixpoint subst_ee (z : var) (u : trm) (e : trm) {struct e} : trm :=
match e with
| trm_unit => trm_unit
| trm_bvar i => trm_bvar i
| trm_fvar x => If x = z then u else (trm_fvar x)
| trm_nval i => trm_nval i
| trm_nsucc t => trm_nsucc (subst_ee z u t)
| trm_nind t1 t2 t3 => trm_nind (subst_ee z u t1) (subst_ee z u t2) (subst_ee z u t3)
| trm_abs V e1 => trm_abs V (subst_ee z u e1)
| trm_app e1 e2 => trm_app (subst_ee z u e1) (subst_ee z u e2)
| trm_tabs m T t=> trm_tabs m T (subst_ee z u t)
| trm_tapp t1 T => trm_tapp (subst_ee z u t1) T
end.
Definition subst_tb (Z : var) (P : typ) (b : bind) : bind :=
match b with
| bind_tRel m T => bind_tRel m (subst_tt Z P T)
| bind_typ T => bind_typ (subst_tt Z P T)
end.
(* ********************************************************************** *)
(** * Tactics *)
(** Constructors as hints. *)
Hint Constructors type term wft ok okt value red.
Hint Resolve
STB1 STB2 SInt SFun SVarRefl
typing_var typing_app typing_sub typing_tapp typing_nval typing_nsucc typing_nind.
(** Gathering free names already used in the proofs *)
Ltac gather_vars :=
let A := gather_vars_with (fun x : vars => x) in
let B := gather_vars_with (fun x : var => \{x}) in
let C := gather_vars_with (fun x : trm => fv_te x) in
let D := gather_vars_with (fun x : trm => fv_ee x) in
let E := gather_vars_with (fun x : typ => fv_tt x) in
let F := gather_vars_with (fun x : env => dom x) in
constr:(A \u B \u C \u D \u E \u F).
(** "pick_fresh x" tactic creates a fresh variable with name x *)
Ltac pick_fresh x :=
let L := gather_vars in (pick_fresh_gen L x).
(** "apply_fresh T as x" is used to apply an inductive rule which
uses a universal quantification over a cofinite set *)
Tactic Notation "apply_fresh" constr(T) "as" ident(x) :=
apply_fresh_base T gather_vars x.
Tactic Notation "apply_fresh" "*" constr(T) "as" ident(x) :=
apply_fresh T as x; autos*.
(** These tactics help applying a lemma whose conclusion mentions
an environment (E & F) in the particular case when F is empty *)
Ltac get_env :=
match goal with
| |- wft ?E _ => E
| |- R ?E _ _ _ => E
| |- typing ?E _ _ => E
end.
Tactic Notation "apply_empty_bis" tactic(get_env) constr(lemma) :=
let E := get_env in rewrite <- (concat_empty_r E);
eapply lemma; try rewrite concat_empty_r.
Tactic Notation "apply_empty" constr(F) :=
apply_empty_bis (get_env) F.
Tactic Notation "apply_empty" "*" constr(F) :=
apply_empty F; autos*.
(** Tactic to undo when Coq does too much simplification *)
Ltac unsimpl_map_bind :=
match goal with |- context [ ?B (subst_tt ?Z ?P ?U) ] =>
unsimpl ((subst_tb Z P) (B U)) end.
Tactic Notation "unsimpl_map_bind" "*" :=
unsimpl_map_bind; autos*.
(* ********************************************************************** *)
(** * Properties of Substitutions *)
(** Substitution on indices is identity on well-formed terms. *)
Lemma open_tt_rec_type_core : forall T j V U i, i <> j ->
(open_tt_rec j V T) = open_tt_rec i U (open_tt_rec j V T) ->
T = open_tt_rec i U T.
Proof.
induction T; introv Neq H; simpl in *; inversion H; f_equal*.
case_nat*. case_nat*.
Qed.
Lemma open_tt_rec_type : forall T U,
type T -> forall k, T = open_tt_rec k U T.
Proof.
induction 1; intros; simpl; f_equal*. unfolds open_tt.
pick_fresh X. apply* (@open_tt_rec_type_core T2 0 (typ_fvar X)).
Qed.
(** Substitution for a fresh name is identity. *)
Lemma subst_tt_fresh : forall Z U T,
Z \notin fv_tt T -> subst_tt Z U T = T.
Proof.
induction T; simpl; intros; f_equal*.
case_var*.
Qed.
(** Substitution distributes on the open operation. *)
Lemma subst_tt_open_tt_rec : forall T1 T2 X P n, type P ->
subst_tt X P (open_tt_rec n T2 T1) =
open_tt_rec n (subst_tt X P T2) (subst_tt X P T1).
Proof.
introv WP. generalize n.
induction T1; intros k; simpls; f_equal*.
case_nat*.
case_var*. rewrite* <- open_tt_rec_type.
Qed.
Lemma subst_tt_open_tt : forall T1 T2 X P, type P ->
subst_tt X P (open_tt T1 T2) =
open_tt (subst_tt X P T1) (subst_tt X P T2).
Proof.
unfold open_tt. autos* subst_tt_open_tt_rec.
Qed.
(** Substitution and open_var for distinct names commute. *)
Lemma subst_tt_open_tt_var : forall X Y U T, Y <> X -> type U ->
(subst_tt X U T) open_tt_var Y = subst_tt X U (T open_tt_var Y).
Proof.
introv Neq Wu. rewrite* subst_tt_open_tt.
simpl. case_var*.
Qed.
(** Opening up a body t with a type u is the same as opening
up the abstraction with a fresh name x and then substituting u for x. *)
Lemma subst_tt_intro : forall X T2 U,
X \notin fv_tt T2 -> type U ->
open_tt T2 U = subst_tt X U (T2 open_tt_var X).
Proof.
introv Fr Wu. rewrite* subst_tt_open_tt.
rewrite* subst_tt_fresh. simpl. case_var*.
Qed.
(* ********************************************************************** *)
(** ** Properties of type substitution in terms *)
Lemma open_te_rec_term_core : forall e j u i P ,
open_ee_rec j u e = open_te_rec i P (open_ee_rec j u e) ->
e = open_te_rec i P e.
Proof.
induction e; intros; simpl in *; inversion H; f_equal*; f_equal*.
Qed.
Lemma open_te_rec_type_core : forall e j Q i P, i <> j ->
open_te_rec j Q e = open_te_rec i P (open_te_rec j Q e) ->
e = open_te_rec i P e.
Proof.
induction e; intros; simpl in *; inversion H0; f_equal*;
match goal with H: ?i <> ?j |- ?t = open_tt_rec ?i _ ?t =>
apply* (@open_tt_rec_type_core t j) end.
Qed.
Lemma open_te_rec_term : forall e U,
term e -> forall k, e = open_te_rec k U e.
Proof.
intros e U WF. induction WF; intros; simpl;
f_equal*; try solve [ apply* open_tt_rec_type ].
unfolds open_ee. pick_fresh x.
apply* (@open_te_rec_term_core e1 0 (trm_fvar x)).
unfolds open_te. pick_fresh X.
apply* (@open_te_rec_type_core e1 0 (typ_fvar X)).
Qed.
(** Substitution for a fresh name is identity. *)
Lemma subst_te_fresh : forall X U e,
X \notin fv_te e -> subst_te X U e = e.
Proof.
induction e; simpl; intros; f_equal*; autos* subst_tt_fresh.
Qed.
(** Substitution distributes on the open operation. *)
Lemma subst_te_open_te : forall e T X U, type U ->
subst_te X U (open_te e T) =
open_te (subst_te X U e) (subst_tt X U T).
Proof.
intros. unfold open_te. generalize 0.
induction e; intros; simpls; f_equal*;
autos* subst_tt_open_tt_rec.
Qed.
(** Substitution and open_var for distinct names commute. *)
Lemma subst_te_open_te_var : forall X Y U e, Y <> X -> type U ->
(subst_te X U e) open_te_var Y = subst_te X U (e open_te_var Y).
Proof.
introv Neq Wu. rewrite* subst_te_open_te.
simpl. case_var*.
Qed.
(** Opening up a body t with a type u is the same as opening
up the abstraction with a fresh name x and then substituting u for x. *)
Lemma subst_te_intro : forall X U e,
X \notin fv_te e -> type U ->
open_te e U = subst_te X U (e open_te_var X).
Proof.
introv Fr Wu. rewrite* subst_te_open_te.
rewrite* subst_te_fresh. simpl. case_var*.
Qed.
(* ********************************************************************** *)
(** ** Properties of term substitution in terms *)
Lemma open_ee_rec_term_core : forall e j v u i, i <> j ->
open_ee_rec j v e = open_ee_rec i u (open_ee_rec j v e) ->
e = open_ee_rec i u e.
Proof.
induction e; introv Neq H; simpl in *; inversion H; f_equal*.
case_nat*. case_nat*.
Qed.
Lemma open_ee_rec_type_core : forall e j V u i,
open_te_rec j V e = open_ee_rec i u (open_te_rec j V e) ->
e = open_ee_rec i u e.
Proof.
induction e; introv H; simpls; inversion H; f_equal*.
Qed.
Lemma open_ee_rec_term : forall u e,
term e -> forall k, e = open_ee_rec k u e.
Proof.
induction 1; intros; simpl; f_equal*.
unfolds open_ee. pick_fresh x.
apply* (@open_ee_rec_term_core e1 0 (trm_fvar x)).
unfolds open_te. pick_fresh X.
apply* (@open_ee_rec_type_core e1 0 (typ_fvar X)).
Qed.
(** Substitution for a fresh name is identity. *)
Lemma subst_ee_fresh : forall x u e,
x \notin fv_ee e -> subst_ee x u e = e.
Proof.
induction e; simpl; intros; f_equal*.
case_var*.
Qed.
(** Substitution distributes on the open operation. *)
Lemma subst_ee_open_ee : forall t1 t2 u x, term u ->
subst_ee x u (open_ee t1 t2) =
open_ee (subst_ee x u t1) (subst_ee x u t2).
Proof.
intros. unfold open_ee. generalize 0.
induction t1; intros; simpls; f_equal*.
case_nat*.
case_var*. rewrite* <- open_ee_rec_term.
Qed.
(** Substitution and open_var for distinct names commute. *)
Lemma subst_ee_open_ee_var : forall x y u e, y <> x -> term u ->
(subst_ee x u e) open_ee_var y = subst_ee x u (e open_ee_var y).
Proof.
introv Neq Wu. rewrite* subst_ee_open_ee.
simpl. case_var*.
Qed.
(** Opening up a body t with a type u is the same as opening
up the abstraction with a fresh name x and then substituting u for x. *)
Lemma subst_ee_intro : forall x u e,
x \notin fv_ee e -> term u ->
open_ee e u = subst_ee x u (e open_ee_var x).
Proof.
introv Fr Wu. rewrite* subst_ee_open_ee.
rewrite* subst_ee_fresh. simpl. case_var*.
Qed.
(** Interactions between type substitutions in terms and opening
with term variables in terms. *)
Lemma subst_te_open_ee_var : forall Z P x e,
(subst_te Z P e) open_ee_var x = subst_te Z P (e open_ee_var x).
Proof.
introv. unfold open_ee. generalize 0.
induction e; intros; simpl; f_equal*. case_nat*.
Qed.
(** Interactions between term substitutions in terms and opening
with type variables in terms. *)
Lemma subst_ee_open_te_var : forall z u e X, term u ->
(subst_ee z u e) open_te_var X = subst_ee z u (e open_te_var X).
Proof.
introv. unfold open_te. generalize 0.
induction e; intros; simpl; f_equal*.
case_var*. symmetry. autos* open_te_rec_term.
Qed.
(** Substitutions preserve local closure. *)
Lemma subst_tt_type : forall T Z P,
type T -> type P -> type (subst_tt Z P T).
Proof.
induction 1; intros; simpl; auto.
case_var*.
apply type_all with (L:=L \u \{Z}). apply* IHtype.
intros. rewrite* subst_tt_open_tt_var.
Qed.
Lemma subst_te_term : forall e Z P,
term e -> type P -> term (subst_te Z P e).
Proof.
lets: subst_tt_type. induction 1; intros; simpl; auto.
apply_fresh* term_abs as x. rewrite* subst_te_open_ee_var.
apply term_tabs with (L:=L \u \{Z}). auto.
intros. rewrite* subst_te_open_te_var.
Qed.
Lemma subst_ee_term : forall e1 Z e2,
term e1 -> term e2 -> term (subst_ee Z e2 e1).
Proof.
induction 1; intros; simpl; auto.
case_var*.
apply_fresh* term_abs as y. rewrite* subst_ee_open_ee_var.
apply term_tabs with (L:=L \u \{Z}). auto. intros. rewrite* subst_ee_open_te_var.
Qed.
Hint Resolve subst_tt_type subst_te_term subst_ee_term.
(* ********************************************************************** *)
(** * Properties of well-formedness of a type in an environment *)
(** If a type is well-formed in an environment then it is locally closed. *)
Lemma wft_type : forall E T,
wft E T -> type T.
Proof.
induction 1; eauto.
Qed.
(** Through weakening *)
Lemma wft_weaken : forall G T E F,
wft (E & G) T ->
ok (E & F & G) ->
wft (E & F & G) T.
Proof.
intros. gen_eq K: (E & G). gen E F G.
induction H; intros; subst; eauto.
(* case: var *)
apply (@wft_var m T1 (E0&F&G)). apply* binds_weaken. autos*.
(* case: all *)
apply_fresh* wft_all as Y. apply_ih_bind* H1.
Qed.
(** Through strengthening *)
Lemma wft_strengthen : forall E F x U T,
wft (E & x ~: U & F) T -> wft (E & F) T.
Proof.
intros. gen_eq G: (E & x ~: U & F). gen F. assert (wft G T). { auto. }
induction H; intros F EQ; subst; auto.
apply (@wft_var m T1). destruct (binds_concat_inv H) as [?|[? ?]].
apply* binds_concat_right.
destruct (binds_push_inv H2) as [[? ?]|[? ?]].
subst. false.
apply~ binds_concat_left.
autos*.
(* todo: binds_cases tactic *)
apply_fresh* wft_all as Y. apply_ih_bind* H2.
Qed.
(** Through type substitution *)
Lemma wft_subst_tb : forall m T1 F E Z P T,
wft (E & Z !:m <>: T1 & F) T ->
wft E P ->
ok (E & map (subst_tb Z P) F) ->
wft (E & map (subst_tb Z P) F) (subst_tt Z P T).
Proof.
introv WT WP. gen_eq G: (E & Z !:m <>: T1 & F). gen F.
induction WT; intros F EQ Ok; subst; simpl subst_tt; auto.
case_var*.
apply_empty* wft_weaken.
destruct (binds_concat_inv H) as [?|[? ?]].
apply wft_var with (m:=m0) (T1:=(subst_tt Z P T0)).
apply~ binds_concat_right.
unsimpl_map_bind. apply~ binds_map.
destruct (binds_push_inv H1) as [[? ?]|[? ?]].
subst. false~.
applys wft_var. apply* binds_concat_left.
apply_fresh* wft_all as Y.
unsimpl ((subst_tb Z P) (bind_tRel m0 Tn)).
lets: wft_type.
rewrite* subst_tt_open_tt_var.
apply_ih_map_bind* H0.
Qed.
(** Through type reduction *)
Lemma wft_open : forall E U m T1 T2,
ok E ->
wft E (typ_all m T1 T2) ->
wft E U ->
wft E (open_tt T2 U).
Proof.
introv Ok WA WU. inversions WA. pick_fresh X.
autos* wft_type. rewrite* (@subst_tt_intro X).
lets K: (@wft_subst_tb m T1 empty).
specializes_vars K. clean_empty K. apply* K.
(* todo: apply empty ? *)
Qed.
(** Through narrowing *)
Lemma wft_narrow : forall m V F U T E X,
wft (E & X !: m <>: V & F) T ->
ok (E & X !: m <>: U & F) ->
wft (E & X !: m <>: U & F) T.
Proof.
intros. gen_eq K: (E & X !: m <>: V & F). gen E F.
induction H; intros; subst; eauto.
destruct (binds_middle_inv H) as [K|[K|K]]; try destructs K.
applys wft_var. apply* binds_concat_right.
subst. applys wft_var. apply~ binds_middle_eq.
applys wft_var. apply~ binds_concat_left.
apply* binds_concat_left.
apply_fresh* wft_all as Y. apply_ih_bind* H1.
Qed.
(* ********************************************************************** *)
(** * Relations between well-formed environment and types well-formed
in environments *)
(** If an environment is well-formed, then it does not contain duplicated keys. *)
Lemma ok_from_okt : forall E,
okt E -> ok E.
Proof.
induction 1; auto.
Qed.
Hint Extern 1 (ok _) => apply ok_from_okt.
(** Extraction from a typing assumption in a well-formed environment *)
Lemma wft_from_env_has_typ : forall x U E,
okt E -> binds x (bind_typ U) E -> wft E U.
Proof.
induction E using env_ind; intros Ok B.
false* binds_empty_inv.
inversions Ok.
false (empty_push_inv H0).
destruct (eq_push_inv H) as [? [? ?]]. subst. clear H.
destruct (binds_push_inv B) as [[? ?]|[? ?]]. inversion H3.
apply_empty* wft_weaken.
destruct (eq_push_inv H) as [? [? ?]]. subst. clear H.
destruct (binds_push_inv B) as [[? ?]|[? ?]]. subst.
inversions H3. apply_empty* wft_weaken.
apply_empty* wft_weaken.
Qed.
Lemma wft_from_env_has_sub : forall x m U E,
okt E -> binds x (bind_tRel m U) E -> wft E U.
Proof.
induction E using env_ind; intros Ok B.
false* binds_empty_inv.
inversions Ok.
false (empty_push_inv H0).
destruct (eq_push_inv H) as [? [? ?]]. subst. clear H.
destruct (binds_push_inv B) as [[? ?]|[? ?]]. subst.
inversions H3. apply_empty* wft_weaken.
apply_empty* wft_weaken.
destruct (eq_push_inv H) as [? [? ?]]. subst. clear H.
destruct (binds_push_inv B) as [[? ?]|[? ?]]. subst.
inversions H3.
apply_empty* wft_weaken.
Qed.
(** Extraction from a well-formed environment *)
Lemma wft_from_okt_typ : forall x T E,
okt (E & x ~: T) -> wft E T.
Proof.
intros. inversions* H.
false (empty_push_inv H1).
destruct (eq_push_inv H0) as [? [? ?]]. false.
destruct (eq_push_inv H0) as [? [? ?]]. inversions~ H4.
Qed.
Lemma wft_from_okt_sub : forall m x T E,
okt (E & x !: m <>: T) -> wft E T.
Proof.
intros. inversions* H.
false (empty_push_inv H1).
destruct (eq_push_inv H0) as [? [? ?]]. inversions~ H4.
destruct (eq_push_inv H0) as [? [? ?]]. false.
Qed.
Lemma wft_weaken_right : forall T E F,
wft E T ->
ok (E & F) ->
wft (E & F) T.
Proof.
intros. apply_empty* wft_weaken.
Qed.
Hint Resolve wft_weaken_right.
Hint Resolve wft_from_okt_typ wft_from_okt_sub.
Hint Immediate wft_from_env_has_typ wft_from_env_has_sub.
Hint Resolve wft_subst_tb.
(** Extraction from a subtyping assumption in a well-formed environment *)
(* ********************************************************************** *)
(** ** Properties of well-formedness of an environment *)
(** Inversion lemma *)
Lemma okt_push_inv : forall E X B,
okt (E & X ~ B) -> (exists m T, B = bind_tRel m T) \/ exists T, B = bind_typ T.
Proof.
introv O. inverts O.
false* empty_push_inv.
lets (?&?&?): (eq_push_inv H). subst*.
lets (?&?&?): (eq_push_inv H). subst*.
Qed.
Lemma okt_push_tvr_inv : forall E X m T,
okt (E & X !:m <>: T) -> okt E /\ wft E T /\ X # E.
Proof.
introv O. inverts O.
false* empty_push_inv.
lets (?&M&?): (eq_push_inv H). subst. inverts~ M.
lets (?&?&?): (eq_push_inv H). false.
Qed.
Lemma okt_push_sub_type : forall m E X T,
okt (E & X !: m <>: T) -> type T.
Proof. intros. applys wft_type. forwards*: okt_push_tvr_inv. Qed.
Lemma okt_push_typ_inv : forall E x T,
okt (E & x ~: T) -> okt E /\ wft E T /\ x # E.
Proof.
introv O. inverts O.
false* empty_push_inv.
lets (?&?&?): (eq_push_inv H). false.
lets (?&M&?): (eq_push_inv H). subst. inverts~ M.
Qed.
Lemma okt_push_typ_type : forall E X T,
okt (E & X ~: T) -> type T.
Proof. intros. applys wft_type. forwards*: okt_push_typ_inv. Qed.
Hint Immediate okt_push_typ_type okt_push_sub_type.
(** Through strengthening *)
Lemma okt_strengthen : forall x T (E F:env),
okt (E & x ~: T & F) ->
okt (E & F).
Proof.
introv O. induction F using env_ind.
rewrite concat_empty_r in *. lets*: (okt_push_typ_inv O).
rewrite concat_assoc in *.
lets: okt_push_inv O. destruct H; subst.
destruct H. destruct H. subst.
lets (?&?): (okt_push_tvr_inv O).
applys~ okt_tvr. applys* wft_strengthen. destruct* H0.
destruct H. subst. lets (?&?): (okt_push_typ_inv O).
applys~ okt_typ. applys* wft_strengthen. destruct* H0.
Qed.
Lemma okt_strengthen_l : forall E F,
okt (E&F) ->
okt E.
Proof. introv OKC. induction F using env_ind.
rewrite concat_empty_r in *. auto.
rewrite concat_assoc in *.
lets: okt_push_inv OKC. destruct H; try destruct H; subst.
destruct H. subst.
lets (?&?): (okt_push_tvr_inv OKC). apply IHF. apply H.
lets (?&?): (okt_push_typ_inv OKC). apply* IHF.
Qed.
Lemma okt_subst_tb : forall Q m Z P (E F:env),
okt (E & Z!: m <>: Q & F) ->
wft E P ->
okt (E & map (subst_tb Z P) F).
Proof.
introv O W. induction F using env_ind.
rewrite map_empty. rewrite concat_empty_r in *.
lets*: (okt_push_tvr_inv O).
rewrite map_push. rewrite concat_assoc in *.
lets : okt_push_inv O. destruct H; try destruct H; subst.
destruct H. subst. lets (?&?): (okt_push_tvr_inv O).
applys~ okt_tvr. destruct* H0. autos*.
lets (?&?&?): (okt_push_typ_inv O).
applys~ okt_typ. apply* wft_subst_tb.
Qed.
Lemma okt_narrow : forall m V (E F:env) U X,
okt (E & X !: m <>: V & F) ->
wft E U ->
okt (E & X !: m <>: U & F).
Proof.
introv O W. induction F using env_ind.
rewrite concat_empty_r in *. lets*: (okt_push_tvr_inv O).
rewrite concat_assoc in *.
lets [(?&?&?)|(?&?)]: okt_push_inv O; subst.
lets (?&?&?): (okt_push_tvr_inv O).
applys~ okt_tvr. applys* wft_narrow.
lets (?&?&?): (okt_push_typ_inv O).
applys~ okt_typ. applys* wft_narrow.
Qed.
(** Automation *)
Hint Resolve okt_narrow okt_subst_tb wft_weaken.
Hint Immediate okt_strengthen.
(* ********************************************************************** *)
(** ** Environment is unchanged by substitution from a fresh name *)
Lemma notin_fv_tt_open : forall Y X T,
X \notin fv_tt (T open_tt_var Y) ->
X \notin fv_tt T.
Proof.
introv. unfold open_tt. generalize 0.
induction T; simpl; intros k Fr; auto.
specializes IHT1 k. specializes IHT2 k. auto.
specializes IHT1 k. specializes IHT2 (S k). auto.
Qed.
Lemma notin_fv_wf : forall E X T,
wft E T -> X # E -> X \notin fv_tt T.
Proof.
induction 1; intros Fr; simpl.
eauto.
rewrite notin_singleton. intro. subst. applys binds_fresh_inv H Fr.
auto. auto. auto.
notin_simpl; auto.
notin_simpl; auto. pick_fresh Y. apply* (@notin_fv_tt_open Y).
Qed.
Lemma map_subst_tb_id : forall G Z P,
okt G -> Z # G -> G = map (subst_tb Z P) G.
Proof.
induction 1; intros Fr; autorewrite with rew_env_map; simpl.
auto.
rewrite* <- IHokt. rewrite* subst_tt_fresh. apply* notin_fv_wf.
rewrite* <- IHokt. rewrite* subst_tt_fresh. apply* notin_fv_wf.
Qed.
(* ********************************************************************** *)
(** ** Regularity of relations *)
(** The subtyping relation is restricted to well-formed objects. *)
Lemma sub_regular : forall E m T1 T2,
R E m T1 T2 -> okt E /\ wft E T1 /\ wft E T2.
Proof. introv Rel.
induction Rel; autos*.
- destruct m; autos*.
- destruct m; autos*.
- split. destruct IHRel; auto. split; apply_fresh* wft_all as Y.
destruct (H0 Y); auto. destruct H2. destruct m1; simpl in H2.
apply_empty* (@wft_narrow Sub typ_top); autos.
apply_empty* (@wft_narrow Sup typ_btm); autos.
destruct (H0 Y); auto. destruct H2. destruct m1; simpl in H3.
apply_empty* (@wft_narrow Sub typ_top); autos.
apply_empty* (@wft_narrow Sup typ_btm); autos.
Qed.
Lemma sub_wft : forall E m T1 T2,
R E m T1 T2 -> wft E T1 /\ wft E T2.
Proof. intros. apply sub_regular in H. destruct H; auto. Qed.
(** The typing relation is restricted to well-formed objects. *)
Lemma typing_regular : forall E e T,
typing E e T -> okt E /\ term e /\ wft E T.
Proof.
induction 1; splits*.
pick_fresh y. specializes H0 y. destructs~ H0.
forwards*: okt_push_typ_inv.
apply_fresh* term_abs as y.
pick_fresh y. specializes H0 y. destructs~ H0.
forwards*: okt_push_typ_inv.
specializes H0 y. destructs~ H0.
pick_fresh y. specializes H0 y. destructs~ H0.
apply* wft_arrow.
forwards*: okt_push_typ_inv.
apply_empty* wft_strengthen.
destructs IHtyping1. inversion* H3.
pick_fresh y. specializes H0 y. destructs~ H0.
forwards*: okt_push_tvr_inv.
apply term_tabs with (L:=L).
pick_fresh X. destruct H0 with (X:=X).
autos. apply okt_push_tvr_inv in H1. destruct H1. destruct H3.
apply wft_type in H3. auto.
intros.
forwards~ K: (H0 X). destructs K.
forwards*: okt_push_tvr_inv.
apply_fresh* wft_all as Y.
pick_fresh y. forwards~ K: (H0 y). destructs K.
forwards*: okt_push_tvr_inv.
forwards~ K: (H0 Y). destructs K.
forwards*: okt_push_tvr_inv.
apply term_tapp; autos*. eapply wft_type. apply sub_wft in H0.
destruct H0. apply H0.
destruct IHtyping. destruct H2. inversion H3. subst.
applys* wft_open T2. apply sub_wft in H0. destruct* H0.
Qed.
(** The value relation is restricted to well-formed objects. *)
Lemma value_regular : forall t,
value t -> term t.
Proof.
induction 1; autos*.
Qed.
(** The reduction relation is restricted to well-formed objects. *)
Lemma red_regular : forall t t',
red t t' -> term t /\ term t'.
Proof.
induction 1; split; autos* value_regular.
inversions H. pick_fresh y. rewrite* (@subst_ee_intro y).
inversions H. pick_fresh Y. rewrite* (@subst_te_intro Y).
Qed.
(** Automation *)
Hint Extern 1 (okt ?E) =>
match goal with
| H: R _ _ _ _ |- _ => apply (proj31 (sub_regular H))
| H: typing _ _ _ |- _ => apply (proj31 (typing_regular H))
end.
Hint Extern 1 (wft ?E ?T) =>
match goal with
| H: typing E _ T |- _ => apply (proj33 (typing_regular H))
| H: R E _ T _ |- _ => apply (proj32 (sub_regular H))
| H: R E _ _ T |- _ => apply (proj33 (sub_regular H))
end.
Hint Extern 1 (type ?T) =>
let go E := apply (@wft_type E); auto in
match goal with
| H: typing ?E _ T |- _ => go E
| H: R ?E _ T _ |- _ => go E
| H: R ?E _ _ T |- _ => go E
end.
Hint Extern 1 (term ?e) =>
match goal with
| H: typing _ ?e _ |- _ => apply (proj32 (typing_regular H))
| H: red ?e _ |- _ => apply (proj1 (red_regular H))
| H: red _ ?e |- _ => apply (proj2 (red_regular H))
end.
(* ********************************************************************** *)
(** * Properties of Subtyping *)
(* ********************************************************************** *)
(** Reflexivity (1) *)
Lemma refl : forall E m A,
okt E ->
wft E A ->
R E m A A.
introv OK WFT. lets W:(wft_type WFT). gen E. gen m.
induction W; intros; inversion WFT; eauto.
- destruct m. apply* STB1. apply* STB2.
- destruct m. apply* STB2. apply* STB1.
- subst. pick_fresh X. apply SAll with (L:=L\u L0 \u dom E).
auto. intros v Hv. apply notin_union_r in Hv. destruct Hv.
apply notin_union_r in H2. destruct H2.
apply H0 with (E:=E & v!:m<>:(SelectBound m)); auto.
destruct m; auto.
apply_empty* (@wft_narrow m T1).
Defined.
(* ********************************************************************** *)
(** Weakening (2) *)
Lemma sub_weakening : forall m E F G S T,
R (E & G) m S T ->
okt (E & F & G) ->
R (E & F & G) m S T.
Proof.
introv Typ. gen F. inductions Typ; introv Ok; auto.
(* case: fvar trans *)
apply SVarBnd with (T:=T). apply* binds_weaken.
autos. autos.
apply SVarBndFl with (T:=T). apply* binds_weaken.
autos. autos.
(* apply SVarTrns with (X:=X); auto. *)
(* case: all *)
apply SAll with (L:=L \u (dom E) \u (dom F) \u (dom G)). auto. intros.
assert (E & F & G & X !: m1 <>: (SelectBound m1) = E&F&(G & X !: m1<>:(SelectBound m1))).
rewrite concat_assoc. reflexivity. rewrite H2. apply H0; auto.
rewrite concat_assoc. auto. rewrite concat_assoc. apply* okt_tvr.
destruct m1; simpl. auto. auto.
Qed.
(* ********************************************************************** *)
(** Symmetry *)
Lemma m_flip_inv : forall m, m = flip (flip m).
Proof. intros. destruct* m. Qed.
Lemma sym1 : forall E A B m, R E m A B -> R E (flip m) B A.
intros. induction H.
- autos.
- destruct m; apply* STB2.
- destruct m; apply* STB1.
- apply* SFun.
- apply* SVarRefl.
- apply* SVarBndFl.
- apply SVarBnd with (T:=T); try rewrite <- m_flip_inv; auto.
(* - apply SVarTrns with (X:=X); auto. *)
- apply* SAll. destruct m1; simpl. simpl in IHR. auto.
simpl in IHR. auto.
Defined.
Corollary sym2 : forall E A B m, R E m A B <-> R E (flip m) B A.
destruct m; split; apply sym1.
Defined.
Corollary sym : forall E A B, R E Sub A B <-> R E Sup B A.
intros. split; apply sym1.
Defined.
(** In parentheses is given the label of the corresponding
lemma in the description of the POPLMark Challenge. *)
Lemma TopBtmMustEq1 : forall m T1 T2 E,
okt E -> wft E T2 -> R E m T1 (mode_to_sub (flip m)) -> R E m T1 T2.
Proof. introv OK WF H. remember (mode_to_sub (flip m)) as T'.
induction H; try destruct m; inversion* HeqT'.
- subst. eapply SVarBnd. apply H. auto. auto.
- subst. eapply SVarBnd. apply H. auto. auto.
(* - subst. eapply SVarTrns. apply H. apply* IHR2.
- subst. eapply SVarTrns. apply H. apply* IHR2. *)- destruct m2; inversion H3.
Qed.
Lemma TopBtmMustEq2 : forall m T1 T2 E,
okt E -> wft E T2 -> R E m (mode_to_sub m) T1 -> R E m T2 T1.
Proof. introv OK WF H. remember (mode_to_sub m) as T'.
induction H; try destruct m; inversion* HeqT'.
- subst. eapply SVarBndFl. apply H. auto. apply TopBtmMustEq1 with (T2:=T2) in H1; auto.
- subst. eapply SVarBndFl. apply H. auto. apply TopBtmMustEq1 with (T2:=T2) in H1; auto.
(* - subst. eapply SVarTrns. apply* IHR1. apply H0.
- subst. eapply SVarTrns. apply* IHR1. apply H0. *)
- destruct m2; inversion H3. Qed.
Hint Resolve TopBtmMustEq1 TopBtmMustEq2.
Lemma top_btm_var : forall m E T,
R E m T (mode_to_sub (flip m)) -> T = mode_to_sub (flip m) \/( exists X Tn, T = typ_fvar X /\ binds X (bind_tRel m Tn) E
/\ R E m Tn (mode_to_sub (flip m))).
Proof. introv rels. inductions rels; try solve [ destruct m; inversion x].
- left. reflexivity.
- destruct IHrels; auto. right. exists X. exists T.
split; autos. right. exists X. exists T. split; auto.
- destruct m2; inversion x.
Qed.
Lemma TopBtmMustEq3 : forall m T1 T2 E,
R E m T2 T1 -> R E m T1 (mode_to_sub (flip m)) -> R E m T2 (mode_to_sub (flip m)).
Proof. introv R1 R2.
apply top_btm_var in R2. destruct R2; subst.
- auto.
- destruct H. destruct H. destruct H. destruct H0. subst.
inductions R1.
+ destruct m; inversion x.
+ apply* refl.
+ eapply SVarBnd. apply H1. auto. auto.
+ eapply SVarBnd. apply H. auto. eapply IHR1. auto.
apply H1. auto.
+ apply binds_get in H. apply binds_get in H1. rewrite H1 in H.
inversion H. destruct m; inversion H4. Qed.
Lemma form_vtrans : forall m E X A B,
R E m (typ_fvar X) A -> R E m B (typ_fvar X) ->
A = mode_to_sub m \/
(exists X', A = typ_fvar X' /\ ((X' = X) \/ (exists T0, binds X' (bind_tRel (flip m) T0) E /\ R E m (typ_fvar X) T0 ))) \/
B = mode_to_sub (flip m) \/
(exists X', B = typ_fvar X' /\ ((X' = X) \/ (exists T0, binds X' (bind_tRel m T0) E /\ R E (flip m) (typ_fvar X) T0 ))).
Proof. introv R1 R2. gen B. inductions R1; introv R2.
- left*.
- destruct m; inversion x.
- right. left. exists X. split; auto.
- right. right. inductions R2; autos*.
+ destruct m; inversion x.
+ right. exists X0. split; auto. right. exists T0.
split; auto. apply sym1; auto.
+ apply binds_get in H1. apply binds_get in H. rewrite H1 in H.
inversion H. destruct m; inversion H4.
- right. left. exists X0. split; auto.
inductions R2.
+ destruct m; inversion x.
+ right. exists T. rewrite <- m_flip_inv. split; auto.
apply sym1. auto.
+ right. exists T. rewrite <- m_flip_inv. split; auto.
apply sym1. auto.
+ right. exists T. rewrite <- m_flip_inv. split; auto.
apply sym1. auto.
+ right. exists T. rewrite <- m_flip_inv. split; auto.
apply sym1. auto. Qed.
Lemma subvar_inv : forall m E X A,
R E m (typ_fvar X) A ->
A = mode_to_sub m \/
(exists T, binds X (bind_tRel m T) E /\ R E m T A) \/
(exists X', A = typ_fvar X' /\ ((X' = X) \/ (exists T0, binds X' (bind_tRel (flip m) T0) E /\ R E m (typ_fvar X) T0 ))).
Proof. introv Rel. inductions Rel; autos.
- destruct m; inversion x.
- right. right. exists X. split; auto.
- right. left. exists* T.
- right. right. exists X0. split; auto. right. exists T.
rewrite <- m_flip_inv. split; auto. apply* sym1. Qed.
Lemma inv_rinv : forall m E X A, (okt E /\ wft E (typ_fvar X) /\ A = mode_to_sub m) \/
(exists T, binds X (bind_tRel m T) E /\ R E m T A) \/
(exists X', A = typ_fvar X' /\ ((okt E /\ wft E (typ_fvar X) /\ X' = X) \/ (exists T0, binds X' (bind_tRel (flip m) T0) E /\ R E m (typ_fvar X) T0 ))) ->
R E m (typ_fvar X) A.
Proof. introv H. destruct H. destruct H. destruct H0. subst*.
destruct H.
- destruct H. destruct H. eapply SVarBnd. apply H. auto. auto.
- destruct H. destruct H. destruct H0.
+ destruct H0. destruct H1. subst. apply* refl.
+ destruct H0. destruct H0. subst. apply sym2.
eapply SVarBnd. apply H0. auto. apply sym1. auto. Qed.
Lemma subvar_inv_binds : forall m E X A B,
R E m (typ_fvar X) A -> binds X (bind_tRel m B) E ->
R E m B A \/ (exists X', A = typ_fvar X' /\ ((X' = X) \/ (exists T0, binds X' (bind_tRel (flip m) T0) E /\ R E m (typ_fvar X) T0 ))).
Proof. introv Re bnd. lets K:Re. apply subvar_inv in Re.
destruct Re.
- left. subst. apply STB1; autos. eapply wft_from_env_has_sub. autos. apply bnd.
- destruct H.
+ destruct H. destruct H. assert (bind_tRel m x = bind_tRel m B). { eapply binds_functional. apply H.
apply bnd. } inversion H1. subst. left. auto.
+ right*. Qed.
Lemma subvar_inv_binds_fl : forall m E X A B,
R E m (typ_fvar X) A -> binds X (bind_tRel (flip m) B) E ->
A = mode_to_sub m \/
(exists X', A = typ_fvar X' /\ ((X' = X) \/ (exists T0, binds X' (bind_tRel (flip m) T0) E /\ R E m (typ_fvar X) T0 ))).
Proof. introv R Bnd. apply subvar_inv in R. destruct R.
- left*.
- destruct H. destruct H. destruct H. apply binds_get in H. apply binds_get in Bnd.
rewrite Bnd in H. inversion H. destruct m; inversion H2.
right. auto. Qed.
(* ********************************************************************** *)
(** Narrowing and Transitivity (3) *)
Definition transitivity_on Q := forall E m S T,
R E m S Q -> R E m Q T -> R E m S T.
Hint Unfold transitivity_on.
Hint Resolve wft_narrow.
(*
Lemma sub_narrowing_aux : forall m1 m2 Q F E Z P S T,
transitivity_on Q ->
R (E & Z !:m1 <>: Q & F) m2 S T ->
R E m1 P Q ->
R (E & Z !:m1 <>: P & F) m2 S T.
Proof.
introv TransQ SsubT PsubQ.
inductions SsubT; introv.
apply* SInt.
apply* STB1.
apply* STB2.
apply* SFun.
apply* SVarRefl.
tests EQ: (X = Z).
lets M: (@okt_narrow m1 Q).
apply SVarBnd with (T:=P).
asserts~ N: (ok (E & Z !:m1 <>: P & F)).
lets: ok_middle_inv_r N.
assert (m = m1). {
apply binds_middle_eq_inv in H. inversion* H. auto. }
subst; apply* binds_middle_eq.
replace E with (E&empty) in PsubQ; try apply concat_empty_r.
apply sub_weakening with (F:=Z!:m1<>:P&F) in PsubQ.
rewrite concat_empty_r in PsubQ. rewrite concat_assoc in PsubQ.
apply sub_wft in PsubQ. destruct* PsubQ. rewrite concat_empty_r.
rewrite concat_assoc. apply* M. rewrite concat_empty_r in PsubQ.
apply sub_wft in PsubQ. destruct* PsubQ.
apply TransQ.
assert (m = m1). {
apply binds_middle_eq_inv in H. inversion* H. auto. }
subst.
do_rew* concat_assoc (apply_empty* sub_weakening).
binds_get H. inversion H1. subst. autos*.
apply SVarBnd with (T:=T); auto. binds_cases H; auto.
apply* wft_narrow. autos*.
tests EQ: (X = Z).
lets M: (@okt_narrow m1 Q).
apply SVarBndFl with (T:=P).
asserts~ N: (ok (E & Z !:m1 <>: P & F)).
lets: ok_middle_inv_r N.
assert (m = m1). {
apply binds_middle_eq_inv in H. inversion* H. auto. }
subst; apply* binds_middle_eq.
replace E with (E&empty) in PsubQ; try apply concat_empty_r.
apply sub_weakening with (F:=Z!:m1<>:P&F) in PsubQ.
rewrite concat_empty_r in PsubQ. rewrite concat_assoc in PsubQ.
apply sub_wft in PsubQ. destruct* PsubQ. rewrite concat_empty_r.
rewrite concat_assoc. apply* M. rewrite concat_empty_r in PsubQ.
apply sub_wft in PsubQ. destruct* PsubQ.
apply TransQ.
assert (m = m1). {
apply binds_middle_eq_inv in H. inversion* H. auto. }
subst.
do_rew* concat_assoc (apply_empty* sub_weakening).
binds_get H. inversion H1. subst. autos*.
apply SVarBndFl with (T:=T); auto. binds_cases H; auto.
apply* wft_narrow. autos*.
(* apply SVarTrns with (X:=X); autos*. *)
apply SAll with (L:=L). apply IHSsubT with (Q0:=Q); autos*.
(*apply* wft_narrow. apply ok_from_okt.
apply* okt_narrow.*) (*pick_fresh X. assert (X \notin L). auto.*)
intros.
apply H0 with (E0:=E) (Z0:=Z) (m3:=m1) (Q0:=Q) (F0:= (F & X !: m0 <>: SelectBound m0)) in H1; auto.
rewrite concat_assoc in H1. auto. rewrite concat_assoc. auto.
Qed. *)
Lemma Var_Trans : forall E m T1 T2 X,
R E m T1 (typ_fvar X) -> R E m (typ_fvar X) T2 ->
R E m T1 T2.
Proof. introv H1 H2. gen T2. inductions H1.
- destruct m; inversion x.
- introv Hy. apply* STB2.
- autos.
- introv Hy. eapply SVarBnd. apply H.
autos. apply IHR with (X0:=X). autos. auto.
- introv Hy. apply sym2 in Hy. inductions Hy.
+ destruct m; inversion x.
+ apply* STB1.
+ eapply SVarBndFl. apply H. auto. auto.
+ eapply SVarBndFl. apply H3. auto. apply sym2.
apply IHHy with (X0:=X); autos.
+ apply binds_get in H3. apply binds_get in H.
rewrite H3 in H. inversion H. destruct m; inversion H5.
Qed.
Lemma sub_transitivity : forall Q E S m T,
R E m S Q -> R E m Q T -> R E m S T.
Proof. introv R1. gen T. induction R1; introv R2; autos.
- inductions R2; autos.
+ destruct m; inversion x.
+ assert (forall T0, R E m T T0 -> R E m (typ_fvar X) T0).
{ intros. eapply SVarBnd. apply H. auto. auto. }
clear H. clear IHR2. inductions R2; autos.
* destruct m; inversion x.
* apply sym1. apply H2. autos.
* apply sym1. apply H1. apply sym2. apply SFun.
apply IHR1_1. apply sym1. auto.
apply IHR1_2. apply sym1. auto.
* apply IHR2 with (C0:=C) (D0:=D); autos.
introv Hy. apply H2. eapply SVarBnd. apply H.
auto. apply Hy.
- eapply SVarBnd. apply H. auto. apply IHR1. auto.
- apply Var_Trans with (X:=X). eapply SVarBndFl.
apply H. auto. auto. auto.
- inductions R2; autos.
+ apply STB1. auto. apply wft_all with (L:=L \u dom E). autos.
intros. assert (X \notin L). { autos. } apply H in H4.
destruct m; simpl in H4.
auto. apply_empty (@wft_narrow m1 (SelectBound m1)). auto. apply ok_from_okt.
apply okt_tvr. auto. auto. auto. apply_empty (@wft_narrow m1 (SelectBound m1)).
auto. apply ok_from_okt.
apply okt_tvr. auto. auto. auto.
+ destruct m; inversion x.
+ assert (forall T0, R E m T T0 -> R E m (typ_fvar X) T0).
{ intros. eapply SVarBnd. apply H1. auto. auto. }
(*clear H3.*) clear H1. clear IHR2. inductions R2; autos.
* destruct m; inversion x.
* apply sym1. apply H4. apply STB2. auto. apply wft_all with (L:=L \u dom E).
auto. intros. assert (X0 \notin L). { autos. } apply H1 in H6.
destruct m; simpl in H6.
apply_empty (@wft_narrow m1 (SelectBound m1)). auto. auto.
apply_empty (@wft_narrow m1 (SelectBound m1)). auto. auto.
* apply IHR2 with (m2:=m1) (T5:=T2) (T6:=T4); autos.
introv Hy. apply H4. eapply SVarBnd. apply H. auto.
auto.
* apply sym1. apply H4. apply SAll with (L:=L \u L0 \u dom E); autos.
{ destruct m1; simpl; simpl in IHR1; simpl in R2.
apply sym2. apply IHR1. apply sym1. auto.
apply sym2. apply IHR1. apply sym1. auto. } clear IHR2. clear H0.
introv NI. apply sym2. apply H3. auto.
apply sym1. auto.
+ apply SAll with (L:= L \u L0); autos.
Qed.
Lemma sub_narrowing : forall Q E m m' F Z P S T,
R E m' P Q ->
R (E & Z !: m' <>: Q & F) m S T ->
R (E & Z !: m' <>: P & F) m S T.
Proof. introv R1 R2. inductions R2; autos; intros.
- apply* SInt.
- apply* STB1.
- apply* STB2.
- apply SFun. eapply IHR2_1. apply R1.
auto. eapply IHR2_2. apply R1. auto.
- apply* SVarRefl.
- tests EQ:(X=Z).
+ lets M: (@okt_narrow m' Q).
apply SVarBnd with (T:=P).
asserts~ N: (ok (E & Z !:m' <>: P & F)); auto.
lets: ok_middle_inv_r N.
assert (m = m'). {
apply binds_middle_eq_inv in H. inversion* H. auto. }
subst; apply* binds_middle_eq. auto.
replace E with (E&empty) in R1; try apply concat_empty_r.
apply sub_weakening with (F:=Z!:m'<>:P&F) in R1.
rewrite concat_empty_r in R1. rewrite concat_assoc in R1.
apply sub_wft in R1. destruct* R1. rewrite concat_empty_r.
rewrite concat_assoc. apply* M. rewrite concat_empty_r in R1.
apply sub_wft in R1. destruct* R1.
assert (T = Q). {
apply binds_middle_eq_inv in H. inversion* H. auto. }
assert (m = m'). {
apply binds_middle_eq_inv in H. inversion* H. auto. }
subst. eapply sub_transitivity. apply_empty sub_weakening.
apply_empty sub_weakening. apply R1. apply okt_strengthen_l with (F:=F).
auto. auto. apply IHR2 with (Q0:=Q); auto.
+ apply SVarBnd with (T:=T); auto. binds_cases H; auto.
apply wft_narrow with (V:=Q); auto. apply ok_from_okt.
apply okt_narrow with (V:=Q); auto. eapply IHR2.
apply R1. auto.
- tests EQ:(X=Z).
+ lets M: (@okt_narrow m' Q).
apply SVarBndFl with (T:=P).
asserts~ N: (ok (E & Z !:m' <>: P & F)); auto.
lets: ok_middle_inv_r N.
assert (m = m'). {
apply binds_middle_eq_inv in H. inversion* H. auto. }
subst; apply* binds_middle_eq. auto.
replace E with (E&empty) in R1; try apply concat_empty_r.
apply sub_weakening with (F:=Z!:m'<>:P&F) in R1.
rewrite concat_empty_r in R1. rewrite concat_assoc in R1.
apply sub_wft in R1. destruct* R1. rewrite concat_empty_r.
rewrite concat_assoc. apply* M. rewrite concat_empty_r in R1.
apply sub_wft in R1. destruct* R1.
assert (T = Q). {
apply binds_middle_eq_inv in H. inversion* H. auto. }
assert (m = m'). {
apply binds_middle_eq_inv in H. inversion* H. auto. }
subst. eapply sub_transitivity. apply_empty sub_weakening.
apply_empty sub_weakening. apply R1. apply okt_strengthen_l with (F:=F).
auto. auto. apply IHR2 with (Q0:=Q); auto.
+ apply SVarBndFl with (T:=T); auto. binds_cases H; auto.
apply wft_narrow with (V:=Q); auto. apply ok_from_okt.
apply okt_narrow with (V:=Q); auto. eapply IHR2.
apply R1. auto.
- apply SAll with (L:=L). apply IHR2 with (Q0:=Q); autos*.
intros. auto.
apply H0 with (Q0:=Q) (Z0:=Z) (E0:=E) (m'0:=m') (F0:=F & X !: m1 <>: SelectBound m1) in H1.
rewrite concat_assoc in H1. auto. auto. rewrite concat_assoc. auto.
Qed.
(* ********************************************************************** *)
(** Type substitution preserves subtyping (10) *)
Lemma okt_notin : forall E F X v,
okt(E&X~v&F) -> X # E.
Proof. induction F using env_ind; introv OK.
- rewrite concat_empty_r in OK. destruct v. apply okt_push_tvr_inv in OK.
destruct* OK. apply okt_push_typ_inv in OK. destruct* OK.
- rewrite concat_assoc in OK. destruct v. apply okt_push_tvr_inv in OK.
destruct* OK. apply okt_push_typ_inv in OK. destruct* OK. Qed.
Lemma sub_through_subst_tt : forall m1 m2 Q E F Z S T P,
R (E & Z !: m1 <>: Q & F) m2 S T ->
R E m1 P Q ->
R (E & map (subst_tb Z P) F) m2 (subst_tt Z P S) (subst_tt Z P T).
Proof.
introv SsubT PsubQ.
inductions SsubT; introv; simpl subst_tt.
apply* SInt.
destruct m; apply* STB1.
destruct m; apply* STB2.
autos*.
case_var.
apply* refl.
apply* SVarRefl.
inversions H0. binds_cases H3.
apply* wft_var.
apply wft_var with (m:=m0) (T1:=subst_tt Z P T1). unsimpl_map_bind*.
case_var.
apply binds_middle_eq_inv in H. inversion H. subst.
apply sub_transitivity with (Q:=Q). apply_empty* sub_weakening.
rewrite* <- (@subst_tt_fresh Z P Q).
apply (@notin_fv_wf E).
apply sub_wft in PsubQ. destruct* PsubQ.
apply sub_regular in SsubT. destruct SsubT. eapply okt_notin. apply H1.
apply ok_from_okt. apply sub_regular in SsubT. destruct* SsubT.
apply SVarBnd with (T:=(subst_tt Z P T)). rewrite* (@map_subst_tb_id E Z P).
binds_cases H; unsimpl_map_bind*.
apply sub_regular in PsubQ. destruct* PsubQ.
apply* IHSsubT.
case_var.
apply binds_middle_eq_inv in H; auto. inversion H. subst.
assert ( R (E & map (subst_tb Z P) F) m1 (subst_tt Z P Q) (subst_tt Z P F0)).
{ autos*. } apply sym1. apply sub_transitivity with (Q:=(subst_tt Z P Q)); auto.
rewrite* (@subst_tt_fresh Z P Q). apply_empty sub_weakening; auto.
apply (@notin_fv_wf E); auto. apply sub_regular in SsubT. destruct SsubT.
apply okt_notin with (E:=E) (v:=(bind_tRel m1 Q)) (F:=F); auto.
apply SVarBndFl with (T:=(subst_tt Z P T)). rewrite* (@map_subst_tb_id E Z P).
binds_cases H; unsimpl_map_bind*. apply* wft_subst_tb.
apply* IHSsubT.
(*assert (R (E & map (subst_tb Z P) F) m (subst_tt Z P T1) (subst_tt Z P (typ_fvar X))).
{ autos*. }
assert (R (E & map (subst_tb Z P) F) m (subst_tt Z P (typ_fvar X)) (subst_tt Z P T2)).
{ autos*. } eapply sub_transitivity. apply H. auto.*)
apply SAll with (L:=L \u \{Z}).
apply* IHSsubT.
intros. assert (X \notin L). auto.
apply H0 with (m3:=m1) (Q0:=Q) (E0:=E) (F0:=(F & X !: m0 <>: SelectBound m0)) (Z0:=Z) in H2.
assert ((bind_tRel m0 (SelectBound m0)) = (subst_tb Z P (bind_tRel m0 (SelectBound m0)))).
{ destruct m0; autos. } rewrite H3.
rewrite* subst_tt_open_tt_var. rewrite* subst_tt_open_tt_var.
rewrite <- concat_assoc. rewrite <- map_push. auto.
rewrite concat_assoc. auto. auto.
Qed.
(* ********************************************************************** *)
(** * Properties of Typing *)
(* ********************************************************************** *)
(** Weakening (5) *)
Lemma typing_weakening : forall E F G e T,
typing (E & G) e T ->
okt (E & F & G) ->
typing (E & F & G) e T.
Proof.
introv Typ. gen F. inductions Typ; introv Ok.
apply* typing_unit.
apply* typing_var. apply* binds_weaken.
apply* typing_nval.
apply* typing_nsucc.
apply* typing_nind.
apply_fresh* typing_abs as x. forwards~ K: (H x).
apply_ih_bind (H0 x); eauto.
apply* typing_app.
apply_fresh* typing_tabs as X. forwards~ K : (H X).
apply_ih_bind (H0 X); eauto.
apply* typing_tapp. apply* sub_weakening.
apply* typing_sub. apply* sub_weakening.
Qed.
(* ********************************************************************** *)
(** Strengthening (6) *)
Lemma sub_strengthening : forall m x U E F S T,
R (E & x ~: U & F) m S T ->
R (E & F) m S T.
Proof.
intros m x U E F S T SsubT.
inductions SsubT; introv; autos* wft_strengthen.
(* case: fvar trans *)
apply SVarBnd with (T:=T); autos*. binds_cases H; autos*.
apply SVarBndFl with (T:=T); autos*. binds_cases H; autos*.
(*apply SVarTrns with (X:=X); autos*. *)
(* case: all *)
apply SAll with (L:= L); autos*. intros. apply_ih_bind* H0.
Qed.
(************************************************************************ *)
(** Preservation by Term Substitution (8) *)
Lemma typing_through_subst_ee : forall U E F x T e u,
typing (E & x ~: U & F) e T ->
typing E u U ->
typing (E & F) (subst_ee x u e) T.
Proof.
introv TypT TypU. inductions TypT; introv; simpl.
apply* typing_unit.
case_var.
binds_get H0. apply_empty* typing_weakening.
binds_cases H0; apply* typing_var.
apply* typing_nval.
apply* typing_nsucc.
apply* typing_nind.
apply_fresh* typing_abs as y.
rewrite* subst_ee_open_ee_var.
apply_ih_bind* H0.
lets M:TypU. apply typing_regular in M. destruct M. auto.
apply* typing_app.
apply_fresh* typing_tabs as Y.
rewrite* subst_ee_open_te_var.
apply_ih_bind* H0.
apply* typing_tapp. apply* sub_strengthening.
apply* typing_sub. apply* sub_strengthening. apply* wft_strengthen.
Qed.
(************************************************************************ *)
(** Preservation by Type Substitution (11) *)
Lemma typing_through_subst_te : forall E F Z m Ty e T P,
typing (E & Z!: m <>: Ty & F) e T ->
R E m P Ty ->
typing (E & map (subst_tb Z P) F) (subst_te Z P e) (subst_tt Z P T).
Proof.
introv Typ WFT.
inductions Typ; simpls subst_tt; simpls subst_te; autos*.
subst. apply* typing_unit.
subst. apply* typing_var. rewrite* (@map_subst_tb_id E Z P).
binds_cases H0; unsimpl_map_bind*.
subst. apply* typing_nind.
(* case abs *)
apply_fresh* typing_abs as y.
unsimpl (subst_tb Z P (bind_typ V)).
rewrite* subst_te_open_ee_var.
apply_ih_map_bind* H0.
(* case tabs *)
apply_fresh* typing_tabs as Y.
unsimpl (subst_tb Z P (bind_tRel m0 Tk)).
rewrite* subst_te_open_te_var.
rewrite* subst_tt_open_tt_var.
apply_ih_map_bind* H0.
rewrite* subst_tt_open_tt. apply* typing_tapp.
apply* sub_through_subst_tt.
apply* typing_sub. apply* sub_through_subst_tt.
Qed.
(* ********************************************************************** *)
(** * Preservation *)
(* ********************************************************************** *)
(** Inversions for Typing (13) *)
Lemma typing_inv_abs : forall E S1 e1 T,
typing E (trm_abs S1 e1) T ->
(forall U1 U2, R E Sub T (typ_arrow U1 U2) ->
R E Sub U1 S1
/\ exists S2, exists L, forall x, x \notin L ->
typing (E & x ~: S1) (e1 open_ee_var x) S2 /\ R E Sup U2 S2).
Proof.
introv Typ. gen_eq e: (trm_abs S1 e1). gen S1 e1.
induction Typ; intros S1 b1 EQ U1 U2 R; inversions EQ.
inversions* R. split.
rewrite <- sym2 in H6. auto.
exists T1. exists L. split; autos.
rewrite sym2. apply H8.
apply IHTyp. autos. eapply sub_transitivity.
apply H. apply R.
(*pick_fresh Y. assert (Y \notin L). { autos. }
eapply H0 in H3.
apply IHTyp. auto.
eapply trans. apply H. apply R. *)
Qed.
Fact typing_inv_abs_not_sup : ~(forall E S1 e1 T,
typing E (trm_abs S1 e1) T ->
(forall U1 U2, R E Sup T (typ_arrow U1 U2) ->
R E Sup U1 S1
/\ exists S2, exists L, forall x, x \notin L ->
typing (E & x ~: S1) (e1 open_ee_var x) S2 /\ R E Sub U2 S2)).
Proof.
intros contra.
assert (R empty Sup typ_int typ_top).
{ specialize (@contra empty typ_top (trm_nval 0) typ_top).
assert (typing empty (trm_abs typ_top (trm_nval 0)) typ_top).
{ eapply typing_sub. apply typing_abs with (L:=\{}). introv X.
assert (trm_nval 0 open_ee_var x = trm_nval 0).
{ unfold open_ee. simpl. reflexivity. }
rewrite H. apply typing_nval.
autos. apply STB1. auto. auto. auto. }
apply contra with (U1 := typ_int) (U2 := typ_top) in H.
destruct H. apply H. apply STB2. auto. auto.
}
inversion H. Qed.
Lemma typing_inv_tabs : forall E e T m0 T0,
typing E (trm_tabs m0 T0 e) T ->
forall U m T', R E Sub T (typ_all m T' U) ->
exists S, exists L, forall X, X \notin L ->
typing (E & X!:m <>: T0) (e open_te_var X) (S open_tt_var X)
/\ R (E & X!:m <>: T') Sub (S open_tt_var X) (U open_tt_var X)
/\ m = m0
/\ R E (BindPosVariance Sub m) T0 T'.
Proof.
introv Ty Sub. remember (trm_tabs m0 T0 e) as trm.
remember (typ_all m T' U) as ty.
induction Ty; inversion Heqtrm; subst; clear H0.
- exists T. inversion Sub. subst. exists (L \u L0).
introv NI. apply notin_union_r in NI. destruct NI. split.
apply* H. split; auto. apply_empty sub_narrowing.
apply STB1; auto. destruct m; simpl in H9; simpl; auto.
- apply* IHTy. eapply sub_transitivity. apply H. auto.
Qed.
(* ********************************************************************** *)
(** Preservation Result (20) *)
Lemma preservation_result : preservation.
Proof.
introv Typ. gen e'. induction Typ; introv Red;
try solve [ inversion Red ].
- inversion Red. apply* typing_nval.
apply* typing_nsucc.
- inversion Red; try solve [apply* typing_nind].
subst. auto.
subst. apply* typing_app.
(* case: app *)
- inversions Red; try solve [ apply* typing_app ].
destruct~ (typing_inv_abs Typ1 (U1:=T1) (U2:=T2)) as [P1 [S2 [L P2]]].
apply refl with (E:=E). apply typing_regular in Typ1. destruct Typ1.
destruct H0. auto. auto.
pick_fresh X. forwards~ K: (P2 X). destruct K.
rewrite* (@subst_ee_intro X).
apply_empty (@typing_through_subst_ee V).
apply* (@typing_sub S2). apply* sym2.
apply_empty sub_weakening. auto. auto.
inversion Typ1. subst. apply Typ2. subst. eapply typing_sub. apply Typ2. auto.
auto.
(* case: tapp *)
-inversions Red; try solve [ apply* typing_tapp ].
destruct (typing_inv_tabs Typ (U:=T1) (m:=m) (T':=Tb)).
apply* refl. (* apply typing_regular in Typ.
apply refl with (E:=E). destruct Typ. destruct H1. auto. *)
apply typing_regular in Typ. destruct Typ.
destruct H3. inversion H5; subst. destruct H0.
pick_fresh X. forwards~ K : ( H0 X). destruct K.
rewrite* (@subst_te_intro X). rewrite* (@subst_tt_intro X).
replace E with (E & map (subst_tb X T2) empty).
replace (E & X!: m <>: T) with (E & X!: m <>: T & empty) in H6.
eapply typing_sub.
apply (typing_through_subst_te H6) .
destruct H7. destruct H8. eapply sub_transitivity.
apply H. apply sym2. apply H10.
apply* (@sub_through_subst_tt m Sub Tb E empty X).
destruct H7. rewrite concat_empty_r. auto.
apply* wft_subst_tb.
apply* concat_empty_r.
rewrite map_empty. apply* concat_empty_r.
(* case sub *)
- apply* typing_sub.
Qed.
(* ********************************************************************** *)
(** * Progress *)
(* ********************************************************************** *)
(** Canonical Forms (14) *)
Lemma canonical_form_abs : forall t U1 U2,
value t -> typing empty t (typ_arrow U1 U2) ->
exists V, exists e1, t = trm_abs V e1.
Proof.
introv Val Typ.
gen_eq T: (typ_arrow U1 U2). intro st.
assert (R empty Sub T (typ_arrow U1 U2)).
{ rewrite st.
apply refl with (E:=empty); auto. subst. apply typing_regular in Typ.
destruct Typ. destruct H0. auto. }
clear st. gen_eq E: (@empty typ). gen U1 U2.
induction Typ; introv EQT EQE;
try solve [ inversion Val | inversion EQT | eauto ].
subst. assert (R E0 Sub S (typ_arrow U1 U2)). {
eapply sub_transitivity. apply H. apply EQT. }
eapply IHTyp. apply Val. apply H1. reflexivity. Qed.
Lemma canonical_form_nat : forall t,
value t -> typing empty t typ_int ->
exists v, t = trm_nval v.
Proof.
introv Val Typ.
gen_eq T: typ_int. intro st. assert (R empty Sub T typ_int). { rewrite* st. }
clear st. gen_eq E: (@empty typ).
induction Typ; introv EQE;
try solve [ inversion Val | inversion EQE | eauto | inversion H ].
eapply IHTyp. apply Val. eapply sub_transitivity. apply H0. apply H. apply EQE. Qed.
Lemma tall_inv : forall E m m1 m2 T1 T2 M N,
R E m (typ_all m1 T1 M) (typ_all m2 T2 N) -> m1=m2 /\
R E (BindPosVariance m2 m) T1 T2.
Proof. introv TA. inductions TA.
- destruct m; inversion x.
- destruct m; inversion x.
- split; auto. Qed.
Lemma canonical_form_tabs : forall t T m U,
value t -> typing empty t (typ_all m T U) ->
exists T' e1, t = trm_tabs m T' e1 /\ R empty (BindPosVariance m Sub) T' T.
Proof.
introv Val Typ.
gen_eq Ty: (typ_all m T U). intro st.
assert (R empty Sub Ty (typ_all m T U)). { rewrite* st.
apply refl with (E:=empty); auto. subst. apply typing_regular in Typ. destruct Typ.
destruct H0. auto. }
clear st. gen_eq E: (@empty typ).
induction Typ; introv EQE;
try solve [ inversion Val | inversion EQE | eauto | inversion H ].
apply tall_inv in H.
exists Tk. exists e. split. destruct H. subst. auto.
destruct H. auto.
apply* IHTyp. eapply sub_transitivity.
apply H0. auto. Qed.
(* ********************************************************************** *)
(** Progress Result (16) *)
Lemma progress_result : progress.
Proof.
introv Typ. gen_eq E: (@empty typ). lets Typ': Typ. remember empty as Env.
induction Typ; intros EQ; subst.
left*.
(* case: var *)
false* binds_empty_inv.
(* case: nval *)
left*.
(* case: succ *)
right*.
destruct* IHTyp as [Val1 | R1].
destruct (canonical_form_nat Val1 Typ) as [ v ].
subst. exists (trm_nval (S v)). auto.
destruct R1. exists (trm_nsucc x). auto.
(* case : ind *)
right*.
{
assert (value t1 \/ (exists e', red t1 e')). { auto. }
assert (value t2 \/ (exists e', red t2 e')). { auto. }
assert (value t3 \/ (exists e', red t3 e')). { auto. }
clear IHTyp1 IHTyp2 IHTyp3. destruct H0; subst.
- destruct (canonical_form_nat H0 Typ1) as [ v ].
destruct H2.
+ subst. destruct v.
* exists t2. apply red_ind_izero; auto.
* exists (trm_app t3 (trm_nind (trm_nval v) t2 t3)).
apply red_ind_isucc. apply* typing_regular. auto.
+ destruct* H2.
- destruct* H0. }
(* case: abs *)
left*.
(* case: app *)
right. destruct* IHTyp1 as [Val1 | [e1' Rede1']].
destruct* IHTyp2 as [Val2 | [e2' Rede2']].
destruct (canonical_form_abs Val1 Typ1) as [S [e3 EQ]].
subst. exists* (open_ee e3 e2).
left*.
right. destruct* IHTyp as [Val | [e1 Red]].
destruct (canonical_form_tabs Val Typ).
destruct H0. destruct H0.
subst. exists* (open_te x0 T2).
autos*.
Qed.
|
State Before: α : Type u
β : Type v
γ : Type w
ι : Sort x
a b : α
s s₁ s₂ t t₁ t₂ u : Set α
h : a ∈ s
⊢ s ∩ insert a t = insert a (s ∩ t) State After: no goals Tactic: rw [insert_inter_distrib, insert_eq_of_mem h]
|
module Control.Effect.NonDet
import Control.EffectAlgebra
import Control.Monad.List
import Data.List1
||| Add non-determinism to a computation,
||| i.e. alternatives for program flow.
||| Choice between these alternatives is not directly specified and is
||| deferred to a particular handler.
public export
data ChoiceE : (Type -> Type) -> (Type -> Type) where
Choose : List (m a) -> ChoiceE m a
namespace Algebra
alg : Algebra sig m
=> (f : Functor ctx)
=> ctx ()
-> Handler ctx n (ListT m)
-> (ChoiceE :+: sig) n a
-> (ListT m) (ctx a)
alg ctxx hdl (Inl (Choose list)) =
go list
where
go : List (n a) -> ListT m (ctx a)
go [] = pure []
go (x :: xs) = (<+>) @{ListT} (hdl (x <$ ctxx)) (go xs)
alg ctxx hdl (Inr other) =
-- hdl : Handler ctx n (ListT m)
-- hdl' : Handler (List m) (ListT m) m
-- ? : Handler (List m . ctx) n m
EffectAlgebra.alg {f = Functor.Compose @{(ListM, %search)}}
{ctx = ListM m . ctx}
{m} (ctxx :: pure [])
((~<~) @{%search} @{Functor.ListM} { ctx1 = ListM m
, ctx2 = ctx
, l = n
, m = ListT m
, n = m} f hdl) other
where
f : Handler (ListM m) (ListT m) m
f = join @{Monad.ListT} . pure
||| Handle choice by accumulating all alternatives in a list transformer.
%hint export
Concat : (al : Algebra sig m) => Algebra (ChoiceE :+: sig) (ListT m)
Concat = MkAlgebra @{Monad.ListT} Algebra.alg
||| Introduce non-deterministic branching to a computation.
public export
oneOf : Inj ChoiceE sig
=> Algebra sig m
=> List a
-> m a
oneOf list =
send (Choose (map pure list))
||| Introduce non-deterministic branching to a computation.
public export
oneOfM : Inj ChoiceE sig
=> Algebra sig m
=> List (m a)
-> m a
oneOfM list =
send (Choose list)
|
[Open in Colab](https://colab.research.google.com/github/Ipsit1234/QML-HEP-Evaluation-Test-GSOC-2021/blob/main/QML_HEP_GSoC_2021_Task_2.ipynb)
# Task II: Quantum Generative Adversarial Network (QGAN) Part
You will explore how best to apply a quantum generative adversarial network (QGAN) to a High Energy Physics data analysis problem, more specifically, separating the signal events from the background events. You should use the Google Cirq and TensorFlow Quantum (TFQ) libraries for this task.

A set of input samples (simulated with Delphes) is provided in NumPy NPZ format ([Download Input](https://drive.google.com/file/d/1r_MZB_crfpij6r3SxPDeU_3JD6t6AxAj/view)). The input file contains only 100 samples for training and 100 samples for testing, so the task won't require many computing resources. The signal events are labeled with 1 and the background events with 0.
Be sure to show that you understand how to fine-tune your machine learning model to improve its performance. The performance can be evaluated with classification accuracy or the Area Under the ROC Curve (AUC).
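Since AUC is one of the suggested metrics, the sketch below shows how the final classifier could be scored with scikit-learn's `roc_curve` and `auc` (also imported in the setup cell below). The arrays `y_true` and `y_score` here are made-up placeholders standing in for the test labels and the discriminator's predicted signal probabilities; they are not part of the provided data.

```python
import numpy as np
from sklearn.metrics import roc_curve, auc

# Placeholder values only: in the actual evaluation, `y_true` would be the 100
# test labels (0 = background, 1 = signal) and `y_score` the discriminator's
# predicted signal probabilities for those events.
y_true = np.array([0, 0, 1, 1, 0, 1])
y_score = np.array([0.10, 0.40, 0.35, 0.80, 0.20, 0.90])

fpr, tpr, _ = roc_curve(y_true, y_score)
roc_auc = auc(fpr, tpr)
print(f'AUC = {roc_auc:.3f}')
```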
## Downloading the dataset
```python
!gdown --id 1r_MZB_crfpij6r3SxPDeU_3JD6t6AxAj -O events.npz
```
Downloading...
From: https://drive.google.com/uc?id=1r_MZB_crfpij6r3SxPDeU_3JD6t6AxAj
To: /content/events.npz
100% 9.14k/9.14k [00:00<00:00, 8.67MB/s]
## Setting up the required libraries
```python
!pip install -q tensorflow==2.3.1
!pip install -q tensorflow-quantum
import tensorflow as tf
import tensorflow_quantum as tfq
import cirq
import sympy
import numpy as np
import seaborn as sns
from sklearn.metrics import roc_curve, auc
%matplotlib inline
import matplotlib.pyplot as plt
from cirq.contrib.svg import SVGCircuit
```
## Loading the data
```python
data = np.load('./events.npz', allow_pickle=True)
training_input = data['training_input']
test_input = data['test_input']
```
```python
training_input
```
array({'0': array([[-0.43079088, 0.86834819, -0.92614721, -0.92662029, -0.56900862],
[ 0.33924198, 0.56155499, 0.93097459, -0.91631726, -0.54463516],
[-0.42888879, 0.87064961, -0.92782179, -0.77533991, -0.58329176],
[-0.43262871, 0.86128919, -0.92240878, -0.88048862, -0.49963115],
[-0.99925345, -0.99949586, 0.07753685, -0.84218034, -0.5149399 ],
[-0.99631106, -0.99775978, 0.0756427 , -0.54117216, -0.66299335],
[-0.42645921, 0.87141204, -0.92908723, -0.52650143, -0.62187526],
[ 0.34317906, 0.57125045, 0.92638556, -0.85113425, -0.40170562],
[-0.99904849, -0.99933931, 0.07737929, -0.81161066, -0.53550246],
[ 0.3371327 , 0.55874622, 0.92996976, -0.9117092 , -0.50996097],
[ 0.89649306, -0.95523176, -0.66298651, -0.71276678, -0.62698893],
[ 0.34293232, 0.56408047, 0.93448436, -0.88789589, -0.56154273],
[-0.43055876, 0.86615566, -0.92532229, -0.82531102, -0.61433506],
[ 0.33970589, 0.56676702, 0.92567667, -0.91562035, -0.5946945 ],
[-0.99924224, -0.99951208, 0.07752116, -0.8360764 , -0.56981171],
[-0.43099755, 0.86651251, -0.925269 , -0.86698757, -0.5334677 ],
[-0.99937446, -0.99960218, 0.07759084, -0.84990046, -0.57999577],
[-0.99889821, -0.99925173, 0.07726642, -0.78825187, -0.58779546],
[ 0.34950661, 0.58567909, 0.91615208, -0.55392065, -0.71591931],
[-0.9996095 , -0.99972522, 0.07776863, -0.87858433, -0.51991104],
[-0.99941236, -0.99961243, 0.07764686, -0.85816986, -0.53408948],
[-0.99900111, -0.99932936, 0.07736239, -0.80987373, -0.54108498],
[-0.99903613, -0.99944072, 0.0774002 , -0.82528366, -0.58735909],
[-0.99865334, -0.99912034, 0.07716685, -0.77627523, -0.53754642],
[-0.42986358, 0.86654897, -0.925718 , -0.73862815, -0.58674809],
[-0.42826609, 0.8738673 , -0.929425 , -0.83386636, -0.57230481],
[-0.4292864 , 0.86711 , -0.92616079, -0.67992738, -0.58893727],
[-0.99891246, -0.99935218, 0.07730719, -0.81056136, -0.58817068],
[-0.99943724, -0.99960331, 0.07761935, -0.84897875, -0.57251576],
[-0.43294759, 0.86021519, -0.92189498, -0.87279564, -0.59891923],
[-0.99916459, -0.99946953, 0.07745698, -0.82671955, -0.58793255],
[-0.99341325, -0.99601417, 0.07387947, -0.24695737, -0.73035246],
[-0.99991548, -0.99993058, 0.07794216, -0.90504651, -0.56418312],
[-0.99922291, -0.99953394, 0.07750004, -0.83864938, -0.5907458 ],
[-0.43169009, 0.8646533 , -0.92421266, -0.89228919, -0.52078182],
[-0.99880664, -0.99928854, 0.07727578, -0.80952183, -0.5345482 ],
[-0.42616344, 0.88033088, -0.93295403, -0.82686517, -0.55077716],
[-0.43139955, 0.86308588, -0.92365803, -0.79283965, -0.56396112],
[-0.42911949, 0.86960978, -0.92732509, -0.76203901, -0.59011644],
[-0.43195732, 0.86474019, -0.92418549, -0.92918364, -0.59731155],
[ 0.88467977, -0.95414347, -0.66293153, -0.82036316, -0.59800758],
[ 0.88789478, -0.95460466, -0.66579754, -0.74234352, -0.64920965],
[-0.99902527, -0.99935633, 0.07734482, -0.80286049, -0.61041081],
[ 0.91931151, -0.94505739, -0.67409081, -0.63394305, -0.62119434],
[-0.43082934, 0.86509194, -0.92477223, -0.78937098, -0.60215747],
[ 0.34116539, 0.5652242 , 0.93031814, -0.91565387, -0.59557556],
[-0.42622734, 0.88083524, -0.93317473, -0.86019392, -0.57646517],
[-0.99827896, -0.99902924, 0.07692876, -0.75539493, -0.62698962],
[-0.99820057, -0.99867978, 0.07680484, -0.70805718, -0.47476004],
[-0.43178015, 0.86266855, -0.92335757, -0.83674549, -0.5820067 ]]), '1': array([[-0.42298067, 0.88630865, -0.93661218, -0.64944313, -0.39193538],
[ 0.90999432, -0.94429141, -0.6746157 , -0.80518637, -0.53296538],
[-0.99909734, -0.99933762, 0.07749262, -0.83351171, -0.393053 ],
[ 0.35152705, 0.5794319 , 0.91806358, -0.00923369, -0.75412351],
[ 0.34399902, 0.57339474, 0.92616223, -0.91269157, -0.51149302],
[-0.42905645, 0.86662382, -0.92592506, -0.65770698, -0.38264457],
[ 0.88005236, -0.9599969 , -0.66609191, -0.8764421 , -0.58517572],
[-0.99793345, -0.99872791, 0.07670388, -0.71240287, -0.56441582],
[ 0.91213483, -0.91733005, -0.65528741, -0.67448585, -0.51199249],
[ 0.88551226, -0.94868142, -0.66252618, -0.90843762, -0.59915173],
[ 0.89150504, -0.94960047, -0.67068894, 0.15565222, -0.84421124],
[ 0.88909834, -0.94318762, -0.67148287, -0.84064276, -0.60912642],
[ 0.90255888, -0.92328057, -0.67519978, -0.57106936, -0.66341163],
[ 0.8976049 , -0.94774689, -0.66648593, -0.65464116, -0.52345396],
[-0.43030625, 0.86671232, -0.92539398, -0.82285501, -0.16938372],
[-0.42585346, 0.86764478, -0.9275629 , -0.32038269, -0.50555058],
[-0.43160184, 0.86429225, -0.92408762, -0.85714772, -0.54078138],
[ 0.34190452, 0.57019409, 0.9243911 , -0.8128156 , -0.58818964],
[ 0.89132992, -0.9483933 , -0.65619896, -0.88958426, -0.54497086],
[ 0.890744 , -0.95340342, -0.66924037, -0.79894801, -0.56738322],
[ 0.91361551, -0.94972251, -0.67585399, -0.50145697, -0.1911723 ],
[ 0.88935183, -0.94313315, -0.66904492, -0.79169152, -0.54132759],
[ 0.34476283, 0.57803763, 0.92061164, -0.80268017, -0.32524356],
[ 0.88926914, -0.95421273, -0.66842321, -0.68806207, -0.60578623],
[ 0.89505106, -0.9451298 , -0.67363669, -0.69723999, -0.66076491],
[ 0.362334 , 0.59572753, 0.93213198, -0.72362975, -0.59988639],
[-0.4288074 , 0.86967951, -0.92739944, -0.74532187, -0.46450815],
[ 0.88718043, -0.95091359, -0.65983102, -0.8940726 , -0.53211548],
[-0.99846294, -0.99900932, 0.07703519, -0.7582664 , -0.54351981],
[ 0.34178597, 0.56585585, 0.92943783, -0.85569345, -0.56113033],
[ 0.88296536, -0.95481506, -0.66270552, -0.78421127, -0.61179041],
[ 0.88426395, -0.95297205, -0.6626738 , -0.65294941, -0.46763447],
[-0.99590417, -0.99728851, 0.07527463, -0.45478035, -0.64937587],
[ 0.34028029, 0.56005082, 0.93145617, -0.76996587, -0.55614608],
[ 0.88938047, -0.95010936, -0.66373879, -0.80909527, -0.45277393],
[-0.99688379, -0.99798169, 0.07591516, -0.57297135, -0.64869519],
[ 0.90560361, -0.9344575 , -0.65804998, -0.7947352 , -0.58626245],
[ 0.3407186 , 0.5649425 , 0.92917793, -0.88543543, -0.45824451],
[ 0.34652975, 0.56915775, 0.93002691, -0.5463081 , -0.61838199],
[ 0.34864817, 0.58382871, 0.91784001, -0.62395809, -0.67006945],
[-0.4268417 , 0.87348543, -0.92980882, -0.61323903, -0.53234345],
[ 0.93378275, -0.85174182, -0.70941954, 0.05491566, -0.27530413],
[ 0.34210266, 0.56613037, 0.92717996, -0.69534422, -0.62937982],
[ 0.3507178 , 0.57835822, 0.93047705, -0.73815929, -0.53465596],
[ 0.35258416, 0.58849015, 0.92018786, -0.62128929, -0.58711745],
[ 0.92789668, -0.90635417, -0.64723127, -0.61636877, -0.534791 ],
[-0.43018391, 0.86569257, -0.92522163, -0.72360566, -0.50565552],
[ 0.3490436 , 0.5855566 , 0.91734464, -0.60993714, -0.58527924],
[ 0.34434515, 0.56629373, 0.93026591, -0.6060978 , -0.64930218],
[ 0.88125135, -0.95437964, -0.66664384, -0.78187561, -0.64345757]])},
dtype=object)
```python
def prepare_data(training_input, test_input):
    x_train_0 = training_input.item()['0']
    x_train_1 = training_input.item()['1']
    x_test_0 = test_input.item()['0']
    x_test_1 = test_input.item()['1']
    x_train = np.zeros((len(x_train_0) + len(x_train_1), x_train_0.shape[1]), dtype=np.float32)
    x_test = np.zeros((len(x_test_0) + len(x_test_1), x_test_0.shape[1]), dtype=np.float32)
    y_train = np.zeros((len(x_train_0) + len(x_train_1),), dtype=np.int32)
    y_test = np.zeros((len(x_test_0) + len(x_test_1),), dtype=np.int32)
    x_train[:len(x_train_0), :] = x_train_0
    x_train[len(x_train_0):, :] = x_train_1
    y_train[:len(x_train_0)] = 0
    y_train[len(x_train_0):] = 1
    x_test[:len(x_test_0), :] = x_test_0
    x_test[len(x_test_0):, :] = x_test_1
    y_test[:len(x_test_0)] = 0
    y_test[len(x_test_0):] = 1
    idx1 = np.random.permutation(len(x_train))
    idx2 = np.random.permutation(len(x_test))
    x_train, y_train = x_train[idx1], y_train[idx1]
    x_test, y_test = x_test[idx2], y_test[idx2]
    print('Shape of the training set:', x_train.shape)
    print('Shape of the test set:', x_test.shape)
    return x_train, y_train, x_test, y_test

x_train, y_train, x_test, y_test = prepare_data(training_input, test_input)
```
Shape of the training set: (100, 5)
Shape of the test set: (100, 5)
## Approach
We will make use of a quantum GAN as follows:
1. Train a GAN to produce samples that look like they came from quantum circuits.
2. Add a classification path to the discriminator and minimize both the minimax loss and classification loss.
3. We will use a random quantum circuit to generate random inputs for the generator. The intution behind this is that the data that was provided are the results (measurements) taken from some quantum experiment. So if we succeed in training a GAN which generates outputs similar to the experimental data, this will help in identifying new or other possible outcomes of the same quantum experiment which have been missed in the dataset provided.
4. Simultaneously training the discriminator to classify signal events and background events will help in identifying the signal events generated from the fully trained generator.
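To make step 2 concrete, here is a sketch of the combined objective (the notation is ours; the corresponding code is defined in the training section below): with discriminator head $D$, classifier head $C$ and generator $G$,

$$
\mathcal{L}_D = \mathrm{BCE}\big(1, D(x)\big) + \mathrm{BCE}\big(0, D(G(z))\big) + \mathrm{CE}\big(y, C(x)\big),
\qquad
\mathcal{L}_G = \mathrm{BCE}\big(1, D(G(z))\big),
$$

where $x$ is a real sample with class label $y$, $z$ is a noise sample drawn from the random quantum circuit, $\mathrm{BCE}$ is binary cross-entropy, and $\mathrm{CE}$ is categorical cross-entropy.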
## Data Generation
As provided in the dataset, each datapoint is 5-dimensional. Hence we will use 5 qubits, pass them through a random quantum circuit, and use the resulting measurements as random inputs for the generator of the GAN.
```python
def generate_circuit(qubits):
    """Generate a random circuit on qubits."""
    random_circuit = cirq.generate_boixo_2018_supremacy_circuits_v2(qubits, cz_depth=2, seed=123242)
    return random_circuit

def generate_data(circuit, n_samples):
    """Draw `n_samples` samples from circuit into a tf.Tensor."""
    return tf.squeeze(tfq.layers.Sample()(circuit, repetitions=n_samples).to_tensor())
```
```python
# sample data and circuit structure
qubits = cirq.GridQubit.rect(1, 5)
random_circuit_m = generate_circuit(qubits) + cirq.measure_each(*qubits)
SVGCircuit(random_circuit_m)
```
```python
generate_data(random_circuit_m, 10)
```
<tf.Tensor: shape=(10, 5), dtype=int8, numpy=
array([[0, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 1, 0, 0],
[1, 0, 1, 0, 0]], dtype=int8)>
We will generate 200 random samples from this circuit to serve as random inputs for the generator.
```python
N_SAMPLES = 200
N_QUBITS = 5
QUBITS = cirq.GridQubit.rect(1, N_QUBITS)
REFERENCE_CIRCUIT = generate_circuit(QUBITS)
random_data = generate_data(REFERENCE_CIRCUIT, N_SAMPLES)
random_data
```
<tf.Tensor: shape=(200, 5), dtype=int8, numpy=
array([[0, 0, 0, 0, 0],
       [0, 0, 0, 0, 0],
       [0, 0, 0, 0, 0],
       ...,
       [1, 1, 1, 1, 0],
       [1, 1, 1, 1, 0],
       [1, 1, 1, 1, 0]], dtype=int8)>
## Building a Model
This GAN will be used to produce measurements corresponding to signal/background events.
```python
def make_generator():
    """Construct generator model."""
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(256, use_bias=False, input_shape=(N_QUBITS,), activation='elu'))
    model.add(tf.keras.layers.Dense(128, activation='relu'))
    model.add(tf.keras.layers.Dropout(0.3))
    model.add(tf.keras.layers.Dense(64, activation='relu'))
    model.add(tf.keras.layers.Dropout(0.3))
    model.add(tf.keras.layers.Dense(N_QUBITS, activation=tf.keras.activations.tanh))
    return model

def make_discriminator():
    """Construct discriminator model along with a classifier."""
    inp = tf.keras.Input(shape=(N_QUBITS,), dtype=tf.float32)
    out = tf.keras.layers.Dense(256, use_bias=False, activation='elu')(inp)
    out = tf.keras.layers.Dense(128, activation='relu')(out)
    out = tf.keras.layers.Dropout(0.4)(out)
    out = tf.keras.layers.Dense(64, activation='relu')(out)
    out = tf.keras.layers.Dropout(0.3)(out)
    # Two heads on the shared trunk: a signal/background classifier and a real/fake score.
    classification = tf.keras.layers.Dense(2, activation='softmax')(out)
    discrimination = tf.keras.layers.Dense(1, activation='sigmoid')(out)
    model = tf.keras.Model(inputs=[inp], outputs=[discrimination, classification])
    return model
```
Let us instantiate our models, define the losses, and define the `train_step` function that is executed for each batch in every epoch.
```python
generator = make_generator()
discriminator = make_discriminator()
```
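As a quick, purely illustrative sanity check (not part of the training pipeline), we can pass a dummy batch through both networks: the discriminator returns a real/fake score of shape `(batch, 1)` and class probabilities of shape `(batch, 2)`, while the generator maps 5-dimensional noise to 5-dimensional fake samples in `(-1, 1)`.
```python
# Illustrative smoke test with a dummy all-zero batch of 4 samples.
dummy = tf.zeros((4, N_QUBITS), dtype=tf.float32)
disc_score, class_probs = discriminator(dummy, training=False)
print(disc_score.shape, class_probs.shape)     # (4, 1) (4, 2)
print(generator(dummy, training=False).shape)  # (4, 5)
```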
```python
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def discriminator_loss(real_output, fake_output):
    """Compute the discriminator (real vs. fake) loss."""
    # Note: the discriminator head already applies a sigmoid, so from_logits=True
    # treats that sigmoid output as if it were a logit.
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    total_loss = real_loss + fake_loss
    return total_loss

def generator_loss(fake_output):
    """Compute the generator loss."""
    return cross_entropy(tf.ones_like(fake_output), fake_output)

generator_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
```
```python
BATCH_SIZE = 16
bce = tf.keras.losses.BinaryCrossentropy(from_logits=False)
# auc = tf.keras.metrics.AUC()

@tf.function
def train_step(images, labels, noise):
    """Run a train step on the provided batch."""
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_data = generator(noise, training=True)
        real_output, real_preds = discriminator(images, training=True)
        fake_output, fake_preds = discriminator(generated_data, training=True)
        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)
        # Add the classification loss on the real samples (signal vs. background).
        disc_loss = disc_loss + bce(tf.one_hot(tf.squeeze(labels), depth=2), real_preds)
    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
    # auc.update_state(tf.one_hot(tf.squeeze(labels), depth=2), real_preds)
    return gen_loss, disc_loss
```
```python
def train(data, labels, noise, epochs):
    """Launch full training for the given number of epochs."""
    batched_data = tf.data.Dataset.from_tensor_slices(data).batch(BATCH_SIZE)
    batched_labels = tf.data.Dataset.from_tensor_slices(labels).batch(BATCH_SIZE)
    batched_noise = tf.data.Dataset.from_tensor_slices(noise).batch(BATCH_SIZE)
    AUC = tf.keras.metrics.AUC()  # defined but not used in this version
    g_losses = []
    d_losses = []
    # aucs = []
    for epoch in range(epochs):
        g_epoch_losses = []
        d_epoch_losses = []
        # aucs_epoch = []
        for i, (data_batch, labels_batch, noise_batch) in enumerate(zip(batched_data, batched_labels, batched_noise)):
            gl, dl = train_step(data_batch, labels_batch, noise_batch)
            g_epoch_losses.append(gl)
            d_epoch_losses.append(dl)
            # aucs_epoch.append(auc_roc)
        g_losses.append(tf.reduce_mean(g_epoch_losses))
        d_losses.append(tf.reduce_mean(d_epoch_losses))
        print('Epoch: {}, Generator Loss: {}, Discriminator Loss: {}'.format(epoch, tf.reduce_mean(g_epoch_losses), tf.reduce_mean(d_epoch_losses)))
        # aucs.append(tf.reduce_mean(aucs_epoch))
    return g_losses, d_losses
```
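Before launching training, it is worth noting how the batching works out (an illustrative check, assuming the shapes printed above): with `BATCH_SIZE = 16`, the 100 training points give 7 batches per epoch while the 200 noise samples give 13, and the `zip` in `train` stops at the shorter stream, so each epoch runs 7 `train_step` calls.
```python
import math
# Effective number of train_step calls per epoch (zip stops at the shorter stream).
steps_per_epoch = min(math.ceil(len(x_train) / BATCH_SIZE),
                      math.ceil(len(random_data) / BATCH_SIZE))
print(steps_per_epoch)  # 7
```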
```python
gen_losses, disc_losses = train(x_train, y_train, random_data, 2000)
```
Epoch: 0, Generator Loss: 0.47288545966148376, Discriminator Loss: 2.1426262855529785
Epoch: 1, Generator Loss: 0.47159355878829956, Discriminator Loss: 2.1388280391693115
Epoch: 2, Generator Loss: 0.470787912607193, Discriminator Loss: 2.0825936794281006
Epoch: 3, Generator Loss: 0.4702128767967224, Discriminator Loss: 2.075483798980713
Epoch: 4, Generator Loss: 0.47020336985588074, Discriminator Loss: 2.0505168437957764
Epoch: 5, Generator Loss: 0.4700762927532196, Discriminator Loss: 2.022904872894287
Epoch: 6, Generator Loss: 0.46946173906326294, Discriminator Loss: 2.0530457496643066
Epoch: 7, Generator Loss: 0.4677629768848419, Discriminator Loss: 2.0332977771759033
Epoch: 8, Generator Loss: 0.4693600833415985, Discriminator Loss: 2.048710346221924
Epoch: 9, Generator Loss: 0.4706823527812958, Discriminator Loss: 2.019632339477539
Epoch: 10, Generator Loss: 0.4711288809776306, Discriminator Loss: 2.0153427124023438
Epoch: 11, Generator Loss: 0.47589582204818726, Discriminator Loss: 2.0159380435943604
Epoch: 12, Generator Loss: 0.4789740741252899, Discriminator Loss: 2.0038421154022217
Epoch: 13, Generator Loss: 0.4811742901802063, Discriminator Loss: 1.968962550163269
Epoch: 14, Generator Loss: 0.48366862535476685, Discriminator Loss: 1.984475016593933
Epoch: 15, Generator Loss: 0.4791991710662842, Discriminator Loss: 2.003448009490967
Epoch: 16, Generator Loss: 0.4744584262371063, Discriminator Loss: 1.9951093196868896
Epoch: 17, Generator Loss: 0.4726622998714447, Discriminator Loss: 1.9844080209732056
Epoch: 18, Generator Loss: 0.4746064245700836, Discriminator Loss: 1.9852495193481445
Epoch: 19, Generator Loss: 0.4767344892024994, Discriminator Loss: 1.9912365674972534
Epoch: 20, Generator Loss: 0.47933536767959595, Discriminator Loss: 1.9573673009872437
Epoch: 21, Generator Loss: 0.4778843820095062, Discriminator Loss: 1.997721791267395
Epoch: 22, Generator Loss: 0.473391056060791, Discriminator Loss: 1.95980966091156
Epoch: 23, Generator Loss: 0.4726634919643402, Discriminator Loss: 1.9842766523361206
Epoch: 24, Generator Loss: 0.4760975241661072, Discriminator Loss: 1.9720042943954468
Epoch: 25, Generator Loss: 0.48795565962791443, Discriminator Loss: 1.9472404718399048
Epoch: 26, Generator Loss: 0.4994671046733856, Discriminator Loss: 1.9453903436660767
Epoch: 27, Generator Loss: 0.5054343342781067, Discriminator Loss: 1.9152615070343018
Epoch: 28, Generator Loss: 0.508525550365448, Discriminator Loss: 1.9260485172271729
Epoch: 29, Generator Loss: 0.5018197894096375, Discriminator Loss: 1.9515644311904907
Epoch: 30, Generator Loss: 0.48237642645835876, Discriminator Loss: 1.9889060258865356
Epoch: 31, Generator Loss: 0.47576358914375305, Discriminator Loss: 1.9954891204833984
Epoch: 32, Generator Loss: 0.47649702429771423, Discriminator Loss: 1.9981650114059448
Epoch: 33, Generator Loss: 0.47945308685302734, Discriminator Loss: 2.0044641494750977
Epoch: 34, Generator Loss: 0.48640182614326477, Discriminator Loss: 1.9839919805526733
Epoch: 35, Generator Loss: 0.5043705105781555, Discriminator Loss: 1.964692234992981
Epoch: 36, Generator Loss: 0.5061931610107422, Discriminator Loss: 1.9607325792312622
Epoch: 37, Generator Loss: 0.5090411305427551, Discriminator Loss: 1.9640862941741943
Epoch: 38, Generator Loss: 0.49888092279434204, Discriminator Loss: 2.0025503635406494
Epoch: 39, Generator Loss: 0.49890926480293274, Discriminator Loss: 1.9784700870513916
Epoch: 40, Generator Loss: 0.4987829327583313, Discriminator Loss: 1.9651585817337036
Epoch: 41, Generator Loss: 0.5023303627967834, Discriminator Loss: 1.9435651302337646
Epoch: 42, Generator Loss: 0.5030931830406189, Discriminator Loss: 1.970147967338562
Epoch: 43, Generator Loss: 0.5103368163108826, Discriminator Loss: 1.948770523071289
Epoch: 44, Generator Loss: 0.5141062140464783, Discriminator Loss: 1.9382458925247192
Epoch: 45, Generator Loss: 0.5192903280258179, Discriminator Loss: 1.923143982887268
Epoch: 46, Generator Loss: 0.5226637125015259, Discriminator Loss: 1.9440218210220337
Epoch: 47, Generator Loss: 0.5246140956878662, Discriminator Loss: 1.9339735507965088
Epoch: 48, Generator Loss: 0.5278750658035278, Discriminator Loss: 1.9330395460128784
Epoch: 49, Generator Loss: 0.5215190052986145, Discriminator Loss: 1.9442846775054932
Epoch: 50, Generator Loss: 0.5162323117256165, Discriminator Loss: 1.9721218347549438
Epoch: 51, Generator Loss: 0.5048933625221252, Discriminator Loss: 1.9678698778152466
Epoch: 52, Generator Loss: 0.509625256061554, Discriminator Loss: 1.9625767469406128
Epoch: 53, Generator Loss: 0.5198760628700256, Discriminator Loss: 1.9369432926177979
Epoch: 54, Generator Loss: 0.5260513424873352, Discriminator Loss: 1.9441606998443604
Epoch: 55, Generator Loss: 0.5376901626586914, Discriminator Loss: 1.936803936958313
Epoch: 56, Generator Loss: 0.5560902953147888, Discriminator Loss: 1.902709722518921
Epoch: 57, Generator Loss: 0.5605687499046326, Discriminator Loss: 1.8947279453277588
Epoch: 58, Generator Loss: 0.5672340393066406, Discriminator Loss: 1.898500680923462
Epoch: 59, Generator Loss: 0.5689089894294739, Discriminator Loss: 1.871029019355774
Epoch: 60, Generator Loss: 0.5697230696678162, Discriminator Loss: 1.8731046915054321
Epoch: 61, Generator Loss: 0.5583624839782715, Discriminator Loss: 1.8996940851211548
Epoch: 62, Generator Loss: 0.5345408320426941, Discriminator Loss: 1.9253835678100586
Epoch: 63, Generator Loss: 0.5130673050880432, Discriminator Loss: 1.9621416330337524
Epoch: 64, Generator Loss: 0.5060693621635437, Discriminator Loss: 1.965281367301941
Epoch: 65, Generator Loss: 0.523578941822052, Discriminator Loss: 1.9250215291976929
Epoch: 66, Generator Loss: 0.5341764688491821, Discriminator Loss: 1.9169458150863647
Epoch: 67, Generator Loss: 0.5437857508659363, Discriminator Loss: 1.8830159902572632
Epoch: 68, Generator Loss: 0.5410133004188538, Discriminator Loss: 1.9133960008621216
Epoch: 69, Generator Loss: 0.535211443901062, Discriminator Loss: 1.8671048879623413
Epoch: 70, Generator Loss: 0.5227625966072083, Discriminator Loss: 1.9147918224334717
Epoch: 71, Generator Loss: 0.5165602564811707, Discriminator Loss: 1.9021399021148682
Epoch: 72, Generator Loss: 0.5219352841377258, Discriminator Loss: 1.8909399509429932
Epoch: 73, Generator Loss: 0.5299494862556458, Discriminator Loss: 1.913786768913269
Epoch: 74, Generator Loss: 0.5370683073997498, Discriminator Loss: 1.8993290662765503
Epoch: 75, Generator Loss: 0.5543458461761475, Discriminator Loss: 1.886603593826294
Epoch: 76, Generator Loss: 0.5559374690055847, Discriminator Loss: 1.8833478689193726
Epoch: 77, Generator Loss: 0.5516044497489929, Discriminator Loss: 1.88387930393219
Epoch: 78, Generator Loss: 0.5634049773216248, Discriminator Loss: 1.8520065546035767
Epoch: 79, Generator Loss: 0.5627610087394714, Discriminator Loss: 1.8465543985366821
Epoch: 80, Generator Loss: 0.5510416030883789, Discriminator Loss: 1.8662376403808594
Epoch: 81, Generator Loss: 0.5503302216529846, Discriminator Loss: 1.915558099746704
Epoch: 82, Generator Loss: 0.5445582866668701, Discriminator Loss: 1.9156755208969116
Epoch: 83, Generator Loss: 0.5506776571273804, Discriminator Loss: 1.876555323600769
Epoch: 84, Generator Loss: 0.5673772692680359, Discriminator Loss: 1.8943994045257568
Epoch: 85, Generator Loss: 0.5826940536499023, Discriminator Loss: 1.8510185480117798
Epoch: 86, Generator Loss: 0.5903192758560181, Discriminator Loss: 1.828652262687683
Epoch: 87, Generator Loss: 0.5916497111320496, Discriminator Loss: 1.8171485662460327
Epoch: 88, Generator Loss: 0.5896720886230469, Discriminator Loss: 1.7961512804031372
Epoch: 89, Generator Loss: 0.5867503881454468, Discriminator Loss: 1.8081086874008179
Epoch: 90, Generator Loss: 0.561775803565979, Discriminator Loss: 1.8444185256958008
Epoch: 91, Generator Loss: 0.552556037902832, Discriminator Loss: 1.8795503377914429
Epoch: 92, Generator Loss: 0.544338583946228, Discriminator Loss: 1.8654581308364868
Epoch: 93, Generator Loss: 0.5510985255241394, Discriminator Loss: 1.845579981803894
Epoch: 94, Generator Loss: 0.5495144724845886, Discriminator Loss: 1.839232087135315
Epoch: 95, Generator Loss: 0.5556743741035461, Discriminator Loss: 1.8285958766937256
Epoch: 96, Generator Loss: 0.5433610081672668, Discriminator Loss: 1.8459646701812744
Epoch: 97, Generator Loss: 0.5369371771812439, Discriminator Loss: 1.8679707050323486
Epoch: 98, Generator Loss: 0.5460633635520935, Discriminator Loss: 1.8344632387161255
Epoch: 99, Generator Loss: 0.5498455762863159, Discriminator Loss: 1.873538851737976
Epoch: 100, Generator Loss: 0.5569822788238525, Discriminator Loss: 1.8296430110931396
Epoch: 101, Generator Loss: 0.5649753212928772, Discriminator Loss: 1.8473527431488037
Epoch: 102, Generator Loss: 0.5755732655525208, Discriminator Loss: 1.8351280689239502
Epoch: 103, Generator Loss: 0.5833231210708618, Discriminator Loss: 1.8038426637649536
Epoch: 104, Generator Loss: 0.5890332460403442, Discriminator Loss: 1.8082283735275269
Epoch: 105, Generator Loss: 0.5832279920578003, Discriminator Loss: 1.8057630062103271
Epoch: 106, Generator Loss: 0.5815386176109314, Discriminator Loss: 1.821812391281128
Epoch: 107, Generator Loss: 0.5709091424942017, Discriminator Loss: 1.8217051029205322
Epoch: 108, Generator Loss: 0.5474334359169006, Discriminator Loss: 1.8874613046646118
Epoch: 109, Generator Loss: 0.5683592557907104, Discriminator Loss: 1.8294686079025269
Epoch: 110, Generator Loss: 0.5851566195487976, Discriminator Loss: 1.862564206123352
Epoch: 111, Generator Loss: 0.5988563299179077, Discriminator Loss: 1.8206329345703125
Epoch: 112, Generator Loss: 0.6115578413009644, Discriminator Loss: 1.773983359336853
Epoch: 113, Generator Loss: 0.6199137568473816, Discriminator Loss: 1.8008215427398682
Epoch: 114, Generator Loss: 0.6248680949211121, Discriminator Loss: 1.8012686967849731
Epoch: 115, Generator Loss: 0.6262296438217163, Discriminator Loss: 1.783456563949585
Epoch: 116, Generator Loss: 0.6282876133918762, Discriminator Loss: 1.7769858837127686
Epoch: 117, Generator Loss: 0.6285579800605774, Discriminator Loss: 1.7650648355484009
Epoch: 118, Generator Loss: 0.6294587254524231, Discriminator Loss: 1.7778254747390747
Epoch: 119, Generator Loss: 0.6305748820304871, Discriminator Loss: 1.7778427600860596
Epoch: 120, Generator Loss: 0.6291638612747192, Discriminator Loss: 1.775632619857788
Epoch: 121, Generator Loss: 0.6280948519706726, Discriminator Loss: 1.7537888288497925
Epoch: 122, Generator Loss: 0.6300249099731445, Discriminator Loss: 1.7844229936599731
Epoch: 123, Generator Loss: 0.6283208727836609, Discriminator Loss: 1.754599928855896
Epoch: 124, Generator Loss: 0.6298123002052307, Discriminator Loss: 1.7848894596099854
Epoch: 125, Generator Loss: 0.6309837698936462, Discriminator Loss: 1.7474391460418701
Epoch: 126, Generator Loss: 0.6320422887802124, Discriminator Loss: 1.7583450078964233
Epoch: 127, Generator Loss: 0.6400904059410095, Discriminator Loss: 1.7694214582443237
Epoch: 128, Generator Loss: 0.6324629187583923, Discriminator Loss: 1.7774016857147217
Epoch: 129, Generator Loss: 0.6124916076660156, Discriminator Loss: 1.8186702728271484
Epoch: 130, Generator Loss: 0.5734853148460388, Discriminator Loss: 1.837594747543335
Epoch: 131, Generator Loss: 0.5636682510375977, Discriminator Loss: 1.887058973312378
Epoch: 132, Generator Loss: 0.5782691240310669, Discriminator Loss: 1.8639087677001953
Epoch: 133, Generator Loss: 0.5855530500411987, Discriminator Loss: 1.8551642894744873
Epoch: 134, Generator Loss: 0.5991559624671936, Discriminator Loss: 1.8197938203811646
Epoch: 135, Generator Loss: 0.6184874773025513, Discriminator Loss: 1.8181065320968628
Epoch: 136, Generator Loss: 0.6258505582809448, Discriminator Loss: 1.8089030981063843
Epoch: 137, Generator Loss: 0.6345445513725281, Discriminator Loss: 1.8162752389907837
Epoch: 138, Generator Loss: 0.6385933756828308, Discriminator Loss: 1.8260458707809448
Epoch: 139, Generator Loss: 0.6473274230957031, Discriminator Loss: 1.7981773614883423
Epoch: 140, Generator Loss: 0.6472809910774231, Discriminator Loss: 1.7697031497955322
Epoch: 141, Generator Loss: 0.650701642036438, Discriminator Loss: 1.743865728378296
Epoch: 142, Generator Loss: 0.6521008610725403, Discriminator Loss: 1.7176873683929443
Epoch: 143, Generator Loss: 0.6444106698036194, Discriminator Loss: 1.7693579196929932
Epoch: 144, Generator Loss: 0.641905665397644, Discriminator Loss: 1.7705204486846924
Epoch: 145, Generator Loss: 0.6374165415763855, Discriminator Loss: 1.8058663606643677
Epoch: 146, Generator Loss: 0.6235266923904419, Discriminator Loss: 1.7381870746612549
Epoch: 147, Generator Loss: 0.5892828702926636, Discriminator Loss: 1.852325439453125
Epoch: 148, Generator Loss: 0.5753353834152222, Discriminator Loss: 1.8449815511703491
Epoch: 149, Generator Loss: 0.6026539206504822, Discriminator Loss: 1.7581708431243896
Epoch: 150, Generator Loss: 0.6236670613288879, Discriminator Loss: 1.760986089706421
Epoch: 151, Generator Loss: 0.6250039339065552, Discriminator Loss: 1.768956184387207
Epoch: 152, Generator Loss: 0.6256861090660095, Discriminator Loss: 1.7794551849365234
Epoch: 153, Generator Loss: 0.6346012949943542, Discriminator Loss: 1.760243535041809
Epoch: 154, Generator Loss: 0.628298819065094, Discriminator Loss: 1.749287724494934
Epoch: 155, Generator Loss: 0.6368821859359741, Discriminator Loss: 1.7152551412582397
Epoch: 156, Generator Loss: 0.6314118504524231, Discriminator Loss: 1.7351112365722656
Epoch: 157, Generator Loss: 0.6265581250190735, Discriminator Loss: 1.736602544784546
Epoch: 158, Generator Loss: 0.6100559234619141, Discriminator Loss: 1.7446779012680054
Epoch: 159, Generator Loss: 0.5922154188156128, Discriminator Loss: 1.7559831142425537
Epoch: 160, Generator Loss: 0.5712250471115112, Discriminator Loss: 1.8211967945098877
Epoch: 161, Generator Loss: 0.5644485354423523, Discriminator Loss: 1.8665589094161987
Epoch: 162, Generator Loss: 0.6024867296218872, Discriminator Loss: 1.8104267120361328
Epoch: 163, Generator Loss: 0.6274945139884949, Discriminator Loss: 1.7998268604278564
Epoch: 164, Generator Loss: 0.643936038017273, Discriminator Loss: 1.7096644639968872
Epoch: 165, Generator Loss: 0.6404080986976624, Discriminator Loss: 1.716945767402649
Epoch: 166, Generator Loss: 0.6532421708106995, Discriminator Loss: 1.7037891149520874
Epoch: 167, Generator Loss: 0.6447615027427673, Discriminator Loss: 1.7099530696868896
Epoch: 168, Generator Loss: 0.6475247740745544, Discriminator Loss: 1.6623265743255615
Epoch: 169, Generator Loss: 0.6348186731338501, Discriminator Loss: 1.6947166919708252
Epoch: 170, Generator Loss: 0.6414856314659119, Discriminator Loss: 1.6926909685134888
Epoch: 171, Generator Loss: 0.6314753890037537, Discriminator Loss: 1.660185694694519
Epoch: 172, Generator Loss: 0.6261246800422668, Discriminator Loss: 1.7072498798370361
Epoch: 173, Generator Loss: 0.6210373640060425, Discriminator Loss: 1.675012469291687
Epoch: 174, Generator Loss: 0.5940043330192566, Discriminator Loss: 1.7331515550613403
Epoch: 175, Generator Loss: 0.5574861168861389, Discriminator Loss: 1.7930545806884766
Epoch: 176, Generator Loss: 0.542355477809906, Discriminator Loss: 1.8148338794708252
Epoch: 177, Generator Loss: 0.5822253823280334, Discriminator Loss: 1.8181102275848389
Epoch: 178, Generator Loss: 0.5935879349708557, Discriminator Loss: 1.8004159927368164
Epoch: 179, Generator Loss: 0.6095434427261353, Discriminator Loss: 1.7807486057281494
Epoch: 180, Generator Loss: 0.6232584714889526, Discriminator Loss: 1.7791073322296143
Epoch: 181, Generator Loss: 0.6337913274765015, Discriminator Loss: 1.7705801725387573
Epoch: 182, Generator Loss: 0.6357728242874146, Discriminator Loss: 1.7532390356063843
Epoch: 183, Generator Loss: 0.6398280262947083, Discriminator Loss: 1.7407302856445312
Epoch: 184, Generator Loss: 0.634765625, Discriminator Loss: 1.7535192966461182
Epoch: 185, Generator Loss: 0.63277667760849, Discriminator Loss: 1.7463499307632446
Epoch: 186, Generator Loss: 0.6328725218772888, Discriminator Loss: 1.7686328887939453
Epoch: 187, Generator Loss: 0.6205809712409973, Discriminator Loss: 1.7650877237319946
Epoch: 188, Generator Loss: 0.5941453576087952, Discriminator Loss: 1.8217060565948486
Epoch: 189, Generator Loss: 0.6100953817367554, Discriminator Loss: 1.7752925157546997
Epoch: 190, Generator Loss: 0.6226822733879089, Discriminator Loss: 1.7728627920150757
Epoch: 191, Generator Loss: 0.6314853429794312, Discriminator Loss: 1.7534593343734741
Epoch: 192, Generator Loss: 0.637579083442688, Discriminator Loss: 1.7179620265960693
Epoch: 193, Generator Loss: 0.6405450105667114, Discriminator Loss: 1.725502848625183
Epoch: 194, Generator Loss: 0.6397368311882019, Discriminator Loss: 1.6983562707901
Epoch: 195, Generator Loss: 0.6388319730758667, Discriminator Loss: 1.7154982089996338
Epoch: 196, Generator Loss: 0.6341049075126648, Discriminator Loss: 1.728189468383789
Epoch: 197, Generator Loss: 0.6432934403419495, Discriminator Loss: 1.7179791927337646
Epoch: 198, Generator Loss: 0.6484194993972778, Discriminator Loss: 1.6843165159225464
Epoch: 199, Generator Loss: 0.6386139988899231, Discriminator Loss: 1.6859838962554932
Epoch: 200, Generator Loss: 0.6301169991493225, Discriminator Loss: 1.682997703552246
Epoch: 201, Generator Loss: 0.6347073912620544, Discriminator Loss: 1.6391927003860474
Epoch: 202, Generator Loss: 0.629494309425354, Discriminator Loss: 1.6566816568374634
Epoch: 203, Generator Loss: 0.615505039691925, Discriminator Loss: 1.699209213256836
Epoch: 204, Generator Loss: 0.6223717927932739, Discriminator Loss: 1.6770910024642944
Epoch: 205, Generator Loss: 0.6162890195846558, Discriminator Loss: 1.6603543758392334
Epoch: 206, Generator Loss: 0.6068199872970581, Discriminator Loss: 1.7019388675689697
Epoch: 207, Generator Loss: 0.5835185050964355, Discriminator Loss: 1.732129454612732
Epoch: 208, Generator Loss: 0.5958709716796875, Discriminator Loss: 1.735878825187683
Epoch: 209, Generator Loss: 0.5849490761756897, Discriminator Loss: 1.7640939950942993
Epoch: 210, Generator Loss: 0.5637327432632446, Discriminator Loss: 1.8124845027923584
Epoch: 211, Generator Loss: 0.577876627445221, Discriminator Loss: 1.8146675825119019
Epoch: 212, Generator Loss: 0.5868808031082153, Discriminator Loss: 1.812463402748108
Epoch: 213, Generator Loss: 0.605700671672821, Discriminator Loss: 1.7507508993148804
Epoch: 214, Generator Loss: 0.6198753714561462, Discriminator Loss: 1.7286328077316284
Epoch: 215, Generator Loss: 0.6174247860908508, Discriminator Loss: 1.7417566776275635
Epoch: 216, Generator Loss: 0.613113284111023, Discriminator Loss: 1.765594244003296
Epoch: 217, Generator Loss: 0.6264870762825012, Discriminator Loss: 1.6827411651611328
Epoch: 218, Generator Loss: 0.6183872222900391, Discriminator Loss: 1.7059530019760132
Epoch: 219, Generator Loss: 0.6173415184020996, Discriminator Loss: 1.7229164838790894
Epoch: 220, Generator Loss: 0.6050001978874207, Discriminator Loss: 1.7366943359375
Epoch: 221, Generator Loss: 0.6172733306884766, Discriminator Loss: 1.674242377281189
Epoch: 222, Generator Loss: 0.6176896691322327, Discriminator Loss: 1.7133352756500244
Epoch: 223, Generator Loss: 0.610809862613678, Discriminator Loss: 1.6950114965438843
Epoch: 224, Generator Loss: 0.6104924082756042, Discriminator Loss: 1.75283682346344
Epoch: 225, Generator Loss: 0.6131807565689087, Discriminator Loss: 1.7120122909545898
Epoch: 226, Generator Loss: 0.6219436526298523, Discriminator Loss: 1.7476533651351929
Epoch: 227, Generator Loss: 0.6235187649726868, Discriminator Loss: 1.7707799673080444
Epoch: 228, Generator Loss: 0.6321061253547668, Discriminator Loss: 1.6742284297943115
Epoch: 229, Generator Loss: 0.6126621961593628, Discriminator Loss: 1.7203556299209595
Epoch: 230, Generator Loss: 0.6216983199119568, Discriminator Loss: 1.6999882459640503
Epoch: 231, Generator Loss: 0.6149224042892456, Discriminator Loss: 1.741202473640442
Epoch: 232, Generator Loss: 0.626215398311615, Discriminator Loss: 1.75324285030365
Epoch: 233, Generator Loss: 0.6238604187965393, Discriminator Loss: 1.7004338502883911
Epoch: 234, Generator Loss: 0.624783456325531, Discriminator Loss: 1.709208369255066
Epoch: 235, Generator Loss: 0.6322792768478394, Discriminator Loss: 1.7244776487350464
Epoch: 236, Generator Loss: 0.6291399002075195, Discriminator Loss: 1.703481912612915
Epoch: 237, Generator Loss: 0.6249092817306519, Discriminator Loss: 1.7036558389663696
Epoch: 238, Generator Loss: 0.6288641691207886, Discriminator Loss: 1.6899945735931396
Epoch: 239, Generator Loss: 0.6295877695083618, Discriminator Loss: 1.7107700109481812
Epoch: 240, Generator Loss: 0.6331409811973572, Discriminator Loss: 1.693745493888855
Epoch: 241, Generator Loss: 0.6244977712631226, Discriminator Loss: 1.7336817979812622
Epoch: 242, Generator Loss: 0.6192163825035095, Discriminator Loss: 1.7143076658248901
Epoch: 243, Generator Loss: 0.618996262550354, Discriminator Loss: 1.745056390762329
Epoch: 244, Generator Loss: 0.633124053478241, Discriminator Loss: 1.6888076066970825
Epoch: 245, Generator Loss: 0.6242901682853699, Discriminator Loss: 1.730173110961914
Epoch: 246, Generator Loss: 0.6301984190940857, Discriminator Loss: 1.7000253200531006
Epoch: 247, Generator Loss: 0.6281982660293579, Discriminator Loss: 1.7466261386871338
Epoch: 248, Generator Loss: 0.6365850567817688, Discriminator Loss: 1.7463300228118896
Epoch: 249, Generator Loss: 0.6325564980506897, Discriminator Loss: 1.7576972246170044
Epoch: 250, Generator Loss: 0.6371110081672668, Discriminator Loss: 1.707656979560852
Epoch: 251, Generator Loss: 0.6365411877632141, Discriminator Loss: 1.7142974138259888
Epoch: 252, Generator Loss: 0.6340939402580261, Discriminator Loss: 1.695809245109558
Epoch: 253, Generator Loss: 0.6294987797737122, Discriminator Loss: 1.746159553527832
Epoch: 254, Generator Loss: 0.6255390048027039, Discriminator Loss: 1.676321268081665
Epoch: 255, Generator Loss: 0.6201111078262329, Discriminator Loss: 1.729650855064392
Epoch: 256, Generator Loss: 0.6224362254142761, Discriminator Loss: 1.6679507493972778
Epoch: 257, Generator Loss: 0.6230950951576233, Discriminator Loss: 1.7347079515457153
Epoch: 258, Generator Loss: 0.6113643646240234, Discriminator Loss: 1.7098867893218994
Epoch: 259, Generator Loss: 0.6205706000328064, Discriminator Loss: 1.7021971940994263
Epoch: 260, Generator Loss: 0.6246237754821777, Discriminator Loss: 1.7006195783615112
Epoch: 261, Generator Loss: 0.6167707443237305, Discriminator Loss: 1.7234348058700562
Epoch: 262, Generator Loss: 0.6311796307563782, Discriminator Loss: 1.7174619436264038
Epoch: 263, Generator Loss: 0.6179898381233215, Discriminator Loss: 1.7127645015716553
Epoch: 264, Generator Loss: 0.6204423308372498, Discriminator Loss: 1.7028361558914185
Epoch: 265, Generator Loss: 0.6133394837379456, Discriminator Loss: 1.6926206350326538
Epoch: 266, Generator Loss: 0.6211324334144592, Discriminator Loss: 1.7179358005523682
Epoch: 267, Generator Loss: 0.625482976436615, Discriminator Loss: 1.6775175333023071
Epoch: 268, Generator Loss: 0.6245230436325073, Discriminator Loss: 1.7046822309494019
Epoch: 269, Generator Loss: 0.6272510290145874, Discriminator Loss: 1.722633957862854
Epoch: 270, Generator Loss: 0.6352478265762329, Discriminator Loss: 1.7091001272201538
Epoch: 271, Generator Loss: 0.63001948595047, Discriminator Loss: 1.687362790107727
Epoch: 272, Generator Loss: 0.6199166178703308, Discriminator Loss: 1.7096284627914429
Epoch: 273, Generator Loss: 0.6292887926101685, Discriminator Loss: 1.7140878438949585
Epoch: 274, Generator Loss: 0.6361770033836365, Discriminator Loss: 1.681007981300354
Epoch: 275, Generator Loss: 0.6291784644126892, Discriminator Loss: 1.7483857870101929
Epoch: 276, Generator Loss: 0.6315180659294128, Discriminator Loss: 1.7215346097946167
Epoch: 277, Generator Loss: 0.6314154863357544, Discriminator Loss: 1.7012547254562378
Epoch: 278, Generator Loss: 0.63746577501297, Discriminator Loss: 1.7148911952972412
Epoch: 279, Generator Loss: 0.6352987885475159, Discriminator Loss: 1.698099136352539
Epoch: 280, Generator Loss: 0.6360382437705994, Discriminator Loss: 1.6983792781829834
Epoch: 281, Generator Loss: 0.6355541348457336, Discriminator Loss: 1.6732347011566162
Epoch: 282, Generator Loss: 0.6315398812294006, Discriminator Loss: 1.6969462633132935
Epoch: 283, Generator Loss: 0.6267959475517273, Discriminator Loss: 1.7253066301345825
Epoch: 284, Generator Loss: 0.6349143981933594, Discriminator Loss: 1.6942905187606812
Epoch: 285, Generator Loss: 0.6350799202919006, Discriminator Loss: 1.7513391971588135
Epoch: 286, Generator Loss: 0.6315780282020569, Discriminator Loss: 1.7159690856933594
Epoch: 287, Generator Loss: 0.6353291273117065, Discriminator Loss: 1.6938655376434326
Epoch: 288, Generator Loss: 0.6284250617027283, Discriminator Loss: 1.732704997062683
Epoch: 289, Generator Loss: 0.627655029296875, Discriminator Loss: 1.728252649307251
Epoch: 290, Generator Loss: 0.6375523805618286, Discriminator Loss: 1.6391680240631104
Epoch: 291, Generator Loss: 0.6345260739326477, Discriminator Loss: 1.6660269498825073
Epoch: 292, Generator Loss: 0.6323086023330688, Discriminator Loss: 1.73190438747406
Epoch: 293, Generator Loss: 0.6317147016525269, Discriminator Loss: 1.7209004163742065
Epoch: 294, Generator Loss: 0.6281710863113403, Discriminator Loss: 1.6950528621673584
Epoch: 295, Generator Loss: 0.6317554116249084, Discriminator Loss: 1.6676512956619263
Epoch: 296, Generator Loss: 0.6337504386901855, Discriminator Loss: 1.718414068222046
Epoch: 297, Generator Loss: 0.6366174817085266, Discriminator Loss: 1.7014961242675781
Epoch: 298, Generator Loss: 0.64112389087677, Discriminator Loss: 1.6978299617767334
Epoch: 299, Generator Loss: 0.6359738111495972, Discriminator Loss: 1.6829395294189453
Epoch: 300, Generator Loss: 0.6392512917518616, Discriminator Loss: 1.668739914894104
Epoch: 301, Generator Loss: 0.6416367292404175, Discriminator Loss: 1.6946498155593872
Epoch: 302, Generator Loss: 0.6406598687171936, Discriminator Loss: 1.7617634534835815
Epoch: 303, Generator Loss: 0.6414175033569336, Discriminator Loss: 1.720671534538269
Epoch: 304, Generator Loss: 0.63744056224823, Discriminator Loss: 1.67775559425354
Epoch: 305, Generator Loss: 0.6414657831192017, Discriminator Loss: 1.6948049068450928
Epoch: 306, Generator Loss: 0.6470726728439331, Discriminator Loss: 1.6779272556304932
Epoch: 307, Generator Loss: 0.6411290764808655, Discriminator Loss: 1.6670846939086914
Epoch: 308, Generator Loss: 0.6360321044921875, Discriminator Loss: 1.6787768602371216
Epoch: 309, Generator Loss: 0.6412560343742371, Discriminator Loss: 1.701042890548706
Epoch: 310, Generator Loss: 0.6406087279319763, Discriminator Loss: 1.6843072175979614
Epoch: 311, Generator Loss: 0.6488772630691528, Discriminator Loss: 1.6673251390457153
Epoch: 312, Generator Loss: 0.6352800726890564, Discriminator Loss: 1.6697102785110474
Epoch: 313, Generator Loss: 0.642504096031189, Discriminator Loss: 1.7025638818740845
Epoch: 314, Generator Loss: 0.643029510974884, Discriminator Loss: 1.6820297241210938
Epoch: 315, Generator Loss: 0.6405842900276184, Discriminator Loss: 1.7076090574264526
Epoch: 316, Generator Loss: 0.6423209309577942, Discriminator Loss: 1.695090413093567
Epoch: 317, Generator Loss: 0.6439040899276733, Discriminator Loss: 1.7476117610931396
Epoch: 318, Generator Loss: 0.6419994235038757, Discriminator Loss: 1.6418421268463135
Epoch: 319, Generator Loss: 0.6411580443382263, Discriminator Loss: 1.6744927167892456
Epoch: 320, Generator Loss: 0.6336439847946167, Discriminator Loss: 1.7232341766357422
Epoch: 321, Generator Loss: 0.641753613948822, Discriminator Loss: 1.7189486026763916
Epoch: 322, Generator Loss: 0.6411749124526978, Discriminator Loss: 1.7273023128509521
Epoch: 323, Generator Loss: 0.6538487672805786, Discriminator Loss: 1.6982523202896118
Epoch: 324, Generator Loss: 0.6393874287605286, Discriminator Loss: 1.6944007873535156
Epoch: 325, Generator Loss: 0.643139660358429, Discriminator Loss: 1.709736943244934
Epoch: 326, Generator Loss: 0.6471273303031921, Discriminator Loss: 1.696259617805481
Epoch: 327, Generator Loss: 0.6439687013626099, Discriminator Loss: 1.7098512649536133
Epoch: 328, Generator Loss: 0.6441207528114319, Discriminator Loss: 1.6927881240844727
Epoch: 329, Generator Loss: 0.6538675427436829, Discriminator Loss: 1.6943638324737549
Epoch: 330, Generator Loss: 0.6443697810173035, Discriminator Loss: 1.6886612176895142
Epoch: 331, Generator Loss: 0.6463592648506165, Discriminator Loss: 1.698427438735962
Epoch: 332, Generator Loss: 0.6530896425247192, Discriminator Loss: 1.7214006185531616
Epoch: 333, Generator Loss: 0.648661196231842, Discriminator Loss: 1.6833547353744507
Epoch: 334, Generator Loss: 0.6480315327644348, Discriminator Loss: 1.677469253540039
Epoch: 335, Generator Loss: 0.6461464762687683, Discriminator Loss: 1.6372073888778687
Epoch: 336, Generator Loss: 0.6507520079612732, Discriminator Loss: 1.6977587938308716
Epoch: 337, Generator Loss: 0.650206983089447, Discriminator Loss: 1.7230720520019531
Epoch: 338, Generator Loss: 0.6487578749656677, Discriminator Loss: 1.6764246225357056
Epoch: 339, Generator Loss: 0.6530085802078247, Discriminator Loss: 1.696730375289917
Epoch: 340, Generator Loss: 0.6482005715370178, Discriminator Loss: 1.6860673427581787
Epoch: 341, Generator Loss: 0.651825487613678, Discriminator Loss: 1.6996127367019653
Epoch: 342, Generator Loss: 0.6499524712562561, Discriminator Loss: 1.7055333852767944
Epoch: 343, Generator Loss: 0.6494059562683105, Discriminator Loss: 1.6800388097763062
Epoch: 344, Generator Loss: 0.6409929990768433, Discriminator Loss: 1.6861563920974731
Epoch: 345, Generator Loss: 0.6549736857414246, Discriminator Loss: 1.6458942890167236
Epoch: 346, Generator Loss: 0.6486549973487854, Discriminator Loss: 1.6705856323242188
Epoch: 347, Generator Loss: 0.651098906993866, Discriminator Loss: 1.698678731918335
Epoch: 348, Generator Loss: 0.6534900069236755, Discriminator Loss: 1.7280921936035156
Epoch: 349, Generator Loss: 0.655157208442688, Discriminator Loss: 1.731066346168518
Epoch: 350, Generator Loss: 0.6536402702331543, Discriminator Loss: 1.6886059045791626
Epoch: 351, Generator Loss: 0.6563877463340759, Discriminator Loss: 1.6613121032714844
Epoch: 352, Generator Loss: 0.6432307362556458, Discriminator Loss: 1.6946407556533813
Epoch: 353, Generator Loss: 0.646729052066803, Discriminator Loss: 1.6917955875396729
Epoch: 354, Generator Loss: 0.643084704875946, Discriminator Loss: 1.695717692375183
Epoch: 355, Generator Loss: 0.6356577277183533, Discriminator Loss: 1.6649482250213623
Epoch: 356, Generator Loss: 0.6420130133628845, Discriminator Loss: 1.7270227670669556
Epoch: 357, Generator Loss: 0.6449942588806152, Discriminator Loss: 1.7102911472320557
Epoch: 358, Generator Loss: 0.6454346776008606, Discriminator Loss: 1.6970735788345337
Epoch: 359, Generator Loss: 0.6483719944953918, Discriminator Loss: 1.6393067836761475
Epoch: 360, Generator Loss: 0.6413000822067261, Discriminator Loss: 1.714529037475586
Epoch: 361, Generator Loss: 0.650255024433136, Discriminator Loss: 1.6897858381271362
Epoch: 362, Generator Loss: 0.6560665965080261, Discriminator Loss: 1.6731330156326294
Epoch: 363, Generator Loss: 0.6527413129806519, Discriminator Loss: 1.6634390354156494
Epoch: 364, Generator Loss: 0.6465504765510559, Discriminator Loss: 1.716728925704956
Epoch: 365, Generator Loss: 0.6543140411376953, Discriminator Loss: 1.6930490732192993
Epoch: 366, Generator Loss: 0.6534043550491333, Discriminator Loss: 1.6745396852493286
Epoch: 367, Generator Loss: 0.6479657888412476, Discriminator Loss: 1.6946688890457153
Epoch: 368, Generator Loss: 0.6478835344314575, Discriminator Loss: 1.7000287771224976
Epoch: 369, Generator Loss: 0.6524452567100525, Discriminator Loss: 1.7122230529785156
Epoch: 370, Generator Loss: 0.6588584184646606, Discriminator Loss: 1.68062424659729
Epoch: 371, Generator Loss: 0.6600852608680725, Discriminator Loss: 1.7112023830413818
Epoch: 372, Generator Loss: 0.6518851518630981, Discriminator Loss: 1.688549280166626
Epoch: 373, Generator Loss: 0.650481104850769, Discriminator Loss: 1.6847975254058838
Epoch: 374, Generator Loss: 0.6486272215843201, Discriminator Loss: 1.7053890228271484
Epoch: 375, Generator Loss: 0.657839298248291, Discriminator Loss: 1.6730172634124756
Epoch: 376, Generator Loss: 0.6502650380134583, Discriminator Loss: 1.666657567024231
Epoch: 377, Generator Loss: 0.6393639445304871, Discriminator Loss: 1.7691067457199097
Epoch: 378, Generator Loss: 0.649840235710144, Discriminator Loss: 1.7038177251815796
Epoch: 379, Generator Loss: 0.6512786149978638, Discriminator Loss: 1.6509445905685425
Epoch: 380, Generator Loss: 0.6479384303092957, Discriminator Loss: 1.655727505683899
Epoch: 381, Generator Loss: 0.6518164873123169, Discriminator Loss: 1.6746279001235962
Epoch: 382, Generator Loss: 0.6529671549797058, Discriminator Loss: 1.676395297050476
Epoch: 383, Generator Loss: 0.6492168307304382, Discriminator Loss: 1.6755074262619019
Epoch: 384, Generator Loss: 0.6566900014877319, Discriminator Loss: 1.6674937009811401
Epoch: 385, Generator Loss: 0.6416869759559631, Discriminator Loss: 1.679625153541565
Epoch: 386, Generator Loss: 0.6485247015953064, Discriminator Loss: 1.6929353475570679
Epoch: 387, Generator Loss: 0.657448410987854, Discriminator Loss: 1.6521095037460327
Epoch: 388, Generator Loss: 0.6494102478027344, Discriminator Loss: 1.6663278341293335
Epoch: 389, Generator Loss: 0.6529092788696289, Discriminator Loss: 1.673516869544983
Epoch: 390, Generator Loss: 0.652161180973053, Discriminator Loss: 1.696226716041565
Epoch: 391, Generator Loss: 0.6512358784675598, Discriminator Loss: 1.6839112043380737
Epoch: 392, Generator Loss: 0.6528710126876831, Discriminator Loss: 1.703217625617981
Epoch: 393, Generator Loss: 0.6523156762123108, Discriminator Loss: 1.6728049516677856
Epoch: 394, Generator Loss: 0.6518306732177734, Discriminator Loss: 1.679080843925476
Epoch: 395, Generator Loss: 0.6523564457893372, Discriminator Loss: 1.698641061782837
Epoch: 396, Generator Loss: 0.6550713181495667, Discriminator Loss: 1.654215693473816
Epoch: 397, Generator Loss: 0.6569225192070007, Discriminator Loss: 1.6904383897781372
Epoch: 398, Generator Loss: 0.6514827013015747, Discriminator Loss: 1.681557536125183
Epoch: 399, Generator Loss: 0.6569217443466187, Discriminator Loss: 1.6766446828842163
Epoch: 400, Generator Loss: 0.655833899974823, Discriminator Loss: 1.7053451538085938
Epoch: 401, Generator Loss: 0.6608584523200989, Discriminator Loss: 1.6943312883377075
Epoch: 402, Generator Loss: 0.6633474230766296, Discriminator Loss: 1.6925908327102661
Epoch: 403, Generator Loss: 0.6640467047691345, Discriminator Loss: 1.7077337503433228
Epoch: 404, Generator Loss: 0.662066638469696, Discriminator Loss: 1.6984193325042725
Epoch: 405, Generator Loss: 0.656554639339447, Discriminator Loss: 1.6842330694198608
Epoch: 406, Generator Loss: 0.6572906374931335, Discriminator Loss: 1.6836738586425781
Epoch: 407, Generator Loss: 0.6572604775428772, Discriminator Loss: 1.668912649154663
Epoch: 408, Generator Loss: 0.6536831259727478, Discriminator Loss: 1.6939363479614258
Epoch: 409, Generator Loss: 0.6552592515945435, Discriminator Loss: 1.6606038808822632
Epoch: 410, Generator Loss: 0.6529344916343689, Discriminator Loss: 1.6881322860717773
Epoch: 411, Generator Loss: 0.648289680480957, Discriminator Loss: 1.7213157415390015
Epoch: 412, Generator Loss: 0.6481661200523376, Discriminator Loss: 1.6586428880691528
Epoch: 413, Generator Loss: 0.6449481844902039, Discriminator Loss: 1.6717207431793213
Epoch: 414, Generator Loss: 0.6488967537879944, Discriminator Loss: 1.6819826364517212
Epoch: 415, Generator Loss: 0.6490365266799927, Discriminator Loss: 1.665506362915039
Epoch: 416, Generator Loss: 0.6500602960586548, Discriminator Loss: 1.6866968870162964
Epoch: 417, Generator Loss: 0.6575499773025513, Discriminator Loss: 1.6417170763015747
Epoch: 418, Generator Loss: 0.6498726010322571, Discriminator Loss: 1.6775449514389038
Epoch: 419, Generator Loss: 0.6484846472740173, Discriminator Loss: 1.6591447591781616
Epoch: 420, Generator Loss: 0.6550324559211731, Discriminator Loss: 1.6640703678131104
Epoch: 421, Generator Loss: 0.6562325358390808, Discriminator Loss: 1.6806488037109375
Epoch: 422, Generator Loss: 0.6425971984863281, Discriminator Loss: 1.6959245204925537
Epoch: 423, Generator Loss: 0.6473649740219116, Discriminator Loss: 1.6937849521636963
Epoch: 424, Generator Loss: 0.6484030485153198, Discriminator Loss: 1.6990060806274414
Epoch: 425, Generator Loss: 0.650237500667572, Discriminator Loss: 1.6693354845046997
Epoch: 426, Generator Loss: 0.6468190550804138, Discriminator Loss: 1.6998389959335327
Epoch: 427, Generator Loss: 0.6389466524124146, Discriminator Loss: 1.66777765750885
Epoch: 428, Generator Loss: 0.6454753875732422, Discriminator Loss: 1.6802736520767212
Epoch: 429, Generator Loss: 0.642048716545105, Discriminator Loss: 1.7517350912094116
Epoch: 430, Generator Loss: 0.6427013278007507, Discriminator Loss: 1.6579419374465942
Epoch: 431, Generator Loss: 0.6410039067268372, Discriminator Loss: 1.6682668924331665
Epoch: 432, Generator Loss: 0.6349992752075195, Discriminator Loss: 1.7153549194335938
Epoch: 433, Generator Loss: 0.6312108039855957, Discriminator Loss: 1.6873199939727783
Epoch: 434, Generator Loss: 0.6441245675086975, Discriminator Loss: 1.6712820529937744
Epoch: 435, Generator Loss: 0.6489973664283752, Discriminator Loss: 1.7061254978179932
Epoch: 436, Generator Loss: 0.6436470150947571, Discriminator Loss: 1.6660109758377075
Epoch: 437, Generator Loss: 0.6382003426551819, Discriminator Loss: 1.7276619672775269
Epoch: 438, Generator Loss: 0.6365179419517517, Discriminator Loss: 1.7289892435073853
Epoch: 439, Generator Loss: 0.6418127417564392, Discriminator Loss: 1.6832600831985474
Epoch: 440, Generator Loss: 0.6386955380439758, Discriminator Loss: 1.7099024057388306
Epoch: 441, Generator Loss: 0.6392078995704651, Discriminator Loss: 1.7121803760528564
Epoch: 442, Generator Loss: 0.6381601691246033, Discriminator Loss: 1.6889714002609253
Epoch: 443, Generator Loss: 0.6391387581825256, Discriminator Loss: 1.6843321323394775
Epoch: 444, Generator Loss: 0.6372507810592651, Discriminator Loss: 1.6699146032333374
Epoch: 445, Generator Loss: 0.6365130543708801, Discriminator Loss: 1.6779212951660156
Epoch: 446, Generator Loss: 0.6298976540565491, Discriminator Loss: 1.6823819875717163
Epoch: 447, Generator Loss: 0.6354244947433472, Discriminator Loss: 1.6564441919326782
Epoch: 448, Generator Loss: 0.6344457864761353, Discriminator Loss: 1.7039587497711182
Epoch: 449, Generator Loss: 0.6365236639976501, Discriminator Loss: 1.681031584739685
Epoch: 450, Generator Loss: 0.6305633187294006, Discriminator Loss: 1.7422239780426025
Epoch: 451, Generator Loss: 0.6382112503051758, Discriminator Loss: 1.7130056619644165
Epoch: 452, Generator Loss: 0.6380292177200317, Discriminator Loss: 1.6965081691741943
Epoch: 453, Generator Loss: 0.6336358189582825, Discriminator Loss: 1.6863670349121094
Epoch: 454, Generator Loss: 0.6311357617378235, Discriminator Loss: 1.699590802192688
Epoch: 455, Generator Loss: 0.6267305016517639, Discriminator Loss: 1.7259668111801147
Epoch: 456, Generator Loss: 0.6269058585166931, Discriminator Loss: 1.7010345458984375
Epoch: 457, Generator Loss: 0.6302587389945984, Discriminator Loss: 1.7359391450881958
Epoch: 458, Generator Loss: 0.6227053999900818, Discriminator Loss: 1.7203015089035034
Epoch: 459, Generator Loss: 0.6242478489875793, Discriminator Loss: 1.7130452394485474
Epoch: 460, Generator Loss: 0.6421509385108948, Discriminator Loss: 1.7371852397918701
Epoch: 461, Generator Loss: 0.6391928791999817, Discriminator Loss: 1.7134233713150024
Epoch: 462, Generator Loss: 0.6387771964073181, Discriminator Loss: 1.746658205986023
Epoch: 463, Generator Loss: 0.6373630166053772, Discriminator Loss: 1.7320626974105835
Epoch: 464, Generator Loss: 0.6409958004951477, Discriminator Loss: 1.7239080667495728
Epoch: 465, Generator Loss: 0.6380200982093811, Discriminator Loss: 1.7392240762710571
Epoch: 466, Generator Loss: 0.6382822394371033, Discriminator Loss: 1.7268637418746948
Epoch: 467, Generator Loss: 0.6472324728965759, Discriminator Loss: 1.6907659769058228
Epoch: 468, Generator Loss: 0.6422556638717651, Discriminator Loss: 1.740857720375061
Epoch: 469, Generator Loss: 0.6468260884284973, Discriminator Loss: 1.7115331888198853
Epoch: 470, Generator Loss: 0.6374098062515259, Discriminator Loss: 1.7264858484268188
Epoch: 471, Generator Loss: 0.6377214193344116, Discriminator Loss: 1.7260189056396484
Epoch: 472, Generator Loss: 0.6398425698280334, Discriminator Loss: 1.72616708278656
Epoch: 473, Generator Loss: 0.6440829038619995, Discriminator Loss: 1.7271982431411743
Epoch: 474, Generator Loss: 0.644637405872345, Discriminator Loss: 1.6986690759658813
Epoch: 475, Generator Loss: 0.6458317041397095, Discriminator Loss: 1.7493088245391846
Epoch: 476, Generator Loss: 0.6408771872520447, Discriminator Loss: 1.7010904550552368
Epoch: 477, Generator Loss: 0.6440266966819763, Discriminator Loss: 1.6921608448028564
Epoch: 478, Generator Loss: 0.6328639388084412, Discriminator Loss: 1.7435247898101807
Epoch: 479, Generator Loss: 0.6493088006973267, Discriminator Loss: 1.6666240692138672
Epoch: 480, Generator Loss: 0.6469473242759705, Discriminator Loss: 1.741018533706665
Epoch: 481, Generator Loss: 0.6483659148216248, Discriminator Loss: 1.7095192670822144
Epoch: 482, Generator Loss: 0.6544705629348755, Discriminator Loss: 1.7004305124282837
Epoch: 483, Generator Loss: 0.639735996723175, Discriminator Loss: 1.6825374364852905
Epoch: 484, Generator Loss: 0.6387744545936584, Discriminator Loss: 1.7525376081466675
Epoch: 485, Generator Loss: 0.6404043436050415, Discriminator Loss: 1.7331864833831787
Epoch: 486, Generator Loss: 0.6376029253005981, Discriminator Loss: 1.7226918935775757
Epoch: 487, Generator Loss: 0.6497175097465515, Discriminator Loss: 1.718881607055664
Epoch: 488, Generator Loss: 0.6383171677589417, Discriminator Loss: 1.7296276092529297
Epoch: 489, Generator Loss: 0.6386598348617554, Discriminator Loss: 1.7561150789260864
Epoch: 490, Generator Loss: 0.6334605813026428, Discriminator Loss: 1.7275303602218628
Epoch: 491, Generator Loss: 0.6460512280464172, Discriminator Loss: 1.7396091222763062
Epoch: 492, Generator Loss: 0.6466561555862427, Discriminator Loss: 1.7021329402923584
Epoch: 493, Generator Loss: 0.6545848846435547, Discriminator Loss: 1.675658106803894
Epoch: 494, Generator Loss: 0.6409793496131897, Discriminator Loss: 1.7288497686386108
Epoch: 495, Generator Loss: 0.6479927897453308, Discriminator Loss: 1.7237962484359741
Epoch: 496, Generator Loss: 0.6459988355636597, Discriminator Loss: 1.7035281658172607
Epoch: 497, Generator Loss: 0.6468120217323303, Discriminator Loss: 1.732664942741394
Epoch: 498, Generator Loss: 0.646313488483429, Discriminator Loss: 1.7399671077728271
Epoch: 499, Generator Loss: 0.6446083188056946, Discriminator Loss: 1.7300862073898315
Epoch: 500, Generator Loss: 0.6465867757797241, Discriminator Loss: 1.7159841060638428
Epoch: 501, Generator Loss: 0.6460285186767578, Discriminator Loss: 1.6874383687973022
Epoch: 502, Generator Loss: 0.6456257104873657, Discriminator Loss: 1.7023175954818726
Epoch: 503, Generator Loss: 0.643633246421814, Discriminator Loss: 1.682584524154663
Epoch: 504, Generator Loss: 0.6315022110939026, Discriminator Loss: 1.712424397468567
Epoch: 505, Generator Loss: 0.6404158473014832, Discriminator Loss: 1.7217235565185547
Epoch: 506, Generator Loss: 0.6407049894332886, Discriminator Loss: 1.7395509481430054
Epoch: 507, Generator Loss: 0.651261031627655, Discriminator Loss: 1.7207889556884766
Epoch: 508, Generator Loss: 0.6371370553970337, Discriminator Loss: 1.7288981676101685
Epoch: 509, Generator Loss: 0.6368877291679382, Discriminator Loss: 1.733722448348999
Epoch: 510, Generator Loss: 0.6415016055107117, Discriminator Loss: 1.699857473373413
Epoch: 511, Generator Loss: 0.6460474133491516, Discriminator Loss: 1.7265273332595825
Epoch: 512, Generator Loss: 0.6464323401451111, Discriminator Loss: 1.691421627998352
Epoch: 513, Generator Loss: 0.6431714296340942, Discriminator Loss: 1.7233145236968994
Epoch: 514, Generator Loss: 0.6478354334831238, Discriminator Loss: 1.696202278137207
Epoch: 515, Generator Loss: 0.6436597108840942, Discriminator Loss: 1.689874529838562
Epoch: 516, Generator Loss: 0.6445439457893372, Discriminator Loss: 1.7291213274002075
Epoch: 517, Generator Loss: 0.6450526118278503, Discriminator Loss: 1.6791390180587769
Epoch: 518, Generator Loss: 0.6417635679244995, Discriminator Loss: 1.7001069784164429
Epoch: 519, Generator Loss: 0.6416174173355103, Discriminator Loss: 1.6857212781906128
Epoch: 520, Generator Loss: 0.6346318125724792, Discriminator Loss: 1.733219861984253
Epoch: 521, Generator Loss: 0.6401599645614624, Discriminator Loss: 1.7015329599380493
Epoch: 522, Generator Loss: 0.6417166590690613, Discriminator Loss: 1.7246837615966797
Epoch: 523, Generator Loss: 0.6437986493110657, Discriminator Loss: 1.7250064611434937
Epoch: 524, Generator Loss: 0.6435602903366089, Discriminator Loss: 1.714203119277954
Epoch: 525, Generator Loss: 0.6505936980247498, Discriminator Loss: 1.6878184080123901
Epoch: 526, Generator Loss: 0.6451969742774963, Discriminator Loss: 1.706188440322876
Epoch: 527, Generator Loss: 0.6391844153404236, Discriminator Loss: 1.71918785572052
Epoch: 528, Generator Loss: 0.6497907638549805, Discriminator Loss: 1.69139564037323
Epoch: 529, Generator Loss: 0.6435853242874146, Discriminator Loss: 1.7027615308761597
Epoch: 530, Generator Loss: 0.6478977203369141, Discriminator Loss: 1.6680095195770264
Epoch: 531, Generator Loss: 0.6422362923622131, Discriminator Loss: 1.6901931762695312
Epoch: 532, Generator Loss: 0.6395028233528137, Discriminator Loss: 1.7183085680007935
Epoch: 533, Generator Loss: 0.6390719413757324, Discriminator Loss: 1.752616286277771
Epoch: 534, Generator Loss: 0.6461448073387146, Discriminator Loss: 1.7244123220443726
Epoch: 535, Generator Loss: 0.6382750272750854, Discriminator Loss: 1.7053464651107788
Epoch: 536, Generator Loss: 0.6468350291252136, Discriminator Loss: 1.7276287078857422
Epoch: 537, Generator Loss: 0.6451752781867981, Discriminator Loss: 1.733986735343933
Epoch: 538, Generator Loss: 0.6455855369567871, Discriminator Loss: 1.7040364742279053
Epoch: 539, Generator Loss: 0.6458898782730103, Discriminator Loss: 1.7507258653640747
Epoch: 540, Generator Loss: 0.6437700390815735, Discriminator Loss: 1.6975432634353638
Epoch: 541, Generator Loss: 0.6432673335075378, Discriminator Loss: 1.682424783706665
Epoch: 542, Generator Loss: 0.6449616551399231, Discriminator Loss: 1.7289115190505981
Epoch: 543, Generator Loss: 0.6366567611694336, Discriminator Loss: 1.708222508430481
Epoch: 544, Generator Loss: 0.6322803497314453, Discriminator Loss: 1.7114955186843872
Epoch: 545, Generator Loss: 0.6455966830253601, Discriminator Loss: 1.6807993650436401
Epoch: 546, Generator Loss: 0.6406574845314026, Discriminator Loss: 1.7394542694091797
Epoch: 547, Generator Loss: 0.6416850686073303, Discriminator Loss: 1.724012017250061
Epoch: 548, Generator Loss: 0.6401681303977966, Discriminator Loss: 1.704384446144104
Epoch: 549, Generator Loss: 0.6320453882217407, Discriminator Loss: 1.7224096059799194
Epoch: 550, Generator Loss: 0.6362686157226562, Discriminator Loss: 1.7556642293930054
Epoch: 551, Generator Loss: 0.6494402885437012, Discriminator Loss: 1.6904728412628174
Epoch: 552, Generator Loss: 0.6489729285240173, Discriminator Loss: 1.7021461725234985
Epoch: 553, Generator Loss: 0.649836540222168, Discriminator Loss: 1.715810775756836
Epoch: 554, Generator Loss: 0.6393277049064636, Discriminator Loss: 1.7054554224014282
Epoch: 555, Generator Loss: 0.6521444320678711, Discriminator Loss: 1.72475266456604
Epoch: 556, Generator Loss: 0.64891517162323, Discriminator Loss: 1.6891615390777588
Epoch: 557, Generator Loss: 0.6475425362586975, Discriminator Loss: 1.7075157165527344
Epoch: 558, Generator Loss: 0.6434658169746399, Discriminator Loss: 1.7023178339004517
Epoch: 559, Generator Loss: 0.6396093368530273, Discriminator Loss: 1.6593021154403687
Epoch: 560, Generator Loss: 0.6456161737442017, Discriminator Loss: 1.7162034511566162
Epoch: 561, Generator Loss: 0.6336517930030823, Discriminator Loss: 1.7600843906402588
Epoch: 562, Generator Loss: 0.6502593159675598, Discriminator Loss: 1.6701890230178833
Epoch: 563, Generator Loss: 0.6462160348892212, Discriminator Loss: 1.7266788482666016
Epoch: 564, Generator Loss: 0.6390146613121033, Discriminator Loss: 1.7181769609451294
Epoch: 565, Generator Loss: 0.6494898796081543, Discriminator Loss: 1.7173501253128052
Epoch: 566, Generator Loss: 0.6480706334114075, Discriminator Loss: 1.7200766801834106
Epoch: 567, Generator Loss: 0.6385363340377808, Discriminator Loss: 1.7352887392044067
Epoch: 568, Generator Loss: 0.645427405834198, Discriminator Loss: 1.6773624420166016
Epoch: 569, Generator Loss: 0.6406273245811462, Discriminator Loss: 1.7098504304885864
Epoch: 570, Generator Loss: 0.6411681771278381, Discriminator Loss: 1.689942717552185
Epoch: 571, Generator Loss: 0.6484667658805847, Discriminator Loss: 1.7147146463394165
Epoch: 572, Generator Loss: 0.6379316449165344, Discriminator Loss: 1.71744704246521
Epoch: 573, Generator Loss: 0.6381335854530334, Discriminator Loss: 1.698133111000061
Epoch: 574, Generator Loss: 0.6452977061271667, Discriminator Loss: 1.715394139289856
Epoch: 575, Generator Loss: 0.6459864377975464, Discriminator Loss: 1.7314311265945435
Epoch: 576, Generator Loss: 0.6459803581237793, Discriminator Loss: 1.7258292436599731
Epoch: 577, Generator Loss: 0.6444963216781616, Discriminator Loss: 1.7474660873413086
Epoch: 578, Generator Loss: 0.6510491967201233, Discriminator Loss: 1.68548583984375
Epoch: 579, Generator Loss: 0.6476432085037231, Discriminator Loss: 1.6777387857437134
Epoch: 580, Generator Loss: 0.6536946296691895, Discriminator Loss: 1.691680908203125
Epoch: 581, Generator Loss: 0.6467289328575134, Discriminator Loss: 1.7328191995620728
Epoch: 582, Generator Loss: 0.6532516479492188, Discriminator Loss: 1.7276585102081299
Epoch: 583, Generator Loss: 0.6447929739952087, Discriminator Loss: 1.677324891090393
Epoch: 584, Generator Loss: 0.6445941925048828, Discriminator Loss: 1.7300300598144531
Epoch: 585, Generator Loss: 0.6438347101211548, Discriminator Loss: 1.6921042203903198
Epoch: 586, Generator Loss: 0.6491560935974121, Discriminator Loss: 1.7227003574371338
Epoch: 587, Generator Loss: 0.6552411317825317, Discriminator Loss: 1.68121337890625
Epoch: 588, Generator Loss: 0.6446952819824219, Discriminator Loss: 1.7126566171646118
Epoch: 589, Generator Loss: 0.6544064283370972, Discriminator Loss: 1.7300485372543335
Epoch: 590, Generator Loss: 0.6547861695289612, Discriminator Loss: 1.688804268836975
Epoch: 591, Generator Loss: 0.6581984162330627, Discriminator Loss: 1.7197335958480835
Epoch: 592, Generator Loss: 0.6561910510063171, Discriminator Loss: 1.7396231889724731
Epoch: 593, Generator Loss: 0.6609682440757751, Discriminator Loss: 1.7209975719451904
Epoch: 594, Generator Loss: 0.6471766233444214, Discriminator Loss: 1.6934856176376343
Epoch: 595, Generator Loss: 0.6462430953979492, Discriminator Loss: 1.6919153928756714
Epoch: 596, Generator Loss: 0.6408125758171082, Discriminator Loss: 1.7283484935760498
Epoch: 597, Generator Loss: 0.6493585705757141, Discriminator Loss: 1.7023932933807373
Epoch: 598, Generator Loss: 0.650325357913971, Discriminator Loss: 1.7073715925216675
Epoch: 599, Generator Loss: 0.6464393734931946, Discriminator Loss: 1.7107115983963013
Epoch: 600, Generator Loss: 0.640667736530304, Discriminator Loss: 1.7643671035766602
Epoch: 601, Generator Loss: 0.6360573768615723, Discriminator Loss: 1.7052233219146729
Epoch: 602, Generator Loss: 0.6408621668815613, Discriminator Loss: 1.7274101972579956
Epoch: 603, Generator Loss: 0.6508602499961853, Discriminator Loss: 1.700880765914917
Epoch: 604, Generator Loss: 0.6443660855293274, Discriminator Loss: 1.6816424131393433
Epoch: 605, Generator Loss: 0.6543483734130859, Discriminator Loss: 1.717677354812622
Epoch: 606, Generator Loss: 0.644430935382843, Discriminator Loss: 1.7002851963043213
Epoch: 607, Generator Loss: 0.6484087705612183, Discriminator Loss: 1.6780575513839722
Epoch: 608, Generator Loss: 0.6374434232711792, Discriminator Loss: 1.6895710229873657
Epoch: 609, Generator Loss: 0.6419127583503723, Discriminator Loss: 1.7350033521652222
Epoch: 610, Generator Loss: 0.6450770497322083, Discriminator Loss: 1.6905438899993896
Epoch: 611, Generator Loss: 0.6390124559402466, Discriminator Loss: 1.723897099494934
Epoch: 612, Generator Loss: 0.6385976672172546, Discriminator Loss: 1.7208298444747925
Epoch: 613, Generator Loss: 0.6532154679298401, Discriminator Loss: 1.721074104309082
Epoch: 614, Generator Loss: 0.6480951309204102, Discriminator Loss: 1.685648798942566
Epoch: 615, Generator Loss: 0.6473840475082397, Discriminator Loss: 1.7613219022750854
Epoch: 616, Generator Loss: 0.6498456597328186, Discriminator Loss: 1.698244333267212
Epoch: 617, Generator Loss: 0.6521903276443481, Discriminator Loss: 1.7075685262680054
Epoch: 618, Generator Loss: 0.647883415222168, Discriminator Loss: 1.7007532119750977
Epoch: 619, Generator Loss: 0.6451229453086853, Discriminator Loss: 1.7299894094467163
Epoch: 620, Generator Loss: 0.6522132754325867, Discriminator Loss: 1.7024046182632446
Epoch: 621, Generator Loss: 0.6495028734207153, Discriminator Loss: 1.7064999341964722
Epoch: 622, Generator Loss: 0.6487794518470764, Discriminator Loss: 1.7128114700317383
Epoch: 623, Generator Loss: 0.6468440294265747, Discriminator Loss: 1.7018928527832031
Epoch: 624, Generator Loss: 0.6501750946044922, Discriminator Loss: 1.704114317893982
Epoch: 625, Generator Loss: 0.6519048810005188, Discriminator Loss: 1.7134772539138794
Epoch: 626, Generator Loss: 0.6496487855911255, Discriminator Loss: 1.7133853435516357
Epoch: 627, Generator Loss: 0.6488069295883179, Discriminator Loss: 1.7192658185958862
Epoch: 628, Generator Loss: 0.6520361304283142, Discriminator Loss: 1.7396137714385986
Epoch: 629, Generator Loss: 0.6389988660812378, Discriminator Loss: 1.7110227346420288
Epoch: 630, Generator Loss: 0.6508556008338928, Discriminator Loss: 1.6788095235824585
Epoch: 631, Generator Loss: 0.6489812135696411, Discriminator Loss: 1.7069934606552124
Epoch: 632, Generator Loss: 0.6446418762207031, Discriminator Loss: 1.6941732168197632
Epoch: 633, Generator Loss: 0.6439553499221802, Discriminator Loss: 1.703308343887329
Epoch: 634, Generator Loss: 0.645875096321106, Discriminator Loss: 1.7379515171051025
Epoch: 635, Generator Loss: 0.6495761871337891, Discriminator Loss: 1.7349752187728882
Epoch: 636, Generator Loss: 0.6406208276748657, Discriminator Loss: 1.7139689922332764
Epoch: 637, Generator Loss: 0.6434067487716675, Discriminator Loss: 1.723318338394165
Epoch: 638, Generator Loss: 0.6582432985305786, Discriminator Loss: 1.7031503915786743
Epoch: 639, Generator Loss: 0.6505247950553894, Discriminator Loss: 1.724421501159668
Epoch: 640, Generator Loss: 0.6423639059066772, Discriminator Loss: 1.708424687385559
Epoch: 641, Generator Loss: 0.6457765698432922, Discriminator Loss: 1.6687899827957153
Epoch: 642, Generator Loss: 0.6474562287330627, Discriminator Loss: 1.7423897981643677
Epoch: 643, Generator Loss: 0.6507746577262878, Discriminator Loss: 1.7254942655563354
Epoch: 644, Generator Loss: 0.652487576007843, Discriminator Loss: 1.6841703653335571
Epoch: 645, Generator Loss: 0.642878532409668, Discriminator Loss: 1.709596037864685
Epoch: 646, Generator Loss: 0.6481768488883972, Discriminator Loss: 1.6813005208969116
Epoch: 647, Generator Loss: 0.644088864326477, Discriminator Loss: 1.7251845598220825
Epoch: 648, Generator Loss: 0.6482861638069153, Discriminator Loss: 1.7014007568359375
Epoch: 649, Generator Loss: 0.6480976939201355, Discriminator Loss: 1.7090208530426025
Epoch: 650, Generator Loss: 0.641251266002655, Discriminator Loss: 1.7292425632476807
Epoch: 651, Generator Loss: 0.6420422792434692, Discriminator Loss: 1.7293959856033325
Epoch: 652, Generator Loss: 0.6498850584030151, Discriminator Loss: 1.6765506267547607
Epoch: 653, Generator Loss: 0.6470620036125183, Discriminator Loss: 1.6961252689361572
Epoch: 654, Generator Loss: 0.6525403261184692, Discriminator Loss: 1.7000200748443604
Epoch: 655, Generator Loss: 0.6552196741104126, Discriminator Loss: 1.7249294519424438
Epoch: 656, Generator Loss: 0.6552778482437134, Discriminator Loss: 1.7124086618423462
Epoch: 657, Generator Loss: 0.6496023535728455, Discriminator Loss: 1.7230451107025146
Epoch: 658, Generator Loss: 0.6598016023635864, Discriminator Loss: 1.6910802125930786
Epoch: 659, Generator Loss: 0.6551877856254578, Discriminator Loss: 1.727120041847229
Epoch: 660, Generator Loss: 0.6588956117630005, Discriminator Loss: 1.6999547481536865
Epoch: 661, Generator Loss: 0.6610201597213745, Discriminator Loss: 1.6816712617874146
Epoch: 662, Generator Loss: 0.6516863703727722, Discriminator Loss: 1.7263306379318237
Epoch: 663, Generator Loss: 0.6554455161094666, Discriminator Loss: 1.694117784500122
Epoch: 664, Generator Loss: 0.6591094732284546, Discriminator Loss: 1.7212132215499878
Epoch: 665, Generator Loss: 0.6475030779838562, Discriminator Loss: 1.688982605934143
Epoch: 666, Generator Loss: 0.6522754430770874, Discriminator Loss: 1.6969629526138306
Epoch: 667, Generator Loss: 0.6547427177429199, Discriminator Loss: 1.7026126384735107
Epoch: 668, Generator Loss: 0.6445921659469604, Discriminator Loss: 1.7033116817474365
Epoch: 669, Generator Loss: 0.6509907841682434, Discriminator Loss: 1.6917507648468018
Epoch: 670, Generator Loss: 0.6414352059364319, Discriminator Loss: 1.6912163496017456
Epoch: 671, Generator Loss: 0.6466999053955078, Discriminator Loss: 1.716010332107544
Epoch: 672, Generator Loss: 0.6409691572189331, Discriminator Loss: 1.6857430934906006
Epoch: 673, Generator Loss: 0.6476035714149475, Discriminator Loss: 1.6896809339523315
Epoch: 674, Generator Loss: 0.6481903195381165, Discriminator Loss: 1.7004072666168213
Epoch: 675, Generator Loss: 0.6451383829116821, Discriminator Loss: 1.6611257791519165
Epoch: 676, Generator Loss: 0.6327113509178162, Discriminator Loss: 1.7317324876785278
Epoch: 677, Generator Loss: 0.6377248167991638, Discriminator Loss: 1.7006465196609497
Epoch: 678, Generator Loss: 0.6433922052383423, Discriminator Loss: 1.7071669101715088
Epoch: 679, Generator Loss: 0.6497190594673157, Discriminator Loss: 1.6828477382659912
Epoch: 680, Generator Loss: 0.6439818143844604, Discriminator Loss: 1.7026736736297607
Epoch: 681, Generator Loss: 0.6469658613204956, Discriminator Loss: 1.6991424560546875
Epoch: 682, Generator Loss: 0.6398240923881531, Discriminator Loss: 1.6997405290603638
Epoch: 683, Generator Loss: 0.6391592621803284, Discriminator Loss: 1.7318267822265625
Epoch: 684, Generator Loss: 0.6408663392066956, Discriminator Loss: 1.673862338066101
Epoch: 685, Generator Loss: 0.6500198245048523, Discriminator Loss: 1.685040831565857
Epoch: 686, Generator Loss: 0.6358771920204163, Discriminator Loss: 1.7216780185699463
Epoch: 687, Generator Loss: 0.6395267844200134, Discriminator Loss: 1.7305868864059448
Epoch: 688, Generator Loss: 0.6420048475265503, Discriminator Loss: 1.699788212776184
Epoch: 689, Generator Loss: 0.6447049379348755, Discriminator Loss: 1.7149924039840698
Epoch: 690, Generator Loss: 0.6535834074020386, Discriminator Loss: 1.7112070322036743
Epoch: 691, Generator Loss: 0.6467687487602234, Discriminator Loss: 1.691772699356079
Epoch: 692, Generator Loss: 0.636663019657135, Discriminator Loss: 1.6497966051101685
Epoch: 693, Generator Loss: 0.6400681734085083, Discriminator Loss: 1.6793047189712524
Epoch: 694, Generator Loss: 0.643056333065033, Discriminator Loss: 1.6821733713150024
Epoch: 695, Generator Loss: 0.64157634973526, Discriminator Loss: 1.6699150800704956
Epoch: 696, Generator Loss: 0.6401907205581665, Discriminator Loss: 1.6892337799072266
Epoch: 697, Generator Loss: 0.6330589652061462, Discriminator Loss: 1.677409052848816
Epoch: 698, Generator Loss: 0.6385116577148438, Discriminator Loss: 1.7031491994857788
Epoch: 699, Generator Loss: 0.6364205479621887, Discriminator Loss: 1.733725905418396
Epoch: 700, Generator Loss: 0.6317831873893738, Discriminator Loss: 1.7031773328781128
Epoch: 701, Generator Loss: 0.6478490233421326, Discriminator Loss: 1.7116345167160034
Epoch: 702, Generator Loss: 0.6404679417610168, Discriminator Loss: 1.6890389919281006
Epoch: 703, Generator Loss: 0.6385825872421265, Discriminator Loss: 1.7102320194244385
Epoch: 704, Generator Loss: 0.6532966494560242, Discriminator Loss: 1.7021243572235107
Epoch: 705, Generator Loss: 0.6487284898757935, Discriminator Loss: 1.6546480655670166
Epoch: 706, Generator Loss: 0.6519132256507874, Discriminator Loss: 1.6760019063949585
Epoch: 707, Generator Loss: 0.6471331715583801, Discriminator Loss: 1.684066891670227
Epoch: 708, Generator Loss: 0.6470133066177368, Discriminator Loss: 1.6751196384429932
Epoch: 709, Generator Loss: 0.6434577107429504, Discriminator Loss: 1.6570206880569458
Epoch: 710, Generator Loss: 0.6412639617919922, Discriminator Loss: 1.6884324550628662
Epoch: 711, Generator Loss: 0.6441124677658081, Discriminator Loss: 1.7134150266647339
Epoch: 712, Generator Loss: 0.6473022699356079, Discriminator Loss: 1.6906318664550781
Epoch: 713, Generator Loss: 0.6435247659683228, Discriminator Loss: 1.6807698011398315
Epoch: 714, Generator Loss: 0.6436974406242371, Discriminator Loss: 1.7165021896362305
Epoch: 715, Generator Loss: 0.6473265886306763, Discriminator Loss: 1.7013685703277588
Epoch: 716, Generator Loss: 0.6397422552108765, Discriminator Loss: 1.7117912769317627
Epoch: 717, Generator Loss: 0.6453207731246948, Discriminator Loss: 1.6846321821212769
Epoch: 718, Generator Loss: 0.6398537755012512, Discriminator Loss: 1.7061866521835327
Epoch: 719, Generator Loss: 0.6508920788764954, Discriminator Loss: 1.6802626848220825
Epoch: 720, Generator Loss: 0.6444445848464966, Discriminator Loss: 1.7013481855392456
Epoch: 721, Generator Loss: 0.6505058407783508, Discriminator Loss: 1.716732144355774
Epoch: 722, Generator Loss: 0.6473464965820312, Discriminator Loss: 1.7014559507369995
Epoch: 723, Generator Loss: 0.6448990702629089, Discriminator Loss: 1.6986925601959229
Epoch: 724, Generator Loss: 0.6504625678062439, Discriminator Loss: 1.6873323917388916
Epoch: 725, Generator Loss: 0.6497464179992676, Discriminator Loss: 1.6949042081832886
Epoch: 726, Generator Loss: 0.6522478461265564, Discriminator Loss: 1.6927471160888672
Epoch: 727, Generator Loss: 0.6479756236076355, Discriminator Loss: 1.6814329624176025
Epoch: 728, Generator Loss: 0.6464875340461731, Discriminator Loss: 1.6981178522109985
Epoch: 729, Generator Loss: 0.6415838599205017, Discriminator Loss: 1.6870149374008179
Epoch: 730, Generator Loss: 0.6426458954811096, Discriminator Loss: 1.6779733896255493
Epoch: 731, Generator Loss: 0.64557945728302, Discriminator Loss: 1.6843950748443604
Epoch: 732, Generator Loss: 0.6513968706130981, Discriminator Loss: 1.6686490774154663
Epoch: 733, Generator Loss: 0.6459425091743469, Discriminator Loss: 1.7219676971435547
Epoch: 734, Generator Loss: 0.6419215798377991, Discriminator Loss: 1.6957749128341675
Epoch: 735, Generator Loss: 0.6463278532028198, Discriminator Loss: 1.6745575666427612
Epoch: 736, Generator Loss: 0.6488265991210938, Discriminator Loss: 1.7147865295410156
Epoch: 737, Generator Loss: 0.6525413393974304, Discriminator Loss: 1.6956892013549805
Epoch: 738, Generator Loss: 0.6470385193824768, Discriminator Loss: 1.7312358617782593
Epoch: 739, Generator Loss: 0.6473317742347717, Discriminator Loss: 1.6918449401855469
Epoch: 740, Generator Loss: 0.6487342119216919, Discriminator Loss: 1.6814501285552979
Epoch: 741, Generator Loss: 0.6435590982437134, Discriminator Loss: 1.6876095533370972
Epoch: 742, Generator Loss: 0.6395849585533142, Discriminator Loss: 1.699951410293579
Epoch: 743, Generator Loss: 0.644643247127533, Discriminator Loss: 1.6984503269195557
Epoch: 744, Generator Loss: 0.6471146941184998, Discriminator Loss: 1.712123990058899
Epoch: 745, Generator Loss: 0.6470648050308228, Discriminator Loss: 1.671948790550232
Epoch: 746, Generator Loss: 0.6399591565132141, Discriminator Loss: 1.68153977394104
Epoch: 747, Generator Loss: 0.6464433073997498, Discriminator Loss: 1.7010051012039185
Epoch: 748, Generator Loss: 0.6498083472251892, Discriminator Loss: 1.701948881149292
Epoch: 749, Generator Loss: 0.6479542851448059, Discriminator Loss: 1.6838973760604858
Epoch: 750, Generator Loss: 0.64307701587677, Discriminator Loss: 1.6997483968734741
Epoch: 751, Generator Loss: 0.6515929102897644, Discriminator Loss: 1.6954702138900757
Epoch: 752, Generator Loss: 0.6460015177726746, Discriminator Loss: 1.706880807876587
Epoch: 753, Generator Loss: 0.6415852904319763, Discriminator Loss: 1.68752920627594
Epoch: 754, Generator Loss: 0.6494743227958679, Discriminator Loss: 1.6795198917388916
Epoch: 755, Generator Loss: 0.6482347846031189, Discriminator Loss: 1.7042187452316284
Epoch: 756, Generator Loss: 0.6443434953689575, Discriminator Loss: 1.6941226720809937
Epoch: 757, Generator Loss: 0.6454507112503052, Discriminator Loss: 1.6864975690841675
Epoch: 758, Generator Loss: 0.643109917640686, Discriminator Loss: 1.6671899557113647
Epoch: 759, Generator Loss: 0.6529294848442078, Discriminator Loss: 1.6861461400985718
Epoch: 760, Generator Loss: 0.6438174247741699, Discriminator Loss: 1.70931875705719
Epoch: 761, Generator Loss: 0.6582310795783997, Discriminator Loss: 1.654891014099121
Epoch: 762, Generator Loss: 0.6489982604980469, Discriminator Loss: 1.6764832735061646
Epoch: 763, Generator Loss: 0.6463857293128967, Discriminator Loss: 1.6900039911270142
Epoch: 764, Generator Loss: 0.6573041677474976, Discriminator Loss: 1.6679399013519287
Epoch: 765, Generator Loss: 0.6482601761817932, Discriminator Loss: 1.6725728511810303
Epoch: 766, Generator Loss: 0.6458998918533325, Discriminator Loss: 1.6830204725265503
Epoch: 767, Generator Loss: 0.6491058468818665, Discriminator Loss: 1.676790475845337
Epoch: 768, Generator Loss: 0.6470144987106323, Discriminator Loss: 1.6808502674102783
Epoch: 769, Generator Loss: 0.642812192440033, Discriminator Loss: 1.6906501054763794
Epoch: 770, Generator Loss: 0.6365771889686584, Discriminator Loss: 1.6672942638397217
Epoch: 771, Generator Loss: 0.6453655362129211, Discriminator Loss: 1.6843831539154053
Epoch: 772, Generator Loss: 0.6340989470481873, Discriminator Loss: 1.6859967708587646
Epoch: 773, Generator Loss: 0.6429591178894043, Discriminator Loss: 1.722176194190979
Epoch: 774, Generator Loss: 0.6477354764938354, Discriminator Loss: 1.6444982290267944
Epoch: 775, Generator Loss: 0.6292028427124023, Discriminator Loss: 1.6534775495529175
Epoch: 776, Generator Loss: 0.6249591708183289, Discriminator Loss: 1.715687870979309
Epoch: 777, Generator Loss: 0.6299771070480347, Discriminator Loss: 1.6845495700836182
Epoch: 778, Generator Loss: 0.6275840401649475, Discriminator Loss: 1.7489240169525146
Epoch: 779, Generator Loss: 0.6371291875839233, Discriminator Loss: 1.7346246242523193
Epoch: 780, Generator Loss: 0.6447266340255737, Discriminator Loss: 1.6902333498001099
Epoch: 781, Generator Loss: 0.6300956010818481, Discriminator Loss: 1.667650580406189
Epoch: 782, Generator Loss: 0.6396809220314026, Discriminator Loss: 1.658516764640808
Epoch: 783, Generator Loss: 0.6314967274665833, Discriminator Loss: 1.71120285987854
Epoch: 784, Generator Loss: 0.6397515535354614, Discriminator Loss: 1.6867932081222534
Epoch: 785, Generator Loss: 0.642111599445343, Discriminator Loss: 1.714389443397522
Epoch: 786, Generator Loss: 0.6434993743896484, Discriminator Loss: 1.6609569787979126
Epoch: 787, Generator Loss: 0.6469904780387878, Discriminator Loss: 1.738085389137268
Epoch: 788, Generator Loss: 0.6392697691917419, Discriminator Loss: 1.7076529264450073
Epoch: 789, Generator Loss: 0.6459762454032898, Discriminator Loss: 1.6899471282958984
Epoch: 790, Generator Loss: 0.6465340852737427, Discriminator Loss: 1.688905119895935
Epoch: 791, Generator Loss: 0.6471802592277527, Discriminator Loss: 1.6884241104125977
Epoch: 792, Generator Loss: 0.6433486938476562, Discriminator Loss: 1.6890718936920166
Epoch: 793, Generator Loss: 0.6495346426963806, Discriminator Loss: 1.6874850988388062
Epoch: 794, Generator Loss: 0.6457193493843079, Discriminator Loss: 1.701111912727356
Epoch: 795, Generator Loss: 0.646174967288971, Discriminator Loss: 1.6698459386825562
Epoch: 796, Generator Loss: 0.644953727722168, Discriminator Loss: 1.7134424448013306
Epoch: 797, Generator Loss: 0.6456112861633301, Discriminator Loss: 1.6563189029693604
Epoch: 798, Generator Loss: 0.6462131142616272, Discriminator Loss: 1.6736215353012085
Epoch: 799, Generator Loss: 0.6474617719650269, Discriminator Loss: 1.6953423023223877
Epoch: 800, Generator Loss: 0.6444851160049438, Discriminator Loss: 1.708457112312317
Epoch: 801, Generator Loss: 0.6504328846931458, Discriminator Loss: 1.6993917226791382
Epoch: 802, Generator Loss: 0.6523750424385071, Discriminator Loss: 1.6880384683609009
Epoch: 803, Generator Loss: 0.6532325744628906, Discriminator Loss: 1.6493464708328247
Epoch: 804, Generator Loss: 0.6428362727165222, Discriminator Loss: 1.6698048114776611
Epoch: 805, Generator Loss: 0.6448794007301331, Discriminator Loss: 1.6639974117279053
Epoch: 806, Generator Loss: 0.6373741030693054, Discriminator Loss: 1.698905348777771
Epoch: 807, Generator Loss: 0.645259439945221, Discriminator Loss: 1.6668585538864136
Epoch: 808, Generator Loss: 0.6438730955123901, Discriminator Loss: 1.6846847534179688
Epoch: 809, Generator Loss: 0.6420916318893433, Discriminator Loss: 1.6988307237625122
Epoch: 810, Generator Loss: 0.6509504914283752, Discriminator Loss: 1.658785104751587
Epoch: 811, Generator Loss: 0.6368170976638794, Discriminator Loss: 1.6991512775421143
Epoch: 812, Generator Loss: 0.6453040838241577, Discriminator Loss: 1.651410698890686
Epoch: 813, Generator Loss: 0.6459422707557678, Discriminator Loss: 1.7235463857650757
Epoch: 814, Generator Loss: 0.6482740044593811, Discriminator Loss: 1.6971060037612915
Epoch: 815, Generator Loss: 0.6415413618087769, Discriminator Loss: 1.7134684324264526
Epoch: 816, Generator Loss: 0.6521839499473572, Discriminator Loss: 1.700764536857605
Epoch: 817, Generator Loss: 0.6512317657470703, Discriminator Loss: 1.682900071144104
Epoch: 818, Generator Loss: 0.6461367011070251, Discriminator Loss: 1.6639320850372314
Epoch: 819, Generator Loss: 0.647760808467865, Discriminator Loss: 1.6698148250579834
Epoch: 820, Generator Loss: 0.6385040283203125, Discriminator Loss: 1.689350962638855
Epoch: 821, Generator Loss: 0.6383013129234314, Discriminator Loss: 1.7015478610992432
Epoch: 822, Generator Loss: 0.6478620767593384, Discriminator Loss: 1.6936986446380615
Epoch: 823, Generator Loss: 0.6455589532852173, Discriminator Loss: 1.6838206052780151
Epoch: 824, Generator Loss: 0.6472979187965393, Discriminator Loss: 1.6719515323638916
Epoch: 825, Generator Loss: 0.6509844660758972, Discriminator Loss: 1.6869994401931763
Epoch: 826, Generator Loss: 0.641825795173645, Discriminator Loss: 1.6944301128387451
Epoch: 827, Generator Loss: 0.6347590088844299, Discriminator Loss: 1.689614176750183
Epoch: 828, Generator Loss: 0.6492579579353333, Discriminator Loss: 1.6682952642440796
Epoch: 829, Generator Loss: 0.6344814300537109, Discriminator Loss: 1.6861628293991089
Epoch: 830, Generator Loss: 0.6350279450416565, Discriminator Loss: 1.699157953262329
Epoch: 831, Generator Loss: 0.6338053941726685, Discriminator Loss: 1.69450843334198
Epoch: 832, Generator Loss: 0.6286574602127075, Discriminator Loss: 1.6767548322677612
Epoch: 833, Generator Loss: 0.6234298348426819, Discriminator Loss: 1.665231466293335
Epoch: 834, Generator Loss: 0.6288501024246216, Discriminator Loss: 1.7117830514907837
Epoch: 835, Generator Loss: 0.6353852152824402, Discriminator Loss: 1.6733992099761963
Epoch: 836, Generator Loss: 0.6249300837516785, Discriminator Loss: 1.6993669271469116
Epoch: 837, Generator Loss: 0.6322340965270996, Discriminator Loss: 1.6541074514389038
Epoch: 838, Generator Loss: 0.6432577967643738, Discriminator Loss: 1.732157826423645
Epoch: 839, Generator Loss: 0.6342883706092834, Discriminator Loss: 1.6698881387710571
Epoch: 840, Generator Loss: 0.6335920691490173, Discriminator Loss: 1.6787515878677368
Epoch: 841, Generator Loss: 0.6468864679336548, Discriminator Loss: 1.6998026371002197
Epoch: 842, Generator Loss: 0.6474236249923706, Discriminator Loss: 1.701973795890808
Epoch: 843, Generator Loss: 0.646966278553009, Discriminator Loss: 1.6718199253082275
Epoch: 844, Generator Loss: 0.6464241147041321, Discriminator Loss: 1.6771689653396606
Epoch: 845, Generator Loss: 0.6436119079589844, Discriminator Loss: 1.6836856603622437
Epoch: 846, Generator Loss: 0.6447038650512695, Discriminator Loss: 1.6657428741455078
Epoch: 847, Generator Loss: 0.643575131893158, Discriminator Loss: 1.6953004598617554
Epoch: 848, Generator Loss: 0.6439548134803772, Discriminator Loss: 1.6941919326782227
Epoch: 849, Generator Loss: 0.6444023251533508, Discriminator Loss: 1.6473432779312134
Epoch: 850, Generator Loss: 0.6435033679008484, Discriminator Loss: 1.6676075458526611
Epoch: 851, Generator Loss: 0.6410353779792786, Discriminator Loss: 1.6864688396453857
Epoch: 852, Generator Loss: 0.6380628943443298, Discriminator Loss: 1.7044686079025269
Epoch: 853, Generator Loss: 0.6416370272636414, Discriminator Loss: 1.6941779851913452
Epoch: 854, Generator Loss: 0.637104868888855, Discriminator Loss: 1.692208170890808
Epoch: 855, Generator Loss: 0.6373717188835144, Discriminator Loss: 1.6706877946853638
Epoch: 856, Generator Loss: 0.648043692111969, Discriminator Loss: 1.6834051609039307
Epoch: 857, Generator Loss: 0.6430210471153259, Discriminator Loss: 1.6871057748794556
Epoch: 858, Generator Loss: 0.6490249633789062, Discriminator Loss: 1.6574829816818237
Epoch: 859, Generator Loss: 0.6438630819320679, Discriminator Loss: 1.6763256788253784
Epoch: 860, Generator Loss: 0.6436120867729187, Discriminator Loss: 1.6939095258712769
Epoch: 861, Generator Loss: 0.6371508836746216, Discriminator Loss: 1.6697616577148438
Epoch: 862, Generator Loss: 0.6344031691551208, Discriminator Loss: 1.7212742567062378
Epoch: 863, Generator Loss: 0.6486796140670776, Discriminator Loss: 1.6936308145523071
Epoch: 864, Generator Loss: 0.6404510140419006, Discriminator Loss: 1.6690387725830078
Epoch: 865, Generator Loss: 0.6404638886451721, Discriminator Loss: 1.6841017007827759
Epoch: 866, Generator Loss: 0.6466824412345886, Discriminator Loss: 1.6654150485992432
Epoch: 867, Generator Loss: 0.6479066014289856, Discriminator Loss: 1.7022093534469604
Epoch: 868, Generator Loss: 0.6469464898109436, Discriminator Loss: 1.7041388750076294
Epoch: 869, Generator Loss: 0.643588125705719, Discriminator Loss: 1.6739662885665894
Epoch: 870, Generator Loss: 0.6456783413887024, Discriminator Loss: 1.6774803400039673
Epoch: 871, Generator Loss: 0.6498838067054749, Discriminator Loss: 1.6766918897628784
Epoch: 872, Generator Loss: 0.6481409072875977, Discriminator Loss: 1.6888803243637085
Epoch: 873, Generator Loss: 0.6471696496009827, Discriminator Loss: 1.6902731657028198
Epoch: 874, Generator Loss: 0.653039276599884, Discriminator Loss: 1.6859287023544312
Epoch: 875, Generator Loss: 0.647546112537384, Discriminator Loss: 1.6752945184707642
Epoch: 876, Generator Loss: 0.6455156207084656, Discriminator Loss: 1.6606072187423706
Epoch: 877, Generator Loss: 0.6521846055984497, Discriminator Loss: 1.689477562904358
Epoch: 878, Generator Loss: 0.6519935727119446, Discriminator Loss: 1.6982775926589966
Epoch: 879, Generator Loss: 0.6572600603103638, Discriminator Loss: 1.647428274154663
Epoch: 880, Generator Loss: 0.6559563279151917, Discriminator Loss: 1.6672943830490112
Epoch: 881, Generator Loss: 0.6527286171913147, Discriminator Loss: 1.6454914808273315
Epoch: 882, Generator Loss: 0.639464795589447, Discriminator Loss: 1.651898980140686
Epoch: 883, Generator Loss: 0.6389814019203186, Discriminator Loss: 1.6733735799789429
Epoch: 884, Generator Loss: 0.6412025690078735, Discriminator Loss: 1.6632921695709229
Epoch: 885, Generator Loss: 0.6392243504524231, Discriminator Loss: 1.7342363595962524
Epoch: 886, Generator Loss: 0.6374303102493286, Discriminator Loss: 1.690898060798645
Epoch: 887, Generator Loss: 0.6308087110519409, Discriminator Loss: 1.6720257997512817
Epoch: 888, Generator Loss: 0.6332067847251892, Discriminator Loss: 1.6905430555343628
Epoch: 889, Generator Loss: 0.6326988935470581, Discriminator Loss: 1.657012701034546
Epoch: 890, Generator Loss: 0.6267148852348328, Discriminator Loss: 1.6859959363937378
Epoch: 891, Generator Loss: 0.623721718788147, Discriminator Loss: 1.6886694431304932
Epoch: 892, Generator Loss: 0.6213251352310181, Discriminator Loss: 1.7302616834640503
Epoch: 893, Generator Loss: 0.635186493396759, Discriminator Loss: 1.7061434984207153
Epoch: 894, Generator Loss: 0.6342921257019043, Discriminator Loss: 1.6842916011810303
Epoch: 895, Generator Loss: 0.6303486824035645, Discriminator Loss: 1.6999636888504028
Epoch: 896, Generator Loss: 0.6277429461479187, Discriminator Loss: 1.6979912519454956
Epoch: 897, Generator Loss: 0.6398482322692871, Discriminator Loss: 1.6644891500473022
Epoch: 898, Generator Loss: 0.6475699543952942, Discriminator Loss: 1.6782225370407104
Epoch: 899, Generator Loss: 0.6417468190193176, Discriminator Loss: 1.6812816858291626
Epoch: 900, Generator Loss: 0.6446961164474487, Discriminator Loss: 1.6953086853027344
Epoch: 901, Generator Loss: 0.6461649537086487, Discriminator Loss: 1.6835520267486572
Epoch: 902, Generator Loss: 0.6407870054244995, Discriminator Loss: 1.6949354410171509
Epoch: 903, Generator Loss: 0.6385378241539001, Discriminator Loss: 1.6903635263442993
Epoch: 904, Generator Loss: 0.6442316174507141, Discriminator Loss: 1.6634455919265747
Epoch: 905, Generator Loss: 0.6462773680686951, Discriminator Loss: 1.663248062133789
Epoch: 906, Generator Loss: 0.6412237882614136, Discriminator Loss: 1.7047537565231323
Epoch: 907, Generator Loss: 0.6442143321037292, Discriminator Loss: 1.6794270277023315
Epoch: 908, Generator Loss: 0.6510756611824036, Discriminator Loss: 1.7028783559799194
Epoch: 909, Generator Loss: 0.6463631391525269, Discriminator Loss: 1.7337805032730103
Epoch: 910, Generator Loss: 0.6431803703308105, Discriminator Loss: 1.6838675737380981
Epoch: 911, Generator Loss: 0.6543909907341003, Discriminator Loss: 1.6219362020492554
Epoch: 912, Generator Loss: 0.6420890688896179, Discriminator Loss: 1.6907455921173096
Epoch: 913, Generator Loss: 0.6412531733512878, Discriminator Loss: 1.6703691482543945
Epoch: 914, Generator Loss: 0.6454876661300659, Discriminator Loss: 1.6610437631607056
Epoch: 915, Generator Loss: 0.6446781158447266, Discriminator Loss: 1.6957942247390747
Epoch: 916, Generator Loss: 0.641388475894928, Discriminator Loss: 1.6840860843658447
Epoch: 917, Generator Loss: 0.6512919664382935, Discriminator Loss: 1.7019147872924805
Epoch: 918, Generator Loss: 0.6423724293708801, Discriminator Loss: 1.6586627960205078
Epoch: 919, Generator Loss: 0.6410511136054993, Discriminator Loss: 1.7042564153671265
Epoch: 920, Generator Loss: 0.6445048451423645, Discriminator Loss: 1.665042519569397
Epoch: 921, Generator Loss: 0.6416832804679871, Discriminator Loss: 1.66871178150177
Epoch: 922, Generator Loss: 0.6474618911743164, Discriminator Loss: 1.6780444383621216
Epoch: 923, Generator Loss: 0.6473535895347595, Discriminator Loss: 1.6927995681762695
Epoch: 924, Generator Loss: 0.6501808166503906, Discriminator Loss: 1.6669687032699585
Epoch: 925, Generator Loss: 0.6476714015007019, Discriminator Loss: 1.6908963918685913
Epoch: 926, Generator Loss: 0.648737907409668, Discriminator Loss: 1.6695107221603394
Epoch: 927, Generator Loss: 0.6412931084632874, Discriminator Loss: 1.7100414037704468
Epoch: 928, Generator Loss: 0.6543822884559631, Discriminator Loss: 1.6781543493270874
Epoch: 929, Generator Loss: 0.6556010842323303, Discriminator Loss: 1.6547144651412964
Epoch: 930, Generator Loss: 0.6472338438034058, Discriminator Loss: 1.6981847286224365
Epoch: 931, Generator Loss: 0.6545968651771545, Discriminator Loss: 1.647918462753296
Epoch: 932, Generator Loss: 0.6487293243408203, Discriminator Loss: 1.6687551736831665
Epoch: 933, Generator Loss: 0.6517857909202576, Discriminator Loss: 1.687915563583374
Epoch: 934, Generator Loss: 0.6536468863487244, Discriminator Loss: 1.6828334331512451
Epoch: 935, Generator Loss: 0.6577009558677673, Discriminator Loss: 1.654845118522644
Epoch: 936, Generator Loss: 0.652551531791687, Discriminator Loss: 1.6769635677337646
Epoch: 937, Generator Loss: 0.6545099020004272, Discriminator Loss: 1.6609342098236084
Epoch: 938, Generator Loss: 0.6504921913146973, Discriminator Loss: 1.6926213502883911
Epoch: 939, Generator Loss: 0.649067223072052, Discriminator Loss: 1.672222375869751
Epoch: 940, Generator Loss: 0.648470401763916, Discriminator Loss: 1.641292929649353
Epoch: 941, Generator Loss: 0.6478667259216309, Discriminator Loss: 1.672516942024231
Epoch: 942, Generator Loss: 0.6499871015548706, Discriminator Loss: 1.6764332056045532
Epoch: 943, Generator Loss: 0.6433542966842651, Discriminator Loss: 1.6407592296600342
Epoch: 944, Generator Loss: 0.6346005201339722, Discriminator Loss: 1.6843923330307007
Epoch: 945, Generator Loss: 0.6402190327644348, Discriminator Loss: 1.6881657838821411
Epoch: 946, Generator Loss: 0.6361384391784668, Discriminator Loss: 1.660211443901062
Epoch: 947, Generator Loss: 0.6307412385940552, Discriminator Loss: 1.6766997575759888
Epoch: 948, Generator Loss: 0.6394586563110352, Discriminator Loss: 1.640747308731079
Epoch: 949, Generator Loss: 0.6276842951774597, Discriminator Loss: 1.6923192739486694
Epoch: 950, Generator Loss: 0.6381983757019043, Discriminator Loss: 1.6670491695404053
Epoch: 951, Generator Loss: 0.6308623552322388, Discriminator Loss: 1.6977864503860474
Epoch: 952, Generator Loss: 0.6386958360671997, Discriminator Loss: 1.6770102977752686
Epoch: 953, Generator Loss: 0.6379613876342773, Discriminator Loss: 1.6999932527542114
Epoch: 954, Generator Loss: 0.6332514882087708, Discriminator Loss: 1.6762632131576538
Epoch: 955, Generator Loss: 0.6324372291564941, Discriminator Loss: 1.7024307250976562
Epoch: 956, Generator Loss: 0.6302662491798401, Discriminator Loss: 1.6605632305145264
Epoch: 957, Generator Loss: 0.6169535517692566, Discriminator Loss: 1.6973581314086914
Epoch: 958, Generator Loss: 0.6266957521438599, Discriminator Loss: 1.7076683044433594
Epoch: 959, Generator Loss: 0.6308096051216125, Discriminator Loss: 1.6312190294265747
Epoch: 960, Generator Loss: 0.6222925186157227, Discriminator Loss: 1.6966724395751953
Epoch: 961, Generator Loss: 0.6432718634605408, Discriminator Loss: 1.6772640943527222
Epoch: 962, Generator Loss: 0.645840585231781, Discriminator Loss: 1.679280400276184
Epoch: 963, Generator Loss: 0.6496676206588745, Discriminator Loss: 1.6930577754974365
Epoch: 964, Generator Loss: 0.644074022769928, Discriminator Loss: 1.6629143953323364
Epoch: 965, Generator Loss: 0.640508234500885, Discriminator Loss: 1.7152215242385864
Epoch: 966, Generator Loss: 0.642883837223053, Discriminator Loss: 1.6762198209762573
Epoch: 967, Generator Loss: 0.6453239321708679, Discriminator Loss: 1.6659414768218994
Epoch: 968, Generator Loss: 0.6519098877906799, Discriminator Loss: 1.6596927642822266
Epoch: 969, Generator Loss: 0.6489405035972595, Discriminator Loss: 1.7079856395721436
Epoch: 970, Generator Loss: 0.6518526077270508, Discriminator Loss: 1.6738616228103638
Epoch: 971, Generator Loss: 0.6478977203369141, Discriminator Loss: 1.6705843210220337
Epoch: 972, Generator Loss: 0.6499918103218079, Discriminator Loss: 1.6667543649673462
Epoch: 973, Generator Loss: 0.6534369587898254, Discriminator Loss: 1.690002679824829
Epoch: 974, Generator Loss: 0.6465455889701843, Discriminator Loss: 1.6816914081573486
Epoch: 975, Generator Loss: 0.6541441679000854, Discriminator Loss: 1.6981830596923828
Epoch: 976, Generator Loss: 0.65855473279953, Discriminator Loss: 1.6607091426849365
Epoch: 977, Generator Loss: 0.649769127368927, Discriminator Loss: 1.7022501230239868
Epoch: 978, Generator Loss: 0.6493767499923706, Discriminator Loss: 1.681581735610962
Epoch: 979, Generator Loss: 0.6476168632507324, Discriminator Loss: 1.6732395887374878
Epoch: 980, Generator Loss: 0.6498528718948364, Discriminator Loss: 1.6637760400772095
Epoch: 981, Generator Loss: 0.6434953808784485, Discriminator Loss: 1.7017279863357544
Epoch: 982, Generator Loss: 0.6432884335517883, Discriminator Loss: 1.6733802556991577
Epoch: 983, Generator Loss: 0.651581346988678, Discriminator Loss: 1.670411229133606
Epoch: 984, Generator Loss: 0.6481914520263672, Discriminator Loss: 1.677817702293396
Epoch: 985, Generator Loss: 0.6487250924110413, Discriminator Loss: 1.7028416395187378
Epoch: 986, Generator Loss: 0.6470733284950256, Discriminator Loss: 1.7148908376693726
Epoch: 987, Generator Loss: 0.6525593996047974, Discriminator Loss: 1.6931438446044922
Epoch: 988, Generator Loss: 0.6482371687889099, Discriminator Loss: 1.6736136674880981
Epoch: 989, Generator Loss: 0.6463660597801208, Discriminator Loss: 1.6736193895339966
Epoch: 990, Generator Loss: 0.6433032155036926, Discriminator Loss: 1.6605441570281982
Epoch: 991, Generator Loss: 0.6489080786705017, Discriminator Loss: 1.6890615224838257
Epoch: 992, Generator Loss: 0.6557827591896057, Discriminator Loss: 1.6264772415161133
Epoch: 993, Generator Loss: 0.6466866135597229, Discriminator Loss: 1.6473532915115356
Epoch: 994, Generator Loss: 0.6439887285232544, Discriminator Loss: 1.6728202104568481
Epoch: 995, Generator Loss: 0.6452072262763977, Discriminator Loss: 1.6788474321365356
Epoch: 996, Generator Loss: 0.6500336527824402, Discriminator Loss: 1.664977788925171
Epoch: 997, Generator Loss: 0.6534667611122131, Discriminator Loss: 1.659953236579895
Epoch: 998, Generator Loss: 0.6533231735229492, Discriminator Loss: 1.6543101072311401
Epoch: 999, Generator Loss: 0.6523329615592957, Discriminator Loss: 1.654296636581421
Epoch: 1000, Generator Loss: 0.6545578837394714, Discriminator Loss: 1.7095508575439453
Epoch: 1001, Generator Loss: 0.6497458219528198, Discriminator Loss: 1.6904691457748413
Epoch: 1002, Generator Loss: 0.650953471660614, Discriminator Loss: 1.6595782041549683
Epoch: 1003, Generator Loss: 0.6458336710929871, Discriminator Loss: 1.7060775756835938
Epoch: 1004, Generator Loss: 0.6400992274284363, Discriminator Loss: 1.680001139640808
Epoch: 1005, Generator Loss: 0.64464271068573, Discriminator Loss: 1.6767663955688477
Epoch: 1006, Generator Loss: 0.6469523310661316, Discriminator Loss: 1.6745768785476685
Epoch: 1007, Generator Loss: 0.6544378399848938, Discriminator Loss: 1.6709190607070923
Epoch: 1008, Generator Loss: 0.6440057158470154, Discriminator Loss: 1.6830092668533325
Epoch: 1009, Generator Loss: 0.647577166557312, Discriminator Loss: 1.6747910976409912
Epoch: 1010, Generator Loss: 0.653142511844635, Discriminator Loss: 1.6775071620941162
Epoch: 1011, Generator Loss: 0.6564585566520691, Discriminator Loss: 1.65201735496521
Epoch: 1012, Generator Loss: 0.6501021385192871, Discriminator Loss: 1.642349123954773
Epoch: 1013, Generator Loss: 0.6466683149337769, Discriminator Loss: 1.6583834886550903
Epoch: 1014, Generator Loss: 0.6559926271438599, Discriminator Loss: 1.6642245054244995
Epoch: 1015, Generator Loss: 0.6504499316215515, Discriminator Loss: 1.6621639728546143
Epoch: 1016, Generator Loss: 0.6472968459129333, Discriminator Loss: 1.689492106437683
Epoch: 1017, Generator Loss: 0.6458834409713745, Discriminator Loss: 1.6840795278549194
Epoch: 1018, Generator Loss: 0.6397076845169067, Discriminator Loss: 1.6834638118743896
Epoch: 1019, Generator Loss: 0.6449151635169983, Discriminator Loss: 1.6779592037200928
Epoch: 1020, Generator Loss: 0.6381171941757202, Discriminator Loss: 1.6874809265136719
Epoch: 1021, Generator Loss: 0.6462339162826538, Discriminator Loss: 1.6918843984603882
Epoch: 1022, Generator Loss: 0.6513643264770508, Discriminator Loss: 1.6519521474838257
Epoch: 1023, Generator Loss: 0.6450982689857483, Discriminator Loss: 1.6609361171722412
Epoch: 1024, Generator Loss: 0.6421942114830017, Discriminator Loss: 1.699310302734375
Epoch: 1025, Generator Loss: 0.6420653462409973, Discriminator Loss: 1.6866520643234253
Epoch: 1026, Generator Loss: 0.6413701772689819, Discriminator Loss: 1.6706316471099854
Epoch: 1027, Generator Loss: 0.6494686007499695, Discriminator Loss: 1.6924301385879517
Epoch: 1028, Generator Loss: 0.6410335898399353, Discriminator Loss: 1.6978062391281128
Epoch: 1029, Generator Loss: 0.643086314201355, Discriminator Loss: 1.6981031894683838
Epoch: 1030, Generator Loss: 0.6417050957679749, Discriminator Loss: 1.7032287120819092
Epoch: 1031, Generator Loss: 0.6486301422119141, Discriminator Loss: 1.6808805465698242
Epoch: 1032, Generator Loss: 0.6498674750328064, Discriminator Loss: 1.6256779432296753
Epoch: 1033, Generator Loss: 0.6544308066368103, Discriminator Loss: 1.6783477067947388
Epoch: 1034, Generator Loss: 0.6557580232620239, Discriminator Loss: 1.677991271018982
Epoch: 1035, Generator Loss: 0.6516487002372742, Discriminator Loss: 1.6736118793487549
Epoch: 1036, Generator Loss: 0.6417420506477356, Discriminator Loss: 1.6875171661376953
Epoch: 1037, Generator Loss: 0.6517005562782288, Discriminator Loss: 1.679491639137268
Epoch: 1038, Generator Loss: 0.6396104097366333, Discriminator Loss: 1.70478093624115
Epoch: 1039, Generator Loss: 0.6575397849082947, Discriminator Loss: 1.703431487083435
Epoch: 1040, Generator Loss: 0.6542254090309143, Discriminator Loss: 1.6966451406478882
Epoch: 1041, Generator Loss: 0.6493098139762878, Discriminator Loss: 1.6668449640274048
Epoch: 1042, Generator Loss: 0.6538373827934265, Discriminator Loss: 1.6854370832443237
Epoch: 1043, Generator Loss: 0.6596593856811523, Discriminator Loss: 1.68960702419281
Epoch: 1044, Generator Loss: 0.6567384004592896, Discriminator Loss: 1.673371434211731
Epoch: 1045, Generator Loss: 0.6612784266471863, Discriminator Loss: 1.6388171911239624
Epoch: 1046, Generator Loss: 0.6604262590408325, Discriminator Loss: 1.6596848964691162
Epoch: 1047, Generator Loss: 0.6608721613883972, Discriminator Loss: 1.63776433467865
Epoch: 1048, Generator Loss: 0.6553564071655273, Discriminator Loss: 1.666810393333435
Epoch: 1049, Generator Loss: 0.6596790552139282, Discriminator Loss: 1.6629515886306763
Epoch: 1050, Generator Loss: 0.6543067097663879, Discriminator Loss: 1.679915189743042
Epoch: 1051, Generator Loss: 0.6595374345779419, Discriminator Loss: 1.6411610841751099
Epoch: 1052, Generator Loss: 0.6456162333488464, Discriminator Loss: 1.6617794036865234
Epoch: 1053, Generator Loss: 0.649627685546875, Discriminator Loss: 1.6748054027557373
Epoch: 1054, Generator Loss: 0.6560127139091492, Discriminator Loss: 1.6866226196289062
Epoch: 1055, Generator Loss: 0.651210367679596, Discriminator Loss: 1.6582897901535034
Epoch: 1056, Generator Loss: 0.6525540351867676, Discriminator Loss: 1.7126176357269287
Epoch: 1057, Generator Loss: 0.6584601402282715, Discriminator Loss: 1.7025349140167236
Epoch: 1058, Generator Loss: 0.6519074440002441, Discriminator Loss: 1.6438398361206055
Epoch: 1059, Generator Loss: 0.658030092716217, Discriminator Loss: 1.6537972688674927
Epoch: 1060, Generator Loss: 0.6552554368972778, Discriminator Loss: 1.6838738918304443
Epoch: 1061, Generator Loss: 0.653412401676178, Discriminator Loss: 1.6951009035110474
Epoch: 1062, Generator Loss: 0.663392961025238, Discriminator Loss: 1.6950147151947021
Epoch: 1063, Generator Loss: 0.6605915427207947, Discriminator Loss: 1.6563581228256226
Epoch: 1064, Generator Loss: 0.6536003351211548, Discriminator Loss: 1.670927882194519
Epoch: 1065, Generator Loss: 0.6617608666419983, Discriminator Loss: 1.6946213245391846
Epoch: 1066, Generator Loss: 0.6633986234664917, Discriminator Loss: 1.6756203174591064
Epoch: 1067, Generator Loss: 0.6540139317512512, Discriminator Loss: 1.6853199005126953
Epoch: 1068, Generator Loss: 0.6534045934677124, Discriminator Loss: 1.6684468984603882
Epoch: 1069, Generator Loss: 0.657817542552948, Discriminator Loss: 1.6768425703048706
Epoch: 1070, Generator Loss: 0.6580556035041809, Discriminator Loss: 1.6692792177200317
Epoch: 1071, Generator Loss: 0.6557177901268005, Discriminator Loss: 1.6701767444610596
Epoch: 1072, Generator Loss: 0.6535516977310181, Discriminator Loss: 1.6437863111495972
Epoch: 1073, Generator Loss: 0.6596497297286987, Discriminator Loss: 1.6531459093093872
Epoch: 1074, Generator Loss: 0.6528570055961609, Discriminator Loss: 1.7048672437667847
Epoch: 1075, Generator Loss: 0.6555608510971069, Discriminator Loss: 1.685302495956421
Epoch: 1076, Generator Loss: 0.6572316288948059, Discriminator Loss: 1.6684659719467163
Epoch: 1077, Generator Loss: 0.6580737233161926, Discriminator Loss: 1.656374216079712
Epoch: 1078, Generator Loss: 0.6534423828125, Discriminator Loss: 1.6599832773208618
Epoch: 1079, Generator Loss: 0.6485797762870789, Discriminator Loss: 1.65749192237854
Epoch: 1080, Generator Loss: 0.6490947604179382, Discriminator Loss: 1.6746360063552856
Epoch: 1081, Generator Loss: 0.6518474221229553, Discriminator Loss: 1.6597055196762085
Epoch: 1082, Generator Loss: 0.6486597657203674, Discriminator Loss: 1.6124275922775269
Epoch: 1083, Generator Loss: 0.650050699710846, Discriminator Loss: 1.6555671691894531
Epoch: 1084, Generator Loss: 0.6465370059013367, Discriminator Loss: 1.6436645984649658
Epoch: 1085, Generator Loss: 0.645898163318634, Discriminator Loss: 1.6792100667953491
Epoch: 1086, Generator Loss: 0.6509347558021545, Discriminator Loss: 1.6682502031326294
Epoch: 1087, Generator Loss: 0.6523917317390442, Discriminator Loss: 1.6558376550674438
Epoch: 1088, Generator Loss: 0.6546353101730347, Discriminator Loss: 1.6906274557113647
Epoch: 1089, Generator Loss: 0.6522423624992371, Discriminator Loss: 1.6540333032608032
Epoch: 1090, Generator Loss: 0.6391345858573914, Discriminator Loss: 1.6799956560134888
Epoch: 1091, Generator Loss: 0.6562709212303162, Discriminator Loss: 1.682117223739624
Epoch: 1092, Generator Loss: 0.6481834053993225, Discriminator Loss: 1.6360406875610352
Epoch: 1093, Generator Loss: 0.642146646976471, Discriminator Loss: 1.6503450870513916
Epoch: 1094, Generator Loss: 0.647358775138855, Discriminator Loss: 1.6672338247299194
Epoch: 1095, Generator Loss: 0.6507096290588379, Discriminator Loss: 1.6704330444335938
Epoch: 1096, Generator Loss: 0.6444815397262573, Discriminator Loss: 1.6645309925079346
Epoch: 1097, Generator Loss: 0.6414772868156433, Discriminator Loss: 1.6722300052642822
Epoch: 1098, Generator Loss: 0.6435736417770386, Discriminator Loss: 1.6665223836898804
Epoch: 1099, Generator Loss: 0.6440314650535583, Discriminator Loss: 1.6776268482208252
Epoch: 1100, Generator Loss: 0.6401309370994568, Discriminator Loss: 1.6745531558990479
Epoch: 1101, Generator Loss: 0.6374984383583069, Discriminator Loss: 1.6788520812988281
Epoch: 1102, Generator Loss: 0.6467112898826599, Discriminator Loss: 1.6662005186080933
Epoch: 1103, Generator Loss: 0.6332675814628601, Discriminator Loss: 1.7037584781646729
Epoch: 1104, Generator Loss: 0.6375776529312134, Discriminator Loss: 1.6291592121124268
Epoch: 1105, Generator Loss: 0.6426326036453247, Discriminator Loss: 1.6530648469924927
Epoch: 1106, Generator Loss: 0.6453632712364197, Discriminator Loss: 1.6917628049850464
Epoch: 1107, Generator Loss: 0.6443458795547485, Discriminator Loss: 1.6347973346710205
Epoch: 1108, Generator Loss: 0.6465269923210144, Discriminator Loss: 1.650148630142212
Epoch: 1109, Generator Loss: 0.6454489827156067, Discriminator Loss: 1.6628717184066772
Epoch: 1110, Generator Loss: 0.6387604475021362, Discriminator Loss: 1.6956311464309692
Epoch: 1111, Generator Loss: 0.6410619020462036, Discriminator Loss: 1.6733248233795166
Epoch: 1112, Generator Loss: 0.6399866342544556, Discriminator Loss: 1.669708490371704
Epoch: 1113, Generator Loss: 0.6381648182868958, Discriminator Loss: 1.6743978261947632
Epoch: 1114, Generator Loss: 0.6439035534858704, Discriminator Loss: 1.6506794691085815
Epoch: 1115, Generator Loss: 0.6479878425598145, Discriminator Loss: 1.6630688905715942
Epoch: 1116, Generator Loss: 0.6391757130622864, Discriminator Loss: 1.6430206298828125
Epoch: 1117, Generator Loss: 0.6374538540840149, Discriminator Loss: 1.6823607683181763
Epoch: 1118, Generator Loss: 0.6510518789291382, Discriminator Loss: 1.6814641952514648
Epoch: 1119, Generator Loss: 0.6490689516067505, Discriminator Loss: 1.7116843461990356
Epoch: 1120, Generator Loss: 0.6573408842086792, Discriminator Loss: 1.6356258392333984
Epoch: 1121, Generator Loss: 0.6525334119796753, Discriminator Loss: 1.6447101831436157
Epoch: 1122, Generator Loss: 0.654327929019928, Discriminator Loss: 1.67890202999115
Epoch: 1123, Generator Loss: 0.6561023592948914, Discriminator Loss: 1.6463215351104736
Epoch: 1124, Generator Loss: 0.6558276414871216, Discriminator Loss: 1.6595121622085571
Epoch: 1125, Generator Loss: 0.6517801284790039, Discriminator Loss: 1.6866587400436401
Epoch: 1126, Generator Loss: 0.6523953676223755, Discriminator Loss: 1.7253564596176147
Epoch: 1127, Generator Loss: 0.6556571125984192, Discriminator Loss: 1.6603329181671143
Epoch: 1128, Generator Loss: 0.6580336689949036, Discriminator Loss: 1.6477347612380981
Epoch: 1129, Generator Loss: 0.6492061614990234, Discriminator Loss: 1.698004126548767
Epoch: 1130, Generator Loss: 0.6553652882575989, Discriminator Loss: 1.6364322900772095
Epoch: 1131, Generator Loss: 0.655481219291687, Discriminator Loss: 1.6653510332107544
Epoch: 1132, Generator Loss: 0.6522723436355591, Discriminator Loss: 1.6799472570419312
Epoch: 1133, Generator Loss: 0.6514487862586975, Discriminator Loss: 1.672151803970337
Epoch: 1134, Generator Loss: 0.6415582895278931, Discriminator Loss: 1.6848933696746826
Epoch: 1135, Generator Loss: 0.6509315371513367, Discriminator Loss: 1.667061686515808
Epoch: 1136, Generator Loss: 0.6556640267372131, Discriminator Loss: 1.6776809692382812
Epoch: 1137, Generator Loss: 0.6495510935783386, Discriminator Loss: 1.6474812030792236
Epoch: 1138, Generator Loss: 0.6538923978805542, Discriminator Loss: 1.7020193338394165
Epoch: 1139, Generator Loss: 0.6513382792472839, Discriminator Loss: 1.6687861680984497
Epoch: 1140, Generator Loss: 0.6505554914474487, Discriminator Loss: 1.673471212387085
Epoch: 1141, Generator Loss: 0.6535704731941223, Discriminator Loss: 1.6769574880599976
Epoch: 1142, Generator Loss: 0.6571369171142578, Discriminator Loss: 1.6974124908447266
Epoch: 1143, Generator Loss: 0.6521667838096619, Discriminator Loss: 1.6580873727798462
Epoch: 1144, Generator Loss: 0.6569467186927795, Discriminator Loss: 1.7094935178756714
Epoch: 1145, Generator Loss: 0.6512962579727173, Discriminator Loss: 1.655438780784607
Epoch: 1146, Generator Loss: 0.6493626832962036, Discriminator Loss: 1.6713361740112305
Epoch: 1147, Generator Loss: 0.6452003121376038, Discriminator Loss: 1.6291719675064087
Epoch: 1148, Generator Loss: 0.6376761198043823, Discriminator Loss: 1.6745296716690063
Epoch: 1149, Generator Loss: 0.6553465127944946, Discriminator Loss: 1.6107842922210693
Epoch: 1150, Generator Loss: 0.6477042436599731, Discriminator Loss: 1.635528326034546
Epoch: 1151, Generator Loss: 0.6392406821250916, Discriminator Loss: 1.7099491357803345
Epoch: 1152, Generator Loss: 0.6380015015602112, Discriminator Loss: 1.6370304822921753
Epoch: 1153, Generator Loss: 0.6406666040420532, Discriminator Loss: 1.6190201044082642
Epoch: 1154, Generator Loss: 0.6413539052009583, Discriminator Loss: 1.6611436605453491
Epoch: 1155, Generator Loss: 0.6367269158363342, Discriminator Loss: 1.6914044618606567
Epoch: 1156, Generator Loss: 0.6393346786499023, Discriminator Loss: 1.6802583932876587
Epoch: 1157, Generator Loss: 0.6393870711326599, Discriminator Loss: 1.67672598361969
Epoch: 1158, Generator Loss: 0.6383585333824158, Discriminator Loss: 1.674614667892456
Epoch: 1159, Generator Loss: 0.644036591053009, Discriminator Loss: 1.6571682691574097
Epoch: 1160, Generator Loss: 0.6431364417076111, Discriminator Loss: 1.657144546508789
Epoch: 1161, Generator Loss: 0.6448215842247009, Discriminator Loss: 1.6429437398910522
Epoch: 1162, Generator Loss: 0.6419533491134644, Discriminator Loss: 1.6056791543960571
Epoch: 1163, Generator Loss: 0.6416446566581726, Discriminator Loss: 1.5818977355957031
Epoch: 1164, Generator Loss: 0.6548519134521484, Discriminator Loss: 1.5999356508255005
Epoch: 1165, Generator Loss: 0.6420032382011414, Discriminator Loss: 1.6220964193344116
Epoch: 1166, Generator Loss: 0.6505847573280334, Discriminator Loss: 1.5674089193344116
Epoch: 1167, Generator Loss: 0.6427062153816223, Discriminator Loss: 1.5828253030776978
Epoch: 1168, Generator Loss: 0.6407782435417175, Discriminator Loss: 1.589030146598816
Epoch: 1169, Generator Loss: 0.6410986185073853, Discriminator Loss: 1.5948489904403687
Epoch: 1170, Generator Loss: 0.6456122994422913, Discriminator Loss: 1.5888527631759644
Epoch: 1171, Generator Loss: 0.6491497159004211, Discriminator Loss: 1.6090869903564453
Epoch: 1172, Generator Loss: 0.6435423493385315, Discriminator Loss: 1.6331459283828735
Epoch: 1173, Generator Loss: 0.6303786635398865, Discriminator Loss: 1.6258840560913086
Epoch: 1174, Generator Loss: 0.582170307636261, Discriminator Loss: 1.6716331243515015
Epoch: 1175, Generator Loss: 0.5800583958625793, Discriminator Loss: 1.7005958557128906
Epoch: 1176, Generator Loss: 0.5804582238197327, Discriminator Loss: 1.6898419857025146
Epoch: 1177, Generator Loss: 0.5912343263626099, Discriminator Loss: 1.7561770677566528
Epoch: 1178, Generator Loss: 0.6226202845573425, Discriminator Loss: 1.6695175170898438
Epoch: 1179, Generator Loss: 0.6348945498466492, Discriminator Loss: 1.7125791311264038
Epoch: 1180, Generator Loss: 0.6374363303184509, Discriminator Loss: 1.6726343631744385
Epoch: 1181, Generator Loss: 0.6320720314979553, Discriminator Loss: 1.7130171060562134
Epoch: 1182, Generator Loss: 0.64141446352005, Discriminator Loss: 1.7185968160629272
Epoch: 1183, Generator Loss: 0.6411744356155396, Discriminator Loss: 1.7126057147979736
Epoch: 1184, Generator Loss: 0.640991747379303, Discriminator Loss: 1.664078712463379
Epoch: 1185, Generator Loss: 0.6468594670295715, Discriminator Loss: 1.6371678113937378
Epoch: 1186, Generator Loss: 0.6457719206809998, Discriminator Loss: 1.6901301145553589
Epoch: 1187, Generator Loss: 0.6518090963363647, Discriminator Loss: 1.6737916469573975
Epoch: 1188, Generator Loss: 0.644311249256134, Discriminator Loss: 1.6805058717727661
Epoch: 1189, Generator Loss: 0.6551898121833801, Discriminator Loss: 1.6754744052886963
Epoch: 1190, Generator Loss: 0.6498600244522095, Discriminator Loss: 1.7017217874526978
Epoch: 1191, Generator Loss: 0.6587594151496887, Discriminator Loss: 1.663667917251587
Epoch: 1192, Generator Loss: 0.6525000333786011, Discriminator Loss: 1.664498209953308
Epoch: 1193, Generator Loss: 0.6517258882522583, Discriminator Loss: 1.7034164667129517
Epoch: 1194, Generator Loss: 0.6524985432624817, Discriminator Loss: 1.6786613464355469
Epoch: 1195, Generator Loss: 0.6542423963546753, Discriminator Loss: 1.6772912740707397
Epoch: 1196, Generator Loss: 0.6533303260803223, Discriminator Loss: 1.6728452444076538
Epoch: 1197, Generator Loss: 0.663564145565033, Discriminator Loss: 1.6453417539596558
Epoch: 1198, Generator Loss: 0.6367782354354858, Discriminator Loss: 1.6648823022842407
Epoch: 1199, Generator Loss: 0.6455888152122498, Discriminator Loss: 1.6442896127700806
Epoch: 1200, Generator Loss: 0.6387848258018494, Discriminator Loss: 1.7004314661026
Epoch: 1201, Generator Loss: 0.6335766911506653, Discriminator Loss: 1.6814690828323364
Epoch: 1202, Generator Loss: 0.6341056227684021, Discriminator Loss: 1.6849850416183472
Epoch: 1203, Generator Loss: 0.6436320543289185, Discriminator Loss: 1.6574395895004272
Epoch: 1204, Generator Loss: 0.6505817770957947, Discriminator Loss: 1.6722317934036255
Epoch: 1205, Generator Loss: 0.6442270874977112, Discriminator Loss: 1.6693357229232788
Epoch: 1206, Generator Loss: 0.6483955383300781, Discriminator Loss: 1.6528500318527222
Epoch: 1207, Generator Loss: 0.6564576029777527, Discriminator Loss: 1.6724779605865479
Epoch: 1208, Generator Loss: 0.6538344621658325, Discriminator Loss: 1.6752369403839111
Epoch: 1209, Generator Loss: 0.6575826406478882, Discriminator Loss: 1.668224573135376
Epoch: 1210, Generator Loss: 0.6576094627380371, Discriminator Loss: 1.6922645568847656
Epoch: 1211, Generator Loss: 0.655213475227356, Discriminator Loss: 1.6593043804168701
Epoch: 1212, Generator Loss: 0.6517152190208435, Discriminator Loss: 1.656184196472168
Epoch: 1213, Generator Loss: 0.6567950248718262, Discriminator Loss: 1.6412822008132935
Epoch: 1214, Generator Loss: 0.6616367101669312, Discriminator Loss: 1.6649812459945679
Epoch: 1215, Generator Loss: 0.6530846357345581, Discriminator Loss: 1.663331151008606
Epoch: 1216, Generator Loss: 0.6568703651428223, Discriminator Loss: 1.6737139225006104
Epoch: 1217, Generator Loss: 0.6583381295204163, Discriminator Loss: 1.6306931972503662
Epoch: 1218, Generator Loss: 0.6529985070228577, Discriminator Loss: 1.661277174949646
Epoch: 1219, Generator Loss: 0.6612951159477234, Discriminator Loss: 1.681122064590454
Epoch: 1220, Generator Loss: 0.6581515073776245, Discriminator Loss: 1.6311800479888916
Epoch: 1221, Generator Loss: 0.6594228744506836, Discriminator Loss: 1.6595962047576904
Epoch: 1222, Generator Loss: 0.6568355560302734, Discriminator Loss: 1.6690502166748047
Epoch: 1223, Generator Loss: 0.6604880094528198, Discriminator Loss: 1.6635663509368896
Epoch: 1224, Generator Loss: 0.6593460440635681, Discriminator Loss: 1.651484489440918
Epoch: 1225, Generator Loss: 0.6665200591087341, Discriminator Loss: 1.6712507009506226
Epoch: 1226, Generator Loss: 0.6578783392906189, Discriminator Loss: 1.6542160511016846
Epoch: 1227, Generator Loss: 0.6584072113037109, Discriminator Loss: 1.6402149200439453
Epoch: 1228, Generator Loss: 0.662287175655365, Discriminator Loss: 1.6472290754318237
Epoch: 1229, Generator Loss: 0.655992865562439, Discriminator Loss: 1.7074915170669556
Epoch: 1230, Generator Loss: 0.6606817841529846, Discriminator Loss: 1.6523689031600952
Epoch: 1231, Generator Loss: 0.664672315120697, Discriminator Loss: 1.6423028707504272
Epoch: 1232, Generator Loss: 0.661228597164154, Discriminator Loss: 1.6342144012451172
Epoch: 1233, Generator Loss: 0.6663321256637573, Discriminator Loss: 1.6511577367782593
Epoch: 1234, Generator Loss: 0.6553473472595215, Discriminator Loss: 1.6815918684005737
Epoch: 1235, Generator Loss: 0.6639971137046814, Discriminator Loss: 1.645577073097229
Epoch: 1236, Generator Loss: 0.6659244894981384, Discriminator Loss: 1.6479674577713013
Epoch: 1237, Generator Loss: 0.6597194075584412, Discriminator Loss: 1.670525312423706
Epoch: 1238, Generator Loss: 0.6630571484565735, Discriminator Loss: 1.642651915550232
Epoch: 1239, Generator Loss: 0.6595485806465149, Discriminator Loss: 1.665457010269165
Epoch: 1240, Generator Loss: 0.6574819684028625, Discriminator Loss: 1.6333731412887573
Epoch: 1241, Generator Loss: 0.6577057242393494, Discriminator Loss: 1.6614841222763062
Epoch: 1242, Generator Loss: 0.6495617628097534, Discriminator Loss: 1.6683508157730103
Epoch: 1243, Generator Loss: 0.6570148468017578, Discriminator Loss: 1.6318538188934326
Epoch: 1244, Generator Loss: 0.6516122221946716, Discriminator Loss: 1.6126329898834229
Epoch: 1245, Generator Loss: 0.6460831761360168, Discriminator Loss: 1.6535875797271729
Epoch: 1246, Generator Loss: 0.6564846038818359, Discriminator Loss: 1.6633306741714478
Epoch: 1247, Generator Loss: 0.6472182869911194, Discriminator Loss: 1.6884450912475586
Epoch: 1248, Generator Loss: 0.6570234298706055, Discriminator Loss: 1.646406888961792
Epoch: 1249, Generator Loss: 0.6463174819946289, Discriminator Loss: 1.6338547468185425
Epoch: 1250, Generator Loss: 0.6512469053268433, Discriminator Loss: 1.6754158735275269
Epoch: 1251, Generator Loss: 0.6571413278579712, Discriminator Loss: 1.6420619487762451
Epoch: 1252, Generator Loss: 0.649053156375885, Discriminator Loss: 1.6814134120941162
Epoch: 1253, Generator Loss: 0.6495621800422668, Discriminator Loss: 1.6456615924835205
Epoch: 1254, Generator Loss: 0.6529827117919922, Discriminator Loss: 1.6723383665084839
Epoch: 1255, Generator Loss: 0.6461490392684937, Discriminator Loss: 1.6615219116210938
Epoch: 1256, Generator Loss: 0.648934543132782, Discriminator Loss: 1.6550557613372803
Epoch: 1257, Generator Loss: 0.6488696336746216, Discriminator Loss: 1.6581262350082397
Epoch: 1258, Generator Loss: 0.6476975679397583, Discriminator Loss: 1.6355394124984741
Epoch: 1259, Generator Loss: 0.6555215716362, Discriminator Loss: 1.6714153289794922
Epoch: 1260, Generator Loss: 0.6525635123252869, Discriminator Loss: 1.6109732389450073
Epoch: 1261, Generator Loss: 0.6444579362869263, Discriminator Loss: 1.6559669971466064
Epoch: 1262, Generator Loss: 0.643211841583252, Discriminator Loss: 1.6661745309829712
Epoch: 1263, Generator Loss: 0.6604682207107544, Discriminator Loss: 1.647653341293335
Epoch: 1264, Generator Loss: 0.6488139033317566, Discriminator Loss: 1.6573295593261719
Epoch: 1265, Generator Loss: 0.6554189920425415, Discriminator Loss: 1.6520349979400635
Epoch: 1266, Generator Loss: 0.6481828689575195, Discriminator Loss: 1.6995155811309814
Epoch: 1267, Generator Loss: 0.6568138003349304, Discriminator Loss: 1.6576728820800781
Epoch: 1268, Generator Loss: 0.6551809310913086, Discriminator Loss: 1.693595290184021
Epoch: 1269, Generator Loss: 0.6609169244766235, Discriminator Loss: 1.6618362665176392
Epoch: 1270, Generator Loss: 0.6569326519966125, Discriminator Loss: 1.6806684732437134
Epoch: 1271, Generator Loss: 0.6550309062004089, Discriminator Loss: 1.686145544052124
Epoch: 1272, Generator Loss: 0.6517230868339539, Discriminator Loss: 1.6503115892410278
Epoch: 1273, Generator Loss: 0.6569312214851379, Discriminator Loss: 1.62273108959198
Epoch: 1274, Generator Loss: 0.6563076972961426, Discriminator Loss: 1.6275283098220825
Epoch: 1275, Generator Loss: 0.6507123112678528, Discriminator Loss: 1.6577852964401245
Epoch: 1276, Generator Loss: 0.6625983119010925, Discriminator Loss: 1.6506175994873047
Epoch: 1277, Generator Loss: 0.6604320406913757, Discriminator Loss: 1.651310682296753
Epoch: 1278, Generator Loss: 0.6537402868270874, Discriminator Loss: 1.6805360317230225
Epoch: 1279, Generator Loss: 0.656126856803894, Discriminator Loss: 1.6517858505249023
Epoch: 1280, Generator Loss: 0.6634419560432434, Discriminator Loss: 1.6701668500900269
Epoch: 1281, Generator Loss: 0.6572081446647644, Discriminator Loss: 1.6593761444091797
Epoch: 1282, Generator Loss: 0.6618607640266418, Discriminator Loss: 1.645729899406433
Epoch: 1283, Generator Loss: 0.66155606508255, Discriminator Loss: 1.6788848638534546
Epoch: 1284, Generator Loss: 0.6526760458946228, Discriminator Loss: 1.6467612981796265
Epoch: 1285, Generator Loss: 0.6514220833778381, Discriminator Loss: 1.667316198348999
Epoch: 1286, Generator Loss: 0.6488319635391235, Discriminator Loss: 1.6492310762405396
Epoch: 1287, Generator Loss: 0.6490567326545715, Discriminator Loss: 1.6519107818603516
Epoch: 1288, Generator Loss: 0.6603417992591858, Discriminator Loss: 1.6611175537109375
Epoch: 1289, Generator Loss: 0.659924328327179, Discriminator Loss: 1.669698715209961
Epoch: 1290, Generator Loss: 0.6579888463020325, Discriminator Loss: 1.6461057662963867
Epoch: 1291, Generator Loss: 0.6631989479064941, Discriminator Loss: 1.619891881942749
Epoch: 1292, Generator Loss: 0.6549245119094849, Discriminator Loss: 1.6333200931549072
Epoch: 1293, Generator Loss: 0.6525278687477112, Discriminator Loss: 1.658944845199585
Epoch: 1294, Generator Loss: 0.6480426788330078, Discriminator Loss: 1.6828699111938477
Epoch: 1295, Generator Loss: 0.6523095369338989, Discriminator Loss: 1.6259708404541016
Epoch: 1296, Generator Loss: 0.6517998576164246, Discriminator Loss: 1.6356550455093384
Epoch: 1297, Generator Loss: 0.6494055986404419, Discriminator Loss: 1.639508605003357
Epoch: 1298, Generator Loss: 0.6532209515571594, Discriminator Loss: 1.622077226638794
Epoch: 1299, Generator Loss: 0.6499229669570923, Discriminator Loss: 1.6078447103500366
Epoch: 1300, Generator Loss: 0.6418628096580505, Discriminator Loss: 1.5690386295318604
Epoch: 1301, Generator Loss: 0.6426067352294922, Discriminator Loss: 1.6144112348556519
Epoch: 1302, Generator Loss: 0.6285651326179504, Discriminator Loss: 1.5892590284347534
Epoch: 1303, Generator Loss: 0.6249789595603943, Discriminator Loss: 1.6109968423843384
Epoch: 1304, Generator Loss: 0.5947089791297913, Discriminator Loss: 1.6679933071136475
Epoch: 1305, Generator Loss: 0.5850836634635925, Discriminator Loss: 1.6738673448562622
Epoch: 1306, Generator Loss: 0.600967288017273, Discriminator Loss: 1.6951391696929932
Epoch: 1307, Generator Loss: 0.6168094277381897, Discriminator Loss: 1.645403265953064
Epoch: 1308, Generator Loss: 0.6246881484985352, Discriminator Loss: 1.689113974571228
Epoch: 1309, Generator Loss: 0.6312369704246521, Discriminator Loss: 1.6348329782485962
Epoch: 1310, Generator Loss: 0.6363827586174011, Discriminator Loss: 1.6716439723968506
Epoch: 1311, Generator Loss: 0.6330011487007141, Discriminator Loss: 1.6741737127304077
Epoch: 1312, Generator Loss: 0.6424239277839661, Discriminator Loss: 1.6702117919921875
Epoch: 1313, Generator Loss: 0.6409842371940613, Discriminator Loss: 1.6909923553466797
Epoch: 1314, Generator Loss: 0.6508497595787048, Discriminator Loss: 1.6407569646835327
Epoch: 1315, Generator Loss: 0.6398538947105408, Discriminator Loss: 1.664854884147644
Epoch: 1316, Generator Loss: 0.6451045870780945, Discriminator Loss: 1.6511999368667603
Epoch: 1317, Generator Loss: 0.6491714119911194, Discriminator Loss: 1.6139259338378906
Epoch: 1318, Generator Loss: 0.6404816508293152, Discriminator Loss: 1.6457159519195557
Epoch: 1319, Generator Loss: 0.6459512114524841, Discriminator Loss: 1.6475096940994263
Epoch: 1320, Generator Loss: 0.6477831602096558, Discriminator Loss: 1.6583430767059326
Epoch: 1321, Generator Loss: 0.6496142148971558, Discriminator Loss: 1.6780344247817993
Epoch: 1322, Generator Loss: 0.6514231562614441, Discriminator Loss: 1.668905258178711
Epoch: 1323, Generator Loss: 0.6539020538330078, Discriminator Loss: 1.6433804035186768
Epoch: 1324, Generator Loss: 0.6497960686683655, Discriminator Loss: 1.6795635223388672
Epoch: 1325, Generator Loss: 0.6558197736740112, Discriminator Loss: 1.6470773220062256
Epoch: 1326, Generator Loss: 0.6530619859695435, Discriminator Loss: 1.6548281908035278
Epoch: 1327, Generator Loss: 0.6527928113937378, Discriminator Loss: 1.6337203979492188
Epoch: 1328, Generator Loss: 0.650340735912323, Discriminator Loss: 1.6813390254974365
Epoch: 1329, Generator Loss: 0.6589441299438477, Discriminator Loss: 1.6539212465286255
Epoch: 1330, Generator Loss: 0.6559054255485535, Discriminator Loss: 1.6442512273788452
Epoch: 1331, Generator Loss: 0.6485267281532288, Discriminator Loss: 1.6683921813964844
Epoch: 1332, Generator Loss: 0.6525558233261108, Discriminator Loss: 1.6730924844741821
Epoch: 1333, Generator Loss: 0.6586003303527832, Discriminator Loss: 1.649617314338684
Epoch: 1334, Generator Loss: 0.6543957591056824, Discriminator Loss: 1.6667085886001587
Epoch: 1335, Generator Loss: 0.6522388458251953, Discriminator Loss: 1.674619436264038
Epoch: 1336, Generator Loss: 0.6568306088447571, Discriminator Loss: 1.6267865896224976
Epoch: 1337, Generator Loss: 0.65485680103302, Discriminator Loss: 1.684126853942871
Epoch: 1338, Generator Loss: 0.6575249433517456, Discriminator Loss: 1.5911023616790771
Epoch: 1339, Generator Loss: 0.6550391316413879, Discriminator Loss: 1.6823457479476929
Epoch: 1340, Generator Loss: 0.6554521322250366, Discriminator Loss: 1.6761424541473389
Epoch: 1341, Generator Loss: 0.6577993631362915, Discriminator Loss: 1.6640088558197021
Epoch: 1342, Generator Loss: 0.6514478921890259, Discriminator Loss: 1.6120929718017578
Epoch: 1343, Generator Loss: 0.6558846235275269, Discriminator Loss: 1.6789289712905884
Epoch: 1344, Generator Loss: 0.660625159740448, Discriminator Loss: 1.6808301210403442
Epoch: 1345, Generator Loss: 0.6547390818595886, Discriminator Loss: 1.6274152994155884
Epoch: 1346, Generator Loss: 0.6576013565063477, Discriminator Loss: 1.6730738878250122
Epoch: 1347, Generator Loss: 0.6549287438392639, Discriminator Loss: 1.6392295360565186
Epoch: 1348, Generator Loss: 0.6562065482139587, Discriminator Loss: 1.6636428833007812
Epoch: 1349, Generator Loss: 0.6492525935173035, Discriminator Loss: 1.6764018535614014
Epoch: 1350, Generator Loss: 0.6484156847000122, Discriminator Loss: 1.6637922525405884
Epoch: 1351, Generator Loss: 0.6601997017860413, Discriminator Loss: 1.6507748365402222
Epoch: 1352, Generator Loss: 0.6525670289993286, Discriminator Loss: 1.6379822492599487
Epoch: 1353, Generator Loss: 0.6520934700965881, Discriminator Loss: 1.6746572256088257
Epoch: 1354, Generator Loss: 0.6608067750930786, Discriminator Loss: 1.6695268154144287
Epoch: 1355, Generator Loss: 0.6557994484901428, Discriminator Loss: 1.68948233127594
Epoch: 1356, Generator Loss: 0.6560629606246948, Discriminator Loss: 1.7012053728103638
Epoch: 1357, Generator Loss: 0.6504091620445251, Discriminator Loss: 1.6159236431121826
Epoch: 1358, Generator Loss: 0.653484046459198, Discriminator Loss: 1.6490404605865479
Epoch: 1359, Generator Loss: 0.6490179896354675, Discriminator Loss: 1.667814016342163
Epoch: 1360, Generator Loss: 0.6563626527786255, Discriminator Loss: 1.6430461406707764
Epoch: 1361, Generator Loss: 0.6595810055732727, Discriminator Loss: 1.6645545959472656
Epoch: 1362, Generator Loss: 0.6518763303756714, Discriminator Loss: 1.638771891593933
Epoch: 1363, Generator Loss: 0.6547175049781799, Discriminator Loss: 1.6517945528030396
Epoch: 1364, Generator Loss: 0.6567093729972839, Discriminator Loss: 1.6385873556137085
Epoch: 1365, Generator Loss: 0.6515759825706482, Discriminator Loss: 1.6978590488433838
Epoch: 1366, Generator Loss: 0.6544405817985535, Discriminator Loss: 1.673156976699829
Epoch: 1367, Generator Loss: 0.6521363854408264, Discriminator Loss: 1.6472216844558716
Epoch: 1368, Generator Loss: 0.6503686904907227, Discriminator Loss: 1.6593793630599976
Epoch: 1369, Generator Loss: 0.6518344283103943, Discriminator Loss: 1.6513633728027344
Epoch: 1370, Generator Loss: 0.6489633917808533, Discriminator Loss: 1.6366578340530396
Epoch: 1371, Generator Loss: 0.6501855850219727, Discriminator Loss: 1.6459428071975708
Epoch: 1372, Generator Loss: 0.6447911858558655, Discriminator Loss: 1.7110360860824585
Epoch: 1373, Generator Loss: 0.6529685854911804, Discriminator Loss: 1.6396534442901611
Epoch: 1374, Generator Loss: 0.6560923457145691, Discriminator Loss: 1.6347157955169678
Epoch: 1375, Generator Loss: 0.6490367650985718, Discriminator Loss: 1.658794641494751
Epoch: 1376, Generator Loss: 0.6506088376045227, Discriminator Loss: 1.6556072235107422
Epoch: 1377, Generator Loss: 0.6489596962928772, Discriminator Loss: 1.661228895187378
Epoch: 1378, Generator Loss: 0.6527214646339417, Discriminator Loss: 1.667049765586853
Epoch: 1379, Generator Loss: 0.6529183983802795, Discriminator Loss: 1.685502052307129
Epoch: 1380, Generator Loss: 0.6568640470504761, Discriminator Loss: 1.6518309116363525
Epoch: 1381, Generator Loss: 0.6518393754959106, Discriminator Loss: 1.6204591989517212
Epoch: 1382, Generator Loss: 0.6591614484786987, Discriminator Loss: 1.6384990215301514
Epoch: 1383, Generator Loss: 0.6598365902900696, Discriminator Loss: 1.680704116821289
Epoch: 1384, Generator Loss: 0.6496005654335022, Discriminator Loss: 1.6709786653518677
Epoch: 1385, Generator Loss: 0.6513761878013611, Discriminator Loss: 1.6586331129074097
Epoch: 1386, Generator Loss: 0.6581485867500305, Discriminator Loss: 1.6656635999679565
Epoch: 1387, Generator Loss: 0.6555308103561401, Discriminator Loss: 1.6737765073776245
Epoch: 1388, Generator Loss: 0.6552044153213501, Discriminator Loss: 1.6267225742340088
Epoch: 1389, Generator Loss: 0.6576852202415466, Discriminator Loss: 1.6396116018295288
Epoch: 1390, Generator Loss: 0.6568583846092224, Discriminator Loss: 1.7009421586990356
Epoch: 1391, Generator Loss: 0.6564041972160339, Discriminator Loss: 1.6158987283706665
Epoch: 1392, Generator Loss: 0.6506990194320679, Discriminator Loss: 1.6517212390899658
Epoch: 1393, Generator Loss: 0.6549952626228333, Discriminator Loss: 1.650553584098816
Epoch: 1394, Generator Loss: 0.659880518913269, Discriminator Loss: 1.6630779504776
Epoch: 1395, Generator Loss: 0.6580024361610413, Discriminator Loss: 1.6677281856536865
Epoch: 1396, Generator Loss: 0.6548729538917542, Discriminator Loss: 1.645447015762329
Epoch: 1397, Generator Loss: 0.6544448137283325, Discriminator Loss: 1.6757452487945557
Epoch: 1398, Generator Loss: 0.6600199937820435, Discriminator Loss: 1.635045051574707
Epoch: 1399, Generator Loss: 0.6544896364212036, Discriminator Loss: 1.6702972650527954
Epoch: 1400, Generator Loss: 0.6603055596351624, Discriminator Loss: 1.6506913900375366
Epoch: 1401, Generator Loss: 0.6553611159324646, Discriminator Loss: 1.6341562271118164
Epoch: 1402, Generator Loss: 0.6566607356071472, Discriminator Loss: 1.6581367254257202
Epoch: 1403, Generator Loss: 0.6620674729347229, Discriminator Loss: 1.6969013214111328
Epoch: 1404, Generator Loss: 0.6609893441200256, Discriminator Loss: 1.6285349130630493
Epoch: 1405, Generator Loss: 0.6584230661392212, Discriminator Loss: 1.6492846012115479
Epoch: 1406, Generator Loss: 0.656855583190918, Discriminator Loss: 1.6808531284332275
Epoch: 1407, Generator Loss: 0.6590844392776489, Discriminator Loss: 1.6096456050872803
Epoch: 1408, Generator Loss: 0.6562138199806213, Discriminator Loss: 1.6509798765182495
Epoch: 1409, Generator Loss: 0.6604600548744202, Discriminator Loss: 1.6339671611785889
Epoch: 1410, Generator Loss: 0.6618840098381042, Discriminator Loss: 1.648142695426941
Epoch: 1411, Generator Loss: 0.6653160452842712, Discriminator Loss: 1.666109323501587
Epoch: 1412, Generator Loss: 0.6562235951423645, Discriminator Loss: 1.6907094717025757
Epoch: 1413, Generator Loss: 0.6614949107170105, Discriminator Loss: 1.639661431312561
Epoch: 1414, Generator Loss: 0.6565523743629456, Discriminator Loss: 1.6257809400558472
Epoch: 1415, Generator Loss: 0.6568604111671448, Discriminator Loss: 1.6851071119308472
Epoch: 1416, Generator Loss: 0.6569623947143555, Discriminator Loss: 1.6646875143051147
Epoch: 1417, Generator Loss: 0.6543565988540649, Discriminator Loss: 1.6506017446517944
Epoch: 1418, Generator Loss: 0.6563649773597717, Discriminator Loss: 1.6141937971115112
Epoch: 1419, Generator Loss: 0.6596975326538086, Discriminator Loss: 1.6386464834213257
Epoch: 1420, Generator Loss: 0.6657269597053528, Discriminator Loss: 1.610144853591919
Epoch: 1421, Generator Loss: 0.6587496399879456, Discriminator Loss: 1.6303255558013916
Epoch: 1422, Generator Loss: 0.6635017395019531, Discriminator Loss: 1.6312607526779175
Epoch: 1423, Generator Loss: 0.6579347252845764, Discriminator Loss: 1.650673747062683
Epoch: 1424, Generator Loss: 0.6510380506515503, Discriminator Loss: 1.669442892074585
Epoch: 1425, Generator Loss: 0.655156135559082, Discriminator Loss: 1.6463392972946167
Epoch: 1426, Generator Loss: 0.6607983708381653, Discriminator Loss: 1.6719008684158325
Epoch: 1427, Generator Loss: 0.6585878133773804, Discriminator Loss: 1.6453914642333984
Epoch: 1428, Generator Loss: 0.6616678237915039, Discriminator Loss: 1.6267188787460327
Epoch: 1429, Generator Loss: 0.6540742516517639, Discriminator Loss: 1.6494191884994507
Epoch: 1430, Generator Loss: 0.6572757363319397, Discriminator Loss: 1.6896820068359375
Epoch: 1431, Generator Loss: 0.6591415405273438, Discriminator Loss: 1.6513326168060303
Epoch: 1432, Generator Loss: 0.6634434461593628, Discriminator Loss: 1.6597716808319092
Epoch: 1433, Generator Loss: 0.661585807800293, Discriminator Loss: 1.628661036491394
Epoch: 1434, Generator Loss: 0.6579685211181641, Discriminator Loss: 1.6663296222686768
Epoch: 1435, Generator Loss: 0.6586350202560425, Discriminator Loss: 1.6048848628997803
Epoch: 1436, Generator Loss: 0.6545435190200806, Discriminator Loss: 1.652557134628296
Epoch: 1437, Generator Loss: 0.6585139036178589, Discriminator Loss: 1.653820276260376
Epoch: 1438, Generator Loss: 0.6620820164680481, Discriminator Loss: 1.6498386859893799
Epoch: 1439, Generator Loss: 0.6592152714729309, Discriminator Loss: 1.6556146144866943
Epoch: 1440, Generator Loss: 0.6562591791152954, Discriminator Loss: 1.6591466665267944
Epoch: 1441, Generator Loss: 0.6631302237510681, Discriminator Loss: 1.6013959646224976
Epoch: 1442, Generator Loss: 0.6576060652732849, Discriminator Loss: 1.6491153240203857
Epoch: 1443, Generator Loss: 0.6621440052986145, Discriminator Loss: 1.64711594581604
Epoch: 1444, Generator Loss: 0.6596289873123169, Discriminator Loss: 1.6247752904891968
Epoch: 1445, Generator Loss: 0.6640765070915222, Discriminator Loss: 1.6575137376785278
Epoch: 1446, Generator Loss: 0.6527663469314575, Discriminator Loss: 1.66153883934021
Epoch: 1447, Generator Loss: 0.6572738289833069, Discriminator Loss: 1.6726351976394653
Epoch: 1448, Generator Loss: 0.6574092507362366, Discriminator Loss: 1.6645002365112305
Epoch: 1449, Generator Loss: 0.6582712531089783, Discriminator Loss: 1.6454588174819946
Epoch: 1450, Generator Loss: 0.6603453755378723, Discriminator Loss: 1.6618454456329346
Epoch: 1451, Generator Loss: 0.6586240530014038, Discriminator Loss: 1.6688897609710693
Epoch: 1452, Generator Loss: 0.6498944163322449, Discriminator Loss: 1.6512340307235718
Epoch: 1453, Generator Loss: 0.6559500098228455, Discriminator Loss: 1.6273186206817627
Epoch: 1454, Generator Loss: 0.6528449654579163, Discriminator Loss: 1.6383827924728394
Epoch: 1455, Generator Loss: 0.6519349217414856, Discriminator Loss: 1.6604292392730713
Epoch: 1456, Generator Loss: 0.6546324491500854, Discriminator Loss: 1.63625168800354
Epoch: 1457, Generator Loss: 0.6488752365112305, Discriminator Loss: 1.6884313821792603
Epoch: 1458, Generator Loss: 0.6507828831672668, Discriminator Loss: 1.6609452962875366
Epoch: 1459, Generator Loss: 0.6583625078201294, Discriminator Loss: 1.6385400295257568
Epoch: 1460, Generator Loss: 0.6511878371238708, Discriminator Loss: 1.613842248916626
Epoch: 1461, Generator Loss: 0.6481748223304749, Discriminator Loss: 1.61484694480896
Epoch: 1462, Generator Loss: 0.6499820351600647, Discriminator Loss: 1.644868016242981
Epoch: 1463, Generator Loss: 0.6462215185165405, Discriminator Loss: 1.6486090421676636
Epoch: 1464, Generator Loss: 0.6483065485954285, Discriminator Loss: 1.6479692459106445
Epoch: 1465, Generator Loss: 0.6490894556045532, Discriminator Loss: 1.6677674055099487
Epoch: 1466, Generator Loss: 0.6543803811073303, Discriminator Loss: 1.6583195924758911
Epoch: 1467, Generator Loss: 0.6574227213859558, Discriminator Loss: 1.6439917087554932
Epoch: 1468, Generator Loss: 0.6533251404762268, Discriminator Loss: 1.6354949474334717
Epoch: 1469, Generator Loss: 0.6468241810798645, Discriminator Loss: 1.6264654397964478
Epoch: 1470, Generator Loss: 0.6425334215164185, Discriminator Loss: 1.6567834615707397
Epoch: 1471, Generator Loss: 0.6468720436096191, Discriminator Loss: 1.6812056303024292
Epoch: 1472, Generator Loss: 0.6509811282157898, Discriminator Loss: 1.6513183116912842
Epoch: 1473, Generator Loss: 0.6564846038818359, Discriminator Loss: 1.6513375043869019
Epoch: 1474, Generator Loss: 0.6536237001419067, Discriminator Loss: 1.682418704032898
Epoch: 1475, Generator Loss: 0.6590045690536499, Discriminator Loss: 1.6584688425064087
Epoch: 1476, Generator Loss: 0.6613169312477112, Discriminator Loss: 1.6551769971847534
Epoch: 1477, Generator Loss: 0.6633310914039612, Discriminator Loss: 1.637795090675354
Epoch: 1478, Generator Loss: 0.6600578427314758, Discriminator Loss: 1.63441002368927
Epoch: 1479, Generator Loss: 0.6542024612426758, Discriminator Loss: 1.674103856086731
Epoch: 1480, Generator Loss: 0.6532198786735535, Discriminator Loss: 1.6344457864761353
Epoch: 1481, Generator Loss: 0.6563940644264221, Discriminator Loss: 1.7030116319656372
Epoch: 1482, Generator Loss: 0.6615994572639465, Discriminator Loss: 1.670164704322815
Epoch: 1483, Generator Loss: 0.6580435633659363, Discriminator Loss: 1.6221288442611694
Epoch: 1484, Generator Loss: 0.6597716212272644, Discriminator Loss: 1.6702147722244263
Epoch: 1485, Generator Loss: 0.6623496413230896, Discriminator Loss: 1.6070078611373901
Epoch: 1486, Generator Loss: 0.6602875590324402, Discriminator Loss: 1.6423860788345337
Epoch: 1487, Generator Loss: 0.6556998491287231, Discriminator Loss: 1.6367653608322144
Epoch: 1488, Generator Loss: 0.6632151007652283, Discriminator Loss: 1.6285607814788818
Epoch: 1489, Generator Loss: 0.6618161797523499, Discriminator Loss: 1.6322563886642456
Epoch: 1490, Generator Loss: 0.6637691855430603, Discriminator Loss: 1.6080896854400635
Epoch: 1491, Generator Loss: 0.6605356335639954, Discriminator Loss: 1.6315311193466187
Epoch: 1492, Generator Loss: 0.6557715535163879, Discriminator Loss: 1.6262400150299072
Epoch: 1493, Generator Loss: 0.6591891646385193, Discriminator Loss: 1.6622053384780884
Epoch: 1494, Generator Loss: 0.6513634920120239, Discriminator Loss: 1.6412559747695923
Epoch: 1495, Generator Loss: 0.6554900407791138, Discriminator Loss: 1.6564377546310425
Epoch: 1496, Generator Loss: 0.6457972526550293, Discriminator Loss: 1.6123942136764526
Epoch: 1497, Generator Loss: 0.6547765731811523, Discriminator Loss: 1.6115500926971436
Epoch: 1498, Generator Loss: 0.6446139216423035, Discriminator Loss: 1.6456836462020874
Epoch: 1499, Generator Loss: 0.6432395577430725, Discriminator Loss: 1.6217248439788818
Epoch: 1500, Generator Loss: 0.6430150270462036, Discriminator Loss: 1.6513696908950806
Epoch: 1501, Generator Loss: 0.6425524353981018, Discriminator Loss: 1.6144014596939087
Epoch: 1502, Generator Loss: 0.639177680015564, Discriminator Loss: 1.5908173322677612
Epoch: 1503, Generator Loss: 0.629531979560852, Discriminator Loss: 1.6631834506988525
Epoch: 1504, Generator Loss: 0.6248326897621155, Discriminator Loss: 1.6568048000335693
Epoch: 1505, Generator Loss: 0.6462618708610535, Discriminator Loss: 1.6712895631790161
Epoch: 1506, Generator Loss: 0.6499453783035278, Discriminator Loss: 1.6243343353271484
Epoch: 1507, Generator Loss: 0.6502984166145325, Discriminator Loss: 1.6207648515701294
Epoch: 1508, Generator Loss: 0.6465982794761658, Discriminator Loss: 1.6329339742660522
Epoch: 1509, Generator Loss: 0.6551907658576965, Discriminator Loss: 1.6792831420898438
Epoch: 1510, Generator Loss: 0.6498937606811523, Discriminator Loss: 1.6307498216629028
Epoch: 1511, Generator Loss: 0.6509402394294739, Discriminator Loss: 1.62574303150177
Epoch: 1512, Generator Loss: 0.6554293632507324, Discriminator Loss: 1.6558105945587158
Epoch: 1513, Generator Loss: 0.6560513377189636, Discriminator Loss: 1.6418384313583374
Epoch: 1514, Generator Loss: 0.6533184051513672, Discriminator Loss: 1.626468300819397
Epoch: 1515, Generator Loss: 0.6568331718444824, Discriminator Loss: 1.6205729246139526
Epoch: 1516, Generator Loss: 0.656605064868927, Discriminator Loss: 1.6122194528579712
Epoch: 1517, Generator Loss: 0.6515697836875916, Discriminator Loss: 1.6324684619903564
Epoch: 1518, Generator Loss: 0.6570647358894348, Discriminator Loss: 1.625020980834961
Epoch: 1519, Generator Loss: 0.6526316404342651, Discriminator Loss: 1.6759655475616455
Epoch: 1520, Generator Loss: 0.6491765379905701, Discriminator Loss: 1.66908860206604
Epoch: 1521, Generator Loss: 0.655202329158783, Discriminator Loss: 1.6579797267913818
Epoch: 1522, Generator Loss: 0.646727979183197, Discriminator Loss: 1.710609793663025
Epoch: 1523, Generator Loss: 0.6539167165756226, Discriminator Loss: 1.6386330127716064
Epoch: 1524, Generator Loss: 0.6540970206260681, Discriminator Loss: 1.648244023323059
Epoch: 1525, Generator Loss: 0.6580397486686707, Discriminator Loss: 1.6265666484832764
Epoch: 1526, Generator Loss: 0.6526103615760803, Discriminator Loss: 1.6231087446212769
Epoch: 1527, Generator Loss: 0.6546220779418945, Discriminator Loss: 1.6682406663894653
Epoch: 1528, Generator Loss: 0.6488274335861206, Discriminator Loss: 1.6366199254989624
Epoch: 1529, Generator Loss: 0.6526719927787781, Discriminator Loss: 1.6288906335830688
Epoch: 1530, Generator Loss: 0.6514195203781128, Discriminator Loss: 1.6144895553588867
Epoch: 1531, Generator Loss: 0.6485229134559631, Discriminator Loss: 1.6265182495117188
Epoch: 1532, Generator Loss: 0.6517993211746216, Discriminator Loss: 1.6343315839767456
Epoch: 1533, Generator Loss: 0.6546005606651306, Discriminator Loss: 1.635905385017395
Epoch: 1534, Generator Loss: 0.6559202075004578, Discriminator Loss: 1.6727389097213745
Epoch: 1535, Generator Loss: 0.646848738193512, Discriminator Loss: 1.6296888589859009
Epoch: 1536, Generator Loss: 0.6527727842330933, Discriminator Loss: 1.6377522945404053
Epoch: 1537, Generator Loss: 0.6557617783546448, Discriminator Loss: 1.6380418539047241
Epoch: 1538, Generator Loss: 0.6564304232597351, Discriminator Loss: 1.6052619218826294
Epoch: 1539, Generator Loss: 0.6530006527900696, Discriminator Loss: 1.629730463027954
Epoch: 1540, Generator Loss: 0.6532015800476074, Discriminator Loss: 1.648751974105835
Epoch: 1541, Generator Loss: 0.6439740061759949, Discriminator Loss: 1.6691255569458008
Epoch: 1542, Generator Loss: 0.6584257483482361, Discriminator Loss: 1.6598739624023438
Epoch: 1543, Generator Loss: 0.6528401970863342, Discriminator Loss: 1.659842610359192
Epoch: 1544, Generator Loss: 0.6512211561203003, Discriminator Loss: 1.6718968152999878
Epoch: 1545, Generator Loss: 0.6536005139350891, Discriminator Loss: 1.6328240633010864
Epoch: 1546, Generator Loss: 0.6536248922348022, Discriminator Loss: 1.6496965885162354
Epoch: 1547, Generator Loss: 0.6527608036994934, Discriminator Loss: 1.69568932056427
Epoch: 1548, Generator Loss: 0.6587553024291992, Discriminator Loss: 1.6262110471725464
Epoch: 1549, Generator Loss: 0.6521804928779602, Discriminator Loss: 1.6503639221191406
Epoch: 1550, Generator Loss: 0.6535734534263611, Discriminator Loss: 1.64067804813385
Epoch: 1551, Generator Loss: 0.6585046052932739, Discriminator Loss: 1.6370869874954224
Epoch: 1552, Generator Loss: 0.6554802656173706, Discriminator Loss: 1.6100322008132935
Epoch: 1553, Generator Loss: 0.6581031084060669, Discriminator Loss: 1.6425378322601318
Epoch: 1554, Generator Loss: 0.6541128158569336, Discriminator Loss: 1.601530909538269
Epoch: 1555, Generator Loss: 0.6592316031455994, Discriminator Loss: 1.6354291439056396
Epoch: 1556, Generator Loss: 0.6561371684074402, Discriminator Loss: 1.65523362159729
Epoch: 1557, Generator Loss: 0.6565679907798767, Discriminator Loss: 1.648837685585022
Epoch: 1558, Generator Loss: 0.6602242588996887, Discriminator Loss: 1.6922686100006104
Epoch: 1559, Generator Loss: 0.6551467180252075, Discriminator Loss: 1.6457716226577759
Epoch: 1560, Generator Loss: 0.6567196249961853, Discriminator Loss: 1.6762927770614624
Epoch: 1561, Generator Loss: 0.6556415557861328, Discriminator Loss: 1.6305326223373413
Epoch: 1562, Generator Loss: 0.6597387194633484, Discriminator Loss: 1.6262365579605103
Epoch: 1563, Generator Loss: 0.6589251756668091, Discriminator Loss: 1.6018799543380737
Epoch: 1564, Generator Loss: 0.6615744829177856, Discriminator Loss: 1.645588994026184
Epoch: 1565, Generator Loss: 0.6577230095863342, Discriminator Loss: 1.6258983612060547
Epoch: 1566, Generator Loss: 0.6631428599357605, Discriminator Loss: 1.6628156900405884
Epoch: 1567, Generator Loss: 0.6569553017616272, Discriminator Loss: 1.626931071281433
Epoch: 1568, Generator Loss: 0.6596799492835999, Discriminator Loss: 1.633476972579956
Epoch: 1569, Generator Loss: 0.6640573143959045, Discriminator Loss: 1.6366384029388428
Epoch: 1570, Generator Loss: 0.6553875803947449, Discriminator Loss: 1.6534397602081299
Epoch: 1571, Generator Loss: 0.654504656791687, Discriminator Loss: 1.6339447498321533
Epoch: 1572, Generator Loss: 0.6633333563804626, Discriminator Loss: 1.6694252490997314
Epoch: 1573, Generator Loss: 0.6614107489585876, Discriminator Loss: 1.659788727760315
Epoch: 1574, Generator Loss: 0.6618319153785706, Discriminator Loss: 1.6225032806396484
Epoch: 1575, Generator Loss: 0.6630333662033081, Discriminator Loss: 1.6547483205795288
Epoch: 1576, Generator Loss: 0.6661492586135864, Discriminator Loss: 1.6441758871078491
Epoch: 1577, Generator Loss: 0.6613043546676636, Discriminator Loss: 1.6375778913497925
Epoch: 1578, Generator Loss: 0.6655408143997192, Discriminator Loss: 1.6356645822525024
Epoch: 1579, Generator Loss: 0.6607957482337952, Discriminator Loss: 1.642498254776001
Epoch: 1580, Generator Loss: 0.6578585505485535, Discriminator Loss: 1.6074007749557495
Epoch: 1581, Generator Loss: 0.6647090911865234, Discriminator Loss: 1.6239099502563477
Epoch: 1582, Generator Loss: 0.6576879620552063, Discriminator Loss: 1.6337969303131104
Epoch: 1583, Generator Loss: 0.6564692854881287, Discriminator Loss: 1.6510039567947388
Epoch: 1584, Generator Loss: 0.6543906331062317, Discriminator Loss: 1.6757138967514038
Epoch: 1585, Generator Loss: 0.6648194193840027, Discriminator Loss: 1.6291993856430054
Epoch: 1586, Generator Loss: 0.6534827351570129, Discriminator Loss: 1.6582914590835571
Epoch: 1587, Generator Loss: 0.6595600247383118, Discriminator Loss: 1.6139475107192993
Epoch: 1588, Generator Loss: 0.6544595956802368, Discriminator Loss: 1.6309829950332642
Epoch: 1589, Generator Loss: 0.6597399711608887, Discriminator Loss: 1.6173051595687866
Epoch: 1590, Generator Loss: 0.6542343497276306, Discriminator Loss: 1.6419252157211304
Epoch: 1591, Generator Loss: 0.6495491862297058, Discriminator Loss: 1.6606786251068115
Epoch: 1592, Generator Loss: 0.6549233198165894, Discriminator Loss: 1.6538456678390503
Epoch: 1593, Generator Loss: 0.6512560248374939, Discriminator Loss: 1.6197515726089478
Epoch: 1594, Generator Loss: 0.6505410075187683, Discriminator Loss: 1.6584292650222778
Epoch: 1595, Generator Loss: 0.652539074420929, Discriminator Loss: 1.6527807712554932
Epoch: 1596, Generator Loss: 0.6572249531745911, Discriminator Loss: 1.6638017892837524
Epoch: 1597, Generator Loss: 0.6531451344490051, Discriminator Loss: 1.6515238285064697
Epoch: 1598, Generator Loss: 0.6488203406333923, Discriminator Loss: 1.6320741176605225
Epoch: 1599, Generator Loss: 0.6512960195541382, Discriminator Loss: 1.6424801349639893
Epoch: 1600, Generator Loss: 0.6551752090454102, Discriminator Loss: 1.6235895156860352
Epoch: 1601, Generator Loss: 0.650301992893219, Discriminator Loss: 1.6275379657745361
Epoch: 1602, Generator Loss: 0.6568396687507629, Discriminator Loss: 1.645675778388977
Epoch: 1603, Generator Loss: 0.6594691872596741, Discriminator Loss: 1.6334999799728394
Epoch: 1604, Generator Loss: 0.649903416633606, Discriminator Loss: 1.6466197967529297
Epoch: 1605, Generator Loss: 0.6540797352790833, Discriminator Loss: 1.6862975358963013
Epoch: 1606, Generator Loss: 0.6566153764724731, Discriminator Loss: 1.6298929452896118
Epoch: 1607, Generator Loss: 0.6585566401481628, Discriminator Loss: 1.648863673210144
Epoch: 1608, Generator Loss: 0.6571311354637146, Discriminator Loss: 1.6051260232925415
Epoch: 1609, Generator Loss: 0.6556621193885803, Discriminator Loss: 1.631734013557434
Epoch: 1610, Generator Loss: 0.6566658020019531, Discriminator Loss: 1.632034420967102
Epoch: 1611, Generator Loss: 0.6563991904258728, Discriminator Loss: 1.6380224227905273
Epoch: 1612, Generator Loss: 0.6491378545761108, Discriminator Loss: 1.6248540878295898
Epoch: 1613, Generator Loss: 0.6385233998298645, Discriminator Loss: 1.6345354318618774
Epoch: 1614, Generator Loss: 0.6392912268638611, Discriminator Loss: 1.636953353881836
Epoch: 1615, Generator Loss: 0.6547282934188843, Discriminator Loss: 1.6897753477096558
Epoch: 1616, Generator Loss: 0.6483994722366333, Discriminator Loss: 1.6510124206542969
Epoch: 1617, Generator Loss: 0.6496419906616211, Discriminator Loss: 1.6552388668060303
Epoch: 1618, Generator Loss: 0.648278534412384, Discriminator Loss: 1.6201809644699097
Epoch: 1619, Generator Loss: 0.6482684016227722, Discriminator Loss: 1.6990163326263428
Epoch: 1620, Generator Loss: 0.6476268768310547, Discriminator Loss: 1.652233362197876
Epoch: 1621, Generator Loss: 0.6475459337234497, Discriminator Loss: 1.6750566959381104
Epoch: 1622, Generator Loss: 0.6483855247497559, Discriminator Loss: 1.677287220954895
Epoch: 1623, Generator Loss: 0.6492304801940918, Discriminator Loss: 1.656685471534729
Epoch: 1624, Generator Loss: 0.6556753516197205, Discriminator Loss: 1.6112209558486938
Epoch: 1625, Generator Loss: 0.6491680145263672, Discriminator Loss: 1.6337462663650513
Epoch: 1626, Generator Loss: 0.652688205242157, Discriminator Loss: 1.632559061050415
Epoch: 1627, Generator Loss: 0.65782231092453, Discriminator Loss: 1.6580308675765991
Epoch: 1628, Generator Loss: 0.6543638110160828, Discriminator Loss: 1.627069354057312
Epoch: 1629, Generator Loss: 0.651861846446991, Discriminator Loss: 1.624916672706604
Epoch: 1630, Generator Loss: 0.6469680070877075, Discriminator Loss: 1.6779931783676147
Epoch: 1631, Generator Loss: 0.6531122326850891, Discriminator Loss: 1.655408263206482
Epoch: 1632, Generator Loss: 0.6562142372131348, Discriminator Loss: 1.6313215494155884
Epoch: 1633, Generator Loss: 0.6633647084236145, Discriminator Loss: 1.6102796792984009
Epoch: 1634, Generator Loss: 0.6565575003623962, Discriminator Loss: 1.6616719961166382
Epoch: 1635, Generator Loss: 0.6536487936973572, Discriminator Loss: 1.6246798038482666
Epoch: 1636, Generator Loss: 0.6482735872268677, Discriminator Loss: 1.7231532335281372
Epoch: 1637, Generator Loss: 0.6518999934196472, Discriminator Loss: 1.6053187847137451
Epoch: 1638, Generator Loss: 0.6547825932502747, Discriminator Loss: 1.6303426027297974
Epoch: 1639, Generator Loss: 0.6460922956466675, Discriminator Loss: 1.630059003829956
Epoch: 1640, Generator Loss: 0.6519483923912048, Discriminator Loss: 1.6547437906265259
Epoch: 1641, Generator Loss: 0.6406944394111633, Discriminator Loss: 1.6317037343978882
Epoch: 1642, Generator Loss: 0.6568986773490906, Discriminator Loss: 1.6402969360351562
Epoch: 1643, Generator Loss: 0.6567639112472534, Discriminator Loss: 1.6338691711425781
Epoch: 1644, Generator Loss: 0.6537891030311584, Discriminator Loss: 1.625017762184143
Epoch: 1645, Generator Loss: 0.6517496705055237, Discriminator Loss: 1.680055856704712
Epoch: 1646, Generator Loss: 0.652094304561615, Discriminator Loss: 1.6255958080291748
Epoch: 1647, Generator Loss: 0.6520093679428101, Discriminator Loss: 1.6542407274246216
Epoch: 1648, Generator Loss: 0.6566704511642456, Discriminator Loss: 1.6243832111358643
Epoch: 1649, Generator Loss: 0.653880774974823, Discriminator Loss: 1.5886460542678833
Epoch: 1650, Generator Loss: 0.6531192660331726, Discriminator Loss: 1.6125257015228271
Epoch: 1651, Generator Loss: 0.6473612189292908, Discriminator Loss: 1.6193948984146118
Epoch: 1652, Generator Loss: 0.6489068269729614, Discriminator Loss: 1.6175490617752075
Epoch: 1653, Generator Loss: 0.6449757218360901, Discriminator Loss: 1.6161364316940308
Epoch: 1654, Generator Loss: 0.6519954800605774, Discriminator Loss: 1.6135355234146118
Epoch: 1655, Generator Loss: 0.6522507071495056, Discriminator Loss: 1.6083323955535889
Epoch: 1656, Generator Loss: 0.6448208689689636, Discriminator Loss: 1.6301555633544922
Epoch: 1657, Generator Loss: 0.6583870649337769, Discriminator Loss: 1.632287859916687
Epoch: 1658, Generator Loss: 0.6528702974319458, Discriminator Loss: 1.6627379655838013
Epoch: 1659, Generator Loss: 0.655659556388855, Discriminator Loss: 1.644542932510376
Epoch: 1660, Generator Loss: 0.6562634706497192, Discriminator Loss: 1.647443175315857
Epoch: 1661, Generator Loss: 0.6457090377807617, Discriminator Loss: 1.6173120737075806
Epoch: 1662, Generator Loss: 0.6528894305229187, Discriminator Loss: 1.6411277055740356
Epoch: 1663, Generator Loss: 0.6546854376792908, Discriminator Loss: 1.6150790452957153
Epoch: 1664, Generator Loss: 0.6495345234870911, Discriminator Loss: 1.6150048971176147
Epoch: 1665, Generator Loss: 0.6485819220542908, Discriminator Loss: 1.666728138923645
Epoch: 1666, Generator Loss: 0.6556211113929749, Discriminator Loss: 1.6540015935897827
Epoch: 1667, Generator Loss: 0.652903139591217, Discriminator Loss: 1.58879554271698
Epoch: 1668, Generator Loss: 0.653870701789856, Discriminator Loss: 1.6050292253494263
Epoch: 1669, Generator Loss: 0.6512956619262695, Discriminator Loss: 1.690651535987854
Epoch: 1670, Generator Loss: 0.6545925736427307, Discriminator Loss: 1.6028846502304077
Epoch: 1671, Generator Loss: 0.6495651006698608, Discriminator Loss: 1.6545655727386475
Epoch: 1672, Generator Loss: 0.6578125357627869, Discriminator Loss: 1.6682994365692139
Epoch: 1673, Generator Loss: 0.6594480872154236, Discriminator Loss: 1.6559642553329468
Epoch: 1674, Generator Loss: 0.6529659628868103, Discriminator Loss: 1.6137278079986572
Epoch: 1675, Generator Loss: 0.658396303653717, Discriminator Loss: 1.6378377676010132
Epoch: 1676, Generator Loss: 0.6596769690513611, Discriminator Loss: 1.6107999086380005
Epoch: 1677, Generator Loss: 0.6518006324768066, Discriminator Loss: 1.657368779182434
Epoch: 1678, Generator Loss: 0.6593114137649536, Discriminator Loss: 1.6460336446762085
Epoch: 1679, Generator Loss: 0.6569777727127075, Discriminator Loss: 1.6522020101547241
Epoch: 1680, Generator Loss: 0.6561955809593201, Discriminator Loss: 1.6836549043655396
Epoch: 1681, Generator Loss: 0.6501918435096741, Discriminator Loss: 1.653076171875
Epoch: 1682, Generator Loss: 0.6587163209915161, Discriminator Loss: 1.6605108976364136
Epoch: 1683, Generator Loss: 0.6583786606788635, Discriminator Loss: 1.6120675802230835
Epoch: 1684, Generator Loss: 0.6570128798484802, Discriminator Loss: 1.6566814184188843
Epoch: 1685, Generator Loss: 0.6546218991279602, Discriminator Loss: 1.6407889127731323
Epoch: 1686, Generator Loss: 0.65517657995224, Discriminator Loss: 1.6114180088043213
Epoch: 1687, Generator Loss: 0.6530166268348694, Discriminator Loss: 1.6785327196121216
Epoch: 1688, Generator Loss: 0.6521416902542114, Discriminator Loss: 1.6450636386871338
Epoch: 1689, Generator Loss: 0.6528257727622986, Discriminator Loss: 1.6475402116775513
Epoch: 1690, Generator Loss: 0.6546869277954102, Discriminator Loss: 1.647735357284546
Epoch: 1691, Generator Loss: 0.655400276184082, Discriminator Loss: 1.6343152523040771
Epoch: 1692, Generator Loss: 0.6579924821853638, Discriminator Loss: 1.5981581211090088
Epoch: 1693, Generator Loss: 0.6548903584480286, Discriminator Loss: 1.619358777999878
Epoch: 1694, Generator Loss: 0.6534680724143982, Discriminator Loss: 1.6251699924468994
Epoch: 1695, Generator Loss: 0.6506252288818359, Discriminator Loss: 1.662994623184204
Epoch: 1696, Generator Loss: 0.6467809081077576, Discriminator Loss: 1.6555105447769165
Epoch: 1697, Generator Loss: 0.6532773375511169, Discriminator Loss: 1.6323574781417847
Epoch: 1698, Generator Loss: 0.6453878283500671, Discriminator Loss: 1.6560518741607666
Epoch: 1699, Generator Loss: 0.6442491412162781, Discriminator Loss: 1.6453348398208618
Epoch: 1700, Generator Loss: 0.6382367014884949, Discriminator Loss: 1.6335813999176025
Epoch: 1701, Generator Loss: 0.6288038492202759, Discriminator Loss: 1.6438560485839844
Epoch: 1702, Generator Loss: 0.6304918527603149, Discriminator Loss: 1.6676357984542847
Epoch: 1703, Generator Loss: 0.640824019908905, Discriminator Loss: 1.6363680362701416
Epoch: 1704, Generator Loss: 0.6385825276374817, Discriminator Loss: 1.6457644701004028
Epoch: 1705, Generator Loss: 0.6515824198722839, Discriminator Loss: 1.6390453577041626
Epoch: 1706, Generator Loss: 0.6412593126296997, Discriminator Loss: 1.662535309791565
Epoch: 1707, Generator Loss: 0.6527104377746582, Discriminator Loss: 1.6069953441619873
Epoch: 1708, Generator Loss: 0.6484580636024475, Discriminator Loss: 1.623600959777832
Epoch: 1709, Generator Loss: 0.6509181261062622, Discriminator Loss: 1.6448934078216553
Epoch: 1710, Generator Loss: 0.6506490707397461, Discriminator Loss: 1.685516357421875
Epoch: 1711, Generator Loss: 0.6506959795951843, Discriminator Loss: 1.6520928144454956
Epoch: 1712, Generator Loss: 0.6495113372802734, Discriminator Loss: 1.6215476989746094
Epoch: 1713, Generator Loss: 0.6492448449134827, Discriminator Loss: 1.6094659566879272
Epoch: 1714, Generator Loss: 0.6505246758460999, Discriminator Loss: 1.6039296388626099
Epoch: 1715, Generator Loss: 0.6519113183021545, Discriminator Loss: 1.6326382160186768
Epoch: 1716, Generator Loss: 0.6532357335090637, Discriminator Loss: 1.6409968137741089
Epoch: 1717, Generator Loss: 0.6546155214309692, Discriminator Loss: 1.6176389455795288
Epoch: 1718, Generator Loss: 0.6479170918464661, Discriminator Loss: 1.657737135887146
Epoch: 1719, Generator Loss: 0.6575778722763062, Discriminator Loss: 1.5835329294204712
Epoch: 1720, Generator Loss: 0.6519566774368286, Discriminator Loss: 1.6423341035842896
Epoch: 1721, Generator Loss: 0.6484892964363098, Discriminator Loss: 1.683053731918335
Epoch: 1722, Generator Loss: 0.650642991065979, Discriminator Loss: 1.637835144996643
Epoch: 1723, Generator Loss: 0.6558594703674316, Discriminator Loss: 1.6194809675216675
Epoch: 1724, Generator Loss: 0.6518282294273376, Discriminator Loss: 1.6467530727386475
Epoch: 1725, Generator Loss: 0.6478360295295715, Discriminator Loss: 1.6514146327972412
Epoch: 1726, Generator Loss: 0.6444883346557617, Discriminator Loss: 1.6412168741226196
Epoch: 1727, Generator Loss: 0.6534343361854553, Discriminator Loss: 1.6000248193740845
Epoch: 1728, Generator Loss: 0.6465891599655151, Discriminator Loss: 1.6461135149002075
Epoch: 1729, Generator Loss: 0.6560482978820801, Discriminator Loss: 1.6189697980880737
Epoch: 1730, Generator Loss: 0.6539630889892578, Discriminator Loss: 1.638292908668518
Epoch: 1731, Generator Loss: 0.6429851651191711, Discriminator Loss: 1.6228147745132446
Epoch: 1732, Generator Loss: 0.6601763963699341, Discriminator Loss: 1.5845214128494263
Epoch: 1733, Generator Loss: 0.6501480937004089, Discriminator Loss: 1.6750613451004028
Epoch: 1734, Generator Loss: 0.6476469039916992, Discriminator Loss: 1.6229828596115112
Epoch: 1735, Generator Loss: 0.6498419046401978, Discriminator Loss: 1.6099436283111572
Epoch: 1736, Generator Loss: 0.6593090891838074, Discriminator Loss: 1.631111741065979
Epoch: 1737, Generator Loss: 0.6623263955116272, Discriminator Loss: 1.6281987428665161
Epoch: 1738, Generator Loss: 0.6511895060539246, Discriminator Loss: 1.6469980478286743
Epoch: 1739, Generator Loss: 0.6631816029548645, Discriminator Loss: 1.6421808004379272
Epoch: 1740, Generator Loss: 0.6571289896965027, Discriminator Loss: 1.645585060119629
Epoch: 1741, Generator Loss: 0.6533675789833069, Discriminator Loss: 1.6209204196929932
Epoch: 1742, Generator Loss: 0.6534132361412048, Discriminator Loss: 1.6463850736618042
Epoch: 1743, Generator Loss: 0.6589935421943665, Discriminator Loss: 1.6381340026855469
Epoch: 1744, Generator Loss: 0.6510239243507385, Discriminator Loss: 1.6832106113433838
Epoch: 1745, Generator Loss: 0.6552651524543762, Discriminator Loss: 1.6216716766357422
Epoch: 1746, Generator Loss: 0.6596789360046387, Discriminator Loss: 1.6453181505203247
Epoch: 1747, Generator Loss: 0.6544429063796997, Discriminator Loss: 1.6794025897979736
Epoch: 1748, Generator Loss: 0.6523520350456238, Discriminator Loss: 1.6090196371078491
Epoch: 1749, Generator Loss: 0.6486700177192688, Discriminator Loss: 1.6707324981689453
Epoch: 1750, Generator Loss: 0.6519966125488281, Discriminator Loss: 1.659147024154663
Epoch: 1751, Generator Loss: 0.6537075638771057, Discriminator Loss: 1.620564341545105
Epoch: 1752, Generator Loss: 0.650515615940094, Discriminator Loss: 1.6733399629592896
Epoch: 1753, Generator Loss: 0.657791018486023, Discriminator Loss: 1.6267133951187134
Epoch: 1754, Generator Loss: 0.6560988426208496, Discriminator Loss: 1.6001840829849243
Epoch: 1755, Generator Loss: 0.6530471444129944, Discriminator Loss: 1.6165739297866821
Epoch: 1756, Generator Loss: 0.6495729088783264, Discriminator Loss: 1.623319387435913
Epoch: 1757, Generator Loss: 0.6449612379074097, Discriminator Loss: 1.6438552141189575
Epoch: 1758, Generator Loss: 0.6442117691040039, Discriminator Loss: 1.6420222520828247
Epoch: 1759, Generator Loss: 0.6356333494186401, Discriminator Loss: 1.6693509817123413
Epoch: 1760, Generator Loss: 0.6450234651565552, Discriminator Loss: 1.6252601146697998
Epoch: 1761, Generator Loss: 0.6510728001594543, Discriminator Loss: 1.629752278327942
Epoch: 1762, Generator Loss: 0.6473866105079651, Discriminator Loss: 1.642048716545105
Epoch: 1763, Generator Loss: 0.6473207473754883, Discriminator Loss: 1.660629391670227
Epoch: 1764, Generator Loss: 0.6535657048225403, Discriminator Loss: 1.6599925756454468
Epoch: 1765, Generator Loss: 0.6439605355262756, Discriminator Loss: 1.585633635520935
Epoch: 1766, Generator Loss: 0.6353449821472168, Discriminator Loss: 1.6394360065460205
Epoch: 1767, Generator Loss: 0.6435695290565491, Discriminator Loss: 1.6440099477767944
Epoch: 1768, Generator Loss: 0.6461548209190369, Discriminator Loss: 1.6127984523773193
Epoch: 1769, Generator Loss: 0.6378973722457886, Discriminator Loss: 1.6355202198028564
Epoch: 1770, Generator Loss: 0.6420193314552307, Discriminator Loss: 1.6494786739349365
Epoch: 1771, Generator Loss: 0.6480590105056763, Discriminator Loss: 1.622864007949829
Epoch: 1772, Generator Loss: 0.6347000002861023, Discriminator Loss: 1.6678617000579834
Epoch: 1773, Generator Loss: 0.6475251317024231, Discriminator Loss: 1.5885587930679321
Epoch: 1774, Generator Loss: 0.646622359752655, Discriminator Loss: 1.6217676401138306
Epoch: 1775, Generator Loss: 0.6399731636047363, Discriminator Loss: 1.6354771852493286
Epoch: 1776, Generator Loss: 0.652175784111023, Discriminator Loss: 1.6406112909317017
Epoch: 1777, Generator Loss: 0.646121621131897, Discriminator Loss: 1.6015946865081787
Epoch: 1778, Generator Loss: 0.6517065763473511, Discriminator Loss: 1.618026852607727
Epoch: 1779, Generator Loss: 0.6506118774414062, Discriminator Loss: 1.6058937311172485
Epoch: 1780, Generator Loss: 0.648282527923584, Discriminator Loss: 1.6316713094711304
Epoch: 1781, Generator Loss: 0.6548487544059753, Discriminator Loss: 1.627036213874817
Epoch: 1782, Generator Loss: 0.6474102735519409, Discriminator Loss: 1.599818468093872
Epoch: 1783, Generator Loss: 0.6473628878593445, Discriminator Loss: 1.6708959341049194
Epoch: 1784, Generator Loss: 0.6462445855140686, Discriminator Loss: 1.6803582906723022
Epoch: 1785, Generator Loss: 0.6420494914054871, Discriminator Loss: 1.6521869897842407
Epoch: 1786, Generator Loss: 0.6467174887657166, Discriminator Loss: 1.6096153259277344
Epoch: 1787, Generator Loss: 0.642593502998352, Discriminator Loss: 1.6848869323730469
Epoch: 1788, Generator Loss: 0.6478058099746704, Discriminator Loss: 1.5978747606277466
Epoch: 1789, Generator Loss: 0.6453962326049805, Discriminator Loss: 1.6229517459869385
Epoch: 1790, Generator Loss: 0.6545789837837219, Discriminator Loss: 1.6232032775878906
Epoch: 1791, Generator Loss: 0.6558545231819153, Discriminator Loss: 1.638946533203125
Epoch: 1792, Generator Loss: 0.6539954543113708, Discriminator Loss: 1.6237809658050537
Epoch: 1793, Generator Loss: 0.651610791683197, Discriminator Loss: 1.6391575336456299
Epoch: 1794, Generator Loss: 0.6475487351417542, Discriminator Loss: 1.6100488901138306
Epoch: 1795, Generator Loss: 0.650598406791687, Discriminator Loss: 1.6166504621505737
Epoch: 1796, Generator Loss: 0.6640235781669617, Discriminator Loss: 1.5625602006912231
Epoch: 1797, Generator Loss: 0.6480736136436462, Discriminator Loss: 1.6364033222198486
Epoch: 1798, Generator Loss: 0.6580473184585571, Discriminator Loss: 1.6319435834884644
Epoch: 1799, Generator Loss: 0.6473811268806458, Discriminator Loss: 1.644296407699585
Epoch: 1800, Generator Loss: 0.6536617279052734, Discriminator Loss: 1.6131311655044556
Epoch: 1801, Generator Loss: 0.6447882056236267, Discriminator Loss: 1.6277697086334229
Epoch: 1802, Generator Loss: 0.6464903950691223, Discriminator Loss: 1.6483118534088135
Epoch: 1803, Generator Loss: 0.6456980109214783, Discriminator Loss: 1.6363998651504517
Epoch: 1804, Generator Loss: 0.6440253257751465, Discriminator Loss: 1.6428767442703247
Epoch: 1805, Generator Loss: 0.6505350470542908, Discriminator Loss: 1.634218454360962
Epoch: 1806, Generator Loss: 0.6456427574157715, Discriminator Loss: 1.623016595840454
Epoch: 1807, Generator Loss: 0.6580362915992737, Discriminator Loss: 1.5910485982894897
Epoch: 1808, Generator Loss: 0.6537281274795532, Discriminator Loss: 1.6165841817855835
Epoch: 1809, Generator Loss: 0.6449630856513977, Discriminator Loss: 1.6295095682144165
Epoch: 1810, Generator Loss: 0.6465985178947449, Discriminator Loss: 1.649148941040039
Epoch: 1811, Generator Loss: 0.6567949652671814, Discriminator Loss: 1.601843237876892
Epoch: 1812, Generator Loss: 0.6555870771408081, Discriminator Loss: 1.641560435295105
Epoch: 1813, Generator Loss: 0.6489578485488892, Discriminator Loss: 1.6073665618896484
Epoch: 1814, Generator Loss: 0.6519541144371033, Discriminator Loss: 1.606593370437622
Epoch: 1815, Generator Loss: 0.6510949730873108, Discriminator Loss: 1.6355189085006714
Epoch: 1816, Generator Loss: 0.6513667702674866, Discriminator Loss: 1.6360634565353394
Epoch: 1817, Generator Loss: 0.6493940353393555, Discriminator Loss: 1.6274839639663696
Epoch: 1818, Generator Loss: 0.6583282351493835, Discriminator Loss: 1.6399568319320679
Epoch: 1819, Generator Loss: 0.6556312441825867, Discriminator Loss: 1.607146978378296
Epoch: 1820, Generator Loss: 0.6606396436691284, Discriminator Loss: 1.622454285621643
Epoch: 1821, Generator Loss: 0.6626428961753845, Discriminator Loss: 1.591143250465393
Epoch: 1822, Generator Loss: 0.6645842790603638, Discriminator Loss: 1.587556004524231
Epoch: 1823, Generator Loss: 0.6587685942649841, Discriminator Loss: 1.62772536277771
Epoch: 1824, Generator Loss: 0.6618954539299011, Discriminator Loss: 1.6146926879882812
Epoch: 1825, Generator Loss: 0.6626787185668945, Discriminator Loss: 1.6330426931381226
Epoch: 1826, Generator Loss: 0.6592888236045837, Discriminator Loss: 1.6283233165740967
Epoch: 1827, Generator Loss: 0.6594291925430298, Discriminator Loss: 1.6032493114471436
Epoch: 1828, Generator Loss: 0.6590520143508911, Discriminator Loss: 1.614859938621521
Epoch: 1829, Generator Loss: 0.6534847021102905, Discriminator Loss: 1.6124765872955322
Epoch: 1830, Generator Loss: 0.6595659255981445, Discriminator Loss: 1.6144441366195679
Epoch: 1831, Generator Loss: 0.6597766876220703, Discriminator Loss: 1.6219812631607056
Epoch: 1832, Generator Loss: 0.6632370948791504, Discriminator Loss: 1.6229207515716553
Epoch: 1833, Generator Loss: 0.6589912176132202, Discriminator Loss: 1.6011043787002563
Epoch: 1834, Generator Loss: 0.6605494618415833, Discriminator Loss: 1.6083365678787231
Epoch: 1835, Generator Loss: 0.6549267768859863, Discriminator Loss: 1.6734199523925781
Epoch: 1836, Generator Loss: 0.6547761559486389, Discriminator Loss: 1.598307490348816
Epoch: 1837, Generator Loss: 0.644949734210968, Discriminator Loss: 1.638305425643921
Epoch: 1838, Generator Loss: 0.6587953567504883, Discriminator Loss: 1.6213887929916382
Epoch: 1839, Generator Loss: 0.6537121534347534, Discriminator Loss: 1.5991630554199219
Epoch: 1840, Generator Loss: 0.6595215201377869, Discriminator Loss: 1.6198999881744385
Epoch: 1841, Generator Loss: 0.6453233361244202, Discriminator Loss: 1.6345939636230469
Epoch: 1842, Generator Loss: 0.6586936116218567, Discriminator Loss: 1.6453279256820679
Epoch: 1843, Generator Loss: 0.6536874771118164, Discriminator Loss: 1.6546516418457031
Epoch: 1844, Generator Loss: 0.6547709107398987, Discriminator Loss: 1.6129769086837769
Epoch: 1845, Generator Loss: 0.6535406112670898, Discriminator Loss: 1.6283196210861206
Epoch: 1846, Generator Loss: 0.6561371684074402, Discriminator Loss: 1.623366117477417
Epoch: 1847, Generator Loss: 0.6454161405563354, Discriminator Loss: 1.6632194519042969
Epoch: 1848, Generator Loss: 0.6586037874221802, Discriminator Loss: 1.6040343046188354
Epoch: 1849, Generator Loss: 0.6430248618125916, Discriminator Loss: 1.6539993286132812
Epoch: 1850, Generator Loss: 0.6494547724723816, Discriminator Loss: 1.6520004272460938
Epoch: 1851, Generator Loss: 0.6567553281784058, Discriminator Loss: 1.6289911270141602
Epoch: 1852, Generator Loss: 0.6531633734703064, Discriminator Loss: 1.6291052103042603
Epoch: 1853, Generator Loss: 0.6555538773536682, Discriminator Loss: 1.5790538787841797
Epoch: 1854, Generator Loss: 0.6540935635566711, Discriminator Loss: 1.5922986268997192
Epoch: 1855, Generator Loss: 0.6589505076408386, Discriminator Loss: 1.6439679861068726
Epoch: 1856, Generator Loss: 0.643476665019989, Discriminator Loss: 1.6209920644760132
Epoch: 1857, Generator Loss: 0.6509631276130676, Discriminator Loss: 1.588797926902771
Epoch: 1858, Generator Loss: 0.6428138613700867, Discriminator Loss: 1.6049013137817383
Epoch: 1859, Generator Loss: 0.6340370774269104, Discriminator Loss: 1.645251989364624
Epoch: 1860, Generator Loss: 0.6426934003829956, Discriminator Loss: 1.6107276678085327
Epoch: 1861, Generator Loss: 0.6409763693809509, Discriminator Loss: 1.5948251485824585
Epoch: 1862, Generator Loss: 0.6342477202415466, Discriminator Loss: 1.569774866104126
Epoch: 1863, Generator Loss: 0.6423574090003967, Discriminator Loss: 1.6162844896316528
Epoch: 1864, Generator Loss: 0.6306461691856384, Discriminator Loss: 1.5837738513946533
Epoch: 1865, Generator Loss: 0.6273293495178223, Discriminator Loss: 1.6446369886398315
Epoch: 1866, Generator Loss: 0.6212558746337891, Discriminator Loss: 1.5877749919891357
Epoch: 1867, Generator Loss: 0.5955389738082886, Discriminator Loss: 1.6383552551269531
Epoch: 1868, Generator Loss: 0.6062150001525879, Discriminator Loss: 1.623372197151184
Epoch: 1869, Generator Loss: 0.621296763420105, Discriminator Loss: 1.6153274774551392
Epoch: 1870, Generator Loss: 0.6404319405555725, Discriminator Loss: 1.6389122009277344
Epoch: 1871, Generator Loss: 0.6423466801643372, Discriminator Loss: 1.6409885883331299
Epoch: 1872, Generator Loss: 0.6432111859321594, Discriminator Loss: 1.674855351448059
Epoch: 1873, Generator Loss: 0.6328492760658264, Discriminator Loss: 1.6045619249343872
Epoch: 1874, Generator Loss: 0.6317957639694214, Discriminator Loss: 1.6952440738677979
Epoch: 1875, Generator Loss: 0.6436141133308411, Discriminator Loss: 1.6149686574935913
Epoch: 1876, Generator Loss: 0.6411986947059631, Discriminator Loss: 1.59356689453125
Epoch: 1877, Generator Loss: 0.6436057686805725, Discriminator Loss: 1.6040176153182983
Epoch: 1878, Generator Loss: 0.6478557586669922, Discriminator Loss: 1.6413203477859497
Epoch: 1879, Generator Loss: 0.6430660486221313, Discriminator Loss: 1.6245206594467163
Epoch: 1880, Generator Loss: 0.6401670575141907, Discriminator Loss: 1.6436097621917725
Epoch: 1881, Generator Loss: 0.6490224599838257, Discriminator Loss: 1.6252151727676392
Epoch: 1882, Generator Loss: 0.6475142240524292, Discriminator Loss: 1.6147280931472778
Epoch: 1883, Generator Loss: 0.6481371521949768, Discriminator Loss: 1.6107608079910278
Epoch: 1884, Generator Loss: 0.6508674621582031, Discriminator Loss: 1.6175956726074219
Epoch: 1885, Generator Loss: 0.6484428644180298, Discriminator Loss: 1.6458665132522583
Epoch: 1886, Generator Loss: 0.6512245535850525, Discriminator Loss: 1.635262370109558
Epoch: 1887, Generator Loss: 0.6494889855384827, Discriminator Loss: 1.6211844682693481
Epoch: 1888, Generator Loss: 0.6458370089530945, Discriminator Loss: 1.6414741277694702
Epoch: 1889, Generator Loss: 0.6512726545333862, Discriminator Loss: 1.617169976234436
Epoch: 1890, Generator Loss: 0.6570948362350464, Discriminator Loss: 1.6256872415542603
Epoch: 1891, Generator Loss: 0.6492359042167664, Discriminator Loss: 1.6153337955474854
Epoch: 1892, Generator Loss: 0.6551958918571472, Discriminator Loss: 1.6149423122406006
Epoch: 1893, Generator Loss: 0.6541203260421753, Discriminator Loss: 1.647684097290039
Epoch: 1894, Generator Loss: 0.6483802795410156, Discriminator Loss: 1.6085736751556396
Epoch: 1895, Generator Loss: 0.6581233143806458, Discriminator Loss: 1.624055027961731
Epoch: 1896, Generator Loss: 0.6568870544433594, Discriminator Loss: 1.613381266593933
Epoch: 1897, Generator Loss: 0.6529243588447571, Discriminator Loss: 1.590219497680664
Epoch: 1898, Generator Loss: 0.6507320404052734, Discriminator Loss: 1.639015793800354
Epoch: 1899, Generator Loss: 0.6507376432418823, Discriminator Loss: 1.6151853799819946
Epoch: 1900, Generator Loss: 0.6434527635574341, Discriminator Loss: 1.5976791381835938
Epoch: 1901, Generator Loss: 0.6396297216415405, Discriminator Loss: 1.6882750988006592
Epoch: 1902, Generator Loss: 0.6587964296340942, Discriminator Loss: 1.6309877634048462
Epoch: 1903, Generator Loss: 0.6576092839241028, Discriminator Loss: 1.6007518768310547
Epoch: 1904, Generator Loss: 0.65821772813797, Discriminator Loss: 1.5716561079025269
Epoch: 1905, Generator Loss: 0.6597491502761841, Discriminator Loss: 1.6187368631362915
Epoch: 1906, Generator Loss: 0.6554492115974426, Discriminator Loss: 1.597124695777893
Epoch: 1907, Generator Loss: 0.6582861542701721, Discriminator Loss: 1.6355226039886475
Epoch: 1908, Generator Loss: 0.6541919708251953, Discriminator Loss: 1.6220967769622803
Epoch: 1909, Generator Loss: 0.6550762057304382, Discriminator Loss: 1.6219959259033203
Epoch: 1910, Generator Loss: 0.6518670916557312, Discriminator Loss: 1.6675831079483032
Epoch: 1911, Generator Loss: 0.65174400806427, Discriminator Loss: 1.5686525106430054
Epoch: 1912, Generator Loss: 0.6526713371276855, Discriminator Loss: 1.5829145908355713
Epoch: 1913, Generator Loss: 0.6321597695350647, Discriminator Loss: 1.6327601671218872
Epoch: 1914, Generator Loss: 0.6419686079025269, Discriminator Loss: 1.6363706588745117
Epoch: 1915, Generator Loss: 0.6511275172233582, Discriminator Loss: 1.6339657306671143
Epoch: 1916, Generator Loss: 0.6453272104263306, Discriminator Loss: 1.6324776411056519
Epoch: 1917, Generator Loss: 0.6458774209022522, Discriminator Loss: 1.627583622932434
Epoch: 1918, Generator Loss: 0.6396728754043579, Discriminator Loss: 1.6132242679595947
Epoch: 1919, Generator Loss: 0.6428807377815247, Discriminator Loss: 1.6136847734451294
Epoch: 1920, Generator Loss: 0.6474935412406921, Discriminator Loss: 1.590140700340271
Epoch: 1921, Generator Loss: 0.6481093168258667, Discriminator Loss: 1.6420449018478394
Epoch: 1922, Generator Loss: 0.6480216383934021, Discriminator Loss: 1.5951385498046875
Epoch: 1923, Generator Loss: 0.6464029550552368, Discriminator Loss: 1.605172038078308
Epoch: 1924, Generator Loss: 0.6449764966964722, Discriminator Loss: 1.6111136674880981
Epoch: 1925, Generator Loss: 0.647483229637146, Discriminator Loss: 1.646208643913269
Epoch: 1926, Generator Loss: 0.6510300636291504, Discriminator Loss: 1.6020969152450562
Epoch: 1927, Generator Loss: 0.6443389058113098, Discriminator Loss: 1.5966521501541138
Epoch: 1928, Generator Loss: 0.6492210030555725, Discriminator Loss: 1.616578459739685
Epoch: 1929, Generator Loss: 0.6477026343345642, Discriminator Loss: 1.630889654159546
Epoch: 1930, Generator Loss: 0.6396942138671875, Discriminator Loss: 1.6506996154785156
Epoch: 1931, Generator Loss: 0.6294507384300232, Discriminator Loss: 1.6333032846450806
Epoch: 1932, Generator Loss: 0.6442384719848633, Discriminator Loss: 1.6143856048583984
Epoch: 1933, Generator Loss: 0.6458913683891296, Discriminator Loss: 1.5797702074050903
Epoch: 1934, Generator Loss: 0.6406584978103638, Discriminator Loss: 1.6194536685943604
Epoch: 1935, Generator Loss: 0.6419585347175598, Discriminator Loss: 1.6019827127456665
Epoch: 1936, Generator Loss: 0.6411952972412109, Discriminator Loss: 1.6081655025482178
Epoch: 1937, Generator Loss: 0.6455941200256348, Discriminator Loss: 1.6314626932144165
Epoch: 1938, Generator Loss: 0.6381065249443054, Discriminator Loss: 1.6158369779586792
Epoch: 1939, Generator Loss: 0.6456729173660278, Discriminator Loss: 1.6072579622268677
Epoch: 1940, Generator Loss: 0.649987518787384, Discriminator Loss: 1.6689327955245972
Epoch: 1941, Generator Loss: 0.6461527943611145, Discriminator Loss: 1.565750002861023
Epoch: 1942, Generator Loss: 0.6483668684959412, Discriminator Loss: 1.6500054597854614
Epoch: 1943, Generator Loss: 0.6485314965248108, Discriminator Loss: 1.6161988973617554
Epoch: 1944, Generator Loss: 0.6473978757858276, Discriminator Loss: 1.6287362575531006
Epoch: 1945, Generator Loss: 0.6488468050956726, Discriminator Loss: 1.6481634378433228
Epoch: 1946, Generator Loss: 0.6471266746520996, Discriminator Loss: 1.6251885890960693
Epoch: 1947, Generator Loss: 0.6436840891838074, Discriminator Loss: 1.5908581018447876
Epoch: 1948, Generator Loss: 0.6492195725440979, Discriminator Loss: 1.6073366403579712
Epoch: 1949, Generator Loss: 0.6482626795768738, Discriminator Loss: 1.5750154256820679
Epoch: 1950, Generator Loss: 0.6371179819107056, Discriminator Loss: 1.6446936130523682
Epoch: 1951, Generator Loss: 0.6544084548950195, Discriminator Loss: 1.6334102153778076
Epoch: 1952, Generator Loss: 0.6520885229110718, Discriminator Loss: 1.619052767753601
Epoch: 1953, Generator Loss: 0.6508389711380005, Discriminator Loss: 1.6321983337402344
Epoch: 1954, Generator Loss: 0.6511875987052917, Discriminator Loss: 1.634764313697815
Epoch: 1955, Generator Loss: 0.6540475487709045, Discriminator Loss: 1.6334043741226196
Epoch: 1956, Generator Loss: 0.654155433177948, Discriminator Loss: 1.6145964860916138
Epoch: 1957, Generator Loss: 0.6534972786903381, Discriminator Loss: 1.5732710361480713
Epoch: 1958, Generator Loss: 0.6598106026649475, Discriminator Loss: 1.5832538604736328
Epoch: 1959, Generator Loss: 0.6545242667198181, Discriminator Loss: 1.6213607788085938
Epoch: 1960, Generator Loss: 0.6603838801383972, Discriminator Loss: 1.5815366506576538
Epoch: 1961, Generator Loss: 0.6539797186851501, Discriminator Loss: 1.6255651712417603
Epoch: 1962, Generator Loss: 0.6551851034164429, Discriminator Loss: 1.6681205034255981
Epoch: 1963, Generator Loss: 0.65157550573349, Discriminator Loss: 1.602130651473999
Epoch: 1964, Generator Loss: 0.6498141884803772, Discriminator Loss: 1.6046115159988403
Epoch: 1965, Generator Loss: 0.6520034670829773, Discriminator Loss: 1.6020467281341553
Epoch: 1966, Generator Loss: 0.6574442982673645, Discriminator Loss: 1.5798293352127075
Epoch: 1967, Generator Loss: 0.6527938842773438, Discriminator Loss: 1.6517431735992432
Epoch: 1968, Generator Loss: 0.6546083092689514, Discriminator Loss: 1.6255950927734375
Epoch: 1969, Generator Loss: 0.6526427268981934, Discriminator Loss: 1.5957614183425903
Epoch: 1970, Generator Loss: 0.6581749320030212, Discriminator Loss: 1.6084855794906616
Epoch: 1971, Generator Loss: 0.6546540856361389, Discriminator Loss: 1.5959855318069458
Epoch: 1972, Generator Loss: 0.6539710164070129, Discriminator Loss: 1.5776625871658325
Epoch: 1973, Generator Loss: 0.6556568145751953, Discriminator Loss: 1.6415863037109375
Epoch: 1974, Generator Loss: 0.6558828949928284, Discriminator Loss: 1.6174284219741821
Epoch: 1975, Generator Loss: 0.6526843905448914, Discriminator Loss: 1.6208572387695312
Epoch: 1976, Generator Loss: 0.6447165608406067, Discriminator Loss: 1.6345770359039307
Epoch: 1977, Generator Loss: 0.6538957953453064, Discriminator Loss: 1.6503320932388306
Epoch: 1978, Generator Loss: 0.6530653834342957, Discriminator Loss: 1.580855369567871
Epoch: 1979, Generator Loss: 0.6594002842903137, Discriminator Loss: 1.6303014755249023
Epoch: 1980, Generator Loss: 0.6576257944107056, Discriminator Loss: 1.6405789852142334
Epoch: 1981, Generator Loss: 0.6554898023605347, Discriminator Loss: 1.6009682416915894
Epoch: 1982, Generator Loss: 0.6556205153465271, Discriminator Loss: 1.655796766281128
Epoch: 1983, Generator Loss: 0.6558471918106079, Discriminator Loss: 1.6093695163726807
Epoch: 1984, Generator Loss: 0.6546164155006409, Discriminator Loss: 1.613748550415039
Epoch: 1985, Generator Loss: 0.6542171835899353, Discriminator Loss: 1.6093980073928833
Epoch: 1986, Generator Loss: 0.6668943762779236, Discriminator Loss: 1.608987808227539
Epoch: 1987, Generator Loss: 0.6544974446296692, Discriminator Loss: 1.6112470626831055
Epoch: 1988, Generator Loss: 0.6623495817184448, Discriminator Loss: 1.606998085975647
Epoch: 1989, Generator Loss: 0.6614161133766174, Discriminator Loss: 1.5976201295852661
Epoch: 1990, Generator Loss: 0.6475855708122253, Discriminator Loss: 1.629447102546692
Epoch: 1991, Generator Loss: 0.6543802618980408, Discriminator Loss: 1.5875195264816284
Epoch: 1992, Generator Loss: 0.6524730324745178, Discriminator Loss: 1.6025962829589844
Epoch: 1993, Generator Loss: 0.6493362784385681, Discriminator Loss: 1.5991889238357544
Epoch: 1994, Generator Loss: 0.645079493522644, Discriminator Loss: 1.599306344985962
Epoch: 1995, Generator Loss: 0.6447820663452148, Discriminator Loss: 1.6743837594985962
Epoch: 1996, Generator Loss: 0.6398566365242004, Discriminator Loss: 1.6614418029785156
Epoch: 1997, Generator Loss: 0.6429937481880188, Discriminator Loss: 1.6341878175735474
Epoch: 1998, Generator Loss: 0.6486613154411316, Discriminator Loss: 1.6576141119003296
Epoch: 1999, Generator Loss: 0.6412960886955261, Discriminator Loss: 1.5965783596038818
```python
plt.title('Generator Loss')
plt.plot(gen_losses, 'r-')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.show()
```
```python
plt.title('Discriminator Loss')
plt.plot(disc_losses, 'b-')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.show()
```
## Using the Discriminator for Classification
We will now evaluate the performance of the discriminator as a classifier on the original training data. We will check both the classification accuracy and the Area Under the ROC Curve (AUC) as the metrics.
```python
_, train_predictions = discriminator(tf.convert_to_tensor(x_train))
train_predictions.shape
```
TensorShape([100, 2])
```python
binary_accuracy = tf.keras.metrics.BinaryAccuracy()
binary_accuracy.update_state(tf.one_hot(tf.squeeze(y_train), depth=2), train_predictions)
print('Training Accuracy: %.4f %s' % (binary_accuracy.result().numpy()*100, '%'))
```
Training Accuracy: 88.0000 %
```python
fpr, tpr, _ = roc_curve(y_train, tf.argmax(train_predictions,1).numpy())
roc_auc = auc(fpr, tpr)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic Curve')
plt.legend(loc="lower right")
plt.show()
```
```python
_, test_predictions = discriminator(tf.convert_to_tensor(x_test))
test_predictions.shape
```
TensorShape([100, 2])
```python
binary_accuracy = tf.keras.metrics.BinaryAccuracy()
binary_accuracy.update_state(tf.one_hot(tf.squeeze(y_test), depth=2), test_predictions)
print('Test Accuracy: %.4f %s' % (binary_accuracy.result().numpy()*100, '%'))
```
Test Accuracy: 69.0000 %
```python
fpr, tpr, _ = roc_curve(y_test, tf.argmax(test_predictions,1).numpy())
roc_auc = auc(fpr, tpr)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic Curve')
plt.legend(loc="lower right")
plt.show()
```
We will now look at the predictions on the generated synthetic data.
```python
generator_outputs = generator(random_data)
generator_outputs.shape
```
TensorShape([200, 5])
```python
_, predictions_synthetic = discriminator(generator_outputs)
predictions_synthetic.shape
```
TensorShape([200, 2])
```python
predicted_labels_synthetic = tf.argmax(predictions_synthetic, 1)
predicted_labels_synthetic[:20]
```
<tf.Tensor: shape=(20,), dtype=int64, numpy=array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0])>
## Improving the Performance
It can be seen from the loss vs. epoch plots that the generator has more or less converged, but the discriminator hasn't. The AUC scores suggest that the model is actually learning. This can be improved in the following ways:
1. Use slightly higher learning rates while training the discriminator.
2. Since the generator has converged, we can take the synthetic data it generates and add it to our original training set, then train the GAN again so that the discriminator becomes more robust.
3. Train for a larger number of epochs.
4. Use adaptive learning rates and learning rate scheduling (a sketch follows this list).
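For the last point, here is a minimal sketch of what learning-rate scheduling could look like with `tf.keras`. The decay constants are arbitrary illustrative values, not the settings used in the training run above.
```python
import tensorflow as tf

# Exponentially decay the discriminator's learning rate every 1000 steps.
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=1e-3,
    decay_steps=1000,
    decay_rate=0.96)

discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
```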
|
[GOAL]
C : Type u
inst✝² : Category.{v, u} C
inst✝¹ : HasFiniteProducts C
ι : Type w
inst✝ : Finite ι
⊢ HasLimitsOfShape (Discrete ι) C
[PROOFSTEP]
rcases Finite.exists_equiv_fin ι with ⟨n, ⟨e⟩⟩
[GOAL]
case intro.intro
C : Type u
inst✝² : Category.{v, u} C
inst✝¹ : HasFiniteProducts C
ι : Type w
inst✝ : Finite ι
n : ℕ
e : ι ≃ Fin n
⊢ HasLimitsOfShape (Discrete ι) C
[PROOFSTEP]
haveI : HasLimitsOfShape (Discrete (Fin n)) C := HasFiniteProducts.out n
[GOAL]
case intro.intro
C : Type u
inst✝² : Category.{v, u} C
inst✝¹ : HasFiniteProducts C
ι : Type w
inst✝ : Finite ι
n : ℕ
e : ι ≃ Fin n
this : HasLimitsOfShape (Discrete (Fin n)) C
⊢ HasLimitsOfShape (Discrete ι) C
[PROOFSTEP]
exact hasLimitsOfShape_of_equivalence (Discrete.equivalence e.symm)
[GOAL]
C : Type u
inst✝² : Category.{v, u} C
inst✝¹ : HasFiniteCoproducts C
ι : Type w
inst✝ : Finite ι
⊢ HasColimitsOfShape (Discrete ι) C
[PROOFSTEP]
rcases Finite.exists_equiv_fin ι with ⟨n, ⟨e⟩⟩
[GOAL]
case intro.intro
C : Type u
inst✝² : Category.{v, u} C
inst✝¹ : HasFiniteCoproducts C
ι : Type w
inst✝ : Finite ι
n : ℕ
e : ι ≃ Fin n
⊢ HasColimitsOfShape (Discrete ι) C
[PROOFSTEP]
haveI : HasColimitsOfShape (Discrete (Fin n)) C := HasFiniteCoproducts.out n
[GOAL]
case intro.intro
C : Type u
inst✝² : Category.{v, u} C
inst✝¹ : HasFiniteCoproducts C
ι : Type w
inst✝ : Finite ι
n : ℕ
e : ι ≃ Fin n
this : HasColimitsOfShape (Discrete (Fin n)) C
⊢ HasColimitsOfShape (Discrete ι) C
[PROOFSTEP]
exact hasColimitsOfShape_of_equivalence (Discrete.equivalence e.symm)
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
inst✝ : HasFiniteColimits C
J : ℕ
⊢ HasColimitsOfShape (Discrete (Fin J)) C
[PROOFSTEP]
infer_instance
|
> module FastSimpleProb.MonadicPostulates
> import FastSimpleProb.SimpleProb
> import FastSimpleProb.BasicOperations
> import FastSimpleProb.MonadicOperations
> import FastSimpleProb.Functor
> import Double.Predicates
> import NonNegDouble.NonNegDouble
> import NonNegDouble.BasicOperations
> import Functor.Predicates
> %default total
> %access public export
> %auto_implicits off
* On |fmap| and |rescale|:
> postulate
> fmapRescaleLemma : {A : Type} ->
> (f : A -> NonNegDouble) ->
> (p : NonNegDouble) -> (pp : Positive (toDouble p)) ->
> (sp : SimpleProb A) ->
> rescale p pp (fmap f sp) = fmap f (rescale p pp sp)
> postulate
> naturalRescale : (p : NonNegDouble) -> (pp : Positive (toDouble p)) ->
> Natural (rescale p pp)
* On |fmap| and |normalize|:
> postulate
> fmapNormalizeLemma : {A, B : Type} ->
> (f : A -> B) ->
> (sp : SimpleProb A) ->
> normalize (fmap f sp) = fmap f (normalize sp)
> postulate
> naturalNormalize : Natural normalize
|
```python
%%capture
%run utils.ipynb
%run quadrotor_model.ipynb
```
# Sensors
This notebook implements the sensors that are used on-board a quadcopter: accelerometer, gyro, GPS, camera, etc. Each sensor implemented herein inherits from the following abstract `Sensor` class and must override the `read` method.
```python
class Sensor(object):
"""Sensor
An abstract base class for all sensors.
"""
def __init__(self):
self.name = "Abstract Sensor"
def __str__(self):
return self.name
def read(self, quad, n, Ts):
return 0
```
## Sensor Manager
In order to allow flexibility in sensor configurations, a `SensorManager` class is created. A custom set of sensors is registered with the manager for each simulation. During a simulation, the [quadsim](quadsim.ipynb) `Simulator` class asks the sensor manager to produce a data packet that represents the current sensor readings from the suite of on-board sensors.
```python
class SensorManager(object):
"""Sensor Manager
"""
def __init__(self):
# create a list for sensor objects
self.sensors = []
def register(self, sensor):
self.sensors += [sensor]
def get_data_packet(self, quad, i, Ts):
# dictionary of sensor data, keyed by sensor name
pkt = {}
for s in self.sensors:
pkt[s.name] = s.read(quad, i, Ts)
return pkt
```
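As a quick illustration of how the manager is meant to be wired up, here is a minimal sketch. The `PositionSensor` below is a made-up placeholder (it is not one of the sensors implemented in this notebook); it simply reports the quadrotor's inertial position so that the data packet has something in it.
```python
class PositionSensor(Sensor):
    """Position Sensor (illustrative only)
    Reports the quadrotor's inertial position.
    """
    def __init__(self):
        self.name = "Position"
    def read(self, quad, n, Ts):
        return quad.r

manager = SensorManager()
manager.register(PositionSensor())

# Inside a simulation loop, the simulator would ask for a packet like this:
#   pkt = manager.get_data_packet(quad, i, Ts)
#   pkt['Position']  # -> the value returned by PositionSensor.read
```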
## Camera
Electro-optical (EO) cameras are extremely useful in autonomy and robotics. The rich visual information they provide enables a wide variety of applications. Robotic vision is a very active research area, with common themes such as: vision-based simultaneous localization and mapping (SLAM), visual-inertial odometry, object recognition with convolutional neural networks, image-based visual servoing, and target tracking.
In this simulation, the purpose of the camera is to measure normalized bearing vectors to interesting points such as pixel features, targets, or landmarks. As such, we will focus solely on aspects of camera geometry (as opposed to semantic understanding, pixel intensities, etc). Feature locations in the inertial frame are given to the camera which then projects them onto the pixel plane.
### Coordinate Frames
There are a number of important coordinate frames associated with a camera sensor.
### Camera Geometry
The pinhole camera model is the most commonly used camera model. This allows us to simplify the optical characteristics and focus on the geometry of how 3D objects are imaged. Suppose that the point $P$ exists in 3D space and can be expressed in the inertial frame as $P^i = \begin{bmatrix} x^i & y^i & z^i\end{bmatrix}^\top$. Using the pinhole camera model, the perspective projection model is used to image this point as
$$
\begin{equation}
\lambda \begin{bmatrix} u \\ v \\ 1 \end{bmatrix} =
\underbrace{\begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{bmatrix}}_{K}
\left[ R_i^c \mid t_i^c \right]
\begin{bmatrix} x^i \\ y^i \\ z^i \\ 1 \end{bmatrix},
\end{equation}
$$
where $\lambda = z^i$ is the unknown scale factor associated with depth, $u$ and $v$ are respectively the $x$ and $y$ pixel coordinates, and $K$ is the *camera calibration matrix*, *intrinsic camera matrix*, or simply *camera matrix*, which can be found via camera calibration.
This gives a result in pixels in the image plane attached to the camera frame, see the figure below.
Written out in component form, with normalized image coordinates $x' = x/z$ and $y' = y/z$, the perspective projection equations are
$$
\begin{align}
u &= fx' + c_x = f\frac{x}{z} + c_x \\
v &= fy' + c_y = f\frac{y}{z} + c_y
\end{align}
$$
<div style="text-align:center">
Figure 1: Camera geometry
</div>
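Before wrapping this up in a class, here is a quick numeric sanity check of the projection equations above. This is only a sketch: the focal length, principal point, and test point below are arbitrary illustrative values, not the simulator defaults.
```python
import numpy as np

# Illustrative intrinsics (arbitrary values, square pixels: f_x = f_y = f)
f, cx, cy = 400.0, 400.0, 300.0
K = np.array([[f, 0, cx],
              [0, f, cy],
              [0, 0, 1.0]])

# A point already expressed in the camera frame (z is the depth)
p_cam = np.array([1.0, 0.5, 10.0])

# Perspective projection: multiply by K, then normalize out the depth
hpx = K @ p_cam
u, v = hpx[:2] / hpx[2]
print(u, v)  # 440.0 320.0, i.e. u = f*x/z + cx, v = f*y/z + cy
```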
```python
class Camera(Sensor):
"""Camera
"""
def __init__(self, fps=30, size=(800,600), hfovd=33.99, pxnoise=0, tick=None):
self.name = "Camera"
# Register a callback that gets called when a new
# frame is to be registered. The user can use this
# callback to propagate features in the camera's
# FOV. The function can return inertial positions
# to the camera to be imaged and projected onto
# the pixel plane. The function signature is:
#
# def tick_fn(capture, n, Ts)
#
self.fns = [tick] if tick else []
# Calculate camera parameters
self.fps = fps
self.size = size
        self.hfov = np.radians(hfovd) # horizontal angular FOV, converted from degrees to radians
self.f_px = f = (size[0]/2)/np.tan(self.hfov/2)
self.pxnoise_std = pxnoise
cx = size[0]/2
cy = size[1]/2
self.K = np.array([[f, 0, cx],
[0, f, cy],
[0, 0, 1]])
# Camera transformation from quadrotor body frame
self.t = np.zeros((3,1))
self.R = np.eye(3)
# Last camera measurement
self.meas = None
# how many sensor reads have there been
self.ticks = 0
def _camera_projection(self, quad, inertial):
"""Camera Projection
Uses the camera matrix to project features onto the pixel plane.
"""
# given inertial measurements of features, calculate
# the feature positions in the vehicle frame
p_veh = inertial - quad.r
# transform to the camera frame
# TODO: Add in translation from body to camera
Rv2b = Rot_v_to_b(*quad.Phi.flatten())
Rb2c = self.R
p_cam = Rb2c.dot(Rv2b.dot(p_veh))
# perspective transformation using camera matrix
# force homogeneous coordinates (normalize out depth)
hpx = self.K.dot(p_cam)
hpx = hpx / hpx[2,:]
        # extract pixels from homogeneous coordinates
# and add pixel noise (AWGN)
px = hpx[:2,:]
px += self.pxnoise_std*np.random.randn(*px.shape)
# TODO: remove measurements outside of FOV
return px
def set_transformation(self, t=None, R=None):
"""Set Transformation
Sets the transformation from the quadrotor body frame
to the camera frame. The transformation is represented
as a translation t and rotation R.
"""
self.t = t if t is not None else np.zeros((3,1))
self.R = R if R is not None else np.eye(3)
def register_feature(self, feature):
self.fns += [feature.tick]
def read(self, quad, n, Ts):
# empty list of measurements to be filled by tick functions
inertial_measurements = None
# Call any associated tick functions
capture = True
for fn in self.fns:
data = fn(capture, n, Ts)
if data is not None:
if inertial_measurements is None:
inertial_measurements = data
else:
inertial_measurements = np.hstack((inertial_measurements, data))
# if no measurements were received, bail now
if inertial_measurements is None:
return None
# Enforce fps (to closest Ts) before doing camera stuff
if np.mod(self.ticks,round(1/(self.fps*Ts))) == 0:
# project inertial measurements onto pixel plane
self.meas = self._camera_projection(quad, inertial_measurements)
self.ticks += 1
return self.meas
```
```python
def target_tick(capture, n, Ts):
# ellipse
f = 0.01 # Hz
x = 10*np.sin(2*np.pi*f*n*Ts)
y = 5*np.cos(2*np.pi*f*n*Ts)
return np.array([[x,y,0]]).T
camera = Camera(tick=target_tick)
camera.set_transformation(R=rot3d(45,0,90))
# Instantiate a quadrotor model with the given initial conditions
quad = Quadrotor(r=np.array([[-30],[0],[-30]]),
v=np.array([[0],[0],[0]]),
Phi=np.array([[0],[0],[0]]))
# How many iterations are needed
Ts = 0.01
Tf = 0.1
N = int(Tf/Ts)
for i in range(N):
x = camera.read(quad, i, Ts)
print(x.T)
```
[[ 554.238 300.000]]
[[ 554.238 300.000]]
[[ 554.238 300.000]]
[[ 554.189 299.589]]
[[ 554.189 299.589]]
[[ 554.189 299.589]]
[[ 554.140 299.178]]
[[ 554.140 299.178]]
[[ 554.140 299.178]]
[[ 554.090 298.768]]
## Accelerometer
## Rate Gyro
## GPS
|
\section*{Week 5: Acquisitions / Financial Investments}
\subsection*{Acquisitions and goodwill}
Steps for \textit{allocating the purchase price}:
\begin{enumerate}[noitemsep,topsep=0pt]
\item Fair value of tangible assets and liabilities.
\item \textit{Identifiable} intangible assets: customer relationships, trade names, patents, etc. Subject to amortization with zero salvage value.
\item Goodwill. An intangible asset that is not separately identifiable: everything else (the plug).
\end{enumerate}
\subsection*{Investments}
\includegraphics[width=7cm]{assets/fair_value_vs_historical_acct}
\subsection*{Passive Investments}
HTM: Held to maturity \\
AFS: Available for sale \\
TRD: Trading
\begin{tabular}{ |c|c|c| }
\hline
& B/S Effect & I/S Effect \\
\hline
HTM (debt) & no & no \\
AFS (debt \& equity) & yes & no \\
TRD (debt \& equity) & yes & yes \\
\hline
\end{tabular}
Changes in market value affect the balance sheet for AFS and TRD securities; they affect the income statement only for TRD securities.
\textbf{HTM Example:}
(A) Purchase 100 par bonds with face value of \$10. This is a 10\% stake. \\
(B) A year later, bonds are trading at \$15 each. Trek Company does not sell. \\
(C) The following year, the bonds issue a coupon payment totaling \$100. \\
(D) At maturity, the bonds are trading at \$10 each. Trek Company receives back principal in cash.
\begin{tabular}{ |c|c|c||c|c| }
\hline
Event & Cash & Inv & RE & OCI \\
\hline
(A) & -1000 & 1000 & & \\
(B) & & & & \\
(C) & 100 & & 100 & \\
(D) & 1000 & -1000 & & \\
\hline
\end{tabular}
\textbf{TRD Example:}
(A)-(C) same as previous example \\
(D) The following day, bonds are trading at \$13 each. Trek Company sells all of their bonds for cash.
\begin{tabular}{ |c|c|c||c|c| }
\hline
Event & Cash & Inv & RE & OCI \\
\hline
(A) & -1000 & 1000 & & \\
(B) & & 500 & 500 & \\
(C) & 100 & & 100 & \\
(D) & 1300 & -1500 & -200 & \\
\hline
\end{tabular} \\
\textbf{AFS Example:}
(A)-(D) the same but AFS accounting.\\
\begin{tabular}{ |c|c|c||c|c| }
\hline
Event & Cash & Inv & RE & OCI \\
\hline
(A) & -1000 & 1000 & & \\
(B) & & 500 & & 500 \\
(C) & 100 & & 100 & \\
(D) & 1300 & -1500 & 300 & -500 \\
\hline
\end{tabular}
\subsection*{Equity Method}
\begin{itemize}[noitemsep,topsep=0pt]
\item Initially record the investment at acquisition cost.
\item Adjust the book value of the investment by the investor's share of dividends and
earnings or losses.
\item Record investor's share of investee's profit on the investor's income statements.
\item Dividends received reduce investment; they do not give rise to dividend income.
\end{itemize}
Example: On 1/1/2010, Zsa Zsa purchased 1,000 shares of Zoltan common stock for \$15
cash per share. This 1,000 shares represents a 30\% interest in Zoltan.
\begin{itemize}[noitemsep,topsep=0pt]
\item Zoltan's book value per share is \$10. Zsa Zsa is paying a premium because it believes
that Zoltan has unrecorded patents (with a life of 10 years) of \$5 per share.
\item On 6/30, Zsa Zsa received a dividend of \$1 per share on Zoltan common stock.
\item At 12/31/2010 the market price of Zoltan common stock is \$13 per share. (this creates no entry)
\item Zoltan reports its earnings for 2010 as \$20,000.
\item On 12/31/2010 Zsa Zsa amortizes the unrecorded patents (with a life of 10 years) of \$5 per share.
\item on 6/30/2011 Zsa Zsa sells the 1,000 shares of Zoltan common stock for \$17 per share
\end{itemize}
\begin{tabular}{ |c|c|c||c||c| }
\hline
Dt & Cash & Inv & RE & Event \\
\hline
1/1 & -15000 & 15000 & & buy@15\\
6/30 & 1000 & -1000 & & dividend \\
12/31 & & 6000 & 6000 & $.3 \cdot 20000$\\
12/31 & & -500 & -500 & $\frac{5}{10}\cdot 1000$ \\
6/30 & 17000 & -19500 & -2500 & sell@17 \\
\hline
\end{tabular}
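The $-2500$ hit to RE on the sale follows directly from the carrying value of the investment: $15000 - 1000 + 6000 - 500 = 19500$, while the sale proceeds are $17000$, so the realized loss is $17000 - 19500 = -2500$.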
\subsection*{Take Away}
\begin{itemize}[noitemsep,topsep=0pt]
\item Passive investments $\implies$ mark-to-market
\item With some but not complete control $\implies$ equity method
\item Greater than 50\% ownership $\implies$ consolidate
\item Whether the equity method or consolidation is used makes a big difference in the appearance
of the statements. Financial ratios (leverage ratios) will be very different
\end{itemize}
|
Inductive day : Type :=
| monday : day
| tuesday : day
| wednesday : day
| thursday : day
| friday : day
| saturday : day
| sunday : day.
Definition next_weekday (d : day) : day :=
match d with
| monday => tuesday
| tuesday => wednesday
| wednesday => thursday
| thursday => friday
| friday => monday
| saturday => monday
| sunday => monday
end.
Eval compute in (next_weekday friday).
(* ==> monday : day *)
Eval compute in (next_weekday (next_weekday saturday)).
(* ==> tuesday : day *)
Inductive bool : Type :=
| true : bool
| false : bool.
Definition negb (b : bool) : bool :=
match b with
| true => false
| false => true
end.
Definition andb (b1 : bool) (b2 : bool) : bool :=
match b1 with
| true => b2
| false => false
end.
Definition orb (b1 : bool) (b2 : bool) : bool :=
match b1 with
| true => true
| false => b2
end.
Example test_orb1: (orb true false) = true.
Proof. reflexivity. Qed.
Example test_orb2: (orb false false) = false.
Proof. reflexivity. Qed.
Example test_orb3: (orb false true) = true.
Proof. reflexivity. Qed.
Example test_orb4: (orb true true) = true.
Proof. reflexivity. Qed.
|
The Assembly and power-sharing Executive were suspended several times but were restored again in 2007. In that year the British government officially ended its military support of the police in Northern Ireland (Operation Banner) and began withdrawing troops. On 27 June 2012, Northern Ireland's deputy first minister and former IRA commander, Martin McGuinness, shook hands with Queen Elizabeth II in Belfast, symbolising reconciliation between the two sides.
|
\chapter{C Admin API}
\label{chap:api:c-admin}
\section{Operations}
\label{sec:api:c:admin:ops}
\input{\topdir/c/admin/ops.tex}
|
% test for propagation and geodesic extraction on 2D planar shape
path(path, 'toolbox/');
path(path, 'data/');
name = 'chicken';
name = 'apple';
name = 'cavern';
name = 'camel';
name = 'giraffe';
rep = 'results/shape-geodesics/';
if not(exist(rep))
mkdir(rep);
end
n = 128;
M = rescale( load_image(name,n), 0,1 );
M = perform_blurring(M,5);
M = double(M>0.5);
% make sure pixels on the boundary are black
if M(1)==1
M = 1-M;
end
warning off;
imwrite(1-M, [rep name '-shape.png'], 'png');
warning off;
% compute geodesic distance
clf;
imagesc(M); axis image; axis off;
title('click on a point inside the shape');
[y,x] = ginput(1);
start_points = round([x y]');
W = ones(n);
L = zeros(n)-Inf; L(M==1) = +Inf;
options.constraint_map = L;
disp('Compute distance function');
[D,S,Q] = perform_fast_marching(W, start_points, options);
bound = compute_shape_boundary(M);
nbound = size(bound,1);
npaths = 30;
sel = round(linspace(1,nbound+1,npaths+1)); sel(end) = [];
end_points = bound(sel,:);
disp('Extract paths');
paths = {};
D1 = D; D1(M==0) = 1e9;
for i=1:npaths
paths{i} = compute_geodesic(D1,end_points(i,:)');
% paths{i} = compute_discrete_geodesic(D1,end_points(i,:)')';
end
ms = 30; lw = 3;
% display
A = convert_distance_color(D);
clf; hold on;
imageplot(A); axis image; axis off;
for i=1:npaths
end_point = end_points(i,:);
h = plot( paths{i}(2,:), paths{i}(1,:), 'k' );
set(h, 'LineWidth', lw);
h = plot(end_point(2),end_point(1), '.b');
set(h, 'MarkerSize', ms);
end
h = plot(start_points(2),start_points(1), '.r');
set(h, 'MarkerSize', ms);
hold off;
colormap jet(256);
axis ij;
saveas(gcf, [rep name '-geodesics.png'], 'png');
|
#include <boost/python.hpp>
#include "../../crypto-sdk/lib/ed25519key.h"
#include "../../crypto-sdk/lib/utils.h"
using namespace std;
using namespace boost::python;
using namespace Orbs;
void ExportED25519() {
class_<ED25519Key, boost::noncopyable>("ED25519Key", init<>())
.def(init<string>())
.def(init<string, string>())
.add_property("public_key", +[](const ED25519Key &k) {
return Utils::Vec2Hex(k.GetPublicKey());
})
.add_property("unsafe_private_key", +[](const ED25519Key &k) {
return Utils::Vec2Hex(k.GetPrivateKeyUnsafe());
})
.add_property("has_private_key", &ED25519Key::HasPrivateKey)
.def("sign", +[](const ED25519Key &k, const string &message) {
const vector<uint8_t> rawMessage(message.cbegin(), message.cend());
return Utils::Vec2Hex(k.Sign(rawMessage));
})
.def("verify", +[](const ED25519Key &k, const string &message, const string &signature) {
const vector<uint8_t> rawMessage(message.cbegin(), message.cend());
const vector<uint8_t> rawSignature(Orbs::Utils::Hex2Vec(signature));
return k.Verify(rawMessage, rawSignature);
})
;
}
|
Thanks for the reply @v_kyr, that did the job. I didn't think to check for any fixes in the beta. But that is exactly what was missing from the grid manager, and the isometric studio is also a nice addition. Although I don't know why they didn't just call it axonometric studio. I know isometric is the most popular form of axonometric projection, but still.
This formula does seem to solve the alignment of 2 of the 3 planes, but the downside is that the grid isn't uniform anymore, making it impossible to make proportional cubes, for example. I don't know if this is possible, but it seems to me that the solution is to shift the plane consisting of the first and up axes and the plane consisting of the second and up axes down or up to align with the plane consisting of the first and second axes. I would really like other grid types to be able to have aligned plane sets, just as the isometric grid type does.
|
C <HTML><PRE>
C PROGRAM A N R A Y, VERSION 4.73 (PRAHA, JUNE 2013)
C!! modifications marked with !! in Feb/2017 by Liliana Alcazar
C*******************************************************************
C
C PROGRAM ANRAY IS DESIGNED FOR RAY, TRAVEL TIME AND
C AMPLITUDE COMPUTATIONS IN 3D GENERAL ANISOTROPIC AND ISOTROPIC
C LATERALLY VARYING LAYERED MEDIA. THE PROGRAM MAKES POSSIBLE
C COMPUTATION OF RAYS SPECIFIED BY INITIAL ANGLES AT THE SOURCE,
C I.E., INITIAL-VALUE RAY TRACING, OR RAYS STARTING FROM THE
C SOURCE AND TERMINATING ON A VERTICAL OR SURFACE PROFILE, I.E.
C BOUNDARY-VALUE RAY TRACING. RAY AMPLITUDES CAN BE COMPUTED
C ALONG RAYS.
C
C*******************************************************************
C
C
CHARACTER*80 MTEXT,FILEIN,FILEOU,FILE1,FILE2,FILE3,FILE4,FILE5
CHARACTER*80 FILE6,FILE7
DIMENSION Y(18)
COMMON /AUXI/ IANI(20),INTR,INT1,IPREC,KRE,IREFR,LAY,NDER,IPRINT,
1 MPRINT,NTR,ISQRT,NAUX,ISOUR,MAUX,MREG,MDIM,IPOL,MSCON,LOUT,
2 IAMP,MTRNS,ICOEF,IAD,IRHO,ISHEAR,IAC,IRT,mori
COMMON /AUXX/ MMX(20),MMY(20),MMXY(20)
COMMON /APROX/ A11,A12,A13,A14,A15,A16,A22,A23,A24,A25,A26,A33,
1 A34,A35,A36,A44,A45,A46,A55,A56,A66,
1 DXA11,DXA12,DXA13,DXA14,DXA15,DXA16,DXA22,DXA23,
1 DXA24,DXA25,DXA26,DXA33,DXA34,DXA35,DXA36,DXA44,
1 DXA45,DXA46,DXA55,DXA56,DXA66,
1 DYA11,DYA12,DYA13,DYA14,DYA15,DYA16,DYA22,DYA23,
1 DYA24,DYA25,DYA26,DYA33,DYA34,DYA35,DYA36,DYA44,
1 DYA45,DYA46,DYA55,DYA56,DYA66,
1 DZA11,DZA12,DZA13,DZA14,DZA15,DZA16,DZA22,DZA23,
1 DZA24,DZA25,DZA26,DZA33,DZA34,DZA35,DZA36,DZA44,
1 DZA45,DZA46,DZA55,DZA56,DZA66,
1 A2546,A1266,A1355,A1456,A3645,A2344
INTEGER CODE
COMMON /COD/ CODE(50,2),KREF,KC,ITYPE
COMMON /DIST/ DST(200),NDST,REPS,PROF(2),NDSTP,PREPS,LNDST,
1XPRF,YPRF,ILOC
COMMON /DENS/ RHO(20)
COMPLEX PS
COMMON /RAY/ AY(28,2000),DS(20,50),KINT(50),HHH(3,3),TMAX,
1 PS(3,7,50),IS(8,50),N,IREF,IND,IND1
COMMON /INTRF/ Z(1000),SX(350),SY(350),NX(20),NY(20),BRD(6),NINT,
1 XINTA
COMMON /ZERO/ RNULL
COMMON/VSP/XVSP,YVSP,XNRM,YNRM,ICOD,IVSP
COMMON/VRML/LUBRD,LUGRD,LUIND,LURAY
C
C**************************************************
C
LIN=5
LOU=6
LU1=1
LU2=2
LU3=3
LUBRD=7
LUGRD=8
LUIND=9
LURAY=10
FILEIN='anray.dat'
FILEOU='anray.out'
FILE1='lu1.anray' !!
FILE2='lu2.anray' !!
FILE3=' '
FILE4=' '
FILE5=' '
FILE6=' '
FILE7=' '
C!! WRITE(*,'(2A)') ' (ANRAY) SPECIFY NAMES OF INPUT AND OUTPUT',
C!! 1' FILES LIN, LOU, LU1, LU2, LU3, LUBRD, LUGRD, LUIND, LURAY: '
C!! READ(*,*) FILEIN,FILEOU,FILE1,FILE2,FILE3,FILE4,FILE5,FILE6,FILE7
      WRITE(*,*) 'SPECIFY NAME OF INPUT MODEL FILE (in anray format)' !!
READ(*,*) FILEIN !!
IF(FILE1.EQ.' ') LU1=0
IF(FILE2.EQ.' ') LU2=0
IF(FILE3.EQ.' ') LU3=0
IF(FILE4.EQ.' ') LUBRD=0
IF(FILE5.EQ.' ') LUGRD=0
IF(FILE6.EQ.' ') LUIND=0
IF(FILE7.EQ.' ') LURAY=0
LOUT=LOU
OPEN(LIN,FILE=FILEIN,FORM='FORMATTED',STATUS='OLD')
OPEN(LOU,FILE=FILEOU,FORM='FORMATTED')
IF(LU1.NE.0)OPEN(LU1,FILE=FILE1,FORM='FORMATTED')
IF(LU2.NE.0)OPEN(LU2,FILE=FILE2,FORM='FORMATTED')
IF(LU3.NE.0)OPEN(LU3,FILE=FILE3,FORM='FORMATTED')
IF(LUBRD.NE.0)OPEN(LUBRD,FILE=FILE4,FORM='FORMATTED')
IF(LUGRD.NE.0)OPEN(LUGRD,FILE=FILE5,FORM='FORMATTED')
IF(LUIND.NE.0)OPEN(LUIND,FILE=FILE6,FORM='FORMATTED')
IF(LURAY.NE.0)OPEN(LURAY,FILE=FILE7,FORM='FORMATTED')
C
C**************************************************
C
WRITE(LOU,777)
777 FORMAT(///,'***********************'
1,//,' PROGRAM A N R A Y ',//,
2'***********************',//)
NCODE=1
MTEXT='ANRAY'
INUL=4
READ(LIN,*)MTEXT
WRITE(LOU,115)MTEXT
READ(LIN,*)INULL,ISURF
IF(INULL.EQ.0)INULL=INUL
RNULL=10.**(-INULL)
WRITE(LOU,106)INULL,ISURF
C
C
C SPECIFICATION OF THE MODEL
C
CALL MODEL(MTEXT,LIN)
C
C GENERATE FILE FOR PLOTTING VARIOUS CHARACTERISTIC SURFACES
C
IF(LU3.NE.0)CALL SURFPL(LIN,LU3)
C
C GENERATE FILE FOR VRML PLOTTING BOUNDARIES OF THE MODEL
C
IF(LUBRD.NE.0)CALL BOX(BRD)
C
C GENERATE FILE FOR PLOTTING RAYS
C
IF(LURAY.NE.0)WRITE(LURAY,113)
IF(LURAY.NE.0)WRITE(LURAY,105)
C
C SPECIFICATION OF SYNTHETIC SEISMOGRAMS
C
2 ICONT=1
MEP=0
MOUT=0
MDIM=0
METHOD=0
MREG=0
ITMAX=10
IPOL=0
IPREC=0
IRAYPL=0
IPRINT=0
IAMP=0
MTRNS=0
ICOEF=0
IRT=0
ILOC=0
MCOD=0
MORI=0
READ(LIN,*)ICONT,MEP,MOUT,MDIM,METHOD,MREG,ITMAX,
1IPOL,IPREC,IRAYPL,IPRINT,IAMP,MTRNS,ICOEF,IRT,ILOC,MCOD,MORI
WRITE(LOU,102)ICONT,MEP,MOUT,MDIM,METHOD,MREG,ITMAX,
1IPOL,IPREC,IRAYPL,IPRINT,IAMP,MTRNS,ICOEF,IRT,ILOC,MCOD,MORI
IF(ICONT.EQ.0)GO TO 99
C
C
c IF(MEP.NE.0.AND.MDIM.EQ.0)MDIM=1
IVSP=0
IF(ILOC.EQ.0)ITPR=3
IF(ILOC.EQ.1)THEN
IVSP=1
ITPR=43
MREG=1
END IF
IF(ILOC.GT.1)THEN
ITPR=ILOC+100
END IF
C
IF(MEP.EQ.0)THEN
NDST=0
END IF
C
IF(MEP.EQ.1)THEN
NDST=1
READ(LIN,*)XREC,YREC
WRITE(LOU,104)XREC,YREC
GO TO 4
END IF
IF(MEP.LT.0)THEN
NDST=-MEP
PROF(1)=0.
XPRF=0.
YPRF=0.
READ(LIN,*)PROF(1),(DST(I),I=1,NDST),XPRF,YPRF
WRITE(LOU,104)PROF(1),(DST(I),I=1,NDST),XPRF,YPRF
IF(NDST.EQ.1)RSTEP=1.
IF(NDST.EQ.1)DST(2)=DST(1)+1.
IF(NDST.EQ.1)GO TO 4
RSTEP=(DST(NDST)-DST(1))/FLOAT(NDST-1)
END IF
C
IF(MEP.GT.0)THEN
NDST=MEP
READ(LIN,*)PROF(1),RMIN,RSTEP,XPRF,YPRF
WRITE(LOU,104)PROF(1),RMIN,RSTEP,XPRF,YPRF
DO 13 I=1,MEP
13 DST(I)=RMIN+(I-1)*RSTEP
IF(NDST.EQ.1)DST(2)=RMIN+RSTEP
END IF
PROF(2)=PROF(1)+1.
NDSTP=1
C
IF(IVSP.EQ.1.AND.NDST.NE.0)THEN
READ(LIN,*)XVSP,YVSP
WRITE(LOU,104)XVSP,YVSP
END IF
C
4 TSOUR=0.
DT=1.
AC=0.0001
REPS=0.05
PREPS=0.05
READ(LIN,*)XSOUR,YSOUR,ZSOUR,TSOUR,DT,AC,REPS,PREPS
WRITE(LOU,104)XSOUR,YSOUR,ZSOUR,TSOUR,DT,AC,REPS,PREPS
C
IF(ABS(XPRF).LT..000001.AND.ABS(YPRF).LT..000001)THEN
XPRF=XSOUR
YPRF=YSOUR
END IF
IF(MEP.EQ.1)THEN
XE=XREC-XPRF
YE=YREC-YPRF
RPRF=SQRT(XE*XE+YE*YE)
XATAN=ATAN2(YE,XE)
PROF(1)=XATAN
RMIN=RPRF
DST(1)=RMIN
WRITE(LOU,104)RPRF,XATAN
RSTEP=100.
DST(2)=DST(1)+100.
PROF(2)=PROF(1)+1.
NDSTP=1
END IF
C
IF(IVSP.EQ.1.AND.NDST.NE.0)THEN
XNRM=XVSP-XSOUR
YNRM=YVSP-YSOUR
AUX=SQRT(XNRM*XNRM+YNRM*YNRM)
XNRM=XNRM/AUX
YNRM=YNRM/AUX
PROF(1)=ATAN2(YNRM,XNRM)
PROF(2)=PROF(1)+1.
XPRF=XSOUR
YPRF=YSOUR
END IF
IF(MCOD.EQ.0)THEN
READ(LIN,*)AMIN,ASTEP,AMAX
WRITE(LOU,104)AMIN,ASTEP,AMAX
READ(LIN,*)BMIN,BSTEP,BMAX
IF(ABS(BSTEP).LT..000001)THEN
BMIN=PROF(1)-.3
BMAX=PROF(1)+.4
BSTEP=.6
END IF
WRITE(LOU,104)BMIN,BSTEP,BMAX
END IF
IF((MREG.EQ.0.OR.MREG.EQ.2).AND.MDIM.NE.0) WRITE(LOU,'(/,A,/)')
1 ' COEFFICIENTS OF CONVERSION ARE APPLIED'
IF((MREG.NE.0.AND.MREG.NE.2).AND.MDIM.NE.0) WRITE(LOU,'(/,A,/)')
1 ' COEFFICIENTS OF CONVERSION ARE *** NOT *** APPLIED'
TMAX=10000.
IND=-1
NDER=1
CALL RAYA(XSOUR,YSOUR,ZSOUR,TSOUR,AMIN1,BMIN,PX,PY,PZ,XX,YY,ZZ,T,
1DT,AC)
Y(1)=XSOUR
Y(2)=YSOUR
Y(3)=ZSOUR
IF(IND.EQ.50)WRITE(LOU,111)IND
IF(IND.EQ.50)GO TO 99
LAY=IND
ISOUR=IND
ITYPE=3
CALL PARDIS(Y,0)
VP=SQRT(A11)
IF(IRHO.EQ.0)RO=1.7+.2*VP
IF(IRHO.EQ.1)RO=RHO(IND)
C
C GENERATE FILE LU2 FOR SYNTHETIC SEISMOGRAM COMPUTATIONS
C
IF(LU2.NE.0.AND.NDST.NE.0)THEN
WRITE(LU2,115)MTEXT
KSH=2
WRITE(LU2,100)NDST,KSH,ILOC
WRITE(LU2,104)XSOUR,YSOUR,ZSOUR,TSOUR,RSTEP,RO
IF(MEP.NE.1)WRITE(LU2,104)(DST(I),I=1,NDST)
IF(MEP.EQ.1)WRITE(LU2,104)XREC,YREC,RPRF,XATAN
END IF
C
C LOOP FOR ELEMENTARY WAVES
C
20 READ(LIN,*)KC,KREF,((CODE(I,K),K=1,2),I=1,KREF)
WRITE(LOU,100)KC,KREF,((CODE(I,K),K=1,2),I=1,KREF)
IF(KREF.EQ.0)GOTO 2
IF(MOUT.NE.0)WRITE(LOU,107)
WRITE(LOU,103)NCODE,KC,KREF,((CODE(I,K),K=1,2),I=1,KREF)
C
IF(MCOD.NE.0)THEN
READ(LIN,*)AMIN,ASTEP,AMAX
WRITE(LOU,104)AMIN,ASTEP,AMAX
READ(LIN,*)BMIN,BSTEP,BMAX
IF(ABS(BSTEP).LT..000001)THEN
BMIN=PROF(1)-.3
BMAX=PROF(1)+.4
BSTEP=.6
END IF
WRITE(LOU,104)BMIN,BSTEP,BMAX
END IF
C
C GENERATE FILE LU1 FOR PLOTTING OF RAY DIAGRAMS,
C TIME-DISTANCE AND AMPLITUDE-DISTANCE CURVES
C
IF(LU1.EQ.0.OR.NDST.EQ.0)GO TO 21
WRITE(LU1,100)ICONT,NDST,ILOC
WRITE(LU1,104)RO
NPN=2
APN=0.
WRITE(LU1,100)NPN,NPN,NPN
WRITE(LU1,101)APN,APN,APN,APN,APN
WRITE(LU1,101)APN,APN,APN,APN,APN
WRITE(LU1,104)Xprf,Yprf,0.0,PROF(1)
WRITE(LU1,104)(DST(I),I=1,NDST)
21 CONTINUE
C
C
C SEARCH FOR THE NUMBER OF THE ELEMENT OF THE RAY, STARTING FROM
C WHICH THE WAVE DOES UNDERTAKE NEITHER REFLECTION NOR CONVERSION
C
ICOD=0
IF(IVSP.EQ.0)GO TO 35
DO 34 I=1,KREF
ICOD=KREF-I+1
IF(ICOD.EQ.1) GO TO 34
IC1=CODE(ICOD,1)
IC2=CODE(ICOD-1,1)
IF((IC1-IC2).EQ.0)GO TO 35
IC1=CODE(ICOD,2)
IC2=CODE(ICOD-1,2)
IF((IC1-IC2).NE.0)GO TO 35
34 CONTINUE
35 CONTINUE
IF(MOUT.NE.0)WRITE(LOU,108)
C
C
CALL RECEIV(XSOUR,YSOUR,ZSOUR,TSOUR,DT,AC,ITMAX,AMIN,ASTEP,
1AMAX,BMIN,BSTEP,BMAX,MOUT,LU1,LU2,METHOD,ITPR,NCODE)
IF(IND.EQ.14) WRITE(LOU,111) IND
NCODE=NCODE+1
GOTO 20
C
C END OF LOOP FOR ELEMENTARY WAVES
C
C
100 FORMAT(26I3)
101 FORMAT(5E15.5)
102 FORMAT(1H0,////,2X,26I3)
103 FORMAT(4X,I4,9X,100I3)
104 FORMAT(8F10.5)
105 FORMAT('/')
106 FORMAT(17I5)
107 FORMAT(//2X,'INT.CODE',5X,'E X T E R N A L C O D E')
108 FORMAT(//)
111 FORMAT(/2X,'IND=',I5,/)
113 FORMAT(6H'RAYS')
115 FORMAT(A)
C
99 CONTINUE
IF(LURAY.NE.0)WRITE(LURAY,105)
IF(LU1.NE.0.AND.NDST.NE.0)WRITE(LU1,100)ICONT,ICONT
IF(LU1.NE.0)REWIND LU1
IF(LU2.NE.0)REWIND LU2
C
STOP
END
C
C *********************************************************
C
SUBROUTINE AMPL (AMPX,AMPY,AMPZ,UU)
C
C ROUTINE FOR COMPUTING COMPLEX VECTORIAL RAY AMPLITUDES
C
C OUTPUT PARAMETERS
C AMPX(2),AMPY(2),AMPZ(2) - X,Y AND Z COMPONENTS OF COMPLEX
C VECTORIAL RAY AMPLITUDES IN THE MODEL COORDINATES. FOR P WAVE
C IN ANY MEDIUM AND FOR S WAVES IN AN ANISOTROPIC MEDIUM, I=1.
C FOR S WAVE GENERATED IN AN ISOTROPIC MEDIUM, I=1,2. I=1 AND 2
C CORRESPOND TO S WAVES SPECIFIED AT THE SOURCE BY VECTORS E1
C E2. VECTORS E1 AND E2 TOGETHER WITH UNIT VECTOR TANGENT TO
C THE RAY FORM A BASIS OF RAY CENTRED COORDINATE SYSTEM.
C UU - PRODUCT OF RATIOS OF DENSITIES AND COSINES OF INCIDENCE
C AND OF REFLECTION/TRANSMISSION AT POINTS WHERE THE RAY CROSSES
c INTERFACES.
C
C CALLED FROM: RECEIV
C ROUTINES CALLED: POLAR,TRANSL,COEF
C
DIMENSION Y(18),UN(3),POLD(3),PNEW(3)
COMPLEX AMPX(2),AMPY(2),AMPZ(2),CR(3),UC(3),STU(6),C1,C2,C3
COMMON /AUXI/ IANI(20),INTR,INT1,IPREC,KRE,IREFR,LAY,NDER,IPRINT,
1 MPRINT,NTR,ISQRT,NAUX,ISOUR,MAUX,MREG,MDIM,IPOL,MSCON,LOU,
2 IAMP,MTRNS,ICOEF,IAD,IRHO,ISHEAR,IAC,IRT,mori
COMMON /DIST/ DST(200),NDST,REPS,PROF(2),NDSTP,PREPS,LNDST,
1XPRF,YPRF,ILOC
INTEGER CODE
COMMON /COD/ CODE(50,2),KREF,KC,ITYPE
COMMON /DENS/ RHO(20)
COMPLEX PS
COMMON /RAY/ AY(28,2000),DS(20,50),KINT(50),HHH(3,3),tmax,
1 PS(3,7,50),IS(8,50),N,IREF,IND,IND1
COMMON /RAY2/ DRY(3,2000)
C
KSS=1
ISHEAR=0
ITYPE=CODE(1,2)
IF(IANI(ISOUR).EQ.0.AND.ITYPE.NE.3)THEN
ISHEAR=1
ITYPE=1
END IF
ITP=ITYPE
DO 1 I=1,2
AMPX(I)=CMPLX(0.,0.)
AMPY(I)=CMPLX(0.,0.)
AMPZ(I)=CMPLX(0.,0.)
1 CONTINUE
C
3000 NN=N
IDD=0
N2=0
N1=1
IRE=IREF
AV=1.
C
C SPECIFICATION OF DISPLACEMENT VECTOR AT SOURCE
C IN RAY CENTERED COORDINATES
C
DO 5 I=1,3
CR(I)=(0.,0.)
5 CONTINUE
CR(ITP)=(1.,0.)
IREF1=IREF-1
IF(IRE.GT.1)INAUM=CODE(IRE-1,1)-CODE(IRE,1)
IF(MREG.GE.1.AND.IRE.GT.1.AND.ILOC.GT.1.AND.INAUM.GE.0)THEN
IREF1=IREF1+1
CODE(IREF1+1,2)=3
END IF
IF(IREF1.EQ.0) GOTO 100
C
C LOOP OVER INTERFACES
C
DO 10 I=1,IREF1
IREF=I
IF(KC.NE.0) ITYPE=CODE(IREF,2)
N=KINT(IREF)
IF(N.EQ.0) THEN
IDD=1
GO TO 10
ELSE
N1=N2+1
N2=N
IF(IDD.NE.0) N2=-N2
IDD=0
C
C COMPUTATION OF POLARIZATION VECTORS
C CONSIDERED POLARIZATION VECTOR(S) ARE STORED IN CORRESPONDING
C COLUMNS OF THE MATRIX HHH. OTHER COLUMNS ARE ZERO.
C
CALL POLAR(N1,N2,NN,IREF)
END IF
DO 20 K=1,6
Y(K)=AY(K+1,N)
20 CONTINUE
IF(IAMP.GT.0)WRITE(LOU,'(a,2i5,6f10.5)')' AMPL:I,N,Y',I,N,
1(Y(L),L=1,6)
DO 30 K=1,3
POLD(K)=Y(K+3)
PS(K,7,IREF)=Y(K+3)
30 CONTINUE
DO 40 K=1,3
UN(K)=DS(K,IREF)
40 CONTINUE
LAY=IS(1,IREF)
ITRANS=IS(2,IREF)
ITR1=ITRANS
IF(UN(3).GT.0.0) GOTO 50
C
C RAY STRIKING THE INTERFACE FROM ABOVE
C
IF(ITRANS.EQ.0) THEN
LAY=LAY+1
ITRANS=1
GOTO 70
END IF
IF(ITRANS.GT.0) THEN
LAY=LAY-1
ITRANS=0
GOTO 70
END IF
C
C RAY STRIKING THE INTERFACE FROM BELOW
C
50 IF(ITRANS.EQ.0) THEN
LAY=LAY-1
ITRANS=1
GOTO 70
END IF
IF(ITRANS.GT.0) THEN
LAY=LAY+1
ITRANS=0
GOTO 70
END IF
C
C SLOWNESS VECTORS ON THE SIDE OF THE INTERFACE WHERE GENERATED
C WAVE PROPAGATES WERE DETERMINED DURING THE CALL OF TRANSL IN THE
C ROUTINE OUT. HERE REMAINING SLOWNESS VECTORS ON THE OTHER SIDE
C OF THE INTERFACE ARE DETERMINED
C
C REDEFINITION OF IREF FOR CALL OF ROUTINE TRANSL
C
70 IF(LAY.EQ.0) THEN
DO 71 K=4,6
DO 71 L=1,3
PS(L,K,IREF)=CMPLX(0.,0.)
71 CONTINUE
GO TO 75
END IF
IREF=IREF+1
CALL TRANSL(Y,POLD,PNEW,UN,ITRANS,0)
IF(IND.EQ.10)RETURN
IREF=IREF-1
75 IF(IAMP.NE.0)THEN
WRITE(LOU,'(A)')' REFLECTED/TRANSMITTED SLOWNESS VECTORS'
WRITE(LOU,'(6F12.6)')((PS(L,K,IREF),L=1,3),K=1,6)
END IF
AV1=(DS(11,IREF)*DS(10,IREF))/(DS(8,IREF)*DS(7,IREF))
AV=AV*AV1
IF(IAMP.GT.0) THEN
WRITE(LOU,'(A)') 'ROI,ROG,UNVGI,UNVGG,AV1,AV'
WRITE(LOU,'(6F10.5)') DS(8,IREF),
1 DS(11,IREF),DS(7,IREF),DS(10,IREF),AV1,AV
WRITE(LOU,'(A,/,6F12.5,/,3(3F12.5/))') ' CR,HHH',
2 CR,((HHH(J,K),J=1,3),K=1,3)
END IF
C
C COMPUTATION OF AMPLITUDE COEFFICIENTS OF REFLECTED/TRANSMITTED WAVES
C
C
C COMPUTATION OF CARTESIAN COMPONENTS OF INCIDENT DISPLACEMENT VECTOR
C
DO 87 K=1,3
STU(K)=CMPLX(0.,0.)
DO 87 J=1,3
STU(K)=HHH(J,K)*CR(J)+STU(K)
87 CONTINUE
IF(IAMP.GT.0)WRITE(LOU,'(A,6F10.5)') ' STU',(STU(K),K=1,3)
IF(KC.NE.0)ITYPE=CODE(IREF+1,2)
IF(MREG.GE.1.AND.IRE.GT.1.AND.I.EQ.IREF1.AND.ILOC.GT.1.AND.
1(CODE(IRE,1).LE.CODE(IRE-1,1)))ITR1=1
CALL COEF(STU,CR,ITR1)
IF(IND.EQ.11)RETURN
BCR=SQRT(REAL(CR(1)*CONJG(CR(1))+CR(2)*CONJG(CR(2))
1 +CR(3)*CONJG(CR(3))))
IF(BCR.LT.1.E-10) THEN
DO 88 K=1,3
UC(K)=(0.,0.)
88 CONTINUE
GOTO 130
END IF
10 CONTINUE
C
C END OF LOOP OVER INTERFACES
C
C TERMINATION POINT
C
100 CONTINUE
IF(IRE.GT.1)INAUM=CODE(IRE-1,1)-CODE(IRE,1)
IF((MREG.GE.1.AND.IRE.GT.1).AND.ILOC.GT.1.AND.INAUM.GE.0)THEN
DO 200 K=1,3
Y(K+3)=REAL(PS(K,6,IREF1))
200 CONTINUE
V=1./SQRT(Y(4)*Y(4)+Y(5)*Y(5)+Y(6)*Y(6))
DO 201 K=1,3
HHH(1,K)=0.
HHH(2,K)=0.
HHH(3,K)=V*Y(K+3)
201 CONTINUE
ELSE
N1=N2+1
N2=NN
IF(KC.NE.0)ITYPE=CODE(IRE,2)
IF(KC.NE.0)IS(7,IRE)=CODE(IRE,1)
CALL POLAR(N1,N2,NN,IRE)
END IF
C
C COMPUTATION OF CARTESIAN COMPONENTS OF INCIDENT DISPLACEMENT VECTOR
C
DO 107 K=1,3
STU(K)=CMPLX(0.,0.)
DO 107 J=1,3
STU(K)=HHH(J,K)*CR(J)+STU(K)
107 CONTINUE
IF(IAMP.GT.0)WRITE(LOU,'(A,6F10.5)') ' STU',(STU(K),K=1,3)
C
IF(IRE.GT.1)INAUM=CODE(IRE-1,1)-CODE(IRE,1)
IF(MREG.EQ.1.OR.MREG.EQ.3.OR.
1(MREG.EQ.2.AND.IRE.GT.1.AND.INAUM.GE.0))THEN
UC(1)=STU(1)
UC(2)=STU(2)
UC(3)=STU(3)
IF(MREG.GT.1) THEN
C
C CALCULATION OF PRESSURE AT THE TERMINATION POINT
C
C1=UC(1)
C2=UC(2)
C3=UC(3)
ARE=REAL(C1)
IF(ARE.LT.0.)ARE=-ARE
AIM=AIMAG(C1)
APHI=ATAN2(AIM,ARE)
ARE=SQRT(REAL(C1*CONJG(C1)+C2*CONJG(C2)+C3*CONJG(C3)))
UC(1)=ARE*CMPLX(COS(APHI),SIN(APHI))
UC(2)=(0.,0.)
UC(3)=(0.,0.)
IF(IAMP.GT.0)WRITE(LOU,'(A,4F10.5)') ' UC(1),ARE,APHI',
1 UC(1),ARE,APHI
END IF
GOTO 110
END IF
DO 105 K=1,6
Y(K)=AY(K+1,NN)
IF(K.LE.3)GO TO 105
PS(K-3,7,IRE)=Y(K)
POLD(K-3)=Y(K)
UN(K-3)=DS(K-3,IRE)
105 CONTINUE
N=NN
IF(MREG.EQ.0.OR.MREG.EQ.2) THEN
IREF=IREF+1
IF(INTR.EQ.LAY)LAY=LAY-1
IF(INTR.NE.LAY)LAY=LAY+1
CALL TRANSL(Y,POLD,PNEW,UN,1,0)
END IF
IREF=IRE
IF(IAMP.GT.0)THEN
WRITE(LOU,'(A)')
1 ' REFLECTED SLOWNESS VECTORS AT TERMINATION POINT'
WRITE(LOU,'(6F12.6)')((PS(L,K,IRE),L=1,3),K=1,3)
END IF
C
C COMPUTATION OF CONVERSION COEFFICIENTS
C
KTR=999
CALL COEF(STU,UC,KTR)
IF(IND.EQ.11)RETURN
110 CONTINUE
DO 115 K=1,3
Y(K)=AY(K+4,NN)
115 CONTINUE
VPEND=1./SQRT(Y(1)*Y(1)+Y(2)*Y(2)+Y(3)*Y(3))
IF(IRE.GT.1)INAUM=CODE(IRE-1,1)-CODE(IRE,1)
IF((MREG.GE.1.AND.IRE.GT.1).AND.ILOC.GT.1.AND.
1INAUM.GE.0)VPEND=V
DO 120 K=1,3
Y(K)=AY(K+4,1)
120 CONTINUE
VP0=1./SQRT(Y(1)*Y(1)+Y(2)*Y(2)+Y(3)*Y(3))
RHO0=0.2*SQRT(AY(8,1))+1.7
IF(IRHO.NE.0) RHO0=RHO(ISOUR)
RHEND=0.2*SQRT(AY(8,NN))+1.7
IF(IRHO.NE.0) RHEND=RHO(LAY)
AV=AV*VP0*RHO0
AV=AV/(VPEND*RHEND)
UU=SQRT(ABS(AV))
IF(IAMP.GT.0)
1WRITE(LOU,'(A,4F12.6)')'VP0,RH0,VPEND,RHEND',VP0,RHO0,VPEND,RHEND
130 CONTINUE
N=NN
IREF=IRE
AMPX(KSS)=UC(1)
AMPY(KSS)=UC(2)
AMPZ(KSS)=UC(3)
IF(MREG.GT.1)AMPX(KSS)=AMPX(KSS)*VPEND*RHEND
IF(ISHEAR.NE.0.AND.KSS.NE.2) THEN
KSS=2
ITP=2
GOTO 3000
END IF
RETURN
END
C
C *********************************************************
C
SUBROUTINE APPROX(X,Y,YD,KDIM)
C
C THE ROUTINE PERFORMS THIRD-ORDER INTERPOLATION BETWEEN POINTS
C YOLD AND YNEW PARAMETERIZED BY AN INDEPENDENT VARIABLE X.
C DOLD, DNEW ARE THE FIRST DERIVATIVES OF Y WITH RESPECT
C TO X AT THE POINTS YOLD AND YNEW.
C
DIMENSION Y(18),YD(18)
COMMON/APPR/ XOLD,XNEW,YOLD(18),DOLD(18),YNEW(18),DNEW(18)
C
A=(X-XNEW)/(XNEW-XOLD)
AUX=A+1.
A1=(2.*A+3.)*A*A
A2=1.-A1
B1=AUX*A*(X-XNEW)
B2=AUX*A*(X-XOLD)
AD1=6.*A*AUX/(XNEW-XOLD)
AD2=-AD1
BD1=A*(3.*A+2.)
BD2=AUX*(3.*A+1.)
DO 1 I=1,KDIM
Y(I)=A1*YOLD(I)+A2*YNEW(I)+B1*DOLD(I)+B2*DNEW(I)
YD(I)=AD1*YOLD(I)+AD2*YNEW(I)+BD1*DOLD(I)+BD2*DNEW(I)
1 CONTINUE
RETURN
END
C
C *********************************************************
C
SUBROUTINE BIAP(MX1,MX,MY1,MY,MXY1)
C
DIMENSION X(200),FX(200),V(1000)
COMMON/ZCOEF/ A02(1000),A20(1000),A22(1000)
COMMON /INTRF/ Z(1000),SX(350),SY(350),NX(20),NY(20),BRD(6),NINT,
1 XINTA
EQUIVALENCE(Z(1),V(1))
C
C ROUTINE DETERMINING THE COEFFICIENTS
C OF BICUBIC SPLINE INTERPOLATION
C
DO 1 J=1,MX
L=MX1+J-1
1 X(J)=SX(L)
DO 3 I=1,MY
DO 2 J=1,MX
K=MXY1+(J-1)*MY+I-1
2 FX(J)=V(K)
CALL SPLIN(X,FX,1,MX)
DO 3 J=1,MX
K=MXY1+(J-1)*MY+I-1
3 A20(K)=FX(J)
C
DO 4 I=1,MY
L=MY1+I-1
4 X(I)=SY(L)
DO 6 J=1,MX
DO 5 I=1,MY
K=MXY1+(J-1)*MY+I-1
5 FX(I)=V(K)
CALL SPLIN(X,FX,1,MY)
DO 6 I=1,MY
K=MXY1+(J-1)*MY+I-1
6 A02(K)=FX(I)
C
DO 7 J=1,MX
L=MX1+J-1
7 X(J)=SX(L)
DO 9 I=1,MY
DO 8 J=1,MX
K=MXY1+(J-1)*MY+I-1
8 FX(J)=A02(K)
CALL SPLIN(X,FX,1,MX)
DO 9 J=1,MX
K=MXY1+(J-1)*MY+I-1
9 A22(K)=FX(J)
C
RETURN
END
C
C
C *********************************************************
C
SUBROUTINE CHRM(Y)
C
C ROUTINE FOR THE COMPUTATION OF THE ELEMENTS OF THE CHRISTOFFEL
C MATRIX FOR AN ARBITRARY ANISOTROPIC MEDIUM
C
DIMENSION Y(18)
COMMON /APROX/ A11,A12,A13,A14,A15,A16,A22,A23,A24,A25,A26,A33,
1 A34,A35,A36,A44,A45,A46,A55,A56,A66,
1 DXA11,DXA12,DXA13,DXA14,DXA15,DXA16,DXA22,DXA23,
1 DXA24,DXA25,DXA26,DXA33,DXA34,DXA35,DXA36,DXA44,
1 DXA45,DXA46,DXA55,DXA56,DXA66,
1 DYA11,DYA12,DYA13,DYA14,DYA15,DYA16,DYA22,DYA23,
1 DYA24,DYA25,DYA26,DYA33,DYA34,DYA35,DYA36,DYA44,
1 DYA45,DYA46,DYA55,DYA56,DYA66,
1 DZA11,DZA12,DZA13,DZA14,DZA15,DZA16,DZA22,DZA23,
1 DZA24,DZA25,DZA26,DZA33,DZA34,DZA35,DZA36,DZA44,
1 DZA45,DZA46,DZA55,DZA56,DZA66,
1 A2546,A1266,A1355,A1456,A3645,A2344
COMPLEX PS
COMMON /RAY/ AY(28,2000),DS(20,50),KINT(50),HHH(3,3),tmax,
1 PS(3,7,50),IS(8,50),N,IREF,IND,IND1
COMMON /AUXI/ IANI(20),INTR,INT1,IPREC,KRE,IREFR,LAY,NDER,IPRINT,
1 MPRINT,NTR,ISQRT,NAUX,ISOUR,MAUX,MREG,MDIM,IPOL,MSCON,LOUT,
2 IAMP,MTRNS,ICOEF,IAD,IRHO,ISHEAR,IAC,IRT,mori
INTEGER CODE
COMMON /COD/ CODE(50,2),KREF,KC,ITYPE
COMMON /DJK/ D11,D12,D13,D22,D23,D33,DTR
COMMON /GAM/ C11,C12,C13,C22,C23,C33
C
P1=Y(4)
P2=Y(5)
P3=Y(6)
P2P3=P2*P3
P1P2=P1*P2
P1P3=P1*P3
P1P1=P1*P1
P2P2=P2*P2
P3P3=P3*P3
C11=P1P1*A11+P2P2*A66+P3P3*A55
1+2.*(P2P3*A56+P1P3*A15+P1P2*A16)
C22=P1P1*A66+P2P2*A22+P3P3*A44
1+2.*(P2P3*A24+P1P3*A46+P1P2*A26)
C33=P1P1*A55+P2P2*A44+P3P3*A33
1+2.*(P2P3*A34+P1P3*A35+P1P2*A45)
C23=P1P1*A56+P2P2*A24+P3P3*A34
1 +P2P3*A2344+P1P3*A3645+P1P2*A2546
C13=P1P1*A15+P2P2*A46+P3P3*A35
1 +P2P3*A3645+P1P3*A1355+P1P2*A1456
C12=P1P1*A16+P2P2*A26+P3P3*A45
1 +P2P3*A2546+P1P3*A1456+P1P2*A1266
C11N=C11-1.
C22N=C22-1.
C33N=C33-1.
C23SQ=C23*C23
C13SQ=C13*C13
C12SQ=C12*C12
D11=C22N*C33N-C23SQ
D22=C11N*C33N-C13SQ
D33=C11N*C22N-C12SQ
D12=C13*C23-C12*C33N
D13=C12*C23-C13*C22N
D23=C12*C13-C23*C11N
DTR=D11+D22+D33
IF(ABS(DTR).LT.0.0000001)THEN
WRITE(LOUT,'(A)')'CHRM: SHEAR WAVE SINGULARITY'
IND=10
END IF
RETURN
END
C
C *********************************************************
C
SUBROUTINE CHRM1(C,PN,UN)
C
C ROUTINE FOR THE COMPUTATION OF THE ELEMENTS OF THE CHRISTOFFEL
C MATRIX FOR AN ARBITRARY ANISOTROPIC MEDIUM
C
DIMENSION C(3,3),PN(3),UN(3)
COMMON /APROX/ A11,A12,A13,A14,A15,A16,A22,A23,A24,A25,A26,A33,
1 A34,A35,A36,A44,A45,A46,A55,A56,A66,
1 DXA11,DXA12,DXA13,DXA14,DXA15,DXA16,DXA22,DXA23,
1 DXA24,DXA25,DXA26,DXA33,DXA34,DXA35,DXA36,DXA44,
1 DXA45,DXA46,DXA55,DXA56,DXA66,
1 DYA11,DYA12,DYA13,DYA14,DYA15,DYA16,DYA22,DYA23,
1 DYA24,DYA25,DYA26,DYA33,DYA34,DYA35,DYA36,DYA44,
1 DYA45,DYA46,DYA55,DYA56,DYA66,
1 DZA11,DZA12,DZA13,DZA14,DZA15,DZA16,DZA22,DZA23,
1 DZA24,DZA25,DZA26,DZA33,DZA34,DZA35,DZA36,DZA44,
1 DZA45,DZA46,DZA55,DZA56,DZA66,
1 A2546,A1266,A1355,A1456,A3645,A2344
C
P1=PN(1)
P2=PN(2)
P3=PN(3)
U1=UN(1)
U2=UN(2)
U3=UN(3)
P2U3=P2*U3
P3U2=P3*U2
P1U2=P1*U2
P2U1=P2*U1
P1U3=P1*U3
P3U1=P3*U1
P1U1=P1*U1
P2U2=P2*U2
P3U3=P3*U3
C(1,1)=P1U1*A11+P2U2*A66+P3U3*A55
1+(P2U3+P3U2)*A56+(P1U3+P3U1)*A15+(P1U2+P2U1)*A16
C(2,2)=P1U1*A66+P2U2*A22+P3U3*A44
1+(P2U3+P3U2)*A24+(P1U3+P3U1)*A46+(P1U2+P2U1)*A26
C(3,3)=P1U1*A55+P2U2*A44+P3U3*A33
1+(P2U3+P3U2)*A34+(P1U3+P3U1)*A35+(P1U2+P2U1)*A45
C(2,3)=P1U1*A56+P2U2*A24+P3U3*A34
1+0.5*((P2U3+P3U2)*A2344+(P1U3+P3U1)*A3645+(P1U2+P2U1)*A2546)
C(1,3)=P1U1*A15+P2U2*A46+P3U3*A35
1+0.5*((P2U3+P3U2)*A3645+(P1U3+P3U1)*A1355+(P1U2+P2U1)*A1456)
C(1,2)=P1U1*A16+P2U2*A26+P3U3*A45
1+0.5*((P2U3+P3U2)*A2546+(P1U3+P3U1)*A1456+(P1U2+P2U1)*A1266)
C(2,1)=C(1,2)
C(3,2)=C(2,3)
C(3,1)=C(1,3)
RETURN
END
C
C *********************************************************
C
SUBROUTINE CHRM2(Y,G,i)
C
C EVALUATES ELEMENTS OF THE CHRISTOFFEL MATRIX
C
DIMENSION a(21),Y(18),G(3,3)
COMMON/GAM/G11,G12,G13,G22,G23,G33
COMMON /APROX1/ e(21,10)
COMMON /AUXI/ IANI(20),INTR,INT1,IPREC,KRE,IREFR,LAY,NDER,IPRINT,
1 MPRINT,NTR,ISQRT,NAUX,ISOUR,MAUX,MREG,MDIM,IPOL,MSCON,LOUT,
2 IAMP,MTRNS,ICOEF,IAD,IRHO,ISHEAR,IAC,IRT,mori
C
DO 1 J=1,21
A(J)=E(J,I)
1 CONTINUE
P1=Y(4)
P2=Y(5)
P3=Y(6)
P11=P1*P1
P12=P1*P2
P13=P1*P3
P22=P2*P2
P23=P2*P3
P33=P3*P3
G11=A(1)*P11+A(21)*P22+A(19)*P33+
1 2.*(A(6)*P12+A(5)*P13+A(20)*P23)
G22=A(21)*P11+A(7)*P22+A(16)*P33+
1 2.*(A(11)*P12+A(18)*P13+A(9)*P23)
G33=A(19)*P11+A(16)*P22+A(12)*P33+
1 2.*(A(17)*P12+A(14)*P13+A(13)*P23)
G12=A(6)*P11+A(11)*P22+A(17)*P33+
1 (A(21)+A(2))*P12+(A(20)+A(4))*P13+(A(10)+A(18))*P23
G13=A(5)*P11+A(18)*P22+A(14)*P33+
1 (A(20)+A(4))*P12+(A(19)+A(3))*P13+(A(17)+A(15))*P23
G23=A(20)*P11+A(9)*P22+A(13)*P33+
1 (A(10)+A(18))*P12+(A(17)+A(15))*P13+(A(16)+A(8))*P23
G(1,1)=G11
G(1,2)=G12
G(1,3)=G13
G(2,1)=G12
G(2,2)=G22
G(2,3)=G23
G(3,1)=G13
G(3,2)=G23
G(3,3)=G33
RETURN
END
C
C *********************************************************
C
SUBROUTINE PCHRM(Y,G,L,I)
C
C EVALUATES FIRST DERIVATIVES OF ELEMENTS OF CHRISTOFFEL MATRIX
C WITH RESPECT TO THE L-TH COMPONENT OF THE SLOWNESS VECTOR
C
DIMENSION A(21),Y(18),G(3,3)
COMMON /APROX1/ E(21,10)
C
DO 1 J=1,21
A(J)=E(J,I)
1 CONTINUE
P1=Y(4)
P2=Y(5)
P3=Y(6)
IF(L.EQ.1)THEN
G(1,1)=2.*(A(1)*P1+A(6)*P2+A(5)*P3)
G(2,2)=2.*(A(21)*P1+A(11)*P2+A(18)*P3)
G(3,3)=2.*(A(19)*P1+A(17)*P2+A(14)*P3)
AUX=2.*A(6)*P1+(A(21)+A(2))*P2+(A(20)+A(4))*P3
G(1,2)=AUX
G(2,1)=AUX
AUX=2.*A(5)*P1+(A(20)+A(4))*P2+(A(19)+A(3))*P3
G(1,3)=AUX
G(3,1)=AUX
AUX=2.*A(20)*P1+(A(10)+A(18))*P2+(A(17)+A(15))*P3
G(2,3)=AUX
G(3,2)=AUX
END IF
IF(L.EQ.2)THEN
G(1,1)=2.*(A(6)*P1+A(21)*P2+A(20)*P3)
G(2,2)=2.*(A(11)*P1+A(7)*P2+A(9)*P3)
G(3,3)=2.*(A(17)*P1+A(16)*P2+A(13)*P3)
AUX=2.*A(11)*P2+(A(21)+A(2))*P1+(A(10)+A(18))*P3
G(1,2)=AUX
G(2,1)=AUX
AUX=2.*A(18)*P2+(A(20)+A(4))*P1+(A(17)+A(15))*P3
G(1,3)=AUX
G(3,1)=AUX
AUX=2.*A(9)*P2+(A(10)+A(18))*P1+(A(16)+A(8))*P3
G(2,3)=AUX
G(3,2)=AUX
END IF
IF(L.EQ.3)THEN
G(1,1)=2.*(A(5)*P1+A(20)*P2+A(19)*P3)
G(2,2)=2.*(A(18)*P1+A(9)*P2+A(16)*P3)
G(3,3)=2.*(A(14)*P1+A(13)*P2+A(12)*P3)
AUX=2.*A(17)*P3+(A(20)+A(4))*P1+(A(10)+A(18))*P2
G(1,2)=AUX
G(2,1)=AUX
AUX=2.*A(14)*P3+(A(19)+A(3))*P1+(A(17)+A(15))*P2
G(1,3)=AUX
G(3,1)=AUX
AUX=2.*A(13)*P3+(A(17)+A(15))*P1+(A(16)+A(8))*P2
G(2,3)=AUX
G(3,2)=AUX
END IF
RETURN
END
C
C *********************************************************
C
SUBROUTINE PPCHRM(G,L,M,i)
C
C EVALUATES SECOND DERIVATIVES OF ELEMENTS OF CHRISTOFFEL MATRIX
C WITH RESPECT TO THE L-TH AND M-TH COMPONENTS OF THE SLOWNESS
C VECTOR
C
DIMENSION a(21),G(3,3)
COMMON /APROX1/ e(21,10)
C
do 1 j=1,21
a(j)=e(j,i)
1 continue
IF(L.EQ.1.AND.M.EQ.1)THEN
G(1,1)=2.*A(1)
G(2,2)=2.*A(21)
G(3,3)=2.*A(19)
AUX=2.*A(6)
G(1,2)=AUX
G(2,1)=AUX
AUX=2.*A(5)
G(1,3)=AUX
G(3,1)=AUX
AUX=2.*A(20)
G(2,3)=AUX
G(3,2)=AUX
END IF
IF(L.EQ.2.AND.M.EQ.2)THEN
G(1,1)=2.*A(21)
G(2,2)=2.*A(7)
G(3,3)=2.*A(16)
AUX=2.*A(11)
G(1,2)=AUX
G(2,1)=AUX
AUX=2.*A(18)
G(1,3)=AUX
G(3,1)=AUX
AUX=2.*A(9)
G(2,3)=AUX
G(3,2)=AUX
END IF
IF(L.EQ.3.AND.M.EQ.3)THEN
G(1,1)=2.*A(19)
G(2,2)=2.*A(16)
G(3,3)=2.*A(12)
AUX=2.*A(17)
G(1,2)=AUX
G(2,1)=AUX
AUX=2.*A(14)
G(1,3)=AUX
G(3,1)=AUX
AUX=2.*A(13)
G(2,3)=AUX
G(3,2)=AUX
END IF
IF((L.EQ.1.AND.M.EQ.2).OR.(L.EQ.2.AND.M.EQ.1))THEN
G(1,1)=2.*A(6)
G(2,2)=2.*A(11)
G(3,3)=2.*A(17)
AUX=A(21)+A(2)
G(1,2)=AUX
G(2,1)=AUX
AUX=A(20)+A(4)
G(1,3)=AUX
G(3,1)=AUX
AUX=A(10)+A(18)
G(2,3)=AUX
G(3,2)=AUX
END IF
IF((L.EQ.1.AND.M.EQ.3).OR.(L.EQ.3.AND.M.EQ.1))THEN
G(1,1)=2.*A(5)
G(2,2)=2.*A(18)
G(3,3)=2.*A(14)
AUX=A(20)+A(4)
G(1,2)=AUX
G(2,1)=AUX
AUX=A(19)+A(3)
G(1,3)=AUX
G(3,1)=AUX
AUX=A(17)+A(15)
G(2,3)=AUX
G(3,2)=AUX
END IF
IF((L.EQ.2.AND.M.EQ.3).OR.(L.EQ.3.AND.M.EQ.2))THEN
G(1,1)=2.*A(20)
G(2,2)=2.*A(9)
G(3,3)=2.*A(13)
AUX=A(10)+A(18)
G(1,2)=AUX
G(2,1)=AUX
AUX=A(17)+A(15)
G(1,3)=AUX
G(3,1)=AUX
AUX=A(16)+A(8)
G(2,3)=AUX
G(3,2)=AUX
END IF
RETURN
END
C
C *********************************************************
C
SUBROUTINE FACETS(N1,N2,NSRF)
INTEGER LU,N1,N2,NSRF
C
C Subroutine FACETS writes the index file listing the vertices of each
C tetragon covering the structural interface. The vertices are assumed
C to be stored in a separate file, with inner loop over N1 points along
C the first horizontal axis, middle loop over N2 points along the second
C horizontal axis and outer loop over the surfaces. The vertices are
C indexed by positive integers according to their order in the vertex
C file.
C
C Input:
C LU... Logical unit number connected to the output file to be
C written by this subroutine.
C N1... Number of points along the first horizontal axis.
C N2... Number of points along the second horizontal axis.
C NSRF... Number of interfaces.
C The input parameters are not altered.
C
C No output.
C
C Output index file with the tetragons:
C For each tetragon, a line containing I1,I2,I3,I4,/
C I1,I2,I3,I4... Indices of the vertices of the tetragon.
C The vertices are indexed by positive integers according to
C their order in the respective vertex file.
C /... List of vertices is terminated by a slash.
C
C Date: 1999, October 4
C Coded by Ludek Klimes
C
C-----------------------------------------------------------------------
C
C Auxiliary storage locations:
CHARACTER*9 FORMAT
INTEGER I1,I2,ISRF
COMMON/VRML/LUBRD,LUGRD,LU,LURAY
C
IF(LU.EQ.0)RETURN
C Setting output format:
FORMAT='(4(I0,A))'
I1=INT(ALOG10(FLOAT(N1*N2*NSRF)+0.5))+1
FORMAT(5:5)=CHAR(ICHAR('0')+I1)
C
C Writing the file:
DO 33 ISRF=0,N1*N2*(NSRF-1),N1*N2
DO 32 I2=ISRF,ISRF+N1*(N2-2),N1
DO 31 I1=I2+1,I2+N1-1
WRITE(LU,FORMAT) I1,' ',I1+1,' ',I1+1+N1,' ',I1+N1,' /'
31 CONTINUE
32 CONTINUE
33 CONTINUE
C
RETURN
END
C
C *********************************************************
C
SUBROUTINE BOX(BRD)
C
DIMENSION BRD(6)
COMMON/VRML/LUBRD,LUGRD,LUIND,LURAY
C
WRITE(LUBRD,109)
WRITE(LUBRD,105)
I=1
WRITE(LUBRD,112)I
WRITE(LUBRD,110)BRD(1),BRD(3),BRD(5)
WRITE(LUBRD,110)BRD(1),BRD(4),BRD(5)
WRITE(LUBRD,110)BRD(1),BRD(4),BRD(6)
WRITE(LUBRD,110)BRD(1),BRD(3),BRD(6)
WRITE(LUBRD,110)BRD(1),BRD(3),BRD(5)
WRITE(LUBRD,105)
I=2
WRITE(LUBRD,112)I
WRITE(LUBRD,110)BRD(1),BRD(3),BRD(5)
WRITE(LUBRD,110)BRD(1),BRD(3),BRD(6)
WRITE(LUBRD,110)BRD(2),BRD(3),BRD(6)
WRITE(LUBRD,110)BRD(2),BRD(3),BRD(5)
WRITE(LUBRD,110)BRD(1),BRD(3),BRD(5)
WRITE(LUBRD,105)
I=3
WRITE(LUBRD,112)I
WRITE(LUBRD,110)BRD(2),BRD(3),BRD(5)
WRITE(LUBRD,110)BRD(2),BRD(4),BRD(5)
WRITE(LUBRD,110)BRD(2),BRD(4),BRD(6)
WRITE(LUBRD,110)BRD(2),BRD(3),BRD(6)
WRITE(LUBRD,110)BRD(2),BRD(3),BRD(5)
WRITE(LUBRD,105)
I=4
WRITE(LUBRD,112)I
WRITE(LUBRD,110)BRD(1),BRD(4),BRD(5)
WRITE(LUBRD,110)BRD(1),BRD(4),BRD(6)
WRITE(LUBRD,110)BRD(2),BRD(4),BRD(6)
WRITE(LUBRD,110)BRD(2),BRD(4),BRD(5)
WRITE(LUBRD,110)BRD(1),BRD(4),BRD(5)
WRITE(LUBRD,105)
I=1
WRITE(LUBRD,112)I
WRITE(LUBRD,110)BRD(1),BRD(3),BRD(5)
WRITE(LUBRD,110)BRD(1),BRD(4),BRD(5)
WRITE(LUBRD,110)BRD(2),BRD(4),BRD(5)
WRITE(LUBRD,110)BRD(2),BRD(3),BRD(5)
WRITE(LUBRD,110)BRD(1),BRD(3),BRD(5)
WRITE(LUBRD,105)
I=1
WRITE(LUBRD,112)I
WRITE(LUBRD,110)BRD(1),BRD(3),BRD(6)
WRITE(LUBRD,110)BRD(1),BRD(4),BRD(6)
WRITE(LUBRD,110)BRD(2),BRD(4),BRD(6)
WRITE(LUBRD,110)BRD(2),BRD(3),BRD(6)
WRITE(LUBRD,110)BRD(1),BRD(3),BRD(6)
WRITE(LUBRD,105)
WRITE(LUBRD,105)
C
105 FORMAT('/')
109 FORMAT(25H'BOUNDARIES OF THE MODEL')
110 FORMAT(3(F10.5,1X),'/')
112 FORMAT(6H'BOUND,I1,1H',1X,'/')
C
RETURN
END
C
C=======================================================================
C
INCLUDE 'a2.for'
C <A HREF="a2.for" TYPE="text/html">a2.for</A>
INCLUDE 'a3.for'
C <A HREF="a3.for" TYPE="text/html">a3.for</A>
INCLUDE 'a42.for' !!
C <A HREF="a4.for" TYPE="text/html">a4.for</A>
INCLUDE 'a5.for'
C <A HREF="a5.for" TYPE="text/html">a5.for</A>
C
C <A NAME="MOD"></A><FONT COLOR="RED">Interpolation method:</FONT>
C Include just one of the following files 'mod*.for':
C (a) Isosurface interpolation:
C INCLUDE 'modis.for'
C <A HREF="modis.for" TYPE="text/html">modis.for</A>
C (b) (Bi-)(tri-)cubic B-spline interpolation:
INCLUDE 'modbs.for'
C <A HREF="modbs.for" TYPE="text/html">modbs.for</A>
C
C=======================================================================
C </PRE>
|
%PREX_EIGENFACES PRTools example on the use of images and eigenfaces
help prex_eigenfaces
echo on
% Load all faces (may take a while)
faces = prdataset(orl);
faces = setprior(faces,0); % give them equal priors
a = gendat(faces,ones(1,40)); % select one image per class
% Compute the eigenfaces
w = pcam(a);
% Display them
newfig(1,3); show(w); drawnow;
% Project all faces onto the eigenface space
b = [];
for j = 1:40
a = seldat(faces,j);
b = [b;a*w];
% Don't echo loops
echo off
end
echo on
% Show a scatterplot of the first two eigenfaces
newfig(2,3)
scatterd(b)
title('Scatterplot of the first two eigenfaces')
% Compute leave-one-out error curve
featsizes = [1 2 3 5 7 10 15 20 30 39];
e = zeros(1,length(featsizes));
for j = 1:length(featsizes)
k = featsizes(j);
e(j) = testk(b(:,1:k),1);
echo off
end
echo on
% Plot error curve
newfig(3,3)
plot(featsizes,e)
xlabel('Number of eigenfaces')
ylabel('Error')
echo off
|
\pagebreak
\section{Abbreviations and References}
\input{8-abbreviations-and-references/8.1-abbreviations.tex}
\input{8-abbreviations-and-references/8.2-references.tex}
|
Formal statement is: lemma at_right_to_0: "at_right a = filtermap (\<lambda>x. x + a) (at_right 0)" for a :: real Informal statement is: The filter at_right a is equal to the filter at_right 0 shifted by a.
|
They lost 4 men in this action . The fifth team also succeeded in completing all their objectives but almost half its men were killed . The other two Commando groups were not as successful . The MLs transporting Groups One and Two had almost all been destroyed on their approach . ML 457 was the only boat to land its Commandos on the Old Mole and only ML 177 had managed to reach the gates at the old entrance to the basin . That team succeeded in planting charges on two tugboats moored in the basin .
|
= = = Ratings = = =
|
/-
# References
1. Avigad, Jeremy. "Theorem Proving in Lean", n.d.
-/
-- Exercise 1
--
-- Open a namespace `Hidden` to avoid naming conflicts, and use the equation
-- compiler to define addition, multiplication, and exponentiation on the
-- natural numbers. Then use the equation compiler to derive some of their basic
-- properties.
namespace ex1
def add : Nat → Nat → Nat
| m, Nat.zero => m
| m, Nat.succ n => Nat.succ (add m n)
def mul : Nat → Nat → Nat
| _, Nat.zero => 0
| m, Nat.succ n => add m (mul m n)
def exp : Nat → Nat → Nat
| _, Nat.zero => 1
| m, Nat.succ n => mul m (exp m n)
end ex1
-- Exercise 2
--
-- Similarly, use the equation compiler to define some basic operations on lists
-- (like the reverse function) and prove theorems about lists by induction (such
-- as the fact that `reverse (reverse xs) = xs` for any list `xs`).
namespace ex2
variable {α : Type _}
def reverse : List α → List α
| [] => []
| (head :: tail) => reverse tail ++ [head]
-- Proof of `reverse (reverse xs) = xs` shown in previous exercise.
end ex2
-- Exercise 3
--
-- Define your own function to carry out course-of-value recursion on the
-- natural numbers. Similarly, see if you can figure out how to define
-- `WellFounded.fix` on your own.
namespace ex3
def below {motive : Nat → Type} : Nat → Type
| Nat.zero => PUnit
| Nat.succ n => PProd (PProd (motive n) (@below motive n)) (PUnit : Type)
-- TODO: Sort out how to write `brecOn` and `WellFounded.fix`.
end ex3
-- Exercise 4
--
-- Following the examples in Section Dependent Pattern Matching, define a
-- function that will append two vectors. This is tricky; you will have to
-- define an auxiliary function.
namespace ex4
inductive Vector (α : Type u) : Nat → Type u
| nil : Vector α 0
| cons : α → {n : Nat} → Vector α n → Vector α (n + 1)
namespace Vector
-- TODO: Sort out how to write `append`.
end Vector
end ex4
-- Exercise 5
--
-- Consider the following type of arithmetic expressions. The idea is that
-- `var n` is a variable, `vₙ`, and `const n` is the constant whose value is
-- `n`.
namespace ex5
inductive Expr where
| const : Nat → Expr
| var : Nat → Expr
| plus : Expr → Expr → Expr
| times : Expr → Expr → Expr
deriving Repr
open Expr
def sampleExpr : Expr :=
plus (times (var 0) (const 7)) (times (const 2) (var 1))
-- Here `sampleExpr` represents `(v₀ * 7) + (2 * v₁)`. Write a function that
-- evaluates such an expression, evaluating each `var n` to `v n`.
def eval (v : Nat → Nat) : Expr → Nat
| const n => sorry
| var n => v n
| plus e₁ e₂ => sorry
| times e₁ e₂ => sorry
def sampleVal : Nat → Nat
| 0 => 5
| 1 => 6
| _ => 0
-- Try it out. You should get 47 here.
-- #eval eval sampleVal sampleExpr
-- Implement "constant fusion," a procedure that simplifies subterms like
-- `5 + 7` to `12`. Using the auxiliary function `simpConst`, define a function
-- "fuse": to simplify a plus or a times, first simplify the arguments
-- recursively, and then apply `simpConst` to try to simplify the result.
def simpConst : Expr → Expr
| plus (const n₁) (const n₂) => const (n₁ + n₂)
| times (const n₁) (const n₂) => const (n₁ * n₂)
| e => e
def fuse : Expr → Expr := sorry
theorem simpConst_eq (v : Nat → Nat)
: ∀ e : Expr, eval v (simpConst e) = eval v e :=
sorry
theorem fuse_eq (v : Nat → Nat)
: ∀ e : Expr, eval v (fuse e) = eval v e :=
sorry
-- The last two theorems show that the definitions preserve the value.
end ex5
|
Crokids Flower Queen dress with flowers and circles on it and matching leggings.
Crokids Flower Queen dress with flowers and circles on it and matching leggings. So comfortable and a popular brand.
|
import ..lectures.love02_backward_proofs_demo
/-! # LoVe Exercise 2: Backward Proofs -/
set_option pp.beta true
set_option pp.generalized_field_notation false
namespace LoVe
namespace backward_proofs
/-! ## Question 1: Connectives and Quantifiers
1.1. Carry out the following proofs using basic tactics.
Hint: Some strategies for carrying out such proofs are described at the end of
Section 2.3 in the Hitchhiker's Guide. -/
lemma I (a : Prop) :
a → a :=
begin
intro ha,
exact ha
end
lemma K (a b : Prop) :
a → b → b :=
begin
intros ha hb,
exact hb
end
lemma C (a b c : Prop) :
(a → b → c) → b → a → c :=
begin
intros hg hb ha,
apply hg,
exact ha,
exact hb
end
lemma proj_1st (a : Prop) :
a → a → a :=
begin
intros ha ha',
exact ha
end
/-! Please give a different answer than for `proj_1st`: -/
lemma proj_2nd (a : Prop) :
a → a → a :=
begin
intros ha ha',
exact ha'
end
lemma some_nonsense (a b c : Prop) :
(a → b → c) → a → (a → c) → b → c :=
begin
intros hg ha hf hb,
apply hg,
exact ha,
exact hb
end
/-! 1.2. Prove the contraposition rule using basic tactics. -/
lemma contrapositive (a b : Prop) :
(a → b) → ¬ b → ¬ a :=
begin
intros hab hnb ha,
apply hnb,
apply hab,
apply ha
end
/-! 1.3. Prove the distributivity of `∀` over `∧` using basic tactics.
Hint: This exercise is tricky, especially the right-to-left direction. Some
forward reasoning, like in the proof of `and_swapβ` in the lecture, might be
necessary. -/
lemma forall_and {α : Type} (p q : α → Prop) :
(∀x, p x ∧ q x) ↔ (∀x, p x) ∧ (∀x, q x) :=
begin
apply iff.intro,
{ intro h,
apply and.intro,
{ intro x,
apply and.elim_left,
apply h },
{ intro x,
apply and.elim_right,
apply h } },
{ intros h x,
apply and.intro,
{ apply and.elim_left h },
{ apply and.elim_right h } }
end
/-! ## Question 2: Natural Numbers
2.1. Prove the following recursive equations on the first argument of the
`mul` operator defined in lecture 1. -/
#check mul
lemma mul_zero (n : ℕ) :
mul 0 n = 0 :=
begin
induction' n,
{ refl },
{ simp [add, mul, ih] }
end
lemma mul_succ (m n : ℕ) :
mul (nat.succ m) n = add (mul m n) n :=
begin
induction' n,
{ refl },
{ simp [add, add_succ, add_assoc, mul, ih] }
end
/-! 2.2. Prove commutativity and associativity of multiplication using the
`induction'` tactic. Choose the induction variable carefully. -/
lemma mul_comm (m n : ℕ) :
mul m n = mul n m :=
begin
induction' m,
{ simp [mul, mul_zero] },
{ simp [mul, mul_succ, ih],
cc }
end
lemma mul_assoc (l m n : ℕ) :
mul (mul l m) n = mul l (mul m n) :=
begin
induction' n,
{ refl },
{ simp [mul, mul_add, ih] }
end
/-! 2.3. Prove the symmetric variant of `mul_add` using `rw`. To apply
commutativity at a specific position, instantiate the rule by passing some
arguments (e.g., `mul_comm _ l`). -/
lemma add_mul (l m n : ℕ) :
mul (add l m) n = add (mul n l) (mul n m) :=
begin
rw mul_comm _ n,
rw mul_add
end
/-! ## Question 3 (**optional**): Intuitionistic Logic
Intuitionistic logic is extended to classical logic by assuming a classical
axiom. There are several possibilities for the choice of axiom. In this
question, we are concerned with the logical equivalence of three different
axioms: -/
def excluded_middle : Prop :=
∀a : Prop, a ∨ ¬ a
def peirce : Prop :=
∀a b : Prop, ((a → b) → a) → a
def double_negation : Prop :=
∀a : Prop, (¬¬ a) → a
/-! For the proofs below, please avoid using lemmas from Lean's `classical`
namespace, as this would defeat the purpose of the exercise.
3.1 (**optional**). Prove the following implication using tactics.
Hint: You will need `or.elim` and `false.elim`. You can use
`rw excluded_middle` to unfold the definition of `excluded_middle`,
and similarly for `peirce`. -/
lemma peirce_of_em :
excluded_middle → peirce :=
begin
rw excluded_middle,
rw peirce,
intro hem,
intros a b haba,
apply or.elim (hem a),
{ intro,
assumption },
{ intro hna,
apply haba,
intro ha,
apply false.elim,
apply hna,
assumption }
end
/-! 3.2 (**optional**). Prove the following implication using tactics. -/
lemma dn_of_peirce :
peirce → double_negation :=
begin
rw peirce,
rw double_negation,
intros hpeirce a hnna,
apply hpeirce a false,
intro hna,
apply false.elim,
apply hnna,
exact hna
end
/-! We leave the remaining implication for the homework: -/
namespace sorry_lemmas
lemma em_of_dn :
double_negation → excluded_middle :=
sorry
end sorry_lemmas
end backward_proofs
end LoVe
|
#binomial distribution - n independent trials, each with success probability p and failure probability 1-p=q
#dbinom - P(X = x)
x <- seq(0,50,by=1)
y <- dbinom(x = x, size = 50, prob = 0.2) #probability of exactly x successes out of size trials with success probability prob
plot(x,y)
y <- dbinom(x = x, size = 50,prob = 0.5)
plot(x,y)
#pbinom - P(X <= x)
pbinom(q = 24, size = 50, prob = 0.5) #probability of at most q successes out of size trials with success probability prob
pbinom(q = 25, size = 50, prob = 0.5, lower.tail = FALSE) #P(X>x) -> probability of more than q successes out of size trials with success probability prob
#qbinom - given a probability, finds how many successes out of size trials with success probability prob it corresponds to (the inverse of dbinom)
qbinom(p = 0.4438624,size = 50, prob = 1/2)
qbinom(p = 0.5561376,size = 50, prob = 0.5)
#rbinom - generates n random variates between 0 and size with success probability prob
rbinom(n = 5, size = 10, prob = 0.2)
rbinom(n = 5, size = 100,prob = 0.2)
#negative binomial distribution - how many failures occur before a fixed number of successes is reached
#dnbinom - probability of exactly x failures before the size-th success, with success probability prob
dnbinom(x = c(1:10), size = 5, prob = 0.5)
#pnbinom
pnbinom(q = 8, size = 3, prob = 0.2) #probability of at most q failures before the size-th success, with success probability prob
pnbinom(q = 8, size = 3, prob = 0.2, lower.tail = FALSE) #probability of more than q failures before the size-th success, with success probability prob
#qnbinom and rnbinom are analogous to qbinom and rbinom
#Hypergeometric distribution - N objects, M of which are marked; we pick n of them at random. X is the number of marked objects among those picked
#the hyper() function with prefixes d,p,q,r for density, distribution, quantile and random generation (simulation) respectively
success=c(0:6)
dhyper(x = success, m = 6, n = 30, k = 6)
#Poisson distribution - a binomial distribution with a very small success probability
#n->inf, p->0, np = lambda
#the pois() function with prefixes d,p,q,r for density, distribution, quantile and random generation (simulation) respectively
ppois(q = 16, lambda = 12)
#-----------------------------------------------------
#01
#binomial distribution n=79, k=29, p=1/2
result = choose(79,29) * 1/(2^29) * 1/(2^50) #choose computes the binomial coefficient
result.binom = dbinom(x = 29, size = 79, prob = 1/2)
#02
not.fixed = pbinom(q = 2, size = c(3,4,5,7,8), prob = 1/2) #at most 2 heads when tossing size coins
fixed = dbinom(x = 2, size = c(3,4,5,7,8), prob = 1/2) #exactly 2 heads when tossing size coins
plot(x= c(3,4,5,7,8), y = not.fixed, type = "l", main = "Distribution of two heads", xlab = "Number of tries", ylab = "Probability")
lines(x= c(3,4,5,7,8), y = fixed, type = "p", col = "red")
#03
#making a three-point shot is a binomially distributed random variable with success probability p = 0.7
n = qbinom(p = 1 - 0.18522, size = 3, prob = 0.7 ) + 3 #???
x = qbinom(p = 0.18522, size = c(3:10), prob = 0.7 ) #with probability 0.18522 we check, for 3 up to several (10) attempts, on which attempt the third shot has been made
names(x) = c(3:10)
result = as.numeric(names(x[x == 3]))
# => on the 5th and 6th attempt three shots have been made with probability 0.18522
#04
#probability of having <=35 women in a sample of k = 100 people, where women are m = 40% of 600000 people and men are n = 60% of 600000 people
theoretical = phyper(q = 35, m = 0.4*600000, n = 0.6*600000, k = 100)
pop = rep(c(0,1),c(360000, 240000)) #simulate 600000 people -> 360000 zeros for the men and 240000 ones for the women
res=sapply(c(0:1000), function(x) sum(sample(pop,100))) # vector with the total number of women in each random sample of 100 people
empirical = sum(res<=35)/length(res) # proportion of simulated samples with at most 35 women
#05
#Poisson distribution with lambda = 2 calls per minute
# => Poisson distribution with lambda = 2*5 = 10 calls per 5 minutes
exactly.2 = dpois(x = 2, lambda = 10) # P(X = 2)
less.than.2 = ppois(q = 1, lambda = 10) # P(X <= 1) = P(X < 2)
more.than.2 = ppois(q = 1, lambda = 10, lower.tail = FALSE) # P(X > 1) = P(X >= 2)
#06
#negative binomial distribution with success probability 4/52 (4 aces out of 52 cards)
#we want the probability of 4 failures before the first success => the 5th card drawn is an ace
dnbinom(x = 4, size = 1, prob = 4/52)
dgeom(x = 4, prob = 4/52)
|
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.Algebra.RingSolver.CommRingSolver where
open import Cubical.Foundations.Prelude
open import Cubical.Data.FinData
open import Cubical.Data.Nat using (ℕ)
open import Cubical.Data.Nat.Order using (zero-≤)
open import Cubical.Data.Vec.Base
open import Cubical.Algebra.CommRing
open import Cubical.Algebra.Ring
open import Cubical.Algebra.RingSolver.RawAlgebra renaming (⟨_⟩ to ⟨_⟩ᵣ)
open import Cubical.Algebra.RingSolver.AlgebraExpression public
open import Cubical.Algebra.RingSolver.CommRingHornerForms
open import Cubical.Algebra.RingSolver.CommRingEvalHom
private
variable
ℓ : Level
module EqualityToNormalform (R : CommRing {ℓ}) where
νR = CommRing→RawℤAlgebra R
open CommRingStr (snd R)
open Theory (CommRing→Ring R)
open Eval ℤAsRawRing νR
open IteratedHornerOperations νR
open HomomorphismProperties R
ℤExpr : (n : ℕ) → Type _
ℤExpr = Expr ℤAsRawRing (fst R)
normalize : (n : ℕ) → ℤExpr n → IteratedHornerForms νR n
normalize n (K r) = Constant n νR r
normalize n (∣ k) = Variable n νR k
normalize n (x +' y) =
(normalize n x) +ₕ (normalize n y)
normalize n (x ·' y) =
(normalize n x) ·ₕ (normalize n y)
normalize n (-' x) = -ₕ (normalize n x)
isEqualToNormalform :
(n : β)
(e : β€Expr n) (xs : Vec (fst R) n)
β eval n (normalize n e) xs β‘ β¦ e β§ xs
isEqualToNormalform β.zero (K r) [] = refl
isEqualToNormalform (β.suc n) (K r) (x β· xs) =
eval (β.suc n) (Constant (β.suc n) Ξ½R r) (x β· xs) β‘β¨ refl β©
eval (β.suc n) (0β Β·X+ Constant n Ξ½R r) (x β· xs) β‘β¨ combineCasesEval 0β (Constant n Ξ½R r) x xs β©
eval (β.suc n) 0β (x β· xs) Β· x + eval n (Constant n Ξ½R r) xs
β‘β¨ cong (Ξ» u β u Β· x + eval n (Constant n Ξ½R r) xs) (Eval0H _ (x β· xs)) β©
0r Β· x + eval n (Constant n Ξ½R r) xs
β‘β¨ cong (Ξ» u β u + eval n (Constant n Ξ½R r) xs) (0LeftAnnihilates _) β©
0r + eval n (Constant n Ξ½R r) xs β‘β¨ +Lid _ β©
eval n (Constant n Ξ½R r) xs β‘β¨ isEqualToNormalform n (K r) xs β©
_ β
isEqualToNormalform (β.suc n) (β£ zero) (x β· xs) =
eval (β.suc n) (1β Β·X+ 0β) (x β· xs) β‘β¨ refl β©
eval (β.suc n) 1β (x β· xs) Β· x + eval n 0β xs β‘β¨ cong (Ξ» u β u Β· x + eval n 0β xs)
(Eval1β _ (x β· xs)) β©
1r Β· x + eval n 0β xs β‘β¨ cong (Ξ» u β 1r Β· x + u ) (Eval0H _ xs) β©
1r Β· x + 0r β‘β¨ +Rid _ β©
1r Β· x β‘β¨ Β·Lid _ β©
x β
isEqualToNormalform (β.suc n) (β£ (suc k)) (x β· xs) =
eval (β.suc n) (0β Β·X+ Variable n Ξ½R k) (x β· xs) β‘β¨ combineCasesEval 0β (Variable n Ξ½R k) x xs β©
eval (β.suc n) 0β (x β· xs) Β· x + eval n (Variable n Ξ½R k) xs
β‘β¨ cong (Ξ» u β u Β· x + eval n (Variable n Ξ½R k) xs) (Eval0H _ (x β· xs)) β©
0r Β· x + eval n (Variable n Ξ½R k) xs
β‘β¨ cong (Ξ» u β u + eval n (Variable n Ξ½R k) xs) (0LeftAnnihilates _) β©
0r + eval n (Variable n Ξ½R k) xs β‘β¨ +Lid _ β©
eval n (Variable n Ξ½R k) xs
β‘β¨ isEqualToNormalform n (β£ k) xs β©
β¦ β£ (suc k) β§ (x β· xs) β
isEqualToNormalform β.zero (-' e) [] =
eval β.zero (-β (normalize β.zero e)) [] β‘β¨ -EvalDist β.zero
(normalize β.zero e)
[] β©
- eval β.zero (normalize β.zero e) [] β‘β¨ cong -_
(isEqualToNormalform
β.zero e [] ) β©
- β¦ e β§ [] β
isEqualToNormalform (β.suc n) (-' e) (x β· xs) =
eval (β.suc n) (-β (normalize (β.suc n) e)) (x β· xs) β‘β¨ -EvalDist (β.suc n)
(normalize
(β.suc n) e)
(x β· xs) β©
- eval (β.suc n) (normalize (β.suc n) e) (x β· xs) β‘β¨ cong -_
(isEqualToNormalform
(β.suc n) e (x β· xs) ) β©
- β¦ e β§ (x β· xs) β
isEqualToNormalform β.zero (e +' eβ) [] =
eval β.zero (normalize β.zero e +β normalize β.zero eβ) []
β‘β¨ +Homeval β.zero (normalize β.zero e) _ [] β©
eval β.zero (normalize β.zero e) []
+ eval β.zero (normalize β.zero eβ) []
β‘β¨ cong (Ξ» u β u + eval β.zero (normalize β.zero eβ) [])
(isEqualToNormalform β.zero e []) β©
β¦ e β§ []
+ eval β.zero (normalize β.zero eβ) []
β‘β¨ cong (Ξ» u β β¦ e β§ [] + u) (isEqualToNormalform β.zero eβ []) β©
β¦ e β§ [] + β¦ eβ β§ [] β
isEqualToNormalform (β.suc n) (e +' eβ) (x β· xs) =
eval (β.suc n) (normalize (β.suc n) e
+β normalize (β.suc n) eβ) (x β· xs)
β‘β¨ +Homeval (β.suc n) (normalize (β.suc n) e) _ (x β· xs) β©
eval (β.suc n) (normalize (β.suc n) e) (x β· xs)
+ eval (β.suc n) (normalize (β.suc n) eβ) (x β· xs)
β‘β¨ cong (Ξ» u β u + eval (β.suc n) (normalize (β.suc n) eβ) (x β· xs))
(isEqualToNormalform (β.suc n) e (x β· xs)) β©
β¦ e β§ (x β· xs)
+ eval (β.suc n) (normalize (β.suc n) eβ) (x β· xs)
β‘β¨ cong (Ξ» u β β¦ e β§ (x β· xs) + u)
(isEqualToNormalform (β.suc n) eβ (x β· xs)) β©
β¦ e β§ (x β· xs) + β¦ eβ β§ (x β· xs) β
isEqualToNormalform β.zero (e Β·' eβ) [] =
eval β.zero (normalize β.zero e Β·β normalize β.zero eβ) []
β‘β¨ Β·Homeval β.zero (normalize β.zero e) _ [] β©
eval β.zero (normalize β.zero e) []
Β· eval β.zero (normalize β.zero eβ) []
β‘β¨ cong (Ξ» u β u Β· eval β.zero (normalize β.zero eβ) [])
(isEqualToNormalform β.zero e []) β©
β¦ e β§ []
Β· eval β.zero (normalize β.zero eβ) []
β‘β¨ cong (Ξ» u β β¦ e β§ [] Β· u) (isEqualToNormalform β.zero eβ []) β©
β¦ e β§ [] Β· β¦ eβ β§ [] β
isEqualToNormalform (β.suc n) (e Β·' eβ) (x β· xs) =
eval (β.suc n) (normalize (β.suc n) e
Β·β normalize (β.suc n) eβ) (x β· xs)
β‘β¨ Β·Homeval (β.suc n) (normalize (β.suc n) e) _ (x β· xs) β©
eval (β.suc n) (normalize (β.suc n) e) (x β· xs)
Β· eval (β.suc n) (normalize (β.suc n) eβ) (x β· xs)
β‘β¨ cong (Ξ» u β u Β· eval (β.suc n) (normalize (β.suc n) eβ) (x β· xs))
(isEqualToNormalform (β.suc n) e (x β· xs)) β©
β¦ e β§ (x β· xs)
Β· eval (β.suc n) (normalize (β.suc n) eβ) (x β· xs)
β‘β¨ cong (Ξ» u β β¦ e β§ (x β· xs) Β· u)
(isEqualToNormalform (β.suc n) eβ (x β· xs)) β©
β¦ e β§ (x β· xs) Β· β¦ eβ β§ (x β· xs) β
solve :
{n : ℕ} (e₁ e₂ : ℤExpr n) (xs : Vec (fst R) n)
(p : eval n (normalize n e₁) xs ≡ eval n (normalize n e₂) xs)
→ ⟦ e₁ ⟧ xs ≡ ⟦ e₂ ⟧ xs
solve e₁ e₂ xs p =
⟦ e₁ ⟧ xs ≡⟨ sym (isEqualToNormalform _ e₁ xs) ⟩
eval _ (normalize _ e₁) xs ≡⟨ p ⟩
eval _ (normalize _ e₂) xs ≡⟨ isEqualToNormalform _ e₂ xs ⟩
⟦ e₂ ⟧ xs ∎
ℤExpr : (R : CommRing {ℓ}) (n : ℕ)
→ _
ℤExpr R n = EqualityToNormalform.ℤExpr R n
solve : (R : CommRing {ℓ})
{n : ℕ} (e₁ e₂ : ℤExpr R n) (xs : Vec (fst R) n)
(p : eval n (EqualityToNormalform.normalize R n e₁) xs ≡ eval n (EqualityToNormalform.normalize R n e₂) xs)
→ _
solve R = EqualityToNormalform.solve R
module VarNames3 (R : CommRing {ℓ}) where
X1 : ℤExpr R 3
X1 = ∣ Fin.zero
X2 : ℤExpr R 3
X2 = ∣ (suc Fin.zero)
X3 : ℤExpr R 3
X3 = ∣ (suc (suc Fin.zero))
module VarNames4 (R : CommRing {ℓ}) where
X1 : ℤExpr R 4
X1 = ∣ Fin.zero
X2 : ℤExpr R 4
X2 = ∣ (suc Fin.zero)
X3 : ℤExpr R 4
X3 = ∣ (suc (suc Fin.zero))
X4 : ℤExpr R 4
X4 = ∣ (suc (suc (suc Fin.zero)))
module VarNames5 (R : CommRing {ℓ}) where
X1 : ℤExpr R 5
X1 = ∣ Fin.zero
X2 : ℤExpr R 5
X2 = ∣ (suc Fin.zero)
X3 : ℤExpr R 5
X3 = ∣ (suc (suc Fin.zero))
X4 : ℤExpr R 5
X4 = ∣ (suc (suc (suc Fin.zero)))
X5 : ℤExpr R 5
X5 = ∣ (suc (suc (suc (suc Fin.zero))))
module VarNames6 (R : CommRing {ℓ}) where
X1 : ℤExpr R 6
X1 = ∣ Fin.zero
X2 : ℤExpr R 6
X2 = ∣ (suc Fin.zero)
X3 : ℤExpr R 6
X3 = ∣ (suc (suc Fin.zero))
X4 : ℤExpr R 6
X4 = ∣ (suc (suc (suc Fin.zero)))
X5 : ℤExpr R 6
X5 = ∣ (suc (suc (suc (suc Fin.zero))))
X6 : ℤExpr R 6
X6 = ∣ (suc (suc (suc (suc (suc Fin.zero)))))
|
% Chapter Template
\chapter{Deep Semantics for Sentiment Analysis} % Main chapter title
\label{unl} % Change X to a consecutive number; for referencing this chapter elsewhere, use \ref{ChapterX}
\lhead{Chapter 6. \emph{Deep Semantics for Sentiment Analysis}} % Change X to a consecutive number; this is for the header on each page - perhaps a shortened title
%----------------------------------------------------------------------------------------
% SECTION 1
%----------------------------------------------------------------------------------------
Existing methods for sentiment analysis use supervised approaches which take into account all the subjective words and/or phrases. Due to this, the fact that not
all of these words and phrases actually contribute to the overall sentiment of the \textit{text} is ignored. We propose an unsupervised rule-based approach using
deep semantic processing to identify only relevant subjective terms. We generate a UNL (Universal Networking Language) graph for the input \textit{text}. Rules are
applied on the graph to extract relevant terms. The sentiment expressed in these terms is used to figure out the overall sentiment of the \textit{text}. Experiments on
binary sentiment classification show promising results.
\section{Introduction}
Many works in sentiment analysis try to make use of shallow processing techniques. The common thing in all these works is that they merely try to identify sentiment-bearing
expressions as shown by \citep*{ruppenhofer2012semantic}. No effort has been made to identify which expression actually contributes to the overall sentiment of the text.
In \citep*{mukherjee2012sentiment} these expressions are given weightage according to their position w.r.t. the discourse elements in the \textit{text}. But it still takes
every expression into account.
Semantic analysis is essential to understand the exact meaning conveyed in the \textit{text}. Some words tend to mislead the meaning of a given piece of \textit{text} as
shown in the previous example. WSD (Word Sense Disambiguation) is a technique which can be used to get the right sense of the word. \citep*{balamurali2011harnessing} have
made use of WordNet synsets for a supervised sentiment classification task. \citep*{martin2010word} and \citep*{rentoumi2009sentiment} have also shown a performance improvement
by using WSD as compared to word-based features for a supervised sentiment classification task. In \citep*{saif2012semantic}, semantic concepts have been used as additional
features in addition to word-based features to show a performance improvement. Syntagmatic or structural properties of text are used in many NLP applications like machine
translation, speech recognition, named entity recognition, etc. A clustering based approach which makes use of syntactic features of text has been shown to improve performance
in \citep*{arhaves}. Another approach can be found in \citep*{mukherjee2012sentiment} which makes use of lightweight discourse for sentiment analysis. In general, approaches
using semantic analysis are more expensive than syntax-based approaches, since only shallow processing is involved in the latter. As pointed out earlier, all these works incorporate all
the sentiment-bearing expressions to evaluate the overall sentiment of the \textit{text}. The fact that not all expressions contribute to the overall sentiment is completely
ignored due to this. Our approach tries to resolve this issue. To do this, we create a UNL graph for each piece of \textit{text} and include only the relevant expressions to
predict the sentiment. Relevant expressions are those which satisfy the rules/conditions. After getting these expressions, we use a simple dictionary lookup along with attributes
of words in a UNL graph to calculate the sentiment.
In the next section, we will go through some of the related work in this direction.
\section{Related Work}
There has been a lot of work on using semantics in sentiment analysis. \citep*{saif2012semantic} have made use of semantic concepts as additional features in a word-based
supervised sentiment classifier. Each entity is treated as a semantic concept e.g. \textit{iPhone, Apple, Microsoft, MacBook, iPad, etc.}. Using these concepts as features,
they try to measure their correlation with positive and negative sentiments. In \citep*{verma2009incorporating}, effort has been made to construct document feature vectors
that are sentiment-sensitive and use world knowledge. This has been achieved by incorporating sentiment-bearing words as features in document vectors. The use of WordNet synsets
is found in \citep*{balamurali2011harnessing}, \citep*{rentoumi2009sentiment} and \citep*{martin2010word}. The one thing common with these approaches is that they make use of
shallow semantics. An argument has been made in \citep*{choi2008learning} for determining the polarity of a sentiment-bearing expression that words or constituents within the
expression can interact with each other to yield a particular overall polarity. Structural inference motivated by compositional semantics has been used in this work. This work
shows use of deep semantic information for the task of sentiment classification. A novel use of semantic frames is found in \citep*{ruppenhofer2012semantic}. As a step towards
making use of deep semantics, they propose SentiFrameNet which is an extension to FrameNet. A semantic frame can be thought of as a conceptual structure describing an event,
relation, or object and the participants in it. It has been shown that potential and relevant sentiment bearing expressions can be easily pulled out from the sentence using
the SentiFrameNet. All these works try to bridge the gap between rule-based and machine-learning based approaches but except the work in \citep*{ruppenhofer2012semantic},
all the other approaches consider all the sentiment-bearing expressions in the text.
\section{Use of Deep Semantics}\label{deep}
Before devising any solution to a problem, it is advisable to have a concise definition of the problem. Let us look at the formal definition of the sentiment analysis
problem as given in \citep*{liu2010sentiment}. Before we do that, let us consider the following review for a movie, \textit{"1) I went to watch the new James Bond flick,
Skyfall at IMAX which is the best theater in Mumbai with my brother a month ago. 2) I really liked the seating arrangement over there. 3) The screenplay was superb
and kept me guessing till the end. 4) My brother doesn't like the hospitality in the theater even now. 5) The movie is really good and the best bond flick ever."}
This is a snippet of the review for a movie named Skyfall. There are many entities and opinions expressed in it. 1) is an objective statement. 2) is subjective but
is intended for the theater and not the movie. 3) is a positive statement about the screenplay which is an important aspect of the movie. 4) is a subjective
statement but is made by the author's brother and also it is about the hospitality in the theater and not the movie or any of its aspects. 5) reflects a positive
view of the movie for the author. We can see from this example that not only the opinion but the opinion holder and the entity about which the opinion has been expressed
are also very important for sentiment analysis. Also, as can be seen from 1), 4) and 5) there is also a notion of time associated with every sentiment expressed.
Now, let us define the sentiment analysis problem formally as given in \citep*{liu2010sentiment}.
\textit{A direct opinion about the object is a quintuple \(<o_j,f_{jk},oo_{ijkl},h_i,t_l>\), where \(o_j\) is the object, \(f_{jk}\) is the feature of the object
\(o_j\), \(oo_{ijkl}\) is the orientation or polarity of the opinion on feature \(f_{jk}\) of object \(o_j\), \(h_i\) is the opinion holder and \(t_l\) is the time
when the opinion is expressed by \(h_i\).}
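For instance, sentence 3) of the review above would roughly correspond to the quintuple \(<\textit{Skyfall}, \textit{screenplay}, \textit{positive}, \textit{author}, t_l>\),
while sentence 4) has the author's brother as the opinion holder and the hospitality of the theater, not the movie, as its target; this is precisely why it should not
contribute to the sentiment about the movie.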
As can be seen from the formal definition of sentiment analysis and the motivating example, not all sentiment-bearing expressions contribute to the overall sentiment
of the \textit{text}. To solve this problem, we can make use of semantic roles in the \textit{text}. Semantic role is the underlying relationship that the underlying
participant has with the main verb. To identify the semantic roles, we make use of UNL in our approach.
\subsection{UNL (Universal Networking Language)}
UNL is a declarative formal language specifically designed to represent semantic data extracted from natural language texts. In UNL, the information is represented by
a semantic network, also called UNL graph. UNL graph is made up of three discrete semantic entities, Universal Words, Universal Relations, and Universal Attributes.
Universal Words are nodes in the semantic network, Universal Relations are arcs linking UWs, and Universal attributes are properties of UWs. To understand UNL better,
let us consider an example. UNL graph for \textit{"I like that bad boy"} is as shown in the figure.
\includegraphics[width=\textwidth]{unlexample.png}
\begin{center}
Figure 5.1 UNL Example
\end{center}
Here, \textit{"I"}, \textit{"like"}, \textit{"bad"}, and \textit{"boy"} are the UWs. \textit{"agt"} (agent), \textit{"obj"} (patient), and \textit{"mod"} (modifier) are the
Universal Relations. Universal attributes are the properties associated with UWs which will be explained as and when necessary with the rules of our algorithm.
\subsubsection*{UNL relations}
Syntax of a UNL relation is as shown below,
\[\label{eqn:unlsyntax}
<rel>:<scope><source>;<target>
\]
Where \(<rel>\) is the name of the relation, \(<scope>\) is the scope of the relation, \(<source>\) is the UW that assigns the relation, and \(<target>\) is the UW that receives the relation. \\
We have considered the following Universal relations in our approach (a small parsing sketch is given after the list),
\begin{enumerate}
\item \underline{\textit{agt} relation} \(:\) \textit{agt} stands for agent. An agent is a participant in action that provokes a change of state or location. The \textit{agt}
relation for the sentence \textit{"John killed Mary"} is \textit{agt( killed , John )}. This means that the action of \textit{killing} was performed by \textit{John}.\\
\item \underline{\textit{obj} relation} \(:\) \textit{obj} stands for patient. A patient is a participant in action that undergoes a change of state or location. The \textit{obj}
relation for the sentence \textit{"John killed Mary"} is \textit{obj( killed , Mary )}. This means that the patient/object of \textit{killing} is \textit{Mary}.\\
\item \underline{\textit{aoj} relation} \(:\) \textit{aoj} stands for object of an attribute. In the sentence \textit{"John is happy"}, the \textit{aoj} relation is
\textit{aoj( happy , John )}.\\
\item \underline{\textit{mod} relation} \(:\) \textit{mod} stands for modifier of an object. In the sentence \textit{"a beautiful book"}, the \textit{mod} relation is
\textit{mod( book , beautiful )}.\\
\item \underline{\textit{man} relation} \(:\) \textit{man} relation stands for manner. It is used to indicate how the action, experience or process of an event is carried out.
In the sentence \textit{"The scenery is beautifully shot"}, the \textit{man} relation is \textit{man( beautifully , shot )}.\\
\item \underline{\textit{and} relation} \(:\) \textit{and} relation is used to state a conjunction between two entities. In the sentence \textit{"Happy and cheerful"},
the \textit{and} relation is \textit{and(Happy,cheerful)}.
\end{enumerate}
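To make the notation concrete, the following small Python sketch (purely illustrative; the actual system works on the graphs produced by the UNL generator rather than on raw
strings, and the textual relation format shown here is assumed) parses relation lines of the form \texttt{rel(source, target)} into tuples:
\begin{verbatim}
import re

# Hypothetical textual form of the UNL relations for "I like that bad boy".
relations = ["agt(like@entry, I)", "obj(like@entry, boy)", "mod(boy, bad)"]

def parse_relation(line):
    # "rel(source, target)" -> ("rel", "source", "target")
    m = re.match(r"(\w+)\(\s*([^,]+?)\s*,\s*(.+?)\s*\)", line)
    return (m.group(1), m.group(2), m.group(3)) if m else None

print([parse_relation(r) for r in relations])
# [('agt', 'like@entry', 'I'), ('obj', 'like@entry', 'boy'), ('mod', 'boy', 'bad')]
\end{verbatim}
These (relation, source, target) tuples are also the input format assumed by the rule-aggregation sketch shown together with the rules below.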
\subsection{Architecture}
As shown in the \textit{UNL} example, the modifier \textit{"bad"} is associated with the object of the main verb. It shouldn't affect the sentiment of the main agent.
Therefore, we can ignore the modifier relation of the main object in such cases. After doing that, the sentiment of this sentence can be inferred to be positive.
The approach followed in the project is to first generate a UNL graph for the given input sentence. Then a set of rules is applied and used to infer the sentiment of the
sentence. The process is shown in Figure 5.2. The UNL generator shown in Figure 5.2 has been developed at CFILT.\footnote{\url{http://www.cfilt.iitb.ac.in/}} Before the given piece of text is passed on to the UNL
generator, it goes through a number of pre-processing stages. Removal of redundant punctuation, special characters, emoticons, etc. is part of this process. This is
extremely important because the UNL generator is not able to handle special characters at the moment. We can see that the performance of the overall system is limited
by this. A more robust version of the UNL generator will certainly allow the system to infer the sentiment more accurately.
\includegraphics[width=\textwidth]{unlusage.png}
\begin{center}
Figure 5.2 Architecture
\end{center}
\subsection{Rules}
There is a separate rule for each relation. For each UW (Universal word) considered, if it has a \textit{@not} attribute then its polarity is reversed. Rules used by the system
are as follows,
\begin{enumerate}
\item If a given UW is source of the \textit{agt} relation, then its polarity is added to the overall polarity of the text.
\underline{e.g.,} \textit{"I like her"}. Here, the agt relation will be \textit{agt ( like , I )}. The polarity of like being positive, the overall polarity of the text is positive.
\underline{e.g,} \textit{"I don't like her"}. Here the agt relation will be \textit{agt ( like@not , I )}. The polarity of like is positive but it has an attribute \textit{@not}
so its polarity is negative. The overall polarity of the text is negative in this case. \\
\item If a given UW is source or target of the \textit{obj} relation and has the attribute \textit{@entry} then its polarity is added to the overall polarity of the text. This rule
merely takes the main verb of the sentence into account, and its polarity is considered.
\underline{e.g.,} \textit{"I like her"}, here the obj relation will be \textit{obj ( like@entry , her )}. The polarity of like being positive, the overall polarity of the text is positive. \\
\item If a given UW is the source of the \textit{aoj} relation and has the attribute \textit{@entry} then its polarity is added to the overall polarity of the text.
\underline{e.g.,} \textit{"Going downtown tonight it will be amazing on the waterfront with the snow"}. Here, the \textit{aoj} relation is \textit{aoj ( amazing@entry , it )}.
\textit{amazing} has a positive polarity and therefore overall polarity is positive in this case.\\
\item If a given UW is the target of the \textit{mod} relation and the source UW has the attribute \textit{@entry} or has the attribute \textit{@indef} then polarity of the
target UW is added to the overall polarity of the text.
\underline{e.g.,} \textit{"I like that bad boy"}. Here, the aoj relation is \textit{mod ( boy , bad )}. \textit{bad} has a negative polarity but the source UW, boy does not have an
\textit{@entry} attribute. So, in this case negative polarity of bad is not considered as should be the case.
\underline{e.g.,} \textit{"She has a gorgeous face"}. Here, the mod relation is \textit{mod ( face@indef , gorgeous )}. \textit{gorgeous} has a positive polarity and face has an
attribute \textit{@indef}. So, polarity of gorgeous should be considered. \\
\item If a given UW is the target of the \textit{man} relation and the source UW has the attribute \textit{@entry} then polarity of the target UW is added to the overall polarity of
the text. Or if the target UW has the attribute \textit{@entry} then also we can consider polarity of the target UW.
\underline{e.g.,} \textit{"He always runs fast"}. Here, the \textit{aoj} relation is \textit{mod ( run@entry , fast )}. \textit{fast} has a positive polarity and the source UW, run
has the \textit{@entry} attribute. So, in this case positive polarity of fast is added to the overall polarity of the sentence.
\\
\item Polarities of both the source and target UW of the \textit{and} relation are considered.
\underline{e.g.,} In \textit{"Happy and Cheerful"}, the \textit{and} relation is \textit{and(Happy, Cheerful)}. \textit{Happy} and \textit{Cheerful} both have a positive
polarity, which gives this sentence an overall positive polarity.
\end{enumerate}
The polarity value of each individual word is looked up in the dictionary of positive and negative words used in \citep*{liu2010sentiment}. After all the rules are applied, all the calculated polarity values are summed up.
If this sum is greater than 0 then the text is considered positive, and negative otherwise. This makes the system negatively biased, since people often tend to express negative sentiment
indirectly or by comparison with something good.
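To make the procedure concrete, the following Python sketch illustrates how such a rule-based aggregation could look. It is only a minimal illustration of the rules above, not
the implementation used in the experiments: the toy polarity lexicon, the attribute handling and the function names are all assumed here for the sake of the example.
\begin{verbatim}
# Toy polarity lexicon; the real system uses the opinion lexicon of Liu (2010).
POLARITY = {"like": 1, "good": 1, "gorgeous": 1, "fast": 1, "bad": -1}

def word_polarity(uw):
    """Polarity of a Universal Word such as 'like@entry' or 'like@not'."""
    word, _, attrs = uw.partition("@")
    p = POLARITY.get(word, 0)
    return -p if "not" in attrs else p      # the @not attribute reverses polarity

def sentence_polarity(relations):
    """relations: list of (rel, source_uw, target_uw) tuples from the UNL graph."""
    score = 0
    for rel, src, tgt in relations:
        if rel == "agt":                                              # rule 1
            score += word_polarity(src)
        elif rel == "obj":                                            # rule 2
            score += sum(word_polarity(uw) for uw in (src, tgt) if "@entry" in uw)
        elif rel == "aoj" and "@entry" in src:                        # rule 3
            score += word_polarity(src)
        elif rel == "mod" and ("@entry" in src or "@indef" in src):   # rule 4
            score += word_polarity(tgt)
        elif rel == "man" and ("@entry" in src or "@entry" in tgt):   # rule 5
            score += word_polarity(tgt)
        elif rel == "and":                                            # rule 6
            score += word_polarity(src) + word_polarity(tgt)
    return "positive" if score > 0 else "negative"

# "I like that bad boy": mod(boy, bad) is ignored because "boy" is neither the
# entry node nor marked @indef, so the sentence comes out positive.
rels = [("agt", "like@entry", "I"), ("obj", "like@entry", "boy"), ("mod", "boy", "bad")]
print(sentence_polarity(rels))               # -> positive
\end{verbatim}
On the UNL graph of \textit{"I like that bad boy"} shown earlier, the sketch ignores the negative polarity of \textit{bad} and classifies the sentence as positive, as described above.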
In the next section, we will explain the experimental setup, results obtained and subsequent discussion.
\section{Experiments}
Analysis was performed for the monolingual binary sentiment classification task. The language used in this case was \textit{English}. The comparison was done between 5 systems
viz. System using words as features, WordNet sense based system as given in \citep*{balamurali2011harnessing}, Clusters based system as described in ~\citep*{arhaves},
Discourse rules based system as given in \citep*{mukherjee2012sentiment}, and the UNL rule based system.
\subsection{Datasets}
Two polarity datasets were used to perform the experiments.
\begin{enumerate}
\item \underline{EN-TD:} English Tourism corpus as used in \citep*{ye2009sentiment}. It consists of 594 positive and 593 negative reviews.
\item \underline{EN-PD:} English Product (music albums) review corpus \citep*{blitzer2007biographies}. It consists of 702 positive and 702 negative
reviews.
\end{enumerate}
For the WordNet sense and Clusters based systems, a manually sense tagged version of EN-PD has been used. Also, an automatically sense tagged version of
EN-TD was used on these systems. The tagging in the latter case was done using an automated WSD engine trained on the tourism domain \citep*{balamurali2013lost}.
The results reported for supervised systems are based on 10-fold cross validation.
\subsection{Results}\label{results}
\begin{center}
\begin{tabular}[h]{|l|c|c|}
\hline
\textbf{System} & \textbf{EN-TD} & \textbf{EN-PD} \\ \hline \hline
Bag of Words & 85.53 & 73.24 \\ \hline
Synset-based & 88.47 & 71.58 \\ \hline
Cluster-based & \textbf{95.20} & 79.36 \\ \hline
Discourse-based & 71.52 & 64.81 \\ \hline
UNL rule-based & 86.08 & \textbf{79.55} \\ \hline
\hline
\end{tabular}
\end{center}
\begin{center}
Table 5.1 Classification accuracy (in \%) for monolingual binary sentiment classification
\end{center}
The results for the monolingual binary sentiment classification task are shown in Table 5.1. The results reported are the best results obtained in
case of the supervised systems. The cluster based system performs the best in both cases. On EN-TD, the UNL rule-based system performs better only than the bag of words
and discourse rule based systems. For the EN-PD (music album reviews) dataset, however, the UNL based system outperforms every other system. These results are very promising
for a rule-based system. The difference between accuracy for positive and negative reviews for the rule-based systems viz. Discourse rules based and UNL rules based
is shown in Table 5.2. It can be seen that the Discourse rules based system performs slightly better than the UNL based system for positive
reviews. On the other hand, the UNL rules based system outperforms it in case of negative reviews by a huge margin.
\begin{center}
\begin{tabular}[h]{l|c|c|c|c|}
\cline{2-5}
& \multicolumn{2}{|c|}{\textbf{EN-TD}} & \multicolumn{2}{|c|}{\textbf{EN-PD}} \\ \hline
\textbf{System} & \textbf{Pos} & \textbf{Neg} & \textbf{Pos} & \textbf{Neg} \\ \hline \hline
Discourse rules & 94.94 & 48.06 & \textbf{92.73} & 36.89 \\ \hline
UNL rules & \textbf{95.72} & \textbf{76.44} & 90.75 & \textbf{68.35} \\ \hline
\hline
\end{tabular}
\end{center}
\begin{center}
Table 5.2 Classification accuracy (in \%) for positive and negative reviews
\end{center}
\subsection{Discussion}
The UNL generator used in this case is the bottleneck in terms of performance. Also, it makes use of the standard NLP tools viz. parsing, co-reference resolution, etc.
to assign the proper semantic roles in the given \textit{text}. It is a well known fact that these techniques work properly only on structured data. The language used in
the reviews present in both the datasets is unstructured in a considerable number of cases. The UNL generator is still in its infancy and cannot handle \textit{text}
involving special characters. Due to these reasons, a proper UNL graph is not generated in some cases. Also, it is not able to generate proper UNL graphs for even well
structured sentences like \textit{"It is not very good"}. In this case, the UW, \textit{good} should have an attribute \textit{@not} which implies that it is used in
a negative sense. On the contrary, the UNL generator does not assign any such attribute to it, leading to incorrect classification. As a result of all these things, the
classification accuracy is low.
Negative reviews have certain characteristics which make them difficult to classify. Some of them are listed below.
\begin{enumerate}
\item \textbf{Relative opinions containing positive words}
\begin{enumerate}
\item Sentences like \textit{``It is good but could be better''} are very common in negative reviews.
\item A user in a review about a certain travel destination says, \textit{``Came here just two days ago. Lucky to have got our flight back home''}
\end{enumerate}
\item \textbf{Some negative reviews are just to raise caution} \\
A negative review about \textit{Las Vegas} contained the sentence, \textit{``Do not go alone to Las Vegas if you are a female''}
\item \textbf{Mixed opinions} \\
Some reviews contain mixed opinions. For example, \textit{``Beautiful Architecture. Expensive food''}.
\item \textbf{A sense of time} \\
We stated the importance of time in sentiment analysis in \sref{deep}. A perfect example which shows the importance of time is, \textit{``A place I used to love. Not
anymore''}. One more example is \textit{``The best city in the world, `Once upon a time'.''}.
\item \textbf{Lack of domain knowledge} \\
Some reviews require domain knowledge to understand their meaning. Consider the sentence \textit{``They gave us a Queen-size bed''}. Here, the phrase
\textit{``Queen-size bed''} refers to a small bed. People usually refer to beds having sufficient space as \textit{``King-size bed''}.
\item \textbf{Domain dependence of sentiment} \\
Sentiment is domain dependent as given in ~\citep*{liu2010sentiment}. The adjective \textit{cheesy} might be positive for a food item but is definitely negative in cases
like \textit{``The place is full of cheesy shows''}.
\item \textbf{Comparison with other entities} \\
Reviewers often criticize a place/thing by praising other places/things. In the EN-TD dataset, negative reviews were full of sentences like \textit{``If you want dirt, go
to L.A. . If you want peace, go to Switzerland."}.
\item \textbf{Sarcasm} \\
Sarcasm is one of the most notorious problems in \textit{SA}. \textit{``I love New York especially the 'Lovely brown fog'''} and \textit{``I adore the 'phony fantasy-land' that
Vegas is''} are examples of sarcasm. Sarcasm is a very difficult problem to tackle. Some related works can be found in ~\citep*{carvalho2009clues} and ~\citep*{gonzalez2011identifying}.
\item \textbf{New notations} \\
During our error analysis, we observed sentences like, \textit{``I love it ?!?!''}, \textit{``Too \$\$\$\$''}, \textit{``No value for \$\$\$\$''}. In these examples the
negative sentiment is clear but difficult to detect.
\end{enumerate}
In some cases, the reviewers make use of their native language and expressions. This is a big problem for the task of monolingual sentiment classification. From this
discussion, it is clear that two major points of concern are unstructured language and hidden sentiment.
\section*{SUMMARY}
In the beginning, we expressed the motivation behind this approach. After that, related work was discussed. Then we explained the approach to use deep semantics in detail.
The system using deep semantics was compared with some state of the art systems for the sentiment classification task with two datasets. This was followed up by some
interesting observations and error analysis.
In the next chapter, we conclude and comment on some future work.
\clearpage
|
lemma scaleR_le_cancel_left_neg: "c < 0 \<Longrightarrow> c *\<^sub>R a \<le> c *\<^sub>R b \<longleftrightarrow> b \<le> a" for b :: "'a::ordered_real_vector"
|
function [varargout] = iscolumn(varargin)
% ISCOLUMNN is a drop-in replacement for the same function that was
% introduced in MATLAB R2010b.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% try to automatically remove incorrect compat folders from the path, see https://github.com/fieldtrip/fieldtrip/issues/899
if isempty(strfind(mfilename('fullpath'), matlabroot))
% automatic cleanup does not work when FieldTrip is installed inside the MATLAB path, see https://github.com/fieldtrip/fieldtrip/issues/1527
alternatives = which(mfilename, '-all');
if ~iscell(alternatives)
% this is needed for octave, see https://github.com/fieldtrip/fieldtrip/pull/1171
alternatives = {alternatives};
end
keep = true(size(alternatives));
for i=1:numel(alternatives)
keep(i) = keep(i) && ~any(alternatives{i}=='@'); % exclude methods from classes
keep(i) = keep(i) && alternatives{i}(end)~='p'; % exclude precompiled files
end
alternatives = alternatives(keep);
if exist(mfilename, 'builtin') || any(strncmp(alternatives, matlabroot, length(matlabroot)) & cellfun(@isempty, strfind(alternatives, fullfile('private', mfilename))))
% remove this directory from the path
p = fileparts(mfilename('fullpath'));
warning('removing "%s" from your path, see http://bit.ly/2SPPjUS', p);
rmpath(p);
% call the original MATLAB function
if exist(mfilename, 'builtin')
[varargout{1:nargout}] = builtin(mfilename, varargin{:});
else
[varargout{1:nargout}] = feval(mfilename, varargin{:});
end
return
end
end % automatic cleanup of compat directories
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% this is where the actual replacement code starts
% function tf = iscolumn(x)
% deal with the input arguments
if nargin==1
[x] = deal(varargin{1:1});
else
error('incorrect number of input arguments')
end
tf = length(size(x))==2 && size(x,2)==1;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% deal with the output arguments
varargout = {tf};
|
I had another debate with my friend regarding the cost of living in Singapore. Naturally, we referred to the article below, titled "Inflation could hit 5% early next year, then taper off", and argued about the notion of "spending less = lower cost of living = lower standard of living?".
Mr Lim, Singapore's Minister for Trade and Industry, commented that by spending on alternative, cheaper goods, we can effectively lower the cost of living. However, some people seem to interpret that as lowering the standard of living.
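To put rough numbers on the distinction: if the official basket of goods cost $100 a year ago and costs $104 today, measured inflation is (104 - 100)/100 = 4 per cent. A household that swaps some items in that basket for cheaper substitutes sees its own spending rise by less than 4 per cent, even though the published figure is unchanged; that gap between the index and an individual household's outlay is what Mr Lim's remark turns on.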
Instead of saying that "switching to cheaper products can reduce the cost of living", Minister Lim would have been more accurate to say, "switching to cheaper products can lower the standard of living". For example, instead of living in a 5-room HDB flat, you can live in a 1-room HDB flat (a cheaper product). Instead of having chicken rice and vegetables for lunch, you can just eat plain porridge (a cheaper product).
Living in a 1-room HDB flat and eating plain porridge constitutes a lower standard of living. So yes, by switching to cheaper products, you can lower your standard of living. And a lower standard of living does cost less to maintain.
My friend shared the view that, by spending on (possibly) lower quality (and hence cheaper) goods, there will be less demand for the costlier goods. Given such a scenario, it is possible for the costlier goods to become cheaper since the demand is now lower. After all, economics is all about demand and supply, and hence Mr Lim's advice is not entirely wrong.
He also added that it is not possible for everyone to stop spending; people should spend less, perhaps on cheaper products, but not stop spending entirely, because the entire economy would collapse. Thus, to prevent either extreme from happening, (i) goods getting costlier or (ii) the economy collapsing, Singaporeans have to start spending moderately so that both extremes can be avoided.
From the standpoint of economics, I have to agree with his points. However, the entities we are talking about are humans, not robots, so a greater amount of PR has to be injected while getting people to face reality.
Mr Lim had been infamous for his statements when he was the health minister, which include asking women to "save on one hairdo and use the money for breast screening", his regret at intervening to admit a premature baby to KKH to save the baby's life because "...in the end, the baby continued to be in intensive care, and KKH now runs up a total bill of more than $300,000...", and his call to raise hospital rates to hotel rates because "if these patients (hospital overstayers) want to treat hospitals like a hotel, then they'll have to be charged hotel rates." Most of these overstayers are likely to be older than 60, with no income, or from families with incomes below $1000.
AS CONSUMER prices continue to rise, inflation in Singapore will likely surge to 4 or 5 per cent in the first quarter of next year.
But it should taper off by the second half of the year to "more normal conditions", said Trade and Industry Minister Lim Hng Kiang yesterday.
The average rate for next year should be around 3 per cent.
Fuelled mainly by rising global oil and food prices, inflation recorded a 13-year high of 2.9 per cent in August. It is expected to dip to 2.7 per cent in the last quarter, Mr Lim told Parliament.
But it was his 2008 forecast that made analysts and consumers sit up yesterday.
Citigroup economist Chua Hak Bin said that the 5 per cent rate predicted would be a "historic high" in the 25 years since 1983. The previous high was in July 1991, when it hit 4 per cent.
Most economies, including Singapore's, size up inflation by tracking the Consumer Price Index, or CPI. The CPI measures the cost of a basket of goods and services consumed by most households.
Yesterday, Mr Lim cautioned against "interpreting a rise in the headline CPI as necessarily reflecting an increase in the cost of living".
It depends on the individual household's spending. "Switching to cheaper products can reduce the cost of living despite a rise in the CPI," he added.
A CPI increase may also not reflect actual hikes in consumer prices. For instance, flat prices soared, but flat owners do not pay rent.
Higher inflation, he said, should also be viewed against rapid economic growth, with the gross domestic product rising more than 6 per cent on average since 2003 and wages also on the up.
However, MPs such as Madam Halimah Yacob worry that residents, especially the elderly on fixed incomes, are feeling the pinch. "They go to the market with a similar sum of money. But they can buy less," she said.
He sketched out how the landscape will look next year.
Explaining why there will be a spike in inflation before it plateaus, he cited two reasons: First, the year-on-year comparison is against the first quarter of this year, when inflation was at 0.5 per cent and oil prices were low.
Second, the "one-off" effect of the goods and services tax hike, which will be felt until next June.
Thereafter, the trend will "revert to more normal conditions in the second half of next year".
The numbers come against a global backdrop of rising oil and food prices, such as more expensive chicken due to costlier feed. Adverse weather in food-supplying countries has also reduced supply, even as demand has risen.
Diversifying sources is one way to maintain more stable food prices, Mr Lim said, but there was a limit to this given the worldwide increase in food prices being seen now.
But inflation has not affected Singaporeβs economic competitiveness, he said.
"We are tracking our competitiveness position very closely and so far we are in quite a good position," he said, adding that inflation here was lower than in other countries.
He noted that imported inflation has been reduced because of the policy of gradually appreciating the Singapore dollar.
Other watchers suggest more aggressive measures. Citigroup's Dr Chua, for instance, believes that the economy is in danger of overheating.
He called on the Government to re-prioritise projects, given that unemployment is already at a low.
|
import main2
import combinatorics.simple_graph.adj_matrix
import combinatorics.simple_graph.strongly_regular
/-!
# Strong regular graphs
This file attempts to construct strong regular graphs from
regular symmetric Hadamard matrices with constant diagonal (RSHCD).
-/
set_option pp.beta true
variables {α I R V : Type*}
variables [fintype V] [fintype I] -- [semiring R]
open matrix simple_graph fintype finset
open_locale big_operators matrix
local notation `n` := (fintype.card V : ℝ)
namespace matrix
class adj_matrix
[mul_zero_one_class α] [nontrivial α]
(A : matrix I I α) : Prop :=
(zero_or_one [] : ∀ i j, (A i j) = 0 ∨ (A i j) = 1 . obviously)
(sym [] : A.is_sym . obviously)
(loopless [] : ∀ i, A i i = 0 . obviously)
lemma is_sym_of_adj_matrix
[semiring R] (G : simple_graph I) [decidable_rel G.adj] :
(G.adj_matrix R).is_sym := transpose_adj_matrix G
instance is_adj_matrix_of_adj_matrix
[semiring R] [nontrivial R] (G : simple_graph I) [decidable_rel G.adj] :
adj_matrix (G.adj_matrix R) :=
{ zero_or_one := λ i j, by by_cases G.adj i j; simp*,
sym := is_sym_of_adj_matrix G,
loopless := Ξ» i, by simp }
#check compl_adj
def compl
[mul_zero_one_class α] [nontrivial α] [decidable_eq α] [decidable_eq V]
(A : matrix V V α) [adj_matrix A] : matrix V V α :=
λ i j, ite (i = j) 0 (ite (A i j = 0) 1 0)
@[simp]
lemma diag_ne_one_of_adj_matrix
[mul_zero_one_class α] [nontrivial α]
(A : matrix V V α) [c : adj_matrix A] (i : V) :
A i i ≠ 1 :=
by simp [c.loopless]
def to_graph
[mul_zero_one_class α] [nontrivial α] [decidable_eq α]
(A : matrix V V α) [c : adj_matrix A]:
simple_graph V :=
{ adj := λ i j, ite (A i j = 1) true false,
sym := λ i j h, by simp only [c.sym.apply' i j]; convert h,
loopless := λ i, by simp }
instance
[mul_zero_one_class α] [nontrivial α] [decidable_eq α]
(A : matrix V V α) [c : adj_matrix A] :
decidable_rel A.to_graph.adj :=
by {simp [to_graph], apply_instance}
#check is_regular_of_degree
lemma to_graph_is_SRG_of
[non_assoc_semiring α] [nontrivial α] [decidable_eq α] [decidable_eq V]
(A : matrix V V α) [adj_matrix A] {k l m : ℕ}
(eq₁ : A ⬝ (𝟙 : matrix V V α) = k • 𝟙)
(eq₂ : A ⬝ A = k • 1 + l • A + m • A.compl):
is_SRG_of A.to_graph (card V) k l m := sorry
-------------------------------------------------------------------------------
class RSHCD (H : matrix I I ℝ) extends Hadamard_matrix H : Prop :=
(regular [] : ∀ i j, ∑ b, H i b = ∑ a, H a j)
(sym [] : H.is_sym)
(const_diag [] : ∀ i j, H i i = H j j)
namespace RSHCD
def diag [inhabited I] (H : matrix I I ℝ) [RSHCD H] : ℝ :=
H (default I) (default I)
lemma regular_row
(H : matrix I I ℝ) [RSHCD H] (a b : I) :
∑ j : I, H a j = ∑ j : I, H b j :=
by rw [regular H a a, regular H b a]
def row_sum [inhabited I] (H : matrix I I ℝ) [RSHCD H] : ℝ :=
∑ j : I, H (default I) j
@[simp] lemma eq_row_sum
[inhabited I] (H : matrix I I ℝ) [RSHCD H] (i : I) :
∑ j : I, H i j = ∑ j : I, H (default I) j :=
regular_row H i (default I)
def to_adj [inhabited V] (H : matrix V V ℝ) [RSHCD H] :
matrix V V ℝ :=
((1 : ℝ) / 2) • (𝟙 - (diag H) • H)
def to_adj_eq₁
[inhabited V] (H : matrix V V ℝ) [RSHCD H] :
(to_adj H) ⬝ (𝟙 : matrix V V ℝ) =
((n - (diag H) * (row_sum H)) / 2) • 𝟙 :=
begin
have : (n - (diag H) * (row_sum H)) / 2 =
((1 : ℝ) / 2) * (n - (diag H) * (row_sum H)) := by field_simp,
rw[this], ext i j,
simp [matrix.mul, all_one, to_adj, row_sum, ←finset.mul_sum],
congr,
end
end RSHCD
open RSHCD
instance [inhabited V] (H : matrix V V ℝ) [RSHCD H] :
adj_matrix (to_adj H) := {..}
def to_graph_of_RSHD [inhabited V] (H : matrix V V ℝ) [RSHCD H] :
simple_graph V := (to_adj H).to_graph
instance adj.decidable_rel'
[inhabited V] (H : matrix V V ℝ) [RSHCD H] :
decidable_rel H.to_graph_of_RSHD.adj :=
by simp [to_graph_of_RSHD]; apply_instance
lemma to_graph_is_SRG_of_RSHD
[inhabited V] [decidable_eq V] (H : matrix V V ℝ) [RSHCD H] :
is_SRG_of H.to_graph_of_RSHD sorry sorry sorry sorry := sorry
end matrix
#check transpose_adj_matrix
#check simple_graph
#check adj_matrix
#check from_rel
#check is_SRG_of
#check is_regular_of_degree
|
# Test name methods
@testset "Infinite Variable Name" begin
# initialize model and infinite variable
m = InfiniteModel()
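# VariableInfo arguments follow JuMP's field order: (has_lb, lower_bound, has_ub,
# upper_bound, has_fix, fixed_value, has_start, start, binary, integer), so the
# info below describes an unbounded, unfixed, continuous variable with no start value.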
info = VariableInfo(false, 0, false, 0, false, 0, false, 0, false, false)
param = InfOptParameter(IntervalSet(0, 1), Number[], false)
pref = add_parameter(m, param, "test1")
pref2 = add_parameter(m, param, "test2")
var = InfiniteVariable(info, (pref, pref2))
m.vars[1] = var
m.var_to_name[1] = "var"
vref = InfiniteVariableRef(m, 1)
# JuMP.name
@testset "JuMP.name" begin
@test name(vref) == "var"
end
# parameter_refs
@testset "parameter_refs" begin
@test parameter_refs(vref) == (pref, pref2)
end
# JuMP.set_name
@testset "JuMP.set_name" begin
# make extra infinite variable
vref2 = InfiniteVariableRef(m, 2)
m.vars[2] = var
# test normal
@test isa(set_name(vref, "new"), Nothing)
@test name(vref) == "new(test1, test2)"
# test default
@test isa(set_name(vref2, ""), Nothing)
@test name(vref2) == "noname(test1, test2)"
end
# _make_variable_ref
@testset "_make_variable_ref" begin
@test InfiniteOpt._make_variable_ref(m, 1) == vref
end
# parameter_by_name
@testset "JuMP.variable_by_name" begin
# test normal
@test variable_by_name(m, "new(test1, test2)") == vref
@test isa(variable_by_name(m, "test(test1, test2)"), Nothing)
# prepare variable with same name
m.vars[2] = var
m.var_to_name[2] = "new(test1, test2)"
m.name_to_var = nothing
# test multiple name error
@test_throws ErrorException variable_by_name(m, "new(test1, test2)")
end
# _root_name
@testset "_root_name" begin
@test InfiniteOpt._root_name(vref) == "new"
end
end
# Test variable definition methods
@testset "Infinite Variable Definition" begin
# initialize model and infinite variable info
m = InfiniteModel()
param = InfOptParameter(IntervalSet(0, 1), Number[], false)
pref = add_parameter(m, param, "test")
pref2 = add_parameter(m, param, "θ")
prefs = @infinite_parameter(m, x[1:2], set = IntervalSet(0, 1))
info = VariableInfo(false, 0, false, 0, false, 0, false, 0, false, false)
info2 = VariableInfo(true, 0, true, 0, true, 0, true, 0, true, false)
info3 = VariableInfo(true, 0, true, 0, true, 0, true, 0, false, true)
# _check_parameter_tuple
@testset "_check_parameter_tuple" begin
@test isa(InfiniteOpt._check_parameter_tuple(error, (pref, prefs)),
Nothing)
@test_throws ErrorException InfiniteOpt._check_parameter_tuple(error,
(pref, prefs, 2))
end
# _make_formatted_tuple
@testset "_make_formatted_tuple" begin
@test isa(InfiniteOpt._make_formatted_tuple((pref, prefs)), Tuple)
@test isa(InfiniteOpt._make_formatted_tuple((pref, prefs))[2],
JuMP.Containers.SparseAxisArray)
@test isa(InfiniteOpt._make_formatted_tuple((pref, prefs))[1],
ParameterRef)
end
# _check_tuple_groups
@testset "_check_tuple_groups" begin
# prepare param tuple
tuple = InfiniteOpt._make_formatted_tuple((pref, prefs))
# test normal
@test isa(InfiniteOpt._check_tuple_groups(error, tuple), Nothing)
# prepare bad param tuple
tuple = InfiniteOpt._make_formatted_tuple(([pref; pref2], prefs))
# test for errors
@test_throws ErrorException InfiniteOpt._check_tuple_groups(error,
tuple)
@test_throws ErrorException InfiniteOpt._check_tuple_groups(error,
(pref, pref))
end
# _make_variable
@testset "_make_variable" begin
# test for each error message
@test_throws ErrorException InfiniteOpt._make_variable(error, info, Val(Infinite),
bob = 42)
@test_throws ErrorException InfiniteOpt._make_variable(error, info, :bad)
@test_throws ErrorException InfiniteOpt._make_variable(error, info, Val(Infinite))
@test_throws ErrorException InfiniteOpt._make_variable(error, info, Val(Infinite),
parameter_refs = (pref, 2))
@test_throws ErrorException InfiniteOpt._make_variable(error, info, Val(Infinite),
parameter_refs = (pref, pref))
# defined expected output
expected = InfiniteVariable(info, (pref,))
# test for expected output
@test InfiniteOpt._make_variable(error, info, Val(Infinite),
parameter_refs = pref).info == expected.info
@test InfiniteOpt._make_variable(error, info, Val(Infinite),
parameter_refs = pref).parameter_refs == expected.parameter_refs
# test various types of param tuples
@test InfiniteOpt._make_variable(error, info, Val(Infinite),
parameter_refs = (pref, pref2)).parameter_refs == (pref, pref2)
tuple = InfiniteOpt._make_formatted_tuple((pref, prefs))
@test InfiniteOpt._make_variable(error, info, Val(Infinite),
parameter_refs = (pref, prefs)).parameter_refs == tuple
tuple = InfiniteOpt._make_formatted_tuple((prefs,))
@test InfiniteOpt._make_variable(error, info, Val(Infinite),
parameter_refs = prefs).parameter_refs == tuple
end
# build_variable
@testset "JuMP.build_variable" begin
# test for each error message
@test_throws ErrorException build_variable(error, info, Infinite,
bob = 42)
@test_throws ErrorException build_variable(error, info, :bad)
@test_throws ErrorException build_variable(error, info, Point,
parameter_refs = pref)
@test_throws ErrorException build_variable(error, info, Infinite)
@test_throws ErrorException build_variable(error, info, Infinite,
parameter_refs = (pref, 2))
@test_throws ErrorException build_variable(error, info, Infinite,
parameter_refs = (pref, pref),
error = error)
# defined expected output
expected = InfiniteVariable(info, (pref,))
# test for expected output
@test build_variable(error, info, Infinite,
parameter_refs = pref).info == expected.info
@test build_variable(error, info, Infinite,
parameter_refs = pref).parameter_refs == expected.parameter_refs
# test various types of param tuples
@test build_variable(error, info, Infinite,
parameter_refs = (pref, pref2)).parameter_refs == (pref, pref2)
tuple = InfiniteOpt._make_formatted_tuple((pref, prefs))
@test build_variable(error, info, Infinite,
parameter_refs = (pref, prefs)).parameter_refs == tuple
tuple = InfiniteOpt._make_formatted_tuple((prefs,))
@test build_variable(error, info, Infinite,
parameter_refs = prefs).parameter_refs == tuple
end
# _update_param_var_mapping
@testset "_update_param_var_mapping" begin
# initialize secondary model and infinite variable
m2 = InfiniteModel()
param = InfOptParameter(IntervalSet(0, 1), Number[], false)
pref3 = add_parameter(m2, param, "test")
prefs2 = @infinite_parameter(m2, x[1:2], set = IntervalSet(0, 1))
ivref = InfiniteVariableRef(m2, 1)
ivref2 = InfiniteVariableRef(m2, 2)
# prepare tuple
tuple = (pref3, prefs2)
tuple = InfiniteOpt._make_formatted_tuple(tuple)
# test normal
@test isa(InfiniteOpt._update_param_var_mapping(ivref, tuple), Nothing)
@test m2.param_to_vars[1] == [1]
@test m2.param_to_vars[2] == [1]
@test m2.param_to_vars[3] == [1]
@test isa(InfiniteOpt._update_param_var_mapping(ivref2, tuple), Nothing)
@test m2.param_to_vars[1] == [1, 2]
@test m2.param_to_vars[2] == [1, 2]
@test m2.param_to_vars[3] == [1, 2]
end
# _check_parameters_valid
@testset "_check_parameters_valid" begin
# prepare param tuple
tuple = (pref, prefs, copy(pref2, InfiniteModel()))
tuple = InfiniteOpt._make_formatted_tuple(tuple)
# test that catches error
@test_throws ErrorException InfiniteOpt._check_parameters_valid(m, tuple)
# test normal
@test isa(InfiniteOpt._check_parameters_valid(m, (pref, pref2)), Nothing)
end
# _check_make_variable_ref
@testset "_check_make_variable_ref" begin
# prepare secondary model and parameter and variable
m2 = InfiniteModel()
param = InfOptParameter(IntervalSet(0, 1), Number[], false)
pref3 = add_parameter(m2, param, "test")
v = build_variable(error, info, Infinite,
parameter_refs = pref3)
# test for error of invalid variable
@test_throws ErrorException InfiniteOpt._check_make_variable_ref(m, v)
# prepare normal variable
v = build_variable(error, info, Infinite, parameter_refs = pref)
# test normal
@test InfiniteOpt._check_make_variable_ref(m, v) == InfiniteVariableRef(m, 0)
@test m.param_to_vars[1] == [0]
delete!(m.param_to_vars, 1)
# test with other variable object
@test_throws ErrorException InfiniteOpt._check_make_variable_ref(m, :bad)
end
# add_variable
@testset "JuMP.add_variable" begin
# prepare secondary model and parameter and variable
m2 = InfiniteModel()
param = InfOptParameter(IntervalSet(0, 1), Number[], false)
pref3 = add_parameter(m2, param, "test")
v = build_variable(error, info, Infinite,
parameter_refs = pref3)
# test for error of invalid variable
@test_throws ErrorException add_variable(m, v)
# prepare normal variable
v = build_variable(error, info, Infinite, parameter_refs = pref)
# test normal
@test add_variable(m, v, "name") == InfiniteVariableRef(m, 2)
@test haskey(m.vars, 2)
@test m.param_to_vars[1] == [2]
@test m.var_to_name[2] == "name(test)"
# prepare infinite variable with all the possible info additions
v = build_variable(error, info2, Infinite, parameter_refs = pref)
# test info addition functions
vref = InfiniteVariableRef(m, 3)
@test add_variable(m, v, "name") == vref
@test !optimizer_model_ready(m)
# lower bound
@test has_lower_bound(vref)
@test JuMP._lower_bound_index(vref) == 1
@test isa(m.constrs[1], ScalarConstraint{InfiniteVariableRef,
MOI.GreaterThan{Float64}})
@test m.constr_in_var_info[1]
# upper bound
@test has_upper_bound(vref)
@test JuMP._upper_bound_index(vref) == 2
@test isa(m.constrs[2], ScalarConstraint{InfiniteVariableRef,
MOI.LessThan{Float64}})
@test m.constr_in_var_info[2]
# fix
@test is_fixed(vref)
@test JuMP._fix_index(vref) == 3
@test isa(m.constrs[3], ScalarConstraint{InfiniteVariableRef,
MOI.EqualTo{Float64}})
@test m.constr_in_var_info[3]
# binary
@test is_binary(vref)
@test JuMP._binary_index(vref) == 4
@test isa(m.constrs[4], ScalarConstraint{InfiniteVariableRef,
MOI.ZeroOne})
@test m.constr_in_var_info[4]
@test m.var_to_constrs[3] == [1, 2, 3, 4]
# prepare infinite variable with integer info addition
v = build_variable(error, info3, Infinite, parameter_refs = pref)
# test integer addition functions
vref = InfiniteVariableRef(m, 4)
@test add_variable(m, v, "name") == vref
@test !optimizer_model_ready(m)
@test is_integer(vref)
@test JuMP._integer_index(vref) == 8
@test isa(m.constrs[8], ScalarConstraint{InfiniteVariableRef,
MOI.Integer})
@test m.constr_in_var_info[8]
@test m.var_to_constrs[4] == [5, 6, 7, 8]
end
end
# Test name methods
@testset "Point Variable Name" begin
# initialize model and point variable
m = InfiniteModel()
info = VariableInfo(false, 0, false, 0, false, 0, false, 0, false, false)
param = InfOptParameter(IntervalSet(0, 1), Number[], false)
pref = add_parameter(m, param, "test1")
pref2 = add_parameter(m, param, "test2")
ivar = InfiniteVariable(info, (pref, pref2))
ivref = add_variable(m, ivar, "ivar")
var = PointVariable(info, ivref, (0.5, 0.5))
m.vars[2] = var
m.var_to_name[2] = "var"
vref = PointVariableRef(m, 2)
# JuMP.name
@testset "JuMP.name" begin
@test name(vref) == "var"
end
# infinite_variable_ref
@testset "infinite_variable_ref" begin
@test infinite_variable_ref(vref) == ivref
end
# _make_str_value (Number)
@testset "_make_str_value (Number)" begin
@test InfiniteOpt._make_str_value(1.0) == "1"
end
# _make_str_value (Array)
@testset "_make_str_value (Array)" begin
# test short array
values = convert(JuMPC.SparseAxisArray, [1., 2., 3.])
@test InfiniteOpt._make_str_value(values) == "[1, 2, 3]"
# test long array
values = convert(JuMPC.SparseAxisArray, [1., 2., 3., 4., 5., 6.])
@test InfiniteOpt._make_str_value(values) == "[1, ..., 6]"
end
# JuMP.set_name
@testset "JuMP.set_name" begin
# prepare a secondary point variable
vref2 = PointVariableRef(m, 3)
m.vars[3] = var
# test normal
@test isa(set_name(vref, "new"), Nothing)
@test name(vref) == "new"
# test default
@test isa(set_name(vref2, ""), Nothing)
@test name(vref2) == "ivar(0.5, 0.5)"
# test other default
m.next_var_index = 3
ivref2 = add_variable(m, InfiniteVariable(info, (pref,)), "ivar2")
m.vars[5] = PointVariable(info, ivref2, (0.5,))
m.var_to_name[5] = "var42"
vref3 = PointVariableRef(m, 5)
@test isa(set_name(vref3, ""), Nothing)
@test name(vref3) == "ivar2(0.5)"
end
# _make_variable_ref
@testset "_make_variable_ref" begin
@test InfiniteOpt._make_variable_ref(m, 2) == vref
end
# parameter_by_name
@testset "JuMP.variable_by_name" begin
# test normal
@test variable_by_name(m, "new") == vref
@test isa(variable_by_name(m, "test"), Nothing)
# make variable with duplicate name
m.vars[3] = var
m.var_to_name[3] = "new"
m.name_to_var = nothing
# test for multiple name error
@test_throws ErrorException variable_by_name(m, "new")
end
end
# Test variable definition methods
@testset "Point Variable Definition" begin
# initialize model and infinite variables
m = InfiniteModel()
param = InfOptParameter(IntervalSet(0, 1), Number[], false)
pref = add_parameter(m, param, "test")
pref2 = add_parameter(m, param, "θ")
prefs = @infinite_parameter(m, x[1:2], set = IntervalSet(0, 1))
info = VariableInfo(false, 0., false, 0., false, 0., false, NaN, false, false)
info2 = VariableInfo(true, 0, true, 0, true, 0, true, 0, true, false)
info3 = VariableInfo(true, 0, true, 0, true, 0, true, 0, false, true)
ivar = InfiniteVariable(info, (pref, pref2))
ivref = add_variable(m, ivar, "ivar")
ivar2 = build_variable(error, info, Infinite, parameter_refs = (pref, prefs))
ivref2 = add_variable(m, ivar2, "ivar2")
# _check_tuple_shape
@testset "_check_tuple_shape" begin
# test normal
@test isa(InfiniteOpt._check_tuple_shape(error, ivref, (0.5, 0.5)),
Nothing)
# prepare param value tuple
tuple = InfiniteOpt._make_formatted_tuple((0.5, [0.5, 0.5]))
# test normal with array
@test isa(InfiniteOpt._check_tuple_shape(error, ivref2, tuple), Nothing)
# test for errors in shape
@test_throws ErrorException InfiniteOpt._check_tuple_shape(error, ivref,
(0.5,))
@test_throws ErrorException InfiniteOpt._check_tuple_shape(error, ivref,
(0.5, [0.5]))
@test_throws ErrorException InfiniteOpt._check_tuple_shape(error, ivref2,
(0.5, 0.5))
tuple = InfiniteOpt._make_formatted_tuple((0.5, [0.5, 0.5, 0.5]))
@test_throws ErrorException InfiniteOpt._check_tuple_shape(error, ivref2,
tuple)
end
# _check_tuple_values
@testset "_check_tuple_values" begin
# test normal
@test isa(InfiniteOpt._check_tuple_values(error, ivref, (0.5, 0.5)),
Nothing)
# prepare array test
tuple = InfiniteOpt._make_formatted_tuple((0, [0.5, 1]))
# test normal with array
@test isa(InfiniteOpt._check_tuple_values(error, ivref2, tuple), Nothing)
# test for out of bound errors
@test_throws ErrorException InfiniteOpt._check_tuple_values(error, ivref,
(0, 2))
tuple = InfiniteOpt._make_formatted_tuple((0, [2, 1]))
@test_throws ErrorException InfiniteOpt._check_tuple_values(error, ivref2,
tuple)
end
# _update_point_info
@testset "_update_point_info" begin
# prepare info for test
new_info = VariableInfo(true, 0., true, 0., false, 0., true, 0., true,
false)
InfiniteOpt._update_variable_info(ivref, new_info)
# test with current info
@test InfiniteOpt._update_point_info(info, ivref) == new_info
# prepare info for test
new_info = VariableInfo(false, 0., false, 0., true, 0., true, 0., false,
true)
InfiniteOpt._update_variable_info(ivref, new_info)
# test with current info
@test InfiniteOpt._update_point_info(info, ivref) == new_info
# prepare info for test
curr_info = VariableInfo(true, 0., true, 0., false, 0., true, 0., true,
false)
# test with current info
@test InfiniteOpt._update_point_info(curr_info, ivref) == curr_info
# undo info changes
InfiniteOpt._update_variable_info(ivref, info)
end
# test _make_variable
@testset "_make_variable" begin
# test for all errors
@test_throws ErrorException InfiniteOpt._make_variable(error, info,
Val(Point), parameter_refs = pref)
@test_throws ErrorException InfiniteOpt._make_variable(error, info,
Val(Point))
@test_throws ErrorException InfiniteOpt._make_variable(error, info,
Val(Point), infinite_variable_ref = ivref)
@test_throws ErrorException InfiniteOpt._make_variable(error, info,
Val(Point), parameter_values = 3)
# test a variety of builds
@test InfiniteOpt._make_variable(error, info, Val(Point), infinite_variable_ref = ivref,
parameter_values = (0.5, 0.5)).infinite_variable_ref == ivref
@test InfiniteOpt._make_variable(error, info, Val(Point), infinite_variable_ref = ivref,
parameter_values = (0.5, 0.5)).parameter_values == (0.5, 0.5)
@test InfiniteOpt._make_variable(error, info, Val(Point), infinite_variable_ref = ivref,
parameter_values = (0.5, 0.5)).info == info
@test_throws ErrorException InfiniteOpt._make_variable(error, info, Val(Point),
infinite_variable_ref = ivref,
parameter_values = (0.5, 2))
@test InfiniteOpt._make_variable(error, info, Val(Point), infinite_variable_ref = ivref2,
parameter_values = (0.5, [0, 0])).infinite_variable_ref == ivref2
tuple = InfiniteOpt._make_formatted_tuple((0.5, [0, 0]))
@test InfiniteOpt._make_variable(error, info, Val(Point), infinite_variable_ref = ivref2,
parameter_values = (0.5, [0, 0])).parameter_values == tuple
@test_throws ErrorException InfiniteOpt._make_variable(error, info, Val(Point),
infinite_variable_ref = ivref2,
parameter_values = (0.5, [0, 0, 0]))
end
# build_variable
@testset "JuMP.build_variable" begin
# test for all errors
@test_throws ErrorException build_variable(error, info, Infinite,
infinite_variable_ref = ivref)
@test_throws ErrorException build_variable(error, info, Infinite,
parameter_values = 3)
@test_throws ErrorException build_variable(error, info, Point)
@test_throws ErrorException build_variable(error, info, Point,
infinite_variable_ref = ivref)
@test_throws ErrorException build_variable(error, info, Point,
parameter_values = 3)
# test a variety of builds
@test build_variable(error, info, Point, infinite_variable_ref = ivref,
parameter_values = (0.5, 0.5)).infinite_variable_ref == ivref
@test build_variable(error, info, Point, infinite_variable_ref = ivref,
parameter_values = (0.5, 0.5)).parameter_values == (0.5, 0.5)
@test build_variable(error, info, Point, infinite_variable_ref = ivref,
parameter_values = (0.5, 0.5)).info == info
@test_throws ErrorException build_variable(error, info, Point,
infinite_variable_ref = ivref,
parameter_values = (0.5, 2))
@test build_variable(error, info, Point, infinite_variable_ref = ivref2,
parameter_values = (0.5, [0, 0])).infinite_variable_ref == ivref2
tuple = InfiniteOpt._make_formatted_tuple((0.5, [0, 0]))
@test build_variable(error, info, Point, infinite_variable_ref = ivref2,
parameter_values = (0.5, [0, 0])).parameter_values == tuple
@test_throws ErrorException build_variable(error, info, Point,
infinite_variable_ref = ivref2,
parameter_values = (0.5, [0, 0, 0]))
end
# _update_param_supports
@testset "_update_param_supports" begin
# test normal
@test isa(InfiniteOpt._update_param_supports(ivref, (0.5, 1)), Nothing)
@test supports(pref) == [0.5]
@test supports(pref2) == [1]
# prepare array tuple
tuple = InfiniteOpt._make_formatted_tuple((0.5, [0, 1]))
# test normal with array
@test isa(InfiniteOpt._update_param_supports(ivref2, tuple), Nothing)
@test supports(pref) == [0.5]
@test supports(prefs[1]) == [0]
@test supports(prefs[2]) == [1]
end
# _update_infinite_point_mapping
@testset "_update_infinite_point_mapping" begin
# test first addition
pvref = PointVariableRef(m, 12)
@test isa(InfiniteOpt._update_infinite_point_mapping(pvref, ivref),
Nothing)
@test m.infinite_to_points[JuMP.index(ivref)] == [12]
# test second addition
pvref = PointVariableRef(m, 42)
@test isa(InfiniteOpt._update_infinite_point_mapping(pvref, ivref),
Nothing)
@test m.infinite_to_points[JuMP.index(ivref)] == [12, 42]
# undo changes
delete!( m.infinite_to_points, JuMP.index(ivref))
end
# _check_make_variable_ref
@testset "_check_make_variable_ref" begin
# prepare secondary model and infinite variable
m2 = InfiniteModel()
pref3 = add_parameter(m2, param, "test")
ivar3 = InfiniteVariable(info, (pref3,))
ivref3 = add_variable(m2, ivar3, "ivar")
v = build_variable(error, info, Point, infinite_variable_ref = ivref3,
parameter_values = 0.5)
# test for invalid variable error
@test_throws ErrorException InfiniteOpt._check_make_variable_ref(m, v)
# test normal
v = build_variable(error, info, Point, infinite_variable_ref = ivref,
parameter_values = (0, 1))
@test InfiniteOpt._check_make_variable_ref(m, v) == PointVariableRef(m, 2)
@test supports(pref) == [0.5, 0]
@test supports(pref2) == [1]
@test m.infinite_to_points[JuMP.index(ivref)] == [2]
delete!(m.infinite_to_points, JuMP.index(ivref))
end
# add_variable
@testset "JuMP.add_variable" begin
# prepare secondary model and infinite variable
m2 = InfiniteModel()
pref3 = add_parameter(m2, param, "test")
ivar3 = InfiniteVariable(info, (pref3,))
ivref3 = add_variable(m2, ivar3, "ivar")
v = build_variable(error, info, Point, infinite_variable_ref = ivref3,
parameter_values = 0.5)
# test for invalid variable error
@test_throws ErrorException add_variable(m, v)
# test normal
v = build_variable(error, info, Point, infinite_variable_ref = ivref,
parameter_values = (0, 1))
@test add_variable(m, v, "name") == PointVariableRef(m, 4)
@test haskey(m.vars, 4)
@test supports(pref) == [0.5, 0]
@test supports(pref2) == [1]
@test m.var_to_name[4] == "name"
@test m.infinite_to_points[JuMP.index(ivref)] == [4]
# prepare infinite variable with all the possible info additions
v = build_variable(error, info2, Point, infinite_variable_ref = ivref,
parameter_values = (0, 1))
# test info addition functions
vref = PointVariableRef(m, 5)
@test add_variable(m, v, "name") == vref
@test !optimizer_model_ready(m)
# lower bound
@test has_lower_bound(vref)
@test JuMP._lower_bound_index(vref) == 1
@test isa(m.constrs[1], ScalarConstraint{PointVariableRef,
MOI.GreaterThan{Float64}})
@test m.constr_in_var_info[1]
# upper bound
@test has_upper_bound(vref)
@test JuMP._upper_bound_index(vref) == 2
@test isa(m.constrs[2], ScalarConstraint{PointVariableRef,
MOI.LessThan{Float64}})
@test m.constr_in_var_info[2]
# fix
@test is_fixed(vref)
@test JuMP._fix_index(vref) == 3
@test isa(m.constrs[3], ScalarConstraint{PointVariableRef,
MOI.EqualTo{Float64}})
@test m.constr_in_var_info[3]
# binary
@test is_binary(vref)
@test JuMP._binary_index(vref) == 4
@test isa(m.constrs[4], ScalarConstraint{PointVariableRef,
MOI.ZeroOne})
@test m.constr_in_var_info[4]
@test m.var_to_constrs[5] == [1, 2, 3, 4]
# prepare infinite variable with integer info addition
v = build_variable(error, info3, Point, infinite_variable_ref = ivref,
parameter_values = (0, 1))
# test integer addition functions
vref = PointVariableRef(m, 6)
@test add_variable(m, v, "name") == vref
@test !optimizer_model_ready(m)
@test is_integer(vref)
@test JuMP._integer_index(vref) == 8
@test isa(m.constrs[8], ScalarConstraint{PointVariableRef,
MOI.Integer})
@test m.constr_in_var_info[8]
@test m.var_to_constrs[6] == [5, 6, 7, 8]
end
end
# Test name methods
@testset "Hold Variable Name" begin
# initialize model and variable
m = InfiniteModel()
info = VariableInfo(false, 0, false, 0, false, 0, false, 0, false, false)
bounds = ParameterBounds()
var = HoldVariable(info, bounds)
m.vars[1] = var
m.var_to_name[1] = "test"
vref = HoldVariableRef(m, 1)
# JuMP.name
@testset "JuMP.name" begin
@test name(vref) == "test"
end
# JuMP.set_name
@testset "JuMP.set_name" begin
@test isa(set_name(vref, "new"), Nothing)
@test name(vref) == "new"
end
# _make_variable_ref
@testset "_make_variable_ref" begin
@test InfiniteOpt._make_variable_ref(m, 1) == vref
end
# parameter_by_name
@testset "JuMP.variable_by_name" begin
# test normal
@test variable_by_name(m, "new") == vref
@test isa(variable_by_name(m, "test2"), Nothing)
# prepare variable with duplicate name
m.vars[2] = var
m.var_to_name[2] = "new"
m.name_to_var = nothing
# test for duplicate name error
@test_throws ErrorException variable_by_name(m, "new")
end
end
# Test variable definition methods
@testset "Hold Variable Definition" begin
# initialize model and info
m = InfiniteModel()
info = VariableInfo(false, 0, false, 0, false, 0, false, 0, false, false)
info2 = VariableInfo(true, 0, true, 0, true, 0, true, 0, true, false)
info3 = VariableInfo(true, 0, true, 0, true, 0, true, 0, false, true)
bounds = ParameterBounds()
@infinite_parameter(m, 0 <= par <= 10)
@infinite_parameter(m, 0 <= pars[1:2] <= 10)
# test _check_bounds
@testset "_check_bounds" begin
# test normal
@test isa(InfiniteOpt._check_bounds(ParameterBounds(Dict(par => IntervalSet(0, 1)))),
Nothing)
# test errors
@test_throws ErrorException InfiniteOpt._check_bounds(
ParameterBounds(Dict(par => IntervalSet(-1, 1))))
@test_throws ErrorException InfiniteOpt._check_bounds(
ParameterBounds(Dict(par => IntervalSet(0, 11))))
end
# _make_variable
@testset "_make_variable" begin
# test normal
expected = HoldVariable(info, bounds)
@test InfiniteOpt._make_variable(error, info, Val(Hold)).info == expected.info
# test errors
@test_throws ErrorException InfiniteOpt._make_variable(error, info,
Val(Hold), parameter_values = 3)
end
# build_variable
@testset "JuMP.build_variable" begin
# test normal
expected = HoldVariable(info, bounds)
@test build_variable(error, info, Hold).info == expected.info
# test errors
@test_throws ErrorException build_variable(error, info, Point,
parameter_bounds = bounds)
end
# _validate_bounds
@testset "_validate_bounds" begin
# test normal
@test isa(InfiniteOpt._validate_bounds(m,
ParameterBounds(Dict(par => IntervalSet(0, 1)))), Nothing)
# test error
par2 = ParameterRef(InfiniteModel(), 1)
@test_throws ErrorException InfiniteOpt._validate_bounds(m,
ParameterBounds(Dict(par2 => IntervalSet(0, 1))))
# test support addition
@test isa(InfiniteOpt._validate_bounds(m,
ParameterBounds(Dict(par => IntervalSet(0, 0)))), Nothing)
@test supports(par) == [0]
end
# _check_make_variable_ref
@testset "_check_make_variable_ref" begin
# test normal
v = build_variable(error, info, Hold)
@test InfiniteOpt._check_make_variable_ref(m, v) == HoldVariableRef(m, 0)
# test with bounds
bounds = ParameterBounds(Dict(par => IntervalSet(0, 2)))
v = build_variable(error, info, Hold, parameter_bounds = bounds)
@test InfiniteOpt._check_make_variable_ref(m, v) == HoldVariableRef(m, 0)
@test m.has_hold_bounds
m.has_hold_bounds = false
# test bad bounds
@infinite_parameter(InfiniteModel(), par2 in [0, 2])
v = build_variable(error, info, Hold,
parameter_bounds = ParameterBounds(Dict(par2 => IntervalSet(0, 1))))
@test_throws ErrorException InfiniteOpt._check_make_variable_ref(m, v)
end
# add_variable
@testset "JuMP.add_variable" begin
v = build_variable(error, info, Hold)
@test add_variable(m, v, "name") == HoldVariableRef(m, 1)
@test haskey(m.vars, 1)
@test m.var_to_name[1] == "name"
# prepare infinite variable with all the possible info additions
v = build_variable(error, info2, Hold)
# test info addition functions
vref = HoldVariableRef(m, 2)
@test add_variable(m, v, "name") == vref
@test !optimizer_model_ready(m)
# lower bound
@test has_lower_bound(vref)
@test JuMP._lower_bound_index(vref) == 1
@test isa(m.constrs[1], ScalarConstraint{HoldVariableRef,
MOI.GreaterThan{Float64}})
@test m.constr_in_var_info[1]
# upper bound
@test has_upper_bound(vref)
@test JuMP._upper_bound_index(vref) == 2
@test isa(m.constrs[2], ScalarConstraint{HoldVariableRef,
MOI.LessThan{Float64}})
@test m.constr_in_var_info[2]
# fix
@test is_fixed(vref)
@test JuMP._fix_index(vref) == 3
@test isa(m.constrs[3], ScalarConstraint{HoldVariableRef,
MOI.EqualTo{Float64}})
@test m.constr_in_var_info[3]
# binary
@test is_binary(vref)
@test JuMP._binary_index(vref) == 4
@test isa(m.constrs[4], ScalarConstraint{HoldVariableRef,
MOI.ZeroOne})
@test m.constr_in_var_info[4]
@test m.var_to_constrs[2] == [1, 2, 3, 4]
# prepare infinite variable with integer info addition
v = build_variable(error, info3, Hold)
# test integer addition functions
vref = HoldVariableRef(m, 3)
@test add_variable(m, v, "name") == vref
@test !optimizer_model_ready(m)
@test is_integer(vref)
@test JuMP._integer_index(vref) == 8
@test isa(m.constrs[8], ScalarConstraint{HoldVariableRef,
MOI.Integer})
@test m.constr_in_var_info[8]
@test m.var_to_constrs[3] == [5, 6, 7, 8]
end
end
|
(*
* Copyright 2023, Proofcraft Pty Ltd
* Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
*
* SPDX-License-Identifier: GPL-2.0-only
*)
theory Example_Valid_State
imports
"ArchNoninterference"
"Lib.Distinct_Cmd"
begin
section \<open>Example\<close>
(* This example is a classic 'one way information flow'
example, where information is allowed to flow from Low to High,
but not the reverse. We consider a typical scenario where
shared memory and a notification object for signalling are used to
implement a ring-buffer. We consider the NTFN to be in the domain of High,
and the shared memory to be in the domain of Low. *)
(* basic machine-level declarations that need to happen outside the locale *)
consts s0_context :: user_context
(* define the irqs to come regularly every 10 *)
axiomatization where
irq_oracle_def: "ARM.irq_oracle \<equiv> \<lambda>pos. if pos mod 10 = 0 then 10 else 0"
context begin interpretation Arch . (*FIXME: arch_split*)
subsection \<open>We show that the authority graph does not let
information flow from High to Low\<close>
datatype auth_graph_label = High | Low | IRQ0
abbreviation partition_label where
"partition_label x \<equiv> OrdinaryLabel x"
definition Sys1AuthGraph :: "(auth_graph_label subject_label) auth_graph" where
"Sys1AuthGraph \<equiv>
{ (partition_label High,Read,partition_label Low),
(partition_label Low,Notify,partition_label High),
(partition_label Low,Reset,partition_label High),
(SilcLabel,Notify,partition_label High),
(SilcLabel,Reset,partition_label High)
} \<union> {(x, a, y). x = y}"
lemma subjectReads_Low: "subjectReads Sys1AuthGraph (partition_label Low) = {partition_label Low}"
apply(rule equalityI)
apply(rule subsetI)
apply(erule subjectReads.induct, (fastforce simp: Sys1AuthGraph_def)+)
done
lemma Low_in_subjectReads_High:
"partition_label Low \<in> subjectReads Sys1AuthGraph (partition_label High)"
apply (simp add: Sys1AuthGraph_def reads_read)
done
lemma subjectReads_High: "subjectReads Sys1AuthGraph (partition_label High) = {partition_label High,partition_label Low}"
apply(rule equalityI)
apply(rule subsetI)
apply(erule subjectReads.induct, (fastforce simp: Sys1AuthGraph_def)+)
apply(auto intro: Low_in_subjectReads_High)
done
lemma subjectReads_IRQ0: "subjectReads Sys1AuthGraph (partition_label IRQ0) = {partition_label IRQ0}"
apply(rule equalityI)
apply(rule subsetI)
apply(erule subjectReads.induct, (fastforce simp: Sys1AuthGraph_def)+)
done
lemma High_in_subjectAffects_Low:
"partition_label High \<in> subjectAffects Sys1AuthGraph (partition_label Low)"
apply(rule affects_ep)
apply (simp add: Sys1AuthGraph_def)
apply (rule disjI1, simp+)
done
lemma subjectAffects_Low: "subjectAffects Sys1AuthGraph (partition_label Low) = {partition_label Low, partition_label High}"
apply(rule equalityI)
apply(rule subsetI)
apply(erule subjectAffects.induct, (fastforce simp: Sys1AuthGraph_def)+)
apply(auto intro: affects_lrefl High_in_subjectAffects_Low)
done
lemma subjectAffects_High: "subjectAffects Sys1AuthGraph (partition_label High) = {partition_label High}"
apply(rule equalityI)
apply(rule subsetI)
apply(erule subjectAffects.induct, (fastforce simp: Sys1AuthGraph_def)+)
apply(auto intro: affects_lrefl)
done
lemma subjectAffects_IRQ0: "subjectAffects Sys1AuthGraph (partition_label IRQ0) = {partition_label IRQ0}"
apply(rule equalityI)
apply(rule subsetI)
apply(erule subjectAffects.induct, (fastforce simp: Sys1AuthGraph_def)+)
apply(auto intro: affects_lrefl)
done
lemmas subjectReads = subjectReads_High subjectReads_Low subjectReads_IRQ0
lemma partsSubjectAffects_Low: "partsSubjectAffects Sys1AuthGraph Low = {Partition Low, Partition High}"
apply(auto simp: partsSubjectAffects_def image_def label_can_affect_partition_def subjectReads subjectAffects_Low | case_tac xa, rename_tac xa)+
done
lemma partsSubjectAffects_High: "partsSubjectAffects Sys1AuthGraph High = {Partition High}"
apply(auto simp: partsSubjectAffects_def image_def label_can_affect_partition_def subjectReads subjectAffects_High | rename_tac xa, case_tac xa)+
done
lemma partsSubjectAffects_IRQ0: "partsSubjectAffects Sys1AuthGraph IRQ0 = {Partition IRQ0}"
apply(auto simp: partsSubjectAffects_def image_def label_can_affect_partition_def subjectReads subjectAffects_IRQ0 | rename_tac xa, case_tac xa)+
done
lemmas partsSubjectAffects =
partsSubjectAffects_High partsSubjectAffects_Low partsSubjectAffects_IRQ0
definition example_policy where
"example_policy \<equiv> {(PSched, d)|d. True} \<union>
{(d,e). d = e} \<union>
{(Partition Low, Partition High)}"
lemma "policyFlows Sys1AuthGraph = example_policy"
apply(rule equalityI)
apply(rule subsetI)
apply(clarsimp simp: example_policy_def)
apply(erule policyFlows.cases)
apply(case_tac l, auto simp: partsSubjectAffects)[1]
apply assumption
apply(rule subsetI)
apply(clarsimp simp: example_policy_def)
apply(elim disjE)
apply(fastforce simp: partsSubjectAffects intro: policy_affects)
apply(fastforce intro: policy_scheduler)
apply(fastforce intro: policyFlows_refl refl_onD)
done
subsection \<open>We show there exists a valid initial state associated to the
above authority graph\<close>
text \<open>
This example (modified from ../access-control/ExampleSystem) is a system Sys1 made
of 2 main components Low and High, connected through a notification NTFN.
Both Low and High contain:
. one TCB
. one vspace made up of one page directory
. each pd contains a single page table, with access to a shared page in memory
Low can read/write to this page, High can only read
. one cspace made up of one cnode
. each cspace contains 4 caps:
one to the tcb
one to the cnode itself
one to the vspace
one to the ntfn
Low can send to the ntfn while High can receive from it.
Attempt at ASCII art:
-------- ---- ---- --------
| | | | | | | |
V | | V S R | V | V
Low_tcb(3079)-->Low_cnode(6)--->ntfn(9)<---High_cnode(7)<--High_tcb(3080)
| | | |
V | | V
Low_pd(3063)<----- -------> High_pd(3065)
| |
V R/W R V
Low_pt(3072)---------------->shared_page<-----------------High_pt(3077)
(the references are derived from the dump of the SAC system)
The aim is to be able to prove
valid_initial_state s0_internal Sys1PAS timer_irq utf
where Sys1PAS is the label graph defining the AC policy for Sys1 using
the authority graph defined above and s0 is the state of Sys1 described above.
\<close>
subsubsection \<open>Defining the State\<close>
definition "ntfn_ptr \<equiv> kernel_base + 0x10"
definition "Low_tcb_ptr \<equiv> kernel_base + 0x200"
definition "High_tcb_ptr = kernel_base + 0x400"
definition "idle_tcb_ptr = kernel_base + 0x1000"
definition "Low_pt_ptr = kernel_base + 0x800"
definition "High_pt_ptr = kernel_base + 0xC00"
(* init_globals_frame \<equiv> {kernel_base + 0x5000,... kernel_base + 0x5FFF} *)
definition "shared_page_ptr_virt = kernel_base + 0x6000"
definition "shared_page_ptr_phys = addrFromPPtr shared_page_ptr_virt"
definition "Low_pd_ptr = kernel_base + 0x20000"
definition "High_pd_ptr = kernel_base + 0x24000"
definition "Low_cnode_ptr = kernel_base + 0x10000"
definition "High_cnode_ptr = kernel_base + 0x14000"
definition "Silc_cnode_ptr = kernel_base + 0x18000"
definition "irq_cnode_ptr = kernel_base + 0x1C000"
(* init_global_pd \<equiv> {kernel_base + 0x60000,... kernel_base + 0x603555} *)
definition "timer_irq \<equiv> 10" (* not sure exactly how this fits in *)
definition "Low_mcp \<equiv> 5 :: word8"
definition "Low_prio \<equiv> 5 :: word8"
definition "High_mcp \<equiv> 5 :: word8"
definition "High_prio \<equiv> 5 :: word8"
definition "Low_time_slice \<equiv> 0 :: nat"
definition "High_time_slice \<equiv> 5 :: nat"
definition "Low_domain \<equiv> 0 :: word8"
definition "High_domain \<equiv> 1 :: word8"
lemmas s0_ptr_defs =
Low_cnode_ptr_def High_cnode_ptr_def Silc_cnode_ptr_def ntfn_ptr_def irq_cnode_ptr_def
Low_pd_ptr_def High_pd_ptr_def Low_pt_ptr_def High_pt_ptr_def Low_tcb_ptr_def
High_tcb_ptr_def idle_tcb_ptr_def timer_irq_def Low_prio_def High_prio_def Low_time_slice_def
Low_domain_def High_domain_def init_irq_node_ptr_def init_globals_frame_def init_global_pd_def
kernel_base_def shared_page_ptr_virt_def
(* Distinctness proof of kernel pointers. *)
distinct ptrs_distinct [simp]:
Low_tcb_ptr High_tcb_ptr idle_tcb_ptr
Low_pt_ptr High_pt_ptr
shared_page_ptr_virt ntfn_ptr
Low_pd_ptr High_pd_ptr
Low_cnode_ptr High_cnode_ptr Silc_cnode_ptr irq_cnode_ptr
init_globals_frame init_global_pd
by (auto simp: s0_ptr_defs)
text \<open>We need to define the asids of each pd and pt to ensure that
the object is included in the right ASID-label\<close>
text \<open>Low's ASID\<close>
definition
Low_asid :: machine_word
where
"Low_asid \<equiv> 1<<asid_low_bits"
text \<open>High's ASID\<close>
definition
High_asid :: machine_word
where
"High_asid \<equiv> 2<<asid_low_bits"
lemma "asid_high_bits_of High_asid \<noteq> asid_high_bits_of Low_asid"
by (simp add: Low_asid_def asid_high_bits_of_def High_asid_def asid_low_bits_def)
text \<open>converting a nat to a bool list of size 10 - for the cnodes\<close>
definition
nat_to_bl :: "nat \<Rightarrow> nat \<Rightarrow> bool list option"
where
"nat_to_bl bits n \<equiv>
if n \<ge> 2^bits then
None
else
Some $ bin_to_bl bits (of_nat n)"
lemma nat_to_bl_id [simp]: "nat_to_bl (size (x :: (('a::len) word))) (unat x) = Some (to_bl x)"
by (clarsimp simp: nat_to_bl_def to_bl_def le_def word_size)
definition
the_nat_to_bl :: "nat \<Rightarrow> nat \<Rightarrow> bool list"
where
"the_nat_to_bl sz n \<equiv>
the (nat_to_bl sz (n mod 2^sz))"
abbreviation (input)
the_nat_to_bl_10 :: "nat \<Rightarrow> bool list"
where
"the_nat_to_bl_10 n \<equiv> the_nat_to_bl 10 n"
lemma len_the_nat_to_bl [simp]:
"length (the_nat_to_bl x y) = x"
apply (clarsimp simp: the_nat_to_bl_def nat_to_bl_def)
apply safe
apply (metis le_def mod_less_divisor nat_zero_less_power_iff zero_less_numeral)
apply (clarsimp simp: len_bin_to_bl_aux not_le)
done
lemma tcb_cnode_index_nat_to_bl [simp]:
"the_nat_to_bl_10 n \<noteq> tcb_cnode_index n"
by (clarsimp simp: tcb_cnode_index_def intro!: length_neq)
lemma mod_less_self [simp]:
"a \<le> b mod a \<longleftrightarrow> ((a :: nat) = 0)"
by (metis mod_less_divisor nat_neq_iff not_less not_less0)
lemma split_div_mod:
"a = (b::nat) \<longleftrightarrow> (a div k = b div k \<and> a mod k = b mod k)"
by (metis mult_div_mod_eq)
lemma nat_to_bl_eq:
assumes "a < 2 ^ n \<or> b < 2 ^ n"
shows "nat_to_bl n a = nat_to_bl n b \<longleftrightarrow> a = b"
using assms
apply -
apply (erule disjE_R)
apply (clarsimp simp: nat_to_bl_def)
apply (case_tac "a \<ge> 2 ^ n")
apply (clarsimp simp: nat_to_bl_def)
apply (clarsimp simp: not_le)
apply (induct n arbitrary: a b)
apply (clarsimp simp: nat_to_bl_def)
apply atomize
apply (clarsimp simp: nat_to_bl_def)
apply (erule_tac x="a div 2" in allE)
apply (erule_tac x="b div 2" in allE)
apply (erule impE)
apply (metis power_commutes td_gal_lt zero_less_numeral)
apply (clarsimp simp: bin_last_def zdiv_int)
apply (rule iffI [rotated], clarsimp)
apply (subst (asm) (1 2 3 4) bin_to_bl_aux_alt)
apply (clarsimp simp: mod_eq_dvd_iff)
apply (subst split_div_mod [where k=2])
apply clarsimp
apply presburger
done
lemma nat_to_bl_mod_n_eq [simp]:
"nat_to_bl n a = nat_to_bl n b \<longleftrightarrow> ((a = b \<and> a < 2 ^ n) \<or> (a \<ge> 2 ^ n \<and> b \<ge> 2 ^ n))"
apply (rule iffI)
apply (clarsimp simp: not_le)
apply (subst (asm) nat_to_bl_eq, simp)
apply clarsimp
apply (erule disjE)
apply clarsimp
apply (clarsimp simp: nat_to_bl_def)
done
lemma the_the_eq:
"\<lbrakk> x \<noteq> None; y \<noteq> None \<rbrakk> \<Longrightarrow> (the x = the y) = (x = y)"
by auto
lemma the_nat_to_bl_eq [simp]:
"(the_nat_to_bl n a = the_nat_to_bl m b) \<longleftrightarrow> (n = m \<and> (a mod 2 ^ n = b mod 2 ^ n))"
apply (case_tac "n = m")
apply (clarsimp simp: the_nat_to_bl_def)
apply (subst the_the_eq)
apply (clarsimp simp: nat_to_bl_def)
apply (clarsimp simp: nat_to_bl_def)
apply simp
apply simp
apply (metis len_the_nat_to_bl)
done
lemma empty_cnode_eq_Some [simp]:
"(empty_cnode n x = Some y) = (length x = n \<and> y = NullCap)"
by (clarsimp simp: empty_cnode_def, metis)
lemma empty_cnode_eq_None [simp]:
"(empty_cnode n x = None) = (length x \<noteq> n)"
by (clarsimp simp: empty_cnode_def)
text \<open>Low's CSpace\<close>
definition
Low_caps :: cnode_contents
where
"Low_caps \<equiv>
(empty_cnode 10)
( (the_nat_to_bl_10 1)
\<mapsto> ThreadCap Low_tcb_ptr,
(the_nat_to_bl_10 2)
\<mapsto> CNodeCap Low_cnode_ptr 10 (the_nat_to_bl_10 2),
(the_nat_to_bl_10 3)
\<mapsto> ArchObjectCap (PageDirectoryCap Low_pd_ptr
(Some Low_asid)),
(the_nat_to_bl_10 318)
\<mapsto> NotificationCap ntfn_ptr 0 {AllowSend} )"
definition
Low_cnode :: kernel_object
where
"Low_cnode \<equiv> CNode 10 Low_caps"
lemma ran_empty_cnode [simp]:
"ran (empty_cnode C) = {NullCap}"
by (auto simp: empty_cnode_def ran_def Ex_list_of_length intro: set_eqI)
lemma empty_cnode_app [simp]:
"length x = n \<Longrightarrow> empty_cnode n x = Some NullCap"
by (auto simp: empty_cnode_def)
lemma in_ran_If [simp]:
"(x \<in> ran (\<lambda>n. if P n then A n else B n))
\<longleftrightarrow> (\<exists>n. P n \<and> A n = Some x) \<or> (\<exists>n. \<not> P n \<and> B n = Some x)"
by (auto simp: ran_def)
lemma Low_caps_ran:
"ran Low_caps = {ThreadCap Low_tcb_ptr,
CNodeCap Low_cnode_ptr 10 (the_nat_to_bl_10 2),
ArchObjectCap (PageDirectoryCap Low_pd_ptr
(Some Low_asid)),
NotificationCap ntfn_ptr 0 {AllowSend},
NullCap}"
apply (rule equalityI)
apply (clarsimp simp: Low_caps_def fun_upd_def empty_cnode_def split: if_split_asm)
apply (clarsimp simp: Low_caps_def fun_upd_def empty_cnode_def split: if_split_asm
cong: conj_cong)
apply (rule exI [where x="the_nat_to_bl_10 0"])
apply simp
done
text \<open>High's Cspace\<close>
definition
High_caps :: cnode_contents
where
"High_caps \<equiv>
(empty_cnode 10)
( (the_nat_to_bl_10 1)
\<mapsto> ThreadCap High_tcb_ptr,
(the_nat_to_bl_10 2)
\<mapsto> CNodeCap High_cnode_ptr 10 (the_nat_to_bl_10 2),
(the_nat_to_bl_10 3)
\<mapsto> ArchObjectCap (PageDirectoryCap High_pd_ptr
(Some High_asid)),
(the_nat_to_bl_10 318)
\<mapsto> NotificationCap ntfn_ptr 0 {AllowRecv}) "
definition
High_cnode :: kernel_object
where
"High_cnode \<equiv> CNode 10 High_caps"
lemma High_caps_ran:
"ran High_caps = {ThreadCap High_tcb_ptr,
CNodeCap High_cnode_ptr 10 (the_nat_to_bl_10 2),
ArchObjectCap (PageDirectoryCap High_pd_ptr
(Some High_asid)),
NotificationCap ntfn_ptr 0 {AllowRecv},
NullCap}"
apply (rule equalityI)
apply (clarsimp simp: High_caps_def ran_def empty_cnode_def split: if_split_asm)
apply (clarsimp simp: High_caps_def ran_def empty_cnode_def split: if_split_asm
cong: conj_cong)
apply (rule exI [where x="the_nat_to_bl_10 0"])
apply simp
done
text \<open>We need a copy of boundary crossing caps owned by SilcLabel.
The only such cap is Low's cap to the notification\<close>
definition
Silc_caps :: cnode_contents
where
"Silc_caps \<equiv>
(empty_cnode 10)
( (the_nat_to_bl_10 2)
\<mapsto> CNodeCap Silc_cnode_ptr 10 (the_nat_to_bl_10 2),
(the_nat_to_bl_10 318)
\<mapsto> NotificationCap ntfn_ptr 0 {AllowSend} )"
definition
Silc_cnode :: kernel_object
where
"Silc_cnode \<equiv> CNode 10 Silc_caps"
lemma Silc_caps_ran:
"ran Silc_caps = {CNodeCap Silc_cnode_ptr 10 (the_nat_to_bl_10 2),
NotificationCap ntfn_ptr 0 {AllowSend},
NullCap}"
apply (rule equalityI)
apply (clarsimp simp: Silc_caps_def ran_def empty_cnode_def)
apply (clarsimp simp: ran_def Silc_caps_def empty_cnode_def cong: conj_cong)
apply (rule_tac x="the_nat_to_bl_10 0" in exI)
apply simp
done
text \<open>notification between Low and High\<close>
definition
ntfn :: kernel_object
where
"ntfn \<equiv> Notification \<lparr>ntfn_obj = WaitingNtfn [High_tcb_ptr], ntfn_bound_tcb=None\<rparr>"
text \<open>Low's VSpace (PageDirectory)\<close>
definition
Low_pt' :: "word8 \<Rightarrow> pte "
where
"Low_pt' \<equiv> (\<lambda>_. InvalidPTE)
(0 := SmallPagePTE shared_page_ptr_phys {} vm_read_write)"
definition
Low_pt :: kernel_object
where
"Low_pt \<equiv> ArchObj (PageTable Low_pt')"
definition
Low_pd' :: "12 word \<Rightarrow> pde "
where
"Low_pd' \<equiv>
global_pd
(0 := PageTablePDE
(addrFromPPtr Low_pt_ptr)
{}
undefined )"
(* used addrFromPPtr because proof gives me ptrFromAddr.. TODO: check
if it's right *)
definition
Low_pd :: kernel_object
where
"Low_pd \<equiv> ArchObj (PageDirectory Low_pd')"
text \<open>High's VSpace (PageDirectory)\<close>
definition
High_pt' :: "word8 \<Rightarrow> pte "
where
"High_pt' \<equiv>
(\<lambda>_. InvalidPTE)
(0 := SmallPagePTE shared_page_ptr_phys {} vm_read_only)"
definition
High_pt :: kernel_object
where
"High_pt \<equiv> ArchObj (PageTable High_pt')"
definition
High_pd' :: "12 word \<Rightarrow> pde "
where
"High_pd' \<equiv>
global_pd
(0 := PageTablePDE
(addrFromPPtr High_pt_ptr)
{}
undefined )"
(* used addrFromPPtr because proof gives me ptrFromAddr.. TODO: check
if it's right *)
definition
High_pd :: kernel_object
where
"High_pd \<equiv> ArchObj (PageDirectory High_pd')"
text \<open>Low's tcb\<close>
definition
Low_tcb :: kernel_object
where
"Low_tcb \<equiv>
TCB \<lparr>
tcb_ctable = CNodeCap Low_cnode_ptr 10 (the_nat_to_bl_10 2),
tcb_vtable = ArchObjectCap
(PageDirectoryCap Low_pd_ptr (Some Low_asid)),
tcb_reply = ReplyCap Low_tcb_ptr True {AllowGrant, AllowWrite}, \<comment> \<open>master reply cap\<close>
tcb_caller = NullCap,
tcb_ipcframe = NullCap,
tcb_state = Running,
tcb_fault_handler = replicate word_bits False,
tcb_ipc_buffer = 0,
tcb_fault = None,
tcb_bound_notification = None,
tcb_mcpriority = Low_mcp,
tcb_arch = \<lparr>tcb_context = undefined\<rparr>\<rparr>"
definition
Low_etcb :: etcb
where
"Low_etcb \<equiv> \<lparr>tcb_priority = Low_prio,
tcb_time_slice = Low_time_slice,
tcb_domain = Low_domain\<rparr>"
text \<open>High's tcb\<close>
definition
High_tcb :: kernel_object
where
"High_tcb \<equiv>
TCB \<lparr>
tcb_ctable = CNodeCap High_cnode_ptr 10 (the_nat_to_bl_10 2) ,
tcb_vtable = ArchObjectCap
(PageDirectoryCap High_pd_ptr (Some High_asid)),
tcb_reply = ReplyCap High_tcb_ptr True {AllowGrant,AllowWrite}, \<comment> \<open>master reply cap to itself\<close>
tcb_caller = NullCap,
tcb_ipcframe = NullCap,
tcb_state = BlockedOnNotification ntfn_ptr,
tcb_fault_handler = replicate word_bits False,
tcb_ipc_buffer = 0,
tcb_fault = None,
tcb_bound_notification = None,
tcb_mcpriority = High_mcp,
tcb_arch = \<lparr>tcb_context = undefined\<rparr>\<rparr>"
definition
High_etcb :: etcb
where
"High_etcb \<equiv> \<lparr>tcb_priority = High_prio,
tcb_time_slice = High_time_slice,
tcb_domain = High_domain\<rparr>"
text \<open>idle's tcb\<close>
definition
idle_tcb :: kernel_object
where
"idle_tcb \<equiv>
TCB \<lparr>
tcb_ctable = NullCap,
tcb_vtable = NullCap,
tcb_reply = NullCap,
tcb_caller = NullCap,
tcb_ipcframe = NullCap,
tcb_state = IdleThreadState,
tcb_fault_handler = replicate word_bits False,
tcb_ipc_buffer = 0,
tcb_fault = None,
tcb_bound_notification = None,
tcb_mcpriority = default_priority,
tcb_arch = \<lparr>tcb_context = empty_context\<rparr>\<rparr>"
definition
"irq_cnode \<equiv> CNode 0 (Map.empty([] \<mapsto> cap.NullCap))"
definition
kh0 :: kheap
where
"kh0 \<equiv> (\<lambda>x. if \<exists>irq::10 word. init_irq_node_ptr + (ucast irq << cte_level_bits) = x
then Some (CNode 0 (empty_cnode 0)) else None)
(Low_cnode_ptr \<mapsto> Low_cnode,
High_cnode_ptr \<mapsto> High_cnode,
Silc_cnode_ptr \<mapsto> Silc_cnode,
ntfn_ptr \<mapsto> ntfn,
irq_cnode_ptr \<mapsto> irq_cnode,
Low_pd_ptr \<mapsto> Low_pd,
High_pd_ptr \<mapsto> High_pd,
Low_pt_ptr \<mapsto> Low_pt,
High_pt_ptr \<mapsto> High_pt,
Low_tcb_ptr \<mapsto> Low_tcb,
High_tcb_ptr \<mapsto> High_tcb,
idle_tcb_ptr \<mapsto> idle_tcb,
init_globals_frame \<mapsto> ArchObj (DataPage False ARMSmallPage),
init_global_pd \<mapsto> ArchObj (PageDirectory global_pd))"
lemma irq_node_offs_min:
"init_irq_node_ptr \<le> init_irq_node_ptr + (ucast (irq:: 10 word) << cte_level_bits)"
apply (rule_tac sz=28 in machine_word_plus_mono_right_split)
apply (simp add: unat_word_ariths mask_def shiftl_t2n s0_ptr_defs cte_level_bits_def)
apply (cut_tac x=irq and 'a=32 in ucast_less)
apply simp
apply (simp add: word_less_nat_alt)
apply (simp add: word_bits_def)
done
lemma irq_node_offs_max:
"init_irq_node_ptr + (ucast (irq:: 10 word) << cte_level_bits) < init_irq_node_ptr + 0x4000"
apply (simp add: s0_ptr_defs cte_level_bits_def shiftl_t2n)
apply (cut_tac x=irq and 'a=32 in ucast_less)
apply simp
apply (simp add: word_less_nat_alt unat_word_ariths)
done
definition irq_node_offs_range where
"irq_node_offs_range \<equiv> {x. init_irq_node_ptr \<le> x \<and> x < init_irq_node_ptr + 0x4000}
\<inter> {x. is_aligned x cte_level_bits}"
lemma irq_node_offs_in_range:
"init_irq_node_ptr + (ucast (irq:: 10 word) << cte_level_bits)
\<in> irq_node_offs_range"
apply (clarsimp simp: irq_node_offs_min irq_node_offs_max irq_node_offs_range_def)
apply (rule is_aligned_add[OF _ is_aligned_shift])
apply (simp add: is_aligned_def s0_ptr_defs cte_level_bits_def)
done
lemma irq_node_offs_range_correct:
"x \<in> irq_node_offs_range
\<Longrightarrow> \<exists>irq. x = init_irq_node_ptr + (ucast (irq:: 10 word) << cte_level_bits)"
apply (clarsimp simp: irq_node_offs_min irq_node_offs_max irq_node_offs_range_def
s0_ptr_defs cte_level_bits_def)
apply (rule_tac x="ucast ((x - 0xE0008000) >> 4)" in exI)
apply (clarsimp simp: ucast_ucast_mask)
apply (subst aligned_shiftr_mask_shiftl)
apply (rule aligned_sub_aligned)
apply assumption
apply (simp add: is_aligned_def)
apply simp
apply simp
apply (rule_tac n=14 in mask_eqI)
apply (subst mask_add_aligned)
apply (simp add: is_aligned_def)
apply (simp add: mask_twice)
apply (simp add: diff_conv_add_uminus del: add_uminus_conv_diff)
apply (subst add.commute[symmetric])
apply (subst mask_add_aligned)
apply (simp add: is_aligned_def)
apply simp
apply (simp add: diff_conv_add_uminus del: add_uminus_conv_diff)
apply (subst add_mask_lower_bits)
apply (simp add: is_aligned_def)
apply clarsimp
apply (cut_tac x=x and y="0xE000BFFF" and n=14 in neg_mask_mono_le)
apply (force dest: word_less_sub_1)
apply (drule_tac n=14 in aligned_le_sharp)
apply (simp add: is_aligned_def)
apply (simp add: mask_def)
done
lemma irq_node_offs_range_distinct[simp]:
"Low_cnode_ptr \<notin> irq_node_offs_range"
"High_cnode_ptr \<notin> irq_node_offs_range"
"Silc_cnode_ptr \<notin> irq_node_offs_range"
"ntfn_ptr \<notin> irq_node_offs_range"
"irq_cnode_ptr \<notin> irq_node_offs_range"
"Low_pd_ptr \<notin> irq_node_offs_range"
"High_pd_ptr \<notin> irq_node_offs_range"
"Low_pt_ptr \<notin> irq_node_offs_range"
"High_pt_ptr \<notin> irq_node_offs_range"
"Low_tcb_ptr \<notin> irq_node_offs_range"
"High_tcb_ptr \<notin> irq_node_offs_range"
"idle_tcb_ptr \<notin> irq_node_offs_range"
"init_globals_frame \<notin> irq_node_offs_range"
"init_global_pd \<notin> irq_node_offs_range"
by(simp add:irq_node_offs_range_def s0_ptr_defs)+
lemma irq_node_offs_distinct[simp]:
"init_irq_node_ptr + (ucast (irq:: 10 word) << cte_level_bits) \<noteq> Low_cnode_ptr"
"init_irq_node_ptr + (ucast (irq:: 10 word) << cte_level_bits) \<noteq> High_cnode_ptr"
"init_irq_node_ptr + (ucast (irq:: 10 word) << cte_level_bits) \<noteq> Silc_cnode_ptr"
"init_irq_node_ptr + (ucast (irq:: 10 word) << cte_level_bits) \<noteq> ntfn_ptr"
"init_irq_node_ptr + (ucast (irq:: 10 word) << cte_level_bits) \<noteq> irq_cnode_ptr"
"init_irq_node_ptr + (ucast (irq:: 10 word) << cte_level_bits) \<noteq> Low_pd_ptr"
"init_irq_node_ptr + (ucast (irq:: 10 word) << cte_level_bits) \<noteq> High_pd_ptr"
"init_irq_node_ptr + (ucast (irq:: 10 word) << cte_level_bits) \<noteq> Low_pt_ptr"
"init_irq_node_ptr + (ucast (irq:: 10 word) << cte_level_bits) \<noteq> High_pt_ptr"
"init_irq_node_ptr + (ucast (irq:: 10 word) << cte_level_bits) \<noteq> Low_tcb_ptr"
"init_irq_node_ptr + (ucast (irq:: 10 word) << cte_level_bits) \<noteq> High_tcb_ptr"
"init_irq_node_ptr + (ucast (irq:: 10 word) << cte_level_bits) \<noteq> idle_tcb_ptr"
"init_irq_node_ptr + (ucast (irq:: 10 word) << cte_level_bits) \<noteq> init_globals_frame"
"init_irq_node_ptr + (ucast (irq:: 10 word) << cte_level_bits) \<noteq> init_global_pd"
by (simp add:not_inD[symmetric, OF _ irq_node_offs_in_range])+
lemma kh0_dom:
"dom kh0 = {init_globals_frame, init_global_pd, idle_tcb_ptr, High_tcb_ptr, Low_tcb_ptr,
High_pt_ptr, Low_pt_ptr, High_pd_ptr, Low_pd_ptr, irq_cnode_ptr, ntfn_ptr,
Silc_cnode_ptr, High_cnode_ptr, Low_cnode_ptr} \<union>
irq_node_offs_range"
apply (rule equalityI)
apply (simp add: kh0_def dom_def)
apply (clarsimp simp: irq_node_offs_in_range)
apply (clarsimp simp: dom_def)
apply (rule conjI, clarsimp simp: kh0_def)+
apply (force simp: kh0_def cte_level_bits_def dest: irq_node_offs_range_correct)
done
lemmas kh0_SomeD' = set_mp[OF equalityD1[OF kh0_dom[simplified dom_def]], OF CollectI, simplified, OF exI]
lemma kh0_SomeD:
"kh0 x = Some y \<Longrightarrow>
x = init_globals_frame \<and> y = ArchObj (DataPage False ARMSmallPage) \<or>
x = init_global_pd \<and> y = ArchObj (PageDirectory global_pd) \<or>
x = idle_tcb_ptr \<and> y = idle_tcb \<or>
x = High_tcb_ptr \<and> y = High_tcb \<or>
x = Low_tcb_ptr \<and> y = Low_tcb \<or>
x = High_pt_ptr \<and> y = High_pt \<or>
x = Low_pt_ptr \<and> y = Low_pt \<or>
x = High_pd_ptr \<and> y = High_pd \<or>
x = Low_pd_ptr \<and> y = Low_pd \<or>
x = irq_cnode_ptr \<and> y = irq_cnode \<or>
x = ntfn_ptr \<and> y = ntfn \<or>
x = Silc_cnode_ptr \<and> y = Silc_cnode \<or>
x = High_cnode_ptr \<and> y = High_cnode \<or>
x = Low_cnode_ptr \<and> y = Low_cnode \<or>
x \<in> irq_node_offs_range \<and> y = CNode 0 (empty_cnode 0)"
apply (frule kh0_SomeD')
apply (erule disjE, simp add: kh0_def
| force simp: kh0_def split: if_split_asm)+
done
lemmas kh0_obj_def =
Low_cnode_def High_cnode_def Silc_cnode_def ntfn_def irq_cnode_def Low_pd_def
High_pd_def Low_pt_def High_pt_def Low_tcb_def High_tcb_def idle_tcb_def
definition exst0 :: "det_ext" where
"exst0 \<equiv> \<lparr>work_units_completed_internal = undefined,
scheduler_action_internal = resume_cur_thread,
ekheap_internal = [Low_tcb_ptr \<mapsto> Low_etcb,
High_tcb_ptr \<mapsto> High_etcb,
idle_tcb_ptr \<mapsto> default_etcb],
domain_list_internal = [(0, 10), (1, 10)],
domain_index_internal = 0,
cur_domain_internal = 0,
domain_time_internal = 5,
ready_queues_internal = (const (const [])),
cdt_list_internal = const []\<rparr>"
lemmas ekh0_obj_def =
Low_etcb_def High_etcb_def default_etcb_def
definition machine_state0 :: "machine_state" where
"machine_state0 \<equiv> \<lparr>irq_masks = (\<lambda>irq. if irq = timer_irq then False else True),
irq_state = 0,
underlying_memory = const 0,
device_state = Map.empty,
exclusive_state = undefined,
machine_state_rest = undefined\<rparr>"
definition arch_state0 :: "arch_state" where
"arch_state0 \<equiv> \<lparr>arm_asid_table = Map.empty,
arm_hwasid_table = Map.empty, arm_next_asid = 0, arm_asid_map = Map.empty,
arm_global_pd = init_global_pd, arm_global_pts = [],
arm_kernel_vspace =
\<lambda>ref. if ref \<in> {kernel_base..kernel_base + mask 20} then ArmVSpaceKernelWindow
else ArmVSpaceInvalidRegion\<rparr>"
definition
s0_internal :: "det_ext state"
where
"s0_internal \<equiv> \<lparr>
kheap = kh0,
cdt = Map.empty,
is_original_cap = (\<lambda>_. False) ((Low_tcb_ptr, tcb_cnode_index 2) := True,
(High_tcb_ptr, tcb_cnode_index 2) := True),
cur_thread = Low_tcb_ptr,
idle_thread = idle_tcb_ptr,
machine_state = machine_state0,
interrupt_irq_node = (\<lambda>irq. init_irq_node_ptr + (ucast irq << cte_level_bits)),
interrupt_states = (\<lambda>_. irq_state.IRQInactive) (timer_irq := irq_state.IRQTimer),
arch_state = arch_state0,
exst = exst0
\<rparr>"
subsubsection \<open>Defining the policy graph\<close>
(* FIXME: should incorporate SharedPage above *)
(* There is an NTFN in the High label, a SharedPage in the Low label *)
definition
Sys1AgentMap :: "(auth_graph_label subject_label) agent_map"
where
"Sys1AgentMap \<equiv>
(\<lambda>p. if p \<in> ptr_range shared_page_ptr_virt pageBits
then partition_label Low else partition_label IRQ0)
\<comment> \<open>set the range of the shared_page to Low, default everything else to IRQ0\<close>
(Low_cnode_ptr := partition_label Low,
High_cnode_ptr := partition_label High,
ntfn_ptr := partition_label High,
irq_cnode_ptr := partition_label IRQ0,
Silc_cnode_ptr := SilcLabel,
Low_pd_ptr := partition_label Low,
High_pd_ptr := partition_label High,
Low_pt_ptr := partition_label Low,
High_pt_ptr := partition_label High,
Low_tcb_ptr := partition_label Low,
High_tcb_ptr := partition_label High,
idle_tcb_ptr := partition_label Low)"
lemma Sys1AgentMap_simps:
"Sys1AgentMap Low_cnode_ptr = partition_label Low"
"Sys1AgentMap High_cnode_ptr = partition_label High"
"Sys1AgentMap ntfn_ptr = partition_label High"
"Sys1AgentMap irq_cnode_ptr = partition_label IRQ0"
"Sys1AgentMap Silc_cnode_ptr = SilcLabel"
"Sys1AgentMap Low_pd_ptr = partition_label Low"
"Sys1AgentMap High_pd_ptr = partition_label High"
"Sys1AgentMap Low_pt_ptr = partition_label Low"
"Sys1AgentMap High_pt_ptr = partition_label High"
"Sys1AgentMap Low_tcb_ptr = partition_label Low"
"Sys1AgentMap High_tcb_ptr = partition_label High"
"Sys1AgentMap idle_tcb_ptr = partition_label Low"
"\<And>p. p \<in> ptr_range shared_page_ptr_virt pageBits
\<Longrightarrow> Sys1AgentMap p = partition_label Low"
unfolding Sys1AgentMap_def
apply simp_all
by (auto simp: s0_ptr_defs ptr_range_def pageBits_def)
definition
Sys1ASIDMap :: "(auth_graph_label subject_label) agent_asid_map"
where
"Sys1ASIDMap \<equiv>
(\<lambda>x. if (asid_high_bits_of x = asid_high_bits_of Low_asid)
then partition_label Low
else if (asid_high_bits_of x = asid_high_bits_of High_asid)
then partition_label High else undefined)"
(* We include 2 domains: Low is associated with domain 0 and High with domain 1; all remaining domains default to High *)
definition Sys1PAS :: "(auth_graph_label subject_label) PAS" where
"Sys1PAS \<equiv> \<lparr>
pasObjectAbs = Sys1AgentMap,
pasASIDAbs = Sys1ASIDMap,
pasIRQAbs = (\<lambda>_. partition_label IRQ0),
pasPolicy = Sys1AuthGraph,
pasSubject = partition_label Low,
pasMayActivate = True,
pasMayEditReadyQueues = True, pasMaySendIrqs = False,
pasDomainAbs = ((\<lambda>_. {partition_label High})(0 := {partition_label Low}))
\<rparr>"
subsubsection \<open>Proof of pas_refined for Sys1\<close>
lemma High_caps_well_formed: "well_formed_cnode_n 10 High_caps"
by (auto simp: High_caps_def well_formed_cnode_n_def split: if_split_asm)
lemma Low_caps_well_formed: "well_formed_cnode_n 10 Low_caps"
by (auto simp: Low_caps_def well_formed_cnode_n_def split: if_split_asm)
lemma Silc_caps_well_formed: "well_formed_cnode_n 10 Silc_caps"
by (auto simp: Silc_caps_def well_formed_cnode_n_def split: if_split_asm)
lemma s0_caps_of_state :
"caps_of_state s0_internal p = Some cap \<Longrightarrow>
cap = NullCap \<or>
(p,cap) \<in>
{ ((Low_cnode_ptr::obj_ref,(the_nat_to_bl_10 1)), ThreadCap Low_tcb_ptr),
((Low_cnode_ptr::obj_ref,(the_nat_to_bl_10 2)), CNodeCap Low_cnode_ptr 10 (the_nat_to_bl_10 2)),
((Low_cnode_ptr::obj_ref,(the_nat_to_bl_10 3)), ArchObjectCap (PageDirectoryCap Low_pd_ptr (Some Low_asid))),
((Low_cnode_ptr::obj_ref,(the_nat_to_bl_10 318)),NotificationCap ntfn_ptr 0 {AllowSend}),
((High_cnode_ptr::obj_ref,(the_nat_to_bl_10 1)), ThreadCap High_tcb_ptr),
((High_cnode_ptr::obj_ref,(the_nat_to_bl_10 2)), CNodeCap High_cnode_ptr 10 (the_nat_to_bl_10 2)),
((High_cnode_ptr::obj_ref,(the_nat_to_bl_10 3)), ArchObjectCap (PageDirectoryCap High_pd_ptr (Some High_asid))),
((High_cnode_ptr::obj_ref,(the_nat_to_bl_10 318)),NotificationCap ntfn_ptr 0 {AllowRecv}) ,
((Silc_cnode_ptr::obj_ref,(the_nat_to_bl_10 2)),CNodeCap Silc_cnode_ptr 10 (the_nat_to_bl_10 2)),
((Silc_cnode_ptr::obj_ref,(the_nat_to_bl_10 318)),NotificationCap ntfn_ptr 0 {AllowSend}),
((Low_tcb_ptr::obj_ref, (tcb_cnode_index 0)), CNodeCap Low_cnode_ptr 10 (the_nat_to_bl_10 2)),
((Low_tcb_ptr::obj_ref, (tcb_cnode_index 1)), ArchObjectCap (PageDirectoryCap Low_pd_ptr (Some Low_asid))),
((Low_tcb_ptr::obj_ref, (tcb_cnode_index 2)), ReplyCap Low_tcb_ptr True {AllowGrant, AllowWrite}),
((Low_tcb_ptr::obj_ref, (tcb_cnode_index 3)), NullCap),
((Low_tcb_ptr::obj_ref, (tcb_cnode_index 4)), NullCap),
((High_tcb_ptr::obj_ref, (tcb_cnode_index 0)), CNodeCap High_cnode_ptr 10 (the_nat_to_bl_10 2)),
((High_tcb_ptr::obj_ref, (tcb_cnode_index 1)), ArchObjectCap (PageDirectoryCap High_pd_ptr (Some High_asid))),
((High_tcb_ptr::obj_ref, (tcb_cnode_index 2)), ReplyCap High_tcb_ptr True {AllowGrant, AllowWrite}),
((High_tcb_ptr::obj_ref, (tcb_cnode_index 3)), NullCap),
((High_tcb_ptr::obj_ref, (tcb_cnode_index 4)), NullCap)} "
supply if_cong[cong]
apply (insert High_caps_well_formed)
apply (insert Low_caps_well_formed)
apply (insert Silc_caps_well_formed)
apply (simp add: caps_of_state_cte_wp_at cte_wp_at_cases s0_internal_def kh0_def kh0_obj_def)
apply (case_tac p, clarsimp)
apply (clarsimp split: if_splits)
apply (clarsimp simp: cte_wp_at_cases tcb_cap_cases_def
split: if_split_asm)+
apply (clarsimp simp: Silc_caps_def split: if_splits)
apply (clarsimp simp: High_caps_def split: if_splits)
apply (clarsimp simp: Low_caps_def cte_wp_at_cases split: if_splits)
done
lemma tcb_states_of_state_s0:
"tcb_states_of_state s0_internal = [High_tcb_ptr \<mapsto> thread_state.BlockedOnNotification ntfn_ptr, Low_tcb_ptr \<mapsto> thread_state.Running, idle_tcb_ptr \<mapsto> thread_state.IdleThreadState ]"
unfolding s0_internal_def tcb_states_of_state_def
apply (rule ext)
apply (simp add: get_tcb_def)
apply (simp add: kh0_def kh0_obj_def)
done
lemma thread_bounds_of_state_s0:
"thread_bound_ntfns s0_internal = Map.empty"
unfolding s0_internal_def thread_bound_ntfns_def
apply (rule ext)
apply (simp add: get_tcb_def)
apply (simp add: kh0_def kh0_obj_def)
done
lemma Sys1_wellformed':
"policy_wellformed (pasPolicy Sys1PAS) False irqs x"
apply (clarsimp simp: Sys1PAS_def policy_wellformed_def Sys1AuthGraph_def)
done
corollary Sys1_wellformed:
"x \<in> range (pasObjectAbs Sys1PAS) \<union> \<Union>(range (pasDomainAbs Sys1PAS)) - {SilcLabel} \<Longrightarrow>
policy_wellformed (pasPolicy Sys1PAS) False irqs x"
by (rule Sys1_wellformed')
lemma Sys1_pas_wellformed:
"pas_wellformed Sys1PAS"
apply (clarsimp simp: Sys1PAS_def policy_wellformed_def Sys1AuthGraph_def)
done
lemma domains_of_state_s0[simp]:
"domains_of_state s0_internal = {(High_tcb_ptr, High_domain), (Low_tcb_ptr, Low_domain), (idle_tcb_ptr, default_domain)}"
apply(rule equalityI)
apply(rule subsetI)
apply clarsimp
apply (erule domains_of_state_aux.cases)
apply (clarsimp simp: s0_internal_def exst0_def ekh0_obj_def split: if_split_asm)
apply clarsimp
apply (force simp: s0_internal_def exst0_def ekh0_obj_def intro: domains_of_state_aux.domtcbs)+
done
lemma Sys1_pas_refined:
"pas_refined Sys1PAS s0_internal"
apply (clarsimp simp: pas_refined_def)
apply (intro conjI)
apply (simp add: Sys1_pas_wellformed)
apply (clarsimp simp: irq_map_wellformed_aux_def s0_internal_def Sys1PAS_def)
apply (clarsimp simp: Sys1AgentMap_def)
apply (clarsimp simp: s0_ptr_defs ptr_range_def pageBits_def cte_level_bits_def)
apply word_bitwise
apply (clarsimp simp: tcb_domain_map_wellformed_aux_def
Sys1PAS_def Sys1AgentMap_def
default_domain_def minBound_word
High_domain_def Low_domain_def cte_level_bits_def)
apply (clarsimp simp: auth_graph_map_def
Sys1PAS_def
state_objs_to_policy_def
state_bits_to_policy_def)
apply (erule state_bits_to_policyp.cases, simp_all, clarsimp)
apply (drule s0_caps_of_state, clarsimp)
apply (simp add: Sys1AuthGraph_def)
apply (elim disjE conjE, auto simp: Sys1AgentMap_simps cap_auth_conferred_def cap_rights_to_auth_def)[1]
apply (drule s0_caps_of_state, clarsimp)
apply (elim disjE, simp_all)[1]
apply (clarsimp simp: state_refs_of_def thread_st_auth_def tcb_states_of_state_s0
Sys1AuthGraph_def Sys1AgentMap_simps split: if_splits)
apply (clarsimp simp: state_refs_of_def thread_st_auth_def thread_bounds_of_state_s0)
apply (simp add: s0_internal_def) (* this is OK because the cdt is empty *)
apply (simp add: s0_internal_def) (* this is OK because the cdt is empty *)
apply (clarsimp simp: state_vrefs_def
vs_refs_no_global_pts_def
s0_internal_def kh0_def Sys1AgentMap_simps
kh0_obj_def comp_def Low_pt'_def High_pt'_def
pte_ref_def pde_ref2_def Low_pd'_def High_pd'_def
Sys1AuthGraph_def ptr_range_def vspace_cap_rights_to_auth_def
vm_read_only_def vm_read_write_def
dest!: graph_ofD
split: if_splits)
apply (rule Sys1AgentMap_simps(13))
apply (simp add: ptr_range_def pageBits_def shared_page_ptr_phys_def)
apply (erule notE)
apply (rule Sys1AgentMap_simps(13)[symmetric])
apply (simp add: ptr_range_def pageBits_def shared_page_ptr_phys_def)
apply (rule subsetI, clarsimp)
apply (erule state_asids_to_policy_aux.cases)
apply clarsimp
apply (drule s0_caps_of_state, clarsimp)
apply (simp add: Sys1AuthGraph_def Sys1PAS_def Sys1ASIDMap_def)
apply (elim disjE conjE, simp_all add: Sys1AgentMap_simps cap_auth_conferred_def
cap_rights_to_auth_def Low_asid_def High_asid_def
asid_low_bits_def asid_high_bits_of_def )[1]
apply (clarsimp simp: state_vrefs_def
vs_refs_no_global_pts_def
s0_internal_def kh0_def Sys1AgentMap_simps
kh0_obj_def comp_def Low_pt'_def High_pt'_def
pte_ref_def pde_ref2_def Low_pd'_def High_pd'_def
Sys1AuthGraph_def ptr_range_def
dest!: graph_ofD
split: if_splits)
apply (clarsimp simp: s0_internal_def arch_state0_def)
apply (rule subsetI, clarsimp)
apply (erule state_irqs_to_policy_aux.cases)
apply (simp add: Sys1AuthGraph_def Sys1PAS_def Sys1ASIDMap_def)
apply (drule s0_caps_of_state)
apply (simp add: Sys1AuthGraph_def Sys1PAS_def Sys1ASIDMap_def)
apply (elim disjE conjE, simp_all add: Sys1AgentMap_simps cap_auth_conferred_def cap_rights_to_auth_def Low_asid_def High_asid_def
asid_low_bits_def asid_high_bits_of_def )[1]
done
lemma Sys1_pas_cur_domain:
"pas_cur_domain Sys1PAS s0_internal"
by (simp add: s0_internal_def exst0_def Sys1PAS_def)
lemma Sys1_current_subject_idemp:
"Sys1PAS\<lparr>pasSubject := the_elem (pasDomainAbs Sys1PAS (cur_domain s0_internal))\<rparr> = Sys1PAS"
apply (simp add: Sys1PAS_def s0_internal_def exst0_def)
done
lemma pasMaySendIrqs_Sys1PAS[simp]:
"pasMaySendIrqs Sys1PAS = False"
by(auto simp: Sys1PAS_def)
lemma Sys1_pas_domains_distinct:
"pas_domains_distinct Sys1PAS"
apply (clarsimp simp: Sys1PAS_def pas_domains_distinct_def)
done
lemma Sys1_pas_wellformed_noninterference:
"pas_wellformed_noninterference Sys1PAS"
apply (simp add: pas_wellformed_noninterference_def)
apply (intro conjI ballI allI)
apply (blast intro: Sys1_wellformed)
apply (clarsimp simp: Sys1PAS_def policy_wellformed_def Sys1AuthGraph_def)
apply (rule Sys1_pas_domains_distinct)
done
lemma silc_inv_s0:
"silc_inv Sys1PAS s0_internal s0_internal"
apply (clarsimp simp: silc_inv_def)
apply (rule conjI, simp add: Sys1PAS_def)
apply (rule conjI)
apply (clarsimp simp: Sys1PAS_def Sys1AgentMap_def
s0_internal_def kh0_def obj_at_def kh0_obj_def
is_cap_table_def Silc_caps_well_formed split: if_split_asm)
apply (rule conjI)
apply (clarsimp simp: Sys1PAS_def Sys1AuthGraph_def)
apply (rule conjI)
apply clarsimp
apply (rule_tac x=Silc_cnode_ptr in exI)
apply (rule conjI)
apply (rule_tac x="the_nat_to_bl_10 318" in exI)
apply (clarsimp simp: slots_holding_overlapping_caps_def2)
apply (case_tac "cap = NullCap")
apply clarsimp
apply (simp add: cte_wp_at_cases s0_internal_def kh0_def kh0_obj_def)
apply (case_tac a, clarsimp)
apply (clarsimp split: if_splits)
apply ((clarsimp simp: intra_label_cap_def cte_wp_at_cases tcb_cap_cases_def
cap_points_to_label_def split: if_split_asm)+)[8]
apply (clarsimp simp: intra_label_cap_def cap_points_to_label_def)
apply (drule cte_wp_at_caps_of_state' s0_caps_of_state)+
apply ((erule disjE |
clarsimp simp: Sys1PAS_def Sys1AgentMap_simps
the_nat_to_bl_def nat_to_bl_def ctes_wp_at_def cte_wp_at_cases
s0_internal_def kh0_def kh0_obj_def Silc_caps_well_formed obj_refs_def
| simp add: Silc_caps_def)+)[1]
apply (simp add: Sys1PAS_def Sys1AgentMap_simps)
apply (intro conjI)
apply (clarsimp simp: all_children_def s0_internal_def silc_dom_equiv_def equiv_for_refl)
apply (clarsimp simp: all_children_def s0_internal_def silc_dom_equiv_def equiv_for_refl)
apply (clarsimp simp: Invariants_AI.cte_wp_at_caps_of_state )
by (auto simp:is_transferable.simps dest:s0_caps_of_state)
lemma only_timer_irq_s0:
"only_timer_irq timer_irq s0_internal"
apply (clarsimp simp: only_timer_irq_def s0_internal_def irq_is_recurring_def is_irq_at_def
irq_at_def Let_def irq_oracle_def machine_state0_def timer_irq_def)
apply presburger
done
lemma domain_sep_inv_s0:
"domain_sep_inv False s0_internal s0_internal"
apply (clarsimp simp: domain_sep_inv_def)
apply (force dest: cte_wp_at_caps_of_state' s0_caps_of_state
| rule conjI allI | clarsimp simp: s0_internal_def)+
done
lemma only_timer_irq_inv_s0:
"only_timer_irq_inv timer_irq s0_internal s0_internal"
by (simp add: only_timer_irq_inv_def only_timer_irq_s0 domain_sep_inv_s0)
lemma Sys1_guarded_pas_domain:
"guarded_pas_domain Sys1PAS s0_internal"
by (clarsimp simp: guarded_pas_domain_def Sys1PAS_def s0_internal_def
exst0_def Sys1AgentMap_simps)
lemma s0_valid_domain_list:
"valid_domain_list s0_internal"
by (clarsimp simp: valid_domain_list_2_def s0_internal_def exst0_def)
definition
"s0 \<equiv> ((if ct_idle s0_internal then idle_context s0_internal else s0_context,s0_internal),KernelExit)"
subsubsection \<open>einvs\<close>
lemma well_formed_cnode_n_s0_caps[simp]:
"well_formed_cnode_n 10 High_caps"
"well_formed_cnode_n 10 Low_caps"
"well_formed_cnode_n 10 Silc_caps"
"\<not> well_formed_cnode_n 10 [[] \<mapsto> NullCap]"
apply ((force simp: High_caps_def Low_caps_def Silc_caps_def well_formed_cnode_n_def
the_nat_to_bl_def nat_to_bl_def dom_empty_cnode)+)[3]
apply (clarsimp simp: well_formed_cnode_n_def)
apply (drule eqset_imp_iff[where x="[]"])
apply simp
done
lemma valid_caps_s0[simp]:
"s0_internal \<turnstile> ThreadCap Low_tcb_ptr"
"s0_internal \<turnstile> ThreadCap High_tcb_ptr"
"s0_internal \<turnstile> CNodeCap Low_cnode_ptr 10 (the_nat_to_bl_10 2)"
"s0_internal \<turnstile> CNodeCap High_cnode_ptr 10 (the_nat_to_bl_10 2)"
"s0_internal \<turnstile> CNodeCap Silc_cnode_ptr 10 (the_nat_to_bl_10 2)"
"s0_internal \<turnstile> ArchObjectCap (PageDirectoryCap Low_pd_ptr (Some Low_asid))"
"s0_internal \<turnstile> ArchObjectCap (PageDirectoryCap High_pd_ptr (Some High_asid))"
"s0_internal \<turnstile> NotificationCap ntfn_ptr 0 {AllowWrite}"
"s0_internal \<turnstile> NotificationCap ntfn_ptr 0 {AllowRead}"
"s0_internal \<turnstile> ReplyCap Low_tcb_ptr True {AllowGrant,AllowWrite}"
"s0_internal \<turnstile> ReplyCap High_tcb_ptr True {AllowGrant,AllowWrite}"
by (simp_all add: valid_cap_def s0_internal_def s0_ptr_defs cap_aligned_def is_aligned_def
word_bits_def cte_level_bits_def the_nat_to_bl_def
nat_to_bl_def Low_asid_def High_asid_def asid_low_bits_def asid_bits_def
obj_at_def kh0_def kh0_obj_def is_tcb_def is_cap_table_def a_type_def
is_ntfn_def)
lemma valid_obj_s0[simp]:
"valid_obj Low_cnode_ptr Low_cnode s0_internal"
"valid_obj High_cnode_ptr High_cnode s0_internal"
"valid_obj Silc_cnode_ptr Silc_cnode s0_internal"
"valid_obj ntfn_ptr ntfn s0_internal"
"valid_obj irq_cnode_ptr irq_cnode s0_internal"
"valid_obj Low_pd_ptr Low_pd s0_internal"
"valid_obj High_pd_ptr High_pd s0_internal"
"valid_obj Low_pt_ptr Low_pt s0_internal"
"valid_obj High_pt_ptr High_pt s0_internal"
"valid_obj Low_tcb_ptr Low_tcb s0_internal"
"valid_obj High_tcb_ptr High_tcb s0_internal"
"valid_obj idle_tcb_ptr idle_tcb s0_internal"
"valid_obj init_global_pd (ArchObj (PageDirectory ((\<lambda>_. InvalidPDE)
(ucast (kernel_base >> 20) := SectionPDE (addrFromPPtr kernel_base) {} 0 {}))))
s0_internal"
"valid_obj init_globals_frame (ArchObj (DataPage False ARMSmallPage)) s0_internal"
apply (simp_all add: valid_obj_def kh0_obj_def)
apply (simp add: valid_cs_def Low_caps_ran High_caps_ran Silc_caps_ran
valid_cs_size_def word_bits_def cte_level_bits_def)+
apply (simp add: valid_ntfn_def obj_at_def s0_internal_def kh0_def
High_tcb_def is_tcb_def)
apply (simp add: valid_cs_def valid_cs_size_def word_bits_def cte_level_bits_def)
apply (simp add: well_formed_cnode_n_def)
apply (fastforce simp: Low_pd'_def High_pd'_def Low_pt'_def High_pt'_def
Low_pt_ptr_def High_pt_ptr_def
shared_page_ptr_phys_def shared_page_ptr_virt_def
valid_vm_rights_def vm_kernel_only_def
kernel_base_def pageBits_def pt_bits_def vmsz_aligned_def
is_aligned_def[THEN iffD2]
is_aligned_addrFromPPtr_n)+
apply (clarsimp simp: valid_tcb_def tcb_cap_cases_def is_master_reply_cap_def
valid_ipc_buffer_cap_def valid_tcb_state_def valid_arch_tcb_def
| simp add: obj_at_def s0_internal_def kh0_def kh0_obj_def is_ntfn_def
is_valid_vtable_root_def)+
apply (simp add: valid_vm_rights_def vm_kernel_only_def
kernel_base_def pageBits_def vmsz_aligned_def
is_aligned_def[THEN iffD2]
is_aligned_addrFromPPtr_n)
done
lemma valid_objs_s0:
"valid_objs s0_internal"
apply (clarsimp simp: valid_objs_def)
apply (subst(asm) s0_internal_def kh0_def)+
apply (simp split: if_split_asm)
apply force+
apply (clarsimp simp: valid_obj_def valid_cs_def empty_cnode_def valid_cs_size_def ran_def
cte_level_bits_def word_bits_def well_formed_cnode_n_def dom_def)
done
lemma pspace_aligned_s0:
"pspace_aligned s0_internal"
apply (clarsimp simp: pspace_aligned_def s0_internal_def)
apply (drule kh0_SomeD)
apply (erule disjE
| (subst is_aligned_def,
fastforce simp: s0_ptr_defs cte_level_bits_def kh0_def kh0_obj_def))+
apply (clarsimp simp: cte_level_bits_def)
apply (drule irq_node_offs_range_correct)
apply (clarsimp simp: s0_ptr_defs cte_level_bits_def)
apply (rule is_aligned_add[OF _ is_aligned_shift])
apply (simp add: is_aligned_def s0_ptr_defs cte_level_bits_def)
done
lemma pspace_distinct_s0:
"pspace_distinct s0_internal"
apply (clarsimp simp: pspace_distinct_def s0_internal_def)
apply (drule kh0_SomeD)+
apply (case_tac "x \<in> irq_node_offs_range \<and> y \<in> irq_node_offs_range")
apply clarsimp
apply (drule irq_node_offs_range_correct)+
apply clarsimp
apply (clarsimp simp: s0_ptr_defs cte_level_bits_def)
apply (case_tac "(ucast irq << 4) < (ucast irqa << 4)")
apply (frule udvd_decr'[where K="0x10::32 word" and ua=0, simplified])
apply (simp add: shiftl_t2n uint_word_ariths)
apply (subst mod_mult_mult1[where c="2^4" and b="2^28", simplified])
apply simp
apply (simp add: shiftl_t2n uint_word_ariths)
apply (subst mod_mult_mult1[where c="2^4" and b="2^28", simplified])
apply simp
apply (simp add: shiftl_def uint_shiftl word_size bintrunc_shiftl)
apply (simp add: shiftl_int_def take_bit_eq_mod push_bit_eq_mult)
apply (frule_tac y="ucast irq << 4" in word_plus_mono_right[where x="0xE000800F"])
apply (simp add: shiftl_t2n)
apply (case_tac "(1::32 word) \<le> ucast irqa")
apply (drule_tac i=1 and k="0x10" in word_mult_le_mono1)
apply simp
apply (cut_tac x=irqa and 'a=32 in ucast_less)
apply simp
apply (simp add: word_less_nat_alt)
apply (simp add: mult.commute)
apply (drule_tac y="0x10" and x="0xE0007FFF" in word_plus_mono_right)
apply (rule_tac sz=28 in machine_word_plus_mono_right_split)
apply (simp add: unat_word_ariths mask_def)
apply (cut_tac x=irqa and 'a=32 in ucast_less)
apply simp
apply (simp add: word_less_nat_alt)
apply (simp add: word_bits_def)
apply simp
apply (simp add: lt1_neq0)
apply (drule(1) order_trans_rules(23))
apply clarsimp
apply (drule_tac a="0xE0008000 + (ucast irqa << 4)" and b="ucast irqa << 4"
and c="0xE0007FFF + (ucast irqa << 4)" and d="ucast irqa << 4" in word_sub_mono)
apply simp
apply simp
apply (rule_tac sz=28 in machine_word_plus_mono_right_split)
apply (simp add: unat_word_ariths mask_def shiftl_t2n)
apply (cut_tac x=irqa and 'a=32 in ucast_less)
apply simp
apply (simp add: word_less_nat_alt)
apply (simp add: word_bits_def)
apply simp
apply (rule_tac sz=28 in machine_word_plus_mono_right_split)
apply (simp add: unat_word_ariths mask_def shiftl_t2n)
apply (cut_tac x=irqa and 'a=32 in ucast_less)
apply simp
apply (simp add: word_less_nat_alt)
apply (simp add: word_bits_def)
apply simp
apply (case_tac "(ucast irq << 4) > (ucast irqa << 4)")
apply (frule udvd_decr'[where K="0x10::32 word" and ua=0, simplified])
apply (simp add: shiftl_t2n uint_word_ariths)
apply (subst mod_mult_mult1[where c="2^4" and b="2^28", simplified])
apply simp
apply (simp add: shiftl_t2n uint_word_ariths)
apply (subst mod_mult_mult1[where c="2^4" and b="2^28", simplified])
apply simp
apply (simp add: shiftl_def uint_shiftl word_size bintrunc_shiftl)
apply (simp add: shiftl_int_def take_bit_eq_mod push_bit_eq_mult)
apply (frule_tac y="ucast irqa << 4" in word_plus_mono_right[where x="0xE000800F"])
apply (simp add: shiftl_t2n)
apply (case_tac "(1::32 word) \<le> ucast irq")
apply (drule_tac i=1 and k="0x10" in word_mult_le_mono1)
apply simp
apply (cut_tac x=irq and 'a=32 in ucast_less)
apply simp
apply (simp add: word_less_nat_alt)
apply (simp add: mult.commute)
apply (drule_tac y="0x10" and x="0xE0007FFF" in word_plus_mono_right)
apply (rule_tac sz=28 in machine_word_plus_mono_right_split)
apply (simp add: unat_word_ariths mask_def)
apply (cut_tac x=irq and 'a=32 in ucast_less)
apply simp
apply (simp add: word_less_nat_alt)
apply (simp add: word_bits_def)
apply simp
apply (simp add: lt1_neq0)
apply (drule(1) order_trans_rules(23))
apply clarsimp
apply (drule_tac a="0xE0008000 + (ucast irq << 4)" and b="ucast irq << 4"
and c="0xE0007FFF + (ucast irq << 4)" and d="ucast irq << 4" in word_sub_mono)
apply simp
apply simp
apply (rule_tac sz=28 in machine_word_plus_mono_right_split)
apply (simp add: unat_word_ariths mask_def shiftl_t2n)
apply (cut_tac x=irq and 'a=32 in ucast_less)
apply simp
apply (simp add: word_less_nat_alt)
apply (simp add: word_bits_def)
apply simp
apply (rule_tac sz=28 in machine_word_plus_mono_right_split)
apply (simp add: unat_word_ariths mask_def shiftl_t2n)
apply (cut_tac x=irq and 'a=32 in ucast_less)
apply simp
apply (simp add: word_less_nat_alt)
apply (simp add: word_bits_def)
apply simp
apply simp
by ((simp | erule disjE | clarsimp simp: kh0_obj_def cte_level_bits_def s0_ptr_defs
| clarsimp simp: irq_node_offs_range_def s0_ptr_defs,
drule_tac x="0xF" in word_plus_strict_mono_right, simp, simp add: add.commute,
drule(1) notE[rotated, OF less_trans, OF _ _ leD, rotated 2] |
drule(1) notE[rotated, OF le_less_trans, OF _ _ leD, rotated 2], simp, assumption)+)
lemma valid_pspace_s0[simp]:
"valid_pspace s0_internal"
apply (simp add: valid_pspace_def pspace_distinct_s0 pspace_aligned_s0 valid_objs_s0)
apply (rule conjI)
apply (clarsimp simp: if_live_then_nonz_cap_def)
apply (subst(asm) s0_internal_def)
apply (clarsimp simp: live_def hyp_live_def obj_at_def kh0_def kh0_obj_def s0_ptr_defs split: if_split_asm)
apply (clarsimp simp: ex_nonz_cap_to_def)
apply (rule_tac x="High_cnode_ptr" in exI)
apply (rule_tac x="the_nat_to_bl_10 1" in exI)
apply (force simp: cte_wp_at_cases s0_internal_def kh0_def kh0_obj_def s0_ptr_defs tcb_cap_cases_def High_caps_def the_nat_to_bl_def nat_to_bl_def well_formed_cnode_n_def dom_empty_cnode)
apply (clarsimp simp: ex_nonz_cap_to_def)
apply (rule_tac x="Low_cnode_ptr" in exI)
apply (rule_tac x="the_nat_to_bl_10 1" in exI)
apply (force simp: cte_wp_at_cases s0_internal_def kh0_def kh0_obj_def s0_ptr_defs tcb_cap_cases_def Low_caps_def the_nat_to_bl_def nat_to_bl_def well_formed_cnode_n_def dom_empty_cnode)
apply (clarsimp simp: ex_nonz_cap_to_def)
apply (rule_tac x="High_cnode_ptr" in exI)
apply (rule_tac x="the_nat_to_bl_10 318" in exI)
apply (force simp: cte_wp_at_cases s0_internal_def kh0_def kh0_obj_def s0_ptr_defs tcb_cap_cases_def High_caps_def the_nat_to_bl_def nat_to_bl_def well_formed_cnode_n_def dom_empty_cnode)
apply (rule conjI)
apply (simp add: Invariants_AI.cte_wp_at_caps_of_state zombies_final_def)
apply (force dest: s0_caps_of_state simp: is_zombie_def)
apply (rule conjI)
apply (clarsimp simp: sym_refs_def state_refs_of_def state_hyp_refs_of_def s0_internal_def)
apply (subst(asm) kh0_def)
apply (clarsimp split: if_split_asm)
apply (simp add: refs_of_def kh0_def s0_ptr_defs kh0_obj_def)+
apply (clarsimp simp: sym_refs_def state_hyp_refs_of_def s0_internal_def)
apply (subst(asm) kh0_def)
apply (clarsimp split: if_split_asm)
by (simp add: refs_of_def kh0_def s0_ptr_defs kh0_obj_def)+
lemma descendants_s0[simp]:
"descendants_of (a, b) (cdt s0_internal) = {}"
apply (rule set_eqI)
apply clarsimp
apply (drule descendants_of_NoneD[rotated])
apply (simp add: s0_internal_def)+
done
lemma valid_mdb_s0[simp]:
"valid_mdb s0_internal"
apply (simp add: valid_mdb_def reply_mdb_def)
apply (intro conjI)
apply (clarsimp simp: mdb_cte_at_def s0_internal_def)
apply (force dest: s0_caps_of_state simp: untyped_mdb_def)
apply (clarsimp simp: descendants_inc_def)
apply (clarsimp simp: no_mloop_def s0_internal_def cdt_parent_defs)
apply (clarsimp simp: untyped_inc_def)
apply (drule s0_caps_of_state)+
apply ((simp | erule disjE)+)[1]
apply (force dest: s0_caps_of_state simp: ut_revocable_def)
apply (force dest: s0_caps_of_state simp: irq_revocable_def)
apply (clarsimp simp: reply_master_revocable_def)
apply (drule s0_caps_of_state)
apply ((simp add: is_master_reply_cap_def s0_internal_def s0_ptr_defs | erule disjE)+)[1]
apply (force dest: s0_caps_of_state simp: reply_caps_mdb_def)
apply (clarsimp simp: reply_masters_mdb_def)
apply (simp add: s0_internal_def)
done
lemma valid_ioc_s0[simp]:
"valid_ioc s0_internal"
by (clarsimp simp: cte_wp_at_cases tcb_cap_cases_def valid_ioc_def
s0_internal_def kh0_def kh0_obj_def split: if_split_asm)+
lemma valid_idle_s0[simp]:
"valid_idle s0_internal"
apply (clarsimp simp: valid_idle_def st_tcb_at_tcb_states_of_state_eq
thread_bounds_of_state_s0
identity_eq[symmetric] tcb_states_of_state_s0
valid_arch_idle_def)
by (simp add: s0_ptr_defs s0_internal_def idle_thread_ptr_def pred_tcb_at_def obj_at_def kh0_def idle_tcb_def)
lemma only_idle_s0[simp]:
"only_idle s0_internal"
apply (clarsimp simp: only_idle_def st_tcb_at_tcb_states_of_state_eq
identity_eq[symmetric] tcb_states_of_state_s0)
apply (simp add: s0_ptr_defs s0_internal_def)
done
lemma if_unsafe_then_cap_s0[simp]:
"if_unsafe_then_cap s0_internal"
apply (clarsimp simp: if_unsafe_then_cap_def ex_cte_cap_wp_to_def)
apply (drule s0_caps_of_state)
apply (case_tac "a=Low_cnode_ptr")
apply (rule_tac x=Low_tcb_ptr in exI, rule_tac x="tcb_cnode_index 0" in exI)
apply ((clarsimp simp: cte_wp_at_cases s0_internal_def kh0_def kh0_obj_def
tcb_cap_cases_def the_nat_to_bl_def nat_to_bl_def
Low_caps_def | erule disjE)+)[1]
apply (case_tac "a=High_cnode_ptr")
apply (rule_tac x=High_tcb_ptr in exI, rule_tac x="tcb_cnode_index 0" in exI)
apply ((clarsimp simp: cte_wp_at_cases s0_internal_def kh0_def kh0_obj_def
tcb_cap_cases_def the_nat_to_bl_def nat_to_bl_def
High_caps_def | erule disjE)+)[1]
apply (case_tac "a=Low_tcb_ptr")
apply (rule_tac x=Low_cnode_ptr in exI, rule_tac x="the_nat_to_bl_10 1" in exI)
apply ((clarsimp simp: cte_wp_at_cases s0_internal_def kh0_def kh0_obj_def
tcb_cap_cases_def the_nat_to_bl_def nat_to_bl_def
Low_caps_def well_formed_cnode_n_def dom_empty_cnode
| erule disjE | force)+)[1]
apply (case_tac "a=High_tcb_ptr")
apply (rule_tac x=High_cnode_ptr in exI, rule_tac x="the_nat_to_bl_10 1" in exI)
apply ((clarsimp simp: cte_wp_at_cases s0_internal_def kh0_def kh0_obj_def
tcb_cap_cases_def the_nat_to_bl_def nat_to_bl_def
High_caps_def well_formed_cnode_n_def dom_empty_cnode
| erule disjE | force)+)[1]
apply (rule_tac x=Silc_cnode_ptr in exI, rule_tac x="the_nat_to_bl_10 2" in exI)
apply ((clarsimp simp: cte_wp_at_cases s0_internal_def kh0_def kh0_obj_def
tcb_cap_cases_def the_nat_to_bl_def nat_to_bl_def
Silc_caps_def well_formed_cnode_n_def dom_empty_cnode
| erule disjE | force)+)[1]
done
lemma valid_reply_caps_s0[simp]:
"valid_reply_caps s0_internal"
apply (clarsimp simp: valid_reply_caps_def)
apply (rule conjI)
apply (force dest: s0_caps_of_state
simp: Invariants_AI.cte_wp_at_caps_of_state has_reply_cap_def is_reply_cap_to_def)
apply (clarsimp simp: unique_reply_caps_def)
apply (drule s0_caps_of_state)+
apply (erule disjE | simp add: is_reply_cap_def)+
done
lemma valid_reply_masters_s0[simp]:
"valid_reply_masters s0_internal"
apply (clarsimp simp: valid_reply_masters_def)
apply (force dest: s0_caps_of_state
simp: Invariants_AI.cte_wp_at_caps_of_state is_master_reply_cap_to_def)
done
lemma valid_global_refs_s0[simp]:
"valid_global_refs s0_internal"
apply (clarsimp simp: valid_global_refs_def valid_refs_def)
apply (simp add: Invariants_AI.cte_wp_at_caps_of_state)
apply clarsimp
apply (drule s0_caps_of_state)
apply (clarsimp simp: global_refs_def s0_internal_def arch_state0_def)
apply (erule disjE | simp add: cap_range_def
| clarsimp simp: irq_node_offs_distinct[symmetric]
| simp only: s0_ptr_defs, force)+
done
lemma valid_arch_state_s0[simp]:
"valid_arch_state s0_internal"
apply (clarsimp simp: valid_arch_state_def s0_internal_def arch_state0_def)
apply (intro conjI)
apply (clarsimp simp: obj_at_def kh0_def)
apply (simp add: valid_asid_table_def)
apply (clarsimp simp: obj_at_def kh0_def a_type_def)
apply (simp add: valid_global_pts_def)
apply (simp add: is_inv_def)
done
lemma valid_irq_node_s0[simp]:
"valid_irq_node s0_internal"
apply (clarsimp simp: valid_irq_node_def)
apply (rule conjI)
apply (simp add: s0_internal_def)
apply (rule injI)
apply simp
apply (rule ccontr)
apply (rule_tac bnd="0x400" and 'a=32 in shift_distinct_helper[rotated 3])
apply assumption
apply (simp add: cte_level_bits_def)
apply (simp add: cte_level_bits_def)
apply (rule ucast_less[where 'b=10, simplified])
apply simp
apply (rule ucast_less[where 'b=10, simplified])
apply simp
apply (rule notI)
apply (drule ucast_up_inj)
apply simp
apply simp
apply (clarsimp simp: obj_at_def s0_internal_def)
apply (force simp: kh0_def is_cap_table_def well_formed_cnode_n_def dom_empty_cnode)
done
lemma valid_irq_handlers_s0[simp]:
"valid_irq_handlers s0_internal"
apply (clarsimp simp: valid_irq_handlers_def ran_def)
apply (force dest: s0_caps_of_state)
done
lemma valid_irq_state_s0[simp]:
"valid_irq_states s0_internal"
apply (clarsimp simp: valid_irq_states_def valid_irq_masks_def s0_internal_def machine_state0_def)
done
lemma valid_machine_state_s0[simp]:
"valid_machine_state s0_internal"
apply (clarsimp simp: valid_machine_state_def s0_internal_def machine_state0_def in_user_frame_def obj_at_def const_def)
done
lemma valid_arch_objs_s0[simp]:
"valid_vspace_objs s0_internal"
apply (clarsimp simp: valid_vspace_objs_def obj_at_def s0_internal_def)
apply (drule kh0_SomeD)
apply (erule disjE | clarsimp simp: addrFromPPtr_def
| erule vs_lookupE, force simp: arch_state0_def vs_asid_refs_def)+
done
lemma valid_arch_caps_s0[simp]:
"valid_arch_caps s0_internal"
apply (clarsimp simp: valid_arch_caps_def)
apply (intro conjI)
apply (clarsimp simp: valid_vs_lookup_def vs_lookup_pages_def vs_asid_refs_def
s0_internal_def arch_state0_def)
apply (clarsimp simp: valid_table_caps_def is_pd_cap_def is_pt_cap_def)
apply (drule s0_caps_of_state)
apply (erule disjE | simp)+
apply (clarsimp simp: unique_table_caps_def is_pd_cap_def is_pt_cap_def)
apply (drule s0_caps_of_state)+
apply (erule disjE | simp)+
apply (clarsimp simp: unique_table_refs_def table_cap_ref_def)
apply (drule s0_caps_of_state)+
by auto
lemma valid_global_objs_s0[simp]:
"valid_global_objs s0_internal"
apply (clarsimp simp: valid_global_objs_def s0_internal_def arch_state0_def)
apply (force simp: valid_vso_at_def obj_at_def kh0_def kh0_obj_def
is_aligned_addrFromPPtr kernel_base_aligned_pageBits
kernel_mapping_slots_def empty_table_def pde_ref_def valid_pde_mappings_def)
done
lemma valid_kernel_mappings_s0[simp]:
"valid_kernel_mappings s0_internal"
apply (clarsimp simp: valid_kernel_mappings_def s0_internal_def ran_def
valid_kernel_mappings_if_pd_def split: kernel_object.splits
arch_kernel_obj.splits)
apply (drule kh0_SomeD)
apply (clarsimp simp: arch_state0_def kernel_mapping_slots_def)
apply (erule disjE | simp add: pde_ref_def s0_ptr_defs kh0_obj_def High_pd'_def Low_pd'_def
split: if_split_asm pde.splits)+
done
lemma equal_kernel_mappings_s0[simp]:
"equal_kernel_mappings s0_internal"
apply (clarsimp simp: equal_kernel_mappings_def obj_at_def s0_internal_def)
apply (drule kh0_SomeD)+
apply (force simp: kh0_obj_def High_pd'_def Low_pd'_def s0_ptr_defs kernel_mapping_slots_def)
done
lemma valid_asid_map_s0[simp]:
"valid_asid_map s0_internal"
apply (clarsimp simp: valid_asid_map_def s0_internal_def arch_state0_def)
done
lemma valid_global_pd_mappings_s0[simp]:
"valid_global_vspace_mappings s0_internal"
apply (clarsimp simp: valid_global_vspace_mappings_def s0_internal_def arch_state0_def
obj_at_def kh0_def kh0_obj_def s0_ptr_defs valid_pd_kernel_mappings_def
valid_pde_kernel_mappings_def pde_mapping_bits_def mask_def)
apply (rule conjI)
apply force
apply clarsimp
apply (subgoal_tac "xa - 0xFFFFF \<le> ucast x << 20")
apply (case_tac "ucast x << 20 > (0xE0000000::32 word)")
apply (subgoal_tac "(0xE0100000::32 word) \<le> ucast x << 20")
apply ((drule(1) order_trans_rules(23))+, force)
apply (simp add: shiftl_t2n)
apply (cut_tac p="0xE0000000::32 word" and n=20 and m=20 and q="0x100000 * ucast x" in word_plus_power_2_offset_le)
apply (simp add: is_aligned_def)
apply (simp add: is_aligned_def unat_word_ariths)
apply (subst mod_mult_mult1[where c="2^20" and b="2^12", simplified])
apply simp
apply simp
apply simp
apply simp
apply simp
apply (case_tac "ucast x << 20 < (0xE0000000::32 word)")
apply (subgoal_tac "(0xE0000000::32 word) - 0x100000 \<ge> ucast x << 20")
apply (subgoal_tac "0xFFFFF + (ucast x << 20) \<le> 0xDFFFFFFF")
apply (drule_tac y="0xFFFFF + (ucast x << 20)" and z="0xDFFFFFFF::32 word" in order_trans_rules(23))
apply simp
apply ((drule(1) order_trans_rules(23))+, force)
apply (simp add: add.commute)
apply (simp add: word_plus_mono_left[where x="0xFFFFF" and z="0xDFF00000", simplified])
apply (simp add: shiftl_t2n)
apply (rule udvd_decr'[where K="0x100000" and q="0xE0000000" and ua=0, simplified])
apply simp
apply (simp add: uint_word_ariths)
apply (subst mod_mult_mult1[where c="2^20" and b="2^12", simplified])
apply simp
apply simp
apply simp
apply (erule notE)
apply (cut_tac x="ucast x::32 word" and n=20 in shiftl_shiftr_id)
apply simp
apply (simp add: ucast_less[where 'b=12, simplified])
apply simp
apply (rule ucast_up_inj[where 'b=32])
apply simp
apply simp
apply (drule_tac c="0xFFFFF + (ucast x << 20)" and d="0xFFFFF" and b="0xFFFFF" in word_sub_mono)
apply simp
apply (rule word_sub_le)
apply (rule order_trans_rules(23)[rotated], assumption)
apply simp
apply (simp add: add.commute)
apply (rule no_plus_overflow_neg)
apply simp
apply (drule_tac x="ucast x << 20" in order_trans_rules(23), assumption)
apply (simp add: le_less_trans)
apply simp
done
lemma pspace_in_kernel_window_s0[simp]:
"pspace_in_kernel_window s0_internal"
apply (clarsimp simp: pspace_in_kernel_window_def s0_internal_def)
apply (drule kh0_SomeD)
apply (erule disjE | simp add: arch_state0_def kh0_obj_def s0_ptr_defs mask_def
irq_node_offs_range_def cte_level_bits_def | rule conjI
| rule order_trans_rules(23)[rotated] order_trans_rules(23), force, force)+
apply (force intro: order_trans_rules(23)[rotated])
apply clarsimp
apply (drule_tac x=y in le_less_trans)
apply (rule neq_le_trans[rotated])
apply (rule word_plus_mono_right)
apply (rule less_imp_le)
apply simp+
apply (force intro: less_imp_le less_le_trans)
done
lemma cap_refs_in_kernel_window_s0[simp]:
"cap_refs_in_kernel_window s0_internal"
apply (clarsimp simp: cap_refs_in_kernel_window_def valid_refs_def cap_range_def
Invariants_AI.cte_wp_at_caps_of_state)
apply (drule s0_caps_of_state)
apply (erule disjE | simp add: arch_state0_def s0_internal_def s0_ptr_defs mask_def)+
done
lemma cur_tcb_s0[simp]:
"cur_tcb s0_internal"
by (simp add: cur_tcb_def s0_ptr_defs s0_internal_def kh0_def kh0_obj_def obj_at_def is_tcb_def)
lemma valid_list_s0[simp]:
"valid_list s0_internal"
apply (simp add: valid_list_2_def s0_internal_def exst0_def const_def)
done
lemma valid_sched_s0[simp]:
"valid_sched s0_internal"
apply (simp add: valid_sched_def s0_internal_def exst0_def)
apply (intro conjI)
apply (clarsimp simp: valid_etcbs_def s0_ptr_defs kh0_def kh0_obj_def is_etcb_at'_def
st_tcb_at_kh_def obj_at_kh_def obj_at_def)
apply (clarsimp simp: const_def)
apply (clarsimp simp: const_def)
apply (clarsimp simp: valid_sched_action_def is_activatable_def st_tcb_at_kh_def
obj_at_kh_def obj_at_def kh0_def kh0_obj_def s0_ptr_defs)
apply (clarsimp simp: ct_in_cur_domain_def in_cur_domain_def etcb_at'_def ekh0_obj_def
s0_ptr_defs)
apply (clarsimp simp: const_def valid_blocked_def st_tcb_at_kh_def obj_at_kh_def obj_at_def
kh0_def kh0_obj_def split: if_split_asm)
apply (clarsimp simp: valid_idle_etcb_def etcb_at'_def ekh0_obj_def s0_ptr_defs idle_thread_ptr_def)
done
lemma respects_device_trivial:
"pspace_respects_device_region s0_internal"
"cap_refs_respects_device_region s0_internal"
apply (clarsimp simp: s0_internal_def pspace_respects_device_region_def machine_state0_def device_mem_def
in_device_frame_def kh0_obj_def obj_at_kh_def obj_at_def kh0_def
split: if_splits)[1]
apply fastforce
apply (clarsimp simp: cap_refs_respects_device_region_def Invariants_AI.cte_wp_at_caps_of_state
cap_range_respects_device_region_def machine_state0_def)
apply (intro conjI impI)
apply (drule s0_caps_of_state)
apply fastforce
apply (clarsimp simp: s0_internal_def machine_state0_def)
done
lemma einvs_s0:
"einvs s0_internal"
apply (simp add: valid_state_def invs_def respects_device_trivial)
done
lemma obj_valid_pdpt_kh0:
"x \<in> ran kh0 \<Longrightarrow> obj_valid_pdpt x"
by (auto simp: kh0_def valid_entries_def obj_valid_pdpt_def idle_tcb_def High_tcb_def Low_tcb_def
High_pt_def High_pt'_def entries_align_def Low_pt_def High_pd_def Low_pt'_def High_pd'_def
Low_pd_def irq_cnode_def ntfn_def Silc_cnode_def High_cnode_def Low_cnode_def Low_pd'_def)
subsubsection \<open>Haskell state\<close>
text \<open>One invariant we need on s0 is that there exists
an associated Haskell state satisfying the invariants.
This does not yet exist.\<close>
lemma Sys1_valid_initial_state_noenabled:
assumes extras_s0: "step_restrict s0"
assumes utf_det: "\<forall>pl pr pxn tc ms s. det_inv InUserMode tc s \<and> einvs s \<and> context_matches_state pl pr pxn ms s \<and> ct_running s
\<longrightarrow> (\<exists>x. utf (cur_thread s) pl pr pxn (tc, ms) = {x})"
assumes utf_non_empty: "\<forall>t pl pr pxn tc ms. utf t pl pr pxn (tc, ms) \<noteq> {}"
assumes utf_non_interrupt: "\<forall>t pl pr pxn tc ms e f g. (e,f,g) \<in> utf t pl pr pxn (tc, ms) \<longrightarrow> e \<noteq> Some Interrupt"
assumes det_inv_invariant: "invariant_over_ADT_if det_inv utf"
assumes det_inv_s0: "det_inv KernelExit (cur_context s0_internal) s0_internal"
shows "valid_initial_state_noenabled det_inv utf s0_internal Sys1PAS timer_irq s0_context"
apply (unfold_locales, simp_all only: pasMaySendIrqs_Sys1PAS)
apply (insert det_inv_invariant)[9]
apply (erule(2) invariant_over_ADT_if.det_inv_abs_state)
apply ((erule invariant_over_ADT_if.det_inv_abs_state
invariant_over_ADT_if.check_active_irq_if_Idle_det_inv
invariant_over_ADT_if.check_active_irq_if_User_det_inv
invariant_over_ADT_if.do_user_op_if_det_inv
invariant_over_ADT_if.handle_preemption_if_det_inv
invariant_over_ADT_if.kernel_entry_if_Interrupt_det_inv
invariant_over_ADT_if.kernel_entry_if_det_inv
invariant_over_ADT_if.kernel_exit_if_det_inv
invariant_over_ADT_if.schedule_if_det_inv)+)[8]
apply (rule Sys1_pas_cur_domain)
apply (rule Sys1_pas_wellformed_noninterference)
apply (simp only: einvs_s0)
apply (simp add: Sys1_current_subject_idemp)
apply (simp add: only_timer_irq_inv_s0 silc_inv_s0 Sys1_pas_cur_domain
domain_sep_inv_s0 Sys1_pas_refined Sys1_guarded_pas_domain
idle_equiv_refl)
apply (clarsimp simp: obj_valid_pdpt_kh0 valid_domain_list_2_def s0_internal_def exst0_def)
apply (simp add: det_inv_s0)
apply (simp add: s0_internal_def exst0_def)
apply (simp add: ct_in_state_def st_tcb_at_tcb_states_of_state_eq
identity_eq[symmetric] tcb_states_of_state_s0)
apply (simp add: s0_ptr_defs s0_internal_def)
apply (simp add: s0_internal_def exst0_def)
apply (rule utf_det)
apply (rule utf_non_empty)
apply (rule utf_non_interrupt)
apply (simp add: extras_s0[simplified s0_def])
done
text \<open>The extra assumptions in valid_initial_state, namely being enabled and having a serial system, follow from ADT_IF_Refine.\<close>
end
end
|
# # Predicting credit approval by applicants' characteristics
# ### Instructions
# In this exercise we will predict the approval (yes/no) of credit applications from the applicants' characteristics.
# As the data comes from a real-world log of a financial institution, both field names and values have been replaced with meaningless symbols to preserve anonymity.
# In detail, the attributes of this dataset are:
# - A1: b, a.
# - A2: continuous.
# - A3: continuous.
# - A4: u, y, l, t.
# - A5: g, p, gg.
# - A6: c, d, cc, i, j, k, m, r, q, w, x, e, aa, ff.
# - A7: v, h, bb, j, n, z, dd, ff, o.
# - A8: continuous.
# - A9: t, f.
# - A10: t, f.
# - A11: continuous.
# - A12: t, f.
# - A13: g, p, s.
# - A14: continuous.
# - A15: continuous.
# - A16: +,- (class attribute) - what we want to predict
# Further information concerning this dataset can be found online on the [UCI Machine Learning Repository dedicated page](https://archive.ics.uci.edu/ml/datasets/Credit+Approval)
# Our prediction concerns the positive or negative outcome of the credit application.
# While you can use any supervised ML algorithm, I suggest the [`Random Forests`](https://sylvaticus.github.io/BetaML.jl/dev/Trees.html) from BetaML: they are easy to use, and the dataset's numerous categorical and missing values would require additional preprocessing with most other algorithms.
# ------------------------------------------------------------------------------
# ### 1) Start by setting the working directory to the directory of this file and activate it.
# If you have the provided `Manifest.toml` file in the directory, just run `Pkg.instantiate()`, otherwise manually add the packages Pipe, HTTP, CSV, DataFrames, Plots and BetaML
# Also, seed the random number generator with the integer `123`.
cd(@__DIR__)
using Pkg
Pkg.activate(".")
# If using a Julia version different from 1.7, please uncomment and run the following line (the reproducibility guarantee will however be lost)
# Pkg.resolve()
Pkg.instantiate()
using Random
Random.seed!(123)
# ------------------------------------------------------------------------------
# ### 2) Load the packages/modules Pipe, HTTP, CSV, DataFrames, Plots, BetaML
using Pipe, HTTP, CSV, DataFrames, Plots, BetaML
# ### 3) Load from internet or from local file the input data.
# You can use a pipeline from HTTP.get() to CSV.File to finally a DataFrame.
# Use the parameter `missingstring="?"` in the `CSV.File()` call.
dataURL = "https://archive.ics.uci.edu/ml/machine-learning-databases/credit-screening/crx.data"
# [...] write your code here...
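# A minimal sketch of one possible solution, kept separate from the placeholder above.
# Hedged: `header=false` is an assumption (the crx.data file ships without a header row);
# the rest follows the HTTP.get -> CSV.File -> DataFrame pipeline suggested above.
data = @pipe HTTP.get(dataURL).body |>
       CSV.File(_, header=false, missingstring="?") |>
       DataFrame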
# ------------------------------------------------------------------------------
# ### 4) Now create the X matrix and Y vector
# Create the X matrix of features using the first to the second-to-last column of the data you loaded above and the Y vector by taking the last column.
# If you use the random forests algorithm suggested above, the only data preprocessing you need to do is to convert the X from a DataFrame to a Matrix and to `collect` the Y to a vector. Otherwise be sure to encode the categorical data, skip or impute the missing data and scale the feature matrix as required by the algorithm you employ.
# [...] write your code here...
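# A possible sketch (no further preprocessing is needed if you use the suggested Random
# Forests; `data` refers to the DataFrame loaded in the step-3 sketch):
X = Matrix(data[:, 1:end-1])   # all columns but the last one, as a plain matrix
y = collect(data[:, end])      # the class attribute A16, as a vector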
# ------------------------------------------------------------------------------
# ### 5) Partition your data in (xtrain,xtest) and (ytrain,ytest)
# (e.g. using 80% for the training and 20% for testing)
# You can use the BetaML [`partition()`](https://sylvaticus.github.io/BetaML.jl/dev/Utils.html#BetaML.Api.partition-Union{Tuple{T},%20Tuple{AbstractVector{T},%20AbstractVector{Float64}}}%20where%20T%3C:AbstractArray) function.
# Be sure to shuffle your data if you didn't do it earlier! (that's done by default)
# [...] write your code here...
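# One possible sketch using BetaML's partition; the returned nesting
# ((xtrain,xtest),(ytrain,ytest)) is assumed from the BetaML tutorials, and shuffling
# is on by default as noted above.
((xtrain, xtest), (ytrain, ytest)) = partition([X, y], [0.8, 0.2])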
# ------------------------------------------------------------------------------
# ### 6) (optional but suggested) Find the best hyper-parameters for your model, i.e. the ones that lead to the highest accuracy under the records not used for training.
# You can use the [`crossValidation`](https://sylvaticus.github.io/BetaML.jl/dev/Utils.html#BetaML.Utils.crossValidation) function here.
# The idea is that each hyper-parameter has a range of possible values. For a given hyper-parameter, first set `bestAcc=0.0`, then loop over each candidate value: run crossValidation with that value to compute the average training accuracy over different data samples, and if it beats the current `bestAcc`, save it as the new `bestAcc` and record that value as the best one for this hyper-parameter.
# Once you have found the best value for one hyper-parameter, switch to the next one and repeat the procedure, using the best values already found for the earlier hyper-parameters, until all of them have been tuned.
# Note that if you limit the hyper-parameter space sufficiently you could also directly loop over all the possible combinations of hyper-parameters.
# If you use the Random Forests from BetaML consider the following hyper-parameter ranges:
nTrees_range = 20:5:60
splittingCriterion_range = [gini,entropy]
maxDepth_range = [10,15,20,25,30,500]
minRecords_range = [1,2,3,4,5]
maxFeatures_range = [2,3,4,5,6]
β_range = [0.0,0.5,1,2,5,10,20,50,100]
# To train a Random Forest in BetaML use:
# `myForest = buildForest(xtrain,ytrain, nTrees; <other hyper-parameters>)`
# And then to predict and compute the accuracy use:
# ```julia
# ŷtrain = predict(myForest, xtrain)
# trainAccuracy = accuracy(ŷtrain, ytrain)
# ```
# This activity is "semi-optional": Random Forests have very good default values, so the gain you will likely obtain from tuning the various hyper-parameters is not expected to be very high. But it is a good exercise to arrive at this result yourself!
# [...] write your code here...
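# A simplified sketch of the search idea for a single hyper-parameter (nTrees), using a
# plain hold-out split instead of crossValidation; treat it as an illustration only. The
# positional use of nTrees in buildForest follows the call shown above; the inner split
# and the variable names are assumptions.
bestAcc    = 0.0
bestNTrees = first(nTrees_range)
((xtr, xval), (ytr, yval)) = partition([xtrain, ytrain], [0.8, 0.2])
for nt in nTrees_range
    forest = buildForest(xtr, ytr, nt)             # train with nt trees on the inner training set
    acc    = accuracy(predict(forest, xval), yval) # validation accuracy for this candidate value
    if acc > bestAcc
        global bestAcc    = acc
        global bestNTrees = nt
    end
end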
# ------------------------------------------------------------------------------
# ### 7) Perform the final training with the best hyperparameters and compute the accuracy on the test set
# If you have chosen good hyper-parameters, your accuracy should be in the 98%-99% range for training and in the 81%-89% range for testing.
# [...] write your code here...
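# A possible sketch, reusing the value tuned in the step-6 sketch (swap in your own best
# hyper-parameters if you ran the full search):
finalForest = buildForest(xtrain, ytrain, bestNTrees)
trainAcc = accuracy(predict(finalForest, xtrain), ytrain)
testAcc  = accuracy(predict(finalForest, xtest), ytest)
println("Train accuracy: $trainAcc ; test accuracy: $testAcc")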
|
[STATEMENT]
lemma valid_adv_start_bounds:
assumes "valid_window args t0 sub rho w" "w_i w < w_j w"
shows "w_i (adv_start args w) = Suc (w_i w)" "w_j (adv_start args w) = w_j w"
"w_tj (adv_start args w) = w_tj w" "w_sj (adv_start args w) = w_sj w"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (w_i (adv_start args w) = Suc (w_i w) &&& w_j (adv_start args w) = w_j w) &&& w_tj (adv_start args w) = w_tj w &&& w_sj (adv_start args w) = w_sj w
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
valid_window args t0 sub rho w
w_i w < w_j w
goal (1 subgoal):
1. (w_i (adv_start args w) = Suc (w_i w) &&& w_j (adv_start args w) = w_j w) &&& w_tj (adv_start args w) = w_tj w &&& w_sj (adv_start args w) = w_sj w
[PROOF STEP]
by (auto simp: adv_start_def Let_def valid_window_def split: option.splits prod.splits
elim: reaches_on.cases)
|
(*
Author: David Sanan
Maintainer: David Sanan, sanan at ntu edu sg
License: LGPL
*)
(* Title: Sep_Prod_Instance.thy
Author: David Sanan, NTU
Copyright (C) 2015-2016 David Sanan
Some rights reserved, NTU
This library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
This library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
USA
*)
theory Sep_Prod_Instance
imports Sep_Algebra.Separation_Algebra "Separata.Separata"
begin
section{* Product of Separation Algebras Instantiation *}
instantiation prod::(sep_algebra,sep_algebra) sep_algebra
begin
definition zero_prod_def: "0 \<equiv> (0,0)"
definition plus_prod_def: "p1 + p2 \<equiv> ((fst p1) + (fst p2),(snd p1) + (snd p2))"
definition sep_disj_prod_def: "sep_disj p1 p2 \<equiv> ((fst p1) ## (fst p2) \<and> (snd p1) ## (snd p2))"
instance
apply standard
apply (simp add: sep_disj_prod_def zero_prod_def)
apply (simp add: sep_disj_commute sep_disj_prod_def)
apply (simp add: zero_prod_def plus_prod_def)
apply (simp add: plus_prod_def sep_disj_prod_def sep_disj_commute sep_add_commute)
apply (simp add: plus_prod_def sep_add_assoc sep_disj_prod_def)
apply (simp add: sep_disj_prod_def plus_prod_def )
apply (fastforce intro:sep_disj_addD1)
apply (simp add: sep_disj_prod_def prod_def plus_prod_def sep_disj_addI1)
done
end
instantiation prod::(heap_sep_algebra, heap_sep_algebra) heap_sep_algebra
begin
instance
proof
fix x :: "'a \<times> 'b" and z :: "'a \<times> 'b" and y :: "'a \<times> 'b"
assume a1: "x + z = y + z"
assume a2: "x ## z"
assume a3: "y ## z"
have f4: "fst x + fst z = fst y + fst z \<and> snd x + snd z = snd y + snd z"
using a1 by (simp add: plus_prod_def)
have f5: "\<forall>p pa. p ## pa = ((fst p::'a) ## fst pa \<and> (snd p::'b) ## snd pa)"
using sep_disj_prod_def by blast
hence f6: "fst x = fst y"
using f4 a3 a2 by (meson sep_add_cancel)
have "snd x = snd y"
using f5 f4 a3 a2 by (meson sep_add_cancel)
thus "x = y"
using f6 by (simp add: prod_eq_iff)
next
fix x:: "'a \<times> 'b"
assume "x##x"
thus "x=0"
by (metis sep_add_disj sep_disj_prod_def surjective_pairing zero_prod_def)
next
fix a :: "'a \<times> 'b" and b :: "'a \<times> 'b" and c :: "'a \<times> 'b" and d :: "'a \<times> 'b" and w :: "'a \<times> 'b"
assume wab:"a + b = w" and wcd:"c + d = w" and abdis:"a ## b" and cddis:"c ## d"
then obtain a1 a2 b1 b2 c1 c2 d1 d2 w1 w2 where
a:"a= (a1,a2)" and
b:"b= (b1,b2)" and
c:"c= (c1,c2)" and
d:"d= (d1,d2)" and
e:"w= (w1,w2)" by fastforce
have "\<exists>e1 f1 g1 h1. a1=e1+f1 \<and> b1 = g1 + h1 \<and> c1=e1+g1 \<and> d1 = f1+h1 \<and>
e1##f1 \<and> g1##h1 \<and> e1##g1 \<and> f1##h1"
using wab wcd abdis cddis a b c d e
unfolding plus_prod_def sep_disj_prod_def
using sep_add_cross_split
by fastforce
also have "\<exists>e2 f2 g2 h2. a2=e2+f2 \<and> b2 = g2 + h2 \<and> c2=e2+g2 \<and> d2 = f2+h2 \<and>
e2##f2 \<and> g2##h2 \<and> e2##g2 \<and> f2##h2"
using wab wcd abdis cddis a b c d e
unfolding plus_prod_def sep_disj_prod_def
using sep_add_cross_split
by fastforce
ultimately show "\<exists> e f g h. e + f = a \<and> g + h = b \<and> e + g = c \<and> f + h = d \<and>
e ## f \<and> g ## h \<and> e ## g \<and> f ## h"
using a b c d e
unfolding plus_prod_def sep_disj_prod_def
by fastforce
next
fix x :: "'a \<times> 'b" and y :: "'a \<times> 'b"
assume "x+y=0" and
"x##y"
thus "x=0"
proof -
have f1: "(fst x + fst y, snd x + snd y) = 0"
by (metis (full_types) \<open>x + y = 0\<close> plus_prod_def)
then have f2: "fst x = 0"
by (metis (no_types) \<open>x ## y\<close> fst_conv sep_add_ind_unit sep_disj_prod_def zero_prod_def)
have "snd x + snd y = 0"
using f1 by (metis snd_conv zero_prod_def)
then show ?thesis
using f2 by (metis (no_types) \<open>x ## y\<close> fst_conv plus_prod_def sep_add_ind_unit sep_add_zero sep_disj_prod_def snd_conv zero_prod_def)
qed
next
fix x :: "'a \<times> 'b" and y :: "'a \<times> 'b" and z :: "'a \<times> 'b"
assume "x ## y" and "y ## z" and "x ## z"
then have "x ## (fst y + fst z, snd y + snd z)"
by (metis \<open>x ## y\<close> \<open>x ## z\<close> \<open>y ## z\<close> disj_dstri fst_conv sep_disj_prod_def snd_conv)
thus "x ## y + z" by (metis plus_prod_def)
next
fix x :: "'a \<times> 'b" and y :: "'a \<times> 'b" and z :: "'a \<times> 'b"
assume "y ## z"
then show "x ## y + z = (x ## y \<and> x ## z)"
unfolding sep_disj_prod_def plus_prod_def
by auto
next
fix x :: "'a \<times> 'b" and y :: "'a \<times> 'b"
assume "x ## x" and "x + x = y"
thus "x=y"
by (metis disjoint_zero_sym plus_prod_def sep_add_disj sep_add_zero_sym sep_disj_prod_def)
qed
end
lemma fst_fst_dist:"fst (fst x + fst y) = fst (fst x) + fst (fst y)"
by (simp add: plus_prod_def)
lemma fst_snd_dist:"fst (snd x + snd y) = fst (snd x) + fst (snd y)"
by (simp add: plus_prod_def)
lemma snd_fst_dist:"snd (fst x + fst y) = snd (fst x) + snd (fst y)"
by (simp add: plus_prod_def)
lemma snd_snd_dist:"snd (snd x + snd y) = snd (snd x) + snd (snd y)"
by (simp add: plus_prod_def)
lemma dis_sep:"(\<sigma>1, \<sigma>2) = (x1',x2') + (x1'',x2'') \<and>
(x1',x2') ## (x1'',x2'') \<Longrightarrow>
\<sigma>1 =(x1'+ x1'') \<and> x1' ## x1'' \<and> x2' ## x2''
\<and> \<sigma>2 =(x2'+ x2'')"
by (simp add: plus_prod_def sep_disj_prod_def)
lemma substate_prod: "\<sigma>1 \<preceq> \<sigma>1' \<and> \<sigma>2 \<preceq> \<sigma>2' \<Longrightarrow> (\<sigma>1,\<sigma>2) \<preceq> (\<sigma>1',\<sigma>2')"
proof -
assume a1:"\<sigma>1 \<preceq> \<sigma>1' \<and> \<sigma>2 \<preceq> \<sigma>2'"
then obtain x where sub_x:"\<sigma>1 ## x \<and> \<sigma>1 + x = \<sigma>1'" using sep_substate_def by blast
with a1 obtain y where sub_y:"\<sigma>2 ## y \<and> \<sigma>2 + y = \<sigma>2'" using sep_substate_def by blast
have dis_12:"(\<sigma>1,\<sigma>2)##(x,y)" using sub_x sub_y by (simp add: sep_disj_prod_def)
have union_12:"(\<sigma>1',\<sigma>2') = (\<sigma>1,\<sigma>2)+(x,y)" using sub_x sub_y by (simp add: plus_prod_def)
show "(\<sigma>1,\<sigma>2) \<preceq> (\<sigma>1',\<sigma>2')" using sep_substate_def dis_12 union_12 by auto
qed
lemma disj_sep_substate:
"(\<sigma>1,\<sigma>'\<triangleright>\<sigma>1') \<and> (\<sigma>2,\<sigma>''\<triangleright>\<sigma>2') \<Longrightarrow>
(\<sigma>1,\<sigma>2) \<preceq> (\<sigma>1',\<sigma>2')"
proof-
assume a1:"(\<sigma>1,\<sigma>'\<triangleright>\<sigma>1') \<and> (\<sigma>2,\<sigma>''\<triangleright>\<sigma>2')"
thus "(\<sigma>1,\<sigma>2) \<preceq> (\<sigma>1',\<sigma>2')"
by (metis substate_prod tern_rel_def sep_substate_disj_add)
qed
lemma sep_tran_disjoint_split:
"(x , y \<triangleright> (\<sigma>1::('a::heap_sep_algebra, 'a::heap_sep_algebra)prod,\<sigma>2)) \<Longrightarrow>
(\<sigma>1 , \<sigma>'\<triangleright> \<sigma>1') \<and> (\<sigma>2 , \<sigma>''\<triangleright> \<sigma>2')\<Longrightarrow>
(\<sigma>1',\<sigma>2') = (((fst (fst x) + fst (fst y) + fst \<sigma>'),snd (fst x) + snd (fst y) + snd \<sigma>'),
fst (snd x) + fst (snd y) + fst \<sigma>'', snd (snd x) + snd (snd y) + snd \<sigma>'')"
proof-
assume a1:"(x, y \<triangleright> (\<sigma>1,\<sigma>2))"
then have descomp_sigma:"\<sigma>1 = fst x + fst y \<and> \<sigma>2 = snd x + snd y \<and> fst x ## fst y \<and> snd x ## snd y"
by (simp add: tern_rel_def plus_prod_def sep_disj_prod_def)
assume a2: "(\<sigma>1 , \<sigma>'\<triangleright> \<sigma>1') \<and> (\<sigma>2 , \<sigma>''\<triangleright> \<sigma>2')"
then show "(\<sigma>1',\<sigma>2') =(((fst (fst x) + fst (fst y) + fst \<sigma>'),snd (fst x) + snd (fst y) + snd \<sigma>'),
fst (snd x) + fst (snd y) + fst \<sigma>'', snd (snd x) + snd (snd y) + snd \<sigma>'')"
by (simp add: descomp_sigma plus_prod_def tern_rel_def)
qed
lemma sep_tran_disjoint_disj1:
"(x , y \<triangleright> (\<sigma>1::('a::heap_sep_algebra, 'a::heap_sep_algebra)prod,\<sigma>2)) \<Longrightarrow>
(\<sigma>1 , \<sigma>'\<triangleright> \<sigma>1') \<and> (\<sigma>2 , \<sigma>''\<triangleright> \<sigma>2')\<Longrightarrow>
(fst (fst x + fst y) ## fst \<sigma>')
\<and> (snd (fst x + fst y) ## snd \<sigma>')
\<and> ((fst (snd x + snd y)) ## fst \<sigma>'')
\<and> ((snd (snd x + snd y)) ## snd \<sigma>'')
"
proof -
assume a1:"(x, y \<triangleright> (\<sigma>1,\<sigma>2))"
then have descomp_sigma:
"\<sigma>1 = fst x + fst y \<and> \<sigma>2 = snd x + snd y \<and>
fst x ## fst y \<and> snd x ## snd y"
by (simp add: tern_rel_def plus_prod_def sep_disj_prod_def)
assume a2: "(\<sigma>1 , \<sigma>'\<triangleright> \<sigma>1') \<and> (\<sigma>2 , \<sigma>''\<triangleright> \<sigma>2')"
then show " (fst (fst x + fst y) ## fst \<sigma>')
\<and> (snd (fst x + fst y) ## snd \<sigma>')
\<and> ((fst (snd x + snd y)) ## fst \<sigma>'')
\<and> ((snd (snd x + snd y)) ## snd \<sigma>'')
"
by (simp add: descomp_sigma sep_disj_prod_def tern_rel_def)
qed
lemma sep_tran_disjoint_disj:
"(x , y \<triangleright> (\<sigma>1::('a::heap_sep_algebra, 'a::heap_sep_algebra)prod,\<sigma>2)) \<Longrightarrow>
(\<sigma>1 , \<sigma>'\<triangleright> \<sigma>1') \<and> (\<sigma>2 , \<sigma>''\<triangleright> \<sigma>2')\<Longrightarrow>
(fst (fst x) ## fst \<sigma>') \<and> (fst (fst y) ## fst \<sigma>')
\<and> (snd (fst x) ## snd \<sigma>') \<and> (snd (fst y) ## snd \<sigma>')
\<and> (fst (snd x) ## fst \<sigma>'') \<and> (fst (snd y) ## fst \<sigma>'')
\<and> (snd (snd x) ## snd \<sigma>'') \<and> (snd (snd y) ## snd \<sigma>'')
"
proof -
assume a1:"(x, y \<triangleright> (\<sigma>1,\<sigma>2))"
then have descomp_sigma:
"\<sigma>1 = fst x + fst y \<and> \<sigma>2 = snd x + snd y \<and>
fst x ## fst y \<and> snd x ## snd y"
by (simp add: tern_rel_def plus_prod_def sep_disj_prod_def)
then have sep_comp:"fst (fst x)## fst (fst y) \<and> snd (fst x) ## snd (fst y) \<and>
fst (snd x)## fst (snd y) \<and> snd (snd x) ## snd (snd y)"
by (simp add: tern_rel_def plus_prod_def sep_disj_prod_def)
assume a2: "(\<sigma>1 , \<sigma>'\<triangleright> \<sigma>1') \<and> (\<sigma>2 , \<sigma>''\<triangleright> \<sigma>2')"
then have " (fst (fst x + fst y) ## fst \<sigma>')
\<and> (snd (fst x + fst y) ## snd \<sigma>')
\<and> ((fst (snd x + snd y)) ## fst \<sigma>'')
\<and> ((snd (snd x + snd y)) ## snd \<sigma>'')
" using a1 a2 sep_tran_disjoint_disj1 by blast
then have disjall:" ((fst (fst x)) + (fst (fst y)) ## fst \<sigma>')
\<and> (snd (fst x) + snd( fst y) ## snd \<sigma>')
\<and> ((fst (snd x) + fst (snd y)) ## fst \<sigma>'')
\<and> ((snd (snd x) + snd (snd y)) ## snd \<sigma>'')
" by (simp add: plus_prod_def)
then show "(fst (fst x) ## fst \<sigma>') \<and> (fst (fst y) ## fst \<sigma>')
\<and> (snd (fst x) ## snd \<sigma>') \<and> (snd (fst y) ## snd \<sigma>')
\<and> (fst (snd x) ## fst \<sigma>'') \<and> (fst (snd y) ## fst \<sigma>'')
\<and> (snd (snd x) ## snd \<sigma>'') \<and> (snd (snd y) ## snd \<sigma>'')"
using sep_comp sep_add_disjD by metis
qed
lemma disj_union_dist1: "(\<sigma>1 , \<sigma>'\<triangleright> \<sigma>1') \<and> (\<sigma>2 , \<sigma>''\<triangleright> \<sigma>2') \<Longrightarrow>
((\<sigma>1,\<sigma>2),(\<sigma>',\<sigma>'')\<triangleright> (\<sigma>1',\<sigma>2'))"
unfolding tern_rel_def
by (simp add: plus_prod_def sep_disj_prod_def)
lemma disj_union_dist2: "((\<sigma>1,\<sigma>2),(\<sigma>',\<sigma>'')\<triangleright> (\<sigma>1',\<sigma>2')) \<Longrightarrow>
(\<sigma>1 , \<sigma>'\<triangleright> \<sigma>1') \<and> (\<sigma>2 , \<sigma>''\<triangleright> \<sigma>2')"
unfolding tern_rel_def
by (simp add: plus_prod_def sep_disj_prod_def)
lemma disj_union_dist: "((\<sigma>1 , \<sigma>'\<triangleright> \<sigma>1') \<and> (\<sigma>2 , \<sigma>''\<triangleright> \<sigma>2')) =
((\<sigma>1,\<sigma>2),(\<sigma>',\<sigma>'')\<triangleright> (\<sigma>1',\<sigma>2'))"
using disj_union_dist1 disj_union_dist2 by blast
lemma sep_tran_eq_y':
"(x , y \<triangleright> (\<sigma>1::('a::heap_sep_algebra, 'a::heap_sep_algebra)prod,\<sigma>2)) \<Longrightarrow>
(\<sigma>1 , \<sigma>'\<triangleright> \<sigma>1') \<and> (\<sigma>2 , \<sigma>''\<triangleright> \<sigma>2')\<Longrightarrow>
\<exists>x' y'. (x' , y' \<triangleright> (\<sigma>1',\<sigma>2')) \<and> (fst y'=snd y')"
proof-
assume a1:"(x, y \<triangleright> (\<sigma>1,\<sigma>2))"
then have descomp_sigma:"\<sigma>1 = fst x + fst y \<and> \<sigma>2 = snd x + snd y \<and> fst x ## fst y \<and> snd x ## snd y"
by (simp add: tern_rel_def plus_prod_def sep_disj_prod_def)
assume a2: "(\<sigma>1 , \<sigma>'\<triangleright> \<sigma>1') \<and> (\<sigma>2 , \<sigma>''\<triangleright> \<sigma>2')"
then have "(( fst x + fst y), \<sigma>'\<triangleright> \<sigma>1') \<and> ((snd x + snd y), \<sigma>''\<triangleright> \<sigma>2')"
using descomp_sigma by auto
have descomp_sigma1':"fst \<sigma>1' = fst \<sigma>1 + fst \<sigma>' \<and>
snd \<sigma>1' = snd \<sigma>1 + snd \<sigma>' \<and>
fst \<sigma>1 ## fst \<sigma>' \<and> snd \<sigma>1 ## snd \<sigma>'" using a2
by (auto simp add: tern_rel_def plus_prod_def sep_disj_prod_def)
have descomp_sigma1':"fst \<sigma>2' = fst \<sigma>2 + fst \<sigma>'' \<and>
snd \<sigma>2' = snd \<sigma>2 + snd \<sigma>'' \<and>
fst \<sigma>2 ## fst \<sigma>'' \<and> snd \<sigma>2 ## snd \<sigma>''"
using a2
by (auto simp add: tern_rel_def plus_prod_def sep_disj_prod_def)
then show "\<exists>x' y'. (x' , y'\<triangleright>(\<sigma>1',\<sigma>2')) \<and> (fst y'=snd y')"
by (metis (no_types) eq_fst_iff eq_snd_iff sep_add_zero tern_rel_def sep_disj_zero zero_prod_def)
qed
lemma sep_dis_con_eq:
"x ## y \<and> (h::('a::sep_algebra, 'a::sep_algebra)prod) = x + y \<Longrightarrow>
x' ## y' \<and> h = x' + y' \<Longrightarrow>
x+y=x'+y'"
by simp
(*
instantiation prod::(heap_sep_algebra_labelled_sequents, heap_sep_algebra_labelled_sequents) heap_sep_algebra_labelled_sequents
begin
instance proof qed
end
*)
end
|
(*
* Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
*
* SPDX-License-Identifier: BSD-2-Clause
*)
chapter "List Manipulation Functions"
theory List_Lib
imports Main
begin
definition list_replace :: "'a list \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'a list" where
"list_replace list a b \<equiv> map (\<lambda>x. if x = a then b else x) list"
primrec list_replace_list :: "'a list \<Rightarrow> 'a \<Rightarrow> 'a list \<Rightarrow> 'a list" where
"list_replace_list [] a list' = []" |
"list_replace_list (x # xs) a list' = (if x = a then list' @ xs
else x # list_replace_list xs a list')"
definition list_swap :: "'a list \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'a list" where
"list_swap list a b \<equiv> map (\<lambda>x. if x = a then b else if x = b then a else x) list"
primrec list_insert_after :: "'a list \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'a list" where
"list_insert_after [] a b = []" |
"list_insert_after (x # xs) a b = (if x = a then x # b # xs
else x # list_insert_after xs a b)"
primrec list_remove :: "'a list \<Rightarrow> 'a \<Rightarrow> 'a list" where
"list_remove [] a = []" |
"list_remove (x # xs) a = (if x = a then (list_remove xs a)
else x # (list_remove xs a))"
fun after_in_list :: "'a list \<Rightarrow> 'a \<Rightarrow> 'a option" where
"after_in_list [] a = None" |
"after_in_list [x] a = None" |
"after_in_list (x # y # xs) a = (if a = x then Some y else after_in_list (y # xs) a)"
lemma zip_take1:
"zip (take n xs) ys = take n (zip xs ys)"
apply (induct xs arbitrary: n ys)
apply simp_all
apply (case_tac n, simp_all)
apply (case_tac ys, simp_all)
done
lemma zip_take2:
"zip xs (take n ys) = take n (zip xs ys)"
apply (induct xs arbitrary: n ys)
apply simp_all
apply (case_tac n, simp_all)
apply (case_tac ys, simp_all)
done
lemmas zip_take = zip_take1 zip_take2
lemma replicate_append: "replicate n x @ (x # xs) = replicate (n + 1) x @ xs"
by (induct n, simp+)
end
|
% function calvinNNDetection()
%
% Copyright by Holger Caesar, 2016
% Global variables
global glDatasetFolder glFeaturesFolder;
assert(~isempty(glDatasetFolder) && ~isempty(glFeaturesFolder));
%%% Settings
% Dataset
vocYear = 2010;
trainName = 'train';
testName = 'val';
% Specify paths
vocName = sprintf('VOC%d', vocYear);
datasetDir = [fullfile(glDatasetFolder, vocName), '/'];
outputFolder = fullfile(glFeaturesFolder, 'CNN-Models', 'FRCN', vocName, sprintf('%s-testRelease', vocName));
netPath = fullfile(glFeaturesFolder, 'CNN-Models', 'matconvnet', 'imagenet-vgg-verydeep-16.mat');
logFilePath = fullfile(outputFolder, 'log.txt');
% Fix randomness
randSeed = 42;
rng(randSeed);
% Setup dataset specific options and check validity
setupDataOpts(vocYear, testName, datasetDir);
global DATAopts;
assert(~isempty(DATAopts), 'Error: Dataset not initialized properly!');
% Task-specific
nnOpts.testFn = @testDetection;
nnOpts.misc.overlapNms = 0.3;
nnOpts.derOutputs = {'objective', 1, 'regressObjective', 1};
% General
nnOpts.batchSize = 2;
nnOpts.numSubBatches = nnOpts.batchSize; % 1 image per sub-batch
nnOpts.weightDecay = 5e-4;
nnOpts.momentum = 0.9;
nnOpts.numEpochs = 16;
nnOpts.learningRate = [repmat(1e-3, 12, 1); repmat(1e-4, 4, 1)];
nnOpts.misc.netPath = netPath;
nnOpts.expDir = outputFolder;
nnOpts.gpus = 1; % for automatic selection use: SelectIdleGpu();
% Create outputFolder
if ~exist(outputFolder, 'dir')
mkdir(outputFolder);
end
% Start logging
diary(logFilePath);
%%% Setup
% Start from pretrained network
net = load(nnOpts.misc.netPath);
% Setup imdb
imdb = setupImdbDetection(trainName, testName, net);
% Create calvinNN CNN class
% By default, network is transformed into fast-rcnn with bbox regression
calvinn = CalvinNN(net, imdb, nnOpts);
%%% Train
calvinn.train();
%%% Test
stats = calvinn.test();
%%% Eval
evalDetection(testName, imdb, stats, nnOpts);
|
On 7–10 March 1915, the Grand Fleet conducted a sweep in the northern North Sea, during which it undertook training manoeuvres. Another such cruise took place during 16–19 March. On 11 April, the Grand Fleet conducted a patrol in the central North Sea and returned to port on 14 April; another patrol in the area took place during 17–19 April, followed by gunnery drills off the Shetlands on 20–21 April. The Grand Fleet conducted a sweep into the central North Sea during 17–19 May without encountering German vessels. Another patrol followed during 29–31 May; it too was uneventful. The fleet conducted gunnery training in mid-June. During 2–5 September, the fleet went on another cruise in the northern end of the North Sea and conducted gunnery drills. Throughout the rest of the month, the Grand Fleet conducted numerous training exercises.
|
# Spectral Bandpass Dependence Correction
Reflectance spectrophotometers measuring energy at intervals larger than a single wavelength (5nm or 10nm steps, for example) integrate the energy between $\lambda_{i-1}$ and $\lambda_{i+1}$ for a given wavelength $\lambda_{i}$.
The sampled spectral reflectance data $P^\prime$ needs to be corrected to retrieve the true spectral reflectance data $P$ if the spectrophotometer operating software has not applied spectral bandpass dependence correction. <a name="back_reference_1"></a><a href="#reference_1">[1]</a><a name="back_reference_2"></a><a href="#reference_2">[2]</a>
## Stearns & Stearns Method
Stearns and Stearns (1988) proposed the following equation for spectral bandpass dependence correction:
$$
\begin{equation}
P_i = -\alpha P^\prime_{i -1} + (1 + 2\alpha)P^\prime_{i} -\alpha P^\prime_{i+1}
\end{equation}
$$
where $\alpha = 0.083$. If the wavelength being corrected is the first or the last one, the equation for the first wavelength should instead be:
$$
\begin{equation}
P_i = (1 + \alpha)P^\prime_{i} - \alpha P^\prime_{i+1}
\end{equation}
$$
and for the last wavelength:
$$
\begin{equation}
P_i = (1 + \alpha)P^\prime_{i} - \alpha P^\prime_{i-1}
\end{equation}
$$
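Written out directly, the three equations above translate to a few lines of array code. The following is a minimal NumPy sketch for illustration only (the function name is ours, not part of the Colour API; $\alpha = 0.083$ follows the text above):

```python
import numpy as np

def stearns_bandpass_correction(P_prime, alpha=0.083):
    """Stearns & Stearns (1988) bandpass correction of sampled values P'."""
    P_prime = np.asarray(P_prime, dtype=float)
    P = np.empty_like(P_prime)
    # interior wavelengths
    P[1:-1] = (-alpha * P_prime[:-2]
               + (1 + 2 * alpha) * P_prime[1:-1]
               - alpha * P_prime[2:])
    # first and last wavelengths
    P[0] = (1 + alpha) * P_prime[0] - alpha * P_prime[1]
    P[-1] = (1 + alpha) * P_prime[-1] - alpha * P_prime[-2]
    return P
```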
The implementation in [Colour](https://github.com/colour-science/colour/) is available through the *colour.bandpass_correction_stearns* definition or the generic *colour.bandpass_correction* with the *method='Stearns 1988'* argument:
```python
%matplotlib inline
```
```python
import colour
from colour.plotting import *
colour.utilities.filter_warnings(True, False)
colour_plotting_defaults()
# Spectral bandpass dependence correction.
street_light_spd_data = {
380: 8.9770000e-003,
382: 5.8380000e-003,
384: 8.3290000e-003,
386: 8.6940000e-003,
388: 1.0450000e-002,
390: 1.0940000e-002,
392: 8.4260000e-003,
394: 1.1720000e-002,
396: 1.2260000e-002,
398: 7.4550000e-003,
400: 9.8730000e-003,
402: 1.2970000e-002,
404: 1.4000000e-002,
406: 1.1000000e-002,
408: 1.1330000e-002,
410: 1.2100000e-002,
412: 1.4070000e-002,
414: 1.5150000e-002,
416: 1.4800000e-002,
418: 1.6800000e-002,
420: 1.6850000e-002,
422: 1.7070000e-002,
424: 1.7220000e-002,
426: 1.8250000e-002,
428: 1.9930000e-002,
430: 2.2640000e-002,
432: 2.4630000e-002,
434: 2.5250000e-002,
436: 2.6690000e-002,
438: 2.8320000e-002,
440: 2.5500000e-002,
442: 1.8450000e-002,
444: 1.6470000e-002,
446: 2.2470000e-002,
448: 3.6250000e-002,
450: 4.3970000e-002,
452: 2.7090000e-002,
454: 2.2400000e-002,
456: 1.4380000e-002,
458: 1.3210000e-002,
460: 1.8250000e-002,
462: 2.6440000e-002,
464: 4.5690000e-002,
466: 9.2240000e-002,
468: 6.0570000e-002,
470: 2.6740000e-002,
472: 2.2430000e-002,
474: 3.4190000e-002,
476: 2.8160000e-002,
478: 1.9570000e-002,
480: 1.8430000e-002,
482: 1.9800000e-002,
484: 2.1840000e-002,
486: 2.2840000e-002,
488: 2.5760000e-002,
490: 2.9800000e-002,
492: 3.6620000e-002,
494: 6.2500000e-002,
496: 1.7130000e-001,
498: 2.3920000e-001,
500: 1.0620000e-001,
502: 4.1250000e-002,
504: 3.3340000e-002,
506: 3.0820000e-002,
508: 3.0750000e-002,
510: 3.2500000e-002,
512: 4.5570000e-002,
514: 7.5490000e-002,
516: 6.6560000e-002,
518: 3.9350000e-002,
520: 3.3880000e-002,
522: 3.4610000e-002,
524: 3.6270000e-002,
526: 3.6580000e-002,
528: 3.7990000e-002,
530: 4.0010000e-002,
532: 4.0540000e-002,
534: 4.2380000e-002,
536: 4.4190000e-002,
538: 4.6760000e-002,
540: 5.1490000e-002,
542: 5.7320000e-002,
544: 7.0770000e-002,
546: 1.0230000e-001,
548: 1.6330000e-001,
550: 2.3550000e-001,
552: 2.7540000e-001,
554: 2.9590000e-001,
556: 3.2950000e-001,
558: 3.7630000e-001,
560: 4.1420000e-001,
562: 4.4850000e-001,
564: 5.3330000e-001,
566: 7.3490000e-001,
568: 8.6530000e-001,
570: 7.8120000e-001,
572: 6.8580000e-001,
574: 6.6740000e-001,
576: 6.9300000e-001,
578: 6.9540000e-001,
580: 6.3260000e-001,
582: 4.6240000e-001,
584: 2.3550000e-001,
586: 8.4450000e-002,
588: 3.5550000e-002,
590: 4.0580000e-002,
592: 1.3370000e-001,
594: 3.4150000e-001,
596: 5.8250000e-001,
598: 7.2080000e-001,
600: 7.6530000e-001,
602: 7.5290000e-001,
604: 7.1080000e-001,
606: 6.5840000e-001,
608: 6.0140000e-001,
610: 5.5270000e-001,
612: 5.4450000e-001,
614: 5.9260000e-001,
616: 5.4520000e-001,
618: 4.4690000e-001,
620: 3.9040000e-001,
622: 3.5880000e-001,
624: 3.3400000e-001,
626: 3.1480000e-001,
628: 2.9800000e-001,
630: 2.8090000e-001,
632: 2.6370000e-001,
634: 2.5010000e-001,
636: 2.3610000e-001,
638: 2.2550000e-001,
640: 2.1680000e-001,
642: 2.0720000e-001,
644: 1.9920000e-001,
646: 1.9070000e-001,
648: 1.8520000e-001,
650: 1.7970000e-001,
652: 1.7410000e-001,
654: 1.7070000e-001,
656: 1.6500000e-001,
658: 1.6080000e-001,
660: 1.5660000e-001,
662: 1.5330000e-001,
664: 1.4860000e-001,
666: 1.4540000e-001,
668: 1.4260000e-001,
670: 1.3840000e-001,
672: 1.3500000e-001,
674: 1.3180000e-001,
676: 1.2730000e-001,
678: 1.2390000e-001,
680: 1.2210000e-001,
682: 1.1840000e-001,
684: 1.1530000e-001,
686: 1.1210000e-001,
688: 1.1060000e-001,
690: 1.0950000e-001,
692: 1.0840000e-001,
694: 1.0740000e-001,
696: 1.0630000e-001,
698: 1.0550000e-001,
700: 1.0380000e-001,
702: 1.0250000e-001,
704: 1.0380000e-001,
706: 1.0250000e-001,
708: 1.0130000e-001,
710: 1.0020000e-001,
712: 9.8310000e-002,
714: 9.8630000e-002,
716: 9.8140000e-002,
718: 9.6680000e-002,
720: 9.4430000e-002,
722: 9.4050000e-002,
724: 9.2510000e-002,
726: 9.1880000e-002,
728: 9.1120000e-002,
730: 8.9860000e-002,
732: 8.9460000e-002,
734: 8.8610000e-002,
736: 8.9640000e-002,
738: 8.9910000e-002,
740: 8.7700000e-002,
742: 8.7540000e-002,
744: 8.5880000e-002,
746: 8.1340000e-002,
748: 8.8200000e-002,
750: 8.9410000e-002,
752: 8.9360000e-002,
754: 8.4970000e-002,
756: 8.9030000e-002,
758: 8.7810000e-002,
760: 8.5330000e-002,
762: 8.5880000e-002,
764: 1.1310000e-001,
766: 1.6180000e-001,
768: 1.6770000e-001,
770: 1.5340000e-001,
772: 1.1740000e-001,
774: 9.2280000e-002,
776: 9.0480000e-002,
778: 9.0020000e-002,
780: 8.8190000e-002}
street_light_spd = colour.SpectralPowerDistribution(
street_light_spd_data, name='Street Light')
bandpass_corrected_street_light_spd = street_light_spd.copy()
bandpass_corrected_street_light_spd.name = 'Street Light (Bandpass Corrected)'
bandpass_corrected_street_light_spd = colour.colorimetry.bandpass_correction(
bandpass_corrected_street_light_spd, method='Stearns 1988')
multi_spd_plot([street_light_spd, bandpass_corrected_street_light_spd],
title='Stearns Bandpass Correction')
```
We can then calculate the [$\Delta E^\star_{ab}$](http://en.wikipedia.org/wiki/Color_difference#Delta_E) between the two spectral power distributions:
```python
cmfs = colour.colorimetry.STANDARD_OBSERVERS_CMFS['CIE 1931 2 Degree Standard Observer']
street_light_XYZ = colour.spectral_to_XYZ(
street_light_spd.interpolate(
colour.SpectralShape(interval=1)), cmfs)
bandpass_corrected_street_light_XYZ = colour.spectral_to_XYZ(
bandpass_corrected_street_light_spd.interpolate(
colour.SpectralShape(interval=1)), cmfs)
# Converting the *CIE XYZ* colourspace values to *CIE Lab* colourspace
# and calculating *Delta E*.
colour.difference.delta_E_CIE2000(*colour.XYZ_to_Lab(
[street_light_XYZ / 100, bandpass_corrected_street_light_XYZ / 100]))
```
0.0053840303670959853
## Bibliography
1. <a href="#back_reference_1">^</a> <a name="reference_1"></a>Westland, S., Ripamonti, C., & Cheung, V. (2012). Correction for Spectral Bandpass. In *Computational Colour Science Using MATLAB* (2nd ed., p. 38). ISBN:978-0-470-66569-5
2. <a href="#back_reference_2">^</a> <a name="reference_2"></a>Stearns, E. I., & Stearns, R. E. (1988). An example of a method for correcting radiance data for Bandpass error. Color Research & Application, 13(4), 257–259. doi:10.1002/col.5080130410
|
#missing data model.
#fits a global level predictor for each independent variable.
#global level predictor is site level prior.
#site level estimate is plot level prior.
#plot level estimate is core level prior.
#if core level estimate is absent it is drawn from site level estimate.
#load some core level data missing at core and plot scale.
rm(list=ls())
#load data
d <- readRDS('/fs/data3/caverill/NEFI_microbial/NEON_data_aggregation/core.table.rds')
d <- d[d$siteID %in% c('ORNL','DSNY'),] #subset to two sites for development.
y <- as.matrix(d[,c('soilTemp','soilInWaterpH')]) #matrix of x values.
#drop factor levels. Important when subsetting to a few sites.
d$siteID <-droplevels(d$siteID)
d$plotID <-droplevels(d$plotID)
#setup categorical predictor matrices.
plot.preds <- model.matrix( ~ plotID - 1, data = d)
site.preds <- model.matrix( ~ siteID - 1, data = d)
#indexing.
plot_plot <- as.factor(unique(d$plotID))
plot_site <- as.factor(substr(plot_plot,1,4))
jags.model = "
model {
#priors
for(j in 1:N.x){
glob[j] ~ dnorm(0,1.0E-4) #global level parameter prior.
for(k in 1:n.site.preds){site[k,j] <- glob[j]} #prior on site level parameters is glob level parameter.
for(k in 1:n.plot.preds){plot[k,j] <- site[plot_site[k],j]} #prior on plot level parameters is site level parameter.
sigma[j] ~ dunif(0, 100)
glob.tau[j] <- pow(sigma[j], -2)
site.tau[j] <- pow(sigma[j], -2)
plot.tau[j] <- pow(sigma[j], -2)
core.tau[j] <- pow(sigma[j], -2)
}
#combine priors and parameters
for(j in 1:N.x){
for(i in 1:N){
glob.x[i,j] ~ dnorm(glob[j],glob.tau[j])
site.hat[i,j] <- inprod(site[,j], site.preds[i,])
plot.hat[i,j] <- inprod(plot[,j], plot.preds[i,])
site.x[i,j] ~ dnorm(site.hat[i,j], site.tau[j])
plot.x[i,j] ~ dnorm(plot.hat[i,j], plot.tau[j])
core.x[i,j] ~ dnorm( plot.x[i,j], core.tau[j]) #predict missing core values based on plot estimate.
}
}
} #end model
"
jags.data <- list(core.x = y,
plot.preds=plot.preds, site.preds=site.preds,
plot_site = plot_site,
n.plot.preds = ncol(plot.preds), n.site.preds = ncol(site.preds),
N.x = ncol(y), N = nrow(d))
jags.out <- runjags::run.jags(jags.model,
data = jags.data,
n.chains = 3,
monitor = c('plot','site','core.x'))
#check estimated values of temp and pH.
out <- summary(jags.out)
core.vals <- out[grep('core.x',rownames(out)),]
temp.new <- core.vals[grep(',1]',rownames(core.vals)),]
pH.new <- core.vals[grep(',2]',rownames(core.vals)),]
temp <- data.frame(d$soilTemp,d$plotID,temp.new[,1:4])
pH <- data.frame(d$soilInWaterpH,d$plotID, pH.new[,1:4])
|
!@interface
subroutine tabSlist(nVal,sVal,mxsTab,nTab,sTab)
!@end/interface
!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS !
!-----------------------------------------------------------------------
!
! !ROUTINE: tabSlist - merge into a table of sorted unique strings
!
! !INTERFACE:
! <@interface
!
! !DESCRIPTION:
!
! !EXAMPLES:
!
! !BUGS:
!
! !SEE ALSO:
!
! !SYSTEM ROUTINES:
!
! !FILES USED:
!
! !REVISION HISTORY:
! 03Jan96 - J. Guo - programmed and added the prolog
!_______________________________________________________________________
!@interface
use m_die, only : die
implicit none
integer, intent(in) :: nVal ! size of sVal
  character*(*), intent(in) :: sVal(nVal) ! a list of strings
integer, intent(in) :: mxsTab ! possible size of sTab
integer, intent(inout) :: nTab ! true size of sTab
character*(*), intent(inout) :: sTab(mxsTab) ! a sorted table
!@end/interface
!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
! Locals/workspace
integer :: it,iv,in,istat
integer, allocatable :: indx(:) ! (nVal)
character(len=len(sTab)),allocatable :: newTab(:) ! (mxsTab)
integer, parameter :: icap=ichar('A')-ichar('a')
character(len=*), parameter :: myname = 'tabSlist'
!-----------------------------------------------------------------------
if(nVal.eq.0) return ! default result is original table
allocate(indx(nVal), newTab(mxsTab), stat=istat)
if(istat.ne.0) call die(myname,'allocate()',istat)
! sort sVal array
call indexxs(nVal,sVal,indx) ! indexed heap-sort sVal
! merge the sorted sVal and the sorted sTab (assumed) into a
! temporary array newTab
in=0
it=1
iv=1
do while(it.le.nTab .or. iv.le.nVal)
if(it.le.nTab .and. iv.le.nVal) then ! make a comparison
if(sTab(it).lt.sVal(indx(iv))) then
call getTab_ ! store the current sTab entry into newTab
call nextTab_ ! go to the next different sTab entry
elseif(sTab(it).gt.sVal(indx(iv))) then
call getVal_ ! store the current sVal entry into newTab
call nextVal_ ! go to the next different sVal entry
elseif(sTab(it).eq.sVal(indx(iv))) then
call getTab_ ! store the current sTab entry into newTab
call nextTab_ ! go to the next different sTab entry
call nextVal_ ! go to the next different sVal entry
endif
elseif(it.le.nTab) then ! iv .gt. nVal
call getTab_ ! store the current sTab entry into newTab
call nextTab_ ! go to the next different sTab entry
elseif(iv.le.nVal) then ! it .gt. nTab
call getVal_ ! store the current sVal entry into newTab
call nextVal_ ! go to the next different sVal entry
endif
end do
nTab=min(in,mxsTab) ! a new size
sTab(1:nTab)=newTab(1:nTab) ! a new table returned.
deallocate(indx,newTab)
contains
subroutine getTab_
in=in+1
if(in.le.mxsTab) newTab(in)=sTab(it)
end subroutine getTab_
subroutine getVal_
in=in+1
if(in.le.mxsTab) newTab(in)=sVal(indx(iv))
end subroutine getVal_
subroutine nextTab_
character(len=len(sTab)) :: curTab
logical next
curTab=sTab(it) ! with the current sTab(it) value
it=it+1
next=it.le.nTab
if(next) next=curTab.eq.sTab(it)
do while(next)
it=it+1
next=it.le.nTab
if(next) next=curTab.eq.sTab(it)
end do
end subroutine nextTab_
subroutine nextVal_
character(len=len(sVal)) :: curVal
logical next
curVal=sVal(indx(iv)) ! with the current sVal(indx(iv)) value
  iv=iv+1
next=iv.le.nVal
if(next) next=curVal.eq.sVal(indx(iv))
do while(next)
iv=iv+1
next=iv.le.nVal
if(next) next=curVal.eq.sVal(indx(iv))
end do
end subroutine nextVal_
!_______________________________________________________________________
end subroutine tabSlist
!.
|
\declareIM{ppg}{pre-pre-G}{2021-04-30}{AOS using ComCam; Zernikes}{AOS Zernikes}
\completeIM{\thisIM}{2021-05-27 \JIRA{DM}{28667}{}}
Executive Summary:
Demonstrate the processing of wave front images for ComCam from raw to Zernikes.
\textbf{Does not map to P6}
\subsection{Goals of IM}
Demonstrate ability to:
\begin{itemize}
\item Process Defocussed Images and Generate Zernike coefficients
\end{itemize}
\subsection{Prerequisites}
\begin{itemize}
\item PhoSim images with donuts from ComCam ingested into a butler repo.
\item Working version of wavefront estimation pipeline (wep).
\end{itemize}
\subsection{Procedure}
\begin{itemize}
\item Generate and ingest 1 simulated image with each desired piston: ($[0, -1.5, 0, 1.5, 0]$ mm)
and appropriate
metadata in each image:
\begin{itemize}
\item Boresight and rotator angle
\item Piston from camera hexapod
\end{itemize}
\item Run a Gen3 pipeline from the command line that:
\begin{itemize}
\item gets data from the butler
\item runs the ISR
\item finds and processes isolated donuts
\item estimates the wavefront in terms of annular Zernikes from each pair of donuts
\item averages the resulting Zernikes over the stars in the field
\item uses the butler to put the description of the wavefront to disk
\end{itemize}
\end{itemize}
\subsection{Acceptance Criteria}
\begin{itemize}
\item A member of SITCom must be able to carry out these operations at NCSA.
This need not be done using RSP; a login shell on \eg \texttt{lsst-devl3} is acceptable, and
the SITCom member may be required to install and build packages from \texttt{git}.
\item Confirm that the results are as expected. This should be carried out using a notebook on the RSP,
and the tester may be required to install the notebook from \texttt{git}.
\end{itemize}
\subsection{Status}
\begin{description}
\item[2021-07-30]
\begin{itemize}
\item Images taken using nublado
\item Automatic ingestion into gen3 butler in Chile
\item Transfer to NCSA and ingestion in gen3 butler visible from \gls{RSP}
\item Calibs (bias, dark) generated in Chile
\end{itemize}
\end{description}
|
import torch.nn as nn
import torch
from engineer.models.registry import DEPTH
import numpy as np
from ..common import ConvBlock
import torch.nn.functional as F
@DEPTH.register_module
class DepthNormalizer(nn.Module):
def __init__(self, input_size:int = 512,z_size:int = 200):
'''
        DepthNormalizer class,
        which is used to generate depth information
Parameters:
input_size: the size of image, initially, 512 x 512
z_size: z normalization factor
'''
super(DepthNormalizer, self).__init__()
self.input_size = input_size
self.z_size = z_size
self.name = "DepthNormalizer"
self.input_para=dict(
input_size=input_size,
z_size=z_size
)
def forward(self, z, calibs=None, index_feat=None)->torch.Tensor:
'''
Normalize z_feature
Parameters:
            z: [B, 1, N] depth values for z in the image coordinate system
            calibs: camera matrix (not used here)
:return:
normalized features
z_feat [B,1,N]
'''
z_feat = z * (self.input_size // 2) / self.z_size
return z_feat
@property
def name(self):
__repr = "{}(Parameters: ".format(self.__name)
for key in self.input_para.keys():
__repr+="{}:{}, ".format(key,self.input_para[key])
__repr=__repr[:-2]
return __repr+')'
@name.setter
def name(self,v):
self.__name = v
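# Example usage (an illustrative sketch only; the tensor shape below is assumed, and in the
# original project this module would normally be built via the DEPTH registry):
#
#   normalizer = DepthNormalizer(input_size=512, z_size=200)
#   z = torch.rand(4, 1, 1000)          # [B, 1, N] depth values in image coordinates
#   z_feat = normalizer(z)              # each value is scaled by (input_size // 2) / z_size
#   print(normalizer.name, z_feat.shape)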
|
Blog post exploring whether or not LOO-CV can be used to compare models that try to explain some data $y$ with models trying to explain the same data after a transformation $z=f(y)$. Inspired by [@tiagocc question](https://discourse.mc-stan.org/t/very-simple-loo-question/9258) on Stan Forums. This post has two sections, the first one is the mathematical derivation of the equations used and their application on a validation example, and the second section is a real example. In addition to the LOO-CV usage examples and explanations, another goal of this notebook is to show and highlight the capabilities of [ArviZ](https://arviz-devs.github.io/arviz/).
This post has been automatically generated from a Jupyter notebook that can be downloaded [here]({{ site.url }}/notebooks/loo/LOO-CV_transformed_data.ipynb)
```python
import pystan
import pandas as pd
import numpy as np
import arviz as az
import matplotlib.pyplot as plt
```
```python
plt.style.use('../forty_blog.mplstyle')
```
## Mathematical derivation and validation example
In the first example, we will compare two equivalent models:
1. $y \sim \text{LogNormal}(\mu, \sigma)$
2. $\log y \sim \text{Normal}(\mu, \sigma)$
### Model definition and execution
Define the data and execute the two models
```python
mu = 2
sigma = 1
logy = np.random.normal(loc=mu, scale=sigma, size=30)
y = np.exp(logy) # y will then be distributed as lognormal
data = {
'N': len(y),
'y': y,
'logy': logy
}
```
```python
with open("lognormal.stan", "r") as f:
lognormal_code = f.read()
```
<details>
<summary markdown='span'>Stan code for LogNormal model
</summary>
```python
print(lognormal_code)
```
data {
int<lower=0> N;
vector[N] y;
}
parameters {
real mu;
real<lower=0> sigma;
}
model {
y ~ lognormal(mu, sigma);
}
generated quantities {
vector[N] log_lik;
vector[N] y_hat;
for (i in 1:N) {
log_lik[i] = lognormal_lpdf(y[i] | mu, sigma);
y_hat[i] = lognormal_rng(mu, sigma);
}
}
</details><br/>
```python
sm_lognormal = pystan.StanModel(model_code=lognormal_code)
fit_lognormal = sm_lognormal.sampling(data=data, iter=1000, chains=4)
```
INFO:pystan:COMPILING THE C++ CODE FOR MODEL anon_model_fa0385baccb7b330f85e0cacaa99fa9d NOW.
```python
idata_lognormal = az.from_pystan(
posterior=fit_lognormal,
posterior_predictive='y_hat',
observed_data=['y'],
log_likelihood='log_lik',
)
```
```python
with open("normal_on_log.stan", "r") as f:
normal_on_log_code = f.read()
```
<details>
<summary markdown='span'>Stan code for Normal on Log data model
</summary>
```python
print(normal_on_log_code)
```
data {
int<lower=0> N;
vector[N] logy;
}
parameters {
real mu;
real<lower=0> sigma;
}
model {
logy ~ normal(mu, sigma);
}
generated quantities {
vector[N] log_lik;
vector[N] logy_hat;
for (i in 1:N) {
log_lik[i] = normal_lpdf(logy[i] | mu, sigma);
logy_hat[i] = normal_rng(mu, sigma);
}
}
</details><br/>
```python
sm_normal = pystan.StanModel(model_code=normal_on_log_code)
fit_normal = sm_normal.sampling(data=data, iter=1000, chains=4)
```
INFO:pystan:COMPILING THE C++ CODE FOR MODEL anon_model_6b25918853568e528afbe629c1103e09 NOW.
```python
idata_normal = az.from_pystan(
posterior=fit_normal,
posterior_predictive='logy_hat',
observed_data=['logy'],
log_likelihood='log_lik',
)
```
Check model convergence. Use `az.summary` to check in one view that the effective sample size (ESS) is large enough and $\hat{R}$ is close to one.
```python
az.summary(idata_lognormal)
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>mean</th>
<th>sd</th>
<th>hpd_3%</th>
<th>hpd_97%</th>
<th>mcse_mean</th>
<th>mcse_sd</th>
<th>ess_mean</th>
<th>ess_sd</th>
<th>ess_bulk</th>
<th>ess_tail</th>
<th>r_hat</th>
</tr>
</thead>
<tbody>
<tr>
<th>mu</th>
<td>2.151</td>
<td>0.218</td>
<td>1.753</td>
<td>2.549</td>
<td>0.006</td>
<td>0.004</td>
<td>1338.0</td>
<td>1285.0</td>
<td>1382.0</td>
<td>1221.0</td>
<td>1.0</td>
</tr>
<tr>
<th>sigma</th>
<td>1.204</td>
<td>0.168</td>
<td>0.901</td>
<td>1.510</td>
<td>0.005</td>
<td>0.004</td>
<td>1090.0</td>
<td>1066.0</td>
<td>1126.0</td>
<td>1002.0</td>
<td>1.0</td>
</tr>
</tbody>
</table>
</div>
```python
az.summary(idata_normal)
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>mean</th>
<th>sd</th>
<th>hpd_3%</th>
<th>hpd_97%</th>
<th>mcse_mean</th>
<th>mcse_sd</th>
<th>ess_mean</th>
<th>ess_sd</th>
<th>ess_bulk</th>
<th>ess_tail</th>
<th>r_hat</th>
</tr>
</thead>
<tbody>
<tr>
<th>mu</th>
<td>2.154</td>
<td>0.222</td>
<td>1.731</td>
<td>2.568</td>
<td>0.006</td>
<td>0.004</td>
<td>1402.0</td>
<td>1386.0</td>
<td>1421.0</td>
<td>1201.0</td>
<td>1.0</td>
</tr>
<tr>
<th>sigma</th>
<td>1.194</td>
<td>0.160</td>
<td>0.902</td>
<td>1.492</td>
<td>0.004</td>
<td>0.003</td>
<td>1333.0</td>
<td>1273.0</td>
<td>1428.0</td>
<td>1067.0</td>
<td>1.0</td>
</tr>
</tbody>
</table>
</div>
In addition, we can plot the quantile ESS plot for one of them directly with `plot_ess`
```python
az.plot_ess(idata_normal, kind="quantile", color="k");
```
### Posterior validation
Check that both models are equivalent and do indeed give the same result for both parameters.
```python
az.plot_posterior(idata_lognormal);
```
```python
az.plot_posterior(idata_normal);
```
### Calculate LOO-CV
Now we get to calculate LOO-CV using Pareto Smoothed Importance Sampling as detailed in Vehtari et al., 2017. As we explained above, both models are equivalent, but one is in terms of $y$ and the other in terms of $\log y$. Therefore, their likelihoods will be on different scales, and hence, their expected log predictive density will also be different.
```python
az.loo(idata_lognormal)
```
Computed from 2000 by 30 log-likelihood matrix
Estimate SE
IC_loo 226.00 14.38
p_loo 2.05 -
```python
az.loo(idata_normal)
```
Computed from 2000 by 30 log-likelihood matrix
Estimate SE
IC_loo 96.66 8.71
p_loo 2.00 -
We have found that, as expected, the two models yield different results despite actually being the same model. This is because LOO is estimated from the log likelihood, $\log p(y_i\mid\theta^s)$, with $i$ the observation id and $s$ the MCMC sample id. Following Vehtari et al., 2017, this log likelihood is used to calculate the PSIS weights and to estimate the expected log pointwise predictive density in the following way:
1. Calculate raw importance weights: $r_i^s = \frac{1}{p(y_i\mid\theta^s)}$
2. Smooth the $r_i^s$ (see original paper for details) to get the PSIS weights $w_i^s$
3. Calculate elpd LOO as:
$$ \text{elpd}_{psis-loo} = \sum_{i=1}^n \log \left( \frac{\sum_s w_i^s p(y_i|\theta^s)}{\sum_s w_i^s} \right) $$
This will estimate the out of sample predictive fit of $y$ (where $y$ is the data of the model). Therefore, for the first model, using a LogNormal distribution, we are indeed calculating the desired quantity:
$$ \text{elpd}_{psis-loo}^{(1)} \approx \sum_{i=1}^n \log p(y_i|y_{-i}) $$
Whereas for the second model, we are calculating:
$$ \text{elpd}_{psis-loo}^{(2)} \approx \sum_{i=1}^n \log p(z_i|z_{-i}) $$
being $z_i = \log y_i$. We actually have two different probability density functions: one over $y$, which from here on we will denote $p_y(y)$, and one over $z$, denoted $p_z(z)$.
In order to estimate the elpd loo for $y$ from the data in the second model, $z$, we have to describe $p_y(y)$ as a function of $z$ and $p_z(z)$. We know that $y$ and $z$ are related, and we can use this relation to find how the random variable $y$ (which is actually a transformation of the random variable $z$) is distributed. This is done with the Jacobian of the transformation. Therefore:
$$
p_y(y|\theta)=p_z(z|\theta)|\frac{dz}{dy}|=\frac{1}{|y|}p_z(z|\theta)=e^{-z}p_z(z|\theta)
$$
In the log scale:
$$
\log p_y(y|\theta)=-z + \log p_z(z|\theta)
$$
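As a quick numerical sanity check of this change of variables (not part of the original analysis), we can compare the LogNormal density of $y$ with the Normal density of $z = \log y$ multiplied by the Jacobian $1/y$; `scipy` is imported here only for this check:

```python
from scipy import stats

y_check = 3.7
mu_check, sigma_check = 2.0, 1.0
# p_y(y | theta) for a LogNormal(mu, sigma)
lhs = stats.lognorm.pdf(y_check, s=sigma_check, scale=np.exp(mu_check))
# p_z(z | theta) * |dz/dy|: Normal(mu, sigma) density at log(y), times 1/y
rhs = stats.norm.pdf(np.log(y_check), loc=mu_check, scale=sigma_check) / y_check
np.isclose(lhs, rhs)  # True
```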
We apply the results to the log likelihood data of the second model (the normal on the logarithm instead of the lognormal) and check that now the result does coincide with the LOO-CV estimated by the lognormal model.
```python
old_like = idata_normal.sample_stats.log_likelihood
z = logy
idata_normal.sample_stats["log_likelihood"] = -z+old_like
```
```python
az.loo(idata_normal)
```
Computed from 2000 by 30 log-likelihood matrix
Estimate SE
IC_loo 225.84 14.46
p_loo 2.00 -
## Real example
We will now use as data a subsample of a [real dataset](https://docs.google.com/spreadsheets/d/1gt1Dvi7AnQJiBb5vKaxanTis_sfd4sC4sVoswM1Fz7s/pub#). The subset has been selected using:
```python
df = pd.read_excel("indicator breast female incidence.xlsx").set_index("Breast Female Incidence").dropna(thresh=20).T
df.to_csv("indicator_breast_female_incidence.csv")
```
Below, the data is loaded and plotted for inspection.
```python
df = pd.read_csv("indicator_breast_female_incidence.csv", index_col=0)
df.plot();
```
In order to show different examples of LOO on transformed data, we will take into account the following models:
$$
\begin{align}
&y=a_1 x+a_0 \\
&y=e^{b_0}e^{b_1 x} &\rightarrow& \quad\log y = z_1 = b_1 x + b_0\\
&y=c_1^2 x^2 + 2 c_1 c_0 x + c_0^2 &\rightarrow& \quad\sqrt{y} = z_2 = c_1 x + c_0
\end{align}
$$
These models have been chosen mainly because of their simplicity. In addition, they can all be fitted using the same Stan code, and the data looks roughly linear. This puts the focus of the example on the LOO calculation instead of on the model itself. For the online example, the data from Finland has been chosen, but feel free to download the notebook and experiment with it.
```python
y_data = df.Finland
z1_data = np.log(y_data)
z2_data = np.sqrt(y_data)
x_data = df.index/100 # rescale to set both to a similar scale
dict_y = {"N": len(x_data), "y": y_data, "x": x_data}
dict_z1 = {"N": len(x_data), "y": z1_data, "x": x_data}
dict_z2 = {"N": len(x_data), "y": z2_data, "x": x_data}
coords = {"year": x_data}
dims = {"y": ["year"], "log_likelihood": ["year"]}
```
```python
with open("linear_regression.stan", "r") as f:
lr_code = f.read()
```
<details>
<summary markdown='span'>Stan code for Linear Regression
</summary>
```python
print(lr_code)
```
data {
int<lower=0> N;
vector[N] x;
vector[N] y;
}
parameters {
real b0;
real b1;
real<lower=0> sigma_e;
}
model {
b0 ~ normal(0, 20);
b1 ~ normal(0, 20);
for (i in 1:N) {
y[i] ~ normal(b0 + b1 * x[i], sigma_e);
}
}
generated quantities {
vector[N] log_lik;
vector[N] y_hat;
for (i in 1:N) {
log_lik[i] = normal_lpdf(y[i] | b0 + b1 * x[i], sigma_e);
y_hat[i] = normal_rng(b0 + b1 * x[i], sigma_e);
}
}
</details><br/>
```python
sm_lr = pystan.StanModel(model_code=lr_code)
control = {"max_treedepth": 15}
```
INFO:pystan:COMPILING THE C++ CODE FOR MODEL anon_model_48dbd7ee0ddc95eb18559d7bcb63f497 NOW.
```python
fit_y = sm_lr.sampling(data=dict_y, iter=1500, chains=6, control=control)
```
```python
fit_z1 = sm_lr.sampling(data=dict_z1, iter=1500, chains=6, control=control)
```
```python
fit_z2 = sm_lr.sampling(data=dict_z2, iter=1500, chains=6, control=control)
```
<details>
<summary markdown='span'>Convertion to InferenceData and posterior exploration
</summary>
```python
idata_y = az.from_pystan(
posterior=fit_y,
posterior_predictive='y_hat',
observed_data=['y'],
log_likelihood='log_lik',
coords=coords,
dims=dims,
)
idata_y.posterior = idata_y.posterior.rename({"b0": "a0", "b1": "a1"})
az.plot_posterior(idata_y);
```
```python
idata_z1 = az.from_pystan(
posterior=fit_z1,
posterior_predictive='y_hat',
observed_data=['y'],
log_likelihood='log_lik',
coords=coords,
dims=dims,
)
az.plot_posterior(idata_z1);
```
```python
idata_z2 = az.from_pystan(
posterior=fit_z2,
posterior_predictive='y_hat',
observed_data=['y'],
log_likelihood='log_lik',
coords=coords,
dims=dims,
)
idata_z2.posterior = idata_z2.posterior.rename({"b0": "c0", "b1": "c1"})
az.plot_posterior(idata_z2);
```
</details><br/>
In order to compare the out of sample predictive accuracy, we have to apply the Jacobian transformation to the 2 latter models, so that all of them are in terms of $y$.
Note: we will use LOO instead of the Leave Future Out algorithm, even though the latter may be more appropriate for this time-ordered data, because the Jacobian transformation to be applied is the same in both cases. Moreover, PSIS-LOO does not require refitting, and it is already implemented in ArviZ.
The transformation to apply to the second model $z_1 = \log y$ is the same as the previous example:
```python
old_loo_z1 = az.loo(idata_z1).loo
old_like = idata_z1.sample_stats.log_likelihood
idata_z1.sample_stats["log_likelihood"] = -z1_data.values+old_like
```
In the case of the third model, $z_2 = \sqrt{y}$:
$$ |\frac{dz}{dy}| = |\frac{1}{2\sqrt{y}}| = \frac{1}{2 z_2} \quad \rightarrow \quad \log |\frac{dz}{dy}| = -\log (2 z_2)$$
```python
old_loo_z2 = az.loo(idata_z2).loo
old_like = idata_z2.sample_stats.log_likelihood
idata_z2.sample_stats["log_likelihood"] = -np.log(2*z2_data.values)+old_like
```
```python
az.loo(idata_y)
```
Computed from 4500 by 46 log-likelihood matrix
Estimate SE
IC_loo 388.43 7.72
p_loo 1.56 -
```python
print("LOO before Jacobian transformation: {:.2f}".format(old_loo_z1))
print(az.loo(idata_z1))
```
LOO before Jacobian transformation: -141.43
Computed from 4500 by 46 log-likelihood matrix
Estimate SE
IC_loo 200.56 8.95
p_loo 3.03 -
```python
print("LOO before Jacobian transformation: {:.2f}".format(old_loo_z2))
print(az.loo(idata_z2))
```
LOO before Jacobian transformation: -4.93
Computed from 4500 by 46 log-likelihood matrix
Estimate SE
IC_loo 229.84 7.56
p_loo 2.83 -
## References
Vehtari, A., Gelman, A., and Gabry, J. (2017): Practical Bayesian Model Evaluation Using Leave-One-Out Cross-Validation and WAIC, _Statistics and Computing_, vol. 27(5), pp. 1413–1432.
|
using Pkg
Pkg.add("ArgParse")
Pkg.add("DifferentialEquations")
Pkg.add("DiffEqCallbacks")
Pkg.add("Random")
Pkg.add("SparseArrays")
Pkg.add("StatsBase")
Pkg.add("Distributions")
|
#example of 'one liners' where it's safer to break the explicit syntax rules in the name of style
#turn a DNA sequence into a lower case vector
s2v = function(dna_string) tolower(strsplit(dna_string, "")[[1]])
#turn a vector of DNA sequence into a string
v2s = function(dna_vec) paste(dna_vec, collapse = "")
#get a random DNA nucleotide
#the argument with a default lets this be
#used in solving two problems!
random_bp = function(exclude_base = NULL){
bps = c('a', 't', 'g', 'c')
if(!is.null(exclude_base)){
bps = bps[bps != exclude_base]
}
sample(bps, 1)
}
# a simplified version of a function to introduce errors into DNA sequences
#by using functions, I've been able to use the random_bp to accomplish two related
#but different tasks with minimal effort
error_introduce = function(dna_string, global_mutation_rate = 0.01, global_indel_rate = 0.01){
org_vec = s2v(dna_string)
new_seq = c()
for(i in 1:length(org_vec)){
b = org_vec[[i]]
prob = runif(1)
if(prob < global_mutation_rate){
#point mutation
new_b = random_bp(exclude_base=b)
new_seq = c(new_seq,new_b)
} else if ((global_mutation_rate < prob) && (prob<(global_indel_rate+global_mutation_rate))){
#indel
in_prob = runif(1)
if(in_prob<0.5){
#insertion
#add the base
new_seq = c(new_seq, b)
#insert a base after
new_seq = c(new_seq, random_bp())
}else{
#deletion
#don't add anything so base is skipped
#this 'else' statement could be omitted for brevity
next
}
}else{
new_seq = c(new_seq, b)
}
}
output = v2s(new_seq)
return(output)
}
#reusing the 'building blocks' I've made to easily do something else
random_add = function(seq, side = 3 , max = 100){
#side says where the addition is made
#1 = front
#2 = back
#3 = both
front_seq = c()
back_seq = c()
if(side == 1 || side == 3){
for(i in 1:sample.int(max, 1)){
front_seq = c(front_seq, random_bp())
}
}
if(side == 2 || side == 3){
for(i in 1:sample.int(max, 1)){
back_seq = c(back_seq, random_bp())
}
}
return(v2s(c(front_seq, seq, back_seq)))
}
dna_string = "ctctacttgatttttggtgcatgagcaggaatagttggaatagctttaagtttactaattcgcgctgaactaggtcaacccggatctcttttaggggatgatcagatttataatgtgatcgtaaccgcccatgcctttgtaataatcttttttatggttatacctgtaataattggtggctttggcaattgacttgttcctttaataattggtgcaccagatatagcattccctcgaataaataatataagtttctggcttcttcctccttcgttcttacttctcctggcctccgcaggagtagaagctggagcaggaaccggatgaactgtatatcctcctttagcaggtaatttagcacatgctggcccctctgttgatttagccatcttttcccttcatttggccggtatctcatcaattttagcctctattaattttattacaactattattaatataaaacccccaactatttctcaatatcaaacaccattatttgtttgatctattcttatcaccactgttcttctactccttgctctccctgttcttgcagccggaattacaatattattaacagaccgcaacctcaacactacattctttgaccccgcagggggaggggacccaattctctatcaacactta"
error_introduce(dna_string)
error_introduce("ctctacttgatttttggtgcatgagcaggaatagttggaatagctttaagt")
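#the random_add 'building block' from above can be demonstrated the same way
#(an illustrative call; the output is random, padding up to 10 bases on each side)
random_add("atgcatgc", side = 3, max = 10)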
#you can put those nice little pipes you make into a function to turn
#them into simple and reusable one liners!
library(tidyverse)
#this is matt's code from last week that uses mtcars to make a new col
mutate_df = function(df){
  df = df %>%
    mutate(NEW_COLUMN = mpg + cyl) %>%
    rename(new_column_new_you = NEW_COLUMN) %>%
    # rename can also be used to change the names of fixed positions
    arrange(hp) %>%
    dplyr::select(-new_column_new_you)
  return(df)
}
#all the detail is abstracted away to just this!
new_df = mutate_df(mtcars)
|
(* Title: HOL/Analysis/Cross3.thy
Author: L C Paulson, University of Cambridge
Ported from HOL Light
*)
section\<open>Vector Cross Products in 3 Dimensions\<close>
theory "Cross3"
imports Determinants Cartesian_Euclidean_Space
begin
context includes no_Set_Product_syntax
begin \<comment>\<open>locally disable syntax for set product, to avoid warnings\<close>
definition\<^marker>\<open>tag important\<close> cross3 :: "[real^3, real^3] \<Rightarrow> real^3" (infixr "\<times>" 80)
where "a \<times> b \<equiv>
vector [a$2 * b$3 - a$3 * b$2,
a$3 * b$1 - a$1 * b$3,
a$1 * b$2 - a$2 * b$1]"
end
bundle cross3_syntax begin
notation cross3 (infixr "\<times>" 80)
no_notation Product_Type.Times (infixr "\<times>" 80)
end
bundle no_cross3_syntax begin
no_notation cross3 (infixr "\<times>" 80)
notation Product_Type.Times (infixr "\<times>" 80)
end
unbundle cross3_syntax
subsection\<open> Basic lemmas\<close>
lemmas cross3_simps = cross3_def inner_vec_def sum_3 det_3 vec_eq_iff vector_def algebra_simps
lemma dot_cross_self: "x \<bullet> (x \<times> y) = 0" "x \<bullet> (y \<times> x) = 0" "(x \<times> y) \<bullet> y = 0" "(y \<times> x) \<bullet> y = 0"
by (simp_all add: orthogonal_def cross3_simps)
lemma orthogonal_cross: "orthogonal (x \<times> y) x" "orthogonal (x \<times> y) y"
"orthogonal y (x \<times> y)" "orthogonal (x \<times> y) x"
by (simp_all add: orthogonal_def dot_cross_self)
lemma cross_zero_left [simp]: "0 \<times> x = 0" and cross_zero_right [simp]: "x \<times> 0 = 0" for x::"real^3"
by (simp_all add: cross3_simps)
lemma cross_skew: "(x \<times> y) = -(y \<times> x)" for x::"real^3"
by (simp add: cross3_simps)
lemma cross_refl [simp]: "x \<times> x = 0" for x::"real^3"
by (simp add: cross3_simps)
lemma cross_add_left: "(x + y) \<times> z = (x \<times> z) + (y \<times> z)" for x::"real^3"
by (simp add: cross3_simps)
lemma cross_add_right: "x \<times> (y + z) = (x \<times> y) + (x \<times> z)" for x::"real^3"
by (simp add: cross3_simps)
lemma cross_mult_left: "(c *\<^sub>R x) \<times> y = c *\<^sub>R (x \<times> y)" for x::"real^3"
by (simp add: cross3_simps)
lemma cross_mult_right: "x \<times> (c *\<^sub>R y) = c *\<^sub>R (x \<times> y)" for x::"real^3"
by (simp add: cross3_simps)
lemma cross_minus_left [simp]: "(-x) \<times> y = - (x \<times> y)" for x::"real^3"
by (simp add: cross3_simps)
lemma cross_minus_right [simp]: "x \<times> -y = - (x \<times> y)" for x::"real^3"
by (simp add: cross3_simps)
lemma left_diff_distrib: "(x - y) \<times> z = x \<times> z - y \<times> z" for x::"real^3"
by (simp add: cross3_simps)
lemma right_diff_distrib: "x \<times> (y - z) = x \<times> y - x \<times> z" for x::"real^3"
by (simp add: cross3_simps)
hide_fact (open) left_diff_distrib right_diff_distrib
proposition Jacobi: "x \<times> (y \<times> z) + y \<times> (z \<times> x) + z \<times> (x \<times> y) = 0" for x::"real^3"
by (simp add: cross3_simps)
proposition Lagrange: "x \<times> (y \<times> z) = (x \<bullet> z) *\<^sub>R y - (x \<bullet> y) *\<^sub>R z"
by (simp add: cross3_simps) (metis (full_types) exhaust_3)
proposition cross_triple: "(x \<times> y) \<bullet> z = (y \<times> z) \<bullet> x"
by (simp add: cross3_def inner_vec_def sum_3 vec_eq_iff algebra_simps)
lemma cross_components:
"(x \<times> y)$1 = x$2 * y$3 - y$2 * x$3" "(x \<times> y)$2 = x$3 * y$1 - y$3 * x$1" "(x \<times> y)$3 = x$1 * y$2 - y$1 * x$2"
by (simp_all add: cross3_def inner_vec_def sum_3 vec_eq_iff algebra_simps)
lemma cross_basis: "(axis 1 1) \<times> (axis 2 1) = axis 3 1" "(axis 2 1) \<times> (axis 1 1) = -(axis 3 1)"
"(axis 2 1) \<times> (axis 3 1) = axis 1 1" "(axis 3 1) \<times> (axis 2 1) = -(axis 1 1)"
"(axis 3 1) \<times> (axis 1 1) = axis 2 1" "(axis 1 1) \<times> (axis 3 1) = -(axis 2 1)"
using exhaust_3
by (force simp add: axis_def cross3_simps)+
lemma cross_basis_nonzero:
"u \<noteq> 0 \<Longrightarrow> u \<times> axis 1 1 \<noteq> 0 \<or> u \<times> axis 2 1 \<noteq> 0 \<or> u \<times> axis 3 1 \<noteq> 0"
by (clarsimp simp add: axis_def cross3_simps) (metis exhaust_3)
lemma cross_dot_cancel:
fixes x::"real^3"
assumes deq: "x \<bullet> y = x \<bullet> z" and veq: "x \<times> y = x \<times> z" and x: "x \<noteq> 0"
shows "y = z"
proof -
have "x \<bullet> x \<noteq> 0"
by (simp add: x)
then have "y - z = 0"
using veq
by (metis (no_types, lifting) Cross3.right_diff_distrib Lagrange deq eq_iff_diff_eq_0 inner_diff_right scale_eq_0_iff)
then show ?thesis
using eq_iff_diff_eq_0 by blast
qed
lemma norm_cross_dot: "(norm (x \<times> y))\<^sup>2 + (x \<bullet> y)\<^sup>2 = (norm x * norm y)\<^sup>2"
unfolding power2_norm_eq_inner power_mult_distrib
by (simp add: cross3_simps power2_eq_square)
lemma dot_cross_det: "x \<bullet> (y \<times> z) = det(vector[x,y,z])"
by (simp add: cross3_simps)
lemma cross_cross_det: "(w \<times> x) \<times> (y \<times> z) = det(vector[w,x,z]) *\<^sub>R y - det(vector[w,x,y]) *\<^sub>R z"
using exhaust_3 by (force simp add: cross3_simps)
proposition dot_cross: "(w \<times> x) \<bullet> (y \<times> z) = (w \<bullet> y) * (x \<bullet> z) - (w \<bullet> z) * (x \<bullet> y)"
by (force simp add: cross3_simps)
proposition norm_cross: "(norm (x \<times> y))\<^sup>2 = (norm x)\<^sup>2 * (norm y)\<^sup>2 - (x \<bullet> y)\<^sup>2"
unfolding power2_norm_eq_inner power_mult_distrib
by (simp add: cross3_simps power2_eq_square)
lemma cross_eq_0: "x \<times> y = 0 \<longleftrightarrow> collinear{0,x,y}"
proof -
have "x \<times> y = 0 \<longleftrightarrow> norm (x \<times> y) = 0"
by simp
also have "... \<longleftrightarrow> (norm x * norm y)\<^sup>2 = (x \<bullet> y)\<^sup>2"
using norm_cross [of x y] by (auto simp: power_mult_distrib)
also have "... \<longleftrightarrow> \<bar>x \<bullet> y\<bar> = norm x * norm y"
using power2_eq_iff
by (metis (mono_tags, opaque_lifting) abs_minus abs_norm_cancel abs_power2 norm_mult power_abs real_norm_def)
also have "... \<longleftrightarrow> collinear {0, x, y}"
by (rule norm_cauchy_schwarz_equal)
finally show ?thesis .
qed
lemma cross_eq_self: "x \<times> y = x \<longleftrightarrow> x = 0" "x \<times> y = y \<longleftrightarrow> y = 0"
apply (metis cross_zero_left dot_cross_self(1) inner_eq_zero_iff)
by (metis cross_zero_right dot_cross_self(2) inner_eq_zero_iff)
lemma norm_and_cross_eq_0:
"x \<bullet> y = 0 \<and> x \<times> y = 0 \<longleftrightarrow> x = 0 \<or> y = 0" (is "?lhs = ?rhs")
proof
assume ?lhs
then show ?rhs
by (metis cross_dot_cancel cross_zero_right inner_zero_right)
qed auto
lemma bilinear_cross: "bilinear(\<times>)"
apply (auto simp add: bilinear_def linear_def)
apply unfold_locales
apply (simp add: cross_add_right)
apply (simp add: cross_mult_right)
apply (simp add: cross_add_left)
apply (simp add: cross_mult_left)
done
subsection \<open>Preservation by rotation, or other orthogonal transformation up to sign\<close>
lemma cross_matrix_mult: "transpose A *v ((A *v x) \<times> (A *v y)) = det A *\<^sub>R (x \<times> y)"
apply (simp add: vec_eq_iff )
apply (simp add: vector_matrix_mult_def matrix_vector_mult_def forall_3 cross3_simps)
done
lemma cross_orthogonal_matrix:
assumes "orthogonal_matrix A"
shows "(A *v x) \<times> (A *v y) = det A *\<^sub>R (A *v (x \<times> y))"
proof -
have "mat 1 = transpose (A ** transpose A)"
by (metis (no_types) assms orthogonal_matrix_def transpose_mat)
then show ?thesis
by (metis (no_types) vector_matrix_mul_rid vector_transpose_matrix cross_matrix_mult matrix_vector_mul_assoc matrix_vector_mult_scaleR)
qed
lemma cross_rotation_matrix: "rotation_matrix A \<Longrightarrow> (A *v x) \<times> (A *v y) = A *v (x \<times> y)"
by (simp add: rotation_matrix_def cross_orthogonal_matrix)
lemma cross_rotoinversion_matrix: "rotoinversion_matrix A \<Longrightarrow> (A *v x) \<times> (A *v y) = - A *v (x \<times> y)"
by (simp add: rotoinversion_matrix_def cross_orthogonal_matrix scaleR_matrix_vector_assoc)
lemma cross_orthogonal_transformation:
assumes "orthogonal_transformation f"
shows "(f x) \<times> (f y) = det(matrix f) *\<^sub>R f(x \<times> y)"
proof -
have orth: "orthogonal_matrix (matrix f)"
using assms orthogonal_transformation_matrix by blast
have "matrix f *v z = f z" for z
using assms orthogonal_transformation_matrix by force
with cross_orthogonal_matrix [OF orth] show ?thesis
by simp
qed
lemma cross_linear_image:
"\<lbrakk>linear f; \<And>x. norm(f x) = norm x; det(matrix f) = 1\<rbrakk>
\<Longrightarrow> (f x) \<times> (f y) = f(x \<times> y)"
by (simp add: cross_orthogonal_transformation orthogonal_transformation)
subsection \<open>Continuity\<close>
lemma continuous_cross: "\<lbrakk>continuous F f; continuous F g\<rbrakk> \<Longrightarrow> continuous F (\<lambda>x. (f x) \<times> (g x))"
apply (subst continuous_componentwise)
apply (clarsimp simp add: cross3_simps)
apply (intro continuous_intros; simp)
done
lemma continuous_on_cross:
fixes f :: "'a::t2_space \<Rightarrow> real^3"
shows "\<lbrakk>continuous_on S f; continuous_on S g\<rbrakk> \<Longrightarrow> continuous_on S (\<lambda>x. (f x) \<times> (g x))"
by (simp add: continuous_on_eq_continuous_within continuous_cross)
unbundle no_cross3_syntax
end
|
#ifndef CONSTANTS_H
#define CONSTANTS_H
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <limits.h>
#include <math.h>
#include <time.h>
#include <float.h>
#include <utils/version.h>
#include <utils/help.h>
#include <utils/error.h>
#include <gsl/gsl_statistics_double.h>
/*
* MIN macro
*/
#define MIN(a,b) (((a)<(b))?(a):(b))
/*
* MAX macro
*/
#define MAX(a,b) (((a)>(b))?(a):(b))
/*
* STR macro
*/
#define STR(a) ((a > 0)?("-"):("+"))
/*
* Default value for read_minlen parameter
*/
#define MIN_READ_LEN 0
/*
* Default value for minlen parameter
*/
#define MIN_LEN 15
/*
* Default value for maxlen parameter
*/
#define MAX_LEN 30
/*
* Default value for spacing parameter
*/
#define SPACING 20
/*
* Default value for minheight parameter
*/
#define MIN_READS 10.0f
/*
* Default value for trimming threshold parameter
*/
#define TRIM_THRESHOLD 0.05
/*
* Default value for minimum trimming parameter
*/
#define TRIM_MIN 2
/*
* Default value for maximum trimming parameter
*/
#define TRIM_MAX 20
/*
* Default value for IDR cutoff
*/
#define CUTOFF 2.0f
/*
* Default value for Replicate number
*/
#define REPLICATE_NUMBER 1
/*
* Replicate treatment options
*/
#define REPLICATE_POOL_STR "pool" // default value
#define REPLICATE_MEAN_STR "mean"
#define REPLICATE_REPLICATE_STR "replicate"
#define REPLICATE_POOL 0
#define REPLICATE_MEAN 1
#define REPLICATE_REPLICATE 2
/*
* IDR method options
*/
#define IDR_COMMON_STR "common" // default value
#define IDR_NONE_STR "none"
#define IDR_SERE_STR "sere"
#define IDR_IDR_STR "idr"
#define IDR_COMMON 0
#define IDR_NONE 1
#define IDR_SERE 2
#define IDR_IDR 3
/*
* Maximum length of a contig
*/
#define MAX_CONTIG_LENGTH 200
/*
* Maximum number of contigs
*/
#define MAX_CONTIGS 10000000
/*
* Maximum size, in chars, of an error message
*/
#define MAX_ERR_MSG 100
/*
* Maximum size, in chars, of a given path
*/
#define MAX_PATH 500
/*
* Maximum size, in chars, of the 4th field (name) in the query BED file
*/
#define MAX_FEATURE 50
/*
* Maximum number of replicates
*/
#define MAX_REPLICATES 10
/*
* Maximum number of alignments per heap
*/
#define MAX_ALIGN_HEAP 50000000
/*
* Maximum read length
*/
#define MAX_READ_LENGTH 200
/*
* Maximum profile length
*/
#define MAX_PROFILE_LENGTH 500
/*
* Maximum number of block base pairs
*/
#define MAX_BLOCK 3000
/*
* Alignment strand
*/
#define FWD_STRAND 0 // forward/watson
#define REV_STRAND 1 // reverse/crick
/*
* Alignment validity
*/
#define VALID_ALIGNMENT 1
#define INVALID_ALIGNMENT 0
/*
* Constants for npIDR method
*/
#define ABSOLUTE 0
#define CONDITIONAL 1
/*
* Path separator
*/
#ifdef __unix__
#define PATH_SEPARATOR "/"
#else
#define PATH_SEPARATOR "\\"
#endif
/*
* Output file suffixes
*/
#define PROFILES_SUFFIX "profiles.dat"
#define CONTIGS_SUFFIX "contigs.dat"
#define CROSSCOR_SUFFIX "crosscor.dat"
#define CLUSTERS_SUFFIX "clusters.neWick"
#define ANNOTATION_O_SUFFIX "annotation.bed"
#define TMPROFILES_SUFFIX "tmprofiles.dat"
/*
* Maximum N limit for gaussian white noise generation
*/
#define MAX_GNOISE_N 20
/*
* Cluster cutoff default value
*/
#define CLUSTER_CUTOFF -1.0f
/*
* Condition for existence of annotation file
*/
#define ANNOTATION_CONDITION 0
/*
* Condition for existence of additional profiles file
*/
#define ADDITIONAL_P_CONDITION 0
/*
* Default value for the feature to profile overlap percentage
*/
#define OVERLAP_FTOP 0.9
/*
* Default value for the profile to feature overlap percentage
*/
#define OVERLAP_PTOF 0.5
/*
* Maximum number of annotation files
*/
#define MAX_ANNOTATIONS 10
/*
* Condition for existence of correlations file
*/
#define CORRELATIONS_CONDITION 0
/*
* Constants for profile category
*/
#define NOVEL 0
#define KNOWN 1
/*
* Differential processing default p-value
*/
#define P_VALUE 0.01
/*
* Differential processing default overlap
*/
#define DP_FOLD_CHANGE 7
/*
* Suffix for differentially processed profiles
*/
#define DIFFPROC_PROFILE_O_SUFFIX "diffprofiles.dat"
/*
* Suffix for differentially processed clusters
*/
#define DIFFPROC_CLUSTER_O_SUFFIX "diffclusters.dat"
#endif
|
[STATEMENT]
lemma weakComm2:
fixes \<Psi> :: 'b
and R :: "('a, 'b, 'c) psi"
and P :: "('a, 'b, 'c) psi"
and \<alpha> :: "'a action"
and P' :: "('a, 'b, 'c) psi"
and Q :: "('a, 'b, 'c) psi"
and A\<^sub>Q :: "name list"
and \<Psi>\<^sub>Q :: 'b
assumes PTrans: "\<Psi> \<otimes> \<Psi>\<^sub>Q : R \<rhd> P \<Longrightarrow>M\<lparr>\<nu>*xvec\<rparr>\<langle>N\<rangle> \<prec> P'"
and FrR: "extractFrame R = \<langle>A\<^sub>R, \<Psi>\<^sub>R\<rangle>"
and QTrans: "\<Psi> \<otimes> \<Psi>\<^sub>R \<rhd> Q \<longmapsto>K\<lparr>N\<rparr> \<prec> Q'"
and FrQ: "extractFrame Q = \<langle>A\<^sub>Q, \<Psi>\<^sub>Q\<rangle>"
and MeqK: "\<Psi> \<otimes> \<Psi>\<^sub>R \<otimes> \<Psi>\<^sub>Q \<turnstile> M \<leftrightarrow> K"
and "A\<^sub>R \<sharp>* \<Psi>"
and "A\<^sub>R \<sharp>* P"
and "A\<^sub>R \<sharp>* Q"
and "A\<^sub>R \<sharp>* R"
and "A\<^sub>R \<sharp>* M"
and "A\<^sub>R \<sharp>* A\<^sub>Q"
and "A\<^sub>Q \<sharp>* \<Psi>"
and "A\<^sub>Q \<sharp>* P"
and "A\<^sub>Q \<sharp>* Q"
and "A\<^sub>Q \<sharp>* R"
and "A\<^sub>Q \<sharp>* K"
and "xvec \<sharp>* Q"
and "xvec \<sharp>* M"
and "xvec \<sharp>* A\<^sub>Q"
and "xvec \<sharp>* A\<^sub>R"
shows "\<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> (\<lparr>\<nu>*xvec\<rparr>(P' \<parallel> Q'))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
from \<open>extractFrame Q = \<langle>A\<^sub>Q, \<Psi>\<^sub>Q\<rangle>\<close> \<open>A\<^sub>Q \<sharp>* \<Psi>\<close> \<open>A\<^sub>Q \<sharp>* P\<close> \<open>A\<^sub>Q \<sharp>* Q\<close> \<open>A\<^sub>Q \<sharp>* R\<close> \<open>A\<^sub>Q \<sharp>* K\<close> \<open>A\<^sub>R \<sharp>* A\<^sub>Q\<close> \<open>xvec \<sharp>* A\<^sub>Q\<close>
[PROOF STATE]
proof (chain)
picking this:
extractFrame Q = \<langle>A\<^sub>Q, \<Psi>\<^sub>Q\<rangle>
A\<^sub>Q \<sharp>* \<Psi>
A\<^sub>Q \<sharp>* P
A\<^sub>Q \<sharp>* Q
A\<^sub>Q \<sharp>* R
A\<^sub>Q \<sharp>* K
A\<^sub>R \<sharp>* A\<^sub>Q
xvec \<sharp>* A\<^sub>Q
[PROOF STEP]
obtain A\<^sub>Q' where FrQ': "extractFrame Q = \<langle>A\<^sub>Q', \<Psi>\<^sub>Q\<rangle>" and "distinct A\<^sub>Q'" and "A\<^sub>Q' \<sharp>* \<Psi>" and "A\<^sub>Q' \<sharp>* P"
and "A\<^sub>Q' \<sharp>* Q" and "A\<^sub>Q' \<sharp>* R" and "A\<^sub>Q' \<sharp>* K" and "A\<^sub>R \<sharp>* A\<^sub>Q'" and "A\<^sub>Q' \<sharp>* xvec"
[PROOF STATE]
proof (prove)
using this:
extractFrame Q = \<langle>A\<^sub>Q, \<Psi>\<^sub>Q\<rangle>
A\<^sub>Q \<sharp>* \<Psi>
A\<^sub>Q \<sharp>* P
A\<^sub>Q \<sharp>* Q
A\<^sub>Q \<sharp>* R
A\<^sub>Q \<sharp>* K
A\<^sub>R \<sharp>* A\<^sub>Q
xvec \<sharp>* A\<^sub>Q
goal (1 subgoal):
1. (\<And>A\<^sub>Q'. \<lbrakk>extractFrame Q = \<langle>A\<^sub>Q', \<Psi>\<^sub>Q\<rangle>; distinct A\<^sub>Q'; A\<^sub>Q' \<sharp>* \<Psi>; A\<^sub>Q' \<sharp>* P; A\<^sub>Q' \<sharp>* Q; A\<^sub>Q' \<sharp>* R; A\<^sub>Q' \<sharp>* K; A\<^sub>R \<sharp>* A\<^sub>Q'; A\<^sub>Q' \<sharp>* xvec\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(rule_tac C="(\<Psi>, P, Q, R, K, A\<^sub>R, xvec)" in distinctFrame) auto
[PROOF STATE]
proof (state)
this:
extractFrame Q = \<langle>A\<^sub>Q', \<Psi>\<^sub>Q\<rangle>
distinct A\<^sub>Q'
A\<^sub>Q' \<sharp>* \<Psi>
A\<^sub>Q' \<sharp>* P
A\<^sub>Q' \<sharp>* Q
A\<^sub>Q' \<sharp>* R
A\<^sub>Q' \<sharp>* K
A\<^sub>R \<sharp>* A\<^sub>Q'
A\<^sub>Q' \<sharp>* xvec
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
from PTrans
[PROOF STATE]
proof (chain)
picking this:
\<Psi> \<otimes> \<Psi>\<^sub>Q : R \<rhd> P \<Longrightarrow>M\<lparr>\<nu>*xvec\<rparr>\<langle>N\<rangle> \<prec> P'
[PROOF STEP]
obtain P'' where PChain: "\<Psi> \<otimes> \<Psi>\<^sub>Q \<rhd> P \<Longrightarrow>\<^sup>^\<^sub>\<tau> P''"
and RimpP'': "insertAssertion (extractFrame R) (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<hookrightarrow>\<^sub>F insertAssertion (extractFrame P'') (\<Psi> \<otimes> \<Psi>\<^sub>Q)"
and P''Trans: "\<Psi> \<otimes> \<Psi>\<^sub>Q \<rhd> P'' \<longmapsto>M\<lparr>\<nu>*xvec\<rparr>\<langle>N\<rangle> \<prec> P'"
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<otimes> \<Psi>\<^sub>Q : R \<rhd> P \<Longrightarrow>M\<lparr>\<nu>*xvec\<rparr>\<langle>N\<rangle> \<prec> P'
goal (1 subgoal):
1. (\<And>P''. \<lbrakk>\<Psi> \<otimes> \<Psi>\<^sub>Q \<rhd> P \<Longrightarrow>\<^sup>^\<^sub>\<tau> P''; insertAssertion (extractFrame R) (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<hookrightarrow>\<^sub>F insertAssertion (extractFrame P'') (\<Psi> \<otimes> \<Psi>\<^sub>Q); \<Psi> \<otimes> \<Psi>\<^sub>Q \<rhd> P'' \<longmapsto> M\<lparr>\<nu>*xvec\<rparr>\<langle>N\<rangle> \<prec> P'\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(rule weakTransitionE)
[PROOF STATE]
proof (state)
this:
\<Psi> \<otimes> \<Psi>\<^sub>Q \<rhd> P \<Longrightarrow>\<^sup>^\<^sub>\<tau> P''
insertAssertion (extractFrame R) (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<hookrightarrow>\<^sub>F insertAssertion (extractFrame P'') (\<Psi> \<otimes> \<Psi>\<^sub>Q)
\<Psi> \<otimes> \<Psi>\<^sub>Q \<rhd> P'' \<longmapsto> M\<lparr>\<nu>*xvec\<rparr>\<langle>N\<rangle> \<prec> P'
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
from PChain \<open>A\<^sub>Q' \<sharp>* P\<close>
[PROOF STATE]
proof (chain)
picking this:
\<Psi> \<otimes> \<Psi>\<^sub>Q \<rhd> P \<Longrightarrow>\<^sup>^\<^sub>\<tau> P''
A\<^sub>Q' \<sharp>* P
[PROOF STEP]
have "A\<^sub>Q' \<sharp>* P''"
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<otimes> \<Psi>\<^sub>Q \<rhd> P \<Longrightarrow>\<^sup>^\<^sub>\<tau> P''
A\<^sub>Q' \<sharp>* P
goal (1 subgoal):
1. A\<^sub>Q' \<sharp>* P''
[PROOF STEP]
by(rule tauChainFreshChain)
[PROOF STATE]
proof (state)
this:
A\<^sub>Q' \<sharp>* P''
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
obtain A\<^sub>P'' \<Psi>\<^sub>P'' where FrP'': "extractFrame P'' = \<langle>A\<^sub>P'', \<Psi>\<^sub>P''\<rangle>" and "A\<^sub>P'' \<sharp>* (\<Psi>, A\<^sub>Q', \<Psi>\<^sub>Q, A\<^sub>R, \<Psi>\<^sub>R, M, N, K, R, Q, P'', xvec)" and "distinct A\<^sub>P''"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>A\<^sub>P'' \<Psi>\<^sub>P''. \<lbrakk>extractFrame P'' = \<langle>A\<^sub>P'', \<Psi>\<^sub>P''\<rangle>; A\<^sub>P'' \<sharp>* (\<Psi>, A\<^sub>Q', \<Psi>\<^sub>Q, A\<^sub>R, \<Psi>\<^sub>R, M, N, K, R, Q, P'', xvec); distinct A\<^sub>P''\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(rule freshFrame)
[PROOF STATE]
proof (state)
this:
extractFrame P'' = \<langle>A\<^sub>P'', \<Psi>\<^sub>P''\<rangle>
A\<^sub>P'' \<sharp>* (\<Psi>, A\<^sub>Q', \<Psi>\<^sub>Q, A\<^sub>R, \<Psi>\<^sub>R, M, N, K, R, Q, P'', xvec)
distinct A\<^sub>P''
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
hence "A\<^sub>P'' \<sharp>* \<Psi>" and "A\<^sub>P'' \<sharp>* A\<^sub>Q'" and "A\<^sub>P'' \<sharp>* \<Psi>\<^sub>Q" and "A\<^sub>P'' \<sharp>* M" and "A\<^sub>P'' \<sharp>* R" and "A\<^sub>P'' \<sharp>* Q"
and "A\<^sub>P'' \<sharp>* N" and "A\<^sub>P'' \<sharp>* K" and "A\<^sub>P'' \<sharp>* A\<^sub>R" and "A\<^sub>P'' \<sharp>* P''" and "A\<^sub>P'' \<sharp>* xvec" and "A\<^sub>P'' \<sharp>* \<Psi>\<^sub>R"
[PROOF STATE]
proof (prove)
using this:
extractFrame P'' = \<langle>A\<^sub>P'', \<Psi>\<^sub>P''\<rangle>
A\<^sub>P'' \<sharp>* (\<Psi>, A\<^sub>Q', \<Psi>\<^sub>Q, A\<^sub>R, \<Psi>\<^sub>R, M, N, K, R, Q, P'', xvec)
distinct A\<^sub>P''
goal (1 subgoal):
1. ((A\<^sub>P'' \<sharp>* \<Psi> &&& A\<^sub>P'' \<sharp>* A\<^sub>Q' &&& A\<^sub>P'' \<sharp>* \<Psi>\<^sub>Q) &&& A\<^sub>P'' \<sharp>* M &&& A\<^sub>P'' \<sharp>* R &&& A\<^sub>P'' \<sharp>* Q) &&& (A\<^sub>P'' \<sharp>* N &&& A\<^sub>P'' \<sharp>* K &&& A\<^sub>P'' \<sharp>* A\<^sub>R) &&& A\<^sub>P'' \<sharp>* P'' &&& A\<^sub>P'' \<sharp>* xvec &&& A\<^sub>P'' \<sharp>* \<Psi>\<^sub>R
[PROOF STEP]
by simp+
[PROOF STATE]
proof (state)
this:
A\<^sub>P'' \<sharp>* \<Psi>
A\<^sub>P'' \<sharp>* A\<^sub>Q'
A\<^sub>P'' \<sharp>* \<Psi>\<^sub>Q
A\<^sub>P'' \<sharp>* M
A\<^sub>P'' \<sharp>* R
A\<^sub>P'' \<sharp>* Q
A\<^sub>P'' \<sharp>* N
A\<^sub>P'' \<sharp>* K
A\<^sub>P'' \<sharp>* A\<^sub>R
A\<^sub>P'' \<sharp>* P''
A\<^sub>P'' \<sharp>* xvec
A\<^sub>P'' \<sharp>* \<Psi>\<^sub>R
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
from FrR \<open>A\<^sub>R \<sharp>* A\<^sub>Q'\<close> \<open>A\<^sub>Q' \<sharp>* R\<close>
[PROOF STATE]
proof (chain)
picking this:
extractFrame R = \<langle>A\<^sub>R, \<Psi>\<^sub>R\<rangle>
A\<^sub>R \<sharp>* A\<^sub>Q'
A\<^sub>Q' \<sharp>* R
[PROOF STEP]
have "A\<^sub>Q' \<sharp>* \<Psi>\<^sub>R"
[PROOF STATE]
proof (prove)
using this:
extractFrame R = \<langle>A\<^sub>R, \<Psi>\<^sub>R\<rangle>
A\<^sub>R \<sharp>* A\<^sub>Q'
A\<^sub>Q' \<sharp>* R
goal (1 subgoal):
1. A\<^sub>Q' \<sharp>* \<Psi>\<^sub>R
[PROOF STEP]
by(drule_tac extractFrameFreshChain) auto
[PROOF STATE]
proof (state)
this:
A\<^sub>Q' \<sharp>* \<Psi>\<^sub>R
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
from FrQ' \<open>A\<^sub>R \<sharp>* A\<^sub>Q'\<close> \<open>A\<^sub>R \<sharp>* Q\<close>
[PROOF STATE]
proof (chain)
picking this:
extractFrame Q = \<langle>A\<^sub>Q', \<Psi>\<^sub>Q\<rangle>
A\<^sub>R \<sharp>* A\<^sub>Q'
A\<^sub>R \<sharp>* Q
[PROOF STEP]
have "A\<^sub>R \<sharp>* \<Psi>\<^sub>Q"
[PROOF STATE]
proof (prove)
using this:
extractFrame Q = \<langle>A\<^sub>Q', \<Psi>\<^sub>Q\<rangle>
A\<^sub>R \<sharp>* A\<^sub>Q'
A\<^sub>R \<sharp>* Q
goal (1 subgoal):
1. A\<^sub>R \<sharp>* \<Psi>\<^sub>Q
[PROOF STEP]
by(drule_tac extractFrameFreshChain) auto
[PROOF STATE]
proof (state)
this:
A\<^sub>R \<sharp>* \<Psi>\<^sub>Q
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
have "\<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>R) \<otimes> \<Psi>\<^sub>Q\<rangle> \<simeq>\<^sub>F \<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>R\<rangle>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>R) \<otimes> \<Psi>\<^sub>Q\<rangle> \<simeq>\<^sub>F \<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>R\<rangle>
[PROOF STEP]
by(metis frameResChainPres frameNilStatEq Commutativity AssertionStatEqTrans Composition Associativity)
[PROOF STATE]
proof (state)
this:
\<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>R) \<otimes> \<Psi>\<^sub>Q\<rangle> \<simeq>\<^sub>F \<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>R\<rangle>
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>R) \<otimes> \<Psi>\<^sub>Q\<rangle> \<simeq>\<^sub>F \<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>R\<rangle>
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
with RimpP'' FrP'' FrR \<open>A\<^sub>P'' \<sharp>* \<Psi>\<close> \<open>A\<^sub>R \<sharp>* \<Psi>\<close> \<open>A\<^sub>P'' \<sharp>* \<Psi>\<^sub>Q\<close> \<open>A\<^sub>R \<sharp>* \<Psi>\<^sub>Q\<close>
[PROOF STATE]
proof (chain)
picking this:
insertAssertion (extractFrame R) (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<hookrightarrow>\<^sub>F insertAssertion (extractFrame P'') (\<Psi> \<otimes> \<Psi>\<^sub>Q)
extractFrame P'' = \<langle>A\<^sub>P'', \<Psi>\<^sub>P''\<rangle>
extractFrame R = \<langle>A\<^sub>R, \<Psi>\<^sub>R\<rangle>
A\<^sub>P'' \<sharp>* \<Psi>
A\<^sub>R \<sharp>* \<Psi>
A\<^sub>P'' \<sharp>* \<Psi>\<^sub>Q
A\<^sub>R \<sharp>* \<Psi>\<^sub>Q
\<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>R) \<otimes> \<Psi>\<^sub>Q\<rangle> \<simeq>\<^sub>F \<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>R\<rangle>
[PROOF STEP]
have "\<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>R\<rangle> \<hookrightarrow>\<^sub>F \<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>P''\<rangle>"
[PROOF STATE]
proof (prove)
using this:
insertAssertion (extractFrame R) (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<hookrightarrow>\<^sub>F insertAssertion (extractFrame P'') (\<Psi> \<otimes> \<Psi>\<^sub>Q)
extractFrame P'' = \<langle>A\<^sub>P'', \<Psi>\<^sub>P''\<rangle>
extractFrame R = \<langle>A\<^sub>R, \<Psi>\<^sub>R\<rangle>
A\<^sub>P'' \<sharp>* \<Psi>
A\<^sub>R \<sharp>* \<Psi>
A\<^sub>P'' \<sharp>* \<Psi>\<^sub>Q
A\<^sub>R \<sharp>* \<Psi>\<^sub>Q
\<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>R) \<otimes> \<Psi>\<^sub>Q\<rangle> \<simeq>\<^sub>F \<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>R\<rangle>
goal (1 subgoal):
1. \<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>R\<rangle> \<hookrightarrow>\<^sub>F \<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>P''\<rangle>
[PROOF STEP]
using freshCompChain
[PROOF STATE]
proof (prove)
using this:
insertAssertion (extractFrame R) (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<hookrightarrow>\<^sub>F insertAssertion (extractFrame P'') (\<Psi> \<otimes> \<Psi>\<^sub>Q)
extractFrame P'' = \<langle>A\<^sub>P'', \<Psi>\<^sub>P''\<rangle>
extractFrame R = \<langle>A\<^sub>R, \<Psi>\<^sub>R\<rangle>
A\<^sub>P'' \<sharp>* \<Psi>
A\<^sub>R \<sharp>* \<Psi>
A\<^sub>P'' \<sharp>* \<Psi>\<^sub>Q
A\<^sub>R \<sharp>* \<Psi>\<^sub>Q
\<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>R) \<otimes> \<Psi>\<^sub>Q\<rangle> \<simeq>\<^sub>F \<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>R\<rangle>
\<lbrakk>?xvec \<sharp>* ?\<Psi>; ?xvec \<sharp>* ?\<Psi>'\<rbrakk> \<Longrightarrow> ?xvec \<sharp>* (?\<Psi> \<otimes> ?\<Psi>')
\<lbrakk>?Xs \<sharp>* ?\<Psi>; ?Xs \<sharp>* ?\<Psi>'\<rbrakk> \<Longrightarrow> ?Xs \<sharp>* (?\<Psi> \<otimes> ?\<Psi>')
goal (1 subgoal):
1. \<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>R\<rangle> \<hookrightarrow>\<^sub>F \<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>P''\<rangle>
[PROOF STEP]
by(simp add: freshChainSimps)
[PROOF STATE]
proof (state)
this:
\<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>R\<rangle> \<hookrightarrow>\<^sub>F \<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>P''\<rangle>
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>R\<rangle> \<hookrightarrow>\<^sub>F \<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>P''\<rangle>
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
have "\<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>P''\<rangle> \<simeq>\<^sub>F \<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>P'') \<otimes> \<Psi>\<^sub>Q\<rangle>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>P''\<rangle> \<simeq>\<^sub>F \<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>P'') \<otimes> \<Psi>\<^sub>Q\<rangle>
[PROOF STEP]
by(metis frameResChainPres frameNilStatEq Commutativity AssertionStatEqTrans Composition Associativity)
[PROOF STATE]
proof (state)
this:
\<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>P''\<rangle> \<simeq>\<^sub>F \<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>P'') \<otimes> \<Psi>\<^sub>Q\<rangle>
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
\<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>R) \<otimes> \<Psi>\<^sub>Q\<rangle> \<simeq>\<^sub>F \<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>R\<rangle>
\<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>R\<rangle> \<hookrightarrow>\<^sub>F \<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>P''\<rangle>
\<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>P''\<rangle> \<simeq>\<^sub>F \<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>P'') \<otimes> \<Psi>\<^sub>Q\<rangle>
[PROOF STEP]
have RImpP'': "\<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>R) \<otimes> \<Psi>\<^sub>Q\<rangle> \<hookrightarrow>\<^sub>F \<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>P'') \<otimes> \<Psi>\<^sub>Q\<rangle>"
[PROOF STATE]
proof (prove)
using this:
\<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>R) \<otimes> \<Psi>\<^sub>Q\<rangle> \<simeq>\<^sub>F \<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>R\<rangle>
\<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>R\<rangle> \<hookrightarrow>\<^sub>F \<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>P''\<rangle>
\<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>Q) \<otimes> \<Psi>\<^sub>P''\<rangle> \<simeq>\<^sub>F \<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>P'') \<otimes> \<Psi>\<^sub>Q\<rangle>
goal (1 subgoal):
1. \<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>R) \<otimes> \<Psi>\<^sub>Q\<rangle> \<hookrightarrow>\<^sub>F \<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>P'') \<otimes> \<Psi>\<^sub>Q\<rangle>
[PROOF STEP]
by(rule FrameStatEqImpCompose)
[PROOF STATE]
proof (state)
this:
\<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>R) \<otimes> \<Psi>\<^sub>Q\<rangle> \<hookrightarrow>\<^sub>F \<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>P'') \<otimes> \<Psi>\<^sub>Q\<rangle>
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
from PChain FrQ' \<open>A\<^sub>Q' \<sharp>* \<Psi>\<close> \<open>A\<^sub>Q' \<sharp>* P\<close>
[PROOF STATE]
proof (chain)
picking this:
\<Psi> \<otimes> \<Psi>\<^sub>Q \<rhd> P \<Longrightarrow>\<^sup>^\<^sub>\<tau> P''
extractFrame Q = \<langle>A\<^sub>Q', \<Psi>\<^sub>Q\<rangle>
A\<^sub>Q' \<sharp>* \<Psi>
A\<^sub>Q' \<sharp>* P
[PROOF STEP]
have "\<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sup>^\<^sub>\<tau> P'' \<parallel> Q"
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<otimes> \<Psi>\<^sub>Q \<rhd> P \<Longrightarrow>\<^sup>^\<^sub>\<tau> P''
extractFrame Q = \<langle>A\<^sub>Q', \<Psi>\<^sub>Q\<rangle>
A\<^sub>Q' \<sharp>* \<Psi>
A\<^sub>Q' \<sharp>* P
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sup>^\<^sub>\<tau> P'' \<parallel> Q
[PROOF STEP]
by(rule tauChainPar1)
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sup>^\<^sub>\<tau> P'' \<parallel> Q
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sup>^\<^sub>\<tau> P'' \<parallel> Q
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
from QTrans FrR P''Trans MeqK RImpP'' FrP'' FrQ' \<open>distinct A\<^sub>P''\<close> \<open>distinct A\<^sub>Q'\<close> \<open>A\<^sub>P'' \<sharp>* A\<^sub>Q'\<close> \<open>A\<^sub>R \<sharp>* A\<^sub>Q'\<close>
\<open>A\<^sub>Q' \<sharp>* \<Psi>\<close> \<open>A\<^sub>Q' \<sharp>* P''\<close> \<open>A\<^sub>Q' \<sharp>* Q\<close> \<open>A\<^sub>Q' \<sharp>* R\<close> \<open>A\<^sub>Q' \<sharp>* K\<close> \<open>A\<^sub>P'' \<sharp>* \<Psi>\<close> \<open>A\<^sub>P'' \<sharp>* R\<close> \<open>A\<^sub>P'' \<sharp>* Q\<close>
\<open>A\<^sub>P'' \<sharp>* P''\<close> \<open>A\<^sub>P'' \<sharp>* M\<close> \<open>A\<^sub>Q \<sharp>* R\<close> \<open>A\<^sub>R \<sharp>* Q\<close> \<open>A\<^sub>R \<sharp>* M\<close> \<open>xvec \<sharp>* A\<^sub>R\<close> \<open>xvec \<sharp>* M\<close> \<open>A\<^sub>Q' \<sharp>* xvec\<close>
[PROOF STATE]
proof (chain)
picking this:
\<Psi> \<otimes> \<Psi>\<^sub>R \<rhd> Q \<longmapsto> K\<lparr>N\<rparr> \<prec> Q'
extractFrame R = \<langle>A\<^sub>R, \<Psi>\<^sub>R\<rangle>
\<Psi> \<otimes> \<Psi>\<^sub>Q \<rhd> P'' \<longmapsto> M\<lparr>\<nu>*xvec\<rparr>\<langle>N\<rangle> \<prec> P'
\<Psi> \<otimes> \<Psi>\<^sub>R \<otimes> \<Psi>\<^sub>Q \<turnstile> M \<leftrightarrow> K
\<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>R) \<otimes> \<Psi>\<^sub>Q\<rangle> \<hookrightarrow>\<^sub>F \<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>P'') \<otimes> \<Psi>\<^sub>Q\<rangle>
extractFrame P'' = \<langle>A\<^sub>P'', \<Psi>\<^sub>P''\<rangle>
extractFrame Q = \<langle>A\<^sub>Q', \<Psi>\<^sub>Q\<rangle>
distinct A\<^sub>P''
distinct A\<^sub>Q'
A\<^sub>P'' \<sharp>* A\<^sub>Q'
A\<^sub>R \<sharp>* A\<^sub>Q'
A\<^sub>Q' \<sharp>* \<Psi>
A\<^sub>Q' \<sharp>* P''
A\<^sub>Q' \<sharp>* Q
A\<^sub>Q' \<sharp>* R
A\<^sub>Q' \<sharp>* K
A\<^sub>P'' \<sharp>* \<Psi>
A\<^sub>P'' \<sharp>* R
A\<^sub>P'' \<sharp>* Q
A\<^sub>P'' \<sharp>* P''
A\<^sub>P'' \<sharp>* M
A\<^sub>Q \<sharp>* R
A\<^sub>R \<sharp>* Q
A\<^sub>R \<sharp>* M
xvec \<sharp>* A\<^sub>R
xvec \<sharp>* M
A\<^sub>Q' \<sharp>* xvec
[PROOF STEP]
obtain K' where "\<Psi> \<otimes> \<Psi>\<^sub>P'' \<rhd> Q \<longmapsto>K'\<lparr>N\<rparr> \<prec> Q'" and "\<Psi> \<otimes> \<Psi>\<^sub>P'' \<otimes> \<Psi>\<^sub>Q \<turnstile> M \<leftrightarrow> K'" and "A\<^sub>Q' \<sharp>* K'"
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<otimes> \<Psi>\<^sub>R \<rhd> Q \<longmapsto> K\<lparr>N\<rparr> \<prec> Q'
extractFrame R = \<langle>A\<^sub>R, \<Psi>\<^sub>R\<rangle>
\<Psi> \<otimes> \<Psi>\<^sub>Q \<rhd> P'' \<longmapsto> M\<lparr>\<nu>*xvec\<rparr>\<langle>N\<rangle> \<prec> P'
\<Psi> \<otimes> \<Psi>\<^sub>R \<otimes> \<Psi>\<^sub>Q \<turnstile> M \<leftrightarrow> K
\<langle>A\<^sub>R, (\<Psi> \<otimes> \<Psi>\<^sub>R) \<otimes> \<Psi>\<^sub>Q\<rangle> \<hookrightarrow>\<^sub>F \<langle>A\<^sub>P'', (\<Psi> \<otimes> \<Psi>\<^sub>P'') \<otimes> \<Psi>\<^sub>Q\<rangle>
extractFrame P'' = \<langle>A\<^sub>P'', \<Psi>\<^sub>P''\<rangle>
extractFrame Q = \<langle>A\<^sub>Q', \<Psi>\<^sub>Q\<rangle>
distinct A\<^sub>P''
distinct A\<^sub>Q'
A\<^sub>P'' \<sharp>* A\<^sub>Q'
A\<^sub>R \<sharp>* A\<^sub>Q'
A\<^sub>Q' \<sharp>* \<Psi>
A\<^sub>Q' \<sharp>* P''
A\<^sub>Q' \<sharp>* Q
A\<^sub>Q' \<sharp>* R
A\<^sub>Q' \<sharp>* K
A\<^sub>P'' \<sharp>* \<Psi>
A\<^sub>P'' \<sharp>* R
A\<^sub>P'' \<sharp>* Q
A\<^sub>P'' \<sharp>* P''
A\<^sub>P'' \<sharp>* M
A\<^sub>Q \<sharp>* R
A\<^sub>R \<sharp>* Q
A\<^sub>R \<sharp>* M
xvec \<sharp>* A\<^sub>R
xvec \<sharp>* M
A\<^sub>Q' \<sharp>* xvec
goal (1 subgoal):
1. (\<And>K'. \<lbrakk>\<Psi> \<otimes> \<Psi>\<^sub>P'' \<rhd> Q \<longmapsto> K'\<lparr>N\<rparr> \<prec> Q'; \<Psi> \<otimes> \<Psi>\<^sub>P'' \<otimes> \<Psi>\<^sub>Q \<turnstile> M \<leftrightarrow> K'; A\<^sub>Q' \<sharp>* K'\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(rule_tac comm2Aux) (assumption | simp)+
[PROOF STATE]
proof (state)
this:
\<Psi> \<otimes> \<Psi>\<^sub>P'' \<rhd> Q \<longmapsto> K'\<lparr>N\<rparr> \<prec> Q'
\<Psi> \<otimes> \<Psi>\<^sub>P'' \<otimes> \<Psi>\<^sub>Q \<turnstile> M \<leftrightarrow> K'
A\<^sub>Q' \<sharp>* K'
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
with P''Trans FrP''
[PROOF STATE]
proof (chain)
picking this:
\<Psi> \<otimes> \<Psi>\<^sub>Q \<rhd> P'' \<longmapsto> M\<lparr>\<nu>*xvec\<rparr>\<langle>N\<rangle> \<prec> P'
extractFrame P'' = \<langle>A\<^sub>P'', \<Psi>\<^sub>P''\<rangle>
\<Psi> \<otimes> \<Psi>\<^sub>P'' \<rhd> Q \<longmapsto> K'\<lparr>N\<rparr> \<prec> Q'
\<Psi> \<otimes> \<Psi>\<^sub>P'' \<otimes> \<Psi>\<^sub>Q \<turnstile> M \<leftrightarrow> K'
A\<^sub>Q' \<sharp>* K'
[PROOF STEP]
have "\<Psi> \<rhd> P'' \<parallel> Q \<longmapsto>\<tau> \<prec> \<lparr>\<nu>*xvec\<rparr>(P' \<parallel> Q')"
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<otimes> \<Psi>\<^sub>Q \<rhd> P'' \<longmapsto> M\<lparr>\<nu>*xvec\<rparr>\<langle>N\<rangle> \<prec> P'
extractFrame P'' = \<langle>A\<^sub>P'', \<Psi>\<^sub>P''\<rangle>
\<Psi> \<otimes> \<Psi>\<^sub>P'' \<rhd> Q \<longmapsto> K'\<lparr>N\<rparr> \<prec> Q'
\<Psi> \<otimes> \<Psi>\<^sub>P'' \<otimes> \<Psi>\<^sub>Q \<turnstile> M \<leftrightarrow> K'
A\<^sub>Q' \<sharp>* K'
goal (1 subgoal):
1. \<Psi> \<rhd> P'' \<parallel> Q \<longmapsto> \<tau> \<prec> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
using FrQ' \<open>A\<^sub>Q' \<sharp>* \<Psi>\<close> \<open>A\<^sub>Q' \<sharp>* P''\<close> \<open>A\<^sub>Q' \<sharp>* Q\<close>
\<open>xvec \<sharp>* Q\<close> \<open>A\<^sub>P'' \<sharp>* \<Psi>\<close> \<open>A\<^sub>P'' \<sharp>* P''\<close> \<open>A\<^sub>P'' \<sharp>* Q\<close> \<open>A\<^sub>P'' \<sharp>* M\<close> \<open>A\<^sub>P'' \<sharp>* A\<^sub>Q'\<close>
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<otimes> \<Psi>\<^sub>Q \<rhd> P'' \<longmapsto> M\<lparr>\<nu>*xvec\<rparr>\<langle>N\<rangle> \<prec> P'
extractFrame P'' = \<langle>A\<^sub>P'', \<Psi>\<^sub>P''\<rangle>
\<Psi> \<otimes> \<Psi>\<^sub>P'' \<rhd> Q \<longmapsto> K'\<lparr>N\<rparr> \<prec> Q'
\<Psi> \<otimes> \<Psi>\<^sub>P'' \<otimes> \<Psi>\<^sub>Q \<turnstile> M \<leftrightarrow> K'
A\<^sub>Q' \<sharp>* K'
extractFrame Q = \<langle>A\<^sub>Q', \<Psi>\<^sub>Q\<rangle>
A\<^sub>Q' \<sharp>* \<Psi>
A\<^sub>Q' \<sharp>* P''
A\<^sub>Q' \<sharp>* Q
xvec \<sharp>* Q
A\<^sub>P'' \<sharp>* \<Psi>
A\<^sub>P'' \<sharp>* P''
A\<^sub>P'' \<sharp>* Q
A\<^sub>P'' \<sharp>* M
A\<^sub>P'' \<sharp>* A\<^sub>Q'
goal (1 subgoal):
1. \<Psi> \<rhd> P'' \<parallel> Q \<longmapsto> \<tau> \<prec> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
by(rule_tac Comm2)
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> P'' \<parallel> Q \<longmapsto> \<tau> \<prec> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
\<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sup>^\<^sub>\<tau> P'' \<parallel> Q
\<Psi> \<rhd> P'' \<parallel> Q \<longmapsto> \<tau> \<prec> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sup>^\<^sub>\<tau> P'' \<parallel> Q
\<Psi> \<rhd> P'' \<parallel> Q \<longmapsto> \<tau> \<prec> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
goal (1 subgoal):
1. \<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
[PROOF STEP]
by(drule_tac tauActTauStepChain) auto
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> P \<parallel> Q \<Longrightarrow>\<^sub>\<tau> \<lparr>\<nu>*xvec\<rparr>P' \<parallel> Q'
goal:
No subgoals!
[PROOF STEP]
qed
|
```python
%matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sympy
from sympy import Matrix, init_printing
from scipy.sparse.linalg import svds,eigs
import sklearn
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics import pairwise_distances
from time import time
import surprise
from surprise import SVD
from surprise import Dataset
from surprise.model_selection import cross_validate
init_printing()
```
```python
data = pd.read_csv('top50.csv',encoding = "ISO-8859-1")
```
```python
data.index = data["Track.Name"]
data = data[['Beats.Per.Minute',
'Energy', 'Danceability', 'Loudness..dB..', 'Liveness', 'Valence.',
'Length.', 'Acousticness..', 'Speechiness.', 'Popularity']]
```
```python
data
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Artist.Name</th>
<th>Genre</th>
<th>Beats.Per.Minute</th>
<th>Energy</th>
<th>Danceability</th>
<th>Loudness..dB..</th>
<th>Liveness</th>
<th>Valence.</th>
<th>Length.</th>
<th>Acousticness..</th>
<th>Speechiness.</th>
<th>Popularity</th>
</tr>
<tr>
<th>Track.Name</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>Señorita</th>
<td>Shawn Mendes</td>
<td>canadian pop</td>
<td>117</td>
<td>55</td>
<td>76</td>
<td>-6</td>
<td>8</td>
<td>75</td>
<td>191</td>
<td>4</td>
<td>3</td>
<td>79</td>
</tr>
<tr>
<th>China</th>
<td>Anuel AA</td>
<td>reggaeton flow</td>
<td>105</td>
<td>81</td>
<td>79</td>
<td>-4</td>
<td>8</td>
<td>61</td>
<td>302</td>
<td>8</td>
<td>9</td>
<td>92</td>
</tr>
<tr>
<th>boyfriend (with Social House)</th>
<td>Ariana Grande</td>
<td>dance pop</td>
<td>190</td>
<td>80</td>
<td>40</td>
<td>-4</td>
<td>16</td>
<td>70</td>
<td>186</td>
<td>12</td>
<td>46</td>
<td>85</td>
</tr>
<tr>
<th>Beautiful People (feat. Khalid)</th>
<td>Ed Sheeran</td>
<td>pop</td>
<td>93</td>
<td>65</td>
<td>64</td>
<td>-8</td>
<td>8</td>
<td>55</td>
<td>198</td>
<td>12</td>
<td>19</td>
<td>86</td>
</tr>
<tr>
<th>Goodbyes (Feat. Young Thug)</th>
<td>Post Malone</td>
<td>dfw rap</td>
<td>150</td>
<td>65</td>
<td>58</td>
<td>-4</td>
<td>11</td>
<td>18</td>
<td>175</td>
<td>45</td>
<td>7</td>
<td>94</td>
</tr>
<tr>
<th>I Don't Care (with Justin Bieber)</th>
<td>Ed Sheeran</td>
<td>pop</td>
<td>102</td>
<td>68</td>
<td>80</td>
<td>-5</td>
<td>9</td>
<td>84</td>
<td>220</td>
<td>9</td>
<td>4</td>
<td>84</td>
</tr>
<tr>
<th>Ransom</th>
<td>Lil Tecca</td>
<td>trap music</td>
<td>180</td>
<td>64</td>
<td>75</td>
<td>-6</td>
<td>7</td>
<td>23</td>
<td>131</td>
<td>2</td>
<td>29</td>
<td>92</td>
</tr>
<tr>
<th>How Do You Sleep?</th>
<td>Sam Smith</td>
<td>pop</td>
<td>111</td>
<td>68</td>
<td>48</td>
<td>-5</td>
<td>8</td>
<td>35</td>
<td>202</td>
<td>15</td>
<td>9</td>
<td>90</td>
</tr>
<tr>
<th>Old Town Road - Remix</th>
<td>Lil Nas X</td>
<td>country rap</td>
<td>136</td>
<td>62</td>
<td>88</td>
<td>-6</td>
<td>11</td>
<td>64</td>
<td>157</td>
<td>5</td>
<td>10</td>
<td>87</td>
</tr>
<tr>
<th>bad guy</th>
<td>Billie Eilish</td>
<td>electropop</td>
<td>135</td>
<td>43</td>
<td>70</td>
<td>-11</td>
<td>10</td>
<td>56</td>
<td>194</td>
<td>33</td>
<td>38</td>
<td>95</td>
</tr>
<tr>
<th>Callaita</th>
<td>Bad Bunny</td>
<td>reggaeton</td>
<td>176</td>
<td>62</td>
<td>61</td>
<td>-5</td>
<td>24</td>
<td>24</td>
<td>251</td>
<td>60</td>
<td>31</td>
<td>93</td>
</tr>
<tr>
<th>Loco Contigo (feat. J. Balvin & Tyga)</th>
<td>DJ Snake</td>
<td>dance pop</td>
<td>96</td>
<td>71</td>
<td>82</td>
<td>-4</td>
<td>15</td>
<td>38</td>
<td>185</td>
<td>28</td>
<td>7</td>
<td>86</td>
</tr>
<tr>
<th>Someone You Loved</th>
<td>Lewis Capaldi</td>
<td>pop</td>
<td>110</td>
<td>41</td>
<td>50</td>
<td>-6</td>
<td>11</td>
<td>45</td>
<td>182</td>
<td>75</td>
<td>3</td>
<td>88</td>
</tr>
<tr>
<th>Otro Trago - Remix</th>
<td>Sech</td>
<td>panamanian pop</td>
<td>176</td>
<td>79</td>
<td>73</td>
<td>-2</td>
<td>6</td>
<td>76</td>
<td>288</td>
<td>7</td>
<td>20</td>
<td>87</td>
</tr>
<tr>
<th>Money In The Grave (Drake ft. Rick Ross)</th>
<td>Drake</td>
<td>canadian hip hop</td>
<td>101</td>
<td>50</td>
<td>83</td>
<td>-4</td>
<td>12</td>
<td>10</td>
<td>205</td>
<td>10</td>
<td>5</td>
<td>92</td>
</tr>
<tr>
<th>No Guidance (feat. Drake)</th>
<td>Chris Brown</td>
<td>dance pop</td>
<td>93</td>
<td>45</td>
<td>70</td>
<td>-7</td>
<td>16</td>
<td>14</td>
<td>261</td>
<td>12</td>
<td>15</td>
<td>82</td>
</tr>
<tr>
<th>LA CANCIÓN</th>
<td>J Balvin</td>
<td>latin</td>
<td>176</td>
<td>65</td>
<td>75</td>
<td>-6</td>
<td>11</td>
<td>43</td>
<td>243</td>
<td>15</td>
<td>32</td>
<td>90</td>
</tr>
<tr>
<th>Sunflower - Spider-Man: Into the Spider-Verse</th>
<td>Post Malone</td>
<td>dfw rap</td>
<td>90</td>
<td>48</td>
<td>76</td>
<td>-6</td>
<td>7</td>
<td>91</td>
<td>158</td>
<td>56</td>
<td>5</td>
<td>91</td>
</tr>
<tr>
<th>Lalala</th>
<td>Y2K</td>
<td>canadian hip hop</td>
<td>130</td>
<td>39</td>
<td>84</td>
<td>-8</td>
<td>14</td>
<td>50</td>
<td>161</td>
<td>18</td>
<td>8</td>
<td>88</td>
</tr>
<tr>
<th>Truth Hurts</th>
<td>Lizzo</td>
<td>escape room</td>
<td>158</td>
<td>62</td>
<td>72</td>
<td>-3</td>
<td>12</td>
<td>41</td>
<td>173</td>
<td>11</td>
<td>11</td>
<td>91</td>
</tr>
<tr>
<th>Piece Of Your Heart</th>
<td>MEDUZA</td>
<td>pop house</td>
<td>124</td>
<td>74</td>
<td>68</td>
<td>-7</td>
<td>7</td>
<td>63</td>
<td>153</td>
<td>4</td>
<td>3</td>
<td>91</td>
</tr>
<tr>
<th>Panini</th>
<td>Lil Nas X</td>
<td>country rap</td>
<td>154</td>
<td>59</td>
<td>70</td>
<td>-6</td>
<td>12</td>
<td>48</td>
<td>115</td>
<td>34</td>
<td>8</td>
<td>91</td>
</tr>
<tr>
<th>No Me Conoce - Remix</th>
<td>Jhay Cortez</td>
<td>reggaeton flow</td>
<td>92</td>
<td>79</td>
<td>81</td>
<td>-4</td>
<td>9</td>
<td>58</td>
<td>309</td>
<td>14</td>
<td>7</td>
<td>83</td>
</tr>
<tr>
<th>Soltera - Remix</th>
<td>Lunay</td>
<td>latin</td>
<td>92</td>
<td>78</td>
<td>80</td>
<td>-4</td>
<td>44</td>
<td>80</td>
<td>266</td>
<td>36</td>
<td>4</td>
<td>91</td>
</tr>
<tr>
<th>bad guy (with Justin Bieber)</th>
<td>Billie Eilish</td>
<td>electropop</td>
<td>135</td>
<td>45</td>
<td>67</td>
<td>-11</td>
<td>12</td>
<td>68</td>
<td>195</td>
<td>25</td>
<td>30</td>
<td>89</td>
</tr>
<tr>
<th>If I Can't Have You</th>
<td>Shawn Mendes</td>
<td>canadian pop</td>
<td>124</td>
<td>82</td>
<td>69</td>
<td>-4</td>
<td>13</td>
<td>87</td>
<td>191</td>
<td>49</td>
<td>6</td>
<td>70</td>
</tr>
<tr>
<th>Dance Monkey</th>
<td>Tones and I</td>
<td>australian pop</td>
<td>98</td>
<td>59</td>
<td>82</td>
<td>-6</td>
<td>18</td>
<td>54</td>
<td>210</td>
<td>69</td>
<td>10</td>
<td>83</td>
</tr>
<tr>
<th>It's You</th>
<td>Ali Gatie</td>
<td>canadian hip hop</td>
<td>96</td>
<td>46</td>
<td>73</td>
<td>-7</td>
<td>19</td>
<td>40</td>
<td>213</td>
<td>37</td>
<td>3</td>
<td>89</td>
</tr>
<tr>
<th>Con Calma</th>
<td>Daddy Yankee</td>
<td>latin</td>
<td>94</td>
<td>86</td>
<td>74</td>
<td>-3</td>
<td>6</td>
<td>66</td>
<td>193</td>
<td>11</td>
<td>6</td>
<td>91</td>
</tr>
<tr>
<th>QUE PRETENDES</th>
<td>J Balvin</td>
<td>latin</td>
<td>93</td>
<td>79</td>
<td>64</td>
<td>-4</td>
<td>36</td>
<td>94</td>
<td>222</td>
<td>3</td>
<td>25</td>
<td>89</td>
</tr>
<tr>
<th>Takeaway</th>
<td>The Chainsmokers</td>
<td>edm</td>
<td>85</td>
<td>51</td>
<td>29</td>
<td>-8</td>
<td>10</td>
<td>36</td>
<td>210</td>
<td>12</td>
<td>4</td>
<td>84</td>
</tr>
<tr>
<th>7 rings</th>
<td>Ariana Grande</td>
<td>dance pop</td>
<td>140</td>
<td>32</td>
<td>78</td>
<td>-11</td>
<td>9</td>
<td>33</td>
<td>179</td>
<td>59</td>
<td>33</td>
<td>89</td>
</tr>
<tr>
<th>0.958333333333333</th>
<td>Maluma</td>
<td>reggaeton</td>
<td>96</td>
<td>71</td>
<td>78</td>
<td>-5</td>
<td>9</td>
<td>68</td>
<td>176</td>
<td>22</td>
<td>28</td>
<td>89</td>
</tr>
<tr>
<th>The London (feat. J. Cole & Travis Scott)</th>
<td>Young Thug</td>
<td>atl hip hop</td>
<td>98</td>
<td>59</td>
<td>80</td>
<td>-7</td>
<td>13</td>
<td>18</td>
<td>200</td>
<td>2</td>
<td>15</td>
<td>89</td>
</tr>
<tr>
<th>Never Really Over</th>
<td>Katy Perry</td>
<td>dance pop</td>
<td>100</td>
<td>88</td>
<td>77</td>
<td>-5</td>
<td>32</td>
<td>39</td>
<td>224</td>
<td>19</td>
<td>6</td>
<td>89</td>
</tr>
<tr>
<th>Summer Days (feat. Macklemore & Patrick Stump of Fall Out Boy)</th>
<td>Martin Garrix</td>
<td>big room</td>
<td>114</td>
<td>72</td>
<td>66</td>
<td>-7</td>
<td>14</td>
<td>32</td>
<td>164</td>
<td>18</td>
<td>6</td>
<td>89</td>
</tr>
<tr>
<th>Otro Trago</th>
<td>Sech</td>
<td>panamanian pop</td>
<td>176</td>
<td>70</td>
<td>75</td>
<td>-5</td>
<td>11</td>
<td>62</td>
<td>226</td>
<td>14</td>
<td>34</td>
<td>91</td>
</tr>
<tr>
<th>Antisocial (with Travis Scott)</th>
<td>Ed Sheeran</td>
<td>pop</td>
<td>152</td>
<td>82</td>
<td>72</td>
<td>-5</td>
<td>36</td>
<td>91</td>
<td>162</td>
<td>13</td>
<td>5</td>
<td>87</td>
</tr>
<tr>
<th>Sucker</th>
<td>Jonas Brothers</td>
<td>boy band</td>
<td>138</td>
<td>73</td>
<td>84</td>
<td>-5</td>
<td>11</td>
<td>95</td>
<td>181</td>
<td>4</td>
<td>6</td>
<td>80</td>
</tr>
<tr>
<th>fuck, i'm lonely (with Anne-Marie) - from “13 Reasons Why: Season 3”</th>
<td>Lauv</td>
<td>dance pop</td>
<td>95</td>
<td>56</td>
<td>81</td>
<td>-6</td>
<td>6</td>
<td>68</td>
<td>199</td>
<td>48</td>
<td>7</td>
<td>78</td>
</tr>
<tr>
<th>Higher Love</th>
<td>Kygo</td>
<td>edm</td>
<td>104</td>
<td>68</td>
<td>69</td>
<td>-7</td>
<td>10</td>
<td>40</td>
<td>228</td>
<td>2</td>
<td>3</td>
<td>88</td>
</tr>
<tr>
<th>You Need To Calm Down</th>
<td>Taylor Swift</td>
<td>dance pop</td>
<td>85</td>
<td>68</td>
<td>77</td>
<td>-6</td>
<td>7</td>
<td>73</td>
<td>171</td>
<td>1</td>
<td>5</td>
<td>90</td>
</tr>
<tr>
<th>Shallow</th>
<td>Lady Gaga</td>
<td>dance pop</td>
<td>96</td>
<td>39</td>
<td>57</td>
<td>-6</td>
<td>23</td>
<td>32</td>
<td>216</td>
<td>37</td>
<td>3</td>
<td>87</td>
</tr>
<tr>
<th>Talk</th>
<td>Khalid</td>
<td>pop</td>
<td>136</td>
<td>40</td>
<td>90</td>
<td>-9</td>
<td>6</td>
<td>35</td>
<td>198</td>
<td>5</td>
<td>13</td>
<td>84</td>
</tr>
<tr>
<th>Con Altura</th>
<td>ROSALÍA</td>
<td>r&b en espanol</td>
<td>98</td>
<td>69</td>
<td>88</td>
<td>-4</td>
<td>5</td>
<td>75</td>
<td>162</td>
<td>39</td>
<td>12</td>
<td>88</td>
</tr>
<tr>
<th>One Thing Right</th>
<td>Marshmello</td>
<td>brostep</td>
<td>88</td>
<td>62</td>
<td>66</td>
<td>-2</td>
<td>58</td>
<td>44</td>
<td>182</td>
<td>7</td>
<td>5</td>
<td>88</td>
</tr>
<tr>
<th>Te Robaré</th>
<td>Nicky Jam</td>
<td>latin</td>
<td>176</td>
<td>75</td>
<td>67</td>
<td>-4</td>
<td>8</td>
<td>80</td>
<td>202</td>
<td>24</td>
<td>6</td>
<td>88</td>
</tr>
<tr>
<th>Happier</th>
<td>Marshmello</td>
<td>brostep</td>
<td>100</td>
<td>79</td>
<td>69</td>
<td>-3</td>
<td>17</td>
<td>67</td>
<td>214</td>
<td>19</td>
<td>5</td>
<td>88</td>
</tr>
<tr>
<th>Call You Mine</th>
<td>The Chainsmokers</td>
<td>edm</td>
<td>104</td>
<td>70</td>
<td>59</td>
<td>-6</td>
<td>41</td>
<td>50</td>
<td>218</td>
<td>23</td>
<td>3</td>
<td>88</td>
</tr>
<tr>
<th>Cross Me (feat. Chance the Rapper & PnB Rock)</th>
<td>Ed Sheeran</td>
<td>pop</td>
<td>95</td>
<td>79</td>
<td>75</td>
<td>-6</td>
<td>7</td>
<td>61</td>
<td>206</td>
<td>21</td>
<td>12</td>
<td>82</td>
</tr>
</tbody>
</table>
</div>
```python
def index_to_instance(df,index=None):
if index:
return XYZ(df)[index][1]
else:
return XYZ(df)
def XYZ(df):
return sorted(list(zip(list(df.index.codes[0].data),list(df.index.levels[0].array))))
def value_to_index_map(array):
array1 = zip(array,range(len(array)))
return array1
```
```python
class RecSysContentBased():
def __init__(self):
pass
def fit(self,train):
self.train_set = train
df1 = cosine_similarity(train)
self.similarity = df1
self.distances = pairwise_distances(train,metric='euclidean')
def evaluate(self,user):
d = sorted(value_to_index_map(self.distances[user]))
print(d)
return list(index_to_instance(self.train_set,d[i][1]) for i in range(len(d)))
def predict(self):
pass
def test(self,testset):
pass
```
```python
model = RecSysContentBased()
```
```python
np.random.seed(1)
```
```python
w = "qwertyuiopasdfghjklzxcvbnm"
```
```python
model.fit(data)
```
```python
model.evaluate(1)
```
[(0.0, 1), (7.306019863613199, 16), (7.395636885548911, 5), (7.704989210484981, 21), (7.710644461787914, 25), (7.9397938648414295, 12), (8.197515881045817, 19), (8.280594332831667, 3), (8.37692672434156, 24), (8.47324565388931, 2), (8.59168373061565, 9), (8.625026218429008, 17), (8.86561142209829, 8), (8.989000391884632, 0), (9.008214831460636, 22), (9.009691688370886, 18), (9.383269126207303, 13), (9.50125695752467, 6), (9.552747323085569, 10), (9.67897905295402, 14), (9.786878709359785, 23), (9.845251424981102, 7), (10.243872354987145, 11), (10.347373751522337, 15), (10.460654565092929, 4), (11.000486478954194, 20)]
['x',
'a',
'n',
'w',
't',
'z',
'e',
'm',
'f',
'v',
'q',
'd',
'h',
[(0, 'k'),
(1, 'x'),
(2, 'v'),
(3, 'm'),
(4, 'c'),
(5, 'n'),
(6, 'o'),
(7, 'p'),
(8, 'h'),
(9, 'q'),
(10, 'r'),
(11, 's'),
(12, 'z'),
(13, 'y'),
(14, 'i'),
(15, 'j'),
(16, 'a'),
(17, 'd'),
(18, 'l'),
(19, 'e'),
(20, 'g'),
(21, 'w'),
(22, 'b'),
(23, 'u'),
(24, 'f'),
(25, 't')],
'b',
'l',
'y',
'o',
'r',
'i',
'u',
'p',
's',
'j',
'c',
'g']
```python
```
|
using FMRIOpto
using Test
@testset "FMRIOpto.jl" begin
# Write your own tests here.
end
|
// Copyright Louis Dionne 2013-2016
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
#include <boost/hana.hpp>
#include <string>
#include <vector>
namespace hana = boost::hana;
struct yes { std::string toString() const { return "yes"; } };
struct no { };
//! [optionalToString.sfinae]
template <typename T>
std::string optionalToString(T const& obj) {
auto maybe_toString = hana::sfinae([](auto&& x) -> decltype(x.toString()) {
return x.toString();
});
return maybe_toString(obj).value_or("toString not defined");
}
//! [optionalToString.sfinae]
int main() {
BOOST_HANA_RUNTIME_CHECK(optionalToString(yes{}) == "yes");
BOOST_HANA_RUNTIME_CHECK(optionalToString(no{}) == "toString not defined");
{
//! [maybe_add]
auto maybe_add = hana::sfinae([](auto x, auto y) -> decltype(x + y) {
return x + y;
});
maybe_add(1, 2); // hana::just(3)
std::vector<int> v;
maybe_add(v, "foobar"); // hana::nothing
//! [maybe_add]
}
}
|
#remove chimeras with dada2.
#1. clear environment, load functions and packages.----
rm(list=ls())
source('paths.r')
source('NEFI_functions/tic_toc.r')
library(data.table)
#2. Setup paths.----
#get sequence file paths
seq.path <- NEON_ITS.dir
SV_pre.chimera.path <- paste0(seq.path,'SV_pre.chimera_table.rds')
#output file paths.
output_filepath1 <- paste0(seq.path,'SV_table.rds')
output_filepath2 <- NEON_SV.table.path
#3. load data, remove chimeras.----
t.out <- readRDS(SV_pre.chimera.path)
cat('Removing chimeras...\n')
tic()
t.out_nochim <- dada2::removeBimeraDenovo(t.out, method = 'consensus', multithread = TRUE)
cat('Chimeras removed.\n')
toc()
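#optional sanity check: report how much of the table survived chimera removal.
#(assumes t.out and t.out_nochim are standard dada2 sample-by-SV count matrices.)
cat('Fraction of reads retained after chimera removal:', sum(t.out_nochim)/sum(t.out), '\n')
cat('Fraction of SVs retained after chimera removal:', ncol(t.out_nochim)/ncol(t.out), '\n')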
#4. Final save and cleanup.----
#save output.
cat('Saving output...\n')
tic()
saveRDS(t.out_nochim, output_filepath1)
saveRDS(t.out_nochim, output_filepath2)
toc()
cat('Output saved.\n')
#clean up if t.out_nochim was created.
if(exists('t.out_nochim')==T){
cmd <- paste0('rm -f ',SV_pre.chimera.path)
system(cmd)
}
#end script.
|
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.solinas32_2e414m17_17limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition freeze :
{ freeze : feBW_tight -> feBW_limbwidths
| forall a, phiBW_limbwidths (freeze a) = phiBW_tight a }.
Proof.
Set Ltac Profiling.
Time synthesize_freeze ().
Show Ltac Profile.
Time Defined.
Print Assumptions freeze.
|
module MissingDefinition where
open import Agda.Builtin.Equality
Q : Set
data U : Set
S : Set
S = U
T : S β Set
T _ = U
V : Set
W : V → Set
private
X : Set
module AB where
data A : Set
B : (a b : A) → a ≡ b
mutual
U₁ : Set
data T₁ : U₁ → Set
V₁ : (u₁ : U₁) → T₁ u₁ → Set
data W₁ (u₁ : U₁) (t₁ : T₁ u₁) : V₁ u₁ t₁ → Set
|
-- Andreas, 2018-09-07 issue #3217 reported by Nisse
--
-- Missing range for cubical error
{-# OPTIONS --cubical #-}
-- {-# OPTIONS -v tc.term.lambda:100 -v tc.term:10 #-}
open import Agda.Builtin.Cubical.Path
data Bool : Set where
true false : Bool
eq : true ≡ false
eq = λ i → true
-- Expected error
-- Issue3217.agda:15,12-16
-- true != false of type Bool
-- when checking that the expression λ i → true has type true ≡ false
|
'''
Copyright 2015 Serendio Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
'''
__author__ = "Satish Palaniappan"
import pickle
import gensim
from scipy import spatial
import operator
import numpy as np
path = "./Model/Seeds22/"
def save_obj(obj, name ):
with open( path + name + '.pkl', 'wb') as f:
pickle.dump(obj, f, protocol=2)
def load_obj(name ):
with open( path + name + '.pkl', 'rb') as f:
return pickle.load(f)
class Categorize(object):
def __init__(self):
## Load Pickle
self.Cluster_lookUP = load_obj("Cluster_lookUP")
self.Cosine_Similarity = load_obj("Cosine_Similarity")
self.num2cat = load_obj("num2cat")
self.Cluster_Model = load_obj("clusterSmall")
self.catVec = load_obj("catVec")
self.model = gensim.models.Word2Vec.load_word2vec_format(path + 'vectors.bin', binary=True)
self.model.init_sims(replace=True)
def CosSim (self,v1,v2):
return (1 - spatial.distance.cosine(v1, v2))
def combine(self,v1,v2):
A = np.add(v1,v2)
M = np.multiply(A,A)
lent=0
for i in M:
lent+=i
return np.divide(A,lent)
def getCategory(self,text):
# Min Score for Each Word
wminScore = 0.30
# sentScore = []
scores=dict()
for i in range(0,22):
scores[i] = 0.0
for phrase in text:
#phrase = phrase[0]
if len(phrase.split()) == 1:
try:
skore = self.Cosine_Similarity[phrase]
if skore > wminScore:
scores[self.Cluster_lookUP[phrase]] += skore
# comment later
# sentScore.append((phrase,self.num2cat[self.Cluster_lookUP[phrase]],skore))
#print(num2cat[Cluster_lookUP[phrase]])
except:
#print(phrase + " Skipped!")
continue
else:
words = phrase.split()
try:
vec = np.array(self.model[words[0]])
for word in words[1:]:
try:
vec = self.combine(vec, np.array(self.model[word]))
except:
#print(word + " Skipped!")
continue
tempCat = self.Cluster_Model.predict(vec)
#print(num2cat[tempCat[0]])
skore = self.CosSim(vec, self.catVec[tempCat[0]])
if skore > wminScore:
scores[tempCat[0]] += skore
# sentScore.append((phrase,self.num2cat[tempCat[0]],skore))
except:
#print(words[0] + " Skipped!")
continue
thresholdP = 0.50 # This value is in percent
# if u want a more finer prediction set threshold to 0.35 or 0.40 (caution: don't exceed 0.40)
maxS = max(scores.items(), key = operator.itemgetter(1))[1]
threshold = maxS * thresholdP
#Min Score
minScore = 0.40
# if u want a more noise free prediction set threshold to 0.35 or 0.40 (caution: don't exceed 0.40)
flag = 0
if maxS < minScore:
flag = 1
# set max number of cats assignable to any text
catLimit = 6 # change to 3 or less more aggresive model
# more less the value more aggresive the model
scoreSort = sorted(scores.items(), key = operator.itemgetter(1), reverse=True)
#print(scoreSort)
cats = []
f=0
for s in scoreSort:
if s[1] != 0.0 and s[1] > threshold:
f=1
cats.extend([self.num2cat[s[0]]])
else:
continue
if f == 0 or flag == 1: #No Category assigned!
return ("general")
else:
if len(cats) == 1:
ret = str(cats[0])
elif len(cats) <= catLimit:
ret = "|".join(cats)
else:
# ret = "general" or return top most topic
ret = cats[0] +"|"+"general"
return [ret]
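# Example usage sketch, assuming the pickled lookup tables and 'vectors.bin'
# for the 22-seed model are present under ./Model/Seeds22/ (the paths loaded above).
# The input phrases below are arbitrary illustrations.
if __name__ == "__main__":
    categorizer = Categorize()
    # Returns ["general"] when no category scores above the thresholds,
    # otherwise a single-element list containing a pipe-joined category string.
    print(categorizer.getCategory(["soccer match", "league", "champions"]))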
|
/-
Copyright (c) 2021 Yury G. Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury G. Kudryashov
-/
import data.set.basic
import data.bool
/-!
# Booleans and set operations
This file contains two trivial lemmas about `bool`, `set.univ`, and `set.range`.
-/
open set
namespace bool
@[simp] lemma univ_eq : (univ : set bool) = {ff, tt} :=
(eq_univ_of_forall bool.dichotomy).symm
@[simp] lemma range_eq {α : Type*} (f : bool → α) : range f = {f ff, f tt} :=
by rw [← image_univ, univ_eq, image_pair]
end bool
|
%% Analysis_SSVEP
clear all; clc; close all;
%% initialization
DATADIR = 'WHERE\IS\DATA';
%% SSVEP
SSVEPDATA = 'EEG_SSVEP.mat';
STRUCTINFO = {'EEG_SSVEP_train', 'EEG_SSVEP_test'};
SESSIONS = {'session1', 'session2'};
TOTAL_SUBJECTS = 54;
%% INITIALIZATION
FS = 100;
%init
params = {'time', 4;...
'freq' , 60./[5, 7, 9, 11];...
'fs' , FS;...
'band' ,[0.5 40];...
'channel_index', [23:32]; ...
'time_interval' ,[0 4000]; ...
'marker', {'1','up';'2', 'left';'3', 'right';'4', 'down'}; ...
};
%% validation
for sessNum = 1:length(SESSIONS)
session = SESSIONS{sessNum};
fprintf('\n%s validation\n',session);
for subNum = 1:TOTAL_SUBJECTS
subject = sprintf('s%d',subNum);
fprintf('LOAD %s ...\n',subject);
data = importdata(fullfile(DATADIR,session,subject,SSVEPDATA));
CNT{1} = prep_resample(data.(STRUCTINFO{1}), FS,{'Nr', 0});
CNT{2} = prep_resample(data.(STRUCTINFO{2}), FS,{'Nr', 0});
ACC.SSVEP(subNum,sessNum) = SSVEP_performance(CNT, params);
fprintf('%d = %f\n',subNum, ACC.SSVEP(subNum,sessNum));
clear CNT
end
end
|
# Electronic Structure - Basic Structure/Procedure
#### Born-Oppenheimer approximation
In molecules, nuclei are much heavier than electrons, so they do not move on the same time scale; therefore, the behaviour of nuclei and electrons can be decoupled.
Therefore, one can first tackle the electronic problem with nuclear coordinates entering only as parameters. The energy levels of the electrons in the molecule can be found by solving the non-relativistic time-independent Schroedinger equation,
$
\begin{align}
\mathcal{H}_{el} |\Psi_n \rangle = E_n |\Psi_n \rangle
\end{align}
$
The ground state energy is given by:
$
\begin{align}
E_0 = \frac{ \langle\Psi_0|H_{el}|\Psi_0\rangle } { \langle\Psi_0|\Psi_0\rangle }
\end{align}
$
We would like to prepare the ground state, $\Psi_0$ on a quantum computer and measure the Hamiltonian expectation value, $E_0$ directly.
### The Hartree-Fock initial state
A good starting point for this problem is the Hartree-Fock (HF) method. This method approximates the N-body problem by N one-body problems in which each electron evolves in the mean field of the others.
The Hamiltonian can then be expressed in the basis of the solutions of the HF method, also called Molecular Orbitals (MOs).
MOs ($\phi_\mu$) can be occupied or virtual (unoccupied). One MO can contain 2 electrons. For now, we work with spin orbitals, which are associated with a spin-up ($\alpha$) or spin-down ($\beta$) electron. Thus a spin orbital can contain one electron or be unoccupied.
There are different codes able to find HF solutions: **Gaussian, Psi4, PyQuante, PySCF**
Below we set up a PySCF driver for H2 molecule at equilibrium bond length 0.735 Angstrom, in the singlet state and with no charge.
```python
from qiskit_nature.drivers import UnitsType, Molecule
from qiskit_nature.drivers.second_quantization import (
ElectronicStructureDriverType,
ElectronicStructureMoleculeDriver,
)
molecule = Molecule(
geometry = [
["H", [0.0, 0.0, 0.0]],
["H", [0.0, 0.0, 0.735]]],
charge = 0,
multiplicity = 1
)
driver = ElectronicStructureMoleculeDriver(
molecule,
basis = "sto3g",
driver_type = ElectronicStructureDriverType.PYSCF
)
```
The molecular Hamiltonian is expressed in terms of fermionic operators. These operators must be mapped to spin operators to obtain a qubit Hamiltonian.
Different mapping types: Jordan-Wigner mapping, Parity mapping, Bravyi-Kitaev mapping.
The Jordan-Wigner (JW) mapping maps each spin orbital to a qubit.
Below, we set up the electronic structure problem to generate the second-quantized operator and a qubit converter that will map it to a qubit operator.
```python
from qiskit_nature.problems.second_quantization import ElectronicStructureProblem
from qiskit_nature.converters.second_quantization import QubitConverter
from qiskit_nature.mappers.second_quantization import JordanWignerMapper, ParityMapper
```
```python
es_problem = ElectronicStructureProblem(driver)
second_q_op = es_problem.second_q_ops()
print(second_q_op[0])
```
Fermionic Operator
register length=4, number terms=14
(0.18093119978423106+0j) * ( +_0 -_1 +_2 -_3 )
+ (-0.18093119978423128+0j) * ( +_0 -_1 -_2 +_3 )
+ (-0.18093119978423128+0j) * ( -_0 +_1 +_2 -_3 )
+ (0.18093119978423144+0j) * ( -_0 +_1 -_2 +_3 ...
Transforming this Hamiltonian for the driver defined above, we get our qubit operator:
```python
qubit_converter = QubitConverter(mapper=JordanWignerMapper())
qubit_op = qubit_converter.convert(second_q_op[0])
print(qubit_op)
```
0.04523279994605781 * YYYY
+ 0.04523279994605781 * XXYY
+ 0.04523279994605781 * YYXX
+ 0.04523279994605781 * XXXX
- 0.8105479805373281 * IIII
- 0.22575349222402358 * ZIII
+ 0.17218393261915543 * IZII
+ 0.12091263261776633 * ZZII
- 0.22575349222402358 * IIZI
+ 0.17464343068300436 * ZIZI
+ 0.16614543256382414 * IZZI
+ 0.17218393261915543 * IIIZ
+ 0.16614543256382414 * ZIIZ
+ 0.16892753870087904 * IZIZ
+ 0.12091263261776633 * IIZZ
In the minimal (STO-3G) basis set, 4 qubits are required. We can reduce the number of qubits by using the parity mapping, which allows for the removal of 2 qubits by exploiting known symmetries arising from the mapping.
```python
qubit_converter = QubitConverter(mapper=ParityMapper(), two_qubit_reduction=True)
qubit_op = qubit_converter.convert(second_q_op[0], num_particles=es_problem.num_particles)
print(qubit_op)
```
0.18093119978423114 * XX
- 1.0523732457728605 * II
- 0.39793742484317884 * ZI
+ 0.39793742484317884 * IZ
- 0.011280104256235171 * ZZ
This time only two qubits are required.
The Hamiltonian is ready and can be used in a quantum algorithm to find information about the electronic structure of the corresponding molecule.
|
module vector-test-ctors where
open import bool
open import list
open import vector
----------------------------------------------------------------------
-- syntax
----------------------------------------------------------------------
test-vector : 𝕃 (𝕍 𝔹 2)
test-vector = (ff :: tt :: []) :: (tt :: ff :: []) :: (tt :: ff :: []) :: []
|
[STATEMENT]
lemma sqnf_update [simp]: "\<And>rt dip dsn dsk flg hops sip.
rt \<noteq> update rt dip (dsn, dsk, flg, hops, sip, {})
\<Longrightarrow> sqnf (update rt dip (dsn, dsk, flg, hops, sip, {})) dip = dsk"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>rt dip dsn dsk flg hops sip. rt \<noteq> update rt dip (dsn, dsk, flg, hops, sip, {}) \<Longrightarrow> sqnf (update rt dip (dsn, dsk, flg, hops, sip, {})) dip = dsk
[PROOF STEP]
unfolding update_def sqnf_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>rt dip dsn dsk flg hops sip. rt \<noteq> (case rt dip of None \<Rightarrow> rt(dip \<mapsto> (dsn, dsk, flg, hops, sip, {})) | Some s \<Rightarrow> if \<pi>\<^sub>2 s < \<pi>\<^sub>2 (dsn, dsk, flg, hops, sip, {}) then rt(dip \<mapsto> addpre (dsn, dsk, flg, hops, sip, {}) (\<pi>\<^sub>7 s)) else if \<pi>\<^sub>2 s = \<pi>\<^sub>2 (dsn, dsk, flg, hops, sip, {}) \<and> (\<pi>\<^sub>5 (dsn, dsk, flg, hops, sip, {}) < \<pi>\<^sub>5 s \<or> \<pi>\<^sub>4 s = Aodv_Basic.inv) then rt(dip \<mapsto> addpre (dsn, dsk, flg, hops, sip, {}) (\<pi>\<^sub>7 s)) else if \<pi>\<^sub>3 (dsn, dsk, flg, hops, sip, {}) = unk then rt(dip \<mapsto> (\<pi>\<^sub>2 s, snd (addpre (dsn, dsk, flg, hops, sip, {}) (\<pi>\<^sub>7 s)))) else rt(dip \<mapsto> addpre s (\<pi>\<^sub>7 (dsn, dsk, flg, hops, sip, {})))) \<Longrightarrow> (case (case rt dip of None \<Rightarrow> rt(dip \<mapsto> (dsn, dsk, flg, hops, sip, {})) | Some s \<Rightarrow> if \<pi>\<^sub>2 s < \<pi>\<^sub>2 (dsn, dsk, flg, hops, sip, {}) then rt(dip \<mapsto> addpre (dsn, dsk, flg, hops, sip, {}) (\<pi>\<^sub>7 s)) else if \<pi>\<^sub>2 s = \<pi>\<^sub>2 (dsn, dsk, flg, hops, sip, {}) \<and> (\<pi>\<^sub>5 (dsn, dsk, flg, hops, sip, {}) < \<pi>\<^sub>5 s \<or> \<pi>\<^sub>4 s = Aodv_Basic.inv) then rt(dip \<mapsto> addpre (dsn, dsk, flg, hops, sip, {}) (\<pi>\<^sub>7 s)) else if \<pi>\<^sub>3 (dsn, dsk, flg, hops, sip, {}) = unk then rt(dip \<mapsto> (\<pi>\<^sub>2 s, snd (addpre (dsn, dsk, flg, hops, sip, {}) (\<pi>\<^sub>7 s)))) else rt(dip \<mapsto> addpre s (\<pi>\<^sub>7 (dsn, dsk, flg, hops, sip, {})))) dip of None \<Rightarrow> unk | Some r \<Rightarrow> \<pi>\<^sub>3 r) = dsk
[PROOF STEP]
by (clarsimp split: option.splits if_split_asm) auto
|
{-
Descriptor language for easily defining structures
-}
{-# OPTIONS --cubical --no-import-sorts --no-exact-split --safe #-}
module Cubical.Structures.Macro where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Function
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.SIP
open import Cubical.Functions.FunExtEquiv
open import Cubical.Data.Sigma
open import Cubical.Data.Maybe
open import Cubical.Structures.Constant
open import Cubical.Structures.Maybe
open import Cubical.Structures.NAryOp
open import Cubical.Structures.Parameterized
open import Cubical.Structures.Pointed
open import Cubical.Structures.Product
open import Cubical.Structures.Functorial
data FuncDesc (ℓ : Level) : Typeω where
  -- constant structure: X ↦ A
  constant : ∀ {ℓ'} → Type ℓ' → FuncDesc ℓ
  -- pointed structure: X ↦ X
  var : FuncDesc ℓ
  -- join of structures S,T : X ↦ (S X × T X)
  _,_ : FuncDesc ℓ → FuncDesc ℓ → FuncDesc ℓ
  -- structure S parameterized by constant A : X ↦ (A → S X)
  param : ∀ {ℓ'} → (A : Type ℓ') → FuncDesc ℓ → FuncDesc ℓ
  -- structure S parameterized by variable argument: X ↦ (X → S X)
  maybe : FuncDesc ℓ → FuncDesc ℓ

data Desc (ℓ : Level) : Typeω where
  -- constant structure: X ↦ A
  constant : ∀ {ℓ'} → Type ℓ' → Desc ℓ
  -- pointed structure: X ↦ X
  var : Desc ℓ
  -- join of structures S,T : X ↦ (S X × T X)
  _,_ : Desc ℓ → Desc ℓ → Desc ℓ
  -- structure S parameterized by constant A : X ↦ (A → S X)
  param : ∀ {ℓ'} → (A : Type ℓ') → Desc ℓ → Desc ℓ
  -- structure S parameterized by variable argument: X ↦ (X → S X)
  recvar : Desc ℓ → Desc ℓ
  -- Maybe on a structure S: X ↦ Maybe (S X)
  maybe : Desc ℓ → Desc ℓ
  -- SNS from functorial action
  functorial : FuncDesc ℓ → Desc ℓ
  -- arbitrary standard notion of structure
  foreign : ∀ {ℓ' ℓ''} {S : Type ℓ → Type ℓ'} (ι : StrEquiv S ℓ'') → UnivalentStr S ι → Desc ℓ

infixr 4 _,_

{- Functorial structures -}

funcMacroLevel : ∀ {ℓ} → FuncDesc ℓ → Level
funcMacroLevel (constant {ℓ'} x) = ℓ'
funcMacroLevel {ℓ} var = ℓ
funcMacroLevel {ℓ} (d₁ , d₂) = ℓ-max (funcMacroLevel d₁) (funcMacroLevel d₂)
funcMacroLevel (param {ℓ'} A d) = ℓ-max ℓ' (funcMacroLevel d)
funcMacroLevel (maybe d) = funcMacroLevel d

-- Structure defined by a functorial descriptor
FuncMacroStructure : ∀ {ℓ} (d : FuncDesc ℓ) → Type ℓ → Type (funcMacroLevel d)
FuncMacroStructure (constant A) X = A
FuncMacroStructure var X = X
FuncMacroStructure (d₁ , d₂) X = FuncMacroStructure d₁ X × FuncMacroStructure d₂ X
FuncMacroStructure (param A d) X = A → FuncMacroStructure d X
FuncMacroStructure (maybe d) = MaybeStructure (FuncMacroStructure d)

-- Action defined by a functorial descriptor
funcMacroAction : ∀ {ℓ} (d : FuncDesc ℓ)
  {X Y : Type ℓ} → (X → Y) → FuncMacroStructure d X → FuncMacroStructure d Y
funcMacroAction (constant A) _ = idfun A
funcMacroAction var f = f
funcMacroAction (d₁ , d₂) f (s₁ , s₂) = funcMacroAction d₁ f s₁ , funcMacroAction d₂ f s₂
funcMacroAction (param A d) f s a = funcMacroAction d f (s a)
funcMacroAction (maybe d) f = map-Maybe (funcMacroAction d f)

-- Proof that the action preserves the identity
funcMacroId : ∀ {ℓ} (d : FuncDesc ℓ)
  {X : Type ℓ} → ∀ s → funcMacroAction d (idfun X) s ≡ s
funcMacroId (constant A) _ = refl
funcMacroId var _ = refl
funcMacroId (d₁ , d₂) (s₁ , s₂) = ΣPath≃PathΣ .fst (funcMacroId d₁ s₁ , funcMacroId d₂ s₂)
funcMacroId (param A d) s = funExt λ a → funcMacroId d (s a)
funcMacroId (maybe d) s = cong₂ map-Maybe (funExt (funcMacroId d)) refl ∙ map-Maybe-id s

{- General structures -}

macroStrLevel : ∀ {ℓ} → Desc ℓ → Level
macroStrLevel (constant {ℓ'} x) = ℓ'
macroStrLevel {ℓ} var = ℓ
macroStrLevel {ℓ} (d₁ , d₂) = ℓ-max (macroStrLevel d₁) (macroStrLevel d₂)
macroStrLevel (param {ℓ'} A d) = ℓ-max ℓ' (macroStrLevel d)
macroStrLevel {ℓ} (recvar d) = ℓ-max ℓ (macroStrLevel d)
macroStrLevel (maybe d) = macroStrLevel d
macroStrLevel (functorial d) = funcMacroLevel d
macroStrLevel (foreign {ℓ'} _ _) = ℓ'

macroEquivLevel : ∀ {ℓ} → Desc ℓ → Level
macroEquivLevel (constant {ℓ'} x) = ℓ'
macroEquivLevel {ℓ} var = ℓ
macroEquivLevel {ℓ} (d₁ , d₂) = ℓ-max (macroEquivLevel d₁) (macroEquivLevel d₂)
macroEquivLevel (param {ℓ'} A d) = ℓ-max ℓ' (macroEquivLevel d)
macroEquivLevel {ℓ} (recvar d) = ℓ-max ℓ (macroEquivLevel d)
macroEquivLevel (maybe d) = macroEquivLevel d
macroEquivLevel (functorial d) = funcMacroLevel d
macroEquivLevel (foreign {ℓ'' = ℓ''} _ _) = ℓ''

-- Structure defined by a descriptor
MacroStructure : ∀ {ℓ} (d : Desc ℓ) → Type ℓ → Type (macroStrLevel d)
MacroStructure (constant A) X = A
MacroStructure var X = X
MacroStructure (d₁ , d₂) X = MacroStructure d₁ X × MacroStructure d₂ X
MacroStructure (param A d) X = A → MacroStructure d X
MacroStructure (recvar d) X = X → MacroStructure d X
MacroStructure (maybe d) = MaybeStructure (MacroStructure d)
MacroStructure (functorial d) = FuncMacroStructure d
MacroStructure (foreign {S = S} _ _) = S

-- Notion of structured equivalence defined by a descriptor
MacroEquivStr : ∀ {ℓ} → (d : Desc ℓ) → StrEquiv {ℓ} (MacroStructure d) (macroEquivLevel d)
MacroEquivStr (constant A) = ConstantEquivStr A
MacroEquivStr var = PointedEquivStr
MacroEquivStr (d₁ , d₂) = ProductEquivStr (MacroEquivStr d₁) (MacroEquivStr d₂)
MacroEquivStr (param A d) = ParamEquivStr A λ _ → MacroEquivStr d
MacroEquivStr (recvar d) = UnaryFunEquivStr (MacroEquivStr d)
MacroEquivStr (maybe d) = MaybeEquivStr (MacroEquivStr d)
MacroEquivStr (functorial d) = FunctorialEquivStr (funcMacroAction d)
MacroEquivStr (foreign ι _) = ι

-- Proof that structure induced by descriptor is univalent
MacroUnivalentStr : ∀ {ℓ} → (d : Desc ℓ) → UnivalentStr (MacroStructure d) (MacroEquivStr d)
MacroUnivalentStr (constant A) = constantUnivalentStr A
MacroUnivalentStr var = pointedUnivalentStr
MacroUnivalentStr (d₁ , d₂) =
  ProductUnivalentStr (MacroEquivStr d₁) (MacroUnivalentStr d₁) (MacroEquivStr d₂) (MacroUnivalentStr d₂)
MacroUnivalentStr (param A d) = ParamUnivalentStr A (λ _ → MacroEquivStr d) (λ _ → MacroUnivalentStr d)
MacroUnivalentStr (recvar d) = unaryFunUnivalentStr (MacroEquivStr d) (MacroUnivalentStr d)
MacroUnivalentStr (maybe d) = maybeUnivalentStr (MacroEquivStr d) (MacroUnivalentStr d)
MacroUnivalentStr (functorial d) = functorialUnivalentStr (funcMacroAction d) (funcMacroId d)
MacroUnivalentStr (foreign _ θ) = θ

-- Module for easy importing
module Macro ℓ (d : Desc ℓ) where

  structure = MacroStructure d
  equiv = MacroEquivStr d
  univalent = MacroUnivalentStr d
|
[STATEMENT]
lemma max_mapping_cobounded2: "dom P2 \<subseteq> dom P1 \<Longrightarrow> rel_mapping (\<le>) P2 (max_mapping P1 P2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. dom P2 \<subseteq> dom P1 \<Longrightarrow> rel_mapping (\<le>) P2 (max_mapping P1 P2)
[PROOF STEP]
unfolding max_mapping_def rel_mapping_alt
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. dom P2 \<subseteq> dom P1 \<Longrightarrow> dom P2 = dom (\<lambda>x. case (P1 x, P2 x) of (None, None) \<Rightarrow> None | (None, Some x) \<Rightarrow> Map.empty x | (Some x, None) \<Rightarrow> None | (Some x, Some y) \<Rightarrow> Some (max x y)) \<and> (\<forall>p\<in>dom P2. the (P2 p) \<le> the (case (P1 p, P2 p) of (None, None) \<Rightarrow> None | (None, Some x) \<Rightarrow> Map.empty x | (Some x, None) \<Rightarrow> None | (Some x, Some y) \<Rightarrow> Some (max x y)))
[PROOF STEP]
by (auto simp: dom_def split: option.splits)
|
#include "source_generic.hpp"
#include "filesystem.hpp"
#include "info.hpp"
#include "selection_dialog.hpp"
#include "snippets.hpp"
#include "terminal.hpp"
#include "utility.hpp"
#include <algorithm>
#include <boost/algorithm/string.hpp>
Source::GenericView::GenericView(const boost::filesystem::path &file_path, const Glib::RefPtr<Gsv::Language> &language) : BaseView(file_path, language), View(file_path, language, true), autocomplete(this, interactive_completion, last_keyval, false, false) {
if(language) {
auto language_manager = LanguageManager::get_default();
auto search_paths = language_manager->get_search_path();
bool found_language_file = false;
boost::filesystem::path language_file;
boost::system::error_code ec;
for(auto &search_path : search_paths) {
boost::filesystem::path p(static_cast<std::string>(search_path) + '/' + static_cast<std::string>(language->get_id()) + ".lang");
if(boost::filesystem::exists(p, ec) && boost::filesystem::is_regular_file(p, ec)) {
language_file = p;
found_language_file = true;
break;
}
}
if(found_language_file) {
boost::property_tree::ptree pt;
try {
boost::property_tree::xml_parser::read_xml(language_file.string(), pt);
parse_language_file(pt);
}
catch(const std::exception &e) {
Terminal::get().print("\e[31mError\e[m: error parsing language file " + filesystem::get_short_path(language_file).string() + ": " + e.what() + '\n', true);
}
}
}
setup_buffer_words();
setup_autocomplete();
}
void Source::GenericView::parse_language_file(const boost::property_tree::ptree &pt) {
bool case_insensitive = false;
for(auto &node : pt) {
if(node.first == "<xmlcomment>") {
auto data = static_cast<std::string>(node.second.data());
std::transform(data.begin(), data.end(), data.begin(), ::tolower);
if(data.find("case insensitive") != std::string::npos)
case_insensitive = true;
}
else if(node.first == "keyword") {
auto data = static_cast<std::string>(node.second.data());
keywords.emplace(data);
if(case_insensitive) {
std::transform(data.begin(), data.end(), data.begin(), ::tolower);
keywords.emplace(data);
}
}
try {
parse_language_file(node.second);
}
catch(const std::exception &e) {
}
}
}
std::vector<std::pair<Gtk::TextIter, Gtk::TextIter>> Source::GenericView::get_words(const Gtk::TextIter &start, const Gtk::TextIter &end) {
std::vector<std::pair<Gtk::TextIter, Gtk::TextIter>> words;
auto iter = start;
while(iter && iter < end) {
if(is_token_char(*iter)) {
auto word = get_token_iters(iter);
if(!(*word.first >= '0' && *word.first <= '9') && (word.second.get_offset() - word.first.get_offset()) >= 3) // Minimum word length: 3
words.emplace_back(word.first, word.second);
iter = word.second;
}
iter.forward_char();
}
return words;
}
void Source::GenericView::setup_buffer_words() {
{
auto words = get_words(get_buffer()->begin(), get_buffer()->end());
for(auto &word : words) {
auto result = buffer_words.emplace(get_buffer()->get_text(word.first, word.second), 1);
if(!result.second)
++(result.first->second);
}
}
// Remove changed word at insert
get_buffer()->signal_insert().connect(
[this](const Gtk::TextIter &iter_, const Glib::ustring &text, int bytes) {
auto iter = iter_;
if(!is_token_char(*iter))
iter.backward_char();
if(is_token_char(*iter)) {
auto word = get_token_iters(iter);
if(word.second.get_offset() - word.first.get_offset() >= 3) {
auto it = buffer_words.find(get_buffer()->get_text(word.first, word.second));
if(it != buffer_words.end()) {
if(it->second > 1)
--(it->second);
else
buffer_words.erase(it);
}
}
}
},
false);
// Add all words between start and end of insert
get_buffer()->signal_insert().connect([this](const Gtk::TextIter &iter, const Glib::ustring &text, int bytes) {
auto start = iter;
auto end = iter;
start.backward_chars(text.size());
if(!is_token_char(*start))
start.backward_char();
end.forward_char();
auto words = get_words(start, end);
for(auto &word : words) {
auto result = buffer_words.emplace(get_buffer()->get_text(word.first, word.second), 1);
if(!result.second)
++(result.first->second);
}
});
// Remove words within text that was removed
get_buffer()->signal_erase().connect(
[this](const Gtk::TextIter &start_, const Gtk::TextIter &end_) {
auto start = start_;
auto end = end_;
if(!is_token_char(*start))
start.backward_char();
end.forward_char();
auto words = get_words(start, end);
for(auto &word : words) {
auto it = buffer_words.find(get_buffer()->get_text(word.first, word.second));
if(it != buffer_words.end()) {
if(it->second > 1)
--(it->second);
else
buffer_words.erase(it);
}
}
},
false);
// Add new word resulting from erased text
get_buffer()->signal_erase().connect([this](const Gtk::TextIter &start_, const Gtk::TextIter & /*end*/) {
auto start = start_;
if(!is_token_char(*start))
start.backward_char();
if(is_token_char(*start)) {
auto word = get_token_iters(start);
if(word.second.get_offset() - word.first.get_offset() >= 3) {
auto result = buffer_words.emplace(get_buffer()->get_text(word.first, word.second), 1);
if(!result.second)
++(result.first->second);
}
}
});
}
void Source::GenericView::setup_autocomplete() {
non_interactive_completion = [this] {
if(CompletionDialog::get() && CompletionDialog::get()->is_visible())
return;
autocomplete.run();
};
autocomplete.run_check = [this]() {
auto prefix_start = get_buffer()->get_insert()->get_iter();
auto prefix_end = prefix_start;
size_t count = 0;
while(prefix_start.backward_char() && is_token_char(*prefix_start))
++count;
if(prefix_start != prefix_end && !is_token_char(*prefix_start))
prefix_start.forward_char();
if((count >= 3 && !(*prefix_start >= '0' && *prefix_start <= '9')) || !interactive_completion) {
LockGuard lock(autocomplete.prefix_mutex);
autocomplete.prefix = get_buffer()->get_text(prefix_start, prefix_end);
if(interactive_completion)
show_prefix_buffer_word = buffer_words.find(autocomplete.prefix) != buffer_words.end();
else {
auto it = buffer_words.find(autocomplete.prefix);
show_prefix_buffer_word = !(it == buffer_words.end() || it->second == 1);
}
return true;
}
return false;
};
autocomplete.add_rows = [this](std::string & /*buffer*/, int /*line*/, int /*line_index*/) {
if(autocomplete.state == Autocomplete::State::starting) {
autocomplete_comment.clear();
autocomplete_insert.clear();
std::string prefix;
{
LockGuard lock(autocomplete.prefix_mutex);
prefix = autocomplete.prefix;
}
for(auto &keyword : keywords) {
if(starts_with(keyword, prefix)) {
autocomplete.rows.emplace_back(keyword);
autocomplete_insert.emplace_back(keyword);
autocomplete_comment.emplace_back("");
}
}
{
for(auto &buffer_word : buffer_words) {
if((show_prefix_buffer_word || buffer_word.first.size() > prefix.size()) &&
starts_with(buffer_word.first, prefix) &&
keywords.find(buffer_word.first) == keywords.end()) {
autocomplete.rows.emplace_back(buffer_word.first);
auto insert = buffer_word.first;
boost::replace_all(insert, "$", "\\$");
autocomplete_insert.emplace_back(insert);
autocomplete_comment.emplace_back("");
}
}
}
LockGuard lock(snippets_mutex);
if(snippets) {
for(auto &snippet : *snippets) {
if(starts_with(snippet.prefix, prefix)) {
autocomplete.rows.emplace_back(snippet.prefix);
autocomplete_insert.emplace_back(snippet.body);
autocomplete_comment.emplace_back(snippet.description);
}
}
}
}
return true;
};
autocomplete.on_show = [this] {
hide_tooltips();
};
autocomplete.on_hide = [this] {
autocomplete_comment.clear();
autocomplete_insert.clear();
};
autocomplete.on_select = [this](unsigned int index, const std::string &text, bool hide_window) {
get_buffer()->erase(CompletionDialog::get()->start_mark->get_iter(), get_buffer()->get_insert()->get_iter());
if(hide_window)
insert_snippet(CompletionDialog::get()->start_mark->get_iter(), autocomplete_insert[index]);
else
get_buffer()->insert(CompletionDialog::get()->start_mark->get_iter(), text);
};
autocomplete.set_tooltip_buffer = [this](unsigned int index) -> std::function<void(Tooltip & tooltip)> {
auto tooltip_str = autocomplete_comment[index];
if(tooltip_str.empty())
return nullptr;
return [tooltip_str = std::move(tooltip_str)](Tooltip &tooltip) {
tooltip.insert_with_links_tagged(tooltip_str);
};
};
}
|
State Before: α : Type u
inst✝² : CommGroup α
inst✝¹ : LE α
inst✝ : CovariantClass α α (fun x x_1 => x * x_1) fun x x_1 => x ≤ x_1
a b c d : α
⊢ a * b⁻¹ ≤ c ↔ a ≤ b * c State After: no goals Tactic: rw [← inv_mul_le_iff_le_mul, mul_comm]
|
Set Warnings "-notation-overridden,-parsing".
From Coq Require Import Lists.List.
From DF Require Import Dataframe.
(* Define terms. *)
Inductive op : Type :=
| DataFrame :
Dataframe.DataFrame -> op
| Transpose : op -> op
| Mask :
(** Mask positions. **) list nat ->
(** Axis to apply mask over **) Dataframe.Axis ->
op -> op
| Filter :
(** Filter function **) (list Dataframe.data_value -> bool) ->
(** Axis to filter over **) Dataframe.Axis ->
op -> op
| ToLabels :
(** Index to use as index labels **) nat ->
(** Axis to update the index for. **) Dataframe.Axis ->
op -> op
| FromLabels :
(** Axis to take the index from. **) Dataframe.Axis ->
op -> op
| Map :
(** Mapping function **) (list Dataframe.data_value -> list Dataframe.data_value) ->
(** Possible dtypes each index **) list (list Dataframe.dtype) ->
(** Axis to map over. **) Dataframe.Axis ->
op -> op
| Concat :
(** How to concat **) Dataframe.Set_Combine ->
(** Axis to concat along. **) Dataframe.Axis ->
(** Other dataframe to concat **) op ->
(** Self **) op ->
op
| InferDTypes :
(** Axis to infer dtypes for. **) Dataframe.Axis ->
op -> op.
Inductive dfvalue : op -> Prop :=
| df_DataFrame : forall df_obj,
Dataframe.DataFrame -> dfvalue (DataFrame df_obj).
Hint Constructors dfvalue : core.
Reserved Notation "t '-->' t'" (at level 40).
Inductive step : op -> op -> Prop :=
| ST_TransposeValue : forall df df_obj,
dfvalue df ->
df = DataFrame df_obj ->
Transpose df --> DataFrame (Dataframe.Transpose df_obj)
| ST_TransposeStep : forall df df',
df --> df' ->
Transpose df --> Transpose df'
| ST_TransposeTwice : forall df,
dfvalue df ->
Transpose (Transpose df) --> df
| ST_MaskValue : forall mask_position axis df df_obj,
dfvalue df ->
df = DataFrame df_obj ->
Mask mask_position axis df --> DataFrame (Dataframe.Mask mask_position axis df_obj)
| ST_MaskStep : forall mask_position axis df df',
df --> df' ->
Mask mask_position axis df --> Mask mask_position axis df'
| ST_FilterNone : forall axis df,
dfvalue df ->
Filter (fun row => true) axis df --> df
| ST_FilterValue : forall filter_func axis df df_obj,
dfvalue df ->
df = DataFrame df_obj ->
Filter filter_func axis df --> DataFrame (Dataframe.Filter filter_func axis df_obj)
| ST_FilterStep : forall filter_func axis df df',
df --> df' ->
Filter filter_func axis df --> Filter filter_func axis df'
| ST_ToLabelsValue : forall index_ind axis df df_obj,
dfvalue df ->
df = DataFrame df_obj ->
ToLabels index_ind axis df --> DataFrame (Dataframe.ToLabels index_ind axis df_obj)
| ST_ToLabelsStep : forall index_ind axis df df',
df --> df' ->
ToLabels index_ind axis df --> ToLabels index_ind axis df'
| ST_FromLabelsValue : forall axis df df_obj,
dfvalue df ->
df = DataFrame df_obj ->
FromLabels axis df --> DataFrame (Dataframe.FromLabels axis df_obj)
| ST_FromLabelsStep : forall axis df df',
df --> df' ->
FromLabels axis df --> FromLabels axis df'
| ST_ToFromLabels : forall axis df,
dfvalue df ->
ToLabels 0 axis (FromLabels axis df) --> df
| ST_MapValues : forall map_func map_dtype axis df df_obj,
dfvalue df ->
df = DataFrame df_obj ->
Map map_func map_dtype axis df --> DataFrame (Dataframe.Map map_func map_dtype axis df_obj)
| ST_MapStep : forall map_func map_dtype axis df df',
df --> df' ->
Map map_func map_dtype axis df --> Map map_func map_dtype axis df'
| ST_ConcatValue : forall concat_type axis other other_obj self self_obj,
dfvalue self ->
dfvalue other ->
self = DataFrame self_obj ->
other = DataFrame other_obj ->
Concat concat_type axis other self --> DataFrame (Dataframe.Concat concat_type axis other_obj self_obj)
| ST_ConcatStep : forall concat_type axis other self df,
dfvalue self ->
dfvalue other ->
Concat concat_type axis other self --> df
| ST_InferDTypesValue : forall axis df df_obj,
dfvalue df ->
df = DataFrame df_obj ->
InferDTypes axis df --> DataFrame (Dataframe.InferDTypes axis df_obj)
| ST_InferDTypesStep : forall axis df df',
df --> df' ->
InferDTypes axis df --> InferDTypes axis df'
where "t '-->' t'" := (step t t').
Hint Constructors step : core.
(* Define the data typing for operations. *)
Reserved Notation "'⊢' df '∈' rDT ',' cDT " (at level 40).
Inductive has_dtype : op -> list dtype -> list dtype -> Prop :=
| DT_Constructor : forall df_obj,
⊢ (DataFrame df_obj) ∈ (get_row_dtypes df_obj), (get_col_dtypes df_obj)
| DT_Transpose : forall df row__dtypes col__dtypes df_obj,
⊢ df ∈ row__dtypes, col__dtypes ->
df = DataFrame df_obj ->
⊢ DataFrame (Dataframe.Transpose df_obj) ∈ col__dtypes, row__dtypes
| DT_TransposeTwice : forall df row__dtypes col__dtypes df_obj,
⊢ df ∈ row__dtypes, col__dtypes ->
df = DataFrame df_obj ->
⊢ DataFrame (Dataframe.Transpose (Dataframe.Transpose df_obj)) ∈ row__dtypes, col__dtypes
| DT_Mask : forall mask_positions axis df row__dtypes col__dtypes df_obj df_obj',
⊢ df ∈ row__dtypes, col__dtypes ->
df = DataFrame df_obj ->
df_obj' = Dataframe.Mask mask_positions axis df_obj ->
⊢ DataFrame df_obj' ∈ (get_row_dtypes df_obj'), (get_col_dtypes df_obj')
| DT_Filter : forall filter_func axis df row__dtypes col__dtypes df_obj df_obj',
⊢ df ∈ row__dtypes, col__dtypes ->
df = DataFrame df_obj ->
df_obj' = Dataframe.Filter filter_func axis df_obj ->
⊢ DataFrame df_obj' ∈ (get_row_dtypes df_obj'), (get_col_dtypes df_obj')
| DT_ToLabels : forall label_ind axis df row__dtypes col__dtypes df_obj df_obj',
⊢ df ∈ row__dtypes, col__dtypes ->
df = DataFrame df_obj ->
df_obj' = Dataframe.ToLabels label_ind axis df_obj ->
⊢ DataFrame df_obj' ∈ (get_row_dtypes df_obj'), (get_col_dtypes df_obj')
| DT_FromLabels : forall axis df row__dtypes col__dtypes df_obj df_obj',
⊢ df ∈ row__dtypes, col__dtypes ->
df = DataFrame df_obj ->
df_obj' = Dataframe.FromLabels axis df_obj ->
⊢ DataFrame df_obj' ∈ (get_row_dtypes df_obj'), (get_col_dtypes df_obj')
| DT_Map : forall map_func map_dtypes axis df row__dtypes col__dtypes df_obj df_obj',
⊢ df ∈ row__dtypes, col__dtypes ->
df = DataFrame df_obj ->
df_obj' = Dataframe.Map map_func map_dtypes axis df_obj ->
⊢ DataFrame df_obj' ∈ (get_row_dtypes df_obj'), (get_col_dtypes df_obj')
| DT_Concat : forall concat_type axis other self other_row__dtypes other_col__dtypes self_row__dtypes self_col__dtypes other_obj self_obj df_obj',
⊢ self ∈ self_row__dtypes, self_col__dtypes ->
⊢ other ∈ other_row__dtypes, other_col__dtypes ->
self = DataFrame self_obj ->
other = DataFrame other_obj ->
df_obj' = Dataframe.Concat concat_type axis other_obj self_obj ->
⊢ DataFrame df_obj' ∈ (get_row_dtypes df_obj'), (get_col_dtypes df_obj')
| DT_InferDTypes : forall axis df row__dtypes col__dtypes df_obj df_obj',
⊢ df ∈ row__dtypes, col__dtypes ->
df = DataFrame df_obj ->
df_obj' = Dataframe.InferDTypes axis df_obj ->
⊢ DataFrame df_obj' ∈ (get_row_dtypes df_obj'), (get_col_dtypes df_obj')
where "'⊢' df '∈' rDT ',' cDT" := (has_dtype df rDT cDT).
Hint Constructors has_dtype : core.
Theorem progress : forall df rDT cDT,
β’ df β rDT, cDT ->
dfvalue df \/ exists df', df --> df'.
Proof.
intros df rDT cDT H.
induction H; auto; try (right; destruct IHhas_dtype; destruct H0; eauto).
Qed.
Theorem preservation: forall df df' rDT cDT,
β’ df β rDT, cDT ->
df --> df' ->
β’ df' β rDT, cDT.
Proof.
intros df df' rDT cDT HT HE.
generalize dependent df'.
induction HT; intros df' HE; try (inversion HE; subst); eauto.
Qed.
|
---
# Section 5.3: The Power Method and Some Simple Extensions
---
Let $A \in \mathbb{C}^{n \times n}$ be a matrix with _linearly independent_ eigenvectors
$$
v_1, \ldots, v_n
$$
and corresponding eigenvalues
$$
\lambda_1, \ldots, \lambda_n
$$
(i.e., $A v_i = \lambda_i v_i$, for $i=1,\ldots,n$) ordered such that
$$
|\lambda_1| \ge |\lambda_2| \ge \cdots \ge |\lambda_n|.
$$
We say that $A$ has a **dominant eigenvalue** if
$$
|\lambda_1| > |\lambda_2|.
$$
---
## The Power Method
The basic idea of the **power method** is to pick a vector $q \in \mathbb{C}^n$ and compute the sequence
$$
q,\ A q,\ A^2 q,\ A^3 q,\ \ldots.
$$
Since the eigenvectors $v_1,\ldots,v_n$ form a basis for $\mathbb{C}^n$, we have that
$$
q = c_1 v_1 + \cdots + c_n v_n.
$$
For a random $q$, we expect $c_1 \ne 0$.
Then
$$
\begin{align}
A q
&= c_1 A v_1 + \cdots + c_n A v_n \\
&= c_1 \lambda_1 v_1 + \cdots + c_n \lambda_n v_n
\end{align}
$$
and
$$
\begin{align}
A^2 q
&= c_1 \lambda_1 A v_1 + \cdots + c_n \lambda_n A v_n \\
&= c_1 \lambda_1^2 v_1 + \cdots + c_n \lambda_n^2 v_n.
\end{align}
$$
In general, we have
$$
A^j q = c_1 \lambda_1^j v_1 + \cdots + c_n \lambda_n^j v_n
$$
and
$$
\frac{A^j q}{\lambda_1^j} = c_1 v_1 + c_2 \left(\frac{\lambda_2}{\lambda_1}\right)^j v_2 + \cdots + c_n \left(\frac{\lambda_n}{\lambda_1}\right)^j v_n.
$$
Letting
$$
q_j = \frac{A^j q}{\lambda_1^j},
$$
we have
$$
\begin{align}
\| q_j - c_1 v_1 \|
&= \left\| c_2 \left(\frac{\lambda_2}{\lambda_1}\right)^j v_2 + \cdots + c_n \left(\frac{\lambda_n}{\lambda_1}\right)^j v_n \right\| \\
&\le |c_2| \left|\frac{\lambda_2}{\lambda_1}\right|^j \|v_2\| + \cdots + |c_n| \left|\frac{\lambda_n}{\lambda_1}\right|^j \|v_n\| \\
&\le \left|\frac{\lambda_2}{\lambda_1}\right|^j \big(|c_2| \|v_2\| + \cdots + |c_n| \|v_n\|\big).
\end{align}
$$
Now suppose $|\lambda_1| > |\lambda_2|$. Then
$$
\left|\frac{\lambda_2}{\lambda_1}\right| < 1.
$$
Therefore,
$$
\left|\frac{\lambda_2}{\lambda_1}\right|^j \to 0 \quad \text{as} \ j \to \infty.
$$
Thus, $\| q_j - c_1 v_1 \| \to 0$ as $j \to \infty$, so we conclude that
$$
q_j \to c_1 v_1 \quad \text{as $j \to \infty$.}
$$
The rate of convergence of the power method is generally linear ($\|q_{j+1} - c_1 v_1\| \approx r \|q_j - c_1 v_1\|$ for all $j$ sufficiently large) with convergence ratio
$$
r = \left|\frac{\lambda_2}{\lambda_1}\right|.
$$
Thus, the larger the gap between $|\lambda_1|$ and $|\lambda_2|$, the smaller the convergence ratio and the faster the convergence.
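Here is a minimal sketch (not part of the original notes) that illustrates this ratio on a $2 \times 2$ diagonal matrix with eigenvalues $4$ and $2$, so $r = 1/2$; the distance from the normalized iterate to the dominant eigenvector should shrink by roughly a factor of $1/2$ per step:
```julia
using LinearAlgebra

A  = Diagonal([4.0, 2.0])     # known eigenvalues: λ₁ = 4, λ₂ = 2, so r = 0.5
v1 = [1.0, 0.0]               # dominant eigenvector
q  = normalize([1.0, 1.0])    # starting vector with c₁ ≠ 0

for j = 1:8
    global q = normalize(A*q)                        # one (normalized) power-method step
    println("iteration $j: error ≈ ", norm(q - v1))  # shrinks by ≈ 0.5 each step
end
```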
---
## Scaling
Since we usually do not know $\lambda_1$ while running the power method, we will not be able to compute $q_j = A^j q/\lambda_1^j$. However, it is important that we scale $A^j q$ since $\|A^j q\| \to \infty$ if $|\lambda_1| > 1$ and $\|A^j q\| \to 0$ if $|\lambda_1| < 1$.
A simple choice is to scale $A^j q$ so that its largest entry is equal to one. Thus, we let
$$
q_{j+1} = \frac{A q_j}{s_{j+1}},
$$
where $s_{j+1}$ is the component of $A q_j$ which has the largest absolute value.
---
## Algorithm
Given $q_0 \in \mathbb{C}^n$, we iterate
1. $\hat{q} = A q_j$
2. $s_{j+1} =$ entry of $\hat{q}$ with largest absolute value
3. $q_{j+1} \gets \hat{q}/s_{j+1}$
for $j = 0, 1, 2, \ldots$.
Then $q_j$ approaches a multiple of $v_1$ and $s_j$ approaches the eigenvalue $\lambda_1$.
If $A$ is a dense $n \times n$ matrix, then each iteration of this algorithm will require $2n^2 + O(n)$ flops. However, if $A$ is sparse and has at most $k$ nonzeros on each row, then each iteration will require approximately $2 k n$ flops. Therefore, the power method is very well suited for computing the dominant eigenvalue and associated eigenvector of large sparse matrices.
---
## `power_method`
```julia
using LinearAlgebra, SparseArrays
```
```julia
function scale!(q)
maxval, idx = maximum((abs(q[i]),i) for i=1:length(q))
s = q[idx]
q ./= s
return s
end
```
```julia
function power_method(A; tol=sqrt(eps())/2, maxiter=100_000)
m, n = size(A)
n == m || error("Matrix must be square.")
q = randn(n)
s = scale!(q)
lam = s
qold = similar(q)
k = 0
done = false
while !done && k < maxiter
k += 1
copy!(qold, q) # qold = q
mul!(q, A, qold) # q = A*qold
s = scale!(q) # q = q/s
lam = s
done = norm(A*q - lam*q)/norm(q) <= tol
end
if done
println("Converged after $k iterations.")
else
println("Failed to converge.")
end
return lam, q
end
```
```julia
n = 1_000
k = 10
density = (k - 1)/n # density = (k*n - n)/n^2
A = triu(sprand(n, n, density), 1)
A = A + A' + I
# Expect nnz(A) ≈ k*n
@show nnz(A)
if n <= 1000
λ = eigvals(Matrix(A))
abseig = abs.(λ) |> sort
r = abseig[end-1]/abseig[end]
@show r
end
println()
@time lam, q = power_method(A)
@show lam
@show norm(A*q - lam*q)/norm(q);
```
---
## Google PageRank Algorithm
Google uses its [PageRank](https://en.wikipedia.org/wiki/PageRank) algorithm to determine its ranking of webpages in search results.
The [Google matrix](https://en.wikipedia.org/wiki/Google_matrix) represents how webpages on the Internet link to one another.
PageRank uses the power method to compute the dominant eigenvector of the Google matrix, and this dominant eigenvector is then used to rank the importance of webpages.
By design, the convergence ratio of the Google matrix is
$$
\left|\frac{\lambda_2}{\lambda_1}\right| = 0.85,
$$
so the number of power method iterations is reasonable.
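As a toy illustration (not from the original notes; the link structure below is made up), one can build a small column-stochastic Google matrix with damping factor $0.85$ and run the power method on it; the entries of the resulting dominant eigenvector rank the four hypothetical pages:
```julia
using LinearAlgebra

S = [0    0    1    1/2;
     1/3  0    0    0;
     1/3  1/2  0    1/2;
     1/3  1/2  0    0]        # hypothetical link matrix (each column sums to 1)
n = 4
G = 0.85*S .+ 0.15/n          # Google matrix; columns still sum to 1

x = fill(1/n, n)              # start from the uniform distribution
for _ = 1:50
    global x = G*x            # power-method step; sum(x) stays 1, so no extra scaling needed
end
@show x                       # PageRank scores: the largest entry marks the top-ranked page
```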
---
## The Inverse Power Method
Let $A \in \mathbb{C}^{n \times n}$ be nonsingular. Since $A$ is nonsingular, all of its eigenvalues are nonzero.
Since
$$
A v = \lambda v \quad \implies \quad A^{-1} v = \lambda^{-1} v,
$$
the eigenvalues of $A^{-1}$ are $\lambda_1^{-1},\ldots,\lambda_n^{-1}$ and the corresponding eigenvectors are $v_1,\ldots,v_n$.
Since
$$
|\lambda_1| \ge |\lambda_2| \ge \cdots \ge |\lambda_n|,
$$
we have that
$$
\left|\lambda_1^{-1}\right| \le \left|\lambda_2^{-1}\right| \le \cdots \le \left|\lambda_n^{-1}\right|.
$$
If $|\lambda_{n-1}| > |\lambda_n|$, then $\left|\lambda_n^{-1}\right| > \left|\lambda_{n-1}^{-1}\right|$, so the **inverse power method**,
$$
q,\ A^{-1} q,\ A^{-2} q,\ A^{-3} q,\ \ldots,
$$
will generate a sequence $q_j$ that converges to a multiple of $v_n$ (i.e., the eigenvector corresponding to the _smallest_ eigenvalue of $A$).
---
## `inverse_power_method`
```julia
function inverse_power_method(A; tol=sqrt(eps())/2, maxiter=100_000)
m, n = size(A)
n == m || error("Matrix must be square.")
F = lu(A)
q = randn(n)
s = scale!(q)
lam = 1/s
qold = similar(q)
k = 0
done = false
while !done && k < maxiter
k += 1
copy!(qold, q) # qold = q
ldiv!(q, F, qold) # q = F\qold
s = scale!(q) # q = q/s
lam = 1/s
done = norm(A*q - lam*q)/norm(q) <= tol
end
if done
println("Converged after $k iterations.")
else
println("Failed to converge.")
end
return lam, q
end
```
```julia
n = 1000
k = 5
density = (k - 1)/n # density = (k*n - n)/n^2
A = triu(sprand(n, n, density), 1)
A = A + A' + I
# Expect nnz(A) ≈ k*n
@show nnz(A)
if n <= 1000
λ = eigvals(Matrix(A))
abseig = abs.(λ) |> sort
r = abseig[1]/abseig[2]
@show r
end
println()
@time lam, q = inverse_power_method(A)
@show lam
@show norm(A*q - lam*q)/norm(q);
```
---
## The Shift-and-Invert Method
If $A v = \lambda v$, then
$$
\big( A - \rho I \big) v = \big( \lambda - \rho \big) v.
$$
Therefore, using the inverse power method on $A - \rho I$, we can compute an eigenvector with eigenvalue closest to the shift $\rho$.
That is, if
$$
|\lambda_i - \rho| \ll |\lambda_j - \rho|, \quad \forall j \ne i,
$$
then the **shift-and-invert method**,
$$
q,\ (A - \rho I)^{-1} q,\ (A - \rho I)^{-2} q,\ (A - \rho I)^{-3} q,\ \ldots,
$$
will generate a sequence $q_j$ that converges to a multiple of $v_i$.
The rate of convergence is
$$
\left| \frac{\lambda_i - \rho}{\lambda_k - \rho} \right|,
$$
where $\lambda_k - \rho$ is the second smallest eigenvalue of $A - \rho I$ in absolute value.
Once we have an $LU$ decomposition of $A - \rho I$ (which requires $\frac{2}{3}n^3 + O(n^2)$ flops), we can compute
$$
q \gets (A - \rho I)^{-1} q
$$
each iteration in $2 n^2$ flops.
---
## `inverse_power_method` with shift $\rho$
```julia
function inverse_power_method(A; ρ=0.0, tol=sqrt(eps())/2, maxiter=100_000)
m, n = size(A)
n == m || error("Matrix must be square.")
F = lu(A - ρ*I)
q = randn(n)
s = scale!(q)
lam = 1/s + ρ
qold = similar(q)
k = 0
done = false
while !done && k < maxiter
k += 1
copy!(qold, q) # qold = q
ldiv!(q, F, qold) # q = F\qold
s = scale!(q) # q = q/s
lam = 1/s + ρ
done = norm(A*q - lam*q)/norm(q) <= tol
end
if done
println("Converged after $k iterations.")
else
println("Failed to converge.")
end
return lam, q
end
```
```julia
n = 1000
k = 5
density = (k - 1)/n # density = (k*n - n)/n^2
A = triu(sprand(n, n, density), 1)
A = A + A' + I
ρ = 2.0
# Expect nnz(A) ≈ k*n
@show nnz(A)
if n <= 1000
λ = eigvals(Matrix(A))
abseig = abs.(λ .- ρ) |> sort
r = abseig[1]/abseig[2]
@show r
end
println()
@time lam, q = inverse_power_method(A, ρ=ρ)
@show ρ, lam
@show norm(A*q - lam*q)/norm(q);
```
---
## Rayleigh Quotient Iteration
Suppose $q \in \mathbb{C}^n$ approximates an eigenvector of $A$. If $A q = \rho q$, then $\rho$ is an eigenvalue of $A$. Otherwise, we want to find the value of $\rho$ that minimizes
$$
\| A q - \rho q \|_2.
$$
The _normal equation_ for this least squares problem is
$$
(q^* q) \rho = q^* A q,
$$
where $q^*$ is the **conjugate transpose** of $q$.
For example, if
$$
q = \begin{bmatrix} 1 + 3 i \\ 4 - 2 i \end{bmatrix},
$$
then
$$
q^* = \begin{bmatrix} 1 - 3 i & 4 + 2 i \end{bmatrix}.
$$
Note that $q^* q = \|q\|_2^2$.
The solution of the normal equations is
$$
\rho = \frac{q^* A q}{q^* q}
$$
and is called the **Rayleigh quotient**.
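As a quick numerical check (a sketch, not part of the original notes), for a random symmetric $A$ and a unit vector $q$, the Rayleigh quotient gives a smaller residual $\|Aq - \rho q\|_2$ than any other choice of $\rho$:
```julia
using LinearAlgebra

A = Symmetric(randn(5, 5))
q = normalize(randn(5))

ρ = dot(q, A*q) / dot(q, q)        # Rayleigh quotient (dot(q, q) = 1 here)
@show norm(A*q - ρ*q)              # residual at the minimizer
@show norm(A*q - (ρ + 0.1)*q)      # perturbing ρ can only increase the residual
```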
The **Rayleigh quotient iteration** uses
$$
\rho_j = \frac{q_j^* A q_j}{q_j^* q_j}
$$
as the _shift_ in each iteration of the inverse power method.
Since the shift changes each iteration, we need to compute an $LU$ decomposition _each iteration_. This can be very expensive since each iteration will now cost $\frac{2}{3}n^3 + O(n^2)$ flops.
To make the Rayleigh quotient iteration practical, we can first compute a "simple" matrix $H$ that is _similar_ to $A$, such as an **upper Hessenberg** matrix
$$
H =
\begin{bmatrix}
* & * & * & * & * \\
* & * & * & * & * \\
& * & * & * & * \\
& & * & * & * \\
& & & * & * \\
\end{bmatrix}
$$
or a **tridiagonal** matrix
$$
H =
\begin{bmatrix}
* & * & & & \\
* & * & * & & \\
& * & * & * & \\
& & * & * & * \\
& & & * & * \\
\end{bmatrix}.
$$
Computing the $LU$ decomposition of an upper Hessenberg matrix needs only $O(n^2)$ flops, and that of a tridiagonal matrix only $O(n)$ flops. We will return to this topic in Section 5.5.
---
## `rayleigh`
```julia
using SuiteSparse
function rayleigh(A; ρ0=0.0, tol=sqrt(eps())/2, maxiter=100)
m, n = size(A)
n == m || error("Matrix must be square.")
q = randn(n)
normalize!(q)
ρ = ρ0
lam = dot(q, A*q)
qold = similar(q)
F = SuiteSparse.UMFPACK.UmfpackLU(
Ptr{Nothing}(), Ptr{Nothing}(), 0, 0,
Int[], Int[], Float64[], 0)
k = 0
done = false
while !done && k < maxiter
k += 1
copy!(qold, q) # qold = q
if k == 1
F = lu(A - ρ*I) # Creates symbolic factorization F
else
lu!(F, A - ρ*I) # Overwrites F with new factorization
end
ldiv!(q, F, qold) # q = (A - ρ*I)\qold
normalize!(q)
lam = dot(q, A*q)
if k > 1
ρ = lam
end
done = norm(A*q - lam*q) <= tol
end
if done
println("Converged after $k iterations.")
else
println("Failed to converge.")
end
return lam, q
end
```
```julia
n = 4000
k = 5
density = (k - 1)/n # density = (k*n - n)/n^2
A = triu(sprand(n, n, density), 1)
A = A + A' + I
ρ = 2.0
# Expect nnz(A) ≈ k*n
@show nnz(A)
if n <= 1000
λ = eigvals(Matrix(A))
abseig = abs.(λ .- ρ) |> sort
r = abseig[1]/abseig[2]
@show r
end
println()
println("Inverse power method:")
@time lam, q = inverse_power_method(A, ρ=ρ)
@show ρ, lam
@show norm(A*q - lam*q)/norm(q);
println()
println("Rayleigh quotient method:")
@time lam, q = rayleigh(A, ρ0=ρ)
@show ρ, lam
@show norm(A*q - lam*q);
```
---
## Quadratic convergence of the Rayleigh Quotient Iteration
> ### Theorem: (Rayleigh Quotient Approximates Eigenvalue)
>
> Let $A \in \mathbb{C}^{n \times n}$. Let $v$ be an eigenvector of $A$ with eigenvalue $\lambda$, and $\|v\|_2 = 1$.
>
> Let $q \in \mathbb{C}^n$ with $\|q\|_2 = 1$ and
>
> $$ \rho = q^* A q $$
>
> be the Rayleigh quotient of $q$. Then
>
> $$ |\lambda - \rho| \le 2 \|A\|_2 \|v - q\|_2. $$
Therefore, if $\|v - q\|_2 = O(\varepsilon)$, then $|\lambda - \rho| = O(\varepsilon)$.
Let $q_0 \in \mathbb{C}^n$ such that $\|q_0\|_2 = 1$, and let $q_j$, for $j=1,2,\ldots$, be defined by
$$
\rho_j = q_j^* A q_j,
\qquad
(A - \rho_j I) \hat{q}_{j+1} = q_j,
\qquad
q_{j+1} = \hat{q}_{j+1}/\|\hat{q}_{j+1}\|_2.
$$
Then $\|q_j\|_2 = 1$, for all $j$.
1. Suppose that $q_j \to v_i$ as $j \to \infty$. Then $\|v_i\|_2 = 1$ and $\rho_j \to \lambda_i$ as $j \to \infty$.
2. Let $\lambda_k$ be the closest eigenvalue to $\lambda_i$.
3. Suppose that $\rho_j \approx \lambda_i$.
Then
$$
\begin{align}
\|v_i - q_{j+1}\|_2
&\approx \left| \frac{(\lambda_k - \rho_j)^{-1}}{(\lambda_i - \rho_j)^{-1}} \right| \|v_i - q_j\|_2 \\
&= \left| \frac{\lambda_i - \rho_j}{\lambda_k - \rho_j} \right| \|v_i - q_j\|_2 \\
&\le \frac{2 \|A\|_2 \|v_i - q_j\|_2}{|\lambda_k - \rho_j|} \|v_i - q_j\|_2 \\
&\approx \frac{2 \|A\|_2}{|\lambda_k - \lambda_i|} \|v_i - q_j\|_2^2. \\
\end{align}
$$
Thus, we obtain the estimate
$$ \|v_i - q_{j+1}\|_2 \approx C \|v_i - q_j\|_2^2, $$
where $C = 2 \|A\|_2 / |\lambda_k - \lambda_i|$. This indicates that the Rayleigh quotient iteration typically converges _quadratically_ when it does converge.
Moreover, if $A$ is a symmetric matrix, then $\|v - q\|_2 = O(\varepsilon)$ implies that $|\lambda - \rho| = O(\varepsilon^2)$, which indicates _cubic_ convergence:
$$ \|v_i - q_{j+1}\|_2 \approx C \|v_i - q_j\|_2^3. $$
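A small experiment (a sketch, not part of the original notes, under the assumption that the iteration converges to the targeted eigenpair) makes this visible: track $\|v_i - q_j\|_2$ while running the Rayleigh quotient iteration on a random symmetric matrix and watch the error collapse to roundoff within a few steps:
```julia
using LinearAlgebra

A = Symmetric(randn(6, 6))
λs, V = eigen(A)                         # reference eigenpairs for measuring the error
v = V[:, 1]                              # target eigenvector (eigenvalue λs[1])

q = normalize(v + 0.1*randn(6))          # start near the target eigenvector
ρ = dot(q, A*q)                          # initial Rayleigh quotient
for j = 1:4
    global q = normalize((A - ρ*I) \ q)  # shift-and-invert step with the current shift ρ
    global ρ = dot(q, A*q)               # update the Rayleigh quotient
    err = min(norm(q - v), norm(q + v))  # account for the sign ambiguity of eigenvectors
    println("iteration $j: error = $err")
end
```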
---
|