text
stringlengths 0
3.34M
|
---|
[GOAL]
ι : Type u_1
M : Type u_2
N : Type u_3
α : ι → Type u_4
inst✝³ : (i : ι) → SMul M (α i)
inst✝² : (i : ι) → SMul N (α i)
a✝ : M
i : ι
b✝ : α i
x✝ : (i : ι) × α i
inst✝¹ : SMul M N
inst✝ : ∀ (i : ι), IsScalarTower M N (α i)
a : M
b : N
x : (i : ι) × α i
⊢ (a • b) • x = a • b • x
[PROOFSTEP]
cases x
[GOAL]
case mk
ι : Type u_1
M : Type u_2
N : Type u_3
α : ι → Type u_4
inst✝³ : (i : ι) → SMul M (α i)
inst✝² : (i : ι) → SMul N (α i)
a✝ : M
i : ι
b✝ : α i
x : (i : ι) × α i
inst✝¹ : SMul M N
inst✝ : ∀ (i : ι), IsScalarTower M N (α i)
a : M
b : N
fst✝ : ι
snd✝ : α fst✝
⊢ (a • b) • { fst := fst✝, snd := snd✝ } = a • b • { fst := fst✝, snd := snd✝ }
[PROOFSTEP]
rw [smul_mk, smul_mk, smul_mk, smul_assoc]
[GOAL]
ι : Type u_1
M : Type u_2
N : Type u_3
α : ι → Type u_4
inst✝² : (i : ι) → SMul M (α i)
inst✝¹ : (i : ι) → SMul N (α i)
a✝ : M
i : ι
b✝ : α i
x✝ : (i : ι) × α i
inst✝ : ∀ (i : ι), SMulCommClass M N (α i)
a : M
b : N
x : (i : ι) × α i
⊢ a • b • x = b • a • x
[PROOFSTEP]
cases x
[GOAL]
case mk
ι : Type u_1
M : Type u_2
N : Type u_3
α : ι → Type u_4
inst✝² : (i : ι) → SMul M (α i)
inst✝¹ : (i : ι) → SMul N (α i)
a✝ : M
i : ι
b✝ : α i
x : (i : ι) × α i
inst✝ : ∀ (i : ι), SMulCommClass M N (α i)
a : M
b : N
fst✝ : ι
snd✝ : α fst✝
⊢ a • b • { fst := fst✝, snd := snd✝ } = b • a • { fst := fst✝, snd := snd✝ }
[PROOFSTEP]
rw [smul_mk, smul_mk, smul_mk, smul_mk, smul_comm]
[GOAL]
ι : Type u_1
M : Type u_2
N : Type u_3
α : ι → Type u_4
inst✝³ : (i : ι) → SMul M (α i)
inst✝² : (i : ι) → SMul N (α i)
a✝ : M
i : ι
b : α i
x✝ : (i : ι) × α i
inst✝¹ : (i : ι) → SMul Mᵐᵒᵖ (α i)
inst✝ : ∀ (i : ι), IsCentralScalar M (α i)
a : M
x : (i : ι) × α i
⊢ MulOpposite.op a • x = a • x
[PROOFSTEP]
cases x
[GOAL]
case mk
ι : Type u_1
M : Type u_2
N : Type u_3
α : ι → Type u_4
inst✝³ : (i : ι) → SMul M (α i)
inst✝² : (i : ι) → SMul N (α i)
a✝ : M
i : ι
b : α i
x : (i : ι) × α i
inst✝¹ : (i : ι) → SMul Mᵐᵒᵖ (α i)
inst✝ : ∀ (i : ι), IsCentralScalar M (α i)
a : M
fst✝ : ι
snd✝ : α fst✝
⊢ MulOpposite.op a • { fst := fst✝, snd := snd✝ } = a • { fst := fst✝, snd := snd✝ }
[PROOFSTEP]
rw [smul_mk, smul_mk, op_smul_eq_smul]
[GOAL]
ι : Type u_1
M : Type u_2
N : Type u_3
α : ι → Type u_4
m : Monoid M
inst✝ : (i : ι) → MulAction M (α i)
x : (i : ι) × α i
⊢ 1 • x = x
[PROOFSTEP]
cases x
[GOAL]
case mk
ι : Type u_1
M : Type u_2
N : Type u_3
α : ι → Type u_4
m : Monoid M
inst✝ : (i : ι) → MulAction M (α i)
fst✝ : ι
snd✝ : α fst✝
⊢ 1 • { fst := fst✝, snd := snd✝ } = { fst := fst✝, snd := snd✝ }
[PROOFSTEP]
rw [smul_mk, one_smul]
[GOAL]
ι : Type u_1
M : Type u_2
N : Type u_3
α : ι → Type u_4
m : Monoid M
inst✝ : (i : ι) → MulAction M (α i)
a b : M
x : (i : ι) × α i
⊢ (a * b) • x = a • b • x
[PROOFSTEP]
cases x
[GOAL]
case mk
ι : Type u_1
M : Type u_2
N : Type u_3
α : ι → Type u_4
m : Monoid M
inst✝ : (i : ι) → MulAction M (α i)
a b : M
fst✝ : ι
snd✝ : α fst✝
⊢ (a * b) • { fst := fst✝, snd := snd✝ } = a • b • { fst := fst✝, snd := snd✝ }
[PROOFSTEP]
rw [smul_mk, smul_mk, smul_mk, mul_smul]
|
"Nellie's" battle with helicopters proved to be difficult to film. The scenes were initially shot in Miyazaki, first with takes of the gyrocopter, with more than 85 take-offs, 5 hours of flight and Wallis nearly crashing into the camera several times. A scene filming the helicopters from above created a major downdraft and cameraman John Jordan's foot was severed by the craft's rotor. The concluding shots involved explosions, which the Japanese government did not allow in a national park. So, the crew moved to Torremolinos, Spain, which was found to resemble the Japanese landscape.
|
module FiniteElementMod
! Second-order implicit time integrator for a damped oscillator, wrapped in a
! finite-element-style solver type.
  type,public :: FEAsolver
    real :: zeta=1,omega=1  ! damping ratio and natural frequency
    real :: xi(3),dxi       ! state history (new, current, previous) and last increment
    ! real,allocatable :: IEN(:,:) ! element node array, how the elements are linked to the nodes.
    ! real,allocatable :: ID(:) ! destination array, links the global node number to the global equation number in the final linear system
  contains
    procedure,public :: init,update
  end type
contains
  ! Initialise the state history to xi0 and optionally override zeta/omega.
  subroutine init(a,xi0,zeta,omega)
    class(FEAsolver),intent(inout) :: a
    real,intent(in) :: xi0
    real,intent(in),optional :: zeta,omega
    a%xi = (/xi0,xi0,xi0/)
    if(present( zeta)) a%zeta = zeta
    if(present(omega)) a%omega= omega
  end subroutine init
  ! Advance one step of size dt under forcing q using a second-order
  ! backward difference in time.
  subroutine update(a,q,dt)
    class(FEAsolver),intent(inout) :: a
    real,intent(in) :: q,dt
    real :: xi,xi_1,xi_2,xi_n  ! xi_n was previously implicitly typed; declare it
    xi=a%xi(1); xi_1=a%xi(2); xi_2=a%xi(3)
    ! -- update in time using second order backward difference
    xi_n = (q + (5+4*a%zeta*a%omega*dt)*xi &
          -(4+a%zeta*a%omega*dt)*xi_1+xi_2) &
          /(2+3*a%zeta*a%omega*dt+a%omega**2*dt**2)
    ! -- prepare next time step
    a%xi = (/xi_n, xi, xi_1/)
    a%dxi = xi_n-xi
  end subroutine update
  !
  ! -- test on an unforced oscillator; writes time, xi and dxi to out.csv
  subroutine FEAsolver_test
    implicit none
    type(FEAsolver) :: FEsolver
    real,allocatable :: xi(:),ts(:),dxi(:)
    real :: dt,time
    integer :: i,n
    call FEsolver%init(xi0=1.,zeta=sqrt(10.)/40,omega=sqrt(10.))
    dt = 0.001
    ! Assign time at run time: the original "time=0" initializer implied the
    ! SAVE attribute, so a second call would have resumed at the final time.
    time = 0
    ! Fixed iteration count replaces the original "do while(time<stop)" loop,
    ! which could overrun the arrays (sized int(stop/dt)) when floating-point
    ! round-off let the loop take one extra step.
    n = nint(10./dt)
    allocate(xi(n),ts(n),dxi(n))
    do i=1,n
      call FEsolver%update(q=0.,dt=dt)
      time = time+dt
      xi(i) = FEsolver%xi(1)
      ts(i) = time
      dxi(i) = FEsolver%dxi
    end do
    open(14,file='out.csv')
    write(14,*) ts
    write(14,*) xi
    write(14,*) dxi
    close(14)
  end subroutine FEAsolver_test
end module FiniteElementMod
! Driver: run the oscillator regression test, which writes out.csv.
program test
  use FiniteElementMod
  implicit none
  call FEAsolver_test
end program test
|
module GRIN.Pipeline
import Data.SortedSet as Set
import Data.SortedMap as Map
import Data.String.Builder
import System.Clock
import System.File
import GRIN.AST
import GRIN.Error
import GRIN.GrinM
import GRIN.Analysis.CallGraph
import GRIN.Analysis.Inline
import GRIN.Opts.CaseSimplify
import GRIN.Opts.CopyPropogation
import GRIN.Opts.Inline
import GRIN.Opts.NormaliseBind
import GRIN.Opts.UnusedConstructorElim
import GRIN.Opts.UnusedFunctionElim
import GRIN.Opts.UnusedParameterElim
public export
||| A single optimisation pass over a GRIN program.
data Optimise name
    = CaseSimplify           -- simplify case expressions
    | CopyPropogation        -- propagate copies (spelling matches the Opts module name)
    | InlineSimpleDef        -- mark "simple" definitions and inline them
    | InlineUsedOnce         -- inline functions referenced exactly once
    | InlineFunc name        -- inline one specific function by name
    | NormaliseBind          -- normalise monadic bind structure
    | UnusedConstructorElim  -- remove constructors that are never used
    | UnusedFunctionElim     -- remove functions that are never called
    | UnusedParamElim        -- remove parameters that are never used
    | Fix (List (Optimise name)) -- iterate a pass list until the program stops changing
||| Human-readable pass names, used by the timing/log output.
Show name => Show (Optimise name) where
    -- Fixed typos in the display strings: "propogation" -> "propagation",
    -- "elimintation" -> "elimination".
    show CopyPropogation = "copy propagation"
    show CaseSimplify = "case simplification"
    show InlineSimpleDef = "inline simple definitions"
    show InlineUsedOnce = "inline functions used once"
    show (InlineFunc fn) = "inline " ++ show fn
    show NormaliseBind = "bind normalisation"
    show UnusedConstructorElim = "unused constructor elimination"
    show UnusedFunctionElim = "unused function elimination"
    show UnusedParamElim = "unused parameter elimination"
    show (Fix os) = "fix " ++ show os
||| Run a list of optimisation passes in order (mutually recursive with runOpt,
||| which needs it to implement `Fix`).
export
runOpts : Monad m => Ord name => List (Optimise name) -> GrinT name m ()

||| Run a single optimisation pass over the program held in the GrinT state.
export
runOpt : Monad m => Ord name => Optimise name -> GrinT name m ()
runOpt CaseSimplify = caseSimplify
runOpt CopyPropogation = copyProp
-- The inlining passes first mark candidates, then run the shared inliner.
runOpt InlineSimpleDef = do
    inlineSimpleDefs
    inlineAll
runOpt InlineUsedOnce = do
    inlineUsedOnce
    inlineAll
-- Inline one named function: seed the inliner's work set with its definition.
-- Silently a no-op when the function does not exist in the program.
runOpt (InlineFunc fn) = do
    ds <- gets $ defs . prog
    case lookup fn ds of
        Nothing => pure ()
        Just def => modify $ record { toInline = singleton fn def }
    inlineAll
runOpt NormaliseBind = normaliseBind
runOpt UnusedConstructorElim = unusedConsElim
runOpt UnusedFunctionElim = unusedFuncElim
runOpt UnusedParamElim = unusedParamElim
-- Fix: rerun the pass list until the program reaches a fixed point
-- (compares whole programs for equality between iterations).
runOpt s@(Fix ss) = do
    p0 <- gets prog
    runOpts ss
    p1 <- gets prog
    if p1 == p0
        then pure ()
        else runOpt s
runOpts [] = pure ()
runOpts (s :: ss) = runOpt s *> runOpts ss
public export
||| A pipeline step: either run an optimisation, or dump some analysis to a
||| file. The Bool enables the dump; the String is the output file path.
data Transform name
    = O (Optimise name)
    | SaveGrin Bool String          -- dump the pretty-printed program
    | SaveCalls Bool String         -- dump the call graph
    | SaveCalledBy Bool String      -- dump the reverse call graph
    | SaveInlineSimple Bool String  -- dump the names of simple inline candidates
||| Human-readable step names for logging (flag and file path are elided).
Show name => Show (Transform name) where
    show (O o) = show o
    show (SaveGrin _ _) = "save GRIN"
    show (SaveCalls _ _) = "save calls graph"
    show (SaveCalledBy _ _) = "save called by graph"
    show (SaveInlineSimple _ _) = "save simple functions to inline"
||| Run one pipeline step. Dump steps (`Save*`) only act when their Bool flag
||| is True; a False flag falls through to the final catch-all clause.
||| File write failures are recorded with `newError` rather than thrown.
export
runTransform : Show name => ShowB (Prog name) => Ord name => Transform name -> GrinT name IO ()
runTransform (O opt) = runOpt opt
runTransform (SaveGrin True f) = do
    -- pretty-print the whole program and write it to f
    p <- gets prog
    let pretty = runBuilder $ showB p
    Right () <- lift $ writeFile f pretty
        | Left err => newError $ FileErr f err
    pure ()
runTransform (SaveCalls True f) = do
    cg <- getCalls
    let pretty = showCallGraph cg
    Right () <- lift $ writeFile f pretty
        | Left err => newError $ FileErr f err
    pure ()
runTransform (SaveCalledBy True f) = do
    cg <- getCalledBy
    let pretty = showCallGraph cg
    Right () <- lift $ writeFile f pretty
        | Left err => newError $ FileErr f err
    pure ()
runTransform (SaveInlineSimple True f) = do
    -- runs the marking pass for its side effect, then dumps the candidate names
    inlineSimpleDefs
    ti <- gets toInline
    Right () <- lift $ writeFile f $ show $ keys ti
        | Left err => newError $ FileErr f err
    pure ()
-- catch-all: any Save* step whose flag is False does nothing
runTransform _ = pure ()
||| Apply each pipeline step in order, discarding the unit results.
export
runTransforms : Show name => ShowB (Prog name) => Ord name => List (Transform name) -> GrinT name IO ()
runTransforms [] = pure ()
runTransforms (t :: ts) = do
    runTransform t
    runTransforms ts
||| Nanoseconds per second.
nano : Integer
nano = 1000000000

||| One thousandth of `nano`: dividing a nanosecond remainder by this yields
||| milliseconds, used to render durations with three fractional digits.
micro : Integer
micro = 1000000
||| Run pipeline steps like `runTransforms`, printing the process-clock time
||| each step takes, formatted as seconds with three fractional digits.
export
runWithTiming : Show name => ShowB (Prog name) => Ord name => List (Transform name) -> GrinT name IO ()
runWithTiming [] = pure ()
runWithTiming (t :: ts) = do
    putStrLn $ "Start " ++ show t
    startClock <- liftIO $ clockTime Process
    let start = seconds startClock * nano + nanoseconds startClock
    runTransform t
    -- Bug fix: the clock must be read again AFTER the transform runs; the
    -- original reused the starting clock here, so every duration printed as 0.
    endClock <- liftIO $ clockTime Process
    let end = seconds endClock * nano + nanoseconds endClock
    let time = end - start
    putStrLn $
        "Finished, took: "
        ++ show (time `div` nano) ++ "."
        ++ addZeros (unpack (show ((time `mod` nano) `div` micro)))
        ++ "s"
    runWithTiming ts
  where
    -- Left-pad the millisecond count to three digits.
    addZeros : List Char -> String
    addZeros [] = "000"
    addZeros [x] = "00" ++ cast x
    addZeros [x, y] = "0" ++ cast x ++ cast y
    addZeros str = pack str
|
#' The full human linear model underlying PROGENy
#'
#' HGNC gene symbols in rows, pathways in columns. Pathway activity inference
#' works by matrix multiplication of gene expression with the model.
#'
#' @format The full human model contains 22479 genes, associated pathways,
#' weight and the p-value.
#' \describe{
#' \item{gene}{gene names in HGNC symbols}
#' \item{pathway}{names of PROGENy pathways}
#' \item{weight}{z-scores for a given gene}
#' \item{p.value}{significance of gene in pathway}
#' }
#' @keywords datasets
#' @name model_human_full
#' @examples get("model_human_full", envir = .GlobalEnv)
#' @source \url{https://www.nature.com/articles/s41467-017-02391-6}
NULL
#' The full mouse linear model underlying PROGENy
#'
#' MGI gene symbols in rows, pathways in columns. Pathway activity inference
#' works by matrix multiplication of gene expression with the model.
#'
#' @format The full mouse model contains 17426 genes, associated pathways,
#' weight and the p-value.
#' \describe{
#'   \item{gene}{gene names in MGI symbols}
#' \item{pathway}{names of PROGENy pathways}
#' \item{weight}{z-scores for a given gene}
#' \item{p.value}{significance of gene in a pathway}
#' }
#' @keywords datasets
#' @name model_mouse_full
#' @examples get("model_mouse_full", envir = .GlobalEnv)
#' @source \url{https://www.ncbi.nlm.nih.gov/pubmed/31525460}
NULL
#' The RNA data used in the progeny vignette
#'
#' List with three elements: the gene counts, the experimental design and
#' the result of limma differential analysis
#'
#' @format List with three elements: the gene counts, the experimental design and
#' the result of limma differential analysis
#' \describe{
#' \item{counts}{gene counts}
#'   \item{design}{experimental design}
#' \item{limma_ttop}{differential analysis result using limma}
#' }
#' @keywords datasets
#' @name vignette_data
#' @examples data("vignette_data")
#' @source \url{https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE119931}
NULL
|
%!TEX TX-program = xelatex
\documentclass{article}
\usepackage{allan-eason}
\usetikzlibrary{positioning}
\usetikzlibrary{svg.path}
\graphicspath{ {./images/}}
\newcommand{\Title}{\LaTeX\ Test File}
\newcommand{\Author}{Eason S.}
\title{\Title}
\author{\Author}
\date{\today}
\geometry{a4paper, scale=0.8}
\lhead{\Title}
\begin{document}
\maketitle
\section{Maxwell's Equations}
\subsection{Integral Format}
\defword{Maxwell's Equations} (in forms of \defword{Integral}):
\begin{align}
\oiint_{S} \vect{D} \cdot \diff \vect{S} = \sum q &= \int_V \rho \diff V,\\
\oiint_{S} \vect{B} \cdot \diff \vect{S} &= 0,\\
\oint_{L} \vect{H} \cdot \diff \vect{l} = I + I_{\diff} &= \int_{S} \vect{j} \cdot \diff \vect{S} + \int_{S} \frac{\partial \vect{D}}{\partial t} \cdot \diff \vect{S},\\
\oint_{L} \vect{E} \cdot \diff \vect{l} = - \frac{\diff \varPhi}{\diff t} &= - \int_{S} \frac{\partial \vect{B}}{\partial t} \cdot \diff \vect{S}.
\end{align}
Here, (1) states for the \defword{Gauss Theorem} in an \defword{Electric Field}, while (2) states for the \defword{Gauss Theorem} in a \defword{Magnetic Field}. (3) states for the relationship between \defword{A Changing Electric Field} and a magnetic field, or \defword{Ampere's Circulation Theorem}. (4) states for the relationship between \defword{A Changing Magnetic Field} and an electric field, or \defword{Faraday's Theorem of induction}.
\section{Partial Derivative}
\subsection{Definition}
Let \(t=f(x, y, \ldots)\), the \defword{Partial Derivative} of \(f\) towards \(x\) is
\[
f'_{x} = \partial_x f = D_x f = D_1 f = \frac{\partial}{\partial x} f = \frac{\partial f}{\partial x} = \lim_{\Delta x \rightarrow 0} \frac{f(x + \Delta x, y, \ldots) - f(x, y, \ldots)}{\Delta x}.
\]
Define vector \(\vect{a} = (x, y, \ldots), \vect{\hat{e}_x} = (1, 0, \ldots)\), therefore
\[
\frac{\partial}{\partial x} f = \lim_{h\rightarrow 0} \frac{f(\vect{a} + h \vect{\hat{e}_x}) - f(\vect{a})}{h}.
\]
\subsection{Gradient}
Define \defword{Gradient} as following:
\[
\Grad f(\vect{a}) = \nabla f(\vect{a}) = \left(\at{\frac{\partial f}{\partial x}}{\vect{a}}, \at{\frac{\partial f}{\partial y}}{\vect{a}}, \ldots\right).
\]
We usually define the Gradient as follows in a 3-dimensional space:
\[
\Grad = \nabla = \left[\frac{\partial}{\partial x}\right]\vect{\hat{e}_x} + \left[\frac{\partial}{\partial y}\right]\vect{\hat{e}_y} + \left[\frac{\partial}{\partial z}\right]\vect{\hat{e}_z}.
\]
\subsection{Directional Derivative}
Define the \defword{Directional Derivative} along vector \(\vect{v} = \left(v_1, v_2, \ldots\right)\),
\[
\nabla_{\vect{v}} f(\vect{a}) = \lim_{h \rightarrow 0} \frac{f(\vect{a} + h \vect{v}) - f(\vect{a})}{h}.
\]
\subsection{Laplace Operator}
Define the \defword{Laplace Operator} as following:
\[
\Delta = \frac{\partial^2}{\partial x^2} + \frac{\partial^2}{\partial y^2} + \frac{\partial^2}{\partial z^2} = \nabla \cdot \nabla = \nabla^2.
\]
\subsection{Divergence}
Define the \defword{Divergence} of a vector as following: (it outputs a value)
\[
\Div \vect{v} = \nabla \cdot \vect{v} = \left(\frac{\partial}{\partial x}, \frac{\partial}{\partial y}, \frac{\partial}{\partial z} \right) \cdot \left(v_x, v_y, v_z\right) = \frac{\partial v_x}{\partial x} + \frac{\partial v_y}{\partial y} + \frac{\partial v_z}{\partial z}.
\]
\end{document} |
/-
Copyright (c) 2020 Markus Himmel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Markus Himmel
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.linear_algebra.finite_dimensional
import Mathlib.ring_theory.ideal.basic
import Mathlib.PostPort
universes u l v w
namespace Mathlib
/-!
# Invariant basis number property
We say that a ring `R` satisfies the invariant basis number property if there is a well-defined
notion of the rank of a finitely generated free (left) `R`-module. Since a finitely generated free
module with a basis consisting of `n` elements is linearly equivalent to `fin n → R`, it is
sufficient that `(fin n → R) ≃ₗ[R] (fin m → R)` implies `n = m`.
## Main definitions
`invariant_basis_number R` is a type class stating that `R` has the invariant basis number property.
## Main results
We show that every nontrivial commutative ring has the invariant basis number property.
## Future work
So far, there is no API at all for the `invariant_basis_number` class. There are several natural
ways to formulate that a module `M` is finitely generated and free, for example
`M ≃ₗ[R] (fin n → R)`, `M ≃ₗ[R] (ι → R)`, where `ι` is a fintype, or prividing a basis indexed by
a finite type. There should be lemmas applying the invariant basis number property to each
situation.
The finite version of the invariant basis number property implies the infinite analogue, i.e., that
`(ι →₀ R) ≃ₗ[R] (ι' →₀ R)` implies that `cardinal.mk ι = cardinal.mk ι'`. This fact (and its
variants) should be formalized.
## References
* https://en.wikipedia.org/wiki/Invariant_basis_number
## Tags
free module, rank, invariant basis number, IBN
-/
/-- We say that `R` has the invariant basis number property if `(fin n → R) ≃ₗ[R] (fin m → R)`
implies `n = m`. This gives rise to a well-defined notion of rank of a finitely generated free
module. -/
class invariant_basis_number (R : Type u) [ring R] where
  -- the single defining field: a linear equivalence of finite free modules forces equal ranks
  eq_of_fin_equiv : ∀ {n m : ℕ}, linear_equiv R (fin n → R) (fin m → R) → n = m
/-- Unbundled accessor for `invariant_basis_number.eq_of_fin_equiv`: over a ring with the
invariant basis number property, a linear equivalence `(fin n → R) ≃ₗ[R] (fin m → R)` gives
`n = m`. -/
theorem eq_of_fin_equiv (R : Type u) [ring R] [invariant_basis_number R] {n : ℕ} {m : ℕ} :
    linear_equiv R (fin n → R) (fin m → R) → n = m :=
  invariant_basis_number.eq_of_fin_equiv
/-- A ring with the invariant basis number property is nontrivial. -/
theorem nontrivial_of_invariant_basis_number (R : Type u) [ring R] [invariant_basis_number R] :
    nontrivial R :=
  sorry -- proof body omitted by this automated port
/-- A field has invariant basis number. This will be superseded below by the fact that any nonzero
commutative ring has invariant basis number. -/
-- proof body (`sorry`) omitted by this automated port
theorem invariant_basis_number_field {K : Type u} [field K] : invariant_basis_number K := sorry
/-!
We want to show that nontrivial commutative rings have invariant basis number. The idea is to
take a maximal ideal `I` of `R` and use an isomorphism `R^n ≃ R^m` of `R` modules to produce an
isomorphism `(R/I)^n ≃ (R/I)^m` of `R/I`-modules, which will imply `n = m` since `R/I` is a field
and we know that fields have invariant basis number.
We construct the isomorphism in two steps:
1. We construct the ring `R^n/I^n`, show that it is an `R/I`-module and show that there is an
isomorphism of `R/I`-modules `R^n/I^n ≃ (R/I)^n`. This isomorphism is called
`ideal.pi_quot_equiv` and is located in the file `ring_theory/ideals.lean`.
2. We construct an isomorphism of `R/I`-modules `R^n/I^n ≃ R^m/I^m` using the isomorphism
`R^n ≃ R^m`.
-/
/-- An `R`-linear map `R^n → R^m` induces a function `R^n/I^n → R^m/I^m`. -/
/-- An isomorphism of `R`-modules `R^n ≃ R^m` induces an isomorphism `R/I`-modules
`R^n/I^n ≃ R^m/I^m`. -/
/-- Nontrivial commutative rings have the invariant basis number property. -/
-- Strategy (per the module comment above): pass to `R/I` for a maximal ideal `I` — a field —
-- via `ideal.pi_quot_equiv`, and use that fields have invariant basis number.
-- The proof body is omitted (`sorry`) by this automated port.
protected instance invariant_basis_number_of_nontrivial_of_comm_ring {R : Type u} [comm_ring R]
    [nontrivial R] : invariant_basis_number R :=
  invariant_basis_number.mk fun (n m : ℕ) (e : linear_equiv R (fin n → R) (fin m → R)) => sorry
end Mathlib |
State Before: G : Type u
A : Type v
x y✝ : G
a b : A
n m : ℕ
inst✝² : Monoid G
inst✝¹ : AddMonoid A
H : Type u_1
inst✝ : Monoid H
y : H
⊢ orderOf x = orderOf y ↔ ∀ (n : ℕ), x ^ n = 1 ↔ y ^ n = 1 State After: no goals Tactic: simp_rw [← isPeriodicPt_mul_iff_pow_eq_one, ← minimalPeriod_eq_minimalPeriod_iff, orderOf] |
/*
Brian Staber ([email protected])
*/
#ifndef DIRICHLETSTRIPELONGATION_STOCHASTICPOLYCONVEXHGO_HPP
#define DIRICHLETSTRIPELONGATION_STOCHASTICPOLYCONVEXHGO_HPP
#include "tensor_calculus.hpp"
#include "laplacepp.hpp"
#include "nearlyIncompressibleHyperelasticity.hpp"
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_real_distribution.hpp>
#include <boost/math/special_functions/erf.hpp>
#include <boost/random/normal_distribution.hpp>
#include <boost/math/special_functions/gamma.hpp>
#include <boost/math/special_functions/beta.hpp>
class dirichletStripElongation_StochasticPolyconvexHGO : public nearlyIncompressibleHyperelasticity
{
public:
double w1, w2, w3, w4;
double mean_c1, c1, deltaC1;
double mean_c2, c2, deltaC2;
double mean_u1, u1, deltaU1;
double mean_mu4, mu4, deltaG4;
double mean_mu1, mu1;
double mean_mu2, mu2;
double mean_mu3, mu3;
double alpha1, alpha2;
double alpha3, alpha4;
double tau1, tau2;
double alpha5, alpha6;
double beta3, beta4;
double theta;
double epsilon = 1e-6;
Epetra_IntSerialDenseVector cells_nodes_p1_med;
Epetra_SerialDenseVector w1_gmrf, w2_gmrf, w3_gmrf, w4_gmrf;
Epetra_SerialDenseVector a, b;
Epetra_SerialDenseVector E1,E2,E3;
// Build the stochastic polyconvex HGO strip-elongation problem: read the mesh
// and model parameters, derive the hyperparameters of the random material
// fields, construct the parallel maps / FE graph, and set up the Dirichlet
// boundary conditions.
dirichletStripElongation_StochasticPolyconvexHGO(Epetra_Comm & comm, Teuchos::ParameterList & Parameters){
    // --- mesh selection and model name ---
    std::string mesh_file = Teuchos::getParameter<std::string>(Parameters.sublist("Mesh"), "mesh_file");
    //std::string boundary_file = Teuchos::getParameter<std::string>(Parameters.sublist("Mesh"), "boundary_file");
    unsigned int number_physical_groups = Teuchos::getParameter<unsigned int>(Parameters.sublist("Mesh"), "nb_phys_groups"); // NOTE(review): only used by the commented-out boundary-file read
    std::string select_model = Teuchos::getParameter<std::string>(Parameters.sublist("Mesh"), "model");
    // --- mean HGO parameters and dispersion coefficients of the random fields ---
    mean_mu1 = Teuchos::getParameter<double>(Parameters.sublist(select_model), "mu1");
    mean_mu2 = Teuchos::getParameter<double>(Parameters.sublist(select_model), "mu2");
    mean_mu3 = Teuchos::getParameter<double>(Parameters.sublist(select_model), "mu3");
    mean_mu4 = Teuchos::getParameter<double>(Parameters.sublist(select_model), "mu4");
    beta3 = Teuchos::getParameter<double>(Parameters.sublist(select_model), "beta3");
    beta4 = Teuchos::getParameter<double>(Parameters.sublist(select_model), "beta4");
    theta = Teuchos::getParameter<double>(Parameters.sublist(select_model), "theta");
    deltaC1 = Teuchos::getParameter<double>(Parameters.sublist(select_model), "deltaC1");
    deltaC2 = Teuchos::getParameter<double>(Parameters.sublist(select_model), "deltaC2");
    deltaU1 = Teuchos::getParameter<double>(Parameters.sublist(select_model), "deltaU1");
    deltaG4 = Teuchos::getParameter<double>(Parameters.sublist(select_model), "deltaG4");
    // --- means of the reparametrised quantities (c1, c2, u1) used by the marginals ---
    mean_c1 = 2.0*mean_mu3*beta3*beta3;
    mean_c2 = 2.0*mean_mu1 + std::sqrt(3.0)*3.0*mean_mu2;
    mean_u1 = 2.0*mean_mu1/mean_c2;
    // --- hyperparameters of the beta marginal for u1 ---
    double gamma = 2.0*mean_mu1/(std::sqrt(3.0)*3.0*mean_mu2);
    tau2 = (1.0 - deltaU1*deltaU1)/(deltaU1*deltaU1*gamma*(gamma+1.0));
    tau1 = (2.0*mean_mu1/(std::sqrt(3.0)*3.0*mean_mu2))*tau2;
    // --- gamma marginals for c1, c2, mu4: shape = 1/delta^2, scale = mean*delta^2,
    //     so the mean of each marginal equals the requested mean ---
    alpha1 = 1.0/(deltaC1*deltaC1);
    alpha2 = mean_c1*deltaC1*deltaC1;
    alpha3 = 1.0/(deltaC2*deltaC2);
    alpha4 = mean_c2*deltaC2*deltaC2;
    alpha5 = 1.0/(deltaG4*deltaG4);
    alpha6 = mean_mu4*deltaG4*deltaG4;
    // --- mesh and parallel layout (3 displacement dofs per node) ---
    Mesh = new mesh(comm, mesh_file, 1000.0); // 1000.0 presumably a unit scaling — confirm in the mesh ctor
    //Mesh->read_boundary_file(boundary_file,number_physical_groups);
    Comm = Mesh->Comm;
    StandardMap = new Epetra_Map(-1,3*Mesh->n_local_nodes_without_ghosts,&Mesh->local_dof_without_ghosts[0],0,*Comm);
    OverlapMap = new Epetra_Map(-1,3*Mesh->n_local_nodes,&Mesh->local_dof[0],0,*Comm);
    ImportToOverlapMap = new Epetra_Import(*OverlapMap,*StandardMap);
    create_FECrsGraph();
    // --- fiber directions a and b at +/- theta from E1 in the (E1,E2) plane ---
    a.Resize(3); b.Resize(3);
    E1.Resize(3); E2.Resize(3); E3.Resize(3);
    E1(0) = 1.0; E1(1) = 0.0; E1(2) = 0.0;
    E2(0) = 0.0; E2(1) = 1.0; E2(2) = 0.0;
    E3(0) = 0.0; E3(1) = 0.0; E3(2) = 1.0;
    for (int i=0; i<3; ++i){
        a(i) = std::cos(theta)*E1(i) + std::sin(theta)*E2(i);
        b(i) = std::cos(theta)*E1(i) - std::sin(theta)*E2(i);
    }
    setup_dirichlet_conditions();
}
~dirichletStripElongation_StochasticPolyconvexHGO(){
    // NOTE(review): Mesh, StandardMap, OverlapMap and ImportToOverlapMap are
    // allocated with `new` in the constructor but not deleted here — confirm
    // whether the base class releases them, otherwise they leak.
}
// Read the P1 tetrahedral connectivity of the "media" mesh that carries the
// Gaussian random fields, and size the four nodal field vectors (w1..w4).
// NOTE(review): on open failure only a message is printed and
// cells_nodes_p1_med stays unsized — confirm callers never proceed past that.
void get_media(unsigned int & n_cells, unsigned int & n_nodes, std::string & path){
    std::ifstream connectivity_file_med;
    connectivity_file_med.open(path);
    // one random-field value per media node, for each of the four fields
    w1_gmrf.Resize(n_nodes);
    w2_gmrf.Resize(n_nodes);
    w3_gmrf.Resize(n_nodes);
    w4_gmrf.Resize(n_nodes);
    if (connectivity_file_med.is_open()){
        cells_nodes_p1_med.Resize(4*n_cells); // 4 nodes per tetrahedron
        for (unsigned int e=0; e<4*n_cells; ++e){
            connectivity_file_med >> cells_nodes_p1_med[e];
            cells_nodes_p1_med[e] = cells_nodes_p1_med[e]-1; // file ids are 1-based
        }
        connectivity_file_med.close();
    }
    else{
        std::cout << "Couldn't open the connectivity file for the media.\n";
    }
}
// Assemble the tangent matrix K and right-hand side F for the current
// solution x. Pure-Dirichlet problem with no body forcing; delegates to the
// base-class assembly routine.
void get_matrix_and_rhs(Epetra_Vector & x, Epetra_FECrsMatrix & K, Epetra_FEVector & F){
    assemblePureDirichlet_homogeneousForcing(x,K,F);
}
// Count and record the degrees of freedom clamped by Dirichlet conditions:
// all three displacement components of every node lying on the two faces
// y == 0 and y == 10 mm (dof = 1 selects the y coordinate, and coordinates
// are in meters, hence 10.0/1000.0).
// NOTE(review): exact floating-point comparison against nodal coordinates —
// relies on the mesh file storing these planes exactly; confirm.
void setup_dirichlet_conditions(){
    n_bc_dof = 0;
    int dof = 1; // y component
    double coord;
    unsigned int node;
    // first pass: count the boundary dofs (3 per clamped node)
    for (unsigned int i=0; i<Mesh->n_local_nodes_without_ghosts; ++i){
        node = Mesh->local_nodes[i];
        coord = Mesh->nodes_coord[3*node+dof];
        if(coord==0.0){
            n_bc_dof+=3;
        }
        if(coord==10.0/1000.0){
            n_bc_dof+=3;
        }
    }
    int indbc = 0;
    dof_on_boundary = new int [n_bc_dof];
    // second pass: record the local dof indices of each clamped node
    for (unsigned int inode=0; inode<Mesh->n_local_nodes_without_ghosts; ++inode){
        node = Mesh->local_nodes[inode];
        coord = Mesh->nodes_coord[3*node+dof];
        if (coord==0.0){
            dof_on_boundary[indbc+0] = 3*inode+0;
            dof_on_boundary[indbc+1] = 3*inode+1;
            dof_on_boundary[indbc+2] = 3*inode+2;
            indbc+=3;
        }
        if (coord==10.0/1000.0){
            dof_on_boundary[indbc+0] = 3*inode+0;
            dof_on_boundary[indbc+1] = 3*inode+1;
            dof_on_boundary[indbc+2] = 3*inode+2;
            indbc+=3;
        }
    }
}
// Impose the prescribed y-displacement on the y == 10 mm face and zero
// displacement on y == 0: lift the Dirichlet values into the RHS
// (F -= K*v), overwrite the RHS rows of the constrained dofs, then replace
// the corresponding matrix rows/columns by the identity (OAZ).
void apply_dirichlet_conditions(Epetra_FECrsMatrix & K, Epetra_FEVector & F, double & displacement){
    // v holds the prescribed values at constrained dofs, zero elsewhere
    Epetra_MultiVector v(*StandardMap,true);
    v.PutScalar(0.0);
    int node;
    int dof = 1; // y component
    double coord;
    for (unsigned int inode=0; inode<Mesh->n_local_nodes_without_ghosts; ++inode){
        node = Mesh->local_nodes[inode];
        coord = Mesh->nodes_coord[3*node+dof];
        if (coord==10.0/1000.0){
            v[0][StandardMap->LID(3*node+dof)] = displacement;
        }
    }
    // move K*v to the right-hand side
    Epetra_MultiVector rhs_dir(*StandardMap,true);
    K.Apply(v,rhs_dir);
    F.Update(-1.0,rhs_dir,1.0);
    // RHS entries of constrained rows: 0 for clamped dofs, the prescribed
    // displacement for the pulled y-dof (matches the unit diagonal set by OAZ)
    for (unsigned int inode=0; inode<Mesh->n_local_nodes_without_ghosts; ++inode){
        node = Mesh->local_nodes[inode];
        coord = Mesh->nodes_coord[3*node+dof];
        if (coord==0.0){
            F[0][StandardMap->LID(3*node+0)] = 0.0;
            F[0][StandardMap->LID(3*node+1)] = 0.0;
            F[0][StandardMap->LID(3*node+2)] = 0.0;
        }
        if (coord==10.0/1000.0){
            F[0][StandardMap->LID(3*node+0)] = 0.0;
            F[0][StandardMap->LID(3*node+dof)] = displacement;
            F[0][StandardMap->LID(3*node+2)] = 0.0;
        }
    }
    // zero constrained rows/columns of K and put ones on their diagonal
    ML_Epetra::Apply_OAZToMatrix(dof_on_boundary,n_bc_dof,K);
}
// Sample the material parameters at Gauss point `gp` of local cell `e_lid`:
// interpolate the four nodal Gaussian fields with tetra4 shape functions,
// map them through inverse CDFs (gamma marginals for c1, c2, mu4; beta for
// u1), and blend with the mean values via the epsilon regularisation.
void get_material_parameters(unsigned int & e_lid, unsigned int & gp){
    int n_gauss_points = Mesh->n_gauss_cells; // NOTE(review): unused
    int e_gid = Mesh->local_cells[e_lid];
    int node;
    // reference coordinates of this Gauss point
    double xi = Mesh->xi_cells[gp]; double eta = Mesh->eta_cells[gp]; double zeta = Mesh->zeta_cells[gp];
    Epetra_SerialDenseVector N(4);
    tetra4::shape_functions(N,xi,eta,zeta);
    // P1 interpolation of the Gaussian germs w1..w4 at the Gauss point
    w1 = 0.0; w2 = 0.0; w3 = 0.0; w4 = 0.0;
    for (unsigned int j=0; j<4; ++j){
        node = cells_nodes_p1_med(4*e_gid+j);
        w1 += N(j)*w1_gmrf(node);
        w2 += N(j)*w2_gmrf(node);
        w3 += N(j)*w3_gmrf(node);
        w4 += N(j)*w4_gmrf(node);
    }
    // inverse-CDF transforms to the physical marginals
    c1 = icdf_gamma(w1,alpha1,alpha2);
    c2 = icdf_gamma(w2,alpha3,alpha4);
    u1 = icdf_beta (w3,tau1,tau2);
    mu4 = icdf_gamma(w4,alpha5,alpha6);
    // recover the HGO moduli; the epsilon blend with the means keeps the
    // parameters bounded away from degenerate samples
    mu1 = ( epsilon*mean_mu1 + (1.0/2.0)*c2*u1 )/( 1.0+epsilon );
    mu2 = ( epsilon*mean_mu2 + (1.0/(std::sqrt(3.0)*3.0))*c2*(1.0-u1) )/( 1.0+epsilon );
    mu3 = ( epsilon*mean_mu3 + c1/(2.0*beta3*beta3) )/( 1.0+epsilon );
    mu4 = ( epsilon*mean_mu4 + mu4 )/( 1.0+epsilon );
}
double icdf_gamma(double & w, double & alpha, double & beta){
double erfx = boost::math::erf<double>(w/std::sqrt(2.0));
double y = (1.0/2.0)*(1.0 + erfx);
double yinv = boost::math::gamma_p_inv<double,double>(alpha,y);
double z = yinv*beta;
return z;
}
double icdf_beta(double & w, double & tau1, double & tau2){
double erfx = boost::math::erf<double>(w/std::sqrt(2.0));
double y = (1.0/2.0)*(1.0 + erfx);
double z = boost::math::ibeta_inv<double,double,double>(tau1,tau2,y);
return z;
}
// Constitutive hook used by the nearly-incompressible assembly: forwards to
// model_C, which fills the inverse Cauchy-Green vector, the isochoric and
// volumetric Piola stresses, and their tangents from the deformation gradient.
void get_constitutive_tensors_static_condensation(Epetra_SerialDenseMatrix & deformation_gradient, double & det, Epetra_SerialDenseVector & inverse_cauchy, Epetra_SerialDenseVector & piola_isc, Epetra_SerialDenseVector & piola_vol, Epetra_SerialDenseMatrix & tangent_piola_isc, Epetra_SerialDenseMatrix & tangent_piola_vol){
    model_C(deformation_gradient, det, inverse_cauchy, piola_isc, piola_vol, tangent_piola_isc, tangent_piola_vol);
}
// Volumetric pressure law and its derivative with respect to the volume
// ratio `theta` (both written in terms of theta^beta3).
void get_internal_pressure(double & theta, double & pressure, double & dpressure){
    double tb = std::pow(theta,beta3); // theta^beta3
    pressure = beta3*( (tb/theta) - (1.0/(tb*theta)) );
    dpressure = beta3*( (beta3-1.0)*(tb/(theta*theta)) + (beta3+1.0)/(tb*theta*theta) );
}
// Same sampling as get_material_parameters, but evaluated at the cell
// centroid (xi = eta = zeta = 1/3) instead of a Gauss point; used for
// post-processing/recovery.
// NOTE(review): duplicates the transform logic of get_material_parameters —
// a shared private helper would keep the two in sync.
void get_material_parameters_for_recover(unsigned int & e_lid){
    int e_gid = Mesh->local_cells[e_lid];
    // centroid of the reference tetrahedron
    double xi = 1.0/3.0; double eta = 1.0/3.0; double zeta = 1.0/3.0;
    Epetra_SerialDenseVector N(4);
    tetra4::shape_functions(N,xi,eta,zeta);
    // P1 interpolation of the Gaussian germs w1..w4 at the centroid
    w1 = 0.0; w2 = 0.0; w3 = 0.0; w4 = 0.0;
    for (unsigned int j=0; j<4; ++j){
        int node = cells_nodes_p1_med(4*e_gid+j);
        w1 += N(j)*w1_gmrf(node);
        w2 += N(j)*w2_gmrf(node);
        w3 += N(j)*w3_gmrf(node);
        w4 += N(j)*w4_gmrf(node);
    }
    // inverse-CDF transforms to the physical marginals
    c1 = icdf_gamma(w1,alpha1,alpha2);
    c2 = icdf_gamma(w2,alpha3,alpha4);
    u1 = icdf_beta (w3,tau1,tau2);
    mu4 = icdf_gamma(w4,alpha5,alpha6);
    // recover the HGO moduli with the epsilon regularisation toward the means
    mu1 = ( epsilon*mean_mu1 + (1.0/2.0)*c2*u1 )/( 1.0+epsilon );
    mu2 = ( epsilon*mean_mu2 + (1.0/(std::sqrt(3.0)*3.0))*c2*(1.0-u1) )/( 1.0+epsilon );
    mu3 = ( epsilon*mean_mu3 + c1/(2.0*beta3*beta3) )/( 1.0+epsilon );
    mu4 = ( epsilon*mean_mu4 + mu4 )/( 1.0+epsilon );
}
// Total stress for post-processing, computed from the deformation gradient F
// (presumably the second Piola-Kirchhoff stress, as it is written in terms of
// C = F^T F and its inverse — confirm against the recovery routine). Sums an
// isochoric part, a volumetric pressure part, and two fiber contributions
// that are only active under fiber tension (I4 > 1).
void get_stress_for_recover(Epetra_SerialDenseMatrix & deformation_gradient, double & det, Epetra_SerialDenseMatrix & piola_stress){
    // det = det(F), expanded 3x3 determinant
    det = deformation_gradient(0,0)*deformation_gradient(1,1)*deformation_gradient(2,2)-deformation_gradient(0,0)*deformation_gradient(1,2)*deformation_gradient(2,1)-deformation_gradient(0,1)*deformation_gradient(1,0)*deformation_gradient(2,2)+deformation_gradient(0,1)*deformation_gradient(1,2)*deformation_gradient(2,0)+deformation_gradient(0,2)*deformation_gradient(1,0)*deformation_gradient(2,1)-deformation_gradient(0,2)*deformation_gradient(1,1)*deformation_gradient(2,0);
    double alpha = std::pow(det,-2.0/3.0); // J^(-2/3): isochoric scaling
    double beta = 1.0/(det*det);           // J^(-2)
    Epetra_SerialDenseMatrix eye(3,3);
    Epetra_SerialDenseMatrix M1(3,3), M2(3,3);
    Epetra_SerialDenseMatrix C(3,3), L(3,3);
    Epetra_SerialDenseMatrix piola_ani1(3,3), piola_ani2(3,3);
    eye(0,0) = 1.0; eye(0,1) = 0.0; eye(0,2) = 0.0;
    eye(1,0) = 0.0; eye(1,1) = 1.0; eye(1,2) = 0.0;
    eye(2,0) = 0.0; eye(2,1) = 0.0; eye(2,2) = 1.0;
    // structure tensors of the two fiber families: M1 = a (x) a, M2 = b (x) b
    M1.Multiply('N','T',1.0,a,a,0.0);
    M2.Multiply('N','T',1.0,b,b,0.0);
    // right Cauchy-Green tensor C = F^T F
    C.Multiply('T','N',1.0,deformation_gradient,deformation_gradient,0.0);
    // L = C^{-1}: cofactors of C divided by det(C) = det(F)^2
    L(0,0) = (1.0/(det*det))*(C(1,1)*C(2,2)-C(1,2)*C(2,1));
    L(1,1) = (1.0/(det*det))*(C(0,0)*C(2,2)-C(0,2)*C(2,0));
    L(2,2) = (1.0/(det*det))*(C(0,0)*C(1,1)-C(0,1)*C(1,0));
    L(1,2) = (1.0/(det*det))*(C(0,2)*C(1,0)-C(0,0)*C(1,2));
    L(0,2) = (1.0/(det*det))*(C(0,1)*C(1,2)-C(0,2)*C(1,1));
    L(0,1) = (1.0/(det*det))*(C(0,2)*C(2,1)-C(0,1)*C(2,2));
    L(2,1) = L(1,2); L(2,0) = L(0,2); L(1,0) = L(0,1);
    // invariants of C and the fiber pseudo-invariants I4 = a.C.a, b.C.b
    double I1 = C(0,0) + C(1,1) + C(2,2);
    double II1 = C(0,0)*C(0,0) + C(1,1)*C(1,1) + C(2,2)*C(2,2) + 2.0*C(1,2)*C(1,2) + 2.0*C(0,2)*C(0,2) + 2.0*C(0,1)*C(0,1);
    double I2 = (1.0/2.0)*(I1*I1-II1);
    double I4_1 = C(0,0)*M1(0,0) + C(1,1)*M1(1,1) + C(2,2)*M1(2,2) + 2.0*C(0,1)*M1(0,1) + 2.0*C(0,2)*M1(0,2) + 2.0*C(1,2)*M1(1,2);
    double I4_2 = C(0,0)*M2(0,0) + C(1,1)*M2(1,1) + C(2,2)*M2(2,2) + 2.0*C(0,1)*M2(0,1) + 2.0*C(0,2)*M2(0,2) + 2.0*C(1,2)*M2(1,2);
    double pI2 = std::sqrt(I2);
    double S4_1 = (I4_1-1.0)*(I4_1-1.0);
    double S4_2 = (I4_2-1.0)*(I4_2-1.0);
    // volumetric pressure: same law as get_internal_pressure, scaled by mu3
    double ptheta = std::pow(det,beta3);
    double pressure = mu3*beta3*( (ptheta/det) - (1.0/(ptheta*det)) );
    for (unsigned int i=0; i<3; ++i){
        for (unsigned int j=0; j<3; ++j){
            // isochoric (mu1, mu2) and volumetric contributions
            piola_stress(i,j) = 2.0*mu1*alpha*(eye(i,j)-(1.0/3.0)*L(i,j))
                + mu2*beta*( 3.0*pI2*(I1*eye(i,j)-C(i,j)) - 2.0*I2*pI2*L(i,j) )
                + det*pressure*L(i,j);
            // exponential fiber terms (HGO-type), one per fiber family
            piola_ani1(i,j) = 4.0*mu4*(I4_1-1.0)*exp(beta4*S4_1)*M1(i,j);
            piola_ani2(i,j) = 4.0*mu4*(I4_2-1.0)*exp(beta4*S4_2)*M2(i,j);
        }
    }
    // fibers only bear load in tension (I4 > 1)
    if (I4_1>1.0){
        piola_stress += piola_ani1;
    }
    if (I4_2>1.0){
        piola_stress += piola_ani2;
    }
}
// Evaluates the isochoric/volumetric split of the hyperelastic constitutive
// model in Voigt notation (component order: 11, 22, 33, 23, 13, 12).
// Inputs:  deformation_gradient  F (3x3).
// Outputs: det              det(F)
//          L                inverse right Cauchy-Green tensor C^{-1} (Voigt)
//          piola_isc        isochoric second Piola-Kirchhoff stress
//          piola_vol        volumetric second Piola-Kirchhoff stress
//          tangent_piola_isc / tangent_piola_vol  the material tangents.
// Material parameters mu1..mu4, beta4 and the fiber direction vectors a, b
// come from the enclosing scope (not visible here) — presumably class
// members; TODO confirm against the class declaration.
void model_C(Epetra_SerialDenseMatrix & deformation_gradient, double & det, Epetra_SerialDenseVector & L, Epetra_SerialDenseVector & piola_isc, Epetra_SerialDenseVector & piola_vol, Epetra_SerialDenseMatrix & tangent_piola_isc, Epetra_SerialDenseMatrix & tangent_piola_vol){
// det(F), expanded by cofactors along the first row.
det = deformation_gradient(0,0)*deformation_gradient(1,1)*deformation_gradient(2,2)-deformation_gradient(0,0)*deformation_gradient(1,2)*deformation_gradient(2,1)-deformation_gradient(0,1)*deformation_gradient(1,0)*deformation_gradient(2,2)+deformation_gradient(0,1)*deformation_gradient(1,2)*deformation_gradient(2,0)+deformation_gradient(0,2)*deformation_gradient(1,0)*deformation_gradient(2,1)-deformation_gradient(0,2)*deformation_gradient(1,1)*deformation_gradient(2,0);
// alpha = J^{-2/3}: isochoric scaling factor.
double alpha = std::pow(det,-2.0/3.0);
Epetra_SerialDenseVector eye(6);
Epetra_SerialDenseMatrix rightCauchy(3,3);
Epetra_SerialDenseVector M1(6), M2(6);
Epetra_SerialDenseVector C(6), D(6);
Epetra_SerialDenseVector piola_nh(6), piola_ani1(6), piola_ani2(6);
// Structural tensors M1 = a (x) a and M2 = b (x) b for the two fiber
// families, stored in Voigt order.
M1(0) = a(0)*a(0); M2(0) = b(0)*b(0);
M1(1) = a(1)*a(1); M2(1) = b(1)*b(1);
M1(2) = a(2)*a(2); M2(2) = b(2)*b(2);
M1(3) = a(1)*a(2); M2(3) = b(1)*b(2);
M1(4) = a(0)*a(2); M2(4) = b(0)*b(2);
M1(5) = a(0)*a(1); M2(5) = b(0)*b(1);
// Right Cauchy-Green tensor C = F^T F.
rightCauchy.Multiply('T','N',1.0,deformation_gradient,deformation_gradient,0.0);
// Identity in Voigt notation.
eye(0) = 1.0; eye(1) = 1.0; eye(2) = 1.0; eye(3) = 0.0; eye(4) = 0.0; eye(5) = 0.0;
C(0) = rightCauchy(0,0); C(1) = rightCauchy(1,1); C(2) = rightCauchy(2,2);
C(3) = rightCauchy(1,2); C(4) = rightCauchy(0,2); C(5) = rightCauchy(0,1);
// L = C^{-1} = adj(C)/det(C); det(C) = det(F)^2, hence the 1/det^2 factor.
L(0) = (1.0/(det*det))*(rightCauchy(1,1)*rightCauchy(2,2)-rightCauchy(1,2)*rightCauchy(2,1));
L(1) = (1.0/(det*det))*(rightCauchy(0,0)*rightCauchy(2,2)-rightCauchy(0,2)*rightCauchy(2,0));
L(2) = (1.0/(det*det))*(rightCauchy(0,0)*rightCauchy(1,1)-rightCauchy(0,1)*rightCauchy(1,0));
L(3) = (1.0/(det*det))*(rightCauchy(0,2)*rightCauchy(1,0)-rightCauchy(0,0)*rightCauchy(1,2));
L(4) = (1.0/(det*det))*(rightCauchy(0,1)*rightCauchy(1,2)-rightCauchy(0,2)*rightCauchy(1,1));
L(5) = (1.0/(det*det))*(rightCauchy(0,2)*rightCauchy(2,1)-rightCauchy(0,1)*rightCauchy(2,2));
// Isotropic invariants: I1 = tr C, I2 = ((tr C)^2 - tr(C^2))/2;
// anisotropic pseudo-invariants I4_i = C : M_i (squared fiber stretch).
double I1 = C(0) + C(1) + C(2);
double II1 = C(0)*C(0) + C(1)*C(1) + C(2)*C(2) + 2.0*C(3)*C(3) + 2.0*C(4)*C(4) + 2.0*C(5)*C(5);
double I2 = (1.0/2.0)*(I1*I1-II1);
double I4_1 = C(0)*M1(0) + C(1)*M1(1) + C(2)*M1(2) + 2.0*C(5)*M1(5) + 2.0*C(4)*M1(4) + 2.0*C(3)*M1(3);
double I4_2 = C(0)*M2(0) + C(1)*M2(1) + C(2)*M2(2) + 2.0*C(5)*M2(5) + 2.0*C(4)*M2(4) + 2.0*C(3)*M2(3);
double pI2 = std::sqrt(I2);
double S4_1 = (I4_1-1.0)*(I4_1-1.0);
double S4_2 = (I4_2-1.0)*(I4_2-1.0);
// Stress vectors: Neo-Hookean part, isochoric part (Neo-Hooke + I2 term),
// exponential fiber contributions and the volumetric part.
for (unsigned int i=0; i<6; ++i){
D(i) = I1*eye(i) - C(i);
piola_nh(i) = 2.0*alpha*mu1*(eye(i)-(1.0/3.0)*I1*L(i));
piola_isc(i) = piola_nh(i) + (mu2/(det*det))*( 3.0*pI2*D(i) - 2.0*pI2*I2*L(i) );
piola_ani1(i) = 4.0*mu4*(I4_1-1.0)*exp(beta4*S4_1)*M1(i);
piola_ani2(i) = 4.0*mu4*(I4_2-1.0)*exp(beta4*S4_2)*M2(i);
piola_vol(i) = mu3*det*L(i);
}
// Tangent operators assembled as rank-one updates; tensor_product /
// sym_tensor_product accumulate scalarAB * (x (x) y) into the last argument
// (final flag 0.0 overwrites, 1.0 accumulates).
double scalarAB;
scalarAB = mu3*det;
tensor_product(mu3*det,L,L,tangent_piola_vol,0.0);
scalarAB = -2.0*mu3*det;
sym_tensor_product(scalarAB,L,L,tangent_piola_vol,1.0);
scalarAB = -2.0/3.0;
tensor_product(scalarAB,piola_nh,L,tangent_piola_isc,0.0);
tensor_product(scalarAB,L,piola_nh,tangent_piola_isc,1.0);
scalarAB = -6.0*mu2*pI2/(det*det);
tensor_product(scalarAB,D,eye,tangent_piola_isc,1.0);
tensor_product(scalarAB,eye,D,tangent_piola_isc,1.0);
scalarAB = (-4.0/9.0)*mu1*alpha*I1 + 4.0*mu2*pI2*I2/(det*det);
tensor_product(scalarAB,L,L,tangent_piola_isc,1.0);
scalarAB = (4.0/3.0)*mu1*alpha*I1 + 4.0*mu2*pI2*I2/(det*det);
sym_tensor_product(scalarAB,L,L,tangent_piola_isc,1.0);
scalarAB = 3.0*mu2/(det*det*pI2);
tensor_product(scalarAB,D,D,tangent_piola_isc,1.0);
scalarAB = 6.0*mu2*pI2/(det*det);
tensor_product(scalarAB,eye,eye,tangent_piola_isc,1.0);
scalarAB = -scalarAB;
sym_tensor_product(scalarAB,eye,eye,tangent_piola_isc,1.0);
// Fibers carry load only in extension (I4 > 1); add their stress and
// tangent contributions conditionally.
if (I4_1>1.0){
piola_isc += piola_ani1;
scalarAB = (8.0*mu4 + 16.0*mu4*beta4*S4_1)*exp(beta4*S4_1);
tensor_product(scalarAB,M1,M1,tangent_piola_isc,1.0);
}
if (I4_2>1.0){
piola_isc += piola_ani2;
scalarAB = (8.0*mu4 + 16.0*mu4*beta4*S4_2)*exp(beta4*S4_2);
tensor_product(scalarAB,M2,M2,tangent_piola_isc,1.0);
}
}
};
#endif
|
-- Umbrella module for the matrix library: fixes a bimonoid structure once
-- and re-exports every matrix submodule instantiated at it.
open import MLib.Algebra.PropertyCode
open import MLib.Algebra.PropertyCode.Structures
module MLib.Matrix {c ℓ} (struct : Struct bimonoidCode c ℓ) where
open import MLib.Prelude
open Struct struct
open import MLib.Algebra.Operations struct
-- Table helpers used throughout the matrix submodules.
open Table using (head; tail; rearrange; fromList; toList; _≗_)
-- Publicly re-exported matrix theory, all specialised to `struct`.
open import MLib.Matrix.Core public
open import MLib.Matrix.Equality struct public
open import MLib.Matrix.Plus struct public
open import MLib.Matrix.Mul struct public
open import MLib.Matrix.Tensor struct public
open import MLib.Matrix.SemiTensor struct public
open FunctionProperties
|
/-
Copyright (c) 2019 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov, Scott Morrison, Simon Hudon
Definition and basic properties of endomorphisms and automorphisms of an object in a category.
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.category_theory.groupoid
import Mathlib.data.equiv.mul_add
import Mathlib.PostPort
universes v u u' v'
namespace Mathlib
namespace category_theory
/-- Endomorphisms of an object in a category. Arguments order in multiplication agrees with
`function.comp`, not with `category.comp`. -/
def End {C : Type u} [category_struct C] (X : C) :=
X ⟶ X
namespace End
-- The identity morphism is the multiplicative unit of `End X`.
protected instance has_one {C : Type u} [category_struct C] (X : C) : HasOne (End X) :=
{ one := 𝟙 }
protected instance inhabited {C : Type u} [category_struct C] (X : C) : Inhabited (End X) :=
{ default := 𝟙 }
/-- Multiplication of endomorphisms agrees with `function.comp`, not `category_struct.comp`. -/
protected instance has_mul {C : Type u} [category_struct C] (X : C) : Mul (End X) :=
{ mul := fun (x y : End X) => y ≫ x }
-- Unfolding lemmas for the unit and the (reversed-composition) product.
@[simp] theorem one_def {C : Type u} [category_struct C] {X : C} : 1 = 𝟙 :=
rfl
@[simp] theorem mul_def {C : Type u} [category_struct C] {X : C} (xs : End X) (ys : End X) : xs * ys = ys ≫ xs :=
rfl
/-- Endomorphisms of an object form a monoid -/
protected instance monoid {C : Type u} [category C] {X : C} : monoid (End X) :=
monoid.mk Mul.mul sorry 1 category.comp_id category.id_comp
/-- In a groupoid, endomorphisms form a group -/
protected instance group {C : Type u} [groupoid C] (X : C) : group (End X) :=
group.mk monoid.mul sorry monoid.one sorry sorry groupoid.inv
(div_inv_monoid.div._default monoid.mul sorry monoid.one sorry sorry groupoid.inv) groupoid.comp_inv
end End
/--
Automorphisms of an object in a category.
The order of arguments in multiplication agrees with
`function.comp`, not with `category.comp`.
-/
def Aut {C : Type u} [category C] (X : C) :=
X ≅ X
namespace Aut
protected instance inhabited {C : Type u} [category C] (X : C) : Inhabited (Aut X) :=
{ default := iso.refl X }
-- Group structure on `Aut X`: multiplication is `flip iso.trans`, so that
-- `f * g` composes in the same order as `End`'s multiplication.
protected instance group {C : Type u} [category C] (X : C) : group (Aut X) :=
group.mk (flip iso.trans) sorry (iso.refl X) sorry sorry iso.symm
(div_inv_monoid.div._default (flip iso.trans) sorry (iso.refl X) sorry sorry iso.symm) sorry
/--
Units in the monoid of endomorphisms of an object
are (multiplicatively) equivalent to automorphisms of that object.
-/
def units_End_equiv_Aut {C : Type u} [category C] (X : C) : units (End X) ≃* Aut X :=
mul_equiv.mk (fun (f : units (End X)) => iso.mk (units.val f) (units.inv f))
(fun (f : Aut X) => units.mk (iso.hom f) (iso.inv f) (iso.inv_hom_id' f) (iso.hom_inv_id' f)) sorry sorry sorry
end Aut
namespace functor
-- Functoriality transports endomorphisms and automorphisms along `f`.
/-- `f.map` as a monoid hom between endomorphism monoids. -/
def map_End {C : Type u} [category C] (X : C) {D : Type u'} [category D] (f : C ⥤ D) : End X →* End (obj f X) :=
monoid_hom.mk (map f) (map_id f X) sorry
/-- `f.map_iso` as a group hom between automorphism groups. -/
def map_Aut {C : Type u} [category C] (X : C) {D : Type u'} [category D] (f : C ⥤ D) : Aut X →* Aut (obj f X) :=
monoid_hom.mk (map_iso f) (map_iso_refl f X) sorry
|
using Test
using FlightMechanics

# Test suite registry: each entry pairs a testset label with the file that
# implements it (paths are relative to this script).
const TEST_SUITES = [
    ("atmosphere", "atmosphere.jl"),
    ("coordinates", "coordinates.jl"),
    ("anemometry", "anemometry.jl"),
    ("mechanics", "mechanics.jl"),
    ("flight mechanics", "flight_mechanics.jl"),
    ("inputs", "models/inputs.jl"),
    ("fcs", "models/fcs.jl"),
    ("pfm", "models/point_forces_moments.jl"),
    ("mass", "models/mass.jl"),
    ("attitude", "models/attitude.jl"),
    ("position", "models/position.jl"),
    ("aerodynamics", "models/aerodynamics.jl"),
    ("aerostate", "models/aero_state.jl"),
    ("dynamic system", "models/dynamic_system.jl"),
    ("trimmer", "models/trimmer.jl"),
    ("ac: c310", "aircrafts/c310.jl"),
    ("ac: f16", "aircrafts/f16.jl"),
]

# Run every suite in declaration order inside its own testset.
for (label, file) in TEST_SUITES
    @testset "$label" begin
        include(file)
    end
end
|
-- Top-level OutsideIn(X) module: parameterised by the constraint domain `x`,
-- it instantiates the inference components at `x` and re-exports them.
open import OutsideIn.Prelude
open import OutsideIn.X
module OutsideIn(x : X) where
open X (x) public
open import OutsideIn.Prelude public
open import Data.Vec public hiding ([_])
-- Components of the type system, instantiated at `x` below.
import OutsideIn.Constraints as C
import OutsideIn.TypeSchema as TS
import OutsideIn.Expressions as E
import OutsideIn.Environments as V
import OutsideIn.TopLevel as TL
import OutsideIn.Inference as I
open E (x) public
open TL(x) public
open TS(x) public
open I (x) public
open V (x) public
open C (x)
|
C$Header: /data/petsun4/data1/src_solaris/interp_4dfp/RCS/butt1d.f,v 1.7 2013/02/05 23:18:06 avi Exp $
C$Log: butt1d.f,v $
c Revision 1.7 2013/02/05 23:18:06 avi
c subroutine butt1dbs
c
c Revision 1.6 2007/11/20 03:15:45 avi
c gcc v4 compatible
c
c Revision 1.5 2007/09/10 20:30:21 avi
c hard-coded buffer size 1024 -> 1536
c
c Revision 1.4 2007/01/16 06:15:54 avi
c subroutine butt1dba()
c
c Revision 1.3 2004/05/26 20:30:53 avi
c subroutine butt1db
c
c Revision 1.2 2002/06/27 05:12:21 avi
c correct code to compute factor
c
c Revision 1.1 2002/06/25 05:16:29 avi
c Initial revision
c
c butt1d_rcs: print this source file's RCS/CVS revision identifier to stdout.
subroutine butt1d_rcs
write (*,"('$Id: butt1d.f,v 1.7 2013/02/05 23:18:06 avi Exp $')")
return
end
c butt1dh: in-place Butterworth high-pass filter of a real time series.
c   data    real*4 series of even length n (n must not exceed nmax = 1536)
c   delta   sampling interval
c   fhalf   half-power frequency of the filter
c   iorder  Butterworth order
c Each frequency bin is scaled by r/(1+r), r = (f/fhalf)**(2*iorder),
c which tends to 0 at f = 0 and to 1 at high frequency.
subroutine butt1dh(data,n,delta,fhalf,iorder)
real*4 data(n)
parameter (nmax=1536)
real*4 a(nmax/2+1),b(nmax/2+1)
if(n.gt.nmax)then
write(*,"('butt1dh: input array length ',i4,' exceeds ',i4)")n,nmax
call exit(-1)
endif
if(mod(n,2).ne.0)then
write(*,"('butt1dh: illegal odd input array length',i6)")n
call exit(-1)
endif
c pack even/odd samples into two arrays for the half-length complex FFT
i=1
do 21 k=1,n,2
a(i)=data(k)
b(i)=data(k+1)
21 i=i+1
call FFT  (a,b,1,n/2,1,-1)
call REALT(a,b,1,n/2,1,-1)
c apply the high-pass gain bin by bin
do 31 i=1,n/2+1
f=float(i-1)/(float(n)*delta)
r=(f/fhalf)**(2*iorder)
factor=r/(1.0+r)
a(i)=factor*a(i)
31 b(i)=factor*b(i)
c inverse transform and unpack the filtered samples back into data
call REALT(a,b,1,n/2,1,+1)
call FFT  (a,b,1,n/2,1,+1)
i=1
do 41 k=1,n,2
data(k)  =a(i)
data(k+1)=b(i)
41 i=i+1
return
end
c butt1db: in-place Butterworth band-pass filter of a real time series.
c   fhalf_lo/iorder_lo  low-frequency cut (high-pass factor r/(1+r));
c                       iorder_lo = 0 disables the low cut
c   fhalf_hi/iorder_hi  high-frequency cut (low-pass factor 1/(1+r));
c                       iorder_hi = 0 disables the high cut
c n must be even and not exceed nmax = 1536; negative orders abort.
subroutine butt1db(data,n,delta,fhalf_lo,iorder_lo,fhalf_hi,iorder_hi)
real*4 data(n)
parameter (nmax=1536)
real*4 a(nmax/2+1),b(nmax/2+1)
if(n.gt.nmax)then
write(*,"('butt1db: input array length ',i4,' exceeds ',i4)")n,nmax
call exit(-1)
endif
if(mod(n,2).ne.0)then
write(*,"('butt1db: illegal odd input array length ',i6)")n
call exit(-1)
endif
if(iorder_lo.lt.0.or.iorder_hi.lt.0)then
write(*,"('butt1db: negative Butterworth filter orders not allowed')")
call exit(-1)
endif
c pack even/odd samples for the half-length complex FFT
i=1
do 21 k=1,n,2
a(i)=data(k)
b(i)=data(k+1)
21 i=i+1
call FFT  (a,b,1,n/2,1,-1)
call REALT(a,b,1,n/2,1,-1)
c apply the combined band-pass gain bin by bin
do 31 i=1,n/2+1
f=float(i-1)/(float(n)*delta)
if(iorder_lo.gt.0)then
r_lo=(f/fhalf_lo)**(2*iorder_lo)
factor_lo=r_lo/(1.0+r_lo)
else
factor_lo=1.0
endif
if(iorder_hi.gt.0)then
r_hi=(f/fhalf_hi)**(2*iorder_hi)
factor_hi=1.0/(1.0+r_hi)
else
factor_hi=1.0
endif
a(i)=factor_lo*factor_hi*a(i)
31 b(i)=factor_lo*factor_hi*b(i)
c inverse transform and unpack back into data
call REALT(a,b,1,n/2,1,+1)
call FFT  (a,b,1,n/2,1,+1)
i=1
do 41 k=1,n,2
data(k)  =a(i)
data(k+1)=b(i)
41 i=i+1
return
end
c butt1dba: same band-pass filter as butt1db, but with no fixed length
c limit; scratch buffers are malloc'ed (Cray pointers) on each call and
c freed before returning.
subroutine butt1dba(data,n,delta,fhalf_lo,iorder_lo,fhalf_hi,iorder_hi)
c version of butt1db that allocates buffers on each call
real*4 data(n)
real*4 a(1),b(1)
pointer (pa,a),(pb,b)
if(mod(n,2).ne.0)then
write(*,"('butt1dba: illegal odd input array length ',i6)")n
call exit(-1)
endif
if(iorder_lo.lt.0.or.iorder_hi.lt.0)then
write(*,"('butt1dba: negative Butterworth filter orders not allowed')")
call exit(-1)
endif
c 4 bytes per real*4 element, n/2+1 elements per scratch array
pa=malloc(4*(n/2+1))
pb=malloc(4*(n/2+1))
if(pa.eq.0.or.pb.eq.0)then
write(*,"('butt1dba: memory allocation error')")
call exit(-1)
endif
c pack even/odd samples for the half-length complex FFT
i=1
do 21 k=1,n,2
a(i)=data(k)
b(i)=data(k+1)
21 i=i+1
call FFT  (a,b,1,n/2,1,-1)
call REALT(a,b,1,n/2,1,-1)
c apply the combined band-pass gain bin by bin
do 31 i=1,n/2+1
f=float(i-1)/(float(n)*delta)
if(iorder_lo.gt.0)then
r_lo=(f/fhalf_lo)**(2*iorder_lo)
factor_lo=r_lo/(1.0+r_lo)
else
factor_lo=1.0
endif
if(iorder_hi.gt.0)then
r_hi=(f/fhalf_hi)**(2*iorder_hi)
factor_hi=1.0/(1.0+r_hi)
else
factor_hi=1.0
endif
a(i)=factor_lo*factor_hi*a(i)
31 b(i)=factor_lo*factor_hi*b(i)
c inverse transform and unpack back into data
call REALT(a,b,1,n/2,1,+1)
call FFT  (a,b,1,n/2,1,+1)
i=1
do 41 k=1,n,2
data(k)  =a(i)
data(k+1)=b(i)
41 i=i+1
call free(pa)
call free(pb)
return
end
c butt1dbs: same band-pass filter as butt1db, but the scratch arrays a and
c b (each of length n/2+1) are supplied by the caller, avoiding both the
c fixed nmax limit and per-call allocation.
subroutine butt1dbs(data,n,delta,fhalf_lo,iorder_lo,fhalf_hi,iorder_hi,a,b)
c version of butt1db with scratch arrays a and b passed by pointer
real*4 data(n)
real*4 a(n/2+1),b(n/2+1)
if(mod(n,2).ne.0)then
write(*,"('butt1dbs: illegal odd input array length ',i6)")n
call exit(-1)
endif
if(iorder_lo.lt.0.or.iorder_hi.lt.0)then
write(*,"('butt1dbs: negative Butterworth filter orders not allowed')")
call exit(-1)
endif
c pack even/odd samples for the half-length complex FFT
i=1
do 21 k=1,n,2
a(i)=data(k)
b(i)=data(k+1)
21 i=i+1
call FFT  (a,b,1,n/2,1,-1)
call REALT(a,b,1,n/2,1,-1)
c apply the combined band-pass gain bin by bin
do 31 i=1,n/2+1
f=float(i-1)/(float(n)*delta)
if(iorder_lo.gt.0)then
r_lo=(f/fhalf_lo)**(2*iorder_lo)
factor_lo=r_lo/(1.0+r_lo)
else
factor_lo=1.0
endif
if(iorder_hi.gt.0)then
r_hi=(f/fhalf_hi)**(2*iorder_hi)
factor_hi=1.0/(1.0+r_hi)
else
factor_hi=1.0
endif
a(i)=factor_lo*factor_hi*a(i)
31 b(i)=factor_lo*factor_hi*b(i)
c inverse transform and unpack back into data
call REALT(a,b,1,n/2,1,+1)
call FFT  (a,b,1,n/2,1,+1)
i=1
do 41 k=1,n,2
data(k)  =a(i)
data(k+1)=b(i)
41 i=i+1
return
end
|
using Pkg
Pkg.activate(@__DIR__)

using Documenter, DimensionalData, CoordinateTransformations, Dates

# Pretty URLs (and deployment below) are enabled only when running under CI.
is_ci = get(ENV, "CI", nothing) == "true" ||
        get(ENV, "GITHUB_TOKEN", nothing) !== nothing

# Code evaluated before every doctest: load the packages the examples use
# and seed the RNG so doctest output is deterministic.
doctest_setup = quote
    using DimensionalData, Random, Dates
    Random.seed!(1234)
end
DocMeta.setdocmeta!(DimensionalData, :DocTestSetup, doctest_setup; recursive=true)

makedocs(
    modules = [DimensionalData],
    sitename = "DimensionalData.jl",
    format = Documenter.HTML(prettyurls = is_ci),
    pages = [
        "Introduction" => "index.md",
        "Crash course" => "course.md",
        "API" => "api.md",
        "For Developers" => "developer.md"
    ],
    strict = true,
)

# Deployment only makes sense on CI, where credentials are available.
is_ci && deploydocs(
    repo = "github.com/rafaqz/DimensionalData.jl.git",
    target = "build",
    push_preview = true
)
|
(************************************************************************)
(* * The Coq Proof Assistant / The Coq Development Team *)
(* v * INRIA, CNRS and contributors - Copyright 1999-2018 *)
(* <O___,, * (see CREDITS file for the list of authors) *)
(* \VV/ **************************************************************)
(* // * This file is distributed under the terms of the *)
(* * GNU Lesser General Public License Version 2.1 *)
(* * (see LICENSE file for the text of the license) *)
(************************************************************************)
(* A <X1,...,Xn>: non commutative polynomials on a commutative ring A *)
Set Implicit Arguments.
Require Import Setoid.
Require Import BinList.
Require Import BinPos.
Require Import BinNat.
Require Import BinInt.
Require Export Ring_polynom. (* n'utilise que PExpr *)
Require Export Ncring.
Section MakeRingPol.
(* Coefficients C are mapped into the (possibly non commutative) ring R by
   the morphism written [.]; phiCR_comm states that coefficient images
   commute with every element of R. *)
Context (C R:Type) `{Rh:Ring_morphism C R}.
Variable phiCR_comm: forall (c:C)(x:R), x * [c] == [c] * x.
(* Simplification tactics over the ring axioms and the commutation law. *)
Ltac rsimpl := repeat (gen_rewrite || rewrite phiCR_comm).
Ltac add_push := gen_add_push .
(* Definition of non commutative multivariable polynomials
with coefficients in C :
*)
Inductive Pol : Type :=
| Pc : C -> Pol
| PX : Pol -> positive -> positive -> Pol -> Pol.
(* PX P i n Q represents P * X_i^n + Q *)
(* Coefficient constants 0 and 1, and the corresponding constant polynomials. *)
Definition cO:C . exact ring0. Defined.
Definition cI:C . exact ring1. Defined.
Definition P0 := Pc 0.
Definition P1 := Pc 1.
(* Boolean equality test on coefficients, sound w.r.t. == (Ceqb_eq). *)
Variable Ceqb:C->C->bool.
Class Equalityb (A : Type):= {equalityb : A -> A -> bool}.
Notation "x =? y" := (equalityb x y) (at level 70, no associativity).
Variable Ceqb_eq: forall x y:C, Ceqb x y = true -> (x == y).
Instance equalityb_coef : Equalityb C :=
{equalityb x y := Ceqb x y}.
(* Structural boolean equality on polynomials, lifting =? on coefficients. *)
Fixpoint Peq (P P' : Pol) {struct P'} : bool :=
match P, P' with
| Pc c, Pc c' => c =? c'
| PX P i n Q, PX P' i' n' Q' =>
match Pos.compare i i', Pos.compare n n' with
| Eq, Eq => if Peq P P' then Peq Q Q' else false
| _,_ => false
end
| _, _ => false
end.
Instance equalityb_pol : Equalityb Pol :=
{equalityb x y := Peq x y}.
(* Q has its tail variables < i.
   Smart constructor for PX: drops a zero head and merges nested powers of
   the same variable, keeping polynomials in normal form. *)
Definition mkPX P i n Q :=
match P with
| Pc c => if c =? 0 then Q else PX P i n Q
| PX P' i' n' Q' =>
match Pos.compare i i' with
| Eq => if Q' =? P0 then PX P' i (n + n') Q else PX P i n Q
| _ => PX P i n Q
end
end.
(* mkXi i n = X_i^n, and mkX i = X_i. *)
Definition mkXi i n := PX P1 i n P0.
Definition mkX i := mkXi i 1.
(** Opposite (additive inverse) of a polynomial *)
Fixpoint Popp (P:Pol) : Pol :=
match P with
| Pc c => Pc (- c)
| PX P i n Q => PX (Popp P) i n (Popp Q)
end.
Notation "-- P" := (Popp P)(at level 30).
(** Addition and subtraction *)
(* Add the constant c to P (only the constant tail changes). *)
Fixpoint PaddCl (c:C)(P:Pol) {struct P} : Pol :=
match P with
| Pc c1 => Pc (c + c1)
| PX P i n Q => PX P i n (PaddCl c Q)
end.
(* Q arbitrary *)
Section PaddX.
(* Padd is the (yet to be tied) recursive addition; P is the fixed addend. *)
Variable Padd:Pol->Pol->Pol.
Variable P:Pol.
(* Xi^n * P + Q
the head variables of Q are not necessarily < i
but Q is normalised: head variables are decreasing *)
Fixpoint PaddX (i n:positive)(Q:Pol){struct Q}:=
match Q with
| Pc c => mkPX P i n Q
| PX P' i' n' Q' =>
match Pos.compare i i' with
| (* i > i' *)
Gt => mkPX P i n Q
| (* i < i' *)
Lt => mkPX P' i' n' (PaddX i n Q')
| (* i = i' *)
Eq => match Z.pos_sub n n' with
| (* n > n' *)
Zpos k => mkPX (PaddX i k P') i' n' Q'
| (* n = n' *)
Z0 => mkPX (Padd P P') i n Q'
| (* n < n' *)
Zneg k => mkPX (Padd P (mkPX P' i k P0)) i n Q'
end
end
end.
End PaddX.
(* Polynomial addition, recursing on the first argument and delegating the
   head monomial to PaddX. *)
Fixpoint Padd (P1 P2: Pol) {struct P1} : Pol :=
match P1 with
| Pc c => PaddCl c P2
| PX P' i' n' Q' =>
PaddX Padd P' i' n' (Padd Q' P2)
end.
Notation "P ++ P'" := (Padd P P').
Definition Psub(P P':Pol):= P ++ (--P').
Notation "P -- P'" := (Psub P P')(at level 50).
(** Multiplication *)
(* Multiply P by a coefficient on the right (coefficients need not commute
   with R, so the side matters). *)
Fixpoint PmulC_aux (P:Pol) (c:C) {struct P} : Pol :=
match P with
| Pc c' => Pc (c' * c)
| PX P i n Q => mkPX (PmulC_aux P c) i n (PmulC_aux Q c)
end.
(* Same, with short-circuits for c = 0 and c = 1. *)
Definition PmulC P c :=
if c =? 0 then P0 else
if c =? 1 then P else PmulC_aux P c.
(* Polynomial multiplication, recursing on the second argument. *)
Fixpoint Pmul (P1 P2 : Pol) {struct P2} : Pol :=
match P2 with
| Pc c => PmulC P1 c
| PX P i n Q =>
PaddX Padd (Pmul P1 P) i n (Pmul P1 Q)
end.
Notation "P ** P'" := (Pmul P P')(at level 40).
Definition Psquare (P:Pol) : Pol := P ** P.
(** Evaluation of a polynomial towards R *)
(* l assigns a value in R to each variable (X_i is nth 0 i l). *)
Fixpoint Pphi(l:list R) (P:Pol) {struct P} : R :=
match P with
| Pc c => [c]
| PX P i n Q =>
let x := nth 0 i l in
let xn := pow_pos x n in
(Pphi l P) * xn + (Pphi l Q)
end.
Reserved Notation "P @ l " (at level 10, no associativity).
Notation "P @ l " := (Pphi l P).
(** Proofs *)
(* Case split on the sign of a positive subtraction, naming the equation H. *)
Ltac destr_pos_sub H :=
match goal with |- context [Z.pos_sub ?x ?y] =>
assert (H := Z.pos_sub_discr x y); destruct (Z.pos_sub x y)
end.
(* Soundness of the boolean equality: Peq-equal polynomials evaluate equally. *)
Lemma Peq_ok : forall P P',
(P =? P') = true -> forall l, P@l == P'@ l.
Proof.
induction P;destruct P';simpl;intros ;try easy.
- now apply ring_morphism_eq, Ceqb_eq.
- specialize (IHP1 P'1). specialize (IHP2 P'2).
simpl in IHP1, IHP2.
destruct (Pos.compare_spec p p1); try discriminate;
destruct (Pos.compare_spec p0 p2); try discriminate.
destruct (Peq P2 P'1); try discriminate.
subst; now rewrite IHP1, IHP2.
Qed.
(* P0 and P1 evaluate to the ring's 0 and 1. *)
Lemma Pphi0 : forall l, P0@l == 0.
Proof.
intros;simpl.
rewrite ring_morphism0. reflexivity.
Qed.
Lemma Pphi1 : forall l, P1@l == 1.
Proof.
intros;simpl; rewrite ring_morphism1. reflexivity.
Qed.
(* The smart constructor mkPX evaluates like the plain PX it normalises. *)
Lemma mkPX_ok : forall l P i n Q,
(mkPX P i n Q)@l == P@l * (pow_pos (nth 0 i l) n) + Q@l.
Proof.
intros l P i n Q;unfold mkPX.
destruct P;try (simpl;reflexivity).
assert (Hh := ring_morphism_eq c 0).
simpl; case_eq (Ceqb c 0);simpl;try reflexivity.
intros.
rewrite Hh. rewrite ring_morphism0.
rsimpl. apply Ceqb_eq. trivial.
destruct (Pos.compare_spec i p).
assert (Hh := @Peq_ok P3 P0). case_eq (P3=? P0). intro. simpl.
rewrite Hh.
rewrite Pphi0. rsimpl. rewrite Pos.add_comm. rewrite pow_pos_add;rsimpl.
subst;trivial. reflexivity. trivial. intros. simpl. reflexivity. simpl. reflexivity.
simpl. reflexivity.
Qed.
(* Rewrite with the evaluation lemmas for P0/P1/mkPX and push the morphism
   through coefficient operations, then simplify with the ring axioms. *)
Ltac Esimpl :=
repeat (progress (
match goal with
| |- context [?P@?l] =>
match P with
| P0 => rewrite (Pphi0 l)
| P1 => rewrite (Pphi1 l)
| (mkPX ?P ?i ?n ?Q) => rewrite (mkPX_ok l P i n Q)
end
| |- context [[?c]] =>
match c with
| 0 => rewrite ring_morphism0
| 1 => rewrite ring_morphism1
| ?x + ?y => rewrite ring_morphism_add
| ?x * ?y => rewrite ring_morphism_mul
| ?x - ?y => rewrite ring_morphism_sub
| - ?x => rewrite ring_morphism_opp
end
end));
simpl; rsimpl.
(* Correctness of constant addition. *)
Lemma PaddCl_ok : forall c P l, (PaddCl c P)@l == [c] + P@l .
Proof.
induction P; simpl; intros; Esimpl; try reflexivity.
rewrite IHP2. rsimpl.
rewrite (ring_add_comm  (P2 @ l * pow_pos (nth 0 p l) p0) [c]).
reflexivity.
Qed.
(* Correctness of right multiplication by a coefficient. *)
Lemma PmulC_aux_ok : forall c P l, (PmulC_aux P c)@l == P@l * [c].
Proof.
induction P;simpl;intros. rewrite ring_morphism_mul.
try reflexivity.
simpl. Esimpl. rewrite IHP1;rewrite IHP2;rsimpl.
Qed.
Lemma PmulC_ok : forall c P l, (PmulC P c)@l == P@l * [c].
Proof.
intros c P l; unfold PmulC.
assert (Hh:= ring_morphism_eq c 0);case_eq (c =? 0). intros.
rewrite Hh;Esimpl. apply Ceqb_eq;trivial.
assert (H1h:= ring_morphism_eq c 1);case_eq (c =? 1);intros.
rewrite H1h;Esimpl. apply Ceqb_eq;trivial.
apply PmulC_aux_ok.
Qed.
(* Correctness of negation. *)
Lemma Popp_ok : forall P l, (--P)@l == - P@l.
Proof.
induction P;simpl;intros.
Esimpl.
rewrite IHP1;rewrite IHP2;rsimpl.
Qed.
(* Esimpl extended with the correctness lemmas proved so far. *)
Ltac Esimpl2 :=
Esimpl;
repeat (progress (
match goal with
| |- context [(PaddCl ?c ?P)@?l] => rewrite (PaddCl_ok c P l)
| |- context [(PmulC ?P ?c)@?l] => rewrite (PmulC_ok c P l)
| |- context [(--?P)@?l] => rewrite (Popp_ok P l)
end)); Esimpl.
(* Unfolding lemma: PaddX applied to a tied-up Padd reduces as expected. *)
Lemma PaddXPX: forall P i n Q,
PaddX Padd P i n Q =
match Q with
| Pc c => mkPX P i n Q
| PX P' i' n' Q' =>
match Pos.compare i i' with
| (* i > i' *)
Gt => mkPX P i n Q
| (* i < i' *)
Lt => mkPX P' i' n' (PaddX Padd P i n Q')
| (* i = i' *)
Eq => match Z.pos_sub n n' with
| (* n > n' *)
Zpos k => mkPX (PaddX Padd P i k P') i' n' Q'
| (* n = n' *)
Z0 => mkPX (Padd P P') i n Q'
| (* n < n' *)
Zneg k => mkPX (Padd P (mkPX P' i k P0)) i n Q'
end
end
end.
induction Q; reflexivity.
Qed.
(* Correctness of Padd and PaddX, proved simultaneously by induction on P2. *)
Lemma PaddX_ok2 : forall P2,
(forall P l, (P2 ++ P) @ l == P2 @ l + P @ l)
/\
(forall P k n l,
(PaddX Padd P2 k n P) @ l ==
P2 @ l * pow_pos (nth 0 k l) n + P @ l).
induction P2;simpl;intros. split. intros. apply PaddCl_ok.
induction P. unfold PaddX. intros. rewrite mkPX_ok.
simpl. rsimpl.
intros. simpl.
destruct (Pos.compare_spec k p) as [Hh|Hh|Hh].
destr_pos_sub H1h. Esimpl2.
rewrite Hh; trivial. rewrite H1h. reflexivity.
simpl. rewrite mkPX_ok. rewrite IHP1. Esimpl2.
rewrite Pos.add_comm in H1h.
rewrite H1h.
rewrite pow_pos_add. Esimpl2.
rewrite Hh; trivial. reflexivity.
rewrite mkPX_ok. rewrite PaddCl_ok. Esimpl2. rewrite Pos.add_comm in H1h.
rewrite H1h. Esimpl2. rewrite pow_pos_add. Esimpl2.
rewrite Hh; trivial. reflexivity.
rewrite mkPX_ok. rewrite IHP2. Esimpl2.
rewrite (ring_add_comm  (P2 @ l * pow_pos (nth 0 p l) p0)
([c] * pow_pos (nth 0 k l) n)).
reflexivity. assert (H1h := ring_morphism_eq c 0);case_eq (Ceqb c 0);
intros; simpl.
rewrite H1h;trivial. Esimpl2. apply Ceqb_eq; trivial. reflexivity.
decompose [and] IHP2_1. decompose [and] IHP2_2. clear IHP2_1 IHP2_2.
split. intros. rewrite H0. rewrite H1.
Esimpl2.
induction P. unfold PaddX. intros. rewrite mkPX_ok. simpl. reflexivity.
intros. rewrite PaddXPX.
destruct (Pos.compare_spec k p1) as [H3h|H3h|H3h].
destr_pos_sub H4h.
rewrite mkPX_ok. simpl. rewrite H0. rewrite H1. Esimpl2.
rewrite H4h. rewrite H3h;trivial. reflexivity.
rewrite mkPX_ok. rewrite IHP1. Esimpl2. rewrite H3h;trivial.
rewrite Pos.add_comm in H4h.
rewrite H4h. rewrite pow_pos_add. Esimpl2.
rewrite mkPX_ok. simpl. rewrite H0. rewrite H1.
rewrite mkPX_ok.
Esimpl2. rewrite H3h;trivial.
rewrite Pos.add_comm in H4h.
rewrite H4h. rewrite pow_pos_add. Esimpl2.
rewrite mkPX_ok. simpl. rewrite IHP2. Esimpl2.
gen_add_push (P2 @ l * pow_pos (nth 0 p1 l) p2). try reflexivity.
rewrite mkPX_ok. simpl. reflexivity.
Qed.
(* Projections of PaddX_ok2, and the remaining arithmetic correctness lemmas. *)
Lemma Padd_ok : forall P Q l, (P ++ Q) @ l == P @ l + Q @ l.
intro P. elim (PaddX_ok2 P); auto.
Qed.
Lemma PaddX_ok : forall P2 P k n l,
(PaddX Padd P2 k n P) @ l == P2 @ l * pow_pos (nth 0 k l) n + P @ l.
intro P2. elim (PaddX_ok2 P2); auto.
Qed.
Lemma Psub_ok : forall P' P l, (P -- P')@l == P@l - P'@l.
unfold Psub. intros. rewrite Padd_ok. rewrite Popp_ok. rsimpl.
Qed.
Lemma Pmul_ok : forall P P' l, (P**P')@l == P@l * P'@l.
induction P'; simpl; intros. rewrite PmulC_ok. reflexivity.
rewrite PaddX_ok. rewrite IHP'1. rewrite IHP'2. Esimpl2.
Qed.
Lemma Psquare_ok : forall P l, (Psquare P)@l == P@l * P@l.
Proof.
intros. unfold Psquare. apply Pmul_ok.
Qed.
(** Definition of polynomial expressions *)
(*
Inductive PExpr : Type :=
| PEc : C -> PExpr
| PEX : positive -> PExpr
| PEadd : PExpr -> PExpr -> PExpr
| PEsub : PExpr -> PExpr -> PExpr
| PEmul : PExpr -> PExpr -> PExpr
| PEopp : PExpr -> PExpr
| PEpow : PExpr -> N -> PExpr.
*)
(** Specification of the power function *)
(* A power_theory packages an abstract power operation rpow on R, indexed by
   a coefficient type Cpow, together with its agreement with pow_N. *)
Section POWER.
Variable Cpow : Set.
Variable Cp_phi : N -> Cpow.
Variable rpow : R -> Cpow -> R.
Record power_theory : Prop := mkpow_th {
rpow_pow_N : forall r n, (rpow r (Cp_phi n))== (pow_N r n)
}.
End POWER.
Variable Cpow : Set.
Variable Cp_phi : N -> Cpow.
Variable rpow : R -> Cpow -> R.
Variable pow_th : power_theory Cp_phi rpow.
(** evaluation of polynomial expressions towards R *)
Fixpoint PEeval (l:list R) (pe:PExpr C) {struct pe} : R :=
match pe with
| PEO => 0
| PEI => 1
| PEc c => [c]
| PEX _ j => nth 0 j l
| PEadd pe1 pe2 => (PEeval l pe1) + (PEeval l pe2)
| PEsub pe1 pe2 => (PEeval l pe1) - (PEeval l pe2)
| PEmul pe1 pe2 => (PEeval l pe1) * (PEeval l pe2)
| PEopp pe1 => - (PEeval l pe1)
| PEpow pe1 n => rpow (PEeval l pe1) (Cp_phi n)
end.
Strategy expand [PEeval].
Definition mk_X j := mkX j.
(** Correctness proofs *)
Lemma mkX_ok : forall p l, nth 0 p l == (mk_X p) @ l.
Proof.
destruct p;simpl;intros;Esimpl;trivial.
Qed.
(* Esimpl2 extended with the addition/subtraction correctness lemmas. *)
Ltac Esimpl3 :=
repeat match goal with
| |- context [(?P1 ++ ?P2)@?l] => rewrite (Padd_ok P1 P2 l)
| |- context [(?P1 -- ?P2)@?l] => rewrite (Psub_ok P1 P2 l)
end;try Esimpl2;try reflexivity;try apply ring_add_comm.
(* Power by binary (square-and-multiply) exponentiation, with an optional
   normalisation step subst_l applied after each multiplication. *)
Section POWER2.
Variable subst_l : Pol -> Pol.
Fixpoint Ppow_pos (res P:Pol) (p:positive){struct p} : Pol :=
match p with
| xH => subst_l (Pmul P res)
| xO p => Ppow_pos (Ppow_pos res P p) P p
| xI p => subst_l (Pmul P (Ppow_pos (Ppow_pos res P p) P p))
end.
Definition Ppow_N P n :=
match n with
| N0 => P1
| Npos p => Ppow_pos P1 P p
end.
(* Generic power over any binary operation m, used as the specification. *)
Fixpoint pow_pos_gen (R:Type)(m:R->R->R)(x:R) (i:positive) {struct i}: R :=
match i with
| xH => x
| xO i => let p := pow_pos_gen m x i in m p p
| xI i => let p := pow_pos_gen m x i in m x (m p p)
end.
(* Correctness, assuming subst_l preserves evaluation. *)
Lemma Ppow_pos_ok : forall l, (forall P, subst_l P@l == P@l) ->
forall res P p, (Ppow_pos res P p)@l == (pow_pos_gen Pmul P p)@l * res@l.
Proof.
intros l subst_l_ok res P p. generalize res;clear res.
induction p;simpl;intros. try rewrite subst_l_ok.
repeat rewrite Pmul_ok. repeat rewrite IHp.
rsimpl. repeat rewrite Pmul_ok. repeat rewrite IHp. rsimpl.
try rewrite subst_l_ok.
repeat rewrite Pmul_ok. reflexivity.
Qed.
Definition pow_N_gen (R:Type)(x1:R)(m:R->R->R)(x:R) (p:N) :=
match p with
| N0 => x1
| Npos p => pow_pos_gen m x p
end.
Lemma Ppow_N_ok : forall l,  (forall P, subst_l P@l == P@l) ->
forall P n, (Ppow_N P n)@l == (pow_N_gen P1 Pmul P n)@l.
Proof.  destruct n;simpl. reflexivity. rewrite Ppow_pos_ok; trivial. Esimpl. Qed.
End POWER2.
(** Normalization and rewriting *)
Section NORM_SUBST_REC.
(* Here no substitution is performed: subst_l is the identity. *)
Let subst_l (P:Pol) := P.
Let Pmul_subst P1 P2 := subst_l (Pmul P1 P2).
Let Ppow_subst := Ppow_N subst_l.
(* Normal form of a polynomial expression; the PEadd/PEopp case turns
   x + (- y) into a subtraction. *)
Fixpoint norm_aux (pe:PExpr C) : Pol :=
match pe with
| PEO => Pc cO
| PEI => Pc cI
| PEc c => Pc c
| PEX _ j => mk_X j
| PEadd pe1 (PEopp pe2) =>
Psub (norm_aux pe1) (norm_aux pe2)
| PEadd pe1 pe2 => Padd (norm_aux pe1) (norm_aux pe2)
| PEsub pe1 pe2 => Psub (norm_aux pe1) (norm_aux pe2)
| PEmul pe1 pe2 => Pmul (norm_aux pe1) (norm_aux pe2)
| PEopp pe1 => Popp (norm_aux pe1)
| PEpow pe1 n => Ppow_N (fun p => p) (norm_aux pe1) n
end.
Definition norm_subst pe := subst_l (norm_aux pe).
(* Normalisation preserves evaluation. *)
Lemma norm_aux_spec :
forall l pe,
PEeval l pe == (norm_aux pe)@l.
Proof.
intros.
induction pe.
- now simpl; rewrite <- ring_morphism0.
- now simpl; rewrite <- ring_morphism1.
- Esimpl3.
- Esimpl3.
- simpl.
rewrite IHpe1;rewrite IHpe2.
destruct pe2; Esimpl3.
unfold Psub.
destruct pe1; destruct pe2; rewrite Padd_ok; rewrite Popp_ok; reflexivity.
- simpl. unfold Psub. rewrite IHpe1;rewrite IHpe2.
now destruct pe1;
  [destruct pe2; rewrite Padd_ok; rewrite Popp_ok; Esimpl3 | Esimpl3..].
- simpl. rewrite IHpe1;rewrite IHpe2. rewrite Pmul_ok. reflexivity.
- now simpl; rewrite IHpe; Esimpl3.
- simpl.
rewrite Ppow_N_ok; (intros;try reflexivity).
rewrite rpow_pow_N; [| now apply pow_th].
induction n;simpl; [now Esimpl3|].
induction p; simpl; trivial.
+ try rewrite IHp;try rewrite IHpe;
  repeat rewrite Pms_ok; repeat rewrite Pmul_ok;reflexivity.
+ rewrite Pmul_ok.
  try rewrite IHp;try rewrite IHpe; repeat rewrite Pms_ok;
  repeat rewrite Pmul_ok;reflexivity.
Qed.
Lemma norm_subst_spec :
forall l pe,
PEeval l pe == (norm_subst pe)@l.
Proof.
intros;unfold norm_subst.
unfold subst_l. apply norm_aux_spec.
Qed.
End NORM_SUBST_REC.
(* Conjunction of the equations in lpe, interpreted in R (True when empty). *)
Fixpoint interp_PElist (l:list R) (lpe:list (PExpr C * PExpr C)) {struct lpe} : Prop :=
match lpe with
| nil => True
| (me,pe)::lpe =>
match lpe with
| nil => PEeval l me == PEeval l pe
| _ => PEeval l me == PEeval l pe /\ interp_PElist l lpe
end
end.
Lemma norm_subst_ok : forall l pe,
PEeval l pe == (norm_subst pe)@l.
Proof.
intros;apply norm_subst_spec.
Qed.
(* Reflection lemma: syntactic equality of normal forms entails semantic
   equality of the expressions. *)
Lemma ring_correct : forall l pe1 pe2,
(norm_subst pe1 =? norm_subst pe2) = true ->
PEeval l pe1 == PEeval l pe2.
Proof.
simpl;intros.
do 2 (rewrite (norm_subst_ok l);trivial).
apply Peq_ok;trivial.
Qed.
End MakeRingPol.
|
theory union
imports Main
        "../data/Natu"
        "../data/list"
        "../funcs/equal"
        "$HIPSTER_HOME/IsaHipster"
begin
(* Lists of naturals, used below as a set-like container. *)
datatype NList = NNil | NCons Nat NList
(* Membership test on NLists, deciding equality of naturals with equal2. *)
fun elemN :: "Nat => NList => bool" where
  "elemN x (NNil) = False"
| "elemN x (NCons z xs) = (if equal2 x z then True else elemN x xs)"
(* Set-style union: keeps an element of the first list only when it does
   not already occur in the second. *)
fun union :: "NList => NList => NList" where
  "union (NNil) y = y"
| "union (NCons z xs) y =
   (if elemN z y then union xs y else NCons z (union xs y))"
(* Lemmas discovered and proved by Hipster theory exploration. *)
(*hipster union elemN equal2*)
(* NNil is a right identity of union. *)
lemma lemma_ad [thy_expl]: "union x1 NNil = x1"
by (hipster_induct_schemes union.simps elemN.simps equal2.simps)
(*hipster_cond elemN equal2 union elemN*)
(* Membership is preserved by union on the right. *)
lemma lemma_ae [thy_expl]: "elemN x9 z9 \<Longrightarrow> elemN x9 (union y9 z9) = True"
by (hipster_induct_schemes elemN.simps equal2.simps union.simps elemN.simps)
lemma lemma_af [thy_expl]: "elemN x1 z1 \<Longrightarrow> union (NCons x1 y1) (union z1 xa1) = union y1 (union z1 xa1)"
by (hipster_induct_schemes elemN.simps equal2.simps union.simps elemN.simps)
lemma lemma_ag [thy_expl]: "elemN x1 y1 \<Longrightarrow> union (NCons x1 NNil) (union y1 z1) = union y1 z1"
by (hipster_induct_schemes elemN.simps equal2.simps union.simps elemN.simps)
(* Idempotence of union, proved by the same induction schemes. *)
lemma unknown [thy_expl]: "union x x = x"
by (hipster_induct_schemes elemN.simps equal2.simps union.simps )
(* Conjectures produced by Hipster exploration that were not proved
   automatically; each is abandoned with `oops` and kept as a record of
   candidate properties (commutativity/associativity-like laws of union). *)
lemma unknown [thy_expl]: "elemN x (union y z) = elemN x (union z y)"
oops
lemma unknown [thy_expl]: "union (union x y) z = union x (union y z)"
oops
lemma unknown [thy_expl]: "union x (union x y) = union x y"
oops
lemma unknown [thy_expl]: "union x (union y x) = union y x"
oops
lemma unknown [thy_expl]: "union x (NCons y x) = NCons y x"
oops
lemma unknown [thy_expl]: "union (union x y) x = union y x"
oops
lemma unknown [thy_expl]: "union (union x y) y = union x y"
oops
lemma unknown [thy_expl]: "union (union x y) (union x z) =
union y (union x z)"
oops
lemma unknown [thy_expl]: "union (union x y) (union y z) =
union x (union y z)"
oops
lemma unknown [thy_expl]: "union (union x y) (union z x) =
union y (union z x)"
oops
lemma unknown [thy_expl]: "union (union x y) (union z y) =
union x (union z y)"
oops
lemma unknown [thy_expl]: "union (union x y) (NCons z x) = union y (NCons z x)"
oops
lemma unknown [thy_expl]: "union (union x y) (NCons z y) = union x (NCons z y)"
oops
lemma unknown [thy_expl]: "union (NCons x y) (union z y) =
union (NCons x z) (union z y)"
oops
lemma unknown [thy_expl]: "union (NCons x y) (union y z) =
union (NCons x z) (union y z)"
oops
lemma unknown [thy_expl]: "elemN (S x) (union y z) = elemN (S x) (union z y)"
oops
lemma unknown [thy_expl]: "elemN Z (union x y) = elemN Z (union y x)"
oops
lemma unknown [thy_expl]: "union (union x y) (union x y) = union x y"
oops
lemma unknown [thy_expl]: "union (union x y) (union y x) = union y x"
oops
lemma unknown [thy_expl]: "union (NCons x y) (NCons x y) = NCons x y"
oops
lemma unknown [thy_expl]: "union (NCons x NNil) y = union (NCons x y) y"
oops
lemma unknown [thy_expl]: "union x (NCons Z x) = NCons Z x"
oops
lemma unknown [thy_expl]: "union (NCons x NNil) (union y z) =
union (NCons x y) (union y z)"
oops
lemma unknown [thy_expl]: "union (NCons x NNil) (union y z) =
union (NCons x z) (union y z)"
oops
lemma unknown [thy_expl]: "union (NCons x NNil) (NCons y z) = union (NCons x z) (NCons y z)"
oops
lemma unknown [thy_expl]: "union (union x y) (NCons Z x) = union y (NCons Z x)"
oops
lemma unknown [thy_expl]: "union (union x y) (NCons Z y) = union x (NCons Z y)"
oops
lemma unknown [thy_expl]: "union (NCons Z x) (union y x) =
union (NCons Z y) (union y x)"
oops
lemma unknown [thy_expl]: "union (NCons Z x) (union x y) =
union (NCons Z y) (union x y)"
oops
lemma unknown [thy_expl]: "elemN (S Z) (union x y) = elemN (S Z) (union y x)"
oops
lemma unknown [thy_expl]: "union (NCons Z NNil) x = union (NCons Z x) x"
oops
lemma unknown [thy_expl]: "union (NCons x NNil) (NCons Z y) = union (NCons x y) (NCons Z y)"
oops
lemma unknown [thy_expl]: "union (NCons Z NNil) (union x y) =
union (NCons Z x) (union x y)"
oops
lemma unknown [thy_expl]: "union (NCons Z NNil) (union x y) =
union (NCons Z y) (union x y)"
oops
lemma unknown [thy_expl]: "union (NCons Z NNil) (NCons x y) = union (NCons Z y) (NCons x y)"
oops
lemma unknown [thy_expl]: "union (NCons Z x) (NCons Z x) = NCons Z x"
oops
lemma unknown [thy_expl]: "elemN x y \<Longrightarrow> union (NCons x y) y = y"
oops
lemma unknown [thy_expl]: "elemN x y \<Longrightarrow>
union (NCons x y) (union y z) = union y z"
oops
lemma unknown [thy_expl]: "elemN x y \<Longrightarrow>
union (NCons x y) (union z y) = union z y"
oops
lemma unknown [thy_expl]: "elemN x y \<Longrightarrow> union (NCons x y) (NCons z y) = NCons z y"
oops
lemma unknown [thy_expl]: "elemN x z \<Longrightarrow>
union (NCons x y) (union z y) = union z y"
oops
lemma unknown [thy_expl]: "elemN x z \<Longrightarrow>
union (NCons x y) (union y z) = union y z"
oops
lemma unknown [thy_expl]: "elemN x y \<Longrightarrow> union (NCons x y) (NCons Z y) = NCons Z y"
oops
hipster_cond equal2 elemN union equal2
lemma unknown [thy_expl]: "union x x = x"
by (hipster_induct_schemes union.simps elemN.simps equal2.simps Nat.exhaust NList.exhaust)
lemma unknown [thy_expl]: "elemN x (union y z) = elemN x (union z y)"
oops
lemma unknown [thy_expl]: "union (union x y) z = union x (union y z)"
oops
lemma unknown [thy_expl]: "union x (union x y) = union x y"
oops
lemma unknown [thy_expl]: "union x (union y x) = union y x"
oops
lemma unknown [thy_expl]: "union x (NCons y x) = NCons y x"
oops
lemma unknown [thy_expl]: "union (union x y) x = union y x"
oops
lemma unknown [thy_expl]: "union (union x y) y = union x y"
oops
lemma unknown [thy_expl]: "union (union x y) (union x z) = union y (union x z)"
oops
lemma unknown [thy_expl]: "union (union x y) (union y z) = union x (union y z)"
oops
lemma unknown [thy_expl]: "union (union x y) (union z x) = union y (union z x)"
oops
lemma unknown [thy_expl]: "union (union x y) (union z y) = union x (union z y)"
oops
lemma unknown [thy_expl]: "union (union x y) (NCons z x) = union y (NCons z x)"
oops
lemma unknown [thy_expl]: "union (union x y) (NCons z y) = union x (NCons z y)"
oops
lemma unknown [thy_expl]: "union (NCons x y) (union z y) = union (NCons x z) (union z y)"
oops
lemma unknown [thy_expl]: "union (NCons x y) (union y z) = union (NCons x z) (union y z)"
oops
lemma unknown [thy_expl]: "elemN (S x) (union y z) = elemN (S x) (union z y)"
oops
lemma unknown [thy_expl]: "elemN Z (union x y) = elemN Z (union y x)"
oops
lemma unknown [thy_expl]: "union (union x y) (union x y) = union x y"
oops
lemma unknown [thy_expl]: "union (union x y) (union y x) = union y x"
oops
lemma unknown [thy_expl]: "union (NCons x y) (NCons x y) = NCons x y"
oops
lemma unknown [thy_expl]: "union (NCons x NNil) y = union (NCons x y) y"
oops
lemma unknown [thy_expl]: "union x (NCons Z x) = NCons Z x"
oops
lemma unknown [thy_expl]: "union (NCons x NNil) (union y z) = union (NCons x y) (union y z)"
oops
lemma unknown [thy_expl]: "union (NCons x NNil) (union y z) = union (NCons x z) (union y z)"
oops
lemma unknown [thy_expl]: "union (NCons x NNil) (NCons y z) = union (NCons x z) (NCons y z)"
oops
lemma unknown [thy_expl]: "union (union x y) (NCons Z x) = union y (NCons Z x)"
oops
lemma unknown [thy_expl]: "union (union x y) (NCons Z y) = union x (NCons Z y)"
oops
lemma unknown [thy_expl]: "union (NCons Z x) (union y x) = union (NCons Z y) (union y x)"
oops
lemma unknown [thy_expl]: "union (NCons Z x) (union x y) = union (NCons Z y) (union x y)"
oops
lemma unknown [thy_expl]: "elemN (S Z) (union x y) = elemN (S Z) (union y x)"
oops
lemma unknown [thy_expl]: "union (NCons Z NNil) x = union (NCons Z x) x"
oops
lemma unknown [thy_expl]: "union (NCons x NNil) (NCons Z y) = union (NCons x y) (NCons Z y)"
oops
lemma unknown [thy_expl]: "union (NCons Z NNil) (union x y) = union (NCons Z x) (union x y)"
oops
lemma unknown [thy_expl]: "union (NCons Z NNil) (union x y) = union (NCons Z y) (union x y)"
oops
lemma unknown [thy_expl]: "union (NCons Z NNil) (NCons x y) = union (NCons Z y) (NCons x y)"
oops
lemma unknown [thy_expl]: "union (NCons Z x) (NCons Z x) = NCons Z x"
oops
lemma t: "\<not> equal2 u v \<Longrightarrow> elem u (Cons2 v x) = elem u x"
oops
lemma u : "union x (NCons a x) = NCons a x"
apply(induction x)
apply simp
oops
lemma unknown [thy_expl]: "union x x = x"
apply(induction x)
apply(simp_all)
apply rule
apply rule
apply(hipster_induct_schemes simps list.exhaust Nat.exhaust equal2.simps elem.simps lemma_a )
oops
end
|
module Types

-- A single labelled data point: (input feature, target value).
--data Example: Type where
--  Points: Double -> Double -> Example
public export
data Examples = Points Double Double

-- A dataset is a list of examples tagged by its role.
--data Dataset = TrainingSet [Example] | TestSet [Example]
public export
data Dataset : Type where
  TrainingSet: List Examples -> Dataset
  TestSet: List Examples -> Dataset

-- Model coefficients (presumably intercept and slope of a linear
-- model -- TODO confirm against the code that consumes them).
public export
data Coefficients = Val Double Double

-- Sample data used for experimentation; not exported.
example1 : Examples
example1 = Points 10.2 10.9

example2 : Examples
example2 = Points 0.0 0.0

example3 : Examples
example3 = Points 2.8 4.9

mytrainset: Dataset
mytrainset = TrainingSet [example1,example2, example3]
|
/* this manages and calls function to execute function
* from beginning to end
*/
/* standard library */
#include <stdio.h>
#include <stdlib.h>   /* exit(), EXIT_FAILURE -- was missing; exit() was used undeclared */
#include <string.h>
/* GSL: matrices, eigen decomposition, BLAS, statistics */
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_eigen.h>
#include <gsl/gsl_blas.h>
#include <gsl/gsl_statistics.h>
/* project headers */
#include "fileio.h"
#include "data_process.h"
#include "sensor_history.h"
#include "sensor_validation.h"
#include "externs.h"
/* forward declarations for the functions defined below */
void handle_files(char *, double*, double*);
void sensor_validation(double*);
double data_processing(double*);

int sensor_number;            /* number of sensors per time interval (user input) */
int temperature_min = 30;     /* lower bound of the valid temperature range */
int temperature_max = 60;     /* upper bound of the valid temperature range */
int data_number;              /* total number of samples read from the input file */
int group_number;             /* number of time intervals = data_number / sensor_number */
float q_percent;              /* fusion threshold in [0,1] (user input, defaults to 0.7) */

/* path and names of the files used in the program
 * which include input file, output file and sensor history file */
char input_file_name[255] = "../data/input_data/sample_input.csv";
char history_file_name[255] = "../data/sensor_history/sensor_history.csv";
char output_file_name[255] = "../data/output_data/output.csv";
/* Entry point: reads user parameters, loads sensor data from the input
 * file, validates it, then runs the fusion pipeline once per time
 * interval and appends the fused results to the output file.
 * Returns 0 on success; terminates with a non-zero status on error
 * (the original returned 1 on success and exit(0) on failure, which
 * inverted the conventional process exit semantics). */
int main() {
    printf("\nApplication is running....\n");

    /* user input for the number of sensors to process */
    printf("Insert number of sensors:");
    if (scanf("%d", &sensor_number) != 1 || sensor_number <= 0) {
        printf("ERROR: Invalid sensor number!\n");
        return EXIT_FAILURE;
    }

    /* determining the time intervals for the sensors */
    int time_interval;
    printf("Insert collected time intervals:");
    if (scanf("%d", &time_interval) != 1) {
        printf("ERROR: Invalid time interval!\n");
        return EXIT_FAILURE;
    }

    printf("Insert q percent(Should be 0~1):");
    if (scanf("%f", &q_percent) != 1 || (q_percent < 0) || (q_percent > 1)) {
        printf("Invalid q_percent! It will be regarded as 0.7 of default value.\n");
        q_percent = 0.7;
    }

    /* NOTE(review): fixed capacity of 256 samples; read_data() must not
     * return more than that -- confirm against fileio.c */
    double values[256];
    double time_value[256];
    double group_values[sensor_number];  /* VLA: one slot per sensor */

    /* function performs reading of sensor data
     * @input : file name
     * @output : sensor time and associated values
     */
    handle_files(input_file_name, &time_value[0], &values[0]);

    /* number of calculations to be performed based on the user input */
    group_number = data_number / sensor_number;
    /* compact the timestamps: keep one representative per interval */
    for (int i = 0; i < group_number; i++) {
        time_value[i] = time_value[i * sensor_number];
    }

    /* check whether the user input and actual file data matches */
    if (group_number != time_interval) {
        printf("ERROR: Time interval numbers, sensor numbers do not match with your input file!\n");
        return EXIT_FAILURE;
    }

    sensor_validation(&values[0]);

    /* separating multiple data computations in the output file */
    FILE *fpout = fopen(output_file_name, "a");
    if (fpout != NULL) {
        fprintf(fpout, "--------------\n");
        fclose(fpout);
    }
    FILE *fphis = fopen(history_file_name, "a");
    if (fphis != NULL) {
        fprintf(fphis, "--------------\n");
        fclose(fphis);
    }

    /* fuse each time interval's group of readings and record the result */
    for (int i = 0; i < group_number; i++) {
        for (int j = 0; j < sensor_number; j++) {
            group_values[j] = values[i * sensor_number + j];
        }
        printf("\n---------------------Time interval %d---------------------\n\n", i);
        double fused = data_processing(&group_values[0]);
        float time_val_file = (float)time_value[i];
        write_data(time_val_file, fused, output_file_name);
    }
    return 0;
}
/* Reads the input file via read_data() and reports the outcome.
 * On success, fills time_value[] / values[] and sets the global
 * data_number. On any failure it prints a diagnostic and terminates
 * with a non-zero status (the original exited with status 0 on every
 * error path, which made failures look like success to the shell).
 * @input : input file name; output arrays for timestamps and values */
void handle_files(char* input_file_name, double* time_value, double* values) {
    printf("\n");
    if (input_file_name == NULL) {
        printf("ERROR: Input file does not exist!\n");
        exit(EXIT_FAILURE);
    }
    data_number = read_data(&time_value[0], &values[0], input_file_name);
    if (data_number == -1) {
        /* read_data signals an unopenable file with -1 */
        printf("ERROR: File open failed!\n");
        exit(EXIT_FAILURE);
    } else if (data_number == 0) {
        /* zero samples: file layout inconsistent with the sensor count */
        printf("ERROR: Sensor numbers inserted and file are different!\n");
        exit(EXIT_FAILURE);
    } else if (data_number > 0) {
        printf("Read data successfully!\n");
    }
}
/* Validates one group of sensor readings.
 * First runs the range check (reading_validation); whatever its verdict,
 * the frozen-value check also runs, with an extra warning printed when
 * readings were out of range.
 * @input : group_values -- array of readings to validate */
void sensor_validation(double* group_values) {
    const int res = reading_validation(group_values);
    switch (res) {
    case 0:
        /* out-of-range readings: warn, then still check for frozen values */
        printf("ERROR: Temperature values are out of range! Check history file.\n");
        frozen_value_check(group_values);
        break;
    case 1:
        /* readings in range: only the frozen-value check is needed */
        frozen_value_check(group_values);
        break;
    default:
        /* any other result code: nothing to do (matches original behavior) */
        break;
    }
}
/* function performs sensor fusion algo using group Values
 * Runs the PCA/support-degree fusion pipeline on one group of readings
 * (one value per sensor); the numbered step helpers live in data_process.c.
 * @input : group Values
 * @output : fused result
 */
double data_processing(double* group_values) {
    printf("Sensor Values:\n");
    for (int i = 0; i < sensor_number; i++) {
        printf("x%d=%f, ", i, group_values[i]);
    }
    /* Step 1: Calc the Support Degree Matrix */
    gsl_matrix* D = gsl_matrix_alloc(sensor_number,sensor_number);
    support_degree_generator(D, &group_values[0]); //function in data_process.c to get D - Support Degree Matrix
    /* Step 2: Calc eigenval & eigenvec */
    gsl_matrix* T = gsl_matrix_alloc(sensor_number,sensor_number);
    gsl_vector* evec = gsl_vector_alloc(sensor_number);
    /* Temp is a scratch copy of D -- presumably because the eigen solver
     * modifies its input while D is still needed in step 3 (TODO confirm
     * against eigenvec_calc in data_process.c) */
    gsl_matrix* Temp = gsl_matrix_alloc(sensor_number,sensor_number);
    gsl_matrix_memcpy(Temp, D);
    eigenvec_calc(evec, T, Temp); //function in data_process.c to get evec - eigen group_values & T - vectors
    /* Step 3: Principal Comp Calc */
    gsl_matrix* y = gsl_matrix_alloc(sensor_number,sensor_number);
    principal_comp_calc(T, D, y); //function in data_process.c to get T - Principal Components
    /* Step 4: Calc the contri rate of the kth principal comp */
    double alpha[sensor_number];
    contri_rate_calc_kth(evec, &alpha[0]); //function in data_process.c to get alpha
    /* Step 5: Calc the contri rate of the m principal comp */
    double phi[sensor_number];
    major_contri_calc(&alpha[0], &phi[0]); //function in data_process.c to get phi
    /* Step 6: Compute the integrated support degree score */
    gsl_vector* Z = gsl_vector_alloc(sensor_number);
    integ_supp_score_calc(&alpha[0], y, Z); //function in data_process.c to get z_i
    /* Step 7-1: Eliminate incorrect data */
    int sensor_correction[sensor_number];
    elliminate_incorrect_data(Z, &sensor_correction[0]); //function in data_process.c to elliminate incorrect datas
    /* Step 7-2: Compute the weight coefficient for each sensor */
    double omega[sensor_number];
    weight_coeff_calc(Z, &sensor_correction[0], &omega[0]); //function in data_process.c to get omega
    /* Step 7-3: Compute the fused output */
    double fused;
    fused = fused_output(&omega[0], &group_values[0]); //function in data_process.c to get fused output
    printf("FINAL STEP: \nThe fused output is %f\n", fused);
    /* Free memory */
    gsl_matrix_free(D);
    gsl_matrix_free(Temp);
    gsl_matrix_free(T);
    gsl_vector_free(evec);
    gsl_matrix_free(y);
    gsl_vector_free(Z);
    return fused;
}
|
Formal statement is: lemma box_ne_empty: fixes a :: "'a::euclidean_space" shows "cbox a b \<noteq> {} \<longleftrightarrow> (\<forall>i\<in>Basis. a\<bullet>i \<le> b\<bullet>i)" and "box a b \<noteq> {} \<longleftrightarrow> (\<forall>i\<in>Basis. a\<bullet>i < b\<bullet>i)" Informal statement is: A closed interval $[a,b]$ is non-empty if and only if $a \leq b$ in every coordinate. An open interval $(a,b)$ is non-empty if and only if $a < b$ in every coordinate. |
/*
* Copyright (c) 2010, Dust Networks, Inc.
*/
#include "StdAfx.h"
#include "PicardCLR.h"
#include "BoostLog.h"
#include "CLRUtils.h"
#include "serial_mux.h" // for resetConnection
#include <boost/date_time/posix_time/posix_time.hpp>
using namespace boost::posix_time;
namespace DustSerialMux {

   // Serial transport to the Picard device.
   // rtsDelay       : ms to keep RTS asserted after a write completes
   // hwFlowControl  : when true, wrap writes in RTS/DSR handshaking
   // readTimeout    : stored default read timeout (reads below use the
   //                  timeout passed explicitly to read())
   CPicardCLR_Serial::CPicardCLR_Serial(SerialPort^ device,
                                        int rtsDelay, bool hwFlowControl, int readTimeout)
      : m_serial(device),
        m_rtsDelay(rtsDelay),
        m_hwFlowControl(hwFlowControl),
        m_readTimeout(readTimeout)
   { ; }

   CPicardCLR_Serial::~CPicardCLR_Serial() {
      // TODO: cleanup
   }

   // Send one frame: HDLC-encode `data` and write it to the serial port,
   // optionally bracketed by RTS/DSR hardware flow control.  Any write
   // exception is treated as a closed port and triggers a connection reset.
   void CPicardCLR_Serial::sendRaw(const ByteVector& data)
   {
      // locks should be handled at the sendCommand / sendAck methods
      // HDLC encode
      ByteVector encodedvec = encodeHDLC(data);
      array<Byte>^ encodeddata = convertVectorToArray(encodedvec);
      bool portClosed = false;
      if (m_hwFlowControl) {
         m_serial->RtsEnable = 1;
         // busy wait for DSR (or timeout)
         bool sendTimeout = false;
         ptime timeout = microsec_clock::local_time() + milliseconds(m_serial->WriteTimeout);
         while (m_serial->DsrHolding == 0) {
            ptime now = microsec_clock::local_time();
            if (now > timeout) {
               sendTimeout = true;
               break;
            }
            boost::this_thread::sleep(milliseconds(1));
         }
         if (sendTimeout) {
            // note: we detect the write failure when Acks are not received
            // NOTE(review): RTS stays asserted on this early return --
            // confirm whether it should be deasserted before bailing out.
            CBoostLog::log("Serial:Write error: no CTS received");
            return;
         }
      }
      try {
         m_serial->Write(encodeddata, 0, encodeddata->Length);
         CBoostLog::logDump("Serial:Write", encodedvec);
         // wait before deasserting RTS
         // TODO: later, we may need to adjust the RTS delay based on data length
         if (m_hwFlowControl) {
            if (m_rtsDelay > 0) {
               boost::this_thread::sleep(milliseconds(m_rtsDelay));
            }
            m_serial->RtsEnable = 0;
         }
      }
      catch (Exception^) {
         CBoostLog::log("Exception on write (Serial closed)");
         portClosed = true;
      }
      if (portClosed) {
         // when the port is closed, we reset and hope it re-opens soon
         resetConnection();
      }
   }

   // read
   // Read whatever bytes are available (blocking up to `timeout` ms) and
   // feed them byte-by-byte into the HDLC parser, which invokes
   // frameComplete when a full frame has been assembled.
   void CPicardCLR_Serial::read(const std::string& context, int timeout)
   {
      CBoostLog::log("Starting read()");
      bool portClosed = false;
      try {
         m_serial->ReadTimeout = timeout;
         array<unsigned char>^ input = gcnew array<unsigned char>(INPUT_BUFFER_LEN);
         int len = m_serial->Read(input, 0, INPUT_BUFFER_LEN);
         std::ostringstream prefix;
         prefix << "Serial:Read (" << context << ")";
         CBoostLog::logDump(prefix.str(), convertArrayToVector(input, 0, len));
         for (int i = 0; i < len; i++) {
            m_hdlc->addByte(input[i]);
         }
         // the HDLC parser calls frameComplete
      }
      catch (TimeoutException^) {
         // timeouts are expected when the device is idle; not logged
         // CBoostLog::log("Timeout exception (Serial read)");
      }
      catch (Exception^) {
         CBoostLog::log("Invalid operation on read (Serial closed)");
         portClosed = true;
      }
      if (portClosed) {
         // when the port is closed, we reset and hope it re-opens soon
         resetConnection();
      }
   }

   // UDP transport variant: frames are sent as datagrams, so no HDLC
   // encoding/parsing is involved.
   CPicardCLR_UDP::CPicardCLR_UDP(UdpClient^ device, int readTimeout)
      : m_outputDev(device),
        m_readTimeout(readTimeout)
   { ; }

   CPicardCLR_UDP::~CPicardCLR_UDP() {
      // TODO: cleanup
   }

   // Send one frame over UDP, prefixed with a single dummy byte
   // (the receiver evidently skips byte 0 -- see read() below).
   void CPicardCLR_UDP::sendRaw(const ByteVector& data)
   {
      // TODO: is there a better solution than copying the whole array?
      // create a new buffer offset by one byte so there's a dummy byte in front
      int length = data.size();
      array<unsigned char>^ buf = gcnew array<unsigned char>(length+1);
      for (int i = 0; i < length; i++) { buf[i+1] = data[i]; }
      try {
         m_outputDev->Send(buf, length+1);
      }
      catch (Exception^) {
         CBoostLog::log("Socket exception (UDP write)");
      }
      // NOTE(review): this logs the write even when Send threw above --
      // confirm whether failed sends should be logged differently.
      CBoostLog::logDump("UDP:Write (first byte excluded)", data);
   }

   // read
   // Receive one datagram (blocking up to `timeout` ms), strip the leading
   // dummy byte, and hand the payload straight to frameComplete.
   void CPicardCLR_UDP::read(const std::string& context, int timeout)
   {
      CBoostLog::log("Starting read()");
      try {
         IPEndPoint^ remote;
         m_outputDev->Client->ReceiveTimeout = timeout; // milliseconds
         array<unsigned char>^ buf = m_outputDev->Receive(remote);
         // with UDP, there's no HDLC parser, so we call frameComplete directly
         // start at offset 1 to drop the dummy byte in front
         ByteVector data = convertArrayToVector(buf, 1, buf->Length-1);
         std::ostringstream prefix;
         prefix << "UDP:Read (" << context << ")";
         CBoostLog::logDump(prefix.str(), data);
         //boost::posix_time::ptime now = boost::posix_time::microsec_clock::local_time();
         //std::cout << "(" << now << ") : UDP read, len=" << buf->Length-1 << std::endl;
         frameComplete(data);
      }
      catch (Exception^) {
         CBoostLog::log("Socket exception (UDP read)");
      }
   }

} // namespace DustSerialMux
|
import pickle
import numpy as np
from robo_rl.common.buffer.buffer import Buffer
class TrajectoryBuffer(Buffer):
    """Unlike traditional replay buffers, this is a collection of
    trajectories, not individual transitions.

    Each stored entry is assumed to be a dict with keys "trajectory"
    (a sequence of transition dicts) and "context" (per-trajectory
    context) -- inferred from usage below, TODO confirm against the
    code that fills the buffer.
    """

    def __init__(self, capacity=1000):
        """capacity: maximum number of trajectories retained."""
        super().__init__(capacity)

    def sample_timestep(self, batch_size, timestep):
        """Sample ``batch_size`` transitions taken at index ``timestep``,
        each drawn (with replacement) from a uniformly random stored
        trajectory.

        Returns a list of transition dicts, each augmented with the
        owning trajectory's "context".

        Raises:
            ValueError: if batch_size exceeds the number of stored
                trajectories.
        """
        if batch_size > len(self):
            raise ValueError('Sampling batch size greater than buffer size')
        trajectory_indices = np.random.randint(0, len(self), batch_size)
        batch = []
        for trajectory_index in trajectory_indices:
            entry = self.buffer[trajectory_index]
            # BUGFIX: copy the stored transition before annotating it.
            # The original wrote "context" directly into the dict held by
            # the buffer, silently mutating the stored trajectory data.
            transition = dict(entry["trajectory"][timestep])
            transition["context"] = entry["context"]
            batch.append(transition)
        return batch

    def add_from_file(self, expert_file_path):
        """Load trajectories from a pickle file and add each one.

        The file is assumed to contain an array/list of trajectories.
        NOTE(review): unpickling executes arbitrary code -- only use
        with trusted expert files.
        """
        with open(expert_file_path, "rb") as expert_file:
            trajectories = np.array(pickle.load(expert_file))
            for trajectory in trajectories:
                self.add(trajectory)
|
Require Import Nijn.Prelude.
Require Import Nijn.Syntax.Signature.
Declare Scope srp.

(** * Strong reduction pairs *)

(** A strong reduction pair provides two relations on the terms. These relations are respected by the term formers and substitution *)
Section StrongReductionPair.
  Context {B F : Type}
          (ar : F -> ty B).

  (* A pair of relations on well-typed terms in every context:
     [tm_gt] is the strict part, [tm_ge] the weak part. *)
  Record term_order : Type :=
    make_term_order
      {
        tm_gt : forall (C : con B) (A : ty B),
            tm ar C A -> tm ar C A -> Prop ;
        tm_ge : forall (C : con B) (A : ty B),
            tm ar C A -> tm ar C A -> Prop
      }.

  Local Notation "x ≺[ O ] y" := (tm_gt O _ _ x y) (at level 70).
  Local Notation "x ≼[ O ] y" := (tm_ge O _ _ x y) (at level 70).

  (* Package the two relations on terms of type [A] in context [C]
     as a compatible-relation structure. *)
  Definition term_CompatRel
             (C : con B)
             (A : ty B)
             (O : term_order)
    : CompatRel
    := Build_CompatRel (tm ar C A) (tm_gt O C A) (tm_ge O C A).

  (* The laws a [term_order] must satisfy to be a strong reduction pair:
     well-foundedness of the strict order, compatibility, closure of both
     relations under substitution and under the term formers (application
     and lambda), and weak compatibility with beta. *)
  Class is_strong_reduction_pair (O : term_order) : Type :=
    {
      gt_isWf : forall (C : con B) (A : ty B),
          Wf (fun (t₁ t₂ : tm ar C A) => t₁ ≺[ O ] t₂) ;
      compatibility : forall (C : con B) (A : ty B),
          isCompatRel (term_CompatRel C A O) ;
      sub_gt : forall (C₁ C₂ : con B)
                      (s : sub ar C₁ C₂)
                      (A : ty B)
                      (t₁ t₂ : tm ar C₂ A),
          t₁ ≺[ O ] t₂
          -> t₁ [ s ] ≺[ O ] t₂ [ s ] ;
      sub_ge : forall (C₁ C₂ : con B)
                      (s : sub ar C₁ C₂)
                      (A : ty B)
                      (t₁ t₂ : tm ar C₂ A),
          t₁ ≼[ O ] t₂
          -> t₁ [ s ] ≼[ O ] t₂ [ s ] ;
      app_gt_l : forall (C : con B)
                        (A₁ A₂ : ty B)
                        (f₁ f₂ : tm ar C (A₁ ⟶ A₂))
                        (t : tm ar C A₁),
          f₁ ≺[ O ] f₂
          -> (f₁ · t) ≺[ O ] (f₂ · t) ;
      app_gt_r : forall (C : con B)
                        (A₁ A₂ : ty B)
                        (f : tm ar C (A₁ ⟶ A₂))
                        (t₁ t₂ : tm ar C A₁),
          t₁ ≺[ O ] t₂
          -> (f · t₁) ≺[ O ] (f · t₂) ;
      app_ge : forall (C : con B)
                      (A₁ A₂ : ty B)
                      (f₁ f₂ : tm ar C (A₁ ⟶ A₂))
                      (t₁ t₂ : tm ar C A₁),
          f₁ ≼[ O ] f₂
          -> t₁ ≼[ O ] t₂
          -> (f₁ · t₁) ≼[ O ] (f₂ · t₂) ;
      lam_gt : forall (C : con B)
                      (A₁ A₂ : ty B)
                      (f₁ f₂ : tm ar (A₁ ,, C) A₂),
          f₁ ≺[ O ] f₂
          -> λ f₁ ≺[ O ] λ f₂ ;
      lam_ge : forall (C : con B)
                      (A₁ A₂ : ty B)
                      (f₁ f₂ : tm ar (A₁ ,, C) A₂),
          f₁ ≼[ O ] f₂
          -> λ f₁ ≼[ O ] λ f₂ ;
      beta_ge : forall (C : con B)
                       (A₁ A₂ : ty B)
                       (f : tm ar (A₁ ,, C) A₂)
                       (t : tm ar C A₁),
          (λ f) · t ≼[ O ] f [ beta_sub t ]
    }.

  (* A strong reduction pair bundles a term order with a proof of its laws. *)
  Record strong_reduction_pair : Type :=
    make_srp
      {
        orders :> term_order ;
        is_srp : is_strong_reduction_pair orders
      }.

  Global Instance strong_reduction_pair_is_srp
         (srp : strong_reduction_pair)
    : is_strong_reduction_pair srp
    := is_srp srp.
End StrongReductionPair.

Notation "x ≺[ O ] y" := (tm_gt _ O _ _ x y) (at level 70) : srp.
Notation "x ≼[ O ] y" := (tm_ge _ O _ _ x y) (at level 70) : srp.
|
Require Export GeoCoq.Highschool.circumcenter.
Section Concyclic.

Context `{TE:Tarski_euclidean}.

(* Four points are concyclic when they are coplanar and some point O
   (the center) is equidistant from all four. *)
Definition Concyclic A B C D := Coplanar A B C D /\ exists O, Cong O A O B /\ Cong O A O C /\ Cong O A O D.

(* From concyclicity we can choose a center lying in the plane ABC. *)
Lemma concyclic_aux : forall A B C D, Concyclic A B C D ->
  exists O, Cong O A O B /\ Cong O A O C /\ Cong O A O D /\ Coplanar A B C O.
Proof.
intros A B C D [HCop [O1]]; spliter.
destruct (col_dec A B C).
exists O1; repeat split; Cop.
destruct (l11_62_existence A B C O1) as [O []].
exists O.
repeat split; try apply cong2_per2__cong with O1 O1; finish.
Qed.

(* Transitivity through a non-degenerate triple: if D and E are each
   concyclic with A, B, C, then A, B, D, E are concyclic.  Uses
   uniqueness of the circumcenter of the triangle ABC. *)
Lemma concyclic_trans : forall A B C D E,
 ~ Col A B C ->
 Concyclic A B C D -> Concyclic A B C E -> Concyclic A B D E.
Proof.
intros.
split.
unfold Concyclic in *; spliter; CopR.
apply concyclic_aux in H0.
apply concyclic_aux in H1.
decompose [ex and] H0;clear H0.
decompose [ex and] H1;clear H1.
exists x.
repeat split;Cong.
assert (x=x0).
assert_diffs.
apply is_circumcenter_uniqueness with A B C;try assumption.
repeat split; [CongR..|Cop].
repeat split; [CongR..|Cop].
subst.
Cong.
Qed.
(* Concyclicity is invariant under all 23 non-identity permutations of
   the four points; every proof is the same one-liner (coplanarity is
   permutation-invariant via [Cop], the center witness is reused, and
   the congruences are rearranged by [CongR]). *)
Lemma concyclic_perm_1: forall A B C D,
 Concyclic A B C D -> Concyclic A B D C.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_2 : forall A B C D,
 Concyclic A B C D -> Concyclic A C B D.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_3 : forall A B C D,
 Concyclic A B C D -> Concyclic A C D B.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_4 : forall A B C D,
 Concyclic A B C D -> Concyclic A D B C.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_5 : forall A B C D,
 Concyclic A B C D -> Concyclic A D C B.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_6 : forall A B C D,
 Concyclic A B C D -> Concyclic B A C D.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_7 : forall A B C D,
 Concyclic A B C D -> Concyclic B A D C.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_8 : forall A B C D,
 Concyclic A B C D -> Concyclic B C A D.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_9 : forall A B C D,
 Concyclic A B C D -> Concyclic B C D A.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_10 : forall A B C D,
 Concyclic A B C D -> Concyclic B D A C.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_11 : forall A B C D,
 Concyclic A B C D -> Concyclic B D C A.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_12 : forall A B C D,
 Concyclic A B C D -> Concyclic C A B D.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_13 : forall A B C D,
 Concyclic A B C D -> Concyclic C A D B.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_14 : forall A B C D,
 Concyclic A B C D -> Concyclic C B A D.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_15 : forall A B C D,
 Concyclic A B C D -> Concyclic C B D A.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_16 : forall A B C D,
 Concyclic A B C D -> Concyclic C D A B.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_17 : forall A B C D,
 Concyclic A B C D -> Concyclic C D B A.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_18 : forall A B C D,
 Concyclic A B C D -> Concyclic D A B C.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_19 : forall A B C D,
 Concyclic A B C D -> Concyclic D A C B.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_20 : forall A B C D,
 Concyclic A B C D -> Concyclic D B A C.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_21 : forall A B C D,
 Concyclic A B C D -> Concyclic D B C A.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_22 : forall A B C D,
 Concyclic A B C D -> Concyclic D C A B.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.

Lemma concyclic_perm_23 : forall A B C D,
 Concyclic A B C D -> Concyclic D C B A.
Proof.
intros A B C D H.
destruct H as [H1 [X H2]].
split; [Cop|spliter; exists X; repeat split; CongR..].
Qed.
(* A degenerate case: any non-collinear triple A, B, C is concyclic with
   A repeated, witnessed by the circumcenter of the triangle. *)
Lemma concyclic_1123 : forall A B C,
 ~ Col A B C ->
 Concyclic A A B C.
Proof.
intros A B C HABC.
unfold Concyclic.
split.
apply coplanar_trivial.
destruct (exists_circumcenter A B C HABC) as [G HG].
exists G.
apply circumcenter_cong in HG;spliter;repeat split;Cong.
Qed.

End Concyclic.
|
-- We import all of Lean's standard tactics
import tactic
/-
Since it's the easiest thing to start with, we first develop basic logic.
# The logical symbols that Lean understands :
* `→` ("implies" -- type with `\l`)
* `¬` ("not" -- type with `\not` or `\n`)
* `∧` ("and" -- type with `\and` or `\an`)
* `↔` ("iff" -- type with `\iff` or `\lr`)
* `∨` ("or" -- type with `\or` or `\v`)
# Useful tactics :
* `intro`
* `exact`
* `apply`
* `rw`
* `cases`
* `split`
* `left`
* `right`
-/
namespace mbl
variables (P Q R : Prop)
--We define three variables, each is a ' term of type `Prop` '
-- ### Introductory examples with implies (→)
/-- The identity implication: every proposition implies itself. -/
theorem id : P → P :=
begin
  -- let hP be a proof of P
  intro hP,
  -- then hP is a proof of P!
  exact hP
end
--This one is immediate from definition.

-- in Lean, `P → Q → R` is _defined_ to mean `P → (Q → R)`
example : (P → Q → R) ↔ (P → (Q → R)) :=
begin
  -- look at the goal!
  refl -- true because ↔ is reflexive
end

/-- A true proposition is implied by anything. -/
theorem imp_intro : P → Q → P :=
begin
  -- remember that by definition the goal is P → (Q → P),
  -- so it's P implies something, so let's assume
  -- that P is true and call this hypothesis hP.
  intro hP,
  -- Now we have to prove that Q implies P, so let's
  -- assume that Q is true, and let's call this hypothesis hQ
  intro hQ,
  -- We now have to prove that P is true.
  -- But this is exactly our hypothesis hP.
  exact hP,
end

/-- From `P` and `P → Q` we may conclude `Q`. -/
lemma modus_ponens : P → (P → Q) → Q :=
begin
  -- remember this means "P implies that ((P implies Q) implies Q)"
  -- so let's assume P is true
  intro hP,
  -- and let's assume hypothesis hPQ, that P implies Q
  intro hPQ,
  -- now `hPQ` says `P → Q` and we're trying to prove `Q`!
  -- So by applying the hypothesis `hPQ`, we can reduce
  -- this puzzle to proving `P`.
  apply hPQ,
  -- Now we have to prove `P`. But this is just an assumption
  exact hP, -- or `assumption`
end
-- ### First independent steps
/-- Implication is transitive. -/
lemma imp_trans : (P → Q) → (Q → R) → (P → R) :=
begin
  -- assume the two implications and a proof of `P`
  intros hPQ hQR hP,
  -- chain them: `hPQ hP : Q`, which `hQR` turns into `R`
  exact hQR (hPQ hP),
end
/-- Implication distributes over implication. -/
lemma forall_imp : (P → Q → R) → (P → Q) → (P → R) :=
begin
  intros hPQR hPQ hP,
  -- `hPQR hP : Q → R` and `hPQ hP : Q`, so applying gives `R`
  exact hPQR hP (hPQ hP),
end
/-
### not

`not P`, with notation `¬ P`, is *defined* to mean `P → false` in Lean,
i.e., the proposition that P implies false. You can easily check with
a truth table that P → false and ¬ P are equivalent.
-/

/-- `¬ P` unfolds definitionally to `P → false`. -/
theorem not_iff_imp_false : ¬ P ↔ (P → false) :=
begin
  -- true by definition
  refl
end
/-- Double-negation introduction (constructive). -/
theorem not_not_intro : P → ¬ (¬ P) :=
begin
  -- `¬ ¬ P` unfolds to `¬ P → false`, so take `hP : P` and `hnP : ¬ P`
  intros hP hnP,
  -- `hnP : P → false` applied to `hP` closes the goal
  exact hnP hP,
end
-- This is "modus tollens". Some mathematicians think of it as
-- "proof by contradiction".
/-- Modus tollens: from `P → Q` and `¬ Q`, conclude `¬ P`. -/
theorem modus_tollens : (P → Q) → (¬ Q → ¬ P) :=
begin
  -- assume `P → Q`, `¬ Q`, and (unfolding `¬ P`) a proof of `P`
  intros hPQ hnQ hP,
  -- `hPQ hP : Q` contradicts `hnQ`
  exact hnQ (hPQ hP),
end
-- This one cannot be proved using constructive mathematics!
-- You _have_ to use a tactic like `by_contra` (or, if you're happy
-- to cheat, the full "truth table" tactic `tauto!`.
-- Try it without using these, and you'll get stuck!
/-- Double-negation elimination. Not constructively provable: we use the
classical `by_contra` tactic. -/
theorem double_negation_elimination : ¬ (¬ P) → P :=
begin
  intro hnnP,
  -- classical step: assume `¬ P` and derive a contradiction
  by_contra hnP,
  exact hnnP hnP,
end
/-
### and
The hypothesis `hPaQ : P ∧ Q` in Lean, is equivalent to
hypotheses `hP : P` and `hQ : Q`.
If you have `hPaQ` as a hypothesis, and you want to get to
`hP` and `hQ`, you can use the `cases` tactic.
If you have `⊢ P ∧ Q` as a goal, and want to turn the goal
into two goals `⊢ P` and `⊢ Q`, then use the `split` tactic.
Note that after `split` it's good etiquette to use braces
e.g.
example (hP : P) (hQ : Q) : P ∧ Q :=
begin
split,
{ exact hP },
{ exact hQ }
end
-/
/-- Left projection of a conjunction. -/
theorem and.elim_left : P ∧ Q → P :=
begin
  intro h,
  -- if `h : P ∧ Q` then `h.1 : P`
  exact h.1,
end
/-- Right projection of a conjunction. -/
theorem and.elim_right : P ∧ Q → Q :=
begin
  intro h,
  -- `h.2 : Q` is the second component of `h : P ∧ Q`
  exact h.2,
end
/-- Conjunction introduction. -/
theorem and.intro : P → Q → P ∧ Q :=
begin
  intros hP hQ,
  -- `split` turns the goal `P ∧ Q` into the two goals `P` and `Q`
  split,
  { exact hP },
  { exact hQ },
end
/-- the eliminator for `∧` -/
theorem and.elim : P ∧ Q → (P → Q → R) → R :=
begin
  intros hPQ hPQR,
  -- feed both components of the conjunction to the function
  exact hPQR hPQ.1 hPQ.2,
end
/-- The recursor for `∧` -/
theorem and.rec : (P → Q → R) → P ∧ Q → R :=
begin
  intros hPQR hPQ,
  -- same as `and.elim` with the arguments swapped
  exact hPQR hPQ.1 hPQ.2,
end
/-- `∧` is symmetric -/
theorem and.symm : P ∧ Q → Q ∧ P :=
begin
  -- `rintro ⟨hP, hQ⟩` introduces the hypothesis and destructs it at once
  rintro ⟨hP, hQ⟩,
  -- the anonymous constructor rebuilds the conjunction the other way round
  exact ⟨hQ, hP⟩,
end
/-- `∧` is transitive -/
theorem and.trans : (P ∧ Q) → (Q ∧ R) → (P ∧ R) :=
begin
  -- destructure both hypotheses, discarding the unused `Q` components
  rintro ⟨hP, -⟩ ⟨-, hR⟩,
  exact ⟨hP, hR⟩,
end
/-
Recall that the convention for the implies sign →
is that it is _right associative_, by which
I mean that `P → Q → R` means `P → (Q → R)` by definition.
Now note that if `P` implies `Q → R`
then this means that `P` and `Q` together, imply `R`,
so `P → Q → R` is logically equivalent to `(P ∧ Q) → R`.
We proved that `P → Q → R` implied `(P ∧ Q) → R`; this was `and.rec`.
Let's go the other way.
-/
/-- Uncurrying in reverse: a function out of `P ∧ Q` gives a curried
two-argument function. -/
lemma imp_imp_of_and_imp : ((P ∧ Q) → R) → (P → Q → R) :=
begin
  intros h hP hQ,
  -- package the two hypotheses into a conjunction and apply `h`
  exact h ⟨hP, hQ⟩,
end
/-!
### iff
The basic theory of `iff`.
In Lean, to prove `P ∧ Q` you have to prove `P` and `Q`.
Similarly, to prove `P ↔ Q` in Lean, you have to prove `P → Q`
and `Q → P`. Just like `∧`, you can uses `cases h` if you have
a hypothesis `h : P ↔ Q`, and `split` if you have a goal `⊢ P ↔ Q`.
-/
/-- `P ↔ P` is true for all propositions `P`, i.e. `↔` is reflexive. -/
theorem iff.refl : P ↔ P :=
begin
split,
apply id,
apply id,
/- or tauto, tauto-/
end
-- If you get stuck, there is always the "truth table" tactic `tauto!`
-- This literally solves everything above. It's a cool thing
-- but overrelying on it today would be pointless
-- `tauto!` does classical truth-table reasoning on propositional goals.
example : P ↔ P :=
begin
tauto!, -- the "truth table" tactic.
end
-- refl tactic also works
example : P ↔ P :=
begin
refl -- `refl` knows that `=` and `↔` are reflexive.
end
/-- `↔` is symmetric -/
theorem iff.symm : (P ↔ Q) → (Q ↔ P) :=
begin
  -- swap the two directions of the iff.
  intro h,
  exact ⟨h.2, h.1⟩,
end
/-- `↔` is commutative -/
theorem iff.comm : (P ↔ Q) ↔ (Q ↔ P) :=
begin
  -- both directions are the same swap, so `;` handles them together.
  split;
  intro h;
  exact ⟨h.2, h.1⟩,
end
-- without rw or cc this is painful!
/-- `↔` is transitive -/
theorem iff.trans : (P ↔ Q) → (Q ↔ R) → (P ↔ R) :=
begin
  intros hPQ hQR,
  exact ⟨λ hP, hQR.1 (hPQ.1 hP), λ hR, hPQ.2 (hQR.2 hR)⟩,
end
-- This can be done constructively, but it's hard. You'll need to know
-- about the `have` tactic to do it. Alternatively the truth table
-- tactic `tauto!` will do it.
theorem iff.boss : ¬ (P ↔ ¬ P) :=
begin
  rintro ⟨h1, h2⟩,
  -- from `P → ¬P` we get `¬P` outright,
  have hnP : ¬ P := λ hP, h1 hP hP,
  -- and then `¬P → P` gives the contradiction.
  exact hnP (h2 hnP),
end
-- Now we have iff we can go back to and.
/-!
### ↔ and ∧
-/
/-- `∧` is commutative -/
theorem and.comm : P ∧ Q ↔ Q ∧ P :=
begin
  -- both directions just swap the components.
  split;
  rintro ⟨h1, h2⟩;
  exact ⟨h2, h1⟩,
end
-- Note that ∧ is "right associative" in Lean, which means
-- that `P ∧ Q ∧ R` is _defined to mean_ `P ∧ (Q ∧ R)`.
-- Associativity can hence be written like this:
/-- `∧` is associative -/
theorem and_assoc : ((P ∧ Q) ∧ R) ↔ (P ∧ Q ∧ R) :=
begin
  split,
  { rintro ⟨⟨hP, hQ⟩, hR⟩,
    exact ⟨hP, hQ, hR⟩ },
  { rintro ⟨hP, hQ, hR⟩,
    exact ⟨⟨hP, hQ⟩, hR⟩ },
end
/-!
## Or
`P ∨ Q` is true when at least one of `P` and `Q` are true.
Here is how to work with `∨` in Lean.
If you have a hypothesis `hPoQ : P ∨ Q` then you
can break into the two cases `hP : P` and `hQ : Q` using
`cases hPoQ with hP hQ`
If you have a _goal_ of the form `⊢ P ∨ Q` then you
need to decide whether you're going to prove `P` or `Q`.
If you want to prove `P` then use the `left` tactic,
and if you want to prove `Q` then use the `right` tactic.
-/
-- recall that P, Q, R are Propositions. We'll need S for this one.
variable (S : Prop)
-- You will need to use the `left` tactic for this one.
theorem or.intro_left : P → P ∨ Q :=
begin
  intro hP,
  left,
  exact hP,
end
theorem or.intro_right : Q → P ∨ Q :=
begin
  intro hQ,
  right,
  exact hQ,
end
/-- the eliminator for `∨`. -/
theorem or.elim : P ∨ Q → (P → R) → (Q → R) → R :=
begin
  -- case split on the disjunction, then apply the matching function.
  rintro (hP | hQ) hPR hQR,
  { exact hPR hP },
  { exact hQR hQ },
end
/-- `∨` is symmetric -/
theorem or.symm : P ∨ Q → Q ∨ P :=
begin
  rintro (hP | hQ),
  { right, exact hP },
  { left, exact hQ },
end
/-- `∨` is commutative -/
theorem or.comm : P ∨ Q ↔ Q ∨ P :=
begin
  -- after `split; rintro` there are four symmetric goals.
  split;
  rintro (h | h),
  { right, exact h },
  { left, exact h },
  { right, exact h },
  { left, exact h },
end
/-- `∨` is associative -/
theorem or.assoc : (P ∨ Q) ∨ R ↔ P ∨ Q ∨ R :=
begin
  split,
  { rintro ((hP | hQ) | hR),
    { left, exact hP },
    { right, left, exact hQ },
    { right, right, exact hR } },
  { rintro (hP | hQ | hR),
    { left, left, exact hP },
    { left, right, exact hQ },
    { right, exact hR } },
end
/-!
### More about → and ∨
-/
theorem or.imp : (P → R) → (Q → S) → P ∨ Q → R ∨ S :=
begin
  -- map each side of the disjunction through the matching function.
  rintro hPR hQS (hP | hQ),
  { left, exact hPR hP },
  { right, exact hQS hQ },
end
theorem or.imp_left : (P → Q) → P ∨ R → Q ∨ R :=
begin
  rintro hPQ (hP | hR),
  { left, exact hPQ hP },
  { right, exact hR },
end
theorem or.imp_right : (P → Q) → R ∨ P → R ∨ Q :=
begin
  rintro hPQ (hR | hP),
  { left, exact hR },
  { right, exact hPQ hP },
end
theorem or.left_comm : P ∨ Q ∨ R ↔ Q ∨ P ∨ R :=
begin
  -- Try rewriting `or.comm` and `or.assoc` to do this one quickly.
  -- Here we just do the six-way case split explicitly.
  split;
  rintro (h | h | h),
  { right, left, exact h },
  { left, exact h },
  { right, right, exact h },
  { right, left, exact h },
  { left, exact h },
  { right, right, exact h },
end
/-- the recursor for `∨` -/
theorem or.rec : (P → R) → (Q → R) → P ∨ Q → R :=
begin
  rintro hPR hQR (hP | hQ),
  { exact hPR hP },
  { exact hQR hQ },
end
theorem or_congr : (P ↔ R) → (Q ↔ S) → (P ∨ Q ↔ R ∨ S) :=
begin
  intros h1 h2,
  split,
  { rintro (hP | hQ),
    { left, exact h1.1 hP },
    { right, exact h2.1 hQ } },
  { rintro (hR | hS),
    { left, exact h1.2 hR },
    { right, exact h2.2 hS } },
end
/-!
### true and false
`true` is a true-false statement, which can be proved with the `trivial` tactic.
`false` is a true-false statement which can only be proved if you manage
to find a contradiction within your assumptions.
If you manage to end up with a hypothesis `h : false` then there's quite
a funny way to proceed, which we now explain.
If you have `h : P ∧ Q` then you can use `cases h with hP hQ` to split
into two cases.
If you have `h : false` then what do you think happens if we do `cases h`?
Hint: how many cases are there?
-/
/-- eliminator for `false` -/
theorem false.elim : false → P :=
begin
  intro h,
  -- turn the goal into `false`, which `h` proves directly.
  exfalso,
  exact h,
end
theorem and_true_iff : P ∧ true ↔ P :=
begin
  split,
  { rintro ⟨hP, h⟩,
    exact hP },
  { intro hP,
    -- `trivial` proves `true`.
    exact ⟨hP, trivial⟩ },
end
theorem or_false_iff : P ∨ false ↔ P :=
begin
  split,
  { rintro (hP | h),
    { exact hP },
    -- `false` has no constructors, so `cases` closes this goal.
    { cases h } },
  { intro hP,
    left,
    exact hP },
end
-- false.elim is handy for this one
theorem or.resolve_left : P ∨ Q → ¬P → Q :=
begin
  rintro (hP | hQ) hnP,
  { exact absurd hP hnP },
  { exact hQ },
end
-- this one you can't do constructively
theorem or_iff_not_imp_left : P ∨ Q ↔ ¬P → Q :=
begin
  split,
  { rintro (hP | hQ) hnP,
    { exact absurd hP hnP },
    { exact hQ } },
  { intro h,
    -- classical case split on `P`.
    by_cases hP : P,
    { left, exact hP },
    { right, exact h hP } },
end
end mbl
|
""" -------------------------------------------------------------------------------------------------
Search for Value from Excel
-------------------------------------------------------------------------------------------------
Source: https://github.com/d-insight/code-bank.git
License: MIT License - https://opensource.org/licenses/MIT
-------------------------------------------------------------------------------------------------
"""
# pip3 install pandas
import pandas as pd
# pip3 install numpy
import numpy as np

# Directory holding the spreadsheets to search.
DIR = 'data/'
excel_files = [DIR + name for name in ('SampleData.xlsx', 'SampleData2.xlsx')]

# Question: which representatives sell pencils?
for file in excel_files:
    # Load the workbook's first sheet into a DataFrame.
    df = pd.read_excel(file)
    # Boolean-mask the rows whose "Item" is "Pencil" and keep their "Rep" values.
    pencil = df.loc[df['Item'] == 'Pencil', 'Rep']
    # Report the file name followed by the matching representatives.
    print(file)
    print(pencil)
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.

Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.

(* The two sides are syntactically identical, so the goal is closed by
   [reflexivity]; no [Admitted] axiom is needed. *)
Lemma conj20eqsynthconj5 : forall (lv0 : natural) (lv1 : natural), (@eq natural (plus lv0 lv1) (plus lv0 lv1)).
Proof. intros; reflexivity. Qed.

QuickChick conj20eqsynthconj5.
|
-- Andreas, 2016-02-11, bug reported by sanzhiyan
module Issue610-module where
import Common.Level
open import Common.Equality
-- ⊥ is the empty type (no constructors); ⊤ is the unit record.
data ⊥ : Set where
record ⊤ : Set where

-- `set` takes its Set argument as *irrelevant* (marked by the leading dot).
data A : Set₁ where
  set : .Set → A

-- Module with an irrelevant parameter; `out` re-exposes it as an
-- irrelevant definition.
module M .(x : Set) where
  .out : Set
  out = x

.ack : A → Set
ack (set x) = M.out x

-- Because the argument of `set` is irrelevant, `set ⊤` and `set ⊥`
-- are definitionally equal, so `refl` is accepted here.
hah : set ⊤ ≡ set ⊥
hah = refl

-- SHOULD FAIL
-- NOTE(review): this file is a regression test — the two definitions
-- below exploit the equality above to inhabit ⊥ and must be rejected
-- by a sound checker.
.moo' : ⊥
moo' = subst (λ x → x) (cong ack hah) _

-- SHOULD FAIL
.moo : ⊥
moo with cong ack hah
moo | q = subst (λ x → x) q _

baa : .⊥ → ⊥
baa ()

yoink : ⊥
yoink = baa moo
|
import data.rel
import data.set.finite
import data.sym.sym2
import set_theory.cardinal.finite
import algebra.big_operators.finprod
open_locale classical big_operators
noncomputable theory
open finset
universes u v w
variables {V : Type u} {E : Type v}
structure graph (V : Type u) (E : Type v) :=
(ends : E → sym2 V)
namespace graph
section basic
variables {G : graph V E} {u v : V} {e f : E}
/-- Two vertices are adjacent when some edge has exactly them as its ends. -/
def adj (G : graph V E) : V → V → Prop :=
λ v w, ∃ (e : E), G.ends e = ⟦(v, w)⟧
/-- A vertex is incident to an edge when it is one of the edge's ends. -/
def inc (G : graph V E) : V → E → Prop :=
λ v e, v ∈ G.ends e
/-- Set of edges incident to a given vertex, aka incidence set. -/
def incidence_set (G : graph V E) (v : V) : set E := {e : E | v ∈ G.ends e}
/-- Make a graph from the digraph -/
def graph.mk {V : Type u} {E : Type v} (ends : E → sym2 V) : graph V E := { ends := ends }
@[symm] lemma adj_symm (h : G.adj u v) : G.adj v u :=
begin
  -- unpack the witnessing edge, then swap the unordered pair of ends.
  obtain ⟨e, he⟩ := h,
  refine ⟨e, _⟩,
  rw he,
  exact sym2.eq_swap,
end
/-!
A dart is an edge with a chosen orientation - graphs are naturally unoriented,
but in order to talk about things like walks, the handshaking lemma, etc you have to
pick a "direction" to traverse the edges.
-/
structure dart (G : graph V E) : Type (max u v) :=
(head : V) -- the vertex the dart starts from
(tail : V) -- the vertex the dart points to
(e : E)    -- the underlying undirected edge
(h : G.ends e = ⟦(head, tail)⟧) -- proof that `e` really joins `head` and `tail`
/-!
Flipping a dart around
-/
def reverse_dart (G : graph V E) (d : G.dart) : G.dart :=
{ head := d.tail,
  tail := d.head,
  e := d.e,
  h :=
  begin
    -- the ends form an unordered pair, so swapping head and tail is harmless.
    rw [d.h, sym2.eq_swap],
  end }
@[simp]
lemma reverse_head_tail (G : graph V E) (d : G.dart) : (G.reverse_dart d).tail = d.head :=
begin
  -- true by definition of `reverse_dart`.
  refl,
end
@[simp]
lemma reverse_tail_head (G : graph V E) (d : G.dart) : (G.reverse_dart d).head = d.tail :=
begin
  refl,
end
end basic
section walks
variables (G : graph V E)
/-!
We have a very clever definition of walks here that one of my colleagues at Waterloo
came up with. One of the issues we had when talk about walks was, when we'd try to talk
about them in an inductive way, we'd end up missing the start or end vertex. This definition
includes both in a neat way.
-/
structure walk (G : graph V E) : Type (max u v) :=
(head : V) -- starting vertex of the walk
(tail : V) -- final vertex of the walk
(darts : list G.dart) -- the oriented edges traversed, in order
-- Consecutive darts line up: shifting the head list by one gives the tail
-- list, with the walk's endpoints capping each end.
(is_walk :
[head] ++ (list.map dart.tail darts)
= (list.map dart.head darts) ++ [tail])
lemma walk_rev_head (p : walk G) :
  list.map dart.head (list.map G.reverse_dart p.darts) =
  (list.map dart.tail p.darts) :=
begin
  -- fuse the two maps; `dart.head ∘ G.reverse_dart` reduces to `dart.tail`.
  simp only [list.map_map],
  refl,
end
lemma walk_rev_tail (p : walk G) :
  list.map dart.tail (list.map G.reverse_dart p.darts) =
  (list.map dart.head p.darts) :=
begin
  simp only [list.map_map],
  refl,
end
/-!
Having seen how to write some definitions, try writing the definition of
the empty walk! Hint: By our definition, we do need a start and end vertex, so
we have to use arbitrary vertex v.
-/
def empty_walk (v : V) : walk G :=
{ head := v,
  tail := v,
  darts := [],
  -- both sides reduce to `[v]`, so `rfl` suffices.
  is_walk := rfl }
/-!
The reverse of a walk p.
-/
def reverse (p : walk G) : walk G :=
-- Walk backwards: the reversed walk should start at the old tail, end at the
-- old head, and traverse the flipped darts in reverse order.
{ head := sorry,
tail := sorry,
darts := (list.map G.reverse_dart p.darts.reverse),
-- including the above because you probably haven't seen lists in lean yet (?)
is_walk :=
begin
-- Hint: apply `congr_arg list.reverse` to `p.is_walk`, then normalise with
-- `list.reverse_append`, `list.map_reverse` and `list.map_map`.
sorry,
end }
/-!
Appending two walks p and q, where the tail of p is the head of q.
-/
def append (p q : walk G) (h : p.tail = q.head) : walk G :=
-- The concatenation should start where `p` starts and end where `q` ends;
-- the hypothesis `h` glues the two walks together at `p.tail = q.head`.
{ head := sorry,
tail := sorry,
darts := p.darts ++ q.darts,
is_walk :=
begin
-- Hint: `list.map_append` and `list.append_assoc` let you combine
-- `p.is_walk`, `q.is_walk` and `h`.
sorry,
end }
/-!
We have reachable as a definition here so that we can start talking about connectivity.
You'll find that the previous definitions of various walks are useful in showing that
reachability is an equivalence relation:
-/
/-- `u` can reach `v` when some walk starts at `u` and ends at `v`. -/
def reachable (u v : V) : Prop := ∃ (p : G.walk), p.head = u ∧ p.tail = v
namespace walk
/-!
Reachability is reflexive, i.e. a vertex can reach itself
-/
@[refl] protected lemma reachable.refl (u : V) : G.reachable u u :=
begin
  -- the dart-free walk from `u` to `u` witnesses reachability.
  exact ⟨⟨u, u, [], rfl⟩, rfl, rfl⟩,
end
protected lemma reachable.rfl {u : V} : G.reachable u u := reachable.refl _ _
/-!
If you have a walk from u to v, you have a walk from v to u
-/
@[symm] protected lemma reachable.symm {u v : V} (huv : G.reachable u v) : G.reachable v u :=
begin
-- Hint: extract the witnessing walk from `huv` and reverse it with `reverse`.
sorry,
end
/-!
If you have a walk from u to v and a walk from v to w, then you have a walk from
u to w
-/
@[trans] protected lemma reachable.trans {u v w : V} (huv : G.reachable u v) (hvw : G.reachable v w)
: G.reachable u w :=
begin
-- Hint: `append` the two witnessing walks; they meet at `v`.
sorry,
end
/-- The list of (undirected) edges a walk traverses, in order. -/
def edges {G : graph V E} (p : G.walk) : list E := list.map dart.e p.darts
/-- The list of vertices a walk visits, starting from its head. -/
def support {G : graph V E} (p : G.walk) : list V := [p.head] ++ list.map dart.head p.darts
/-! ### Trails, paths, circuits, cycles -/
/-- A *trail* is a walk with no repeating edges. -/
structure is_trail {G : graph V E} (p : G.walk) : Prop :=
(edges_nodup : p.edges.nodup)
/-- A *path* is a walk with no repeating vertices. -/
structure is_path {G : graph V E} (p : G.walk) : Prop :=
(support_nodup : p.support.nodup)
/-- A *circuit* is a nonempty trail beginning and ending at the same vertex. -/
-- extends path & need to get rid of loops
structure is_circuit {G : graph V E} (p : G.walk) : Prop :=
(start_end : p.head = p.tail)
(ne_nil : p.darts ≠ [])
/-- A *cycle* at `u : V` is a circuit at `u` whose only repeating vertex
is `u` (which appears exactly twice). -/
structure is_cycle {G : graph V E} (p : G.walk) : Prop :=
(support_nodup : p.support.tail.nodup)
end walk
end walks
section conn
/-- A graph is connected when every vertex can reach every other vertex. -/
def connected (G : graph V E) : Prop := ∀ u v : V, G.reachable u v
/-- `e` is a loop at `v`: both ends of `e` are `v`. -/
def is_loop_edge_of (G : graph V E) (v : V) (e : E) : Prop := G.ends e = sym2.diag v
/-- `e` is a loop at some vertex. -/
def is_loop_edge (G : graph V E) (e : E) : Prop := sym2.is_diag (G.ends e)
/-- Degree of `v`: the number of incident edges, with loops counted twice
(the second summand re-counts the loops at `v`). -/
def degree (G : graph V E) (v : V) : ℕ := nat.card (G.incidence_set v)
+ nat.card {e | G.is_loop_edge_of v e}
-- double count loop edges
/-!
This is a harder problem so don't sweat it - honestly I haven't even proven this
version of the handshaking lemma yet! We do have the handshaking lemma in lean,
it just has a different appearance due to different definitions.
-/
-- Handshaking lemma: summing degrees counts each edge twice
-- (once per end, and loops contribute 2 at their vertex by the
-- degree definition above).
theorem handshake (G : graph V E) [fintype V] [fintype E] :
∑ᶠ (x : V), G.degree x = 2 * (fintype.card E) :=
begin
-- open problem in this file; see the note above.
sorry,
end
/-!
From here I'm including some more of the things that I'm thinking about with graphic matroids -
it's not complete and there aren't really lemmas here to be completed, I just thought it would maybe
be instructive to see what we have to do with subgraphs.
-/
/-- `G` is `k`-regular when every vertex has degree `k`. -/
def regular (G : graph V E) (k : ℕ) : Prop := ∀ (v : V), G.degree v = k
/-- Unfolding lemma: a walk is a trail iff its edge list has no duplicates. -/
lemma is_trail_def {G : graph V E} (p : G.walk) : p.is_trail ↔ p.edges.nodup :=
⟨walk.is_trail.edges_nodup, λ h, ⟨h⟩⟩
/-- A subgraph of `G`: a subset of vertices and a subset of edges such that
every chosen edge has both of its ends among the chosen vertices. -/
structure subgraph (G : graph V E) :=
(verts : set V)
(edges : set E)
(edge_sub : ∀ (e : edges), (G.ends e).to_set ⊆ verts)
end conn
namespace subgraph
variables (G : graph V E)
/-- Give a vertex as an element of the subgraph's vertex type. -/
@[reducible]
def vert (G' : subgraph G) (v : V) (h : v ∈ G'.verts) : G'.verts := ⟨v, h⟩
/-- The ends of an edge of the subgraph, as vertices of the subgraph.
Picks a representative pair with `quotient.out` and re-packages each end
with a membership proof obtained from `edge_sub`. -/
def ends (G' : subgraph G) (e : E) (h : e ∈ G'.edges) : sym2 (G'.verts) :=
begin
refine ⟦(G'.vert _ (quotient.out (G.ends e)).1 _, G'.vert _ (quotient.out (G.ends e)).2 _)⟧,
exact set.mem_of_subset_of_mem (G'.edge_sub ⟨e, h⟩) (sym2.to_set_mem1 (G.ends e)),
exact set.mem_of_subset_of_mem (G'.edge_sub ⟨e, h⟩) (sym2.to_set_mem2 (G.ends e)),
end
-- coercion between "ends" in subgraph to graph?
-- probably easier to do with "e.other" but whatever
-- NOTE: `H` below is the anonymous hypothesis name that the Lean 3 binder
-- `∃ (e ∈ G'.edges), _` introduces for the membership proof.
def adj (G' : subgraph G) : G'.verts → G'.verts → Prop :=
λ v w, ∃ (e ∈ G'.edges), G'.ends G e H = ⟦(v, w)⟧
/-- View a subgraph as a graph on its own vertex and edge types. -/
protected def coe {G : graph V E} (G' : subgraph G) : graph G'.verts G'.edges :=
{ ends := λ e, G'.ends G e e.2 }
end subgraph
end graph
|
[STATEMENT]
lemma opp_half_apartmentI: "\<lbrakk> x\<in>X; C\<in>\<C>-f\<turnstile>\<C>; x\<subseteq>C \<rbrakk> \<Longrightarrow> x\<in>Y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>x \<in> X; C \<in> \<C> - f \<turnstile> \<C>; x \<subseteq> C\<rbrakk> \<Longrightarrow> x \<in> Y
[PROOF STEP]
using opp_half_apartment_def
[PROOF STATE]
proof (prove)
using this:
Y \<equiv> {x \<in> X. \<exists>C\<in>\<C> - f \<turnstile> \<C>. x \<subseteq> C}
goal (1 subgoal):
1. \<lbrakk>x \<in> X; C \<in> \<C> - f \<turnstile> \<C>; x \<subseteq> C\<rbrakk> \<Longrightarrow> x \<in> Y
[PROOF STEP]
by auto |
Soon after the Bechtlers began to strike their private issues, Secretary of the Treasury Levi Woodbury became an advocate of having the Mint of the United States ("Mint", when described as an institution) strike the one-dollar denomination in gold. He was opposed by the Mint Director, Robert M. Patterson. Woodbury persuaded President Andrew Jackson to have pattern coins struck. In response, Patterson had Mint Second Engraver Christian Gobrecht break off work on the new design for the silver one-dollar coin and work on a pattern for the gold dollar. Gobrecht's design featured a Liberty cap surrounded by rays on one side, and a palm branch arranged in a circle with the denomination, date, and name of the country on the other.
|
(*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*)
Require Import String.
Require Import List.
Require Import Compare_dec.
Require Import ZArith.
Require Import Utils.
Require Import BrandRelation.
Require Import ForeignData.
Require Import Data.
Require Import DataLift.
Require Import DataNorm.
Require Import ForeignOperators.
Require Import OperatorsUtils.
Require Import Iterators.
Require Export BinaryOperators.
Section BinaryOperatorsSem.
(* Algebra Unary/Binary Ops *)
(** Semantics of the binary arithmetic operators on integers ([Z]).
    Division and remainder use truncated semantics ([Z.quot]/[Z.rem]). *)
Definition nat_arith_binary_op_eval (op:nat_arith_binary_op) (z1 z2:Z) : Z :=
match op with
| NatPlus => Z.add z1 z2
| NatMinus=> Z.sub z1 z2
| NatMult => Z.mul z1 z2
| NatMin => Z.min z1 z2
| NatMax => Z.max z1 z2
| NatDiv=> Z.quot z1 z2
| NatRem => Z.rem z1 z2
end.
(** Semantics of the binary arithmetic operators on floats, delegating to
    the [float_*] primitives. *)
Definition float_arith_binary_op_eval (op:float_arith_binary_op) (f1 f2:float) : float :=
match op with
| FloatPlus => float_add f1 f2
| FloatMinus => float_sub f1 f2
| FloatMult => float_mult f1 f2
| FloatDiv => float_div f1 f2
| FloatPow => float_pow f1 f2
| FloatMin => float_min f1 f2
| FloatMax => float_max f1 f2
end.
(** Semantics of the float comparison operators, returning a [bool]. *)
Definition float_compare_binary_op_eval (op:float_compare_binary_op) (f1 f2:float) : bool :=
match op with
| FloatLt => float_lt f1 f2
| FloatLe => float_le f1 f2
| FloatGt => float_gt f1 f2
| FloatGe => float_ge f1 f2
end.
Context (h:brand_relation_t).
Context {fdata:foreign_data}.
Context {foperators:foreign_operators}.
(** Evaluation of a binary operator on two data values.
    Returns [None] when the operands do not have the type the operator
    expects (e.g. arithmetic on non-numbers). *)
Definition binary_op_eval (bop:binary_op) (d1 d2:data) : option data :=
match bop with
| OpEqual => unbdata (fun x y => if data_eq_dec x y then true else false) d1 d2
(* Record concatenation: right fields win after [rec_sort]. *)
| OpRecConcat =>
match d1, d2 with
| (drec r1), (drec r2) => Some (drec (rec_sort (r1++r2)))
| _, _ => None
end
(* Record merge: empty collection signals incompatible bindings. *)
| OpRecMerge =>
match d1, d2 with
| (drec r1), (drec r2) =>
match merge_bindings r1 r2 with
| Some x => Some (dcoll ((drec x) :: nil))
| None => Some (dcoll nil)
end
| _, _ => None
end
| OpAnd => unbdbool andb d1 d2
| OpOr => unbdbool orb d1 d2
| OpLt => unbdnat (fun x y => if Z_lt_dec x y then true else false) d1 d2
| OpLe => unbdnat (fun x y => if Z_le_dec x y then true else false) d1 d2
(* Bag operators; note [OpBagDiff] swaps its arguments for [bminus]. *)
| OpBagUnion => rondcoll2 bunion d1 d2
| OpBagDiff => rondcoll2 (@bminus data data_eq_dec) d2 d1
| OpBagMin => rondcoll2 (@bmin data data_eq_dec) d1 d2
| OpBagMax => rondcoll2 (@bmax data data_eq_dec) d1 d2
(* Nth element of a collection, as an option value ([dsome]/[dnone]). *)
| OpBagNth =>
match d1, d2 with
| (dcoll c), (dnat n) =>
let natish := ZToSignedNat n in
if (fst natish) then
match List.nth_error c (snd natish) with
| Some d => Some (dsome d)
| None => Some dnone
end
else Some dnone
| _, _ => None
end
| OpContains =>
ondcoll (fun l =>
if in_dec data_eq_dec d1 l
then dbool true else dbool false) d2
| OpStringConcat => unsdstring append d1 d2
(* Join a collection of strings with the separator [d1]. *)
| OpStringJoin =>
match d1, d2 with
| (dstring sep), (dcoll c) =>
lifted_join sep c
| _, _ => None
end
| OpNatBinary op =>
match d1, d2 with
| dnat n1, dnat n2 => Some (dnat (nat_arith_binary_op_eval op n1 n2))
| _, _ => None
end
| OpFloatBinary op =>
match d1, d2 with
| dfloat f1, dfloat f2 => Some (dfloat (float_arith_binary_op_eval op f1 f2))
| _, _ => None
end
| OpFloatCompare op =>
match d1, d2 with
| dfloat f1, dfloat f2 => Some (dbool (float_compare_binary_op_eval op f1 f2))
| _, _ => None
end
| OpForeignBinary fb => foreign_operators_binary_interp h fb d1 d2
end.
Hint Constructors data_normalized Forall : qcert.
(** Evaluation preserves normalization: if both operands are normalized and
    evaluation succeeds, the result is normalized as well.  The initial
    [try solve] discharges the uniform cases; the remaining bullets handle
    (in order) [OpRecConcat], [OpRecMerge], [OpBagNth], [OpContains],
    [OpStringJoin] and [OpForeignBinary]. *)
Lemma binary_op_eval_normalized {b d1 d2 o} :
binary_op_eval b d1 d2 = Some o ->
data_normalized h d1 -> data_normalized h d2 ->
data_normalized h o.
Proof.
destruct b; simpl; intros;
try solve [
unfold rondcoll2 in *;
destruct d1; simpl in *; try discriminate;
destruct d2; simpl in *; try discriminate;
inversion H; subst;
eauto 1;
inversion H; subst;
constructor;
inversion H0; inversion H1; subst;
solve [apply bunion_Forall; trivial
| apply bminus_Forall; trivial
| apply bmin_Forall; trivial
| apply bmax_Forall; trivial]
].
(* OpRecConcat *)
- do 2 match_destr_in H.
inversion H; clear H; subst.
apply data_normalized_rec_sort_app; trivial.
(* OpRecMerge *)
- do 2 match_destr_in H.
unfold merge_bindings in H.
destruct (Compat.compatible l l0); inversion H; qeauto.
constructor. constructor; trivial.
apply data_normalized_rec_concat_sort; qtrivial.
(* OpBagNth *)
- do 2 match_destr_in H.
destruct z; simpl in *; try discriminate.
+ destruct l; simpl in *.
inversion H; subst; repeat constructor.
inversion H; subst. inversion H0; simpl in *.
rewrite Forall_forall in H3; simpl in H3.
specialize (H3 d).
constructor.
apply H3; auto.
+ case_eq (nth_error l (Pos.to_nat p)); intros; rewrite H2 in H.
* inversion H; clear H; subst.
inversion H0; subst.
apply nth_error_In in H2.
rewrite Forall_forall in H3.
specialize (H3 d H2).
constructor; eauto.
* inversion H; subst; repeat constructor.
+ inversion H; subst; repeat constructor.
(* OpContains *)
- destruct d2; simpl in *; try discriminate.
match_destr_in H; inversion H; qeauto.
(* OpStringJoin *)
- destruct d1; destruct d2; simpl in *; try discriminate.
unfold lifted_join in H.
apply some_lift in H; destruct H; subst.
qeauto.
(* OpForeignBinary *)
- eapply foreign_operators_binary_normalized; eauto.
Qed.
End BinaryOperatorsSem.
|
```python
import numpy as np
import matplotlib.pyplot as plt
import sys
%load_ext autoreload
%autoreload 0
```
Notebook by **Maxime Dion** <[email protected]><br>
For the QSciTech-QuantumBC virtual workshop on gate-based quantum computing
## Tutorial for Activity 3.1
For this activity, make sure you can easily import your versions of `hamiltonian.py`, `pauli_string.py` and `mapping.py`. Placing this notebook in the same `path` as these files is the easiest way to achieve this. At the end of this notebook, you should be in a good position to complete these 3 files.
The solution we suggest here is NOT mandatory. If you find ways to make it better and more efficient, go on and impress us! On the other hand, by completing all sections of this notebook you be able to :
- Create `PauliString` instances
- Multiply `PauliString` together
- Translate a PauliString into a unitary matrix of size `(2**n)**2` (optionnal)
- Create `LinearCombinaisonPauliString` instances
- Multiply and add `LinearCombinaisonPauliString` together
- Combine repeated `PauliString`s in `LinearCombinaisonPauliString`
- Use the Jordan-Wigner mapping to translate Fermionic Operators in `LinearCombinaisonPauliString`s
- Use the Jordan-Wigner mapping to map a `FermionicHamiltonian` into a `LinearCombinaisonPauliString`.
**Important**
When you modify and save a `*.py` file you need to re-import it so that your modifications can be taken into account when you re-execute a cell. By adding the magic command `%autoreload` at the beginning of a cell, you make sure that the modifications you did to the `*.py` files are taken into account when you re-run a cell and that you can see the effect.
If you encounter unusual results, restart the kernel and try again.
**Note on numbering**
When you ask a question in the Slack channel you can refer to the section name or the section number.
To enable the section numbering, please make sure you install [nbextensions](https://jupyter-contrib-nbextensions.readthedocs.io/en/latest/install.html). It is available in the conda distribution. After installation it you need to enable the option 'Table of contents (2)'.
# PauliStrings
The `PauliString` class is partially implemented in the file `PauliString.py`.
```python
from pauli_string import PauliString
```
## Creation
This object's attributes are 2 arrays of booleans `z_bits` and `x_bits`. You can easily create an instance and `print` the result. The `__str__` method is already implemented so you can use `print()` on it. The boolean arrays in input are in the `0123` order and the output string is in the reversed `q3q2q1q0` order. Here we initialize the Pauli string `YXZI = -1j*(ZIZI)*(XXII)`.
```python
z_bits = np.array([0,1,0,1],dtype = bool)
x_bits = np.array([0,0,1,1],dtype = bool)
pauli_string = PauliString(z_bits,x_bits)
print(pauli_string)
```
YXZI
### Creation exercice
Create the `ZZXY` pauli string. Remember that the arrays are in the 0...n order, but the string representation in the reverse order. The `print` should return `ZZXY`.
```python
z_bits = np.array([1,0,1,1], dtype=bool)
x_bits = np.array([1,1,0,0], dtype=bool)
print(PauliString(z_bits,x_bits))
```
ZZXY
### Creation from string
While this will not be a useful way to create PauliString in the H2 solution, it will be practical for this notebook and for debugging. Implement the `@classmethod` method `from_str` that takes a string like `YXZI` to build a `PauliString`.
```python
%autoreload
pauli_string = PauliString.from_str('YXZI')
print(pauli_string)
```
YXZI
### Useful methods
In order to compare `PauliString`s together it's convenient to represent one as `zx_bits`, which is an array twice as long that combines `z_bits` and `x_bits`. Implement the `to_zx_bits()` method. Why not do the `to_xz_bits()` while you're at it!
You should get :
<code>
[False True False True False False True True]<br>[False False True True False True False True]
</code>
```python
%autoreload
pauli_string = PauliString.from_str('YXZI')
zx_bits = pauli_string.to_zx_bits()
print(zx_bits)
xz_bits = pauli_string.to_xz_bits()
print(xz_bits)
```
[False True False True False False True True]
[False False True True False True False True]
It's also useful to know where are the $\hat{I}$ in a `PauliString`. Implement the method that does this. You should get
<code>[ True False False False]</code>
```python
%autoreload
pauli_string = PauliString.from_str('YXZI')
ids = pauli_string.ids()
print(ids)
```
[ True False False False]
### Create with `zx`
Sometimes it will be convenient to create a `PauliString` from a single array `zx_bits`. Implement the `from_zx_bits()` method in order for this cell to return `YXZI`.
```python
%autoreload
z_bits = np.array([0,1,0,1],dtype = bool)
x_bits = np.array([0,0,1,1],dtype = bool)
zx_bits = np.concatenate((z_bits,x_bits))
pauli_string = PauliString.from_zx_bits(zx_bits)
print(pauli_string)
```
YXZI
## Multiplication with another PauliString
Multiplying `PauliString`s is essential to be able to translate Fermionic Hamiltonians into a qubit Hamiltonian.
Before you implement the method that will allow you to do this, you should experiment a bit with how boolean arrays behave. Take a look at methods like `np.dot()`, `np.logical_and()`, `np.logical_or()` and `np.logical_xor()`. In particular, notice that the addition `+` on booleans is not a (mod 2) addition (it's a `logical_or`) and the `np.sum()` method on a boolean array counts the number of 1 and returns an `int`.
```python
bits_1 = np.array([0,1,0,1],dtype = bool)
bits_2 = np.array([0,1,1,1],dtype = bool)
print(bits_1 + bits_2)
print(np.sum(bits_1))
# Experiment
print(np.dot(bits_1, bits_2))
print(np.logical_and(bits_1, bits_2))
print(np.logical_or(bits_1, bits_2))
print(np.logical_xor(bits_1, bits_2))
print(np.sum(np.logical_and(bits_1, bits_2))) # the "dot product" we need
```
[False True True True]
2
True
[False True False True]
[False True True True]
[False False True False]
2
With these considerations, implement the `mul_pauli_string(PauliString)` method in order to replicate the product
\begin{align}
\hat{I}\hat{Y}\hat{Z}\hat{Z} \times \hat{I}\hat{I}\hat{X}\hat{Z} = i \hat{I}\hat{Y}\hat{Y}\hat{I}.
\end{align}
The product return a `PauliString` and a phase (`complex`). The method `__mul__(other)` is already implemeted to call `mul_pauli_string()` so you can use `*` to do the product.
```python
%autoreload
pauli_string_1 = PauliString.from_str('IYZZ')
pauli_string_2 = PauliString.from_str('IIXZ')
new_pauli_string, phase = pauli_string_1 * pauli_string_2
print(new_pauli_string, phase)
```
IYYI 1j
Check your solution on many pairs of Pauli strings like
\begin{align}
\hat{Z}\hat{Z}\hat{Z}\hat{Z} \times \hat{X}\hat{X}\hat{X}\hat{I} = -i \hat{Y}\hat{Y}\hat{Y}\hat{Z}.
\end{align}
```python
%autoreload
pauli_string_1 = PauliString.from_str('ZZZZ')
pauli_string_2 = PauliString.from_str('XXXI')
new_pauli_string, phase = pauli_string_1 * pauli_string_2
print(new_pauli_string, phase)
```
YYYZ (-0-1j)
## Matrix representation (optional)
The matrix representation of `PauliString` will only be used to compute the exact solution of the Hamiltonian. It will not be used for quantum computing, but it's a nice way to validate your results.
Any Pauli string can be converted into a matrix. This is useful to find the exact solution for small systems. To combine the space of two qubits we use the [Kronecker product](https://en.wikipedia.org/wiki/Kronecker_product) ($\otimes$). For example, the `ZX` pauli string can be represented as the following matrix
\begin{align}
\hat{Z}_1\hat{X}_0 = \hat{Z}_1\otimes\hat{X}_0 &= \begin{pmatrix} 1 \times \hat{X}_0 & 0 \\ 0 & -1 \times \hat{X}_0 \end{pmatrix} \\
&= \begin{pmatrix} 0 & 1 & 0 & 0 \\ 1 & 0 & 0 & 0 \\ 0 & 0 & 0 & -1 \\ 0 & 0 & -1 & 0 \end{pmatrix}
\end{align}
which is expressed in the basis
\begin{align}
|00\rangle, |01\rangle, |10\rangle, |11\rangle.
\end{align}
Indeed we verify that
\begin{align}
\hat{Z}_1\hat{X}_0 |10\rangle &= - |11\rangle \\
\begin{pmatrix} 0 & 1 & 0 & 0 \\ 1 & 0 & 0 & 0 \\ 0 & 0 & 0 & -1 \\ 0 & 0 & -1 & 0 \end{pmatrix}\begin{pmatrix} 0 \\ 0 \\ 1 \\ 0 \end{pmatrix} &= -\begin{pmatrix} 0 \\ 0 \\ 0 \\ 1 \end{pmatrix}
\end{align}
The `np.kron()` method is a good way to acheive this. The previous 2 Pauli string can be turned into a $4\times 4$ matrix like this.
```python
z_matrix = np.array([[1,0],[0,-1]],dtype = int)
x_matrix = np.array([[0,1],[1,0]],dtype = int)
print(z_matrix)
print(x_matrix)
zx_matrix = np.kron(z_matrix,x_matrix)
print(zx_matrix)
```
[[ 1 0]
[ 0 -1]]
[[0 1]
[1 0]]
[[ 0 1 0 0]
[ 1 0 0 0]
[ 0 0 0 -1]
[ 0 0 -1 0]]
Implement the `to_matrix()` method for any Pauli string and try it to find the matrix form of `ZX`.
```python
%autoreload
pauli_string = PauliString.from_str('ZX')
print(pauli_string)
matrix = pauli_string.to_matrix()
print(matrix)
```
ZX
[[ 0.+0.j 1.+0.j 0.+0.j 0.+0.j]
[ 1.+0.j 0.+0.j 0.+0.j 0.+0.j]
[ 0.+0.j 0.+0.j -0.+0.j -1.+0.j]
[ 0.+0.j 0.+0.j -1.+0.j -0.+0.j]]
# LinearCombinaisonPauliString
The `LinearCombinaisonPauliString` class is partially implemented in the file `pauli_string.py`. We will use `LCPS` for short.
```python
from pauli_string import LinearCombinaisonPauliString
```
## Creation
To build a `LCPS` you only need to provide an `numpy.array` of coefficients (`complex`) and a `numpy.array` of PauliString. If they are not arrays, they will be converted. Here again the `__str__()` method is already implemented.
```python
coefs = np.array([0.5,0.5],dtype=complex)
pauli_string_1 = PauliString.from_str('IIXZ')
pauli_string_2 = PauliString.from_str('IYZZ')
pauli_strings = np.array([pauli_string_1,pauli_string_2], dtype=PauliString)
lcps = LinearCombinaisonPauliString(coefs,pauli_strings)
print(lcps)
```
2 pauli strings for 4 qubits (Real, Imaginary)
IIXZ (+0.50000,+0.00000)
IYZZ (+0.50000,+0.00000)
### Multiplication of a PauliString by a coefficient
Multiplying a `PauliString` by a number is a useful way to create a `LCPS` with only 1 `PauliString`. Implement the method `mul_coef` in the `PauliString` class so that you can easily create a `LCPS`.
```python
%autoreload
lcps_single = 1*PauliString.from_str('IIXZ')
print(lcps_single)
```
1 pauli strings for 4 qubits (Real, Imaginary)
IIXZ (+1.00000,+0.00000)
### Addition of LCPSs
The sum of two `LCPS`s is just the union of these two ensembles. Implement `add_pauli_string_linear_combinaison(LCPS)` and test your solution here. The `__add__()` method is already implemented to call `add_pauli_string_linear_combinaison(LCPS)` so you can use the `+` operator.
```python
%autoreload
lcps = 0.5*pauli_string_1 + 0.5*pauli_string_2
print(lcps)
```
2 pauli strings for 4 qubits (Real, Imaginary)
IIXZ (+0.50000,+0.00000)
IYZZ (+0.50000,+0.00000)
### Product of LCPSs
The product of two LCPSs can be computed using the distributive property of any sum. While you implement `mul_linear_combinaison_pauli_string` make sure you take into account the phase coming from the product of two PauliStrings. You can test your code on the following cell.
```python
%autoreload
lcps_1 = 1*PauliString.from_str('IIXZ')
lcps_2 = 1*PauliString.from_str('IYZZ')
new_lcps = lcps_1 * lcps_2
print(new_lcps)
```
1 pauli strings for 4 qubits (Real, Imaginary)
IYYI (+0.00000,-1.00000)
You should get:
<code>
1 pauli strings for 4 qubits (Real, Imaginary)<br>IYYI (+0.00000,-1.00000)
</code>
With addition and multiplication, LCPSs are much more convenient to work with than PauliStrings because they carry the possible phase from the product.
## Accessing subset of a LCPS
A `__getitem__()` method is already implemented to access subset of the `LCPS`. You can use indices and slices, like a `list` or an `np.array`.
```python
lcps = 1*PauliString.from_str('IIIZ') + 1*PauliString.from_str('IIZI') + 1*PauliString.from_str('IZII') + 1*PauliString.from_str('ZIII')
print(lcps[0])
print(lcps[1:3])
print(lcps[-1])
print(lcps[np.array([False, True, True, False])])
```
1 pauli strings for 4 qubits (Real, Imaginary)
IIIZ (+1.00000,+0.00000)
2 pauli strings for 4 qubits (Real, Imaginary)
IIZI (+1.00000,+0.00000)
IZII (+1.00000,+0.00000)
1 pauli strings for 4 qubits (Real, Imaginary)
ZIII (+1.00000,+0.00000)
2 pauli strings for 4 qubits (Real, Imaginary)
IIZI (+1.00000,+0.00000)
IZII (+1.00000,+0.00000)
## Useful methods
You already implemented `to_zx_bits`, `to_xz_bits` and `ids` for the PauliString. Implement similar methods for LCPS where the bits are stored in a dim = 2 array.
```python
%autoreload
print('zx_bits')
zx_bits = lcps.to_zx_bits()
print(zx_bits)
```
zx_bits
[[ True False False False False False False False]
[False True False False False False False False]
[False False True False False False False False]
[False False False True False False False False]]
```python
%autoreload
print('xz_bits')
xz_bits = lcps.to_xz_bits()
print(xz_bits)
```
xz_bits
[[False False False False True False False False]
[False False False False False True False False]
[False False False False False False True False]
[False False False False False False False True]]
```python
%autoreload
print('ids')
ids = lcps.ids()
print(ids)
```
ids
[[False True True True]
[ True False True True]
[ True True False True]
[ True True True False]]
## Combinaison and threshold
When a `PauliString` is present many times in a LCPS, it is convenient to be able to remove extra occurrences by combining the respective coefficients. Let's take the example from the presentation.
```python
lcps_1 = 1*PauliString.from_str('IIIZ') + -0.5*PauliString.from_str('IIZZ')
lcps_2 = 1*PauliString.from_str('ZZZI') + 0.5*PauliString.from_str('ZZII')
lcps_3 = lcps_1 * lcps_2
print(lcps_3)
```
4 pauli strings for 4 qubits (Real, Imaginary)
ZZZZ (+1.00000,+0.00000)
ZZIZ (+0.50000,+0.00000)
ZZIZ (-0.50000,+0.00000)
ZZZZ (-0.25000,+0.00000)
We see that `ZZZZ` occurs 2 times and `ZZIZ` occurs 2 times as well.
Implement the `combine()` method to reduce the `LCPS` to 2 `PauliString`s. There are many ways to do that. Suggestion, convert with `to_zx_bits` and use the `np.unique()` method. DO NOT remove `PauliString`s with `0` coef yet.
```python
%autoreload
lcps_combined = lcps_3.combine()
print(lcps_combined)
```
2 pauli strings for 4 qubits (Real, Imaginary)
ZZIZ (+0.00000,+0.00000)
ZZZZ (+0.75000,+0.00000)
You should get:
<code>
2 pauli strings for 4 qubits (Real, Imaginary)<br>ZZIZ (+0.00000,+0.00000)<br>ZZZZ (+0.75000,+0.00000)
</code>
Implement the `apply_threshold` method to get rid of any Pauli string with a coefficient smaller than the `threshold`.
```python
%autoreload
lcps = lcps_combined.apply_threshold()
print(lcps)
```
1 pauli strings for 4 qubits (Real, Imaginary)
ZZZZ (+0.75000,+0.00000)
You should get:
<code>
1 pauli strings for 4 qubits (Real, Imaginary)<br>ZZZZ (+0.75000,+0.00000)
</code>
## Sort
The resulting LCPS after `combine` and `apply_threshold` can be pretty random. It's useful to be able to sort. We suggest you implement the `sort` method using the `zx_bits`.
```python
%autoreload
lcps = (lcps_1 + lcps_2).sort()
print(lcps)
```
4 pauli strings for 4 qubits (Real, Imaginary)
IIIZ (+1.00000,+0.00000)
IIZZ (-0.50000,+0.00000)
ZZII (+0.50000,+0.00000)
ZZZI (+1.00000,+0.00000)
## Matrix representation (optional)
The LCPS can be represented as a matrix. This matrix is just the linear combinaison of the matrices representing each Pauli string. Implement the `to_matrix()` method.
```python
%autoreload
small_lcps = 1*PauliString.from_str('ZZ') + 2*PauliString.from_str('XX')
matrix = small_lcps.to_matrix()
print(matrix)
```
[[ 1.+0.j 0.+0.j 0.+0.j 2.+0.j]
[ 0.+0.j -1.+0.j 2.+0.j 0.+0.j]
[ 0.+0.j 2.+0.j -1.+0.j 0.+0.j]
[ 2.+0.j 0.+0.j 0.+0.j 1.+0.j]]
You should get :
<code>
[[ 1.+0.j 0.+0.j 0.+0.j 2.+0.j]<br> [ 0.+0.j -1.+0.j 2.+0.j 0.+0.j]<br> [ 0.+0.j 2.+0.j -1.+0.j 0.+0.j]<br> [ 2.+0.j 0.+0.j 0.+0.j 1.+0.j]]
</code>
# Mapping
You are now in good position to implement your first mapping. The abstract class `Mapping` and the class `JordanWigner` are partially implemented in the file `Mapping.py`.
```python
from mapping import JordanWigner
```
## Jordan-Wigner
The goal of the mapping is to translate creation and annihilation fermionic operators into `LinearCombinaisonPauliString`. You now need to implement `fermionic_operator_linear_combinaison_pauli_string()` of the `JordanWigner` class. It should return 2 lists of `LinearCombinaisonPauliString`s, one `list` for the 4 creation operators and one `list` for the 4 annihilation operators. You can make use of the addition and multiplication methods you implemented earlier.
Refer to the presentation of activity 3.1 for the general structure of the Jordan-Wigner mapping. Make sure your method works for different numbers of qubits.
```python
%autoreload
mapping = JordanWigner()
aps, ams = mapping.fermionic_operator_linear_combinaison_pauli_string(4)
print(len(aps), 'creation operators')
print(len(ams), 'annihilation operators')
print('Creation operators')
for ap in aps:
print(ap)
print()
print('Annihilation operators')
for am in ams:
print(am)
```
4 creation operators
4 annihilation operators
Creation operators
2 pauli strings for 4 qubits (Real, Imaginary)
IIIX (+0.50000,+0.00000)
IIIY (-0.00000,-0.50000)
2 pauli strings for 4 qubits (Real, Imaginary)
IIXZ (+0.50000,+0.00000)
IIYZ (-0.00000,-0.50000)
2 pauli strings for 4 qubits (Real, Imaginary)
IXZZ (+0.50000,+0.00000)
IYZZ (-0.00000,-0.50000)
2 pauli strings for 4 qubits (Real, Imaginary)
XZZZ (+0.50000,+0.00000)
YZZZ (-0.00000,-0.50000)
Annihilation operators
2 pauli strings for 4 qubits (Real, Imaginary)
IIIX (+0.50000,+0.00000)
IIIY (+0.00000,+0.50000)
2 pauli strings for 4 qubits (Real, Imaginary)
IIXZ (+0.50000,+0.00000)
IIYZ (+0.00000,+0.50000)
2 pauli strings for 4 qubits (Real, Imaginary)
IXZZ (+0.50000,+0.00000)
IYZZ (+0.00000,+0.50000)
2 pauli strings for 4 qubits (Real, Imaginary)
XZZZ (+0.50000,+0.00000)
YZZZ (+0.00000,+0.50000)
For the creation operators you should get.
<code>
Creation operators<br>2 pauli strings for 4 qubits (Real, Imaginary)<br>IIIX (+0.50000,+0.00000)<br>IIIY (-0.00000,-0.50000)<br>2 pauli strings for 4 qubits (Real, Imaginary)
IIXZ (+0.50000,+0.00000)<br>IIYZ (-0.00000,-0.50000)<br>2 pauli strings for 4 qubits (Real, Imaginary)<br>IXZZ (+0.50000,+0.00000)<br>IYZZ (-0.00000,-0.50000)<br>2 pauli strings for 4 qubits (Real, Imaginary)<br>XZZZ (+0.50000,+0.00000)<br>YZZZ (-0.00000,-0.50000)
</code>
For the annihilation just reverse the sign of the imaginary part.
## Molecular Hamiltonian Check
To map the H2 `MolecularFermionicHamiltonian` we first need to get the integral tensors. You can use either one of the two solutions presented here depending on how successful you were with the installation of the now infamous `pyscf` module.
- The first method will make use of `pyscf` to build the molecule and of the `from_pyscf_mol` method you have implemented from day 2.
- The second method will simply load the integral tensors from a file. We provide this file for a H2 molecule with a distance of 0.735 Angstroms. Even if you are using the first method, you can use this one to compare your tensors to the ones provided. In particular, the two-body tensor can be a bit tricky to get right!
### Building the molecule with pyscf
**note** If you were not able to properly install and run `pyscf` on your computer, please skip this section.
Before going further, we need to make sure your `MolecularFermionicHamiltonian` implementation in `hamiltonian.py` is correct. By now you should be able to run the following cell to build a molecule with pyscf and convert it into a Fermionic Hamiltonian.
```python
%autoreload
from pyscf import gto
from hamiltonian import MolecularFermionicHamiltonian
```
```python
distance = 0.735 #units in AA
mol = gto.M(
atom = [['H', (0,0,-distance/2)], ['H', (0,0,distance/2)]],
basis = 'sto-3g'
)
molecular_hamiltonian_no_spin = MolecularFermionicHamiltonian.from_pyscf_mol(mol)
print(molecular_hamiltonian_no_spin)
```
Fermionic Hamiltonian
Number of orbitals : 2
Including spin : False
You should be able to access the integral tensors to check if their values are correct. The `get_integrals` methods for each Hamiltonian are already implemented.
```python
h1_no_spin, h2_no_spin = molecular_hamiltonian_no_spin.get_integrals()
```
```python
print(h1_no_spin)
```
[[-1.25633907 0. ]
[ 0. -0.47189601]]
Depending on the basis you've chosen, you should get
<code>
[[-1.25633907 0. ]<br> [ 0. -0.47189601]]
</code>
If you have a different order, we strongly suggest modifying your implementation of `from_pyscf_mol()` so that your order matches this one. Getting the order of the eigenvalues with `numpy.argsort` and reorganizing the eigenvalues and the eigenvectors usually does the trick.
The `h2` tensor is large to look at. We will check its validity in next section.
**note** If you want to save the integrals into a file you can use something like the following cell. Saving the atom configuration, the basis and the nuclear energy can be useful.
```python
with open('my_file.npz','wb') as f:
np.savez(f, atom=mol.atom, basis=mol.basis, energy_nuc=mol.energy_nuc(), h1=h1_no_spin, h2=h2_no_spin)
```
/Users/bhenders/opt/miniconda3/envs/qiskit/lib/python3.8/site-packages/numpy/core/_asarray.py:136: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
return array(a, dtype, copy=False, order=order, subok=True)
### Loading the integrals from a file
The file containing the integrals is named `Integrals_sto-3g_H2_d_0.7350_no_spin.npz`. These integrals are already in the molecular basis but the spin has not been included yet. Make sure it's present in the same path as this notebook or change the access path in `open` to reach it. To load the integrals just run the following cell.
```python
with open('Integrals_sto-3g_H2_d_0.7350_no_spin.npz','rb') as f:
out = np.load(f, allow_pickle=True)
h1_load_no_spin = out['h1']
h2_load_no_spin = out['h2']
print(h1_load_no_spin)
```
[[-1.25633907 0. ]
[ 0. -0.47189601]]
By the way, this `npz` also contains files with the `basis`, the `atom` and `energy_nuc` names. While `atom` describes the position of the H atoms `energy_nuc` gives the Coulomb repulsion energy between the nuclei. You can only access it if you `allow_pickle` in the `load` method (check [here](https://numpy.org/devdocs/reference/generated/numpy.load.html) for more details on that).
**Note** If you built your tensors on your own you can check that yours are equivalent to the ones in the file. If you didn't, just skip this cell. If your integrals are different, maybe you should check your implementation of `from_pyscf_mol` before going further.
```python
np.allclose(h1_no_spin, h1_load_no_spin)
```
True
```python
np.allclose(h2_no_spin, h2_load_no_spin)
```
True
All we need to do now is put this into a `MolecularFermionicHamiltonian` with the `from_integrals` method.
```python
molecular_hamiltonian_no_spin = MolecularFermionicHamiltonian.from_integrals(h1_load_no_spin,h2_load_no_spin)
```
### Add the spin
Last step to make the `MolecularFermionicHamiltonian` complete is to include the spin to double the number of basis states.
```python
molecular_hamiltonian = molecular_hamiltonian_no_spin.include_spin()
```
This method is doing a [Kronecker product](https://en.wikipedia.org/wiki/Kronecker_product) between the integral tensors in the orbital space and the spin space. Since spin up and spin down are just a copy of one another, we simply use an identity matrix.
\begin{align}
\mathbb{I}_{mn}^{\text{spin}} \otimes h_{st}^{(1) \text{orbital}} &=
\begin{pmatrix} 1_\uparrow & 0 \\ 0 & 1_\downarrow \end{pmatrix}\otimes \begin{pmatrix} E_g & 0 \\ 0 & E_u \end{pmatrix} \\
&= \begin{pmatrix} 1_\uparrow \times h^{(1) \text{orbital}} & 0 \\ 0 & 1_\downarrow \times h^{(1) \text{orbital}} \end{pmatrix} \\
h_{ij}^{(1) \text{spin-orbital}} &= \begin{pmatrix} E_{g\uparrow} & 0 & 0 & 0 \\ 0 & E_{u\uparrow} & 0 & 0 \\ 0 & 0 & E_{g\downarrow} & 0 \\ 0 & 0 & 0 & E_{u\downarrow} \end{pmatrix}
\end{align}
The index $m$, $n$, $s$ and $t$ can all take the values `0` and `1`. The Kronecker product combines $m$ and $s$ together to form $i$ while $n$ and $t$ are combined together to form $j$. Both $i$ and $j$ can now take 4 different values therefore the $4\times4$ matrix. The $\uparrow$ and $\downarrow$ in subscript are only there to identify each one belongs to which spin. In the end the values $E_{g\uparrow} = E_{g\downarrow} = E_{g}$ and $E_{u\uparrow} = E_{u\downarrow} = E_{u}$.
This is a little more complex for the 2 body term because the tensor is of dimension 4.
\begin{align}
\mathbb{I}_{mn}\mathbb{I}_{pq} \otimes h_{stuv}^{(2) \text{orbital}} &= h_{ijkl}^{(2) \text{spin-orbital}}
\end{align}
Obviously we cannot easily write this! But here the indices are combined like so
\begin{align}
(m,s) \to i, (n,t) \to j, (p,u) \to k, (q,v) \to l.
\end{align}
And if that is not complicated enough, this is when the $h_{stuv}^{(2) \text{orbital}}$ is given in the physicist notation. Remember that you can go from one notation to the other by simply using
<code>
h2_physicist = np.einsum('ijkl->iklj',h2_chemist)
</code>
or
<code>
h2_chemist = np.einsum('iklj->ijkl',h2_physicist)
</code>
## Building the Qubit Hamiltonian
The translation from a fermionic Hamiltonian to a qubit Hamiltonian depends on the kind of Hamiltonian. That's why we will implement a version of the `to_linear_combinaison_pauli_string()` method for `OneBodyFermionicHamiltonian` and for `TwoBodyFermionicHamiltonian`. All we need to provide are the creation/annihilation operators you generated in the Jordan-Wigner section.
### One body term
Let's start with the one_body part. The one body Hamiltonian is of the form
\begin{align}
\mathcal{H}_1 = \sum_{i,j} h_{ij} \hat{a}_i^\dagger \hat{a}_j
\end{align}
You should now be able to implement the `to_linear_combinaison_pauli_string(aps,ams)` method for the `OneBodyFermionicHamiltonian` class in the `Hamiltonian.py` file.
Each fermionic operator is made of 2 Pauli strings. So each term $i,j$ creates 4 Pauli strings. There are 16 terms in $h_{ij}$ so there are 64 Pauli strings (if we do not consider combining and applying a threshold yet). Your implementation should now return a LCPS of length 64.
```python
%autoreload
print(lcps * np.array([1,2,2,1]))
```
4 pauli strings for 4 qubits (Real, Imaginary)
IIIZ (+1.00000,+0.00000)
IIZZ (-1.00000,+0.00000)
ZZII (+1.00000,+0.00000)
ZZZI (+1.00000,+0.00000)
```python
%autoreload
h1_lcps = molecular_hamiltonian.one_body.to_linear_combinaison_pauli_string(aps, ams)
print(len(h1_lcps))
```
64
We see many Pauli strings with 0 coefficient as well as many repeated strings. We can now exploit `combine()` and `apply_threshold()`. Since there are many 0 terms already we do `apply_threshold()` first, then `combine()` and `apply_threshold()` again if there were any cancellations. We can finish with `sort()` for neat presentation.
```python
h1_lcps = h1_lcps.combine().apply_threshold().sort()
print(h1_lcps)
```
5 pauli strings for 4 qubits (Real, Imaginary)
IIII (-1.72824,+0.00000)
IIIZ (+0.62817,+0.00000)
IIZI (+0.23595,+0.00000)
IZII (+0.62817,+0.00000)
ZIII (+0.23595,+0.00000)
You should get :
<code>
5 pauli strings for 4 qubits (Real, Imaginary)
IIII (-1.72824,+0.00000)
IIIZ (+0.62817,+0.00000)
IIZI (+0.23595,+0.00000)
IZII (+0.62817,+0.00000)
ZIII (+0.23595,+0.00000)
</code>
**Note** : You can add these steps at the end of your `one_body.to_linear_combinaison_pauli_string` to automatically reduce the number of Pauli strings.
### Two body term
The two body Hamiltonian is of the following form :
\begin{align}
\mathcal{H}_2 = \frac{1}{2}\sum_{i,j,k,l} h_{ijkl} \hat{a}_i^\dagger\hat{a}_j^\dagger \hat{a}_k\hat{a}_l
\end{align}
You can now implement the `to_linear_combinaison_pauli_string()` method for the `TwoBodyFermionicHamiltonian` class in the `Hamiltonian.py` file.
Counting all Pauli strings you should produce 4096 Pauli strings.
```python
%autoreload
h2_lcps = molecular_hamiltonian.two_body.to_linear_combinaison_pauli_string(aps, ams)
print(len(h2_lcps))
```
4096
Applying the `combine` and `apply_threshold` this reduces to 15 Pauli strings!
```python
h2_lcps = h2_lcps.combine().apply_threshold().sort()
print(h2_lcps)
```
15 pauli strings for 4 qubits (Real, Imaginary)
IIII (+0.91769,+0.00000)
IIIZ (-0.45599,+0.00000)
IIZI (-0.46170,+0.00000)
IIZZ (+0.12091,+0.00000)
IZII (-0.45599,+0.00000)
IZIZ (+0.16893,+0.00000)
IZZI (+0.16615,+0.00000)
ZIII (-0.46170,+0.00000)
ZIIZ (+0.16615,+0.00000)
ZIZI (+0.17464,+0.00000)
ZZII (+0.12091,+0.00000)
XXXX (+0.04523,+0.00000)
XXYY (+0.04523,+0.00000)
YYXX (+0.04523,+0.00000)
YYYY (+0.04523,+0.00000)
You should get :
<code>
15 pauli strings for 4 qubits (Real, Imaginary)<br>IIII (+0.91769,+0.00000)<br>IIIZ (-0.45599,+0.00000)<br>IIZI (-0.46170,+0.00000)<br>IIZZ (+0.12091,+0.00000)<br>IZII (-0.45599,+0.00000)<br>IZIZ (+0.16893,+0.00000)<br>IZZI (+0.16615,+0.00000)<br>ZIII (-0.46170,+0.00000)<br>ZIIZ (+0.16615,+0.00000)<br>ZIZI (+0.17464,+0.00000)<br>ZZII (+0.12091,+0.00000)<br>XXXX (+0.04523,+0.00000)<br>XXYY (+0.04523,+0.00000)<br>YYXX (+0.04523,+0.00000)<br>YYYY (+0.04523,+0.00000)
</code>
### Molecular Hamiltonian
The molecular Hamiltonian is just the sum of the one and two body terms. Implement this function in `MolecularFermionicHamiltonian`. You should now be able to run this code.
```python
%autoreload
htot_lcps = molecular_hamiltonian.to_linear_combinaison_pauli_string(aps, ams)
htot_lcps = htot_lcps.combine().apply_threshold().sort()
print(htot_lcps)
```
15 pauli strings for 4 qubits (Real, Imaginary)
IIII (-0.81055,+0.00000)
IIIZ (+0.17218,+0.00000)
IIZI (-0.22575,+0.00000)
IIZZ (+0.12091,+0.00000)
IZII (+0.17218,+0.00000)
IZIZ (+0.16893,+0.00000)
IZZI (+0.16615,+0.00000)
ZIII (-0.22575,+0.00000)
ZIIZ (+0.16615,+0.00000)
ZIZI (+0.17464,+0.00000)
ZZII (+0.12091,+0.00000)
XXXX (+0.04523,+0.00000)
XXYY (+0.04523,+0.00000)
YYXX (+0.04523,+0.00000)
YYYY (+0.04523,+0.00000)
You should get :
<code>
15 pauli strings for 4 qubits (Real, Imaginary)<br>IIII (-0.81055,+0.00000)<br>IIIZ (+0.17218,+0.00000)<br>IIZI (-0.22575,+0.00000)<br>IIZZ (+0.12091,+0.00000)<br>IZII (+0.17218,+0.00000)<br>IZIZ (+0.16893,+0.00000)<br>IZZI (+0.16615,+0.00000)<br>ZIII (-0.22575,+0.00000)<br>ZIIZ (+0.16615,+0.00000)<br>ZIZI (+0.17464,+0.00000)<br>ZZII (+0.12091,+0.00000)<br>XXXX (+0.04523,+0.00000)<br>XXYY (+0.04523,+0.00000)<br>YYXX (+0.04523,+0.00000)<br>YYYY (+0.04523,+0.00000)
</code>
## You have completed your first mapping of H2!
What now? The next step is to use this mapping to evaluate the Hamiltonian on a quantum computer. This is the topic of activity 3.2.
## Bonus
In the process we built the `to_matrix()` method. Applying it to the `htot_lcps` will give you the matrix representation of the whole molecular Hamiltonian. You can find the exact solution to this Hamiltonian by diagonalizing this matrix. The lowest eigenvalue and its associated eigenvector are the ground state energy/state vector.
Now you could use a `for` loop over an array of distances (say from 0.3 A to 2.5 A) to successively build H2 molecules, map each one to a `LCPS` and finally to a matrix to find its ground state. You could plot the dissociation curve of the H2 molecule. Just don't forget to add the nucleus-nucleus energy that you can get from `mol.energy_nuc()`.
Notebook by **Maxime Dion** <[email protected]><br>
For the QSciTech-QuantumBC virtual workshop on gate-based quantum computing
```python
%autoreload
n = 50
distances = np.linspace(0.3, 2.5, n)
gs_energies = np.zeros(n)
# Jordan-Wigner Mapping of 4 states to 4 qubits
aps, ams = mapping.fermionic_operator_linear_combinaison_pauli_string(4)
for i, distance in enumerate(distances): #units in AA
print('Trying Distance '+str(i+1), end="\r")
mol = gto.M(
atom = [['H', (0,0,-distance/2)], ['H', (0,0,distance/2)]],
basis = 'sto-3g'
)
# build the molecular Hamiltonian
molecular_hamiltonian_no_spin = MolecularFermionicHamiltonian.from_pyscf_mol(mol)
molecular_hamiltonian = molecular_hamiltonian_no_spin.include_spin()
# map the Hamiltonian to a LCPS
htot_lcps = molecular_hamiltonian.to_linear_combinaison_pauli_string(aps, ams)
htot_lcps = htot_lcps.combine().apply_threshold().sort()
# diagonalize the Hamiltonian to get energies
Eh2, _ = np.linalg.eigh(htot_lcps.to_matrix())
gs_energies[i] = Eh2[0] + mol.energy_nuc()
print("Done! ", end="\r")
```
Done!
```python
# plot dissociation curve of H2
fig, ax = plt.subplots(1, 1, figsize=(8,8))
ax.plot(distances, gs_energies, 'r')
ax.set_xlabel(r'Internuclear Distance / $\AA$', fontsize=20)
ax.set_ylabel('Energy / $E_h$', fontsize=20)
ax.set_title('Dissociation Curve of H2', fontsize=28)
# fig.savefig('H2_classical.png')
plt.show()
```
### Try the Same with LiH
```python
%autoreload
n = 30
distances = np.linspace(0.5, 4, n)
gs_energies = np.zeros(n)
# Just ignore the p orbitals for now to see if it will run
small_basis = gto.basis.parse('''
Li S
0.3683820000E+02 0.6966866381E-01
0.5481720000E+01 0.3813463493E+00
0.1113270000E+01 0.6817026244E+00
Li S
0.5402050000E+00 -0.2631264058E+00
0.1022550000E+00 0.1143387418E+01
''')
# Jordan-Wigner Mapping of 6 states to 6 qubits
aps, ams = mapping.fermionic_operator_linear_combinaison_pauli_string(6)
for i, distance in enumerate(distances): #units in AA
print('Trying Distance '+str(i+1), end="\r")
mol = gto.M(
atom = [['H', (0,0,-distance/2)], ['Li', (0,0,distance/2)]],
basis = {'H': 'sto-3g', 'Li': small_basis}
)
# build the molecular Hamiltonian
molecular_hamiltonian_no_spin = MolecularFermionicHamiltonian.from_pyscf_mol(mol)
molecular_hamiltonian = molecular_hamiltonian_no_spin.include_spin()
# map the Hamiltonian to a LCPS
htot_lcps = molecular_hamiltonian.to_linear_combinaison_pauli_string(aps, ams)
htot_lcps = htot_lcps.combine().apply_threshold().sort()
# diagonalize the Hamiltonian to get energies
Eh2, _ = np.linalg.eigh(htot_lcps.to_matrix())
gs_energies[i] = Eh2[0] + mol.energy_nuc()
print("Done! ")
```
Done!
```python
# plot dissociation curve of LiH
fig, ax = plt.subplots(1, 1, figsize=(8,8))
ax.plot(distances, gs_energies, 'r')
ax.set_xlabel(r'Internuclear Distance / $\AA$', fontsize=20)
ax.set_ylabel('Energy / $E_h$', fontsize=20)
ax.set_title('Dissociation Curve of LiH', fontsize=28)
# fig.savefig('LiH_classical.png')
plt.show()
```
```python
```
|
# Print a greeting followed by a newline.
# Fix: the original string used "/n" (a literal slash and 'n') instead of
# the newline escape sequence "\n".
cat("hello world\n")
[STATEMENT]
lemma edges_of_walkE: "(v,w) \<in> edges_of_walk xs \<Longrightarrow> \<exists>xs_pre xs_post. xs = xs_pre @ v # w # xs_post"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (v, w) \<in> edges_of_walk xs \<Longrightarrow> \<exists>xs_pre xs_post. xs = xs_pre @ v # w # xs_post
[PROOF STEP]
unfolding edges_of_walk_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (v, w) \<in> {uu_. \<exists>v w xs_pre xs_post. uu_ = (v, w) \<and> xs = xs_pre @ v # w # xs_post} \<Longrightarrow> \<exists>xs_pre xs_post. xs = xs_pre @ v # w # xs_post
[PROOF STEP]
by blast |
module Compiler.ES.Node
import Compiler.ES.ES
import Compiler.Common
import Compiler.CompileExpr
import Core.Context
import Core.TT
import Utils.Path
import System
import System.File
import Data.Maybe
||| Resolve the node executable to invoke.
||| Honours the NODE environment variable when it is set; otherwise falls
||| back to resolving `node` on the PATH via `/usr/bin/env`.
findNode : IO String
findNode =
  do env <- getEnv "NODE"
     pure $ fromMaybe "/usr/bin/env node" env
||| Compile a TT expression to Node.
||| Delegates to the shared ES backend (`compileToES`), tagging the output
||| with the "node" and "javascript" codegen directive names.
compileToNode : Ref Ctxt Defs ->
                ClosedTerm -> Core String
compileToNode c tm = do
  compileToES c tm ["node", "javascript"]
||| Node implementation of the `compileExpr` interface.
||| Compiles `tm` to JavaScript and writes it to `outputDir </> outfile`,
||| returning the path of the generated file on success, or throwing a
||| `FileErr` if the output file cannot be written.
compileExpr : Ref Ctxt Defs -> (tmpDir : String) -> (outputDir : String) ->
              ClosedTerm -> (outfile : String) -> Core (Maybe String)
compileExpr c tmpDir outputDir tm outfile
    = do es <- compileToNode c tm
         let out = outputDir </> outfile
         Right () <- coreLift (writeFile out es)
            | Left err => throw (FileErr out err)
         pure (Just out)
||| Node implementation of the `executeExpr` interface.
||| Compiles `tm` to a temporary `_tmp_node.js` file inside `tmpDir` and
||| runs it with the node executable found by `findNode`.
||| NOTE(review): the exit status of `system` is discarded, so a failing
||| node process is not surfaced to the caller — confirm this is intended.
executeExpr : Ref Ctxt Defs -> (tmpDir : String) -> ClosedTerm -> Core ()
executeExpr c tmpDir tm
    = do let outn = tmpDir </> "_tmp_node" ++ ".js"
         js <- compileToNode c tm
         Right () <- coreLift $ writeFile outn js
            | Left err => throw (FileErr outn err)
         node <- coreLift findNode
         coreLift $ system (node ++ " " ++ outn)
         pure ()
||| Library compilation is not supported by the Node backend: report this
||| on stdout and return `Nothing`.
compileLibrary : Ref Ctxt Defs -> (tmpDir : String) -> (outputDir : String) -> (libName : String) -> (changedModules : Maybe (List ModuleIdent)) -> Core (Maybe (String, List String))
compileLibrary c tmpDir outputDir libName changedModules = do
  coreLift $ putStrLn "Compiling to library is not supported."
  pure Nothing
||| Codegen wrapper for Node implementation.
||| Bundles the compile, execute and (unsupported) library entry points
||| into a `Codegen` record.
export
codegenNode : Codegen
codegenNode = MkCG compileExpr executeExpr compileLibrary
|
# coding: utf-8
import itertools
import numpy as np
from read_files import *
from superbubbles import find_superbubble
import multiprocessing
import sys
import functools
def print_stats(df_ref, df_ans, G, title=None, dataset=""):
    """Print classification statistics of a predicted strain assignment.

    Compares a reference node/strain assignment (``df_ref``) against a
    predicted one (``df_ans``) and prints precision, recall, genome
    fraction (GF) and the total length of false-positive nodes — per
    strain for the "infant_gut" dataset, averaged over strains otherwise.

    Parameters
    ----------
    df_ref : pandas.DataFrame
        Reference assignment: one 0/1 column per strain plus a "length"
        column with node lengths. Assumes the same index as ``df_ans``
        — TODO confirm against the caller.
    df_ans : pandas.DataFrame
        Predicted assignment, same column layout as ``df_ref``.
    G : graph
        Assembly graph; only used by the commented-out diagnostics below.
    title : str, optional
        Header line printed before the statistics.
    dataset : str, optional
        When "infant_gut", only strains "s1" and "s3" are evaluated and
        k is set to 76 instead of 126.
    """
    print()
    if title:
        print(title)
    precision_sum = 0
    recall_sum = 0
    gf_sum = 0
    trash_len_sum = 0
    strains = df_ans.columns
    # k is subtracted once per adjacent pair of nodes when summing lengths,
    # presumably the k-mer overlap between unitigs — TODO confirm.
    k = 126
    if dataset == "infant_gut":
        strains = ["s1", "s3"]
        k = 76
    for cur_s in strains:
        # Boolean masks over nodes: membership in the reference / prediction.
        real_true = df_ref[cur_s] == 1
        predicted_true = df_ans[cur_s] == 1
        TP_df = df_ref[real_true & predicted_true]
        TP = len(TP_df)
        #print_nodes(TP_df.index)
        FP_df = df_ref[~real_true & predicted_true]
        FP = len(FP_df)
        TN_df = df_ref[~real_true & ~predicted_true]
        TN = len(TN_df)
        FN_df = df_ref[real_true & ~predicted_true]
        FN = len(FN_df)
        # 1e-10 guards against division by zero for empty classes.
        precision_sum += TP / (TP+FP+1e-10)
        recall_sum += TP / (TP+FN+1e-10)
        # Total lengths corrected for the k-bp overlap between nodes.
        ref_len = df_ref.loc[real_true, "length"].sum() - k * (real_true.sum() - 1)
        TP_len = TP_df["length"].sum() - k * (TP - 1)
        genome_fraction = TP_len / ref_len
        gf_sum += genome_fraction
        FP_len = FP_df["length"].sum() - k * (FP - 1)
        trash_len_sum += FP_len
        if dataset == "infant_gut":
            # Per-strain report for the infant-gut dataset.
            print("________", cur_s)
            print("TP={} TN={} FP={} FN={}".format(TP, TN, FP, FN))
            print("precision: {0:.2f}".format(TP / (TP+FP+1e-10)))
            print(" recall: {0:.2f}".format(TP / (TP+FN+1e-10)))
            print(" GF: {0:.2f}".format(genome_fraction))
            print("FP len: {}".format(FP_len))
    #print("ref len: {}".format(ref_len))
    #print("TP len: {}".format(TP_len))
    #print("FP len: {}".format(FP_len))
    #G_sub = G.subgraph(to_double_format(df_ans[predicted_true].index))
    #print("components in DESMAN subgraph:", nx.number_weakly_connected_components(G_sub))
    if dataset == "infant_gut":
        # Per-strain stats already printed; skip the averages.
        return
    precision_mean = precision_sum / len(strains)
    recall_mean = recall_sum / len(strains)
    gf_mean = gf_sum / len(strains)
    trash_len_mean = trash_len_sum / len(strains)
    print("precision: {0:.2f}".format(precision_mean))
    print(" recall: {0:.2f}".format(recall_mean))
    print(" GF: {0:.2f}".format(gf_mean))
    print("FP len: {0:.0f}".format(trash_len_mean))
def correct_cutpoints(G, df_ans):
    """Extend each strain's node set along unambiguous graph structure.

    For every strain column in ``df_ans``, walks both forward (on ``G``)
    and backward (on the reversed graph) from each already-selected '+'
    node.  A step is taken whenever it is unambiguous: either the current
    node opens a superbubble (the walk jumps to the bubble exit) or it has
    exactly one outgoing neighbour.  Newly reached nodes are marked as
    belonging to the strain.

    Helpers ``to_double_format``, ``to_single_format`` and ``rev`` come
    from the star import of ``read_files`` — presumably they convert
    between a plain node id and the '+'/'-' oriented representation;
    TODO confirm.

    Returns
    -------
    (df_corrected_ans, bubbles_s_t)
        A corrected copy of ``df_ans`` and the list of
        ``(source, sink, reversed_flag)`` superbubbles encountered, for
        later resolution by ``correct_bubbles``.
    """
    G_rev = G.reverse()
    df_corrected_ans = df_ans.copy()
    bubbles_s_t = []
    for cur_s in df_ans.columns:
        # Oriented ids of the nodes currently assigned to this strain;
        # only '+' orientations are used as walk starting points.
        selected_nodes = to_double_format(df_ans[df_ans[cur_s] == 1].index)
        selected_nodes = [s for s in selected_nodes if s[-1] == '+']
        visited = dict.fromkeys(selected_nodes, False)
        ans = []
        for node in selected_nodes:
            # Skip nodes already reached (in either orientation).
            if visited[node] or visited.get(rev(node), False):
                continue
            visited[node] = True
            # Walk forward on G, then backward on G_rev.
            for graph, rev_flag in zip([G, G_rev], [False, True]):
                if not rev_flag:
                    rev_graph = G_rev
                else:
                    rev_graph = G
                cur_node = node
                has_bubble, t_bubb_end = find_superbubble(graph, rev_graph, node)
                unique_continue, t_unique_cont = graph.out_degree(cur_node) == 1, list(graph.neighbors(cur_node))
                # Keep stepping while the continuation is unambiguous.
                while has_bubble or unique_continue:
                    if has_bubble:
                        # Record the bubble and jump straight to its exit.
                        bubbles_s_t.append((cur_node, t_bubb_end, rev_flag))
                        t = t_bubb_end
                    else: # unique_continue
                        t = t_unique_cont[0]
                    if visited.get(t, False):
                        break
                    visited[t] = True
                    ans.append(t)
                    cur_node = t
                    has_bubble, t_bubb_end = find_superbubble(graph, rev_graph, cur_node)
                    unique_continue, t_unique_cont = graph.out_degree(cur_node) == 1, list(graph.neighbors(cur_node))
        # Mark every newly reached node as belonging to this strain.
        ans = to_single_format(ans)
        df_corrected_ans.loc[ans, cur_s] = 1
    return df_corrected_ans, bubbles_s_t
def correct_bubbles(G, df_ans, bubbles_s_t, desman_profile):
    """Assign strains to paths through each recorded superbubble.

    For every bubble ``(s, t, rev_flag)`` from ``correct_cutpoints``,
    enumerates all simple s→t paths and, by brute force over every
    assignment of one path per strain, picks the combination whose
    predicted per-node coverage profile best matches the observed node
    coverage (squared-error weighted by log node length).

    Parameters
    ----------
    G : networkx.DiGraph
        Assembly graph. Uses the pre-2.4 networkx ``G.node[...]`` attribute
        API with "cov" and "length" node attributes.
    df_ans : pandas.DataFrame
        Current strain assignment (0/1 per strain column).
    bubbles_s_t : list of (source, sink, bool)
        Bubbles recorded by ``correct_cutpoints``; the flag marks bubbles
        found on the reversed graph (source/sink are swapped back here).
    desman_profile : pandas.DataFrame
        Per-strain abundance profile across samples (rows = strains).

    Returns
    -------
    dict
        Mapping of inner bubble node -> list of strains assigned to it.
    """
    n_samples = desman_profile.shape[1]
    strains = df_ans.columns
    assignments = {}
    #print('\n', "number of bubbles:", len(bubbles_s_t), '\n')
    iter_step = 0
    for s, t, rev_flag in bubbles_s_t:
        iter_step += 1
        if iter_step % 200 == 0:
            # Progress indicator: enumeration can be slow.
            print(iter_step)
        #if iter_step == 1000:
        # break
        if rev_flag:
            # Bubble was found on the reversed graph; restore orientation.
            s, t = t, s
        paths = list(nx.all_simple_paths(G, source=s, target=t))
        # Drop the shared endpoints; only inner nodes are assigned.
        paths = [p[1:-1] for p in paths]
        inner_nodes = set(x for lst in list(paths) for x in lst)
        # Strains that pass through the bubble source.
        cur_strains_bool = df_ans.loc[to_my_int(s)]
        cur_strains = [s for s in strains if cur_strains_bool[s]]
        min_error = float("Inf")
        min_variant = None
        # Normalise the abundance profile of the involved strains.
        cur_profile = desman_profile.loc[cur_strains]
        cur_profile /= cur_profile.sum()
        # One independent path choice per strain.
        paths_s = [paths for i in range(len(cur_strains))]
        if len(paths) ** len(cur_strains) > 3000:
            # Too many combinations — skip this bubble entirely.
            # NOTE(review): skipping before min_variant is set means a later
            # `assignments.update(min_variant)` would see None if this were
            # reached with no variants; confirm bubbles always have paths.
            print('!', len(paths), len(paths) ** len(cur_strains))
            continue
        # Length-weighted mean coverage of the two endpoint nodes, used to
        # normalise inner-node coverages; 1e-10 avoids division by zero.
        mean_cov = G.node[s]["cov"] * G.node[s]["length"] + G.node[t]["cov"] * G.node[t]["length"] + 1e-10
        mean_cov /= G.node[s]["length"] + G.node[t]["length"] #- 2 * 126
        for variant in itertools.product(*paths_s):
            # Per-node bookkeeping for this candidate assignment.
            nodes_dict = {}
            for cur_node in inner_nodes:
                nodes_dict[cur_node] = {}
                nodes_dict[cur_node]["strains"] = []
                nodes_dict[cur_node]["real_cov"] = G.node[cur_node]["cov"]
                nodes_dict[cur_node]["estimated_prof"] = np.zeros(n_samples)
                nodes_dict[cur_node]["real_prof"] = np.zeros(n_samples)
            # Accumulate each strain's profile onto the nodes of its path.
            for i in range(len(cur_strains)):
                s_i = cur_strains[i]
                for cur_node in variant[i]:
                    nodes_dict[cur_node]["strains"].append(s_i)
                    nodes_dict[cur_node]["estimated_prof"] += cur_profile.loc[s_i]
            cur_error = np.zeros(n_samples)
            for node in nodes_dict:
                nodes_dict[node]["real_prof"] = nodes_dict[node]["real_cov"] / mean_cov
                node_error = np.linalg.norm(nodes_dict[node]["estimated_prof"] - nodes_dict[node]["real_prof"]) ** 2
                # Longer nodes contribute more to the error.
                cur_error += node_error * np.log(G.node[node]["length"])
                if cur_error.sum() >= min_error:
                    # Early exit: already worse than the best variant so far.
                    break
            cur_error = cur_error.sum()
            if cur_error < min_error:
                min_error = cur_error
                # Collapse the bookkeeping to just the strain lists.
                for cur_node in inner_nodes:
                    nodes_dict[cur_node] = nodes_dict[cur_node]["strains"]
                min_variant = nodes_dict
        assignments.update(min_variant)
    return assignments
def merge_bubbles_answers(df_ans, assignments):
    """Return a copy of ``df_ans`` with the bubble assignments written in.

    For each node in ``assignments``, the listed strain columns are set to
    1 on the row identified by ``to_my_int(node)``.
    """
    merged = df_ans.copy()
    for node, strains_for_node in assignments.items():
        merged.loc[to_my_int(node), strains_for_node] = 1
    return merged
def main():
    """Pipeline driver. Usage: script <dataset> <n_processes>."""
    #dataset = "g3_r1"
    dataset = sys.argv[1]
    print(dataset)
    G = read_graph(dataset)
    df_ref, df_desman, desman_profile = read_answers(G, dataset)
    print_stats(df_ref, df_desman, G, "*****Initial", dataset)
    # Stage 1: extend DESMAN answers along unambiguous continuations,
    # collecting superbubbles on the way.
    df_corrected_cutpoints, bubbles_s_t = correct_cutpoints(G, df_desman)
    print_stats(df_ref, df_corrected_cutpoints, G, "*****Continues", dataset)
    # Stage 2: resolve the bubbles in parallel, 100 bubbles per chunk.
    chunks = [bubbles_s_t[x:x + 100] for x in range(0, len(bubbles_s_t), 100)]
    bubbles_assignments = {}
    pool = multiprocessing.Pool(int(sys.argv[2]))
    n = len(chunks)
    args = zip([G] * n, [df_corrected_cutpoints] * n, chunks, [desman_profile] * n)
    assignments = pool.starmap(correct_bubbles, args)
    #assignments = pool.map(functools.partial(correct_bubbles, G=G, df_corrected_cutpoints=df_corrected_cutpoints, desman_profile=desman_profile), chunks)
    for ans in assignments:
        bubbles_assignments.update(ans)
    df_corrected_bubbles = merge_bubbles_answers(df_corrected_cutpoints, bubbles_assignments)
    print_stats(df_ref, df_corrected_bubbles, G, "*****Bubbles", dataset)


if __name__ == '__main__':
    main()
|
proposition component_diff_connected: fixes S :: "'a::metric_space set" assumes "connected S" "connected U" "S \<subseteq> U" and C: "C \<in> components (U - S)" shows "connected(U - C)" |
[STATEMENT]
lemma PO_m1_inv0_fin [iff]: "reach m1 \<subseteq> m1_inv0_fin"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. reach m1 \<subseteq> m1_inv0_fin
[PROOF STEP]
by (rule inv_rule_incr, auto del: subsetI) |
module Compiler.VMCode
import Compiler.ANF
import Core.CompileExpr
import Core.Context
import Core.Core
import Core.TT
import Libraries.Data.IntMap
import Data.List
import Data.Maybe
import Data.Vect
%default covering
||| Abstract VM registers: a distinguished return-value slot, numbered
||| locals, and a sink for results nobody reads.
public export
data Reg : Type where
     RVal : Reg -- the function's return-value register
     Loc : Int -> Reg -- numbered local variable slot
     Discard : Reg -- write target for values that are thrown away
-- VM instructions - first Reg is where the result goes, unless stated
-- otherwise.
-- As long as you have a representation of closures, and an 'apply' function
-- which adds an argument and evaluates if it's fully applied, then you can
-- translate this directly to a target language program.
public export
data VMInst : Type where
     DECLARE : Reg -> VMInst -- bring a local slot into scope
     START : VMInst -- start of the main body of the function
     ASSIGN : Reg -> Reg -> VMInst -- copy second register into the first
     MKCON : Reg -> (tag : Either Int Name) -> (args : List Reg) -> VMInst
     MKCLOSURE : Reg -> Name -> (missing : Nat) -> (args : List Reg) -> VMInst
     MKCONSTANT : Reg -> Constant -> VMInst
     APPLY : Reg -> (f : Reg) -> (a : Reg) -> VMInst
     CALL : Reg -> (tailpos : Bool) -> Name -> (args : List Reg) -> VMInst
     OP : {0 arity : Nat} -> Reg -> PrimFn arity -> Vect arity Reg -> VMInst
     -- ^ we explicitly bind arity here to silence the warnings it is shadowing
     -- an existing global definition
     EXTPRIM : Reg -> Name -> List Reg -> VMInst
     CASE : Reg -> -- scrutinee
            (alts : List (Either Int Name, List VMInst)) -> -- based on constructor tag
            (def : Maybe (List VMInst)) ->
            VMInst
     CONSTCASE : Reg -> -- scrutinee
                 (alts : List (Constant, List VMInst)) ->
                 (def : Maybe (List VMInst)) ->
                 VMInst
     PROJECT : Reg -> (value : Reg) -> (pos : Int) -> VMInst -- load field `pos` of constructor `value`
     NULL : Reg -> VMInst -- write an erased/null value
     ERROR : String -> VMInst -- crash with the given message
||| Top-level VM definitions: a function with its argument slots, a
||| foreign import with calling conventions and C types, or a definition
||| that failed to compile (runs the error code when invoked).
public export
data VMDef : Type where
     MkVMFun : (args : List Int) -> List VMInst -> VMDef
     MkVMForeign : (ccs : List String) -> (fargs : List CFType) ->
                   CFType -> VMDef
     MkVMError : List VMInst -> VMDef
||| Render a register for instruction dumps.
export
Show Reg where
  show RVal = "RVAL"
  show (Loc i) = "v" ++ show i
  show Discard = "DISCARD"
||| Assembly-like rendering of instructions, for debug dumps.
export
Show VMInst where
  show (DECLARE r) = "DECLARE " ++ show r
  show START = "START"
  show (ASSIGN r v) = show r ++ " := " ++ show v
  show (MKCON r t args)
      = show r ++ " := MKCON " ++ show t ++ " (" ++
        showSep ", " (map show args) ++ ")"
  show (MKCLOSURE r n m args)
      = show r ++ " := MKCLOSURE " ++ show n ++ " " ++ show m ++ " (" ++
        showSep ", " (map show args) ++ ")"
  show (MKCONSTANT r c) = show r ++ " := MKCONSTANT " ++ show c
  show (APPLY r f a) = show r ++ " := " ++ show f ++ " @ " ++ show a
  show (CALL r t n args)
      = show r ++ " := " ++ (if t then "TAILCALL " else "CALL ") ++
        show n ++ "(" ++ showSep ", " (map show args) ++ ")"
  show (OP r op args)
      = show r ++ " := " ++ "OP " ++
        show op ++ "(" ++ showSep ", " (map show (toList args)) ++ ")"
  show (EXTPRIM r n args)
      = show r ++ " := " ++ "EXTPRIM " ++
        show n ++ "(" ++ showSep ", " (map show args) ++ ")"
  show (CASE scr alts def)
      = "CASE " ++ show scr ++ " " ++ show alts ++ " {default: " ++ show def ++ "}"
  -- NOTE(review): CONSTCASE also renders as "CASE", so the two case
  -- forms are indistinguishable in dumps — confirm this is intended.
  show (CONSTCASE scr alts def)
      = "CASE " ++ show scr ++ " " ++ show alts ++ " {default: " ++ show def ++ "}"
  show (PROJECT r val pos)
      = show r ++ " := PROJECT(" ++ show val ++ ", " ++ show pos ++ ")"
  show (NULL r) = show r ++ " := NULL"
  show (ERROR str) = "ERROR " ++ show str
||| Render a top-level VM definition for debugging output.
export
Show VMDef where
  show (MkVMFun args body) = show args ++ ": " ++ show body
  show (MkVMForeign ccs args ret)
      = "Foreign call " ++ show ccs ++ " " ++
        show args ++ " " ++ show ret
  show (MkVMError err) = "Error: " ++ show err
||| Map an ANF variable reference to a VM register: locals keep their
||| slot number, erased references become Discard.
toReg : AVar -> Reg
toReg (ALocal i) = Loc i
toReg ANull = Discard
||| Emit PROJECT instructions binding constructor fields of `scr` to the
||| local slots in `args`, skipping fields whose slot is not in `used`.
||| `i` is the field position of the head of `args`.
projectArgs : Int -> Int -> (used : IntMap ()) -> (args : List Int) -> List VMInst
projectArgs scr i used [] = []
projectArgs scr i used (arg :: args)
    = case lookup arg used of
           Just _ => PROJECT (Loc arg) (Loc scr) i :: projectArgs scr (i + 1) used args
           Nothing => projectArgs scr (i + 1) used args
||| Usage-set contribution of one register: local slots become a
||| singleton entry, RVal/Discard contribute nothing.
collectReg : Reg -> IntMap ()
collectReg (Loc i) = singleton i ()
collectReg _ = empty
||| Local slots *read* by an instruction (result registers are excluded,
||| except DECLARE's operand); case forms recurse into branch bodies.
collectUsed : VMInst -> IntMap ()
collectUsed (DECLARE reg) = collectReg reg
collectUsed START = empty
collectUsed (ASSIGN _ val) = collectReg val
collectUsed (MKCON _ _ args) = foldMap collectReg args
collectUsed (MKCLOSURE _ _ _ args) = foldMap collectReg args
collectUsed (MKCONSTANT _ _) = empty
collectUsed (APPLY _ fn arg) = collectReg fn <+> collectReg arg
collectUsed (CALL _ _ _ args) = foldMap collectReg args
collectUsed (OP _ _ args) = foldMap collectReg args
collectUsed (EXTPRIM _ _ args) = foldMap collectReg args
collectUsed (CASE sc is mdef)
    = collectReg sc
  <+> foldMap (foldMap collectUsed . snd) is
  <+> maybe empty (foldMap collectUsed) mdef
collectUsed (CONSTCASE sc is mdef)
    = collectReg sc
  <+> foldMap (foldMap collectUsed . snd) is
  <+> maybe empty (foldMap collectUsed) mdef
collectUsed (PROJECT _ val _) = collectReg val
collectUsed (NULL _) = empty
collectUsed (ERROR _) = empty
||| Compile an ANF term to VM instructions whose result lands in the
||| target register; `tailpos` marks tail position so calls can be
||| emitted as tail calls. A Discard target generates no code.
toVM : (tailpos : Bool) -> (target : Reg) -> ANF -> List VMInst
toVM t Discard _ = []
toVM t res (AV fc (ALocal i))
    = [ASSIGN res (Loc i)]
toVM t res (AAppName fc _ n args)
    = [CALL res t n (map toReg args)]
toVM t res (AUnderApp fc n m args)
    = [MKCLOSURE res n m (map toReg args)]
toVM t res (AApp fc _ f a)
    = [APPLY res (toReg f) (toReg a)]
toVM t res (ALet fc var val body)
    -- the bound value is never in tail position
    = toVM False (Loc var) val ++ toVM t res body
toVM t res (ACon fc n ci (Just tag) args)
    = [MKCON res (Left tag) (map toReg args)]
toVM t res (ACon fc n ci Nothing args)
    = [MKCON res (Right n) (map toReg args)]
toVM t res (AOp fc _ op args)
    = [OP res op (map toReg args)]
toVM t res (AExtPrim fc _ p args)
    = [EXTPRIM res p (map toReg args)]
toVM t res (AConCase fc (ALocal scr) [MkAConAlt n ci mt args code] Nothing) -- exactly one alternative, so skip matching
    = let body = toVM t res code
          used = foldMap collectUsed body
      in projectArgs scr 0 used args ++ body
toVM t res (AConCase fc (ALocal scr) alts def)
    = [CASE (Loc scr) (map toVMConAlt alts) (map (toVM t res) def)]
  where
    -- compile one constructor alternative: project the used fields,
    -- then run the alternative's body
    toVMConAlt : AConAlt -> (Either Int Name, List VMInst)
    toVMConAlt (MkAConAlt n ci tag args code)
        = let body = toVM t res code
              used = foldMap collectUsed body
          in (maybe (Right n) Left tag, projectArgs scr 0 used args ++ body)
toVM t res (AConstCase fc (ALocal scr) alts def)
    = [CONSTCASE (Loc scr) (map toVMConstAlt alts) (map (toVM t res) def)]
  where
    toVMConstAlt : AConstAlt -> (Constant, List VMInst)
    toVMConstAlt (MkAConstAlt c code)
        = (c, toVM t res code)
toVM t res (APrimVal fc c)
    = [MKCONSTANT res c]
toVM t res (AErased fc)
    = [NULL res]
toVM t res (ACrash fc err)
    = [ERROR err]
toVM t res _
    -- fallback for remaining forms (e.g. case on a non-local scrutinee)
    = [NULL res]
||| Local slots *written* by an instruction (its result registers),
||| recursing into case branches; used to decide which locals to DECLARE.
findVars : VMInst -> List Int
findVars (ASSIGN (Loc r) _) = [r]
findVars (MKCON (Loc r) _ _) = [r]
findVars (MKCLOSURE (Loc r) _ _ _) = [r]
findVars (MKCONSTANT (Loc r) _) = [r]
findVars (APPLY (Loc r) _ _) = [r]
findVars (CALL (Loc r) _ _ _) = [r]
findVars (OP (Loc r) _ _) = [r]
findVars (EXTPRIM (Loc r) _ _) = [r]
findVars (CASE _ alts d)
    = foldMap findVarAlt alts ++ fromMaybe [] (map (foldMap findVars) d)
  where
    findVarAlt : (Either Int Name, List VMInst) -> List Int
    findVarAlt (t, code) = foldMap findVars code
findVars (CONSTCASE _ alts d)
    = foldMap findConstVarAlt alts ++ fromMaybe [] (map (foldMap findVars) d)
  where
    findConstVarAlt : (Constant, List VMInst) -> List Int
    findConstVarAlt (t, code) = foldMap findVars code
findVars (PROJECT (Loc r) _ _) = [r]
findVars _ = []
||| Prefix `code` with DECLARE instructions for every local it writes
||| that is not already in `got` (e.g. function arguments), followed by
||| a START marker before the body itself.
declareVars : List Int -> List VMInst -> List VMInst
declareVars got code
    = let vs = foldMap findVars code in
          declareAll got vs
  where
    declareAll : List Int -> List Int -> List VMInst
    declareAll got [] = START :: code
    declareAll got (i :: is)
        = if i `elem` got
             then declareAll got is
             else DECLARE (Loc i) :: declareAll (i :: got) is
||| Translate one top-level ANF definition to its VM form; constructor
||| definitions (and anything else without a VM body) yield Nothing.
export
toVMDef : ANFDef -> Maybe VMDef
toVMDef (MkAFun args body)
    = Just $ MkVMFun args (declareVars args (toVM True RVal body))
toVMDef (MkAForeign ccs cargs ret)
    = Just $ MkVMForeign ccs cargs ret
toVMDef (MkAError body)
    = Just $ MkVMError (declareVars [] (toVM True RVal body))
toVMDef _ = Nothing
||| Compile every ANF definition that has a VM form, keeping the names.
export
allDefs : List (Name, ANFDef) -> List (Name, VMDef)
allDefs = mapMaybe (\ (n, d) => do d' <- toVMDef d; pure (n, d'))
|
lemma linear_continuous_compose: fixes f :: "'a::euclidean_space \<Rightarrow> 'b::euclidean_space" and g :: "'b \<Rightarrow> 'c::real_normed_vector" assumes "continuous F f" "linear g" shows "continuous F (\<lambda>x. g(f x))" |
lemma connected_Ico[simp]: "connected {a..<b}" for a b :: "'a::linear_continuum_topology" |
import threading
import numpy as np
from tensorflow.keras.utils import Sequence
__authors__ = "Javier Naranjo, Sergi Perez and Irene Martín"
__copyright__ = "Machine Listeners Valencia"
__credits__ = ["Machine Listeners Valencia"]
__license__ = "MIT License"
__version__ = "2.0.0"
__maintainer__ = "Javier Naranjo"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "2020"
# TODO: both data generator
def threadsafe_generator(f):
    """A decorator that takes a generator function and makes it thread-safe.
    """
    def g(*a, **kw):
        # Wrap the produced generator so that next() calls are serialized.
        return threadsafe_iter(f(*a, **kw))
    return g
class threadsafe_iter:
    """Serialize access to an iterator/generator so that multiple threads
    can safely share it: a lock guarantees only one thread advances the
    underlying iterator at a time.
    """

    def __init__(self, it):
        # Underlying iterator plus the lock that guards advancement.
        self.it = it
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    def __next__(self):
        # Python 3 iteration protocol; holding the lock makes each
        # advancement atomic across threads.
        with self.lock:
            return next(self.it)
class MixupGenerator(Sequence):
    """Endless generator of mixup-augmented (X, y) batches.

    NOTE(review): subclasses keras ``Sequence`` but exposes batches via
    ``__call__`` as a generator rather than ``__len__``/``__getitem__`` —
    confirm how callers consume it.
    """
    def __init__(self, x_train, y_train, batch_size=32, alpha=0.2, shuffle=True):
        # x_train: training inputs; __data_generation unpacks a 4-D shape
        # (n, h, w, c). y_train: label array, or list of label arrays for
        # multi-output models. alpha parameterizes Beta(alpha, alpha) for
        # the per-sample mixing coefficients.
        self.X_train = x_train
        self.y_train = y_train
        self.batch_size = batch_size
        self.alpha = alpha
        self.shuffle = shuffle
        self.sample_num = len(x_train)
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    # @threadsafe_generator
    def __call__(self):
        # Yield mixed batches forever; each pass draws 2*batch_size ids and
        # mixes the two halves pairwise.
        # NOTE(review): the lock is acquired once for the generator's whole
        # lifetime, not per next() — confirm this is intended.
        with self.lock:
            while True:
                indexes = self.__get_exploration_order()
                itr_num = int(len(indexes) // (self.batch_size * 2))
                for i in range(itr_num):
                    batch_ids = indexes[i * self.batch_size * 2:(i + 1) * self.batch_size * 2]
                    X, y = self.__data_generation(batch_ids)
                    yield X, y

    def __get_exploration_order(self):
        # All sample indices, reshuffled per pass when shuffle=True.
        indexes = np.arange(self.sample_num)
        if self.shuffle:
            np.random.shuffle(indexes)
        return indexes

    def __data_generation(self, batch_ids):
        # Mix the first and second halves of batch_ids with per-sample
        # Beta-distributed weights l (same weight applied to X and y).
        _, h, w, c = self.X_train.shape  # assumes 4-D image input — TODO confirm
        l = np.random.beta(self.alpha, self.alpha, self.batch_size)
        X_l = l.reshape(self.batch_size, 1, 1, 1)
        y_l = l.reshape(self.batch_size, 1)
        X1 = self.X_train[batch_ids[:self.batch_size]]
        X2 = self.X_train[batch_ids[self.batch_size:]]
        X = X1 * X_l + X2 * (1.0 - X_l)
        if isinstance(self.y_train, list):
            # Multi-output model: mix each label array separately.
            y = []
            for y_train_ in self.y_train:
                y1 = y_train_[batch_ids[:self.batch_size]]
                y2 = y_train_[batch_ids[self.batch_size:]]
                y.append(y1 * y_l + y2 * (1.0 - y_l))
        else:
            y1 = self.y_train[batch_ids[:self.batch_size]]
            y2 = self.y_train[batch_ids[self.batch_size:]]
            y = y1 * y_l + y2 * (1.0 - y_l)
        return X, y
class MixupGeneratorKeras():
    """Mixup batch generator, generator-style.

    NOTE(review): near-duplicate of ``MixupGenerator`` except it does not
    subclass keras ``Sequence`` and ``__call__`` is wrapped with
    ``threadsafe_generator`` — consider unifying the two classes.
    """
    def __init__(self, X_train, y_train, batch_size=32, alpha=0.2, shuffle=True): # datagen=None):
        # Same parameters as MixupGenerator; alpha parameterizes
        # Beta(alpha, alpha) for the mixing coefficients.
        self.X_train = X_train
        self.y_train = y_train
        self.batch_size = batch_size
        self.alpha = alpha
        self.shuffle = shuffle
        self.sample_num = len(X_train)
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    @threadsafe_generator
    def __call__(self):
        # Yield mixup batches forever; each pass draws 2*batch_size ids
        # and mixes the halves pairwise.
        with self.lock:
            while True:
                indexes = self.__get_exploration_order()
                itr_num = int(len(indexes) // (self.batch_size * 2))
                for i in range(itr_num):
                    batch_ids = indexes[i * self.batch_size * 2:(i + 1) * self.batch_size * 2]
                    X, y = self.__data_generation(batch_ids)
                    yield X, y

    def __get_exploration_order(self):
        # All sample indices, reshuffled per pass when shuffle=True.
        indexes = np.arange(self.sample_num)
        if self.shuffle:
            np.random.shuffle(indexes)
        return indexes

    def __data_generation(self, batch_ids):
        # Pairwise mixup of the two halves of batch_ids; see
        # MixupGenerator.__data_generation for details.
        _, h, w, c = self.X_train.shape  # assumes 4-D image input — TODO confirm
        l = np.random.beta(self.alpha, self.alpha, self.batch_size)
        X_l = l.reshape(self.batch_size, 1, 1, 1)
        y_l = l.reshape(self.batch_size, 1)
        X1 = self.X_train[batch_ids[:self.batch_size]]
        X2 = self.X_train[batch_ids[self.batch_size:]]
        X = X1 * X_l + X2 * (1.0 - X_l)
        if isinstance(self.y_train, list):
            y = []
            for y_train_ in self.y_train:
                y1 = y_train_[batch_ids[:self.batch_size]]
                y2 = y_train_[batch_ids[self.batch_size:]]
                y.append(y1 * y_l + y2 * (1.0 - y_l))
        else:
            y1 = self.y_train[batch_ids[:self.batch_size]]
            y2 = self.y_train[batch_ids[self.batch_size:]]
            y = y1 * y_l + y2 * (1.0 - y_l)
        return X, y
|
```python
%matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (12, 9)
plt.rcParams["font.size"] = 18
```
# Reactor Physics
This is not a reactor physics class. However, to understand and analyze fuel management, advanced reactor designs, and spent fuel recycling, one must first have a background in reactor physics.
## Learning Objectives
- Describe criticality in terms of the life cycle of a fission neutron.
- Differentiate concepts such as neutron flux, reaction rates, cross sections, reactivity, multiplication factor.
- Calculate the multiplication factor using the 6 factor formula.
- Calculate the dynamic impact of reactivity feedback on criticality.
- Recognize the impact of moderator material choice on reactor behavior.
- Analyze reactor behavior.
- Calculate the impact of geometry on criticality.
## Neutron Multiplication Fundamentals
### Neutron interactions with matter.
\begin{align}
^1_0n + {^a_z}X \longrightarrow \left({^{a+1}_z}X\right)^* \longrightarrow
\begin{cases}
^1_0n + {^a_z}X , & \mbox{Resonance Elastic Scattering}\\
^1_0n + \left({^a_z}X\right)^* , & \mbox{Elastic Scattering}\\
{^{a+1}_z}X +\gamma , & \mbox{Radiative Capture}\\
{^{a_1}_{z_1}}X + {^{a_2}_{z_2}}X + \nu^1_0n, & \mbox{Fission}
\end{cases}
\end{align}
### Cross sections
The likelihood of each of these events is captured by cross sections.
- $\sigma_x = $ microscopic cross section $[cm^2]$
- $\Sigma_x = $ macroscopic cross section $[1/length]$
- $\Sigma_x = N\sigma_x $
- $N = $ number density of target atoms $[\#/volume]$
### Cross sections are in units of area. Explain this to your neighbor.
### Reaction Rates
- The microscopic cross section is just the likelihood of the event per unit area.
- The macroscopic cross section is just the likelihood of the event per unit area of a certain density of target isotopes.
- The reaction rate is the macroscopic cross section times the flux of incident neutrons.
\begin{align}
R_{i,j}(\vec{r}) &= N_j(\vec{r})\int dE \phi(\vec{r},E)\sigma_{i,j}(E)\\
R_{i,j}(\vec{r}) &= \mbox{reactions of type i involving isotope j } [reactions/cm^2s]\\
N_j(\vec{r}) &= \mbox{number of nuclei participating in the reactions }\\
E &= \mbox{energy}\\
\phi(\vec{r},E)&= \mbox{flux of neutrons with energy E at position i}\\
\sigma_{i,j}(E)&= \mbox{cross section}\\
\end{align}
This can be written more simply as $R_x = \Sigma_x I N$, where I is intensity of the neutron flux.
### Neutron attenuation
$$
\begin{align}
I(x) &= I_0e^{-\Sigma_t x}\\
\end{align}
$$
where
$$
\begin{align}
I(x) &= \mbox{intensity at distance x}\\
I_0 &= \mbox{initial intensity}\\
\Sigma_t &= \mbox{macroscopic total cross section} \\
x &= \mbox{distance into material [m]}\\
\end{align}
$$
```python
import math
def attenuation(distance, initial=100, sig_t=1):
    """Neutron intensity at ``distance`` into a slab: I(x) = I0 * exp(-sig_t * x).

    ``initial`` is the incident intensity I0 and ``sig_t`` the
    macroscopic total cross section.
    """
    exponent = -sig_t * distance
    return initial * math.exp(exponent)
```
Rather than intensity, one can find the probability density:
In the case of decay:
\begin{align}
P(t)dt &= \lambda e^{-\lambda t}dt
\end{align}
From this, one can find the mean lifetime of a neutron before absorption:
\begin{align}
\bar{t} &= \int_0^\infty t'P(t')dt'\\
&= \int_0^\infty t'\lambda e^{-\lambda t'}dt'\\
&= \frac{1}{\lambda}
\end{align}
In the case of attenuation:
\begin{align}
P(x)dx &= \Sigma_te^{-\Sigma_tx}dx
\end{align}
So, the mean free path is:
\begin{align}
\bar{l} &= \int_0^\infty x'P(x')dx'\\
&= \int_0^\infty x'\Sigma_te^{-\Sigma_t x'}dx'\\
&= \frac{1}{\Sigma_t}
\end{align}
```python
def prob_dens(distance, initial=100, sig_t=1):
    """Interaction probability density p(x) = sig_t * I(x) / I0 scaled by ``initial``.

    Bug fix: the original ignored its ``initial`` and ``sig_t`` arguments
    and always evaluated ``attenuation(distance, initial=100, sig_t=1)``;
    the caller's parameters now propagate through.
    """
    return sig_t * attenuation(distance, initial=initial, sig_t=sig_t)
```
```python
sig_t = 0.2
i_0 = 100
# This code plots attenuation
import numpy as np
z = np.arange(24)
y = np.arange(24)
x = np.arange(24)
for h in range(0,24):
x[h] = h
y[h] = attenuation(h, initial=i_0, sig_t=sig_t)
z[h] = prob_dens(h, initial=i_0, sig_t=sig_t)
# creates a figure and axes with matplotlib
fig, ax = plt.subplots()
scatter = plt.scatter(x, y, color='blue', s=y*20, alpha=0.4)
ax.plot(x, y, color='red')
ax.plot(x, z, color='green')
# adds labels to the plot
ax.set_ylabel('Percent of Neutrons')
ax.set_xlabel('Distance into slab')
ax.set_title('Attenuation')
# adds tooltips
import mpld3
labels = ['{0}% intensity'.format(i) for i in y]
tooltip = mpld3.plugins.PointLabelTooltip(scatter, labels=labels)
mpld3.plugins.connect(fig, tooltip)
mpld3.display()
```
/home/huff/anaconda3/lib/python3.5/site-packages/IPython/core/formatters.py:92: DeprecationWarning: DisplayFormatter._ipython_display_formatter_default is deprecated: use @default decorator instead.
def _ipython_display_formatter_default(self):
/home/huff/anaconda3/lib/python3.5/site-packages/IPython/core/formatters.py:669: DeprecationWarning: PlainTextFormatter._singleton_printers_default is deprecated: use @default decorator instead.
def _singleton_printers_default(self):
<style>
</style>
<div id="fig_el60741399191357703848285819310"></div>
### Source term
The source of neutrons in a reactor are the neutrons from fission.
\begin{align}
s &=\nu \Sigma_f \phi
\end{align}
where
\begin{align}
s &= \mbox{neutrons available for next generation of fissions}\\
\nu &= \mbox{the number born per fission}\\
\Sigma_f &= \mbox{the macroscopic fission cross section of the material}\\
\phi &= \mbox{initial neutron flux}
\end{align}
This can also be written as:
\begin{align}
s &= \nu\Sigma_f\phi\\
&= \nu\frac{\Sigma_f}{\Sigma_{a,fuel}}\frac{\Sigma_{a,fuel}}{\Sigma_a}\\
&= \eta f \Sigma_a\phi\\
\eta &= \frac{\nu\Sigma_f}{\Sigma_{a,fuel}} = \mbox{number of neutrons produced per neutron absorbed by the fuel, "neutron reproduction factor"}\\
f &= \frac{\nu\Sigma_{a,fuel}}{\Sigma_a} = \mbox{number of neutrons absorbed in the fuel per neutron absorbed anywhere, "fuel utilization factor"}\\
\end{align}
This absorption and flux term at the end seeks to capture the fact that some of the neutrons escape. However, if we assume an infinite reactor, we know that all the neutrons are eventually absorbed in either the fuel or the coolant, so we can normalize by $\Sigma_a\phi$ and therefore:
\begin{align}
k_\infty &= \frac{\eta f \Sigma_a\phi}{\Sigma_a \phi}\\
&= \eta f
\end{align}
### Fission Spectrum
$\chi(E)$ is an empirical probability density function describing the energies of prompt fission neutrons.
\begin{align}
\chi (E) &= 0.453e^{-1.036E}\sinh\left(\sqrt{2.29E}\right)\\
\end{align}
```python
import numpy as np
import math
def chi(energy):
    """Empirical prompt fission neutron energy spectrum chi(E), E in MeV."""
    decay = np.exp(-1.036 * energy)
    spread = np.sinh(np.sqrt(2.29 * energy))
    return 0.453 * decay * spread
energies = np.arange(0.0,10.0, 0.1)
plt.plot(energies, chi(energies))
plt.title(r'Prompt Neutron Energy Distribution $\chi(E)$')
plt.xlabel("Prompt Neutron Energy [MeV]")
plt.ylabel("probability")
```
#### Questions about this plot:
- What is the most likely prompt neutron energy?
- Can you write an equation for the average neutron energy?
```python
### Max function
print(max([chi(e) for e in energies]), chi(0.7))
```
0.358102702287 0.358102702287
#### Expectation Value
Recall that the average energy will be the expectation value of the probability density function.
\begin{align}
\langle E \rangle &= \int_0^\infty E\,\chi(E)\,dE
\end{align}
```python
plt.plot(energies, [chi(e)*e for e in energies])
```
### What energy neutron do we prefer for fission in U235?
### Four Factor Formula
So, then, if we consider the impact of neutron energy on the likelihood of absorption, we can break the neutrons up into a thermal and a fast group.
So, we can say more explicitly:
\begin{align}
k_\infty =& \epsilon p \eta f\\
\epsilon =& \mbox{ fast fission factor}\\
=& \frac{\mbox{neutrons produced by all fissions}}{\mbox{neutrons produced by thermal fissions}}\\
p =& \mbox{ resonance escape probability}\\
=& \frac{\mbox{neutrons that reach thermal energies}}{\mbox{number of fast neutrons that start to slow down}}
\end{align}
If we consider the non-infinite nature of most reactors, we have to add two more factors to the formula:
\begin{align}
k_{eff} =& P_{fnl} P_{tnl}\epsilon p \eta f\\
P_{fnl} =& \mbox{ fast neutron leakage probability}\\
=& \frac{\mbox{fast neutrons that do not leak from the reactor}}{\mbox{fast neutrons produced by thermal fissions}}\\
P_{tnl} =& \mbox{ thermal neutron leakage probability}\\
=& \frac{\mbox{thermal neutrons that do not leak from the reactor}}{\mbox{neutrons that reach thermal energies}}\\
\end{align}
## Neutron Transport Fundamentals
### Fast Homogenous Reactor
Consider a reactor that is:
- fast
- critical
- the fuel and coolant are a homogeneous mixture
- the reactor has only one region, no reflector ("bare" reactor)
This reactor can be described by the one group diffusion equation:
\begin{align}
D\nabla^2\phi-\Sigma_a\phi + s &= \frac{1}{v}\frac{\partial \phi}{\partial t}\\
D &= \mbox{ the diffusion coefficient}\\
\phi &= \mbox{ flux}\\
v &= \mbox{ neutron speed}
\end{align}
If the fission source, $s$ does not balance neutron absorption and leakage, then the right hand side of the one-group diffusion equation is nonzero and the power may increase or decrease with time.
### Reactivity is deviation from k
\begin{align}
\rho &= \frac{k-1}{k}
\end{align}
```python
def rho(k):
    """Reactivity, rho.

    :param k: multiplication factor
    """
    excess = k - 1
    return excess / k
# Plot reactivity as a function of k
import numpy as np
k_vals = np.arange(0.7, 2.0, 0.01)
r = [rho(k) for k in k_vals]
plt.plot(k_vals, r)
plt.ylabel(r'reactivity $\rho$ $[\frac{\delta k}{k}]$',fontsize=20,)
plt.xlabel(r'multiplication factor $k$ $[-]$',fontsize=20,)
plt.title(r'$\rho = \frac{k-1}{k}$', fontsize=20, y=1.02)
```
#### Steady state
The multiplication factor, k, can be used to adjust the source strength and reach a steady state diffusion equation:
\begin{align}
D\nabla^2\phi-\Sigma_a\phi + \frac{1}{k}\nu\Sigma_f\phi &= 0\\
\end{align}
## Diffusion Solution
```python
## This code example was adapted from
## https://github.com/marort91/AlphaEigenvalue/blob/master/RadiationTransportCoding/NeutronDiffusion_Python/NDE_CriticalityEigenvalue.ipynb
def diff(sig_tr):
    """Diffusion coefficient D = 1 / (3 * Sigma_tr)."""
    denominator = 3.0 * sig_tr
    return 1.0 / denominator
def sig_tr(e):
    """Transport cross section Sigma_tr(E) = Sigma_t(E) - mu * Sigma_s(E).

    Bug fix: the original computed the expression but never returned it,
    so every call evaluated to None.
    NOTE(review): sig_t, mu and sig_s are not defined in this cell —
    confirm they exist before this helper is used.
    """
    return sig_t(e) - mu * sig_s(e)
D = diff(0.1)
nusigf = 0.70
siga = 0.066
Lx = np.pi*((nusigf-siga)/D)**(-0.5)
N = 50;
h = Lx/(N-1)
x = np.zeros(N)
for i in range(N-1):
x[i+1] = x[i] + h
L = np.zeros((N,N))
A = np.zeros((N,N))
M = np.zeros((N,N))
for i in range(N):
L[i][i] = L[i][i] + (-2*(-D/(h**2)))
for i in range(1,N):
L[i][i-1] = L[i][i-1] + (-D/h**2)
for i in range(N-1):
L[i][i+1] = L[i][i+1] + (-D/h**2)
for i in range(N):
A[i][i] = A[i][i] + siga
M = L + A
M[0][0] = 1
M[0][1] = 0
M[N-1][N-1] = 1
M[N-1][N-2] = 0
phi0 = np.ones((N,1))
phi0[0] = 0
phi0[N-1] = 0
tol = 1e-15
k = 1.00
def is_converged(k_old, k, tol):
    """True once |k - k_old| <= tol (power-iteration convergence check)."""
    delta = np.abs(k - k_old)
    return delta <= tol
for i in range(100):
# update k
k_old = k
# solve for psi
psi = np.linalg.solve(M, nusigf*phi0)
# solve for k
k = sum(nusigf*psi)/sum(nusigf*phi0)
# solve for phi
phi0 = (1/k)*psi
phi0[0] = 0
phi0[N-1] = 0
# determine convergence
if is_converged(k_old, k, tol):
break
plt.plot(x, phi0)
plt.xlabel('Slab (cm)')
plt.ylabel('Neutron Flux')
plt.grid()
print("k-effective = ", k)
print(" approx alpha = ", rho(k) * sum(nusigf*phi0)/sum(phi0))
```
### One Group Reactor Equation
We can define a quantity, geometric bucking, as:
\begin{align}
B^2 &= \frac{1}{D}\left(\frac{\nu}{k}\Sigma_f - \Sigma_a\right)\\
\end{align}
Next, we can simplify the previous equation using this definition, so that the one-group reactor equation becomes:
\begin{align}
\nabla^2\phi + B^2\phi &= 0\\
\end{align}
To find the criticality of a reactor with a specific geometry, then, we can solve the geometric buckling equation for k:
\begin{align}
k &= \frac{\nu\Sigma_f}{DB^2 + \Sigma_a}\\
\end{align}
The buckling, B, is used to help describe the geometry of the system. The solutions of the one group reactor equation for boundary conditions corresponding to canonical shapes provide both flux and buckling formulae for each canonical shape:
#### Slab
Thickness a:
\begin{align}
\phi &= cos\left(\frac{\pi x}{a}\right)\\
B^2 &= \frac{\pi^2}{a^2}
\end{align}
#### Cylinder
Height H, Radius R:
\begin{align}
\phi &= J_0\left(\frac{\nu_0r}{R}\right)cos\left(\frac{\pi z}{H}\right)\\
B^2 &= \frac{\nu_0^2}{R^2} + \frac{\pi^2}{H^2}
\end{align}
#### Sphere
Radius R:
\begin{align}
\phi &= \left(\frac{1}{r}\right)sin\left(\frac{\pi r}{R}\right)\\
B^2 &= \frac{\pi^2}{R^2}
\end{align}
```python
def k(nusigf, diff, bsq, siga):
    """One-group multiplication factor: k = nu*Sigma_f / (D*B^2 + Sigma_a)."""
    losses = diff * bsq + siga
    return nusigf / losses
def bsq_sphere(r):
    """Geometric buckling of a bare sphere: B^2 = pi^2 / R^2."""
    pi_sq = np.pi ** 2
    return pi_sq / (r ** 2)
nusigf = 0.3
diff = 1.1
siga =0.01
fig, ax1 = plt.subplots()
radii = np.arange(0.1, 5.0, 0.01)
ax1.plot(radii, k(nusigf, diff, bsq_sphere(radii), siga), 'b-', label='k')
ax1.set_xlabel('radius (r)')
ax1.set_ylabel('k', color='b')
ax1.set_title('Criticality and Geometric Buckling of a Sphere')
for tl in ax1.get_yticklabels():
tl.set_color('b')
ax2 = ax1.twinx()
ax2.plot(radii, bsq_sphere(radii), 'darkorange', label=r'$B^2$')
ax2.set_ylabel(r'$B^2$', color='darkorange')
for tl in ax2.get_yticklabels():
tl.set_color('darkorange')
ax1.legend()
ax2.legend(loc=2)
```
### Multigroup and beyond
To capture the variation of neutron energies in the diffusion equation, we can discretize the flux into energy groups.
\begin{align}
\phi &= \sum_{g=1}^{g=G}\phi_g\\
\phi_g &= \int_{E_g}^{E_{g-1}}\phi(E)dE\rvert_{g = 1,G}
\end{align}
The diffusion coefficient also needs to be individually evaluated for each energy group such that we have a $D_g$ for each group g. Also, it is important to consider possible paths of demise for potential fission neutrons.
The derivation, based on integration, can be found in your book, pages 128-130. It is based on the following integration:
\begin{align}
\int_{E_g}^{E_{g-1}}dE\left[-\nabla D(\vec{r},E)\nabla\phi(\vec{r},E)+ \Sigma_t(\vec{r},E)\phi(\vec{r},E)\right] &= \int_{E_g}^{E_{g-1}}dE\left[\int_{E'}\Sigma_{s}(\vec{r},E'\rightarrow E)\phi(\vec{r},E') + S(\vec{r},E)\right]\\
\end{align}
Once this integration is completed for a set of group boundaries, $g\in[1,G] \rightarrow E_g\in[E_1,E_G]$, one can generically write:
\begin{align}
-\nabla D_g\nabla\phi_g+ \Sigma_{t,g}\phi_g &= \sum_{g'=1}^{g'=G}\Sigma_{s}^{g'\rightarrow g}\phi_{g'} + S\int_{E_g}^{E_{g-1}}\chi(E)dE\\
\end{align}
Recall, however, that the fission spectrum is a probability density function, so one can also write the following identities:
\begin{align}
\int_0^\infty\chi(E) &= 1\\
\chi_g &= \int_{E_g}^{E_{g-1}}\chi(E)dE\\
\sum_{g=1}^{g=G}\chi_g &= 1\\
\end{align}
This simplifies the diffusion equation:
\begin{align}
-\nabla D_g\nabla^2\phi_g+ \Sigma_{t,g}\phi_g &= \sum_{g'=1}^{g'=G}\Sigma_{s}^{g'\rightarrow g}\phi_{g'} + \chi_gS\\
\end{align}
where
\begin{align}
S &= \sum_{g'=1}^{g'=G}(\nu\Sigma_f)_{g'}\phi_{g'}
\end{align}
#### Group Removal Cross Section
The right hand side summation of the scattering cross section is confusing though. Most of the scattering is from other groups into this one, but some of the scattering is from this group into itself. Keeping that term, $\Sigma_s^{g\rightarrow g}$, on the right hand side is misleading. It's not a source of new neutrons. So, we have a change of variables that can clean this up, the group removal cross section.
\begin{align}
\Sigma_{R,g} &= \Sigma_{t,g} - \Sigma_{s}^{g\rightarrow g}
\end{align}
If we use the group removal cross section, then we arrive at the following form of the multigroup equation.
\begin{align}
-\nabla D_g\nabla^2\phi_g+ \Sigma_{R,g}\phi_g &= \sum_{g'\ne G}^{g'=G}\Sigma_{s}^{g'\rightarrow g}\phi_{g'} + \chi_g\sum_{g'=1}^{g'=G}(\nu\Sigma_f)_{g'}\phi_{g'}\\
\end{align}
### Two Group Diffusion
Let's define just two groups, fast and thermal. Let's also state that all prompt neutrons are born fast and that the diffusion coefficient and cross sections do not vary in space. With these assumptions, we arrive at the following two equations:
\begin{align}
-D_1\nabla^2\phi_1 + \Sigma_{R,1}\phi_1 &= \Sigma_{s}^{2\rightarrow 1}\phi_{2} + \left[(\nu\Sigma_f)_{1}\phi_{1} + (\nu\Sigma_f)_{2}\phi_{2}\right]\\
\end{align}
\begin{align}
- D_2\nabla\phi_2+ \Sigma_{R,2}\phi_2 &= \Sigma_{s}^{1\rightarrow 2}\phi_{1}\\
\end{align}
**Discussion: What happened to the prompt fission spectrum, $\chi_g$ in the above equations?**
**Discussion: If we neglect upscattering, which term or terms in the above two equations will disappear?**
### Criticality calculation
For criticality calculations, one might normalize the prompt fission spectrum with k as we have done before. For simplicity, one can move the scattering term to the left hand side, as if to say "diffusion, group removal, and scattering are balanced at criticality."
\begin{align}
-\nabla D_g\nabla\phi_g+ \Sigma_{R,g}\phi_g - \sum_{g'\ne G}^{g'=G}\Sigma_{s}^{g'\rightarrow g}\phi_{g'} &= \frac{1}{k}\chi_g\sum_{g'=1}^{g'=G}(\nu\Sigma_f)_{g'}\phi_{g'}\\
\end{align}
This change propagates to the explicit equations for two groups thus:
\begin{align}
- D_1\nabla^2\phi_1 + \Sigma_{R,1}\phi_1 - \Sigma_{s}^{2\rightarrow 1}\phi_{2} &= \frac{1}{k}\chi_1\left[(\nu\Sigma_f)_{1}\phi_{1} + (\nu\Sigma_f)_{2}\phi_{2}\right]\\
\end{align}
\begin{align}
- D_2\nabla^2\phi_2+ \Sigma_{R,2}\phi_2 - \Sigma_{s}^{1\rightarrow 2}\phi_{1} &= 0\\
\end{align}
### Addition of chemical shim
Together let us consider the impact of a chemical shim, some absorber introduced to the coolant.
**Discussion: In the two group equations, which parameter will change?**
In the thermal group, group 2, the removal cross section will change, because it involves absorption. The amount of chemical shim will impact the absorption cross section thus:
\begin{align}
\Sigma_{a',2} &= \Sigma_{a,2} + \Sigma_{shim,2} \\
\Sigma_{shim, 2} &= \Sigma_{a',2} - \Sigma_{a,2}\\
&= \rho_{shim}\frac{N_{avo}}{A_{shim}}\sigma_{a,shim,2}\\
\end{align}
## Wrap-up
- Describe criticality in terms of the life cycle of a fission neutron.
- Differentiate concepts such as neutron flux, reaction rates, cross sections, reactivity, multiplication factor.
- Calculate the multiplication factor using the 6 factor formula.
- Calculate the dynamic impact of reactivity feedback on criticality.
- Recognize the impact of moderator material choice on reactor behavior.
- Analyze reactor behavior.
- Calculate the impact of geometry on criticality.
## References
This section was developed to complement pages YY-ZZ of [1].
[1] N. Tsoulfanidis, The Nuclear Fuel Cycle. La Grange Park, Illinois, USA: American Nuclear Society, 2013.
```python
```
|
"""
Create train, val, test iterators for Tiny ImageNet.
Train set size: 100000
Val set size: 10000
Test set size: 10000
Number of classes: 200
Link: https://tiny-imagenet.herokuapp.com/
"""
import os
import torch
import numpy as numpy
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import os
import glob
from torch.utils.data import Dataset
from PIL import Image
EXTENSION = 'JPEG'
NUM_IMAGES_PER_CLASS = 500
CLASS_LIST_FILE = 'wnids.txt'
VAL_ANNOTATION_FILE = 'val_annotations.txt'
class TinyImageNet(Dataset):
    """Tiny ImageNet data set available from `http://cs231n.stanford.edu/tiny-imagenet-200.zip`.

    Parameters
    ----------
    root: string
        Root directory including `train`, `test` and `val` subdirectories.
        A leading `~` is expanded to the user's home directory.
    split: string
        Indicating which split to return as a data set.
        Valid option: [`train`, `test`, `val`]
    transform: torchvision.transforms
        A (series) of valid transformation(s) applied to every image.
    target_transform: callable, optional
        Kept for torchvision API parity; only reported by `__repr__` here.
    in_memory: bool
        Set to True if there is enough memory (about 5G) and want to minimize disk IO overhead.
    """

    def __init__(self, root, split='train', transform=None, target_transform=None, in_memory=False):
        self.root = os.path.expanduser(root)
        self.split = split
        self.transform = transform
        self.target_transform = target_transform
        self.in_memory = in_memory
        # BUG FIX: build the split directory from the *expanded* root
        # (self.root), not the raw argument, so paths like '~/data' work.
        self.split_dir = os.path.join(self.root, self.split)
        self.image_paths = sorted(glob.iglob(os.path.join(self.split_dir, '**', '*.%s' % EXTENSION), recursive=True))
        self.labels = {}  # fname - label number mapping
        self.images = []  # used for in-memory processing

        # build class label - number mapping (sorted wnids -> 0..199)
        with open(os.path.join(self.root, CLASS_LIST_FILE), 'r') as fp:
            self.label_texts = sorted([text.strip() for text in fp.readlines()])
        self.label_text_to_number = {text: i for i, text in enumerate(self.label_texts)}

        if self.split == 'train':
            # Training file names follow the '<wnid>_<n>.JPEG' convention,
            # so labels can be derived without reading any annotation file.
            for label_text, i in self.label_text_to_number.items():
                for cnt in range(NUM_IMAGES_PER_CLASS):
                    self.labels['%s_%d.%s' % (label_text, cnt, EXTENSION)] = i
        elif self.split == 'val':
            # Validation labels come from a tab-separated annotation file:
            # <file name>\t<wnid>\t<bbox...>
            with open(os.path.join(self.split_dir, VAL_ANNOTATION_FILE), 'r') as fp:
                for line in fp.readlines():
                    terms = line.split('\t')
                    file_name, label_text = terms[0], terms[1]
                    self.labels[file_name] = self.label_text_to_number[label_text]

        # read all images into torch tensor in memory to minimize disk IO overhead
        if self.in_memory:
            self.images = [self.read_image(path) for path in self.image_paths]

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, index):
        file_path = self.image_paths[index]

        if self.in_memory:
            img = self.images[index]
        else:
            img = self.read_image(file_path)

        if self.split == 'test':
            # The test split has no labels.
            return img
        else:
            # file_name = file_path.split('/')[-1]
            return img, self.labels[os.path.basename(file_path)]

    def __repr__(self):
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        tmp = self.split
        fmt_str += '    Split: {}\n'.format(tmp)
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        tmp = '    Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str

    def read_image(self, path):
        """Load one image and apply `self.transform` (if any)."""
        img = Image.open(path)
        # BUG FIX: convert *any* non-RGB image ('L', 'P', 'RGBA', 'CMYK', ...)
        # so every sample has exactly 3 channels; the original only handled
        # grayscale ('L') and let other modes through unconverted.
        if img.mode != 'RGB':
            img = img.convert('RGB')
        return self.transform(img) if self.transform else img
def get_data_loader(root,
                    batch_size,
                    split='train',
                    shuffle=True,
                    num_workers=4,
                    pin_memory=False):
    """
    Utility function for loading and returning train and valid
    multi-process iterators over the Tiny Imagenet dataset.

    If using CUDA, num_workers should be set to 1 and pin_memory to True.

    Params
    ------
    - root: The root directory for TinyImagenet dataset
    - batch_size: how many samples per batch to load.
    - split: Can be train/val/test. For train we apply the data augmentation techniques.
    - shuffle: whether to shuffle the indices handed to the loader.
    - num_workers: number of subprocesses to use when loading the dataset.
    - pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
      True if using GPU.

    Returns
    -------
    - data_loader: iterator over the requested split.
    """
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]
    )

    # val/test: deterministic preprocessing only.
    val_test_transform = transforms.Compose([
        transforms.ToTensor(),
        normalize
    ])

    # train: light augmentation (crop + horizontal flip).
    train_transform = transforms.Compose([
        transforms.RandomCrop(64, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize
    ])

    # load the dataset
    data_dir = root
    if split == 'train':
        dataset = TinyImageNet(data_dir,
                               split='train',
                               transform=train_transform,
                               in_memory=True)
    elif split == 'test':
        # BUG FIX: 'test' previously fell into the val branch and silently
        # returned the validation split.
        dataset = TinyImageNet(data_dir,
                               split='test',
                               transform=val_test_transform,
                               in_memory=True)
    else:
        dataset = TinyImageNet(data_dir,
                               split='val',
                               transform=val_test_transform,
                               in_memory=True)
    print(dataset)

    # BUG FIX: honor the `shuffle` argument; it was documented but the
    # DataLoader hardcoded shuffle=True.
    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size,
        num_workers=num_workers, pin_memory=pin_memory, shuffle=shuffle
    )
    return data_loader
If $S$ is locally path-connected, then the path-component of $x$ in $S$ is closed in $S$. |
{-# OPTIONS --sized-types #-}
-- Regression test for sized types: pattern matching on an argument whose
-- type carries a successor size annotation (Nat {↑ i}).
module SizedTypesFunctionFromSuccSize where
open import Common.Size
-- Sized naturals: the Size index is an upper bound on constructor depth.
data Nat : {size : Size} → Set where
  zero : {size : Size} → Nat {↑ size}
  suc : {size : Size} → Nat {size} → Nat {↑ size}
-- NOTE(review): the first clause is self-referential (bad zero = bad zero);
-- presumably this file exercises the termination/size checker on a
-- non-terminating definition — confirm intent before "fixing" it.
bad : {i : Size} → Nat {↑ i} → Set
bad zero = bad zero
bad (suc x) = Nat
|
/******************************************************************************
*
* Copyright (c) 2019, the Perspective Authors.
*
* This file is part of the Perspective library, distributed under the terms of
* the Apache License 2.0. The full license can be found in the LICENSE file.
*
*/
#ifdef PSP_ENABLE_PYTHON
#include <perspective/base.h>
#include <perspective/binding.h>
#include <perspective/python.h>
#include <perspective/gnode.h>
#include <perspective/table.h>
#include <perspective/pool.h>
#include <perspective/context_zero.h>
#include <perspective/context_one.h>
#include <perspective/context_two.h>
#include <perspective/sym_table.h>
#include <algorithm>
#include <cmath>
#include <codecvt>
#include <cstdint>
#include <random>
#include <sstream>
#include <boost/optional.hpp>
namespace perspective {
namespace binding {
perspective::t_schema* t_schema_init(py::list& columns, py::list& types)
{
    // Convert the Python column-name and dtype lists into C++ vectors and
    // construct a schema from them.  The caller owns the returned pointer.
    std::vector<std::string> col_names;
    std::vector<perspective::t_dtype> col_types;

    const ssize_t n_names = py::len(columns);
    for (ssize_t idx = 0; idx < n_names; ++idx) {
        col_names.push_back(py::extract<std::string>(columns[idx]));
    }

    const ssize_t n_types = py::len(types);
    for (ssize_t idx = 0; idx < n_types; ++idx) {
        col_types.push_back(py::extract<perspective::t_dtype>(types[idx]));
    }

    return new perspective::t_schema(col_names, col_types);
}
template<typename T>
void _fill_col(std::vector<T>& dcol, std::shared_ptr<perspective::t_column> col)
{
perspective::t_uindex nrows = col->size();
for (auto i = 0; i < nrows; ++i)
{
auto elem = dcol[i];
// std::cout << elem << std::endl;
col->set_nth(i, elem);
}
}
// Copies values out of a numpy array buffer into a perspective column.
//
// NOTE(review): indexes the raw buffer as get_data() + i*sizeof(T), i.e. it
// assumes a contiguous, C-ordered array of exactly dtype T with at least
// col->size() elements — confirm callers guarantee this (no stride/shape
// checks are done here).
// NOTE(review): instantiating this with T = std::string (see
// _fill_data_single_column_np) reinterprets raw bytes as std::string, which
// cannot be correct for numpy string data — flagging rather than changing
// behavior.
template<typename T>
void _fill_col_np(np::ndarray& dcol, std::shared_ptr<perspective::t_column>col)
{
perspective::t_uindex nrows = col->size();
for (auto i = 0; i < nrows; ++i)
{
// auto elem = dcol[i];
auto elem = reinterpret_cast<T *>(dcol.get_data()+(i*sizeof(T)));
// T elem = py::extract<T>(dcol[i]);
// std::cout << *elem << std::endl;
col->set_nth(i, *elem);
}
}
// Populates one named column of `tbl` from a Python list, dispatching on the
// declared perspective dtype.  Each case stages the extracted values in a
// C++ vector, then bulk-copies them via _fill_col.  Unsupported dtypes fall
// through to the default case and are silently ignored.
void _fill_data_single_column(perspective::t_table& tbl,
const std::string& colname_i,
py::list& data_cols_i,
perspective::t_dtype col_type)
{
std::string name = colname_i;
std::shared_ptr<perspective::t_column> col = tbl.get_column(name);
switch(col_type){
case perspective::DTYPE_INT64 : {
std::vector<std::int64_t> dcol;
for(ssize_t i=0; i < py::len(data_cols_i); i++)
{
dcol.push_back(py::extract<std::int64_t>(data_cols_i[i]));
}
_fill_col<std::int64_t>(dcol, col);
break;
}
case perspective::DTYPE_UINT64 : {
std::vector<std::uint64_t> dcol;
for(ssize_t i=0; i < py::len(data_cols_i); i++)
{
dcol.push_back(py::extract<std::uint64_t>(data_cols_i[i]));
}
_fill_col<std::uint64_t>(dcol, col);
break;
}
case perspective::DTYPE_FLOAT64 : {
std::vector<double> dcol;
for(ssize_t i=0; i < py::len(data_cols_i); i++)
{
dcol.push_back(py::extract<double>(data_cols_i[i]));
}
_fill_col<double>(dcol, col);
break;
}
case perspective::DTYPE_BOOL : {
//FIXME segfault
// NOTE(review): std::vector<bool> is a bit-packed specialization whose
// operator[] returns a proxy, not bool&; _fill_col's `auto elem = dcol[i]`
// then forwards that proxy to set_nth — likely the cause of the segfault
// noted above.  Left unchanged pending a fix in _fill_col or a switch to
// std::deque<bool>.
std::vector<bool> dcol;
for(ssize_t i=0; i < py::len(data_cols_i); i++)
{
dcol.push_back(py::extract<bool>(data_cols_i[i]));
}
_fill_col<bool>(dcol, col);
break;
}
case perspective::DTYPE_STR : {
std::vector<std::string> dcol;
for(ssize_t i=0; i < py::len(data_cols_i); i++)
{
dcol.push_back(py::extract<std::string>(data_cols_i[i]));
}
_fill_col<std::string>(dcol, col);
break;
}
default: {
break;
}
}
}
// Populates one named column of `tbl` directly from a numpy array's buffer,
// dispatching on the declared perspective dtype.  Only int64, float64 and
// str are handled; everything else is silently ignored.
void
_fill_data_single_column_np(perspective::t_table& tbl,
const std::string& colname_i,
np::ndarray& dcol,
perspective::t_dtype col_type)
{
std::string name = colname_i;
std::shared_ptr<perspective::t_column> col = tbl.get_column(name);
switch(col_type){
case perspective::DTYPE_INT64 : {
_fill_col_np<std::int64_t>(dcol, col);
break;
}
case perspective::DTYPE_FLOAT64 : {
_fill_col_np<double>(dcol, col);
break;
}
case perspective::DTYPE_STR : {
// NOTE(review): _fill_col_np<std::string> reinterprets the numpy buffer
// bytes as std::string objects, which cannot be correct — flagging, not
// changing behavior here.
_fill_col_np<std::string>(dcol, col);
break;
}
default: {
break;
}
}
}
np::ndarray _get_as_numpy(perspective::t_table& tbl, const std::string& colname_i)
{
    // Look the column up by name and hand back its numpy representation.
    return tbl.get_column(colname_i)->_as_numpy();
}
// Converts a Python/numpy array-like `arr` into a std::vector<U>.
//TODO: not implemented yet — currently always returns an empty vector.
template <typename T, typename U>
std::vector<U> vecFromArray(T& arr){
//TODO
std::vector<U> ret;
return ret;
}
/******************************************************************************
*
* Data Loading
*/
/* template <>
std::vector<t_sortspec> _get_sort(
const std::vector<std::string>& columns, bool is_column_sort, py::object j_sortby) {
// TODO
std::vector<t_sortspec> svec{};
return svec;
} */
/**
*
*
* Params
* ------
*
*
* Returns
* -------
*
*/
/* template <>
std::vector<t_fterm>
_get_fterms(const t_schema schema, py::object j_date_parser, py::object j_filters) {
// TODO
std::vector<t_fterm> fvec{};
return fvec;
} */
/**
*
*
* Params
* ------
*
*
* Returns
* -------
*
*/
/* std::vector<t_aggspec>
_get_aggspecs(py::object j_aggs) {
// TODO
std::vector<t_aggspec> aggspecs;
return aggspecs;
}
*/
/**
* Converts a scalar value to its Python representation.
*
* Params
* ------
* t_tscalar scalar
*
* Returns
* -------
* py::object
*/
template <>
py::object scalar_to(const t_tscalar& scalar) {
    // Invalid scalars map to Python None.
    if (!scalar.is_valid()) {
        return py::object(); // None
    }
    switch (scalar.get_dtype()) {
        case DTYPE_BOOL: {
            if (scalar) {
                return py::object(true);
            } else {
                return py::object(false);
            }
        }
        case DTYPE_TIME:
        case DTYPE_FLOAT64:
        case DTYPE_FLOAT32: {
            return py::object(scalar.to_double());
        }
        case DTYPE_DATE: {
            // TODO: date conversion is not implemented.
            // NOTE(review): there is no break/return here, so dates fall
            // through to the 32-bit integer path below — confirm this is
            // intended before relying on it.
        }
        case DTYPE_UINT8:
        case DTYPE_UINT16:
        case DTYPE_UINT32:
        case DTYPE_INT8:
        case DTYPE_INT16:
        case DTYPE_INT32: {
            return py::object(static_cast<std::int32_t>(scalar.to_int64()));
        }
        case DTYPE_UINT64:
        case DTYPE_INT64: {
            // BUG FIX: these were truncated through int32, silently
            // corrupting values outside the 32-bit range.  Python ints are
            // arbitrary precision, so pass the full 64 bits through.
            // (uint64 values above INT64_MAX still wrap via to_int64.)
            return py::object(scalar.to_int64());
        }
        case DTYPE_NONE: {
            return py::object(); // None
        }
        case DTYPE_STR:
        default: {
            // Decode the UTF-8 payload, substituting L"<Invalid>" on error.
            std::wstring_convert<utf8convert_type, wchar_t> converter("", L"<Invalid>");
            return py::str(converter.from_bytes(scalar.to_string()));
        }
    }
}
/**
* Fills the table with data from Javascript.
*
* Params
* ------
* tbl - pointer to the table object
* ocolnames - vector of column names
* accessor - the JS data accessor interface
* odt - vector of data types
* offset
* is_arrow - flag for arrow data
*
* Returns
* -------
*
*/
void
_fill_data(t_table& tbl, std::vector<std::string> ocolnames, py::object accessor,
std::vector<t_dtype> odt, std::uint32_t offset, bool is_arrow, bool is_update) {
//TODO: not implemented yet — currently a no-op; see the doc comment above
// for the intended contract.
}
/******************************************************************************
*
* Public
*/
// Intended to write `value` into row `idx` of `col`.
template<typename T>
void set_column_nth(t_column* col, t_uindex idx, T value) {
//TODO: not implemented yet — currently a no-op.
}
/**
* Helper function for computed columns
*
* Params
* ------
*
*
* Returns
* -------
*
*/
// Intended to add the computed columns described by `computed_defs` to
// `table` (see the doc comment above).
template<typename T>
void table_add_computed_column(t_table& table, T computed_defs) {
//TODO: not implemented yet — currently a no-op, so clone_gnode_table's
// computed columns are silently dropped.
}
/**
* DataAccessor
*
* parses and converts input data into a canonical format for
* interfacing with Perspective.
*/
// Name parsing
// Intended to extract the column names from `data` according to `format`.
std::vector<std::string>
column_names(py::object data, std::int32_t format) {
//TODO: not implemented yet — currently always returns an empty vector.
std::vector<std::string> names;
return names;
}
// Type inferrence for fill_col and data_types
// Intended to infer the perspective dtype of the value `x`.
t_dtype
infer_type(py::object x, py::object date_validator) {
//TODO: not implemented yet — currently always reports DTYPE_STR.
t_dtype t = t_dtype::DTYPE_STR;
return t;
}
// Intended to determine the dtype of column `name` within `data`.
t_dtype
get_data_type(py::object data, std::int32_t format, std::string name, py::object date_validator) {
//TODO: not implemented yet — currently always reports DTYPE_STR.
return t_dtype::DTYPE_STR;
}
/**
 * Infers a perspective dtype for every named column of `data`.
 *
 * Aborts when `names` is empty, since inference is keyed by column name.
 * TODO: actual inference is not implemented yet — currently returns an
 * empty vector.
 */
std::vector<t_dtype>
data_types(py::object data, std::int32_t format, std::vector<std::string> names, py::object date_validator) {
    // Idiom: empty() instead of size() == 0.
    if (names.empty()) {
        PSP_COMPLAIN_AND_ABORT("Cannot determine data types without column names!");
    }
    std::vector<t_dtype> types;
    return types;
}
/**
* Create a default gnode.
*
* Params
* ------
* j_colnames - a JS Array of column names.
* j_dtypes - a JS Array of column types.
*
* Returns
* -------
* A gnode.
*/
std::shared_ptr<t_gnode>
make_gnode(const t_schema& iscm) {
    // The gnode's output schema is the input schema minus perspective's
    // internal bookkeeping columns (primary key and op code).
    std::vector<std::string> ocolnames(iscm.columns());
    std::vector<t_dtype> odt(iscm.types());

    // Remove `name` from both parallel vectors, if present.
    //
    // BUG FIX: the original looked each index up in the *unmodified* input
    // schema; after erasing "psp_pkey", the index returned for "psp_op"
    // could point one slot too far in the shrunken vectors and remove the
    // wrong column.  Searching the working vector keeps both removals
    // correct regardless of column order.
    auto drop_column = [&](const std::string& name) {
        auto it = std::find(ocolnames.begin(), ocolnames.end(), name);
        if (it != ocolnames.end()) {
            odt.erase(odt.begin() + (it - ocolnames.begin()));
            ocolnames.erase(it);
        }
    };
    drop_column("psp_pkey");
    drop_column("psp_op");

    t_schema oscm(ocolnames, odt);

    // Create a gnode
    auto gnode = std::make_shared<t_gnode>(oscm, iscm);
    gnode->init();

    return gnode;
}
/**
* Create a populated table.
*
* Params
* ------
* chunk - a JS object containing parsed data and associated metadata
* offset
* limit
* index
* is_delete - sets the table operation
*
* Returns
* -------
* a populated table.
*/
// Intended to build and populate a table from `accessor` and feed it through
// `gnode` (see the doc comment above for parameter meanings).
template<typename T>
std::shared_ptr<t_gnode>
make_table(t_pool* pool, T gnode, T accessor, T computed, std::uint32_t offset,
std::uint32_t limit, std::string index, bool is_update, bool is_delete, bool is_arrow) {
std::vector<std::string> colnames;
std::vector<t_dtype> dtypes;
// Create the table
// TODO: colnames/dtypes are never filled in, and the populated gnode is
// never created — currently returns a null shared_ptr.
t_table tbl(t_schema(colnames, dtypes));
tbl.init();
tbl.extend(0);
std::shared_ptr<t_gnode> new_gnode;
return new_gnode;
}
/**
* Copies the internal table from a gnode
*
* Params
* ------
*
* Returns
* -------
* A gnode.
*/
// Snapshot the gnode's primary-keyed state, apply any computed columns, and
// feed the result into a freshly created gnode registered on `pool`.
template<typename T>
std::shared_ptr<t_gnode>
clone_gnode_table(t_pool* pool, std::shared_ptr<t_gnode> gnode, T computed) {
// NOTE(review): `tbl` looks like an owning raw pointer that is never freed
// in this function — confirm the ownership semantics of
// _get_pkeyed_table() (possible leak).
t_table* tbl = gnode->_get_pkeyed_table();
table_add_computed_column(*tbl, computed);
std::shared_ptr<t_gnode> new_gnode = make_gnode(tbl->get_schema());
pool->register_gnode(new_gnode.get());
// Replay the snapshot into the new gnode and flush the pool's queue.
pool->send(new_gnode->get_id(), 0, *tbl);
pool->_process();
return new_gnode;
}
// Intended to apply the sort specification `j_sortby` to the two-sided
// context.
template<>
void sort(std::shared_ptr<t_ctx2> ctx2, py::object j_sortby){
//TODO: not implemented yet — currently a no-op.
}
// Intended to return the named column's values as a Python list.
template <>
py::object get_column_data(std::shared_ptr<t_table> table, std::string colname) {
//TODO: not implemented yet — currently always returns an empty list.
py::list arr;
return arr;
}
/**
*
*
* Params
* ------
*
*
* Returns
* -------
*
*/
// Intended to extract the view's cells in the given row/column window as a
// Python list (see the doc comment above).
template <typename CTX_T>
py::object get_data(std::shared_ptr<View<CTX_T> > view, std::uint32_t start_row, std::uint32_t end_row, std::uint32_t start_col,
std::uint32_t end_col) {
//TODO: not implemented yet — currently always returns an empty list.
py::list arr;
return arr;
}
// Intended to extract a window from a two-sided view at `depth`, omitting
// the header rows.
template <>
py::object get_data_two_skip_headers(std::shared_ptr<View<t_ctx2> > view, std::uint32_t depth,
std::uint32_t start_row, std::uint32_t end_row, std::uint32_t start_col,
std::uint32_t end_col) {
//TODO: not implemented yet — currently always returns an empty list.
py::list arr;
return arr;
}
}
}
#endif |
(* Title: CTT/CTT.thy
Author: Lawrence C Paulson, Cambridge University Computer Laboratory
Copyright 1993 University of Cambridge
*)
theory CTT
imports Pure
begin
section \<open>Constructive Type Theory: axiomatic basis\<close>
ML_file \<open>~~/src/Provers/typedsimp.ML\<close>
setup Pure_Thy.old_appl_syntax_setup
typedecl i
typedecl t
typedecl o
consts
\<comment> \<open>Types\<close>
F :: "t"
T :: "t" \<comment> \<open>\<open>F\<close> is empty, \<open>T\<close> contains one element\<close>
contr :: "i\<Rightarrow>i"
tt :: "i"
\<comment> \<open>Natural numbers\<close>
N :: "t"
succ :: "i\<Rightarrow>i"
rec :: "[i, i, [i,i]\<Rightarrow>i] \<Rightarrow> i"
\<comment> \<open>Unions\<close>
inl :: "i\<Rightarrow>i"
inr :: "i\<Rightarrow>i"
"when" :: "[i, i\<Rightarrow>i, i\<Rightarrow>i]\<Rightarrow>i"
\<comment> \<open>General Sum and Binary Product\<close>
Sum :: "[t, i\<Rightarrow>t]\<Rightarrow>t"
fst :: "i\<Rightarrow>i"
snd :: "i\<Rightarrow>i"
split :: "[i, [i,i]\<Rightarrow>i] \<Rightarrow>i"
\<comment> \<open>General Product and Function Space\<close>
Prod :: "[t, i\<Rightarrow>t]\<Rightarrow>t"
\<comment> \<open>Types\<close>
Plus :: "[t,t]\<Rightarrow>t" (infixr "+" 40)
\<comment> \<open>Equality type\<close>
Eq :: "[t,i,i]\<Rightarrow>t"
eq :: "i"
\<comment> \<open>Judgements\<close>
Type :: "t \<Rightarrow> prop" ("(_ type)" [10] 5)
Eqtype :: "[t,t]\<Rightarrow>prop" ("(_ =/ _)" [10,10] 5)
Elem :: "[i, t]\<Rightarrow>prop" ("(_ /: _)" [10,10] 5)
Eqelem :: "[i,i,t]\<Rightarrow>prop" ("(_ =/ _ :/ _)" [10,10,10] 5)
Reduce :: "[i,i]\<Rightarrow>prop" ("Reduce[_,_]")
\<comment> \<open>Types\<close>
\<comment> \<open>Functions\<close>
lambda :: "(i \<Rightarrow> i) \<Rightarrow> i" (binder "\<^bold>\<lambda>" 10)
app :: "[i,i]\<Rightarrow>i" (infixl "`" 60)
\<comment> \<open>Natural numbers\<close>
Zero :: "i" ("0")
\<comment> \<open>Pairing\<close>
pair :: "[i,i]\<Rightarrow>i" ("(1<_,/_>)")
syntax
"_PROD" :: "[idt,t,t]\<Rightarrow>t" ("(3\<Prod>_:_./ _)" 10)
"_SUM" :: "[idt,t,t]\<Rightarrow>t" ("(3\<Sum>_:_./ _)" 10)
translations
"\<Prod>x:A. B" \<rightleftharpoons> "CONST Prod(A, \<lambda>x. B)"
"\<Sum>x:A. B" \<rightleftharpoons> "CONST Sum(A, \<lambda>x. B)"
abbreviation Arrow :: "[t,t]\<Rightarrow>t" (infixr "\<longrightarrow>" 30)
where "A \<longrightarrow> B \<equiv> \<Prod>_:A. B"
abbreviation Times :: "[t,t]\<Rightarrow>t" (infixr "\<times>" 50)
where "A \<times> B \<equiv> \<Sum>_:A. B"
text \<open>
Reduction: a weaker notion than equality; a hack for simplification.
\<open>Reduce[a,b]\<close> means either that \<open>a = b : A\<close> for some \<open>A\<close> or else
that \<open>a\<close> and \<open>b\<close> are textually identical.
Does not verify \<open>a:A\<close>! Sound because only \<open>trans_red\<close> uses a \<open>Reduce\<close>
premise. No new theorems can be proved about the standard judgements.
\<close>
axiomatization
where
refl_red: "\<And>a. Reduce[a,a]" and
red_if_equal: "\<And>a b A. a = b : A \<Longrightarrow> Reduce[a,b]" and
trans_red: "\<And>a b c A. \<lbrakk>a = b : A; Reduce[b,c]\<rbrakk> \<Longrightarrow> a = c : A" and
\<comment> \<open>Reflexivity\<close>
refl_type: "\<And>A. A type \<Longrightarrow> A = A" and
refl_elem: "\<And>a A. a : A \<Longrightarrow> a = a : A" and
\<comment> \<open>Symmetry\<close>
sym_type: "\<And>A B. A = B \<Longrightarrow> B = A" and
sym_elem: "\<And>a b A. a = b : A \<Longrightarrow> b = a : A" and
\<comment> \<open>Transitivity\<close>
trans_type: "\<And>A B C. \<lbrakk>A = B; B = C\<rbrakk> \<Longrightarrow> A = C" and
trans_elem: "\<And>a b c A. \<lbrakk>a = b : A; b = c : A\<rbrakk> \<Longrightarrow> a = c : A" and
equal_types: "\<And>a A B. \<lbrakk>a : A; A = B\<rbrakk> \<Longrightarrow> a : B" and
equal_typesL: "\<And>a b A B. \<lbrakk>a = b : A; A = B\<rbrakk> \<Longrightarrow> a = b : B" and
\<comment> \<open>Substitution\<close>
subst_type: "\<And>a A B. \<lbrakk>a : A; \<And>z. z:A \<Longrightarrow> B(z) type\<rbrakk> \<Longrightarrow> B(a) type" and
subst_typeL: "\<And>a c A B D. \<lbrakk>a = c : A; \<And>z. z:A \<Longrightarrow> B(z) = D(z)\<rbrakk> \<Longrightarrow> B(a) = D(c)" and
subst_elem: "\<And>a b A B. \<lbrakk>a : A; \<And>z. z:A \<Longrightarrow> b(z):B(z)\<rbrakk> \<Longrightarrow> b(a):B(a)" and
subst_elemL:
"\<And>a b c d A B. \<lbrakk>a = c : A; \<And>z. z:A \<Longrightarrow> b(z)=d(z) : B(z)\<rbrakk> \<Longrightarrow> b(a)=d(c) : B(a)" and
\<comment> \<open>The type \<open>N\<close> -- natural numbers\<close>
NF: "N type" and
NI0: "0 : N" and
NI_succ: "\<And>a. a : N \<Longrightarrow> succ(a) : N" and
NI_succL: "\<And>a b. a = b : N \<Longrightarrow> succ(a) = succ(b) : N" and
NE:
"\<And>p a b C. \<lbrakk>p: N; a: C(0); \<And>u v. \<lbrakk>u: N; v: C(u)\<rbrakk> \<Longrightarrow> b(u,v): C(succ(u))\<rbrakk>
\<Longrightarrow> rec(p, a, \<lambda>u v. b(u,v)) : C(p)" and
NEL:
"\<And>p q a b c d C. \<lbrakk>p = q : N; a = c : C(0);
\<And>u v. \<lbrakk>u: N; v: C(u)\<rbrakk> \<Longrightarrow> b(u,v) = d(u,v): C(succ(u))\<rbrakk>
\<Longrightarrow> rec(p, a, \<lambda>u v. b(u,v)) = rec(q,c,d) : C(p)" and
NC0:
"\<And>a b C. \<lbrakk>a: C(0); \<And>u v. \<lbrakk>u: N; v: C(u)\<rbrakk> \<Longrightarrow> b(u,v): C(succ(u))\<rbrakk>
\<Longrightarrow> rec(0, a, \<lambda>u v. b(u,v)) = a : C(0)" and
NC_succ:
"\<And>p a b C. \<lbrakk>p: N; a: C(0); \<And>u v. \<lbrakk>u: N; v: C(u)\<rbrakk> \<Longrightarrow> b(u,v): C(succ(u))\<rbrakk> \<Longrightarrow>
rec(succ(p), a, \<lambda>u v. b(u,v)) = b(p, rec(p, a, \<lambda>u v. b(u,v))) : C(succ(p))" and
\<comment> \<open>The fourth Peano axiom. See page 91 of Martin-Löf's book.\<close>
zero_ne_succ: "\<And>a. \<lbrakk>a: N; 0 = succ(a) : N\<rbrakk> \<Longrightarrow> 0: F" and
\<comment> \<open>The Product of a family of types\<close>
ProdF: "\<And>A B. \<lbrakk>A type; \<And>x. x:A \<Longrightarrow> B(x) type\<rbrakk> \<Longrightarrow> \<Prod>x:A. B(x) type" and
ProdFL:
"\<And>A B C D. \<lbrakk>A = C; \<And>x. x:A \<Longrightarrow> B(x) = D(x)\<rbrakk> \<Longrightarrow> \<Prod>x:A. B(x) = \<Prod>x:C. D(x)" and
ProdI:
"\<And>b A B. \<lbrakk>A type; \<And>x. x:A \<Longrightarrow> b(x):B(x)\<rbrakk> \<Longrightarrow> \<^bold>\<lambda>x. b(x) : \<Prod>x:A. B(x)" and
ProdIL: "\<And>b c A B. \<lbrakk>A type; \<And>x. x:A \<Longrightarrow> b(x) = c(x) : B(x)\<rbrakk> \<Longrightarrow>
\<^bold>\<lambda>x. b(x) = \<^bold>\<lambda>x. c(x) : \<Prod>x:A. B(x)" and
ProdE: "\<And>p a A B. \<lbrakk>p : \<Prod>x:A. B(x); a : A\<rbrakk> \<Longrightarrow> p`a : B(a)" and
ProdEL: "\<And>p q a b A B. \<lbrakk>p = q: \<Prod>x:A. B(x); a = b : A\<rbrakk> \<Longrightarrow> p`a = q`b : B(a)" and
ProdC: "\<And>a b A B. \<lbrakk>a : A; \<And>x. x:A \<Longrightarrow> b(x) : B(x)\<rbrakk> \<Longrightarrow> (\<^bold>\<lambda>x. b(x)) ` a = b(a) : B(a)" and
ProdC2: "\<And>p A B. p : \<Prod>x:A. B(x) \<Longrightarrow> (\<^bold>\<lambda>x. p`x) = p : \<Prod>x:A. B(x)" and
\<comment> \<open>The Sum of a family of types\<close>
SumF: "\<And>A B. \<lbrakk>A type; \<And>x. x:A \<Longrightarrow> B(x) type\<rbrakk> \<Longrightarrow> \<Sum>x:A. B(x) type" and
SumFL: "\<And>A B C D. \<lbrakk>A = C; \<And>x. x:A \<Longrightarrow> B(x) = D(x)\<rbrakk> \<Longrightarrow> \<Sum>x:A. B(x) = \<Sum>x:C. D(x)" and
SumI: "\<And>a b A B. \<lbrakk>a : A; b : B(a)\<rbrakk> \<Longrightarrow> <a,b> : \<Sum>x:A. B(x)" and
SumIL: "\<And>a b c d A B. \<lbrakk> a = c : A; b = d : B(a)\<rbrakk> \<Longrightarrow> <a,b> = <c,d> : \<Sum>x:A. B(x)" and
SumE: "\<And>p c A B C. \<lbrakk>p: \<Sum>x:A. B(x); \<And>x y. \<lbrakk>x:A; y:B(x)\<rbrakk> \<Longrightarrow> c(x,y): C(<x,y>)\<rbrakk>
\<Longrightarrow> split(p, \<lambda>x y. c(x,y)) : C(p)" and
SumEL: "\<And>p q c d A B C. \<lbrakk>p = q : \<Sum>x:A. B(x);
\<And>x y. \<lbrakk>x:A; y:B(x)\<rbrakk> \<Longrightarrow> c(x,y)=d(x,y): C(<x,y>)\<rbrakk>
\<Longrightarrow> split(p, \<lambda>x y. c(x,y)) = split(q, \<lambda>x y. d(x,y)) : C(p)" and
SumC: "\<And>a b c A B C. \<lbrakk>a: A; b: B(a); \<And>x y. \<lbrakk>x:A; y:B(x)\<rbrakk> \<Longrightarrow> c(x,y): C(<x,y>)\<rbrakk>
\<Longrightarrow> split(<a,b>, \<lambda>x y. c(x,y)) = c(a,b) : C(<a,b>)" and
fst_def: "\<And>a. fst(a) \<equiv> split(a, \<lambda>x y. x)" and
snd_def: "\<And>a. snd(a) \<equiv> split(a, \<lambda>x y. y)" and
\<comment> \<open>The sum of two types\<close>
PlusF: "\<And>A B. \<lbrakk>A type; B type\<rbrakk> \<Longrightarrow> A+B type" and
PlusFL: "\<And>A B C D. \<lbrakk>A = C; B = D\<rbrakk> \<Longrightarrow> A+B = C+D" and
PlusI_inl: "\<And>a A B. \<lbrakk>a : A; B type\<rbrakk> \<Longrightarrow> inl(a) : A+B" and
PlusI_inlL: "\<And>a c A B. \<lbrakk>a = c : A; B type\<rbrakk> \<Longrightarrow> inl(a) = inl(c) : A+B" and
PlusI_inr: "\<And>b A B. \<lbrakk>A type; b : B\<rbrakk> \<Longrightarrow> inr(b) : A+B" and
PlusI_inrL: "\<And>b d A B. \<lbrakk>A type; b = d : B\<rbrakk> \<Longrightarrow> inr(b) = inr(d) : A+B" and
PlusE:
"\<And>p c d A B C. \<lbrakk>p: A+B;
\<And>x. x:A \<Longrightarrow> c(x): C(inl(x));
\<And>y. y:B \<Longrightarrow> d(y): C(inr(y)) \<rbrakk> \<Longrightarrow> when(p, \<lambda>x. c(x), \<lambda>y. d(y)) : C(p)" and
PlusEL:
"\<And>p q c d e f A B C. \<lbrakk>p = q : A+B;
\<And>x. x: A \<Longrightarrow> c(x) = e(x) : C(inl(x));
\<And>y. y: B \<Longrightarrow> d(y) = f(y) : C(inr(y))\<rbrakk>
\<Longrightarrow> when(p, \<lambda>x. c(x), \<lambda>y. d(y)) = when(q, \<lambda>x. e(x), \<lambda>y. f(y)) : C(p)" and
PlusC_inl:
"\<And>a c d A B C. \<lbrakk>a: A;
\<And>x. x:A \<Longrightarrow> c(x): C(inl(x));
\<And>y. y:B \<Longrightarrow> d(y): C(inr(y)) \<rbrakk>
\<Longrightarrow> when(inl(a), \<lambda>x. c(x), \<lambda>y. d(y)) = c(a) : C(inl(a))" and
PlusC_inr:
"\<And>b c d A B C. \<lbrakk>b: B;
\<And>x. x:A \<Longrightarrow> c(x): C(inl(x));
\<And>y. y:B \<Longrightarrow> d(y): C(inr(y))\<rbrakk>
\<Longrightarrow> when(inr(b), \<lambda>x. c(x), \<lambda>y. d(y)) = d(b) : C(inr(b))" and
\<comment> \<open>The type \<open>Eq\<close>\<close>
EqF: "\<And>a b A. \<lbrakk>A type; a : A; b : A\<rbrakk> \<Longrightarrow> Eq(A,a,b) type" and
EqFL: "\<And>a b c d A B. \<lbrakk>A = B; a = c : A; b = d : A\<rbrakk> \<Longrightarrow> Eq(A,a,b) = Eq(B,c,d)" and
EqI: "\<And>a b A. a = b : A \<Longrightarrow> eq : Eq(A,a,b)" and
EqE: "\<And>p a b A. p : Eq(A,a,b) \<Longrightarrow> a = b : A" and
\<comment> \<open>By equality of types, can prove \<open>C(p)\<close> from \<open>C(eq)\<close>, an elimination rule\<close>
EqC: "\<And>p a b A. p : Eq(A,a,b) \<Longrightarrow> p = eq : Eq(A,a,b)" and
\<comment> \<open>The type \<open>F\<close>\<close>
FF: "F type" and
FE: "\<And>p C. \<lbrakk>p: F; C type\<rbrakk> \<Longrightarrow> contr(p) : C" and
FEL: "\<And>p q C. \<lbrakk>p = q : F; C type\<rbrakk> \<Longrightarrow> contr(p) = contr(q) : C" and
\<comment> \<open>The type T\<close>
\<comment> \<open>Martin-Löf's book (page 68) discusses elimination and computation.
Elimination can be derived by computation and equality of types,
but with an extra premise \<open>C(x)\<close> type \<open>x:T\<close>.
Also computation can be derived from elimination.\<close>
TF: "T type" and
TI: "tt : T" and
TE: "\<And>p c C. \<lbrakk>p : T; c : C(tt)\<rbrakk> \<Longrightarrow> c : C(p)" and
TEL: "\<And>p q c d C. \<lbrakk>p = q : T; c = d : C(tt)\<rbrakk> \<Longrightarrow> c = d : C(p)" and
TC: "\<And>p. p : T \<Longrightarrow> p = tt : T"
subsection "Tactics and derived rules for Constructive Type Theory"
text \<open>Formation rules.\<close>
lemmas form_rls = NF ProdF SumF PlusF EqF FF TF
and formL_rls = ProdFL SumFL PlusFL EqFL
text \<open>
Introduction rules. OMITTED:
\<^item> \<open>EqI\<close>, because its premise is an \<open>eqelem\<close>, not an \<open>elem\<close>.
\<close>
lemmas intr_rls = NI0 NI_succ ProdI SumI PlusI_inl PlusI_inr TI
and intrL_rls = NI_succL ProdIL SumIL PlusI_inlL PlusI_inrL
text \<open>
Elimination rules. OMITTED:
\<^item> \<open>EqE\<close>, because its conclusion is an \<open>eqelem\<close>, not an \<open>elem\<close>
\<^item> \<open>TE\<close>, because it does not involve a constructor.
\<close>
lemmas elim_rls = NE ProdE SumE PlusE FE
and elimL_rls = NEL ProdEL SumEL PlusEL FEL
text \<open>OMITTED: \<open>eqC\<close> are \<open>TC\<close> because they make rewriting loop: \<open>p = un = un = \<dots>\<close>\<close>
lemmas comp_rls = NC0 NC_succ ProdC SumC PlusC_inl PlusC_inr
text \<open>Rules with conclusion \<open>a:A\<close>, an elem judgement.\<close>
lemmas element_rls = intr_rls elim_rls
text \<open>Definitions are (meta)equality axioms.\<close>
lemmas basic_defs = fst_def snd_def
text \<open>Compare with standard version: \<open>B\<close> is applied to UNSIMPLIFIED expression!\<close>
lemma SumIL2: "\<lbrakk>c = a : A; d = b : B(a)\<rbrakk> \<Longrightarrow> <c,d> = <a,b> : Sum(A,B)"
by (rule sym_elem) (rule SumIL; rule sym_elem)
lemmas intrL2_rls = NI_succL ProdIL SumIL2 PlusI_inlL PlusI_inrL
text \<open>
Exploit \<open>p:Prod(A,B)\<close> to create the assumption \<open>z:B(a)\<close>.
A more natural form of product elimination.
\<close>
lemma subst_prodE:
assumes "p: Prod(A,B)"
and "a: A"
and "\<And>z. z: B(a) \<Longrightarrow> c(z): C(z)"
shows "c(p`a): C(p`a)"
by (rule assms ProdE)+
subsection \<open>Tactics for type checking\<close>
ML \<open>
local
fun is_rigid_elem \<^Const_>\<open>Elem for a _\<close> = not(is_Var (head_of a))
| is_rigid_elem \<^Const_>\<open>Eqelem for a _ _\<close> = not(is_Var (head_of a))
| is_rigid_elem \<^Const_>\<open>Type for a\<close> = not(is_Var (head_of a))
| is_rigid_elem _ = false
in
(*Try solving a:A or a=b:A by assumption provided a is rigid!*)
fun test_assume_tac ctxt = SUBGOAL (fn (prem, i) =>
if is_rigid_elem (Logic.strip_assums_concl prem)
then assume_tac ctxt i else no_tac)
fun ASSUME ctxt tf i = test_assume_tac ctxt i ORELSE tf i
end
\<close>
text \<open>
For simplification: type formation and checking,
but no equalities between terms.
\<close>
lemmas routine_rls = form_rls formL_rls refl_type element_rls
ML \<open>
(* Solve routine goals from prems @ rls, attempting assumptions first;
   the bound 4 limits how many net-filtered rules may apply per goal. *)
fun routine_tac rls ctxt prems =
ASSUME ctxt (filt_resolve_from_net_tac ctxt 4 (Tactic.build_net (prems @ rls)));
(*Solve all subgoals "A type" using formation rules. *)
val form_net = Tactic.build_net @{thms form_rls};
fun form_tac ctxt =
REPEAT_FIRST (ASSUME ctxt (filt_resolve_from_net_tac ctxt 1 form_net));
(*Type checking: solve a:A (a rigid, A flexible) by intro and elim rules. *)
fun typechk_tac ctxt thms =
let val tac =
filt_resolve_from_net_tac ctxt 3
(Tactic.build_net (thms @ @{thms form_rls} @ @{thms element_rls}))
in REPEAT_FIRST (ASSUME ctxt tac) end
(*Solve a:A (a flexible, A rigid) by introduction rules.
Cannot use stringtrees (filt_resolve_tac) since
goals like ?a:SUM(A,B) have a trivial head-string *)
fun intr_tac ctxt thms =
let val tac =
filt_resolve_from_net_tac ctxt 1
(Tactic.build_net (thms @ @{thms form_rls} @ @{thms intr_rls}))
in REPEAT_FIRST (ASSUME ctxt tac) end
(*Equality proving: solve a=b:A (where a is rigid) by long rules. *)
fun equal_tac ctxt thms =
REPEAT_FIRST
(ASSUME ctxt
(filt_resolve_from_net_tac ctxt 3
(Tactic.build_net (thms @ @{thms form_rls element_rls intrL_rls elimL_rls refl_elem}))))
\<close>
(* Expose the ML tactics above as Isar proof methods. *)
method_setup form = \<open>Scan.succeed (fn ctxt => SIMPLE_METHOD (form_tac ctxt))\<close>
method_setup typechk = \<open>Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (typechk_tac ctxt ths))\<close>
method_setup intr = \<open>Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (intr_tac ctxt ths))\<close>
method_setup equal = \<open>Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (equal_tac ctxt ths))\<close>
subsection \<open>Simplification\<close>
text \<open>To simplify the type in a goal.\<close>
lemma replace_type: "\<lbrakk>B = A; a : A\<rbrakk> \<Longrightarrow> a : B"
apply (rule equal_types)
apply (rule_tac [2] sym_type)
apply assumption+
done
text \<open>Simplify the parameter of a unary type operator.\<close>
lemma subst_eqtyparg:
assumes 1: "a=c : A"
and 2: "\<And>z. z:A \<Longrightarrow> B(z) type"
shows "B(a) = B(c)"
apply (rule subst_typeL)
apply (rule_tac [2] refl_type)
apply (rule 1)
apply (erule 2)
done
text \<open>Simplification rules for Constructive Type Theory.\<close>
lemmas reduction_rls = comp_rls [THEN trans_elem]
ML \<open>
(*Converts each goal "e : Eq(A,a,b)" into "a=b:A" for simplification.
Uses other intro rules to avoid changing flexible goals.*)
val eqintr_net = Tactic.build_net @{thms EqI intr_rls}
fun eqintr_tac ctxt =
REPEAT_FIRST (ASSUME ctxt (filt_resolve_from_net_tac ctxt 1 eqintr_net))
(** Tactics that instantiate CTT-rules.
Vars in the given terms will be incremented!
The (rtac EqE i) lets them apply to equality judgements. **)
(* Each tactic instantiates the major premise variable p with term sp. *)
fun NE_tac ctxt sp i =
TRY (resolve_tac ctxt @{thms EqE} i) THEN
Rule_Insts.res_inst_tac ctxt [((("p", 0), Position.none), sp)] [] @{thm NE} i
fun SumE_tac ctxt sp i =
TRY (resolve_tac ctxt @{thms EqE} i) THEN
Rule_Insts.res_inst_tac ctxt [((("p", 0), Position.none), sp)] [] @{thm SumE} i
fun PlusE_tac ctxt sp i =
TRY (resolve_tac ctxt @{thms EqE} i) THEN
Rule_Insts.res_inst_tac ctxt [((("p", 0), Position.none), sp)] [] @{thm PlusE} i
(** Predicate logic reasoning, WITH THINNING!! Procedures adapted from NJ. **)
(*Finds f:Prod(A,B) and a:A in the assumptions, concludes there is z:B(a) *)
fun add_mp_tac ctxt i =
resolve_tac ctxt @{thms subst_prodE} i THEN assume_tac ctxt i THEN assume_tac ctxt i
(*Finds P\<longrightarrow>Q and P in the assumptions, replaces implication by Q *)
fun mp_tac ctxt i = eresolve_tac ctxt @{thms subst_prodE} i THEN assume_tac ctxt i
(*"safe" when regarded as predicate calculus rules*)
val safe_brls = sort (make_ord lessb)
[ (true, @{thm FE}), (true,asm_rl),
(false, @{thm ProdI}), (true, @{thm SumE}), (true, @{thm PlusE}) ]
val unsafe_brls =
[ (false, @{thm PlusI_inl}), (false, @{thm PlusI_inr}), (false, @{thm SumI}),
(true, @{thm subst_prodE}) ]
(*0 subgoals vs 1 or more*)
val (safe0_brls, safep_brls) =
List.partition (curry (op =) 0 o subgoals_of_brl) safe_brls
fun safestep_tac ctxt thms i =
form_tac ctxt ORELSE
resolve_tac ctxt thms i ORELSE
biresolve_tac ctxt safe0_brls i ORELSE mp_tac ctxt i ORELSE
DETERM (biresolve_tac ctxt safep_brls i)
fun safe_tac ctxt thms i = DEPTH_SOLVE_1 (safestep_tac ctxt thms i)
fun step_tac ctxt thms = safestep_tac ctxt thms ORELSE' biresolve_tac ctxt unsafe_brls
(*Fails unless it solves the goal!*)
fun pc_tac ctxt thms = DEPTH_SOLVE_1 o (step_tac ctxt thms)
\<close>
method_setup eqintr = \<open>Scan.succeed (SIMPLE_METHOD o eqintr_tac)\<close>
method_setup NE = \<open>
Scan.lift Parse.embedded_inner_syntax >> (fn s => fn ctxt => SIMPLE_METHOD' (NE_tac ctxt s))
\<close>
method_setup pc = \<open>Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD' (pc_tac ctxt ths))\<close>
method_setup add_mp = \<open>Scan.succeed (SIMPLE_METHOD' o add_mp_tac)\<close>
(* rew.ML provides rew_tac and hyp_rew_tac used by the methods below. *)
ML_file \<open>rew.ML\<close>
method_setup rew = \<open>Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (rew_tac ctxt ths))\<close>
method_setup hyp_rew = \<open>Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (hyp_rew_tac ctxt ths))\<close>
subsection \<open>The elimination rules for fst/snd\<close>
lemma SumE_fst: "p : Sum(A,B) \<Longrightarrow> fst(p) : A"
apply (unfold basic_defs)
apply (erule SumE)
apply assumption
done
text \<open>The first premise must be \<open>p:Sum(A,B)\<close>!!.\<close>
lemma SumE_snd:
assumes major: "p: Sum(A,B)"
and "A type"
and "\<And>x. x:A \<Longrightarrow> B(x) type"
shows "snd(p) : B(fst(p))"
apply (unfold basic_defs)
apply (rule major [THEN SumE])
apply (rule SumC [THEN subst_eqtyparg, THEN replace_type])
apply (typechk assms)
done
section \<open>The two-element type (booleans and conditionals)\<close>
(* Booleans are encoded as the sum T+T, with true/false as the two
   injections and cond as the corresponding case analysis (when). *)
definition Bool :: "t"
where "Bool \<equiv> T+T"
definition true :: "i"
where "true \<equiv> inl(tt)"
definition false :: "i"
where "false \<equiv> inr(tt)"
definition cond :: "[i,i,i]\<Rightarrow>i"
where "cond(a,b,c) \<equiv> when(a, \<lambda>_. b, \<lambda>_. c)"
lemmas bool_defs = Bool_def true_def false_def cond_def
subsection \<open>Derivation of rules for the type \<open>Bool\<close>\<close>
text \<open>Formation rule.\<close>
lemma boolF: "Bool type"
unfolding bool_defs by typechk
text \<open>Introduction rules for \<open>true\<close>, \<open>false\<close>.\<close>
lemma boolI_true: "true : Bool"
unfolding bool_defs by typechk
lemma boolI_false: "false : Bool"
unfolding bool_defs by typechk
text \<open>Elimination rule: typing of \<open>cond\<close>.\<close>
lemma boolE: "\<lbrakk>p:Bool; a : C(true); b : C(false)\<rbrakk> \<Longrightarrow> cond(p,a,b) : C(p)"
unfolding bool_defs
apply (typechk; erule TE)
apply typechk
done
lemma boolEL: "\<lbrakk>p = q : Bool; a = c : C(true); b = d : C(false)\<rbrakk>
\<Longrightarrow> cond(p,a,b) = cond(q,c,d) : C(p)"
unfolding bool_defs
apply (rule PlusEL)
apply (erule asm_rl refl_elem [THEN TEL])+
done
text \<open>Computation rules for \<open>true\<close>, \<open>false\<close>.\<close>
lemma boolC_true: "\<lbrakk>a : C(true); b : C(false)\<rbrakk> \<Longrightarrow> cond(true,a,b) = a : C(true)"
unfolding bool_defs
apply (rule comp_rls)
apply typechk
apply (erule_tac [!] TE)
apply typechk
done
lemma boolC_false: "\<lbrakk>a : C(true); b : C(false)\<rbrakk> \<Longrightarrow> cond(false,a,b) = b : C(false)"
unfolding bool_defs
apply (rule comp_rls)
apply typechk
apply (erule_tac [!] TE)
apply typechk
done
section \<open>Elementary arithmetic\<close>
subsection \<open>Arithmetic operators and their definitions\<close>
(* All operators are defined by primitive recursion (rec).  Subtraction
   is truncated: a - b = 0 whenever b exceeds a (see diff_0_eq_0 and the
   notes in the Difference subsection below). *)
definition add :: "[i,i]\<Rightarrow>i" (infixr "#+" 65)
where "a#+b \<equiv> rec(a, b, \<lambda>u v. succ(v))"
definition diff :: "[i,i]\<Rightarrow>i" (infixr "-" 65)
where "a-b \<equiv> rec(b, a, \<lambda>u v. rec(v, 0, \<lambda>x y. x))"
definition absdiff :: "[i,i]\<Rightarrow>i" (infixr "|-|" 65)
where "a|-|b \<equiv> (a-b) #+ (b-a)"
definition mult :: "[i,i]\<Rightarrow>i" (infixr "#*" 70)
where "a#*b \<equiv> rec(a, 0, \<lambda>u v. b #+ v)"
definition mod :: "[i,i]\<Rightarrow>i" (infixr "mod" 70)
where "a mod b \<equiv> rec(a, 0, \<lambda>u v. rec(succ(v) |-| b, 0, \<lambda>x y. succ(v)))"
definition div :: "[i,i]\<Rightarrow>i" (infixr "div" 70)
where "a div b \<equiv> rec(a, 0, \<lambda>u v. rec(succ(u) mod b, succ(v), \<lambda>x y. v))"
lemmas arith_defs = add_def diff_def absdiff_def mult_def mod_def div_def
subsection \<open>Proofs about elementary arithmetic: addition, multiplication, etc.\<close>
subsubsection \<open>Addition\<close>
text \<open>Typing of \<open>add\<close>: short and long versions.\<close>
lemma add_typing: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a #+ b : N"
unfolding arith_defs by typechk
lemma add_typingL: "\<lbrakk>a = c:N; b = d:N\<rbrakk> \<Longrightarrow> a #+ b = c #+ d : N"
unfolding arith_defs by equal
text \<open>Computation for \<open>add\<close>: 0 and successor cases.\<close>
lemma addC0: "b:N \<Longrightarrow> 0 #+ b = b : N"
unfolding arith_defs by rew
lemma addC_succ: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> succ(a) #+ b = succ(a #+ b) : N"
unfolding arith_defs by rew
subsubsection \<open>Multiplication\<close>
text \<open>Typing of \<open>mult\<close>: short and long versions.\<close>
lemma mult_typing: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a #* b : N"
unfolding arith_defs by (typechk add_typing)
lemma mult_typingL: "\<lbrakk>a = c:N; b = d:N\<rbrakk> \<Longrightarrow> a #* b = c #* d : N"
unfolding arith_defs by (equal add_typingL)
text \<open>Computation for \<open>mult\<close>: 0 and successor cases.\<close>
lemma multC0: "b:N \<Longrightarrow> 0 #* b = 0 : N"
unfolding arith_defs by rew
lemma multC_succ: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> succ(a) #* b = b #+ (a #* b) : N"
unfolding arith_defs by rew
subsubsection \<open>Difference\<close>
text \<open>Typing of difference.\<close>
lemma diff_typing: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a - b : N"
unfolding arith_defs by typechk
lemma diff_typingL: "\<lbrakk>a = c:N; b = d:N\<rbrakk> \<Longrightarrow> a - b = c - d : N"
unfolding arith_defs by equal
text \<open>Computation for difference: 0 and successor cases.\<close>
lemma diffC0: "a:N \<Longrightarrow> a - 0 = a : N"
unfolding arith_defs by rew
text \<open>Note: \<open>rec(a, 0, \<lambda>z w.z)\<close> is \<open>pred(a).\<close>\<close>
lemma diff_0_eq_0: "b:N \<Longrightarrow> 0 - b = 0 : N"
unfolding arith_defs
apply (NE b)
apply hyp_rew
done
text \<open>
Essential to simplify FIRST!! (Else we get a critical pair)
\<open>succ(a) - succ(b)\<close> rewrites to \<open>pred(succ(a) - b)\<close>.
\<close>
lemma diff_succ_succ: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> succ(a) - succ(b) = a - b : N"
unfolding arith_defs
apply hyp_rew
apply (NE b)
apply hyp_rew
done
subsection \<open>Simplification\<close>
lemmas arith_typing_rls = add_typing mult_typing diff_typing
and arith_congr_rls = add_typingL mult_typingL diff_typingL
lemmas congr_rls = arith_congr_rls intrL2_rls elimL_rls
lemmas arithC_rls =
addC0 addC_succ
multC0 multC_succ
diffC0 diff_0_eq_0 diff_succ_succ
ML \<open>
(* Instantiate the TSimpFun simplifier with the arithmetic computation
   rules (arithC_rls comp_rls) and arithmetic typing rules. *)
structure Arith_simp = TSimpFun(
val refl = @{thm refl_elem}
val sym = @{thm sym_elem}
val trans = @{thm trans_elem}
val refl_red = @{thm refl_red}
val trans_red = @{thm trans_red}
val red_if_equal = @{thm red_if_equal}
val default_rls = @{thms arithC_rls comp_rls}
val routine_tac = routine_tac @{thms arith_typing_rls routine_rls}
)
fun arith_rew_tac ctxt prems =
make_rew_tac ctxt (Arith_simp.norm_tac ctxt (@{thms congr_rls}, prems))
fun hyp_arith_rew_tac ctxt prems =
make_rew_tac ctxt
(Arith_simp.cond_norm_tac ctxt (prove_cond_tac ctxt, @{thms congr_rls}, prems))
\<close>
method_setup arith_rew = \<open>
Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (arith_rew_tac ctxt ths))
\<close>
method_setup hyp_arith_rew = \<open>
Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (hyp_arith_rew_tac ctxt ths))
\<close>
subsection \<open>Addition\<close>
text \<open>Associative law for addition.\<close>
text \<open>Commutative law for addition. Can be proved using three inductions.
Must simplify after first induction! Orientation of rewrites is delicate.\<close>
lemma add_commute: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a #+ b = b #+ a : N"
apply (NE a)
apply hyp_arith_rew
apply (rule sym_elem)
prefer 2
apply (NE b)
prefer 4
apply (NE b)
apply hyp_arith_rew
done
subsection \<open>Multiplication\<close>
text \<open>Right annihilation in product.\<close>
lemma mult_0_right: "a:N \<Longrightarrow> a #* 0 = 0 : N"
apply (NE a)
apply hyp_arith_rew
done
text \<open>Right successor law for multiplication.\<close>
lemma mult_succ_right: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a #* succ(b) = a #+ (a #* b) : N"
apply (NE a)
apply (hyp_arith_rew add_assoc [THEN sym_elem])
apply (assumption | rule add_commute mult_typingL add_typingL intrL_rls refl_elem)+
done
text \<open>Commutative law for multiplication.\<close>
lemma mult_commute: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a #* b = b #* a : N"
apply (NE a)
apply (hyp_arith_rew mult_0_right mult_succ_right)
done
text \<open>Addition distributes over multiplication.\<close>
lemma add_mult_distrib: "\<lbrakk>a:N; b:N; c:N\<rbrakk> \<Longrightarrow> (a #+ b) #* c = (a #* c) #+ (b #* c) : N"
apply (NE a)
apply (hyp_arith_rew add_assoc [THEN sym_elem])
done
text \<open>Associative law for multiplication.\<close>
lemma mult_assoc: "\<lbrakk>a:N; b:N; c:N\<rbrakk> \<Longrightarrow> (a #* b) #* c = a #* (b #* c) : N"
apply (NE a)
apply (hyp_arith_rew add_mult_distrib)
done
subsection \<open>Difference\<close>
text \<open>
Difference on natural numbers, without negative numbers
\<^item> \<open>a - b = 0\<close> iff \<open>a \<le> b\<close>
\<^item> \<open>a - b = succ(c)\<close> iff \<open>a > b\<close>
\<close>
lemma diff_self_eq_0: "a:N \<Longrightarrow> a - a = 0 : N"
apply (NE a)
apply hyp_arith_rew
done
lemma add_0_right: "\<lbrakk>c : N; 0 : N; c : N\<rbrakk> \<Longrightarrow> c #+ 0 = c : N"
by (rule addC0 [THEN [3] add_commute [THEN trans_elem]])
text \<open>
Addition is the inverse of subtraction: if \<open>b \<le> x\<close> then \<open>b #+ (x - b) = x\<close>.
An example of induction over a quantified formula (a product).
Uses rewriting with a quantified, implicative inductive hypothesis.
\<close>
schematic_goal add_diff_inverse_lemma:
"b:N \<Longrightarrow> ?a : \<Prod>x:N. Eq(N, b-x, 0) \<longrightarrow> Eq(N, b #+ (x-b), x)"
apply (NE b)
\<comment> \<open>strip one "universal quantifier" but not the "implication"\<close>
apply (rule_tac [3] intr_rls)
\<comment> \<open>case analysis on \<open>x\<close> in \<open>succ(u) \<le> x \<longrightarrow> succ(u) #+ (x - succ(u)) = x\<close>\<close>
prefer 4
apply (NE x)
apply assumption
\<comment> \<open>Prepare for simplification of types -- the antecedent \<open>succ(u) \<le> x\<close>\<close>
apply (rule_tac [2] replace_type)
apply (rule_tac [1] replace_type)
apply arith_rew
\<comment> \<open>Solves first 0 goal, simplifies others. Two subgoals remain.
Both follow by rewriting, (2) using quantified induction hyp.\<close>
apply intr \<comment> \<open>strips remaining \<open>\<Prod>\<close>s\<close>
apply (hyp_arith_rew add_0_right)
apply assumption
done
text \<open>
Version of above with premise \<open>b - a = 0\<close> i.e. \<open>a \<ge> b\<close>.
Using @{thm ProdE} does not work -- for \<open>?B(?a)\<close> is ambiguous.
Instead, @{thm add_diff_inverse_lemma} states the desired induction scheme;
the use of \<open>THEN\<close> below instantiates Vars in @{thm ProdE} automatically.
\<close>
lemma add_diff_inverse: "\<lbrakk>a:N; b:N; b - a = 0 : N\<rbrakk> \<Longrightarrow> b #+ (a-b) = a : N"
apply (rule EqE)
apply (rule add_diff_inverse_lemma [THEN ProdE, THEN ProdE])
apply (assumption | rule EqI)+
done
subsection \<open>Absolute difference\<close>
text \<open>Typing of absolute difference: short and long versions.\<close>
lemma absdiff_typing: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a |-| b : N"
unfolding arith_defs by typechk
lemma absdiff_typingL: "\<lbrakk>a = c:N; b = d:N\<rbrakk> \<Longrightarrow> a |-| b = c |-| d : N"
unfolding arith_defs by equal
lemma absdiff_self_eq_0: "a:N \<Longrightarrow> a |-| a = 0 : N"
unfolding absdiff_def by (arith_rew diff_self_eq_0)
lemma absdiffC0: "a:N \<Longrightarrow> 0 |-| a = a : N"
unfolding absdiff_def by hyp_arith_rew
lemma absdiff_succ_succ: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> succ(a) |-| succ(b) = a |-| b : N"
unfolding absdiff_def by hyp_arith_rew
text \<open>Note how easy using commutative laws can be? ...not always...\<close>
lemma absdiff_commute: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a |-| b = b |-| a : N"
unfolding absdiff_def
apply (rule add_commute)
apply (typechk diff_typing)
done
text \<open>If \<open>a + b = 0\<close> then \<open>a = 0\<close>. Surprisingly tedious.\<close>
schematic_goal add_eq0_lemma: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> ?c : Eq(N,a#+b,0) \<longrightarrow> Eq(N,a,0)"
apply (NE a)
apply (rule_tac [3] replace_type)
apply arith_rew
apply intr \<comment> \<open>strips remaining \<open>\<Prod>\<close>s\<close>
apply (rule_tac [2] zero_ne_succ [THEN FE])
apply (erule_tac [3] EqE [THEN sym_elem])
apply (typechk add_typing)
done
text \<open>
Version of above with the premise \<open>a + b = 0\<close>.
Again, resolution instantiates variables in @{thm ProdE}.
\<close>
lemma add_eq0: "\<lbrakk>a:N; b:N; a #+ b = 0 : N\<rbrakk> \<Longrightarrow> a = 0 : N"
apply (rule EqE)
apply (rule add_eq0_lemma [THEN ProdE])
apply (rule_tac [3] EqI)
apply typechk
done
text \<open>Here is a lemma to infer \<open>a - b = 0\<close> and \<open>b - a = 0\<close> from \<open>a |-| b = 0\<close>, below.\<close>
schematic_goal absdiff_eq0_lem:
"\<lbrakk>a:N; b:N; a |-| b = 0 : N\<rbrakk> \<Longrightarrow> ?a : Eq(N, a-b, 0) \<times> Eq(N, b-a, 0)"
apply (unfold absdiff_def)
apply intr
apply eqintr
apply (rule_tac [2] add_eq0)
apply (rule add_eq0)
apply (rule_tac [6] add_commute [THEN trans_elem])
apply (typechk diff_typing)
done
text \<open>If \<open>a |-| b = 0\<close> then \<open>a = b\<close>
proof: \<open>a - b = 0\<close> and \<open>b - a = 0\<close>, so \<open>b = a + (b - a) = a + 0 = a\<close>.
\<close>
lemma absdiff_eq0: "\<lbrakk>a |-| b = 0 : N; a:N; b:N\<rbrakk> \<Longrightarrow> a = b : N"
apply (rule EqE)
apply (rule absdiff_eq0_lem [THEN SumE])
apply eqintr
apply (rule add_diff_inverse [THEN sym_elem, THEN trans_elem])
apply (erule_tac [3] EqE)
apply (hyp_arith_rew add_0_right)
done
subsection \<open>Remainder and Quotient\<close>
text \<open>Typing of remainder: short and long versions.\<close>
lemma mod_typing: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a mod b : N"
unfolding mod_def by (typechk absdiff_typing)
lemma mod_typingL: "\<lbrakk>a = c:N; b = d:N\<rbrakk> \<Longrightarrow> a mod b = c mod d : N"
unfolding mod_def by (equal absdiff_typingL)
text \<open>Computation for \<open>mod\<close>: 0 and successor cases.\<close>
lemma modC0: "b:N \<Longrightarrow> 0 mod b = 0 : N"
unfolding mod_def by (rew absdiff_typing)
lemma modC_succ: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow>
succ(a) mod b = rec(succ(a mod b) |-| b, 0, \<lambda>x y. succ(a mod b)) : N"
unfolding mod_def by (rew absdiff_typing)
text \<open>Typing of quotient: short and long versions.\<close>
lemma div_typing: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a div b : N"
unfolding div_def by (typechk absdiff_typing mod_typing)
lemma div_typingL: "\<lbrakk>a = c:N; b = d:N\<rbrakk> \<Longrightarrow> a div b = c div d : N"
unfolding div_def by (equal absdiff_typingL mod_typingL)
lemmas div_typing_rls = mod_typing div_typing absdiff_typing
text \<open>Computation for quotient: 0 and successor cases.\<close>
lemma divC0: "b:N \<Longrightarrow> 0 div b = 0 : N"
unfolding div_def by (rew mod_typing absdiff_typing)
lemma divC_succ: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow>
succ(a) div b = rec(succ(a) mod b, succ(a div b), \<lambda>x y. a div b) : N"
unfolding div_def by (rew mod_typing)
text \<open>Version of above with same condition as the \<open>mod\<close> one.\<close>
lemma divC_succ2: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow>
succ(a) div b =rec(succ(a mod b) |-| b, succ(a div b), \<lambda>x y. a div b) : N"
apply (rule divC_succ [THEN trans_elem])
apply (rew div_typing_rls modC_succ)
apply (NE "succ (a mod b) |-|b")
apply (rew mod_typing div_typing absdiff_typing)
done
text \<open>For case analysis on whether a number is 0 or a successor.\<close>
lemma iszero_decidable: "a:N \<Longrightarrow> rec(a, inl(eq), \<lambda>ka kb. inr(<ka, eq>)) :
Eq(N,a,0) + (\<Sum>x:N. Eq(N,a, succ(x)))"
apply (NE a)
apply (rule_tac [3] PlusI_inr)
apply (rule_tac [2] PlusI_inl)
apply eqintr
apply equal
done
text \<open>Main Result. Holds when \<open>b\<close> is 0 since \<open>a mod 0 = a\<close> and \<open>a div 0 = 0\<close>.\<close>
lemma mod_div_equality: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a mod b #+ (a div b) #* b = a : N"
apply (NE a)
apply (arith_rew div_typing_rls modC0 modC_succ divC0 divC_succ2)
apply (rule EqE)
\<comment> \<open>case analysis on \<open>succ(u mod b) |-| b\<close>\<close>
apply (rule_tac a1 = "succ (u mod b) |-| b" in iszero_decidable [THEN PlusE])
apply (erule_tac [3] SumE)
apply (hyp_arith_rew div_typing_rls modC0 modC_succ divC0 divC_succ2)
\<comment> \<open>Replace one occurrence of \<open>b\<close> by \<open>succ(u mod b)\<close>. Clumsy!\<close>
apply (rule add_typingL [THEN trans_elem])
apply (erule EqE [THEN absdiff_eq0, THEN sym_elem])
apply (rule_tac [3] refl_elem)
apply (hyp_arith_rew div_typing_rls)
done
end
|
!> Implements the Lennard-Jones 12-6 potential and the force derived from it.
module m_lj
use num_kind
use json_module
use m_json_wrapper, only: get_parameter
implicit none

type :: lj_potential
   !> The range parameter which gives the zero-crossing distance of the
   !! potential.
   real(dp) :: sigma_0 = 1.0_dp
   !> The well-depth of the potential.
   real(dp) :: epsilon_0 = 1.0_dp
contains
   procedure :: potential => lj1
   procedure :: force => lj_force
   procedure :: to_json => lj_to_json
end type lj_potential

interface lj_potential
   module procedure lj_init_parameters, lj_from_json
end interface lj_potential

contains

!> Initializes the LJ 12-6 potential with the well-depth @p epsilon_0
!! and contact distance @p sigma_0.
function lj_init_parameters(epsilon_0, sigma_0) result(this)
   real(dp), intent(in) :: epsilon_0, sigma_0
   type(lj_potential) :: this
   this%epsilon_0 = epsilon_0
   this%sigma_0 = sigma_0
end function lj_init_parameters

!> Initialize the module using parameters given by @p json_val.
!! Missing keys leave the defaults in place; negative values are
!! rejected by the lower-bound check in get_parameter.
function lj_from_json(json_val) result(this)
   type(json_value), pointer, intent(in) :: json_val
   type(lj_potential) :: this
   call get_parameter(json_val, 'lj_sigma_0', this%sigma_0, error_lb=0._dp)
   call get_parameter(json_val, 'lj_epsilon_0', this%epsilon_0, error_lb=0._dp)
end function lj_from_json

!> Write the module parameters to the JSON value @p json_val.
subroutine lj_to_json(this, json_val)
   class(lj_potential), intent(in) :: this
   type(json_value), pointer, intent(inout) :: json_val
   call json_add(json_val, 'type', 'lj')
   call json_add(json_val, 'lj_sigma_0', this%sigma_0)
   call json_add(json_val, 'lj_epsilon_0', this%epsilon_0)
end subroutine lj_to_json

!> Returns in @p energy the LJ 12-6 potential at interparticle distance
!! @p r, using the parameterization stored in @p this.  @p overlap, when
!! present, is always set to .false. (the LJ potential has no hard core).
pure subroutine lj1(this, r, energy, overlap)
   class(lj_potential), intent(in) :: this
   real(dp), intent(in) :: r
   real(dp), intent(out) :: energy
   logical, intent(out), optional :: overlap
   energy = lj3(r, this%epsilon_0, this%sigma_0)
   if (present(overlap)) overlap = .false.
end subroutine lj1

!> Returns the LJ 12-6 potential value 4*epsilon*((sigma/rij)**12 -
!! (sigma/rij)**6) with given well-depth @p epsilon, range parameter
!! @p sigma and interparticle distance @p rij.
!! Note: kind-suffixed literals (1.0_dp, 4.0_dp) keep all arithmetic in
!! double precision, per Fortran best practice.
pure function lj3(rij, epsilon, sigma)
   real(dp) :: lj3
   real(dp), intent(in) :: rij
   real(dp), intent(in) :: epsilon
   real(dp), intent(in) :: sigma
   ! Compute s6 = (sigma/rij)**6 once and reuse it for the 12-6 form.
   lj3 = (sigma / rij)**6
   lj3 = lj3 * (lj3 - 1.0_dp)
   lj3 = 4.0_dp * epsilon * lj3
end function lj3

!> Returns the force exerted on the particle j by particle i when the
!! interparticle vector from i to j is @p rij.  This is the analytic
!! derivative of the 12-6 potential, directed along the unit vector
!! rij/|rij|.
pure function lj_force(this, rij)
   class(lj_potential), intent(in) :: this
   real(dp), intent(in) :: rij(3)
   real(dp) :: lj_force(3)
   real(dp) :: urij(3)
   real(dp) :: rijabs
   rijabs = sqrt(dot_product(rij, rij))
   urij = rij/rijabs
   lj_force = 24.0_dp * this%epsilon_0 * this%sigma_0**6 / rijabs**7 * &
      (2.0_dp * this%sigma_0**6 / rijabs**6 - 1.0_dp) * urij
end function lj_force

end module m_lj
|
(** Authors: Jianzhou Zhao. *)
Require Import Bang_Renaming.
Require Import Bang_Parametricity.
Require Import Bang_OParametricity_App.
Require Import Bang_ContextualEq_Infrastructure.
Require Import Bang_ContextualEq_Sound.
Require Import Bang_ContextualEq_Prop.
Export Parametricity.
(* CIU-style equivalence: e and e' are both well typed at t under E/lE,
   and for every well-formed closing substitution triple (delta for type
   variables, gamma for nonlinear and lgamma for linear term variables)
   the fully substituted terms are observationally equivalent in the
   empty context at the substituted type. *)
Definition F_ciu_eq E lE e e' t : Prop :=
typing E lE e t /\
typing E lE e' t /\
forall dsubst gsubst lgsubst,
wf_lgamma_subst E lE dsubst gsubst lgsubst ->
F_nobservational_eq nil nil
(apply_delta_subst dsubst (apply_gamma_subst gsubst (apply_gamma_subst lgsubst e)))
(apply_delta_subst dsubst (apply_gamma_subst gsubst (apply_gamma_subst lgsubst e')))
(apply_delta_subst_typ dsubst t).
(*
\* x:s. e = \y a:!s. let !x = y in e
*)
(* Intuitionistic abstraction encoded as an abstraction over a
   bang-typed argument whose contents are unpacked by a let. *)
Definition exp_iabs t e := exp_abs (typ_bang t) (exp_let 0 e).
(* Intuitionistic arrow: an arrow whose domain is wrapped in a bang. *)
Definition typ_iarrow T1 T2 := typ_arrow (typ_bang T1) T2.
(* Context counterpart of exp_iabs, capturing the variable x. *)
Definition ctx_iabs x T C := ctx_abs_free (typ_bang T) (ctx_let2_capture 0 x C).
(* exp_iabs T e1 is locally closed whenever T is a type and the body is
   locally closed after opening with any sufficiently fresh variable. *)
Lemma expr_iabs : forall L T e1,
type T ->
(forall x, x `notin` L -> expr (open_ee e1 x)) ->
expr (exp_iabs T e1).
Proof.
intros L T e1 H1 H2.
unfold exp_iabs.
apply expr_abs with (L:=L); auto.
intros x xn.
apply H2 in xn.
unfold open_ee in *. simpl.
apply expr_let with (L:=L `union` {{x}}); auto.
intros y yn.
unfold open_ee.
rewrite open_ee_open_ee_rec__commut; auto.
apply open_ee_rec_swap with (y:=y) in xn.
rewrite <- open_ee_rec_expr; auto.
Qed.
(* Well-formedness of the encoded intuitionistic arrow follows from
   well-formedness of both components. *)
Lemma wf_typ_iarrow : forall E T1 T2,
wf_typ E T1 ->
wf_typ E T2 ->
wf_typ E (typ_iarrow T1 T2).
Proof.
intros E T1 T2 H1 H2.
unfold typ_iarrow.
apply wf_typ_arrow; auto.
Qed.
(* Typing rule for the encoded intuitionistic abstraction: if the body
   types at T2 under a nonlinear binding x:T1, then the iabs types at
   typ_iarrow T1 T2.  The proof threads the bang-typed argument through
   a singleton linear context and discharges it with typing_let. *)
Lemma typing_iabs : forall L E D T1 e1 T2,
wf_typ E T1 ->
(forall x, x `notin` L -> typing ([(x, bind_typ T1)]++E) D (open_ee e1 x) T2) ->
typing E D (exp_iabs T1 e1) (typ_iarrow T1 T2).
Proof.
intros L E D T1 e1 T2 H1 H2.
unfold exp_iabs. unfold typ_iarrow.
apply typing_abs with (L:=L `union` fv_ee e1 `union` dom E `union` dom D); auto.
intros x xn.
assert (x `notin` L) as xnL. auto.
apply H2 in xnL.
unfold open_ee. simpl.
apply typing_let with (D1:=[(x,lbind_typ (typ_bang T1))])(D2:=D)(T1:=T1)(L:={{x}} `union` dom E `union` dom D); auto.
apply typing_lvar; auto.
rewrite_env ([(x,lbind_typ (typ_bang T1))]++nil).
apply wf_lenv_typ; auto.
apply typing_regular in xnL. decompose [and] xnL.
inversion H; auto.
intros y yn.
unfold open_ee in *.
rewrite open_ee_open_ee_rec__commut; auto.
rewrite <- open_ee_rec_expr; eauto using open_ee_rec_swap.
apply swap_typing_nonlin_head with (x:=x); auto.
rewrite_env ([(x,lbind_typ (typ_bang T1))]++nil).
rewrite_env ([(x,lbind_typ (typ_bang T1))]++D).
apply lenv_split_left; auto.
apply wf_lenv_trivial_split; auto.
apply typing_regular in xnL. decompose [and] xnL.
rewrite_env (nil++[(x,bind_typ T1)]++E) in H3.
apply wf_lenv_strengthening in H3; auto.
Qed.
(* ctx_iabs y T C1 is a well-formed context whenever T is a type and C1
   is a well-formed context after opening with any fresh variable;
   mirrors expr_iabs at the context level. *)
Lemma context_iabs : forall L y T C1,
type T ->
(forall x : atom, x `notin` L -> context (open_ec C1 x)) ->
context (ctx_iabs y T C1) .
Proof.
intros L y T C1 H1 H2.
unfold ctx_iabs.
apply context_abs_free with (L:=L); auto.
intros x xn.
apply H2 in xn.
unfold open_ec in *. simpl.
apply context_let2_capture with (L:=L `union` {{x}}); auto.
intros x0 x0n.
unfold open_ec.
rewrite open_ec_open_ec_rec__commut; auto.
apply open_ec_rec_swap with (y:=y) in xn.
rewrite <- open_ec_rec_context; auto.
Qed.
(* Capture rule for intuitionistic abstraction at the contexting level:
   a nonlinear binding y:T1' of the outer environment E' can be abstracted
   over, closing C1 at y and removing the binding from E'.  The result has
   the intuitionistic arrow type typ_iarrow T1' T2'. *)
Lemma contexting_iabs_capture : forall E D T y T1' C1 T2' E' D',
wf_typ (env_remove (y, (bind_typ T1')) E') T1' ->
binds y (bind_typ T1') E' ->
y `notin` dom D `union` cv_ec C1 ->
contexting E D T C1 E' D' T2' ->
contexting E D T (ctx_iabs y T1' (close_ec C1 y)) (env_remove (y, (bind_typ T1')) E') D' (typ_iarrow T1' T2').
Proof.
intros E D T y T1' C1 T2' E' D' Hwft Hbinds Hyn Hcontext.
unfold ctx_iabs. unfold typ_iarrow.
apply contexting_abs_free with (L:=fv_ec C1 `union` fv_env E' `union` dom D `union` dom D'); auto.
intros x xn.
(* x is fresh for the reduced environment env_remove (y, T1') E'. *)
assert (x `notin` dom (env_remove (y, bind_typ T1') E')) as xnd.
destruct_notin.
assert (J':=@env_remove_sub E' y (bind_typ T1')).
apply free_env__free_dom. fsetdec.
(* Singleton linear env [(x, !T1')] is well-formed over the reduced env. *)
assert (wf_lenv (env_remove (y, bind_typ T1') E') [(x,lbind_typ (typ_bang T1'))]) as Wfle.
rewrite_env ([(x,lbind_typ (typ_bang T1'))]++nil).
apply wf_lenv_typ; auto.
apply wf_lenv_empty.
apply contexting_regular in Hcontext. decompose [and] Hcontext.
assert (J:=@env_remove_inv E' y (bind_typ T1') H2 Hbinds).
destruct J as [E1'0 [E2'0 [EQ1 EQ2]]]; subst.
rewrite EQ1.
apply wf_env_strengthening in H2; auto.
(* D' stays well-formed after removing y from the type environment. *)
assert (wf_lenv (env_remove (y, bind_typ T1') E') D') as Wfle'.
apply contexting_regular in Hcontext. decompose [and] Hcontext.
assert (J:=@env_remove_inv E' y (bind_typ T1') H2 Hbinds).
destruct J as [E1'0 [E2'0 [EQ1 EQ2]]]; subst.
rewrite EQ1.
apply wf_lenv_strengthening in H3; auto.
unfold open_ec. simpl. simpl_env.
(* Opening at index 1 is a no-op on close_ec C1 y because C1 is a context
   (its binding upper bound is 0). *)
assert (open_ec_rec 1 x (close_ec C1 y) = (close_ec C1 y)) as J.
rewrite open_ec_rec_id_bctx_eupper; auto.
assert (J:=@close_ec_bctx_eupper C1 0 y).
rewrite context_bctx_eupper with (C:=C1) in J; auto.
rewrite J.
apply contexting_let2_capture with (D1':=[(x,lbind_typ (typ_bang T1'))])(D2':=D')(T1':=T1'); auto.
rewrite_env ([(x,lbind_typ (typ_bang T1'))]++nil).
rewrite_env ([(x,lbind_typ (typ_bang T1'))]++D').
apply lenv_split_left; auto.
apply wf_lenv_trivial_split; auto.
simpl. apply disjdom_one_2; auto.
Qed.
(* ---------------------------------------------------------------------- *)
(* Generators used to turn an environment/substitution into a program
   context (and the corresponding type).  Each gen_ctx_* recursion wraps C
   with one context constructor per binding; each gen_typ_* mirrors it at
   the type level. *)
(* ---------------------------------------------------------------------- *)

(* Abstract every linear binding of lE around C (innermost binding first). *)
Fixpoint gen_ctx_labs (lE:lenv) (C:ctx) {struct lE} : ctx :=
match lE with
| nil => C
| (x, lbind_typ T)::lE' => ctx_abs_capture x T (close_ec (gen_ctx_labs lE' C) x)
end.
(* Corresponding type: one linear arrow per binding of lE. *)
Fixpoint gen_typ_labs (lE:lenv) (t:typ) {struct lE} : typ :=
match lE with
| nil => t
| (x, lbind_typ T)::lE' => typ_arrow T (gen_typ_labs lE' t)
end.
(* Abstract every term binding (bind_typ) of E intuitionistically around C;
   kind bindings (bind_kn) are skipped. *)
Fixpoint gen_ctx_abs (E:env) (C:ctx) {struct E} : ctx :=
match E with
| nil => C
| (X, bind_kn)::E' => (gen_ctx_abs E' C)
| (x, bind_typ T)::E' => ctx_iabs x T (close_ec (gen_ctx_abs E' C) x)
end.
(* Corresponding type: one intuitionistic arrow per term binding of E. *)
Fixpoint gen_typ_abs (E:env) (t:typ) {struct E} : typ :=
match E with
| nil => t
| (X, bind_kn)::E' => (gen_typ_abs E' t)
| (x, bind_typ T)::E' => typ_iarrow T (gen_typ_abs E' t)
end.
(* Type-abstract every kind binding of E around C; term bindings are skipped. *)
Fixpoint gen_ctx_tabs (E:env) (C:ctx) {struct E} : ctx :=
match E with
| nil => C
| (X, bind_kn)::E' => ctx_tabs_capture X (close_tc (gen_ctx_tabs E' C) X)
| (x, bind_typ T)::E' => (gen_ctx_tabs E' C)
end.
(* Corresponding type: one universal quantifier per kind binding of E. *)
Fixpoint gen_typ_tabs (E:env) (t:typ) {struct E} : typ :=
match E with
| nil => t
| (X, bind_kn)::E' => typ_all (close_tt (gen_typ_tabs E' t) X)
| (x, bind_typ T)::E' => (gen_typ_tabs E' t)
end.
(* Type-apply C to every type of the delta substitution. *)
Fixpoint gen_ctx_tapp (dsubst:delta_subst) (C:ctx) {struct dsubst} : ctx :=
match dsubst with
| nil => C
| (X, T)::dsubst' => ctx_tapp (gen_ctx_tapp dsubst' C) T
end.
(* Corresponding type: open the result type with each substituted type. *)
Fixpoint gen_typ_tapp (dsubst:delta_subst) (t:typ) {struct dsubst} : typ :=
match dsubst with
| nil => t
| (X, T)::dsubst' => open_tt (gen_typ_tapp dsubst' t) T
end.
(* Apply C to every value of a linear gamma substitution. *)
Fixpoint gen_ctx_lapp (lgsubst:gamma_subst) (C:ctx) {struct lgsubst} : ctx :=
match lgsubst with
| nil => C
| (x, v)::lgsubst' => ctx_app1 (gen_ctx_lapp lgsubst' C) v
end.
(* Apply C to every value of a (nonlinear) gamma substitution, banged to
   match the intuitionistic arrows produced by gen_ctx_abs. *)
Fixpoint gen_ctx_app (gsubst:gamma_subst) (C:ctx) {struct gsubst} : ctx :=
match gsubst with
| nil => C
| (x, v)::gsubst' => ctx_app1 (gen_ctx_app gsubst' C) (exp_bang v)
end.
(* Inversion: a well-formed lgamma substitution for a linear environment
   starting with x:T yields a substitution entry [(x, e)] at the head of
   lgsubst, together with the typing of e and well-formedness of the tail. *)
Lemma wf_lgsubst_lcons_inv' : forall E x T D dsubst gsubst lgsubst,
wf_lgamma_subst E ([(x, lbind_typ T)]++D) dsubst gsubst lgsubst ->
exists e, exists lgsubst',
lgsubst = [(x, e)] ++ lgsubst' /\
dom D [=] dom lgsubst' /\
wf_typ E T /\
typing nil nil e (apply_delta_subst_typ dsubst T) /\
wf_lgamma_subst E D dsubst gsubst lgsubst'.
Proof.
intros E x T D dsubst gsubst lgsubst Wflg.
remember ([(x, lbind_typ T)]++D) as DD.
generalize dependent D.
generalize dependent x.
generalize dependent T.
induction Wflg; intros; subst.
(* empty-substitution case: impossible, DD is nonempty. *)
inversion HeqDD.
(* nonlinear-value case: recurse, then weaken wf_typ by the new head. *)
assert ([(x0, lbind_typ T0)]++D=[(x0, lbind_typ T0)]++D) as EQ. auto.
apply IHWflg in EQ.
destruct EQ as [e0 [lgsubst' [EQ1 [EQ2 [Wft [Typ Wflg']]]]]]; subst.
exists e0. exists lgsubst'.
split; auto.
split; auto.
split; auto.
apply wf_typ_weaken_head; auto.
apply wf_lgamma_subst__wf_lenv in Wflg.
destruct Wflg; auto.
(* linear-value case: the head matches directly. *)
inversion HeqDD; subst. clear HeqDD.
exists e. exists lgsE.
split; auto.
split; auto.
apply dom_lgamma_subst in Wflg.
decompose [and] Wflg; auto.
(* kind case: recurse; the substituted type is unaffected because X is
   fresh for T0 (subst_tt_fresh). *)
assert ([(x, lbind_typ T0)]++D=[(x, lbind_typ T0)]++D) as EQ. auto.
apply IHWflg in EQ.
destruct EQ as [e [lgsubst' [EQ1 [EQ2 [Wft [Typ Wflg']]]]]]; subst.
exists e. exists lgsubst'.
split; auto.
split; auto.
split; auto.
apply wf_typ_weaken_head; auto.
apply wf_lgamma_subst__wf_lenv in Wflg.
destruct Wflg; auto.
split; auto.
simpl. simpl in H0.
rewrite <- subst_tt_fresh; auto.
apply notin_fv_wf with (X:=X) in Wft; auto.
Qed.
(* The captured variables of the linear-abstraction generator over a hole
   are exactly the domain of lE (close_ec removes x, ctx_abs_capture re-adds it). *)
Lemma cv_ec_gen_ctx_labs_hole : forall lE,
cv_ec (gen_ctx_labs lE ctx_hole) [=] dom lE.
Proof.
induction lE; simpl; auto.
destruct a.
destruct l. simpl.
rewrite <- cv_ec_close_ec.
rewrite IHlE. fsetdec.
Qed.
(* Inversion: a well-formed lgamma substitution for an environment starting
   with a kind binding X yields a head entry [(X, T)] of the delta
   substitution, provided X does not occur free in the linear env D. *)
Lemma wf_lgsubst_kcons_inv' : forall E X D dsubst gsubst lgsubst,
wf_lgamma_subst ([(X, bind_kn)]++E) D dsubst gsubst lgsubst ->
X `notin` fv_lenv D ->
exists T, exists dsubst',
dsubst = [(X, T)] ++ dsubst' /\
ddom_env E [=] dom dsubst' /\
wf_typ nil T /\
wf_lgamma_subst E D dsubst' gsubst lgsubst.
Proof.
intros E X D dsubst gsubst lgsubst Wflg XnD.
remember ([(X, bind_kn)]++E) as EE.
generalize dependent E.
generalize dependent X.
induction Wflg; intros; subst.
(* empty and nonlinear-value heads are impossible: EE starts with bind_kn. *)
inversion HeqEE.
inversion HeqEE.
(* linear-value case: recurse and rebuild the slval entry; X is fresh, so
   substituting it in the stored type/typing is the identity. *)
assert ([(X, bind_kn)]++E0=[(X, bind_kn)]++E0) as EQ. auto.
apply IHWflg in EQ.
destruct EQ as [T0 [dsubst' [EQ1 [EQ2 [Wft Wflg']]]]]; subst.
exists T0. exists dsubst'.
split; auto.
split; auto.
split; auto.
apply wf_lgamma_subst_slval; auto.
simpl in H1.
simpl_env in XnD.
simpl in XnD.
rewrite <- subst_tt_fresh in H1; auto.
rewrite_env (nil ++ [(X, bind_kn)] ++E0) in H2.
simpl_env in XnD.
simpl in XnD.
apply wf_typ_strengthening_typ in H2; auto.
simpl_env in XnD. auto.
(* kind case: head matches directly. *)
inversion HeqEE; subst.
exists T. exists dsE.
split; auto.
split; auto.
apply dom_lgamma_subst in Wflg.
destruct Wflg as [J1 [J2 J3]]. assumption.
Qed.
(* Inversion: a well-formed lgamma substitution for an environment starting
   with a term binding x:T yields a head entry [(x, e)] of the (nonlinear)
   gamma substitution, with its typing and the tail's well-formedness. *)
Lemma wf_lgsubst_cons_inv' : forall E x T D dsubst gsubst lgsubst,
wf_lgamma_subst ([(x, bind_typ T)]++E) D dsubst gsubst lgsubst ->
exists e, exists gsubst',
gsubst = [(x, e)] ++ gsubst' /\
gdom_env E [=] dom gsubst' /\
wf_typ E T /\
typing nil nil e (apply_delta_subst_typ dsubst T) /\
wf_lgamma_subst E D dsubst gsubst' lgsubst.
Proof.
intros E x T D dsubst gsubst lgsubst Wflg.
remember ([(x, bind_typ T)]++E) as EE.
generalize dependent E.
generalize dependent x.
generalize dependent T.
induction Wflg; intros; subst.
(* empty case: impossible. *)
inversion HeqEE.
(* nonlinear-value case: head matches directly. *)
inversion HeqEE; subst. clear HeqEE.
exists e. exists gsE.
split; auto.
split; auto.
apply dom_lgamma_subst in Wflg.
decompose [and] Wflg; auto.
(* linear-value case: recurse and rebuild the slval entry, strengthening
   the stored wf_typ past the removed head binding. *)
assert ([(x0, bind_typ T0)]++E0=[(x0, bind_typ T0)]++E0) as EQ. auto.
apply IHWflg in EQ.
destruct EQ as [e0 [gsubst' [EQ1 [EQ2 [Wft [Typ Wflg']]]]]]; subst.
exists e0. exists gsubst'.
split; auto.
split; auto.
split; auto.
split; auto.
apply wf_lgamma_subst_slval; auto.
rewrite_env (nil ++ [(x0, bind_typ T0)] ++ E0) in H2.
apply wf_typ_strengthening in H2; auto.
apply wf_lgamma_subst__wf_lenv in Wflg.
destruct Wflg; auto.
(* kind case: impossible, EE starts with bind_typ. *)
inversion HeqEE.
Qed.
(* Domain correspondence for a well-formed gamma substitution: the delta
   substitution covers the kind bindings of E and the gamma substitution
   covers its term bindings. *)
Lemma dom_gamma_subst : forall E dsubst gsubst,
wf_gamma_subst E dsubst gsubst ->
ddom_env E [=] dom dsubst /\
gdom_env E [=] dom gsubst.
Proof.
intros E dsubst gsubst Wfg.
induction Wfg; simpl_env; simpl.
split; auto.
destruct IHWfg as [J1 J2].
rewrite <- J1. rewrite <- J2.
split; auto. fsetdec.
clear. fsetdec.
destruct IHWfg as [J1 J2].
rewrite <- J1. rewrite <- J2.
split; auto. fsetdec.
clear. fsetdec.
Qed.
(* Inversion for wf_gamma_subst with a term-binding head: analogous to
   wf_lgsubst_cons_inv' but without the linear components. *)
Lemma wf_gsubst_cons_inv' : forall E x T dsubst gsubst,
wf_gamma_subst ([(x, bind_typ T)]++E) dsubst gsubst ->
exists e, exists gsubst',
gsubst = [(x, e)] ++ gsubst' /\
gdom_env E [=] dom gsubst' /\
wf_typ E T /\
typing nil nil e (apply_delta_subst_typ dsubst T) /\
wf_gamma_subst E dsubst gsubst'.
Proof.
intros E x T dsubst gsubst Wflg.
remember ([(x, bind_typ T)]++E) as EE.
generalize dependent E.
generalize dependent x.
generalize dependent T.
induction Wflg; intros; subst.
inversion HeqEE.
(* value case: head matches directly. *)
inversion HeqEE; subst. clear HeqEE.
exists e. exists gsE.
split; auto.
split; auto.
apply dom_gamma_subst in Wflg.
decompose [and] Wflg; auto.
(* kind case: impossible. *)
inversion HeqEE.
Qed.
(* Generalized helper: abstracting all the linear bindings of the suffix lE
   produces a contexting derivation that consumes lE (leaving lE') and whose
   result type stacks one linear arrow per binding.  lE' accumulates the
   already-processed prefix. *)
Lemma _from_subst_to_ctx_labs : forall lE E lE' t,
uniq lE ->
wf_typ E t ->
wf_lenv E (lE'++lE) ->
contexting E (lE'++lE) t (gen_ctx_labs lE ctx_hole) E lE' (gen_typ_labs lE t).
Proof.
induction lE; intros E lE' t Uniq Hwft Hwlenv.
(* base: the empty generator is the hole. *)
simpl_env in *. apply contexting_hole; auto.
destruct a. destruct l. simpl_env in Uniq.
assert (J:=Uniq).
inversion J; subst.
(* shuffle the head binding into the accumulator and recurse. *)
rewrite_env ((lE'++[(a, lbind_typ t0)])++lE) in Hwlenv.
apply IHlE with (t:=t) in Hwlenv; auto.
simpl.
(* restate the goal so the output linear env appears as a lenv_remove,
   matching the shape required by contexting_abs_capture. *)
assert (contexting E (lE'++(a, lbind_typ t0)::lE) t (ctx_abs_capture a t0 (close_ec (gen_ctx_labs lE ctx_hole) a)) E lE' (typ_arrow t0 (gen_typ_labs lE t)) =
contexting E (lE'++(a, lbind_typ t0)::lE) t (ctx_abs_capture a t0 (close_ec (gen_ctx_labs lE ctx_hole) a)) E (lenv_remove (a, lbind_typ t0) (lE'++[(a, lbind_typ t0)]++nil)) (typ_arrow t0 (gen_typ_labs lE t))) as EQ.
rewrite lenv_remove_opt.
simpl_env. auto.
apply contexting_regular in Hwlenv.
decompose [and] Hwlenv.
clear - H5. apply uniq_from_wf_lenv in H5. simpl_env. auto.
rewrite EQ. clear EQ.
simpl_env. simpl_env in Hwlenv.
apply contexting_abs_capture; auto.
apply contexting_regular in Hwlenv.
decompose [and] Hwlenv.
clear - H5.
apply wf_typ_from_lbinds_typ with (x:=a) (U:=t0) in H5; auto.
(* a is not among the captured variables: cv_ec is dom lE, and a is fresh
   there and in gdom_env E. *)
rewrite cv_ec_gen_ctx_labs_hole.
apply contexting_regular in Hwlenv.
decompose [and] Hwlenv.
assert (a `notin` gdom_env E) as anE.
apply dom__gdom.
apply disjoint_wf_lenv in H2.
clear - H2. solve_uniq.
auto.
Qed.
(* Main form: abstracting all linear bindings of lE empties the linear
   environment of the derivation's output. *)
Lemma from_subst_to_ctx_labs : forall E lE t,
wf_lenv E lE ->
wf_typ E t ->
contexting E lE t (gen_ctx_labs lE ctx_hole) E nil (gen_typ_labs lE t).
Proof.
intros E lE t Hwflenv Hwft.
rewrite_env (nil++lE).
apply _from_subst_to_ctx_labs; auto.
simpl_env. apply uniq_from_wf_lenv in Hwflenv; auto.
Qed.
(* Captured variables of the intuitionistic-abstraction generator:
   exactly the term-variable domain of E plus whatever C already captures. *)
Lemma cv_ec_gen_ctx_abs : forall E C,
cv_ec (gen_ctx_abs E C) [=] gdom_env E `union` cv_ec C.
Proof.
induction E; intro C; simpl.
fsetdec.
destruct a.
destruct b; simpl.
rewrite IHE. fsetdec.
unfold shift_ec.
rewrite <- cv_ec_close_ec.
rewrite IHE. fsetdec.
Qed.
(* The linear-abstraction generator over a hole has no free context
   variables: each close_ec removes the one introduced by the binding. *)
Lemma fv_ec_gen_ctx_labs_hole : forall lE,
fv_ec (gen_ctx_labs lE ctx_hole) [=] {}.
Proof.
induction lE; simpl; auto.
destruct a.
destruct l. simpl.
assert (J := @close_ec_fv_ec_upper (gen_ctx_labs lE ctx_hole) a).
rewrite IHlE in J; auto.
fsetdec.
Qed.
(* Type abstraction adds no free term variables: if C has none, neither
   does gen_ctx_tabs E C. *)
Lemma fv_ec_gen_ctx_tabs_hole : forall E C,
fv_ec C [=] {} ->
fv_ec (gen_ctx_tabs E C) [=] {}.
Proof.
induction E; intros C Heq; simpl; auto.
destruct a.
destruct b; simpl.
apply IHE in Heq. rewrite <- fv_ec_close_tc. assumption.
rewrite IHE; auto.
Qed.
(* Dropping the term bindings of a middle segment (remove_tmvar) preserves
   type well-formedness: types never mention term variables. *)
Lemma wf_typ_strengthening_remove_tmvar : forall E2 E1 E3 t,
wf_typ (E1++E2++E3) t ->
wf_typ (E1++remove_tmvar E2 ++ E3) t.
Proof.
induction E2; intros E1 E3 t Wft; simpl.
simpl_env in Wft; assumption.
destruct a.
destruct b; simpl_env in *.
(* kind bindings are kept by remove_tmvar: shift them into E1 and recurse. *)
rewrite_env ((E1++[(a, bind_kn)])++E2++E3) in Wft.
apply IHE2 in Wft.
simpl_env in Wft; auto.
(* term bindings are dropped: strengthen first, then recurse. *)
apply wf_typ_strengthening in Wft.
apply IHE2 in Wft; auto.
Qed.
(* Generalized helper: intuitionistically abstracting all the term bindings
   of the suffix E removes them from the output environment (remove_tmvar E
   keeps only kind bindings), stacking one typ_iarrow per binding.  E' is
   the already-processed prefix. *)
Lemma _from_subst_to_ctx_abs : forall E E' lE t t' C,
uniq E ->
fv_ec C [=] {} ->
cv_ec C [=] dom lE ->
contexting (E'++E) lE t C (E'++E) nil t' ->
contexting (E'++E) lE t (gen_ctx_abs E C) (E'++remove_tmvar E) nil (gen_typ_abs E t').
Proof.
induction E; intros E' lE t t' C Uniq Hfvc Hcvc Hctx.
simpl. simpl_env in *. auto.
destruct a. destruct b; simpl.
(* kind binding: remove_tmvar keeps it; just shift into E' and recurse. *)
simpl_env in *.
assert (J:=Uniq).
rewrite_env ((E'++[(a, bind_kn)])++E) in Hctx.
inversion J; subst.
apply IHE in Hctx; simpl_env; auto.
simpl_env in Hctx. auto.
(* term binding: recurse, then capture it with contexting_iabs_capture. *)
simpl_env in *.
assert (J:=Uniq).
inversion J; subst.
rewrite_env ((E'++[(a, bind_typ t0)])++E) in Hctx.
apply IHE in Hctx; simpl_env; auto.
simpl_env in Hctx.
(* restate the output environment as an env_remove so the capture rule applies. *)
assert (contexting (E'++[(a, bind_typ t0)]++E) lE t (ctx_iabs a t0 (close_ec (gen_ctx_abs E C) a)) (E'++remove_tmvar E) nil (typ_iarrow t0 (gen_typ_abs E t')) =
contexting (E'++[(a, bind_typ t0)]++E) lE t (ctx_iabs a t0 (close_ec (gen_ctx_abs E C) a)) (env_remove (a, bind_typ t0) (E'++[(a, bind_typ t0)]++remove_tmvar E)) nil (typ_iarrow t0 (gen_typ_abs E t'))) as EQ.
rewrite env_remove_opt.
simpl_env. auto.
apply contexting_regular in Hctx.
decompose [and] Hctx.
apply uniq_from_wf_env in H4. assumption.
rewrite EQ. clear EQ.
apply contexting_iabs_capture; auto.
rewrite env_remove_opt.
apply contexting_regular in Hctx.
decompose [and] Hctx.
apply wf_typ_from_binds_typ with (x:=a) (U:=t0) in H; auto.
apply wf_typ_strengthening in H.
rewrite_env (E'++E++nil) in H.
apply wf_typ_strengthening_remove_tmvar in H.
simpl_env in H; assumption.
apply contexting_regular in Hctx.
decompose [and] Hctx.
apply uniq_from_wf_env in H4. assumption.
(* a is fresh for the captured variables of gen_ctx_abs E C. *)
rewrite cv_ec_gen_ctx_abs; auto.
apply contexting_regular in Hctx.
decompose [and] Hctx.
assert (a `notin` gdom_env E) as anE.
apply dom__gdom.
apply uniq_from_wf_env in H.
clear - H. solve_uniq.
assert (a `notin` dom lE) as anlE.
clear - H2. apply disjoint_wf_lenv in H2. solve_uniq.
assert (a `notin` cv_ec C) as anC.
rewrite Hcvc. auto.
auto.
Qed.
(* Main form: abstracting every term binding of E leaves only the kind
   bindings (remove_tmvar E) in the output environment. *)
Lemma from_subst_to_ctx_abs : forall E lE t t' C,
uniq E ->
fv_ec C [=] {} ->
cv_ec C [=] dom lE ->
contexting E lE t C E nil t' ->
contexting E lE t (gen_ctx_abs E C) (remove_tmvar E) nil (gen_typ_abs E t').
Proof.
intros E lE t t' C Hwflg Hfvc Hcvc Hctx.
rewrite_env (nil++E).
rewrite_env (nil++remove_tmvar E).
apply _from_subst_to_ctx_abs; auto.
Qed.
(* Inversion for wf_delta_subst with a kind-binding head: the substitution
   starts with an entry for X, mapping it to a closed well-formed type. *)
Lemma wf_dsubst_kcons_inv' : forall E X dsubst,
wf_delta_subst ([(X, bind_kn)]++E) dsubst ->
exists T, exists dsubst',
dsubst = [(X, T)] ++ dsubst' /\
ddom_env E [=] dom dsubst' /\
wf_typ nil T /\
wf_delta_subst E dsubst'.
Proof.
intros E X dsubst Wfd.
remember ([(X, bind_kn)]++E) as EE.
generalize dependent E.
generalize dependent X.
induction Wfd; intros; subst.
inversion HeqEE.
(* kind case: head matches. *)
inversion HeqEE; subst.
exists T. exists SE.
split; auto.
split; auto.
apply dom_delta_subst in Wfd.
assumption.
(* term case: impossible, head is bind_kn. *)
inversion HeqEE.
Qed.
(* remove_tmvar distributes over environment concatenation. *)
Lemma remove_tmvar_app : forall E E',
remove_tmvar (E ++ E') = remove_tmvar E ++ remove_tmvar E'.
Proof.
induction E; intros E'; simpl; auto.
destruct a.
destruct b; auto.
rewrite IHE. auto.
Qed.
(* Free type variables of the linear-abstraction generator over a hole are
   exactly the type variables of the linear environment's types. *)
Lemma fv_tc_gen_ctx_labs_hole : forall lE,
fv_tc (gen_ctx_labs lE ctx_hole) [=] ftv_lenv lE.
Proof.
induction lE; simpl; auto.
destruct a.
destruct l. simpl.
rewrite <- fv_tc_close_ec. rewrite IHlE. fsetdec.
Qed.
(* Regularity: the free type variables of a well-typed term are bound in the
   (nonlinear) environment E.  Binder cases pick a fresh variable, use that
   opening leaves fv_te unchanged (or only adds the fresh name), and discard
   the fresh name from the bound via destruct_notin/fsetdec. *)
Lemma typing_fv_te_upper : forall E lE e t,
typing E lE e t ->
fv_te e [<=] dom E.
Proof.
intros E lE e t Typing.
(typing_cases (induction Typing) Case); intros; subst; simpl.
Case "typing_var".
apply binds_In in H0.
fsetdec.
Case "typing_lvar".
fsetdec.
Case "typing_abs".
pick fresh x.
assert (x `notin` L) as xnL. auto.
apply H1 in xnL.
(* opening with a term variable does not change fv_te. *)
assert (J:=@fv_te_open_ee_eq e1 x).
rewrite J in xnL.
assert (J':=@wft_fv_tt_sub E T1 H).
destruct_notin.
clear - J' xnL NotInTac. simpl in xnL. fsetdec.
Case "typing_app".
fsetdec.
Case "typing_tabs".
pick fresh X.
assert (X `notin` L) as XnL. auto.
apply H0 in XnL.
(* opening with a type variable can only add X; X is fresh, so remove it. *)
assert (J:=@open_te_fv_te_lower e1 X).
simpl in XnL.
assert (fv_te e1 [<=] add X (dom E)) as JJ.
clear - XnL J. fsetdec.
destruct_notin.
clear - JJ NotInTac.
fsetdec.
Case "typing_tapp".
assert (J':=@wft_fv_tt_sub E T H).
fsetdec.
Case "typing_bang". auto.
Case "typing_let".
pick fresh x.
assert (x `notin` L) as xnL. auto.
apply H0 in xnL.
assert (J:=@fv_te_open_ee_eq e2 x).
rewrite J in xnL.
simpl_env in xnL. destruct_notin.
clear - IHTyping J xnL NotInTac0. fsetdec.
Case "typing_apair". fsetdec.
Case "typing_fst". auto.
Case "typing_snd". auto.
Qed.
(* Shifting de Bruijn term indices never touches type variables. *)
Lemma fv_te_shift_ee_rec : forall e m k,
fv_te e [=]fv_te (shift_ee_rec m k e).
Proof.
induction e; intros m k; simpl; auto.
destruct (le_lt_dec k n); auto.
rewrite <- IHe. fsetdec.
rewrite <- IHe. fsetdec.
Qed.
(* Specialization of the previous lemma to the top-level shift. *)
Lemma fv_te_shift_ee : forall e,
fv_te e [=]fv_te (shift_ee e).
Proof.
intros e.
unfold shift_ee.
apply fv_te_shift_ee_rec.
Qed.
(* Shifting term indices in a context preserves its free type variables. *)
Lemma fv_tc_shift_ec_rec : forall C m b,
fv_tc C [=]fv_tc (shift_ec_rec m b C).
Proof.
induction C; intros m b; simpl;
try solve [
auto |
rewrite <- IHC; fsetdec |
rewrite <- IHC;
rewrite <- fv_te_shift_ee_rec;
fsetdec
].
Qed.
(* Top-level shift form of fv_tc_shift_ec_rec. *)
Lemma fv_tc_shift_ec : forall C,
fv_tc C [=] fv_tc (shift_ec C).
Proof.
intros C x.
unfold shift_ec.
apply fv_tc_shift_ec_rec.
Qed.
(* Free type variables introduced by the intuitionistic-abstraction
   generator are those of E's types plus C's own. *)
Lemma fv_tc_gen_ctx_abs_hole : forall E C,
fv_tc (gen_ctx_abs E C) [=] ftv_env E `union` fv_tc C.
Proof.
induction E; intros C; simpl.
fsetdec.
destruct a.
destruct b; simpl.
rewrite IHE. fsetdec.
rewrite <- fv_tc_close_ec. rewrite IHE. fsetdec.
Qed.
(* The linear-abstraction generator captures no type variables. *)
Lemma cv_tc_gen_ctx_labs : forall lE,
cv_tc (gen_ctx_labs lE ctx_hole) [=] {}.
Proof.
induction lE; simpl.
fsetdec.
destruct a.
destruct l; simpl.
rewrite <- cv_tc_close_ec. rewrite IHlE; auto.
Qed.
(* Term-index shifting does not change captured type variables. *)
Lemma cv_tc_shift_ec_rec : forall C m b,
cv_tc C [=] cv_tc (shift_ec_rec m b C).
Proof.
induction C; intros m b; simpl; auto.
Qed.
(* Top-level shift form of cv_tc_shift_ec_rec. *)
Lemma cv_tc_shift_ec : forall C,
cv_tc C [=] cv_tc (shift_ec C).
Proof.
intros C.
unfold shift_ec.
apply cv_tc_shift_ec_rec.
Qed.
(* The intuitionistic-abstraction generator captures no type variables
   (provided C captures none). *)
Lemma cv_tc_gen_ctx_abs : forall E C,
cv_tc C [=] {} ->
cv_tc (gen_ctx_abs E C) [=] {}.
Proof.
induction E; intros C Heq; simpl.
fsetdec.
destruct a.
destruct b; simpl.
rewrite IHE; auto.
rewrite <- cv_tc_close_ec. rewrite IHE; auto.
Qed.
(* The type-abstraction generator captures exactly E's kind bindings plus C's
   own captured type variables. *)
Lemma cv_tc_gen_ctx_tabs : forall E C,
cv_tc (gen_ctx_tabs E C) [=] ddom_env E `union` cv_tc C.
Proof.
induction E; intros C; simpl.
fsetdec.
destruct a.
destruct b; simpl.
assert (J:=@IHE C).
assert (J':=@cv_tc_close_tc (gen_ctx_tabs E C) a).
rewrite J in J'.
rewrite <- J'. fsetdec.
rewrite IHE. fsetdec.
Qed.
(* remove_tmvar preserves environment well-formedness: dropped term bindings
   can only shrink the domain, and freshness carries over. *)
Lemma wfe_remove_tmvar : forall E,
wf_env E ->
wf_env (remove_tmvar E).
Proof.
induction E; intro Wfe; simpl; auto.
destruct a.
destruct b; simpl_env in *.
inversion Wfe; subst.
apply notin_remove_tmvar_dom in H2.
apply IHE in H1; auto.
inversion Wfe; subst.
apply notin_remove_tmvar_dom in H4.
apply IHE in H2; auto.
Qed.
(* A middle segment E2 can be removed from a well-formed environment when
   the prefix E1 contains no term bindings (so no type in E1 can depend on
   E2's bindings). *)
Lemma wf_env_strengthening_nilgdom : forall E2 E1 E3,
wf_env (E1++E2++E3) ->
gdom_env E1 [=] {} ->
wf_env (E1++E3).
Proof.
induction E2; intros E1 E3 Wfe Gdom; simpl.
simpl_env in Wfe; assumption.
destruct a.
destruct b; simpl_env in *.
(* kind head of E2: fold it into E1, recurse, then strengthen it away. *)
rewrite_env ((E1++[(a, bind_kn)])++E2++E3) in Wfe.
apply IHE2 in Wfe.
simpl_env in Wfe.
apply wf_env_strengthening_typ in Wfe; auto.
rewrite fv_env_is_ddom_env; auto.
apply uniq_from_wf_env in Wfe.
apply dom__ddom.
clear - Wfe. solve_uniq.
simpl_env. rewrite Gdom. simpl. fsetdec.
(* term head of E2: ordinary strengthening, then recurse. *)
apply wf_env_strengthening in Wfe.
apply IHE2 in Wfe; auto.
Qed.
(* remove_tmvar leaves no term bindings. *)
Lemma gdom_env_remove_tmvar : forall E,
gdom_env (remove_tmvar E) [=] {}.
Proof.
induction E; simpl; auto.
destruct a.
destruct b; simpl; auto.
Qed.
(* Captured (context) variables of the type-abstraction generator:
   the kind-variable domain of E plus C's own captured variables. *)
Lemma cv_ec_gen_ctx_tabs : forall E C,
cv_ec (gen_ctx_tabs E C) [=] ddom_env E `union` cv_ec C.
Proof.
induction E; intro C; simpl.
fsetdec.
destruct a.
destruct b; simpl.
rewrite <- cv_ec_close_tc.
rewrite IHE. fsetdec.
rewrite IHE. fsetdec.
Qed.
(* The Church-boolean-style type Two (forall X. X -> X -> X, per the
   unfolding below) is closed and well-formed. *)
Lemma wf_typ_Two :
wf_typ nil Two.
Proof.
unfold Two.
apply wf_typ_all with (L:={}).
intros X Xn.
unfold open_tt. simpl.
apply wf_typ_arrow; auto.
Qed.
(* Generalized helper: type-abstracting all the kind bindings of the suffix E
   removes them from the output environment, stacking one typ_all per
   binding.  The input derivation must already have had its term bindings
   abstracted (output env is remove_tmvar (E'++E)). *)
Lemma _from_subst_to_ctx_tabs : forall E E' lE t t' C,
uniq E ->
cv_tc C [=] {} ->
cv_ec C [=] gdom_env (E'++E) `union` dom lE ->
contexting (E'++E) lE t C (remove_tmvar (E'++E)) nil t' ->
contexting (E'++E) lE t (gen_ctx_tabs E C) (remove_tmvar E') nil (gen_typ_tabs E t').
Proof.
induction E; intros E' lE t t' C Uniq Hctc Hcec Hctx.
simpl. simpl_env in *. auto.
destruct a. destruct b; simpl in *.
(* kind binding: recurse, then capture it with contexting_tabs_capture. *)
simpl_env in *.
assert (J:=Uniq).
inversion J; subst.
rewrite_env ((E'++[(a, bind_kn)])++E) in Hctx.
apply IHE with (t:=t) in Hctx; simpl_env; auto.
simpl_env in Hctx.
(* restate the output environment as an env_remove so the capture rule
   applies; remove_tmvar keeps the kind binding [(a, bind_kn)]. *)
assert (contexting (E'++[(a, bind_kn)]++E) lE t (ctx_tabs_capture a (close_tc (gen_ctx_tabs E C) a)) (remove_tmvar E') nil (typ_all (close_tt (gen_typ_tabs E t') a)) =
contexting (E'++[(a, bind_kn)]++E) lE t (ctx_tabs_capture a (close_tc (gen_ctx_tabs E C) a)) (env_remove (a, bind_kn) (remove_tmvar (E'++[(a, bind_kn)])++nil)) nil (typ_all (close_tt (gen_typ_tabs E t') a))) as EQ.
rewrite remove_tmvar_app. simpl. simpl_env.
rewrite_env (remove_tmvar E'++[(a, bind_kn)]++nil).
rewrite env_remove_opt.
simpl_env. auto.
apply contexting_regular in Hctx.
decompose [and] Hctx.
apply uniq_from_wf_env in H4. simpl_env.
rewrite remove_tmvar_app in H4. simpl. assumption.
rewrite EQ. clear EQ.
simpl_env.
apply contexting_tabs_capture; auto.
rewrite remove_tmvar_app; simpl; auto.
(* a is not among the captured context variables of gen_ctx_tabs E C. *)
rewrite cv_ec_gen_ctx_tabs.
simpl in Hcec. rewrite Hcec.
rewrite dom__ddom_gdom in H3.
assert (a `notin` dom lE) as J1.
apply contexting_regular in Hctx. destruct Hctx as [_ [JJ _]].
clear - JJ. apply disjoint_wf_lenv in JJ. solve_uniq.
assert (a `notin` dom E') as J2.
apply contexting_regular in Hctx. destruct Hctx as [JJ _].
clear - JJ. apply uniq_from_wf_env in JJ. solve_uniq.
apply dom__gdom in J2.
clear - H3 J1 J2. fsetdec.
(* the reduced output environment is still well-formed. *)
apply contexting_regular in Hctx.
decompose [and] Hctx.
rewrite remove_tmvar_app. simpl.
rewrite_env (remove_tmvar E'++[(a, bind_kn)]++nil).
rewrite env_remove_opt.
simpl_env. apply wf_lenv_empty.
apply wfe_remove_tmvar in H.
rewrite remove_tmvar_app in H.
rewrite remove_tmvar_app in H.
simpl in H. simpl_env in H.
rewrite_env (remove_tmvar E'++([(a, bind_kn)]++remove_tmvar E)++nil) in H.
apply wf_env_strengthening_nilgdom in H.
simpl_env in H. assumption.
apply gdom_env_remove_tmvar.
apply uniq_from_wf_env in H4.
rewrite remove_tmvar_app in H4. simpl in *.
assumption.
(* term binding: remove_tmvar drops it; shift into E' and recurse. *)
simpl_env in *.
inversion Uniq; subst.
rewrite_env ((E'++[(a, bind_typ t0)])++E) in Hctx.
apply IHE in Hctx; simpl_env; auto.
rewrite remove_tmvar_app in Hctx.
simpl in Hctx. simpl_env in Hctx. auto.
Qed.
(* Main form: abstracting all kind bindings empties the output environment.
   NOTE(review): the intros pattern names only three hypotheses, so `Hctx`
   actually holds the cv_ec premise; the real contexting premise stays in
   the goal and is consumed by apply's trailing-premise matching. *)
Lemma from_subst_to_ctx_tabs : forall E lE t t' C,
uniq E ->
cv_tc C [=] {} ->
cv_ec C [=] gdom_env E `union` dom lE ->
contexting E lE t C (remove_tmvar E) nil t' ->
contexting E lE t (gen_ctx_tabs E C) nil nil (gen_typ_tabs E t').
Proof.
intros E lE t t' C Uniq Hctc Hctx.
rewrite_env (nil++E).
apply _from_subst_to_ctx_tabs; auto.
Qed.
(* gen_typ_tabs over a concatenation composes. *)
Lemma gen_typ_tabs_app : forall E E' t,
gen_typ_tabs (E ++ E') t = gen_typ_tabs E (gen_typ_tabs E' t).
Proof.
induction E; intros E' t; simpl; auto.
destruct a.
destruct b; auto.
rewrite IHE. auto.
Qed.
(* With no kind bindings, gen_typ_tabs is the identity. *)
Lemma gen_typ_tabs_id : forall E t,
ddom_env E [=] {} ->
gen_typ_tabs E t = t.
Proof.
induction E; intros t Heq; simpl; auto.
destruct a.
destruct b; auto.
(* a kind binding would put a into the (empty) ddom: contradiction. *)
simpl in Heq.
assert (a `in` {}) as FALSE.
rewrite <- Heq.
auto.
contradict FALSE; auto.
Qed.
(* An environment admitting the empty delta substitution has no kind
   bindings. *)
Lemma wf_dsubst_nil_dsubst : forall E,
wf_delta_subst E nil ->
ddom_env E [=] {}.
Proof.
induction E; intros Wfd; auto.
destruct a.
destruct b; simpl.
simpl_env in Wfd.
inversion Wfd; subst.
simpl_env in Wfd.
inversion Wfd; subst.
auto.
Qed.
(* remove_tmvar keeps exactly the kind bindings. *)
Lemma ddom_env_remove_tmvar : forall E,
ddom_env (remove_tmvar E) [=] ddom_env E.
Proof.
induction E; simpl; auto.
destruct a.
destruct b; simpl; auto.
Qed.
(* Inversion: a well-formed delta substitution split as dsubst'++dsubst over
   a term-binding-free environment EE induces a matching split of EE, each
   half well-formed against its half of the substitution. *)
Lemma wf_dsubst_dapp_inv : forall EE dsubst' dsubst,
wf_delta_subst EE (dsubst'++dsubst) ->
gdom_env EE [=] {} ->
exists E', exists E,
EE = E' ++ E /\
ddom_env E [=] dom dsubst /\
ddom_env E' [=] dom dsubst' /\
wf_delta_subst E dsubst /\
wf_delta_subst E' dsubst'.
Proof.
intros EE dsubst' dsubst Wfd Dom.
remember (dsubst'++dsubst) as dsE.
generalize dependent dsubst'.
generalize dependent dsubst.
induction Wfd; intros; subst.
(* empty: both halves are nil. *)
symmetry in HeqdsE.
apply app_eq_nil in HeqdsE.
destruct HeqdsE; subst.
exists nil. exists nil. simpl. auto.
(* kind head: it lands either in dsubst' or (when dsubst' is nil) dsubst. *)
apply one_eq_app in HeqdsE.
destruct HeqdsE as [[dsE'' [EQ1 EQ2]] | [EQ1 EQ2]]; subst.
assert (dsE''++dsubst=dsE''++dsubst) as EQ. auto.
apply IHWfd in EQ.
destruct EQ as [E1 [E2 [EQ1 [EQ2 [EQ3 [Hwfd1 Hwfd2]]]]]]; subst.
exists ([(X, bind_kn)]++E1). exists E2.
simpl. split; auto. split; auto.
split. rewrite EQ3. clear. fsetdec.
split; auto.
simpl_env.
apply wf_delta_subst_styp; auto.
simpl in Dom. assumption.
assert (SE=nil++SE) as EQ. auto.
apply IHWfd in EQ.
destruct EQ as [E1 [E2 [EQ1 [EQ2 [EQ3 [Hwfd1 Hwfd2]]]]]]; subst.
exists nil. exists ([(X, bind_kn)]++E1++E2).
simpl. split; auto.
split. simpl_env. rewrite EQ2. rewrite EQ3. clear. fsetdec.
split; auto.
split; auto.
simpl_env.
apply wf_delta_subst_styp; auto.
simpl in Dom. assumption.
(* term head: contradicts gdom_env EE [=] {}. *)
simpl in Dom.
assert (x `in` {}) as FALSE.
rewrite <- Dom.
auto.
contradict FALSE; auto.
Qed.
(* remove_tmvar is the identity on environments with no term bindings. *)
Lemma remove_tmvar_id : forall E,
gdom_env E [=] {} ->
remove_tmvar E = E.
Proof.
induction E; intros; simpl; auto.
destruct a.
destruct b; simpl in *.
rewrite IHE; auto.
(* a term binding would put a into the (empty) gdom: contradiction. *)
assert (a `in` {}) as FALSE.
rewrite <- H.
auto.
contradict FALSE; auto.
Qed.
(* A split of remove_tmvar E lifts to a split of E itself, each half
   projecting onto the corresponding piece. *)
Lemma remove_tmvar_app_inv : forall E E1 E2,
remove_tmvar E = E1 ++ E2 ->
exists E1', exists E2',
E = E1' ++ E2' /\
remove_tmvar E1' = E1 /\
remove_tmvar E2' = E2.
Proof.
induction E; intros E1 E2 H.
simpl in H.
symmetry in H.
apply app_eq_nil in H.
destruct H as [J1 J2]; subst.
exists nil. exists nil. split; auto.
simpl in H.
destruct a.
destruct b.
(* kind head survives remove_tmvar: it starts E1 or E1 is empty. *)
simpl_env in H.
apply one_eq_app in H. destruct H as [[E'' [EQ1 EQ2]] | [EQ1 EQ2]]; subst.
apply IHE in EQ2.
destruct EQ2 as [E1' [E2' [EQ1 [H1 H2]]]]; subst.
exists ([(a, bind_kn)]++E1'). exists E2'.
split; auto.
exists nil. exists ((a, bind_kn)::E).
split; auto.
(* term head is dropped: attach it to the left half. *)
apply IHE in H.
destruct H as [E1' [E2' [EQ1 [H1 H2]]]]; subst.
simpl_env.
exists ([(a, bind_typ t)]++E1'). exists E2'.
split; auto.
Qed.
(* When X is the only kind binding of the environment, gen_typ_tabs
   produces exactly one quantifier: typ_all (close_tt t X). *)
Lemma gen_typ_tabs_opt : forall E1 X E2 t,
uniq (E1++[(X, bind_kn)]++E2) ->
ddom_env (E1++[(X, bind_kn)]++E2) [=] {{X}} ->
gen_typ_tabs (E1++[(X, bind_kn)]++E2) t = typ_all (close_tt t X).
Proof.
induction E1; intros x E2 t Uniq Hdom.
(* E1 empty: E2 can contain no kind binding (it would duplicate x in ddom). *)
simpl in *.
simpl_env in Uniq. inversion Uniq; subst.
rewrite gen_typ_tabs_id; auto.
clear - Hdom H3. apply dom__ddom in H3. fsetdec.
destruct a.
destruct b; simpl in *.
(* a kind binding in E1 would force a = x, contradicting uniqueness. *)
simpl_env in Hdom.
assert (a <> x) as anx.
simpl_env in Uniq. clear - Uniq. solve_uniq.
assert (a `in` {{x}}) as aisx.
rewrite <- Hdom.
auto.
contradict aisx; auto.
inversion Uniq; subst.
apply IHE1 with (t:=t) in H1; auto.
Qed.
(* Inversion of a singleton delta substitution: the environment contains
   exactly one kind binding, for X, and d is closed and well-formed. *)
Lemma wf_dsubst_single_inv : forall E X d,
wf_delta_subst E [(X, d)] ->
exists E1, exists E2,
E = E1 ++ [(X, bind_kn)] ++ E2 /\
wf_typ nil d /\ ddom_env E1 [=] {} /\ ddom_env E2 [=] {}.
Proof.
induction E; intros X d Hwfd.
inversion Hwfd.
destruct a.
destruct b; simpl_env in *.
inversion Hwfd; subst.
exists nil. exists E. split; auto.
split; auto. simpl. split; auto.
apply wf_dsubst_nil_dsubst in H3; auto.
inversion Hwfd; subst.
apply IHE in H2.
destruct H2 as [E1 [E2 [J1 [J2 [J3 J4]]]]]; subst.
exists ([(a, bind_typ t)]++E1). exists E2.
split; auto.
Qed.
(* If remove_tmvar E is the singleton [(a, bind_kn)], then E contains
   exactly one kind binding (for a), surrounded by term-only segments. *)
Lemma remove_tmvar_single_inv : forall E a,
remove_tmvar E = [(a, bind_kn)] ->
exists E1, exists E2,
E = E1 ++ [(a, bind_kn)] ++ E2 /\ remove_tmvar E1 = nil /\ remove_tmvar E2 = nil.
Proof.
induction E; intros X H.
simpl in H.
inversion H.
destruct a.
destruct b; simpl in *; simpl_env in *.
inversion H; subst.
exists nil. exists E. auto.
apply IHE in H.
destruct H as [E1 [E2 [J1 [J2 J3]]]]; subst.
exists ([(a, bind_typ t)]++E1). exists E2.
split; auto.
Qed.
(* Well-formed delta substitution considered on its own (no environment):
   distinct domain variables, each mapped to a closed well-formed type.
   Weaker than wf_delta_subst but sufficient for the substitution lemmas
   below (dsubst_opt, dsubst_permut, ...). *)
Inductive wf_dsubst : delta_subst -> Prop :=
| wf_dsubst_empty :
wf_dsubst delta_nil
| wf_dsubst_styp : forall SE X T,
wf_dsubst SE -> X `notin` dom SE -> wf_typ nil T ->
wf_dsubst ([(X, T)] ++ SE)
.
(* Case-naming tactic for inductions over wf_dsubst. *)
Tactic Notation "wf_dsubst_cases" tactic(first) tactic(c) :=
first;
[ c "wf_dsubst_empty" |
c "wf_dsubst_styp"].
Hint Constructors wf_dsubst.
(* Any type stored anywhere in a well-formed delta substitution is closed
   and well-formed. *)
Lemma dsubst_nil_typ : forall dsubst X t dsubst',
wf_dsubst (dsubst'++[(X, t)]++dsubst) ->
wf_typ nil t.
Proof.
intros dsubst X t dsubst' Hwf_dsubst.
remember (dsubst'++[(X, t)]++dsubst) as Dsubst.
generalize dependent dsubst'.
(wf_dsubst_cases (induction Hwf_dsubst) Case);
intros dsubst' HeqDsubst.
Case "wf_dsubst_empty".
contradict HeqDsubst; auto.
Case "wf_dsubst_styp".
destruct (one_eq_app _ _ _ _ _ HeqDsubst) as [[dsubst'' [DEQ1 DEQ2]] | [DEQ1 DEQ2]]; subst.
SCase "exists DS'',DS'=DS&X0'' /\ DS0=DS&X&DS'' ".
assert (J:=@IHHwf_dsubst dsubst''); auto.
SCase "DS'=nil /\ DS&X = DS0&X0 ".
inversion DEQ2. subst.
exact H0.
Qed.
(* An interior entry (X, t) of a delta substitution can be pulled out as an
   explicit substitution on the argument type: applying the full
   substitution equals applying the rest after substituting t for X. *)
Lemma dsubst_opt : forall dsubst X t t' dsubst',
wf_dsubst (dsubst'++[(X, t)]++dsubst) ->
apply_delta_subst_typ (dsubst'++[(X, t)]++dsubst) t' =
apply_delta_subst_typ (dsubst'++dsubst) (subst_tt X t t').
Proof.
intros dsubst X t t' dsubst' Hwf_dsubst.
remember (dsubst'++[(X, t)]++dsubst) as Dsubst.
generalize dependent t'.
generalize dependent dsubst'.
(wf_dsubst_cases (induction Hwf_dsubst) Case);
intros dsubst' HeqDsubst t'.
Case "wf_dsubst_empty".
contradict HeqDsubst; auto.
Case "wf_dsubst_styp".
destruct (one_eq_app _ _ _ _ _ HeqDsubst) as [[dsubst'' [DEQ1 DEQ2]] | [DEQ1 DEQ2]]; subst.
SCase "exists DS'',DS'=DS&X0'' /\ DS0=DS&X&DS'' ".
simpl. simpl_env.
(* the two substitutions commute because both images are closed types. *)
rewrite <- subst_tt_commute; auto.
eauto using notin_fv_wf.
apply dsubst_nil_typ in Hwf_dsubst.
eauto using notin_fv_wf.
SCase "DS'=nil /\ DS&X = DS0&X0 ".
inversion DEQ2. subst.
simpl_env in *. destruct_notin.
auto.
Qed.
(* Discharges freshness obligations for X by extracting uniqueness from a
   wf_delta_subst hypothesis mentioning X at the tail or in the middle. *)
Ltac tac_wfd_fresh_X :=
match goal with
| H: wf_delta_subst (?E ++ [(?X, _)]) (?dsubst ++ [(?X, _)]) |- _ =>
apply wf_delta_subst__uniq in H;
destruct H as [J1 [J2 J3]];
solve_uniq
| H: wf_delta_subst (?E' ++ [(?X, _)] ++ ?E) (?dsubst' ++ [(?X, _)] ++ ?dsubst) |- _ =>
apply wf_delta_subst__uniq in H;
destruct H as [J1 [J2 J3]];
solve_uniq
end.
(* Cancel a common trailing entry from a ddom/dom set equation, assuming X
   is fresh on both sides. *)
Lemma ddom_dom__inv' : forall A (sE : list (atom * A)) E X b,
ddom_env (E++[(X, bind_kn)]) [=] dom (sE++[(X, b)]) ->
X `notin` ddom_env E ->
X `notin` dom sE ->
ddom_env E [=] dom sE.
Proof.
intros.
simpl_env in *.
fsetdec.
Qed.
(* Special case of dsubst_opt for a trailing entry. *)
Lemma dsubst_head_opt : forall dsubst X t t',
wf_dsubst (dsubst++[(X, t)]) ->
apply_delta_subst_typ (dsubst++[(X, t)]) t' =
apply_delta_subst_typ dsubst (subst_tt X t t').
Proof.
intros dsubst X t t' Hwfd.
rewrite_env (dsubst++nil).
rewrite_env (dsubst++[(X, t)]++nil).
apply dsubst_opt; auto.
Qed.
(* Environment-indexed well-formedness implies the standalone one:
   the freshness side condition follows from the domain correspondence. *)
Lemma wf_delta_subst__wf_dsubst : forall E dsubst,
wf_delta_subst E dsubst ->
wf_dsubst dsubst.
Proof.
intros E dsubst Hwfd.
(wf_delta_subst_cases (induction Hwfd) Case); auto.
Case "wf_delta_subst_styp".
apply wf_dsubst_styp; auto.
apply dom_delta_subst in Hwfd.
rewrite <- Hwfd.
apply dom__ddom; auto.
Qed.
(* Snoc version of wf_dsubst_styp: a fresh closed entry may be appended at
   the tail instead of the head. *)
Lemma wf_dsubst_styp_rev : forall SE X T,
wf_dsubst SE -> X `notin` dom SE -> wf_typ nil T ->
wf_dsubst (SE++[(X, T)]).
Proof.
intros SE X T Wfd.
generalize dependent X.
generalize dependent T.
induction Wfd; intros; auto.
rewrite_env ([(X, T)]++delta_nil).
apply wf_dsubst_styp; auto.
simpl_env.
apply wf_dsubst_styp; auto.
Qed.
(* Reversal preserves the domain of an association list. *)
Lemma dom_rev : forall A (E : @list (atom*A)),
dom E [=] dom (rev E).
Proof.
intros A E.
induction E; simpl; auto.
destruct a. simpl_env.
rewrite IHE. fsetdec.
Qed.
(* Reversal preserves standalone well-formedness of a delta substitution
   (order of entries is irrelevant given pairwise freshness). *)
Lemma wf_dsubst_rev : forall E,
wf_dsubst E ->
wf_dsubst (rev E).
Proof.
intros E Wfd.
induction Wfd; intros; auto.
simpl. simpl_env.
apply wf_dsubst_styp_rev; auto.
rewrite <- dom_rev; auto.
Qed.
(* Wrapping a (locally closed) type with one universal quantifier per
   type-variable binding of E preserves local closure. *)
Lemma gen_typ_tabs_type : forall E t,
type t ->
type (gen_typ_tabs E t).
Proof.
induction E; intros t Htype; simpl; auto.
destruct a.
destruct b; auto.
(* type-variable binding: build the quantifier body by close-then-open,
   which reduces to a substitution on a locally closed type *)
apply type_all with (L:={{a}}).
intros X FrX.
rewrite close_open_tt__subst_tt; auto.
Qed.
(* Applying a well-formed delta substitution preserves local closure:
   each step substitutes a closed well-formed type. *)
Lemma type_preserved_under_dsubst: forall dsubst t,
wf_dsubst dsubst ->
type t ->
type (apply_delta_subst_typ dsubst t).
Proof.
intros dsubst t Hwfd Ht.
generalize dependent t.
induction Hwfd; intros; simpl; auto.
apply IHHwfd.
apply subst_tt_type; eauto using type_from_wf_typ.
Qed.
(* Generalization commutes with substitution of a closed type for a
   variable X that is not bound by E. *)
Lemma gen_typ_tabs_subst_tt_commute : forall E X T t,
X `notin` dom E ->
wf_typ nil T ->
gen_typ_tabs E (subst_tt X T t) = subst_tt X T (gen_typ_tabs E t).
Proof.
induction E; intros X T t HC Wft; simpl; auto.
destruct a.
destruct b; simpl.
rewrite IHE; auto.
(* the quantified variable a cannot occur free in the closed type T,
   so substitution commutes with close_tt *)
rewrite subst_tt_close_tt; auto.
apply notin_fv_wf with (X:=a) in Wft; auto.
rewrite IHE; auto.
Qed.
(* A delta substitution commutes with substituting a closed type for a
   variable outside its domain. *)
Lemma dsubst_permut : forall dsubst X T t,
wf_dsubst dsubst ->
X `notin` dom dsubst -> wf_typ nil T ->
apply_delta_subst_typ dsubst (subst_tt X T t) = subst_tt X T (apply_delta_subst_typ dsubst t).
Proof.
intros dsubst X T t Hwfd Fr Hwft.
generalize dependent t.
induction Hwfd; intros; simpl; eauto.
simpl_env in *.
(* the two single substitutions commute because both images are closed *)
rewrite <- subst_tt_commute; eauto using notin_fv_wf.
Qed.
(* Membership in the domain of remove_tmvar E implies membership in dom E
   (remove_tmvar only drops bindings, never adds them). *)
Lemma in_remove_tmvar_dom : forall x E,
x `in` dom (remove_tmvar E) ->
x `in` dom E.
Proof.
intros x.
induction E; intros; simpl; auto.
destruct a.
simpl_env in H.
destruct_notin.
destruct b; simpl in *; auto.
(* type-variable binding kept by remove_tmvar: split on which side x is in *)
assert (x `in` {{a}} \/ x `in` dom (remove_tmvar E)) as J. fsetdec.
destruct J as [J | J]; fsetdec.
Qed.
(* Workhorse behind from_subst_to_ctx_tapp.  If the context C produces a
   term of the fully generalized type gen_typ_tabs (E'++E) t', then wrapping
   C with one type application per entry of dsubst' (gen_ctx_tapp) discharges
   the quantifiers that come from E', leaving gen_typ_tabs E applied to the
   instantiated body.  The hypothesis relates dsubst' (reversed) to the
   type-variable part of E' via wf_delta_subst.
   NOTE(review): dsubst' appears to be consumed head-first, peeling the
   outermost binder of E' at each step — confirm against gen_ctx_tapp. *)
Lemma _from_subst_to_ctx_tapp : forall dsubst' E' E lE t t' C,
wf_delta_subst (remove_tmvar E') (rev dsubst') ->
type t' ->
contexting (E'++E) lE t C nil nil (gen_typ_tabs (E'++E) t') ->
contexting (E'++E) lE t (gen_ctx_tapp dsubst' C) nil nil (gen_typ_tabs E (apply_delta_subst_typ dsubst' t')).
Proof.
induction dsubst'; intros E' E lE t t' C Hwfd Htype Hctx; simpl.
(* base case: dsubst' = nil, so E' binds no type variables and
   gen_typ_tabs (E'++E) t' collapses to gen_typ_tabs E t' *)
simpl in Hwfd.
apply wf_dsubst_nil_dsubst in Hwfd.
rewrite gen_typ_tabs_app in Hctx.
rewrite gen_typ_tabs_id in Hctx; auto.
rewrite ddom_env_remove_tmvar in Hwfd; auto.
(* inductive case: a = (X, d) is the last entry of rev dsubst' *)
destruct a. simpl in Hwfd. simpl_env in Hwfd.
apply wf_dsubst_dapp_inv in Hwfd; auto using gdom_env_remove_tmvar.
destruct Hwfd as [E1 [E2 [EQ1 [EQ2 [EQ3 [Hwfd1 Hwfd2]]]]]]; subst.
apply remove_tmvar_app_inv in EQ1.
destruct EQ1 as [E1' [E2' [EQ' [EQ2' EQ3']]]]; subst.
rewrite_env (E1'++(E2'++E)) in Hctx.
apply IHdsubst' in Hctx; auto.
rewrite gen_typ_tabs_app in Hctx.
(* rephrase the instantiated result type as an open of a close, so the
   contexting_tapp rule applies *)
assert (gen_typ_tabs E (apply_delta_subst_typ dsubst' (subst_tt a d t')) =
open_tt (close_tt (gen_typ_tabs E (apply_delta_subst_typ dsubst' t')) a) d) as EQ.
rewrite close_open_tt__subst_tt; auto.
assert (J:=Hwfd1).
apply wf_dsubst_single_inv in J.
destruct J as [E1 [E2 [J1 [J2 [J3 J4]]]]]; subst.
rewrite <- gen_typ_tabs_subst_tt_commute; auto.
rewrite dsubst_permut; auto.
apply wf_delta_subst__wf_dsubst in Hwfd2.
apply wf_dsubst_rev in Hwfd2.
rewrite <- rev_involutive; auto.
rewrite dom_rev.
rewrite <- EQ3.
apply dom__ddom.
apply notin_remove_tmvar_dom.
(* a is bound in E2', hence by uniqueness of the whole environment it is
   fresh for E *)
assert (a `in` dom E2') as J.
apply in_remove_tmvar_dom.
rewrite J1. simpl_env. auto.
apply contexting_regular in Hctx.
destruct Hctx as [J' _].
apply uniq_from_wf_env in J'.
clear - J' J. solve_uniq.
assert (a `in` dom E2') as J.
apply in_remove_tmvar_dom.
rewrite J1. simpl_env. auto.
apply contexting_regular in Hctx.
destruct Hctx as [J' _].
apply uniq_from_wf_env in J'.
clear - J' J. solve_uniq.
apply gen_typ_tabs_type.
apply type_preserved_under_dsubst; auto.
apply wf_delta_subst__wf_dsubst in Hwfd2.
apply wf_dsubst_rev in Hwfd2.
rewrite <- rev_involutive; auto.
rewrite EQ. clear EQ.
assert (J:=Hwfd1).
apply wf_dsubst_single_inv in J.
destruct J as [E1 [E2 [EQ4 [Hwft [JJ1 JJ2]]]]]; subst.
(* apply the type-application contexting rule with the closed type d *)
apply contexting_tapp; auto.
(* locate the binding (a, bind_kn) inside E2' so gen_typ_tabs_opt applies *)
assert (exists E21', exists E22',
E2' = E21' ++ [(a, bind_kn)]++E22' /\
remove_tmvar E21' = E1 /\
remove_tmvar E22' = E2
) as EQ.
clear - EQ4.
assert (J:=EQ4).
apply remove_tmvar_app_inv in EQ4.
destruct EQ4 as [F1 [F2 [J1 [J2 J3]]]].
apply remove_tmvar_app_inv in J3.
destruct J3 as [F3 [F4 [J4 [J5 J6]]]].
subst.
apply remove_tmvar_single_inv in J5.
destruct J5 as [F5 [F6 [J1 [J2 J3]]]]; subst.
exists (F1++F5). exists (F6++F4).
simpl_env. split; auto.
rewrite remove_tmvar_app.
rewrite remove_tmvar_app.
rewrite J3. rewrite J2.
split; auto.
destruct EQ as [E21' [E22' [EQ5 [EQ6 EQ7]]]]; subst.
rewrite gen_typ_tabs_opt in Hctx; auto.
simpl_env in Hctx; simpl_env; auto.
apply contexting_regular in Hctx.
decompose [and] Hctx.
apply uniq_from_wf_env in H.
clear - H. solve_uniq.
rewrite EQ4 in Hwfd1. clear - Hwfd1 EQ4.
apply wf_dsubst_single_inv in Hwfd1.
destruct Hwfd1 as [E1 [E2 [J1 [J2 [J3 J4]]]]]; subst.
rewrite J1 in EQ4.
rewrite <- ddom_env_remove_tmvar.
rewrite EQ4.
simpl_env. rewrite J3. rewrite J4. fsetdec.
Qed.
(* Corollary of _from_subst_to_ctx_tapp with the whole environment treated
   as the prefix (E' := E, E := nil): all quantifiers are discharged,
   leaving apply_delta_subst_typ dsubst t'. *)
Lemma from_subst_to_ctx_tapp : forall E lE dsubst t t' C,
wf_delta_subst (remove_tmvar E) (rev dsubst) ->
type t' ->
contexting E lE t C nil nil (gen_typ_tabs E t') ->
contexting E lE t (gen_ctx_tapp dsubst C) nil nil (apply_delta_subst_typ dsubst t').
Proof.
intros E lE dsubst t t' C Hwfd Htype Hctx.
rewrite_env (E++nil) in Hctx.
apply _from_subst_to_ctx_tapp with (dsubst':=dsubst) in Hctx; simpl in Hctx; simpl_env in Hctx; auto.
Qed.
(* An empty gamma substitution means E binds no term variables. *)
Lemma wf_gsubst_nil_gsubst : forall E dsubst,
wf_gamma_subst E dsubst nil->
gdom_env E [=] {}.
Proof.
induction E; intros dsubst Wfg; auto.
destruct a.
destruct b; simpl.
simpl_env in Wfg.
inversion Wfg; subst; eauto.
(* a term binding with an empty substitution is impossible: inversion
   yields no constructor case *)
simpl_env in Wfg.
inversion Wfg; subst.
Qed.
(* remove_typvar keeps every term binding, so gdom is unchanged. *)
Lemma gdom_env_remove_typvar : forall E,
gdom_env (remove_typvar E) [=] gdom_env E.
Proof.
induction E; simpl; auto.
destruct a.
destruct b; simpl; auto.
Qed.
(* Freshness transfer: a variable fresh for E is fresh for both parts of a
   well-formed gamma substitution and for the free variables of E.
   NOTE(review): no explicit `Proof.` keyword here — legal Coq, but
   inconsistent with the rest of the file. *)
Lemma wf_gamma_subst__nfv : forall E dsubst gsubst x,
wf_gamma_subst E dsubst gsubst ->
x `notin` dom E ->
x `notin` dom dsubst /\ x `notin` dom gsubst /\ x `notin` fv_env E.
intros E dsubst gsubst x Hwfg Hfv.
induction Hwfg; intros; auto.
apply notin_fv_wf with (X:=x) in H1; simpl; auto.
apply notin_fv_wf with (X:=x) in H0; simpl; auto.
Qed.
(* Well-formed gamma substitutions have uniq components. *)
Lemma wf_gamma_subst__uniq : forall E dsubst gsubst,
wf_gamma_subst E dsubst gsubst ->
uniq gsubst /\ uniq dsubst /\ uniq E.
Proof.
intros.
induction H; auto.
decompose [and] IHwf_gamma_subst.
split.
apply uniq_push; auto.
apply wf_gamma_subst__nfv with (x:=x) in H; auto.
split; auto.
apply wf_gamma_subst__nfv with (x:=X) in H; auto.
decompose [and] IHwf_gamma_subst.
split; auto.
Qed.
(* If E binds no type variables, the delta component can be kept while the
   environment and gamma component are discarded. *)
Lemma wf_gamma_subst_strengthen_when_nilE : forall E dsubst gsubst,
wf_gamma_subst E dsubst gsubst ->
ddom_env E [=] {} ->
wf_gamma_subst nil dsubst nil.
Proof.
intros E dsubst gsubst Hwfg Heq.
induction Hwfg; auto.
(* a type-variable binding contradicts ddom_env E [=] {} *)
simpl in Heq.
assert (X `in` {}) as FALSE.
rewrite <- Heq.
auto.
contradict FALSE; auto.
Qed.
(* Inversion of a well-formed gamma substitution split as gsubst'++gsubst
   (with an empty delta component): the environment splits accordingly,
   with matching domains and well-formed halves. *)
Lemma wf_gsubst_gapp_inv : forall EE gsubst' gsubst,
wf_gamma_subst EE nil (gsubst'++gsubst) ->
ddom_env EE [=] {} ->
exists E', exists E,
EE = E' ++ E /\
gdom_env E [=] dom gsubst /\
gdom_env E' [=] dom gsubst' /\
wf_gamma_subst E nil gsubst /\
wf_gamma_subst E' nil gsubst'.
Proof.
intros EE gsubst' gsubst Wfg Dom.
remember (gsubst'++gsubst) as gsE.
generalize dependent gsubst'.
generalize dependent gsubst.
induction Wfg; intros; subst.
(* nil case: both halves must be nil *)
symmetry in HeqgsE.
apply app_eq_nil in HeqgsE.
destruct HeqgsE as [J1 J2]; subst.
exists nil. exists nil. split; auto.
(* cons case: the head binding lands in gsubst' or gsubst' is nil *)
apply one_eq_app in HeqgsE. destruct HeqgsE as [[gsE'' [gsEQ1 gsEQ2]] | [gsEQ1 gsEQ2]]; subst.
assert (gsE''++gsubst = gsE''++gsubst) as EQ. auto.
simpl_env in Dom.
apply IHWfg in EQ.
destruct EQ as [E1 [E2 [EQ1 [EQ2 [EQ3 [Wfg1 Wfg2]]]]]]; subst.
exists ([(x, bind_typ T)]++E1). exists E2.
split; auto.
split; auto.
split; auto.
simpl. rewrite EQ3. fsetdec.
split; auto.
simpl_env. apply wf_gamma_subst_sval; auto.
(* re-establish well-formedness of T in the prefix environment only *)
rewrite_env ((E1++E2)++nil) in H1.
apply wft_strengthen_ex in H1; auto.
rewrite_env (nil++E1++nil).
apply wf_typ_weakening; auto.
apply wf_gamma_subst__uniq in Wfg2.
decompose [and] Wfg2.
simpl_env. auto.
clear - Dom. fsetdec.
clear - Dom. fsetdec.
(* gsubst' = nil: everything belongs to the suffix *)
exists nil. exists ([(x, bind_typ T)]++E).
split; auto.
split.
simpl_env. apply dom_gamma_subst in Wfg.
destruct Wfg. rewrite H3. fsetdec.
split; auto.
split.
simpl_env. apply wf_gamma_subst_sval; auto.
apply wf_gamma_subst_strengthen_when_nilE in Wfg; auto.
(* type-variable binding contradicts ddom_env EE [=] {} *)
simpl in Dom.
assert (X `in` {}) as FALSE.
rewrite <- Dom.
auto.
contradict FALSE; auto.
Qed.
(* remove_typvar drops every type binding, so the resulting ddom is empty. *)
Lemma ddom_env_remove_typvar : forall E,
ddom_env (remove_typvar E) [=] {}.
Proof.
induction E; simpl; auto.
destruct a.
destruct b; simpl; auto.
Qed.
(* An environment with no type-variable bindings is a fixpoint of
   remove_typvar. *)
Lemma remove_typvar_id : forall E,
ddom_env E [=] {} ->
remove_typvar E = E.
Proof.
induction E; intros; simpl; auto.
destruct a.
destruct b; simpl in *.
(* a kind binding would put a into the empty set — contradiction *)
assert (a `in` {}) as FALSE.
rewrite <- H.
auto.
contradict FALSE; auto.
rewrite IHE; auto.
Qed.
(* Inversion: a split of remove_typvar E lifts to a split of E whose
   halves project onto the given pieces. *)
Lemma remove_typvar_app_inv : forall E E1 E2,
remove_typvar E = E1 ++ E2 ->
exists E1', exists E2',
E = E1' ++ E2' /\
remove_typvar E1' = E1 /\
remove_typvar E2' = E2.
Proof.
induction E; intros E1 E2 H.
simpl in H.
symmetry in H.
apply app_eq_nil in H.
destruct H as [J1 J2]; subst.
exists nil. exists nil. split; auto.
simpl in H.
destruct a.
destruct b.
(* kind bindings are dropped by remove_typvar: absorb into the prefix *)
apply IHE in H.
destruct H as [E1' [E2' [EQ1 [H1 H2]]]]; subst.
simpl_env.
exists ([(a, bind_kn)]++E1'). exists E2'.
split; auto.
simpl_env in H.
apply one_eq_app in H. destruct H as [[E'' [EQ1 EQ2]] | [EQ1 EQ2]]; subst.
apply IHE in EQ2.
destruct EQ2 as [E1' [E2' [EQ1 [H1 H2]]]]; subst.
exists ([(a, bind_typ t)]++E1'). exists E2'.
split; auto.
exists nil. exists ((a, bind_typ t)::E).
split; auto.
Qed.
(* Inversion of a singleton gamma substitution: x must be bound with some
   type t in E, and the substituted value is closed and well typed at t. *)
Lemma wf_gsubst_single_inv : forall E x v,
wf_gamma_subst E nil [(x, v)] ->
exists E1, exists E2, exists t,
E = E1 ++ [(x, bind_typ t)] ++ E2 /\
typing nil nil v t.
Proof.
induction E; intros x v Hwfg.
inversion Hwfg.
destruct a.
destruct b; simpl_env in *.
(* a kind binding cannot carry the single term entry *)
inversion Hwfg; subst.
inversion Hwfg; subst.
exists nil. exists E. exists t. split; auto.
Qed.
(* Applying a delta substitution to an environment preserves gdom … *)
Lemma apply_delta_subst_env_gdom : forall dsubst E,
gdom_env E [=] gdom_env (apply_delta_subst_env dsubst E).
Proof.
induction E; intros; simpl; auto.
destruct a.
destruct b; simpl; rewrite <- IHE; fsetdec.
Qed.
(* … and ddom: it only rewrites the types inside bindings. *)
Lemma apply_delta_subst_env_ddom : forall dsubst E,
ddom_env E [=] ddom_env (apply_delta_subst_env dsubst E).
Proof.
induction E; intros; simpl; auto.
destruct a.
destruct b; simpl; rewrite <- IHE; fsetdec.
Qed.
(* apply_delta_subst_env commutes with dropping type-variable bindings. *)
Lemma apply_delta_subst_env_remove_typvar_commut : forall E dsubst,
apply_delta_subst_env dsubst (remove_typvar E) =
remove_typvar (apply_delta_subst_env dsubst E).
Proof.
induction E; intros dsubst; simpl; auto.
destruct a.
destruct b; simpl; auto.
rewrite IHE; auto.
Qed.
(* Inversion: a split of apply_delta_subst_env dsubst E lifts to a split of
   E whose halves map onto the given pieces. *)
Lemma apply_delta_subst_env_app_inv : forall E dsubst E1 E2,
apply_delta_subst_env dsubst E = E1 ++ E2 ->
exists E1', exists E2',
E = E1' ++ E2' /\
apply_delta_subst_env dsubst E1' = E1 /\
apply_delta_subst_env dsubst E2' = E2.
Proof.
induction E; intros dsubst E1 E2 H.
simpl in H.
symmetry in H.
apply app_eq_nil in H.
destruct H as [J1 J2]; subst.
exists nil. exists nil. split; auto.
simpl in H.
destruct a.
destruct b.
(* kind binding: decide whether it falls in the prefix or E1 is nil *)
simpl_env in H.
apply one_eq_app in H. destruct H as [[E'' [EQ1 EQ2]] | [EQ1 EQ2]]; subst.
apply IHE in EQ2.
destruct EQ2 as [E1' [E2' [EQ1 [H1 H2]]]]; subst.
exists ([(a, bind_kn)]++E1'). exists E2'.
split; auto.
exists nil. exists ((a, bind_kn)::E).
split; auto.
(* term binding: same case split *)
simpl_env in H.
apply one_eq_app in H. destruct H as [[E'' [EQ1 EQ2]] | [EQ1 EQ2]]; subst.
apply IHE in EQ2.
destruct EQ2 as [E1' [E2' [EQ1 [H1 H2]]]]; subst.
exists ([(a, bind_typ t)]++E1'). exists E2'.
split; auto.
exists nil. exists ((a, bind_typ t)::E).
split; auto.
Qed.
(* apply_delta_subst_env distributes over environment concatenation. *)
Lemma apply_delta_subst_env_app : forall E E' dsubst,
apply_delta_subst_env dsubst (E ++ E') =
apply_delta_subst_env dsubst E ++ apply_delta_subst_env dsubst E'.
Proof.
induction E; intros E' dsubst; simpl; auto.
destruct a.
destruct b; rewrite IHE; auto.
Qed.
(* Inversion: a delta substitution well-formed over E'++E (with no term
   bindings anywhere) splits into halves well-formed over E' and E, with
   matching type-variable domains. *)
Lemma wf_dsubst_app_inv' : forall E E' dsE,
wf_delta_subst (E'++E) dsE ->
gdom_env (E'++E) [=] {} ->
exists dsubst', exists dsubst,
dsE = dsubst' ++ dsubst /\
ddom_env E [=] dom dsubst /\
ddom_env E' [=] dom dsubst' /\
wf_delta_subst E dsubst /\
wf_delta_subst E' dsubst'.
Proof.
intros E E' dsE Wfd Dom.
remember (E'++E) as EE.
generalize dependent E'.
generalize dependent E.
induction Wfd; intros; subst.
symmetry in HeqEE.
apply app_eq_nil in HeqEE.
destruct HeqEE; subst.
exists nil. exists nil. simpl. auto.
(* type-variable binding: it belongs to E' or E' is nil *)
apply one_eq_app in HeqEE.
destruct HeqEE as [[E'' [EQ1 EQ2]] | [EQ1 EQ2]]; subst.
assert (E''++E0=E''++E0) as EQ. auto.
apply IHWfd in EQ.
destruct EQ as [dsubst1 [dsubst2 [EQ1 [EQ2 [EQ3 [Hwfd1 Hwfd2]]]]]]; subst.
exists ([(X, T)]++dsubst1). exists dsubst2.
simpl. split; auto. split; auto.
split. rewrite EQ3. clear. fsetdec.
split; auto.
simpl_env.
apply wf_delta_subst_styp; auto.
simpl in Dom. assumption.
(* E' = nil: the whole substitution sits over E *)
assert (E=nil++E) as EQ. auto.
apply IHWfd in EQ.
destruct EQ as [dsubst1 [dsubst2 [EQ1 [EQ2 [EQ3 [Hwfd1 Hwfd2]]]]]]; subst.
exists nil. exists ([(X, T)]++dsubst1++dsubst2).
simpl. split; auto.
split. simpl_env. rewrite EQ2. rewrite <- EQ3. clear. fsetdec.
split; auto.
split; auto.
simpl_env.
apply wf_delta_subst_styp; auto.
simpl in Dom. assumption.
(* a term binding contradicts gdom_env (E'++E) [=] {} *)
simpl in Dom.
assert (x `in` {}) as FALSE.
rewrite <- Dom.
auto.
contradict FALSE; auto.
Qed.
(* Merging: two well-formed delta substitutions over disjoint (uniq)
   environments concatenate to a well-formed one. *)
Lemma wf_dsubst_app_merge : forall E E' dsubst dsubst',
wf_delta_subst E dsubst ->
wf_delta_subst E' dsubst' ->
uniq (E'++E) ->
wf_delta_subst (E'++E) (dsubst'++dsubst).
Proof.
intros E E' dsubst dsubst' Hwfd Hwfd' Uniq.
generalize dependent E.
generalize dependent dsubst.
induction Hwfd'; intros; simpl_env; auto.
simpl_env in Uniq.
inversion Uniq; subst.
apply IHHwfd' in Hwfd; auto.
simpl_env in Uniq.
inversion Uniq; subst.
apply IHHwfd' in Hwfd; auto.
apply wf_delta_subst_skip; auto.
(* T stays well formed after weakening into the larger environment *)
rewrite_env (E++E0++nil).
apply wf_typ_weakening; simpl_env; auto.
Qed.
(* apply_delta_subst_env preserves uniqueness of bindings (it keeps every
   key and only rewrites the attached types). *)
Lemma apply_delta_subst_env_uniq : forall E dsubst,
uniq E ->
uniq (apply_delta_subst_env dsubst E).
Proof.
induction E; intros dsubst Uniq; simpl; auto.
destruct a.
destruct b.
inversion Uniq; subst.
simpl_env.
apply uniq_push; auto.
rewrite <- apply_delta_subst_env_dom; auto.
inversion Uniq; subst.
simpl_env.
apply uniq_push; auto.
rewrite <- apply_delta_subst_env_dom; auto.
Qed.
(* The empty delta substitution is the identity on environments. *)
Lemma apply_delta_subst_env_nil : forall E,
apply_delta_subst_env nil E = E.
Proof.
induction E; simpl; auto.
destruct a.
destruct b; simpl.
rewrite IHE; auto.
rewrite IHE; auto.
Qed.
(* Unfolding one substitution entry: applying (X,T)::dsubst equals first
   mapping subst_tb X T over the environment, then applying dsubst. *)
Lemma apply_delta_subst_env_cons : forall E X T dsubst,
apply_delta_subst_env ([(X, T)]++dsubst) E = apply_delta_subst_env dsubst (map (subst_tb X T) E).
Proof.
induction E; intros X T dsubst; simpl; auto.
destruct a.
destruct b; simpl; simpl_env.
rewrite IHE; auto.
rewrite IHE; auto.
Qed.
(* A delta substitution over E commutes with subst_tb for a variable X
   outside E, when T is closed. *)
Lemma apply_delta_subst_env_subst_tb_swap : forall F E dsubst X T,
wf_delta_subst E dsubst ->
X `notin` dom E ->
wf_typ empty T ->
apply_delta_subst_env dsubst (map (subst_tb X T) F) =
map (subst_tb X T) (apply_delta_subst_env dsubst F).
Proof.
induction F; intros E dsubst X T Hwfd XnE Hwft; simpl; auto.
destruct a.
destruct b; simpl; simpl_env.
rewrite IHF with (E:=E); auto.
(* the single-type case is delta_subst_permut on the binding's type *)
rewrite delta_subst_permut with (dE:=E); auto.
rewrite IHF with (E:=E); auto.
apply dom_delta_subst in Hwfd. rewrite <- Hwfd.
apply dom__ddom in XnE. auto.
Qed.
(* Head inversion: if dsubst'++dsubst is well formed over EE, then EE splits
   so that the SUFFIX dsubst is well formed over the suffix environment
   (only the suffix's well-formedness is recovered here). *)
Lemma wf_dsubst_dapp_head : forall EE dsubst' dsubst,
wf_delta_subst EE (dsubst'++dsubst) ->
exists E', exists E,
EE = E' ++ E /\
ddom_env E [=] dom dsubst /\
ddom_env E' [=] dom dsubst' /\
wf_delta_subst E dsubst.
Proof.
intros EE dsubst' dsubst Wfd.
remember (dsubst'++dsubst) as dsE.
generalize dependent dsubst'.
generalize dependent dsubst.
induction Wfd; intros; subst.
symmetry in HeqdsE.
apply app_eq_nil in HeqdsE.
destruct HeqdsE; subst.
exists nil. exists nil. simpl. auto.
(* type binding: either it falls into dsubst' or dsubst' is nil *)
apply one_eq_app in HeqdsE.
destruct HeqdsE as [[dsE'' [EQ1 EQ2]] | [EQ1 EQ2]]; subst.
assert (dsE''++dsubst=dsE''++dsubst) as EQ. auto.
apply IHWfd in EQ.
destruct EQ as [E1 [E2 [EQ1 [EQ2 [EQ3 Hwfd2]]]]]; subst.
exists ([(X, bind_kn)]++E1). exists E2.
simpl. split; auto. split; auto.
split; auto. rewrite EQ3. clear. fsetdec.
assert (SE=nil++SE) as EQ. auto.
apply IHWfd in EQ.
destruct EQ as [E1 [E2 [EQ1 [EQ2 [EQ3 Hwfd2]]]]]; subst.
exists nil. exists ([(X, bind_kn)]++E1++E2).
simpl. split; auto.
split. simpl_env. rewrite EQ2. rewrite EQ3. clear. fsetdec.
split; auto.
simpl_env.
apply wf_delta_subst_styp; auto.
(* term binding in the environment: absorbed by the prefix *)
assert (dsubst'++dsubst=dsubst'++dsubst) as EQ. auto.
apply IHWfd in EQ.
destruct EQ as [E1 [E2 [EQ1 [EQ2 [EQ3 Hwfd2]]]]]; subst.
exists ([(x, bind_typ T)]++E1). exists E2.
simpl. split; auto.
Qed.
(* Composition on environments: applying dsubst'++dsubst equals applying
   dsubst first and dsubst' afterwards (F only supplies well-formedness). *)
Lemma apply_delta_subst_env_dsubst_app : forall dsubst' dsubst E F,
wf_delta_subst F (dsubst'++dsubst) ->
apply_delta_subst_env (dsubst'++dsubst) E =
apply_delta_subst_env dsubst' (apply_delta_subst_env dsubst E).
Proof.
intros dsubst' dsubst E F Hwfd.
remember (dsubst'++dsubst) as dsE.
generalize dependent dsubst'.
generalize dependent E.
induction Hwfd; intros; subst.
symmetry in HeqdsE.
apply app_eq_nil in HeqdsE.
destruct HeqdsE; subst.
rewrite apply_delta_subst_env_nil.
rewrite apply_delta_subst_env_nil. auto.
apply one_eq_app in HeqdsE.
destruct HeqdsE as [[dsE'' [EQ1 EQ2]] | [EQ1 EQ2]]; subst.
assert (dsE''++dsubst=dsE''++dsubst) as EQ. auto.
simpl_env.
rewrite apply_delta_subst_env_cons.
rewrite apply_delta_subst_env_cons.
apply IHHwfd with (E:=map (subst_tb X T) E0) in EQ.
assert (J:=Hwfd).
apply wf_dsubst_dapp_head in J.
destruct J as [E1 [E2 [J1 [J2 [J3 J4]]]]]; subst.
(* push subst_tb X T through the suffix substitution before recursing *)
rewrite <- apply_delta_subst_env_subst_tb_swap with (E:=E2) (dsubst:=dsubst); auto.
simpl_env.
rewrite apply_delta_subst_env_nil. auto.
assert (dsubst'++dsubst=dsubst'++dsubst) as EQ. auto.
apply IHHwfd with (E:=E0) in EQ. assumption.
Qed.
(* Reversed-substitution analogue of dsubst_permut: the REVERSED delta
   substitution commutes with substituting a closed type for a variable
   outside the environment. *)
Lemma commut_subst_tt_rdsubst: forall t X E dsubst T,
wf_delta_subst E dsubst ->
wf_typ nil t ->
X `notin` dom E ->
apply_delta_subst_typ (rev dsubst) (subst_tt X t T) =
subst_tt X t (apply_delta_subst_typ (rev dsubst) T).
Proof.
intros t X E dsubst T Hwfd Hwft Fr.
generalize dependent t.
generalize dependent T.
induction Hwfd; intros; simpl; auto.
simpl_env in*. destruct_notin.
assert (J:=Hwfd).
apply wf_delta_subst__wf_dsubst in Hwfd.
apply wf_dsubst_rev in Hwfd.
apply dom_delta_subst in J.
apply wf_dsubst_styp_rev with (X:=X0) (T:=T) in Hwfd; auto.
(* peel the tail entry of the reversed substitution on both sides *)
rewrite dsubst_head_opt; auto.
rewrite dsubst_head_opt; auto.
rewrite <- IHHwfd; auto.
rewrite subst_tt_commute; eauto using notin_fv_wf.
apply dom__ddom in H.
rewrite J in H.
rewrite <- dom_rev; auto.
Qed.
(* Order independence: applying a well-formed delta substitution to a type
   gives the same result as applying its reverse (entries are disjoint and
   their images are closed).
   NOTE(review): written in Coq's declarative proof language
   (proof./escape./return/end proof.), which was removed in Coq 8.7 —
   this file will not compile on modern Coq without rewriting these. *)
Lemma apply_delta_subst_typ_rev : forall E dsubst t,
wf_delta_subst E dsubst ->
apply_delta_subst_typ dsubst t = apply_delta_subst_typ (rev dsubst) t.
proof.
let E, dsubst, t:typ be such that Hwfd:(wf_delta_subst E dsubst).
escape.
generalize dependent t.
induction Hwfd; intro t; simpl; simpl_env; auto.
rewrite commut_subst_tt_dsubst with (E:=E); auto.
rewrite IHHwfd.
rewrite <- commut_subst_tt_rdsubst with (E:=E); auto.
rewrite dsubst_head_opt; auto.
apply wf_dsubst_styp_rev; auto.
apply wf_delta_subst__wf_dsubst in Hwfd.
apply wf_dsubst_rev; auto.
apply dom_delta_subst in Hwfd.
apply dom__ddom in H.
rewrite Hwfd in H.
rewrite <- dom_rev; auto.
return.
end proof.
Qed.
(* Expression-level analogue of dsubst_opt: a middle entry (X, t) of a
   well-formed delta substitution can be pulled out as a subst_te. *)
Lemma dsubst_te_opt : forall dsubst X t e dsubst',
wf_dsubst (dsubst'++[(X, t)]++dsubst) ->
apply_delta_subst (dsubst'++[(X, t)]++dsubst) e =
apply_delta_subst (dsubst'++dsubst) (subst_te X t e).
Proof.
intros dsubst X t e dsubst' Hwf_dsubst.
remember (dsubst'++[(X, t)]++dsubst) as Dsubst.
generalize dependent e.
generalize dependent dsubst'.
(wf_dsubst_cases (induction Hwf_dsubst) Case);
intros dsubst' HeqDsubst e.
Case "wf_dsubst_empty".
contradict HeqDsubst; auto.
Case "wf_dsubst_styp".
destruct (one_eq_app _ _ _ _ _ HeqDsubst) as [[dsubst'' [DEQ1 DEQ2]] | [DEQ1 DEQ2]]; subst.
SSCase "exists DS'',DS'=DS&X0'' /\ DS0=DS&X&DS'' ".
simpl. simpl_env.
(* both images are closed, so the two type substitutions commute in e *)
rewrite <- subst_te_commute; auto.
eauto using notin_fv_wf.
apply dsubst_nil_typ in Hwf_dsubst.
eauto using notin_fv_wf.
SSCase "DS'=nil /\ DS&X = DS0&X0 ".
inversion DEQ2. subst.
simpl_env in *. destruct_notin.
auto.
Qed.
(* Tail specialization of dsubst_te_opt. *)
Lemma dsubst_te_head_opt : forall dsubst X t e,
wf_dsubst (dsubst++[(X, t)]) ->
apply_delta_subst (dsubst++[(X, t)]) e =
apply_delta_subst dsubst (subst_te X t e).
Proof.
intros dsubst X t e Hwfd.
rewrite_env (dsubst++nil).
rewrite_env (dsubst++[(X, t)]++nil).
apply dsubst_te_opt; auto.
Qed.
(* The reversed delta substitution commutes with subst_te for a variable
   outside its domain (t closed). *)
Lemma swap_subst_te_rdsubst: forall t X E dsubst e,
wf_delta_subst E dsubst ->
wf_typ nil t ->
X `notin` dom dsubst ->
subst_te X t (apply_delta_subst (rev dsubst) e) =
apply_delta_subst (rev dsubst) (subst_te X t e).
Proof.
intros t X E dsubst e Hwfd Hwft xndsubst.
generalize dependent e.
generalize dependent t.
induction Hwfd; intros t Hwft e0; simpl; eauto.
simpl_env.
simpl_env in*. destruct_notin.
assert (J:=Hwfd).
apply wf_delta_subst__wf_dsubst in Hwfd.
apply wf_dsubst_rev in Hwfd.
apply dom_delta_subst in J.
apply wf_dsubst_styp_rev with (X:=X0) (T:=T) in Hwfd; auto.
rewrite dsubst_te_head_opt; auto.
rewrite dsubst_te_head_opt; auto.
rewrite IHHwfd; auto.
rewrite subst_te_commute; eauto using notin_fv_wf.
apply dom__ddom in H.
rewrite J in H.
rewrite <- dom_rev; auto.
Qed.
(* Expression-level order independence: applying a well-formed delta
   substitution equals applying its reverse.
   NOTE(review): declarative proof language (removed in Coq 8.7). *)
Lemma apply_delta_subst_rev : forall E dsubst e,
wf_delta_subst E dsubst ->
apply_delta_subst dsubst e = apply_delta_subst (rev dsubst) e.
proof.
let E, dsubst, e:exp be such that Hwfd:(wf_delta_subst E dsubst).
escape.
generalize dependent e.
induction Hwfd; intro e; simpl; simpl_env; auto.
(* X is fresh for the remaining substitution, enabling both swaps *)
assert (X `notin` dom SE) as J.
apply dom_delta_subst in Hwfd.
apply dom__ddom in H.
rewrite Hwfd in H. exact H.
rewrite <- swap_subst_te_dsubst with (E:=E); auto.
rewrite IHHwfd.
rewrite swap_subst_te_rdsubst with (E:=E); auto.
rewrite dsubst_te_head_opt; auto.
apply wf_dsubst_styp_rev; auto.
apply wf_delta_subst__wf_dsubst in Hwfd.
apply wf_dsubst_rev; auto.
apply dom_delta_subst in Hwfd.
apply dom__ddom in H.
rewrite Hwfd in H.
rewrite <- dom_rev; auto.
return.
end proof.
Qed.
(* Environment-free well-formedness of a gamma substitution: keys are
   distinct and each value is a closed, well-typed expression.  Mirrors
   wf_dsubst for the term-level substitution. *)
Inductive wf_gsubst : gamma_subst -> Prop :=
| wf_gsubst_empty :
wf_gsubst gamma_nil
| wf_gsubst_sval : forall SE x e T,
wf_gsubst SE -> x `notin` dom SE -> typing nil nil e T ->
wf_gsubst ([(x, e)] ++ SE)
.
Tactic Notation "wf_gsubst_cases" tactic(first) tactic(c) :=
first;
[ c "wf_gsubst_empty" |
c "wf_gsubst_sval"].
Hint Constructors wf_gsubst.
(* Every entry of a well-formed gamma substitution is closed and typed. *)
Lemma gsubst_nil_term : forall gsubst x e gsubst',
wf_gsubst (gsubst'++[(x, e)]++gsubst) ->
exists t, typing nil nil e t.
Proof.
intros gsubst x e gsubst' Hwf_gsubst.
remember (gsubst'++[(x, e)]++gsubst) as Gsubst.
generalize dependent gsubst'.
(wf_gsubst_cases (induction Hwf_gsubst) Case);
intros gsubst' HeqGsubst.
Case "wf_gsubst_empty".
contradict HeqGsubst; auto.
Case "wf_gsubst_sval".
destruct (one_eq_app _ _ _ _ _ HeqGsubst) as [[gsubst'' [GEQ1 GEQ2]] | [GEQ1 GEQ2]]; subst.
SCase "exists GS'',GS'=GS&x0'' /\ GS0=GS&x&GS'' ".
destruct IHHwf_gsubst with (gsubst':=gsubst'') as [t J]; auto.
exists t. exact J.
SCase "GS'=nil /\ GS&x = GS0&x0 ".
inversion GEQ2. subst.
exists T. exact H0.
Qed.
(* Term-level analogue of dsubst_opt: a middle entry (x, e) of a
   well-formed gamma substitution can be pulled out as a subst_ee. *)
Lemma gsubst_opt : forall gsubst x e e' gsubst',
wf_gsubst (gsubst'++[(x, e)]++gsubst) ->
apply_gamma_subst (gsubst'++[(x, e)]++gsubst) e' =
apply_gamma_subst (gsubst'++gsubst) (subst_ee x e e').
Proof.
intros gsubst x e e' gsubst' Hwf_gsubst.
remember (gsubst'++[(x, e)]++gsubst) as Gsubst.
generalize dependent e'.
generalize dependent gsubst'.
(wf_gsubst_cases (induction Hwf_gsubst) Case);
intros gsubst' HeqGsubst e'.
Case "wf_gsubst_empty".
contradict HeqGsubst; auto.
Case "wf_gsubst_sval".
destruct (one_eq_app _ _ _ _ _ HeqGsubst) as [[gsubst'' [GEQ1 GEQ2]] | [GEQ1 GEQ2]]; subst.
SCase "exists GS'',GS'=GS&x0'' /\ GS0=GS&x&GS'' ".
simpl. simpl_env.
apply gsubst_nil_term in Hwf_gsubst.
destruct Hwf_gsubst as [t Typing].
(* the substituted values are closed, so the two subst_ee commute *)
rewrite <- subst_ee_commute; auto.
apply notin_fv_ee_typing with (y:=x0) in H0; auto.
apply notin_fv_ee_typing with (y:=x0) in Typing; auto.
apply notin_fv_ee_typing with (y:=x) in H0; auto.
apply notin_fv_ee_typing with (y:=x) in Typing; auto.
SCase "GS'=nil /\ GS&x = GS0&x0 ".
inversion GEQ2. subst.
simpl_env in *. destruct_notin.
auto.
Qed.
(* Tail specialization of gsubst_opt. *)
Lemma gsubst_head_opt : forall gsubst x e e',
wf_gsubst (gsubst++[(x, e)]) ->
apply_gamma_subst (gsubst++[(x, e)]) e' =
apply_gamma_subst gsubst (subst_ee x e e').
Proof.
intros gsubst x e e' Hwfg.
rewrite_env (gsubst++nil).
rewrite_env (gsubst++[(x, e)]++nil).
apply gsubst_opt; auto.
Qed.
(* A gamma substitution well-formed w.r.t. an environment is well-formed
   in the environment-free sense. *)
Lemma wf_gamma_subst__wf_gsubst : forall E dsubst gsubst,
wf_gamma_subst E dsubst gsubst ->
wf_gsubst gsubst.
Proof.
intros E dsubst gsubst Hwfg.
(wf_gamma_subst_cases (induction Hwfg) Case); auto.
Case "wf_gamma_subst_sval".
(* the value is typed at the delta-substituted type *)
apply wf_gsubst_sval with (T:=apply_delta_subst_typ dsE T); auto.
apply dom_gamma_subst in Hwfg. destruct Hwfg as [J1 J2].
rewrite <- J2.
apply dom__gdom; auto.
Qed.
(* wf_gsubst is closed under appending a fresh binding at the tail. *)
Lemma wf_gsubst_sval_rev : forall SE x e T,
wf_gsubst SE -> x `notin` dom SE -> typing nil nil e T ->
wf_gsubst (SE++[(x, e)]).
Proof.
intros SE x e T Wfg.
generalize dependent x.
generalize dependent e.
generalize dependent T.
induction Wfg; intros; auto.
rewrite_env ([(x, e)]++gamma_nil).
apply wf_gsubst_sval with (T:=T); auto.
simpl_env.
apply wf_gsubst_sval with (T:=T); auto.
apply IHWfg with (T:=T0); auto.
Qed.
(* wf_gsubst is closed under reversal. *)
Lemma wf_gsubst_rev : forall E,
wf_gsubst E ->
wf_gsubst (rev E).
Proof.
intros E Wfg.
induction Wfg; intros; auto.
simpl. simpl_env.
apply wf_gsubst_sval_rev with (T:=T); auto.
rewrite <- dom_rev; auto.
Qed.
(* The REVERSED gamma substitution commutes with substituting a closed
   expression e' for a variable outside its domain. *)
Lemma swap_subst_ee_rgsubst: forall e' x E dsubst gsubst e t,
wf_gamma_subst E dsubst gsubst ->
typing nil nil e' t ->
x `notin` dom gsubst ->
subst_ee x e' (apply_gamma_subst (rev gsubst) e) =
apply_gamma_subst (rev gsubst) (subst_ee x e' e).
Proof.
intros e' x E dsubst gsubst e t Hwfg Typing xngsubst.
generalize dependent e.
generalize dependent e'.
generalize dependent t.
induction Hwfg; intros t e' Typing e0; simpl; eauto.
simpl_env in*. destruct_notin.
assert (J:=Hwfg).
apply wf_gamma_subst__wf_gsubst in Hwfg.
apply wf_gsubst_rev in Hwfg.
apply dom_gamma_subst in J. destruct J as [J1 J2].
apply wf_gsubst_sval_rev with (x:=x0) (e:=e) (T:=apply_delta_subst_typ dsE T) in Hwfg; auto.
(* peel the tail entry of the reversed substitution on both sides *)
rewrite gsubst_head_opt; auto.
rewrite gsubst_head_opt; auto.
rewrite IHHwfg with (t:=t); auto.
rewrite subst_ee_commute; eauto.
apply notin_fv_ee_typing with (y:=x0) in H0; auto.
apply notin_fv_ee_typing with (y:=x0) in Typing; auto.
apply notin_fv_ee_typing with (y:=x) in H0; auto.
apply notin_fv_ee_typing with (y:=x) in Typing; auto.
apply dom__gdom in H.
rewrite J2 in H.
rewrite <- dom_rev; auto.
Qed.
(* Same commutation for the substitution in its original order. *)
Lemma swap_subst_ee_gsubst': forall e' x E dsubst gsubst e t,
wf_gamma_subst E dsubst gsubst ->
typing nil nil e' t ->
x `notin` dom gsubst ->
subst_ee x e' (apply_gamma_subst gsubst e) =
apply_gamma_subst gsubst (subst_ee x e' e).
Proof.
intros e' x E dsubst gsubst e t Hwfg Hwft xngsubst.
generalize dependent e.
generalize dependent e'.
generalize dependent t.
induction Hwfg; intros t e' Hwft e0; simpl; eauto.
rewrite subst_ee_commute; eauto.
eauto using typing_fv.
eauto using typing_fv.
Qed.
(* Order independence for gamma substitutions: applying gsubst equals
   applying its reverse (entries are disjoint, values closed).
   NOTE(review): declarative proof language (removed in Coq 8.7). *)
Lemma apply_gamma_subst_rev : forall E dsubst gsubst e,
wf_gamma_subst E dsubst gsubst ->
apply_gamma_subst gsubst e = apply_gamma_subst (rev gsubst) e.
proof.
let E, dsubst, gsubst, e:exp be such that Hwfg:(wf_gamma_subst E dsubst gsubst).
escape.
generalize dependent e.
induction Hwfg; intro e0; simpl; simpl_env; auto.
(* x is fresh for the remaining substitution, enabling both swaps *)
assert (x `notin` dom gsE) as J.
apply dom_gamma_subst in Hwfg.
destruct Hwfg as [J1 J2].
apply dom__gdom in H.
rewrite J2 in H. exact H.
rewrite <- swap_subst_ee_gsubst' with (E:=E) (dsubst:=dsE) (t:=apply_delta_subst_typ dsE T); auto.
rewrite IHHwfg.
rewrite swap_subst_ee_rgsubst with (E:=E) (dsubst:=dsE) (t:=apply_delta_subst_typ dsE T); auto.
rewrite gsubst_head_opt; auto.
apply wf_gsubst_sval_rev with (T:=apply_delta_subst_typ dsE T); auto.
apply wf_gamma_subst__wf_gsubst in Hwfg.
apply wf_gsubst_rev; auto.
apply dom_gamma_subst in Hwfg.
destruct Hwfg as [J1 J2].
apply dom__gdom in H.
rewrite J2 in H.
rewrite <- dom_rev; auto.
return.
end proof.
Qed.
(* The non-linear component of a well-formed linear gamma substitution is
   itself a well-formed gamma substitution. *)
Lemma wf_lgamma_subst__wf_gsubst : forall E D dsubst gsubst lgsubst,
wf_lgamma_subst E D dsubst gsubst lgsubst->
wf_gsubst gsubst.
Proof.
intros E D dsubst gsubst lgsubst Hwflg.
(wf_lgamma_subst_cases (induction Hwflg) Case); auto.
Case "wf_lgamma_subst_sval".
apply wf_gsubst_sval with (T:=apply_delta_subst_typ dsE T); auto.
apply dom_lgamma_subst in Hwflg. destruct Hwflg as [J1 [J2 J3]].
rewrite <- J2.
apply dom__gdom; auto.
Qed.
(* … and so is the linear component. *)
Lemma wf_lgamma_subst__wf_lgsubst : forall E D dsubst gsubst lgsubst,
wf_lgamma_subst E D dsubst gsubst lgsubst->
wf_gsubst lgsubst.
Proof.
intros E D dsubst gsubst lgsubst Hwflg.
(wf_lgamma_subst_cases (induction Hwflg) Case); auto.
Case "wf_lgamma_subst_slval".
apply wf_gsubst_sval with (T:=apply_delta_subst_typ dsE T); auto.
apply dom_lgamma_subst in Hwflg. destruct Hwflg as [J1 [J2 J3]].
rewrite <- J3. exact H0.
Qed.
(* The reversed LINEAR substitution commutes with substituting a closed
   expression for a variable outside its domain (cf. swap_subst_ee_rgsubst). *)
Lemma swap_subst_ee_rlgsubst: forall e' x E D dsubst gsubst lgsubst e t,
wf_lgamma_subst E D dsubst gsubst lgsubst ->
typing nil nil e' t ->
x `notin` dom lgsubst ->
subst_ee x e' (apply_gamma_subst (rev lgsubst) e) =
apply_gamma_subst (rev lgsubst) (subst_ee x e' e).
Proof.
intros e' x E D dsubst gsubst lgsubst e t Hwflg Typing xngsubst.
generalize dependent e.
generalize dependent e'.
generalize dependent t.
induction Hwflg; intros t e' Typing e0; simpl; eauto.
simpl_env in*. destruct_notin.
assert (J:=Hwflg).
apply wf_lgamma_subst__wf_lgsubst in Hwflg.
apply wf_gsubst_rev in Hwflg.
apply dom_lgamma_subst in J. destruct J as [J1 [J2 J3]].
apply wf_gsubst_sval_rev with (x:=x0) (e:=e) (T:=apply_delta_subst_typ dsE T) in Hwflg; auto.
rewrite gsubst_head_opt; auto.
rewrite gsubst_head_opt; auto.
rewrite IHHwflg with (t:=t); auto.
rewrite subst_ee_commute; eauto.
apply notin_fv_ee_typing with (y:=x0) in H1; auto.
apply notin_fv_ee_typing with (y:=x0) in Typing; auto.
apply notin_fv_ee_typing with (y:=x) in H1; auto.
apply notin_fv_ee_typing with (y:=x) in Typing; auto.
rewrite J3 in H0.
rewrite <- dom_rev; auto.
Qed.
(* Order independence for the linear substitution component.
   NOTE(review): declarative proof language (removed in Coq 8.7). *)
Lemma apply_lgamma_subst_rev : forall E lE dsubst gsubst lgsubst e,
wf_lgamma_subst E lE dsubst gsubst lgsubst ->
apply_gamma_subst lgsubst e = apply_gamma_subst (rev lgsubst) e.
proof.
let E, lE, dsubst, gsubst, lgsubst, e:exp be such that Hwflg:(wf_lgamma_subst E lE dsubst gsubst lgsubst).
escape.
generalize dependent e.
induction Hwflg; intro e0; simpl; simpl_env; auto.
(* x is fresh for the remaining linear substitution *)
assert (x `notin` dom lgsE) as J.
apply dom_lgamma_subst in Hwflg.
destruct Hwflg as [J1 [J2 J3]].
rewrite J3 in H0. exact H0.
rewrite <- swap_subst_ee_lgsubst with (E:=E) (dsubst:=dsE) (t:=apply_delta_subst_typ dsE T) (D:=lE) (gsubst:=gsE); auto.
rewrite IHHwflg.
rewrite swap_subst_ee_rlgsubst with (E:=E) (dsubst:=dsE) (t:=apply_delta_subst_typ dsE T) (D:=lE) (gsubst:=gsE); auto.
rewrite gsubst_head_opt; auto.
apply wf_gsubst_sval_rev with (T:=apply_delta_subst_typ dsE T); auto.
apply wf_lgamma_subst__wf_lgsubst in Hwflg.
apply wf_gsubst_rev; auto.
apply dom_lgamma_subst in Hwflg.
destruct Hwflg as [J1 [J2 J3]].
rewrite J3 in H0.
rewrite <- dom_rev; auto.
return.
end proof.
Qed.
(* Inversion: if erasing type variables from E leaves exactly one term
   binding (a, bind_typ T), then E is that binding surrounded by two
   segments containing only type-variable bindings. *)
Lemma remove_typvar_single_inv : forall E a T,
remove_typvar E = [(a, bind_typ T)] ->
exists E1, exists E2,
E = E1 ++ [(a, bind_typ T)] ++ E2 /\ remove_typvar E1 = nil /\ remove_typvar E2 = nil.
Proof.
induction E; intros x T H.
simpl in H.
inversion H.
destruct a.
destruct b; simpl in *; simpl_env in *.
(* Head is a kind binding: it is erased, so recurse on the tail. *)
apply IHE in H.
destruct H as [E1 [E2 [J1 [J2 J3]]]]; subst.
exists ([(a, bind_kn)]++E1). exists E2.
split; auto.
(* Head is a term binding: it must BE the surviving binding. *)
inversion H; subst.
exists nil. exists E. auto.
Qed.
(* If applying a delta substitution to an environment yields nil, the
   environment was already nil (the map is length-preserving). *)
Lemma apply_delta_subst_env_nil_inv : forall E dsubst,
apply_delta_subst_env dsubst E = nil ->
E = nil.
Proof.
induction E; intros dsubst H; auto.
destruct a.
destruct b; simpl in H.
inversion H.
inversion H.
Qed.
(* Inversion: a singleton term-binding image comes from a singleton
   term-binding source, whose type maps to T under the substitution. *)
Lemma apply_delta_subst_env_single_inv : forall E dsubst a T,
apply_delta_subst_env dsubst E = [(a, bind_typ T)] ->
exists T',
E = [(a, bind_typ T')] /\ apply_delta_subst_typ dsubst T' = T.
Proof.
induction E; intros dsubst x T H.
simpl in H.
inversion H.
destruct a.
destruct b; simpl in *; simpl_env in *.
inversion H; subst.
inversion H; subst.
apply apply_delta_subst_env_nil_inv in H3.
subst.
exists t. split; auto.
Qed.
(* remove_typvar distributes over environment concatenation. *)
Lemma remove_typvar_app : forall E E',
remove_typvar (E ++ E') = remove_typvar E ++ remove_typvar E'.
Proof.
induction E; intros E'; simpl; auto.
destruct a.
destruct b; auto.
rewrite IHE. auto.
Qed.
(* gen_typ_abs over a concatenation is the nested application:
   abstract over E' first, then over E. *)
Lemma gen_typ_abs_app : forall E E' t,
gen_typ_abs (E ++ E') t = gen_typ_abs E (gen_typ_abs E' t).
Proof.
induction E; intros E' t; simpl; auto.
destruct a.
destruct b; auto.
rewrite IHE. auto.
Qed.
(* If E has no term bindings (empty gdom), gen_typ_abs adds nothing. *)
Lemma gen_typ_abs_id : forall E t,
gdom_env E [=] {} ->
gen_typ_abs E t = t.
Proof.
induction E; intros t Heq; simpl; auto.
destruct a.
destruct b; auto.
simpl in Heq.
(* A term binding at the head would put a into the empty set — absurd. *)
assert (a `in` {}) as FALSE.
rewrite <- Heq.
auto.
contradict FALSE; auto.
Qed.
(* If x is the ONLY term binding of a uniq environment, gen_typ_abs
   collapses to a single typ_iarrow over x's type. *)
Lemma gen_typ_abs_opt : forall E1 x t0 E2 t,
uniq (E1++[(x, bind_typ t0)]++E2) ->
gdom_env (E1++[(x, bind_typ t0)]++E2) [=] {{x}} ->
gen_typ_abs (E1++[(x, bind_typ t0)]++E2) t = typ_iarrow t0 t.
Proof.
induction E1; intros x t0 E2 t Uniq Hdom.
simpl in *.
simpl_env in Uniq. inversion Uniq; subst.
rewrite gen_typ_abs_id; auto.
clear - Hdom H3. apply dom__gdom in H3. fsetdec.
destruct a.
destruct b; simpl in *.
inversion Uniq; subst.
apply IHE1 with (t:=t) in H1; auto.
simpl_env in Hdom.
(* A second term binding a would also lie in the singleton {{x}},
   contradicting uniqueness (a <> x). *)
assert (a <> x) as anx.
simpl_env in Uniq. clear - Uniq. solve_uniq.
assert (a `in` {{x}}) as aisx.
rewrite <- Hdom.
auto.
contradict aisx; auto.
Qed.
(* Generalized induction for from_subst_to_ctx_app: each value binding
   in gsubst' is discharged by wrapping the context C in one application
   (gen_ctx_app), stripping one typ_iarrow from the generated abstraction
   type. E' is the already-processed prefix of the environment; the
   conclusion retains abstractions only over the remaining E. *)
Lemma _from_subst_to_ctx_app : forall gsubst' dsubst' dsubst E' E lE t t' C,
wf_delta_subst (remove_tmvar E') dsubst' ->
wf_delta_subst (remove_tmvar E) dsubst ->
wf_gamma_subst (apply_delta_subst_env (dsubst'++dsubst) (remove_typvar E')) nil (rev gsubst') ->
contexting (E'++E) lE t C nil nil (gen_typ_abs (apply_delta_subst_env (rev (dsubst'++dsubst)) (E'++E)) (apply_delta_subst_typ (rev (dsubst'++dsubst)) t')) ->
contexting (E'++E) lE t (gen_ctx_app gsubst' C) nil nil (gen_typ_abs (apply_delta_subst_env (rev (dsubst'++dsubst)) E) (apply_delta_subst_typ (rev (dsubst'++dsubst)) t')).
Proof.
induction gsubst'; intros dsubst' dsubst E' E lE t t' C Hwfd' Hwfd Hwfg Hctx; simpl.
(* Base case: gsubst' is empty, so remove_typvar E' has empty gdom and
   the abstractions over E' vanish by gen_typ_abs_id. *)
simpl in Hwfg.
apply wf_gsubst_nil_gsubst in Hwfg.
rewrite apply_delta_subst_env_app in Hctx.
rewrite gen_typ_abs_app in Hctx.
rewrite gen_typ_abs_id in Hctx; auto.
rewrite <- apply_delta_subst_env_gdom in Hwfg.
rewrite <- apply_delta_subst_env_gdom.
rewrite gdom_env_remove_typvar in Hwfg; auto.
(* Inductive case: split off the head binding (a, e) of gsubst'. *)
destruct a. simpl in Hwfg. simpl_env in Hwfg.
apply wf_gsubst_gapp_inv in Hwfg;
try solve [auto | rewrite <- apply_delta_subst_env_ddom; auto using ddom_env_remove_typvar].
destruct Hwfg as [E1 [E2 [EQ1 [EQ2 [EQ3 [Hwfg1 Hwfg2]]]]]]; subst.
(* Decompose E' accordingly: E' = E1'0 ++ E2'0, with the head binding
   living in E2'0. *)
rewrite apply_delta_subst_env_remove_typvar_commut in EQ1.
apply remove_typvar_app_inv in EQ1.
destruct EQ1 as [E1' [E2' [EQ' [EQ2' EQ3']]]]; subst.
apply apply_delta_subst_env_app_inv in EQ'.
destruct EQ' as [E1'0 [E2'0 [EQ5 [EQ6 EQ7]]]]; subst.
rewrite_env (E1'0++(E2'0++E)) in Hctx.
rewrite remove_tmvar_app in Hwfd'.
assert (gdom_env (remove_tmvar E1'0 ++ remove_tmvar E2'0) [=] {}) as EQ.
rewrite <- remove_tmvar_app.
apply gdom_env_remove_tmvar; auto.
(* Split dsubst' to match the E1'0 / E2'0 split of E'. *)
assert (EQ5:=Hwfd').
apply wf_dsubst_app_inv' in EQ5; auto.
destruct EQ5 as [dsubst1'0 [dsubst2'0 [EQ5 [EQ6 [EQ7 [Hwfd'2 Hwfd'1]]]]]]; subst.
rewrite <- apply_delta_subst_env_remove_typvar_commut in Hwfg2.
assert (wf_delta_subst (remove_tmvar (E2'0++E)) (dsubst2'0++dsubst)) as Hwfd0.
rewrite remove_tmvar_app.
apply wf_dsubst_app_merge; auto.
apply contexting_regular in Hctx.
decompose [and] Hctx. clear - H.
apply wfe_remove_tmvar in H; auto.
rewrite remove_tmvar_app in H.
rewrite remove_tmvar_app in H.
rewrite_env (nil ++ remove_tmvar E1'0 ++ remove_tmvar E2'0 ++ remove_tmvar E) in H.
apply wf_env_strengthening_nilgdom in H; auto.
(* Apply the IH with the shifted split, then peel one gen_typ_abs. *)
simpl_env in Hctx. simpl_env in Hwfg2.
apply IHgsubst' with (dsubst':=dsubst1'0) in Hctx; auto.
rewrite apply_delta_subst_env_app in Hctx.
rewrite gen_typ_abs_app in Hctx.
rewrite <- apply_delta_subst_env_remove_typvar_commut in Hwfg1.
assert (J:=Hwfg1).
apply wf_gsubst_single_inv in J.
destruct J as [E1 [E2 [T [EQ4 Htyping]]]]; subst.
(* Locate the binding (a, bind_typ T') inside E2'0 and show the image
   segments correspond under the delta substitution. *)
assert (exists E21', exists E22', exists T',
E2'0 = E21' ++ [(a, bind_typ T')]++E22' /\
T = apply_delta_subst_typ (dsubst1'0++dsubst2'0++dsubst) T' /\
apply_delta_subst_env (dsubst1'0++dsubst2'0++dsubst) (remove_typvar E21') = E1 /\
apply_delta_subst_env (dsubst1'0++dsubst2'0++dsubst) (remove_typvar E22') = E2
) as EQ'.
clear - EQ4. simpl_env in EQ4.
assert (J:=EQ4).
rewrite apply_delta_subst_env_remove_typvar_commut in EQ4.
apply remove_typvar_app_inv in EQ4.
destruct EQ4 as [F1 [F2 [J1 [J2 J3]]]].
apply remove_typvar_app_inv in J3.
destruct J3 as [F3 [F4 [J4 [J5 J6]]]].
subst.
apply remove_typvar_single_inv in J5.
destruct J5 as [F5 [F6 [J3 [J4 J5]]]]; subst.
apply apply_delta_subst_env_app_inv in J1.
destruct J1 as [F7 [F8 [J7 [J8 J9]]]]; subst.
apply apply_delta_subst_env_app_inv in J9.
destruct J9 as [F9 [F10 [J10 [J11 J12]]]]; subst.
apply apply_delta_subst_env_app_inv in J11.
destruct J11 as [F11 [F12 [J13 [J14 J15]]]]; subst.
apply apply_delta_subst_env_app_inv in J15.
destruct J15 as [F13 [F14 [J16 [J17 J18]]]]; subst.
apply apply_delta_subst_env_single_inv in J17.
destruct J17 as [T' [J19 J20]]; subst.
exists (F7++F11). exists (F14++F10). exists T'.
simpl_env. split; auto. split; auto.
rewrite <- apply_delta_subst_env_remove_typvar_commut.
rewrite <- apply_delta_subst_env_remove_typvar_commut.
rewrite remove_typvar_app.
rewrite remove_typvar_app.
rewrite apply_delta_subst_env_app.
rewrite apply_delta_subst_env_app.
rewrite <- apply_delta_subst_env_remove_typvar_commut in J4.
rewrite J4.
rewrite <- apply_delta_subst_env_remove_typvar_commut in J5.
rewrite J5.
split; auto.
destruct EQ' as [E21' [E22' [T' [EQ8 [EQ9 [EQ10 EQ11]]]]]]; subst.
(* Build the application context: C applied to the substituted value,
   at the banged argument type (intuitionistic binding). *)
apply contexting_app1 with (D1':=nil) (D2':=nil)(T1':=typ_bang (apply_delta_subst_typ (rev (dsubst1'0++dsubst2'0++dsubst)) T')); auto.
rewrite distr_rev in Hctx.
rewrite distr_rev in Hctx.
simpl_env in Hctx.
rewrite apply_delta_subst_env_app in Hctx.
rewrite apply_delta_subst_env_app in Hctx.
simpl in Hctx.
simpl_env in Hctx.
rewrite <- distr_rev in Hctx.
rewrite <- distr_rev in Hctx.
simpl_env.
rewrite gen_typ_abs_opt in Hctx; auto.
simpl_env in Hctx. auto.
assert (uniq (E21'++[(a, bind_typ T')]++E22')) as Uniq.
apply contexting_regular in Hctx.
decompose [and] Hctx.
apply uniq_from_wf_env in H.
clear - H. solve_uniq.
apply apply_delta_subst_env_uniq with (dsubst:=(rev ((dsubst1'0++dsubst2'0)++dsubst))) in Uniq; auto.
rewrite apply_delta_subst_env_app in Uniq.
rewrite apply_delta_subst_env_app in Uniq; assumption.
simpl_env. simpl.
rewrite <- apply_delta_subst_env_gdom.
rewrite <- apply_delta_subst_env_gdom.
rewrite gdom_env_remove_typvar in EQ2.
rewrite <- apply_delta_subst_env_gdom in EQ2.
simpl_env in EQ2. simpl in EQ2. rewrite EQ2.
clear. fsetdec.
(* The argument's typing survives reversing the delta substitution. *)
clear - Htyping Hwfd Hwfd' Hctx.
rewrite <- apply_delta_subst_typ_rev with (E:=(remove_tmvar E1'0++remove_tmvar (E21' ++ [(a, bind_typ T')] ++E22')) ++ remove_tmvar E); auto.
rewrite_env ((dsubst1'0++dsubst2'0)++dsubst).
apply wf_dsubst_app_merge; auto.
apply contexting_regular in Hctx. decompose [and] Hctx. clear Hctx.
apply wfe_remove_tmvar in H.
apply uniq_from_wf_env in H. clear - H.
repeat (rewrite remove_tmvar_app).
repeat (rewrite remove_tmvar_app in H).
simpl_env in H. simpl_env. auto.
clear - Htyping.
eapply empty_typing_disjdom; eauto.
Qed.
(* Corollary of _from_subst_to_ctx_app with E' := E and E := nil:
   wrapping C with applications for the whole gsubst discharges every
   term abstraction, leaving just the substituted result type. *)
Lemma from_subst_to_ctx_app : forall E lE dsubst gsubst t t' C,
wf_delta_subst (remove_tmvar E) dsubst ->
wf_gamma_subst (apply_delta_subst_env dsubst (remove_typvar E)) nil (rev gsubst) ->
contexting E lE t C nil nil (gen_typ_abs (apply_delta_subst_env (rev dsubst) E) (apply_delta_subst_typ (rev dsubst) t')) ->
contexting E lE t (gen_ctx_app gsubst C) nil nil (apply_delta_subst_typ (rev dsubst) t').
Proof.
intros E lE dsubst gsubst t t' C Hwfd Hwfg Hctx.
rewrite_env (E++nil).
(* Over the empty remainder, gen_typ_abs is the identity. *)
assert (gen_typ_abs (apply_delta_subst_env (rev (dsubst++nil)) nil) (apply_delta_subst_typ (rev (dsubst++nil)) t') = (apply_delta_subst_typ (rev dsubst) t')) as EQ.
simpl. simpl_env. auto.
rewrite <- EQ.
apply _from_subst_to_ctx_app; simpl_env; auto.
Qed.
(* Linear analogues of the gen_typ_abs lemmas above: gen_typ_labs
   abstracts over a LINEAR environment with typ_arrow. *)
Lemma gen_typ_labs_app : forall lE lE' t,
gen_typ_labs (lE ++ lE') t = gen_typ_labs lE (gen_typ_labs lE' t).
Proof.
induction lE; intros lE' t; simpl; auto.
destruct a.
destruct l; rewrite IHlE; auto.
Qed.
(* An empty-domain linear environment contributes no abstractions. *)
Lemma gen_typ_labs_id : forall lE t,
dom lE [=] {} ->
gen_typ_labs lE t = t.
Proof.
induction lE; intros t Heq; simpl; auto.
destruct a.
destruct l.
simpl in Heq.
assert (a `in` {}) as FALSE.
rewrite <- Heq.
auto.
contradict FALSE; auto.
Qed.
(* With an empty linear substitution, the linear environment must have
   an empty domain (they track each other in wf_lgamma_subst). *)
Lemma wf_lgsubst_nil_lgsubst : forall lE E dsubst gsubst,
wf_lgamma_subst E lE dsubst gsubst nil->
dom lE [=] {}.
Proof.
induction lE; intros E dsubst gsubst Wflg; auto.
destruct a.
destruct l; simpl.
apply dom_lgamma_subst in Wflg.
decompose [and] Wflg.
simpl in H2. auto.
Qed.
(* Dropping the linear parts of a well-formed combined substitution
   keeps it well formed. *)
Lemma wf_lgamma_subst_strengthen_when_nillE : forall E lE dsubst gsubst lgsubst,
wf_lgamma_subst E lE dsubst gsubst lgsubst ->
wf_lgamma_subst E nil dsubst gsubst nil.
Proof.
intros E lE dsubst gsubst lgsubst Hwflg.
induction Hwflg; auto.
Qed.
(* Inversion of a concatenated linear substitution over empty E/dsubst:
   the linear environment splits into matching segments with matching
   domains, each separately well formed. *)
Lemma wf_lgsubst_gapp_inv : forall lEE lgsubst' lgsubst,
wf_lgamma_subst nil lEE nil nil (lgsubst'++lgsubst) ->
exists lE', exists lE,
lEE = lE' ++ lE /\
dom lE [=] dom lgsubst /\
dom lE' [=] dom lgsubst' /\
wf_lgamma_subst nil lE nil nil lgsubst /\
wf_lgamma_subst nil lE' nil nil lgsubst'.
Proof.
intros lEE lgsubst' lgsubst Wflg.
remember (lgsubst'++lgsubst) as lgsE.
generalize dependent lgsubst'.
generalize dependent lgsubst.
induction Wflg; intros; subst.
symmetry in HeqlgsE.
apply app_eq_nil in HeqlgsE.
destruct HeqlgsE as [J1 J2]; subst.
exists nil. exists nil. split; auto.
assert (lgsubst'++lgsubst = lgsubst'++lgsubst) as EQ. auto.
apply IHWflg in EQ.
destruct EQ as [lE1 [lE2 [EQ1 [EQ2 [EQ3 [Wfg1 Wfg2]]]]]]; subst.
exists lE1. exists lE2.
split; auto.
(* slval case: decide whether the head binding belongs to lgsubst'
   (non-empty prefix) or lgsubst' is empty. *)
apply one_eq_app in HeqlgsE. destruct HeqlgsE as [[lgsE'' [lgsEQ1 lgsEQ2]] | [lgsEQ1 lgsEQ2]]; subst.
assert (lgsE''++lgsubst = lgsE''++lgsubst) as EQ. auto.
apply IHWflg in EQ.
destruct EQ as [lE1 [lE2 [EQ1 [EQ2 [EQ3 [Wflg1 Wflg2]]]]]]; subst.
exists ([(x, lbind_typ T)]++lE1). exists lE2.
split; auto.
split; auto.
split; auto.
simpl. rewrite EQ3. fsetdec.
split; auto.
simpl_env. apply wf_lgamma_subst_slval; auto.
exists nil. exists ([(x, lbind_typ T)]++lE).
split; auto.
split.
simpl_env. apply dom_lgamma_subst in Wflg.
decompose [and] Wflg. rewrite H6. fsetdec.
split; auto.
split.
simpl_env. apply wf_lgamma_subst_slval; auto.
apply wf_lgamma_subst_strengthen_when_nillE in Wflg; auto.
assert (lgsubst'++lgsubst = lgsubst'++lgsubst) as EQ. auto.
apply IHWflg in EQ.
destruct EQ as [lE1 [lE2 [EQ1 [EQ2 [EQ3 [Wfg1 Wfg2]]]]]]; subst.
exists lE1. exists lE2.
split; auto.
Qed.
(* Linear analogue of gen_typ_abs_opt: a singleton-domain linear
   environment collapses to one typ_arrow. *)
Lemma gen_typ_labs_opt : forall lE1 x t0 lE2 t,
uniq (lE1++[(x, lbind_typ t0)]++lE2) ->
dom (lE1++[(x, lbind_typ t0)]++lE2) [=] {{x}} ->
gen_typ_labs (lE1++[(x, lbind_typ t0)]++lE2) t = typ_arrow t0 t.
Proof.
induction lE1; intros x t0 lE2 t Uniq Hdom.
simpl in *.
simpl_env in Uniq. inversion Uniq; subst.
rewrite gen_typ_labs_id; auto.
clear - Hdom H3. fsetdec.
destruct a.
destruct l; simpl in *.
simpl_env in Hdom.
assert (a <> x) as anx.
simpl_env in Uniq. clear - Uniq. solve_uniq.
assert (a `in` {{x}}) as aisx.
rewrite <- Hdom.
auto.
contradict aisx; auto.
Qed.
(* Inversion of a singleton linear substitution: its binding must have
   a matching typed entry somewhere in the linear environment, and the
   substituted value is closed and well typed. *)
Lemma wf_lgsubst_single_inv : forall lE x v,
wf_lgamma_subst nil lE nil nil [(x, v)] ->
exists lE1, exists lE2, exists t,
lE = lE1 ++ [(x, lbind_typ t)] ++ lE2 /\
typing nil nil v t.
Proof.
induction lE; intros x v Hwflg.
inversion Hwflg.
destruct a.
destruct l; simpl_env in *.
inversion Hwflg; subst.
exists nil. exists lE. exists t. split; auto.
Qed.
(* Inversion: a split of the image of apply_delta_subst_lenv reflects a
   split of the source linear environment. *)
Lemma apply_delta_subst_lenv_app_inv : forall lE dsubst lE1 lE2,
apply_delta_subst_lenv dsubst lE = lE1 ++ lE2 ->
exists lE1', exists lE2',
lE = lE1' ++ lE2' /\
apply_delta_subst_lenv dsubst lE1' = lE1 /\
apply_delta_subst_lenv dsubst lE2' = lE2.
Proof.
induction lE; intros dsubst lE1 lE2 H.
simpl in H.
symmetry in H.
apply app_eq_nil in H.
destruct H as [J1 J2]; subst.
exists nil. exists nil. split; auto.
simpl in H.
destruct a.
destruct l.
simpl_env in H.
apply one_eq_app in H. destruct H as [[lE'' [EQ1 EQ2]] | [EQ1 EQ2]]; subst.
apply IHlE in EQ2.
destruct EQ2 as [lE1' [lE2' [EQ1 [H1 H2]]]]; subst.
exists ([(a, lbind_typ t)]++lE1'). exists lE2'.
split; auto.
exists nil. exists ((a, lbind_typ t)::lE).
split; auto.
Qed.
(* apply_delta_subst_lenv distributes over concatenation. *)
Lemma apply_delta_subst_lenv_app : forall lE lE' dsubst,
apply_delta_subst_lenv dsubst (lE ++ lE') =
apply_delta_subst_lenv dsubst lE ++ apply_delta_subst_lenv dsubst lE'.
Proof.
induction lE; intros lE' dsubst; simpl; auto.
destruct a.
destruct l; rewrite IHlE; auto.
Qed.
(* apply_delta_subst_lenv preserves uniqueness (it keeps every key). *)
Lemma apply_delta_subst_lenv_uniq : forall E dsubst,
uniq E ->
uniq (apply_delta_subst_lenv dsubst E).
Proof.
induction E; intros dsubst Uniq; simpl; auto.
destruct a.
destruct l.
inversion Uniq; subst.
simpl_env.
apply uniq_push; auto.
rewrite <- apply_delta_subst_lenv_dom; auto.
Qed.
(* Length-preservation gives: nil image implies nil source. *)
Lemma apply_delta_subst_lenv_nil_inv : forall lE dsubst,
apply_delta_subst_lenv dsubst lE = nil ->
lE = nil.
Proof.
induction lE; intros dsubst H; auto.
destruct a.
destruct l; simpl in H.
inversion H.
Qed.
(* Singleton image implies singleton source with matching type. *)
Lemma apply_delta_subst_lenv_single_inv : forall E dsubst a T,
apply_delta_subst_lenv dsubst E = [(a, lbind_typ T)] ->
exists T',
E = [(a, lbind_typ T')] /\ apply_delta_subst_typ dsubst T' = T.
Proof.
induction E; intros dsubst x T H.
simpl in H.
inversion H.
destruct a.
destruct l; simpl in *; simpl_env in *.
inversion H; subst.
apply apply_delta_subst_lenv_nil_inv in H3.
subst.
exists t. split; auto.
Qed.
(* Linear counterpart of _from_subst_to_ctx_app: each binding of the
   linear substitution lgsubst' is discharged by one application
   (gen_ctx_lapp), stripping one typ_arrow from the generated
   abstraction type. lE' is the prefix of the linear environment
   matching lgsubst'; abstractions remain only over lE. *)
Lemma _from_subst_to_ctx_lapp : forall lgsubst' dsubst E lE' lE t t' C,
wf_delta_subst (remove_tmvar E) dsubst ->
wf_lgamma_subst nil (apply_delta_subst_lenv dsubst lE') nil nil (rev lgsubst') ->
contexting E (lE'++lE) t C nil nil (gen_typ_labs (apply_delta_subst_lenv (rev dsubst) (lE'++lE)) (apply_delta_subst_typ (rev dsubst) t')) ->
contexting E (lE'++lE) t (gen_ctx_lapp lgsubst' C) nil nil (gen_typ_labs (apply_delta_subst_lenv (rev dsubst) lE) (apply_delta_subst_typ (rev dsubst) t')).
Proof.
induction lgsubst'; intros dsubst E lE' lE t t' C Hwfd Hwflg' Hctx; simpl.
(* Base case: empty lgsubst' forces dom lE' [=] {}. *)
simpl in Hwflg'.
apply wf_lgsubst_nil_lgsubst in Hwflg'.
rewrite apply_delta_subst_lenv_app in Hctx.
rewrite gen_typ_labs_app in Hctx.
rewrite gen_typ_labs_id in Hctx; auto.
rewrite <- apply_delta_subst_lenv_dom in Hwflg'.
rewrite <- apply_delta_subst_lenv_dom; auto.
(* Inductive case: split off the head binding (a, e). *)
destruct a. simpl in Hwflg'. simpl_env in Hwflg'.
apply wf_lgsubst_gapp_inv in Hwflg';
try solve [auto | rewrite <- apply_delta_subst_lenv_dom; auto].
destruct Hwflg' as [lE1 [lE2 [EQ1 [EQ2 [EQ3 [Hwflg1' Hwflg2']]]]]]; subst.
apply apply_delta_subst_lenv_app_inv in EQ1.
destruct EQ1 as [lE1'0 [lE2'0 [EQ5 [EQ6 EQ7]]]]; subst.
simpl_env in Hctx.
apply IHlgsubst' with (dsubst:=dsubst) in Hctx; auto.
rewrite apply_delta_subst_lenv_app in Hctx.
rewrite gen_typ_labs_app in Hctx.
assert (J:=Hwflg1').
apply wf_lgsubst_single_inv in J.
destruct J as [E1 [E2 [T [EQ4 Htyping]]]]; subst.
(* Locate the binding (a, lbind_typ T') inside lE2'0 and match its
   image segments under the delta substitution. *)
assert (exists lE21', exists lE22', exists T',
lE2'0 = lE21' ++ [(a, lbind_typ T')]++lE22' /\
T = apply_delta_subst_typ dsubst T' /\
apply_delta_subst_lenv dsubst lE21' = E1 /\
apply_delta_subst_lenv dsubst lE22' = E2
) as EQ'.
clear - EQ4. simpl_env in EQ4.
assert (J:=EQ4).
apply apply_delta_subst_lenv_app_inv in EQ4.
destruct EQ4 as [F7 [F8 [J7 [J8 J9]]]]; subst.
apply apply_delta_subst_lenv_app_inv in J9.
destruct J9 as [F9 [F10 [J10 [J11 J12]]]]; subst.
apply apply_delta_subst_lenv_single_inv in J11.
destruct J11 as [T' [J19 J20]]; subst.
exists (F7). exists (F10). exists T'.
simpl_env. split; auto.
destruct EQ' as [lE21' [lE22' [T' [EQ8 [EQ9 [EQ10 EQ11]]]]]]; subst.
(* Apply C to the substituted value at the (linear) argument type. *)
apply contexting_app1 with (D1':=nil) (D2':=nil)(T1':=apply_delta_subst_typ (rev dsubst) T'); auto.
simpl_env in Hctx.
rewrite apply_delta_subst_lenv_app in Hctx.
rewrite apply_delta_subst_lenv_app in Hctx.
simpl in Hctx.
simpl_env in Hctx.
simpl_env.
rewrite gen_typ_labs_opt in Hctx; auto.
simpl_env in Hctx. auto.
assert (uniq (lE21'++[(a, lbind_typ T')]++lE22')) as Uniq.
apply contexting_regular in Hctx.
decompose [and] Hctx.
apply uniq_from_wf_lenv in H1.
clear - H1. solve_uniq.
apply apply_delta_subst_lenv_uniq with (dsubst:=(rev dsubst)) in Uniq; auto.
rewrite apply_delta_subst_lenv_app in Uniq.
rewrite apply_delta_subst_lenv_app in Uniq; assumption.
simpl_env. simpl.
rewrite <- apply_delta_subst_lenv_dom.
rewrite <- apply_delta_subst_lenv_dom.
rewrite <- apply_delta_subst_lenv_dom in EQ2.
simpl_env in EQ2. simpl in EQ2. rewrite EQ2.
clear. fsetdec.
(* Typing of the argument under the reversed delta substitution. *)
clear - Htyping Hwfd.
rewrite <- apply_delta_subst_typ_rev with (E:=remove_tmvar E); auto.
clear - Htyping.
eapply empty_typing_disjdom; eauto.
Qed.
(* Corollary of _from_subst_to_ctx_lapp with lE' := lE and lE := nil:
   applying the whole linear substitution removes every typ_arrow
   abstraction generated over the linear environment. *)
Lemma from_subst_to_ctx_lapp : forall E lE dsubst lgsubst t t' C,
wf_delta_subst (remove_tmvar E) dsubst ->
wf_lgamma_subst nil (apply_delta_subst_lenv dsubst lE) nil nil (rev lgsubst) ->
contexting E lE t C nil nil (gen_typ_labs (apply_delta_subst_lenv (rev dsubst) lE) (apply_delta_subst_typ (rev dsubst) t')) ->
contexting E lE t (gen_ctx_lapp lgsubst C) nil nil (apply_delta_subst_typ (rev dsubst) t').
Proof.
intros E lE dsubst lgsubst t t' C Hwfd Hwfg Hctx.
rewrite_env (lE++nil).
assert (gen_typ_labs (apply_delta_subst_lenv (rev dsubst) nil) (apply_delta_subst_typ (rev dsubst) t') = (apply_delta_subst_typ (rev dsubst) t')) as EQ.
simpl. auto.
rewrite <- EQ.
apply _from_subst_to_ctx_lapp; simpl_env; auto.
Qed.
(* After erasing term bindings, the whole domain consists of type
   variables, so dom and ddom_env coincide. *)
Lemma dom_ddom_remove_tmvar : forall E,
dom (remove_tmvar E) [=] ddom_env (remove_tmvar E).
Proof.
induction E; simpl; auto.
destruct a.
destruct b; simpl; auto.
fsetdec.
Qed.
(* A delta substitution remains well formed after erasing term
   bindings from its environment (it only ever consults kind bindings). *)
Lemma wf_delta_subst__remove_tmvar : forall E dsubst,
wf_delta_subst E dsubst ->
wf_delta_subst (remove_tmvar E) dsubst.
Proof.
induction 1; simpl; simpl_env; auto.
apply wf_delta_subst_styp; auto.
rewrite dom_ddom_remove_tmvar.
rewrite ddom_env_remove_tmvar.
apply dom__ddom; auto.
Qed.
(* Freshness is preserved by filtering: removing type-variable bindings
   can only shrink the domain. *)
Lemma notin_remove_typvar_dom : forall x E,
x `notin` dom E ->
x `notin` dom (remove_typvar E).
Proof.
intros x.
induction E; intros; simpl; auto.
destruct a.
simpl_env in H.
destruct_notin.
destruct b; simpl; auto.
Qed.
(* A well-formed gamma substitution yields a well-formed delta
   substitution over the same environment, and a well-formed
   environment. NOTE(review): this proof omits the `Proof.` keyword,
   which Coq accepts. *)
Lemma wf_gamma_subst__wf_subst : forall E dsubst gsubst,
wf_gamma_subst E dsubst gsubst -> wf_delta_subst E dsubst /\ wf_env E.
intros.
induction H; auto.
destruct IHwf_gamma_subst.
split; auto.
destruct IHwf_gamma_subst.
split; auto.
Qed.
(* From a well-formed gamma substitution over E, we get one over the
   term-only environment with the delta substitution already applied to
   its types — and with an empty residual delta substitution. *)
Lemma wf_gamma_subst__remove_typvar : forall E dsubst gsubst,
wf_gamma_subst E dsubst gsubst ->
wf_gamma_subst (apply_delta_subst_env dsubst (remove_typvar E)) nil gsubst.
Proof.
intros E dsubst gsubst Hwfg.
induction Hwfg; simpl; simpl_env; auto.
apply wf_gamma_subst_sval; auto.
apply notin_remove_typvar_dom in H.
rewrite <- apply_delta_subst_env_dom; auto.
apply wft_subst with (dsubst:=dsE) in H1; auto.
rewrite_env (apply_delta_subst_env dsE (remove_typvar E) ++ nil).
apply wf_typ_weaken_head; auto.
simpl_env.
apply wf_gamma_subst__uniq in IHHwfg. decompose [and] IHHwfg; auto.
apply wf_gamma_subst__wf_subst in Hwfg. destruct Hwfg as [J1 J2]. exact J1.
(* styp case: pushing [(X,T)] into the substitution equals substituting
   X in the environment first, which is the identity here since X is
   fresh for the already-substituted environment. *)
rewrite apply_delta_subst_env_cons.
rewrite apply_delta_subst_env_subst_tb_swap with (E:=E); auto.
rewrite <- map_subst_tb_id; auto.
apply wf_gamma_subst__wf_subst in IHHwfg. destruct IHHwfg as [J1 J2]. exact J2.
apply notin_remove_typvar_dom in H.
rewrite <- apply_delta_subst_env_dom; auto.
apply wf_gamma_subst__wf_subst in Hwfg. destruct Hwfg as [J1 J2]. exact J1.
Qed.
(* The empty delta substitution is the identity on linear environments. *)
Lemma apply_delta_subst_lenv_nil : forall lE,
apply_delta_subst_lenv nil lE = lE.
Proof.
induction lE; simpl; auto.
destruct a.
destruct l; simpl.
rewrite IHlE; auto.
Qed.
(* Building the abstraction type from the substituted environment and
   type equals substituting into the abstraction type built first. *)
Lemma apply_delta_subst_gen_typ_labs_commut : forall lE dsubst t,
(gen_typ_labs (apply_delta_subst_lenv dsubst lE) (apply_delta_subst_typ dsubst t))
= apply_delta_subst_typ dsubst (gen_typ_labs lE t).
Proof.
induction lE; intros dsubst t; simpl; simpl_env; auto.
destruct a. destruct l. simpl.
rewrite IHlE. simpl_commut_subst. auto.
Qed.
(* A cons'ed delta substitution acts as: substitute X first in the
   linear environment, then apply the tail substitution. *)
Lemma apply_delta_subst_lenv_cons' : forall E X T dsubst,
apply_delta_subst_lenv ([(X, T)]++dsubst) E = apply_delta_subst_lenv dsubst (map (subst_tlb X T) E).
Proof.
induction E; intros X T dsubst; simpl; auto.
destruct a.
destruct l; simpl; simpl_env.
rewrite IHE; auto.
Qed.
(* For X fresh for dsubst's environment and T closed, subst_tlb X T
   commutes with applying the delta substitution to a linear env. *)
Lemma apply_delta_subst_lenv_subst_tlb_swap : forall F E dsubst X T,
wf_delta_subst E dsubst ->
X `notin` dom E ->
wf_typ empty T ->
apply_delta_subst_lenv dsubst (map (subst_tlb X T) F) =
map (subst_tlb X T) (apply_delta_subst_lenv dsubst F).
Proof.
induction F; intros E dsubst X T Hwfd XnE Hwft; simpl; auto.
destruct a.
destruct l; simpl; simpl_env.
rewrite delta_subst_permut with (dE:=E); auto.
rewrite IHF with (E:=E); auto.
apply dom_delta_subst in Hwfd. rewrite <- Hwfd.
apply dom__ddom in XnE. auto.
Qed.
(* Specializing a combined substitution to its linear parts: the
   environments empty out and the delta substitution is pre-applied to
   the linear environment's types. *)
Lemma wf_lgamma_subst__remove_typvar : forall E lE dsubst gsubst lgsubst,
wf_lgamma_subst E lE dsubst gsubst lgsubst->
wf_lgamma_subst nil (apply_delta_subst_lenv dsubst lE) nil nil lgsubst.
Proof.
intros E lE dsubst gsubst lgsubst Hwflg.
induction Hwflg; simpl; simpl_env; auto.
apply wf_lgamma_subst_slval; auto.
apply notin_remove_typvar_dom in H.
rewrite <- apply_delta_subst_lenv_dom; auto.
rewrite apply_delta_subst_lenv_cons'.
rewrite apply_delta_subst_lenv_subst_tlb_swap with (E:=E); auto.
rewrite <- map_subst_tlb_id with (G:=nil); auto.
apply wf_lgamma_subst__wf_lenv in IHHwflg. destruct IHHwflg as [J1 J2]. exact J2.
apply wf_lgamma_subst__wf_subst in Hwflg. destruct Hwflg as [J1 J2]. exact J2.
Qed.
(* gen_typ_labs over a well-formed linear environment preserves
   type-hood (each layer is a typ_arrow of well-formed components). *)
Lemma gen_typ_labs_type : forall E lE t,
wf_lenv E lE ->
type t ->
type (gen_typ_labs lE t).
Proof.
intros E lE t Hwfle.
generalize dependent t.
induction Hwfle; intros e Htype; simpl; auto.
apply type_arrow; auto.
apply type_from_wf_typ in H1; auto.
Qed.
(* The captured variables of gen_ctx_labs are exactly dom lE plus
   whatever C already captured. *)
Lemma cv_ec_gen_ctx_labs : forall lE C,
cv_ec (gen_ctx_labs lE C) [=] dom lE `union` cv_ec C.
Proof.
induction lE; intro C; simpl.
fsetdec.
destruct a.
destruct l; simpl.
rewrite <- cv_ec_close_ec.
rewrite IHlE. fsetdec.
Qed.
(* Wrapping the hole with capturing abstractions over a well-formed
   linear environment yields a well-formed context. *)
Lemma gen_ctx_labs_context : forall lE E,
wf_lenv E lE ->
context (gen_ctx_labs lE ctx_hole).
Proof.
intros lE E Hwlenv.
induction Hwlenv.
simpl_env in *. apply context_hole; auto.
simpl.
apply context_abs_capture with (L:={{x}} `union` dom D); auto.
apply type_from_wf_typ in H1; auto.
intros x0 Hx0.
(* close-then-open is substitution of a fresh variable. *)
rewrite close_open_ec__subst_ec; auto.
apply subst_ec_context; auto.
Qed.
(* Non-linear analogue of apply_delta_subst_gen_typ_labs_commut, with
   typ_iarrow for term bindings (kind bindings are skipped). *)
Lemma apply_delta_subst_gen_typ_abs_commut : forall E dsubst t,
(gen_typ_abs (apply_delta_subst_env dsubst E) (apply_delta_subst_typ dsubst t))
= apply_delta_subst_typ dsubst (gen_typ_abs E t).
Proof.
induction E; intros dsubst t; simpl; simpl_env; auto.
destruct a. unfold typ_iarrow.
destruct b; simpl.
rewrite IHE. simpl_commut_subst. auto.
rewrite IHE. simpl_commut_subst. auto.
Qed.
(* gen_typ_abs over a well-formed environment preserves type-hood. *)
Lemma gen_typ_abs_type : forall E t,
wf_env E ->
type t ->
type (gen_typ_abs E t).
Proof.
intros E t Hwfe.
generalize dependent t.
induction Hwfe; intros e Htype; simpl; auto.
apply type_arrow; auto.
apply type_from_wf_typ in H; auto.
Qed.
(* Main composition lemma: a well-formed combined substitution induces
   a closing context — linear applications, applications, and type
   applications wrapped around the corresponding capturing abstractions
   over E and lE — that takes a term of type t under (E; lE) to a
   closed term of the delta-substituted type. Each layer is discharged
   by the corresponding from_subst_to_ctx_* lemma, innermost first. *)
Lemma wf_from_subst_to_ctx : forall E lE dsubst gsubst lgsubst t,
wf_lgamma_subst E lE dsubst gsubst lgsubst ->
wf_typ E t ->
contexting E lE t ( gen_ctx_lapp (rev lgsubst) (gen_ctx_app (rev gsubst) (gen_ctx_tapp (rev dsubst) (gen_ctx_tabs E (gen_ctx_abs E (gen_ctx_labs lE ctx_hole)))))) nil nil (apply_delta_subst_typ (rev dsubst) t).
Proof.
intros E lE dsubst gsubst lgsubst t Hwflg Hwft.
(* Outermost layer: linear applications. *)
apply from_subst_to_ctx_lapp with (dsubst:=dsubst); auto.
apply wf_lgamma_subst__wf_subst in Hwflg. destruct Hwflg.
apply wf_delta_subst__remove_tmvar; auto.
rewrite rev_involutive.
apply wf_lgamma_subst__remove_typvar in Hwflg; auto.
rewrite apply_delta_subst_gen_typ_labs_commut.
(* Next layer: intuitionistic applications. *)
apply from_subst_to_ctx_app with (dsubst:=dsubst); auto.
apply wf_lgamma_subst__wf_subst in Hwflg. destruct Hwflg.
apply wf_delta_subst__remove_tmvar; auto.
apply wf_lgamma_subst__wf_subst in Hwflg. destruct Hwflg.
rewrite rev_involutive.
apply wf_gamma_subst__remove_typvar; auto.
rewrite apply_delta_subst_gen_typ_abs_commut.
(* Next layer: type applications. *)
apply from_subst_to_ctx_tapp; auto.
apply wf_lgamma_subst__wf_subst in Hwflg. destruct Hwflg.
rewrite rev_involutive.
apply wf_delta_subst__remove_tmvar; auto.
apply wf_lgamma_subst__wf_lenv in Hwflg.
destruct Hwflg as [J1 J2].
apply gen_typ_abs_type; auto.
apply gen_typ_labs_type with (E:=E); auto.
apply type_from_wf_typ in Hwft; auto.
assert (J:=Hwflg).
apply wf_lgamma_subst__uniq in Hwflg.
decompose [and] Hwflg; auto.
apply wf_lgamma_subst__wf_lenv in J.
decompose [and] J; auto.
(* Innermost layers: type abstractions, abstractions, linear abs. *)
apply from_subst_to_ctx_tabs; simpl; auto.
apply cv_tc_gen_ctx_abs.
apply cv_tc_gen_ctx_labs.
rewrite cv_ec_gen_ctx_abs.
rewrite cv_ec_gen_ctx_labs.
simpl. fsetdec.
apply from_subst_to_ctx_abs; auto.
apply fv_ec_gen_ctx_labs_hole.
apply cv_ec_gen_ctx_labs_hole.
apply from_subst_to_ctx_labs; auto.
Qed.
(* Expression-level counterparts of the gen_ctx_* context builders:
   these wrap an expression (rather than a context) in the same layers
   of abstractions and applications. *)

(* One linear lambda per linear binding; shift then close so the bound
   variable x becomes de Bruijn index 0 of the new abstraction. *)
Fixpoint gen_exp_labs (lE:lenv) (e:exp) {struct lE} : exp :=
match lE with
| nil => e
| (x, lbind_typ T)::lE' => exp_abs T (close_ee (shift_ee (gen_exp_labs lE' e)) x)
end.
(* One intuitionistic lambda (exp_iabs) per term binding; kind bindings
   are skipped. Double shift matches exp_iabs binding two indices. *)
Fixpoint gen_exp_abs (E:env) (e:exp) {struct E} : exp :=
match E with
| nil => e
| (X, bind_kn)::E' => (gen_exp_abs E' e)
| (x, bind_typ T)::E' => exp_iabs T (close_ee (shift_ee (shift_ee (gen_exp_abs E' e))) x)
end.
(* One type lambda per kind binding; term bindings are skipped. *)
Fixpoint gen_exp_tabs (E:env) (e:exp) {struct E} : exp :=
match E with
| nil => e
| (X, bind_kn)::E' => exp_tabs (close_te (shift_te (gen_exp_tabs E' e)) X)
| (x, bind_typ T)::E' => (gen_exp_tabs E' e)
end.
(* One type application per delta-substitution entry. *)
Fixpoint gen_exp_tapp (dsubst:delta_subst) (e:exp) {struct dsubst} : exp :=
match dsubst with
| nil => e
| (X, T)::dsubst' => exp_tapp (gen_exp_tapp dsubst' e) T
end.
(* One application per linear-substitution entry (raw value). *)
Fixpoint gen_exp_lapp (lgsubst:gamma_subst) (e:exp) {struct lgsubst} : exp :=
match lgsubst with
| nil => e
| (x, v)::lgsubst' => exp_app (gen_exp_lapp lgsubst' e) v
end.
(* One application per gamma-substitution entry, banged to match the
   intuitionistic abstractions of gen_exp_abs. *)
Fixpoint gen_exp_app (gsubst:gamma_subst) (e:exp) {struct gsubst} : exp :=
match gsubst with
| nil => e
| (x, v)::gsubst' => exp_app (gen_exp_app gsubst' e) (exp_bang v)
end.
(* Plugging into an application-wrapped context equals wrapping the
   plugged term: plug commutes with gen_ctx_lapp / gen_exp_lapp. *)
Lemma plug_gen_ctx_lapp__gen_exp_lapp : forall lgsubst C e,
plug (gen_ctx_lapp lgsubst C) e = gen_exp_lapp lgsubst (plug C e).
Proof.
induction lgsubst; intros C e; simpl; auto.
destruct a. simpl.
rewrite IHlgsubst. auto.
Qed.
(* Same commutation for type applications. *)
Lemma plug_gen_ctx_tapp__gen_exp_tapp : forall dsubst C e,
plug (gen_ctx_tapp dsubst C) e = gen_exp_tapp dsubst (plug C e).
Proof.
induction dsubst; intros C e; simpl; auto.
destruct a. simpl.
rewrite IHdsubst. auto.
Qed.
(* gen_ctx_tabs preserves context-hood over a well-formed environment. *)
Lemma gen_ctx_tabs_context : forall E C,
wf_env E ->
context C ->
context (gen_ctx_tabs E C).
Proof.
intros E C Wfe Hctx.
generalize dependent C.
induction Wfe; intros; simpl; simpl_env in *; auto.
apply IHWfe in Hctx; auto.
apply context_tabs_capture with (L:={{X}} `union` dom E); auto.
intros X0 HX0.
rewrite close_open_tc__subst_tc; auto.
apply subst_tc_context; auto.
Qed.
(* plug commutes with type-abstraction wrapping, provided the type
   variables being captured are disjoint from C's captured variables
   (so close/shift can be pushed through the plug). *)
Lemma plug_gen_ctx_tabs__gen_exp_tabs : forall E C e,
wf_env E ->
context C ->
disjdom (ddom_env E) (cv_ec C) ->
plug (gen_ctx_tabs E C) e = gen_exp_tabs E (plug C e).
Proof.
intros E C e Wfe Context Disj.
generalize dependent C.
generalize dependent e.
induction Wfe; intros e C Context Disj; simpl; auto.
rewrite <- IHWfe; auto.
unfold close_te.
unfold close_tc.
rewrite <- close_te_rec_plug.
unfold shift_te.
rewrite shift_te_rec_plug.
rewrite <- shift_tc_rec_context with (C:=gen_ctx_tabs E C); auto.
apply gen_ctx_tabs_context; auto.
rewrite cv_ec_gen_ctx_tabs.
(* X is fresh both for C's captures (by disjointness) and for E. *)
assert (X `notin` cv_ec C) as J.
apply disjdom_one_1.
apply disjdom_sym_1.
apply disjdom_sub with (D1:=ddom_env ([(X, bind_kn)]++E)).
apply disjdom_sym_1; auto.
simpl. fsetdec.
apply dom__ddom in H.
fsetdec.
apply disjdom_sym_1.
apply disjdom_sub with (D1:=ddom_env ([(X, bind_kn)]++E)).
apply disjdom_sym_1; auto.
simpl. fsetdec.
Qed.
(* Generalization of gen_ctx_labs_context to an arbitrary base context
   C (not just the hole), requiring C to have no free expression
   variables. *)
Lemma gen_ctx_labs_context' : forall lE E C,
wf_lenv E lE ->
fv_ec C [=] {} ->
context C ->
context (gen_ctx_labs lE C).
Proof.
intros lE E C Hwlenv Hfvc Hctx .
induction Hwlenv; simpl; auto.
apply context_abs_capture with (L:={{x}} `union` dom D); auto.
apply type_from_wf_typ in H1; auto.
intros x0 Hx0.
rewrite close_open_ec__subst_ec; auto.
apply subst_ec_context; auto.
Qed.
(* plug commutes with linear-abstraction wrapping, provided the
   linear variables are disjoint from C's captured variables. *)
Lemma plug_gen_ctx_labs__gen_exp_labs : forall C E lE e,
wf_lenv E lE ->
fv_ec C [=] {} ->
disjdom (cv_ec C) (dom lE) ->
context C ->
plug (gen_ctx_labs lE C) e = gen_exp_labs lE (plug C e).
Proof.
intros C E lE e Wfle Hfv Disj Context.
generalize dependent C.
generalize dependent e.
induction Wfle; intros e C Hfv Disj Context; simpl; auto.
rewrite <- IHWfle; auto.
unfold close_ee.
unfold close_ec.
rewrite <- close_ee_rec_plug.
unfold shift_ee.
rewrite shift_ee_rec_plug.
rewrite <- shift_ec_rec_context with (C:=gen_ctx_labs D C); auto.
apply gen_ctx_labs_context' with (E:=E); auto.
rewrite cv_ec_gen_ctx_labs.
assert (x `notin` cv_ec C) as J.
apply disjdom_one_1.
apply disjdom_sym_1.
apply disjdom_sub with (D1:=dom ([(x, lbind_typ T)]++D)); auto.
simpl_env. fsetdec.
auto.
apply disjdom_sub with (D1:=dom ([(x, lbind_typ T)]++D)); auto.
simpl_env. fsetdec.
Qed.
(* gen_ctx_abs preserves context-hood: each term binding becomes a
   free abstraction over a let2-capture layer (the exp_iabs pattern). *)
Lemma gen_ctx_abs_context : forall E C,
wf_env E ->
fv_ec C [=] {} ->
context C ->
context (gen_ctx_abs E C).
Proof.
intros E C Wfe Hfvc Hctx.
generalize dependent C.
induction Wfe; intros; simpl; simpl_env in *; auto.
apply IHWfe in Hctx; simpl_env; auto.
apply context_abs_free with (L:={{x}} `union` dom E); auto.
apply type_from_wf_typ in H; auto.
intros x0 Hx0.
unfold open_ec. simpl.
apply context_let2_capture with (L:={}); auto.
intros x1 x1n.
unfold open_ec.
rewrite <- open_ec_open_ec_rec__commut; auto.
unfold close_ec.
rewrite close_open_ec_rec__subst_ec; auto.
apply subst_ec_context with (z:=x)(e:=x1) in Hctx; auto.
rewrite <- open_ec_rec_context; auto.
rewrite context_bctx_eupper; auto.
Qed.
(* Upper bound on the free expression variables of gen_ctx_abs:
   they all come from dom E or from C itself. *)
Lemma fv_ec_gen_ctx_abs_hole : forall E C,
fv_ec (gen_ctx_abs E C) [<=] dom E `union` fv_ec C.
Proof.
induction E; intros C; simpl.
fsetdec.
destruct a.
destruct b; simpl.
assert (J:=@IHE C). fsetdec.
assert (J1:=@IHE C).
assert (J2:=@close_ec_fv_ec_upper (gen_ctx_abs E C) a).
fsetdec.
Qed.
(* plug commutes with intuitionistic-abstraction wrapping, under the
   usual closedness and disjointness conditions. *)
Lemma plug_gen_ctx_abs__gen_exp_abs : forall C E e,
wf_env E ->
fv_ec C [=] {} ->
disjdom (cv_ec C) (dom E) ->
context C ->
plug (gen_ctx_abs E C) e = gen_exp_abs E (plug C e).
Proof.
intros C E e Wfe Hfv Disj Context.
generalize dependent C.
generalize dependent e.
induction Wfe; intros e C Hfv Disj Context; simpl; auto.
(* Kind binding: both sides skip it; recurse with weakened disjdom. *)
rewrite <- IHWfe; auto.
apply disjdom_sub with (D1:=dom ([(X, bind_kn)]++E)); auto.
simpl_env. fsetdec.
unfold exp_iabs.
assert (x `notin` cv_ec (gen_ctx_abs E C)) as xnCV.
rewrite cv_ec_gen_ctx_abs.
assert (x `notin` cv_ec C) as J.
apply disjdom_one_1.
apply disjdom_sym_1.
apply disjdom_sub with (D1:=dom ([(x, bind_typ T)]++E)); auto.
simpl_env. fsetdec.
apply dom__gdom in H0. auto.
assert (context (gen_ctx_abs E C)) as Ctxabs.
apply gen_ctx_abs_context; auto.
rewrite <- IHWfe; auto.
unfold close_ee.
unfold close_ec.
rewrite <- close_ee_rec_plug; auto.
unfold shift_ee.
rewrite shift_ee_rec_plug.
rewrite <- shift_ec_rec_context with (C:=gen_ctx_abs E C); auto.
rewrite shift_ee_rec_plug.
rewrite <- shift_ec_rec_context with (C:=gen_ctx_abs E C); auto.
apply disjdom_sub with (D1:=dom ([(x, bind_typ T)]++E)); auto.
simpl_env. fsetdec.
Qed.
(* Big-step reduction is a congruence w.r.t. the application wrappers:
   if e reduces to e', so does the lgsubst-wrapped form. *)
Lemma eval_gen_exp_lapp: forall lgsubst e e',
wf_gsubst lgsubst ->
bigstep_red e e' ->
bigstep_red (gen_exp_lapp lgsubst e) (gen_exp_lapp lgsubst e').
Proof.
intros lgsubst e e' Hwflg Hbred.
generalize dependent e.
generalize dependent e'.
induction Hwflg; intros; simpl; auto.
apply bigstep_red_app; auto.
Qed.
(* Same congruence for type-application wrappers; each applied type is
   a type because it is well formed in the empty environment. *)
Lemma eval_gen_exp_tapp: forall dsubst e e',
wf_dsubst dsubst ->
bigstep_red e e' ->
bigstep_red (gen_exp_tapp dsubst e) (gen_exp_tapp dsubst e').
Proof.
intros dsubst e e' Hwfd Hbred.
generalize dependent e.
generalize dependent e'.
induction Hwfd; intros; simpl; auto.
apply bigstep_red_tapp; auto.
apply type_from_wf_typ in H0; auto.
Qed.
(* gen_exp_tapp distributes over substitution concatenation. *)
Lemma gen_exp_tapp_app : forall E E' e,
gen_exp_tapp (E ++ E') e = gen_exp_tapp E (gen_exp_tapp E' e).
Proof.
induction E; intros E' e; simpl; auto.
destruct a. rewrite IHE; auto.
Qed.
(* Type substitution preserves big-step reduction, lifting the
   single-step result red_preserved_under_typsubst. *)
Lemma bigstep_red_preserved_under_subst_te: forall e e' X T,
bigstep_red e e' ->
type T ->
bigstep_red (subst_te X T e) (subst_te X T e').
Proof.
intros e e' X T H.
generalize dependent X.
generalize dependent T.
induction H; intros; auto.
apply bigstep_red_trans with (subst_te X T e'); auto.
apply red_preserved_under_typsubst; auto.
Qed.
Lemma gen_exp_tabs_expr : forall E e,
expr e ->
expr (gen_exp_tabs E e).
Proof.
induction E; intros e Hexpr; simpl; auto.
destruct a.
destruct b; auto.
rewrite <- shift_te_expr; auto.
apply expr_tabs with (L:={{a}}).
intros X FrX.
rewrite close_open_te__subst_te; auto.
Qed.
Lemma gen_exp_tapp_subst_te_commute : forall dsubst e X T,
wf_dsubst dsubst ->
gen_exp_tapp dsubst (subst_te X T e) = subst_te X T (gen_exp_tapp dsubst e).
Proof.
intros dsubst e X T Hwfd.
generalize dependent e.
generalize dependent X.
generalize dependent T.
induction Hwfd; intros; simpl; auto.
rewrite IHHwfd.
rewrite <- subst_tt_fresh; auto.
apply notin_fv_wf with (X:=X0) in H0; auto.
Qed.
Lemma gen_exp_tabs_subst_tt_commute : forall E X T e,
X `notin` dom E ->
wf_typ nil T ->
gen_exp_tabs E (subst_te X T e) = subst_te X T (gen_exp_tabs E e).
Proof.
induction E; intros X T e HC Wft; simpl; auto.
destruct a.
destruct b; simpl.
rewrite IHE; auto.
rewrite subst_te_close_te; auto.
unfold shift_te.
rewrite subst_te_shift_te_rec; auto.
apply type_from_wf_typ in Wft; auto.
apply notin_fv_wf with (X:=a) in Wft; auto.
rewrite IHE; auto.
Qed.
(* subst_te for a fresh X with a closed type t commutes with applying a
   whole delta substitution; freshness of X w.r.t. the substitution domain
   lets subst_te_commute fire at each step. *)
Lemma swap_subst_te_dsubst': forall t X dsubst e,
wf_dsubst dsubst ->
wf_typ nil t ->
X `notin` dom dsubst ->
subst_te X t (apply_delta_subst dsubst e) =
apply_delta_subst dsubst (subst_te X t e).
Proof.
intros t X dsubst e Hwfd Hwft xndsubst.
generalize dependent e.
generalize dependent t.
induction Hwfd; intros t Hwft e0; simpl; eauto.
rewrite subst_te_commute; eauto.
eauto using notin_fv_wf.
eauto using notin_fv_wf.
Qed.
(* Removing a single binding (X,t) from the middle of a well-formed delta
   substitution keeps it well-formed.  Induction on wf_dsubst, splitting
   on whether the head binding lies in the prefix (one_eq_app). *)
Lemma dsubst_stronger' : forall dsubst dsubst' X t,
wf_dsubst (dsubst'++[(X,t)]++dsubst) ->
wf_dsubst (dsubst'++dsubst).
Proof.
intros dsubst dsubst' X t Hwf_dsubst.
remember (dsubst'++[(X,t)]++dsubst) as dsG.
generalize dependent dsubst'.
(wf_dsubst_cases (induction Hwf_dsubst) Case); intros; subst.
Case "wf_dsubst_empty".
contradict HeqdsG; auto.
Case "wf_dsubst_styp".
apply one_eq_app in HeqdsG. destruct HeqdsG as [[dsE'' [dsEQ1 dsEQ2]] | [dsEQ1 dsEQ2]]; subst.
simpl_env.
apply wf_dsubst_styp; simpl in *; auto.
inversion dsEQ2. subst. simpl_env; auto.
Qed.
(* Removing a whole middle segment, iterating dsubst_stronger'. *)
Lemma dsubst_strengthen : forall dsubst dsubst1 dsubst2,
wf_dsubst (dsubst1++dsubst++dsubst2) ->
wf_dsubst (dsubst1++dsubst2).
Proof.
induction dsubst; intros dsubst1 dsubst2 H; simpl_env in *; auto.
destruct a.
apply dsubst_stronger' in H; auto.
Qed.
(* The prefix of a well-formed delta substitution is well-formed. *)
Lemma dsubst_strengthen_head : forall dsubst1 dsubst2,
wf_dsubst (dsubst1++dsubst2) ->
wf_dsubst (dsubst1).
Proof.
intros dsubst1 dsubst2 H.
rewrite_env (dsubst1++dsubst2++nil) in H.
apply dsubst_strengthen in H; simpl_env in H; auto.
Qed.
(* The suffix of a well-formed delta substitution is well-formed. *)
Lemma dsubst_strengthen_tail : forall dsubst1 dsubst2,
wf_dsubst (dsubst1++dsubst2) ->
wf_dsubst (dsubst2).
Proof.
intros dsubst1 dsubst2 H.
rewrite_env (nil++dsubst1++dsubst2) in H.
apply dsubst_strengthen in H; simpl_env in H; auto.
Qed.
(* Key evaluation lemma for type abstractions: applying (in reverse order)
   the types of dsubst' to the tabs-wrapped term beta-reduces away the
   abstractions for the prefix E', leaving the delta substitution applied
   to the body.  Generalized over a prefix/suffix split so the induction
   on wf_delta_subst goes through.
   NOTE(review): the intros on the next-but-one line names only three of
   the four hypotheses (Hwfd Dom Hexpr) — presumably intentional so that
   the remaining implication stays in the goal for the induction; confirm
   against the original development. *)
Lemma _eval_gen_exp_tabs_list: forall E' E dsubst' dsubst e,
wf_delta_subst (E'++E) (dsubst' ++dsubst) ->
ddom_env E' [=] dom dsubst' ->
ddom_env E [=] dom dsubst ->
expr e ->
bigstep_red (gen_exp_tapp (rev (dsubst' ++dsubst)) (gen_exp_tabs (E'++E) e))
(gen_exp_tapp (rev dsubst) (gen_exp_tabs E (apply_delta_subst dsubst' e))).
Proof.
intros E' E dsubst' dsubst e Hwfd Dom Hexpr.
remember (E'++E) as EE.
remember (dsubst'++dsubst) as dsE.
generalize dependent E'.
generalize dependent E.
generalize dependent dsubst'.
generalize dependent dsubst.
generalize dependent e.
induction Hwfd; intros; subst.
(* Base case: both environment and substitution are empty. *)
symmetry in HeqEE.
apply app_eq_nil in HeqEE.
destruct HeqEE as [J1 J2]; subst.
symmetry in HeqdsE.
apply app_eq_nil in HeqdsE.
destruct HeqdsE as [J1 J2]; subst.
simpl. auto.
(* Kind-binding head: split on whether (X, bind_kn) and (X, T) fall in
   the prefix or start the suffix (one_eq_app on both equations). *)
apply one_eq_app in HeqEE. destruct HeqEE as [[E'' [EQ1 EQ2]] | [EQ1 EQ2]]; subst.
apply one_eq_app in HeqdsE. destruct HeqdsE as [[dsE'' [dsEQ1 dsEQ2]] | [dsEQ1 dsEQ2]]; subst.
assert (dsE''++dsubst = dsE''++dsubst) as EQ. auto.
assert (ddom_env E'' [=] dom dsE'') as J.
apply ddom_dom__inv with (X:=X)(b:=T); auto.
apply dom__ddom in H.
apply dom_delta_subst in Hwfd.
rewrite Hwfd in H. simpl_env in H. auto.
apply IHHwfd with (e:=e) (E:=E0) (E':=E'') in EQ; auto.
simpl_env.
rewrite distr_rev. simpl. simpl_env.
rewrite gen_exp_tapp_app. simpl.
rewrite <- shift_te_expr; try solve [apply gen_exp_tabs_expr; auto].
(* One beta step (red_tabs) peels the outermost tabs, then the open/close
   is rephrased as a subst_te and pushed through the remaining wrappers. *)
apply bigstep_red__trans with (e' := gen_exp_tapp (rev (dsE''++dsubst)) (open_te (close_te (gen_exp_tabs (E''++E0) e) X) T)).
apply eval_gen_exp_tapp.
apply wf_delta_subst__wf_dsubst in Hwfd.
apply wf_dsubst_rev; auto.
apply bigstep_red_trans with (e':= (open_te (close_te (gen_exp_tabs (E''++E0) e) X) T)); auto.
apply red_tabs.
apply expr_tabs with (L:={}).
intros X0 HX0.
rewrite close_open_te__subst_te; auto.
apply subst_te_expr; auto.
apply gen_exp_tabs_expr; auto.
apply gen_exp_tabs_expr; auto.
apply type_from_wf_typ in H0; auto.
assert (open_te (close_te (gen_exp_tabs (E''++E0) e) X) T = subst_te X T (gen_exp_tabs (E''++E0) e)) as EQ'.
rewrite close_open_te__subst_te; auto.
apply gen_exp_tabs_expr; auto.
rewrite EQ'.
assert (gen_exp_tapp (rev (dsE''++dsubst)) (subst_te X T (gen_exp_tabs (E''++E0) e)) =
subst_te X T (gen_exp_tapp (rev (dsE''++dsubst)) (gen_exp_tabs (E''++E0) e))) as EQ''.
rewrite gen_exp_tapp_subst_te_commute; auto.
apply wf_delta_subst__wf_dsubst in Hwfd.
apply wf_dsubst_rev in Hwfd; auto.
rewrite EQ''.
assert (gen_exp_tapp (rev dsubst) (gen_exp_tabs E0 (apply_delta_subst dsE'' (subst_te X T e))) =
subst_te X T (gen_exp_tapp (rev dsubst) (gen_exp_tabs E0 (apply_delta_subst dsE'' e)))) as EQ'''.
rewrite <- gen_exp_tapp_subst_te_commute; auto.
rewrite <- gen_exp_tabs_subst_tt_commute; auto.
rewrite swap_subst_te_dsubst'; auto.
apply wf_delta_subst__wf_dsubst in Hwfd.
apply dsubst_strengthen_head in Hwfd; auto.
apply dom_delta_subst in Hwfd.
apply dom__ddom in H.
rewrite Hwfd in H.
simpl_env in H. auto.
apply wf_delta_subst__wf_dsubst in Hwfd.
apply dsubst_strengthen_tail in Hwfd.
apply wf_dsubst_rev in Hwfd; auto.
rewrite EQ'''.
apply bigstep_red_preserved_under_subst_te; auto.
apply type_from_wf_typ in H0; auto.
(* Contradictory domain-equation cases: a binding on one side of the
   split cannot have an empty counterpart on the other side. *)
simpl in Dom.
assert (X `in` {}) as FALSE.
rewrite <- Dom.
auto.
contradict FALSE; auto.
apply one_eq_app in HeqdsE. destruct HeqdsE as [[dsE'' [dsEQ1 dsEQ2]] | [dsEQ1 dsEQ2]]; subst.
simpl in Dom.
assert (X `in` {}) as FALSE.
rewrite Dom.
auto.
contradict FALSE; auto.
simpl_env.
rewrite distr_rev. simpl. simpl_env. auto.
(* Typ-binding head: contributes no tapp/tabs, so the IH applies directly. *)
apply one_eq_app in HeqEE. destruct HeqEE as [[E'' [EQ1 EQ2]] | [EQ1 EQ2]]; subst.
simpl.
assert (dsubst'++dsubst = dsubst'++dsubst) as EQ. auto.
simpl in Dom.
apply IHHwfd with (e:=e) (E:=E0) (E':=E'') in EQ; auto.
simpl in *. symmetry in Dom.
apply dom_empty_inv in Dom.
subst. simpl. auto.
Qed.
(* Corollary of _eval_gen_exp_tabs_list with an empty suffix: applying all
   the types of dsubst (reversed) to the fully tabs-wrapped term reduces
   to the delta substitution applied to e. *)
Lemma eval_gen_exp_tabs_list: forall E dsubst e,
wf_delta_subst E dsubst ->
expr e ->
bigstep_red (gen_exp_tapp (rev dsubst) (gen_exp_tabs E e)) (apply_delta_subst dsubst e).
Proof.
intros E dsubst e Hwfd Hexpr.
rewrite_env (E++nil) in Hwfd.
rewrite_env (dsubst++nil) in Hwfd.
apply _eval_gen_exp_tabs_list with (e:=e) in Hwfd; auto.
simpl_env in Hwfd; auto.
apply dom_delta_subst in Hwfd. simpl_env in Hwfd. auto.
Qed.
(* gen_exp_app distributes over list append. *)
Lemma gen_exp_app_app : forall E E' e,
gen_exp_app (E ++ E') e = gen_exp_app E (gen_exp_app E' e).
Proof.
induction E; intros E' e; simpl; auto.
destruct a. rewrite IHE; auto.
Qed.
(* Big-step reduction is stable under substituting an expression v for a
   term variable x; lifts red_preserved_under_expsubst stepwise. *)
Lemma bigstep_red_preserved_under_subst_ee: forall e e' x v,
bigstep_red e e' ->
expr v ->
bigstep_red (subst_ee x v e) (subst_ee x v e').
Proof.
intros e e' x v H.
generalize dependent x.
generalize dependent v.
induction H; intros; auto.
apply bigstep_red_trans with (subst_ee x v e'); auto.
apply red_preserved_under_expsubst; auto.
Qed.
(* Open-after-close at an arbitrary index equals substitution, for a
   locally-closed expression; the bexp_eupper bound supplies the index
   side condition of close_open_ee_rec__subst_ee. *)
Lemma close_open_ee__subst_ee' : forall e k x y,
expr e ->
open_ee_rec k y (close_ee_rec k x e) = subst_ee x y e.
Proof.
intros e k x y He.
apply close_open_ee_rec__subst_ee; auto.
rewrite expr_bexp_eupper; auto. omega.
Qed.
(* Wrapping with the abstractions generated from a well-formed environment
   yields an expression; typ bindings produce an exp_iabs-style wrapper
   built with abs/let (see the open_ee simplification below). *)
Lemma gen_exp_abs_expr : forall E e,
wf_env E ->
expr e ->
expr (gen_exp_abs E e).
Proof.
intros E e Hwfe.
generalize dependent e.
induction Hwfe; intros e Hexpr; simpl; auto.
rewrite <- shift_ee_expr with (e:=gen_exp_abs E e); auto.
rewrite <- shift_ee_expr with (e:=gen_exp_abs E e); auto.
apply expr_abs with (L:={{x}}).
apply type_from_wf_typ in H; auto.
intros x0 Frx0.
unfold open_ee. simpl.
apply expr_let with (L:={{x}} `union` {{x0}}); auto.
intros x1 xin.
unfold open_ee.
rewrite open_ee_open_ee_rec__commut; auto.
unfold close_ee.
rewrite close_open_ee__subst_ee'; auto.
rewrite <- open_ee_rec_expr; auto.
Qed.
(* gen_exp_app commutes with subst_ee: substitution values in a well-formed
   gsubst are closed terms, so x is not free in them
   (notin_fv_ee_typing). *)
Lemma gen_exp_app_subst_ee_commute : forall gsubst e x v,
wf_gsubst gsubst ->
gen_exp_app gsubst (subst_ee x v e) = subst_ee x v (gen_exp_app gsubst e).
Proof.
intros gsubst e x v Hwfg.
generalize dependent e.
generalize dependent x.
generalize dependent v.
induction Hwfg; intros; simpl; auto.
rewrite IHHwfg.
rewrite <- subst_ee_fresh with (e:=e); auto.
apply notin_fv_ee_typing with (y:=x0) in H0; auto.
Qed.
(* gen_exp_abs commutes with subst_ee when x is not bound by E and v is
   closed (typed in empty environments). *)
Lemma gen_exp_abs_subst_ee_commute : forall E x v T e,
x `notin` dom E ->
typing nil nil v T ->
gen_exp_abs E (subst_ee x v e) = subst_ee x v (gen_exp_abs E e).
Proof.
induction E; intros x v T e HC Typing; simpl; auto.
destruct a.
destruct b; simpl.
rewrite IHE with (T:=T); auto.
assert (a `notin` fv_ee v) as anv.
apply notin_fv_ee_typing with (y:=a) in Typing; auto.
rewrite IHE with (T:=T); auto.
unfold exp_iabs.
rewrite subst_ee_close_ee; auto.
unfold shift_ee.
rewrite subst_ee_shift_ee_rec; auto.
rewrite subst_ee_shift_ee_rec; auto.
Qed.
(* subst_ee for a fresh x with a closed term e' commutes with applying a
   whole gamma substitution; analogue of swap_subst_te_dsubst'. *)
Lemma swap_subst_ee_gsubst'': forall e' x gsubst e t,
wf_gsubst gsubst ->
typing nil nil e' t ->
x `notin` dom gsubst ->
subst_ee x e' (apply_gamma_subst gsubst e) =
apply_gamma_subst gsubst (subst_ee x e' e).
Proof.
intros e' x gsubst e t Hwflg Hwft xngsubst.
generalize dependent e.
generalize dependent e'.
generalize dependent t.
induction Hwflg; intros t e' Hwft e0; simpl; eauto.
rewrite subst_ee_commute; eauto.
eauto using typing_fv.
eauto using typing_fv.
Qed.
(* Removing a single binding (x,v) from the middle of a well-formed gamma
   substitution keeps it well-formed; mirrors dsubst_stronger'. *)
Lemma gsubst_stronger' : forall gsubst gsubst' x v,
wf_gsubst (gsubst'++[(x,v)]++gsubst) ->
wf_gsubst (gsubst'++gsubst).
Proof.
intros gsubst gsubst' x v Hwf_gsubst.
remember (gsubst'++[(x,v)]++gsubst) as gsG.
generalize dependent gsubst'.
(wf_gsubst_cases (induction Hwf_gsubst) Case); intros; subst.
Case "wf_gsubst_empty".
contradict HeqgsG; auto.
Case "wf_gsubst_sval".
apply one_eq_app in HeqgsG. destruct HeqgsG as [[gsE'' [gsEQ1 gsEQ2]] | [gsEQ1 gsEQ2]]; subst.
simpl_env.
apply wf_gsubst_sval with (T:=T); simpl in *; auto.
inversion gsEQ2. subst. simpl_env; auto.
Qed.
(* Removing a whole middle segment, iterating gsubst_stronger'. *)
Lemma gsubst_strengthen : forall gsubst gsubst1 gsubst2,
wf_gsubst (gsubst1++gsubst++gsubst2) ->
wf_gsubst (gsubst1++gsubst2).
Proof.
induction gsubst; intros gsubst1 gsubst2 H; simpl_env in *; auto.
destruct a.
apply gsubst_stronger' in H; auto.
Qed.
(* The prefix of a well-formed gamma substitution is well-formed. *)
Lemma gsubst_strengthen_head : forall gsubst1 gsubst2,
wf_gsubst (gsubst1++gsubst2) ->
wf_gsubst (gsubst1).
Proof.
intros gsubst1 gsubst2 H.
rewrite_env (gsubst1++gsubst2++nil) in H.
apply gsubst_strengthen in H; simpl_env in H; auto.
Qed.
(* The suffix of a well-formed gamma substitution is well-formed. *)
Lemma gsubst_strengthen_tail : forall gsubst1 gsubst2,
wf_gsubst (gsubst1++gsubst2) ->
wf_gsubst (gsubst2).
Proof.
intros gsubst1 gsubst2 H.
rewrite_env (nil++gsubst1++gsubst2) in H.
apply gsubst_strengthen in H; simpl_env in H; auto.
Qed.
(* Big-step reduction is preserved under the term applications generated
   from a well-formed gamma substitution. *)
Lemma eval_gen_exp_app: forall gsubst e e',
wf_gsubst gsubst ->
bigstep_red e e' ->
bigstep_red (gen_exp_app gsubst e) (gen_exp_app gsubst e').
Proof.
intros gsubst e e' Hwfg Hbred.
generalize dependent e.
generalize dependent e'.
induction Hwfg; intros; simpl; auto.
apply bigstep_red_app; auto.
Qed.
(* Beta rule for the intuitionistic abstraction encoding exp_iabs: applying
   it to a banged argument reduces (abs beta, then let beta) to the opened
   body.  The bexp_eupper bound licenses the open_ee_rec index shuffling. *)
Lemma bigstep_red_iabs : forall T e1 e2,
bexp_eupper e1 <=1 ->
expr (exp_iabs T e1) ->
expr e2 ->
bigstep_red (exp_app (exp_iabs T e1) (exp_bang e2)) (open_ee e1 e2).
Proof.
intros T e1 e2 H0 H1 H2.
unfold exp_iabs.
apply bigstep_red_trans with (e':=open_ee (exp_let 0 e1) (exp_bang e2)).
apply red_abs; auto.
inversion H1; subst.
unfold open_ee in *. simpl in *.
apply bigstep_red_trans with (e':=open_ee (open_ee_rec 1 (exp_bang e2) e1) e2).
apply red_let_beta; auto.
apply expr_let with (L:=L); auto.
intros x xn.
apply H5 in xn.
rewrite open_ee_rec_id_bexp_eupper; auto.
rewrite open_ee_rec_id_bexp_eupper in xn; auto.
inversion xn; subst.
pick fresh y.
assert (y `notin` L0) as ynL0. auto.
apply H7 in ynL0.
unfold open_ee.
apply open_ee_rec_swap with (x:=y); auto.
rewrite open_ee_rec_id_bexp_eupper; auto.
Qed.
(* Free term variables of the abs-wrapped term are bounded by dom E plus
   the free variables of the body. *)
Lemma fv_ee_gen_exp_abs_hole : forall E e,
fv_ee (gen_exp_abs E e) [<=] dom E `union` fv_ee e.
Proof.
induction E; intros e; simpl.
fsetdec.
destruct a.
destruct b; simpl.
assert (J:=@IHE e). fsetdec.
assert (J1:=@IHE e).
assert (J2:=@close_ee_fv_ee_upper (shift_ee (shift_ee (gen_exp_abs E e))) a).
rewrite shift_ee_fv_ee_eq in J2.
rewrite shift_ee_fv_ee_eq in J2.
fsetdec.
Qed.
(* Term-level analogue of _eval_gen_exp_tabs_list: applying (reversed) the
   values of gsubst' to the abs-wrapped term beta-reduces away the
   abstractions for the prefix E', leaving the gamma substitution applied
   to the body.  Induction on wf_gamma_subst with a prefix/suffix split. *)
Lemma _eval_gen_exp_abs_list: forall E' E dsubst gsubst' gsubst e,
wf_gamma_subst (E'++E) dsubst (gsubst' ++gsubst) ->
gdom_env E' [=] dom gsubst' ->
gdom_env E [=] dom gsubst ->
expr e ->
bigstep_red (gen_exp_app (rev (gsubst' ++gsubst)) (gen_exp_abs (E'++E) e))
(gen_exp_app (rev gsubst) (gen_exp_abs E (apply_gamma_subst gsubst' e))).
Proof.
intros E' E dsubst' gsubst' gsubst e Hwfg Dom' Dom Hexpr.
remember (E'++E) as EE.
remember (gsubst'++gsubst) as gsE.
generalize dependent E'.
generalize dependent E.
generalize dependent gsubst'.
generalize dependent gsubst.
generalize dependent e.
induction Hwfg; intros; subst.
(* Base case: everything empty. *)
symmetry in HeqEE.
apply app_eq_nil in HeqEE.
destruct HeqEE as [J1 J2]; subst.
symmetry in HeqgsE.
apply app_eq_nil in HeqgsE.
destruct HeqgsE as [J1 J2]; subst.
simpl. auto.
(* Typ-binding head: split on where the binding falls in each append. *)
apply one_eq_app in HeqEE. destruct HeqEE as [[E'' [EQ1 EQ2]] | [EQ1 EQ2]]; subst.
apply one_eq_app in HeqgsE. destruct HeqgsE as [[gsE'' [gsEQ1 gsEQ2]] | [gsEQ1 gsEQ2]]; subst.
assert (gsE''++gsubst = gsE''++gsubst) as EQ. auto.
assert (gdom_env E'' [=] dom gsE'') as J.
apply gdom_dom__inv with (x:=x)(b:=e)(t:=T); auto.
apply dom__gdom in H. simpl_env in H. auto.
apply dom__gdom in H.
apply dom_gamma_subst in Hwfg. destruct Hwfg.
rewrite H3 in H. simpl_env in H. auto.
apply IHHwfg with (e:=e0) (E:=E0) (E':=E'') in EQ; auto.
simpl_env.
rewrite distr_rev. simpl. simpl_env.
rewrite gen_exp_app_app. simpl.
assert (wf_env (E''++E0)) as Hwfe.
apply wf_gamma_subst__wf_subst in Hwfg. destruct Hwfg; auto.
assert (expr (gen_exp_abs (E''++E0) e0)) as Expr.
apply gen_exp_abs_expr; auto.
rewrite <- shift_ee_expr with (e:=gen_exp_abs (E''++E0) e0); auto.
rewrite <- shift_ee_expr with (e:=gen_exp_abs (E''++E0) e0); auto.
(* One bigstep_red_iabs step peels the outermost wrapper, then the
   open/close is rephrased as subst_ee and commuted through. *)
apply bigstep_red__trans with (e' := gen_exp_app (rev (gsE''++gsubst)) (open_ee (close_ee (gen_exp_abs (E''++E0) e0) x) e)).
apply eval_gen_exp_app.
apply wf_gamma_subst__wf_gsubst in Hwfg.
apply wf_gsubst_rev; auto.
apply bigstep_red__trans with (e':= (open_ee (close_ee (gen_exp_abs (E''++E0) e0) x) e)); auto.
apply bigstep_red_iabs; auto.
assert (J':=@close_ee_bexp_eupper (gen_exp_abs (E''++E0) e0) 0 x).
rewrite expr_bexp_eupper with (e:=gen_exp_abs (E''++E0) e0) in J'; auto.
unfold exp_iabs.
apply expr_abs with (L:={}); auto.
apply type_bang; eauto using type_from_wf_typ.
intros x0 Hx0. unfold open_ee. simpl.
apply expr_let with (L:={{x0}}); auto.
intros x1 Hx1.
unfold open_ee.
rewrite open_ee_open_ee_rec__commut; auto.
unfold close_ee.
rewrite close_open_ee__subst_ee'; auto.
apply subst_ee_expr with (z:=x)(e2:=x1) in Expr; auto.
rewrite <- open_ee_rec_expr; auto.
assert (open_ee (close_ee (gen_exp_abs (E''++E0) e0) x) e = subst_ee x e (gen_exp_abs (E''++E0) e0)) as EQ'.
rewrite close_open_ee__subst_ee; auto.
rewrite EQ'.
assert (gen_exp_app (rev (gsE''++gsubst)) (subst_ee x e (gen_exp_abs (E''++E0) e0)) =
subst_ee x e (gen_exp_app (rev (gsE''++gsubst)) (gen_exp_abs (E''++E0) e0))) as EQ''.
rewrite gen_exp_app_subst_ee_commute; auto.
apply wf_gamma_subst__wf_gsubst in Hwfg.
apply wf_gsubst_rev in Hwfg; auto.
rewrite EQ''.
assert (gen_exp_app (rev gsubst) (gen_exp_abs E0 (apply_gamma_subst gsE'' (subst_ee x e e0))) =
subst_ee x e (gen_exp_app (rev gsubst) (gen_exp_abs E0 (apply_gamma_subst gsE'' e0)))) as EQ'''.
rewrite <- gen_exp_app_subst_ee_commute; auto.
rewrite <- gen_exp_abs_subst_ee_commute with (T:=apply_delta_subst_typ dsE T); auto.
rewrite swap_subst_ee_gsubst'' with (t:=apply_delta_subst_typ dsE T); auto.
apply wf_gamma_subst__wf_gsubst in Hwfg.
apply gsubst_strengthen_head in Hwfg; auto.
apply dom_gamma_subst in Hwfg.
apply dom__gdom in H. destruct Hwfg as [J1 J2].
rewrite J2 in H.
simpl_env in H. auto.
apply wf_gamma_subst__wf_gsubst in Hwfg.
apply gsubst_strengthen_tail in Hwfg.
apply wf_gsubst_rev in Hwfg; auto.
rewrite EQ'''.
apply bigstep_red_preserved_under_subst_ee; auto.
(* Contradictory domain-equation cases, as in the tabs lemma. *)
simpl in Dom'.
assert (x `in` {}) as FALSE.
rewrite <- Dom'.
auto.
contradict FALSE; auto.
apply one_eq_app in HeqgsE. destruct HeqgsE as [[gsE'' [gsEQ1 gsEQ2]] | [gsEQ1 gsEQ2]]; subst.
simpl in Dom'.
assert (x `in` {}) as FALSE.
rewrite Dom'.
auto.
contradict FALSE; auto.
simpl_env.
rewrite distr_rev. simpl. simpl_env. auto.
(* Kind-binding head: no app/abs generated, the IH applies directly. *)
apply one_eq_app in HeqEE. destruct HeqEE as [[E'' [EQ1 EQ2]] | [EQ1 EQ2]]; subst.
simpl.
assert (gsubst'++gsubst = gsubst'++gsubst) as EQ. auto.
simpl in Dom'.
apply IHHwfg with (e:=e) (E:=E0) (E':=E'') in EQ; auto.
simpl in *. symmetry in Dom'.
apply dom_empty_inv in Dom'.
subst. simpl. auto.
Qed.
(* Corollary of _eval_gen_exp_abs_list with an empty suffix. *)
Lemma eval_gen_exp_abs_list: forall E dsubst gsubst e,
wf_gamma_subst E dsubst gsubst ->
expr e ->
bigstep_red (gen_exp_app (rev gsubst) (gen_exp_abs E e)) (apply_gamma_subst gsubst e).
Proof.
intros E dsubst gsubst e Hwfg Hexpr.
rewrite_env (E++nil) in Hwfg.
rewrite_env (gsubst++nil) in Hwfg.
apply _eval_gen_exp_abs_list with (e:=e) in Hwfg; auto.
simpl_env in Hwfg; auto.
apply dom_gamma_subst in Hwfg. simpl_env in Hwfg. destruct Hwfg; auto.
Qed.
(* gen_exp_lapp distributes over list append. *)
Lemma gen_exp_lapp_app : forall lE lE' e,
gen_exp_lapp (lE ++ lE') e = gen_exp_lapp lE (gen_exp_lapp lE' e).
Proof.
induction lE; intros lE' e; simpl; auto.
destruct a. rewrite IHlE; auto.
Qed.
(* Wrapping with the linear abstractions generated from a well-formed
   linear environment yields an expression. *)
Lemma gen_exp_labs_expr : forall E lE e,
wf_lenv E lE ->
expr e ->
expr (gen_exp_labs lE e).
Proof.
intros E lE e Hwfle Hexpr.
generalize dependent e.
induction Hwfle; intros e Hexpr; simpl; auto.
apply expr_abs with (L:={{x}}).
apply type_from_wf_typ in H1; auto.
intros x0 Frx0.
rewrite <- shift_ee_expr; auto.
rewrite close_open_ee__subst_ee; auto.
Qed.
(* gen_exp_lapp commutes with subst_ee, for the same freshness reason as
   gen_exp_app_subst_ee_commute. *)
Lemma gen_exp_lapp_subst_ee_commute : forall gsubst e x v,
wf_gsubst gsubst ->
gen_exp_lapp gsubst (subst_ee x v e) = subst_ee x v (gen_exp_lapp gsubst e).
Proof.
intros gsubst e x v Hwfg.
generalize dependent e.
generalize dependent x.
generalize dependent v.
induction Hwfg; intros; simpl; auto.
rewrite IHHwfg.
rewrite <- subst_ee_fresh with (e:=e); auto.
apply notin_fv_ee_typing with (y:=x0) in H0; auto.
Qed.
(* gen_exp_labs commutes with subst_ee when x is not bound by lE and v is
   closed. *)
Lemma gen_exp_labs_subst_ee_commute : forall lE x v T e,
x `notin` dom lE ->
typing nil nil v T ->
gen_exp_labs lE (subst_ee x v e) = subst_ee x v (gen_exp_labs lE e).
Proof.
induction lE; intros x v T e HC Typing; simpl; auto.
destruct a.
destruct l; simpl.
rewrite IHlE with (T:=T); auto.
rewrite subst_ee_close_ee; auto.
unfold shift_ee.
rewrite subst_ee_shift_ee_rec; auto.
apply notin_fv_ee_typing with (y:=a) in Typing; auto.
Qed.
(* Linear analogue of _eval_gen_exp_abs_list: applying (reversed) the
   values of lgsubst' to the labs-wrapped term beta-reduces away the
   abstractions for the linear-environment prefix lE'.  Induction on
   wf_lgamma_subst with a prefix/suffix split of lE and lgsubst. *)
Lemma _eval_gen_exp_labs_list: forall E lE' lE dsubst gsubst lgsubst' lgsubst e,
wf_lgamma_subst E (lE'++lE) dsubst gsubst (lgsubst' ++lgsubst) ->
dom lE' [=] dom lgsubst' ->
dom lE [=] dom lgsubst ->
expr e ->
bigstep_red (gen_exp_lapp (rev (lgsubst' ++lgsubst)) (gen_exp_labs (lE'++lE) e))
(gen_exp_lapp (rev lgsubst) (gen_exp_labs lE (apply_gamma_subst lgsubst' e))).
Proof.
intros E lE' lE dsubst gsubst lgsubst' lgsubst e Hwflg Dom' Dom Hexpr.
remember (lE'++lE) as lEE.
remember (lgsubst'++lgsubst) as lgsE.
generalize dependent lE'.
generalize dependent lE.
generalize dependent lgsubst'.
generalize dependent lgsubst.
generalize dependent e.
induction Hwflg; intros; subst.
(* Base case: everything empty. *)
symmetry in HeqlEE.
apply app_eq_nil in HeqlEE.
destruct HeqlEE as [J1 J2]; subst.
symmetry in HeqlgsE.
apply app_eq_nil in HeqlgsE.
destruct HeqlgsE as [J1 J2]; subst.
simpl. auto.
(* Non-linear binding case: linear parts unchanged, IH applies. *)
assert (lgsubst'++lgsubst = lgsubst'++lgsubst) as EQ. auto.
simpl in Dom'.
apply IHHwflg with (e:=e0) (lE:=lE0) (lE'0:=lE') in EQ; auto.
(* Linear binding case: split on where (x, lbind_typ T) falls. *)
apply one_eq_app in HeqlEE. destruct HeqlEE as [[lE'' [EQ1 EQ2]] | [EQ1 EQ2]]; subst.
apply one_eq_app in HeqlgsE. destruct HeqlgsE as [[lgsE'' [lgsEQ1 lgsEQ2]] | [lgsEQ1 lgsEQ2]]; subst.
assert (lgsE''++lgsubst = lgsE''++lgsubst) as EQ. auto.
assert (dom lE'' [=] dom lgsE'') as J.
apply dom_dom__inv with (x:=x)(b':=e)(b:=lbind_typ T); auto.
apply dom_lgamma_subst in Hwflg. decompose [and] Hwflg.
rewrite H6 in H0. simpl_env in H0. auto.
apply IHHwflg with (e:=e0) (lE:=lE0) (lE':=lE'') in EQ; auto.
simpl_env.
rewrite distr_rev. simpl. simpl_env.
rewrite gen_exp_lapp_app. simpl.
assert (wf_lenv E (lE''++lE0)) as Hwfle.
apply wf_lgamma_subst__wf_lenv in Hwflg. destruct Hwflg; auto.
rewrite <- shift_ee_expr; try solve [apply gen_exp_labs_expr with (E:=E); auto].
(* One red_abs step peels the outermost linear abstraction, then the
   open/close is rephrased as subst_ee and commuted through. *)
apply bigstep_red__trans with (e' := gen_exp_lapp (rev (lgsE''++lgsubst)) (open_ee (close_ee (gen_exp_labs (lE''++lE0) e0) x) e)).
apply eval_gen_exp_lapp.
apply wf_lgamma_subst__wf_lgsubst in Hwflg.
apply wf_gsubst_rev; auto.
apply bigstep_red_trans with (e':= (open_ee (close_ee (gen_exp_labs (lE''++lE0) e0) x) e)); auto.
apply red_abs; auto.
apply expr_abs with (L:={}).
apply type_from_wf_typ in H2; auto.
intros x0 Hx0.
rewrite close_open_ee__subst_ee; auto.
apply subst_ee_expr; auto.
apply gen_exp_labs_expr with (E:=E); auto.
apply gen_exp_labs_expr with (E:=E); auto.
assert (open_ee (close_ee (gen_exp_labs (lE''++lE0) e0) x) e = subst_ee x e (gen_exp_labs (lE''++lE0) e0)) as EQ'.
rewrite close_open_ee__subst_ee; auto.
apply gen_exp_labs_expr with (E:=E); auto.
rewrite EQ'.
assert (gen_exp_lapp (rev (lgsE''++lgsubst)) (subst_ee x e (gen_exp_labs (lE''++lE0) e0)) =
subst_ee x e (gen_exp_lapp (rev (lgsE''++lgsubst)) (gen_exp_labs (lE''++lE0) e0))) as EQ''.
rewrite gen_exp_lapp_subst_ee_commute; auto.
apply wf_lgamma_subst__wf_lgsubst in Hwflg.
apply wf_gsubst_rev in Hwflg; auto.
rewrite EQ''.
assert (gen_exp_lapp (rev lgsubst) (gen_exp_labs lE0 (apply_gamma_subst lgsE'' (subst_ee x e e0))) =
subst_ee x e (gen_exp_lapp (rev lgsubst) (gen_exp_labs lE0 (apply_gamma_subst lgsE'' e0)))) as EQ'''.
rewrite <- gen_exp_lapp_subst_ee_commute; auto.
rewrite <- gen_exp_labs_subst_ee_commute with (T:=apply_delta_subst_typ dsE T); auto.
rewrite swap_subst_ee_gsubst'' with (t:=apply_delta_subst_typ dsE T); auto.
apply wf_lgamma_subst__wf_lgsubst in Hwflg.
apply gsubst_strengthen_head in Hwflg; auto.
apply dom_lgamma_subst in Hwflg.
simpl_env in H0.
destruct_notin.
rewrite J in H0. auto.
apply wf_lgamma_subst__wf_lgsubst in Hwflg.
apply gsubst_strengthen_tail in Hwflg.
apply wf_gsubst_rev in Hwflg; auto.
rewrite EQ'''.
apply bigstep_red_preserved_under_subst_ee; auto.
(* Contradictory domain-equation cases. *)
simpl in Dom'.
assert (x `in` {}) as FALSE.
rewrite <- Dom'.
auto.
contradict FALSE; auto.
apply one_eq_app in HeqlgsE. destruct HeqlgsE as [[lgsE'' [lgsEQ1 lgsEQ2]] | [lgsEQ1 lgsEQ2]]; subst.
simpl in Dom'.
assert (x `in` {}) as FALSE.
rewrite Dom'.
auto.
contradict FALSE; auto.
simpl_env.
rewrite distr_rev. simpl. simpl_env. auto.
(* Type-binding case: linear parts unchanged, IH applies. *)
assert (lgsubst'++lgsubst = lgsubst'++lgsubst) as EQ. auto.
simpl in Dom'.
apply IHHwflg with (e:=e) (lE:=lE0) (lE'0:=lE') in EQ; auto.
Qed.
(* Corollary of _eval_gen_exp_labs_list with an empty suffix. *)
Lemma eval_gen_exp_labs_list: forall E lE dsubst gsubst lgsubst e,
wf_lgamma_subst E lE dsubst gsubst lgsubst ->
expr e ->
bigstep_red (gen_exp_lapp (rev lgsubst) (gen_exp_labs lE e)) (apply_gamma_subst lgsubst e).
Proof.
intros E lE dsubst gsubst lgsubst e Hwflg Hexpr.
rewrite_env (lE++nil) in Hwflg.
rewrite_env (lgsubst++nil) in Hwflg.
apply _eval_gen_exp_labs_list with (e:=e) in Hwflg; auto.
simpl_env in Hwflg; auto.
apply dom_lgamma_subst in Hwflg.
decompose [and] Hwflg. simpl_env in H2. auto.
Qed.
(* subst_te with a closed type commutes with applying a gamma substitution:
   the substitution values are closed, so X is not free in them. *)
Lemma swap_subst_te_gsubst: forall T X gsubst e,
wf_gsubst gsubst ->
wf_typ nil T ->
subst_te X T (apply_gamma_subst gsubst e) =
apply_gamma_subst gsubst (subst_te X T e).
Proof.
intros T X gsubst e Hwflg Hwft.
generalize dependent e.
generalize dependent T.
induction Hwflg; intros; simpl; eauto.
rewrite subst_te_ee_commute; eauto.
eauto using notin_fv_te_typing.
Qed.
(* Delta and gamma substitutions commute with each other. *)
Lemma wf_dgamma_subst_reorder : forall dsubst gsubst e,
wf_dsubst dsubst ->
wf_gsubst gsubst ->
(apply_delta_subst dsubst (apply_gamma_subst gsubst e)) =
(apply_gamma_subst gsubst (apply_delta_subst dsubst e)).
Proof.
intros dsubst gsubst e Hwfd.
generalize dependent gsubst.
generalize dependent e.
induction Hwfd; intros; simpl; auto.
rewrite swap_subst_te_gsubst; auto.
Qed.
(* Two gamma substitutions with disjoint domains commute. *)
Lemma wf_ggamma_subst_reorder : forall gsubst lgsubst e,
wf_gsubst gsubst ->
wf_gsubst lgsubst ->
disjoint gsubst lgsubst ->
(apply_gamma_subst gsubst (apply_gamma_subst lgsubst e)) =
(apply_gamma_subst lgsubst (apply_gamma_subst gsubst e)).
Proof.
intros gsubst lgsubst e Hwfg.
generalize dependent lgsubst.
generalize dependent e.
induction Hwfg; intros; simpl; auto.
rewrite swap_subst_ee_gsubst'' with (t:=T); auto.
rewrite IHHwfg; auto.
solve_uniq.
solve_uniq.
Qed.
(* Projecting the delta component out of a well-formed lgamma
   substitution. *)
Lemma wf_lgamma_subst__wf_dsubst : forall E D dsubst gsubst lgsubst,
wf_lgamma_subst E D dsubst gsubst lgsubst->
wf_dsubst dsubst.
Proof.
intros E D dsubst gsubst lgsubst Hwflg.
(wf_lgamma_subst_cases (induction Hwflg) Case); auto.
Case "wf_lgamma_subst_skind".
apply wf_dsubst_styp; auto.
apply dom_lgamma_subst in Hwflg. destruct Hwflg as [J1 [J2 J3]].
rewrite <- J1.
apply dom__ddom; auto.
Qed.
(* The three substitution layers of a well-formed lgamma substitution may
   be applied in either order; combines the pairwise reorder lemmas and
   the mutual-disjointness of the components. *)
Lemma wf_lgamma_subst_reorder : forall E lE dsubst gsubst lgsubst e,
wf_lgamma_subst E lE dsubst gsubst lgsubst ->
(apply_delta_subst dsubst (apply_gamma_subst gsubst (apply_gamma_subst lgsubst e))) =
(apply_gamma_subst lgsubst (apply_gamma_subst gsubst (apply_delta_subst dsubst e))).
Proof.
intros E lE dsubst gsubst lgsubst e Hwflg.
assert (J1:=Hwflg). apply wf_lgamma_subst__wf_dsubst in J1; auto.
assert (J2:=Hwflg). apply wf_lgamma_subst__wf_gsubst in J2; auto.
assert (J3:=Hwflg). apply wf_lgamma_subst__wf_lgsubst in J3; auto.
rewrite wf_dgamma_subst_reorder; auto.
rewrite wf_dgamma_subst_reorder; auto.
rewrite wf_ggamma_subst_reorder; auto.
apply wf_lgamma_subst_disjoint in Hwflg. decompose [and] Hwflg; auto.
Qed.
(* The fully generated context — type abstractions over term abstractions
   over linear abstractions around a hole — is a well-formed context. *)
Lemma gen_ctx_tabs_abs_labs_context : forall E lE,
wf_lenv E lE ->
context (gen_ctx_tabs E (gen_ctx_abs E (gen_ctx_labs lE ctx_hole))).
Proof.
intros E lE Wfle.
apply gen_ctx_tabs_context; auto.
apply gen_ctx_abs_context; auto.
apply fv_ec_gen_ctx_labs_hole.
apply gen_ctx_labs_context with (E:=E); auto.
Qed.
(* Same without the type-abstraction layer. *)
Lemma gen_ctx_abs_labs_context : forall E lE,
wf_lenv E lE ->
context (gen_ctx_abs E (gen_ctx_labs lE ctx_hole)).
Proof.
intros E lE Wfle.
apply gen_ctx_abs_context; auto.
apply fv_ec_gen_ctx_labs_hole.
apply gen_ctx_labs_context with (E:=E); auto.
Qed.
(* Applying a well-formed delta substitution preserves expression-hood. *)
Lemma expr_preserved_under_dsubst: forall dsubst e,
wf_dsubst dsubst ->
expr e ->
expr (apply_delta_subst dsubst e).
Proof.
intros dsubst e Hwfd He.
generalize dependent e.
induction Hwfd; intros; simpl; auto.
apply IHHwfd.
apply subst_te_expr; eauto using type_from_wf_typ.
Qed.
(* Applying a well-formed gamma substitution preserves expression-hood. *)
Lemma expr_preserved_under_gsubst: forall gsubst e,
wf_gsubst gsubst ->
expr e ->
expr (apply_gamma_subst gsubst e).
Proof.
intros gsubst e Hwfg He.
generalize dependent e.
induction Hwfg; intros; auto.
simpl. apply IHHwfg.
apply subst_ee_expr; auto.
Qed.
(* gen_exp_abs commutes with subst_te provided the environment's bindings
   are substituted too (map (subst_tb X T) E). *)
Lemma gen_exp_abs_subst_te_commute : forall E X T e,
wf_typ nil T ->
gen_exp_abs (map (subst_tb X T) E) (subst_te X T e) = subst_te X T (gen_exp_abs E e).
Proof.
induction E; intros X T e Wft; simpl; auto.
destruct a.
destruct b; simpl.
rewrite IHE; auto.
rewrite IHE; auto.
rewrite subst_te_close_ee; auto.
unfold shift_ee.
rewrite subst_te_shift_ee_rec; auto.
rewrite subst_te_shift_ee_rec; auto.
Qed.
(* Linear analogue, substituting in the linear bindings (subst_tlb). *)
Lemma gen_exp_labs_subst_te_commute : forall lE X T e,
wf_typ nil T ->
gen_exp_labs (map (subst_tlb X T) lE) (subst_te X T e) = subst_te X T (gen_exp_labs lE e).
Proof.
induction lE; intros X T e Wft; simpl; auto.
destruct a.
destruct l; simpl.
rewrite IHlE; auto.
rewrite subst_te_close_ee; auto.
unfold shift_ee.
rewrite subst_te_shift_ee_rec; auto.
Qed.
(* gen_exp_abs commutes with a full delta substitution when the
   environment is substituted alongside (apply_delta_subst_env). *)
Lemma gen_exp_abs_dsubst_commute : forall dsubst E e,
wf_dsubst dsubst ->
gen_exp_abs (apply_delta_subst_env dsubst E) (apply_delta_subst dsubst e) = apply_delta_subst dsubst (gen_exp_abs E e).
Proof.
intros dsubst E e Wfd; simpl; auto.
generalize dependent E.
generalize dependent e.
induction Wfd; intros; simpl; auto.
rewrite apply_delta_subst_env_nil; auto.
simpl_env.
rewrite <- gen_exp_abs_subst_te_commute; auto.
rewrite <- IHWfd; auto.
rewrite apply_delta_subst_env_cons; auto.
Qed.
(* Linear analogue over apply_delta_subst_lenv. *)
Lemma gen_exp_labs_dsubst_commute : forall dsubst lE e,
wf_dsubst dsubst ->
gen_exp_labs (apply_delta_subst_lenv dsubst lE) (apply_delta_subst dsubst e) = apply_delta_subst dsubst (gen_exp_labs lE e).
Proof.
intros dsubst lE e Wfd; simpl; auto.
generalize dependent lE.
generalize dependent e.
induction Wfd; intros; simpl; auto.
rewrite apply_delta_subst_lenv_nil; auto.
simpl_env.
rewrite <- gen_exp_labs_subst_te_commute; auto.
rewrite <- IHWfd; auto.
rewrite apply_delta_subst_lenv_cons'; auto.
Qed.
(* gen_exp_labs commutes with a gamma substitution whose domain is
   disjoint from the linear environment. *)
Lemma gen_exp_labs_gsubst_commute : forall gsubst lE e,
wf_gsubst gsubst ->
disjoint lE gsubst ->
gen_exp_labs lE (apply_gamma_subst gsubst e) =
apply_gamma_subst gsubst (gen_exp_labs lE e).
Proof.
intros gsubst lE e Wfg Disj; simpl; auto.
generalize dependent lE.
generalize dependent e.
induction Wfg; intros; simpl; auto.
rewrite <- gen_exp_labs_subst_ee_commute with (T:=T); auto.
rewrite <- IHWfg; auto.
solve_uniq.
solve_uniq.
Qed.
(* Every type bound in a well-formed delta substitution is closed
   (well-formed in the empty environment). *)
Lemma dsubst_binds_typ : forall dsubst X t,
wf_dsubst dsubst ->
binds X t dsubst ->
wf_typ nil t.
Proof.
intros dsubst X t Hwf_dsubst.
generalize dependent X.
generalize dependent t.
(wf_dsubst_cases (induction Hwf_dsubst) Case);
intros t X0 HeqDsubst.
inversion HeqDsubst.
analyze_binds HeqDsubst.
apply IHHwf_dsubst with (X:=X0); auto.
Qed.
(* Pre-substituting a binding (X,T) already recorded in dsubst is a no-op
   for apply_delta_subst_typ: the substitution performs it anyway. *)
Lemma apply_delta_subst_typ_subst_tt_id : forall dsubst X T t,
wf_dsubst dsubst ->
binds X T dsubst ->
apply_delta_subst_typ dsubst (subst_tt X T t) = apply_delta_subst_typ dsubst t.
Proof.
intros dsubst X T t Hwfd Hbinds.
generalize dependent X.
generalize dependent T.
generalize dependent t.
induction Hwfd; intros; simpl; auto.
inversion Hbinds.
analyze_binds Hbinds.
rewrite <- subst_tt_fresh with (T:=subst_tt X T t); auto.
apply notin_fv_tt_subst_tt_var; auto.
eauto using notin_fv_wf.
rewrite subst_tt_commute; auto.
apply dsubst_binds_typ in BindsTac; auto.
eauto using notin_fv_wf.
eauto using notin_fv_wf.
apply binds_In in BindsTac. solve_uniq.
Qed.
(* Projecting the delta component out of a well-formed gamma
   substitution. *)
Lemma wf_gamma_subst__wf_dsubst : forall E dsubst gsubst,
wf_gamma_subst E dsubst gsubst ->
wf_dsubst dsubst.
Proof.
intros E dsubst gsubst Hwfg.
(wf_gamma_subst_cases (induction Hwfg) Case); auto.
Case "wf_gamma_subst_skind".
apply wf_dsubst_styp; auto.
apply dom_gamma_subst in Hwfg. destruct Hwfg as [J1 J2].
rewrite <- J1.
apply dom__ddom; auto.
Qed.
(* Type substitution preserves well-formedness of types when the
   substituted variable IS bound in E (with a kind binding); the var case
   splits on whether the variable is the one being replaced. *)
Lemma wf_typ_tt_in : forall E t X T,
wf_env E ->
wf_typ E t ->
binds X (bind_kn) E ->
wf_typ nil T ->
wf_typ (map (subst_tb X T) E) (subst_tt X T t).
Proof.
intros E t X T Hwfe Hwft Binds Hwft'.
generalize dependent T.
generalize dependent X.
induction Hwft; intros; simpl.
destruct (X==X0); subst.
rewrite_env (map (subst_tb X0 T) E ++ nil).
apply wf_typ_weaken_head; auto.
apply wf_typ_var; auto.
apply binds_map_2 with (f:=subst_tb X0 T) in H0.
simpl in H0. auto.
apply wf_typ_arrow; eauto.
apply wf_typ_all with (L:=L `union` dom E `union` {{X}}); eauto.
intros X0 HX0.
assert (X0 `notin` L) as J. auto.
apply H in J.
assert (wf_env ([(X0, bind_kn)]++E)) as J1. auto.
apply H0 with (X:=X0) (T:=T) (X0:=X) in J1; auto.
simpl in J1. simpl_env in J1.
rewrite subst_tt_open_tt_var; auto.
apply type_from_wf_typ in Hwft'; auto.
apply wf_typ_bang; eauto.
apply wf_typ_with; eauto.
Qed.
(* Variant where X is NOT bound in E: the X==X0 branch is refuted via
   binds_In, and only uniqueness of E is required. *)
Lemma wf_typ_tt_notin : forall E t X T,
uniq E ->
wf_typ E t ->
X `notin` dom E ->
wf_typ nil T ->
wf_typ (map (subst_tb X T) E) (subst_tt X T t).
Proof.
intros E t X T Hwfe Hwft HX Hwft'.
generalize dependent T.
generalize dependent X.
induction Hwft; intros; simpl.
destruct (X==X0); subst.
apply binds_In in H0.
contradict H0; auto.
apply wf_typ_var; auto.
apply binds_map_2 with (f:=subst_tb X0 T) in H0.
simpl in H0. auto.
apply wf_typ_arrow; eauto.
apply wf_typ_all with (L:=L `union` dom E `union` {{X}}); eauto.
intros X0 HX0.
assert (X0 `notin` L) as J. auto.
apply H in J.
assert (uniq ([(X0, bind_kn)]++E)) as J1. auto.
apply H0 with (X:=X0) (T:=T) (X0:=X) in J1; auto.
simpl in J1. simpl_env in J1.
rewrite subst_tt_open_tt_var; auto.
apply type_from_wf_typ in Hwft'; auto.
apply wf_typ_bang; eauto.
apply wf_typ_with; eauto.
Qed.
(* Substituting a closed type cannot introduce free type variables:
   fv_tt (subst_tt X T t) is a subset of fv_tt t. *)
Lemma subst_tt_fv_tt_sub : forall t X T,
wf_typ nil T ->
fv_tt (subst_tt X T t) [<=] fv_tt t.
Proof.
induction t; intros X T Hwft; simpl; try solve [eauto | fsetdec].
(* fvar case: if a = X we get T, which is closed by wft_fv_tt_sub. *)
destruct (a==X); simpl; try solve [fsetdec].
apply wft_fv_tt_sub in Hwft.
fsetdec.
(* binary type constructors: combine the two inductive hypotheses. *)
assert (J1:=@IHt1 X T Hwft).
assert (J2:=@IHt2 X T Hwft).
fsetdec.
assert (J1:=@IHt1 X T Hwft).
assert (J2:=@IHt2 X T Hwft).
fsetdec.
Qed.
(* Applying a well-formed delta substitution can only shrink the set of
   free type variables of a type. *)
Lemma apply_delta_subst_typ_fv_tt : forall dsubst t,
wf_dsubst dsubst ->
fv_tt (apply_delta_subst_typ dsubst t) [<=] fv_tt t.
Proof.
intros dsubst t Hwfd.
generalize dependent t.
induction Hwfd; intros; simpl.
fsetdec.
(* cons case: chain the IH with subst_tt_fv_tt_sub for one binding. *)
assert (J:=@IHHwfd (subst_tt X T t)).
assert (J':=@subst_tt_fv_tt_sub t X T H0).
fsetdec.
Qed.
(* After substituting a closed type T for X, the variable X no longer
   occurs free in the result. *)
Lemma subst_tt_fv_tt_notin : forall t X T,
wf_typ nil T ->
X `notin` fv_tt (subst_tt X T t).
Proof.
induction t; intros X T Hwft; simpl; try solve [eauto | fsetdec].
destruct (a==X); simpl; try solve [fsetdec].
(* the replaced occurrence becomes T, and X is not free in closed T *)
apply notin_fv_wf with (X:=X) in Hwft; auto.
Qed.
(* A type well-formed in E stays well-formed after applying the delta
   substitution, in the environment obtained by applying the same
   substitution to E. *)
Lemma wf_delta_subst_dsubst_id : forall E dsubst t,
wf_delta_subst E dsubst ->
wf_typ E t ->
wf_typ (apply_delta_subst_env dsubst E) (apply_delta_subst_typ dsubst t).
Proof.
intros E dsubst t Hwfd.
generalize dependent t.
induction Hwfd; intros; simpl; simpl_env; auto.
(* styp case: substitute one binding, then recurse. *)
apply wf_typ_weaken_head; auto.
rewrite apply_delta_subst_env_cons.
rewrite apply_delta_subst_env_subst_tb_swap with (E:=E); auto.
rewrite_env (nil++[(X, bind_kn)]++E) in H1.
apply wf_typ_subst_tb with (F:=nil) (Z:=X) (T:=t) (P:=T) in H1.
simpl_env in H1.
apply IHHwfd in H1.
apply wf_typ_tt_notin with (X:=X) (T:=T) in H1; auto.
(* the residual substitution on the already-substituted type is a no-op
   because X is no longer free in it *)
rewrite <- subst_tt_fresh in H1; auto.
apply wf_delta_subst__wf_dsubst in Hwfd.
assert (J:=@apply_delta_subst_typ_fv_tt SE (subst_tt X T t) Hwfd).
assert (J':=@subst_tt_fv_tt_notin t X T H0).
fsetdec.
apply apply_delta_subst_env_uniq.
apply wf_delta_subst__uniq in Hwfd.
decompose [and] Hwfd; auto.
rewrite <- (@apply_delta_subst_env_dom (SE) E); auto.
rewrite_env (E++nil).
apply wf_typ_weaken_head; auto.
apply wf_delta_subst__uniq in Hwfd.
decompose [and] Hwfd; auto.
assert (uniq E) as Uniq.
apply wf_delta_subst__uniq in Hwfd.
decompose [and] Hwfd; auto.
apply apply_delta_subst_env_uniq with (dsubst:=[(X, T)]++SE) in Uniq.
rewrite (@apply_delta_subst_env_dom ([(X, T)]++SE) E) in H.
solve_uniq.
(* skip case (term binding): strengthen the typing hypothesis and recurse. *)
apply wf_typ_weaken_head; auto.
apply IHHwfd.
rewrite_env (nil++[(x, bind_typ T)]++E) in H1.
apply wf_typ_strengthening in H1; auto.
assert (uniq E) as Uniq.
apply wf_delta_subst__uniq in Hwfd.
decompose [and] Hwfd; auto.
apply apply_delta_subst_env_uniq with (dsubst:=SE) in Uniq.
rewrite (@apply_delta_subst_env_dom SE E) in H.
solve_uniq.
Qed.
(* Mapping a closed-type substitution [X |-> T] over a well-formed
   environment preserves well-formedness when X is fresh for E. *)
Lemma wf_env_tt_notin : forall E X T,
wf_env E ->
X `notin` dom E ->
wf_typ nil T ->
wf_env (map (subst_tb X T) E).
Proof.
intros E X T Hwfe HX Hwft.
generalize dependent T.
generalize dependent X.
induction Hwfe; intros; simpl; simpl_env; auto.
apply wf_env_typ; eauto.
(* the binding's type stays well-formed by wf_typ_tt_notin *)
eapply wf_typ_tt_notin; eauto.
Qed.
(* Applying a well-formed delta substitution to a well-formed
   environment yields a well-formed environment. *)
Lemma wf_env_dsubst_id : forall E dsubst,
wf_delta_subst E dsubst ->
wf_env E->
wf_env (apply_delta_subst_env dsubst E).
Proof.
intros E dsubst Hwfd.
induction Hwfd; intros Hwfe; simpl; simpl_env; auto.
(* kind binding: push one substitution through and use wf_env_tt_notin *)
apply wf_env_kn; auto.
rewrite apply_delta_subst_env_cons.
rewrite apply_delta_subst_env_subst_tb_swap with (E:=E); auto.
apply wf_env_tt_notin; auto.
apply IHHwfd.
inversion Hwfe; subst; auto.
rewrite <- (@apply_delta_subst_env_dom (SE) E); auto.
rewrite <- (@apply_delta_subst_env_dom ([(X, T)]++SE) E); auto.
(* term binding: substitute the binding's type with
   wf_delta_subst_dsubst_id *)
apply wf_env_typ; auto.
apply IHHwfd.
inversion Hwfe; subst; auto.
apply wf_delta_subst_dsubst_id; auto.
rewrite <- (@apply_delta_subst_env_dom (SE) E); auto.
Qed.
(* Inversion for delta-substitution bindings: any type bound in a
   well-formed dsubst is closed, and its variable is kind-bound in E. *)
Lemma delta_subst_binds_typ : forall E dsubst X t,
wf_delta_subst E dsubst ->
binds X t dsubst ->
wf_typ nil t /\ binds X (bind_kn) E.
Proof.
intros E dsubst X t Hwf_dsubst Binds.
generalize dependent X.
generalize dependent t.
(wf_delta_subst_cases (induction Hwf_dsubst) Case);
intros t X0 HeqDsubst.
inversion HeqDsubst.
(* head/tail split of the binding in the cons case *)
analyze_binds HeqDsubst.
apply IHHwf_dsubst in BindsTac; auto.
destruct BindsTac as [J1 J2].
split; auto.
apply IHHwf_dsubst in HeqDsubst; auto.
destruct HeqDsubst as [J1 J2].
split; auto.
Qed.
(* A well-formed gamma substitution is preserved by mapping a
   closed-type substitution [X |-> T] over E, provided X is fresh. *)
Lemma wf_gamma_subst_subst_tt_notin : forall E dsubst gsubst X T,
wf_gamma_subst E dsubst gsubst ->
X `notin` dom E ->
wf_typ nil T ->
wf_gamma_subst (map (subst_tb X T) E) dsubst gsubst.
Proof.
intros E dsubst gsubst X T Hwfg HX Wft.
generalize dependent T.
generalize dependent X.
induction Hwfg; intros; simpl; simpl_env; auto.
apply wf_gamma_subst_sval; eauto.
(* permute dsubst past the fresh [X |-> T] and erase the no-op subst *)
rewrite delta_subst_permut with (dE:=E); auto.
rewrite <- subst_tt_fresh; auto.
apply typing_regular in H0.
destruct H0 as [_ [_ [_ J]]].
eauto using notin_fv_wf.
apply wf_gamma_subst__wf_subst in Hwfg.
destruct Hwfg; auto.
apply dom_gamma_subst in Hwfg.
destruct Hwfg as [J1 J2].
rewrite <- J1. apply dom__ddom; auto.
assert (J:=Hwfg).
apply wf_gamma_subst__wf_subst in J. destruct J as [J1 J2].
apply wf_typ_tt_notin; auto.
Qed.
(* Companion to wf_gamma_subst_subst_tt_notin: preservation when the
   substituted pair (X, T) is itself a binding of dsubst. *)
Lemma wf_gamma_subst_subst_tt_in : forall E dsubst gsubst X T,
wf_gamma_subst E dsubst gsubst ->
binds X T dsubst ->
wf_gamma_subst (map (subst_tb X T) E) dsubst gsubst.
Proof.
intros E dsubst gsubst X T Hwfg Binds.
generalize dependent T.
generalize dependent X.
induction Hwfg; intros; simpl; simpl_env; auto.
apply wf_gamma_subst_sval; eauto.
(* the [X |-> T] step is already absorbed by applying dsubst *)
rewrite apply_delta_subst_typ_subst_tt_id; auto.
apply wf_gamma_subst__wf_dsubst in Hwfg. auto.
assert (J:=Hwfg).
apply wf_gamma_subst__wf_subst in J. destruct J as [J1 J2].
apply delta_subst_binds_typ with (X:=X) (t:=T0) in J1; auto.
destruct J1 as [J11 J12].
apply wf_typ_tt_in; auto.
apply wf_gamma_subst_skind; auto.
analyze_binds Binds; auto.
(* when the head binding is the substituted one, the map is the identity *)
rewrite <- map_subst_tb_id; auto.
apply wf_gamma_subst__wf_subst in Hwfg.
destruct Hwfg; auto.
Qed.
(* A well-formed gamma substitution remains well-formed when its
   environment is replaced by the dsubst-image of that environment. *)
Lemma wf_gamma_subst_dsubst_id : forall E dsubst gsubst,
wf_gamma_subst E dsubst gsubst ->
wf_gamma_subst (apply_delta_subst_env dsubst E) dsubst gsubst.
Proof.
intros E dsubst gsubst Hwfg.
induction Hwfg; simpl; simpl_env; auto.
(* sval case: value typing is closed, so dsubst leaves it alone *)
apply wf_gamma_subst_sval; auto.
rewrite <- apply_delta_subst_env_dom; auto.
rewrite delta_subst_closed_typing with (e:=e); auto.
apply wf_delta_subst_dsubst_id; auto.
apply wf_gamma_subst__wf_subst in Hwfg.
destruct Hwfg; auto.
(* skind case: swap the head substitution through the mapped env *)
apply wf_gamma_subst_skind; auto.
rewrite apply_delta_subst_env_cons.
rewrite apply_delta_subst_env_subst_tb_swap with (E:=E); auto.
apply wf_gamma_subst_subst_tt_notin; auto.
rewrite <- apply_delta_subst_env_dom; auto.
apply wf_gamma_subst__wf_subst in Hwfg.
destruct Hwfg; auto.
rewrite apply_delta_subst_env_cons.
rewrite apply_delta_subst_env_subst_tb_swap with (E:=E); auto.
rewrite dom_map.
rewrite <- apply_delta_subst_env_dom; auto.
apply wf_gamma_subst__wf_subst in Hwfg.
destruct Hwfg; auto.
Qed.
(* Linear analogue of wf_gamma_subst_subst_tt_notin: mapping a closed
   [X |-> T] over both the non-linear env E (subst_tb) and the linear
   env lE (subst_tlb) preserves a well-formed lgamma substitution. *)
Lemma wf_lgamma_subst_subst_tt_notin : forall E lE dsubst gsubst lgsubst X T,
wf_lgamma_subst E lE dsubst gsubst lgsubst ->
X `notin` dom E ->
wf_typ nil T ->
wf_lgamma_subst (map (subst_tb X T) E) (map (subst_tlb X T) lE) dsubst gsubst lgsubst.
Proof.
intros E lE dsubst gsubst lgsubst X T Hwflg HX Wft.
generalize dependent T.
generalize dependent X.
induction Hwflg; intros; simpl; simpl_env; auto.
(* non-linear value binding *)
apply wf_lgamma_subst_sval; eauto.
rewrite delta_subst_permut with (dE:=E); auto.
rewrite <- subst_tt_fresh; auto.
apply typing_regular in H1.
destruct H1 as [_ [_ [_ J]]].
eauto using notin_fv_wf.
apply wf_lgamma_subst__wf_subst in Hwflg.
destruct Hwflg; auto.
apply dom_lgamma_subst in Hwflg.
destruct Hwflg as [J1 [J2 J3]].
rewrite <- J1. apply dom__ddom; auto.
assert (J:=Hwflg).
apply wf_lgamma_subst__wf_subst in J. destruct J as [J1 J2].
apply wf_typ_tt_notin; auto.
apply wf_delta_subst__uniq in J2. decompose [and] J2; auto.
(* linear value binding: same argument on the linear side *)
apply wf_lgamma_subst_slval; eauto.
rewrite delta_subst_permut with (dE:=E); auto.
rewrite <- subst_tt_fresh; auto.
apply typing_regular in H1.
destruct H1 as [_ [_ [_ J]]].
eauto using notin_fv_wf.
apply wf_lgamma_subst__wf_subst in Hwflg.
destruct Hwflg; auto.
apply dom_lgamma_subst in Hwflg.
destruct Hwflg as [J1 [J2 J3]].
rewrite <- J1. apply dom__ddom; auto.
assert (J:=Hwflg).
apply wf_lgamma_subst__wf_subst in J. destruct J as [J1 J2].
apply wf_typ_tt_notin; auto.
apply wf_delta_subst__uniq in J2. decompose [and] J2; auto.
Qed.
(* Linear analogue of wf_gamma_subst_dsubst_id: a well-formed lgamma
   substitution stays well-formed when both E and lE are replaced by
   their dsubst-images. *)
Lemma wf_lgamma_subst_dsubst_id : forall E lE dsubst gsubst lgsubst,
wf_lgamma_subst E lE dsubst gsubst lgsubst ->
wf_lgamma_subst (apply_delta_subst_env dsubst E) (apply_delta_subst_lenv dsubst lE) dsubst gsubst lgsubst.
Proof.
intros E lE dsubst gsubst lgsubst Hwflg.
induction Hwflg; simpl; simpl_env; auto.
(* non-linear value binding *)
apply wf_lgamma_subst_sval; auto.
rewrite <- apply_delta_subst_env_dom; auto.
rewrite <- apply_delta_subst_lenv_dom; auto.
rewrite delta_subst_closed_typing with (e:=e); auto.
apply wf_delta_subst_dsubst_id; auto.
apply wf_lgamma_subst__wf_subst in Hwflg.
destruct Hwflg; auto.
(* linear value binding *)
apply wf_lgamma_subst_slval; auto.
rewrite <- apply_delta_subst_env_dom; auto.
rewrite <- apply_delta_subst_lenv_dom; auto.
rewrite delta_subst_closed_typing with (e:=e); auto.
apply wf_delta_subst_dsubst_id; auto.
apply wf_lgamma_subst__wf_subst in Hwflg.
destruct Hwflg; auto.
(* kind binding: push the head substitution through both envs *)
apply wf_lgamma_subst_skind; auto.
rewrite apply_delta_subst_env_cons.
rewrite apply_delta_subst_lenv_cons'.
rewrite apply_delta_subst_env_subst_tb_swap with (E:=E); auto.
rewrite apply_delta_subst_lenv_subst_tlb_swap with (E:=E); auto.
apply wf_lgamma_subst_subst_tt_notin; auto.
rewrite <- apply_delta_subst_env_dom; auto.
apply wf_lgamma_subst__wf_subst in Hwflg.
destruct Hwflg; auto.
apply wf_lgamma_subst__wf_subst in Hwflg.
destruct Hwflg; auto.
rewrite apply_delta_subst_env_cons.
rewrite apply_delta_subst_env_subst_tb_swap with (E:=E); auto.
rewrite dom_map.
rewrite <- apply_delta_subst_env_dom; auto.
apply wf_lgamma_subst__wf_subst in Hwflg.
destruct Hwflg; auto.
rewrite apply_delta_subst_lenv_cons'.
rewrite apply_delta_subst_lenv_subst_tlb_swap with (E:=E); auto.
rewrite dom_map.
rewrite <- apply_delta_subst_lenv_dom; auto.
apply wf_lgamma_subst__wf_subst in Hwflg.
destruct Hwflg; auto.
Qed.
(* Applying a well-formed delta substitution to a well-formed linear
   environment (together with its non-linear env) preserves wf_lenv. *)
Lemma wf_lenv_dsubst_id : forall E lE dsubst,
wf_delta_subst E dsubst ->
wf_lenv E lE ->
wf_lenv (apply_delta_subst_env dsubst E) (apply_delta_subst_lenv dsubst lE).
Proof.
intros E lE dsubst Hwfd Hwfe.
generalize dependent dsubst.
induction Hwfe; intros; simpl; simpl_env; auto.
apply wf_lenv_empty.
apply wf_env_dsubst_id; auto.
(* cons case: domains are unchanged by the substitution, and the
   binding's type is preserved by wf_delta_subst_dsubst_id *)
apply wf_lenv_typ; auto.
rewrite <- apply_delta_subst_env_dom; auto.
rewrite <- apply_delta_subst_lenv_dom; auto.
apply wf_delta_subst_dsubst_id; auto.
Qed.
(* Projection: a well-formed lgamma substitution yields both a
   well-formed gamma substitution and a well-formed delta substitution
   on its non-linear components. *)
Lemma wf_lgamma_subst__wf_subst : forall E D dsubst gsubst lgsubst,
wf_lgamma_subst E D dsubst gsubst lgsubst ->
wf_gamma_subst E dsubst gsubst /\ wf_delta_subst E dsubst.
Proof.
(* Note: the original omitted the `Proof.` marker; added for
   consistency with every other lemma in this file (no semantic
   change — `Proof.` is a no-op). *)
intros.
induction H; auto.
destruct IHwf_lgamma_subst as [J1 J2].
split.
apply wf_gamma_subst_sval; auto.
apply wf_delta_subst_skip; auto.
destruct IHwf_lgamma_subst as [J1 J2].
split.
apply wf_gamma_subst_skind; auto.
apply wf_delta_subst_styp; auto.
Qed.
(* In a uniq environment, the type-variable domain (ddom) and the
   term-variable domain (gdom) are disjoint. *)
Lemma ddom_gdom_disjdom : forall E,
uniq E ->
disjdom (ddom_env E) (gdom_env E).
Proof.
intros E Uniq.
induction Uniq; simpl.
apply disjdom_nil_1.
(* cons case: split on the kind of the head binding and peel the
   singleton off each side of the disjointness. *)
destruct a.
eapply disjdom_app_l.
split; auto.
eapply disjdom_one_l; auto.
apply dom__gdom; auto.
eapply disjdom_app_r; auto.
split; auto.
eapply disjdom_one_l; auto.
apply disjdom_sym_1; auto.
Qed.
(* Congruence: big-step reduction lifts through the first projection. *)
Lemma bigstep_red_fst_beta : forall e e',
bigstep_red e e' ->
bigstep_red (exp_fst e) (exp_fst e').
Proof.
intros e e' Hrel.
induction Hrel; auto.
apply bigstep_red_trans with (e':=exp_fst e'); auto.
Qed.
(* Plugging commutes with the generated application context: plugging e
   into gen_ctx_app gsubst C is the same as wrapping plug C e with
   gen_exp_app gsubst. *)
Lemma plug_gen_ctx_app__gen_exp_app : forall gsubst C e,
plug (gen_ctx_app gsubst C) e = gen_exp_app gsubst (plug C e).
Proof.
induction gsubst; intros C e; simpl; auto.
destruct a. simpl.
rewrite IHgsubst. auto.
Qed.
(* Key operational lemma: plugging a well-typed term e into the
   "substitution-as-context" built from (dsubst, gsubst, lgsubst) —
   type abstractions/applications for dsubst and (linear) lambda
   abstractions/applications for gsubst/lgsubst — big-step reduces to
   the direct application of the three substitutions to e. *)
Lemma eval_from_subst_to_ctx : forall E lE dsubst gsubst lgsubst e t,
wf_lgamma_subst E lE dsubst gsubst lgsubst ->
typing E lE e t ->
bigstep_red
(plug (gen_ctx_lapp (rev lgsubst) (gen_ctx_app (rev gsubst) (gen_ctx_tapp (rev dsubst) (gen_ctx_tabs E (gen_ctx_abs E (gen_ctx_labs lE ctx_hole)))))) e)
(apply_delta_subst dsubst (apply_gamma_subst gsubst (apply_gamma_subst lgsubst e))).
Proof.
intros E lE dsubst gsubst lgsubst e t Hwflg Htyping.
assert (JJ:=Hwflg).
(* Step 1: normalize the plugged context into nested gen_exp_* forms. *)
rewrite wf_lgamma_subst_reorder with (E:=E) (lE:=lE); auto.
rewrite plug_gen_ctx_lapp__gen_exp_lapp.
rewrite plug_gen_ctx_app__gen_exp_app.
rewrite plug_gen_ctx_tapp__gen_exp_tapp.
(* type variables of E are disjoint from the binders captured by the
   abstraction contexts *)
assert (disjdom (ddom_env E) (cv_ec (gen_ctx_abs E (gen_ctx_labs lE ctx_hole)))) as JJ'.
apply disjdom_sym_1.
apply disjdom_eq with (D1:=gdom_env E `union` dom lE).
eapply disjdom_app_l.
split.
apply disjdom_sym_1.
apply ddom_gdom_disjdom; auto.
apply disjdom_sub with (D1:=dom E).
apply disjoint__disjdom.
apply wf_lgamma_subst__wf_lenv in Hwflg.
destruct Hwflg as [J1 J2].
apply disjoint_wf_lenv in J2; auto.
rewrite dom__ddom_gdom. fsetdec.
rewrite cv_ec_gen_ctx_abs.
rewrite cv_ec_gen_ctx_labs.
simpl. fsetdec.
rewrite plug_gen_ctx_tabs__gen_exp_tabs; try solve [auto | apply context_apair1; [apply gen_ctx_abs_labs_context; auto | apply expr_tt]].
rewrite plug_gen_ctx_abs__gen_exp_abs; try solve [
auto |
apply gen_ctx_labs_context with (E:=E); auto |
apply fv_ec_gen_ctx_labs_hole |
apply disjdom_eq with (D1:=dom lE); try solve [
apply disjoint__disjdom;
apply typing_regular in Htyping;
destruct Htyping as [_ [J [_ _] ]];
apply disjoint_wf_lenv in J; auto |
rewrite cv_ec_gen_ctx_labs_hole; fsetdec]
].
rewrite plug_gen_ctx_labs__gen_exp_labs with (E:=E); simpl; try solve [auto | apply disjdom_nil_1].
(* Step 2: discharge the three layers of redexes — linear applications,
   ordinary applications, then type applications — by transitivity. *)
apply bigstep_red__trans with (e':=(gen_exp_lapp (rev lgsubst) (gen_exp_labs (apply_delta_subst_lenv dsubst lE) (apply_gamma_subst (rev gsubst) (apply_delta_subst (rev dsubst) e))))).
apply eval_gen_exp_lapp; auto.
apply wf_lgamma_subst__wf_lgsubst in Hwflg.
apply wf_gsubst_rev; auto.
apply bigstep_red__trans with (e':=(gen_exp_app (rev gsubst) (gen_exp_abs (apply_delta_subst_env dsubst E) (gen_exp_labs (apply_delta_subst_lenv dsubst lE) (apply_delta_subst (rev dsubst) e))))).
apply eval_gen_exp_app; auto.
apply wf_lgamma_subst__wf_gsubst in Hwflg.
apply wf_gsubst_rev; auto.
apply bigstep_red__trans with (e':=gen_exp_abs (apply_delta_subst_env dsubst E) (gen_exp_labs (apply_delta_subst_lenv dsubst lE) (apply_delta_subst (rev dsubst) e))).
(* commute dsubst with the generated abstractions *)
assert (apply_delta_subst dsubst (gen_exp_abs E (gen_exp_labs lE e)) =
(gen_exp_abs (apply_delta_subst_env dsubst E)
(gen_exp_labs (apply_delta_subst_lenv dsubst lE)
(apply_delta_subst (rev dsubst) e)))) as EQ.
simpl_commut_subst.
assert (J:=Hwflg).
apply wf_lgamma_subst__wf_dsubst in J.
rewrite <- gen_exp_abs_dsubst_commute; auto.
rewrite <- gen_exp_labs_dsubst_commute; auto.
rewrite <- apply_delta_subst_rev with (E:=E); auto.
apply wf_lgamma_subst__wf_subst in Hwflg. decompose [and] Hwflg; auto.
rewrite <- EQ.
assert (J:=Hwflg).
apply wf_lgamma_subst__wf_subst in J. destruct J as [J1 J2].
apply eval_gen_exp_tabs_list with (E:=E) (dsubst:=dsubst); auto.
apply gen_exp_abs_expr; auto.
apply gen_exp_labs_expr with (E:=E); auto.
apply bigstep_red_refl; auto.
(* commute gsubst with the generated linear abstractions *)
assert (apply_gamma_subst gsubst (gen_exp_labs (apply_delta_subst_lenv dsubst lE) (apply_delta_subst (rev dsubst) e)) =
gen_exp_labs (apply_delta_subst_lenv dsubst lE) (apply_gamma_subst (rev gsubst) (apply_delta_subst (rev dsubst) e))) as EQ.
assert (J:=Hwflg). apply wf_lgamma_subst__wf_gsubst in J.
rewrite <- gen_exp_labs_gsubst_commute; auto.
rewrite <- apply_gamma_subst_rev with (E:=E) (dsubst:=dsubst); auto.
apply wf_lgamma_subst__wf_subst in Hwflg. decompose [and] Hwflg; auto.
apply wf_lgamma_subst_disjoint in Hwflg.
destruct Hwflg as [_ [_ [_ [_ [_ H]]]]].
assert (J':=@apply_delta_subst_lenv_dom dsubst lE).
solve_uniq.
rewrite <- EQ.
assert (J:=Hwflg).
apply wf_lgamma_subst__wf_subst in J. destruct J as [J1 J2].
apply eval_gen_exp_abs_list with (dsubst:=dsubst) (gsubst:=gsubst); auto.
apply wf_gamma_subst_dsubst_id; auto.
apply gen_exp_labs_expr with (E:=apply_delta_subst_env dsubst E); auto.
apply wf_lenv_dsubst_id ; auto.
apply expr_preserved_under_dsubst; auto.
apply wf_dsubst_rev.
apply wf_delta_subst__wf_dsubst in J2; auto.
(* Step 3: finish with the linear-abstraction layer. *)
assert (J:=Hwflg).
apply wf_lgamma_subst__wf_subst in J. destruct J as [J1 J2].
apply wf_lgamma_subst__wf_subst in JJ. destruct JJ as [J3 J4].
rewrite apply_gamma_subst_rev with (E:=E) (dsubst:=dsubst) (gsubst:=gsubst); auto.
rewrite apply_delta_subst_rev with (E:=E) (dsubst:=dsubst); auto.
apply eval_gen_exp_labs_list with (E:=apply_delta_subst_env dsubst E) (dsubst:=dsubst) (gsubst:=gsubst); auto.
apply wf_lgamma_subst_dsubst_id; auto.
apply expr_preserved_under_gsubst; auto.
apply wf_gsubst_rev.
apply wf_gamma_subst__wf_gsubst in J1; auto.
apply expr_preserved_under_dsubst; auto.
apply wf_dsubst_rev.
apply wf_delta_subst__wf_dsubst in J2; auto.
apply gen_ctx_abs_context with (E:=E); auto.
rewrite fv_ec_gen_ctx_labs_hole. fsetdec.
apply gen_ctx_labs_context with (E:=E); auto.
Qed.
(* One-step reduction is a congruence with respect to plugging into a
   call-by-name evaluation context. *)
Lemma hole_red : forall C e e',
red e e' ->
context C ->
cbn_ctx C ->
red (plug C e) (plug C e').
Proof.
intros C ee ee' Hred Hcontext HvC.
induction Hcontext; simpl; try solve [auto | inversion HvC | destruct HvC as [J1 J2]; eauto].
(* let-context case needs the explicit congruence rule *)
simpl in HvC.
apply red_let_cong; auto.
apply expr_let with (L:=L); auto.
apply IHHcontext in HvC. auto.
Qed.
(* Big-step (multi-step) version of hole_red: plugging into a CBN
   context preserves multi-step reduction. *)
Lemma hole_bigstep_red : forall e e' C,
bigstep_red e e' ->
context C ->
cbn_ctx C ->
bigstep_red (plug C e) (plug C e').
Proof.
intros e e' C Hred Hcontext HvC.
induction Hred; auto.
apply bigstep_red_trans with (e':=plug C e'); auto using hole_red.
Qed.
(* A call-by-name evaluation context captures no variables: its set of
   captured variables (cv_ec) is empty. *)
Lemma cbn_ctx_cv_ec : forall C,
cbn_ctx C ->
cv_ec C [=] {}.
Proof.
induction C; intros HvC; simpl in *;
try solve [inversion HvC |
destruct HvC as [J1 J2]; rewrite (@IHC J1); fsetdec |
destruct HvC as [J1 J2]; rewrite (@IHC J2); fsetdec |
rewrite (@IHC HvC); fsetdec |
fsetdec
].
Qed.
(* Observational (contextual) equivalence implies CIU equivalence:
   any closing substitution instance can be simulated by wrapping the
   CIU context around the substitution-as-context from
   eval_from_subst_to_ctx and using contextual equivalence. *)
Lemma F_observational_eq__F_ciu_eq : forall E lE e e' t,
F_observational_eq E lE e e' t ->
F_ciu_eq E lE e e' t.
Proof.
intros E lE e e' t Hob.
destruct Hob as [Htyp [Htyp' J]].
split; auto.
split; auto.
intros dsubst gsubst lgsubst Hwflg.
assert (JJ:=Hwflg).
split.
apply typing_subst with (E:=E) (D:=lE); auto.
split.
apply typing_subst with (E:=E) (D:=lE); auto.
intros C Contexting HvC.
assert (wf_delta_subst E dsubst) as Wfd.
apply wf_lgamma_subst__wf_subst in Hwflg. destruct Hwflg. auto.
rewrite apply_delta_subst_typ_rev with (E:=E) in Contexting; auto.
assert (wf_typ E t) as J1. auto.
assert (J2:=@wf_from_subst_to_ctx E lE dsubst gsubst lgsubst t JJ J1).
assert (J3:=@eval_from_subst_to_ctx E lE dsubst gsubst lgsubst e t Hwflg Htyp).
assert (J4:=@eval_from_subst_to_ctx E lE dsubst gsubst lgsubst e' t Hwflg Htyp').
(* compose C with the substitution context to obtain a single closing
   context over E, lE at type Two *)
assert (contexting E lE t (plugC C (( gen_ctx_lapp (rev lgsubst) (gen_ctx_app (rev gsubst) (gen_ctx_tapp (rev dsubst) (gen_ctx_tabs E (gen_ctx_abs E (gen_ctx_labs lE ctx_hole)))))))) nil nil Two) as J5.
apply contexting_plugC_contexting with (E':=nil) (D':=nil) (T':=apply_delta_subst_typ (rev dsubst) t); auto.
apply disjdom_sym_1.
apply disjdom_eq with (D1:={}).
apply disjdom_nil_1.
rewrite cbn_ctx_cv_ec; auto.
apply fv_ec_contexting_sub in Contexting.
clear - Contexting. fsetdec.
apply J in J5.
split.
apply contexting_plug_typing with (E:=nil) (D:=nil) (T:=apply_delta_subst_typ (rev dsubst) t); auto.
rewrite <- apply_delta_subst_typ_rev with (E:=E); auto.
apply typing_subst with (E:=E) (D:=lE); auto.
split.
apply contexting_plug_typing with (E:=nil) (D:=nil) (T:=apply_delta_subst_typ (rev dsubst) t); auto.
rewrite <- apply_delta_subst_typ_rev with (E:=E); auto.
apply typing_subst with (E:=E) (D:=lE); auto.
(* transfer the observation from the composed context to C via
   normalization of the plugged big-step reductions *)
destruct J5 as [J5 [J6 [[J7 J8] | [J7 J8]]]].
left.
split.
apply bigstep_red_normalization with (u:=plug (plugC C (( gen_ctx_lapp (rev lgsubst) (gen_ctx_app (rev gsubst) (gen_ctx_tapp (rev dsubst) (gen_ctx_tabs E (gen_ctx_abs E (gen_ctx_labs lE ctx_hole)))))))) e) (t:=Two); auto.
rewrite plug_plugC.
apply hole_bigstep_red; auto.
apply disjdom_eq with (D1:={}).
apply disjdom_nil_1.
rewrite cbn_ctx_cv_ec; auto.
apply bigstep_red_normalization with (u:=plug (plugC C (( gen_ctx_lapp (rev lgsubst) (gen_ctx_app (rev gsubst) (gen_ctx_tapp (rev dsubst) (gen_ctx_tabs E (gen_ctx_abs E (gen_ctx_labs lE ctx_hole)))))))) e') (t:=Two); auto.
rewrite plug_plugC.
apply hole_bigstep_red; auto.
apply disjdom_eq with (D1:={}).
apply disjdom_nil_1.
rewrite cbn_ctx_cv_ec; auto.
right.
split.
apply bigstep_red_normalization with (u:=plug (plugC C (( gen_ctx_lapp (rev lgsubst) (gen_ctx_app (rev gsubst) (gen_ctx_tapp (rev dsubst) (gen_ctx_tabs E (gen_ctx_abs E (gen_ctx_labs lE ctx_hole)))))))) e) (t:=Two); auto.
rewrite plug_plugC.
apply hole_bigstep_red; auto.
apply disjdom_eq with (D1:={}).
apply disjdom_nil_1.
rewrite cbn_ctx_cv_ec; auto.
apply bigstep_red_normalization with (u:=plug (plugC C (( gen_ctx_lapp (rev lgsubst) (gen_ctx_app (rev gsubst) (gen_ctx_tapp (rev dsubst) (gen_ctx_tabs E (gen_ctx_abs E (gen_ctx_labs lE ctx_hole)))))))) e') (t:=Two); auto.
rewrite plug_plugC.
apply hole_bigstep_red; auto.
apply disjdom_eq with (D1:={}).
apply disjdom_nil_1.
rewrite cbn_ctx_cv_ec; auto.
Qed.
(* CIU equivalence is reflexive on well-typed terms. *)
Lemma F_ciu_eq__refl : forall E lE e t,
typing E lE e t ->
F_ciu_eq E lE e e t.
Proof.
intros E lE e t Typ.
split; auto.
split; auto.
intros dsubst gsubst lgsubst Hwflg.
apply F_observational_eq__F_nobservational_eq.
apply F_observational_eq__refl; auto.
apply typing_subst with (E:=E) (D:=lE); auto.
Qed.
(* CIU equivalence is symmetric. *)
Lemma F_ciu_eq__sym : forall E lE e e' t,
F_ciu_eq E lE e e' t ->
F_ciu_eq E lE e' e t.
Proof.
intros E lE e e' t Hciu.
destruct Hciu as [Typ [Typ' J]].
split; auto.
split; auto.
intros dsubst gsubst lgsubst Hwflg.
apply F_nobservational_eq__sym; auto.
Qed.
(* CIU equivalence is transitive. *)
Lemma F_ciu_eq__trans : forall E lE e e' e'' t,
F_ciu_eq E lE e e' t ->
F_ciu_eq E lE e' e'' t ->
F_ciu_eq E lE e e'' t.
Proof.
intros E lE e e' e'' t Hciu Hciu'.
destruct Hciu as [Typ [Typ' J]].
destruct Hciu' as [_ [Typ'' J']].
split; auto.
split; auto.
intros dsubst gsubst lgsubst Hwflg.
(* transit through the substituted middle term *)
apply F_nobservational_eq__trans with (e':=apply_delta_subst dsubst (apply_gamma_subst gsubst (apply_gamma_subst lgsubst e'))); auto.
Qed.
(* A single reduction step relates a well-typed term and its reduct up
   to CIU equivalence. *)
Lemma F_ciu_eq__beta : forall E lE e e' t,
typing E lE e t ->
red e e' ->
F_ciu_eq E lE e e' t.
Proof.
intros E lE e e' t Typ Red.
split; auto.
split. apply preservation with (e':=e') in Typ; auto.
intros dsubst gsubst lgsubst Hwflg.
apply F_observational_eq__F_nobservational_eq.
apply F_observational_eq__beta.
apply typing_subst with (E:=E) (D:=lE); auto.
(* reduction is stable under the closing substitutions *)
apply red_preserved_under_subst with (E:=E) (D:=lE); auto.
Qed.
(* Multi-step version of F_ciu_eq__beta, by induction on the reduction
   sequence, chaining with reflexivity and transitivity. *)
Lemma F_ciu_eq__mbeta : forall E lE e e' t,
typing E lE e t ->
bigstep_red e e' ->
F_ciu_eq E lE e e' t.
Proof.
intros E lE e e' t Typ Red.
induction Red.
apply F_ciu_eq__refl; auto.
apply F_ciu_eq__trans with (e':=e').
apply F_ciu_eq__beta; auto.
apply IHRed.
apply preservation with (e':=e') in Typ; auto.
Qed.
(* Splitting a well-formed lgamma substitution along a linear-context
   split: if lE splits into lE1 and lE2, then lgsubst splits into two
   well-formed pieces lgsubst1, lgsubst2 covering lE1 and lE2. *)
Lemma wf_lgamma_subst_split : forall E lE dsubst gsubst lgsubst lE1 lE2 E',
wf_lgamma_subst E lE dsubst gsubst lgsubst ->
lenv_split (E'++E) lE1 lE2 lE ->
exists lgsubst1, exists lgsubst2,
lgamma_subst_split E lE dsubst gsubst lgsubst1 lgsubst2 lgsubst /\
wf_lgamma_subst E lE1 dsubst gsubst lgsubst1 /\
wf_lgamma_subst E lE2 dsubst gsubst lgsubst2.
Proof.
intros E lE dsubst gsubst lgsubst lE1 lE2 E' Hwflg Hsplit.
generalize dependent lE1. generalize dependent lE2. generalize dependent E'.
(wf_lgamma_subst_cases (induction Hwflg) Case); intros.
Case "wf_lgamma_subst_empty".
(* empty split: both pieces are empty *)
exists gamma_nil. exists gamma_nil.
inversion Hsplit; subst.
repeat (split; auto).
Case "wf_lgamma_subst_sval".
(* non-linear binding: it does not affect the linear split; shift it
   into the E' prefix and recurse *)
assert (J:=Hsplit).
rewrite_env ((E'++[(x, bind_typ T)])++E) in Hsplit.
apply IHHwflg in Hsplit. clear IHHwflg.
destruct Hsplit as [lgsubst1 [lgsubst2 [J1 [J2 J3]]]].
exists lgsubst1. exists lgsubst2.
split.
apply lgamma_subst_split_nonlin_weakening_tail; auto.
split.
apply wf_lgamma_subst_sval; auto.
apply dom_lenv_split in J.
rewrite J in H0; auto.
apply wf_lgamma_subst_sval; auto.
apply dom_lenv_split in J.
rewrite J in H0; auto.
Case "wf_lgamma_subst_slval".
(* linear binding: it goes to whichever side the split sends it *)
inversion Hsplit; subst.
SCase "lenv_split_left".
assert (J:=H6).
apply IHHwflg in H6. clear IHHwflg.
destruct H6 as [lgsubst1 [lgsubst2 [J1 [J2 J3]]]].
exists ([(x,e)]++lgsubst1). exists lgsubst2.
split.
apply lgamma_subst_split_left; auto.
split; auto.
apply wf_lgamma_subst_slval; auto.
apply dom_lenv_split in J.
rewrite J in H0; auto.
SCase "lenv_split_right".
assert (J:=H6).
apply IHHwflg in H6. clear IHHwflg.
destruct H6 as [lgsubst1 [lgsubst2 [J1 [J2 J3]]]].
exists lgsubst1. exists ([(x,e)]++lgsubst2).
split.
apply lgamma_subst_split_right; auto.
split; auto.
apply wf_lgamma_subst_slval; auto.
apply dom_lenv_split in J.
rewrite J in H0; auto.
Case "wf_lgamma_subst_skind".
(* kind binding: like sval, absorbed into the E' prefix *)
assert (J:=Hsplit).
assert (K:=Hwflg).
apply wf_lgamma_subst__nfv with (x:=X) in K; auto.
rewrite_env ((E'++[(X, bind_kn)])++E) in Hsplit.
apply IHHwflg in Hsplit. clear IHHwflg.
destruct Hsplit as [lgsubst1 [lgsubst2 [J1 [J2 J3]]]].
exists lgsubst1. exists lgsubst2.
split.
apply lgamma_subst_split_nonlin_weakening_typ_tail; auto.
split.
apply wf_lgamma_subst_skind; auto.
apply dom_lenv_split in J.
rewrite J in H0; auto.
apply wf_lgamma_subst_skind; auto.
apply dom_lenv_split in J.
rewrite J in H0; auto.
Qed.
(* Congruence of CIU equivalence for application: equivalent functions
   applied to the same argument remain equivalent, with the linear
   context split between function and argument. *)
Lemma F_ciu_eq__congr_app : forall E lE1 lE2 lE e1 e1' e2 t1 t2,
F_ciu_eq E lE1 e1 e1' (typ_arrow t1 t2) ->
value e1' ->
typing E lE2 e2 t1 ->
lenv_split E lE1 lE2 lE ->
F_ciu_eq E lE (exp_app e1 e2) (exp_app e1' e2) t2.
Proof.
intros E lE1 lE2 lE e1 e1' e2 t1 t2 Hciu1 Hv1' Typ2 Split.
destruct Hciu1 as [Typ1 [Typ1' Heq1]].
split; eauto.
split; eauto.
intros dsubst gsubst lgsubst Hwflg.
(* split the linear substitution along the linear-context split *)
apply wf_lgamma_subst_split with (lE1:=lE1) (lE2:=lE2) (E':=nil) in Hwflg; auto.
destruct Hwflg as [lgsubst1 [lgsubst2 [Hsplit [Hwflg1 Hwflg2]]]].
(* push the substitutions through the application on the left term *)
assert (apply_delta_subst dsubst
(apply_gamma_subst gsubst
(apply_gamma_subst lgsubst (exp_app e1 e2))
) =
apply_delta_subst dsubst
(apply_gamma_subst gsubst
(exp_app
(apply_gamma_subst lgsubst1 e1)
(apply_gamma_subst lgsubst2 e2)
)
)
) as EQ.
simpl_commut_subst in *.
rewrite lgamma_subst_split_subst' with (lgsubst1:=lgsubst1) (lgsubst2:=lgsubst2) (E:=E) (lE:=lE) (dsubst:=dsubst) (gsubst:=gsubst) (lgsubst:=lgsubst); auto.
rewrite lgamma_subst_split_subst with (lgsubst1:=lgsubst1) (lgsubst2:=lgsubst2) (E:=E) (lE:=lE) (dsubst:=dsubst) (gsubst:=gsubst) (lgsubst:=lgsubst); auto.
rewrite lgamma_subst_split_shuffle2 with (lgsubst:=lgsubst) (lgsubst1:=lgsubst1) (E:=E) (lE:=lE) ; auto.
apply typing_subst with (dsubst:=dsubst) (gsubst:=gsubst) (lgsubst:=lgsubst1) in Typ1; try solve [auto | apply wfv_lgamma_subst__wf_lgamma_subst; auto].
apply typing_subst with (dsubst:=dsubst) (gsubst:=gsubst) (lgsubst:=lgsubst2) in Typ2; try solve [auto | apply wfv_lgamma_subst__wf_lgamma_subst; auto].
erewrite gamma_subst_closed_exp; eauto.
rewrite lgamma_subst_split_shuffle1 with (lgsubst:=lgsubst) (lgsubst2:=lgsubst2) (E:=E) (lE:=lE) (e:=apply_gamma_subst lgsubst2 e2) ; auto.
erewrite gamma_subst_closed_exp with
(e:=apply_delta_subst dsubst
(apply_gamma_subst gsubst
(apply_gamma_subst lgsubst2 e2))
); eauto.
repeat(rewrite EQ). clear EQ.
(* same commutation for the right term *)
assert (apply_delta_subst dsubst
(apply_gamma_subst gsubst
(apply_gamma_subst lgsubst (exp_app e1' e2))
) =
apply_delta_subst dsubst
(apply_gamma_subst gsubst
(exp_app
(apply_gamma_subst lgsubst1 e1')
(apply_gamma_subst lgsubst2 e2)
)
)
) as EQ.
simpl_commut_subst in *.
rewrite lgamma_subst_split_subst' with (lgsubst1:=lgsubst1) (lgsubst2:=lgsubst2) (E:=E) (lE:=lE) (dsubst:=dsubst) (gsubst:=gsubst) (lgsubst:=lgsubst); auto.
rewrite lgamma_subst_split_subst with (lgsubst1:=lgsubst1) (lgsubst2:=lgsubst2) (E:=E) (lE:=lE) (dsubst:=dsubst) (gsubst:=gsubst) (lgsubst:=lgsubst); auto.
rewrite lgamma_subst_split_shuffle2 with (lgsubst:=lgsubst) (lgsubst1:=lgsubst1) (E:=E) (lE:=lE) ; auto.
apply typing_subst with (dsubst:=dsubst) (gsubst:=gsubst) (lgsubst:=lgsubst1) in Typ1'; try solve [auto | apply wfv_lgamma_subst__wf_lgamma_subst; auto].
apply typing_subst with (dsubst:=dsubst) (gsubst:=gsubst) (lgsubst:=lgsubst2) in Typ2; try solve [auto | apply wfv_lgamma_subst__wf_lgamma_subst; auto].
erewrite gamma_subst_closed_exp; eauto.
rewrite lgamma_subst_split_shuffle1 with (lgsubst:=lgsubst) (lgsubst2:=lgsubst2) (E:=E) (lE:=lE) (e:=apply_gamma_subst lgsubst2 e2) ; auto.
erewrite gamma_subst_closed_exp with
(e:=apply_delta_subst dsubst
(apply_gamma_subst gsubst
(apply_gamma_subst lgsubst2 e2))
); eauto.
repeat(rewrite EQ). clear EQ.
(* conclude by the application-congruence of the closed relation *)
assert (J:=Hwflg1).
apply Heq1 in Hwflg1; auto.
simpl_commut_subst in *.
apply F_nobservational_eq__congr_app with (lE1:=nil) (lE2:=nil) (t1:=apply_delta_subst_typ dsubst t1); auto.
eapply delta_gamma_lgamma_subst_value; eauto.
apply wf_lgamma_subst__wf_subst in J. destruct J; auto.
apply typing_subst with (E:=E)(D:=lE2); auto.
Qed.
(* Congruence of CIU equivalence for type application: equivalent
   polymorphic terms applied to the same well-formed type remain
   equivalent at the instantiated type. *)
Lemma F_ciu_eq__congr_tapp : forall E lE e1 e1' t2 t,
F_ciu_eq E lE e1 e1' (typ_all t) ->
wf_typ E t2 ->
F_ciu_eq E lE (exp_tapp e1 t2) (exp_tapp e1' t2) (open_tt t t2).
Proof.
intros E lE e1 e1' t2 t Heq Wft2.
destruct Heq as [Typ [Typ' Heq]].
split; eauto.
split; eauto.
intros dsubst gsubst lgsubst Hwflg.
simpl_commut_subst in *.
assert (J:=Hwflg).
apply wf_lgamma_subst__wf_subst in J.
destruct J as [_ J].
(* commute dsubst with the type opening before using the hypothesis *)
rewrite commut_delta_subst_open_tt with (dE:=E); auto.
apply Heq in Hwflg; auto.
simpl_commut_subst in *.
apply wft_subst with (dsubst:=dsubst) in Wft2; auto.
apply F_nobservational_eq__congr_tapp; auto.
Qed.
(* Inversion-style congruence: CIU-equivalent additive pairs have
   CIU-equivalent first components. *)
Lemma F_ciu_eq__congr_fst : forall E lE e1 e1' e2 e2' t1 t2,
F_ciu_eq E lE (exp_apair e1 e2) (exp_apair e1' e2') (typ_with t1 t2) ->
F_ciu_eq E lE e1 e1' t1.
Proof.
intros E lE e1 e1' e2 e2' t1 t2 Heq.
destruct Heq as [Typ [Typ' Heq]].
split.
inversion Typ; subst; auto.
split.
inversion Typ'; subst; auto.
intros dsubst gsubst lgsubst Hwflg.
apply Heq in Hwflg; auto.
simpl_commut_subst in *.
apply F_nobservational_eq__congr_fst in Hwflg; auto.
Qed.
(* Symmetric to F_ciu_eq__congr_fst: CIU-equivalent additive pairs
   have CIU-equivalent second components. *)
Lemma F_ciu_eq__congr_snd : forall E lE e1 e1' e2 e2' t1 t2,
F_ciu_eq E lE (exp_apair e1 e2) (exp_apair e1' e2') (typ_with t1 t2) ->
F_ciu_eq E lE e2 e2' t2.
Proof.
intros E lE e1 e1' e2 e2' t1 t2 Heq.
destruct Heq as [Typ [Typ' Heq]].
split.
inversion Typ; subst; auto.
split.
inversion Typ'; subst; auto.
intros dsubst gsubst lgsubst Hwflg.
apply Heq in Hwflg; auto.
simpl_commut_subst in *.
apply F_nobservational_eq__congr_snd in Hwflg; auto.
Qed.
(* Inversion congruence for the exponential: CIU-equivalent banged
   terms have CIU-equivalent bodies. *)
Lemma F_ciu_eq__congr_bang : forall E lE e1 e1' t1,
F_ciu_eq E lE (exp_bang e1) (exp_bang e1') (typ_bang t1) ->
F_ciu_eq E lE e1 e1' t1.
Proof.
intros E lE e1 e1' t1 Heq.
destruct Heq as [Typ [Typ' Heq]].
split.
inversion Typ; subst; auto.
split.
inversion Typ'; subst; auto.
intros dsubst gsubst lgsubst Hwflg.
apply Heq in Hwflg; auto.
simpl_commut_subst in *.
apply F_nobservational_eq__congr_bang in Hwflg; auto.
Qed.
(* Induction predicate (indexed by type size n) stating that logical
   relatedness of terms respects CIU equivalence on both sides. *)
Definition P_F_related_terms__respect_for_ciueq (n:nat) :=
forall t E rsubst dsubst dsubst' e1 e2 e1' e2',
typ_size t = n ->
F_Rsubst E rsubst dsubst dsubst' ->
F_related_terms t rsubst dsubst dsubst' e1 e2 ->
F_ciu_eq nil nil e1 e1' (apply_delta_subst_typ dsubst t) ->
F_ciu_eq nil nil e2 e2' (apply_delta_subst_typ dsubst' t) ->
F_related_terms t rsubst dsubst dsubst' e1' e2'.
(* NOTE(review): this is an unproved Axiom, not a Lemma — it assumes
   that every relation R bound in a well-formed rho substitution is
   closed under CIU equivalence on both arguments. Any development
   depending on this file inherits this assumption. *)
Axiom Rel__respect_for_ciueq : forall E rsubst a R e1 e1' t1 e2 e2' t2,
wf_rho_subst E rsubst ->
binds a R rsubst ->
F_ciu_eq nil nil e1 e1' t1 ->
F_ciu_eq nil nil e2 e2' t2 ->
R e1 e2 ->
R e1' e2'.
(* Main lemma (inner form): the logical relation respects CIU-equivalence,
   proved by well-founded induction on the size of the type.  In every type
   case the proof (1) strongly normalizes the replacement terms [e1'], [e2']
   to values [v1'], [v2']; (2) transports the CIU-equivalences from terms to
   their values via the mbeta/sym/trans laws; (3) inverts the value relation
   at the given type constructor and rebuilds it for the new values, using
   the induction hypothesis at the structurally smaller component types and
   the axiom [Rel__respect_for_ciueq] at free type variables. *)
Lemma _F_related_terms__respect_for_ciueq: forall n, P_F_related_terms__respect_for_ciueq n.
Proof.
intro n.
(* well-founded induction on n = typ_size t *)
apply lt_wf_ind. clear n.
intros n H.
unfold P_F_related_terms__respect_for_ciueq in *.
intros t E rsubst dsubst dsubst' e1 e2 e1' e2' Hsize HRsubst Hrel Hctx1 Hctx2.
destruct Hrel as [v1 [v2 [Htypingv1 [Htypingv2 [Hn_e1v1 [Hn_e2v2 Hrel]]]]]].
(* the replacement terms are well typed, hence normalize to values *)
assert (exists v1', normalize e1' v1') as Hn_e1'v1'.
apply strong_normalization with (t:=apply_delta_subst_typ dsubst t); auto.
destruct Hctx1 as [J1 [J2 J3]]; auto.
assert (exists v2', normalize e2' v2') as Hn_e2'v2'.
apply strong_normalization with (t:=apply_delta_subst_typ dsubst' t); auto.
destruct Hctx2 as [J1 [J2 J3]]; auto.
destruct Hn_e1'v1' as [v1' Hn_e1'v1'].
destruct Hn_e2'v2' as [v2' Hn_e2'v2'].
exists v1'. exists v2'.
split.
destruct Hctx1 as [J1 [J2 J3]]; auto.
split.
destruct Hctx2 as [J1 [J2 J3]]; auto.
split; auto.
split; auto.
(* transport CIU-equivalence from the original terms to their values:
   v1 =ciu e1 =ciu e1' =ciu v1' (and likewise on the primed side) *)
assert (F_ciu_eq nil nil v1 v1' (apply_delta_subst_typ dsubst t)) as Hctxv1.
apply F_ciu_eq__trans with (e':=e1).
apply F_ciu_eq__sym.
apply F_ciu_eq__mbeta.
destruct Hctx1 as [J1 [J2 J3]]; auto.
destruct Hn_e1v1; auto.
apply F_ciu_eq__trans with (e':=e1'); auto.
apply F_ciu_eq__mbeta.
destruct Hctx1 as [J1 [J2 J3]]; auto.
destruct Hn_e1'v1'; auto.
assert (F_ciu_eq nil nil v2 v2' (apply_delta_subst_typ dsubst' t)) as Hctxv2.
apply F_ciu_eq__trans with (e':=e2).
apply F_ciu_eq__sym.
apply F_ciu_eq__mbeta.
destruct Hctx2 as [J1 [J2 J3]]; auto.
destruct Hn_e2v2; auto.
apply F_ciu_eq__trans with (e':=e2'); auto.
apply F_ciu_eq__mbeta.
destruct Hctx2 as [J1 [J2 J3]]; auto.
destruct Hn_e2'v2'; auto.
(* case analysis on the head constructor of the type *)
(typ_cases (destruct t) Case).
Case "typ_bvar". (*bvar*)
apply F_related_values_bvar_leq in Hrel; auto.
Case "typ_fvar". (* fvar *)
(* free type variable: the interpretation is the stored relation R0,
   which respects CIU-equivalence by the axiom above *)
apply F_related_values_fvar_leq in Hrel.
apply F_related_values_fvar_req.
unfold In_rel in Hrel.
destruct Hrel as [R0 [Hb [Hv1 [Hv2' Hr]]]]; subst; simpl.
exists (R0).
simpl_env.
repeat(split; auto).
destruct Hn_e1'v1'; auto.
destruct Hn_e2'v2'; auto.
apply Rel__respect_for_ciueq with (e1:=v1) (e2:=v2) (t1:=apply_delta_subst_typ dsubst a) (t2:=apply_delta_subst_typ dsubst' a) (a:=a) (rsubst:=rsubst) (E:=E); auto.
apply F_Rsubst__wf_subst in HRsubst.
decompose [prod] HRsubst; auto.
Case "typ_arrow". (* arrow *)
(* arrow: given related arguments x, x', relate the applications of the
   new values by the IH at the (smaller) result type t2 *)
apply F_related_values_arrow_leq in Hrel.
apply F_related_values_arrow_req.
destruct Hrel as [Hv1 [Hv2 Harrow]]; subst.
repeat(split; auto; simpl_env in *).
destruct Hn_e1'v1'; auto.
destruct Hn_e2'v2'; auto.
intros x x' Htypingx Htypingx' Harrow'.
destruct (@Harrow x x') as [u [u' [Hnorm_vxu [Hnorm_v'x'u' Hrel_wft1]]]]; auto.
assert (typ_size t2 < typ_size (typ_arrow t1 t2)) as G1. simpl. omega.
apply H with (e1:=exp_app v1 x) (e2:=exp_app v2 x') (t:=t2) (E:=E)
(e1':=exp_app v1' x) (e2':=exp_app v2' x')
(rsubst:=rsubst) (dsubst:=dsubst) (dsubst':=dsubst') in G1; auto.
destruct G1 as [v3 [v3' [Typing3 [Typing3' [Hn_v3 [Hn_v3' G1]]]]]].
exists v3. exists v3'.
split; auto.
exists u. exists u'.
split.
apply typing_app with (D1:=nil) (D2:=nil) (T1:=apply_delta_subst_typ dsubst t1); auto.
simpl_commut_subst in Htypingv1.
apply preservation_normalization with (e:=e1); auto.
split.
apply typing_app with (D1:=nil) (D2:=nil) (T1:=apply_delta_subst_typ dsubst' t1); auto.
simpl_commut_subst in Htypingv2.
apply preservation_normalization with (e:=e2); auto.
split; auto.
(* CIU-equivalence is a congruence for application *)
simpl_commut_subst in Hctxv1.
apply F_ciu_eq__congr_app with (lE1:=nil) (lE2:=nil) (t1:=apply_delta_subst_typ dsubst t1); auto.
destruct Hn_e1'v1'; auto.
simpl_commut_subst in Hctxv2.
apply F_ciu_eq__congr_app with (lE1:=nil) (lE2:=nil) (t1:=apply_delta_subst_typ dsubst' t1); auto.
destruct Hn_e2'v2'; auto.
Case "typ_all". (* all *)
(* universal type: for each fresh X and admissible (t2, t2', R), relate
   the type applications of the new values by the IH at open_tt t X,
   whose size equals typ_size t and is smaller than typ_size (typ_all t) *)
apply F_related_values_all_leq in Hrel.
apply F_related_values_all_req.
destruct Hrel as [Hv1 [Hv2 [L Hall]]]; subst.
repeat(split; auto; simpl_env in *).
destruct Hn_e1'v1'; auto.
destruct Hn_e2'v2'; auto.
exists (L `union` fv_env E).
intros X t2 t2' R Fr Hwfr Hfv.
assert (X `notin` L) as XnL. auto.
destruct (@Hall X t2 t2' R XnL) as [w0 [w'0 [Hnorm_vt2w0 [Hnorm_v't2'w'0 Hrel_wft]]]]; auto.
assert (typ_size (open_tt t X) < typ_size (typ_all t)) as G.
simpl. rewrite open_tt_typ_size_eq. omega.
apply H with (e1:=exp_tapp v1 t2) (e2:=exp_tapp v2 t2') (t:=open_tt t X)
(e1':=exp_tapp v1' t2) (e2':=exp_tapp v2' t2') (E:=[(X, bind_kn)]++E)
(rsubst:=[(X, R)]++rsubst) (dsubst:=[(X, t2)]++dsubst) (dsubst':=[(X, t2')]++dsubst') in G; auto.
destruct G as [v3 [v3' [Typing3 [Typing3' [Hn_v3 [Hn_v3' G]]]]]].
exists v3. exists v3'.
split; auto.
apply F_Rsubst_rel; auto.
exists w0. exists w'0.
simpl.
(* rewrite the substituted open type into the instantiated form *)
assert (type t2) as type2.
apply wfr_left_inv in Hwfr.
apply type_from_wf_typ in Hwfr; auto.
rewrite subst_tt_open_tt; auto.
rewrite <- subst_tt_fresh; auto.
simpl.
destruct (X==X); subst; try solve [contradict n; auto].
assert (type t2') as type2'.
apply wfr_right_inv in Hwfr.
apply type_from_wf_typ in Hwfr; auto.
rewrite subst_tt_open_tt; auto.
rewrite <- subst_tt_fresh; auto.
simpl.
destruct (X==X); subst; try solve [contradict n; auto].
assert (J:=HRsubst).
apply F_Rsubst__wf_subst in J.
decompose [prod] J.
rewrite commut_delta_subst_open_tt with (dE:=E); auto.
rewrite delta_subst_closed_typ with (t:=t2); eauto using wfr_left_inv.
rewrite commut_delta_subst_open_tt with (dE:=E); auto.
rewrite delta_subst_closed_typ with (t:=t2'); eauto using wfr_right_inv.
split.
apply typing_tapp; eauto using wfr_left_inv.
simpl_commut_subst in Htypingv1.
apply preservation_normalization with (e:=e1); auto.
split.
apply typing_tapp; eauto using wfr_right_inv.
simpl_commut_subst in Htypingv2.
apply preservation_normalization with (e:=e2); auto.
split; auto.
(* CIU-equivalence is a congruence for type application (left side) *)
simpl.
assert (wf_typ nil t2) as Wft2.
apply wfr_left_inv in Hwfr. auto.
assert (type t2) as type2.
apply wfr_left_inv in Hwfr. auto.
apply type_from_wf_typ in Hwfr; auto.
rewrite subst_tt_open_tt; auto.
rewrite <- subst_tt_fresh; auto.
simpl.
destruct (X==X); subst; try solve [contradict n; auto].
assert (J:=HRsubst).
apply F_Rsubst__wf_subst in J.
decompose [prod] J.
rewrite commut_delta_subst_open_tt with (dE:=E); auto.
rewrite delta_subst_closed_typ with (t:=t2); eauto using wfr_left_inv.
simpl_commut_subst in Hctxv1.
apply F_ciu_eq__congr_tapp; auto.
(* ... and for type application (right side) *)
simpl.
assert (wf_typ nil t2') as Wft2'.
apply wfr_right_inv in Hwfr. auto.
assert (type t2') as type2'.
apply wfr_right_inv in Hwfr.
apply type_from_wf_typ in Hwfr; auto.
rewrite subst_tt_open_tt; auto.
rewrite <- subst_tt_fresh; auto.
simpl.
destruct (X==X); subst; try solve [contradict n; auto].
assert (J:=HRsubst).
apply F_Rsubst__wf_subst in J.
decompose [prod] J.
rewrite commut_delta_subst_open_tt with (dE:=E); auto.
rewrite delta_subst_closed_typ with (t:=t2'); eauto using wfr_right_inv.
simpl_commut_subst in Hctxv2.
apply F_ciu_eq__congr_tapp; auto.
Case "typ_bang". (* bang *)
(* bang: canonical forms give v1' = exp_bang g1, v2' = exp_bang g1';
   relate g1, g1' via the IH at t and the bang congruence *)
apply F_related_values_bang_leq in Hrel.
apply F_related_values_bang_req.
destruct Hrel as [Hv [Hv' [f1 [f1' [Heq [Heq'
[u1 [u1' [Hnorm_f1u1 [Hnorm_f1'u1' Hrel_wft1]]]
]]]]]]]; subst.
repeat(split; auto; simpl_env in *).
destruct Hn_e1'v1'; auto.
destruct Hn_e2'v2'; auto.
assert (exists g1, v1' = exp_bang g1) as J.
simpl_commut_subst in Hctxv1.
assert (J:=Hctxv1). destruct J as [J1 [J2 J3]].
assert (J:=Hn_e1'v1'). destruct J as [J4 J5].
apply canonical_form_bang in J2; auto.
assert (exists g1', v2' = exp_bang g1') as J'.
simpl_commut_subst in Hctxv2.
assert (J':=Hctxv2). destruct J' as [J1 [J2 J3]].
assert (J':=Hn_e2'v2'). destruct J' as [J4 J5].
apply canonical_form_bang in J2; auto.
destruct J as [g1 J].
destruct J' as [g1' J'].
subst.
exists g1. exists g1'.
split; auto.
split; auto.
assert (typ_size t < typ_size (typ_bang t)) as G1. simpl. omega.
apply H with (e1:=f1) (e2:=f1') (t:=t) (E:=E)
(e1':=g1) (e2':=g1')
(rsubst:=rsubst) (dsubst:=dsubst) (dsubst':=dsubst') in G1; auto.
destruct G1 as [v3 [v3' [Typing3 [Typing3' [Hn_v3 [Hn_v3' G1]]]]]].
exists v3. exists v3'.
split; auto.
exists u1. exists u1'.
split.
simpl_commut_subst in Htypingv1.
apply preservation_normalization with (v:=exp_bang f1) in Htypingv1; auto.
inversion Htypingv1; subst; auto.
split.
simpl_commut_subst in Htypingv2.
apply preservation_normalization with (v:=exp_bang f1') in Htypingv2; auto.
inversion Htypingv2; subst; auto.
split; auto.
simpl_commut_subst in Hctxv1.
apply F_ciu_eq__congr_bang in Hctxv1; auto.
simpl_commut_subst in Hctxv2.
apply F_ciu_eq__congr_bang in Hctxv2; auto.
Case "typ_with". (* with *)
(* with: canonical forms give additive pairs; relate each component via
   the IH at t1 / t2 and the fst / snd congruences *)
apply F_related_values_with_leq in Hrel.
apply F_related_values_with_req.
destruct Hrel as [Hv [Hv' [f1 [f2 [f1' [f2' [Heq [Heq'
[[u1 [u1' [Hnorm_f1u1 [Hnorm_f1'u1' Hrel_wft1]]]]
[u2 [u2' [Hnorm_F2u2 [Hnorm_f2'u2' Hrel_wft2]]]]]
]]]]]]]]; subst.
repeat(split; auto; simpl_env in *).
destruct Hn_e1'v1'; auto.
destruct Hn_e2'v2'; auto.
assert (exists g1, exists g2, v1' = exp_apair g1 g2) as J.
simpl_commut_subst in Hctxv1.
assert (J:=Hctxv1). destruct J as [J1 [J2 J3]].
assert (J:=Hn_e1'v1'). destruct J as [J4 J5].
apply canonical_form_with in J2; auto.
assert (exists g1', exists g2', v2' = exp_apair g1' g2') as J'.
simpl_commut_subst in Hctxv2.
assert (J':=Hctxv2). destruct J' as [J1 [J2 J3]].
assert (J':=Hn_e2'v2'). destruct J' as [J4 J5].
apply canonical_form_with in J2; auto.
destruct J as [g1 [g2 J]].
destruct J' as [g1' [g2' J']].
subst.
exists g1. exists g2. exists g1'. exists g2'.
split; auto.
split; auto.
split.
(* first components *)
assert (typ_size t1 < typ_size (typ_with t1 t2)) as G1. simpl. omega.
apply H with (e1:=f1) (e2:=f1') (t:=t1) (E:=E)
(e1':=g1) (e2':=g1')
(rsubst:=rsubst) (dsubst:=dsubst) (dsubst':=dsubst') in G1; auto.
destruct G1 as [v3 [v3' [Typing3 [Typing3' [Hn_v3 [Hn_v3' G1]]]]]].
exists v3. exists v3'.
split; auto.
exists u1. exists u1'.
split.
simpl_commut_subst in Htypingv1.
apply preservation_normalization with (v:=exp_apair f1 f2) in Htypingv1; auto.
inversion Htypingv1; subst; auto.
split.
simpl_commut_subst in Htypingv2.
apply preservation_normalization with (v:=exp_apair f1' f2') in Htypingv2; auto.
inversion Htypingv2; subst; auto.
split; auto.
simpl_commut_subst in Hctxv1.
apply F_ciu_eq__congr_fst in Hctxv1; auto.
simpl_commut_subst in Hctxv2.
apply F_ciu_eq__congr_fst in Hctxv2; auto.
(* second components *)
assert (typ_size t2 < typ_size (typ_with t1 t2)) as G2. simpl. omega.
apply H with (e1:=f2) (e2:=f2') (t:=t2) (E:=E)
(e1':=g2) (e2':=g2')
(rsubst:=rsubst) (dsubst:=dsubst) (dsubst':=dsubst') in G2; auto.
destruct G2 as [v3 [v3' [Typing3 [Typing3' [Hn_v3 [Hn_v3' G2]]]]]].
exists v3. exists v3'.
split; auto.
exists u2. exists u2'.
split.
simpl_commut_subst in Htypingv1.
apply preservation_normalization with (v:=exp_apair f1 f2) in Htypingv1; auto.
inversion Htypingv1; subst; auto.
split.
simpl_commut_subst in Htypingv2.
apply preservation_normalization with (v:=exp_apair f1' f2') in Htypingv2; auto.
inversion Htypingv2; subst; auto.
split; auto.
simpl_commut_subst in Hctxv1.
apply F_ciu_eq__congr_snd in Hctxv1; auto.
simpl_commut_subst in Hctxv2.
apply F_ciu_eq__congr_snd in Hctxv2; auto.
Qed.
(* Wrapper around the size-indexed lemma: instantiate the induction
   predicate at [typ_size t] to obtain the statement without the explicit
   size index. *)
Lemma F_related_terms__respect_for_ciueq : forall t E rsubst dsubst dsubst' e1 e2 e1' e2',
F_Rsubst E rsubst dsubst dsubst' ->
F_related_terms t rsubst dsubst dsubst' e1 e2 ->
F_ciu_eq nil nil e1 e1' (apply_delta_subst_typ dsubst t) ->
F_ciu_eq nil nil e2 e2' (apply_delta_subst_typ dsubst' t) ->
F_related_terms t rsubst dsubst dsubst' e1' e2'.
Proof.
intros t E rsubst dsubst dsubst' e1 e2 e1' e2' HRsubst Hrel Hctx1 Hctx2.
assert (P_F_related_terms__respect_for_ciueq (typ_size t)) as J.
apply (@_F_related_terms__respect_for_ciueq (typ_size t)).
unfold P_F_related_terms__respect_for_ciueq in J.
eapply J; eauto.
Qed.
(* The logical relation on open terms respects CIU-equivalence: if e1 and
   e2 are logically related at t under E;lE, and e1 =ciu e1', e2 =ciu e2',
   then e1' and e2' are logically related.  The bulk of the proof lifts the
   closed-term lemma above: for each choice of closing substitutions the
   CIU-equivalences are specialized to the substituted terms, with the
   typing side conditions discharged by [typing_subst] and the observation
   that the fully substituted terms are closed (so applying the extra
   substitutions dsubst0/gsubst0/lgsubst0 is the identity). *)
Lemma F_logical_related__respect_for_ciueq : forall E lE e1 e2 e1' e2' t,
F_logical_related E lE e1 e2 t ->
F_ciu_eq E lE e1 e1' t ->
F_ciu_eq E lE e2 e2' t ->
F_logical_related E lE e1' e2' t.
Proof.
intros E lE e1 e2 e1' e2' t Hlrel Hciueq Hciueq'.
destruct Hlrel as [Typ1 [Typ2 Hlrel]].
assert (J:=Hciueq).
destruct J as [_ [Typ1' J1]].
assert (J':=Hciueq').
destruct J' as [_ [Typ2' J2]].
split; auto.
split; auto.
intros dsubst dsubst' gsubst gsubst' lgsubst lgsubst' rsubst H_relsubst H_Rsubst.
assert (J:=@Hlrel dsubst dsubst' gsubst gsubst' lgsubst lgsubst' rsubst H_relsubst H_Rsubst).
apply F_related_terms__respect_for_ciueq with
(E:=E)
(e1:=(apply_delta_subst dsubst (apply_gamma_subst gsubst (apply_gamma_subst lgsubst e1))))
(e2:=(apply_delta_subst dsubst' (apply_gamma_subst gsubst' (apply_gamma_subst lgsubst' e2)))); auto.
(* CIU-equivalence of the substituted e1 and e1' in the empty contexts *)
split; auto.
apply typing_subst with (E:=E) (D:=lE); auto.
apply F_related_subst__inversion in H_relsubst.
decompose [prod] H_relsubst; auto.
split; auto.
apply typing_subst with (E:=E) (D:=lE); auto.
apply F_related_subst__inversion in H_relsubst.
decompose [prod] H_relsubst; auto.
intros dsubst0 gsubst0 lgsubst0 Hwflg0.
apply F_related_subst__inversion in H_relsubst.
decompose [prod] H_relsubst; auto.
(* the substituted terms are closed, so the extra substitutions vanish *)
apply typing_subst with (dsubst:=dsubst)(gsubst:=gsubst)(lgsubst:=lgsubst) in Typ1; try solve [auto | apply wfv_lgamma_subst__wf_lgamma_subst; auto].
rewrite gamma_subst_closed_exp with (e:=(apply_delta_subst dsubst (apply_gamma_subst gsubst (apply_gamma_subst lgsubst e1)))) (t:=apply_delta_subst_typ dsubst t); auto.
rewrite gamma_subst_closed_exp with (e:=(apply_delta_subst dsubst (apply_gamma_subst gsubst (apply_gamma_subst lgsubst e1)))) (t:=apply_delta_subst_typ dsubst t); auto.
rewrite delta_subst_closed_exp with (e:=(apply_delta_subst dsubst (apply_gamma_subst gsubst (apply_gamma_subst lgsubst e1)))) (t:=apply_delta_subst_typ dsubst t); auto.
apply typing_subst with (dsubst:=dsubst)(gsubst:=gsubst)(lgsubst:=lgsubst) in Typ1'; try solve [auto | apply wfv_lgamma_subst__wf_lgamma_subst; auto].
rewrite gamma_subst_closed_exp with (e:=(apply_delta_subst dsubst (apply_gamma_subst gsubst (apply_gamma_subst lgsubst e1')))) (t:=apply_delta_subst_typ dsubst t); auto.
rewrite gamma_subst_closed_exp with (e:=(apply_delta_subst dsubst (apply_gamma_subst gsubst (apply_gamma_subst lgsubst e1')))) (t:=apply_delta_subst_typ dsubst t); auto.
rewrite delta_subst_closed_exp with (e:=(apply_delta_subst dsubst (apply_gamma_subst gsubst (apply_gamma_subst lgsubst e1')))) (t:=apply_delta_subst_typ dsubst t); auto.
rewrite delta_subst_closed_typ with (t:=apply_delta_subst_typ dsubst t); auto.
(* CIU-equivalence of the substituted e2 and e2' in the empty contexts *)
split; auto.
apply typing_subst with (E:=E) (D:=lE); auto.
apply F_related_subst__inversion in H_relsubst.
decompose [prod] H_relsubst; auto.
split; auto.
apply typing_subst with (E:=E) (D:=lE); auto.
apply F_related_subst__inversion in H_relsubst.
decompose [prod] H_relsubst; auto.
intros dsubst0 gsubst0 lgsubst0 Hwflg0.
apply F_related_subst__inversion in H_relsubst.
decompose [prod] H_relsubst; auto.
apply typing_subst with (dsubst:=dsubst')(gsubst:=gsubst')(lgsubst:=lgsubst') in Typ2; try solve [auto | apply wfv_lgamma_subst__wf_lgamma_subst; auto].
rewrite gamma_subst_closed_exp with (e:=(apply_delta_subst dsubst' (apply_gamma_subst gsubst' (apply_gamma_subst lgsubst' e2)))) (t:=apply_delta_subst_typ dsubst' t); auto.
rewrite gamma_subst_closed_exp with (e:=(apply_delta_subst dsubst' (apply_gamma_subst gsubst' (apply_gamma_subst lgsubst' e2)))) (t:=apply_delta_subst_typ dsubst' t); auto.
rewrite delta_subst_closed_exp with (e:=(apply_delta_subst dsubst' (apply_gamma_subst gsubst' (apply_gamma_subst lgsubst' e2)))) (t:=apply_delta_subst_typ dsubst' t); auto.
apply typing_subst with (dsubst:=dsubst')(gsubst:=gsubst')(lgsubst:=lgsubst') in Typ2'; try solve [auto | apply wfv_lgamma_subst__wf_lgamma_subst; auto].
rewrite gamma_subst_closed_exp with (e:=(apply_delta_subst dsubst' (apply_gamma_subst gsubst' (apply_gamma_subst lgsubst' e2')))) (t:=apply_delta_subst_typ dsubst' t); auto.
rewrite gamma_subst_closed_exp with (e:=(apply_delta_subst dsubst' (apply_gamma_subst gsubst' (apply_gamma_subst lgsubst' e2')))) (t:=apply_delta_subst_typ dsubst' t); auto.
rewrite delta_subst_closed_exp with (e:=(apply_delta_subst dsubst' (apply_gamma_subst gsubst' (apply_gamma_subst lgsubst' e2')))) (t:=apply_delta_subst_typ dsubst' t); auto.
rewrite delta_subst_closed_typ with (t:=apply_delta_subst_typ dsubst' t); auto.
Qed.
(* CIU-equivalence implies logical relatedness (one half of completeness):
   e is logically related to itself by the fundamental theorem of
   parametricity, and then the right-hand occurrence of e is replaced by
   e' using the closure of the logical relation under CIU-equivalence
   (with the trivial reflexive equivalence on the left). *)
Lemma F_ciu_eq__F_logical_related : forall E lE e e' t,
F_ciu_eq E lE e e' t ->
F_logical_related E lE e e' t.
Proof.
intros E lE e e' t Hciueq.
assert (J:=Hciueq).
destruct J as [Typ [Typ' J]].
assert (Hciueq':=@F_ciu_eq__refl E lE e t Typ).
assert (F_logical_related E lE e e t) as Hrel.
split; auto.
split; auto.
intros dsubst dsubst' gsubst gsubst' lgsubst lgsubst' rsubst H_relsubst H_Rsubst.
(* fundamental theorem: every well-typed term is related to itself *)
apply parametricity with (E:=E) (lE:=lE); auto.
apply F_logical_related__respect_for_ciueq with (e1:=e) (e2:=e); auto.
Qed.
(* Completeness of the logical relation with respect to observational
   equivalence: observational equivalence implies CIU-equivalence, which
   implies logical relatedness. *)
Lemma F_logical_related__complete : forall E lE e e' t,
F_observational_eq E lE e e' t ->
F_logical_related E lE e e' t.
Proof.
intros E lE e e' t Hctx.
apply F_ciu_eq__F_logical_related.
apply F_observational_eq__F_ciu_eq in Hctx; auto.
Qed.
(* CIU-equivalence implies observational equivalence, going through the
   logical relation and its soundness.  Together with
   [F_observational_eq__F_ciu_eq] this shows the two notions coincide. *)
Lemma F_ciu_eq__F_observational_eq : forall E lE e e' t,
F_ciu_eq E lE e e' t ->
F_observational_eq E lE e e' t.
Proof.
intros E lE e e' t Hciu.
apply F_logical_related__sound.
apply F_ciu_eq__F_logical_related; auto.
Qed.
(***************************************************************)
(* Double Negation *)
(***************************************************************)
(* nA = \X. (A->X)->X *)
(* A is an arbitrary (assumed) closed, well-formed type; [dnegA] is its
   "double negation" encoding, the Church-style type forall X. (A->X)->X. *)
Parameter A : typ.
Definition dnegA := (typ_all (typ_arrow (typ_arrow A (typ_bvar 0)) (typ_bvar 0))).
Axiom wftA : wf_typ nil A.
(* Well-formedness of the double-negation type [dnegA] in the empty
   environment, via the cofinite-quantification rule for [typ_all] and
   weakening of the well-formedness of [A]. *)
Lemma wftdnegA : wf_typ nil dnegA.
Proof.
unfold dnegA.
assert (HwftA:=wftA).
apply wf_typ_all with (L:={}).
intros X HfvX.
unfold open_tt. simpl.
assert (J:=HwftA).
apply type_from_wf_typ in J.
(* A is closed, so opening under the binder does not change it *)
rewrite <- open_tt_rec_type; auto.
apply wf_typ_arrow; auto.
apply wf_typ_arrow; auto.
apply wf_typ_weaken_head with (F:=X~bind_kn) in HwftA; auto.
Qed.
Hint Resolve wftA wftdnegA.
(* i = \x: A. \X. \g : A-> X. g x : A -> nA *)
(* Coercion from A into its double negation: takes x and returns the
   polymorphic continuation-applier.  De Bruijn indices: inside the inner
   abstraction, bvar 0 is g and bvar 1 is x. *)
Definition fun_to_dnegA :=
exp_abs
A
(exp_tabs
(exp_abs
(typ_arrow A (typ_bvar 0))
(exp_app (exp_bvar 0) (exp_bvar 1))
)
).
(* j = \h: nA. h[A] (\x:A. x) : nA -> A *)
(* Coercion back from the double negation: instantiate h at A and apply it
   to the identity continuation. *)
Definition fun_from_dnegA :=
exp_abs
dnegA
(exp_app
(exp_tapp (exp_bvar 0) A)
(exp_abs A (exp_bvar 0))
)
.
(* [fun_to_dnegA] has type A -> nA in empty environments.  The proof
   builds the derivation by hand; the linear environment is split so that
   g (x0 below) goes left and x goes right in the application [g x], and
   the well-formedness side conditions are discharged by weakening [A]
   under the fresh kind binding for X. *)
Lemma typing_fun_to_dneg :
typing nil nil fun_to_dnegA (typ_arrow A dnegA).
Proof.
assert (HwftA := wftA).
unfold fun_to_dnegA. unfold dnegA.
apply typing_abs with (L := {}); auto.
intros x Hfvx.
unfold open_ee. simpl.
apply typing_tabs with (L := singleton x).
intros X HfvX.
unfold open_te. unfold open_tt. simpl.
assert (J:=HwftA).
apply type_from_wf_typ in J.
(* A is closed, so type opening leaves it unchanged *)
rewrite <- open_tt_rec_type; auto.
apply typing_abs with (L := singleton x `union` singleton X).
apply wf_typ_arrow; auto.
rewrite_env (nil ++ [(X, bind_kn)] ++ nil).
apply wf_typ_weakening; auto.
intros x0 Hfvx0.
unfold open_ee. simpl.
(* the application g x: split the linear context between g and x *)
apply typing_app with (T1:=A) (D1:=[(x0, lbind_typ (typ_arrow A X))]) (D2:=[(x, lbind_typ A)]); auto.
apply typing_lvar.
rewrite_env ([(x0, lbind_typ (typ_arrow A X))]++nil).
apply wf_lenv_typ; auto.
apply wf_lenv_empty; auto.
rewrite_env ([(X, bind_kn)]++nil).
apply wf_env_kn; auto.
apply wf_typ_arrow; auto.
rewrite_env (nil ++ [(X, bind_kn)] ++ nil).
apply wf_typ_weakening; auto.
apply typing_lvar.
rewrite_env ([(x, lbind_typ A)]++nil).
apply wf_lenv_typ; auto.
apply wf_lenv_empty; auto.
rewrite_env ([(X, bind_kn)]++nil).
apply wf_env_kn; auto.
rewrite_env (nil ++ [(X, bind_kn)] ++ nil).
apply wf_typ_weakening; auto.
rewrite_env ([(x0, lbind_typ (typ_arrow A X))]++[(x, lbind_typ A)]++nil).
rewrite_env ([(x0, lbind_typ (typ_arrow A X))]++nil).
rewrite_env ([(x, lbind_typ A)]++nil).
apply lenv_split_left; auto.
apply lenv_split_right; auto.
apply lenv_split_empty; auto.
rewrite_env ([(X, bind_kn)]++nil).
apply wf_env_kn; auto.
rewrite_env (nil ++ [(X, bind_kn)] ++ nil).
apply wf_typ_weakening; auto.
apply wf_typ_arrow; auto.
rewrite_env (nil ++ [(X, bind_kn)] ++ nil).
apply wf_typ_weakening; auto.
Qed.
(* [fun_from_dnegA] has type nA -> A in empty environments.  The key step
   types the type application h[A], rewriting its result type through
   [open_tt]; the identity continuation \x:A.x is typed in the empty
   linear context, which is split entirely to the left (h's side). *)
Lemma typing_fun_from_dneg :
typing nil nil fun_from_dnegA (typ_arrow dnegA A).
Proof.
assert (HwftA := wftA).
assert (HwftdnegA := wftdnegA).
unfold fun_from_dnegA. unfold dnegA in *.
apply typing_abs with (L := {}); auto.
intros x Hfvx.
unfold open_ee. simpl.
apply typing_app with (T1:=typ_arrow A A) (D1:=([(x, lbind_typ (typ_all (typ_arrow (typ_arrow A 0) 0)))]) ++ nil) (D2:=nil); auto.
(* h[A] : (A->A)->A, obtained by rewriting the opened body type *)
assert (open_tt (typ_arrow (typ_arrow A 0) 0) A = typ_arrow (typ_arrow A A) A) as Heq.
unfold open_tt. simpl.
apply type_from_wf_typ in HwftA.
rewrite <- open_tt_rec_type; auto.
rewrite <- Heq.
apply typing_tapp; auto.
simpl_env.
apply typing_lvar; simpl_env; auto.
assert ([(x, lbind_typ (typ_all (typ_arrow (typ_arrow A 0) 0)))] =
[(x, lbind_typ (typ_all (typ_arrow (typ_arrow A 0) 0)))] ++ nil); auto.
rewrite H. clear H.
apply wf_lenv_typ; auto.
(* the identity continuation \x0:A. x0 *)
apply typing_abs with (L := singleton x); auto.
intros x0 Hfvx0.
unfold open_ee. simpl.
apply typing_lvar; simpl_env.
rewrite_env ([(x0, lbind_typ A)] ++ nil).
apply wf_lenv_typ; auto.
simpl_env.
rewrite_env ([(x, lbind_typ (typ_all (typ_arrow (typ_arrow A 0) 0)))]++nil).
apply lenv_split_left; auto.
Qed.
Hint Resolve typing_fun_to_dneg typing_fun_from_dneg.
(* One direction of the isomorphism A ~ nA: composing the coercions,
   j (i x), is observationally equal to x at type A.  The proof is a chain
   of beta/type-beta reduction steps, each justified as an observational
   equivalence (reduction is included in observational equivalence) and
   glued together by transitivity and the application/tapp congruences:
     j (i x)
   = (i x)[A] (\y:A.y)                               (EQ1: beta for j)
   = (/\X.\g:A->X. g x)[A] (\y:A.y)                  (EQ21: beta for i)
   = (\g:A->A. g x) (\y:A.y)                         (EQ221: type beta)
   = (\y:A.y) x                                      (EQ2221: beta)
   = x                                               (EQ2222: beta) *)
Lemma isomorphism_left : forall x,
typing nil nil x A ->
F_observational_eq nil nil (exp_app fun_from_dnegA (exp_app fun_to_dnegA x)) x A.
Proof.
intros x Htyping.
unfold fun_from_dnegA.
assert (type A) as TypeA.
apply typing_regular in Htyping.
decompose [and] Htyping.
apply type_from_wf_typ in H3; auto.
assert (open_tt (typ_arrow (typ_arrow A (typ_bvar 0)) (typ_bvar 0)) A = typ_arrow (typ_arrow A A) A) as EQ.
unfold open_tt. simpl.
rewrite <- open_tt_rec_type; auto.
apply F_observational_eq__trans with (e':=
(exp_app
(exp_tapp (exp_app fun_to_dnegA x) A)
(exp_abs A (exp_bvar 0))
)).
Case "EQ1".
(* beta-reduce the outer application of fun_from_dnegA *)
assert ( (open_ee ((exp_app (exp_tapp (exp_bvar 0) A) (exp_abs A (exp_bvar 0)))) (exp_app fun_to_dnegA x))
= (exp_app (exp_tapp (exp_app fun_to_dnegA x) A) (exp_abs A (exp_bvar 0)))
) as Heq.
unfold open_ee. simpl. auto.
rewrite <- Heq.
apply F_observational_eq__beta; auto.
apply typing_app with (T1:=dnegA) (D1:=nil) (D2:=nil); auto.
apply typing_app with (T1:=A) (D1:=nil) (D2:=nil); auto.
apply red_abs; auto.
apply expr_abs with (L:={}); auto.
apply type_from_wf_typ with (E:= nil).
apply wftdnegA.
intros x0 x0notin.
unfold open_ee. simpl.
apply expr_app; auto.
apply expr_abs with (L:={{x0}}); auto.
intros x1 x1notin.
unfold open_ee. simpl; auto.
assert (J:=typing_fun_to_dneg).
apply expr_app; auto.
Case "EQ2".
unfold fun_from_dnegA.
apply F_observational_eq__trans with (e':=
(exp_app
(exp_tapp (exp_tabs (exp_abs (typ_arrow A (typ_bvar 0)) (exp_app (exp_bvar 0) x))) A)
(exp_abs A (exp_bvar 0))
)
).
SCase "EQ21".
(* beta-reduce fun_to_dnegA applied to x, under the tapp/app context *)
assert (F_observational_eq empty lempty (exp_abs A 0) (exp_abs A 0) (typ_arrow A A)) as J.
apply F_observational_eq__refl; auto.
apply typing_abs with (L:={}); auto.
intros x0 x0notin.
unfold open_ee. simpl. simpl_env.
apply typing_lvar; auto.
rewrite_env ([(x0, lbind_typ A)]++nil).
apply wf_lenv_typ; auto.
apply F_observational_eq__congr_app with (t1:=typ_arrow A A) (lE1:=nil) (lE2:=nil); auto.
assert ( (open_tt
(typ_arrow (typ_arrow A 0) 0)
A)
= typ_arrow (typ_arrow A A) A
) as Heq.
unfold open_tt. simpl.
rewrite <- open_tt_rec_type; auto.
rewrite <- Heq. clear Heq.
apply F_observational_eq__congr_tapp; auto.
assert ( (open_ee
(exp_tabs (exp_abs (typ_arrow A (typ_bvar 0)) (exp_app (exp_bvar 0) (exp_bvar 1))))
x)
= (exp_tabs (exp_abs (typ_arrow A (typ_bvar 0)) (exp_app (exp_bvar 0) x)))
) as Heq.
unfold open_ee. simpl. auto.
rewrite <- Heq. clear Heq.
apply F_observational_eq__beta; auto.
apply typing_app with (T1:=A) (D1:=nil) (D2:=nil); auto.
apply red_abs; auto.
apply expr_abs with (L:={}); auto.
intros x0 x0notin.
unfold open_ee. simpl.
apply expr_tabs with (L:={}); auto.
intros X Xnotin.
unfold open_te. simpl.
rewrite <- open_tt_rec_type; auto.
apply expr_abs with (L:={{X}}); auto.
intros y0 y0notin.
unfold open_ee. simpl.
apply expr_app; auto.
SCase "EQ22".
apply F_observational_eq__trans with (e':=
(exp_app
(exp_abs (typ_arrow A A) (exp_app (exp_bvar 0) x))
(exp_abs A (exp_bvar 0))
)
).
SSCase "EQ221".
(* type-beta step: (/\X. \g:A->X. g x)[A]  ~  \g:A->A. g x *)
apply F_observational_eq__congr_app with (t1:=typ_arrow A A) (lE1:=nil) (lE2:=nil); auto.
SSSCase "EQ2211".
assert (open_tt (typ_arrow (typ_arrow A (typ_bvar 0)) (typ_bvar 0)) A = typ_arrow (typ_arrow A A) A) as EQ'.
unfold open_tt. simpl.
rewrite <- open_tt_rec_type; auto.
rewrite <- EQ'.
assert (open_te (exp_abs (typ_arrow A 0) (exp_app 0 x)) A = exp_abs (typ_arrow A A) (exp_app 0 x)) as J.
unfold open_te. simpl.
rewrite <- open_te_rec_expr; auto.
rewrite <- open_tt_rec_type; auto.
rewrite <- J.
apply F_observational_eq__beta; auto.
apply typing_tapp; auto.
apply typing_tabs with (L:={}).
intros X Xnotin.
unfold open_te. unfold open_tt. simpl.
rewrite <- open_te_rec_expr; auto.
rewrite <- open_tt_rec_type; auto.
apply typing_abs with (L:={{X}}); auto.
apply wf_typ_arrow; auto.
rewrite_env (nil ++ [(X, bind_kn)] ++ nil).
apply wf_typ_weakening; auto.
intros x0 x0notin.
unfold open_ee. simpl.
rewrite <- open_ee_rec_expr; auto.
assert (wf_lenv [(X, bind_kn)] [(x0, lbind_typ (typ_arrow A X))]) as Wfle.
rewrite_env ([(x0, lbind_typ (typ_arrow A X))]++nil).
apply wf_lenv_typ; auto.
apply wf_lenv_empty.
rewrite_env ([(X, bind_kn)]++nil).
apply wf_env_kn; auto.
apply wf_typ_arrow; auto.
rewrite_env (nil ++ [(X, bind_kn)] ++ nil).
apply wf_typ_weakening; auto.
apply typing_app with (T1:=A) (D1:=[(x0, lbind_typ (typ_arrow A X))]) (D2:=nil); auto.
rewrite_env (nil ++ ([(X, bind_kn)]) ++ nil).
apply typing_weakening; simpl_env; auto.
simpl_env.
rewrite_env ([(x0, lbind_typ (typ_arrow A X))]++nil).
apply lenv_split_left; auto.
apply wf_typ_arrow; auto.
rewrite_env (nil ++ [(X, bind_kn)] ++ nil).
apply wf_typ_weakening; auto.
apply red_tabs; auto.
apply expr_tabs with (L:={}).
intros X Xnotin.
unfold open_te. unfold open_tt. simpl.
rewrite <- open_te_rec_expr; auto.
rewrite <- open_tt_rec_type; auto.
apply expr_abs with (L:={{X}}).
apply type_arrow; auto.
intros x0 x0notin.
unfold open_ee. simpl. rewrite <- open_ee_rec_expr; auto.
SSSCase "EQ2212".
apply F_observational_eq__refl.
apply typing_abs with (L:={}); auto.
intros x0 x0notin.
unfold open_ee. simpl.
apply typing_lvar; auto.
rewrite_env ([(x0, lbind_typ A)] ++ nil).
apply wf_lenv_typ; auto.
SSCase "EQ222".
apply F_observational_eq__trans with (e':=
(exp_app (exp_abs A (exp_bvar 0)) x)
).
SSSCase "EQ2221".
(* beta step: (\g:A->A. g x) id  ~  id x *)
assert (open_ee (exp_app (exp_bvar 0) x) (exp_abs A (exp_bvar 0))
= (exp_app (exp_abs A (exp_bvar 0)) x)
) as Heq.
unfold open_ee. simpl.
rewrite <- open_ee_rec_expr; auto.
rewrite <- Heq.
apply F_observational_eq__beta; auto.
apply typing_app with (T1:=typ_arrow A A) (D1:=nil) (D2:=nil); auto.
apply typing_abs with (L:={}); auto.
intros x0 Hfvx0.
unfold open_ee. simpl; auto.
rewrite <- open_ee_rec_expr; auto.
assert (wf_lenv empty ([(x0, lbind_typ (typ_arrow A A))])) as Wfle.
rewrite_env ([(x0, lbind_typ (typ_arrow A A))] ++ nil).
apply wf_lenv_typ; auto.
apply typing_app with (T1:=A) (D1:=[(x0, lbind_typ (typ_arrow A A))]) (D2:=nil); auto.
simpl_env.
rewrite_env ([(x0, lbind_typ (typ_arrow A A))] ++ nil).
apply lenv_split_left; auto.
apply typing_abs with (L:={}); auto.
intros x0 Hfvx0.
unfold open_ee. simpl; auto.
apply typing_lvar; auto.
rewrite_env ([(x0, lbind_typ A)] ++ nil).
apply wf_lenv_typ; auto.
apply red_abs.
apply expr_abs with (L:={}); auto.
intros x0 Hfvx0.
unfold open_ee. simpl; auto.
rewrite <- open_ee_rec_expr; auto.
apply expr_abs with (L:={}); auto.
intros x0 Hfvx0.
unfold open_ee. simpl; auto.
SSSCase "EQ2222".
(* final beta step: id x ~ x *)
assert (open_ee 0 x = x) as Heq.
unfold open_ee. simpl. auto.
rewrite <- Heq.
apply F_observational_eq__beta; auto.
apply typing_app with (T1:=A) (D1:=nil) (D2:=nil); auto.
apply typing_abs with (L:={}); auto.
intros x0 Hfvx0.
unfold open_ee. simpl; auto.
apply typing_lvar; auto.
rewrite_env ([(x0, lbind_typ A)] ++ nil).
apply wf_lenv_typ; auto.
apply red_abs; auto.
apply expr_abs with (L:={}); auto.
intros x0 Hfvx0.
unfold open_ee. simpl; auto.
Qed.
Require Import Bang_Parametricity_App.
(* Inversion principle derived from parametricity at type nA: any closed
   term [nt] of type [dnegA], applied (after instantiation at B or B') to
   related continuations [x], [y] of type A -> Y, yields related results
   at Y.  The relation interpreting Y is the arbitrary [RY] with
   [wfr RY B B'].  The proof applies the fundamental theorem to [nt],
   unfolds the [typ_all] and [typ_arrow] levels of the value relation,
   and repackages the result, normalizing the type/term applications. *)
Lemma dnegation_type_inversion : forall nt B B',
typing nil nil nt dnegA->
(forall x y RY,
wfr RY B B' ->
exists Y:atom,
(F_related_terms (typ_arrow A Y)
([(Y, RY)])
([(Y, B)])
([(Y, B')])
x y ->
F_related_terms (typ_fvar Y)
([(Y, RY)])
([(Y, B)])
([(Y, B')])
(exp_app (exp_tapp nt B) x) (exp_app (exp_tapp nt B') y))).
Proof.
intros nt B B' Htyping x y RY Hwfr.
(* fundamental theorem: nt is related to itself at dnegA *)
assert (F_related_terms dnegA rho_nil delta_nil delta_nil nt nt) as Frel_All.
apply fundamental_parametricity; auto.
destruct Frel_All as [v [v'[Ht [Ht' [Hn_ntv [Hn_nt'v' Frel_All]]]]]].
apply F_related_values_all_leq in Frel_All.
destruct Frel_All as [Hvy [Hvy' [LY Hall]]].
pick fresh Y.
assert (Y `notin` LY) as FrY. auto.
destruct (@Hall Y B B' RY FrY) as [vv [vv' [Hn_wBvv [Hn_w'B'vv' Hrel_wft]]]]; auto.
assert (Y `notin` fv_tt A) as YnA.
apply notin_fv_wf with (E:=nil); auto.
simpl. auto.
unfold open_tt in*. simpl in *. clear Hall.
assert (type A) as TypeA.
apply type_from_wf_typ with (E:=nil); auto.
exists (Y).
intros Hterm. simpl_env in *.
rewrite <- open_tt_rec_type in *; auto.
(* unfold the arrow level of the relation at A -> Y *)
assert (Harrow := @F_related_values_arrow_leq (typ_arrow A Y) Y ([(Y,RY)]) ([(Y,B)]) ([(Y,B')]) vv vv' Hrel_wft).
destruct Harrow as [Hvv [Hvv' Harrow]].
destruct Hterm as [v0 [v'0 [Ht_x [Ht_y [Hn_xv0 [Hn_yv'0 Hrel]]]]]].
destruct (@Harrow x y) as [u [u' [Hn_vvxu [Hn_vv'yu' Hrel_wft2]]]]; auto.
exists v0. exists v'0. repeat (split;auto).
clear Harrow.
simpl in *.
(* resolve the substitution of Y by B / B' in the types *)
assert ((if Y==Y then B else typ_fvar Y) = B) as EqB.
destruct (Y==Y); auto. contradict n; auto.
assert ((if Y==Y then B' else typ_fvar Y) = B') as EqB'.
destruct (Y==Y); auto. contradict n; auto.
rewrite EqB in *. rewrite EqB' in *.
assert (Y `notin` fv_tt A) as YnA.
apply notin_fv_wf with (E:=nil); auto.
rewrite <- subst_tt_fresh with (T:=A) in Ht_x; auto.
rewrite <- subst_tt_fresh with (T:=A) in Ht_y; auto.
clear EqB EqB'.
(* normalization of the type and term applications *)
assert (normalize (exp_tapp nt B) vv).
apply congr_tapp with (v1:=v); auto.
eapply type_from_wf_typ with (E:=nil); eauto using wfr_left_inv.
assert (normalize (exp_tapp nt B') vv').
apply congr_tapp with (v1:=v'); auto.
eapply type_from_wf_typ with (E:=nil); eauto using wfr_right_inv.
assert (open_tt (typ_arrow (typ_arrow A (typ_bvar 0)) (typ_bvar 0)) B = typ_arrow (typ_arrow A B) B) as EqNAB.
unfold open_tt. simpl.
rewrite <- open_tt_rec_type; auto.
assert (open_tt (typ_arrow (typ_arrow A (typ_bvar 0)) (typ_bvar 0)) B' = typ_arrow (typ_arrow A B') B') as EqNAB'.
unfold open_tt. simpl.
rewrite <- open_tt_rec_type; auto.
assert (normalize (exp_app (exp_tapp nt B) x) u).
apply congr_app with (v1:=vv); auto.
assert (typing nil nil (exp_tapp nt B) (typ_arrow (typ_arrow A B) B)).
rewrite <- EqNAB.
apply typing_tapp; eauto using wfr_left_inv.
auto.
assert (normalize (exp_app (exp_tapp nt B') y) u').
apply congr_app with (v1:=vv'); auto.
assert (typing nil nil (exp_tapp nt B') (typ_arrow (typ_arrow A B') B')).
rewrite <- EqNAB'.
apply typing_tapp; eauto using wfr_right_inv.
auto.
exists(u). exists(u'). simpl in *.
split; simpl.
destruct (Y==Y); try solve [contradict n; auto | auto].
apply typing_app with (T1:=typ_arrow A B) (D1:=nil) (D2:=nil); auto.
rewrite <- EqNAB.
apply typing_tapp; eauto using wfr_left_inv.
split; simpl; auto.
destruct (Y==Y); try solve [contradict n; auto | auto].
apply typing_app with (T1:=typ_arrow A B') (D1:=nil) (D2:=nil); auto.
rewrite <- EqNAB'.
apply typing_tapp; eauto using wfr_right_inv.
Qed.
(* The graph relation of a function [f : A -> A'], up to observational
   equivalence: v and v' are related iff both are well typed, f is a
   well-typed function, and f v is observationally equal to v' at A'.
   Used as the admissible relation interpreting the quantified type
   variable in the double-negation argument below. *)
Definition Rfun (A A':typ) (f:exp) (v v':exp) : Prop :=
typing nil nil v A /\ typing nil nil v' A' /\
typing nil nil f (typ_arrow A A') /\
F_nobservational_eq nil nil (exp_app f v) v' A'
.
(* [Rfun A A' a] is a well-formed relation between A and A' whenever both
   types are well formed in the empty environment. *)
Lemma Rfun_wfr : forall A A' a,
wf_typ nil A ->
wf_typ nil A' ->
wfr (Rfun A A' a) A A'.
Proof.
intros.
split; auto.
Qed.
(* Free-theorem rearrangement for double negation.
   For [nt : dnegA] (presumably dnegA = all X. (A -> X) -> X — defined
   earlier in the file; TODO confirm) and [f : A -> B], instantiating
   [nt] at [A] applied to the identity is [Rfun A B f]-related to
   instantiating [nt] at [B] applied to [\x. f x].  The proof:
   (1) inverts the parametricity statement for [dnegA] to obtain a fresh
       type variable [Y] and an implication [JJ];
   (2) shows [\x:A. x] and [\x:A. f x] are logically related at
       [A -> Y] under the interpretation [Y |-> Rfun A B f];
   (3) feeds that into [JJ] and unpacks the resulting relatedness into
       the four conjuncts of [Rfun]. *)
Corollary Rearrangement_DNegation : forall nt f B,
typing nil nil nt dnegA ->
wf_typ nil B ->
typing nil nil f (typ_arrow A B) ->
Rfun A B f
(exp_app (exp_tapp nt A) (exp_abs A (exp_bvar 0)))
(exp_app (exp_tapp nt B) (exp_abs A (exp_app f 0)))
.
Proof.
intros nt f B Htypingnt HwftB Htypingf.
assert (wf_typ nil A) as HwftA. auto.
(* Parametricity inversion at dnegA: yields fresh Y and the key
   implication JJ from relatedness of the arguments. *)
destruct (@dnegation_type_inversion nt A B
Htypingnt
(exp_abs A (exp_bvar 0)) (exp_abs A (exp_app f 0))
(Rfun A B f)
) as [Y JJ]; auto.  using Rfun_wfr.
assert (F_related_terms (typ_arrow A Y) [(Y, Rfun A B f)] [(Y, A)] [(Y, B)] (exp_abs A 0) (exp_abs A (exp_app f 0))) as H.
Case "(exp_abs A (exp_bvar 0)) and (exp_abs A (exp_app f (exp_bvar 0))) are related".
assert (type A) as TypeA.
apply type_from_wf_typ with (E:=nil); auto.
assert (Y `notin` fv_tt A) as YnA.
apply notin_fv_wf with (E:=nil); auto.
(* Both abstractions are values. *)
assert (value (exp_abs A 0)) as Jv1.
apply value_abs.
apply expr_abs with (L:={{Y}}); auto.
intros x xnotin.
unfold open_ee. simpl. auto.
assert (value (exp_abs A (exp_app f 0))) as Jv2.
apply value_abs.
apply expr_abs with (L:={{Y}}); auto.
intros x xnotin.
unfold open_ee. simpl.
rewrite <- open_ee_rec_expr; auto.
exists (exp_abs A (exp_bvar 0)). exists (exp_abs A (exp_app f (exp_bvar 0))).
split; simpl.
SCase "typing".
destruct (Y==Y); subst; auto.
rewrite <- subst_tt_fresh; auto.
apply typing_abs with ({{Y}}); auto.
intros x xnotin.
unfold open_ee. simpl.
apply typing_lvar; auto.
rewrite_env ([(x, lbind_typ A)]++nil).
apply wf_lenv_typ; auto.
contradict n; auto.
split; simpl.
SCase "typing".
destruct (Y==Y); subst; auto.
rewrite <- subst_tt_fresh; auto.
apply typing_abs with ({{Y}}); auto.
intros x xnotin.
unfold open_ee. simpl.
rewrite <- open_ee_rec_expr; auto.
apply typing_app with (T1:=A)(D1:=nil)(D2:=[(x, lbind_typ A)]); auto.
apply typing_lvar; auto.
simpl_env.
rewrite_env ([(x, lbind_typ A)]++nil).
apply wf_lenv_typ; auto.
simpl_env.
rewrite_env ([(x, lbind_typ A)]++nil).
apply lenv_split_right; auto.
contradict n; auto.
unfold normalize.
split; auto.
split; auto.
SCase "Terms".
(* Arrow case of the logical relation: given related arguments x, x',
   show the two applications normalize to related results. *)
apply F_related_values_arrow_req. simpl.
split; auto.
split; auto.
intros x x' Htyping Htyping' Hrel_xx'.
rewrite <- subst_tt_fresh in Htyping; auto.
rewrite <- subst_tt_fresh in Htyping'; auto.
destruct Hrel_xx' as [w [w' [Hxw [Hx'w' Hrel_xx']]]].
assert (F_related_values A rho_nil delta_nil delta_nil w w') as Hrelww'.
rewrite_env (nil ++ [(Y, Rfun A B f)] ++ nil) in Hrel_xx'.
rewrite_env (nil ++ [(Y, A)] ++ nil) in Hrel_xx'.
rewrite_env (nil ++ [(Y, B)] ++ nil) in Hrel_xx'.
apply Frel_stronger with (E:=nil) (E':=nil) in Hrel_xx'; simpl_env; auto using Rfun_wfr.
rewrite_env ([(Y, Rfun A B f)] ++ nil).
rewrite_env ([(Y, bind_kn)] ++ nil).
apply wf_rho_subst_srel; auto.
rewrite_env ([(Y, A)] ++ nil).
rewrite_env ([(Y, bind_kn)] ++ nil).
apply wf_delta_subst_styp; auto.
rewrite_env ([(Y, B)] ++ nil).
rewrite_env ([(Y, bind_kn)] ++ nil).
apply wf_delta_subst_styp; auto.
(* Related closed values of type A are observationally equal. *)
assert (F_nobservational_eq nil nil x x' A) as x_eq_x'.
apply F_observational_eq__F_nobservational_eq.
apply F_logical_related__sound; auto.
split; auto.
split; auto.
intros dsubst dsubst' gsubst gsubst' lgsubst lgsubst' rsubst Hrelsubst HRsubst.
inversion Hrelsubst; subst.
simpl.
exists w. exists w'.
simpl.
split; auto.
assert (exists fx', normalize (exp_app f x') fx') as Hn_fx'2fx'.
apply strong_normalization with (t:=B); auto.
apply typing_app with (T1:=A) (D1:=nil) (D2:=nil); auto.
destruct Hn_fx'2fx' as [fx' [Hb_fx'2fx' Valuefx']].
exists w. exists fx'.
assert (value w) as Valuew.
destruct Hxw; auto.
repeat(split; auto).
SSCase "Red".
destruct Hxw.
apply bigstep_red_trans with (e':=x); auto.
assert (red (exp_app (exp_abs A 0) x) x = red (exp_app (exp_abs A 0) x) (open_ee 0 x)) as Heq.
unfold open_ee. simpl. auto.
rewrite Heq.
apply red_abs; auto.
SSCase "Red".
apply bigstep_red_trans with (e':=exp_app f x'); auto.
assert (exp_app f x' = open_ee (exp_app f 0) x') as Heq.
unfold open_ee. simpl.
rewrite <- open_ee_rec_expr; auto.
rewrite Heq.
apply red_abs; auto.
SSCase "Terms".
(* The results are related at the type variable Y, i.e. by Rfun A B f. *)
apply F_related_values_fvar_req. simpl.
exists(Rfun A B f).
split; auto.
split; auto.
split; auto.
split.
apply preservation_normalization with (v:=w) in Htyping; auto.
split.
apply preservation_bigstep_red with (e:=exp_app f x'); auto.
apply typing_app with (T1:=A) (D1:=nil) (D2:=nil); auto.
split; auto.
assert (exists vf, normalize f vf) as Hn_f2vf.
apply strong_normalization with (t:=typ_arrow A B); auto.
destruct Hn_f2vf as [vf [Hb_f2fv Valuefv]].
apply F_nobservational_eq__trans with (e':=exp_app vf w); auto.
apply F_observational_eq__F_nobservational_eq.
apply F_observational_eq__mbeta; auto.
apply typing_app with (D1:=lempty)(D2:=lempty)(T1:=A); auto.
apply preservation_normalization with (v:=w) in Htyping; auto.
apply bigstep_red_app; auto.
apply F_nobservational_eq__trans with (e':=exp_app vf x'); auto.
apply F_observational_eq__F_nobservational_eq.
apply F_observational_eq__congr_app with (lE1:=nil) (lE2:=nil) (t1:=A); auto.
apply F_observational_eq__refl; auto.
apply preservation_normalization with (v:=vf) in Htypingf; auto.
split; auto.
apply F_observational_eq__trans with (e':=x); auto.
apply F_observational_eq__sym.
destruct Hxw.
apply F_observational_eq__mbeta; auto.
apply F_logical_related__sound.
unfold F_logical_related.
split; auto.
split; auto.
intros dsubst dsubst' gsubst gsubst' lgsubst lgsubst' rsubst H1 H2.
inversion H1; subst. simpl.
exists w. exists w'. repeat (split; auto).
apply F_observational_eq__F_nobservational_eq.
apply F_observational_eq__mbeta; auto.
apply typing_app with (T1:=A) (D1:=nil) (D2:=nil); auto.
apply preservation_normalization with (v:=vf) in Htypingf; auto.
split; auto.
assert (exists v, normalize (exp_app vf x') v) as Hn_vfx'2v.
apply strong_normalization with (t:=B); auto.
apply typing_app with (D1:=lempty)(D2:=lempty)(T1:=A); auto.
apply preservation_normalization with (v:=vf) in Htypingf; auto.
split; auto.
destruct Hn_vfx'2v as [v Hn_vfx'2v].
assert (normalize (exp_app f x') v) as Hn_fx'2v.
destruct Hn_vfx'2v.
split; auto.
apply _congr_app_fun with (v1:=vf); auto.
assert (normalize (exp_app f x') fx') as Hn_fx'2fx'.
split; auto.
assert (v = fx') as EQ.
apply unique_normal_form with (u:=(exp_app f x')); auto.
subst. destruct Hn_vfx'2v; auto.
(* nt[A] id and nt[B](\x. fx) are related as Rfun*)
apply JJ in H; auto using Rfun_wfr.
destruct H as [v [v' [Typing1 [Typing2 [Heq_ntAidv [Heq_ntBfv' [R [Hb [Valuev [Valuev' Hrel]]]]]]]]]]; subst.
simpl in Typing1.
simpl in Typing2.
destruct (Y==Y); try solve [auto | contradict n; auto].
unfold Rfun.
split; auto.
split; auto.
split; auto.
Case "Eq".
(* Remaining conjunct: f (nt[A] id) is observationally equal to
   nt[B](\x. f x), via the normal forms v, v' and the Rfun witness. *)
assert (exists vf, normalize f vf) as Hn_f2vf.
apply strong_normalization with (t:=typ_arrow A B); auto.
destruct Hn_f2vf as [vf [Hb_f2fv Valuefv]].
apply F_nobservational_eq__trans with (e':=exp_app vf (exp_app (exp_tapp nt A) (exp_abs A 0))); auto.
apply F_observational_eq__F_nobservational_eq.
apply F_observational_eq__mbeta; auto.
apply typing_app with (D1:=lempty)(D2:=lempty)(T1:=A); auto.
apply bigstep_red_app; auto.
apply F_nobservational_eq__trans with (e':=exp_app vf v).
apply F_observational_eq__F_nobservational_eq.
apply F_observational_eq__congr_app with (t1:=A) (lE1:=nil) (lE2:=nil); auto.
apply F_observational_eq__refl; auto.
apply preservation_normalization with (v:=vf) in Htypingf; auto.
split; auto.
apply F_observational_eq__mbeta; auto.
destruct Heq_ntAidv; auto.
apply F_nobservational_eq__trans with (e':=v').
analyze_binds Hb.
destruct Hrel as [J1 [J2 [J3 J4]]]; auto.
apply F_nobservational_eq__trans with (e':=exp_app f v); auto.
apply F_nobservational_eq__sym.
apply F_observational_eq__F_nobservational_eq.
apply F_observational_eq__mbeta; auto.
apply typing_app with (D1:=lempty)(D2:=lempty)(T1:=A); auto.
apply bigstep_red_app; auto.
apply F_nobservational_eq__sym.
apply F_observational_eq__F_nobservational_eq.
apply F_observational_eq__mbeta; auto.
destruct Heq_ntBfv'; auto.
Qed.
(* The identity abstraction [\x:A. x] has type [A -> A] in empty
   environments.  Relies on [wftA] (well-formedness of A, established
   earlier in the file) for the side conditions discharged by [auto]. *)
Lemma id_typing : typing nil nil (exp_abs A 0) (typ_arrow A A).
Proof.
assert (J:=wftA).
apply typing_abs with ({}); auto.
intros x xnotin.
unfold open_ee. simpl.
apply typing_lvar; auto.
rewrite_env ([(x, lbind_typ A)]++nil).
apply wf_lenv_typ; auto.
Qed.
Hint Resolve id_typing.
(* Right inverse of the A ~ dnegA isomorphism: for any [h : dnegA],
   converting to A (via [fun_from_dnegA]) and back (via [fun_to_dnegA])
   is observationally equal to [h] itself.  Proof shape:
   EQ1  : beta-reduce the outer application of [fun_to_dnegA];
   EQ21 : push the reduction under the type/term abstraction (congruence);
   EQ221: use the rearrangement free theorem ([Rearrangement_DNegation])
          to trade [x (h[A] id)] for [h[X] x], up to CIU equivalence;
   EQ222: conclude by eta for type/term abstractions. *)
Lemma isomorphism_right : forall h,
typing nil nil h dnegA ->
F_observational_eq nil nil (exp_app fun_to_dnegA (exp_app fun_from_dnegA h)) h dnegA.
Proof.
intros h Htyping.
assert (J:=@wftA).
unfold fun_from_dnegA.
assert (type A) as typeA.
apply type_from_wf_typ with (E:=nil); auto.
assert (type dnegA) as typednegA.
apply type_from_wf_typ with (E:=nil); auto.
assert (typing nil nil (exp_app (exp_tapp h A) (exp_abs A 0)) A) as Typing1.
apply typing_app with (T1:=typ_arrow A A) (D1:=nil) (D2:=nil); auto.
assert (open_tt (typ_arrow (typ_arrow A 0) 0) A = typ_arrow (typ_arrow A A) A) as EQ.
unfold open_tt. simpl. rewrite <- open_tt_rec_type; auto.
rewrite <- EQ.
apply typing_tapp; auto.
apply F_observational_eq__trans with (e':=exp_tabs (exp_abs (typ_arrow A 0) (exp_app 0 (exp_app fun_from_dnegA h)))).
Case "EQ1".
(* Beta step: apply fun_to_dnegA to its argument. *)
assert (open_ee (exp_tabs (exp_abs (typ_arrow A 0) (exp_app 0 1))) (exp_app fun_from_dnegA h)
= exp_tabs (exp_abs (typ_arrow A 0) (exp_app 0 (exp_app fun_from_dnegA h)))
) as Heq.
unfold open_ee. simpl. auto.
rewrite <- Heq.
apply F_observational_eq__beta.
apply typing_app with (D1:=nil) (D2:=nil) (T1:=A); auto.
apply typing_app with (D1:=nil) (D2:=nil) (T1:=dnegA); auto.
apply red_abs; auto.
apply expr_abs with (L:={}); auto.
intros x xn.
unfold open_ee. simpl.
apply expr_tabs with (L:={}); auto.
intros X Xn.
unfold open_te. simpl.
apply expr_abs with (L:={{x}}); auto.
rewrite <- open_tt_rec_type; auto.
intros y yn.
unfold open_ee. simpl. auto.
assert (H:=typing_fun_from_dneg).
apply expr_app; auto.
Case "EQ2".
apply F_observational_eq__trans with (e':=exp_tabs (exp_abs (typ_arrow A 0) (exp_app 0 (exp_app (exp_tapp h A) (exp_abs A 0))))).
SCase "EQ21".
(* Congruence under /\X. \x:(A->X). [] — beta-reduce fun_from_dnegA h. *)
apply F_observational_eq__congr_tabs_abs with (L:={}); auto.
intros X Xn.
unfold open_tt. simpl.
rewrite <- open_tt_rec_type; auto.
apply wf_typ_arrow; auto.
rewrite_env (nil ++ [(X, bind_kn)] ++ nil).
apply wf_typ_weakening; auto.
intros X x Xn xn.
unfold open_te. unfold open_tt. unfold open_ee. simpl.
rewrite <- open_tt_rec_type; auto.
rewrite <- open_te_rec_expr; auto.
rewrite <- open_ee_rec_expr; auto.
assert (wf_typ ((X,bind_kn)::empty) A) as WFT'.
rewrite_env (nil ++ [(X, bind_kn)] ++ nil).
apply wf_typ_weakening; auto.
apply F_observational_eq__congr_app with (lE1:=[(x, lbind_typ (typ_arrow A X))]) (lE2:=nil) (t1:=A); auto.
apply F_observational_eq__refl; auto.
apply typing_lvar; auto.
rewrite_env ([(x, lbind_typ (typ_arrow A X))]++nil).
apply wf_lenv_typ; auto.
apply wf_lenv_empty.
rewrite_env ([(X, bind_kn)]++nil).
apply wf_env_kn; auto.
assert (open_ee (exp_app (exp_tapp 0 A) (exp_abs A 0)) h
= exp_app (exp_tapp h A) (exp_abs A 0)
) as Heq.
unfold open_ee. simpl. auto.
rewrite <- Heq.
apply F_observational_eq__beta.
apply typing_app with (D1:=nil) (D2:=nil) (T1:=dnegA); auto.
rewrite <- open_tt_rec_type; auto.
unfold dnegA.
assert (wf_typ ((X,bind_kn)::empty) (typ_all (typ_arrow (typ_arrow A 0) 0))) as WFT.
apply wf_typ_all with (L:={{X}}); auto.
intros Y Yn.
unfold open_tt. simpl.
apply wf_typ_arrow; auto.
rewrite <- open_tt_rec_type; auto.
apply wf_typ_arrow; auto.
rewrite_env (nil ++ ([(Y, bind_kn)]++[(X, bind_kn)]) ++ nil).
apply wf_typ_weakening; auto.
apply typing_abs with (L:={{x}} `union` {{X}}); auto.
intros y yn.
unfold open_ee. simpl.
apply typing_app with (D1:=[(y, lbind_typ (typ_all (typ_arrow (typ_arrow A 0) 0)))])(D2:=lempty)(T1:=typ_arrow A A); auto.
assert (open_tt (typ_arrow (typ_arrow A 0) 0) A = typ_arrow (typ_arrow A A) A) as EQ.
unfold open_tt. simpl.
rewrite <- open_tt_rec_type; auto.
rewrite <- EQ.
apply typing_tapp.
apply typing_lvar; auto.
rewrite_env ([(y, lbind_typ (typ_all (typ_arrow (typ_arrow A 0) 0)))]++nil).
rewrite_env ([(X, bind_kn)]++nil).
apply wf_lenv_typ; auto.
rewrite_env (nil ++ [(X, bind_kn)] ++ nil).
apply wf_typ_weakening; auto.
apply typing_abs with (L:={{x}}`union`{{y}}`union`{{X}}); auto.
intros z zn.
unfold open_ee. simpl.
apply typing_lvar; auto.
rewrite_env ([(z, lbind_typ A)]++nil).
rewrite_env ([(X, bind_kn)]++nil).
apply wf_lenv_typ; auto.
simpl_env.
rewrite_env ([(y, lbind_typ (typ_all (typ_arrow (typ_arrow A 0) 0)))]++nil).
rewrite_env ([(X, bind_kn)]++nil).
apply lenv_split_left; auto.
rewrite_env (nil ++ [(X, bind_kn)] ++ nil).
apply typing_weakening; auto.
rewrite_env ([(X, bind_kn)] ++ nil). auto.
rewrite_env ([(X, bind_kn)] ++ nil). auto.
apply red_abs; auto.
apply expr_abs with (L:={}); auto.
rewrite <- open_tt_rec_type; auto.
intros y yn.
unfold open_ee. simpl.
apply expr_app; auto.
apply expr_abs with (L:={}); auto.
intros z zn.
unfold open_ee. simpl. auto.
simpl_env.
rewrite_env ([(x, lbind_typ (typ_arrow A X))]++nil).
rewrite_env ([(X, bind_kn)]++nil).
apply lenv_split_left; auto.
SCase "EQ22".
apply F_observational_eq__trans with (e':=exp_tabs (exp_abs (typ_arrow A 0) (exp_app (exp_tapp h 0) 0))).
SSCase "EQ221".
(* Core step: swap x (h[A] id) for h[X] x using the free theorem. *)
assert (JJ:=Rearrangement_DNegation).
assert (Typing5:=id_typing).
unfold dnegA in *.
apply F_observational_eq__congr_tabs_abs with (L:={}); auto.
intros X Xn.
unfold open_tt. simpl.
rewrite <- open_tt_rec_type; auto.
apply wf_typ_arrow; auto.
rewrite_env (nil ++ [(X, bind_kn)] ++ nil).
apply wf_typ_weakening; auto.
intros X x Xn xn.
unfold open_te. unfold open_tt. unfold open_ee. simpl.
rewrite <- open_tt_rec_type; auto.
rewrite <- open_te_rec_expr; auto.
rewrite <- open_ee_rec_expr; auto.
simpl_env.
assert (wf_env [(X, bind_kn)]) as Wfe1.
rewrite_env ([(X, bind_kn)]++nil).
apply wf_env_kn; auto.
assert (wf_typ [(X, bind_kn)] A) as Wft2.
rewrite_env (nil ++ [(X, bind_kn)] ++ nil).
apply wf_typ_weakening; auto.
assert (wf_typ [(X, bind_kn)] (typ_arrow A X)) as Wft1.
apply wf_typ_arrow; auto.
assert (wf_lenv [(X, bind_kn)] [(x, lbind_typ (typ_arrow A X))]) as Wfle1.
rewrite_env ([(x, lbind_typ (typ_arrow A X))]++nil).
apply wf_lenv_typ; auto.
(* CIU equivalence: typing of both sides, then all closing substitutions. *)
apply F_ciu_eq__F_observational_eq.
split.
apply typing_app with (T1:=A) (D1:=[(x, lbind_typ (typ_arrow A X))]) (D2:=nil); auto.
apply typing_app with (T1:=typ_arrow A A) (D1:=nil) (D2:=nil); auto.
assert (open_tt (typ_arrow (typ_arrow A 0) 0) A = typ_arrow (typ_arrow A A) A) as EQ.
unfold open_tt. simpl. rewrite <- open_tt_rec_type; auto.
rewrite <- EQ.
apply typing_tapp; auto.
rewrite_env (nil ++ [(X, bind_kn)] ++ nil).
apply typing_weakening; auto.
rewrite_env (nil ++ [(X, bind_kn)] ++ nil).
apply typing_weakening; auto.
rewrite_env ([(x, lbind_typ (typ_arrow A X))]++nil).
apply lenv_split_left; auto.
split.
apply typing_app with (T1:=typ_arrow A X) (D2:=[(x, lbind_typ (typ_arrow A X))]) (D1:=nil); auto.
assert (open_tt (typ_arrow (typ_arrow A 0) 0) X = typ_arrow (typ_arrow A X) X) as EQ.
unfold open_tt. simpl. rewrite <- open_tt_rec_type; auto.
rewrite <- EQ.
apply typing_tapp; auto.
rewrite_env (nil ++ [(X, bind_kn)] ++ nil).
apply typing_weakening; auto.
rewrite_env ([(x, lbind_typ (typ_arrow A X))]++nil).
apply lenv_split_right; auto.
intros dsubst gsubst lgsubst Hwflg.
inversion Hwflg; subst.
(* inversion Hwflg1 *)
inversion H2; subst.
inversion H1; subst.
simpl in *.
destruct (x==x); try solve [contradict n; auto].
destruct (X==X); try solve [contradict n; auto].
assert (X `notin` fv_te e) as Xn1.
apply notin_fv_te_typing with (X:=X) in H9; auto.
assert (x `notin` fv_ee h) as xn1.
apply notin_fv_ee_typing with (y:=x) in Htyping; auto.
assert (X `notin` fv_tt A) as Xn2.
apply notin_fv_wf with (X:=X) in J; auto.
assert (X `notin` fv_te h) as Xn3.
apply notin_fv_te_typing with (X:=X) in Htyping; auto.
rewrite <- subst_ee_fresh; auto.
rewrite <- subst_tt_fresh; auto.
rewrite <- subst_te_fresh; auto.
rewrite <- subst_te_fresh; auto.
rewrite <- subst_tt_fresh in H9; auto.
apply F_nobservational_eq__trans with (e':=exp_app (exp_tapp h T) (exp_abs A (exp_app e 0))); auto.
assert (JJ':=@JJ h e T Htyping H13 H9).
destruct JJ' as [J1 [J2 [J3 J4]]]; auto.
assert (typing nil nil (exp_tapp h T) (typ_arrow (typ_arrow A T) T)) as Ht_hT.
assert (open_tt (typ_arrow (typ_arrow A 0) 0) T = typ_arrow (typ_arrow A T) T) as EQ.
unfold open_tt. simpl. rewrite <- open_tt_rec_type; auto.
rewrite <- EQ.
apply typing_tapp; auto.
assert (exists v0, normalize (exp_tapp h T) v0) as Hn_hT_v0.
apply strong_normalization with (t:=typ_arrow (typ_arrow A T) T); auto.
destruct Hn_hT_v0 as [v0 Hn_hT_v0].
assert (F_nobservational_eq nil nil (exp_app (exp_tapp h T) e) (exp_app v0 e) T) as hT_eq_v0.
apply F_observational_eq__F_nobservational_eq.
apply F_observational_eq__mbeta.
apply typing_app with (D1:=nil) (D2:=nil) (T1:=typ_arrow A T); auto.
destruct Hn_hT_v0.
apply bigstep_red_app; auto.
assert (F_nobservational_eq nil nil (exp_app (exp_tapp h T) (exp_abs A (exp_app e 0))) (exp_app v0 (exp_abs A (exp_app e 0))) T) as hT_eq_eta_e.
apply F_observational_eq__F_nobservational_eq.
apply F_observational_eq__mbeta.
apply typing_app with (D1:=nil) (D2:=nil) (T1:=typ_arrow A T); auto.
apply typing_eta_abs; auto.
destruct Hn_hT_v0.
apply bigstep_red_app; auto.
apply expr_abs with (L:={}); auto.
intros x0 Hx0. unfold open_ee. simpl.
rewrite <- open_ee_rec_expr; auto.
apply F_nobservational_eq__trans with (e':=exp_app v0 (exp_abs A (exp_app e 0))); auto.
apply F_nobservational_eq__sym in hT_eq_v0.
apply F_nobservational_eq__trans with (e':=exp_app v0 e); auto.
destruct Hn_hT_v0 as [Hv0 Hr_hT_v0].
apply F_observational_eq__F_nobservational_eq.
apply F_observational_eq__congr_app with (lE1:=nil) (lE2:=nil) (t1:=typ_arrow A T); auto.
apply F_observational_eq__refl; auto.
apply preservation_bigstep_red with (e:=exp_tapp h T); auto.
apply F_observational_eq__sym.
apply F_observational_eq__eta_abs; auto.
(* inversion Hwflg2 *)
inversion H1; subst.
inversion H5; subst.
simpl in *.
assert (X `notin` fv_tt (typ_arrow A X)) as Xn1.
apply notin_fv_wf with (X:=X) in H14; auto.
simpl in Xn1.
destruct_notin.
contradict NotInTac1; auto.
SSCase "EQ222".
apply F_observational_eq__sym.
apply F_observational_eq__eta_tabs_abs; auto.
Qed.
(*
*** Local Variables: ***
*** coq-prog-name: "coqtop" ***
*** coq-prog-args: ("-emacs-U" "-I" "../../../metatheory/" "-I" "../Bang/") ***
*** End: ***
*)
|
lemma monoseq_Suc: "monoseq X \<longleftrightarrow> (\<forall>n. X n \<le> X (Suc n)) \<or> (\<forall>n. X (Suc n) \<le> X n)" |
{-# OPTIONS --safe #-}
module Cubical.HITs.SequentialColimit.Base where

open import Cubical.Foundations.Prelude
open import Cubical.Data.Nat

private
  variable
    ℓ : Level

-- A sequence of types linked by maps:  space 0 → space 1 → space 2 → ⋯
record Sequence (ℓ : Level) : Type (ℓ-suc ℓ) where
  field
    space : ℕ → Type ℓ
    map : {n : ℕ} → space n → space (1 + n)

open Sequence

-- The sequential (homotopy) colimit of a sequence, as a HIT:
-- a point constructor for every stage, and a path identifying each
-- point with its image under the sequence map.
data Lim→ (X : Sequence ℓ) : Type ℓ where
  inl : {n : ℕ} → X .space n → Lim→ X
  push : {n : ℕ} (x : X .space n) → inl x ≡ inl (X .map x)
|
||| An example EDSL to describe multiple file interactions, and a
||| handler for the IO computation context.
|||
module Example.Files
import public Resources
%access public export
%default total
namespace AbstractST

  ||| Files are open for reading or writing.
  public export
  data FMode = R | W

  ||| Abstract state: a handle is open (in some mode) or closed.
  public export
  data FileState = Open FMode | Closed

  ||| Singleton label type for file-handle resources.
  public export
  data FH = MkFH

  {- [ NOTE ]

     Type synonym to make writing the language easier.
  -}
  public export
  FHStateType : FH -> Type
  FHStateType _ = FileState

  ||| A variable standing for a file-handle resource.
  public export
  FileHandle : Type
  FileHandle = Var FH MkFH

  ||| A (handle, abstract state) pair as stored in the type-level context.
  public export
  FileStateItem : Type
  FileStateItem = StateItem FH FHStateType MkFH
{- [ NOTE ]
Predicates to reason about file handlers.
-}
namespace Predicates

  ||| Reason about a file handle's current abstract state:
  ||| the item records `hdl` as open in mode `mode`.
  public export
  data IsOpenFor : (hdl : Var FH MkFH)
                -> (mode : FMode)
                -> (item : FileStateItem)
                -> Type where
    FileIsOpenFor : (m : FMode) -> IsOpenFor hdl m (MkStateItem MkFH hdl (Open m))

  ||| Does the abstract state belong to the given handle.
  public export
  data IsHandle : (hdl : Var FH MkFH)
               -> (item : FileStateItem)
               -> Type where
    FileExists : (hdl : Var FH MkFH) -> IsHandle hdl (MkStateItem MkFH hdl st)
{- [ NOTE ]
Functions to update FileHandle abstract states.
-}
namespace Updates

  ||| Transition an open handle's abstract state to `Closed`,
  ||| keeping its label.
  public export
  closeHandle : (item : FileStateItem)
             -> (prf : IsOpenFor hdl m item)
             -> FileStateItem
  closeHandle (MkStateItem MkFH label _) prf = MkStateItem MkFH label Closed
namespace Definition

  {- [ NOTE ]

     Type level functions to perform state transitions and update the
     type-level context.
  -}

  ||| A successful open adds a fresh handle (open in mode `fm`) to the
  ||| context; a failed open leaves the context unchanged.
  public export
  openTrans : Either FileError (Var FH MkFH)
           -> FMode
           -> Context FH FHStateType
           -> Context FH FHStateType
  openTrans (Left _)  _  old = old
  openTrans (Right x) fm old = MkStateItem MkFH x (Open fm) :: old

  ||| A failed read closes the handle; a successful read leaves it open.
  public export
  readTrans : Either FileError String
           -> (old : Context FH FHStateType)
           -> InContext MkFH (IsOpenFor hdl R) old
           -> Context FH FHStateType
  readTrans (Right _) old _   = old
  readTrans (Left _)  old prf = update old prf (\i,p => closeHandle i p)

  ||| A failed write closes the handle; a successful write leaves it open.
  public export
  writeTrans : Maybe FileError
            -> (old : Context FH FHStateType)
            -> (prf : InContext MkFH (IsOpenFor hdl W) old)
            -> Context FH FHStateType
  writeTrans Nothing  old _   = old
  writeTrans (Just _) old prf = update old prf (\i,p => closeHandle i p)

  ||| The language definition for working with multiple file handlers.
  public export
  data Files : Lang FH FHStateType where
    ||| Open the given file in the presented mode.
    Open : (fname : String)
        -> (fm : FMode)
        -> Files (Either FileError (Var FH MkFH))
                 old
                 (\res => openTrans res fm old)
    ||| Read a line from the open (for reading) file handle.
    Read : (hdl : Var FH MkFH)
        -> (prf : InContext MkFH (IsOpenFor hdl R) old)
        -> Files (Either FileError String) old (\res => readTrans res old prf)
    ||| Write the given String to the open (for writing) file handle.
    Write : (hdl : Var FH MkFH)
         -> (msg : String)
         -> (prf : InContext MkFH (IsOpenFor hdl W) old)
         -> Files (Maybe FileError) old (\res => writeTrans res old prf)
    ||| Close the open file handle.
    Close : (hdl : Var FH MkFH)
         -> (prf : InContext MkFH (IsHandle hdl) old)
         -> Files () old (const $ drop old prf)
    ||| Print stuff.
    PrintLn : Show a => a -> Files () old (const old)
{- [ NOTE ]
A high-level API to embed language expressions within `LangM`, and to
calculate language proofs.
-}
namespace API

  ||| The Files language bundled for use with `LangM`.
  public export
  FILES : LANG FH FHStateType
  FILES = MkLang FH FHStateType Files

  ||| Open the given file in the presented mode.
  public export
  openFile : (fname : String)
          -> (fm : FMode)
          -> LangM m (Either FileError (Var FH MkFH)) FILES old (\res => openTrans res fm old)
  openFile fname fm = expr $ Open fname fm

  ||| Read a line from the open (for reading) file handle.
  public export
  readString : (hdl : Var FH MkFH)
            -> {auto prf : InContext MkFH (IsOpenFor hdl R) old}
            -> LangM m (Either FileError String) FILES old (\res => readTrans res old prf)
  readString hdl {prf} = expr (Read hdl prf)

  ||| Write the given String to the open (for writing) file handle.
  public export
  writeString : (hdl : Var FH MkFH)
             -> String
             -> {auto prf : InContext MkFH (IsOpenFor hdl W) old}
             -> LangM m (Maybe FileError) FILES old (\res => writeTrans res old prf)
  writeString hdl str {prf} = expr (Write hdl str prf)

  ||| Close the open file handle.
  public export
  closeFile : (hdl : Var FH MkFH)
           -> {auto prf : InContext MkFH (IsHandle hdl) old}
           -> LangM m () FILES old (const $ drop old prf)
  closeFile hdl {prf} = expr (Close hdl prf)

  ||| Print showable things on a line.
  public export
  printLn : Show a
         => a
         -> LangM m () FILES old (const old)
  printLn a = expr (PrintLn a)
||| Type given to programs to ensure that they are closed:
||| a program must start from, and finish with, an empty resource
||| context, so every opened handle has been closed on exit.
public export
Files : (m : Type -> Type) -> Type -> Type
Files m type = LangM m type FILES Nil (const Nil)
{- [ NOTE]
A handler for the `IO` context.
-}
namespace ContextIO

  ||| File-handle resources are realised as Idris' concrete `File`.
  public export
  RealVar FH where
    CalcRealType MkFH = File

  ||| How to handle files, in which we map high-level operations to
  ||| unsafe ones from Idris' prelude.
  public export
  Handler FH FHStateType Files () IO where
    handle env (Open fname fm) acc cont = do
      let m = case fm of {R => Read; W => WriteTruncate}
      res <- openFile fname m
      case res of
        Left err => cont (Left err) env acc
        Right fh => cont (Right MkVar) (MkTag fh::env) acc

    handle env (Read hdl prf) acc cont = do
      let MkTag fh = lookup env prf
      res <- fGetLine fh
      case res of
        Left err  => cont (Left err) (update env prf (\i,p => closeHandle i p)) acc
        Right str => cont (Right str) env acc

    handle env (Write hdl str prf) acc cont = do
      let MkTag fh = lookup env prf
      res <- fPutStrLn fh str
      case res of
        Left err => cont (Just err) (update env prf (\i,p => closeHandle i p)) acc
        Right _  => cont Nothing env acc

    handle env (Close hdl prf) acc cont = do
      let MkTag fh = lookup env prf
      closeFile fh
      cont () (drop env prf) acc

    handle env (PrintLn a) acc cont = do
      printLn a
      cont () env acc
||| A sample program to copy a string from one file to another.
|||
||| Each failure branch reports the error and closes any handle still
||| open before aborting, so the resource context is empty on exit.
copy : (a,b : String) -> Files m (Maybe FileError)
copy a b = do
  Right fh <- openFile a R
    | Left err => do {printLn err; pure (Just err)}
  Right str <- readString fh
    | Left err => do {printLn err; closeFile fh; pure (Just err)}
  closeFile fh
  Right fh1 <- openFile b W
    | Left err => do {printLn err; pure (Just err)}
  res <- writeString fh1 str
  case res of
    Nothing  => do {closeFile fh1; pure Nothing}
    Just err => do {printLn err; closeFile fh1; pure (Just err)}
-- --------------------------------------------------------------------- [ EOF ]
|
[GOAL]
α : Type u
inst✝ : Monoid α
a b : α
x✝ : Invertible a
⊢ ⅟a * (a * b) = b
[PROOFSTEP]
rw [← mul_assoc, invOf_mul_self, one_mul]
[GOAL]
α : Type u
inst✝¹ : Monoid α
a b : α
inst✝ : Invertible a
⊢ ⅟a * (a * b) = b
[PROOFSTEP]
rw [← mul_assoc, invOf_mul_self, one_mul]
[GOAL]
α : Type u
inst✝ : Monoid α
a b : α
x✝ : Invertible a
⊢ a * (⅟a * b) = b
[PROOFSTEP]
rw [← mul_assoc, mul_invOf_self, one_mul]
[GOAL]
α : Type u
inst✝¹ : Monoid α
a b : α
inst✝ : Invertible a
⊢ a * (⅟a * b) = b
[PROOFSTEP]
rw [← mul_assoc, mul_invOf_self, one_mul]
[GOAL]
α : Type u
inst✝ : Monoid α
a b : α
x✝ : Invertible b
⊢ a * ⅟b * b = a
[PROOFSTEP]
simp [mul_assoc]
[GOAL]
α : Type u
inst✝¹ : Monoid α
a b : α
inst✝ : Invertible b
⊢ a * ⅟b * b = a
[PROOFSTEP]
simp [mul_assoc]
[GOAL]
α : Type u
inst✝ : Monoid α
a b : α
x✝ : Invertible b
⊢ a * b * ⅟b = a
[PROOFSTEP]
simp [mul_assoc]
[GOAL]
α : Type u
inst✝¹ : Monoid α
a b : α
inst✝ : Invertible b
⊢ a * b * ⅟b = a
[PROOFSTEP]
simp [mul_assoc]
[GOAL]
α✝ α : Type u
inst✝² : Monoid α
a b : α
inst✝¹ : Invertible a
inst✝ : Invertible b
h : a = b
⊢ ⅟a = ⅟b
[PROOFSTEP]
apply invOf_eq_right_inv
[GOAL]
case hac
α✝ α : Type u
inst✝² : Monoid α
a b : α
inst✝¹ : Invertible a
inst✝ : Invertible b
h : a = b
⊢ a * ⅟b = 1
[PROOFSTEP]
rw [h, mul_invOf_self]
[GOAL]
α : Type u
inst✝ : Monoid α
a : α
x✝¹ x✝ : Invertible a
b : α
hba : b * a = 1
hab : a * b = 1
c : α
invOf_mul_self✝ : c * a = 1
hac : a * c = 1
⊢ { invOf := b, invOf_mul_self := hba, mul_invOf_self := hab } =
{ invOf := c, invOf_mul_self := invOf_mul_self✝, mul_invOf_self := hac }
[PROOFSTEP]
congr
[GOAL]
case e_invOf
α : Type u
inst✝ : Monoid α
a : α
x✝¹ x✝ : Invertible a
b : α
hba : b * a = 1
hab : a * b = 1
c : α
invOf_mul_self✝ : c * a = 1
hac : a * c = 1
⊢ b = c
[PROOFSTEP]
exact left_inv_eq_right_inv hba hac
[GOAL]
α : Type u
inst✝ : MulOneClass α
r : α
hr : Invertible r
s si : α
hs : s = r
hsi : si = ⅟r
⊢ si * s = 1
[PROOFSTEP]
rw [hs, hsi, invOf_mul_self]
[GOAL]
α : Type u
inst✝ : MulOneClass α
r : α
hr : Invertible r
s si : α
hs : s = r
hsi : si = ⅟r
⊢ s * si = 1
[PROOFSTEP]
rw [hs, hsi, mul_invOf_self]
[GOAL]
α : Type u
inst✝² : Ring α
a b : α
inst✝¹ : Invertible a
inst✝ : Invertible b
h : a = b
⊢ ⅟a = ⅟b
[PROOFSTEP]
subst h
[GOAL]
α : Type u
inst✝² : Ring α
a : α
inst✝¹ inst✝ : Invertible a
⊢ ⅟a = ⅟a
[PROOFSTEP]
congr
[GOAL]
case h.e_5.h
α : Type u
inst✝² : Ring α
a : α
inst✝¹ inst✝ : Invertible a
⊢ inst✝¹ = inst✝
[PROOFSTEP]
apply Subsingleton.allEq
[GOAL]
α : Type u
inst✝¹ : Monoid α
a : α
inst✝ : Invertible a
⊢ a * ⅟a = 1
[PROOFSTEP]
simp
[GOAL]
α : Type u
inst✝¹ : Monoid α
a : α
inst✝ : Invertible a
⊢ ⅟a * a = 1
[PROOFSTEP]
simp
[GOAL]
α : Type u
inst✝³ : Mul α
inst✝² : One α
inst✝¹ : HasDistribNeg α
a : α
inst✝ : Invertible a
⊢ -⅟a * -a = 1
[PROOFSTEP]
simp
[GOAL]
α : Type u
inst✝³ : Mul α
inst✝² : One α
inst✝¹ : HasDistribNeg α
a : α
inst✝ : Invertible a
⊢ -a * -⅟a = 1
[PROOFSTEP]
simp
[GOAL]
α : Type u
inst✝³ : Monoid α
inst✝² : HasDistribNeg α
a : α
inst✝¹ : Invertible a
inst✝ : Invertible (-a)
⊢ -a * -⅟a = 1
[PROOFSTEP]
simp
[GOAL]
α : Type u
inst✝¹ : Ring α
inst✝ : Invertible 2
⊢ 2 * (1 - ⅟2) = 2 * ⅟2
[PROOFSTEP]
rw [mul_sub, mul_invOf_self, mul_one, ← one_add_one_eq_two, add_sub_cancel]
[GOAL]
α : Type u
inst✝¹ : NonAssocSemiring α
inst✝ : Invertible 2
⊢ ⅟2 + ⅟2 = 1
[PROOFSTEP]
rw [← two_mul, mul_invOf_self]
[GOAL]
α : Type u
inst✝² : Monoid α
a b : α
inst✝¹ : Invertible a
inst✝ : Invertible b
⊢ ⅟b * ⅟a * (a * b) = 1
[PROOFSTEP]
simp [← mul_assoc]
[GOAL]
α : Type u
inst✝² : Monoid α
a b : α
inst✝¹ : Invertible a
inst✝ : Invertible b
⊢ a * b * (⅟b * ⅟a) = 1
[PROOFSTEP]
simp [← mul_assoc]
[GOAL]
α : Type u
inst✝³ : Monoid α
a b : α
inst✝² : Invertible a
inst✝¹ : Invertible b
inst✝ : Invertible (a * b)
⊢ a * b * (⅟b * ⅟a) = 1
[PROOFSTEP]
simp [← mul_assoc]
[GOAL]
α : Type u
a b : α
inst✝¹ : Monoid α
c : α
inst✝ : Invertible c
h : a * c = b * c
⊢ a = b
[PROOFSTEP]
simpa using congr_arg (· * ⅟c) h
[GOAL]
α : Type u
a b : α
inst✝¹ : Monoid α
c : α
inst✝ : Invertible c
h : c * a = c * b
⊢ a = b
[PROOFSTEP]
simpa using congr_arg (⅟c * ·) h
[GOAL]
α : Type u
c a b : α
inst✝¹ : Monoid α
inst✝ : Invertible c
⊢ ⅟c * a = b ↔ a = c * b
[PROOFSTEP]
rw [← mul_left_inj_of_invertible (c := c), mul_invOf_self_assoc]
[GOAL]
α : Type u
c a b : α
inst✝¹ : Monoid α
inst✝ : Invertible c
⊢ c * a = b ↔ a = ⅟c * b
[PROOFSTEP]
rw [← mul_left_inj_of_invertible (c := ⅟c), invOf_mul_self_assoc]
[GOAL]
α : Type u
c a b : α
inst✝¹ : Monoid α
inst✝ : Invertible c
⊢ a * ⅟c = b ↔ a = b * c
[PROOFSTEP]
rw [← mul_right_inj_of_invertible (c := c), mul_invOf_mul_self_cancel]
[GOAL]
α : Type u
c a b : α
inst✝¹ : Monoid α
inst✝ : Invertible c
⊢ a * c = b ↔ a = b * ⅟c
[PROOFSTEP]
rw [← mul_right_inj_of_invertible (c := ⅟c), mul_mul_invOf_self_cancel]
[GOAL]
α : Type u
inst✝¹ : Monoid α
a b : α
inst✝ : Invertible b
h : Commute a b
⊢ a * ⅟b = ⅟b * (b * a * ⅟b)
[PROOFSTEP]
simp [mul_assoc]
[GOAL]
α : Type u
inst✝¹ : Monoid α
a b : α
inst✝ : Invertible b
h : Commute a b
⊢ ⅟b * (b * a * ⅟b) = ⅟b * (a * b * ⅟b)
[PROOFSTEP]
rw [h.eq]
[GOAL]
α : Type u
inst✝¹ : Monoid α
a b : α
inst✝ : Invertible b
h : Commute a b
⊢ ⅟b * (a * b * ⅟b) = ⅟b * a
[PROOFSTEP]
simp [mul_assoc]
[GOAL]
α : Type u
inst✝¹ : Monoid α
a b : α
inst✝ : Invertible b
h : Commute b a
⊢ ⅟b * a = ⅟b * (a * b * ⅟b)
[PROOFSTEP]
simp [mul_assoc]
[GOAL]
α : Type u
inst✝¹ : Monoid α
a b : α
inst✝ : Invertible b
h : Commute b a
⊢ ⅟b * (a * b * ⅟b) = ⅟b * (b * a * ⅟b)
[PROOFSTEP]
rw [h.eq]
[GOAL]
α : Type u
inst✝¹ : Monoid α
a b : α
inst✝ : Invertible b
h : Commute b a
⊢ ⅟b * (b * a * ⅟b) = a * ⅟b
[PROOFSTEP]
simp [mul_assoc]
[GOAL]
α : Type u
inst✝² : MulZeroOneClass α
a : α
inst✝¹ : Nontrivial α
inst✝ : Invertible a
ha : a = 0
⊢ 0 = ⅟a * a
[PROOFSTEP]
simp [ha]
[GOAL]
α : Type u
inst✝² : Monoid α
a b : α
inst✝¹ : Invertible a
inst✝ : Invertible (a * b)
⊢ ⅟(a * b) * a * b = 1
[PROOFSTEP]
rw [mul_assoc, invOf_mul_self]
[GOAL]
α : Type u
inst✝² : Monoid α
a b : α
inst✝¹ : Invertible a
inst✝ : Invertible (a * b)
⊢ b * (⅟(a * b) * a) = 1
[PROOFSTEP]
rw [← (isUnit_of_invertible a).mul_right_inj, ← mul_assoc, ← mul_assoc, mul_invOf_self, mul_one, one_mul]
[GOAL]
α : Type u
inst✝² : Monoid α
a b : α
inst✝¹ : Invertible (a * b)
inst✝ : Invertible b
⊢ b * ⅟(a * b) * a = 1
[PROOFSTEP]
rw [← (isUnit_of_invertible b).mul_left_inj, mul_assoc, mul_assoc, invOf_mul_self, mul_one, one_mul]
[GOAL]
α : Type u
inst✝² : Monoid α
a b : α
inst✝¹ : Invertible (a * b)
inst✝ : Invertible b
⊢ a * (b * ⅟(a * b)) = 1
[PROOFSTEP]
rw [← mul_assoc, mul_invOf_self]
[GOAL]
α : Type u
inst✝² : GroupWithZero α
a b : α
inst✝¹ : Invertible a
inst✝ : Invertible b
⊢ b / a * (a / b) = 1
[PROOFSTEP]
simp [← mul_div_assoc]
[GOAL]
α : Type u
inst✝² : GroupWithZero α
a b : α
inst✝¹ : Invertible a
inst✝ : Invertible b
⊢ a / b * (b / a) = 1
[PROOFSTEP]
simp [← mul_div_assoc]
[GOAL]
α : Type u
inst✝³ : GroupWithZero α
a b : α
inst✝² : Invertible a
inst✝¹ : Invertible b
inst✝ : Invertible (a / b)
⊢ a / b * (b / a) = 1
[PROOFSTEP]
simp [← mul_div_assoc]
[GOAL]
α : Type u
inst✝¹ : GroupWithZero α
a : α
inst✝ : Invertible a
⊢ a * a⁻¹ = 1
[PROOFSTEP]
simp
[GOAL]
α : Type u
inst✝¹ : GroupWithZero α
a : α
inst✝ : Invertible a
⊢ a⁻¹ * a = 1
[PROOFSTEP]
simp
[GOAL]
α : Type u
R : Type u_1
S : Type u_2
F : Type u_3
inst✝³ : MulOneClass R
inst✝² : MulOneClass S
inst✝¹ : MonoidHomClass F R S
f : F
r : R
inst✝ : Invertible r
⊢ ↑f ⅟r * ↑f r = 1
[PROOFSTEP]
rw [← map_mul, invOf_mul_self, map_one]
[GOAL]
α : Type u
R : Type u_1
S : Type u_2
F : Type u_3
inst✝³ : MulOneClass R
inst✝² : MulOneClass S
inst✝¹ : MonoidHomClass F R S
f : F
r : R
inst✝ : Invertible r
⊢ ↑f r * ↑f ⅟r = 1
[PROOFSTEP]
rw [← map_mul, mul_invOf_self, map_one]
[GOAL]
α : Type u
R : Type u_1
S : Type u_2
F : Type u_3
inst✝³ : MulOneClass R
inst✝² : Monoid S
inst✝¹ : MonoidHomClass F R S
f : F
r : R
inst✝ : Invertible r
ifr : Invertible (↑f r)
h : ifr = Invertible.map f r
⊢ ↑f ⅟r = ⅟(↑f r)
[PROOFSTEP]
subst h
[GOAL]
α : Type u
R : Type u_1
S : Type u_2
F : Type u_3
inst✝³ : MulOneClass R
inst✝² : Monoid S
inst✝¹ : MonoidHomClass F R S
f : F
r : R
inst✝ : Invertible r
⊢ ↑f ⅟r = ⅟(↑f r)
[PROOFSTEP]
rfl
|
module Text.PrettyPrint.Prettyprinter.Symbols
import Text.PrettyPrint.Prettyprinter.Doc
%default total
-- Single-character atomic documents --------------------------------------
||| A single quote: `'`.
export
squote : Doc ann
squote = pretty '\''
||| A double quote: `"`.
export
dquote : Doc ann
dquote = pretty '"'
||| A left parenthesis: `(`.
export
lparen : Doc ann
lparen = pretty '('
||| A right parenthesis: `)`.
export
rparen : Doc ann
rparen = pretty ')'
||| A left angle bracket: `<`.
export
langle : Doc ann
langle = pretty '<'
||| A right angle bracket: `>`.
export
rangle : Doc ann
rangle = pretty '>'
||| A left square bracket: `[`.
export
lbracket : Doc ann
lbracket = pretty '['
||| A right square bracket: `]`.
export
rbracket : Doc ann
rbracket = pretty ']'
||| A left brace: `{`.
export
lbrace : Doc ann
lbrace = pretty '{'
||| A right brace: `}`.
export
rbrace : Doc ann
rbrace = pretty '}'
||| A semicolon: `;`.
export
semi : Doc ann
semi = pretty ';'
||| A colon: `:`.
export
colon : Doc ann
colon = pretty ':'
||| A comma: `,`.
export
comma : Doc ann
comma = pretty ','
||| A single space character.
export
space : Doc ann
space = pretty ' '
||| A period: `.`.
export
dot : Doc ann
dot = pretty '.'
||| A forward slash: `/`.
export
slash : Doc ann
slash = pretty '/'
||| A backslash: `\`.
export
backslash : Doc ann
backslash = pretty '\\'
||| An equals sign: `=`.
export
equals : Doc ann
equals = pretty '='
||| A vertical bar: `|`.
export
pipe : Doc ann
pipe = pretty '|'
-- Bracketing combinators: enclose a document in a matching pair ----------
||| Wrap a document in single quotes.
export
squotes : Doc ann -> Doc ann
squotes = enclose squote squote
||| Wrap a document in double quotes.
export
dquotes : Doc ann -> Doc ann
dquotes = enclose dquote dquote
||| Wrap a document in parentheses.
export
parens : Doc ann -> Doc ann
parens = enclose lparen rparen
||| Wrap a document in angle brackets.
export
angles : Doc ann -> Doc ann
angles = enclose langle rangle
||| Wrap a document in square brackets.
export
brackets : Doc ann -> Doc ann
brackets = enclose lbracket rbracket
||| Wrap a document in braces.
export
braces : Doc ann -> Doc ann
braces = enclose lbrace rbrace
|
[STATEMENT]
lemma infdist_nonneg: "0 \<le> infdist x A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 \<le> infdist x A
[PROOF STEP]
by (auto simp: infdist_def intro: cINF_greatest) |
/-
Copyright (c) 2022 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import order.hom.basic
import logic.relation
/-!
# Turning a preorder into a partial order
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file allows to make a preorder into a partial order by quotienting out the elements `a`, `b`
such that `a ≤ b` and `b ≤ a`.
`antisymmetrization` is a functor from `Preorder` to `PartialOrder`. See `Preorder_to_PartialOrder`.
## Main declarations
* `antisymm_rel`: The antisymmetrization relation. `antisymm_rel r a b` means that `a` and `b` are
related both ways by `r`.
* `antisymmetrization α r`: The quotient of `α` by `antisymm_rel r`. Even when `r` is just a
preorder, `antisymmetrization α` is a partial order.
-/
open function order_dual
variables {α β : Type*}
section relation
variables (r : α → α → Prop)
/-- The antisymmetrization relation. -/
def antisymm_rel (a b : α) : Prop := r a b ∧ r b a
lemma antisymm_rel_swap : antisymm_rel (swap r) = antisymm_rel r :=
funext $ λ _, funext $ λ _, propext and.comm
@[refl] lemma antisymm_rel_refl [is_refl α r] (a : α) : antisymm_rel r a a := ⟨refl _, refl _⟩
variables {r}
@[symm] lemma antisymm_rel.symm {a b : α} : antisymm_rel r a b → antisymm_rel r b a := and.symm
@[trans] lemma antisymm_rel.trans [is_trans α r] {a b c : α} (hab : antisymm_rel r a b)
(hbc : antisymm_rel r b c) :
antisymm_rel r a c :=
⟨trans hab.1 hbc.1, trans hbc.2 hab.2⟩
instance antisymm_rel.decidable_rel [decidable_rel r] : decidable_rel (antisymm_rel r) :=
λ _ _, and.decidable
@[simp] lemma antisymm_rel_iff_eq [is_refl α r] [is_antisymm α r] {a b : α} :
antisymm_rel r a b ↔ a = b := antisymm_iff
alias antisymm_rel_iff_eq ↔ antisymm_rel.eq _
end relation
section is_preorder
variables (α) (r : α → α → Prop) [is_preorder α r]
/-- The antisymmetrization relation as an equivalence relation. -/
@[simps] def antisymm_rel.setoid : setoid α :=
⟨antisymm_rel r, antisymm_rel_refl _, λ _ _, antisymm_rel.symm, λ _ _ _, antisymm_rel.trans⟩
/-- The partial order derived from a preorder by making pairwise comparable elements equal. This is
the quotient by `λ a b, a ≤ b ∧ b ≤ a`. -/
def antisymmetrization : Type* := quotient $ antisymm_rel.setoid α r
variables {α}
/-- Turn an element into its antisymmetrization. -/
def to_antisymmetrization : α → antisymmetrization α r := quotient.mk'
/-- Get a representative from the antisymmetrization. -/
noncomputable def of_antisymmetrization : antisymmetrization α r → α := quotient.out'
instance [inhabited α] : inhabited (antisymmetrization α r) := quotient.inhabited _
@[elab_as_eliminator]
protected lemma antisymmetrization.ind {p : antisymmetrization α r → Prop} :
(∀ a, p $ to_antisymmetrization r a) → ∀ q, p q :=
quot.ind
@[elab_as_eliminator]
protected lemma antisymmetrization.induction_on {p : antisymmetrization α r → Prop}
(a : antisymmetrization α r) (h : ∀ a, p $ to_antisymmetrization r a) : p a :=
quotient.induction_on' a h
@[simp] lemma to_antisymmetrization_of_antisymmetrization (a : antisymmetrization α r) :
to_antisymmetrization r (of_antisymmetrization r a) = a := quotient.out_eq' _
end is_preorder
section preorder
variables {α} [preorder α] [preorder β] {a b : α}
lemma antisymm_rel.image {a b : α} (h : antisymm_rel (≤) a b) {f : α → β} (hf : monotone f) :
antisymm_rel (≤) (f a) (f b) :=
⟨hf h.1, hf h.2⟩
instance : partial_order (antisymmetrization α (≤)) :=
{ le := λ a b, quotient.lift_on₂' a b (≤) $ λ (a₁ a₂ b₁ b₂ : α) h₁ h₂,
propext ⟨λ h, h₁.2.trans $ h.trans h₂.1, λ h, h₁.1.trans $ h.trans h₂.2⟩,
lt := λ a b, quotient.lift_on₂' a b (<) $ λ (a₁ a₂ b₁ b₂ : α) h₁ h₂,
propext ⟨λ h, h₁.2.trans_lt $ h.trans_le h₂.1, λ h, h₁.1.trans_lt $ h.trans_le h₂.2⟩,
le_refl := λ a, quotient.induction_on' a $ le_refl,
le_trans := λ a b c, quotient.induction_on₃' a b c $ λ a b c, le_trans,
lt_iff_le_not_le := λ a b, quotient.induction_on₂' a b $ λ a b, lt_iff_le_not_le,
le_antisymm := λ a b, quotient.induction_on₂' a b $ λ a b hab hba, quotient.sound' ⟨hab, hba⟩ }
lemma antisymmetrization_fibration :
relation.fibration (<) (<) (@to_antisymmetrization α (≤) _) :=
by { rintro a ⟨b⟩ h, exact ⟨b, h, rfl⟩ }
lemma acc_antisymmetrization_iff : acc (<) (to_antisymmetrization (≤) a) ↔ acc (<) a :=
acc_lift_on₂'_iff
lemma well_founded_antisymmetrization_iff :
well_founded (@has_lt.lt (antisymmetrization α (≤)) _) ↔ well_founded (@has_lt.lt α _) :=
well_founded_lift_on₂'_iff
instance [well_founded_lt α] : well_founded_lt (antisymmetrization α (≤)) :=
⟨well_founded_antisymmetrization_iff.2 is_well_founded.wf⟩
instance [@decidable_rel α (≤)] [@decidable_rel α (<)] [is_total α (≤)] :
linear_order (antisymmetrization α (≤)) :=
{ le_total := λ a b, quotient.induction_on₂' a b $ total_of (≤),
decidable_eq := @quotient.decidable_eq _ (antisymm_rel.setoid _ (≤)) antisymm_rel.decidable_rel,
decidable_le := λ _ _, quotient.lift_on₂'.decidable _ _ _ _,
decidable_lt := λ _ _, quotient.lift_on₂'.decidable _ _ _ _,
..antisymmetrization.partial_order }
@[simp] lemma to_antisymmetrization_le_to_antisymmetrization_iff :
to_antisymmetrization (≤) a ≤ to_antisymmetrization (≤) b ↔ a ≤ b := iff.rfl
@[simp] lemma to_antisymmetrization_lt_to_antisymmetrization_iff :
to_antisymmetrization (≤) a < to_antisymmetrization (≤) b ↔ a < b := iff.rfl
@[simp] lemma of_antisymmetrization_le_of_antisymmetrization_iff {a b : antisymmetrization α (≤)} :
of_antisymmetrization (≤) a ≤ of_antisymmetrization (≤) b ↔ a ≤ b :=
rel_embedding.map_rel_iff (quotient.out'_rel_embedding _)
@[simp] lemma of_antisymmetrization_lt_of_antisymmetrization_iff {a b : antisymmetrization α (≤)} :
of_antisymmetrization (≤) a < of_antisymmetrization (≤) b ↔ a < b :=
(quotient.out'_rel_embedding _).map_rel_iff
@[mono] lemma to_antisymmetrization_mono : monotone (@to_antisymmetrization α (≤) _) := λ a b, id
/-- `to_antisymmetrization` as an order homomorphism. -/
@[simps] def order_hom.to_antisymmetrization : α →o antisymmetrization α (≤) :=
⟨to_antisymmetrization (≤), λ a b, id⟩
private lemma lift_fun_antisymm_rel (f : α →o β) :
((antisymm_rel.setoid α (≤)).r ⇒ (antisymm_rel.setoid β (≤)).r) f f :=
λ a b h, ⟨f.mono h.1, f.mono h.2⟩
/-- Turns an order homomorphism from `α` to `β` into one from `antisymmetrization α` to
`antisymmetrization β`. `antisymmetrization` is actually a functor. See `Preorder_to_PartialOrder`.
-/
protected def order_hom.antisymmetrization (f : α →o β) :
antisymmetrization α (≤) →o antisymmetrization β (≤) :=
⟨quotient.map' f $ lift_fun_antisymm_rel f, λ a b, quotient.induction_on₂' a b $ f.mono⟩
@[simp] lemma order_hom.coe_antisymmetrization (f : α →o β) :
⇑f.antisymmetrization = quotient.map' f (lift_fun_antisymm_rel f) := rfl
@[simp] lemma order_hom.antisymmetrization_apply (f : α →o β) (a : antisymmetrization α (≤)) :
f.antisymmetrization a = quotient.map' f (lift_fun_antisymm_rel f) a := rfl
@[simp] lemma order_hom.antisymmetrization_apply_mk (f : α →o β) (a : α) :
f.antisymmetrization (to_antisymmetrization _ a) = (to_antisymmetrization _ (f a)) :=
quotient.map'_mk' f (lift_fun_antisymm_rel f) _
variables (α)
/-- `of_antisymmetrization` as an order embedding. -/
@[simps] noncomputable def order_embedding.of_antisymmetrization : antisymmetrization α (≤) ↪o α :=
{ to_fun := of_antisymmetrization _,
..quotient.out'_rel_embedding _ }
/-- `antisymmetrization` and `order_dual` commute. -/
def order_iso.dual_antisymmetrization :
(antisymmetrization α (≤))ᵒᵈ ≃o antisymmetrization αᵒᵈ (≤) :=
{ to_fun := quotient.map' id $ λ _ _, and.symm,
inv_fun := quotient.map' id $ λ _ _, and.symm,
left_inv := λ a, quotient.induction_on' a $ λ a, by simp_rw [quotient.map'_mk', id],
right_inv := λ a, quotient.induction_on' a $ λ a, by simp_rw [quotient.map'_mk', id],
map_rel_iff' := λ a b, quotient.induction_on₂' a b $ λ a b, iff.rfl }
@[simp] lemma order_iso.dual_antisymmetrization_apply (a : α) :
order_iso.dual_antisymmetrization _ (to_dual $ to_antisymmetrization _ a) =
to_antisymmetrization _ (to_dual a) := rfl
@[simp] lemma order_iso.dual_antisymmetrization_symm_apply (a : α) :
(order_iso.dual_antisymmetrization _).symm (to_antisymmetrization _ $ to_dual a) =
to_dual (to_antisymmetrization _ a) := rfl
end preorder
|
# Taylor problem 5.32
last revised: 12-Jan-2019 by Dick Furnstahl [[email protected]]
**Replace ### by appropriate expressions.**
The equation for an underdamped oscillator, such as a mass on the end of a spring, takes the form
$\begin{align}
x(t) = e^{-\beta t} [B_1 \cos(\omega_1 t) + B_2 \sin(\omega_1 t)]
\end{align}$
where
$\begin{align}
\omega_1 = \sqrt{\omega_0^2 - \beta^2}
\end{align}$
and the mass is released from rest at position $x_0$ at $t=0$.
**Goal: plot $x(t)$ for $0 \leq t \leq 20$, with $x_0 = 1$, $\omega_0=1$, and $\beta = 0.$, 0.02, 0.1, 0.3, and 1.**
```python
import numpy as np
import matplotlib.pyplot as plt
```
```python
def underdamped(t, beta, omega_0=1, x_0=1):
    """Solution x(t) for an underdamped harmonic oscillator.

    The mass is released from rest at position x_0 at t = 0, so the
    initial conditions x(0) = x_0 and x'(0) = 0 fix the coefficients:
        B_1 = x_0
        B_2 = x_0 * beta / omega_1

    Parameters
    ----------
    t : float or np.ndarray
        Time(s) at which to evaluate the solution.
    beta : float
        Damping parameter; must satisfy beta < omega_0 (underdamped).
    omega_0 : float
        Natural frequency of the undamped oscillator.
    x_0 : float
        Release position at t = 0.
    """
    omega_1 = np.sqrt(omega_0**2 - beta**2)
    B_1 = x_0                    # from x(0) = x_0
    B_2 = x_0 * beta / omega_1   # from x'(0) = 0
    return np.exp(-beta*t) \
           * ( B_1 * np.cos(omega_1*t) + B_2 * np.sin(omega_1*t) )
```
```python
t_pts = np.arange(0., 20., .01)
betas = [0., 0.02, 0.1, 0.3, 0.9999]   # 0.9999 avoids omega_1 = 0 at beta = omega_0
fig = plt.figure(figsize=(10,6))
# enumerate yields (index, value) pairs; each beta gets its own subplot
for i, beta in enumerate(betas):
    ax = fig.add_subplot(2, 3, i+1)
    ax.plot(t_pts, underdamped(t_pts, beta), color='blue')
    ax.set_title(rf'$\beta = {beta:.2f}$')
    ax.set_xlabel('t')
    ax.set_ylabel('x(t)')
    ax.set_ylim(-1.1,1.1)
    ax.axhline(0., color='black', alpha=0.3)  # lightened black zero line
fig.tight_layout()
# write the figure to a file
fig.savefig('taylor_5_32_damped_oscillations.png', bbox_inches='tight')
```
## Bonus: Widgetized!
```python
from ipywidgets import interact, fixed
import ipywidgets as widgets
omega_0 = 1.
def plot_beta(beta):
"""Plot function for underdamped harmonic oscillator."""
t_pts = np.arange(0., 20., .01)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(t_pts, underdamped(t_pts, beta), color='blue')
ax.set_title(rf'$\beta = {beta:.2f}$')
ax.set_xlabel('t')
ax.set_ylabel('x(t)')
ax.set_ylim(-1.1,1.1)
ax.axhline(0., color='black', alpha=0.3)
fig.tight_layout()
max_value = omega_0 - 0.0001
interact(plot_beta,
beta=widgets.FloatSlider(min=0., max=max_value, step=0.01,
value=0., readout_format='.2f',
continuous_update=False));
```
Now let's allow for complex numbers! This will enable us to take $\beta > \omega_0$.
```python
# numpy.lib.scimath version of sqrt handles complex numbers.
# numpy exp, cos, and sin already can.
import numpy.lib.scimath as smath
def all_beta(t, beta, omega_0=1, x_0=1):
    """Damped harmonic oscillator x(t), released from rest at x_0.

    Valid both for the underdamped case (beta < omega_0) and, via the
    complex square root, for the overdamped case (beta > omega_0).
    """
    # scimath.sqrt returns a complex root when beta exceeds omega_0
    omega_1 = smath.sqrt(omega_0**2 - beta**2)
    envelope = x_0 * np.exp(-beta * t)
    oscillation = np.cos(omega_1 * t) + (beta / omega_1) * np.sin(omega_1 * t)
    return np.real(envelope * oscillation)
```
```python
from ipywidgets import interact, fixed
import ipywidgets as widgets
omega_0 = 1.
def plot_all_beta(beta):
"""Plot of x(t) for damped harmonic oscillator, allowing for overdamped
as well as underdamped cases."""
t_pts = np.arange(0., 20., .01)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(t_pts, all_beta(t_pts, beta), color='blue')
ax.set_title(rf'$\beta = {beta:.2f}$')
ax.set_xlabel('t')
ax.set_ylabel('x(t)')
ax.set_ylim(-1.1,1.1)
ax.axhline(0., color='black', alpha=0.3)
fig.tight_layout()
interact(plot_all_beta,
beta=widgets.FloatSlider(min=0., max=2, step=0.01,
value=0., readout_format='.2f',
continuous_update=False));
```
|
\chapter{Proof of xyz}
\label{appendix}
\thispagestyle{myheadings}
This is the appendix. |
module ParallelKMeans
using StatsBase
import MLJModelInterface
import Base.Threads: @spawn
import Distances
# Shorthand for the MLJ model interface, used by mlj_interface.jl
const MMI = MLJModelInterface
# Implementation files: cluster seeding, the generic kmeans driver,
# and the Lloyd / Hamerly / Elkan algorithm variants.
include("seeding.jl")
include("kmeans.jl")
include("lloyd.jl")
include("hamerly.jl")
include("elkan.jl")
# MLJ-compatible model wrapper
include("mlj_interface.jl")
# Public API: the kmeans entry point and the selectable algorithm types
export kmeans
export Lloyd, Hamerly, Elkan
end # module
|
function V_corr = make_vcorr(DD,pv,nb,nl,f,Zb)
%MAKE_VCORR Voltage Correction used in distribution power flow
%
%   V_corr = make_vcorr(DD,pv,nb,nl,f,Zb)
%
%   Calculates voltage corrections with current generators placed at PV
%   buses. Their currents are calculated with the voltage difference at PV
%   buses break points and loop impedances. The slack bus voltage is set to
%   zero. Details can be seen in
%   D. Rajicic, R. Ackovski and R. Taleski, "Voltage correction power flow,"
%   IEEE Transactions on Power Delivery, vol. 9, no. 2, pp. 1056-1062, Apr 1994.
%   https://doi.org/10.1109/61.296308
%
%   See also RADIAL_PF.
V_corr = zeros(nb,1);   % corrections; slack bus entry (index 1) stays zero
I = zeros(nb,1);        % branch currents, indexed by receiving bus
I(pv) = DD;             % injected correction currents at PV break points
% backward sweep
% accumulate each branch current into its parent branch f(k),
% moving from the last branch toward the slack bus
for k = nl:-1:2
    i = f(k);
    I(i) = I(i) + I(k);
end
% forward sweep
% propagate the voltage drop -Zb(k)*I(k) outward from the slack bus
for k = 2:nl
    i = f(k);
    V_corr(k) = V_corr(i) - Zb(k) * I(k);
end
|
If $f$ is continuous on the closed interval $[a,b]$, then $f$ attains its maximum and minimum on $[a,b]$. |
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: lda_exc *)
(* prefix:
lda_xc_ksdt_params *params;
assert(p->params != NULL);
params = (lda_xc_ksdt_params * )(p->params);
*)
g := [2/3, -0.0139261, 0.183208]:
l := [1.064009, 0.572565]:
alpha := (t, rs) ->
2 - (g[1] + g[2]*rs)/(1 + g[3]*rs)*exp(-t*(l[1] + l[2]*t*sqrt(rs))):
phi := (malpha, z) -> (opz_pow_n(z,malpha) + opz_pow_n(-z,malpha) - 2)/(2^malpha - 2):
lambda := (4/(9*Pi))^(1/3):
a0 := 1/(Pi*lambda):
a := [0.750, 3.043630, -0.0922700, 1.703500, 8.310510, 5.11050]:
aa := t -> a0*tanh(1/t)*(a[1] + a[2]*t^2 + a[3]*t^3 + a[4]*t^4)/(1 + a[5]*t^2 + a[6]*t^4):
bb := (b, t) -> tanh(1/sqrt(t))*(b[1] + b[2]*t^2 + b[3]*t^4)/(1 + b[4]*t^2 + b[5]*t^4):
dd := (d, t) -> bb(d, t):
ee := (e, t) -> tanh(1/t)*(e[1] + e[2]*t^2 + e[3]*t^4)/(1 + e[4]*t^2 + e[5]*t^4):
cc := (c, e, t) -> (c[1] + c[2]*exp(-c[3]/t))*ee(e, t):
fxc := (omega, b, c, d, e, rs, t) ->
-(omega*aa(t) + bb(b, t)*sqrt(rs) + cc(c, e, t)*rs)/(rs*(1 + dd(d, t)*sqrt(rs) + ee(e, t)*rs)):
# (T/T_F)*opz_pow_n(z,2/3)
mtt := (rs, z) ->
2*(4/(9*Pi))^(2/3)*params_a_T*rs^2*(1 + params_a_thetaParam*z)^(2/3):
f := (rs, z) ->
+ fxc(1,
params_a_b_0_, params_a_c_0_, params_a_d_0_, params_a_e_0_,
rs, mtt(rs, z))*(1 - phi(alpha(mtt(rs, z), rs), z))
+ fxc(2^(1/3),
params_a_b_1_, params_a_c_1_, params_a_d_1_, params_a_e_1_,
rs, mtt(rs, z)/2^(2/3))*phi(alpha(mtt(rs, z), rs), z):
|
SUBROUTINE NDFDINP ( gdfile, gfunc, gdatim, glevel, gvcord,
+ gbfile, center, wmohdr, iret )
C************************************************************************
C* NDFDINP *
C* *
C* This subroutine gets the input parameters for GD2NDFD. *
C* *
C* NDFDINP ( GDFILE, GFUNC, GDATIM, GLEVEL, GVCORD, GBFILE, CENTER, *
C* WMOHDR, IRET ) *
C* *
C** *
C* Log: *
C* T. Piper/SAIC 3/03 Created from GDGUIN.F *
C* T. Piper/SAIC 07/04 Changed VERCEN to CENTER *
C************************************************************************
CHARACTER*(*) gdfile, gfunc, gdatim, glevel, gvcord,
+ gbfile, center, wmohdr
C*
INTEGER ier (8)
C------------------------------------------------------------------------
CALL IP_STR ( 'GDFILE', gdfile, ier(1) )
CALL IP_STR ( 'GFUNC', gfunc, ier(2) )
CALL IP_STR ( 'GDATTIM', gdatim, ier(3) )
CALL IP_STR ( 'GLEVEL', glevel, ier(4) )
CALL IP_STR ( 'GVCORD', gvcord, ier(5) )
CALL IP_STR ( 'GBFILE', gbfile, ier(6) )
CALL IP_STR ( 'CENTER', center, ier(7) )
CALL IP_STR ( 'WMOHDR', wmohdr, ier(8) )
iret = 0
DO i = 1, 8
iret = iret + ier (i)
END DO
IF ( iret .ne. 0 ) iret = -2
C*
RETURN
END
|
/*
Copyright (C) 2016 Quaternion Risk Management Ltd
All rights reserved.
This file is part of ORE, a free-software/open-source library
for transparent pricing and risk analysis - http://opensourcerisk.org
ORE is free software: you can redistribute it and/or modify it
under the terms of the Modified BSD License. You should have received a
copy of the license along with this program.
The license is also available online at <http://opensourcerisk.org>
This program is distributed on the basis that it will form a useful
contribution to risk analytics and model standardisation, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the license for more details.
*/
/*! \file test/logquote.hpp
\brief LogQuote test
*/
#ifndef quantext_test_logquote_hpp
#define quantext_test_logquote_hpp
#include <boost/test/unit_test.hpp>
namespace testsuite {
//! LogQuotes tests
class LogQuoteTest {
public:
    /*! Tests the LogQuote class by comparing LogQuote values with the logs of Quote values. */
    static void testLogQuote();
    /*! Builds the Boost.Test suite that registers the test above. */
    static boost::unit_test_framework::test_suite* suite();
};
} // namespace testsuite
#endif
|
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
#pragma once
#include "BaseWorker.h"
#include "MageSlam.h"
#include <arcana/scheduling/state_machine.h>
#include <gsl/gsl>
#include <memory>
namespace mage
{
    // Forward declarations; definitions live elsewhere in the project.
    struct MageContext;
    struct MageSlamSettings;
    struct FrameData;
    class Fuser;

    // Entry point that drives the MAGESlam runtime. The const unique_ptr
    // member makes the class non-copyable and non-movable.
    class Runtime
    {
    public:
        Runtime(const MageSlamSettings& settings, MageContext& context, Fuser& fuser, mira::state_machine_driver& driver);
        ~Runtime();

        // Runs the runtime for the given camera configuration(s).
        // NOTE(review): blocking vs. asynchronous behavior is not visible
        // from this header -- see Runtime::Impl.
        void Run(gsl::span<const MAGESlam::CameraConfiguration> cameras);

        // Submits a single-camera frame for tracking.
        void TrackMono(std::shared_ptr<FrameData> frame);
        // Submits a pair of frames for stereo tracking.
        void TrackStereo(std::shared_ptr<FrameData> one, std::shared_ptr<FrameData> two);
        // Feeds a sensor sample into the runtime (presumably IMU data --
        // TODO confirm against SensorSample's definition).
        void AddSample(const mage::SensorSample& sample);

    private:
        // Pimpl idiom: implementation details are hidden in Runtime::Impl.
        struct Impl;
        const std::unique_ptr<Impl> m_impl;
    };
}
|
```python
from sympy import *
import numpy as np
```
```python
# Conversion factor between meters and inches.
_INCHES_PER_METER = 39.3701

def m_to_in(arg):
    """Convert a length from meters to inches."""
    return arg * _INCHES_PER_METER

def in_to_m(arg):
    """Convert a length from inches to meters."""
    return arg / _INCHES_PER_METER
```
### Inputs:
```python
P_tanks = 1.379e+6 # Pressure in tanks, Pascals (200 PSI)
# Pressure_Tanks = 101325*2; # Pascals (2 ATM) -- alternative value, kept for reference
D_tanks = in_to_m(10) # Diameter of tanks, meters (10 inches)
T_cryo = 90.15 # Kelvin *CHANGE ME* ACCORDING TO WHICH CRYOGENIC FUEL YOU WANT TO EXAMINE
T3 = 270 # Kelvin *CHANGE ME* ACCORDING TO VEHICLE SIZING
```
### Constants:
```python
simgay_al = 324e+6 # Tensile_Strength_Yield_Al_6061T, Pascals (4700 PSI) @ 77.15 K -196 Celsius
# Tensile Chosen because structure will be in tension.
# http://www.matweb.com/search/datasheet_print.aspx?matguid=1b8c06d0ca7c456694c7777d9e10be5b
K_CFRP = 7.0 # CFRP Thermal Conductivity, Watts/Meter Kelvin
K_PU = 0.025 # Polyurethane_Thermal_Conductivity, Watts/Meter Kelvin
T_ambient = 299.15 # Kelvin
H = 35 # Convective Heat Transfer Coefficient, Watts/SQR Meter Kelvin
FS = 1.5 # Safety Factor
```
### Calculations:
```python
R1 = D_tanks /2  # inner tank radius, meters
t = Symbol('t')  # wall thickness (solved for below)
R = Symbol('R')  # outer insulation radius (solved for below)
# Thin-wall spherical pressure-vessel relation: sigma = P*D/(4*t)
t_al = solve((P_tanks*D_tanks)/(4*t) - simgay_al, t) # thickness of aluminum, meters
t_al = float(t_al[0]) # convert to floating point number
# NOTE(review): the solved thickness above is immediately overridden by a
# hard-coded 0.00635 m (0.25 in) wall -- confirm this is an intended
# manufacturing/handling minimum rather than a leftover.
t_al = 0.00635
R2 = R1 + 1.5 * t_al # Meters
T2 = T_cryo # Kelvin  Assumption: WORST CASE
L = 1.0  # unit length of wall considered, meters
# Balance outer-surface convection against radial conduction through the
# polyurethane layer between R2 and R, then solve for R.
R_soln = solve(2*np.pi*R*L*H*(T_ambient-T3) - ((2*pi*L)*K_PU*(T3-T2)/log(R/R2)), R)
print('Thickness Aluminum:', m_to_in(t_al), 'in')
print('Radius3:', m_to_in(R_soln[0]), 'in')
print('Thickness of Polyurethane:', m_to_in(R_soln[0]-R2), 'in')
```
Thickness Aluminum: 0.250000135 in
Radius3: 5.54581854038781 in
Thickness of Polyurethane: 0.170818337887813 in
```python
```
```python
```
|
theory Crypto_Scheme
imports Kyber_spec
Compress
Abs_Qr
begin
section \<open>$(1-\delta)$-Correctness Proof of the Kyber Crypto Scheme\<close>
context kyber_spec
begin
text \<open>In the following the key generation, encryption and decryption algorithms
of Kyber are stated. Here, the variables have the meaning:
\begin{itemize}
\item $A$: matrix, part of Alices public key
\item $s$: vector, Alices secret key
\item $t$: the key generated by Alice from $A$ and $s$ in \<open>key_gen\<close>
\item $r$: Bob's "secret" key, a randomly picked vector
\item $m$: message bits, $m\in \{0,1\}^{256}$
\item $(u,v)$: encrypted message
\item $dt$, $du$, $dv$: the compression parameters for $t$, $u$ and $v$ respectively.
Notice that \<open>0 < d < \<lceil>log_2 q\<rceil>\<close>. The $d$ values are public knowledge.
\item $e$, $e1$ and $e2$: error parameters to obscure the message.
We need to make certain that an eavesdropper cannot distinguish
the encrypted message from uniformly random input.
Notice that $e$ and $e1$ are vectors while $e2$ is a mere element in \<open>\<int>_q[X]/(X^n+1).\<close>
\end{itemize}
\<close>
definition key_gen ::
"nat \<Rightarrow> (('a qr, 'k) vec, 'k) vec \<Rightarrow> ('a qr, 'k) vec \<Rightarrow>
('a qr, 'k) vec \<Rightarrow> ('a qr, 'k) vec" where
"key_gen dt A s e = compress_vec dt (A *v s + e)"
definition encrypt ::
"('a qr, 'k) vec \<Rightarrow> (('a qr, 'k) vec, 'k) vec \<Rightarrow>
('a qr, 'k) vec \<Rightarrow> ('a qr, 'k) vec \<Rightarrow> ('a qr) \<Rightarrow>
nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> 'a qr \<Rightarrow>
(('a qr, 'k) vec) * ('a qr)" where
"encrypt t A r e1 e2 dt du dv m =
(compress_vec du ((transpose A) *v r + e1),
compress_poly dv (scalar_product (decompress_vec dt t) r +
e2 + to_module (round((real_of_int q)/2)) * m)) "
definition decrypt ::
"('a qr, 'k) vec \<Rightarrow> ('a qr) \<Rightarrow> ('a qr, 'k) vec \<Rightarrow>
nat \<Rightarrow> nat \<Rightarrow> 'a qr" where
"decrypt u v s du dv = compress_poly 1 ((decompress_poly dv v) -
scalar_product s (decompress_vec du u))"
text \<open>Lifting a function to the quotient ring\<close>
fun f_int_to_poly :: "(int \<Rightarrow> int) \<Rightarrow> ('a qr) \<Rightarrow> ('a qr)" where
"f_int_to_poly f =
to_qr \<circ>
Poly \<circ>
(map of_int_mod_ring) \<circ>
(map f) \<circ>
(map to_int_mod_ring) \<circ>
coeffs \<circ>
of_qr"
text \<open>Error of compression and decompression.\<close>
definition compress_error_poly ::
"nat \<Rightarrow> 'a qr \<Rightarrow> 'a qr" where
"compress_error_poly d y =
decompress_poly d (compress_poly d y) - y"
definition compress_error_vec ::
"nat \<Rightarrow> ('a qr, 'k) vec \<Rightarrow> ('a qr, 'k) vec" where
"compress_error_vec d y =
decompress_vec d (compress_vec d y) - y"
text \<open>Lemmas for scalar product\<close>
lemma scalar_product_linear_left:
"scalar_product (a+b) c =
scalar_product a c + scalar_product b (c :: ('a qr, 'k) vec)"
unfolding scalar_product_def
by auto (metis (no_types, lifting) distrib_right sum.cong sum.distrib)
lemma scalar_product_linear_right:
"scalar_product a (b+c) =
scalar_product a b + scalar_product a (c :: ('a qr, 'k) vec)"
unfolding scalar_product_def
by auto (metis (no_types, lifting) distrib_left sum.cong sum.distrib)
lemma scalar_product_assoc:
"scalar_product (A *v s) (r :: ('a qr, 'k) vec ) =
scalar_product s (r v* A)"
unfolding scalar_product_def matrix_vector_mult_def
vector_matrix_mult_def
proof auto
have "(\<Sum>i\<in>UNIV. (\<Sum>j\<in>UNIV. (vec_nth (vec_nth A i) j) *
(vec_nth s j)) * (vec_nth r i)) =
(\<Sum>i\<in>UNIV. (\<Sum>j\<in>UNIV. (vec_nth (vec_nth A i) j) *
(vec_nth s j) * (vec_nth r i)))"
by (simp add: sum_distrib_right)
also have "\<dots> = (\<Sum>j\<in>UNIV. (\<Sum>i\<in>UNIV. (vec_nth (vec_nth A i) j) *
(vec_nth s j) * (vec_nth r i)))"
using sum.swap .
also have "\<dots> = (\<Sum>j\<in>UNIV. (\<Sum>i\<in>UNIV. (vec_nth s j) *
(vec_nth (vec_nth A i) j) * (vec_nth r i)))"
by (metis (no_types, lifting) mult_commute_abs sum.cong)
also have "\<dots> = (\<Sum>j\<in>UNIV. (vec_nth s j) *
(\<Sum>i\<in>UNIV. (vec_nth (vec_nth A i) j) * (vec_nth r i)))"
by (metis (no_types, lifting) mult.assoc sum.cong sum_distrib_left)
finally show "(\<Sum>i\<in>UNIV. (\<Sum>j\<in>UNIV. (vec_nth (vec_nth A i) j) *
(vec_nth s j)) * (vec_nth r i)) = (\<Sum>j\<in>UNIV. (vec_nth s j) *
(\<Sum>i\<in>UNIV. (vec_nth (vec_nth A i) j) * (vec_nth r i)))"
by blast
qed
text \<open>Lemma about coeff Poly\<close>
lemma coeffs_in_coeff:
assumes "\<forall>i. poly.coeff x i \<in> A"
shows "set (coeffs x) \<subseteq> A"
by (simp add: assms coeffs_def image_subsetI)
lemma set_coeff_Poly: "set ((coeffs \<circ> Poly) xs) \<subseteq> set xs"
proof -
have "x \<in> set (strip_while ((=) 0) xs) \<Longrightarrow> x \<in> set xs"
for x
by (metis append.assoc append_Cons in_set_conv_decomp
split_strip_while_append)
then show ?thesis by auto
qed
text \<open>We now want to show the deterministic correctness of the algorithm.
That means, after choosing the variables correctly, generating the public key, encrypting
and decrypting, we get back the original message.\<close>
lemma kyber_correct:
fixes A s r e e1 e2 dt du dv ct cu cv t u v
assumes
t_def: "t = key_gen dt A s e"
and u_v_def: "(u,v) = encrypt t A r e1 e2 dt du dv m"
and ct_def: "ct = compress_error_vec dt (A *v s + e)"
and cu_def: "cu = compress_error_vec du
((transpose A) *v r + e1)"
and cv_def: "cv = compress_error_poly dv
(scalar_product (decompress_vec dt t) r + e2 +
to_module (round((real_of_int q)/2)) * m)"
and delta: "abs_infty_poly (scalar_product e r + e2 + cv -
scalar_product s e1 + scalar_product ct r -
scalar_product s cu) < round (real_of_int q / 4)"
and m01: "set ((coeffs \<circ> of_qr) m) \<subseteq> {0,1}"
shows "decrypt u v s du dv = m"
proof -
text \<open>First, show that the calculations are performed correctly.\<close>
have t_correct: "decompress_vec dt t = A *v s + e + ct "
using t_def ct_def unfolding compress_error_vec_def
key_gen_def by simp
have u_correct: "decompress_vec du u =
(transpose A) *v r + e1 + cu"
using u_v_def cu_def unfolding encrypt_def
compress_error_vec_def by simp
have v_correct: "decompress_poly dv v =
scalar_product (decompress_vec dt t) r + e2 +
to_module (round((real_of_int q)/2)) * m + cv"
using u_v_def cv_def unfolding encrypt_def
compress_error_poly_def by simp
have v_correct': "decompress_poly dv v =
scalar_product (A *v s + e) r + e2 +
to_module (round((real_of_int q)/2)) * m + cv +
scalar_product ct r"
using t_correct v_correct
by (auto simp add: scalar_product_linear_left)
let ?t = "decompress_vec dt t"
let ?u = "decompress_vec du u"
let ?v = "decompress_poly dv v"
text \<open>Define w as the error term of the message encoding.
Have $\|w\|_{\infty ,q} < \lceil q/4 \rfloor$\<close>
define w where "w = scalar_product e r + e2 + cv -
scalar_product s e1 + scalar_product ct r -
scalar_product s cu"
have w_length: "abs_infty_poly w < round (real_of_int q / 4)"
unfolding w_def using delta by auto
moreover have "abs_infty_poly w = abs_infty_poly (-w)"
unfolding abs_infty_poly_def
using neg_mod_plus_minus[OF q_odd q_gt_zero]
using abs_infty_q_def abs_infty_q_minus by auto
ultimately have minus_w_length:
"abs_infty_poly (-w) < round (real_of_int q / 4)"
by auto
have vsu: "?v - scalar_product s ?u =
w + to_module (round((real_of_int q)/2)) * m"
unfolding w_def by (auto simp add: u_correct v_correct'
scalar_product_linear_left scalar_product_linear_right
scalar_product_assoc)
text \<open>Set m' as the actual result of the decryption.
It remains to show that $m' = m$.\<close>
define m' where "m' = decrypt u v s du dv"
have coeffs_m': "\<forall>i. poly.coeff (of_qr m') i \<in> {0,1}"
unfolding m'_def decrypt_def using compress_poly_1 by auto
text \<open>Show $\| v - s^Tu - \lceil q/2 \rfloor m' \|_{\infty, q}
\leq \lceil q/4 \rfloor$\<close>
have "abs_infty_poly (?v - scalar_product s ?u -
to_module (round((real_of_int q)/2)) * m')
= abs_infty_poly (?v - scalar_product s ?u -
decompress_poly 1 (compress_poly 1 (?v - scalar_product s ?u)))"
by (auto simp flip: decompress_poly_1[of m', OF coeffs_m'])
(simp add:m'_def decrypt_def)
also have "\<dots> \<le> round (real_of_int q / 4)"
using decompress_compress_poly[of 1 "?v - scalar_product s ?u"]
q_gt_two by fastforce
finally have "abs_infty_poly (?v - scalar_product s ?u -
to_module (round((real_of_int q)/2)) * m') \<le>
round (real_of_int q / 4)"
by auto
text \<open>Show $\| \lceil q/2 \rfloor (m-m')) \|_{\infty, q} <
2 \lceil q/4 \rfloor $\<close>
then have "abs_infty_poly (w + to_module
(round((real_of_int q)/2)) * m - to_module
(round((real_of_int q)/2)) * m') \<le> round (real_of_int q / 4)"
using vsu by auto
then have w_mm': "abs_infty_poly (w +
to_module (round((real_of_int q)/2)) * (m - m'))
\<le> round (real_of_int q / 4)"
by (smt (verit) add_uminus_conv_diff is_num_normalize(1)
right_diff_distrib')
have "abs_infty_poly (to_module
(round((real_of_int q)/2)) * (m - m')) =
abs_infty_poly (w + to_module
(round((real_of_int q)/2)) * (m - m') - w)"
by auto
also have "\<dots> \<le> abs_infty_poly
(w + to_module (round((real_of_int q)/2)) * (m - m'))
+ abs_infty_poly (- w)"
using abs_infty_poly_triangle_ineq[of
"w+to_module (round((real_of_int q)/2)) * (m - m')" "-w"]
by auto
also have "\<dots> < 2 * round (real_of_int q / 4)"
using w_mm' minus_w_length by auto
finally have error_lt: "abs_infty_poly (to_module (round((real_of_int q)/2)) * (m - m')) <
2 * round (real_of_int q / 4)"
by auto
text \<open>Finally show that $m-m'$ is small enough, ie that it is
an integer smaller than one.
Here, we need that $q \cong 1\mod 4$.\<close>
have coeffs_m':"set ((coeffs \<circ> of_qr) m') \<subseteq> {0,1}"
proof -
have "compress 1 a \<in> {0,1}" for a
unfolding compress_def by auto
then have "poly.coeff (of_qr (compress_poly 1 a)) i \<in> {0,1}"
for a i
using compress_poly_1 by presburger
then have "set (coeffs (of_qr (compress_poly 1 a))) \<subseteq> {0,1}"
for a
using coeffs_in_coeff[of "of_qr (compress_poly 1 a)" "{0,1}"]
by simp
then show ?thesis unfolding m'_def decrypt_def by simp
qed
have coeff_0pm1: "set ((coeffs \<circ> of_qr) (m-m')) \<subseteq>
{of_int_mod_ring (-1),0,1}"
proof -
have "poly.coeff (of_qr m) i \<in> {0,1}"
for i using m01 coeff_in_coeffs
by (metis comp_def insertCI le_degree subset_iff
zero_poly.rep_eq)
moreover have "poly.coeff (of_qr m') i \<in> {0,1}" for i
using coeffs_m' coeff_in_coeffs
by (metis comp_def insertCI le_degree subset_iff zero_poly.rep_eq)
ultimately have "poly.coeff (of_qr m - of_qr m') i \<in> {of_int_mod_ring (- 1), 0, 1}" for i
by (metis (no_types, lifting) coeff_diff diff_zero
eq_iff_diff_eq_0 insert_iff of_int_hom.hom_one of_int_minus
of_int_of_int_mod_ring singleton_iff verit_minus_simplify(3))
then have "set (coeffs (of_qr m - of_qr m')) \<subseteq> {of_int_mod_ring (- 1), 0, 1}"
by (simp add: coeffs_in_coeff)
then show ?thesis using m01 of_qr_diff[of m m'] by simp
qed
have "set ((coeffs \<circ> of_qr) (m-m')) \<subseteq> {0}"
proof (rule ccontr)
assume "\<not>set ((coeffs \<circ> of_qr) (m-m')) \<subseteq> {0}"
then have "\<exists>i. poly.coeff (of_qr (m-m')) i \<in>
{of_int_mod_ring (-1),1}"
using coeff_0pm1
by (smt (z3) coeff_in_coeffs comp_apply insert_iff
leading_coeff_0_iff order_refl
set_coeffs_subset_singleton_0_iff subsetD)
then have error_ge: "abs_infty_poly (to_module
(round((real_of_int q)/2)) * (m-m')) \<ge>
2 * round (real_of_int q / 4)"
using abs_infty_poly_ineq_pm_1 by simp
show False using error_lt error_ge by simp
qed
then show ?thesis by (simp flip: m'_def) (metis to_qr_of_qr)
qed
end
end
|
/* spprop.c
*
* Copyright (C) 2014 Patrick Alken
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <config.h>
#include <stdlib.h>
#include <gsl/gsl_spmatrix.h>
#include <gsl/gsl_errno.h>
/*
gsl_spmatrix_equal()
  Compare two sparse matrices for equality.

Inputs: a - first matrix
        b - second matrix

Return: 1 if a = b, 0 otherwise; signals an error (value 0) when the
matrices have different dimensions or different storage types.
*/

int
gsl_spmatrix_equal(const gsl_spmatrix *a, const gsl_spmatrix *b)
{
  if (b->size1 != a->size1 || b->size2 != a->size2)
    {
      GSL_ERROR_VAL("matrices must have same dimensions", GSL_EBADLEN, 0);
    }
  else if (a->sptype != b->sptype)
    {
      GSL_ERROR_VAL("trying to compare different sparse matrix types", GSL_EINVAL, 0);
    }
  else
    {
      const size_t nz = a->nz;
      size_t k;

      /* equal matrices must store the same number of non-zero elements */
      if (nz != b->nz)
        return 0;

      if (GSL_SPMATRIX_ISTRIPLET(a))
        {
          /*
           * triplet storage may list identical elements in different
           * orders, so look each element of a up in b individually
           */
          for (k = 0; k < nz; ++k)
            {
              if (gsl_spmatrix_get(b, a->i[k], a->p[k]) != a->data[k])
                return 0;
            }
        }
      else if (GSL_SPMATRIX_ISCCS(a) || GSL_SPMATRIX_ISCRS(a))
        {
          /*
           * compressed formats keep everything in a canonical order, so
           * compare the index and data arrays element-wise, then the
           * pointer array: column pointers (length size2 + 1) for CCS,
           * row pointers (length size1 + 1) for CRS
           */
          const size_t nptr = GSL_SPMATRIX_ISCCS(a) ? (a->size2 + 1) : (a->size1 + 1);

          for (k = 0; k < nz; ++k)
            {
              if (a->i[k] != b->i[k] || a->data[k] != b->data[k])
                return 0;
            }

          for (k = 0; k < nptr; ++k)
            {
              if (a->p[k] != b->p[k])
                return 0;
            }
        }
      else
        {
          GSL_ERROR_VAL("unknown sparse matrix type", GSL_EINVAL, 0);
        }

      return 1;
    }
} /* gsl_spmatrix_equal() */
|
theory Memory_Allocation_Model
imports Main
begin
subsection \<open>def of datetype\<close>
(*------------------------------------------------------------------------------------------------*)
datatype (set: 'a) tree = leaf: Leaf (L: 'a) |
node: Node (LL:"'a tree") (LR:"'a tree") (RL:"'a tree") (RR:"'a tree")
for map: tree_map
datatype block_state_type = FREE | ALLOC
type_synonym ID = nat
type_synonym Block = "(block_state_type \<times> ID) tree"
type_synonym poolname = "string"
record Pool = zerolevelblocks :: "Block set"
pname :: poolname
subsection \<open>def of 'a tree function\<close>
(*------------------------------------------------------------------------------------------------*)
(* compare2 a b: the maximum of two naturals. *)
definition compare2 :: "nat \<Rightarrow> nat \<Rightarrow> nat"
where "compare2 a b \<equiv> (if a > b then a else b)"
(* compare4 a b c d: the maximum of four naturals, folded via compare2. *)
definition compare4 :: "nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> nat"
where "compare4 a b c d \<equiv> (let c1 = compare2 a b;
c2 = compare2 c1 c in compare2 c2 d)"
(* get_level' B b n: depth (counting from n at the root) of the leaf labelled b
   in the quad-tree B; yields 0 when b does not occur (cf. lemma level_notbelong). *)
fun get_level' :: "'a tree \<Rightarrow> 'a \<Rightarrow> nat \<Rightarrow> nat"
where "get_level' (Leaf x) b n = (if (x = b) then n else 0)" |
"get_level' (Node n1 n2 n3 n4) b n = compare4 (get_level' n1 b (Suc n))
(get_level' n2 b (Suc n))
(get_level' n3 b (Suc n))
(get_level' n4 b (Suc n))"
(* get_level B b: depth of leaf b in B with the root at level 0. *)
definition get_level :: "'a tree \<Rightarrow> 'a \<Rightarrow> nat"
where "get_level B b \<equiv> get_level' B b 0"
lemma level_notbelong:
"b \<notin> set B \<Longrightarrow>
get_level' B b lv = 0"
proof(induct B arbitrary: lv)
case (Leaf x)
then show ?case by auto
next
case (Node B1 B2 B3 B4)
have b1: "b \<notin> set B1"
using Node.prems by auto
have b2: "b \<notin> set B2"
using Node.prems by auto
have b3: "b \<notin> set B3"
using Node.prems by auto
have b4: "b \<notin> set B4"
using Node.prems by auto
have l_node': "get_level' (Node B1 B2 B3 B4) b lv =
compare4 (get_level' B1 b (Suc lv))
(get_level' B2 b (Suc lv))
(get_level' B3 b (Suc lv))
(get_level' B4 b (Suc lv))"
using get_level'.simps(2) by auto
have l1: "get_level' B1 b (Suc lv) = 0"
using Node.hyps(1) b1 by auto
have l2: "get_level' B2 b (Suc lv) = 0"
using Node.hyps(2) b2 by auto
have l3: "get_level' B3 b (Suc lv) = 0"
using Node.hyps(3) b3 by auto
have l4: "get_level' B4 b (Suc lv) = 0"
using Node.hyps(4) b4 by auto
have l_node: "compare4 (get_level' B1 b (Suc lv))
(get_level' B2 b (Suc lv))
(get_level' B3 b (Suc lv))
(get_level' B4 b (Suc lv)) = 0"
unfolding compare4_def Let_def compare2_def l1 l2 l3 l4 by auto
show ?case using l_node' l_node by auto
qed
(* get_level_node' B b n: like get_level' but locates a whole subtree b
   (leaf or node) inside B, returning its depth counted from n;
   yields 0 when b is not found (cf. lemmas level_node_notbelong*). *)
fun get_level_node' :: "'a tree \<Rightarrow> 'a tree \<Rightarrow> nat \<Rightarrow> nat"
where "get_level_node' (Leaf x) b n = (if leaf b \<and> (L b) = x then n else 0)" |
"get_level_node' (Node n1 n2 n3 n4) b n = (if (Node n1 n2 n3 n4) = b then n
else compare4 (get_level_node' n1 b (Suc n))
(get_level_node' n2 b (Suc n))
(get_level_node' n3 b (Suc n))
(get_level_node' n4 b (Suc n)))"
(* get_level_node B b: depth of subtree b in B with the root at level 0. *)
definition get_level_node :: "'a tree \<Rightarrow> 'a tree \<Rightarrow> nat"
where "get_level_node B b \<equiv> get_level_node' B b 0"
lemma level_node_notbelong:
"leaf b \<Longrightarrow>
L b \<notin> set B \<Longrightarrow>
get_level_node' B b lv = 0"
proof(induct B arbitrary: lv)
case (Leaf x)
then show ?case by auto
next
case (Node B1 B2 B3 B4)
have b1: "L b \<notin> set B1"
using Node.prems by auto
have b2: "L b \<notin> set B2"
using Node.prems by auto
have b3: "L b \<notin> set B3"
using Node.prems by auto
have b4: "L b \<notin> set B4"
using Node.prems by auto
have node_not_l: "Node B1 B2 B3 B4 \<noteq> b"
using Node.prems(1) by auto
have l_node': "get_level_node' (Node B1 B2 B3 B4) b lv =
compare4 (get_level_node' B1 b (Suc lv))
(get_level_node' B2 b (Suc lv))
(get_level_node' B3 b (Suc lv))
(get_level_node' B4 b (Suc lv))"
using get_level_node'.simps(2) node_not_l by auto
have l1: "get_level_node' B1 b (Suc lv) = 0"
using Node.hyps(1) Node.prems(1) b1 by auto
have l2: "get_level_node' B2 b (Suc lv) = 0"
using Node.hyps(2) Node.prems(1) b2 by auto
have l3: "get_level_node' B3 b (Suc lv) = 0"
using Node.hyps(3) Node.prems(1) b3 by auto
have l4: "get_level_node' B4 b (Suc lv) = 0"
using Node.hyps(4) Node.prems(1) b4 by auto
have l_node: "compare4 (get_level_node' B1 b (Suc lv))
(get_level_node' B2 b (Suc lv))
(get_level_node' B3 b (Suc lv))
(get_level_node' B4 b (Suc lv)) = 0"
unfolding compare4_def Let_def compare2_def using l1 l2 l3 l4 by auto
show ?case using l_node' l_node by auto
qed
lemma level_node_notbelong2:
"node b \<Longrightarrow>
\<not> tree.set b \<subseteq> tree.set B \<Longrightarrow>
get_level_node' B b lv = 0"
proof(induct B arbitrary: lv)
case (Leaf x)
show ?case using Leaf.prems(1) by auto
next
case (Node B1 B2 B3 B4)
have not_eq: "b \<noteq> Node B1 B2 B3 B4"
using Node.prems(2) by blast
have b1: "\<not> tree.set b \<subseteq> tree.set B1"
using Node.prems(2) dual_order.trans by auto
have b2: "\<not> tree.set b \<subseteq> tree.set B2"
using Node.prems(2) dual_order.trans by auto
have b3: "\<not> tree.set b \<subseteq> tree.set B3"
using Node.prems(2) dual_order.trans by auto
have b4: "\<not> tree.set b \<subseteq> tree.set B4"
using Node.prems(2) dual_order.trans by auto
have l1: "get_level_node' B1 b (Suc lv) = 0"
using Node.hyps(1) Node.prems(1) b1 by auto
have l2: "get_level_node' B2 b (Suc lv) = 0"
using Node.hyps(2) Node.prems(1) b2 by auto
have l3: "get_level_node' B3 b (Suc lv) = 0"
using Node.hyps(3) Node.prems(1) b3 by auto
have l4: "get_level_node' B4 b (Suc lv) = 0"
using Node.hyps(4) Node.prems(1) b4 by auto
have l_node': "get_level_node' (Node B1 B2 B3 B4) b lv =
compare4 (get_level_node' B1 b (Suc lv))
(get_level_node' B2 b (Suc lv))
(get_level_node' B3 b (Suc lv))
(get_level_node' B4 b (Suc lv))"
using get_level_node'.simps(2) not_eq by auto
have l_node: "compare4 (get_level_node' B1 b (Suc lv))
(get_level_node' B2 b (Suc lv))
(get_level_node' B3 b (Suc lv))
(get_level_node' B4 b (Suc lv)) = 0"
unfolding compare4_def Let_def compare2_def using l1 l2 l3 l4 by auto
then show ?case using l_node' l_node by auto
qed
subsection \<open>def of function_call\<close>
(*------------------------------------------------------------------------------------------------*)
(* getnewid ids: choose four fresh IDs not in ids (via Hilbert choice, each
   chosen outside the set extended by the previous picks, so all four are
   pairwise distinct for finite ids — see getnewid_diff1/diff2), returning
   the four IDs together with the enlarged ID set. *)
definition getnewid :: "ID set \<Rightarrow> (ID \<times> ID \<times> ID \<times> ID \<times> ID set)"
where "getnewid ids \<equiv> let nid1 = SOME p1. p1 \<notin> ids;
ids1 = ids \<union> {nid1};
nid2 = SOME p2. p2 \<notin> ids1;
ids2 = ids1 \<union> {nid2};
nid3 = SOME p3. p3 \<notin> ids2;
ids3 = ids2 \<union> {nid3};
nid4 = SOME p4. p4 \<notin> ids3;
ids4 = ids3 \<union> {nid4} in
(nid1, nid2, nid3, nid4, ids4)"
lemma getnewid_inc: "ids \<subseteq> snd(snd(snd(snd(getnewid ids))))"
unfolding getnewid_def Let_def by auto
lemma newid1_in_getnewid: "fst(getnewid ids) \<in> snd(snd(snd(snd(getnewid ids))))"
unfolding getnewid_def Let_def by auto
lemma newid2_in_getnewid: "fst(snd(getnewid ids)) \<in> snd(snd(snd(snd(getnewid ids))))"
unfolding getnewid_def Let_def by auto
lemma newid3_in_getnewid: "fst(snd(snd(getnewid ids))) \<in> snd(snd(snd(snd(getnewid ids))))"
unfolding getnewid_def Let_def by auto
lemma newid4_in_getnewid: "fst(snd(snd(snd(getnewid ids)))) \<in> snd(snd(snd(snd(getnewid ids))))"
unfolding getnewid_def Let_def by auto
lemma exists_p_getnewid:
"\<exists>xa xb xc xd. getnewid ids = (xa, xb, xc, xd, ids \<union> {xa, xb, xc, xd})"
unfolding getnewid_def Let_def by auto
lemma getnewid_diffab:
"finite ids \<Longrightarrow>
newid = getnewid ids \<Longrightarrow>
xa = fst newid \<Longrightarrow>
xb = fst (snd newid) \<Longrightarrow>
xc = fst (snd (snd newid)) \<Longrightarrow>
xd = fst (snd (snd (snd newid))) \<Longrightarrow>
xa \<noteq> xb"
unfolding getnewid_def Let_def
apply auto
by (metis (mono_tags, lifting) add.left_neutral finite_nat_set_iff_bounded lessI not_add_less2 plus_nat.simps(2) someI_ex)
lemma getnewid_diffac:
"finite ids \<Longrightarrow>
newid = getnewid ids \<Longrightarrow>
xa = fst newid \<Longrightarrow>
xb = fst (snd newid) \<Longrightarrow>
xc = fst (snd (snd newid)) \<Longrightarrow>
xd = fst (snd (snd (snd newid))) \<Longrightarrow>
xa \<noteq> xc"
unfolding getnewid_def Let_def
apply auto
by (smt ex_new_if_finite finite.insertI infinite_UNIV_nat insertCI some_eq_ex someI_ex)
lemma getnewid_diffad:
"finite ids \<Longrightarrow>
newid = getnewid ids \<Longrightarrow>
xa = fst newid \<Longrightarrow>
xb = fst (snd newid) \<Longrightarrow>
xc = fst (snd (snd newid)) \<Longrightarrow>
xd = fst (snd (snd (snd newid))) \<Longrightarrow>
xa \<noteq> xd"
unfolding getnewid_def Let_def
apply auto
by (smt ex_new_if_finite finite.insertI infinite_UNIV_nat insertCI some_eq_ex someI_ex)
lemma getnewid_diffbc:
"finite ids \<Longrightarrow>
newid = getnewid ids \<Longrightarrow>
xa = fst newid \<Longrightarrow>
xb = fst (snd newid) \<Longrightarrow>
xc = fst (snd (snd newid)) \<Longrightarrow>
xd = fst (snd (snd (snd newid))) \<Longrightarrow>
xb \<noteq> xc"
unfolding getnewid_def Let_def
apply auto
by (smt ex_new_if_finite finite.insertI infinite_UNIV_nat insertCI some_eq_ex someI_ex)
lemma getnewid_diffbd:
"finite ids \<Longrightarrow>
newid = getnewid ids \<Longrightarrow>
xa = fst newid \<Longrightarrow>
xb = fst (snd newid) \<Longrightarrow>
xc = fst (snd (snd newid)) \<Longrightarrow>
xd = fst (snd (snd (snd newid))) \<Longrightarrow>
xb \<noteq> xd"
unfolding getnewid_def Let_def
apply auto
by (smt ex_new_if_finite finite.insertI infinite_UNIV_nat insertCI some_eq_ex someI_ex)
lemma getnewid_diffcd:
"finite ids \<Longrightarrow>
newid = getnewid ids \<Longrightarrow>
xa = fst newid \<Longrightarrow>
xb = fst (snd newid) \<Longrightarrow>
xc = fst (snd (snd newid)) \<Longrightarrow>
xd = fst (snd (snd (snd newid))) \<Longrightarrow>
xc \<noteq> xd"
unfolding getnewid_def Let_def
apply auto
by (smt ex_new_if_finite finite.insertI infinite_UNIV_nat insertCI some_eq_ex someI_ex)
lemma getnewid_diff1:
"finite ids \<Longrightarrow>
xa = fst (getnewid ids) \<Longrightarrow>
xb = fst (snd (getnewid ids)) \<Longrightarrow>
xc = fst (snd (snd (getnewid ids))) \<Longrightarrow>
xd = fst (snd (snd (snd (getnewid ids)))) \<Longrightarrow>
xa \<noteq> xb \<and> xa \<noteq> xc \<and> xa \<noteq> xd"
by (meson getnewid_diffab getnewid_diffac getnewid_diffad)
lemma getnewid_diff2:
"finite ids \<Longrightarrow>
xa = fst (getnewid ids) \<Longrightarrow>
xb = fst (snd (getnewid ids)) \<Longrightarrow>
xc = fst (snd (snd (getnewid ids))) \<Longrightarrow>
xd = fst (snd (snd (snd (getnewid ids)))) \<Longrightarrow>
xb \<noteq> xc \<and> xb \<noteq> xd \<and> xc \<noteq> xd"
by (meson getnewid_diffbc getnewid_diffbd getnewid_diffcd)
lemma getnewid_anot:
"finite ids \<Longrightarrow>
newid = getnewid ids \<Longrightarrow>
xa = fst newid \<Longrightarrow>
xb = fst (snd newid) \<Longrightarrow>
xc = fst (snd (snd newid)) \<Longrightarrow>
xd = fst (snd (snd (snd newid))) \<Longrightarrow>
xa \<notin> ids"
unfolding getnewid_def Let_def
apply auto
by (metis Collect_mem_eq finite_Collect_not infinite_UNIV_nat not_finite_existsD someI_ex)
lemma getnewid_bnot:
"finite ids \<Longrightarrow>
newid = getnewid ids \<Longrightarrow>
xa = fst newid \<Longrightarrow>
xb = fst (snd newid) \<Longrightarrow>
xc = fst (snd (snd newid)) \<Longrightarrow>
xd = fst (snd (snd (snd newid))) \<Longrightarrow>
xb \<notin> ids"
unfolding getnewid_def Let_def
apply auto
by (metis (mono_tags, lifting) finite_nat_set_iff_bounded lessI less_irrefl not_add_less2 plus_nat.simps(2) someI_ex)
lemma getnewid_cnot:
"finite ids \<Longrightarrow>
newid = getnewid ids \<Longrightarrow>
xa = fst newid \<Longrightarrow>
xb = fst (snd newid) \<Longrightarrow>
xc = fst (snd (snd newid)) \<Longrightarrow>
xd = fst (snd (snd (snd newid))) \<Longrightarrow>
xc \<notin> ids"
unfolding getnewid_def Let_def
apply auto
by (smt finite.insertI finite_nat_set_iff_bounded insert_compr less_irrefl mem_Collect_eq someI_ex)
lemma getnewid_dnot:
"finite ids \<Longrightarrow>
newid = getnewid ids \<Longrightarrow>
xa = fst newid \<Longrightarrow>
xb = fst (snd newid) \<Longrightarrow>
xc = fst (snd (snd newid)) \<Longrightarrow>
xd = fst (snd (snd (snd newid))) \<Longrightarrow>
xd \<notin> ids"
unfolding getnewid_def Let_def
apply auto
by (smt ball_empty empty_Collect_eq ex_new_if_finite finite.insertI infinite_UNIV_nat insert_compr mem_Collect_eq some_eq_ex)
lemma getnewid_notbelong:
"finite ids \<Longrightarrow>
xa = fst (getnewid ids) \<Longrightarrow>
xb = fst (snd (getnewid ids)) \<Longrightarrow>
xc = fst (snd (snd (getnewid ids))) \<Longrightarrow>
xd = fst (snd (snd (snd (getnewid ids)))) \<Longrightarrow>
xa \<notin> ids \<and> xb \<notin> ids \<and> xc \<notin> ids \<and> xd \<notin> ids"
by (simp add: getnewid_anot getnewid_bnot getnewid_cnot getnewid_dnot)
(* divide bl ids: split a block into a node with four fresh leaves — the first
   marked ALLOC, the remaining three FREE — together with the updated ID set.
   NOTE(review): the binding b = L bl is never used in the result, so the
   contents of bl are discarded; confirm this is intended. *)
definition divide :: "Block \<Rightarrow> ID set \<Rightarrow> (Block \<times> ID set)"
where "divide bl ids \<equiv>
(let b = L bl;
nids = getnewid ids;
x1 = fst nids;
x2 = fst (snd nids);
x3 = fst (snd (snd nids));
x4 = fst (snd (snd (snd nids)));
newids = snd (snd (snd (snd nids))) in
(Node (Leaf (ALLOC, x1)) (Leaf (FREE, x2)) (Leaf (FREE, x3)) (Leaf (FREE, x4)), newids))"
lemma divide_diff:
"finite ids \<Longrightarrow>
fst (divide b ids) = Node (Leaf ll) (Leaf lr) (Leaf rl) (Leaf rr) \<Longrightarrow>
snd ll \<noteq> snd lr \<and> snd ll \<noteq> snd rl \<and> snd ll \<noteq> snd rr"
unfolding divide_def Let_def using getnewid_diff1 by auto
lemma divide_diff2:
"finite ids \<Longrightarrow>
fst (divide b ids) = Node (Leaf ll) (Leaf lr) (Leaf rl) (Leaf rr) \<Longrightarrow>
snd lr \<noteq> snd rl \<and> snd lr \<noteq> snd rr \<and> snd rl \<noteq> snd rr"
unfolding divide_def Let_def using getnewid_diff2 by auto
lemma divide_belong:
"fst (divide b ids) = Node (Leaf ll) (Leaf lr) (Leaf rl) (Leaf rr) \<Longrightarrow>
snd ll \<in> snd (divide b ids) \<and>
snd lr \<in> snd (divide b ids) \<and>
snd rl \<in> snd (divide b ids) \<and>
snd rr \<in> snd (divide b ids)"
unfolding divide_def Let_def
using newid1_in_getnewid newid2_in_getnewid newid3_in_getnewid newid4_in_getnewid by auto
lemma divide_notbelong:
"finite ids \<Longrightarrow>
fst (divide b ids) = Node (Leaf ll) (Leaf lr) (Leaf rl) (Leaf rr) \<Longrightarrow>
snd ll \<notin> ids \<and> snd lr \<notin> ids \<and> snd rl \<notin> ids \<and> snd rr \<notin> ids"
unfolding divide_def Let_def using getnewid_notbelong by auto
lemma divide_finite:
"finite ids \<Longrightarrow>
finite (snd (divide b ids))"
proof-
assume a0: "finite ids"
have p0: "snd (divide b ids) = snd (snd (snd (snd (getnewid ids))))"
unfolding divide_def Let_def by auto
obtain xa xb xc xd
where obtain_divide: "snd (divide b ids) = ids \<union> {xa, xb, xc, xd}"
using p0 exists_p_getnewid by (metis sndI)
have "finite (ids \<union> {xa, xb, xc, xd})" using a0 by auto
then show ?thesis using obtain_divide by auto
qed
(* getnewid2 ids: choose a single fresh ID not in ids (Hilbert choice),
   returning it with the enlarged ID set. *)
definition getnewid2 :: "ID set \<Rightarrow> (ID \<times> ID set)"
where "getnewid2 ids \<equiv> let nid = SOME p. p \<notin> ids;
nids = ids \<union> {nid} in
(nid, nids)"
lemma getnewid2_inc: "ids \<subseteq> snd(getnewid2 ids)"
unfolding getnewid2_def Let_def by auto
lemma newid_in_getnewid2: "fst(getnewid2 ids) \<in> snd(getnewid2 ids)"
unfolding getnewid2_def Let_def by auto
lemma exists_p_getnewid2: "\<exists>p. getnewid2 ids = (p, ids \<union> {p})"
unfolding getnewid2_def by metis
lemma getnewid2_anot:
"finite ids \<Longrightarrow>
xa = fst (getnewid2 ids) \<Longrightarrow>
xa \<notin> ids"
unfolding getnewid2_def Let_def
apply auto
by (metis Collect_mem_eq finite_Collect_not infinite_UNIV_char_0 not_finite_existsD someI_ex)
(* combine b ids: if b is a node whose four children are all FREE leaves,
   coalesce it into a single FREE leaf carrying a fresh ID;
   otherwise return b and ids unchanged. *)
definition combine :: "Block \<Rightarrow> ID set \<Rightarrow> (Block \<times> ID set)"
where "combine b ids \<equiv> (if (\<exists>a1 a2 a3 a4. b = Node (Leaf (FREE, a1)) (Leaf (FREE, a2)) (Leaf (FREE, a3)) (Leaf (FREE, a4))) then
let nids = getnewid2 ids;
newid = fst nids;
newids = snd nids in (Leaf (FREE, newid), newids)
else (b, ids))"
lemma combine_ids:
"ids \<subseteq> snd (combine b ids)"
unfolding combine_def Let_def
using getnewid2_inc by auto
lemma combine_finite:
"finite ids \<Longrightarrow>
finite (snd (combine b ids))"
unfolding combine_def Let_def apply auto
using exists_p_getnewid2 snd_conv
by (metis Un_insert_right finite_insert sup_bot.right_neutral)
(* freesets b: all FREE leaves occurring in block b. *)
definition freesets :: "Block \<Rightarrow> Block set"
where "freesets b = {l. leaf l \<and> L l \<in> set b \<and> fst (L l) = FREE}"
(* freesets_level b lv: FREE leaves of b that sit exactly at level lv. *)
definition freesets_level :: "Block \<Rightarrow> nat \<Rightarrow> Block set"
where "freesets_level b lv = {l. l \<in> freesets b \<and> get_level b (L l) = lv}"
(* freesets_level_pool bset lv: FREE leaves at level lv across a whole pool. *)
definition freesets_level_pool :: "Block set \<Rightarrow> nat \<Rightarrow> Block set"
where "freesets_level_pool bset lv = {l. \<exists>b \<in> bset. l \<in> freesets_level b lv}"
(* freesets_maxlevel bset lv: the deepest level \<le> lv that still has a FREE
   leaf in the pool (definite description; unique by lemma exist_lmax). *)
definition freesets_maxlevel :: "Block set \<Rightarrow> nat \<Rightarrow> nat"
where "freesets_maxlevel bset lv \<equiv>
THE lmax. lmax \<le> lv \<and>
freesets_level_pool bset lmax \<noteq> {} \<and>
(\<forall>l. l \<le> lv \<and> freesets_level_pool bset l \<noteq> {} \<longrightarrow> l \<le> lmax)"
(* exists_freelevel bset lv: some level \<le> lv has a FREE leaf in the pool. *)
definition exists_freelevel :: "Block set \<Rightarrow> nat \<Rightarrow> bool"
where "exists_freelevel bset lv \<equiv> \<exists>lv'. lv' \<le> lv \<and> freesets_level_pool bset lv' \<noteq> {}"
lemma exist_lmax_h:
"freesets_level_pool bset lv = {} \<Longrightarrow>
\<exists>lv'. lv' < lv \<and> freesets_level_pool bset lv' \<noteq> {} \<Longrightarrow>
\<exists>lmax. lmax < lv \<and>
freesets_level_pool bset lmax \<noteq> {} \<and>
(\<forall>l. l \<le> lv \<and> l > lmax \<longrightarrow> freesets_level_pool bset l = {})"
proof(induct lv)
case 0
then show ?case by auto
next
case (Suc xa)
then show ?case
by (smt Suc_leI Suc_le_lessD le_Suc_eq lessI not_less)
qed
lemma exist_lmax:
"exists_freelevel bset lv \<Longrightarrow>
\<exists>!lmax. lmax \<le> lv \<and>
freesets_level_pool bset lmax \<noteq> {} \<and>
(\<forall>l. l \<le> lv \<and> freesets_level_pool bset l \<noteq> {} \<longrightarrow> l \<le> lmax)"
proof-
assume exi_level: "exists_freelevel bset lv"
hence exi_level_def: "\<exists>lv'. lv' \<le> lv \<and> freesets_level_pool bset lv' \<noteq> {}"
unfolding exists_freelevel_def by auto
{assume a0: "freesets_level_pool bset lv \<noteq> {}"
hence "lv \<le> lv \<and>
freesets_level_pool bset lv \<noteq> {} \<and>
(\<forall>l. l \<le> lv \<and> freesets_level_pool bset l \<noteq> {} \<longrightarrow> l \<le> lv)"
using exi_level_def by auto
then have ?thesis using le_antisym by blast
}moreover
{assume a1: "freesets_level_pool bset lv = {}"
hence exi_level_less: "\<exists>lv'. lv' < lv \<and> freesets_level_pool bset lv' \<noteq> {}"
using exi_level_def le_neq_implies_less by blast
have "\<exists>lmax. lmax < lv \<and>
freesets_level_pool bset lmax \<noteq> {} \<and>
(\<forall>l. l \<le> lv \<and> l > lmax \<longrightarrow> freesets_level_pool bset l = {})"
using exist_lmax_h a1 exi_level_less by auto
then obtain lmax where exi_lmax:
"lmax < lv \<and>
freesets_level_pool bset lmax \<noteq> {} \<and>
(\<forall>l. l \<le> lv \<and> l > lmax \<longrightarrow> freesets_level_pool bset l = {})" by auto
then have "\<forall>l. l \<le> lv \<and> freesets_level_pool bset l \<noteq> {} \<longrightarrow> l \<le> lmax"
using a1 by (metis le_less_linear)
then have ?thesis using exi_lmax
by (meson le_less_Suc_eq le_simps(2) less_imp_le_nat)
}
ultimately have ?thesis by linarith
then show ?thesis by auto
qed
subsection \<open>def of sub core function\<close>
(*------------------------------------------------------------------------------------------------*)
(* set_state_type bl t: re-label leaf bl with state t, keeping its ID. *)
definition set_state_type :: "Block \<Rightarrow> block_state_type \<Rightarrow> Block"
where "set_state_type bl t \<equiv> (let b = (L bl) in Leaf (t, snd b))"
(* replace B b b': substitute the label of leaf b by that of b' everywhere in B. *)
definition replace :: "Block \<Rightarrow> Block \<Rightarrow> Block \<Rightarrow> Block"
where "replace B b b' \<equiv> (tree_map (\<lambda>b1. if (b1 = L b) then (L b') else b1) B)"
lemma no_replace:
"L b \<notin> set blo \<Longrightarrow>
b' = set_state_type b t \<Longrightarrow>
tree_map (\<lambda>b1. if b1 = L b then L b' else b1) blo = blo"
by (smt tree.map_cong0 tree.map_ident)
(* split b ids lv: repeatedly divide block b lv times, always descending into
   the first (ALLOC) child, yielding the resulting subtree, the updated ID set,
   and the ID of the finally allocated leaf. *)
fun split :: "Block \<Rightarrow> ID set \<Rightarrow> nat \<Rightarrow> (Block \<times> ID set \<times> ID)"
where "split b ids lv = (if lv = 0 then (b, ids, snd (L b))
else
let re = divide b ids;
node = fst re;
newids = snd re;
c1 = split (LL node) newids (lv - 1) in
(Node (fst c1) (LR node) (RL node) (RR node), fst (snd c1), snd (snd c1)))"
lemma split_induct:
"lv > 0 \<Longrightarrow>
fst (divide b ids) = Node (Leaf ll) (Leaf lr) (Leaf rl) (Leaf rr) \<Longrightarrow>
newids = snd (divide b ids) \<Longrightarrow>
fst (split b ids lv) = Node (fst (split (Leaf ll) newids (lv - 1))) (Leaf lr) (Leaf rl) (Leaf rr)"
using split.simps unfolding Let_def
by (metis fst_conv less_not_refl3 tree.sel(2) tree.sel(3) tree.sel(4) tree.sel(5))
fun replace_leaf :: "Block \<Rightarrow> Block \<Rightarrow> Block \<Rightarrow> Block"
where "replace_leaf (Leaf x) y st = (if (x = (L y)) then st else (Leaf x))" |
"replace_leaf (Node n1 n2 n3 n4) y st = Node (replace_leaf n1 y st)
(replace_leaf n2 y st)
(replace_leaf n3 y st)
(replace_leaf n4 y st)"
lemma no_replace_leaf:
"(L b) \<notin> set B \<Longrightarrow>
replace_leaf B b subbtr = B"
apply(induct B)
by auto
lemma replace_leaf_belong:
"(L b) \<in> set B \<Longrightarrow>
(L l) \<in> set subbtr \<Longrightarrow>
(L l) \<in> set (replace_leaf B b subbtr)"
apply(induct B)
by auto
lemma replace_subbtr_belong:
"(L b) \<in> set B \<Longrightarrow>
tree.set subbtr \<subseteq> tree.set (replace_leaf B b subbtr)"
apply(induct B)
by auto
(* merge b ids: bottom-up coalescing pass. A node of four FREE leaves is
   combined immediately; otherwise the four subtrees are merged in order
   (threading the ID set through), and combine is applied to the rebuilt
   node in case the children became FREE leaves. *)
fun merge :: "Block \<Rightarrow> ID set \<Rightarrow> (Block \<times> ID set)"
where "merge (Leaf v) ids = ((Leaf v), ids)" |
"merge (Node ll lr rl rr) ids =
(if (\<exists>xa xb xc xd. (Node ll lr rl rr) = Node (Leaf (FREE, xa))
(Leaf (FREE, xb))
(Leaf (FREE, xc))
(Leaf (FREE, xd)))
then combine (Node ll lr rl rr) ids
else
let m1 = merge ll ids;
m2 = merge lr (snd m1);
m3 = merge rl (snd m2);
m4 = merge rr (snd m3) in
combine (Node (fst m1) (fst m2) (fst m3) (fst m4)) (snd m4))"
(* alloc1 bset lv ids: allocate directly at level lv — pick (by choice) some
   block with a FREE leaf at lv, mark that leaf ALLOC, and return the updated
   pool, the unchanged ID set, success, and the allocated ID.
   Precondition (not enforced here): freesets_level_pool bset lv \<noteq> {}. *)
definition alloc1 :: "Block set \<Rightarrow> nat \<Rightarrow> ID set \<Rightarrow> (Block set \<times> ID set \<times> bool \<times> ID set)"
where "alloc1 bset lv ids \<equiv> (let blo = (SOME b. b \<in> bset \<and> freesets_level b lv \<noteq> {});
b = (SOME l. l \<in> freesets_level blo lv);
allocid = snd (L b);
newblo = replace blo b (set_state_type b ALLOC) in
((bset - {blo}) \<union> {newblo}, ids, True, {allocid}))"
(* alloc bset lv ids: buddy-style allocation at level lv. If no level \<le> lv
   has a FREE leaf, fail. Otherwise take the deepest such level lmax: when
   lmax = lv allocate in place (alloc1); when lmax < lv split a FREE leaf at
   lmax down to lv and graft the split subtree back into its block. *)
definition alloc :: "Block set \<Rightarrow> nat \<Rightarrow> ID set \<Rightarrow> (Block set \<times> ID set \<times> bool \<times> ID set)"
where "alloc bset lv ids \<equiv>
if (exists_freelevel bset lv) then
let lmax = freesets_maxlevel bset lv in
if lmax = lv then
alloc1 bset lv ids
else
let blo = (SOME b. b \<in> bset \<and> freesets_level b lmax \<noteq> {});
b = (SOME l. l \<in> freesets_level blo lmax);
re = split b ids (lv - lmax);
subbtr = fst re;
newids = fst (snd re);
allocid = snd (snd re);
newbtr = replace_leaf blo b subbtr in
(((bset - {blo}) \<union> {newbtr}), newids, True, {allocid})
else (bset, ids, False, {})"
(* free bset b ids: release leaf b. Fails when b occurs in no block of the
   pool or is already FREE; otherwise marks b FREE in its (unique, via THE)
   enclosing block and runs a merge pass to coalesce freed siblings. *)
definition free :: "Block set \<Rightarrow> Block \<Rightarrow> ID set \<Rightarrow> (Block set \<times> ID set \<times> bool)"
where "free bset b ids \<equiv>
if (\<exists>btree \<in> bset. (L b) \<in> set btree) then
if fst (L b) = FREE then
(bset, ids, False)
else
let btree = (THE t. t \<in> bset \<and> (L b) \<in> set t);
freeblo = replace btree b (set_state_type b FREE);
re = merge freeblo ids;
newblo = fst re;
newids = snd re in
((bset - {btree}) \<union> {newblo}, newids, True)
else
(bset, ids, False)"
end |
\name{widthDetails.legend}
\alias{widthDetails.legend}
\title{
Grob width for packed_legends
}
\description{
Grob width for packed_legends
}
\usage{
\method{widthDetails}{legend}(x)
}
\arguments{
\item{x}{A legend object.}
}
\examples{
# There is no example
NULL
}
|
Require Export Iron.Language.SystemF2Cap.Kind.
(* Type level capabilities *)
Inductive tycap : Type :=
(* The region capability, also called a 'region handle'.
When one of these exists in the program we know there is a
corresponding region in the store. *)
| TyCapRegion : nat -> tycap.
Hint Constructors tycap.
(* Check if two capabilities are equal.
   Two region capabilities are equal exactly when they carry the same
   region identifier, so equality reduces to equality on nat. *)
Fixpoint EqTyCap (tc1 : tycap) (tc2 : tycap) : Prop :=
 match tc1, tc2 with
 | TyCapRegion p1, TyCapRegion p2 => p1 = p2
 end.
|
From Test Require Import tactic.
Section FOFProblem.
Variable Universe : Set.
Variable UniverseElement : Universe.
Variable wd_ : Universe -> Universe -> Prop.
Variable col_ : Universe -> Universe -> Universe -> Prop.
Variable col_swap1_1 : (forall A B C : Universe, (col_ A B C -> col_ B A C)).
Variable col_swap2_2 : (forall A B C : Universe, (col_ A B C -> col_ B C A)).
Variable col_triv_3 : (forall A B : Universe, col_ A B B).
Variable wd_swap_4 : (forall A B : Universe, (wd_ A B -> wd_ B A)).
Variable col_trans_5 : (forall P Q A B C : Universe, ((wd_ P Q /\ (col_ P Q A /\ (col_ P Q B /\ col_ P Q C))) -> col_ A B C)).
Theorem pipo_6 : (forall P Q A B Bprime Cprime : Universe, ((wd_ P Q /\ (wd_ P A /\ (wd_ P Bprime /\ (wd_ P Cprime /\ (wd_ B Bprime /\ (wd_ B Cprime /\ (wd_ A B /\ (wd_ A Bprime /\ (wd_ B P /\ (col_ P Q A /\ (col_ P Q Bprime /\ (col_ P Q Cprime /\ (col_ B B Bprime /\ col_ A B B))))))))))))) -> col_ P Bprime Cprime)).
Proof.
time tac.
Qed.
End FOFProblem.
|
-- Andreas, 2019-03-27
-- Do not run checkIApplyConfluence unless --cubical
-- The following verbosity options triggers a crash
-- in case checkIApplyConfluence_ runs.
-- {-# OPTIONS --cubical #-} -- Trigges the crash.
{-# OPTIONS -v tc.cover.iapply.confluence.crash:666 #-} -- Activate crashing program point.
open import Agda.Builtin.Nat
-- A harmless definition: the identity on Nat, written with genuine
-- pattern-matching clauses so the coverage checker has something to
-- inspect (the point of this regression test).
id : Nat → Nat
id zero = zero
id (suc x) = suc (id x)
-- Should succeed.
|
#ifndef context_hh_INCLUDED
#define context_hh_INCLUDED
#include "dynamic_selection_list.hh"
#include <boost/optional.hpp>
namespace Kakoune
{
class Editor;
class Window;
class Buffer;
class Client;
class InputHandler;
class UserInterface;
class DisplayLine;
class KeymapManager;
// A Context is used to access non singleton objects for various services
// in commands.
//
// The Context object links an Client, an Editor (which may be a Window),
// and a UserInterface. It may represent an interactive user window, or
// a hook execution or a macro replay.
class Context
{
public:
// An empty Context has no buffer/window/client; query the has_*()
// accessors before using the corresponding getter.
Context();
Context(InputHandler& input_handler, Buffer& buffer, SelectionList selections, String name = "");
~Context();
// Non-copyable: a Context identifies one live editing session.
Context(const Context&) = delete;
Context& operator=(const Context&) = delete;
Buffer& buffer() const;
// NOTE(review): returns the optional itself from a bool function;
// relies on optional's bool conversion being usable here — confirm.
bool has_buffer() const { return m_selections; }
Window& window() const;
bool has_window() const { return (bool)m_window; }
Client& client() const;
bool has_client() const { return (bool)m_client; }
InputHandler& input_handler() const;
bool has_input_handler() const { return (bool)m_input_handler; }
// The UI is reached through the client, hence has_ui() == has_client().
UserInterface& ui() const;
bool has_ui() const { return has_client(); }
SelectionList& selections();
const SelectionList& selections() const;
std::vector<String> selections_content() const;
void change_buffer(Buffer& buffer);
void set_client(Client& client);
void set_window(Window& window);
// Scoped managers resolved through the current window/buffer.
OptionManager& options() const;
HookManager& hooks() const;
KeymapManager& keymaps() const;
void print_status(DisplayLine status) const;
// Jump list: saved selection snapshots navigable back and forward.
void push_jump();
const DynamicSelectionList& jump_forward();
const DynamicSelectionList& jump_backward();
void forget_jumps_to_buffer(Buffer& buffer);
const String& name() const { return m_name; }
void set_name(String name) { m_name = std::move(name); }
// Edition bracketing (see ScopedEdition); -1 permanently disables
// undo-group handling for this context.
bool is_editing() const { return m_edition_level!= 0; }
void disable_undo_handling() { m_edition_level = -1; }
private:
void begin_edition();
void end_edition();
int m_edition_level = 0;
friend struct ScopedEdition;
safe_ptr<InputHandler> m_input_handler;
safe_ptr<Window> m_window;
safe_ptr<Client> m_client;
friend class Client;
// Empty when the context is not attached to a buffer (see has_buffer()).
boost::optional<DynamicSelectionList> m_selections;
String m_name;
using JumpList = std::vector<DynamicSelectionList>;
JumpList m_jump_list;
JumpList::iterator m_current_jump = m_jump_list.begin();
};
// Scope guard pairing Context::begin_edition()/end_edition(): the
// constructor opens an edition and the destructor closes it, so the
// pair stays balanced on every exit path (including exceptions).
struct ScopedEdition
{
ScopedEdition(Context& context)
: m_context(context)
{ m_context.begin_edition(); }
~ScopedEdition()
{ m_context.end_edition(); }
Context& context() const { return m_context; }
private:
Context& m_context;
};
}
#endif // context_hh_INCLUDED
|
module India.Wrapper
import Text.Lexer
%default total
public export
interface Wrapper (0 f : Type -> Type) where
unwrap : f a -> a
export
Wrapper TokenData where
unwrap = tok
|
State Before: n✝ b n : ℕ
n0 : 0 < n
nb : n < b
⊢ digits b n = [n] ∧ 1 < b ∧ 0 < n State After: n✝ b n : ℕ
n0 : 0 < n
nb : n < b
b2 : 1 < b
⊢ digits b n = [n] ∧ 1 < b ∧ 0 < n Tactic: have b2 : 1 < b :=
lt_iff_add_one_le.mpr (le_trans (add_le_add_right (lt_iff_add_one_le.mp n0) 1) nb) State Before: n✝ b n : ℕ
n0 : 0 < n
nb : n < b
b2 : 1 < b
⊢ digits b n = [n] ∧ 1 < b ∧ 0 < n State After: n✝ b n : ℕ
n0 : 0 < n
nb : n < b
b2 : 1 < b
⊢ digits b n = [n] Tactic: refine' ⟨_, b2, n0⟩ State Before: n✝ b n : ℕ
n0 : 0 < n
nb : n < b
b2 : 1 < b
⊢ digits b n = [n] State After: no goals Tactic: rw [Nat.digits_def' b2 n0, Nat.mod_eq_of_lt nb,
(Nat.div_eq_zero_iff ((zero_le n).trans_lt nb)).2 nb, Nat.digits_zero] |
theory Part_4 imports Main Part_2
begin
(* 2.9 *)
(* itadd m n: tail-recursive addition, moving one Suc per step from the
   first argument onto the accumulator n. *)
fun itadd :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
"itadd 0 n = n" |
"itadd (Suc m) n = itadd m (Suc n)"
(* itadd agrees with add from Part_2; n must be generalized (arbitrary: n)
   because it changes in the recursive call. *)
theorem itadd_is_add : "itadd m n = add m n"
apply(induction m arbitrary: n)
apply(auto)
done
end |
/-
Copyright (c) 2017 Simon Hudon. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Simon Hudon
Instances for identity and composition functors
-/
import category.functor
universe variables u v w
section lemmas

open function

variables {F : Type u → Type v}
variables [applicative F] [is_lawful_applicative F]
variables {α β γ σ : Type u}

-- Register the lawful-applicative equations as [functor_norm] simp rules so
-- `simp with functor_norm` can normalise applicative expressions.
attribute [functor_norm] seq_assoc pure_seq_eq_map map_pure seq_map_assoc map_seq

-- Fuse maps on both sides of `<*>` into a single map on the function side.
lemma applicative.map_seq_map (f : α → β → γ) (g : σ → β) (x : F α) (y : F σ) :
  (f <$> x) <*> (g <$> y) = (flip (∘) g ∘ f) <$> x <*> y :=
by simp [flip] with functor_norm

-- Point-free form of `pure_seq_eq_map`: sequencing with a pure function
-- is the same operation as mapping it.
lemma applicative.pure_seq_eq_map' (f : α → β) :
  (<*>) (pure f : F (α → β)) = (<$>) f :=
by ext; simp with functor_norm

-- Two lawful applicative structures on the same type constructor are equal
-- as soon as their `pure` (H1) and `seq` (H2) agree: the functor's `map`
-- and the derived `seq_left`/`seq_right` are determined by those two
-- operations via the laws.
theorem applicative.ext {F} : ∀ {A1 : applicative F} {A2 : applicative F}
  [@is_lawful_applicative F A1] [@is_lawful_applicative F A2]
  (H1 : ∀ {α : Type u} (x : α),
    @has_pure.pure _ A1.to_has_pure _ x = @has_pure.pure _ A2.to_has_pure _ x)
  (H2 : ∀ {α β : Type u} (f : F (α → β)) (x : F α),
    @has_seq.seq _ A1.to_has_seq _ _ f x = @has_seq.seq _ A2.to_has_seq _ _ f x),
  A1 = A2
| {to_functor := F1, seq := s1, pure := p1, seq_left := sl1, seq_right := sr1}
  {to_functor := F2, seq := s2, pure := p2, seq_left := sl2, seq_right := sr2} L1 L2 H1 H2 :=
begin
  -- Identify `pure` and `seq` pointwise, then the underlying functors
  -- (their maps agree by `pure_seq_eq_map` on both sides), and finally
  -- `seq_left`/`seq_right` via the corresponding laws.
  have : @p1 = @p2, {funext α x, apply H1}, subst this,
  have : @s1 = @s2, {funext α β f x, apply H2}, subst this,
  cases L1, cases L2,
  have : F1 = F2,
  { resetI, apply functor.ext, intros,
    exact (L1_pure_seq_eq_map _ _).symm.trans (L2_pure_seq_eq_map _ _) },
  subst this,
  congr; funext α β x y,
  { exact (L1_seq_left_eq _ _).trans (L2_seq_left_eq _ _).symm },
  { exact (L1_seq_right_eq _ _).trans (L2_seq_right_eq _ _).symm }
end

end lemmas
-- The identity functor is a commutative applicative: every law holds
-- definitionally, so `refl` discharges each generated goal.
instance : is_comm_applicative id :=
by refine { .. }; intros; refl
namespace comp

open function (hiding comp)
open functor

variables {F : Type u → Type w} {G : Type v → Type u}

variables [applicative F] [applicative G]

-- Sequencing for the composition of two applicatives: lift the inner
-- `<*>` through the outer layer using the outer `<$>`/`<*>`.
protected def seq {α β : Type v} : comp F G (α → β) → comp F G α → comp F G β
| (comp.mk f) (comp.mk x) := comp.mk $ (<*>) <$> f <*> x

-- `pure` wraps twice: once per layer.
instance : has_pure (comp F G) :=
⟨λ _ x, comp.mk $ pure $ pure x⟩

instance : has_seq (comp F G) :=
⟨λ _ _ f x, comp.seq f x⟩

-- Simp lemmas exposing how `pure` and `<*>` act on the underlying `run`.
@[simp] protected lemma run_pure {α : Type v} :
  ∀ x : α, (pure x : comp F G α).run = pure (pure x)
| _ := rfl

@[simp] protected lemma run_seq {α β : Type v} (f : comp F G (α → β)) (x : comp F G α) :
  (f <*> x).run = (<*>) <$> f.run <*> x.run := rfl

instance : applicative (comp F G) :=
{ map := @comp.map F G _ _,
  seq := @comp.seq F G _ _,
  ..comp.has_pure }

variables [is_lawful_applicative F] [is_lawful_applicative G]
variables {α β γ : Type v}

-- The applicative laws for `comp F G`, each proved by reducing to the
-- component functors and normalising with the [functor_norm] simp set.
lemma map_pure (f : α → β) (x : α) : (f <$> pure x : comp F G β) = pure (f x) :=
comp.ext $ by simp

lemma seq_pure (f : comp F G (α → β)) (x : α) :
  f <*> pure x = (λ g : α → β, g x) <$> f :=
comp.ext $ by simp [(∘)] with functor_norm

lemma seq_assoc (x : comp F G α) (f : comp F G (α → β)) (g : comp F G (β → γ)) :
  g <*> (f <*> x) = (@function.comp α β γ <$> g) <*> f <*> x :=
comp.ext $ by simp [(∘)] with functor_norm

lemma pure_seq_eq_map (f : α → β) (x : comp F G α) :
  pure f <*> x = f <$> x :=
comp.ext $ by simp [applicative.pure_seq_eq_map'] with functor_norm

instance : is_lawful_applicative (comp F G) :=
{ pure_seq_eq_map := @comp.pure_seq_eq_map F G _ _ _ _,
  map_pure := @comp.map_pure F G _ _ _ _,
  seq_pure := @comp.seq_pure F G _ _ _ _,
  seq_assoc := @comp.seq_assoc F G _ _ _ _ }

-- Composing with the identity applicative on either side gives back the
-- original applicative structure; both directions use `applicative.ext`.
theorem applicative_id_comp {F} [AF : applicative F] [LF : is_lawful_applicative F] :
  @comp.applicative id F _ _ = AF :=
@applicative.ext F _ _ (@comp.is_lawful_applicative id F _ _ _ _) _
  (λ α x, rfl) (λ α β f x, rfl)

theorem applicative_comp_id {F} [AF : applicative F] [LF : is_lawful_applicative F] :
  @comp.applicative F id _ _ = AF :=
@applicative.ext F _ _ (@comp.is_lawful_applicative F id _ _ _ _) _
  (λ α x, rfl) (λ α β f x, show id <$> f <*> x = f <*> x, by rw id_map)

open is_comm_applicative

-- Commutativity is preserved by composition: `comp f g` is commutative
-- whenever both `f` and `g` are, by pushing `commutative_map` through
-- both layers.
instance {f : Type u → Type w} {g : Type v → Type u}
  [applicative f] [applicative g]
  [is_comm_applicative f] [is_comm_applicative g] :
  is_comm_applicative (comp f g) :=
by { refine { .. @comp.is_lawful_applicative f g _ _ _ _, .. },
  intros, casesm* comp _ _ _, simp! [map,has_seq.seq] with functor_norm,
  rw [commutative_map],
  simp [comp.mk,flip,(∘)] with functor_norm,
  congr, funext, rw [commutative_map], congr }

end comp
open functor

-- Unfold `<*>` on `comp.mk`-packaged values; tagged [functor_norm] so the
-- simp set can rewrite through compositions of applicatives.
@[functor_norm]
lemma comp.seq_mk {α β : Type w}
  {f : Type u → Type v} {g : Type w → Type u}
  [applicative f] [applicative g]
  (h : f (g (α → β))) (x : f (g α)) :
  comp.mk h <*> comp.mk x = comp.mk (has_seq.seq <$> h <*> x) := rfl
|
Sheila Morris will discuss growing up “half and half,” as the daughter of a Chinese father and white mother, during a special presentation March 13 at the Owatonna Arts Center.
The event, scheduled for 7 p.m., is co-sponsored by the Owatonna chapter of the American Association of University Women (AAUW), Steele County Home Economics Association, and the OAC, which is offering the venue for the presentation, said the AAUW’s Mary Kaye Tillmann. A $5 fee will be charged at the door, and net proceeds benefit the OAC.
Tillmann had seen Jessica Huang’s play, “The Paper Dreams of Harry Chin” — which focuses on the relationship of Chin, his daughter (Morris), and various other relatives — and it’s “wonderful,” she said. Morris spoke to the audience following the performance, and when Tillmann learned Morris resides in Waseca, she began contemplating ways to land her as a speaker for the AAUW.
Initially, Morris viewed the play as a way of honoring her father — and family in general — but, as her brother, Roger, observed, “it’s your story,” and “he was right,” Morris said. “It’s from my perspective,” and other members of the family would naturally have different perspectives.
Like Morris, Huang is “multi-cultural,” even introducing Morris to the word “hapa,” which means “part white and part Asian,” and Huang discovered the family story in a book titled “The Chinese in Minnesota,” Morris said. For that historical book, Morris and her father were interviewed by author Sherri Gebert Fuller, and when Morris later met Huang for coffee, she felt a special kinship with the playwright.
She also learned more about her father, “a paper son,” through the production, she said. For example, Huang discovered the entire 123-page transcript of Chin’s interrogation when he came ashore in California.
Chinese immigrants who were the “children” of Chinese-American citizens only on paper — fraudulent documents with false names — earned the “paper sons” moniker, according to NPR. Blood relatives of American-born Chinese, as well as Chinese merchants, teachers and students, were among the exceptions to the Chinese Exclusion Act of 1882 that barred Chinese laborers from entering America.
Though the ring of "paper sons" was eventually discovered, the United States government—noting that so many of the illegal immigrants were productive members of society, as well as the fact that China was a communist regime—instituted a confession program, she said. President Lyndon Johnson signed legislation to essentially forgive them, but they had to become naturalized citizens, which her father did in the late 1960s.
Her father — like so many of his countrymen — came to America for economic opportunity, since China, especially in rural areas, was impoverished, she said. Youth took seriously the cultural imperative to care for their elders, so earning money was pivotal, and Chin escaped China on one of the last ships to leave harbor as the Japanese closed in on the country’s coasts during World War II.
“I had never thought about (my father’s) time on the ship” over from China, a journey where many passengers committed suicide in despair they would fail the interrogation, Morris said. Many more killed themselves in holding cells after either failing the inquisition or during the interminable wait for admittance into America.
Her father first went to Chicago’s Chinatown, but — told by associates he’d never learn English there — migrated to the Port Arthur Café in St. Paul to work, she said. There, he met Laura, Sheila’s mother. |
(*
* Copyright 2018, Data61
*
* This software may be distributed and modified according to the terms of
* the BSD 2-Clause license. Note that NO WARRANTY is provided.
* See "LICENSE_BSD2.txt" for details.
*
* @TAG(DATA61_BSD)
*)
section "L4V-Internal Word Lemmas"
text \<open>
This is a holding area for Word utility lemmas that are too specific or unpolished for the
AFP, but which are reusable enough to be collected together for the rest of L4V. New
utility lemmas that only prove facts about words should be added here (in preference to being
kept where they were first needed).
\<close>
theory Word_Lemmas_Internal
imports Word_Lemmas
begin
(* FIXME: move out of Word_Lib *)
lemma disjCI2:
"(\<not> P \<Longrightarrow> Q) \<Longrightarrow> P \<or> Q"
by blast
(* FIXME: move out of Word_Lib *)
lemma nat_power_minus_less:
"a < 2 ^ (x - n) \<Longrightarrow> (a :: nat) < 2 ^ x"
by (erule order_less_le_trans) simp
(* FIXME: move out of Word_Lib *)
lemma pow_mono_leq_imp_lt:
"x \<le> y \<Longrightarrow> x < 2 ^ y"
by (simp add: le_less_trans)
(* FIXME: move out of Word_Lib *)
lemma small_powers_of_2:
"x \<ge> 3 \<Longrightarrow> x < 2 ^ (x - 1)"
apply (induct x)
apply simp
by (simp add: suc_le_pow_2)
(* FIXME: move out of Word_Lib *)
lemma nat_diff_diff_le_lhs:
"a + c - b \<le> d \<Longrightarrow> a - (b - c) \<le> (d :: nat)"
by arith
(* FIXME: move out of Word_Lib *)
lemma nat_le_Suc_less:
"0 < y \<Longrightarrow> (x \<le> y - Suc 0) = (x < y)"
by arith
lemma is_aligned_obvious_no_wrap':
"\<lbrakk> is_aligned ptr sz; x = 2 ^ sz - 1 \<rbrakk>
\<Longrightarrow> ptr \<le> ptr + x"
by (fastforce simp: field_simps intro: is_aligned_no_overflow)
lemma mask_zero:
"is_aligned x a \<Longrightarrow> x && mask a = 0"
by (metis is_aligned_mask)
lemma fold_eq_0_to_bool:
"(v = 0) = (\<not> to_bool v)"
by (simp add: to_bool_def)
lemma is_aligned_neg_mask_eq_concrete:
"\<lbrakk> is_aligned p n; msk && ~~ mask n = ~~ mask n \<rbrakk>
\<Longrightarrow> p && msk = p"
by (metis word_bw_assocs(1) word_bw_comms(1) is_aligned_neg_mask_eq)
lemmas add_ge0_weak = add_increasing[where 'a=int and b=0]
lemma less_diff_gt0:
"a < b \<Longrightarrow> (0 :: 'a :: len word) < b - a"
by unat_arith
lemma unat_plus_gt:
"unat ((a:: 'a :: len word) + b) \<le> (unat a + unat b)"
by (clarsimp simp: unat_plus_if_size)
lemma is_aligned_and_not_zero:
"\<lbrakk> is_aligned n k; n \<noteq> 0 \<rbrakk>
\<Longrightarrow> 2 ^ k \<le> n"
by (metis aligned_small_is_0 word_not_le)
lemma const_less:
"\<lbrakk> (a :: 'a :: len word) - 1 < b; a \<noteq> b \<rbrakk>
\<Longrightarrow> a < b"
by (metis less_1_simp word_le_less_eq)
lemma is_aligned_and_2_to_k:
"(n && 2 ^ k - 1) = 0
\<Longrightarrow> is_aligned (n :: 'a :: len word) k"
by (simp add: is_aligned_mask mask_def)
lemma is_aligned_power2:
"b \<le> a \<Longrightarrow> is_aligned (2 ^ a) b"
by (metis is_aligned_triv is_aligned_weaken)
lemma add_mult_aligned_neg_mask:
"\<lbrakk> m && (2 ^ n - 1) = (0 :: 'a :: len word) \<rbrakk>
\<Longrightarrow> (x + y * m) && ~~ mask n = (x && ~~ mask n) + y * m"
apply (subgoal_tac "is_aligned (y * m) n")
apply (subst field_simps, subst mask_out_add_aligned[symmetric], assumption)
apply (simp add: field_simps)
apply (simp add: is_aligned_mask mask_2pm1[symmetric])
apply (simp add:mask_eqs(5)[symmetric])
done
lemma unat_of_nat_minus_1:
"\<lbrakk> n < 2 ^ LENGTH('a); n \<noteq> 0 \<rbrakk>
\<Longrightarrow> (unat (((of_nat n):: 'a :: len word) - 1)) = n - 1"
apply (subst unat_minus_one)
apply (rule of_nat_neq_0)
apply simp
apply simp
by (simp add: unat_of_nat_eq)
lemma word_eq_zeroI:
"a \<le> a - 1 \<Longrightarrow> a = (0:: 'a :: len word)"
apply (rule ccontr)
apply (subst (asm) le_m1_iff_lt[THEN iffD1])
apply unat_arith
apply simp
done
lemma sint_eq_uint_2pl:
"\<lbrakk> (a :: 'a :: len word) < 2 ^ (LENGTH('a) - 1) \<rbrakk>
\<Longrightarrow> sint a = uint a"
by (simp add: not_msb_from_less sint_eq_uint word_2p_lem word_size)
lemma aligned_sub_aligned:
"\<lbrakk> is_aligned (a :: 'a :: len word) n; is_aligned b n; n < LENGTH('a) \<rbrakk>
\<Longrightarrow> is_aligned (a - b) n"
by (simp add: Aligned.aligned_sub_aligned)
lemma word_add_format:
"(-1 :: 'a :: len word) + b + c = b + (c - 1)"
by simp
lemma upto_enum_word_nth:
"\<lbrakk> i \<le> j; k \<le> unat (j - i) \<rbrakk>
\<Longrightarrow> [i .e. j] ! k = i + of_nat k"
apply (clarsimp simp: upto_enum_def nth_append)
apply (clarsimp simp: word_le_nat_alt[symmetric])
apply (rule conjI, clarsimp)
apply (subst toEnum_of_nat, unat_arith)
apply unat_arith
apply (clarsimp simp: not_less unat_sub[symmetric])
apply unat_arith
done
lemma upto_enum_step_nth:
"\<lbrakk> a \<le> c; n \<le> unat ((c - a) div (b - a)) \<rbrakk>
\<Longrightarrow> [a, b .e. c] ! n = a + of_nat n * (b - a)"
by (clarsimp simp: upto_enum_step_def not_less[symmetric] upto_enum_word_nth)
lemma neg_mask_add:
"y && mask n = 0 \<Longrightarrow> x + y && ~~ mask n = (x && ~~ mask n) + y"
by (clarsimp simp: mask_out_sub_mask mask_eqs(7)[symmetric] mask_twice)
lemma minus_minus_swap:
"\<lbrakk> a \<le> c; b \<le> d; b \<le> a; d \<le> c ; (d :: nat) - b = c - a \<rbrakk>
\<Longrightarrow> a - b = c - d"
by arith
lemma minus_minus_swap':
"\<lbrakk> c \<le> a; d \<le> b; b \<le> a; d \<le> c ; (b :: nat) - d = a - c \<rbrakk>
\<Longrightarrow> a - b = c - d"
by arith
lemma shiftr_shiftl_shiftr[simp]:
"(x :: 'a :: len word) >> a << a >> a = x >> a"
apply (rule word_eqI)
apply (simp add: word_size nth_shiftr nth_shiftl)
apply safe
apply (drule test_bit_size)
apply (simp add: word_size)
done
lemma add_right_shift:
"\<lbrakk> x && mask n = 0; y && mask n = 0; x \<le> x + y \<rbrakk>
\<Longrightarrow> (x + y :: ('a :: len) word) >> n = (x >> n) + (y >> n)"
apply (simp add: no_olen_add_nat is_aligned_mask[symmetric])
apply (simp add: unat_arith_simps shiftr_div_2n' split del: if_split)
apply (subst if_P)
apply (erule order_le_less_trans[rotated])
apply (simp add: add_mono)
apply (simp add: shiftr_div_2n' is_aligned_def)
done
lemma sub_right_shift:
"\<lbrakk> x && mask n = 0; y && mask n = 0; y \<le> x \<rbrakk>
\<Longrightarrow> (x - y) >> n = (x >> n :: 'a :: len word) - (y >> n)"
using add_right_shift[where x="x - y" and y=y and n=n]
by (simp add: aligned_sub_aligned is_aligned_mask[symmetric]
word_sub_le Aligned.aligned_sub_aligned)
lemma and_and_mask_simple:
"(y && mask n) = mask n \<Longrightarrow> ((x && y) && mask n) = x && mask n"
by (simp add: word_bool_alg.conj.assoc)
lemma and_and_mask_simple_not:
"(y && mask n) = 0 \<Longrightarrow> ((x && y) && mask n) = 0"
by (simp add: word_bool_alg.conj.assoc)
lemma word_and_le':
"b \<le> c \<Longrightarrow> (a :: 'a :: len word) && b \<le> c"
by (metis word_and_le1 order_trans)
lemma word_and_less':
"b < c \<Longrightarrow> (a :: 'a :: len word) && b < c"
by (metis word_and_le1 xtr7)
lemma shiftr_w2p:
"x < LENGTH('a)
\<Longrightarrow> 2 ^ x = (2 ^ (LENGTH('a) - 1) >> (LENGTH('a) - 1 - x) :: 'a :: len word)"
apply simp
apply (rule word_eqI)
apply (auto intro: simp: word_size nth_shiftr nth_w2p)
done
lemma t2p_shiftr:
"\<lbrakk> b \<le> a; a < LENGTH('a) \<rbrakk>
\<Longrightarrow> (2 :: 'a :: len word) ^ a >> b = 2 ^ (a - b)"
apply (subst shiftr_w2p)
apply assumption
apply (subst shiftr_w2p[where x = "a - b"])
apply arith
apply (simp add:shiftr_shiftr)
done
lemma scast_1[simp]:
"scast (1 :: 'a :: len signed word) = (1 :: 'a word)"
by simp
lemma ucast_ucast_mask_eq:
"\<lbrakk> (ucast :: ('a :: len) word \<Rightarrow> ('b :: len) word) x = y;
x && mask LENGTH('b) = x \<rbrakk>
\<Longrightarrow> x = ucast y"
apply (drule_tac f="ucast :: 'b word \<Rightarrow> 'a word" in arg_cong)
apply (simp add: ucast_ucast_mask)
done
lemma ucast_up_neq:
"\<lbrakk> ucast x \<noteq> (ucast y::'b::len word);
LENGTH('b) \<le> LENGTH ('a) \<rbrakk>
\<Longrightarrow> ucast x \<noteq> (ucast y::'a::len word)"
apply (clarsimp)
apply (drule ucast_up_eq)
apply simp+
done
lemma is_aligned_neg_mask_weaken:
"\<lbrakk> is_aligned p n; m \<le> n \<rbrakk>
\<Longrightarrow> p && ~~ mask m = p"
using is_aligned_neg_mask_eq is_aligned_weaken by blast
lemma mask_AND_less_0:
"\<lbrakk> x && mask n = 0; m \<le> n \<rbrakk>
\<Longrightarrow> x && mask m = 0"
apply (case_tac "LENGTH('a) \<le> n")
using is_aligned_mask is_aligned_weaken apply blast+
done
lemma mask_len_id [simp]:
"(x :: 'a :: len word) && mask LENGTH('a) = x"
using uint_lt2p [of x] by (simp add: mask_eq_iff)
lemma scast_ucast_down_same:
"LENGTH('b) \<le> LENGTH('a)
\<Longrightarrow> (scast :: 'a :: len word \<Rightarrow> 'b :: len word) = (ucast :: 'a :: len word \<Rightarrow> 'b :: len word)"
apply (rule down_cast_same [symmetric])
apply (simp add: is_down_def target_size_def source_size_def word_size)
done
lemma aligned_mask_disjoint:
"\<lbrakk> is_aligned (a :: 'a :: len word) n; b \<le> mask n; n < LENGTH('a) \<rbrakk>
\<Longrightarrow> a && b = 0"
apply (rule word_eqI)
apply (clarsimp simp: is_aligned_nth word_size mask_def simp del: word_less_sub_le)
apply (frule le2p_bits_unset)
apply (case_tac "na < n")
apply simp
apply simp
done
lemma word_aligned_0_sum:
"\<lbrakk> a + b = 0; is_aligned (a :: 'a :: len word) n; b \<le> mask n; n < LENGTH('a) \<rbrakk>
\<Longrightarrow> a = 0 \<and> b = 0"
by (simp add: word_plus_and_or_coroll aligned_mask_disjoint word_or_zero)
lemma mask_eq1_nochoice:
"\<lbrakk> LENGTH('a) > 1; (x :: 'a :: len word) && 1 = x \<rbrakk>
\<Longrightarrow> x = 0 \<or> x = 1"
apply (simp add:mask_eq_iff[where n = 1,unfolded mask_def,simplified])
apply (drule word_2p_lem[where n = 1 and w = x,symmetric,simplified,THEN iffD1,rotated])
apply (simp add:word_size)
by (simp add: x_less_2_0_1')
lemmas word_le_mask_eq = le_mask_imp_and_mask
lemma is_aligned_neg_mask2 [simp]:
"is_aligned (a && ~~ mask n) n"
apply (cases "n < LENGTH('a)")
apply (simp add: and_not_mask)
apply (subst shiftl_t2n)
apply (rule is_aligned_mult_triv1)
apply (simp add: not_less NOT_mask power_overflow)
done
lemma unat_of_nat_ctz_mw:
"unat (of_nat (word_ctz (w :: 'a :: len word)) :: 'a :: len word) = word_ctz w"
using word_ctz_le[where w=w, simplified] unat_of_nat_eq[where x="word_ctz w" and 'a="'a"]
pow_mono_leq_imp_lt
by simp
lemma unat_of_nat_ctz_smw:
"unat (of_nat (word_ctz (w :: 'a :: len word)) :: 'a :: len sword) = word_ctz w"
using word_ctz_le[where w=w, simplified] unat_of_nat_eq[where x="word_ctz w" and 'a="'a"]
pow_mono_leq_imp_lt
by (metis le_unat_uoi le_unat_uoi linorder_neqE_nat nat_less_le scast_of_nat
word_unat.Rep_inverse)
lemma shiftr_and_eq_shiftl:
fixes w x y :: "'a:: len word"
assumes r: "(w >> n) && x = y"
shows "w && (x << n) = (y << n)"
using assms
proof -
{ fix i
assume i: "i < LENGTH('a)"
hence "test_bit (w && (x << n)) i \<longleftrightarrow> test_bit (y << n) i"
using word_eqD[where x="i-n", OF r]
by (cases "n \<le> i") (auto simp: nth_shiftl nth_shiftr)
} note bits = this
show ?thesis
by (rule word_eqI[rule_format], rule bits, simp add: word_size)
qed
lemma int_and_leR:
"0 \<le> b \<Longrightarrow> a AND b \<le> (b :: int)"
by (clarsimp simp: int_and_le bin_sign_def split: if_split_asm)
lemma int_and_leL:
"0 \<le> a \<Longrightarrow> a AND b \<le> (a :: int)"
by (metis int_and_leR int_and_comm)
lemma mask_len_max:
"mask LENGTH('a) = (max_word :: 'a :: len word)"
by (simp add: max_word_mask)
lemma if_then_1_else_0:
"((if P then 1 else 0) = (0 :: ('a :: zero_neq_one))) = (\<not> P)"
by (simp split: if_split)
lemma if_then_0_else_1:
"((if P then 0 else 1) = (0 :: 'a :: len word)) = (P)"
by simp
lemmas if_then_simps = if_then_0_else_1 if_then_1_else_0
lemma add_mask_lower_bits':
"\<lbrakk> len = LENGTH('a); is_aligned (x :: 'a :: len word) n;
\<forall>n' \<ge> n. n' < len \<longrightarrow> \<not> p !! n' \<rbrakk>
\<Longrightarrow> x + p && ~~mask n = x"
apply (subst word_plus_and_or_coroll)
apply (rule word_eqI)
apply (clarsimp simp: word_size is_aligned_nth)
apply (erule_tac x=na in allE)+
apply simp
apply (rule word_eqI)
apply (clarsimp simp: word_size is_aligned_nth word_ops_nth_size)
apply (erule_tac x=na in allE)+
apply (case_tac "na < n")
apply simp
apply simp
done
(* For bits-aligned ptr: clearing the low `bits` of ptr' yields ptr
   exactly when ptr' lies inside the 2^bits-sized block starting at ptr.
   The degenerate case bits \<ge> word length is handled first via
   is_aligned_get_word_bits (mask becomes all-ones). *)
lemma mask_in_range:
  "is_aligned ptr bits
   \<Longrightarrow> (ptr' && (~~ mask bits) = ptr) = (ptr' \<in> {ptr .. ptr + 2 ^ bits - 1})"
  apply (erule is_aligned_get_word_bits)
   defer
   apply (simp add: power_overflow mask_def)
  apply (rule iffI)
   (* forward direction: masked ptr' bounds ptr' from both sides *)
   apply (drule sym)
   apply (simp add: word_and_le2)
   apply (subst field_simps[symmetric], subst mask_2pm1[symmetric])
   apply (subst word_plus_and_or_coroll)
    apply (rule word_eqI, clarsimp simp: word_ops_nth_size)
   apply (subgoal_tac "ptr' && ~~ mask bits || mask bits = ptr' || mask bits")
    apply (simp add: le_word_or2)
   apply (rule word_eqI, clarsimp simp: word_ops_nth_size word_size)
   apply fastforce
  (* reverse direction: decompose ptr' as ptr || low-bits remainder *)
  apply (subgoal_tac "\<exists>x. ptr' = ptr || x \<and> x && mask bits = x")
   apply (rule word_eqI)
   apply (clarsimp simp: word_ops_nth_size word_size is_aligned_mask)
   apply (drule_tac x=n in word_eqD)+
   apply (simp add: word_ops_nth_size word_size
                    is_aligned_mask)
   apply safe[1]
  apply (subgoal_tac "\<exists>x. ptr' = ptr + x")
   apply clarsimp
   apply (drule(1) word_le_minus_mono_left[where x=ptr])
   apply simp
   apply (subst conj_commute)
   apply (rule exI, rule context_conjI[OF _ word_plus_and_or_coroll])
    apply (subst mask_eq_iff_w2p)
     apply (simp add: word_size)
    apply (rule minus_one_helper5)
     apply simp
    apply simp
   apply (simp add: is_aligned_mask)
   apply (rule word_eqI)
   apply (drule_tac x=n in word_eqD)+
   apply (clarsimp simp: word_ops_nth_size word_size)
  apply (rule exI[where x="ptr' - ptr"])
  apply simp
  done
lemma aligned_range_offset_mem:
"\<lbrakk> is_aligned (x :: 'a :: len word) m; y < 2 ^ m; is_aligned p n;
n \<ge> m; n < LENGTH('a) \<rbrakk>
\<Longrightarrow> (x + y \<in> {p .. p + 2 ^ n - 1}) = (x \<in> {p .. p + 2 ^ n - 1})"
apply (simp only: mask_in_range[symmetric]
is_aligned_add_or)
apply (simp add: word_ao_dist, simp add: mask_out_sub_mask)
apply (subst less_mask_eq, erule order_less_le_trans)
apply (simp add: two_power_increasing)
apply simp
done
lemma upto_enum_inc_1_len:
"a < - 1
\<Longrightarrow> [(0 :: 'a :: len word) .e. 1 + a] = [0 .e. a] @ [1 + a]"
apply (simp add: upto_enum_word)
apply (subgoal_tac "unat (1 +a) = 1 + unat a")
apply simp
apply (subst unat_plus_simple[THEN iffD1])
apply (rule word_plus_mono_right2[where b = "2 ^ LENGTH('a) - 2"])
apply simp
using minus_one_helper3 apply force
apply unat_arith
done
lemma range_to_bl':
"\<lbrakk> is_aligned (ptr :: 'a :: len word) bits; bits < LENGTH('a) \<rbrakk>
\<Longrightarrow> {ptr .. ptr + (2 ^ bits) - 1}
= {x. take (LENGTH('a) - bits) (to_bl x) = take (LENGTH('a) - bits) (to_bl ptr)}"
apply (rule set_eqI, rule iffI)
apply clarsimp
apply (subgoal_tac "\<exists>y. x = ptr + y \<and> y < 2 ^ bits")
apply clarsimp
apply (subst is_aligned_add_conv)
apply assumption
apply simp
apply simp
apply (rule_tac x="x - ptr" in exI)
apply (simp add: add_diff_eq[symmetric])
apply (simp only: word_less_sub_le[symmetric])
apply (rule word_diff_ls')
apply (simp add: field_simps)
apply assumption
apply simp
apply (subgoal_tac "\<exists>y. y < 2 ^ bits \<and> to_bl (ptr + y) = to_bl x")
apply clarsimp
apply (rule conjI)
apply (erule(1) is_aligned_no_wrap')
apply (simp only: add_diff_eq[symmetric])
apply (rule word_plus_mono_right)
apply simp
apply (erule is_aligned_no_wrap')
apply simp
apply (rule_tac x="of_bl (drop (LENGTH('a) - bits) (to_bl x))" in exI)
apply (rule context_conjI)
apply (rule order_less_le_trans [OF of_bl_length])
apply simp
apply simp
apply (subst is_aligned_add_conv)
apply assumption
apply simp
apply (drule sym)
apply (simp add: word_rep_drop)
done
lemma range_to_bl:
"is_aligned (ptr :: 'a :: len word) bits
\<Longrightarrow> {ptr..ptr + 2 ^ bits - 1}
= {x. take (LENGTH('a) - bits) (to_bl x) = take (LENGTH('a) - bits) (to_bl ptr)}"
apply (erule is_aligned_get_word_bits)
apply (erule(1) range_to_bl')
apply (rule set_eqI)
apply (simp add: power_overflow)
done
(* Two aligned power-of-two blocks are either disjoint or nested: one
   contains the other.  Proved by comparing the high bits retained in the
   to_bl representation (range_to_bl); whichever block keeps fewer high
   bits is the larger one. *)
lemma aligned_ranges_subset_or_disjoint:
  "\<lbrakk> is_aligned (p :: 'a :: len word) n; is_aligned (p' :: 'a :: len word) n' \<rbrakk>
   \<Longrightarrow> {p .. p + 2 ^ n - 1} \<inter> {p' .. p' + 2 ^ n' - 1} = {}
       \<or> {p .. p + 2 ^ n - 1} \<subseteq> {p' .. p' + 2 ^ n' - 1}
       \<or> {p .. p + 2 ^ n - 1} \<supseteq> {p' .. p' + 2 ^ n' - 1}"
  apply (simp add: range_to_bl)
  apply (rule disjCI2)
  apply (erule nonemptyE)
  apply simp
  (* one of the two prefix lengths extends the other *)
  apply (subgoal_tac "(\<exists>n''. LENGTH('a) - n = (LENGTH('a) - n') + n'')
                      \<or> (\<exists>n''. LENGTH('a) - n' = (LENGTH('a) - n) + n'')")
   apply (elim conjE disjE exE)
    apply (rule disjI1)
    apply (clarsimp simp: take_add)
   apply (rule disjI2)
   apply (clarsimp simp: take_add)
  apply arith
  done
lemma aligned_range_offset_subset:
assumes al: "is_aligned (ptr :: 'a :: len word) sz" and al': "is_aligned x sz'"
and szv: "sz' \<le> sz"
and xsz: "x < 2 ^ sz"
shows "{ptr + x .. (ptr + x) + 2 ^ sz' - 1} \<subseteq> {ptr .. ptr + 2 ^ sz - 1}"
using al
proof (rule is_aligned_get_word_bits)
assume p0: "ptr = 0" and szv': "LENGTH ('a) \<le> sz"
hence "(2 :: 'a word) ^ sz = 0" by simp
thus ?thesis using p0
apply -
apply (erule ssubst)
apply simp
done
next
assume szv': "sz < LENGTH('a)"
hence blah: "2 ^ (sz - sz') < (2 :: nat) ^ LENGTH('a)"
using szv
apply -
apply (rule power_strict_increasing, simp+)
done
show ?thesis using szv szv'
apply (intro range_subsetI)
apply (rule is_aligned_no_wrap' [OF al xsz])
apply (simp only: add_diff_eq[symmetric])
apply (subst add.assoc, rule word_plus_mono_right)
apply (subst iffD1 [OF le_m1_iff_lt])
apply (simp add: p2_gt_0)
apply (rule is_aligned_add_less_t2n[OF al' _ szv xsz])
apply simp
apply (simp add: field_simps szv al is_aligned_no_overflow)
done
qed
lemma aligned_diff:
"\<lbrakk> is_aligned (dest :: 'a :: len word) bits; is_aligned (ptr :: 'a :: len word) sz;
bits \<le> sz; sz < LENGTH('a); dest < ptr \<rbrakk>
\<Longrightarrow> (2 ^ bits - 1) + dest < ptr"
apply (frule_tac p' = ptr in aligned_ranges_subset_or_disjoint)
apply assumption
apply (elim disjE)
apply clarsimp
apply (drule_tac ptr = dest in is_aligned_no_overflow)
apply simp
apply (drule is_aligned_no_overflow)
apply clarsimp
apply (erule impE)
apply (erule order_trans[OF less_imp_le])
apply (clarsimp simp:field_simps)
apply (clarsimp simp:not_less field_simps not_le)
apply clarsimp
apply (drule_tac ptr = dest in is_aligned_no_overflow)
apply simp
apply fastforce
apply clarsimp
apply (frule is_aligned_no_overflow)
apply (erule impE)
apply (frule(1) is_aligned_no_overflow)
apply (rule ccontr)
apply (clarsimp simp:not_less p_assoc_help)
apply (subst (asm) add.commute[where b = "(2^ sz - 1)"])
apply (subst (asm) add.commute[where b = "(2^ bits - 1)"])+
apply (drule word_sub_mono2)
apply (rule word_le_minus_mono_left)
apply (erule(1) two_power_increasing)
apply (simp add:word_1_le_power)
apply (simp add:field_simps is_aligned_no_overflow)+
done
lemma aligned_ranges_subset_or_disjoint_coroll:
"\<lbrakk> is_aligned (p :: 'a :: len word) n; is_aligned (p' :: 'a :: len word) n';
p && ~~ mask n' \<noteq> p'; p' && ~~ mask n \<noteq> p \<rbrakk>
\<Longrightarrow> {p .. p + 2 ^ n - 1} \<inter> {p' .. p' + 2 ^ n' - 1} = {}"
using aligned_ranges_subset_or_disjoint
apply (simp only: mask_in_range)
apply (subgoal_tac "p \<in> {p .. p + 2 ^ n - 1} \<and> p' \<in> {p' .. p' + 2 ^ n' - 1}")
apply blast
using is_aligned_neg_mask_eq mask_in_range by blast
lemma neg_mask_combine:
"~~ mask a && ~~ mask b = ~~ mask (max a b)"
by (auto simp: word_ops_nth_size word_size intro!: word_eqI)
lemma neg_mask_twice:
"x && ~~ mask n && ~~ mask m = x && ~~ mask (max n m)"
by (metis neg_mask_combine)
lemma multiple_mask_trivia:
"n \<ge> m \<Longrightarrow> (x && ~~ mask n) + (x && mask n && ~~ mask m) = x && ~~ mask m"
apply (rule trans[rotated], rule_tac w="mask n" in word_plus_and_or_coroll2)
apply (simp add: word_bw_assocs word_bw_comms word_bw_lcs neg_mask_twice
max_absorb2)
done
lemma distinct_aligned_addresses_accumulate:
"\<lbrakk> is_aligned p n; is_aligned ptr bits; n \<ge> m; n < size p; m \<le> bits;
(\<forall> y < 2 ^ (n - m). p + (y << m) \<notin> {ptr .. ptr + 2 ^ bits - 1}) \<rbrakk>
\<Longrightarrow> {p .. p + 2 ^ n - 1} \<inter> {ptr .. ptr + 2 ^ bits - 1} = {}"
apply safe
apply (simp only: mask_in_range[symmetric])
apply (drule_tac x="(x && mask n) >> m" in spec)
apply (simp add: shiftr_shiftl1 word_bw_assocs)
apply (drule mp, rule shiftr_less_t2n)
apply (subst add_diff_inverse, simp, rule and_mask_less', simp add: word_size)
apply (clarsimp simp: multiple_mask_trivia word_bw_assocs neg_mask_twice max_absorb2)
done
lemma leq_mask_shift:
"(x :: 'a :: len word) \<le> mask (low_bits + high_bits)
\<Longrightarrow> (x >> low_bits) \<le> mask high_bits"
by (simp add: le_mask_iff shiftr_shiftr)
lemma ucast_ucast_eq_mask_shift:
"(x :: 'a :: len word) \<le> mask (low_bits + LENGTH('b))
\<Longrightarrow> ucast((ucast (x >> low_bits)) :: 'b :: len word) = x >> low_bits"
by (meson and_mask_eq_iff_le_mask eq_ucast_ucast_eq not_le_imp_less shiftr_less_t2n'
ucast_ucast_len)
lemma const_le_unat:
"\<lbrakk> b < 2 ^ LENGTH('a); of_nat b \<le> a \<rbrakk>
\<Longrightarrow> b \<le> unat (a :: 'a :: len word)"
by (clarsimp simp: word_le_def uint_nat of_nat_inverse)
lemma createNewCaps_guard:
fixes x :: "'a :: len word"
shows "\<lbrakk> unat x = c; b < 2 ^ LENGTH('a) \<rbrakk>
\<Longrightarrow> (n < of_nat b \<and> n < x) = (n < of_nat (min (min b c) c))"
apply (erule subst)
apply (simp add: min.assoc)
apply (rule iffI)
apply (simp add: min_def word_less_nat_alt split: if_split)
apply (simp add: min_def word_less_nat_alt not_le le_unat_uoi split: if_split_asm)
by (simp add: of_nat_inverse)
lemma upt_enum_offset_trivial:
"\<lbrakk> x < 2 ^ LENGTH('a) - 1 ; n \<le> unat x \<rbrakk>
\<Longrightarrow> ([(0 :: 'a :: len word) .e. x] ! n) = of_nat n"
apply (induct x arbitrary: n)
apply simp
by (simp add: upto_enum_word_nth)
lemma word_le_mask_out_plus_2sz:
"x \<le> (x && ~~ mask sz) + 2 ^ sz - 1"
using mask_in_range[where ptr'=x and bits=sz,
OF is_aligned_neg_mask2[where a=x]]
by simp
lemma bits_2_subtract_ineq:
"i < (n :: ('a :: len) word)
\<Longrightarrow> 2 ^ bits + 2 ^ bits * unat (n - (1 + i)) = unat (n - i) * 2 ^ bits"
apply (simp add: unat_sub minus_one_helper2)
apply (subst unatSuc)
apply clarsimp
apply unat_arith
apply (simp only: mult_Suc_right[symmetric])
apply (rule trans[OF mult.commute], rule arg_cong2[where f="(*)"], simp_all)
apply (simp add: word_less_nat_alt)
done
(* ucast into the signed word type of the same length commutes with
   addition; the one-bit word case is handled separately. *)
lemma ucast_add:
  "ucast (a + (b :: 'a :: len word)) = ucast a + (ucast b :: ('a signed word))"
  apply (case_tac "LENGTH('a) = 1")
   apply (clarsimp simp: ucast_def)
   apply (metis (hide_lams, mono_tags) One_nat_def len_signed plus_word.abs_eq
                                       uint_word_arith_bintrs(1) word_ubin.Abs_norm)
  apply (clarsimp simp: ucast_def)
  apply (metis le_refl len_signed plus_word.abs_eq uint_word_arith_bintrs(1) wi_bintr)
  done

(* Subtraction version, derived from ucast_add with b := -b. *)
lemma ucast_minus:
  "ucast (a - (b :: 'a :: len word)) = ucast a - (ucast b :: ('a signed word))"
  apply (insert ucast_add[where a=a and b="-b"])
  apply (metis (no_types, hide_lams) add_diff_eq diff_add_cancel ucast_add)
  done

(* Round-tripping through the signed type preserves adding one. *)
lemma scast_ucast_add_one [simp]:
  "scast (ucast (x :: 'a::len word) + (1 :: 'a signed word)) = x + 1"
  apply (subst ucast_1[symmetric])
  apply (subst ucast_add[symmetric])
  apply clarsimp
  done
lemma word_and_le_plus_one:
"a > 0 \<Longrightarrow> (x :: 'a :: len word) && (a - 1) < a"
by (simp add: gt0_iff_gem1 word_and_less')
lemma unat_of_ucast_then_shift_eq_unat_of_shift[simp]:
"LENGTH('b) \<ge> LENGTH('a)
\<Longrightarrow> unat ((ucast (x :: 'a :: len word) :: 'b :: len word) >> n) = unat (x >> n)"
by (simp add: shiftr_div_2n' unat_ucast_up_simp)
lemma unat_of_ucast_then_mask_eq_unat_of_mask[simp]:
"LENGTH('b) \<ge> LENGTH('a)
\<Longrightarrow> unat ((ucast (x :: 'a :: len word) :: 'b :: len word) && mask m) = unat (x && mask m)"
by (metis ucast_and_mask unat_ucast_up_simp)
lemma word_clz_sint_upper[simp]:
"LENGTH('a) \<ge> 3
\<Longrightarrow> sint (of_nat (word_clz (w :: 'a :: len word)) :: 'a signed word) \<le> LENGTH('a)"
apply (subst sint_eq_uint)
apply (rule not_msb_from_less)
apply simp
apply (rule word_of_nat_less)
apply simp
apply (rule order_le_less_trans[OF word_clz_max])
apply (simp add: word_size)
using small_powers_of_2 apply simp
apply (subst uint_nat)
apply (simp add: unat_of_nat)
apply (subst Divides.mod_less)
apply simp
apply (rule order_le_less_trans[OF word_clz_max[simplified]])
apply (simp add: word_size)
by (metis word_clz_max wsst_TYs(3))
lemma word_clz_sint_lower[simp]:
"LENGTH('a) \<ge> 3
\<Longrightarrow> - sint (of_nat (word_clz (w :: 'a :: len word)) :: 'a signed word) \<le> LENGTH('a)"
apply (subst sint_eq_uint)
using small_powers_of_2 uint_nat
apply (simp add: order_le_less_trans[OF word_clz_max] not_msb_from_less word_of_nat_less
word_size)
by (simp add: uint_nat)
lemma shiftr_less_t2n3:
"\<lbrakk> (2 :: 'a word) ^ (n + m) = 0; m < LENGTH('a) \<rbrakk>
\<Longrightarrow> (x :: 'a :: len word) >> n < 2 ^ m"
by (fastforce intro: shiftr_less_t2n' simp: mask_def power_overflow)
lemma unat_shiftr_le_bound:
"\<lbrakk> 2 ^ (LENGTH('a :: len) - n) - 1 \<le> bnd; 0 < n \<rbrakk>
\<Longrightarrow> unat ((x :: 'a word) >> n) \<le> bnd"
using less_not_refl3 le_step_down_nat le_trans less_or_eq_imp_le word_shiftr_lt
by (metis (no_types, lifting))
lemma shiftr_eqD:
"\<lbrakk> x >> n = y >> n; is_aligned x n; is_aligned y n \<rbrakk>
\<Longrightarrow> x = y"
by (metis is_aligned_shiftr_shiftl)
lemma word_shiftr_shiftl_shiftr_eq_shiftr:
"a \<ge> b \<Longrightarrow> (x :: 'a :: len word) >> a << b >> b = x >> a"
by (simp add: mask_shift multi_shift_simps(5) shiftr_shiftr)
lemma of_int_uint_ucast:
"of_int (uint (x :: 'a::len word)) = (ucast x :: 'b::len word)"
by (simp add: ucast_def word_of_int)
lemma mod_mask_drop:
"\<lbrakk> m = 2 ^ n; 0 < m; mask n && msk = mask n \<rbrakk>
\<Longrightarrow> (x mod m) && msk = x mod m"
by (simp add: word_mod_2p_is_mask word_bw_assocs)
lemma mask_eq_ucast_eq:
"\<lbrakk> x && mask LENGTH('a) = (x :: ('c :: len word));
LENGTH('a) \<le> LENGTH('b)\<rbrakk>
\<Longrightarrow> ucast (ucast x :: ('a :: len word)) = (ucast x :: ('b :: len word))"
by (metis ucast_and_mask ucast_id ucast_ucast_mask ucast_up_eq)
lemma of_nat_less_t2n:
"of_nat i < (2 :: ('a :: len) word) ^ n
\<Longrightarrow> n < LENGTH('a) \<and> unat (of_nat i :: 'a word) < 2 ^ n"
apply (cases "n < LENGTH('a)")
by (clarsimp simp: word_less_nat_alt power_overflow)+
lemmas double_neg_mask = neg_mask_combine
lemmas int_unat = uint_nat[symmetric]
lemmas word_sub_mono3 = word_plus_mcs_4'
lemma word_sub_mono4:
"\<lbrakk> y + x \<le> z + x; (y :: ('a :: len) word) \<le> y + x; z \<le> z + x \<rbrakk>
\<Longrightarrow> y \<le> z"
apply (subst(asm) add.commute)
apply (subst(asm) add.commute,
erule word_sub_mono2)
apply simp
apply (simp add: add.commute)+
done
lemma eq_or_less_helperD:
"\<lbrakk> n = unat (2 ^ m - 1 :: 'a :: len word) \<or> n < unat (2 ^ m - 1 :: 'a word);
m < LENGTH('a) \<rbrakk>
\<Longrightarrow> n < 2 ^ m"
apply (simp add: unat_sub word_1_le_power)
apply (subgoal_tac "2 ^ m \<ge> (1 :: nat)")
apply arith
apply simp
done
(* For n <= m, subtracting the smaller mask leaves exactly the upper
   slice of bits: mask m - mask n = mask m AND NOT (mask n).
   Proved by rewriting the subtraction as a disjoint-OR decomposition
   of mask m using word_plus_and_or_coroll. *)
lemma mask_sub:
  "n \<le> m \<Longrightarrow> mask m - mask n = mask m && ~~ mask n"
  apply (simp add: field_simps)
  apply (subst word_plus_and_or_coroll)
   apply (metis mask_AND_NOT_mask word_bw_comms(1))
  by (metis (no_types, lifting)
        AND_NOT_mask_plus_AND_mask_eq and_mask_eq_iff_shiftr_0 mask_AND_NOT_mask shiftr_mask_le
        word_bool_alg.conj.commute word_bw_comms(2) word_plus_and_or_coroll)
lemma neg_mask_diff_bound:
"sz'\<le> sz \<Longrightarrow> (ptr && ~~ mask sz') - (ptr && ~~ mask sz) \<le> 2 ^ sz - 2 ^ sz'"
(is "_ \<Longrightarrow> ?lhs \<le> ?rhs")
proof -
assume lt: "sz' \<le> sz"
hence "?lhs = ptr && (mask sz && (~~ mask sz'))"
apply (simp add: mask_out_sub_mask field_simps mask_and_mask min.absorb2)
apply (simp add: mask_sub)
apply (subst word_plus_and_or_coroll)
apply (simp add: word_bool_alg.conj_left_commute)
by (metis (no_types, lifting)
and_mask_eq_iff_shiftr_0 mask_AND_NOT_mask shiftr_mask_le word_bool_alg.conj.commute
word_bool_alg.conj_disj_distrib word_plus_and_or_coroll word_plus_and_or_coroll2)
also have "\<dots> \<le> ?rhs" using lt
apply (simp add: mask_sub[symmetric])
apply (simp add: mask_def field_simps word_and_le1)
done
finally show ?thesis by simp
qed
lemma shift_distinct_helper:
"\<lbrakk> (x :: 'a :: len word) < bnd; y < bnd; x \<noteq> y; x << n = y << n; n < LENGTH('a);
bnd - 1 \<le> 2 ^ (LENGTH('a) - n) - 1 \<rbrakk>
\<Longrightarrow> P"
apply (cases "n = 0")
apply simp
apply (drule word_plus_mono_right[where x=1])
apply simp_all
apply (subst word_le_sub1)
apply (rule power_not_zero)
apply simp
apply simp
apply (drule(1) order_less_le_trans)+
apply (clarsimp simp: bang_eq)
apply (drule_tac x="na + n" in spec)
apply (simp add: nth_shiftl)
apply (case_tac "na + n < LENGTH('a)", simp_all)
apply safe
apply (drule(1) nth_bounded)
apply simp
apply simp
apply (drule(1) nth_bounded)
apply simp
apply simp
done
lemma of_nat_shift_distinct_helper:
"\<lbrakk> x < bnd; y < bnd; x \<noteq> y; (of_nat x :: 'a :: len word) << n = of_nat y << n;
n < LENGTH('a); bnd \<le> 2 ^ (LENGTH('a) - n) \<rbrakk>
\<Longrightarrow> P"
apply (cases "n = 0")
apply (simp add: word_unat.Abs_inject unats_def)
apply (subgoal_tac "bnd < 2 ^ LENGTH('a)")
apply (erule(1) shift_distinct_helper[rotated, rotated, rotated])
defer
apply (erule(1) of_nat_mono_maybe[rotated])
apply (erule(1) of_nat_mono_maybe[rotated])
apply (simp add: word_unat.Abs_inject unats_def)
apply (erule order_le_less_trans)
apply (rule power_strict_increasing)
apply simp
apply simp
apply (simp add: word_less_nat_alt)
apply (simp add: unat_minus_one [OF of_nat_neq_0]
word_unat.Abs_inverse unats_def)
done
lemma pre_helper2:
"\<lbrakk> is_aligned (base :: 'a :: len word) n; n < LENGTH('a); bits \<le> n; x < 2 ^ (n - bits) \<rbrakk>
\<Longrightarrow> base + x * 2^bits \<in> {base .. base + 2 ^ n - 1}"
apply (subgoal_tac "x * 2^bits < 2 ^ n")
apply simp
apply (rule context_conjI)
apply (erule(1) is_aligned_no_wrap')
apply (subst add_diff_eq[symmetric])
apply (rule word_plus_mono_right)
apply simp
apply (erule is_aligned_no_wrap')
apply simp
apply (drule_tac k="2^bits" in word_mult_less_mono1)
apply (simp add: p2_gt_0)
apply (subst unat_power_lower, simp)+
apply (simp only: power_add[symmetric])
apply (rule power_strict_increasing)
apply simp
apply simp
apply (simp add: power_add[symmetric])
done
lemma of_bl_length2:
"length xs + c < LENGTH('a) \<Longrightarrow> of_bl xs * 2^c < (2::'a::len word) ^ (length xs + c)"
apply (simp add: power_add)
apply (rule word_mult_less_mono1[OF of_bl_length])
by (auto simp add: p2_gt_0 power_add[symmetric])
lemma ptr_add_distinct_helper:
"\<lbrakk> ptr_add (p :: 'a :: len word) (x * 2 ^ n) = ptr_add p (y * 2 ^ n); x \<noteq> y;
x < bnd; y < bnd; n < LENGTH('a);
bnd \<le> 2 ^ (LENGTH('a) - n) \<rbrakk>
\<Longrightarrow> P"
apply (clarsimp simp: ptr_add_def word_unat_power[symmetric]
shiftl_t2n[symmetric, simplified mult.commute])
using of_nat_shift_distinct_helper
by blast
lemma mask_out_eq_0:
"\<lbrakk> idx < 2 ^ sz; sz < LENGTH('a) \<rbrakk>
\<Longrightarrow> ((of_nat idx) :: 'a :: len word) && ~~ mask sz = 0"
apply (clarsimp simp: mask_out_sub_mask)
apply (subst less_mask_eq[symmetric])
apply (erule(1) of_nat_power)
apply simp
done
lemma is_aligned_neg_mask_eq':
"is_aligned ptr sz = (ptr && ~~ mask sz = ptr)"
apply (rule iffI)
apply (erule is_aligned_neg_mask_eq)
apply (simp add: is_aligned_mask)
apply (drule sym)
apply (subst (asm) word_plus_and_or_coroll2[symmetric,where w = "mask sz"])
apply simp
done
lemma neg_mask_mask_unat:
"sz < LENGTH('a)
\<Longrightarrow> unat ((ptr :: 'a :: len word) && ~~ mask sz) + unat (ptr && mask sz) = unat ptr"
apply (subst unat_plus_simple[THEN iffD1, symmetric])
apply (simp add: AND_NOT_mask_plus_AND_mask_eq word_and_le2)
by (simp add: AND_NOT_mask_plus_AND_mask_eq)
lemma unat_pow_le_intro:
"LENGTH('a) \<le> n \<Longrightarrow> unat (x :: 'a :: len word) < 2 ^ n"
by (metis (mono_tags, hide_lams)
less_imp_le less_irrefl lt2p_lem nat_less_le of_nat_le_iff of_nat_numeral
semiring_1_class.of_nat_power uint_nat)
lemma unat_shiftl_less_t2n:
"\<lbrakk> unat (x :: 'a :: len word) < 2 ^ (m - n); m < LENGTH('a) \<rbrakk>
\<Longrightarrow> unat (x << n) < 2 ^ m"
by (metis (no_types, lifting)
Word_Lemmas.of_nat_power diff_le_self le_less_trans shiftl_less_t2n unat_less_power
unat_lt2p unat_of_nat_eq word_less_nat_alt)
lemma unat_is_aligned_add_helper:
"\<lbrakk> is_aligned p n; unat d < 2 ^ n \<rbrakk>
\<Longrightarrow> unat (p + d && mask n) = unat d \<and> unat (p + d && ~~ mask n) = unat p"
by (metis add.right_neutral and_mask_eq_iff_le_mask and_not_mask le_mask_iff mask_add_aligned
mask_out_add_aligned mult_zero_right shiftl_t2n shiftr_le_0)
lemma unat_shiftr_shiftl_mask_zero:
"\<lbrakk> c + a \<ge> LENGTH('a) + b ; c < LENGTH('a) \<rbrakk>
\<Longrightarrow> unat (((q :: 'a :: len word) >> a << b) && ~~ mask c) = 0"
by (fastforce intro: unat_is_aligned_add_helper[where p=0 and n=c, simplified, THEN conjunct2]
unat_shiftl_less_t2n unat_shiftr_less_t2n unat_pow_le_intro)
lemma of_nat_ucast:
"is_down (ucast :: ('a :: len) word \<Rightarrow> ('b :: len) word)
\<Longrightarrow> (of_nat n :: 'b word) = ucast (of_nat n :: 'a word)"
apply (subst word_unat.inverse_norm)
apply (simp add: ucast_def word_of_int[symmetric]
of_nat_nat[symmetric] unat_def[symmetric])
apply (simp add: unat_of_nat)
apply (rule nat_int.Rep_eqD)
apply (simp only: zmod_int)
apply (rule mod_mod_cancel)
apply (subst int_dvd_int_iff)
apply (rule le_imp_power_dvd)
apply (simp add: is_down_def target_size_def source_size_def word_size)
done
lemma leq_low_bits_iff_zero:
"\<lbrakk> x \<le> mask (low bits + high bits); x >> low_bits = 0 \<rbrakk>
\<Longrightarrow> (x && mask low_bits = 0) = (x = 0)"
using and_mask_eq_iff_shiftr_0 by force
lemma unat_less_iff:
"\<lbrakk> unat (a :: 'a :: len word) = b; c < 2 ^ LENGTH('a) \<rbrakk>
\<Longrightarrow> (a < of_nat c) = (b < c)"
apply (rule iffI)
apply (drule unat_less_helper)
apply simp
using unat_ucast_less_no_overflow by blast
lemma is_aligned_no_overflow3:
"\<lbrakk> is_aligned (a :: 'a :: len word) n; n < LENGTH('a); b < 2 ^ n; c \<le> 2 ^ n; b< c \<rbrakk>
\<Longrightarrow> a + b \<le> a + (c - 1)"
apply (rule word_plus_mono_right)
apply (simp add:minus_one_helper3)
apply (erule is_aligned_no_wrap')
by (meson le_m1_iff_lt minus_one_helper3 not_less)
lemma unat_sub_le_strg:
"unat v \<le> v2 \<and> x \<le> v \<and> y \<le> v \<and> y < (x :: ('a :: len) word)
\<longrightarrow> unat (x + (- 1 - y)) \<le> v2"
apply clarsimp
apply (erule order_trans[rotated])
apply (fold word_le_nat_alt)
apply (rule order_trans[rotated], assumption)
apply (rule order_trans[rotated], rule word_sub_le[where y="y + 1"])
apply (erule Word.inc_le)
apply (simp add: field_simps)
done
lemma mask_add_aligned_right:
"is_aligned p n \<Longrightarrow> (q + p) && mask n = q && mask n"
by (simp add: mask_add_aligned add.commute)
lemma leq_high_bits_shiftr_low_bits_leq_bits:
"x \<le> 2 ^ high_bits - 1
\<Longrightarrow> (x :: 'a :: len word) << low_bits \<le> mask (low_bits + high_bits)"
by (metis le_mask_shiftl_le_mask mask_2pm1)
lemma from_to_bool_last_bit:
"from_bool (to_bool (x && 1)) = x && 1"
apply (simp add: from_bool_def to_bool_and_1
split: bool.split)
apply (safe intro!: word_eqI, auto)
done
lemma word_two_power_neg_ineq:
"2 ^ m \<noteq> (0 :: 'a word) \<Longrightarrow> 2 ^ n \<le> - (2 ^ m :: ('a :: len) word)"
apply (cases "n < LENGTH('a)", simp_all add: power_overflow)
apply (cases "m < LENGTH('a)", simp_all add: power_overflow)
apply (simp add: word_le_nat_alt Aligned.unat_minus word_size)
apply (cases "LENGTH('a)", simp_all)
apply (simp add: less_Suc_eq_le)
apply (drule power_increasing[where a=2 and n=n]
power_increasing[where a=2 and n=m], simp)+
apply (drule(1) add_le_mono)
apply simp
done
lemma multi_lessD:
"\<lbrakk> (a :: nat) * b < c; 0 < a; 0 < b \<rbrakk>
\<Longrightarrow> a < c \<and> b < c"
by (cases a, simp_all,cases b,simp_all)
lemmas unat_le_helper = word_unat_less_le
lemmas word_of_nat_plus = of_nat_add[where 'a="'a :: len word"]
lemmas word_of_nat_minus = of_nat_diff[where 'a="'a :: len word"]
lemma unat_shiftl_absorb:
"\<lbrakk> x \<le> 2 ^ p; p + k < LENGTH('a) \<rbrakk>
\<Longrightarrow> unat (x :: 'a :: len word) * 2 ^ k = unat (x * 2 ^ k)"
apply (simp add:unat_word_ariths)
apply (subst mod_less)
apply (rule le_less_trans[OF mult_le_mono1])
apply (erule iffD1[OF word_le_nat_alt])
apply (clarsimp simp: power_add[symmetric])+
done
(* this is a bit deceptive: 2 ^ len.. = 0, so really this is relying on 'word_n1_ge': ptr \<le> -1 *)
lemma word_up_bound:
"(ptr :: 'a :: len word) \<le> 2 ^ LENGTH('a) - 1 "
by auto
lemma word_plus_mono_right_split:
"\<lbrakk> unat ((x :: 'a :: len word) && mask sz) + unat z < 2 ^ sz; sz < LENGTH('a) \<rbrakk>
\<Longrightarrow> x \<le> x + z"
(is "\<lbrakk> ?bound; ?len \<rbrakk> \<Longrightarrow> ?rhs \<le> ?lhs")
apply (subgoal_tac "(x && ~~ mask sz) + (x && mask sz)
\<le> (x && ~~ mask sz) + ((x && mask sz) + z)")
apply (simp add:word_plus_and_or_coroll2 field_simps)
apply (rule word_plus_mono_right)
apply (simp add:no_olen_add )
apply (rule less_le_trans)
apply (simp add:uint_nat)
apply (subst of_nat_add[symmetric])
apply (drule iffD2[OF of_nat_less_iff])
apply simp
apply (rule less_imp_le)
apply (rule less_le_trans[where y = "2^LENGTH('a)"] )
apply simp
apply simp
apply (rule word_plus_mono_right2)
apply (rule is_aligned_no_overflow')
apply (rule Aligned.is_aligned_neg_mask[OF le_refl])
apply (rule le_m1_iff_lt[THEN iffD1,THEN iffD2])
apply (simp add: p2_gt_0)
apply (rule iffD2[OF word_less_nat_alt])
apply (auto simp:unat_plus_if_size word_size not_less)
done
lemma mul_not_mask_eq_neg_shiftl:
"~~ mask n = (-1) << n"
by (simp add: NOT_mask shiftl_t2n)
lemma shiftr_mul_not_mask_eq_and_not_mask:
"(x >> n) * ~~ mask n = - (x && ~~ mask n)"
by (metis (no_types, hide_lams)
and_not_mask mul_not_mask_eq_neg_shiftl mult_minus_left semiring_normalization_rules(7)
shiftl_1 shiftl_t2n)
lemma mask_eq_n1_shiftr:
"n \<le> LENGTH('a)
\<Longrightarrow> (mask n :: 'a :: len word) = (-1) >> (LENGTH('a) - n)"
apply (subst word_bl.Rep_inject[symmetric])
apply (subst to_bl_mask, simp)
apply (subst bl_shiftr, simp add: word_size)
apply (subst to_bl_n1, simp add: word_size)
done
lemma is_aligned_mask_out_add_eq:
"is_aligned p n
\<Longrightarrow> (p + x) && ~~ mask n = p + (x && ~~ mask n)"
by (simp add: mask_out_sub_mask mask_add_aligned)
lemmas is_aligned_mask_out_add_eq_sub
= is_aligned_mask_out_add_eq[where x="a - b" for a b, simplified field_simps]
lemma aligned_bump_down:
"is_aligned x n
\<Longrightarrow> (x - 1) && ~~ mask n = x - 2 ^ n"
apply (frule is_aligned_mask_out_add_eq[where x="-1"])
apply (simp add: NOT_mask)
done
lemma base_length_minus_one_inequality:
assumes foo: "wbase \<le> 2 ^ sz - 1"
"1 \<le> (wlength :: ('a :: len) word)"
"wlength \<le> 2 ^ sz - wbase"
"sz < LENGTH ('a)"
shows "wbase \<le> wbase + wlength - 1"
proof -
note sz_less = power_strict_increasing[OF foo(4), where a=2]
from foo have plus: "unat wbase + unat wlength < 2 ^ LENGTH('a)"
apply -
apply (rule order_le_less_trans[rotated], rule sz_less, simp)
apply (simp add: unat_arith_simps split: if_split_asm)
done
from foo show ?thesis
by (simp add: unat_arith_simps plus)
qed
lemma unat_2tp_if:
"unat (2 ^ n :: ('a :: len) word) = (if n < LENGTH ('a) then 2 ^ n else 0)"
by (split if_split, simp_all add: power_overflow)
lemma mask_of_mask:
"mask (n::nat) && mask (m::nat) = mask (min m n)"
apply (rule word_eqI)
apply (auto simp:word_size)
done
lemma unat_signed_ucast_less_ucast:
"LENGTH('a) \<le> LENGTH('b)
\<Longrightarrow> unat (ucast (x :: 'a :: len word) :: 'b :: len signed word) = unat x"
by (simp add: unat_ucast_up_simp)
lemmas unat_ucast_mask = unat_ucast_eq_unat_and_mask[where w=a for a]
lemma length_upto_enum_cases:
fixes a :: word32
shows "length [a .e. b] = (if a \<le> b then Suc (unat b) - unat a else 0)"
apply (case_tac "a \<le> b")
apply (clarsimp)
apply (clarsimp simp: upto_enum_def)
apply unat_arith
done
lemmas from_bool_to_bool_and_1 = from_to_bool_last_bit[where x=r for r]
lemma t2n_mask_eq_if:
"(2 ^ n && mask m) = (if n < m then 2 ^ n else 0)"
by (rule word_eqI, auto simp add: word_size nth_w2p split: if_split)
lemma unat_ucast_le:
"unat (ucast (x :: 'a :: len word) :: 'b :: len word) \<le> unat x"
by (simp add: ucast_nat_def unat_le_helper)
lemma ucast_le_up_down_iff:
"\<lbrakk> LENGTH('a) \<le> LENGTH('b);
(x :: 'b :: len word) \<le> ucast (max_word :: 'a :: len word) \<rbrakk>
\<Longrightarrow> (ucast x \<le> (y :: 'a word)) = (x \<le> ucast y)"
using le_max_word_ucast_id ucast_le_ucast by metis
lemmas max_word_neq_0 = max_word_not_0
lemma ucast_ucast_mask_shift:
"a \<le> LENGTH('a) + b
\<Longrightarrow> ucast (ucast (p && mask a >> b) :: 'a :: len word) = p && mask a >> b"
by (metis add.commute le_mask_iff shiftr_mask_le ucast_ucast_eq_mask_shift word_and_le')
lemma unat_ucast_mask_shift:
"a \<le> LENGTH('a) + b
\<Longrightarrow> unat (ucast (p && mask a >> b) :: 'a :: len word) = unat (p && mask a >> b)"
by (metis linear ucast_ucast_mask_shift unat_ucast_up_simp)
lemma mask_overlap_zero:
"a \<le> b \<Longrightarrow> (p && mask a) && ~~ mask b = 0"
by (metis NOT_mask_AND_mask mask_lower_twice2 max_def)
lemma mask_shifl_overlap_zero:
"a + c \<le> b \<Longrightarrow> (p && mask a << c) && ~~ mask b = 0"
by (metis and_not_mask le_mask_iff mask_shiftl_decompose shiftl_0 shiftl_over_and_dist
shiftr_mask_le word_and_le' word_bool_alg.conj_commute)
lemma mask_overlap_zero':
"a \<ge> b \<Longrightarrow> (p && ~~ mask a) && mask b = 0"
using mask_AND_NOT_mask mask_AND_less_0 by blast
lemma mask_rshift_mult_eq_rshift_lshift:
"((a :: 'a :: len word) >> b) * (1 << c) = (a >> b << c)"
by (simp add: shiftl_t2n)
lemma shift_alignment:
"a \<ge> b \<Longrightarrow> is_aligned (p >> a << a) b"
using is_aligned_shift is_aligned_weaken by blast
lemma mask_split_sum_twice:
"a \<ge> b \<Longrightarrow> (p && ~~ mask a) + ((p && mask a) && ~~ mask b) + (p && mask b) = p"
by (simp add: add.commute multiple_mask_trivia word_bool_alg.conj_commute
word_bool_alg.conj_left_commute word_plus_and_or_coroll2)
lemma mask_shift_eq_mask_mask:
"(p && mask a >> b << b) = (p && mask a) && ~~ mask b"
by (simp add: and_not_mask)
lemma mask_shift_sum:
"\<lbrakk> a \<ge> b; unat n = unat (p && mask b) \<rbrakk>
\<Longrightarrow> (p && ~~ mask a) + (p && mask a >> b) * (1 << b) + n = (p :: 'a :: len word)"
by (metis and_not_mask mask_rshift_mult_eq_rshift_lshift mask_split_sum_twice word_unat.Rep_eqD)
lemmas word_le_p2m1 = word_up_bound[where ptr=w for w]
lemma inj_ucast:
"\<lbrakk> uc = ucast; is_up uc \<rbrakk>
\<Longrightarrow> inj uc"
using down_ucast_inj is_up_down by blast
lemma ucast_eq_0[OF refl]:
"\<lbrakk> c = ucast; is_up c \<rbrakk>
\<Longrightarrow> (c x = 0) = (x = 0)"
by (metis uint_0_iff uint_up_ucast)
lemma is_up_compose':
fixes uc :: "'a :: len word \<Rightarrow> 'b :: len word"
and uc' :: "'b word \<Rightarrow> 'c :: len sword"
shows "\<lbrakk> is_up uc; is_up uc' \<rbrakk>
\<Longrightarrow> is_up (uc' \<circ> uc)"
unfolding is_up_def by (simp add: Word.target_size Word.source_size)
lemma is_up_compose:
"\<lbrakk> is_up uc; is_up uc' \<rbrakk>
\<Longrightarrow> is_up (uc' \<circ> uc)"
unfolding is_up_def by (simp add: Word.target_size Word.source_size)
lemma uint_is_up_compose:
fixes uc :: "'a :: len word \<Rightarrow> 'b :: len word"
and uc' :: "'b word \<Rightarrow> 'c :: len sword"
assumes "uc = ucast"
and "uc' = ucast"
and " uuc = uc' \<circ> uc"
shows "\<lbrakk> is_up uc; is_up uc' \<rbrakk>
\<Longrightarrow> uint (uuc b) = uint b"
apply (simp add: assms)
apply (frule is_up_compose)
apply (simp_all )
apply (simp only: Word.uint_up_ucast)
done
lemma uint_is_up_compose_pred:
fixes uc :: "'a :: len word \<Rightarrow> 'b :: len word"
and uc' :: "'b word \<Rightarrow> 'c :: len sword"
assumes "uc = ucast" and "uc' = ucast" and " uuc = uc' \<circ> uc"
shows "\<lbrakk> is_up uc; is_up uc' \<rbrakk>
\<Longrightarrow> P (uint (uuc b)) \<longleftrightarrow> P( uint b)"
apply (simp add: assms)
apply (frule is_up_compose)
apply (simp_all )
apply (simp only: Word.uint_up_ucast)
done
lemma is_down_up_sword:
fixes uc :: "'a :: len word \<Rightarrow> 'b :: len sword"
shows "\<lbrakk> uc = ucast; LENGTH('a) < LENGTH('b) \<rbrakk>
\<Longrightarrow> is_up uc = (\<not> is_down uc)"
by (simp add: target_size source_size is_up_def is_down_def )
lemma is_not_down_compose:
fixes uc :: "'a :: len word \<Rightarrow> 'b :: len word"
and uc' :: "'b word \<Rightarrow> 'c :: len sword"
shows "\<lbrakk> uc = ucast; uc' = ucast; LENGTH('a) < LENGTH('c) \<rbrakk>
\<Longrightarrow> \<not> is_down (uc' \<circ> uc)"
unfolding is_down_def
by (simp add: Word.target_size Word.source_size)
lemma sint_ucast_uint:
fixes uc :: "'a :: len word \<Rightarrow> 'b :: len word"
and uc' :: "'b word \<Rightarrow> 'c :: len sword"
assumes "uc = ucast" and " uc' = ucast" and "uuc=uc' \<circ> uc "
and "LENGTH('a) < LENGTH('c signed)"
shows "\<lbrakk> is_up uc; is_up uc'\<rbrakk>
\<Longrightarrow> sint (uuc b) = uint b"
apply (simp add: assms)
apply (frule is_up_compose')
apply simp_all
apply (simp add: ucast_ucast_b)
apply (rule sint_ucast_eq_uint)
apply (insert assms)
apply (simp add: is_down_def target_size source_size)
done
lemma sint_ucast_uint_pred:
fixes uc :: "'a :: len word \<Rightarrow> 'b :: len word"
and uc' :: "'b word \<Rightarrow> 'c :: len sword"
and uuc :: "'a word \<Rightarrow> 'c sword"
assumes "uc = ucast" and " uc' = ucast" and "uuc=uc' \<circ> uc "
and "LENGTH('a) < LENGTH('c )"
shows "\<lbrakk> is_up uc; is_up uc' \<rbrakk>
\<Longrightarrow> P (uint b) \<longleftrightarrow> P (sint (uuc b))"
apply (simp add: assms )
apply (insert sint_ucast_uint[where uc=uc and uc'=uc' and uuc=uuc and b = b])
apply (simp add: assms)
done
lemma sint_uucast_uint_uucast_pred:
fixes uc :: "'a :: len word \<Rightarrow> 'b :: len word"
and uc' :: "'b word \<Rightarrow> 'c :: len sword"
assumes "uc = ucast" and " uc' = ucast" and "uuc=uc' \<circ> uc "
and "LENGTH('a) < LENGTH('c )"
shows "\<lbrakk> is_up uc; is_up uc' \<rbrakk>
\<Longrightarrow> P (uint(uuc b)) \<longleftrightarrow> P (sint (uuc b))"
apply (simp add: assms )
apply (insert sint_ucast_uint[where uc=uc and uc'=uc' and uuc=uuc and b = b])
apply (insert uint_is_up_compose_pred[where uc=uc and uc'=uc' and uuc=uuc and b=b])
apply (simp add: assms uint_is_up_compose_pred)
done
(* Two's-complement negation at the nat level: for nonzero x,
   unat (- x) = 2 ^ LENGTH('a) - unat x.  The x \<noteq> 0 hypothesis is
   essential, since unat (- 0) = 0, not 2 ^ LENGTH('a). *)
lemma unat_minus':
  fixes x :: "'a :: len word"
  shows "x \<noteq> 0 \<Longrightarrow> unat (-x) = 2 ^ LENGTH('a) - unat x"
  apply (simp add: unat_def word_minus_def)
  apply (simp add: int_word_uint zmod_zminus1_eq_if uint_0_iff)
  apply (subst nat_diff_distrib)
    apply simp
   apply (rule order_less_imp_le [OF uint_lt2p])
  apply (clarsimp simp: nat_power_eq)
  done
lemma word_nth_neq:
"n < LENGTH('a) \<Longrightarrow> (~~ x :: 'a :: len word) !! n = (\<not> x !! n)"
by (simp add: word_size word_ops_nth_size)
(* If x + of_nat n wraps around (the sum falls below x in the unsigned
   order), then the wrap point is already reached within n steps:
   there is some k \<le> n with x + of_nat k = 0.  The witness is
   k = unat (- x), the distance from x to the overflow point. *)
lemma word_wrap_of_natD:
  fixes x :: "'a :: len word"
  assumes wraps: "\<not> x \<le> x + of_nat n"
  shows "\<exists>k. x + of_nat k = 0 \<and> k \<le> n"
proof -
  show ?thesis
  proof (rule exI [where x = "unat (- x)"], intro conjI)
    (* x + (- x) = 0 by construction *)
    show "x + of_nat (unat (-x)) = 0"
      by simp
  next
    show "unat (-x) \<le> n"
    proof (subst unat_minus')
      (* wrapping is impossible for x = 0, so unat_minus' applies *)
      from wraps show "x \<noteq> 0"
        by (rule contrapos_pn, simp add: not_le)
    next
      show "2 ^ LENGTH('a) - unat x \<le> n" using wraps
        apply (simp add: no_olen_add_nat le_diff_conv not_less)
        apply (erule order_trans)
        apply (simp add: unat_of_nat)
        done
    qed
  qed
qed
lemma of_int_sint_scast:
"of_int (sint (x :: 'a :: len word)) = (scast x :: 'b :: len word)"
by (metis scast_def word_of_int)
lemma scast_of_nat_to_signed [simp]:
"scast (of_nat x :: 'a :: len word) = (of_nat x :: 'a signed word)"
by (metis cast_simps(23) scast_scast_id(2))
lemma scast_of_nat_signed_to_unsigned_add:
"(scast ((of_nat x) + (of_nat y) :: 'a :: len signed word))
= ((of_nat x) + (of_nat y) :: 'a :: len word)"
by (metis of_nat_add scast_of_nat)
lemma scast_of_nat_unsigned_to_signed_add:
"(scast ((of_nat x) + (of_nat y) :: 'a :: len word))
= ((of_nat x) + (of_nat y) :: 'a :: len signed word)"
by (metis Abs_fnat_hom_add scast_of_nat_to_signed)
(* The low n bits of a word always equal of_nat i for some i < 2 ^ n;
   phrased as membership in the image of [0 ..< 2 ^ n] so it can drive
   finite case analysis on (x && mask n), as in two_bits_cases. *)
lemma and_mask_cases:
  fixes x :: "'a :: len word"
  assumes len: "n < LENGTH('a)"
  shows "x && mask n \<in> of_nat ` set [0 ..< 2 ^ n]"
proof -
  (* Step 1: the masked value lies in the interval [0, 2^n - 1]. *)
  have "x && mask n \<in> {0 .. 2 ^ n - 1}"
    by (simp add: mask_def word_and_le1)
  also
  (* Step 2: that word interval is the image of the nat interval. *)
  have "... = of_nat ` {0 .. 2 ^ n - 1}"
    apply (rule set_eqI, rule iffI)
     apply (clarsimp simp: image_iff)
     apply (rule_tac x="unat x" in bexI; simp)
    using len
     apply (simp add: word_le_nat_alt unat_2tp_if unat_minus_one)
    using len
    apply (clarsimp simp: word_le_nat_alt unat_2tp_if unat_minus_one)
    apply (subst unat_of_nat_eq; simp add: nat_le_Suc_less)
    apply (erule less_le_trans)
    apply simp
    done
  (* Step 3: rewrite the nat interval as the set of an upt list. *)
  also have "{0::nat .. 2^n - 1} = set [0 ..< 2^n]" by (auto simp: nat_le_Suc_less)
  finally show ?thesis .
qed
lemma two_bits_cases:
"\<lbrakk> LENGTH('a) > 2; (x :: 'a :: len word) && 3 = 0 \<Longrightarrow> P; x && 3 = 1 \<Longrightarrow> P;
x && 3 = 2 \<Longrightarrow> P; x && 3 = 3 \<Longrightarrow> P \<rbrakk>
\<Longrightarrow> P"
apply (frule and_mask_cases[where n=2 and x=x, simplified mask_def])
using upt_conv_Cons by auto[1]
lemma sint_of_nat_ge_zero:
"x < 2 ^ (LENGTH('a) - 1) \<Longrightarrow> sint (of_nat x :: 'a :: len word) \<ge> 0"
by (simp add: Word_Lemmas.of_nat_power not_msb_from_less sint_eq_uint)
lemma sint_of_nat_le:
"\<lbrakk> b < 2 ^ (LENGTH('a) - 1); a \<le> b \<rbrakk>
\<Longrightarrow> sint (of_nat a :: 'a :: len word) \<le> sint (of_nat b :: 'a :: len word)"
apply (subst sint_eq_uint) defer
apply (subst sint_eq_uint) defer
apply (meson le_less_trans nat_power_minus_less of_nat_mono_maybe_le word_le_def)
apply (simp add: Word_Lemmas.of_nat_power not_msb_from_less)+
done
lemma int_eq_sint:
"\<lbrakk> x < 2 ^ (LENGTH('a) - 1) \<rbrakk>
\<Longrightarrow> sint (of_nat x :: 'a :: len word) = int x"
apply (subst sint_eq_uint)
apply (simp add: Word_Lemmas.of_nat_power not_msb_from_less)
by (metis int_unat nat_power_minus_less unat_of_nat_eq)
lemma sint_ctz:
"LENGTH('a) > 2
\<Longrightarrow> 0 \<le> sint (of_nat (word_ctz (x :: 'a :: len word)) :: 'a signed word)
\<and> sint (of_nat (word_ctz x) :: 'a signed word) \<le> LENGTH('a)"
apply (subgoal_tac "LENGTH('a) < 2 ^ (LENGTH('a) - 1)")
apply (rule conjI)
apply (metis len_signed order_le_less_trans sint_of_nat_ge_zero word_ctz_le)
apply (metis int_eq_sint len_signed sint_of_nat_le word_ctz_le)
by (rule small_powers_of_2, simp)
lemma pow_sub_less:
"\<lbrakk> a + b \<le> LENGTH('a); unat (x :: 'a :: len word) = 2 ^ a \<rbrakk>
\<Longrightarrow> unat (x * 2 ^ b - 1) < 2 ^ (a + b)"
by (metis (mono_tags, lifting)
eq_or_less_helperD not_less of_nat_numeral power_add semiring_1_class.of_nat_power
unat_pow_le_intro word_unat.Rep_inverse)
lemma sle_le_2pl:
"\<lbrakk> (b :: 'a :: len word) < 2 ^ (LENGTH('a) - 1); a \<le> b \<rbrakk>
\<Longrightarrow> word_sle a b"
by (simp add: not_msb_from_less word_sle_msb_le)
lemma sless_less_2pl:
"\<lbrakk> (b :: 'a :: len word) < 2 ^ (LENGTH('a) - 1); a < b \<rbrakk>
\<Longrightarrow> word_sless a b"
using not_msb_from_less word_sless_msb_less by blast
lemma mask_1_eq_1:
"mask 1 = 1"
unfolding mask_def by simp
lemma and_mask2: "w << n >> n = w && mask (size w - n)"
apply (case_tac "n \<le> size w")
apply (clarsimp simp: word_and_le2 and_mask shiftl_zero_size)+
done
lemma zero_OR_eq:
"y = 0 \<Longrightarrow> (x || y) = x"
by simp
lemma unat_of_nat_word_log2:
"LENGTH('a) < 2 ^ LENGTH('b)
\<Longrightarrow> unat (of_nat (word_log2 (n :: 'a :: len word)) :: 'b :: len word) = word_log2 n"
apply (subst unat_of_nat_eq)
apply (rule word_log2_max[THEN less_trans])
apply (simp add: word_size)
apply simp
done
lemma aligned_sub_aligned_simple:
"\<lbrakk> is_aligned a n; is_aligned b n \<rbrakk>
\<Longrightarrow> is_aligned (a - b) n"
by (simp add: Aligned.aligned_sub_aligned)
declare is_aligned_neg_mask_eq[simp]
declare is_aligned_neg_mask_weaken[simp]
lemma minus_one_shift:
"- (1 << n) = (-1 << n :: 'a::len word)"
by (simp add: mul_not_mask_eq_neg_shiftl[symmetric] mask_def NOT_eq)
lemma ucast_eq_mask:
"(UCAST('a::len \<rightarrow> 'b::len) x = UCAST('a \<rightarrow> 'b) y) =
(x && mask LENGTH('b) = y && mask LENGTH('b))"
apply (cases "LENGTH('b) < LENGTH('a)")
apply (auto simp: nth_ucast word_size intro!: word_eqI dest: word_eqD)[1]
apply (auto simp: shiftr_eq_0 and_mask_eq_iff_shiftr_0[THEN iffD2] dest: ucast_up_inj)
done
context
fixes w :: "'a::len word"
begin
private lemma sbintrunc_uint_ucast:
assumes "Suc n = len_of TYPE('b::len)"
shows "sbintrunc n (uint (ucast w :: 'b word)) = sbintrunc n (uint w)"
by (metis assms sbintrunc_bintrunc ucast_def word_ubin.eq_norm)
private lemma test_bit_sbintrunc:
assumes "i < len_of TYPE('a)"
shows "(word_of_int (sbintrunc n (uint w)) :: 'a word) !! i
= (if n < i then w !! n else w !! i)"
using assms by (simp add: nth_sbintr)
(simp add: test_bit_bin)
private lemma test_bit_sbintrunc_ucast:
assumes len_a: "i < len_of TYPE('a)"
shows "(word_of_int (sbintrunc (len_of TYPE('b) - 1) (uint (ucast w :: 'b word))) :: 'a word) !! i
= (if len_of TYPE('b::len) \<le> i then w !! (len_of TYPE('b) - 1) else w !! i)"
apply (subst sbintrunc_uint_ucast)
apply simp
apply (subst test_bit_sbintrunc)
apply (rule len_a)
apply (rule if_cong[OF _ refl refl])
using leD less_linear by fastforce
lemma scast_ucast_high_bits:
shows "scast (ucast w :: 'b::len word) = w
\<longleftrightarrow> (\<forall> i \<in> {len_of TYPE('b) ..< size w}. w !! i = w !! (len_of TYPE('b) - 1))"
unfolding scast_def sint_uint word_size
apply (subst word_eq_iff)
apply (rule iffI)
apply (rule ballI)
apply (drule_tac x=i in spec)
apply (subst (asm) test_bit_sbintrunc_ucast; simp)
apply (rule allI)
apply (case_tac "n < len_of TYPE('a)")
apply (subst test_bit_sbintrunc_ucast)
apply simp
apply (case_tac "n \<ge> len_of TYPE('b)")
apply (drule_tac x=n in bspec)
by auto
lemma scast_ucast_mask_compare:
shows "scast (ucast w :: 'b::len word) = w
\<longleftrightarrow> (w \<le> mask (len_of TYPE('b) - 1) \<or> ~~ mask (len_of TYPE('b) - 1) \<le> w)"
apply (clarsimp simp: le_mask_high_bits neg_mask_le_high_bits scast_ucast_high_bits word_size)
apply (rule iffI; clarsimp)
apply (rename_tac i j; case_tac "i = len_of TYPE('b) - 1"; case_tac "j = len_of TYPE('b) - 1")
by auto
end
end
|
# GraphHopper Directions API
#
# You use the GraphHopper Directions API to add route planning, navigation and route optimization to your software. E.g. the Routing API has turn instructions and elevation data and the Route Optimization API solves your logistic problems and supports various constraints like time window and capacity restrictions. Also it is possible to get all distances between all locations with our fast Matrix API.
#
# OpenAPI spec version: 1.0.0
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' Location Class
#'
#' @field lon Longitude coordinate in decimal degrees (numeric scalar).
#' @field lat Latitude coordinate in decimal degrees (numeric scalar).
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# R6 model of a GraphHopper `Location`: a longitude/latitude pair.
Location <- R6::R6Class(
  'Location',
  public = list(
    # Coordinates in decimal degrees; NULL until set.
    `lon` = NULL,
    `lat` = NULL,

    # Create a Location. Both arguments are optional; when supplied
    # they must be numeric scalars.
    initialize = function(`lon`, `lat`){
      if (!missing(`lon`)) {
        stopifnot(is.numeric(`lon`), length(`lon`) == 1)
        self$`lon` <- `lon`
      }
      if (!missing(`lat`)) {
        stopifnot(is.numeric(`lat`), length(`lat`) == 1)
        self$`lat` <- `lat`
      }
    },

    # Return a named list holding only the fields that are set
    # (suitable for serialisation with jsonlite::toJSON).
    toJSON = function() {
      LocationObject <- list()
      if (!is.null(self$`lon`)) {
        LocationObject[['lon']] <- self$`lon`
      }
      if (!is.null(self$`lat`)) {
        LocationObject[['lat']] <- self$`lat`
      }
      LocationObject
    },

    # Populate this object from a JSON string; keys absent from the
    # input leave the corresponding field untouched.
    fromJSON = function(LocationJson) {
      LocationObject <- jsonlite::fromJSON(LocationJson)
      if (!is.null(LocationObject$`lon`)) {
        self$`lon` <- LocationObject$`lon`
      }
      if (!is.null(LocationObject$`lat`)) {
        self$`lat` <- LocationObject$`lat`
      }
    },

    # Serialise to a JSON string.
    #
    # BUG FIX: the generated template used the integer conversion "%d",
    # which makes sprintf() raise "invalid format '%d'; use format %f,
    # %e, %g or %a for numeric objects" for any non-integral double --
    # i.e. for virtually every real coordinate. "%.15g" keeps full
    # double precision without spurious trailing zeros.
    toJSONString = function() {
      sprintf(
        '{
           "lon": %.15g,
           "lat": %.15g
        }',
        self$`lon`,
        self$`lat`
      )
    },

    # Populate this object from a JSON string, unconditionally
    # overwriting both fields (absent keys become NULL).
    fromJSONString = function(LocationJson) {
      LocationObject <- jsonlite::fromJSON(LocationJson)
      self$`lon` <- LocationObject$`lon`
      self$`lat` <- LocationObject$`lat`
    }
  )
)
|
# Identify the host operating system as a lowercase string
# (e.g. "osx", "linux", "windows"). Prefers Sys.info(); on the rare
# builds where Sys.info() is unavailable, falls back to platform
# metadata from .Platform and R.version.
getOS = function(){
  info = Sys.info()
  if (is.null(info)) {
    ## mystery machine: no Sys.info(), so infer from build metadata
    name = .Platform$OS.type
    if (grepl("^darwin", R.version$os))
      name = "osx"
    if (grepl("linux-gnu", R.version$os))
      name = "linux"
  } else {
    name = info['sysname']
    if (name == 'Darwin')
      name = "osx"
  }
  tolower(name)
}
# Load the compiled chartr-ModelSpec shared object, but only on Linux,
# where the prebuilt .so applies.
# NOTE(review): no equivalent load for osx/windows here -- presumably
# handled elsewhere or unsupported; confirm before relying on it.
if(getOS()=="linux")
dyn.load("chartr-ModelSpec.so")
Feather filled and attached to an intricately laser cut hand crafted tail. Our balloons are delicately assembled and anchored by a globe.
They can be personalised and enjoy good float times.
Our blue butterfly designs are perfect for the arrival of new babies. They are equally effective in showrooms or exhibitions.
Delivery: Feather-filled and attached to an intricately laser-cut, hand-crafted tail. Our balloons are delicately assembled and anchored by a globe.
{-# OPTIONS --cubical --safe #-}
module Cubical.HITs.Nullification.Base where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Function
open import Cubical.Foundations.PathSplitEquiv
open isPathSplitEquiv
-- A type A is S-null when the constant map A → (S → A) is a
-- path-split equivalence, i.e. maps out of S into A are "trivial".
isNull : ∀ {ℓ ℓ'} (S : Type ℓ) (A : Type ℓ') → Type (ℓ-max ℓ ℓ')
isNull S A = isPathSplitEquiv (const {A = A} {B = S})
-- Nullification of A at S: a higher inductive type freely making A
-- S-null, with point constructors (hub/spoke) and path constructors
-- (≡hub/≡spoke) one level up.
data Null {ℓ ℓ'} (S : Type ℓ) (A : Type ℓ') : Type (ℓ-max ℓ ℓ') where
  ∣_∣ : A → Null S A
  -- the image of every map (S → Null S A) is contractible in Null S A
  hub : (f : S → Null S A) → Null S A
  spoke : (f : S → Null S A) (s : S) → hub f ≡ f s
  -- the image of every map (S → x ≡ y) for x y : A is contractible in x ≡ y
  ≡hub : ∀ {x y} (p : S → x ≡ y) → x ≡ y
  ≡spoke : ∀ {x y} (p : S → x ≡ y) (s : S) → ≡hub p ≡ p s
-- Null S A is itself S-null: hub/spoke give the section of const,
-- and ≡hub/≡spoke give the section on path spaces (secCong).
isNull-Null : ∀ {ℓ ℓ'} {S : Type ℓ} {A : Type ℓ'} → isNull S (Null S A)
fst (sec isNull-Null) f = hub f
snd (sec isNull-Null) f i s = spoke f s i
fst (secCong isNull-Null x y) p i = ≡hub (funExt⁻ p) i
snd (secCong isNull-Null x y) p i j s = ≡spoke (funExt⁻ p) s i j
|
If $A$ is a measurable set and $B$ is a subset of $A$, then the measure of the difference $A - B$ is equal to the measure of $A$ minus the measure of $B$. |
>Is not a bear. Is not a pear.
Has not a care shes not a bear.
She has a flair for purple hair.
Shes kind of round, each extra pound
With a penchant to wriggle
when shes struck with a giggle.
Shes not rare, au contraire.
Just...
not a bear.<
|
lemma higher_deriv_diff: fixes z::complex assumes "f holomorphic_on S" "g holomorphic_on S" "open S" "z \<in> S" shows "(deriv ^^ n) (\<lambda>w. f w - g w) z = (deriv ^^ n) f z - (deriv ^^ n) g z" |
Students in Free Enterprise. We are the go-to student organization on campus. Our team is project oriented and our unique membership structure gives us the potential to grow our organization exponentially. All of our projects are constructed with the economic, social, and environmental impact in mind. We believe that our model allows for UC Davis students to ultimately have a more optimal positive impact on the local community. By being project oriented and managing projects virtually, we are able to leverage our resources to the fullest. Our UC Davis SIFE team is part of a 48,000 student network–the largest collegiate business club in the world. We are one of 1500 teams that represent SIFE headquarters. For more information about SIFE headquarters visit SIFE.org.
This year we have focused our efforts on three areas:
1) Teaching financial literacy to youth,
2) Contribution towards the development of our community,
3) Creating a strong business network on campus through events and activities.
Enjoy visiting our home and project management pages. We encourage all students to contact us if they have any projects that they would like to lead or partner on. Also please contact us if you’re an outside organization and are interested in working with our team.
20112012 Sponsors:
Liberty Mutual
|
# Simple symbolic manipulation in Python
All is done using [SymPy](https://docs.sympy.org/latest/index.html)
```python
from sympy import *
```
Define some common symbols to be used as variables, integers, or functions for symbolic purposes
```python
x, y, z, t = symbols('x y z t')
k, m, n = symbols('k m n', integer=True)
f, g, h = symbols('f g h', cls=Function)
```
Integration
```python
integrate(cos(x), x)
```
$\displaystyle \sin{\left(x \right)}$
Differentiation
```python
diff(atan(x),x)
```
$\displaystyle \frac{1}{x^{2} + 1}$
```python
```
|
function varargout = quad2d(varargin)
%QUAD2D Complete definite integral of SPHEREFUN.
%   I = QUAD2D( F ), returns the definite integral of a SPHEREFUN integrated
%   over its domain of definition.
%
%   I = QUAD2D(F, a, b, c, d), returns the definite integral of a SPHEREFUN.
%   Integrated over the domain [a b] x [c d].
%
%   See also SPHEREFUN/INTEGRAL2, SPHEREFUN/SUM2, SPHEREFUN/INTEGRAL.

% Copyright 2017 by The University of Oxford and The Chebfun Developers.
% See http://www.chebfun.org/ for Chebfun information.

% Pass straight through to the shared separableApprox implementation;
% spherefun adds no behaviour of its own here.
[varargout{1:nargout}] = quad2d@separableApprox(varargin{:});

end
[GOAL]
α : Type u_1
β : Type u_2
m : MeasurableSpace α
μ ν : Measure α
inst✝ : HaveLebesgueDecomposition μ ν
h : μ ≪ ν
⊢ withDensity ν (rnDeriv μ ν) = μ
[PROOFSTEP]
obtain ⟨_, ⟨E, hE₁, hE₂, hE₃⟩, hadd⟩ := haveLebesgueDecomposition_spec μ ν
[GOAL]
case intro.intro.intro.intro.intro
α : Type u_1
β : Type u_2
m : MeasurableSpace α
μ ν : Measure α
inst✝ : HaveLebesgueDecomposition μ ν
h : μ ≪ ν
left✝ : Measurable (rnDeriv μ ν)
hadd : μ = singularPart μ ν + withDensity ν (rnDeriv μ ν)
E : Set α
hE₁ : MeasurableSet E
hE₂ : ↑↑(singularPart μ ν) E = 0
hE₃ : ↑↑ν Eᶜ = 0
⊢ withDensity ν (rnDeriv μ ν) = μ
[PROOFSTEP]
have : singularPart μ ν = 0 :=
by
refine' le_antisymm (fun A (_ : MeasurableSet A) => _) (Measure.zero_le _)
suffices singularPart μ ν Set.univ = 0
by
rw [Measure.coe_zero, Pi.zero_apply, ← this]
exact measure_mono (Set.subset_univ _)
rw [← measure_add_measure_compl hE₁, hE₂, zero_add]
have : (singularPart μ ν + ν.withDensity (rnDeriv μ ν)) Eᶜ = μ Eᶜ := by rw [← hadd]
rw [Measure.coe_add, Pi.add_apply, h hE₃] at this
exact (add_eq_zero_iff.1 this).1
[GOAL]
α : Type u_1
β : Type u_2
m : MeasurableSpace α
μ ν : Measure α
inst✝ : HaveLebesgueDecomposition μ ν
h : μ ≪ ν
left✝ : Measurable (rnDeriv μ ν)
hadd : μ = singularPart μ ν + withDensity ν (rnDeriv μ ν)
E : Set α
hE₁ : MeasurableSet E
hE₂ : ↑↑(singularPart μ ν) E = 0
hE₃ : ↑↑ν Eᶜ = 0
⊢ singularPart μ ν = 0
[PROOFSTEP]
refine' le_antisymm (fun A (_ : MeasurableSet A) => _) (Measure.zero_le _)
[GOAL]
α : Type u_1
β : Type u_2
m : MeasurableSpace α
μ ν : Measure α
inst✝ : HaveLebesgueDecomposition μ ν
h : μ ≪ ν
left✝ : Measurable (rnDeriv μ ν)
hadd : μ = singularPart μ ν + withDensity ν (rnDeriv μ ν)
E : Set α
hE₁ : MeasurableSet E
hE₂ : ↑↑(singularPart μ ν) E = 0
hE₃ : ↑↑ν Eᶜ = 0
A : Set α
x✝ : MeasurableSet A
⊢ ↑↑(singularPart μ ν) A ≤ ↑↑0 A
[PROOFSTEP]
suffices singularPart μ ν Set.univ = 0
by
rw [Measure.coe_zero, Pi.zero_apply, ← this]
exact measure_mono (Set.subset_univ _)
[GOAL]
α : Type u_1
β : Type u_2
m : MeasurableSpace α
μ ν : Measure α
inst✝ : HaveLebesgueDecomposition μ ν
h : μ ≪ ν
left✝ : Measurable (rnDeriv μ ν)
hadd : μ = singularPart μ ν + withDensity ν (rnDeriv μ ν)
E : Set α
hE₁ : MeasurableSet E
hE₂ : ↑↑(singularPart μ ν) E = 0
hE₃ : ↑↑ν Eᶜ = 0
A : Set α
x✝ : MeasurableSet A
this : ↑↑(singularPart μ ν) Set.univ = 0
⊢ ↑↑(singularPart μ ν) A ≤ ↑↑0 A
[PROOFSTEP]
rw [Measure.coe_zero, Pi.zero_apply, ← this]
[GOAL]
α : Type u_1
β : Type u_2
m : MeasurableSpace α
μ ν : Measure α
inst✝ : HaveLebesgueDecomposition μ ν
h : μ ≪ ν
left✝ : Measurable (rnDeriv μ ν)
hadd : μ = singularPart μ ν + withDensity ν (rnDeriv μ ν)
E : Set α
hE₁ : MeasurableSet E
hE₂ : ↑↑(singularPart μ ν) E = 0
hE₃ : ↑↑ν Eᶜ = 0
A : Set α
x✝ : MeasurableSet A
this : ↑↑(singularPart μ ν) Set.univ = 0
⊢ ↑↑(singularPart μ ν) A ≤ ↑↑(singularPart μ ν) Set.univ
[PROOFSTEP]
exact measure_mono (Set.subset_univ _)
[GOAL]
α : Type u_1
β : Type u_2
m : MeasurableSpace α
μ ν : Measure α
inst✝ : HaveLebesgueDecomposition μ ν
h : μ ≪ ν
left✝ : Measurable (rnDeriv μ ν)
hadd : μ = singularPart μ ν + withDensity ν (rnDeriv μ ν)
E : Set α
hE₁ : MeasurableSet E
hE₂ : ↑↑(singularPart μ ν) E = 0
hE₃ : ↑↑ν Eᶜ = 0
A : Set α
x✝ : MeasurableSet A
⊢ ↑↑(singularPart μ ν) Set.univ = 0
[PROOFSTEP]
rw [← measure_add_measure_compl hE₁, hE₂, zero_add]
[GOAL]
α : Type u_1
β : Type u_2
m : MeasurableSpace α
μ ν : Measure α
inst✝ : HaveLebesgueDecomposition μ ν
h : μ ≪ ν
left✝ : Measurable (rnDeriv μ ν)
hadd : μ = singularPart μ ν + withDensity ν (rnDeriv μ ν)
E : Set α
hE₁ : MeasurableSet E
hE₂ : ↑↑(singularPart μ ν) E = 0
hE₃ : ↑↑ν Eᶜ = 0
A : Set α
x✝ : MeasurableSet A
⊢ ↑↑(singularPart μ ν) Eᶜ = 0
[PROOFSTEP]
have : (singularPart μ ν + ν.withDensity (rnDeriv μ ν)) Eᶜ = μ Eᶜ := by rw [← hadd]
[GOAL]
α : Type u_1
β : Type u_2
m : MeasurableSpace α
μ ν : Measure α
inst✝ : HaveLebesgueDecomposition μ ν
h : μ ≪ ν
left✝ : Measurable (rnDeriv μ ν)
hadd : μ = singularPart μ ν + withDensity ν (rnDeriv μ ν)
E : Set α
hE₁ : MeasurableSet E
hE₂ : ↑↑(singularPart μ ν) E = 0
hE₃ : ↑↑ν Eᶜ = 0
A : Set α
x✝ : MeasurableSet A
⊢ ↑↑(singularPart μ ν + withDensity ν (rnDeriv μ ν)) Eᶜ = ↑↑μ Eᶜ
[PROOFSTEP]
rw [← hadd]
[GOAL]
α : Type u_1
β : Type u_2
m : MeasurableSpace α
μ ν : Measure α
inst✝ : HaveLebesgueDecomposition μ ν
h : μ ≪ ν
left✝ : Measurable (rnDeriv μ ν)
hadd : μ = singularPart μ ν + withDensity ν (rnDeriv μ ν)
E : Set α
hE₁ : MeasurableSet E
hE₂ : ↑↑(singularPart μ ν) E = 0
hE₃ : ↑↑ν Eᶜ = 0
A : Set α
x✝ : MeasurableSet A
this : ↑↑(singularPart μ ν + withDensity ν (rnDeriv μ ν)) Eᶜ = ↑↑μ Eᶜ
⊢ ↑↑(singularPart μ ν) Eᶜ = 0
[PROOFSTEP]
rw [Measure.coe_add, Pi.add_apply, h hE₃] at this
[GOAL]
α : Type u_1
β : Type u_2
m : MeasurableSpace α
μ ν : Measure α
inst✝ : HaveLebesgueDecomposition μ ν
h : μ ≪ ν
left✝ : Measurable (rnDeriv μ ν)
hadd : μ = singularPart μ ν + withDensity ν (rnDeriv μ ν)
E : Set α
hE₁ : MeasurableSet E
hE₂ : ↑↑(singularPart μ ν) E = 0
hE₃ : ↑↑ν Eᶜ = 0
A : Set α
x✝ : MeasurableSet A
this : ↑↑(singularPart μ ν) Eᶜ + ↑↑(withDensity ν (rnDeriv μ ν)) Eᶜ = 0
⊢ ↑↑(singularPart μ ν) Eᶜ = 0
[PROOFSTEP]
exact (add_eq_zero_iff.1 this).1
[GOAL]
case intro.intro.intro.intro.intro
α : Type u_1
β : Type u_2
m : MeasurableSpace α
μ ν : Measure α
inst✝ : HaveLebesgueDecomposition μ ν
h : μ ≪ ν
left✝ : Measurable (rnDeriv μ ν)
hadd : μ = singularPart μ ν + withDensity ν (rnDeriv μ ν)
E : Set α
hE₁ : MeasurableSet E
hE₂ : ↑↑(singularPart μ ν) E = 0
hE₃ : ↑↑ν Eᶜ = 0
this : singularPart μ ν = 0
⊢ withDensity ν (rnDeriv μ ν) = μ
[PROOFSTEP]
rw [this, zero_add] at hadd
[GOAL]
case intro.intro.intro.intro.intro
α : Type u_1
β : Type u_2
m : MeasurableSpace α
μ ν : Measure α
inst✝ : HaveLebesgueDecomposition μ ν
h : μ ≪ ν
left✝ : Measurable (rnDeriv μ ν)
hadd : μ = withDensity ν (rnDeriv μ ν)
E : Set α
hE₁ : MeasurableSet E
hE₂ : ↑↑(singularPart μ ν) E = 0
hE₃ : ↑↑ν Eᶜ = 0
this : singularPart μ ν = 0
⊢ withDensity ν (rnDeriv μ ν) = μ
[PROOFSTEP]
exact hadd.symm
[GOAL]
α : Type u_1
β : Type u_2
m : MeasurableSpace α
μ ν : Measure α
inst✝¹ : IsFiniteMeasure μ
inst✝ : HaveLebesgueDecomposition μ ν
h : μ ≪ ν
i : Set α
hi : MeasurableSet i
⊢ ∫ (x : α) in i, ENNReal.toReal (rnDeriv μ ν x) ∂ν = ENNReal.toReal (↑↑μ i)
[PROOFSTEP]
rw [integral_toReal, ← withDensity_apply _ hi, withDensity_rnDeriv_eq μ ν h]
[GOAL]
case hfm
α : Type u_1
β : Type u_2
m : MeasurableSpace α
μ ν : Measure α
inst✝¹ : IsFiniteMeasure μ
inst✝ : HaveLebesgueDecomposition μ ν
h : μ ≪ ν
i : Set α
hi : MeasurableSet i
⊢ AEMeasurable fun x => rnDeriv μ ν x
[PROOFSTEP]
measurability
[GOAL]
case hf
α : Type u_1
β : Type u_2
m : MeasurableSpace α
μ ν : Measure α
inst✝¹ : IsFiniteMeasure μ
inst✝ : HaveLebesgueDecomposition μ ν
h : μ ≪ ν
i : Set α
hi : MeasurableSet i
⊢ ∀ᵐ (x : α) ∂restrict ν i, rnDeriv μ ν x < ⊤
[PROOFSTEP]
refine' ae_lt_top (μ.measurable_rnDeriv ν) (lt_of_le_of_lt (lintegral_mono_set i.subset_univ) _).ne
[GOAL]
case hf
α : Type u_1
β : Type u_2
m : MeasurableSpace α
μ ν : Measure α
inst✝¹ : IsFiniteMeasure μ
inst✝ : HaveLebesgueDecomposition μ ν
h : μ ≪ ν
i : Set α
hi : MeasurableSet i
⊢ ∫⁻ (x : α) in Set.univ, rnDeriv μ ν x ∂ν < ⊤
[PROOFSTEP]
rw [← withDensity_apply _ MeasurableSet.univ, withDensity_rnDeriv_eq μ ν h]
[GOAL]
case hf
α : Type u_1
β : Type u_2
m : MeasurableSpace α
μ ν : Measure α
inst✝¹ : IsFiniteMeasure μ
inst✝ : HaveLebesgueDecomposition μ ν
h : μ ≪ ν
i : Set α
hi : MeasurableSet i
⊢ ↑↑μ Set.univ < ⊤
[PROOFSTEP]
exact measure_lt_top _ _
[GOAL]
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : s ≪ᵥ toENNRealVectorMeasure μ
⊢ withDensityᵥ μ (rnDeriv s μ) = s
[PROOFSTEP]
rw [absolutelyContinuous_ennreal_iff, (_ : μ.toENNRealVectorMeasure.ennrealToMeasure = μ),
totalVariation_absolutelyContinuous_iff] at h
[GOAL]
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
⊢ withDensityᵥ μ (rnDeriv s μ) = s
[PROOFSTEP]
ext1 i hi
[GOAL]
case h
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
⊢ ↑(withDensityᵥ μ (rnDeriv s μ)) i = ↑s i
[PROOFSTEP]
rw [withDensityᵥ_apply (integrable_rnDeriv _ _) hi, rnDeriv, integral_sub, withDensity_rnDeriv_toReal_eq h.1 hi,
withDensity_rnDeriv_toReal_eq h.2 hi]
[GOAL]
case h
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
⊢ ENNReal.toReal (↑↑(toJordanDecomposition s).posPart i) - ENNReal.toReal (↑↑(toJordanDecomposition s).negPart i) = ↑s i
[PROOFSTEP]
conv_rhs => rw [← s.toSignedMeasure_toJordanDecomposition]
[GOAL]
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
| ↑s i
[PROOFSTEP]
rw [← s.toSignedMeasure_toJordanDecomposition]
[GOAL]
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
| ↑s i
[PROOFSTEP]
rw [← s.toSignedMeasure_toJordanDecomposition]
[GOAL]
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
| ↑s i
[PROOFSTEP]
rw [← s.toSignedMeasure_toJordanDecomposition]
[GOAL]
case h
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
⊢ ENNReal.toReal (↑↑(toJordanDecomposition s).posPart i) - ENNReal.toReal (↑↑(toJordanDecomposition s).negPart i) =
↑(JordanDecomposition.toSignedMeasure (toJordanDecomposition s)) i
[PROOFSTEP]
erw [VectorMeasure.sub_apply]
[GOAL]
case h
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
⊢ ENNReal.toReal (↑↑(toJordanDecomposition s).posPart i) - ENNReal.toReal (↑↑(toJordanDecomposition s).negPart i) =
↑(toSignedMeasure (toJordanDecomposition s).posPart) i - ↑(toSignedMeasure (toJordanDecomposition s).negPart) i
[PROOFSTEP]
rw [toSignedMeasure_apply_measurable hi, toSignedMeasure_apply_measurable hi]
[GOAL]
case h.hf
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
⊢ Integrable fun x => ENNReal.toReal (Measure.rnDeriv (toJordanDecomposition s).posPart μ x)
case h.hg
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
⊢ Integrable fun x => ENNReal.toReal (Measure.rnDeriv (toJordanDecomposition s).negPart μ x)
[PROOFSTEP]
all_goals
rw [← integrableOn_univ]
refine' IntegrableOn.restrict _ MeasurableSet.univ
refine' ⟨_, hasFiniteIntegral_toReal_of_lintegral_ne_top _⟩
· apply Measurable.aestronglyMeasurable
measurability
· rw [set_lintegral_univ]
exact (lintegral_rnDeriv_lt_top _ _).ne
[GOAL]
case h.hf
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
⊢ Integrable fun x => ENNReal.toReal (Measure.rnDeriv (toJordanDecomposition s).posPart μ x)
[PROOFSTEP]
rw [← integrableOn_univ]
[GOAL]
case h.hf
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
⊢ IntegrableOn (fun x => ENNReal.toReal (Measure.rnDeriv (toJordanDecomposition s).posPart μ x)) Set.univ
[PROOFSTEP]
refine' IntegrableOn.restrict _ MeasurableSet.univ
[GOAL]
case h.hf
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
⊢ IntegrableOn (fun x => ENNReal.toReal (Measure.rnDeriv (toJordanDecomposition s).posPart μ x)) Set.univ
[PROOFSTEP]
refine' ⟨_, hasFiniteIntegral_toReal_of_lintegral_ne_top _⟩
[GOAL]
case h.hf.refine'_1
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
⊢ AEStronglyMeasurable (fun x => ENNReal.toReal (Measure.rnDeriv (toJordanDecomposition s).posPart μ x))
(Measure.restrict μ Set.univ)
[PROOFSTEP]
apply Measurable.aestronglyMeasurable
[GOAL]
case h.hf.refine'_1.hf
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
⊢ Measurable fun x => ENNReal.toReal (Measure.rnDeriv (toJordanDecomposition s).posPart μ x)
[PROOFSTEP]
measurability
[GOAL]
case h.hf.refine'_2
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
⊢ ∫⁻ (x : α) in Set.univ, Measure.rnDeriv (toJordanDecomposition s).posPart μ x ∂μ ≠ ⊤
[PROOFSTEP]
rw [set_lintegral_univ]
[GOAL]
case h.hf.refine'_2
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
⊢ ∫⁻ (x : α), Measure.rnDeriv (toJordanDecomposition s).posPart μ x ∂μ ≠ ⊤
[PROOFSTEP]
exact (lintegral_rnDeriv_lt_top _ _).ne
[GOAL]
case h.hg
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
⊢ Integrable fun x => ENNReal.toReal (Measure.rnDeriv (toJordanDecomposition s).negPart μ x)
[PROOFSTEP]
rw [← integrableOn_univ]
[GOAL]
case h.hg
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
⊢ IntegrableOn (fun x => ENNReal.toReal (Measure.rnDeriv (toJordanDecomposition s).negPart μ x)) Set.univ
[PROOFSTEP]
refine' IntegrableOn.restrict _ MeasurableSet.univ
[GOAL]
case h.hg
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
⊢ IntegrableOn (fun x => ENNReal.toReal (Measure.rnDeriv (toJordanDecomposition s).negPart μ x)) Set.univ
[PROOFSTEP]
refine' ⟨_, hasFiniteIntegral_toReal_of_lintegral_ne_top _⟩
[GOAL]
case h.hg.refine'_1
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
⊢ AEStronglyMeasurable (fun x => ENNReal.toReal (Measure.rnDeriv (toJordanDecomposition s).negPart μ x))
(Measure.restrict μ Set.univ)
[PROOFSTEP]
apply Measurable.aestronglyMeasurable
[GOAL]
case h.hg.refine'_1.hf
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
⊢ Measurable fun x => ENNReal.toReal (Measure.rnDeriv (toJordanDecomposition s).negPart μ x)
[PROOFSTEP]
measurability
[GOAL]
case h.hg.refine'_2
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
⊢ ∫⁻ (x : α) in Set.univ, Measure.rnDeriv (toJordanDecomposition s).negPart μ x ∂μ ≠ ⊤
[PROOFSTEP]
rw [set_lintegral_univ]
[GOAL]
case h.hg.refine'_2
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : (toJordanDecomposition s).posPart ≪ μ ∧ (toJordanDecomposition s).negPart ≪ μ
i : Set α
hi : MeasurableSet i
⊢ ∫⁻ (x : α), Measure.rnDeriv (toJordanDecomposition s).negPart μ x ∂μ ≠ ⊤
[PROOFSTEP]
exact (lintegral_rnDeriv_lt_top _ _).ne
[GOAL]
α : Type u_1
β : Type u_2
m : MeasurableSpace α
s : SignedMeasure α
μ : Measure α
inst✝ : SigmaFinite μ
h : totalVariation s ≪ ennrealToMeasure (toENNRealVectorMeasure μ)
⊢ ennrealToMeasure (toENNRealVectorMeasure μ) = μ
[PROOFSTEP]
exact equivMeasure.right_inv μ
|
is a key component of a heart-healthy lifestyle. These supplements can help.
Cholesterol. Along with blood pressure and glucose (blood sugar), it’s a factor that you have to keep under control for the sake of your cardiovascular system. What’s more, this waxy material seems to have other ominous connections: A recent study found a link between even moderately elevated cholesterol levels at mid-life and dementia risk in old age.
According to the American Heart Association, total cholesterol levels below 200 mg/dL are desirable. Levels of 240 and above are high risk; anything between 200 and 240 represents a borderline risk area. For LDL (“bad”) cholesterol, less than 100 is good, 190 and above is very high risk, and anything in between falls into a pattern of increasing risk (100-129, near optimal; 130-159, borderline high; 160-189, high). Conversely, you want higher levels of HDL (“good”) cholesterol—less than 40 for men and 50 for women increases heart disease risk.
Cholesterol imbalances are harmful because they contribute to atherosclerosis, the buildup of fatty plaques within arteries that slow blood flow. LDL can become oxidized, a process similar to the rusting of metal. When it does, it can become stuck in the tissue that lines arterial walls. The presence of LDL in arteries attracts blood cells called platelets, which attempt to make repairs; this process causes inflammation, which in turn further stimulates plaque development. Sometimes the plaque becomes unstable, or prone to rupture. If it ruptures a clot can form at the site, which can lead to a heart attack (in the coronary arteries) or stroke (in arteries feeding the brain). HDL, the “good” form of cholesterol, helps shepherd LDL back to the liver for processing.
While atherosclerosis is best known for its cardiovascular consequences, it can cause symptoms elsewhere in the body—anywhere blood flow is reduced by plaque-laden arteries. Plaque buildup in pelvic arteries can lead to erectile dysfunction (ED) in men and is strongly suspected of causing sexual dysfunction in women. Reduced blood supply in the arms and legs, a condition known as peripheral arterial disease (PAD), can cause numbness, weakness and pain.
Not smoking. That should go without saying.
Avoiding such dietary hazards as heavily refined foods (including white bread, pasta, rice and sugar), excessive amounts of table salt and saturated fat, and food grown or processed with pesticides, antibiotics or preservatives.
Not drinking or, at the very least, limiting your alcohol intake. The same goes for caffeine, which acts as a stimulant and diuretic.
Regular relief of stress, which has been linked to inflammation. Meditation, tai chi, yoga and plain ol’ exercise are all known to reduce stress.
Exercise for its own sake; physical activity helps lower both total and LDL while raising HDL. |
A large, brand-new movie studio in Palm Beach County already has a potential box-office hit.
Twentieth Century Fox will film interior scenes for Speed II, sequel to the popular 1994 action movie, in the 20,000-square-foot sound stage at Palm Beach Ocean Studios in West Palm Beach, the studio's chief executive said on Thursday.
"I said when we opened in April we'd be lucky to get something in here by September," said Thorpe Shuttleworth, president and developer of the 42,000-square-foot studio at the Vista Center on Okeechobee Boulevard, west of Florida's Turnpike.
"And now we've got a high-budget action-adventure feature. Yes, that's a coup," he said.
Speed II will star Sandra Bullock, making a return appearance in the sequel, and Jason Patric, who is taking over the role played by Keanu Reeves in the original, according to Variety, the movie industry's trade publication.
Speed II will help get the studio's name out among film producers, said Chuck Eldred, executive director of the county's Film and Television Commission.
Eldred pushed for the county to give Shuttleworth $208,000 in job growth incentives.
"This is exactly what we needed to attract this kind of attention here," Eldred said.
And the studio is benefiting from the increased attention being paid to South Florida by Hollywood producers after recent films such as The Birdcage and Striptease, Shuttleworth said.
The film's producers will hire extra cast members in Florida, and other technical support workers may be hired locally as well.
Film industry publications estimate the cost of making Speed II at $40 million to $70 million.
The movie's producers toured the studio in April and have been working there since Monday. Sets are under construction in preparation for filming later this year. |
# netcdfTest2 more netcdf msi test for msiNcInqId, msiNcInqWithId,
# msiNcGetNumDim, etc which do inquery of individual variable instead
# of using the comprehensive inquery msiNcInq as used in netcdfTest1
# The netcdf test files pres_temp_4D.nc and sfc_pres_temp.nc can be found
# in the netcdf directory
# Inspect a NetCDF file one dimension/variable at a time using the
# id-based inquiry microservices (msiNcInqId, msiNcInqWithId,
# msiNcGetVarsByType), instead of the comprehensive msiNcInq used by
# netcdfTest1.  Each call is checked and the rule fails fast on error.
netcdfTest2 () {
  # Open the NetCDF object; mode "0" appears to mean read-only
  # (TODO confirm against the msiNcOpen documentation).
  if (msiNcOpen (*ncTestPath, "0", *ncid) == 0) {
    writeLine("stdout", "msiNcOpen success, ncid = *ncid");
  } else {
    writeLine("stdout", "msiNcOpen failed");
    fail;
  }
  # inq longitude -- paramType 1 is used for dimensions here,
  # 0 for variables (see the "pressure" lookup below)
  if (msiNcInqId ("longitude", 1, *ncid, *londimid) == 0) {
    writeLine("stdout", "msiNcInqId success, londimid = *londimid");
  } else {
    writeLine("stdout", "msiNcInqId failed");
    fail;
  }
  # Describe the longitude dimension and report its length.
  if (msiNcInqWithId (*londimid, 1, *ncid, *inqOut) == 0) {
    writeLine("stdout", "msiNcInqWithId londimid success");
    if (msiNcGetArrayLen (*inqOut, *lonArrayLen) == 0) {
      writeLine ("stdout", "lonArrayLen = *lonArrayLen");
    } else {
      writeLine("stdout", "msiNcGetArrayLen failed");
      fail;
    }
  } else {
    writeLine("stdout", "msiNcInqWithId failed");
    fail;
  }
  # inq latitude -- same sequence as longitude
  if (msiNcInqId ("latitude", 1, *ncid, *latdimid) == 0) {
    writeLine("stdout", "msiNcInqId success, latdimid = *latdimid");
  } else {
    writeLine("stdout", "msiNcInqId failed");
    fail;
  }
  if (msiNcInqWithId (*latdimid, 1, *ncid, *inqOutl) == 0) {
    writeLine("stdout", "msiNcInqWithId latdimid success");
    if (msiNcGetArrayLen (*inqOutl, *latArrayLen) == 0) {
      writeLine ("stdout", "latArrayLen = *latArrayLen");
    } else {
      writeLine("stdout", "msiNcGetArrayLen failed");
      fail;
    }
  } else {
    writeLine("stdout", "msiNcInqWithId failed");
    fail;
  }
  # variables -- resolve the "pressure" variable id (paramType 0).
  if (msiNcInqId ("pressure", 0, *ncid, *pressvarid) == 0) {
    writeLine("stdout", "msiNcInqId success, pressvarid = *pressvarid");
  } else {
    writeLine("stdout", "msiNcInqId failed");
    fail;
  }
  # Describe the pressure variable: dimension count, dim ids, data type.
  # NOTE(review): unlike every other call in this rule, a failure here
  # has no else/fail branch, so *pressDataType and *ndim would be unset
  # when msiNcGetVarsByType is called below -- consider failing here too.
  if (msiNcInqWithId (*pressvarid, 0, *ncid, *pressinqout) == 0) {
    writeLine("stdout", "msiNcInqWithId pressvarid success");
    if (msiNcGetNumDim (*pressinqout, *ndim) == 0) {
      writeLine("stdout", "pressinqout ndim = *ndim");
      for(*I=0;*I<*ndim;*I=*I+1) {
        msiNcGetElementInArray (*pressinqout, *I, *element);
        writeLine("stdout", "dimid *I: *element");
      }
    } else {
      writeLine("stdout", "msiNcGetNumDim failed");
      fail;
    }
    if (msiNcGetDataType (*pressinqout, *pressDataType) == 0) {
      writeLine("stdout", "msiNcGetDataType success pressDataType = *pressDataType");
    } else {
      writeLine("stdout", "msiNcGetDataType pressinqout failed");
      fail;
    }
  }
  # Read a subset of the pressure values; "0%0", "3%5", "1%1" look like
  # per-dimension start/count/stride lists separated by '%' -- TODO
  # confirm against the msiNcGetVarsByType documentation.
  if (msiNcGetVarsByType (*pressDataType, *ncid, *pressvarid, *ndim, "0%0", "3%5", "1%1", *getVarsOut) == 0) {
    # inqOut is a struct.
    writeLine("stdout", "msiNcGetVarsByType pressvarid success");
    if (msiNcGetArrayLen (*getVarsOut, *pressArrayLen) == 0) {
      writeLine ("stdout", "pressArrayLen = *pressArrayLen");
      for(*I=0;*I<*pressArrayLen;*I=*I+1) {
        msiNcGetElementInArray (*getVarsOut, *I, *element);
        if (*pressDataType == 5) {
          # float. writeLine cannot handle float yet.
          msiFloatToString (*element, *floatStr);
          writeLine("stdout", "pressure *I: *floatStr");
        } else {
          writeLine("stdout", "pressure *I: *element");
        }
      }
    } else {
      writeLine("stdout", "msiNcGetArrayLen failed");
      fail;
    }
  } else {
    writeLine("stdout", "msiNcGetVarsByType pressvarid failed");
    fail;
  }
  # Release the NetCDF handle.
  if (msiNcClose (*ncid) == 0) {
    writeLine("stdout", "msiNcClose success, ncid = *ncid");
  } else {
    writeLine("stdout", "msiNcClose failed");
    fail;
  }
}
INPUT *ncTestPath="/wanZone/home/rods/netcdf/sfc_pres_temp.nc"
OUTPUT ruleExecOut,*tempVaraOut
|
||| Internal functions used by HashMap and HashSet
module Data.HashMap.Internal
import Data.Array16
import public Data.Hashable
infix 6 `eq`
infixr 5 .&.
infixr 4 >>
||| Bitwise and on 64-bit words (thin wrapper over the primitive).
%inline
(.&.) : Bits64 -> Bits64 -> Bits64
(.&.) = prim__and_Bits64
||| Right shift on 64-bit words (thin wrapper over the primitive).
%inline
(>>) : Bits64 -> Bits64 -> Bits64
(>>) = prim__shr_Bits64
||| Selects one 4-bit nibble of a 64-bit hash: BM0 is the least
||| significant nibble, BMf the most significant (see `bitMask`).
export
data BitMask
  = BM0
  | BM1
  | BM2
  | BM3
  | BM4
  | BM5
  | BM6
  | BM7
  | BM8
  | BM9
  | BMa
  | BMb
  | BMc
  | BMd
  | BMe
  | BMf
||| Initial bit mask: the least-significant nibble.
export
bitMask0 : BitMask
bitMask0 = BM0
||| A full 64-bit hash value.
public export
Hash : Type
Hash = Bits64
||| Extract the 4 bits selected by the mask, shifted down to 0..15.
||| (`.&.` binds tighter than `>>` per the fixities above, so each case
||| masks the nibble first and then shifts it into place.)
bitMask : BitMask -> Hash -> Bits64
bitMask mask h = case mask of
  BM0 => 0x000000000000000f .&. h >> 0x00
  BM1 => 0x00000000000000f0 .&. h >> 0x04
  BM2 => 0x0000000000000f00 .&. h >> 0x08
  BM3 => 0x000000000000f000 .&. h >> 0x0c
  BM4 => 0x00000000000f0000 .&. h >> 0x10
  BM5 => 0x0000000000f00000 .&. h >> 0x14
  BM6 => 0x000000000f000000 .&. h >> 0x18
  BM7 => 0x00000000f0000000 .&. h >> 0x1c
  BM8 => 0x0000000f00000000 .&. h >> 0x20
  BM9 => 0x000000f000000000 .&. h >> 0x24
  BMa => 0x00000f0000000000 .&. h >> 0x28
  BMb => 0x0000f00000000000 .&. h >> 0x2c
  BMc => 0x000f000000000000 .&. h >> 0x30
  BMd => 0x00f0000000000000 .&. h >> 0x34
  BMe => 0x0f00000000000000 .&. h >> 0x38
  BMf => 0xf000000000000000 .&. h >> 0x3c
||| Get the `BitMask` for the next (more significant) 4 bits.
||| Note that BMf wraps around to BM0.
nextBitMask : BitMask -> BitMask
nextBitMask = \case
  BM0 => BM1
  BM1 => BM2
  BM2 => BM3
  BM3 => BM4
  BM4 => BM5
  BM5 => BM6
  BM6 => BM7
  BM7 => BM8
  BM8 => BM9
  BM9 => BMa
  BMa => BMb
  BMb => BMc
  BMc => BMd
  BMd => BMe
  BMe => BMf
  BMf => BM0
||| True exactly for the final (most significant) nibble mask.
isLastBM : BitMask -> Bool
isLastBM BMf = True
isLastBM _ = False
||| A hashing salt; a fresh salt is taken to re-hash colliding keys.
public export
Salt : Type
Salt = Bits64
||| Derive the next salt from the current one (used after a collision).
-- TODO: get better algorithm
nextSalt : Salt -> Salt
nextSalt = (2 +)
||| `Just x` when the condition holds, otherwise `Nothing`
||| (the lazy argument is never forced in the `False` case).
justWhen : Bool -> Lazy a -> Maybe a
justWhen b x = if b then Just x else Nothing
||| Keep the lazily-computed `Maybe` only when the condition holds;
||| return `Nothing` (without forcing it) otherwise.
joinWhen : Bool -> Lazy (Maybe a) -> Maybe a
joinWhen b x = if b then x else Nothing
||| Internal hash-array-mapped trie (HAMT); assumes every operation on
||| a given trie uses the same hash function and the same equality.
export
data HashArrayMapTrie k v
  = Empty
  | Leaf Hash k v -- full hash of the stored key
  | Collision Hash Salt (HashArrayMapTrie k v) -- full (outer) hash, plus the salt used to re-hash inside
  | Node (Array16 (HashArrayMapTrie k v)) -- 16-way branch indexed by one hash nibble
export
Functor (HashArrayMapTrie k) where
  -- Map over stored values only; hashes, salts and shape are untouched.
  map _ Empty = Empty
  map f (Leaf h k v) = Leaf h k (f v)
  map f (Collision h s m) = Collision h s (map f m)
  map f (Node arr) = Node (map (map f) arr)
||| An empty HAMT (contains no keys).
export
empty : HashArrayMapTrie k v
empty = Empty
||| A HAMT containing exactly one key and value; the caller supplies
||| the key's full hash.
export
singleton : Hash -> k -> v -> HashArrayMapTrie k v
singleton = Leaf
||| Create a HAMT from 2 keys and values, which have different hashes.
|||
||| Bug fix: the hashes are distinct, but their 4-bit indices at the
||| current level may still coincide.  The old code unconditionally
||| wrote both leaves into one fresh node, so on an index tie the
||| second `write` silently overwrote the first leaf, dropping an
||| entry.  When the nibbles tie we now descend to the next nibble
||| (terminates because distinct hashes must differ at some nibble).
node2 : BitMask -> Hash -> k -> v -> Hash -> k -> v -> HashArrayMapTrie k v
node2 bm h0 k0 v0 h1 k1 v1 =
  if bitMask bm h0 == bitMask bm h1
    then -- same slot at this level: put a deeper subtree in that slot
      Node $
        write (bitMask bm h0) (node2 (nextBitMask bm) h0 k0 v0 h1 k1 v1) $
        new Empty
    else Node $
      write (bitMask bm h0) (Leaf h0 k0 v0) $
      write (bitMask bm h1) (Leaf h1 k1 v1) $
      new Empty
mutual
  ||| Create a HAMT from 2 keys and values, where the hashes collide.
  ||| A fresh salt is taken, both keys are re-hashed with it, and the
  ||| re-hashed pair is inserted into an empty trie wrapped in a
  ||| `Collision` node that records the original hash and the new salt.
  collision2 :
    (eq : k -> k -> Bool) ->
    (hashWithSalt : Salt -> k -> Hash) ->
    Salt -> Hash ->
    k -> v -> k -> v ->
    HashArrayMapTrie k v
  collision2 eq hws s0 h k0 v0 k1 v1 =
    let s1 = nextSalt s0
        h0 = hws s1 k0
        h1 = hws s1 k1
        -- build the inner trie under the new salt, starting at nibble 0
        m0 = insert eq hws s1 BM0 h0 k0 v0
           $ insert eq hws s1 BM0 h1 k1 v1
             Empty
    in Collision h s1 m0
  export
  ||| Insert a key and value into a HAMT, replacing any existing values.
  ||| `bm0` is the nibble of `h0` consumed at this level of the trie.
  insert :
    (eq : k -> k -> Bool) ->
    (hashWithSalt : Salt -> k -> Hash) ->
    Salt ->
    BitMask ->
    Hash ->
    k ->
    v ->
    HashArrayMapTrie k v ->
    HashArrayMapTrie k v
  insert eq hws s0 bm0 h0 k0 v0 m0 = case m0 of
    Empty => Leaf h0 k0 v0
    Leaf h1 k1 v1 => if h0 /= h1
      -- distinct hashes: split into a branch node
      then node2 bm0 h0 k0 v0 h1 k1 v1
      else if k0 `eq` k1
        -- same hash, same key: replace the value
        then Leaf h0 k0 v0
        -- same hash, different key: a true collision
        else collision2 eq hws s0 h0 k0 v0 k1 v1
    Collision h1 s1 m1 => if h0 == h1
      -- same outer hash: insert into the inner trie under its salt
      then Collision h1 s1
        $ insert eq hws s1 BM0 (hws s1 k0) k0 v0 m1
      else -- hashes are different so it can't be the last bit mask
        Node $
          update (bitMask bm0 h0) (insert eq hws s0 (nextBitMask bm0) h0 k0 v0) $
          write (bitMask bm0 h1) m0 $
          new Empty
    Node ar =>
      -- branch: recurse into the child selected by this hash nibble
      Node $ update (bitMask bm0 h0)
        (insert eq hws s0 (nextBitMask bm0) h0 k0 v0) ar
||| Delete a key and value from a HAMT.
|||
||| NOTE(review): sub-tries are not collapsed after deletion, so a `Node`
||| or `Collision` may be left holding only `Empty` children; lookups stay
||| correct but the structure is not re-compacted.
export
delete :
  (eq : k -> k -> Bool) ->
  (hashWithSalt : Salt -> k -> Hash) ->
  BitMask ->
  Hash -> k ->
  HashArrayMapTrie k v ->
  HashArrayMapTrie k v
delete eq hws bm0 h0 k0 m0 = case m0 of
  Empty => Empty
  -- Remove the leaf only when both the full hash and the key match.
  Leaf h1 k1 v1 => if h0 == h1 && k0 `eq` k1
    then Empty
    else Leaf h1 k1 v1
  -- Matching collision hash: delete inside the sub-trie, re-hashing the
  -- key with the collision's salt and restarting from the first nibble.
  Collision h1 s1 m1 => if h0 == h1
    then Collision h1 s1
      $ delete eq hws BM0 (hws s1 k0) k0 m1
    else m0
  -- Recurse into the child selected by the current nibble of the hash.
  Node ar => Node $ update (bitMask bm0 h0) (delete eq hws (nextBitMask bm0) h0 k0) ar
||| Look up the value stored at a key in a HAMT, if any.
export
lookup :
  (eq : k -> k -> Bool) ->
  (hashWithSalt : Salt -> k -> Hash) ->
  BitMask ->
  Hash -> k ->
  HashArrayMapTrie k v ->
  Maybe v
lookup _ _ _ _ _ Empty = Nothing
lookup eq _ _ h0 k0 (Leaf h1 k1 v) =
  if h0 == h1 && k0 `eq` k1 then Just v else Nothing
lookup eq hws _ h0 k0 (Collision h1 s m1) =
  if h0 == h1 then lookup eq hws BM0 (hws s k0) k0 m1 else Nothing
lookup eq hws bm0 h0 k0 (Node ar) =
  lookup eq hws (nextBitMask bm0) h0 k0 (index (bitMask bm0 h0) ar)
||| Fold a HAMT with the key and value.
||| Note: the traversal order follows the hashes, not the keys.
export
foldWithKey : (k -> v -> acc -> acc) -> acc -> HashArrayMapTrie k v -> acc
foldWithKey f z t = case t of
  Empty           => z
  Leaf _ k v      => f k v z
  Collision _ _ m => foldWithKey f z m
  Node ar         => foldr (\m, acc => foldWithKey f acc m) z ar
|
# Time-domain EFIE scattering example: RT elements on a sphere, marched in
# time with a 2-stage Radau Runge-Kutta convolution quadrature (BEAST.jl).
using CompScienceMeshes, BEAST
o, x, y, z = euclidianbasis(3)
# NOTE(review): `sol` appears to act as a wave-speed scaling for the time
# step, the pulse, and the Laplace parameter below — confirm against the
# BEAST kernel conventions.
sol = 5.0;
Δt, Nt = 100.0/sol,200
# Sphere of size D meshed with target edge length Δx.
D, Δx = 1.0, 0.45
Γ = meshsphere(D, Δx)
X = raviartthomas(Γ)
# Butcher tableau (A, b, c) of the 2-stage Radau scheme; `c` carries the
# stage times used by the staged time-stepping space V.
(A, b, c) = butcher_tableau_radau_2stages();
V = StagedTimeStep(X, c, Δt, Nt);
duration, delay, amplitude = 2000.0/sol, 2000.0/sol, 1.0
# NOTE(review): `duration` is passed as both the first and third argument —
# verify the third slot of creategaussian is meant to receive it.
gaussian = creategaussian(duration, delay, duration)
direction, polarisation = z, x
# Incident plane wave modulated by the derivative of the Gaussian pulse.
E = planewave(polarisation, direction, derive(gaussian), sol)
# Laplace-domain EFIE operator evaluated at the CQ frequencies s.
LaplaceEFIO(s::T) where {T} = MWSingleLayer3D(-s/sol, s*s/sol, T(sol));
kmax = 15;   # truncation of the convolution tail
rho = 1.0001; # radius for the CQ contour
T = RungeKuttaConvolutionQuadrature(LaplaceEFIO, A, b, Δt, kmax, rho);
@hilbertspace j
@hilbertspace j′
# Discretise T j = -E and solve the marching-on-in-time system.
tdefie = @discretise T[j′,j] == -1E[j′] j∈V j′∈V
xefie_irk = solve(tdefie)
|
{-# LANGUAGE DataKinds #-}
module SCG.Graph () where
{--
import GHC.TypeLits
import Algebra.Graph
import Backprop
import Numeric.LinearAlgebra.Static
{--
Optimizing loss functions over random variables is often intractable
due to the loss functions and their gradients being either a sum over
an exponential number of latent variable configurations or non-analytic,
high-dimensional integrals.
Usually this is resolved by using problem-specific MC Grad Estimators.
There is a general technique for this!
Stochastic Computation Graphs:
- Allows derivation of unbiased gradient estimators for general expected losses.
- Estimator can be computed as the grad of a differentiable "surrogate loss" through backprop
- Variance reduction techniques can be applied to this problem formulation
- Hessian-free methods and majorization-minimization algorithms can be generalized to the SCG framework
!! The main modification to backprop is to introduce extra gradient signals at the stochastic nodes.
--}
-- Gradient Estimators for a Single Random Variable
type RV = Vector Double
type Theta = Matrix
parameterizedProbDist :: Theta -> RV
costfn :: RV -> Double
costfn rv = 2.0
-- scoreFunctionEstimator: By Log-Derivative Trick (valid if p_x_theta is continuous function of theta, though not necessarily for x)
-- also known as REINFORCE or likelihood ratio estimator
ddTheta_expect_over_x_f_x x f p theta = expectation $ x $ f x $ ddTheta $ log $ p x theta
-- if x is deterministic differentiable (perhaps representable as a type constraint? find one)
-- function of theta and another rv z, (i.e, x(z, theta)) we can use
-- !!Pathwise Derivative
-- only valid if f(x(z, theta)) is a continuous function of theta for all z (another type constraint?)
ddTheta_expect_over_x_f_ztheta f x z theta = expectation $ z $ ddTheta $ f (x z theta)
-- Theta might appear inside the expectation and the prob dist!
-- ddtheta Expectation_z~p(., theta)[ f( x(z, theta) ) ]
-- then two terms in the gradient estimator:
ddtheta_expect_over_z_from_pOfTheta_f_of_x_of_z_theta p f x z theta =
expectation map (\z -> pointwiseEstimate) sample (p z theta)
where pointwiseEstimate = ddTheta $ f (x z theta) + (ddTheta $ log (p z theta)) * (f $ x z theta)
{--
1. SF can be used even if f is discontinuous or x is a discrete rv
2. SF only requires trajectories, PD requires f'(x)
3. SF has higher variance than PD, unless f is rough as in time-series problems with exploding gradients
4. PD has a deterministic limit, SF doesn't.
--}
{--
STOCHASTIC COMPUTATION GRAPHS:
Directed, Acyclic Graph with Three Kinds of nodes:
- Input Nodes, set externally including the parameters we differentiate with respect to
- Deterministic Nodes: pure functions of their parents
- Stochastic Nodes: Distributed Conditionally on their parents.
Each parent v of a non-input node w is connected to it by a directed edge (v, w)
THE STRUCTURE OF THE GRAPH FULLY SPECIFIES:
- What estimator we will use, SF or PD or a combination thereof
- Nodes arranged in series are multiplicative terms only
- Nodes arranged in parallel lead to sums over multiplicative terms
-
--}
{--
data Node a = InputNode a | DeterministicNode a | StochasticNode a
newtype InputNode a = IO a
newtype DeterministicNode v e = ([v] -> e -> b)
newtype StochasticNode v e = ConditionalDistribution [v] e
data DirectedEdge a = DirectedEdge
{ _v :: Node,
_w :: DeterministicNode | StochasticNode
}
--}
-- TODO: Create a directed acyclic graph DS using alga
-- TODO: Define Node types which typecheck (initially for a scalar parameterization of the relevant types)
-- TODO: Add backprop for folds over the deterministic nodes? derive an instance of Backprop for edges?
-- TODO: Add the right gradient estimation procedure for combinations of deterministic and stochastic nodes
-- TODO: Figure out how to do dataflow programming over the Graph
--}
|
(*******************************************************************************
Title: Pullbacks3.v
Authors: Jeremy Avigad, Chris Kapulkin, Peter LeFanu Lumsdaine
Date: 1 March 2013
The (abstract) two pullbacks lemma.
*******************************************************************************)
(* Imports *)
Require Import HoTT EquivalenceVarieties.
Require Import Auxiliary Pullbacks.
(*******************************************************************************
The *abstract* two pullbacks lemma.
Suppose we have two squares that paste together to a rectangle, and
the right square is a pullback. Then the whole rectangle is a
pullback if and only if the left square is.
P2 ---> P1 ---> A
| |_| |f
V V V
B2 -h-> B1 -g-> C
For the proof, we first fix the outer cospan and right-hand square:
P1 ---> A
|_| |f
V V
B2 -h-> B1 -g-> C
We then show that a cone over the left square is a pullback for that
square iff the composite cone is a pullback for the whole rectangle.
To prove this, we first construct a commutative triangle as follows:
_-> (Cones from X to left-hand square)
_- |
[X,P2]_ |
-_ V
-> (Cones from X to rectangle)
and show that the right-hand vertical map is a weak equivalence.
It then follows, by 2-of-3, that either of the diagonal maps is an
equivalence if the other one is; i.e. that the two universal
properties are equivalent.
See TwoPullbacks_alt.v for a more detailed discussion, and multiple approaches.
A naming convention we mostly adhere to: cones over the right-hand
square (f,g) are named [C1], [C1'], etc; cones over the left-hand square
(or similar squares) are [C2], [C2'], etc; and cones over the whole
rectangle as [C3], etc.
*******************************************************************************)
Section Abstract_Two_Pullbacks_Lemma.

Context {A B1 B2 C : Type} (f : A -> C) (g : B1 -> C) (h : B2 -> B1).

(* Paste a cone [C2] over the left-hand square onto a cone [C1] over the
   right-hand square, yielding a cone over the outer rectangle, i.e. over
   the cospan (f, g o h).  The commutativity witness is the concatenation
   of [C1]'s witness (transported along [C2]'s first leg) with the image
   under [g] of [C2]'s witness. *)
Definition left_cospan_cone_to_composite {P1 : Type} (C1 : cospan_cone f g P1)
  {P2 : Type} (C2: cospan_cone (cospan_cone_map2 C1) h P2)
: cospan_cone f (g o h) P2.
Proof.
  exists (cospan_cone_map1 C1 o cospan_cone_map1 C2).
  exists (cospan_cone_map2 C2).
  intros x.
  apply (concat (cospan_cone_comm C1 (cospan_cone_map1 C2 x))).
  apply (ap g (cospan_cone_comm C2 x)).
Defined.
(* The pasting map commutes with precomposition by [m : X -> P2]: pasting
   the cone induced by [m] equals the cone induced by [m] on the pasted
   rectangle.  This holds definitionally, hence [exact 1]. *)
Lemma two_pullback_triangle_commutes {P1 : Type} (C1 : cospan_cone f g P1)
  {P2 : Type} (C2 : cospan_cone (cospan_cone_map2 C1) h P2)
  {X : Type} (m : X -> P2)
: left_cospan_cone_to_composite C1 (map_to_cospan_cone C2 X m)
  = map_to_cospan_cone (left_cospan_cone_to_composite C1 C2) X m.
Proof.
  exact 1.
Defined.

(* Inverse direction on cones: given a cone over the whole rectangle, use
   the universal property of the pullback [P1] to factor its first leg
   through [P1], producing a cone over the left-hand square. *)
Lemma composite_cospan_cone_to_left (P1 : abstract_pullback f g)
  {X : Type} (C2 : cospan_cone f (g o h) X)
: cospan_cone (cospan_cone_map2 P1) h X.
Proof.
  set (C1_UP_at_X := BuildEquiv (pullback_cone_UP P1 X)).
  set (C1' := @mk_cospan_cone _ _ _ f g _ _ _ (cospan_cone_comm C2)).
  exists (C1_UP_at_X ^-1 C1').
  exists (cospan_cone_map2 C2).
  intros x.
  apply (ap10 (packed_cospan_cone_map2 P1 C1') x).
Defined.
(* Round trip starting from a rectangle cone: pasting after factoring
   through [P1] gives back the original cone (up to a cone path). *)
Lemma composite_cospan_cone_to_left_is_section
  (P1 : abstract_pullback f g) (X : Type)
: (@left_cospan_cone_to_composite _ P1 X) o (composite_cospan_cone_to_left P1)
  == idmap.
Proof.
  intro C2.
  set (C1' := @mk_cospan_cone _ _ _ f g _ _ _ (cospan_cone_comm C2)).
  apply cospan_cone_path'. simpl.
  exists (packed_cospan_cone_map1 P1 C1'). exists 1.
  intros x; simpl. apply (concatR (concat_p1 _)^).
  unfold cospan_cone_comm; simpl.
  apply moveR_pM. apply (packed_cospan_cone_comm P1 C1').
Qed.

(* Auxiliary: the cone over (f, g) packaged from a pasted cone coincides
   with the cone induced by [C2]'s first leg into [P1]. *)
Lemma left_cospan_cone_aux0 (P1 : abstract_pullback f g)
  {X : Type} (C2 : cospan_cone (cospan_cone_map2 P1) h X)
: @mk_cospan_cone _ _ _ f g _ _ _ (cospan_cone_comm (left_cospan_cone_to_composite P1 C2))
  = map_to_cospan_cone P1 X (cospan_cone_map1 C2).
Proof.
  apply cospan_cone_path'; simpl. exists 1.
  (* Helps human-readability, but slows Coq down:
  unfold cospan_cone_map2; simpl. *)
  exists (path_forall (fun x => (cospan_cone_comm C2 x)^)).
  intros x. unfold cospan_cone_comm at 1 2 3; simpl.
  apply concat2. apply inverse, concat_1p.
  apply (concatR (ap_V _ _)), ap.
  apply (concat (inv_V _)^), ap.
  revert x; apply apD10. apply inverse, eisretr.
Defined.

(* Auxiliary: transport [aux0] across the inverse of [P1]'s universal
   equivalence, recovering [C2]'s first leg. *)
Lemma left_cospan_cone_aux1 (P1 : abstract_pullback f g)
  {X : Type} (C2 : cospan_cone (cospan_cone_map2 P1) h X)
: (BuildEquiv (pullback_cone_UP P1 X))^-1
    (@mk_cospan_cone _ _ _ f g _ _ _
      (cospan_cone_comm (left_cospan_cone_to_composite P1 C2)))
  = cospan_cone_map1 C2.
Proof.
  apply moveR_I. apply left_cospan_cone_aux0.
Defined.
(* Auxiliary coherence: how [aux1] interacts with [cospan_cone_map2],
   needed for the second-leg component of the retraction proof below. *)
Lemma left_cospan_cone_aux2 (P1 : abstract_pullback f g)
  {X : Type} (C2 : cospan_cone (cospan_cone_map2 P1) h X) (x:X)
  (C1' := @mk_cospan_cone _ _ _ f g _ _ _
    (cospan_cone_comm (left_cospan_cone_to_composite P1 C2)))
: ap (cospan_cone_map2 P1) (ap10 (left_cospan_cone_aux1 P1 C2) x)
  = (ap10 (ap cospan_cone_map2 (eisretr (map_to_cospan_cone P1 X) C1')) x
    @ (cospan_cone_comm C2 x)^).
Proof.
  set (P1_UP_at_X := BuildEquiv (pullback_cone_UP P1 X)).
  rewrite <- ap10_ap_postcompose.
  change ((fun f' : X -> P1 => cospan_cone_map2 P1 o f'))
    with (cospan_cone_map2 o P1_UP_at_X).
  rewrite ap_compose.
  path_via' (ap10
    (ap cospan_cone_map2 (eisretr P1_UP_at_X C1')
      @ path_forall (fun y => (cospan_cone_comm C2 y)^)) x).
  Focus 2. rewrite ap10_pp.
  apply ap. revert x; apply apD10. apply eisretr.
  revert x; apply apD10; apply ap.
  unfold left_cospan_cone_aux1, moveR_I. fold P1_UP_at_X.
  rewrite ap_pp. rewrite ap_inverse_o_equiv. fold C1'.
  path_via' (ap cospan_cone_map2 (eisretr P1_UP_at_X C1'
    @ left_cospan_cone_aux0 P1 C2)).
  apply ap. rewrite eisadj. apply concat_pV_p.
  rewrite ap_pp. apply ap. refine (cospan_cone_path'_map2 _).
Qed.

(* Round trip starting from a left-square cone: factoring the pasted cone
   back through [P1] gives back the original cone. *)
Lemma composite_cospan_cone_to_left_is_retraction
  (P1 : abstract_pullback f g) (X : Type)
: (composite_cospan_cone_to_left P1) o (@left_cospan_cone_to_composite _ P1 X)
  == idmap.
Proof.
  intros C2.
  set (e := BuildEquiv (pullback_cone_UP P1 X)).
  set (C1' := (@mk_cospan_cone _ _ _ f g _ _ _
    (cospan_cone_comm (left_cospan_cone_to_composite P1 C2)))).
  unfold composite_cospan_cone_to_left.
  fold C1'. fold e.
  apply cospan_cone_path'.
  exists (left_cospan_cone_aux1 P1 C2).
  exists 1.
  intros x.
  path_via' (ap10 (packed_cospan_cone_map2 P1 C1') x).
  exact 1.
  apply (concatR (concat_p1 _)^).
  unfold packed_cospan_cone_map2. simpl.
  path_via'
    ((ap10 (ap cospan_cone_map2 (eisretr e C1')) x
      @ (cospan_cone_comm C2 x)^)
    @ cospan_cone_comm C2 x).
  apply inverse, concat_pV_p.
  apply whiskerR. apply inverse. apply left_cospan_cone_aux2.
Qed.
(* The pasting map on cones is an equivalence: section and retraction
   proved above, assembled with [isequiv_adjointify]. *)
Lemma left_cospan_cone_to_composite_isequiv
  (P1 : abstract_pullback f g) (X : Type)
: IsEquiv (@left_cospan_cone_to_composite _ P1 X).
Proof.
  apply (isequiv_adjointify (composite_cospan_cone_to_left P1)).
  apply composite_cospan_cone_to_left_is_section.
  apply composite_cospan_cone_to_left_is_retraction.
Qed.

(* The two pullbacks lemma: with the right-hand square a pullback, the
   left-hand square is a pullback iff the pasted rectangle is.  Both
   directions follow by 2-of-3 from the equivalence above, using that
   mapping into the pasted cone factors through the pasting map. *)
Lemma abstract_two_pullbacks_lemma
  (P1 : abstract_pullback f g)
  {P2 : Type} (C2 : cospan_cone (cospan_cone_map2 P1) h P2)
: is_pullback_cone C2 <-> is_pullback_cone (left_cospan_cone_to_composite P1 C2).
Proof.
  set (P1_UP := pullback_cone_UP P1).
  split.
  (* -> : a composite of equivalences is an equivalence. *)
  intros C2_UP X.
  change (map_to_cospan_cone (left_cospan_cone_to_composite P1 C2) X)
    with (left_cospan_cone_to_composite P1 o (map_to_cospan_cone C2 X)).
  apply @isequiv_compose.
  apply C2_UP.
  apply left_cospan_cone_to_composite_isequiv.
  (* <- : cancel the pasting equivalence on the left. *)
  intros C3_UP X.
  refine (cancelL_isequiv (left_cospan_cone_to_composite P1)).
  apply left_cospan_cone_to_composite_isequiv.
  change (left_cospan_cone_to_composite P1 o (map_to_cospan_cone C2 X))
    with (map_to_cospan_cone (left_cospan_cone_to_composite P1 C2) X).
  apply C3_UP.
Qed.

End Abstract_Two_Pullbacks_Lemma.
(*
Local Variables:
coq-prog-name: "hoqtop"
End:
*)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.