/-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import analysis.box_integral.partition.basic
/-!
# Tagged partitions
A tagged (pre)partition is a (pre)partition `π` enriched with a tagged point for each box of
`π`. For simplicity we require that the function `box_integral.tagged_prepartition.tag` is defined
on all boxes `J : box ι` but use its values only on boxes of the partition. Given `π :
box_integral.tagged_partition I`, we require that each tag `π.tag J` belongs to
`box_integral.box.Icc I`. If for every `J ∈ π`, `π.tag J` belongs to `J.Icc`, then `π` is called
a *Henstock* partition. We do not include this assumption in the definition of a tagged
(pre)partition because the McShane integral is defined as a limit along tagged partitions without
this requirement.
### Tags
rectangular box, box partition
-/
noncomputable theory
open_locale classical ennreal nnreal
open set function
namespace box_integral
variables {ι : Type*}
/-- A tagged prepartition is a prepartition enriched with a tagged point for each box of the
prepartition. For simplicity we require that `tag` is defined for all boxes in `ι → ℝ` but
we will use only the values of `tag` on the boxes of the partition. -/
structure tagged_prepartition (I : box ι) extends prepartition I :=
(tag : box ι → ι → ℝ)
(tag_mem_Icc : ∀ J, tag J ∈ I.Icc)
namespace tagged_prepartition
variables {I J J₁ J₂ : box ι} (π : tagged_prepartition I) {x : ι → ℝ}
instance : has_mem (box ι) (tagged_prepartition I) := ⟨λ J π, J ∈ π.boxes⟩
@[simp] lemma mem_to_prepartition {π : tagged_prepartition I} :
J ∈ π.to_prepartition ↔ J ∈ π := iff.rfl
@[simp] lemma mem_mk (π : prepartition I) (f h) :
J ∈ mk π f h ↔ J ∈ π := iff.rfl
/-- Union of all boxes of a tagged prepartition. -/
def Union : set (ι → ℝ) := π.to_prepartition.Union
lemma Union_def : π.Union = ⋃ J ∈ π, ↑J := rfl
@[simp] lemma Union_mk (π : prepartition I) (f h) : (mk π f h).Union = π.Union := rfl
@[simp] lemma Union_to_prepartition : π.to_prepartition.Union = π.Union := rfl
@[simp] lemma mem_Union : x ∈ π.Union ↔ ∃ J ∈ π, x ∈ J := set.mem_Union₂
lemma subset_Union (h : J ∈ π) : ↑J ⊆ π.Union := subset_bUnion_of_mem h
lemma Union_subset : π.Union ⊆ I := Union₂_subset π.le_of_mem'
/-- A tagged prepartition is a partition if it covers the whole box. -/
def is_partition := π.to_prepartition.is_partition
lemma is_partition_iff_Union_eq : is_partition π ↔ π.Union = I :=
prepartition.is_partition_iff_Union_eq
/-- The tagged partition made of boxes of `π` that satisfy predicate `p`. -/
@[simps { fully_applied := ff }] def filter (p : box ι → Prop) : tagged_prepartition I :=
⟨π.1.filter p, π.2, π.3⟩
@[simp] lemma mem_filter {p : box ι → Prop} : J ∈ π.filter p ↔ J ∈ π ∧ p J :=
finset.mem_filter
@[simp] lemma Union_filter_not (π : tagged_prepartition I) (p : box ι → Prop) :
(π.filter (λ J, ¬p J)).Union = π.Union \ (π.filter p).Union :=
π.to_prepartition.Union_filter_not p
end tagged_prepartition
namespace prepartition
variables {I J : box ι}
/-- Given a prepartition `π` of `I : box_integral.box ι` and a collection of tagged prepartitions
`πi J` of all boxes `J ∈ π`, returns the tagged prepartition of `I` into all the boxes of `πi J`
with tags coming from `(πi J).tag`. -/
def bUnion_tagged (π : prepartition I) (πi : Π J, tagged_prepartition J) :
tagged_prepartition I :=
{ to_prepartition := π.bUnion (λ J, (πi J).to_prepartition),
tag := λ J, (πi (π.bUnion_index (λ J, (πi J).to_prepartition) J)).tag J,
tag_mem_Icc := λ J, box.le_iff_Icc.1 (π.bUnion_index_le _ _) ((πi _).tag_mem_Icc _) }
@[simp] lemma mem_bUnion_tagged (π : prepartition I) {πi : Π J, tagged_prepartition J} :
J ∈ π.bUnion_tagged πi ↔ ∃ J' ∈ π, J ∈ πi J' :=
π.mem_bUnion
lemma tag_bUnion_tagged (π : prepartition I) {πi : Π J, tagged_prepartition J} (hJ : J ∈ π) {J'}
(hJ' : J' ∈ πi J) :
(π.bUnion_tagged πi).tag J' = (πi J).tag J' :=
begin
have : J' ∈ π.bUnion_tagged πi, from π.mem_bUnion.2 ⟨J, hJ, hJ'⟩,
obtain rfl := π.bUnion_index_of_mem hJ hJ',
refl
end
@[simp] lemma Union_bUnion_tagged (π : prepartition I) (πi : Π J, tagged_prepartition J) :
(π.bUnion_tagged πi).Union = ⋃ J ∈ π, (πi J).Union :=
Union_bUnion _ _
lemma forall_bUnion_tagged (p : (ι → ℝ) → box ι → Prop) (π : prepartition I)
(πi : Π J, tagged_prepartition J) :
(∀ J ∈ π.bUnion_tagged πi, p ((π.bUnion_tagged πi).tag J) J) ↔
∀ (J ∈ π) (J' ∈ πi J), p ((πi J).tag J') J' :=
begin
simp only [bex_imp_distrib, mem_bUnion_tagged],
refine ⟨λ H J hJ J' hJ', _, λ H J' J hJ hJ', _⟩,
{ rw ← π.tag_bUnion_tagged hJ hJ', exact H J' J hJ hJ' },
{ rw π.tag_bUnion_tagged hJ hJ', exact H J hJ J' hJ' }
end
lemma is_partition.bUnion_tagged {π : prepartition I} (h : is_partition π)
{πi : Π J, tagged_prepartition J} (hi : ∀ J ∈ π, (πi J).is_partition) :
(π.bUnion_tagged πi).is_partition :=
h.bUnion hi
end prepartition
namespace tagged_prepartition
variables {I J : box ι} {π π₁ π₂ : tagged_prepartition I} {x : ι → ℝ}
/-- Given a tagged prepartition `π` of `I` and a (non-tagged) prepartition `πi J` of each `J ∈ π`,
returns the tagged prepartition of `I` into all the boxes of all `πi J`. The tag of a box `J`
is defined to be the `π.tag` of the box of the prepartition `π` that includes `J`.
Note that usually the result is not a Henstock partition. -/
@[simps tag { fully_applied := ff }]
def bUnion_prepartition (π : tagged_prepartition I) (πi : Π J, prepartition J) :
tagged_prepartition I :=
{ to_prepartition := π.to_prepartition.bUnion πi,
tag := λ J, π.tag (π.to_prepartition.bUnion_index πi J),
tag_mem_Icc := λ J, π.tag_mem_Icc _ }
lemma is_partition.bUnion_prepartition {π : tagged_prepartition I} (h : is_partition π)
{πi : Π J, prepartition J} (hi : ∀ J ∈ π, (πi J).is_partition) :
(π.bUnion_prepartition πi).is_partition :=
h.bUnion hi
/-- Given a tagged prepartition `π₁` and a (non-tagged) prepartition `π₂`, returns the tagged
prepartition with `to_prepartition = π₁.to_prepartition ⊓ π₂` and tags coming from `π₁`.
Note that usually the result is not a Henstock partition. -/
def inf_prepartition (π : tagged_prepartition I) (π' : prepartition I) :
tagged_prepartition I :=
π.bUnion_prepartition $ λ J, π'.restrict J
@[simp] lemma inf_prepartition_to_prepartition (π : tagged_prepartition I) (π' : prepartition I) :
(π.inf_prepartition π').to_prepartition = π.to_prepartition ⊓ π' := rfl
lemma mem_inf_prepartition_comm :
J ∈ π₁.inf_prepartition π₂.to_prepartition ↔ J ∈ π₂.inf_prepartition π₁.to_prepartition :=
by simp only [← mem_to_prepartition, inf_prepartition_to_prepartition, inf_comm]
lemma is_partition.inf_prepartition (h₁ : π₁.is_partition) {π₂ : prepartition I}
(h₂ : π₂.is_partition) :
(π₁.inf_prepartition π₂).is_partition :=
h₁.inf h₂
open metric
/-- A tagged partition is said to be a Henstock partition if for each `J ∈ π`, the tag of `J`
belongs to `J.Icc`. -/
def is_Henstock (π : tagged_prepartition I) : Prop := ∀ J ∈ π, π.tag J ∈ J.Icc
@[simp] lemma is_Henstock_bUnion_tagged
{π : prepartition I} {πi : Π J, tagged_prepartition J} :
is_Henstock (π.bUnion_tagged πi) ↔ ∀ J ∈ π, (πi J).is_Henstock :=
π.forall_bUnion_tagged (λ x J, x ∈ J.Icc) πi
/-- In a Henstock prepartition, there are at most `2 ^ fintype.card ι` boxes with a given tag. -/
lemma is_Henstock.card_filter_tag_eq_le [fintype ι] (h : π.is_Henstock) (x : ι → ℝ) :
(π.boxes.filter (λ J, π.tag J = x)).card ≤ 2 ^ fintype.card ι :=
calc (π.boxes.filter (λ J, π.tag J = x)).card ≤ (π.boxes.filter (λ J : box ι, x ∈ J.Icc)).card :
begin
refine finset.card_le_of_subset (λ J hJ, _),
rw finset.mem_filter at hJ ⊢, rcases hJ with ⟨hJ, rfl⟩,
exact ⟨hJ, h J hJ⟩
end
... ≤ 2 ^ fintype.card ι : π.to_prepartition.card_filter_mem_Icc_le x
/-- A tagged prepartition `π` is subordinate to `r : (ι → ℝ) → Ioi (0 : ℝ)` if each box `J ∈ π` is
included in the closed ball with center `π.tag J` and radius `r (π.tag J)`. -/
def is_subordinate [fintype ι] (π : tagged_prepartition I) (r : (ι → ℝ) → Ioi (0 : ℝ)) : Prop :=
∀ J ∈ π, (J : _).Icc ⊆ closed_ball (π.tag J) (r $ π.tag J)
variables {r r₁ r₂ : (ι → ℝ) → Ioi (0 : ℝ)}
@[simp] lemma is_subordinate_bUnion_tagged [fintype ι]
{π : prepartition I} {πi : Π J, tagged_prepartition J} :
is_subordinate (π.bUnion_tagged πi) r ↔ ∀ J ∈ π, (πi J).is_subordinate r :=
π.forall_bUnion_tagged (λ x J, J.Icc ⊆ closed_ball x (r x)) πi
lemma is_subordinate.bUnion_prepartition [fintype ι] (h : is_subordinate π r)
(πi : Π J, prepartition J) :
is_subordinate (π.bUnion_prepartition πi) r :=
λ J hJ, subset.trans (box.le_iff_Icc.1 $ π.to_prepartition.le_bUnion_index hJ) $
h _ $ π.to_prepartition.bUnion_index_mem hJ
lemma is_subordinate.inf_prepartition [fintype ι] (h : is_subordinate π r) (π' : prepartition I) :
is_subordinate (π.inf_prepartition π') r :=
h.bUnion_prepartition _
lemma is_subordinate.mono' [fintype ι] {π : tagged_prepartition I}
(hr₁ : π.is_subordinate r₁) (h : ∀ J ∈ π, r₁ (π.tag J) ≤ r₂ (π.tag J)) :
π.is_subordinate r₂ :=
λ J hJ x hx, closed_ball_subset_closed_ball (h _ hJ) (hr₁ _ hJ hx)
lemma is_subordinate.mono [fintype ι] {π : tagged_prepartition I}
(hr₁ : π.is_subordinate r₁) (h : ∀ x ∈ I.Icc, r₁ x ≤ r₂ x) :
π.is_subordinate r₂ :=
hr₁.mono' $ λ J _, h _ $ π.tag_mem_Icc J
lemma is_subordinate.diam_le [fintype ι] {π : tagged_prepartition I}
(h : π.is_subordinate r) (hJ : J ∈ π.boxes) :
diam J.Icc ≤ 2 * r (π.tag J) :=
calc diam J.Icc ≤ diam (closed_ball (π.tag J) (r $ π.tag J)) :
diam_mono (h J hJ) bounded_closed_ball
... ≤ 2 * r (π.tag J) : diam_closed_ball (le_of_lt (r _).2)
/-- Tagged prepartition with single box and prescribed tag. -/
@[simps { fully_applied := ff }]
def single (I J : box ι) (hJ : J ≤ I) (x : ι → ℝ) (h : x ∈ I.Icc) : tagged_prepartition I :=
⟨prepartition.single I J hJ, λ J, x, λ J, h⟩
@[simp] lemma mem_single {J'} (hJ : J ≤ I) (h : x ∈ I.Icc) : J' ∈ single I J hJ x h ↔ J' = J :=
finset.mem_singleton
instance (I : box ι) : inhabited (tagged_prepartition I) :=
⟨single I I le_rfl I.upper I.upper_mem_Icc⟩
lemma is_partition_single_iff (hJ : J ≤ I) (h : x ∈ I.Icc) :
(single I J hJ x h).is_partition ↔ J = I :=
prepartition.is_partition_single_iff hJ
lemma is_partition_single (h : x ∈ I.Icc) : (single I I le_rfl x h).is_partition :=
prepartition.is_partition_top I
lemma forall_mem_single (p : (ι → ℝ) → (box ι) → Prop) (hJ : J ≤ I) (h : x ∈ I.Icc) :
(∀ J' ∈ single I J hJ x h, p ((single I J hJ x h).tag J') J') ↔ p x J :=
by simp
@[simp] lemma is_Henstock_single_iff (hJ : J ≤ I) (h : x ∈ I.Icc) :
is_Henstock (single I J hJ x h) ↔ x ∈ J.Icc :=
forall_mem_single (λ x J, x ∈ J.Icc) hJ h
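/- For illustration: the trivial tagged prepartition of `I` by the single box `I`, tagged by its
upper corner, is a Henstock partition because `I.upper ∈ I.Icc`. The term below shows how
`is_Henstock_single_iff` is meant to be applied. -/
example : (single I I le_rfl I.upper I.upper_mem_Icc).is_Henstock :=
(is_Henstock_single_iff le_rfl I.upper_mem_Icc).2 I.upper_mem_Icc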
@[simp] lemma is_subordinate_single [fintype ι] (hJ : J ≤ I) (h : x ∈ I.Icc) :
is_subordinate (single I J hJ x h) r ↔ J.Icc ⊆ closed_ball x (r x) :=
forall_mem_single (λ x J, J.Icc ⊆ closed_ball x (r x)) hJ h
@[simp] lemma Union_single (hJ : J ≤ I) (h : x ∈ I.Icc) :
(single I J hJ x h).Union = J :=
prepartition.Union_single hJ
/-- Union of two tagged prepartitions with disjoint unions of boxes. -/
def disj_union (π₁ π₂ : tagged_prepartition I) (h : disjoint π₁.Union π₂.Union) :
tagged_prepartition I :=
{ to_prepartition := π₁.to_prepartition.disj_union π₂.to_prepartition h,
tag := π₁.boxes.piecewise π₁.tag π₂.tag,
tag_mem_Icc := λ J, by { dunfold finset.piecewise, split_ifs,
exacts [π₁.tag_mem_Icc J, π₂.tag_mem_Icc J] } }
@[simp] lemma disj_union_boxes (h : disjoint π₁.Union π₂.Union) :
(π₁.disj_union π₂ h).boxes = π₁.boxes ∪ π₂.boxes := rfl
@[simp] lemma mem_disj_union (h : disjoint π₁.Union π₂.Union) :
J ∈ π₁.disj_union π₂ h ↔ J ∈ π₁ ∨ J ∈ π₂ :=
finset.mem_union
@[simp] lemma Union_disj_union (h : disjoint π₁.Union π₂.Union) :
(π₁.disj_union π₂ h).Union = π₁.Union ∪ π₂.Union :=
prepartition.Union_disj_union _
lemma disj_union_tag_of_mem_left (h : disjoint π₁.Union π₂.Union) (hJ : J ∈ π₁) :
(π₁.disj_union π₂ h).tag J = π₁.tag J :=
dif_pos hJ
lemma disj_union_tag_of_mem_right (h : disjoint π₁.Union π₂.Union) (hJ : J ∈ π₂) :
(π₁.disj_union π₂ h).tag J = π₂.tag J :=
dif_neg $ λ h₁, h.le_bot ⟨π₁.subset_Union h₁ J.upper_mem, π₂.subset_Union hJ J.upper_mem⟩
lemma is_subordinate.disj_union [fintype ι] (h₁ : is_subordinate π₁ r)
(h₂ : is_subordinate π₂ r) (h : disjoint π₁.Union π₂.Union) :
is_subordinate (π₁.disj_union π₂ h) r :=
begin
refine λ J hJ, (finset.mem_union.1 hJ).elim (λ hJ, _) (λ hJ, _),
{ rw disj_union_tag_of_mem_left _ hJ, exact h₁ _ hJ },
{ rw disj_union_tag_of_mem_right _ hJ, exact h₂ _ hJ }
end
lemma is_Henstock.disj_union (h₁ : is_Henstock π₁) (h₂ : is_Henstock π₂)
(h : disjoint π₁.Union π₂.Union) :
is_Henstock (π₁.disj_union π₂ h) :=
begin
refine λ J hJ, (finset.mem_union.1 hJ).elim (λ hJ, _) (λ hJ, _),
{ rw disj_union_tag_of_mem_left _ hJ, exact h₁ _ hJ },
{ rw disj_union_tag_of_mem_right _ hJ, exact h₂ _ hJ }
end
/-- If `I ≤ J`, then every tagged prepartition of `I` is a tagged prepartition of `J`. -/
def embed_box (I J : box ι) (h : I ≤ J) :
tagged_prepartition I ↪ tagged_prepartition J :=
{ to_fun := λ π,
{ le_of_mem' := λ J' hJ', (π.le_of_mem' J' hJ').trans h,
tag_mem_Icc := λ J, box.le_iff_Icc.1 h (π.tag_mem_Icc J),
.. π },
inj' := by { rintro ⟨⟨b₁, h₁le, h₁d⟩, t₁, ht₁⟩ ⟨⟨b₂, h₂le, h₂d⟩, t₂, ht₂⟩ H, simpa using H } }
section distortion
variables [fintype ι] (π)
open finset
/-- The distortion of a tagged prepartition is the maximum of distortions of its boxes. -/
def distortion : ℝ≥0 := π.to_prepartition.distortion
lemma distortion_le_of_mem (h : J ∈ π) : J.distortion ≤ π.distortion :=
le_sup h
lemma distortion_le_iff {c : ℝ≥0} : π.distortion ≤ c ↔ ∀ J ∈ π, box.distortion J ≤ c :=
finset.sup_le_iff
@[simp] lemma _root_.box_integral.prepartition.distortion_bUnion_tagged (π : prepartition I)
(πi : Π J, tagged_prepartition J) :
(π.bUnion_tagged πi).distortion = π.boxes.sup (λ J, (πi J).distortion) :=
sup_bUnion _ _
@[simp] lemma distortion_bUnion_prepartition (π : tagged_prepartition I)
(πi : Π J, prepartition J) :
(π.bUnion_prepartition πi).distortion = π.boxes.sup (λ J, (πi J).distortion) :=
sup_bUnion _ _
@[simp] lemma distortion_disj_union (h : disjoint π₁.Union π₂.Union) :
(π₁.disj_union π₂ h).distortion = max π₁.distortion π₂.distortion :=
sup_union
lemma distortion_of_const {c} (h₁ : π.boxes.nonempty) (h₂ : ∀ J ∈ π, box.distortion J = c) :
π.distortion = c :=
(sup_congr rfl h₂).trans (sup_const h₁ _)
@[simp] lemma distortion_single (hJ : J ≤ I) (h : x ∈ I.Icc) :
distortion (single I J hJ x h) = J.distortion :=
sup_singleton
lemma distortion_filter_le (p : box ι → Prop) : (π.filter p).distortion ≤ π.distortion :=
sup_mono (filter_subset _ _)
end distortion
end tagged_prepartition
end box_integral
|
subroutine splines(x,y,no,ntau,tau,yy,f)
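c --- Note: the argument roles below are inferred from the code and may be approximate.
c     x,y    abscissae and values at the n = no+2 equally spaced nodes
c            (step paso = x(2)-x(1))
c     tau    abscissae of the ntau points to interpolate to
c     yy     interpolated values returned to the caller, yy(i) = sum_k f(i,k)*y(k)
c     f      interpolation weights, also cached in ff via COMMON/FACTORSPLIN/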
include 'PARAMETER' !for kt
parameter (kt2=kt*kn)
implicit real*4 (a-h,o-z)
real*4 a(kt,kt),x(*),y(*),tau(*),yy(*),f(kt,kt),ff(kt2)
real*4 taure(kt)
COMMON/FACTORSPLIN/FF
common/cambio/ncambno,ncambpre !if 0,0: the nodes change, the precision changes
c ncambno is 0 the first time a cycle is run
c " " 1 on the remaining runs
data iset/0/
iset=iset*ncambno
n=no+2
IF(ISET.EQ.0)THEN
paso=x(2)-x(1)
iset=1
ncambno=1
if(n.gt.2)then
CALL SPLINB(n,kt,a)
ks=0
do i=1,ntau
j1=1+int((tau(i)-x(1))/paso)
j2=j1+1
f2=(tau(i)-x(j1))/paso
f1=1.d0-f2
f3=f1*(f1*f1-1.d0)
f4=f2*(f2*f2-1.d0)
do k=1,n
f(i,k)=f3*a(j1,k)+f4*a(j2,k)
if(k.eq.j1)f(i,k)=f(i,k)+f1
if(k.eq.j2)f(i,k)=f(i,k)+f2
end do
do k=1,n
ks=ks+1
ff(ks)=f(i,k)
end do
end do
else
do i=1,ntau
taure(i)=(tau(i)-x(1))/paso
end do
if(no.eq.0)then
ks=0
do i=1,ntau
f(i,1)=1.d0-taure(i)
f(i,2)=taure(i)
do k=1,n
ks=ks+1
ff(ks)=f(i,k)
end do
end do
else
ks=0
do i=1,ntau
if(taure(i).lt.1.d0)then
f(i,1)=1.d0-taure(i)
f(i,2)=taure(i)
f(i,3)=0.d0
else
f(i,1)=0.d0
f(i,2)=2.d0-taure(i)
f(i,3)=taure(i)-1.d0
end if
do k=1,n
ks=ks+1
ff(ks)=f(i,k)
end do
end do
end if
end if
END IF
c from here on this is always executed
ks=0
do i=1,ntau
yy(i)=0.d0
do k=1,n
ks=ks+1
f(i,k)=ff(ks)
yy(i)=yy(i)+ff(ks)*y(k)
end do
end do
return
end
|
-- ------------------------------------------------------------- [ Element.idr ]
-- Module : Element.idr
-- Copyright : (c) Jan de Muijnck-Hughes
-- License : see LICENSE
-- --------------------------------------------------------------------- [ EOH ]
||| This module details correctness properties for element insertion.
|||
||| Correctness/Soundness Properties ::
|||
||| 1. All elements added to the model must be unique.
|||
||| Note ::
|||
||| + This property should probably be replaced within the model using a Set.
|||
module GRL.Property.Element
import public Decidable.Equality
import public Data.AVL.Graph
import public Data.List
import GRL.Model
import GRL.IR
import GRL.Common
%access export
-- ------------------------------------------------------- [ Element Insertion ]
||| Check to see if the element is unique.
|||
isElemUnique : (node : GExpr ELEM) -> (model : GModel) -> Bool
isElemUnique (Elem ty t s) m = not $ hasGoal t m
||| Check to see if the element is unique.
|||
%hint
checkElemBool : (node : GExpr ELEM) -> (model : GModel) -> Bool
checkElemBool n m = isElemUnique n m
-- --------------------------------------------------------------------- [ EOF ]
|
(** Coq coding by choukh, Oct 2021 **)
Require Import BBST.Axiom.Meta.
Require Import BBST.Axiom.Extensionality.
Require Import BBST.Axiom.Separation.
Require Import BBST.Axiom.Union.
Require Export BBST.Axiom.Infinity.
Require Export BBST.Definition.Include.
Require Export BBST.Definition.Emptyset.
Require Export BBST.Definition.OneTwo.
Require Export BBST.Definition.Successor.
Require Export BBST.Definition.TransitiveSet.
Definition 为自然数 := λ n, ∀ A, 归纳的 A → n ∈ A.
Definition ω := {a ∊ 𝐈 | 为自然数 a}.
Theorem ω是任意归纳集的共通部分 : ∀ A, 归纳的 A → ω ⊆ A.
Proof. intros A H x Hx. apply 分离之条件 in Hx. auto. Qed.
Theorem ω里有且仅有自然数 : ∀ n, n ∈ ω ↔ 为自然数 n.
Proof.
split.
- intros n属于ω. now apply 分离除去 in n属于ω.
- intros n为自然数. apply 分离介入; auto.
apply n为自然数. apply 无穷公理.
Qed.
(* Peano axiom 1 *)
Lemma 零是自然数 : ∅ ∈ ω.
Proof.
apply 分离介入. apply 无穷公理. intros A [H _]. auto.
Qed.
Lemma ω不为零 : ω ≠ ∅.
Proof.
intros H. pose proof 零是自然数.
rewrite H in H0. 空集归谬.
Qed.
Global Hint Immediate 零是自然数 ω不为零 : core.
(* Peano axiom 2 *)
Theorem ω是归纳集 : 归纳的 ω.
Proof.
split. auto.
intros a Ha. apply 分离之条件 in Ha. apply 分离介入.
- apply 无穷公理. apply Ha. apply 无穷公理.
- intros A A归纳. apply A归纳. apply Ha. apply A归纳.
Qed.
Corollary ω归纳 : ∀n ∈ ω, n⁺ ∈ ω.
Proof. apply ω是归纳集. Qed.
Global Hint Resolve ω归纳 : core.
Fact 壹是自然数 : 壹 ∈ ω.
Proof. rewrite <- 零的后继为壹. auto. Qed.
Global Hint Immediate 壹是自然数 : core.
Fact 贰是自然数 : 贰 ∈ ω.
Proof. rewrite <- 壹的后继为贰. auto. Qed.
Global Hint Immediate 贰是自然数 : core.
(* Peano axiom 3 *)
Theorem 零不是任何自然数的后继 : ¬ ∃ n ∈ ω, n⁺ = ∅.
Proof. intros [n [Hn H]]. eapply 后继非空. apply H. Qed.
(* Peano axiom 5 *)
Theorem 归纳原理 : ∀ N, N ⊆ ω → 归纳的 N → N = ω.
Proof.
intros N N子集 N归纳. 外延 n Hn.
- apply N子集. apply Hn.
- apply 分离之条件 in Hn. apply Hn. apply N归纳.
Qed.
Corollary 归纳法 : ∀ P : 性质, P ∅ → (∀n ∈ ω, P n → P n⁺) → ∀n ∈ ω, P n.
Proof with auto.
intros P 起始 归纳 n Hn. set {n ∊ ω | P n} as N.
assert (N = ω). {
apply 归纳原理. apply 分离为子集. split. apply 分离介入...
intros m Hm. apply 分离除去 in Hm as [Hm HPm]. apply 分离介入...
}
rewrite <- H in Hn. apply 分离之条件 in Hn...
Qed.
Ltac 归纳 n Hn :=
match goal with
| |- ∀n ∈ ω, _ => intros n Hn; pattern n
| Hn: n ∈ ω |- _ => pattern n
end;
match goal with |- ?P n => let IH := fresh "归纳假设" in
generalize dependent n; apply (归纳法 P); [|intros n Hn IH]
end.
Tactic Notation "归纳" simple_intropattern(n) simple_intropattern(Hn) := 归纳 n Hn.
Tactic Notation "归纳" simple_intropattern(n) := 归纳 n ?Hn.
Tactic Notation "归纳" := let n := fresh "n" in let Hn := fresh "Hn" in 归纳 n Hn.
Theorem 非零自然数的前驱存在 : ∀n ∈ ω, n ≠ ∅ → ∃k ∈ ω, n = k⁺.
Proof.
归纳.
- (* n = ∅ *) intros 矛盾. easy.
- (* n = m⁺ *) intros _. exists n. split; easy.
Qed.
Ltac 讨论 n := match goal with | Hn: n ∈ ω |- _ =>
let H := fresh "H" in let p := fresh "p" in
let Hp := fresh "Hp" in let Heq := fresh "Heq" in
排中 (n = ∅) as [|H]; [|
apply (非零自然数的前驱存在 n Hn) in H as [p [Hp Heq]]
]; subst n; [|rename p into n] end.
(* Exercise 5-1 *)
Fact 零小于后继数 : ∀n ∈ ω, ∅ ∈ n⁺.
Proof. 归纳; auto. Qed.
Global Hint Immediate 零小于后继数 : core.
Theorem ω为传递集 : 为传递集 ω.
Proof.
apply 传递集即其元素都为其子集. 归纳.
- (* n = ∅ *) auto.
- (* n = m⁺ *) intros x Hx. apply 后继除去 in Hx as [].
+ now apply 归纳假设.
+ now subst.
Qed.
Global Hint Resolve ω为传递集 : core.
Theorem 自然数为传递集 : ∀n ∈ ω, 为传递集 n.
Proof.
归纳; intros p q Hp Hq.
- 空集归谬.
- apply 后继除去 in Hq as [].
+ apply 左后继介入. eapply 归纳假设; eauto.
+ subst. auto.
Qed.
Global Hint Immediate 自然数为传递集 : core.
(* Peano axiom 4 *)
Lemma 后继是单射 : ∀ n m ∈ ω, n⁺ = m⁺ → n = m.
Proof.
intros n Hn m Hm 相等.
apply 自然数为传递集 in Hn, Hm.
rewrite 传递集即其后继的并等于自身 in Hn, Hm.
congruence.
Qed.
|
theory Chapter9_1
imports "HOL-IMP.Types" "Short_Theory"
begin
text\<open>
\section*{Chapter 9}
\exercise
Reformulate the inductive predicates \ @{prop"\<Gamma> \<turnstile> a : \<tau>"},
\ @{prop"\<Gamma> \<turnstile> (b::bexp)"} \
and \ \mbox{@{prop"\<Gamma> \<turnstile> (c::com)"}} \ as three recursive functions
\<close>
fun atype :: "tyenv \<Rightarrow> aexp \<Rightarrow> ty option" where
(* your definition/proof here *)
fun bok :: "tyenv \<Rightarrow> bexp \<Rightarrow> bool" where
(* your definition/proof here *)
fun cok :: "tyenv \<Rightarrow> com \<Rightarrow> bool" where
(* your definition/proof here *)
text\<open> and prove \<close>
lemma atyping_atype: "(\<Gamma> \<turnstile> a : \<tau>) = (atype \<Gamma> a = Some \<tau>)"
(* your definition/proof here *)
lemma btyping_bok: "(\<Gamma> \<turnstile> b) = bok \<Gamma> b"
(* your definition/proof here *)
lemma ctyping_cok: "(\<Gamma> \<turnstile> c) = cok \<Gamma> c"
(* your definition/proof here *)
text\<open>
\endexercise
\exercise
Modify the evaluation and typing of @{typ aexp} by allowing @{typ int}s to be coerced
to @{typ real}s with the predefined coercion function
\noquotes{@{term[source] "real_of_int :: int \<Rightarrow> real"}} where necessary.
Now every @{typ aexp} has a value. Define an evaluation function:
\<close>
fun aval :: "aexp \<Rightarrow> state \<Rightarrow> val" where
(* your definition/proof here *)
text\<open>
Similarly, every @{typ aexp} has a type.
Define a function that computes the type of an @{typ aexp}
\<close>
fun atyp :: "tyenv \<Rightarrow> aexp \<Rightarrow> ty" where
(* your definition/proof here *)
text\<open> and prove that it computes the correct type: \<close>
lemma "\<Gamma> \<turnstile> s \<Longrightarrow> atyp \<Gamma> a = type (aval a s)"
(* your definition/proof here *)
text\<open>
Note that Isabelle inserts the coercion @{typ real} automatically.
For example, if you write @{term "Rv(i+r)"} where @{text"i :: int"} and
@{text "r :: real"} then it becomes @{term "Rv(real i + x)"}.
\endexercise
\bigskip
For the following two exercises copy theory @{short_theory "Types"} and modify it as required.
\begin{exercise}
Add a @{text REPEAT} loop (see Exercise~\ref{exe:IMP:REPEAT}) to the typed version of IMP
and update the type soundness proof.
\end{exercise}
\begin{exercise}
Modify the typed version of IMP as follows. Values are now either integers or booleans.
Thus variables can have boolean values too. Merge the two expressions types
@{typ aexp} and @{typ bexp} into one new type @{text exp} of expressions
that has the constructors of both types (of course without real constants).
Combine @{const taval} and @{const tbval} into one evaluation predicate
@{text "eval :: exp \<Rightarrow> state \<Rightarrow> val \<Rightarrow> bool"}. Similarly combine the two typing predicates
into one: @{text "\<Gamma> \<turnstile> e : \<tau>"} where @{text "e :: exp"} and the IMP-type @{text \<tau>} can
be one of @{text Ity} or @{text Bty}.
Adjust the small-step semantics and the type soundness proof.
\end{exercise}
\<close>
end
|
/*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*
** **
** This file forms part of the Underworld geophysics modelling application. **
** **
** For full license and copyright information, please refer to the LICENSE.md file **
** located at the project root, or contact the authors. **
** **
**~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*/
#if 0
/*
Performs y = ( G^T G )^{-1} ( G^T K G ) ( G^T G )^{-1} x
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <petsc.h>
#include <petscmat.h>
#include <petscvec.h>
#include <petscksp.h>
#include <petscpc.h>
#include "common-driver-utils.h"
#include <petscversion.h>
#if ( (PETSC_VERSION_MAJOR >= 3) && (PETSC_VERSION_MINOR >=3) )
#if (PETSC_VERSION_MINOR >=6)
#include "petsc/private/pcimpl.h"
#include "petsc/private/kspimpl.h"
#else
#include "petsc-private/pcimpl.h"
#include "petsc-private/kspimpl.h"
#endif
#else
#include "private/pcimpl.h"
#include "private/kspimpl.h"
#endif
#include "pc_ScaledGtKG.h"
#include <StGermain/StGermain.h>
#include <StgDomain/StgDomain.h>
#define PCTYPE_SCGtKG "scgtkg"
/* private data */
typedef struct {
Mat B, Bt, F, C; /* Bt \in [M x N], F \in [M x M] */
Vec X1,X2, Y1,Y2; /* the scaling vectors */
Vec s,t,X; /* s \in [M], t \in [N], X \in [M] */
KSP ksp_BBt; /* The user MUST provide this operator */
PetscTruth BBt_has_cnst_nullspace;
PetscTruth monitor_activated;
} _PC_SC_GtKG;
typedef _PC_SC_GtKG* PC_SC_GtKG;
/* private prototypes */
PetscErrorCode BSSCR_PCApply_ScGtKG( PC pc, Vec x, Vec y );
PetscErrorCode BSSCR_PCApplyTranspose_ScGtKG( PC pc, Vec x, Vec y );
PetscErrorCode BSSCR_PCSetUp_GtKG( PC pc );
PetscErrorCode BSSCR_BSSCR_pc_error_ScGtKG( PC pc, const char func_name[] )
{
const PCType type;
PCGetType( pc, &type );
if( strcmp(type,PCTYPE_SCGtKG)!=0 ) {
printf("Error(%s): PC type (%s) should be scgtkg. \n",func_name, type );
PetscFinalize();
exit(0);
}
PetscFunctionReturn(0);
}
void BSSCR_BSSCR_get_number_nonzeros_AIJ_ScGtKG( Mat A, PetscInt *nnz )
{
MatInfo info;
MatGetInfo( A, MAT_GLOBAL_SUM, &info );
*nnz = info.nz_used;
}
PetscErrorCode BSSCR_PCScGtKGBBtContainsConstantNullSpace( PC pc, PetscTruth *has_cnst_nullsp )
{
PC_SC_GtKG ctx = (PC_SC_GtKG)pc->data;
PetscInt N;
PetscScalar sum;
PetscReal nrm;
Vec l,r;
Mat BBt,A;
Stg_KSPGetOperators( ctx->ksp_BBt, &BBt, PETSC_NULL, PETSC_NULL );
A = BBt;
MatGetVecs( A, &r, &l ); // l = A r
VecGetSize(r,&N);
sum = 1.0/N;
VecSet(r,sum);
/* scale */
VecPointwiseMult( r, r, ctx->Y2 );
/* {l} = [A] {r} */
MatMult( A,r, l );
/* scale */
VecPointwiseMult( l, l, ctx->X2 );
VecNorm(l,NORM_2,&nrm);
if (nrm < 1.e-7) {
*has_cnst_nullsp = PETSC_TRUE;
}
else {
*has_cnst_nullsp = PETSC_FALSE;
}
Stg_VecDestroy(&l);
Stg_VecDestroy(&r);
PetscFunctionReturn(0);
}
/*
The setup call sequence should not be modified here; it is handled by PETSc.
*/
PetscErrorCode BSSCR_PCSetUp_ScGtKG( PC pc )
{
PC_SC_GtKG ctx = (PC_SC_GtKG)pc->data;
if( ctx->F == PETSC_NULL ) { Stg_SETERRQ( PETSC_ERR_SUP, "gtkg: F not set" ); }
if( ctx->Bt == PETSC_NULL ) { Stg_SETERRQ( PETSC_ERR_SUP, "gtkg: Bt not set" ); }
if(ctx->ksp_BBt==PETSC_NULL) {
BSSCR_PCScGtKGUseStandardBBtOperator( pc ) ;
}
BSSCR_PCScGtKGBBtContainsConstantNullSpace( pc, &ctx->BBt_has_cnst_nullspace );
if( ctx->BBt_has_cnst_nullspace == PETSC_TRUE ) {
PetscPrintf( PETSC_COMM_WORLD, "\t* Detected presence of constant nullspace in BBt-C\n" );
}
PetscFunctionReturn(0);
}
PetscErrorCode BSSCR_PCDestroy_ScGtKG( PC pc )
{
PC_SC_GtKG ctx = (PC_SC_GtKG)pc->data;
if( ctx == PETSC_NULL ) { PetscFunctionReturn(0); }
if( ctx->ksp_BBt != PETSC_NULL ) { Stg_KSPDestroy(&ctx->ksp_BBt ); }
if( ctx->s != PETSC_NULL ) { Stg_VecDestroy(&ctx->s ); }
if( ctx->X != PETSC_NULL ) { Stg_VecDestroy(&ctx->X ); }
if( ctx->t != PETSC_NULL ) { Stg_VecDestroy(&ctx->t ); }
if( ctx->X1 != PETSC_NULL ) { Stg_VecDestroy(&ctx->X1 ); }
if( ctx->X2 != PETSC_NULL ) { Stg_VecDestroy(&ctx->X2 ); }
if( ctx->Y1 != PETSC_NULL ) { Stg_VecDestroy(&ctx->Y1 ); }
if( ctx->Y2 != PETSC_NULL ) { Stg_VecDestroy(&ctx->Y2 ); }
PetscFree( ctx );
PetscFunctionReturn(0);
}
PetscErrorCode BSSCR_PCView_ScGtKG( PC pc, PetscViewer viewer )
{
PC_SC_GtKG ctx = (PC_SC_GtKG)pc->data;
PetscViewerASCIIPushTab(viewer); //1
PetscViewerASCIIPrintf( viewer, "gtkg-ksp \n" );
PetscViewerASCIIPrintf(viewer,"---------------------------------\n");
PetscViewerASCIIPushTab(viewer);
KSPView( ctx->ksp_BBt, viewer );
PetscViewerASCIIPopTab(viewer);
PetscViewerASCIIPrintf(viewer,"---------------------------------\n");
PetscViewerASCIIPopTab(viewer); //1
PetscFunctionReturn(0);
}
PetscErrorCode BSSCRBSSCR_Lp_monitor_ScGtKG( KSP ksp, PetscInt index )
{
PetscInt max_it;
PetscReal rnorm;
KSPConvergedReason reason;
KSPGetIterationNumber( ksp, &max_it );
KSPGetResidualNorm( ksp, &rnorm );
KSPGetConvergedReason( ksp, &reason );
if (reason >= 0) {
PetscPrintf(((PetscObject)ksp)->comm,"\t<Lp(%d)>: Linear solve converged. its.=%.4d ; |r|=%5.5e ; Reason=%s\n",
index, max_it, rnorm, KSPConvergedReasons[reason] );
} else {
PetscPrintf(((PetscObject)ksp)->comm,"\t<Lp(%d)>: Linear solve did not converge. its.=%.4d ; |r|=%5.5e ; Residual reduction=%2.2e ; Reason=%s\n",
index, max_it, rnorm, (ksp->rnorm0/rnorm), KSPConvergedReasons[reason]);
}
PetscFunctionReturn(0);
}
PetscErrorCode BSSCR_PCScBFBTSubKSPMonitor( KSP ksp, PetscInt index, PetscLogDouble time )
{
PetscInt max_it;
PetscReal rnorm;
KSPConvergedReason reason;
KSPGetIterationNumber( ksp, &max_it );
KSPGetResidualNorm( ksp, &rnorm );
KSPGetConvergedReason( ksp, &reason );
PetscPrintf(((PetscObject)ksp)->comm," PCScBFBTSubKSP (%d): %D Residual norm; r0 %12.12e, r %12.12e: Reason %s: Time %5.5e \n",
index, max_it, ksp->rnorm0, rnorm, KSPConvergedReasons[reason], time );
PetscFunctionReturn(0);
}
/*
Performs y <- S*^{-1} x
S*^{-1} = ( B' Bt' )^{-1} B' F' Bt' ( B' Bt' )^{-1}
where
F' = X1 F Y1
Bt' = X1 Bt Y2
B' = X2 B Y1
Thus, S*^{-1} = [ X2 B Y1 X1 Bt Y2 ]^{-1} . [ X2 B Y1 . X1 F Y1 . X1 Bt Y2 ] . [ X2 B Y1 X1 Bt Y2 ]^{-1}
= Y2^{-1} ksp_BBt X2^{-1} . [ B' F' Bt' ] . Y2^{-1} ksp_BBt X2^{-1}
= Y2^{-1} ksp_BBt . [ B Y1 . X1 F Y1 . X1 Bt ] . ksp_BBt X2^{-1}
*/
PetscErrorCode BSSCR_PCApply_ScGtKG( PC pc, Vec x, Vec y )
{
PC_SC_GtKG ctx = (PC_SC_GtKG)pc->data;
KSP ksp;
Mat F, Bt;
Vec s,t,X;
PetscLogDouble t0,t1;
ksp = ctx->ksp_BBt;
F = ctx->F;
Bt = ctx->Bt;
s = ctx->s;
t = ctx->t;
X = ctx->X;
/* Apply scaled Poisson operator */
/* scale x */
/* ========================================================
NOTE:
I THINK TO OMIT THESE AS WE WANT TO UNSCALE THE
PRECONDITIONER AS S IN THIS CASE IS NOT SCALED
======================================================== */
// VecPointwiseDivide( x, x, ctx->X2 ); /* x <- x/X2 */ /* NEED TO BE SURE */
if( ctx->BBt_has_cnst_nullspace == PETSC_TRUE ) {
BSSCR_VecRemoveConstNullspace( x, PETSC_NULL );
}
PetscGetTime(&t0);
KSPSolve( ksp, x, t ); /* t <- GtG_inv x */
PetscGetTime(&t1);
if (ctx->monitor_activated) {
BSSCR_PCScBFBTSubKSPMonitor(ksp,1,(t1-t0));
}
/* Apply Bt */
MatMult( Bt, t, s ); /* s <- G t */
VecPointwiseMult( s, s, ctx->X1 ); /* s <- s * X1 */
/* Apply F */
VecPointwiseMult( s, s, ctx->Y1 ); /* s <- s * Y1 */
MatMult( F, s, X ); /* X <- K s */
VecPointwiseMult( X, X, ctx->X1 ); /* X <- X * X1 */
/* Apply B */
VecPointwiseMult( X, X, ctx->Y1 ); /* X <- X * Y1 */
MatMultTranspose( Bt, X, t ); /* t <- Gt X */
if( ctx->BBt_has_cnst_nullspace == PETSC_TRUE ) {
BSSCR_VecRemoveConstNullspace( t, PETSC_NULL );
}
PetscGetTime(&t0);
KSPSolve( ksp, t, y ); /* y <- GtG_inv t */
PetscGetTime(&t1);
if (ctx->monitor_activated) {
BSSCR_PCScBFBTSubKSPMonitor(ksp,2,(t1-t0));
}
VecPointwiseMult( y, y, ctx->Y2 ); /* y <- y/Y2 */
/* undo modification made to x on entry */
// VecPointwiseMult( x, x, ctx->X2 ); /* x <- x/X2 */ /* NEED TO BE SURE */
PetscFunctionReturn(0);
}
/*
Performs y <- S*^{-1} x
S*^{-1} = ( B' Bt' )^{-1} B' F' Bt' - C' ( B' Bt' )^{-1}
where
F' = X1 F Y1
Bt' = X1 Bt Y2
B' = X2 B Y1
C' = X2 C Y2
Thus, S*^{-1} = [ X2 B Y1 X1 Bt Y2 ]^{-1} . [ X2 B Y1 . X1 F Y1 . X1 Bt Y2 - X2 C Y2 ] . [ X2 B Y1 X1 Bt Y2 ]^{-1}
= Y2^{-1} ksp_BBt X2^{-1} . [ B' F' Bt' - C' ] . Y2^{-1} ksp_BBt X2^{-1}
= Y2^{-1} ksp_BBt . [ B Y1 . X1 F Y1 . X1 Bt - C ] . ksp_BBt X2^{-1}
*/
PetscErrorCode BSSCR_BSSCR_PCApply_ScGtKG_C( PC pc, Vec x, Vec y )
{
PC_SC_GtKG ctx = (PC_SC_GtKG)pc->data;
KSP ksp;
Mat F, Bt,C;
Vec s,t,X;
PetscLogDouble t0,t1;
ksp = ctx->ksp_BBt;
F = ctx->F;
Bt = ctx->Bt;
C = ctx->C;
s = ctx->s;
t = ctx->t;
X = ctx->X;
/* Apply scaled Poisson operator */
/* scale x */
/* ========================================================
NOTE:
I THINK TO OMIT THESE AS WE WANT TO UNSCALE THE
PRECONDITIONER AS S IN THIS CASE IS NOT SCALED
======================================================== */
// VecPointwiseDivide( x, x, ctx->X2 ); /* x <- x/X2 */ /* NEED TO BE SURE */
if( ctx->BBt_has_cnst_nullspace == PETSC_TRUE ) {
BSSCR_VecRemoveConstNullspace( x, PETSC_NULL );
}
PetscGetTime(&t0);
KSPSolve( ksp, x, t ); /* t <- GtG_inv x */
PetscGetTime(&t1);
if (ctx->monitor_activated) {
BSSCR_PCScBFBTSubKSPMonitor(ksp,1,(t1-t0));
}
/* Apply Bt */
MatMult( Bt, t, s ); /* s <- G t */
VecPointwiseMult( s, s, ctx->X1 ); /* s <- s * X1 */
/* Apply F */
VecPointwiseMult( s, s, ctx->Y1 ); /* s <- s * Y1 */
MatMult( F, s, X ); /* X <- K s */
VecPointwiseMult( X, X, ctx->X1 ); /* X <- X * X1 */
/* Apply B */
VecPointwiseMult( X, X, ctx->Y1 ); /* X <- X * Y1 */
MatMultTranspose( Bt, X, s ); /* s <- Gt X */
/* s <- s - C t */
VecScale( s, -1.0 );
MatMultAdd( C, t, s, s );
VecScale( s, -1.0 );
if( ctx->BBt_has_cnst_nullspace == PETSC_TRUE ) {
BSSCR_VecRemoveConstNullspace( s, PETSC_NULL );
}
PetscGetTime(&t0);
KSPSolve( ksp, s, y ); /* y <- GtG_inv s */
PetscGetTime(&t1);
if (ctx->monitor_activated) {
BSSCR_PCScBFBTSubKSPMonitor(ksp,2,(t1-t0));
}
VecPointwiseMult( y, y, ctx->Y2 ); /* y <- y/Y2 */
/* undo modification made to x on entry */
// VecPointwiseMult( x, x, ctx->X2 ); /* x <- x/X2 */ /* NEED TO BE SURE */
PetscFunctionReturn(0);
}
/* Need to check this one if correct */
/*
S^{-1} = ( G^T G )^{-1} G^T K G ( G^T G )^{-1}
= A C A
S^{-T} = A^T (A C)^T
= A^T C^T A^T, but A = G^T G which is symmetric
= A C^T A
= A G^T ( G^T K )^T A
= A G^T K^T G A
*/
PetscErrorCode BSSCR_PCApplyTranspose_ScGtKG( PC pc, Vec x, Vec y )
{
PC_SC_GtKG ctx = (PC_SC_GtKG)pc->data;
KSP ksp;
Mat F, Bt;
Vec s,t,X;
PetscLogDouble t0,t1;
ksp = ctx->ksp_BBt;
F = ctx->F;
Bt = ctx->Bt;
s = ctx->s;
t = ctx->t;
X = ctx->X;
/* Apply scaled Poisson operator */
/* scale x */
/* ========================================================
NOTE:
I THINK TO OMIT THESE AS WE WANT TO UNSCALE THE
PRECONDITIONER AS S IN THIS CASE IS NOT SCALED
======================================================== */
// VecPointwiseDivide( x, x, ctx->X2 ); /* x <- x/X2 */ /* NEED TO BE SURE */
if( ctx->BBt_has_cnst_nullspace == PETSC_TRUE ) {
BSSCR_VecRemoveConstNullspace( x, PETSC_NULL );
}
PetscGetTime(&t0);
KSPSolveTranspose( ksp, x, t ); /* t <- GtG_inv x */
PetscGetTime(&t1);
if (ctx->monitor_activated) {
BSSCR_PCScBFBTSubKSPMonitor(ksp,1,(t1-t0));
}
/* Apply Bt */
MatMult( Bt, t, s ); /* s <- G t */
VecPointwiseMult( s, s, ctx->X1 ); /* s <- s * X1 */
/* Apply F */
VecPointwiseMult( s, s, ctx->Y1 ); /* s <- s * Y1 */
MatMultTranspose( F, s, X ); /* X <- K s */
VecPointwiseMult( X, X, ctx->X1 ); /* X <- X * X1 */
/* Apply B */
VecPointwiseMult( X, X, ctx->Y1 ); /* X <- X * Y1 */
MatMultTranspose( Bt, X, t ); /* t <- Gt X */
if( ctx->BBt_has_cnst_nullspace == PETSC_TRUE ) {
BSSCR_VecRemoveConstNullspace( t, PETSC_NULL );
}
PetscGetTime(&t0);
KSPSolveTranspose( ksp, t, y ); /* y <- GtG_inv t */
PetscGetTime(&t1);
if (ctx->monitor_activated) {
BSSCR_PCScBFBTSubKSPMonitor(ksp,2,(t1-t0));
}
VecPointwiseMult( y, y, ctx->Y2 ); /* y <- y/Y2 */
/* undo modification made to x on entry */
// VecPointwiseMult( x, x, ctx->X2 ); /* x <- x/X2 */ /* NEED TO BE SURE */
PetscFunctionReturn(0);
}
/*
Performs y <- S^{-1} x
S^{-1} = ( G^T Di G )^{-1} G^T Di K Di G ( G^T Di G )^{-1}
where Di = diag(M)^{-1}
*/
/*
Only the options related to GtKG should be set here.
*/
PetscErrorCode BSSCR_PCSetFromOptions_ScGtKG( PC pc )
{
PC_SC_GtKG ctx = (PC_SC_GtKG)pc->data;
PetscTruth ivalue, flg;
if(ctx->ksp_BBt!=PETSC_NULL) {
PetscOptionsGetTruth( PETSC_NULL, "-pc_gtkg_monitor", &ivalue, &flg );
BSSCR_PCScGtKGSetSubKSPMonitor( pc, ivalue );
}
PetscFunctionReturn(0);
}
/* ---- Exposed functions ---- */
PetscErrorCode BSSCR_PCCreate_ScGtKG( PC pc )
{
PC_SC_GtKG pc_data;
PetscErrorCode ierr;
/* create memory for ctx */
ierr = Stg_PetscNew( _PC_SC_GtKG,&pc_data);CHKERRQ(ierr);
/* init ctx */
pc_data->F = PETSC_NULL;
pc_data->Bt = PETSC_NULL;
pc_data->B = PETSC_NULL;
pc_data->BBt_has_cnst_nullspace = PETSC_FALSE;
pc_data->ksp_BBt = PETSC_NULL;
pc_data->monitor_activated = PETSC_FALSE;
pc_data->X1 = PETSC_NULL;
pc_data->X2 = PETSC_NULL;
pc_data->Y1 = PETSC_NULL;
pc_data->Y2 = PETSC_NULL;
pc_data->s = PETSC_NULL;
pc_data->t = PETSC_NULL;
pc_data->X = PETSC_NULL;
/* set ctx onto pc */
pc->data = (void*)pc_data;
ierr = PetscLogObjectMemory(pc,sizeof(_PC_SC_GtKG));CHKERRQ(ierr);
/* define operations */
pc->ops->setup = BSSCR_PCSetUp_ScGtKG;
pc->ops->view = BSSCR_PCView_ScGtKG;
pc->ops->destroy = BSSCR_PCDestroy_ScGtKG;
pc->ops->setfromoptions = BSSCR_PCSetFromOptions_ScGtKG;
pc->ops->apply = BSSCR_PCApply_ScGtKG;
pc->ops->applytranspose = BSSCR_PCApplyTranspose_ScGtKG;
PetscFunctionReturn(0);
}
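/*
   Typical wiring, sketched from the setter functions defined in this file (the actual call
   sequence used by the application lives elsewhere):
     BSSCR_PCCreate_ScGtKG( pc );                  // installs ops and allocates the context
     BSSCR_PCScGtKGSetOperators( pc, F,Bt,B,C );   // F and Bt are mandatory, B and C may be PETSC_NULL
     BSSCR_PCScGtKGUseStandardScaling( pc );       // fills the scaling vectors X1,X2,Y1,Y2
     BSSCR_PCScGtKGUseStandardBBtOperator( pc );   // builds B Y1 X1 Bt (- C) and its KSP
*/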
PetscErrorCode BSSCR_PCScGtKGGetScalings( PC pc, Vec *X1, Vec *X2, Vec *Y1, Vec *Y2 )
{
PC_SC_GtKG ctx = (PC_SC_GtKG)pc->data;
BSSCR_BSSCR_pc_error_ScGtKG( pc, __func__ );
if( X1 != PETSC_NULL ) { *X1 = ctx->X1; }
if( X2 != PETSC_NULL ) { *X2 = ctx->X2; }
if( Y1 != PETSC_NULL ) { *Y1 = ctx->Y1; }
if( Y2 != PETSC_NULL ) { *Y2 = ctx->Y2; }
PetscFunctionReturn(0);
}
/*
F & Bt must different to PETSC_NULL
B may be PETSC_NULL
C can be PETSC_NULL
*/
PetscErrorCode BSSCR_PCScGtKGSetOperators( PC pc, Mat F, Mat Bt, Mat B, Mat C )
{
PC_SC_GtKG ctx = (PC_SC_GtKG)pc->data;
BSSCR_BSSCR_pc_error_ScGtKG( pc, __func__ );
ctx->F = F;
ctx->Bt = Bt;
ctx->B = B;
ctx->C = C;
if( C != PETSC_NULL ) {
pc->ops->apply = BSSCR_BSSCR_PCApply_ScGtKG_C;
pc->ops->applytranspose = PETSC_NULL;
}
/* Create vectors */
if( ctx->s == PETSC_NULL ) { MatGetVecs( ctx->F, &ctx->s, PETSC_NULL ); }
if( ctx->X == PETSC_NULL ) { MatGetVecs( ctx->F, PETSC_NULL, &ctx->X ); }
if( ctx->t == PETSC_NULL ) { MatGetVecs( ctx->Bt, &ctx->t, PETSC_NULL ); }
if( ctx->F == PETSC_NULL ) { Stg_SETERRQ( PETSC_ERR_SUP, "gtkg: F not set" ); }
if( ctx->Bt == PETSC_NULL ) { Stg_SETERRQ( PETSC_ERR_SUP, "gtkg: Bt not set" ); }
if( ctx->X1 == PETSC_NULL ) { MatGetVecs( ctx->F, &ctx->X1, PETSC_NULL ); }
if( ctx->Y1 == PETSC_NULL ) { MatGetVecs( ctx->F, &ctx->Y1, PETSC_NULL ); }
if( ctx->X2 == PETSC_NULL ) { MatGetVecs( ctx->Bt, &ctx->X2, PETSC_NULL ); }
if( ctx->Y2 == PETSC_NULL ) { MatGetVecs( ctx->Bt, &ctx->Y2, PETSC_NULL ); }
VecSet( ctx->X1, 1.0 );
VecSet( ctx->Y1, 1.0 );
VecSet( ctx->X2, 1.0 );
VecSet( ctx->Y2, 1.0 );
PetscFunctionReturn(0);
}
PetscErrorCode BSSCR_PCScGtKGAttachNullSpace( PC pc )
{
PC_SC_GtKG ctx = (PC_SC_GtKG)pc->data;
MatNullSpace nsp;
BSSCR_BSSCR_pc_error_ScGtKG( pc, __func__ );
/* Attach a null space */
MatNullSpaceCreate( PETSC_COMM_WORLD, PETSC_TRUE, PETSC_NULL, PETSC_NULL, &nsp );
#if ( (PETSC_VERSION_MAJOR >= 3) && (PETSC_VERSION_MINOR <6) )
KSPSetNullSpace( ctx->ksp_BBt, nsp );
#else
Mat A;
KSPGetOperators(ctx->ksp_BBt,&A,NULL);//Note: DOES NOT increase the reference counts of the matrix, so you should NOT destroy them.
MatSetNullSpace( A, nsp);
#endif
/*
NOTE: This does NOT destroy the memory for nsp, it just decrements the nsp->refct, so that
the next time MatNullSpaceDestroy() is called, the memory will be released. The next time this
is called will be by KSPDestroy();
*/
MatNullSpaceDestroy( nsp );
PetscFunctionReturn(0);
}
PetscErrorCode BSSCR_PCScGtKGGetKSP( PC pc, KSP *ksp )
{
PC_SC_GtKG ctx = (PC_SC_GtKG)pc->data;
BSSCR_BSSCR_pc_error_ScGtKG( pc, __func__ );
if( ksp != PETSC_NULL ) {
(*ksp) = ctx->ksp_BBt;
}
PetscFunctionReturn(0);
}
PetscErrorCode BSSCR_PCScGtKGSetKSP( PC pc, KSP ksp )
{
PC_SC_GtKG ctx = (PC_SC_GtKG)pc->data;
BSSCR_BSSCR_pc_error_ScGtKG( pc, __func__ );
if( ctx->ksp_BBt != PETSC_NULL ) {
Stg_KSPDestroy(&ctx->ksp_BBt);
}
PetscObjectReference( (PetscObject)ksp );
ctx->ksp_BBt = ksp;
PetscFunctionReturn(0);
}
PetscErrorCode BSSCR_PCScGtKGSetSubKSPMonitor( PC pc, PetscTruth flg )
{
PC_SC_GtKG ctx = (PC_SC_GtKG)pc->data;
BSSCR_BSSCR_pc_error_ScGtKG( pc, __func__ );
ctx->monitor_activated = flg;
PetscFunctionReturn(0);
}
PetscErrorCode BSSCR_PCScGtKGUseStandardScaling( PC pc )
{
PC_SC_GtKG ctx = (PC_SC_GtKG)pc->data;
Mat K,G,D,C;
Vec rG;
PetscScalar rg2, rg, ra;
PetscInt N;
Vec rA, rC;
Vec L1,L2, R1,R2;
BSSCR_BSSCR_pc_error_ScGtKG( pc, __func__ );
L1 = ctx->X1;
L2 = ctx->X2;
R1 = ctx->Y1;
R2 = ctx->Y2;
rA = L1;
rC = L2;
K = ctx->F;
G = ctx->Bt;
D = ctx->B;
C = ctx->C;
VecDuplicate( rA, &rG );
/* Get magnitude of K */
MatGetRowMax( K, rA, PETSC_NULL );
VecSqrt( rA );
VecReciprocal( rA );
VecDot( rA,rA, &ra );
VecGetSize( rA, &N );
ra = PetscSqrtScalar( ra/N );
/* Get magnitude of G */
MatGetRowMax( G, rG, PETSC_NULL );
VecDot( rG, rG, &rg2 );
VecGetSize( rG, &N );
rg = PetscSqrtScalar(rg2/N);
// printf("rg = %f \n", rg );
VecSet( rC, 1.0/(rg*ra) );
Stg_VecDestroy(&rG );
VecCopy( L1, R1 );
VecCopy( L2, R2 );
PetscFunctionReturn(0);
}
/*
Builds
B Y1 X1 Bt
and creates a ksp when C=0, otherwise it builds
B Y1 X1 Bt - C
*/
PetscErrorCode BSSCR_PCScGtKGUseStandardBBtOperator( PC pc )
{
PC_SC_GtKG ctx = (PC_SC_GtKG)pc->data;
PetscReal fill;
Mat diag_mat,C;
Vec diag;
PetscInt M,N, m,n;
MPI_Comm comm;
PetscInt nnz_I, nnz_G;
MatType mtype;
const char *prefix;
Mat BBt;
KSP ksp;
PetscTruth ivalue, flg, has_cnst_nullsp;
BSSCR_BSSCR_pc_error_ScGtKG( pc, __func__ );
/* Assemble BBt */
MatGetSize( ctx->Bt, &M, &N );
MatGetLocalSize( ctx->Bt, &m, &n );
MatGetVecs( ctx->Bt, PETSC_NULL, &diag );
/* Define diagonal matrix Y1 X1 */
VecPointwiseMult( diag, ctx->Y1, ctx->X1 );
PetscObjectGetComm( (PetscObject)ctx->F, &comm );
MatCreate( comm, &diag_mat );
MatSetSizes( diag_mat, m,m , M, M );
#if (((PETSC_VERSION_MAJOR==3) && (PETSC_VERSION_MINOR>=3)) || (PETSC_VERSION_MAJOR>3) )
MatSetUp(diag_mat);
#endif
MatGetType( ctx->Bt, &mtype );
MatSetType( diag_mat, mtype );
MatDiagonalSet( diag_mat, diag, INSERT_VALUES );
/* Build operator B Y1 X1 Bt */
BSSCR_BSSCR_get_number_nonzeros_AIJ_ScGtKG( diag_mat, &nnz_I );
BSSCR_BSSCR_get_number_nonzeros_AIJ_ScGtKG( ctx->Bt, &nnz_G );
/*
Not sure the best way to estimate the fill factor.
BBt is a laplacian on the pressure space.
This might tell us something useful...
*/
fill = (PetscReal)(nnz_G)/(PetscReal)( nnz_I );
MatPtAP( diag_mat, ctx->Bt, MAT_INITIAL_MATRIX, fill, &BBt );
Stg_MatDestroy(&diag_mat );
Stg_VecDestroy(&diag );
C = ctx->C;
if( C !=PETSC_NULL ) {
MatAXPY( BBt, -1.0, C, DIFFERENT_NONZERO_PATTERN );
}
/* Build the solver */
KSPCreate( ((PetscObject)pc)->comm, &ksp );
Stg_KSPSetOperators( ksp, BBt, BBt, SAME_NONZERO_PATTERN );
PCGetOptionsPrefix( pc,&prefix );
KSPSetOptionsPrefix( ksp, prefix );
KSPAppendOptionsPrefix( ksp, "pc_gtkg_" ); /* -pc_GtKG_ksp_type <type>, -ksp_GtKG_pc_type <type> */
BSSCR_PCScGtKGSetKSP( pc, ksp );
BSSCR_MatContainsConstNullSpace( BBt, NULL, &has_cnst_nullsp );
if( has_cnst_nullsp == PETSC_TRUE ) {
BSSCR_PCScGtKGAttachNullSpace( pc );
}
PetscOptionsGetTruth( PETSC_NULL, "-pc_gtkg_monitor", &ivalue, &flg );
BSSCR_PCScGtKGSetSubKSPMonitor( pc, ivalue );
Stg_KSPDestroy(&ksp);
Stg_MatDestroy(&BBt);
PetscFunctionReturn(0);
}
#endif
|
From Coq Require Import
Vector
Fin
Program.Equality.
From Coq Require
List.
From CTree Require Import Core.Utils.
From Equations Require Import Equations.
Export VectorNotations.
Declare Scope fin_vector_scope.
Notation vec n T := (Vector.t T n).
Notation fin := Fin.t.
Equations vector_remove{A n}(v: vec (S n) A)(i: fin (S n)) : vec n A by wf n lt :=
vector_remove (h :: h' :: ts) (FS (FS j)) := h :: (vector_remove (h' :: ts) (FS j));
vector_remove (h :: h' :: ts) (FS F1) := h :: ts;
vector_remove (h :: h' :: ts) F1 := h' :: ts;
vector_remove (h::nil) F1 := @nil A.
Transparent vector_remove.
Equations vector_replace{A n}(v: vec n A)(i: fin n)(a: A): vec n A by wf n lt :=
vector_replace [] _ _ := [];
vector_replace (h :: h' :: ts) (FS (FS j)) a := h :: (vector_replace (h' :: ts) (FS j) a);
vector_replace (h :: h' :: ts) (FS F1) a := h :: a :: ts;
vector_replace (h :: h' :: ts) F1 a := a :: h' :: ts;
vector_replace [h] F1 a := [a].
Transparent vector_replace.
Notation "v '@' i ':=' a" := (vector_replace v i a) (at level 80): fin_vector_scope.
Notation "v '$' i" := (nth v i) (at level 80): fin_vector_scope.
Notation "v '--' i" := (vector_remove v i) (at level 80): fin_vector_scope.
(** Vector utils *)
Equations forallb {A}{m: nat}(f: A -> bool)(a: vec m A): bool :=
forallb _ [] := true;
forallb f (h :: ts) := andb (f h) (forallb f ts).
Transparent forallb.
Lemma forall_reflect: forall A (m: nat)(f: A -> bool) (l: vec m A),
forallb f l = true <-> Forall (fun x => f x = true) l.
Proof.
intros A m f l.
split; intros.
- dependent induction l.
+ econstructor.
+ econstructor; cbn in H; apply andb_prop in H;
destruct H.
* exact H.
* apply IHl; assumption.
- dependent induction H.
+ reflexivity.
+ cbn;
apply andb_true_intro; split; assumption.
Defined.
Fixpoint fin_all (n : nat) : list (fin n) :=
match n as n return list (fin n) with
| 0 => List.nil
| S n => List.cons (@F1 n) (List.map (@FS _) (fin_all n))
end%list.
Theorem fin_all_In : forall {n} (f : fin n),
List.In f (fin_all n).
Proof.
induction n; intros.
inversion f.
remember (S n). destruct f.
simpl; firstorder.
inversion Heqn0. subst.
simpl. right. apply List.in_map. auto.
Qed.
Theorem fin_case : forall n (f : fin (S n)),
f = F1 \/ exists f', f = FS f'.
Proof.
intros. generalize (fin_all_In f). intros.
destruct H; auto.
eapply List.in_map_iff in H. right. destruct H.
exists x. intuition.
Qed.
Fixpoint zip {A B : Type} {n : nat} (a : Vector.t A n) (b : Vector.t B n) : Vector.t (A * B) n :=
match a in Vector.t _ n return Vector.t B n -> Vector.t (A * B) n with
| ha :: ta => fun b => (ha, Vector.hd b) :: zip ta (Vector.tl b)
| [] => fun _ => []
end b.
Definition pairwise{A B n}(R: rel A B): rel (vec n A) (vec n B) :=
fun a b => @Forall (A * B) (fun '(a, b) => R a b) n (zip a b).
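(* For illustration: pairwise R [a1; a2] [b1; b2] holds exactly when R a1 b1 and R a2 b2 hold,
   i.e. the relation R is required component-wise on the zipped vectors. *)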
|
theory Optional
imports Prism Lens
begin
locale optional =
fixes get' :: "'s \<Rightarrow> 'a option" and set :: "'a \<Rightarrow> 's \<Rightarrow> 's"
assumes set_get'[simp]: "get' (set a s) = map_option (\<lambda>_. a) (get' s)"
assumes get'_set[simp]: "get' s = Some a \<Longrightarrow> set a s = s"
assumes set_set[simp]: "set a (set a' s) = set a s"
begin
definition modify' :: "('a \<Rightarrow> 'a) \<Rightarrow> 's \<Rightarrow> 's option" where
[optics]: "modify' f s = map_option (\<lambda>a. set (f a) s) (get' s)"
definition modify :: "('a \<Rightarrow> 'a) \<Rightarrow> 's \<Rightarrow> 's" where
[optics]: "modify f s = (case get' s of None \<Rightarrow> s | Some a \<Rightarrow> set (f a) s)"
lemma modify_id[simp]: "modify id = id"
unfolding modify_def[abs_def] id_def by (auto split: option.splits)
lemma modify_comp[simp]: "modify (f \<circ> g) = modify f \<circ> modify g"
unfolding modify_def[abs_def] comp_def by (auto split: option.splits)
end
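(* A concrete instance, for illustration: the head of a list gives an optional with
   get' []       = None          get' (x # xs) = Some x
   set a []      = []            set a (x # xs) = a # xs
   and this pair of functions satisfies the three locale assumptions above. *)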
context lens begin
definition [optics]: "get' = Some \<circ> get"
sublocale optional: optional get' set
by unfold_locales (auto simp: get'_def)
lemma modify_eq[simp]: "optional.modify = modify"
unfolding modify_def[abs_def] optional.modify_def[abs_def]
by (simp add: get'_def)
end
context prism begin
definition set where [optics]: "set a s = (case get' s of None \<Rightarrow> s | Some _ \<Rightarrow> back a)"
sublocale optional: optional get' set
by unfold_locales (auto simp: set_def split: option.splits)
lemma modify_eq[simp]: "optional.modify = modify"
unfolding modify_def[abs_def] optional.modify_def[abs_def] set_def
by (rule ext)+ (auto split: option.splits)
lemma modify'_eq[simp]: "optional.modify' = modify'"
unfolding modify'_def[abs_def] optional.modify'_def[abs_def] set_def comp_def
by (rule ext)+ (auto simp: map_option_case split: option.splits)
end
context iso begin
lemma get'_eq[simp]: "lens.get' = get'"
unfolding get'_def lens.get'_def by simp
lemma set_eq[simp]: "prism.set = set"
unfolding set_def[abs_def] prism.set_def[abs_def]
by (simp add: get'_def)
end
locale compose_optional_optional =
one: optional f g + two: optional h i for f :: "'s \<Rightarrow> 'a option" and g and h :: "'a \<Rightarrow> 'b option" and i
begin
definition [optics]: "get' s = Option.bind (f s) h"
definition set where [optics]: "set b = one.modify (i b)"
sublocale optional get' set
proof
fix b s
assume "get' s = Some b"
then obtain a where "f s = Some a" "h a = Some b"
unfolding get'_def
by (meson bind_eq_Some_conv)
thus "set b s = s"
unfolding set_def one.modify_def by simp
next
fix b s
show "get' (set b s) = map_option (\<lambda>_. b) (get' s)"
unfolding get'_def set_def one.modify_def
by (auto split: option.splits)
next
fix b b' s
show "set b (set b' s) = set b s"
unfolding set_def one.modify_def
by (auto split: option.splits)
qed
end
context compose_prism_prism begin
sublocale optional_optional: compose_optional_optional f "prism.set f g" h "prism.set h i" ..
lemma get'_eq[simp]: "optional_optional.get' = get'"
unfolding get'_def[abs_def] optional_optional.get'_def[abs_def] ..
lemma set_eq[simp]: "optional_optional.set = set"
unfolding set_def[abs_def] optional_optional.set_def[abs_def] one.optional.modify_def[abs_def]
unfolding get'_def back_def comp_apply
unfolding one.set_def two.set_def
by (rule ext)+ (auto split: option.splits)
lemma modify_eq[simp]: "optional_optional.modify = modify"
unfolding optional_optional.modify_def[abs_def] modify_def[abs_def]
unfolding get'_eq set_eq set_def
by (rule ext)+ (auto split: option.splits)
lemma modify'_eq[simp]: "optional_optional.modify' = modify'"
unfolding optional_optional.modify'_def[abs_def] modify'_def[abs_def]
unfolding set_eq get'_eq set_def
by (rule ext)+ (auto cong: map_option_cong)
end
context compose_lens_lens begin
sublocale optional_optional: compose_optional_optional "lens.get' f" g "lens.get' h" i ..
lemma get'_eq[simp]: "optional_optional.get' = get'"
unfolding get'_def[abs_def] optional_optional.get'_def[abs_def]
unfolding get_def one.get'_def two.get'_def
by auto
lemma set_eq[simp]: "optional_optional.set = set"
unfolding set_def[abs_def] optional_optional.set_def[abs_def]
by (rule ext)+ (simp add: one.modify_def)
lemma modify_eq[simp]: "optional_optional.modify = modify"
unfolding optional_optional.modify_def[abs_def] modify_def[abs_def]
unfolding get'_eq set_eq
unfolding set_def get'_def
by simp
end
end |
/**
* @file
* @copyright defined in eos/LICENSE.txt
*/
#include <algorithm>
#include <vector>
#include <iterator>
#include <boost/test/unit_test.hpp>
#include <eos/chain/chain_controller.hpp>
#include <eos/chain/exceptions.hpp>
#include <eos/chain/permission_object.hpp>
#include <eos/chain/key_value_object.hpp>
#include <eos/chain/producer_objects.hpp>
#include <eos/utilities/tempdir.hpp>
#include <fc/crypto/digest.hpp>
#include <boost/test/unit_test.hpp>
#include <boost/range/algorithm/find.hpp>
#include <boost/range/algorithm/find_if.hpp>
#include <boost/range/algorithm/permutation.hpp>
#include "../common/database_fixture.hpp"
using namespace eosio;
using namespace chain;
BOOST_AUTO_TEST_SUITE(special_account_tests)
//Check that the special accounts exist in the genesis state
BOOST_FIXTURE_TEST_CASE(accounts_exists, testing_fixture)
{ try {
Make_Blockchain(chain);
auto nobody = chain_db.find<account_object, by_name>(config::nobody_account_name);
BOOST_CHECK(nobody != nullptr);
const auto& nobody_active_authority = chain_db.get<permission_object, by_owner>(boost::make_tuple(config::nobody_account_name, config::active_name));
BOOST_CHECK_EQUAL(nobody_active_authority.auth.threshold, 0);
BOOST_CHECK_EQUAL(nobody_active_authority.auth.accounts.size(), 0);
BOOST_CHECK_EQUAL(nobody_active_authority.auth.keys.size(), 0);
const auto& nobody_owner_authority = chain_db.get<permission_object, by_owner>(boost::make_tuple(config::nobody_account_name, config::owner_name));
BOOST_CHECK_EQUAL(nobody_owner_authority.auth.threshold, 0);
BOOST_CHECK_EQUAL(nobody_owner_authority.auth.accounts.size(), 0);
BOOST_CHECK_EQUAL(nobody_owner_authority.auth.keys.size(), 0);
// TODO: check for anybody account
//auto anybody = chain_db.find<account_object, by_name>(config::anybody_account_name);
//BOOST_CHECK(anybody == nullptr);
auto producers = chain_db.find<account_object, by_name>(config::producers_account_name);
BOOST_CHECK(producers != nullptr);
auto& gpo = chain_db.get<global_property_object>();
const auto& producers_active_authority = chain_db.get<permission_object, by_owner>(boost::make_tuple(config::producers_account_name, config::active_name));
BOOST_CHECK_EQUAL(producers_active_authority.auth.threshold, config::producers_authority_threshold);
BOOST_CHECK_EQUAL(producers_active_authority.auth.accounts.size(), gpo.active_producers.size());
BOOST_CHECK_EQUAL(producers_active_authority.auth.keys.size(), 0);
std::vector<account_name> active_auth;
for(auto& apw : producers_active_authority.auth.accounts) {
active_auth.emplace_back(apw.permission.account);
}
std::vector<account_name> diff;
std::set_difference(
active_auth.begin(),
active_auth.end(),
gpo.active_producers.begin(),
gpo.active_producers.end(),
std::inserter(diff, diff.begin())
);
BOOST_CHECK_EQUAL(diff.size(), 0);
const auto& producers_owner_authority = chain_db.get<permission_object, by_owner>(boost::make_tuple(config::producers_account_name, config::owner_name));
BOOST_CHECK_EQUAL(producers_owner_authority.auth.threshold, 0);
BOOST_CHECK_EQUAL(producers_owner_authority.auth.accounts.size(), 0);
BOOST_CHECK_EQUAL(producers_owner_authority.auth.keys.size(), 0);
} FC_LOG_AND_RETHROW() }
//Check correct authority when a new set of producers is elected
BOOST_FIXTURE_TEST_CASE(producers_authority, testing_fixture)
{ try {
Make_Blockchain(chain)
Make_Account(chain, alice);
Make_Account(chain, bob);
Make_Account(chain, charlie);
Make_Account(chain, newproducer1);
Make_Account(chain, newproducer2);
Make_Account(chain, newproducer3);
chain.produce_blocks();
Make_Producer(chain, newproducer1);
Make_Producer(chain, newproducer2);
Make_Producer(chain, newproducer3);
Approve_Producer(chain, alice, newproducer1, true);
Approve_Producer(chain, bob, newproducer2, true);
Approve_Producer(chain, charlie, newproducer3, true);
chain.produce_blocks(config::blocks_per_round - chain.head_block_num() );
auto& gpo = chain_db.get<global_property_object>();
BOOST_REQUIRE(boost::find(gpo.active_producers, "newproducer1") != gpo.active_producers.end());
BOOST_REQUIRE(boost::find(gpo.active_producers, "newproducer2") != gpo.active_producers.end());
BOOST_REQUIRE(boost::find(gpo.active_producers, "newproducer3") != gpo.active_producers.end());
const auto& producers_active_authority = chain_db.get<permission_object, by_owner>(boost::make_tuple(config::producers_account_name, config::active_name));
BOOST_CHECK_EQUAL(producers_active_authority.auth.threshold, config::producers_authority_threshold);
BOOST_CHECK_EQUAL(producers_active_authority.auth.accounts.size(), gpo.active_producers.size());
BOOST_CHECK_EQUAL(producers_active_authority.auth.keys.size(), 0);
std::vector<account_name> active_auth;
for(auto& apw : producers_active_authority.auth.accounts) {
active_auth.emplace_back(apw.permission.account);
}
std::vector<account_name> diff;
std::set_difference(
active_auth.begin(),
active_auth.end(),
gpo.active_producers.begin(),
gpo.active_producers.end(),
std::inserter(diff, diff.begin())
);
BOOST_CHECK_EQUAL(diff.size(), 0);
const auto& producers_owner_authority = chain_db.get<permission_object, by_owner>(boost::make_tuple(config::producers_account_name, config::owner_name));
BOOST_CHECK_EQUAL(producers_owner_authority.auth.threshold, 0);
BOOST_CHECK_EQUAL(producers_owner_authority.auth.accounts.size(), 0);
BOOST_CHECK_EQUAL(producers_owner_authority.auth.keys.size(), 0);
} FC_LOG_AND_RETHROW() }
BOOST_AUTO_TEST_SUITE_END()
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj34eqsynthconj4 : forall (lv0 : natural), (@eq natural (Succ lv0) (plus (Succ Zero) lv0)).
Admitted.
QuickChick conj34eqsynthconj4.
|
(*****************************************************************************
* Copyright (c) 2005-2010 ETH Zurich, Switzerland
* 2008-2015 Achim D. Brucker, Germany
* 2009-2017 Université Paris-Sud, France
* 2015-2017 The University of Sheffield, UK
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************)
subsection\<open>Packets and Networks\<close>
theory
NetworkCore
imports
Main
begin
text\<open>
In networks based, e.g., on TCP/IP, a message from A to B is encapsulated in \emph{packets}, which
contain the content of the message and routing information. The routing information mainly
consists of the source and the destination address of the packet.
In the case of stateless packet filters, a firewall bases its decision solely upon this routing
information; in the stateful case, it may also inspect the content. Thus, we model a packet as a
four-tuple of the mentioned elements, together with an id field.
\<close>
text\<open>The ID is an integer:\<close>
type_synonym id = int
text\<open>
To enable different representations of addresses (e.g. IPv4 and IPv6, with or without ports),
we model them as an unconstrained type class and directly provide several instances:
\<close>
class adr
type_synonym '\<alpha> src = "'\<alpha>"
type_synonym '\<alpha> dest = "'\<alpha>"
instance int ::adr ..
instance nat ::adr ..
instance "fun" :: (adr,adr) adr ..
instance prod :: (adr,adr) adr ..
text\<open>
The content is also specified with an unconstrained generic type:
\<close>
type_synonym '\<beta> content = "'\<beta>"
text \<open>
For applications where the concrete representation of the content field does not matter (usually
the case for stateless packet filters), we provide a default type which can be used in those
cases:
\<close>
datatype DummyContent = data
text\<open>Finally, a packet is:\<close>
type_synonym ('\<alpha>,'\<beta>) packet = "id \<times> '\<alpha> src \<times> '\<alpha> dest \<times> '\<beta> content"
text\<open>
Protocols (e.g. http) are not modelled explicitly. In the case of stateless packet filters, they
are only visible via the destination port of a packet, which is modelled as part of the address.
Additionally, stateful firewalls often determine the protocol from the content of a packet.
\<close>
definition src :: "('\<alpha>::adr,'\<beta>) packet \<Rightarrow> '\<alpha>"
where "src = fst o snd "
text\<open>
Port numbers (which are part of an address) are also modelled in a generic way. The integers and
the naturals are typical representations of port numbers.
\<close>
class port
instance int ::port ..
instance nat :: port ..
instance "fun" :: (port,port) port ..
instance "prod" :: (port,port) port ..
text\<open>
A packet therefore has two type parameters, the first being the address, the second the content. For
the sake of simplicity, we do not allow different address representation formats for the source
and the destination of a packet.
To access the different parts of a packet directly, we define a couple of projectors (a small
illustrative example follows their definitions):
\<close>
definition id :: "('\<alpha>::adr,'\<beta>) packet \<Rightarrow> id"
where "id = fst"
definition dest :: "('\<alpha>::adr,'\<beta>) packet \<Rightarrow> '\<alpha> dest"
where "dest = fst o snd o snd"
definition content :: "('\<alpha>::adr,'\<beta>) packet \<Rightarrow> '\<beta> content"
where "content = snd o snd o snd"
datatype protocol = tcp | udp
lemma either: "\<lbrakk>a \<noteq> tcp;a \<noteq> udp\<rbrakk> \<Longrightarrow> False"
by (case_tac "a",simp_all)
lemma either2[simp]: "(a \<noteq> tcp) = (a = udp)"
by (case_tac "a",simp_all)
lemma either3[simp]: "(a \<noteq> udp) = (a = tcp)"
by (case_tac "a",simp_all)
text\<open>
The following two constants give the source and destination port number of a packet. Address
representations using port numbers need to provide definitions for these constants.
\<close>
consts src_port :: "('\<alpha>::adr,'\<beta>) packet \<Rightarrow> '\<gamma>::port"
consts dest_port :: "('\<alpha>::adr,'\<beta>) packet \<Rightarrow> '\<gamma>::port"
consts src_protocol :: "('\<alpha>::adr,'\<beta>) packet \<Rightarrow> protocol"
consts dest_protocol :: "('\<alpha>::adr,'\<beta>) packet \<Rightarrow> protocol"
text\<open>A subnetwork (or simply a network) is a set of sets of addresses.\<close>
type_synonym '\<alpha> net = "'\<alpha> set set"
text\<open>The relation \<open>in_subnet\<close> (\<open>\<sqsubset>\<close>) checks if an address is in a specific network.\<close>
definition
in_subnet :: "'\<alpha>::adr \<Rightarrow> '\<alpha> net \<Rightarrow> bool" (infixl "\<sqsubset>" 100) where
"in_subnet a S = (\<exists> s \<in> S. a \<in> s)"
text\<open>The following lemmas will be useful later.\<close>
lemma in_subnet:
"(a, e) \<sqsubset> {{(x1,y). P x1 y}} = P a e"
by (simp add: in_subnet_def)
lemma src_in_subnet:
"src(q,(a,e),r,t) \<sqsubset> {{(x1,y). P x1 y}} = P a e"
by (simp add: in_subnet_def in_subnet src_def)
lemma dest_in_subnet:
"dest (q,r,((a),e),t) \<sqsubset> {{(x1,y). P x1 y}} = P a e"
by (simp add: in_subnet_def in_subnet dest_def)
text\<open>
Address models should provide a definition for the following constant, returning a network
consisting of the input address only.
\<close>
consts subnet_of :: "'\<alpha>::adr \<Rightarrow> '\<alpha> net"
lemmas packet_defs = in_subnet_def id_def content_def src_def dest_def
end
|
Our main clinical psychology offices are located at Brinkworth House, in the village of Brinkworth, a few miles west of Swindon.
We are situated on the B4042 between Junction 16 (Wootton Bassett) of the M4 and Malmesbury. Brinkworth House is opposite the Golf Club on the Malmesbury side of the village. Travelling from the East, we are on the left, about a mile after the 'Three Crowns' pub. Travelling from the West, we are on the right, about a mile after the Brinkworth village sign. Brinkworth House shares its entrance with the Walled Garden Nursery, so you can also look out for their sign.
We are easily accessible from the North Wiltshire towns of Swindon, Malmesbury, Wootton Bassett, Calne, Chippenham, Corsham, Melksham, Devizes, Cricklade and Marlborough. It is also a short journey from the south Gloucestershire towns of Tetbury, Lechlade, Fairford and Cirencester; Oxfordshire towns such as Faringdon and Wantage; and from west Berkshire towns near Hungerford.
If you are travelling using SatNav, the postcode is SN15 5DF.
Alternatively, click here for a map and directions from wherever you are. |
If $f$ is holomorphic on an open set $M$ and $f$ does not take on the values $a$ and $b$, then either $f$ or $1/f$ has a limit at $z$. |
Require Export language.
(** Reduce existing [Included] definitions, both in the context and in the goal. *)
Ltac red_incl :=
repeat (match goal with
| H : Included _ _ _ |- _ => red in H
| |- Included _ _ _ => red
end).
(** Solve the trivial case of having an inductive type with no constructors in the
context. *)
Ltac solve_empty :=
match goal with
| H : _ ∈ ∅ |- _ => inv H
end.
Ltac solve_empty_union :=
match goal with
| H : _ ∈ (∅ ∪ _) |- _ => destruct H;try solve_empty
| H : _ ∈ (_ ∪ ∅) |- _ => destruct H;try solve_empty
end.
Ltac solve_trivial_union :=
match goal with
| H : ?w ∈ ?L |- ?w ∈ (_ ∪ ?L) => right;auto
| H : ?w ∈ ?L |- ?w ∈ (?L ∪ _) => left;auto
| H : ?w ∈ ?L |- ?w ∈ (_ ∪ (_ ∪ ?L)) => right;right;auto
| H : ?w ∈ ?L |- ?w ∈ (_ ∪ (?L ∪ _)) => right;left;auto
| H : ?w ∈ ?L |- ?w ∈ ((?L ∪ _) ∪ _) => left;left;auto
| H : ?w ∈ ?L |- ?w ∈ ((_ ∪ ?L) ∪ _) => left;right;auto
| H : ?w ∈ (?L ∪ _) |- ?w ∈ ?L => destruct H;auto
| H : ?w ∈ (_ ∪ ?L) |- ?w ∈ ?L => destruct H;auto
end.
(** Simple tactic that can be applied to split an equivalence corresponding to
the equality of two languages. *)
Ltac split_eq := split;unfold Included;intros.
Generalizable All Variables.
(** Some extra conditions. *)
Lemma neg_not_nil_aux_1 :
forall l1 l2,
~[] ∈ (l1 ∪ l2) -> ~[] ∈ l1 /\ ~[] ∈ l2.
Proof.
intros;split;intro;apply H;solve_trivial_union.
Qed.
Lemma empty_not_nil : {ε} !∼ ∅.
Proof.
red;intro H.
destruct H as [H0 H1].
red_incl.
assert([] ∈ {ε}) by eauto.
apply H0 in H;solve_empty.
Qed.
(** Properties of the neutral and absorbing elements of languages. *)
Section NeutralAndAbsorventLemmas.
Variable l : language.
Lemma conc_l_neutral_right : l • {ε} ∼ l.
Proof.
split_eq.
destruct H as [w0 w1 H H0].
destruct H0;rewrite <- app_nil_end;auto.
rewrite app_nil_end;constructor;auto.
Qed.
Lemma conc_l_neutral_left : {ε} • l ∼ l.
Proof.
split_eq.
do 2 destruct H;auto.
change x with (nil++x)%list;constructor;auto.
Qed.
Lemma conc_l_empty_left : ∅ • l ∼ ∅.
Proof.
split_eq;destruct H;solve_empty.
Qed.
Lemma conc_l_empty_right : l • ∅ ∼ ∅.
Proof.
split_eq.
destruct H as [x x' X H'];solve_empty.
solve_empty.
Qed.
Lemma union_l_neutral_left : ∅ ∪ l ∼ l.
Proof.
split_eq.
solve_empty_union;trivial.
solve_trivial_union.
Qed.
Lemma union_l_neutral_right : l ∪ ∅ ∼ l.
Proof.
split_eq.
solve_empty_union;auto.
solve_trivial_union.
Qed.
End NeutralAndAbsorventLemmas.
Hint Resolve
@conc_l_empty_right
@conc_l_empty_left
@conc_l_neutral_left
@conc_l_neutral_right
@union_l_neutral_left
@union_l_neutral_right : lgs.
(** Tactic to solve simple concatenation with the neutral element. *)
Ltac simpl_trivial_concat :=
match goal with
| H : _ ∈ ({ε} • ?L) |- _ => rewrite conc_l_neutral_left in H
| H : _ ∈ (?L • {ε}) |- _ => rewrite conc_l_neutral_right in H
| |- _ ∈ ({ε} • ?L) => rewrite conc_l_neutral_left
| |- _ ∈ (?L • {ε}) => rewrite conc_l_neutral_right
end.
Hint Rewrite
@conc_l_empty_right
@conc_l_empty_left
@conc_l_neutral_left
@conc_l_neutral_right
@union_l_neutral_left
@union_l_neutral_right : lgs.
Section AssociativityLemmas.
Variables l1 l2 l3: language.
Lemma conc_l_assoc : (l1 • l2) • l3 ∼ l1 • (l2 • l3).
Proof.
split_eq;
[ destruct H as [x x' [x'' H H''] H'] |
destruct H as [x x' H [x'' H' H'']] ];
[ rewrite app_ass | rewrite <- app_ass];
repeat constructor;assumption.
Qed.
Lemma union_l_assoc : (l1 ∪ l2) ∪ l3 ∼ l1 ∪ (l2 ∪ l3).
Proof.
split_eq.
destruct H as [x [H1 | H1'] | x H2];solve_trivial_union.
destruct H as [ x H1 | x [H1 | H2]];solve_trivial_union.
Qed.
End AssociativityLemmas.
Hint Resolve
@conc_l_assoc
@union_l_assoc : lgs.
(** Distributivity of concatenation over union. *)
Section DistributivityLemmas.
Variables l1 l2 l3 : language.
Lemma conc_l_distr_right : l1 • (l2 ∪ l3) ∼ (l1 • l2) ∪ (l1 • l3).
Proof.
split_eq.
destruct H as [x x' H [x'' H'|x'' H']];
[left|right];auto with lgs.
destruct H as [x [x' x'' H H'] | x [x' x'' H H']];clear x;
constructor;auto;solve_trivial_union.
Qed.
Lemma conc_l_distr_left : (l1 ∪ l2) • l3 ∼ (l1 • l3) ∪ (l2 • l3).
Proof.
split_eq.
destruct H as [x x' [x'' H | x'' ] H'];
[left|right];auto with lgs.
destruct H as [x [x' x'' H H']|x [x' x'' H H']];
constructor;auto;solve_trivial_union.
Qed.
End DistributivityLemmas.
Hint Resolve
@conc_l_distr_left
@conc_l_distr_right : lgs.
(** Commutativity and idempotence of union. *)
Section CommutativityAndIdempotenceLemmas.
Variable l1 l2 : language.
Lemma union_l_comm : l1 ∪ l2 ∼ l2 ∪ l1.
Proof.
split_eq;
destruct H as [x H1 | x H2];
solve [constructor 1;auto | constructor 2;auto].
Qed.
Lemma union_l_idemp : l1 ∪ l1 ∼ l1.
Proof.
split_eq;
[inversion_clear H|constructor];auto.
Qed.
End CommutativityAndIdempotenceLemmas.
Hint Resolve
@union_l_comm
@union_l_idemp : lgs.
Hint Rewrite
union_l_idemp : lgs.
Ltac unfold_lleq :=
match goal with
| H : _ ≤_ |- _ => unfold lleq in H
| |- _ ≤ _ => unfold lleq
| _ => idtac
end.
Section LessOrEqualRelationLemmas.
Variable l1 l2 l3 : language.
Lemma lleq_Included_equiv : l1 ≤ l2 <-> l1 ⊆ l2.
Proof.
split;intros;unfold_lleq;intros.
red;intros.
apply H;solve_trivial_union.
red_incl;split_eq.
solve_trivial_union.
solve_trivial_union.
Qed.
Lemma lleq_refl : l1 ≤ l1.
Proof.
unfold lleq;auto with lgs.
Qed.
Lemma lleq_trans : l1 ≤ l2 -> l2 ≤ l3 -> l1 ≤ l3.
Proof.
unfold lleq;intros;
rewrite <- H0, <- union_l_assoc, H;reflexivity.
Qed.
Lemma mon_concat : l1 ≤ l2 -> (l1 • l3) ≤ (l2 • l3).
Proof.
unfold lleq;intros.
rewrite <- conc_l_distr_left, H;reflexivity.
Qed.
Lemma eq_to_leq : l1 ∼ l2 -> l1 ≤ l2.
Proof.
intro H;rewrite H;unfold lleq;auto with lgs.
Qed.
Lemma leq_to_eq : l1 ≤ l2 /\ l2 ≤ l1 -> l1 ∼ l2.
Proof.
unfold lleq;intros.
destruct H;rewrite <- H, <- H0 at 1.
auto with lgs.
Qed.
End LessOrEqualRelationLemmas.
Hint Resolve
@lleq_refl
@lleq_refl
@lleq_trans
@mon_concat
@eq_to_leq : lgs.
Global Instance lleq_relf_m : Reflexive lleq.
Proof.
unfold Reflexive;intro;auto with lgs.
Qed.
Global Instance lleq_trans_m : Transitive lleq.
Proof.
unfold Transitive;intros;eapply lleq_trans;eauto.
Qed.
(** We now enumerate a set of intermediary lemmas that are used to
establish the standard properties of Kleene's star operator.*)
Section LLeqStarLemmas.
Lemma star_l_contains_eps :
forall l,
{ε} ≤ l∗.
Proof.
split_eq;
solve_trivial_union.
constructor 1 with 0;auto.
Qed.
Lemma star_l_union_l_comm :
forall l,
l ∪ l∗ ∼ l∗.
Proof.
split_eq;solve_trivial_union;econstructor 1 with 1;simpl;
rewrite app_nil_end;auto with lgs.
Qed.
Lemma star_plus_on : forall l, {ε} ∪ l∗ ∼ l∗.
Proof.
intro;apply star_l_contains_eps.
Qed.
(* Star of empty is the epsilon language *)
Lemma empty_star_is_epsilon : ∅ ∗ ∼ {ε}.
Proof.
split_eq.
destruct H.
induction n;auto with lang.
simpl in H.
destruct H;try solve_empty.
constructor 1 with (n:=0);auto with lgs.
Qed.
(* Star of epsilon language is the epsilon language itself *)
Lemma id_empty_star_is_epsilon : {ε} ∗ ∼ {ε}.
Proof.
split_eq.
destruct H.
induction n;auto.
simpl in H;destruct H.
apply IHn;auto.
destruct H.
assumption.
constructor 1 with (n:=0);simpl;assumption.
Qed.
Lemma lang_in_star_to_n : forall w r,
w ∈ (r ∗) -> exists n, w ∈ (r •• n).
Proof.
intros.
destruct H as [n w H1];
exists n;exact H1.
Qed.
(* Any language is contained in its kleene closure *)
Lemma lang_in_star : forall l, l ≤ l∗.
Proof.
intro.
unfold lleq;auto with lang.
apply star_l_union_l_comm.
Qed.
Lemma star_l_ConL_comm : forall l,
l • l∗ ≤ l∗.
Proof.
split_eq;solve_trivial_union.
destruct H as [x1 x2 H1 [n x3 H2]].
constructor 1 with (n:=S n);simpl;auto with lgs.
Qed.
Lemma inProdProdStar : forall l,
l • l∗ ≤ l∗ • l∗.
Proof.
split_eq.
solve_trivial_union.
destruct H.
constructor;auto.
constructor 1 with (n:=1);simpl.
rewrite (app_nil_end w1).
constructor;[auto|constructor].
solve_trivial_union.
Qed.
Lemma inProdProdInv : forall l, l∗ • l ≤ l ∗ • l∗.
Proof.
split_eq.
solve_trivial_union.
do 2 destruct H.
induction n.
constructor;[constructor 1 with (n:=0)|constructor 1 with (n:=1)];auto.
rewrite (app_nil_end w2).
constructor;auto.
constructor.
constructor;
[constructor 1 with (n:=S n)|constructor 1 with (n:=1)];simpl;auto.
rewrite (app_nil_end w2);constructor;auto.
solve_trivial_union.
Qed.
Lemma plus_l_conc : forall l n m,
l •• (n+m) ∼ (l •• n) • (l •• m).
Proof.
intros l n;revert n l.
induction n;simpl;split_eq.
replace x with (nil++x)%list;auto.
constructor;[constructor|auto].
destruct H.
destruct H;simpl;auto.
destruct H.
eapply conc_l_assoc.
constructor;auto.
eapply IHn;auto.
apply conc_l_assoc in H.
destruct H.
constructor;auto.
eapply IHn.
assumption.
Qed.
Lemma assoc_succ_conc : forall l n m,
l • (l •• (n+m)) ∼ (l •• (S n)) • (l •• m).
Proof.
induction n;
split_eq.
simpl in * |- *.
destruct H.
constructor;auto with lang.
apply (conc_l_neutral_right _);auto.
simpl.
destruct H.
constructor;auto.
apply (conc_l_neutral_right l);auto.
simpl.
destruct H.
destruct (conc_l_assoc l (l • (l••n)) (l••m)).
apply H2.
constructor;auto.
clear H1;clear H2.
simpl in H0.
destruct (IHn m).
apply H1 in H0.
auto.
simpl in H.
apply conc_l_assoc in H.
simpl.
destruct H.
constructor;auto.
apply (IHn m).
simpl.
assumption.
Qed.
Lemma nconcat_invert_order : forall n l,
l •• (S n) ∼ (l••n)•l.
Proof.
induction n;
simpl;intros.
rewrite conc_l_neutral_left.
rewrite conc_l_neutral_right.
reflexivity.
simpl in IHn.
rewrite IHn at 1.
rewrite conc_l_assoc.
reflexivity.
Qed.
Lemma star_prod_eq_star : forall l,
l∗ ∼ l∗ • l∗.
Proof.
split_eq.
rewrite (app_nil_end x);constructor;try assumption.
constructor 1 with (n:=0);constructor.
do 2 destruct H.
destruct H0.
constructor 1 with (n:=n+n0).
induction n.
destruct H.
simpl;assumption.
simpl in H.
simpl.
destruct H.
rewrite app_ass.
constructor;auto.
eapply plus_l_conc.
constructor;assumption.
Qed.
Lemma power_of_star_lang : forall n l,
l∗ •• n ≤ l∗.
Proof.
induction n;simpl;split_eq.
apply star_plus_on in H.
assumption.
constructor 2;assumption.
destruct H.
destruct H.
eapply star_prod_eq_star.
constructor;auto.
eapply IHn.
constructor;assumption.
assumption.
constructor 2.
assumption.
Qed.
Lemma double_star_in_star : forall l,
l∗∗ ≤ l∗.
Proof.
split_eq.
destruct H;auto.
destruct H.
apply power_of_star_lang with n.
constructor 1.
apply H.
constructor 2;auto.
Qed.
Lemma double_star_eq_star : forall l,
l∗∗ ∼ l ∗.
Proof.
split_eq.
eapply double_star_in_star.
left.
auto.
apply double_star_in_star in H.
destruct H.
assumption.
constructor 1 with (n:=1).
simpl.
eapply conc_l_neutral_right.
assumption.
Qed.
End LLeqStarLemmas.
Hint Resolve
@star_l_union_l_comm
@star_plus_on
@empty_star_is_epsilon
@id_empty_star_is_epsilon
@lang_in_star
@lang_in_star_to_n
@star_l_ConL_comm
@inProdProdStar
@inProdProdInv
@plus_l_conc
@assoc_succ_conc
@nconcat_invert_order
@star_prod_eq_star
@power_of_star_lang
@double_star_in_star : lgs.
Section KleeneAlgebraStarAxiom_1.
Lemma kat_ax_1_aux_1 : forall l,
{ε} ∪ (l • l∗) ≤ l∗.
Proof.
split_eq.
destruct H as [ w [ w1 H1| w1 H2] | w H3];auto with lang.
constructor 1 with (n:=0);auto.
eapply star_prod_eq_star.
destruct H2.
constructor.
constructor 1 with (n:=1);simpl;auto.
rewrite (app_nil_end w1).
constructor;auto.
assumption.
solve_trivial_union.
Qed.
Lemma kat_ax_1_aux_2 : forall l,
l ∗ ≤ {ε} ∪ (l • l∗).
Proof.
split_eq.
do 2 destruct H.
revert n H.
induction n;intros.
constructor;auto.
simpl in H.
constructor 2.
destruct H.
constructor;auto.
constructor 1 with (n:=n);auto.
constructor 1;auto.
constructor 2;auto.
destruct H.
constructor 2;constructor;auto.
constructor 2;constructor 2;auto.
Qed.
(** First Kleene's star property *)
Lemma kat_ax_1_lang : forall l,
{ε} ∪ (l • l∗) ∼ l∗.
Proof.
intros;apply leq_to_eq;intros.
split.
eapply kat_ax_1_aux_1.
apply kat_ax_1_aux_2.
Qed.
End KleeneAlgebraStarAxiom_1.
Hint Resolve
@kat_ax_1_aux_1
@kat_ax_1_aux_2
@kat_ax_1_lang : lgs.
Section KleeneAlgebraStarAxiom_2.
Lemma kat_ax2_aux_1 : forall l,
{ε} ∪ (l∗ • l) ≤ l ∗.
Proof.
split_eq.
do 2 destruct H.
constructor 1 with (n:=0);auto.
destruct H.
eapply star_prod_eq_star.
constructor;auto.
constructor 1 with (n:=1);simpl.
rewrite (app_nil_end w2).
constructor;auto.
constructor 1 with (n:=n);auto.
solve_trivial_union.
Qed.
Lemma kat_ax2_aux_2_a : forall n l w,
w ∈ ((l •• n) • l) -> w ∈ (l ∗ • l).
Proof.
induction n;intros.
simpl in H.
destruct H.
constructor;auto.
constructor 1 with (n:=0);simpl;auto.
destruct H.
constructor;auto.
constructor 1 with (n:=S n);auto.
Qed.
Lemma kat_ax2_aux_2 : forall l,
l ∗ ≤ {ε} ∪ (l ∗ • l).
Proof.
split_eq.
destruct H.
destruct H.
induction n.
constructor;auto.
constructor 2.
apply nconcat_invert_order in H.
destruct H.
constructor.
constructor 1 with (n:=n).
auto.
auto.
destruct H;
[constructor|constructor 2];auto.
constructor 2;auto.
Qed.
(** Second Kleene's star property *)
Lemma kat_ax_2_lang : forall l,
{ε} ∪ (l ∗ • l) ∼ l∗.
Proof.
intro.
eapply leq_to_eq.
split.
apply kat_ax2_aux_1.
apply kat_ax2_aux_2.
Qed.
End KleeneAlgebraStarAxiom_2.
Hint Resolve
@kat_ax2_aux_2_a
@kat_ax2_aux_2
@kat_ax_2_lang : lgs.
(** Remaining axioms for Kleene algebra *)
Section KleeneAlgebraStarAxiom_3.
Variables l1 l2 : language.
(* The closure contains all the powers of the solution *)
Lemma forall_n_closure_lang :
forall l,
(forall n:nat, (l1••n) • l2 ≤ l) -> l1 ∗ • l2 ≤ l.
Proof.
split_eq.
destruct H0;auto.
destruct H0.
destruct H0.
generalize (H n);clear H;intro H.
apply H.
constructor.
constructor;auto.
constructor 2;auto.
Qed.
Lemma kat_ax3_aux_4 :
forall l,
l2 ≤ l /\ l1 • l ≤ l ->
(forall n, (l1••n) • l2 ≤ l).
Proof.
intuition.
induction n.
simpl.
unfold lleq in * |- *.
rewrite conc_l_neutral_left.
assumption.
simpl.
unfold lleq in * |- *.
rewrite conc_l_assoc.
eapply(lleq_trans (l1 • ((l1 •• n) • l2)) (l1 • l) l);auto.
split_eq.
destruct H;auto.
destruct H;auto.
constructor;auto.
apply IHn.
constructor.
assumption.
constructor 2;auto.
Qed.
Lemma kat_ax3_aux_5 :
forall x,
(l1 • x) ∪ l2 ≤ x -> l2 ≤ x /\ l1 • x ≤ x.
Proof.
split;
split_eq.
destruct H0;auto.
apply H.
constructor.
constructor 2;auto.
constructor 2;auto.
destruct H0.
apply H;repeat constructor;auto.
auto.
constructor 2;auto.
Qed.
(** Third Kleene's star property *)
Lemma kat_ax_3_lang :
forall x,
(l1 • x) ∪ l2 ≤ x -> l1 ∗ • l2 ≤ x.
Proof.
intros x H1.
apply forall_n_closure_lang.
apply kat_ax3_aux_4.
apply kat_ax3_aux_5.
auto.
Qed.
End KleeneAlgebraStarAxiom_3.
Hint Resolve
@kat_ax_3_lang : lgs.
Section KleeneAlgebraStarAxiom_4.
Variables l1 l2 : language.
Lemma forall_n_closure_lang_inv :
forall l,
(forall n, l2 • (l1 •• n) ≤ l) ->
l2 • l1∗ ≤ l.
Proof.
split_eq.
destruct H0;auto.
destruct H0.
destruct H1.
eapply H.
constructor;constructor;auto.
apply H1.
constructor 2;auto.
Qed.
Lemma kat_ax4_aux_4 :
forall x,
l2 ≤ x /\ x • l1 ≤ x ->
(forall n, l2 • (l1 •• n) ≤ x).
Proof.
intuition.
induction n.
simpl.
unfold lleq.
rewrite conc_l_neutral_right;auto.
unfold lleq.
rewrite nconcat_invert_order.
rewrite <- conc_l_assoc.
apply (lleq_trans ((l2 • (l1 •• n)) • l1) (x • l1) x);auto.
split_eq.
destruct H;auto.
destruct H.
constructor.
apply IHn.
constructor;auto.
auto.
constructor 2;auto.
Qed.
Lemma kat_ax4_aux_5 :
forall x,
(x • l1) ∪ l2 ≤ x -> l2 ≤ x /\ x • l1 ≤ x.
Proof.
split;split_eq.
destruct H0;auto.
apply H.
constructor;constructor 2;auto.
constructor 2;auto.
destruct H0;auto.
apply H;constructor;constructor;auto.
constructor 2;auto.
Qed.
(** Fourth Kleene's star property *)
Lemma kat_ax_4_lang :
forall x,
(x • l1) ∪ l2 ≤ x -> l2 • l1∗ ≤ x.
Proof.
intros x H1.
apply forall_n_closure_lang_inv.
apply kat_ax4_aux_4.
apply kat_ax4_aux_5.
assumption.
Qed.
End KleeneAlgebraStarAxiom_4.
Hint Resolve @kat_ax_4_lang : lgs.
Section KaBissimulation_Auxiliary.
Lemma KA_bissimulation_aux_1 :
forall a b x,
(x ∪ a • x • b ∗) ≤ (x∪x • b • b∗) ->
(a∗ • x) ≤ (x • b∗).
Proof.
intros.
apply kat_ax_3_lang.
rewrite union_l_comm.
cut(x • b∗ ∼ x∪x • (b • b∗)).
intros.
rewrite H0 at 2.
repeat rewrite <- conc_l_assoc;auto.
rewrite <- (conc_l_neutral_right x) at 2.
rewrite <- conc_l_distr_right.
apply conc_l_m;auto with lgs.
symmetry;auto with lgs.
Qed.
Lemma KA_Bissimulation_aux_2 :
forall a b x,
a • x ≤ x • b ->
x ∪ a • x • b ∗ ≤ x ∪ x • b • b∗.
Proof.
intros.
apply union_l_lleq.
apply lleq_refl.
apply conc_l_lleq;
[assumption|apply lleq_refl].
Qed.
(* Rest of adapted code *)
Lemma KA_Bissimulation_Imply_1 :
forall a b x,
a • x ≤ x • b ->
a ∗ • x ≤ x • b ∗.
Proof.
intros.
apply KA_bissimulation_aux_1.
apply KA_Bissimulation_aux_2.
assumption.
Qed.
Lemma Bissimulation_Imply_2 :
forall a b x,
x • b ≤ a • x ->
x • b∗ ≤ a∗ • x.
Proof.
intros.
apply kat_ax_4_lang.
(*pattern (a[*]) at 2.*)
rewrite <- kat_ax_2_lang at 2.
rewrite conc_l_distr_left.
rewrite conc_l_neutral_left.
rewrite union_l_comm.
apply union_l_lleq.
apply lleq_refl.
do 2 rewrite conc_l_assoc.
apply conc_l_lleq.
apply lleq_refl.
assumption.
Qed.
End KaBissimulation_Auxiliary.
Hint Resolve
@KA_Bissimulation_Imply_1
@Bissimulation_Imply_2 : lgs.
Section KaDenesting_Auxiliary.
Fact Denesting_aux_1 :
forall a b,
{ε} ≤ a∗ • (b • a∗)∗.
Proof.
intros.
rewrite <- (conc_l_neutral_left {ε}) at 1.
apply conc_l_lleq;apply star_l_contains_eps.
Qed.
Fact Denesting_aux_2 :
forall a b,
a • (a∗ • (b • a∗)∗) ≤ a∗ • (b • a∗)∗.
Proof.
intros.
rewrite <- conc_l_assoc.
apply conc_l_lleq;auto with lgs.
Qed.
Lemma Denesting_aux_3 : forall a b,
b • (a∗ • (b • a∗)∗) ≤ (b • a∗)∗.
Proof.
intros.
eapply (lleq_trans _ ((b • a ∗) • (b • a ∗) ∗) _).
rewrite conc_l_assoc;auto with lgs.
apply star_l_ConL_comm.
Qed.
Lemma Denesting_aux_4 : forall a b,
(b • a∗)∗ ≤ a∗ • (b • a∗)∗.
Proof.
intros.
rewrite <- (conc_l_neutral_left ((b • a∗)∗)) at 1.
apply conc_l_lleq;auto with lgs.
apply star_l_contains_eps.
Qed.
Lemma Denesting_aux_5 :
forall a b,
b • (a∗ • (b • a∗)∗) ≤ a∗ • (b • a∗)∗.
Proof.
intros.
apply (lleq_trans _ ((b • a∗)∗) _).
apply Denesting_aux_3.
apply Denesting_aux_4.
Qed.
Lemma Denesting_aux_6 : forall a b,
{ε} ∪ a • (a∗ • (b • a∗)∗) ∪
b • (a∗ • (b • a∗)∗) ≤ a∗ • (b • a∗)∗.
Proof.
intros.
do 2 rewrite <- (union_l_idemp (a ∗ • (b • a ∗) ∗)) at 3.
rewrite <- (Denesting_aux_2 a b) at 3.
rewrite <- (Denesting_aux_1 a b) at 4.
rewrite (union_l_comm (a • (a ∗ • (b • a ∗) ∗)) ({ε})).
apply union_l_lleq.
reflexivity.
apply Denesting_aux_5.
Qed.
Lemma Denesting_Imply_1 : forall a b,
(a ∪ b)∗ ≤ a∗ • (b • a∗)∗.
Proof.
intros.
rewrite <- (conc_l_neutral_right ((a ∪ b)∗)).
apply kat_ax_3_lang.
rewrite (union_l_comm _ {ε}).
rewrite conc_l_distr_left.
rewrite <- union_l_assoc.
apply Denesting_aux_6.
Qed.
Lemma Denesting_aux_7 : forall a b,
(a ∪ b)∗ • ((a ∪ b)∗)∗ ≤ (a ∪ b)∗.
Proof.
intros.
rewrite double_star_eq_star.
rewrite <- star_prod_eq_star.
reflexivity.
Qed.
Lemma Denesting_aux_9 : forall a b,
(a ∪ b)∗ • ((a ∪ b) • (a ∪ b)∗)∗
≤ (a ∪ b)∗ • ((a ∪ b)∗)∗.
Proof.
intros.
apply conc_l_lleq.
apply lleq_refl.
apply star_l_lleq.
apply star_l_ConL_comm.
Qed.
Lemma Denesting_aux_10 : forall a b,
a∗ • (b • a∗)∗ ≤
(a ∪ b)∗ • ((a ∪ b) • (a ∪ b)∗)∗.
Proof.
intros.
apply conc_l_lleq.
apply star_l_lleq.
unfold lleq.
rewrite <- union_l_assoc.
rewrite union_l_idemp.
reflexivity.
apply star_l_lleq.
apply conc_l_lleq.
unfold lleq.
rewrite union_l_comm.
rewrite union_l_assoc.
rewrite union_l_idemp.
reflexivity.
apply star_l_lleq.
unfold lleq.
rewrite <- union_l_assoc.
rewrite union_l_idemp.
reflexivity.
Qed.
Lemma Denesting_Imply_2 : forall a b,
a∗ • (b • a∗)∗ ≤ (a ∪ b)∗.
Proof.
intros.
transitivity ((a ∪ b)∗ • ((a ∪ b)∗)∗).
transitivity ((a ∪ b)∗ • ((a ∪ b) • (a ∪ b)∗)∗).
apply Denesting_aux_10.
apply Denesting_aux_9.
apply Denesting_aux_7.
Qed.
End KaDenesting_Auxiliary.
(* end hide *)
Lemma ka_bisimulation :
forall l1 l2 l3,
l1 • l2 ∼ l2 • l3 -> l2 • l3∗ ∼ l1∗ • l2.
Proof.
intros;destruct H;split;
apply lleq_Included_equiv;apply lleq_Included_equiv in H;
apply lleq_Included_equiv in H0;
[apply Bissimulation_Imply_2|apply KA_Bissimulation_Imply_1];
assumption.
Qed.
Lemma ka_denesting :
forall l1 l2,
(l1 ∪ l2)∗ ∼ l1∗ • (l2 • l1∗)∗.
Proof.
intros.
generalize(Denesting_Imply_1 l1 l2).
intro.
generalize(Denesting_Imply_2 l1 l2).
intro.
unfold lleq in H,H0.
rewrite <- H.
rewrite <- H0 at 1;auto with lgs.
Qed.
Lemma ka_sliding :
forall l1 l2,
(l1 • l2)∗ • l1 ∼ l1 • (l2 • l1)∗.
Proof.
intros.
symmetry.
apply ka_bisimulation.
rewrite <- conc_l_assoc.
reflexivity.
Qed.
Lemma empty_or_empty :
forall l1 l2,
l1 ∪ l2 ∼ ∅ <-> l1 ∼ ∅ /\ l2 ∼ ∅.
Proof.
intros.
split;intros.
do 2 split;red;intros;
try apply H.
constructor;auto.
inversion H0.
constructor 2;auto.
inversion H0.
destruct H.
split;red;intros.
destruct H1.
apply H in H1;auto.
apply H0 in H1;auto.
inversion H1.
Qed.
Ltac solve_by_ka_axioms :=
try reflexivity ;
match goal with
| |- context[?x • {ε} ∼ ?x] =>
eapply conc_l_neutral_right
| |- context[{ε} • ?l ∼ ?l] =>
eapply conc_l_neutral_left
| |- context[∅ • ?l ∼ ∅] =>
eapply conc_l_empty_left
| |- context[?l • ∅ ∼ ∅] =>
eapply conc_l_empty_right
| |- context[(?l1 • ?l2) • ?l3 ∼ ?l1 • (?l2 • ?l3)] =>
eapply conc_l_assoc
| |- context[?l1 • (?l2 ∪ ?l3) ∼ (?l1 • ?l2) ∪ (?l1 • ?l3)] =>
eapply conc_l_distr_right
| |- context[(?l1 ∪ ?l2) • ?l3 ∼ (?l1 • ?l3) ∪ (?l2 • ?l3)] =>
eapply conc_l_distr_left
| |- context[∅ ∪ ?l ∼ ?l] =>
eapply union_l_neutral_left
| |- context[?l ∪ ∅ ∼ ?l] =>
eapply union_l_neutral_right
| |- context[?l1 ∪ ?l2 ∼ ?l2 ∪ ?l1] =>
eapply union_l_comm
| |- context[?l ∪ ?l ∼ ?l] =>
eapply union_l_idemp
| |- context[(?l1 ∪ ?l2) ∪ ?l3 ∼ ?l1 ∪ (?l2 ∪ ?l3)] =>
eapply union_l_assoc
| |- context[{ε} ∪ (?l • (?l∗)) ∼ ?l∗] =>
eapply kat_ax_1_lang
| |- context[{ε} ∪ ((?l∗) • ?l) ∼ ?l∗] =>
eapply kat_ax_2_lang
| _ => fail 1 "Not an equation of Kleene algebra"
end.
Ltac solve_by_ka :=
try solve_by_ka_axioms;symmetry;solve_by_ka_axioms.
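(** A small illustrative example (ours, not part of the original development):
    [solve_by_ka] discharges direct instances of the Kleene algebra axioms above,
    such as the right neutrality of the empty language for union. *)
Example solve_by_ka_example : forall l : language, l ∪ ∅ ∼ l.
Proof.
  intro l; solve_by_ka.
Qed.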
(** Some more useful results. *)
Lemma not_null_in_lconc:
forall (w1 w2 x : word)(l1 l2 : language)(a : Z),
w1 ∈ l1 -> w2 ∈ l2 -> (w1 ++ w2)%list = a :: x -> ~[] ∈ l1 ->
(exists k, w1 = a :: k).
Proof.
induction w1;intros.
inv H1.
rewrite <- app_comm_cons in H1;injection H1;intros;subst.
exists w1;auto.
Qed.
Lemma not_null_in_lconc_2:
forall (w1 w2 x : word)(l1 l2 : language)(a : Z),
w1 ∈ l1 -> w2 ∈ l2 -> (w1 ++ w2)%list = a :: x -> w1 =/= [] ->
(exists k, w1 = a :: k).
Proof.
induction w1;intros.
elim H2;autotc.
rewrite <- app_comm_cons in H1;injection H1;intros;subst.
exists w1;auto.
Qed.
|
SUBROUTINE INITBIN4
C
C M.R. MORTON 02 JUN 1999
C CHANGE RECORD
C INITIALIZES BINARY FILE FOR EFDC OUTPUT. PLACES CONTROL
C PARAMETERS FOR POST-PROCESSOR IN HEADER SECTION OF BINARY
C FILE WQSDTS.BIN FOR BENTHIC FLUX RATES.
C
USE GLOBAL
REAL,SAVE,ALLOCATABLE,DIMENSION(:)::XLON
REAL,SAVE,ALLOCATABLE,DIMENSION(:)::YLAT
LOGICAL FEXIST,IS1OPEN,IS2OPEN
CHARACTER*20 WQNAME(30)
CHARACTER*10 WQUNITS(30)
CHARACTER*3 WQCODE(30)
IF(.NOT.ALLOCATED(XLON))THEN
ALLOCATE(XLON(LCM))
ALLOCATE(YLAT(LCM))
XLON=0.0
YLAT=0.0
ENDIF
C
C THE FOLLOWING PARAMETERS ARE SPECIFIED IN EFDC.INP AND WQ3DSD.INP:
C KCSD = NUMBER OF VERTICAL LAYERS (FORCED TO 1 FOR BENTHIC FLUX FILE)
C ISMTSDT = NUMBER OF TIME STEPS PER DATA DUMP OF BENTHIC FLUXES
C DT = TIME STEP OF EFDC MODEL IN SECONDS
C LA = NUMBER OF ACTIVE CELLS + 1 IN MODEL
C TBEGAN = BEGINNING TIME OF RUN IN DAYS
C THE PARAMETER NPARM MUST BE CHANGED IF THE OUTPUT DATA
C IS CHANGED IN SUBROUTINE WSMTSBIN:
C NPARM = NUMBER OF PARAMETERS WRITTEN TO BINARY FILE
C NREC4 = NUMBER OF RECORDS WRITTEN TO BINARY FILE (ONE RECORD
C IS A COMPLETE DATA DUMP FOR TIME INTERVAL IWQDIUDT)
C
NPARM = 8
NCELLS = LA-1
NREC4 = 0
TEND = TBEGIN
KCSD = 1
MAXRECL4 = 32
IF(NPARM .GE. 8)THEN
MAXRECL4 = NPARM*4
ENDIF
C
C THE FOLLOWING WATER QUALITY NAMES, UNITS, AND 3-CHARACTER CODES
C SHOULD BE MODIFIED TO MATCH THE PARAMETERS WRITTEN TO THE BINARY
C FILE IN SUBROUTINE WSMTSBIN. THE CHARACTER STRINGS MUST BE
C EXACTLY THE LENGTH SPECIFIED BELOW IN ORDER FOR THE POST-PROCESSOR
C TO WORK CORRECTLY.
C BE SURE WQNAME STRINGS ARE EXACTLY 20-CHARACTERS LONG:
C------------------' 1 2'
C------------------'12345678901234567890'
C
WQNAME( 1) = 'SOD_BENTHIC_FLUX '
WQNAME( 2) = 'NH4_BENTHIC_FLUX '
WQNAME( 3) = 'NO3_BENTHIC_FLUX '
WQNAME( 4) = 'PO4D_BENTHIC_FLUX '
WQNAME( 5) = 'SAD_BENTHIC_FLUX '
WQNAME( 6) = 'COD_BENTHIC_FLUX '
WQNAME( 7) = 'SEDIMENT_TEMPERATURE'
WQNAME( 8) = 'BENTHIC_STRESS '
C
C BE SURE WQUNITS STRINGS ARE EXACTLY 10-CHARACTERS LONG:
C-------------------' 1'
C-------------------'1234567890'
C
WQUNITS( 1) = 'G/M2/DAY '
WQUNITS( 2) = 'G/M2/DAY '
WQUNITS( 3) = 'G/M2/DAY '
WQUNITS( 4) = 'G/M2/DAY '
WQUNITS( 5) = 'G/M2/DAY '
WQUNITS( 6) = 'G/M2/DAY '
WQUNITS( 7) = 'DEGC '
WQUNITS( 8) = 'DAYS '
C
C BE SURE WQCODE STRINGS ARE EXACTLY 3-CHARACTERS LONG:
C------------------'123'
C
WQCODE( 1) = 'SOD'
WQCODE( 2) = 'FNH'
WQCODE( 3) = 'FNO'
WQCODE( 4) = 'FP4'
WQCODE( 5) = 'FSA'
WQCODE( 6) = 'FCO'
WQCODE( 7) = 'SMT'
WQCODE( 8) = 'BST'
C
C IF WQSDTS.BIN ALREADY EXISTS, OPEN FOR APPENDING HERE.
C
IF(ISSDBIN .EQ. 2)THEN
IO = 1
5 IO = IO+1
IF(IO .GT. 99)THEN
WRITE(0,*) ' NO AVAILABLE IO UNITS ... IO > 99'
STOP ' EFDC HALTED IN SUBROUTINE INITBIN4'
ENDIF
INQUIRE(UNIT=IO, OPENED=IS2OPEN)
IF(IS2OPEN) GOTO 5
INQUIRE(FILE='WQSDTS.BIN', EXIST=FEXIST)
IF(FEXIST)THEN
OPEN(UNIT=IO, FILE='WQSDTS.BIN', ACCESS='DIRECT',
& FORM='UNFORMATTED', STATUS='UNKNOWN', RECL=MAXRECL4)
WRITE(0,*) 'OLD FILE WQSDTS.BIN FOUND...OPENING FOR APPEND'
READ(IO, REC=1) NREC4, TBEGAN, TEND, DT, ISMTSDT, NPARM,
& NCELLS, KCSD
NR6 = 1 + NPARM*3 + NCELLS*4 + (NCELLS*KCSD+1)*NREC4 + 1
CLOSE(IO)
ELSE
ISSDBIN=1
ENDIF
ENDIF
C
C IF WQSDTS.BIN ALREADY EXISTS, DELETE IT HERE.
C
IF(ISSDBIN .EQ. 1)THEN
TBEGAN = TBEGIN
IO = 1
10 IO = IO+1
IF(IO .GT. 99)THEN
WRITE(0,*) ' NO AVAILABLE IO UNITS ... IO > 99'
STOP ' EFDC HALTED IN SUBROUTINE INITBIN4'
ENDIF
INQUIRE(UNIT=IO, OPENED=IS2OPEN)
IF(IS2OPEN) GOTO 10
INQUIRE(FILE='WQSDTS.BIN', EXIST=FEXIST)
IF(FEXIST)THEN
OPEN(UNIT=IO, FILE='WQSDTS.BIN')
CLOSE(UNIT=IO, STATUS='DELETE')
WRITE(0,*) 'OLD FILE WQSDTS.BIN DELETED...'
ENDIF
OPEN(UNIT=IO, FILE='WQSDTS.BIN', ACCESS='DIRECT',
& FORM='UNFORMATTED', STATUS='UNKNOWN', RECL=MAXRECL4)
C
C WRITE CONTROL PARAMETERS FOR POST-PROCESSOR TO HEADER
C SECTION OF THE WQSDTS.BIN BINARY FILE:
C
WRITE(IO) NREC4, TBEGAN, TEND, DT, ISMTSDT, NPARM, NCELLS, KCSD
DO I=1,NPARM
WRITE(IO) WQNAME(I)
ENDDO
DO I=1,NPARM
WRITE(IO) WQUNITS(I)
ENDDO
DO I=1,NPARM
WRITE(IO) WQCODE(I)
ENDDO
C
C WRITE CELL I,J MAPPING REFERENCE TO HEADER SECTION OF BINARY FILE:
C
DO L=2,LA
WRITE(IO) IL(L)
ENDDO
DO L=2,LA
WRITE(IO) JL(L)
ENDDO
C
C ** READ IN XLON AND YLAT OR UTME AND UTMN OF CELL CENTERS OF
C ** CURVILINEAR PORTION OF THE GRID FROM FILE LXLY.INP:
C
IO1 = 0
20 IO1 = IO1+1
IF(IO1 .GT. 99)THEN
WRITE(0,*) ' NO AVAILABLE IO UNITS ... IO1 > 99'
STOP ' EFDC HALTED IN SUBROUTINE INITBIN4'
ENDIF
INQUIRE(UNIT=IO1, OPENED=IS1OPEN)
IF(IS1OPEN) GOTO 20
OPEN(IO1,FILE='LXLY.INP',STATUS='UNKNOWN')
DO NS=1,4
READ(IO1,1111)
ENDDO
1111 FORMAT(80X)
DO LL=1,LVC
READ(IO1,*) I,J,XUTME,YUTMN
L=LIJ(I,J)
XLON(L)=XUTME
YLAT(L)=YUTMN
ENDDO
CLOSE(IO1)
C
C WRITE XLON AND YLAT OF CELL CENTERS TO HEADER SECTION OF
C BINARY OUTPUT FILE:
C
DO L=2,LA
WRITE(IO) XLON(L)
ENDDO
DO L=2,LA
WRITE(IO) YLAT(L)
ENDDO
INQUIRE(UNIT=IO, NEXTREC=NR6)
CLOSE(IO)
ENDIF
RETURN
END
|
Formal statement is: lemma locally_injective_linear_image: fixes f :: "'a::euclidean_space \<Rightarrow> 'b::euclidean_space" assumes f: "linear f" "inj f" and iff: "\<And>S. P (f ` S) \<longleftrightarrow> Q S" shows "locally P (f ` S) \<longleftrightarrow> locally Q S" Informal statement is: If $f$ is an injective linear map and a property $P$ holds of $f(S)$ exactly when $Q$ holds of $S$, then $f(S)$ locally satisfies $P$ if and only if $S$ locally satisfies $Q$. |
/-
Copyright (c) 2018 Patrick Massot. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Patrick Massot, Chris Hughes, Michael Howes
-/
import data.fintype.basic
import algebra.group.hom
import algebra.group.semiconj
import data.equiv.mul_add_aut
import algebra.group_with_zero.basic
/-!
# Conjugacy of group elements
See also `mul_aut.conj` and `quandle.conj`.
-/
universes u v
variables {α : Type u} {β : Type v}
section monoid
variables [monoid α] [monoid β]
/-- We say that `a` is conjugate to `b` if for some unit `c` we have `c * a * c⁻¹ = b`. -/
def is_conj (a b : α) := ∃ c : αˣ, semiconj_by ↑c a b
@[refl] lemma is_conj.refl (a : α) : is_conj a a :=
⟨1, semiconj_by.one_left a⟩
@[symm] lemma is_conj.symm {a b : α} : is_conj a b → is_conj b a
| ⟨c, hc⟩ := ⟨c⁻¹, hc.units_inv_symm_left⟩
@[trans] lemma is_conj.trans {a b c : α} : is_conj a b → is_conj b c → is_conj a c
| ⟨c₁, hc₁⟩ ⟨c₂, hc₂⟩ := ⟨c₂ * c₁, hc₂.mul_left hc₁⟩
@[simp] lemma is_conj_iff_eq {α : Type*} [comm_monoid α] {a b : α} : is_conj a b ↔ a = b :=
⟨λ ⟨c, hc⟩, begin
rw [semiconj_by, mul_comm, ← units.mul_inv_eq_iff_eq_mul, mul_assoc, c.mul_inv, mul_one] at hc,
exact hc,
end, λ h, by rw h⟩
protected lemma monoid_hom.map_is_conj (f : α →* β) {a b : α} : is_conj a b → is_conj (f a) (f b)
| ⟨c, hc⟩ := ⟨units.map f c, by rw [units.coe_map, semiconj_by, ← f.map_mul, hc.eq, f.map_mul]⟩
end monoid
section group
variables [group α]
@[simp] lemma is_conj_iff {a b : α} :
is_conj a b ↔ ∃ c : α, c * a * c⁻¹ = b :=
⟨λ ⟨c, hc⟩, ⟨c, mul_inv_eq_iff_eq_mul.2 hc⟩, λ ⟨c, hc⟩,
⟨⟨c, c⁻¹, mul_inv_self c, inv_mul_self c⟩, mul_inv_eq_iff_eq_mul.1 hc⟩⟩
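/- A small illustrative example (ours, not part of mathlib): any element of a group is
conjugate to each of its explicit conjugates, directly from `is_conj_iff`. -/
example {G : Type*} [group G] (a b : G) : is_conj a (b * a * b⁻¹) :=
is_conj_iff.2 ⟨b, rfl⟩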
@[simp] lemma is_conj_one_right {a : α} : is_conj 1 a ↔ a = 1 :=
⟨λ ⟨c, hc⟩, mul_right_cancel (hc.symm.trans ((mul_one _).trans (one_mul _).symm)), λ h, by rw [h]⟩
@[simp] lemma is_conj_one_left {a : α} : is_conj a 1 ↔ a = 1 :=
calc is_conj a 1 ↔ is_conj 1 a : ⟨is_conj.symm, is_conj.symm⟩
... ↔ a = 1 : is_conj_one_right
@[simp] lemma conj_inv {a b : α} : (b * a * b⁻¹)⁻¹ = b * a⁻¹ * b⁻¹ :=
((mul_aut.conj b).map_inv a).symm
@[simp] lemma conj_mul {a b c : α} : (b * a * b⁻¹) * (b * c * b⁻¹) = b * (a * c) * b⁻¹ :=
((mul_aut.conj b).map_mul a c).symm
@[simp] lemma conj_pow {i : ℕ} {a b : α} : (a * b * a⁻¹) ^ i = a * (b ^ i) * a⁻¹ :=
begin
induction i with i hi,
{ simp },
{ simp [pow_succ, hi] }
end
@[simp] lemma conj_zpow {i : ℤ} {a b : α} : (a * b * a⁻¹) ^ i = a * (b ^ i) * a⁻¹ :=
begin
induction i,
{ simp },
{ simp [zpow_neg_succ_of_nat, conj_pow] }
end
lemma conj_injective {x : α} : function.injective (λ (g : α), x * g * x⁻¹) :=
(mul_aut.conj x).injective
end group
@[simp] lemma is_conj_iff₀ [group_with_zero α] {a b : α} :
is_conj a b ↔ ∃ c : α, c ≠ 0 ∧ c * a * c⁻¹ = b :=
⟨λ ⟨c, hc⟩, ⟨c, begin
rw [← units.coe_inv', units.mul_inv_eq_iff_eq_mul],
exact ⟨c.ne_zero, hc⟩,
end⟩, λ ⟨c, c0, hc⟩,
⟨units.mk0 c c0, begin
rw [semiconj_by, ← units.mul_inv_eq_iff_eq_mul, units.coe_inv', units.coe_mk0],
exact hc
end⟩⟩
namespace is_conj
/- This small quotient API is largely copied from the API of `associates`;
where possible, try to keep them in sync -/
/-- The setoid of the relation `is_conj` iff there is a unit `u` such that `u * x = y * u` -/
protected def setoid (α : Type*) [monoid α] : setoid α :=
{ r := is_conj, iseqv := ⟨is_conj.refl, λa b, is_conj.symm, λa b c, is_conj.trans⟩ }
end is_conj
local attribute [instance, priority 100] is_conj.setoid
/-- The quotient type of conjugacy classes of a group. -/
def conj_classes (α : Type*) [monoid α] : Type* :=
quotient (is_conj.setoid α)
namespace conj_classes
section monoid
variables [monoid α] [monoid β]
/-- The canonical quotient map from a monoid `α` into the `conj_classes` of `α` -/
protected def mk {α : Type*} [monoid α] (a : α) : conj_classes α :=
⟦a⟧
instance : inhabited (conj_classes α) := ⟨⟦1⟧⟩
theorem mk_eq_mk_iff_is_conj {a b : α} :
conj_classes.mk a = conj_classes.mk b ↔ is_conj a b :=
iff.intro quotient.exact quot.sound
theorem quotient_mk_eq_mk (a : α) : ⟦ a ⟧ = conj_classes.mk a := rfl
theorem quot_mk_eq_mk (a : α) : quot.mk setoid.r a = conj_classes.mk a := rfl
theorem forall_is_conj {p : conj_classes α → Prop} :
(∀a, p a) ↔ (∀a, p (conj_classes.mk a)) :=
iff.intro
(assume h a, h _)
(assume h a, quotient.induction_on a h)
theorem mk_surjective : function.surjective (@conj_classes.mk α _) :=
forall_is_conj.2 (λ a, ⟨a, rfl⟩)
instance : has_one (conj_classes α) := ⟨⟦ 1 ⟧⟩
theorem one_eq_mk_one : (1 : conj_classes α) = conj_classes.mk 1 := rfl
lemma exists_rep (a : conj_classes α) : ∃ a0 : α, conj_classes.mk a0 = a :=
quot.exists_rep a
/-- A `monoid_hom` maps conjugacy classes of one group to conjugacy classes of another. -/
def map (f : α →* β) : conj_classes α → conj_classes β :=
quotient.lift (conj_classes.mk ∘ f) (λ a b ab, mk_eq_mk_iff_is_conj.2 (f.map_is_conj ab))
lemma map_surjective {f : α →* β} (hf : function.surjective f) :
function.surjective (conj_classes.map f) :=
begin
intros b,
obtain ⟨b, rfl⟩ := conj_classes.mk_surjective b,
obtain ⟨a, rfl⟩ := hf b,
exact ⟨conj_classes.mk a, rfl⟩,
end
instance [fintype α] [decidable_rel (is_conj : α → α → Prop)] :
fintype (conj_classes α) :=
quotient.fintype (is_conj.setoid α)
end monoid
section comm_monoid
variable [comm_monoid α]
lemma mk_injective : function.injective (@conj_classes.mk α _) :=
λ _ _, (mk_eq_mk_iff_is_conj.trans is_conj_iff_eq).1
lemma mk_bijective : function.bijective (@conj_classes.mk α _) :=
⟨mk_injective, mk_surjective⟩
/-- The bijection between a `comm_monoid` and its `conj_classes`. -/
def mk_equiv : α ≃ conj_classes α :=
⟨conj_classes.mk, quotient.lift id (λ (a : α) b, is_conj_iff_eq.1), quotient.lift_mk _ _,
begin
rw [function.right_inverse, function.left_inverse, forall_is_conj],
intro x,
rw [← quotient_mk_eq_mk, ← quotient_mk_eq_mk, quotient.lift_mk, id.def],
end⟩
end comm_monoid
end conj_classes
section monoid
variables [monoid α]
/-- Given an element `a`, `conjugates_of a` is the set of conjugates of `a`. -/
def conjugates_of (a : α) : set α := {b | is_conj a b}
lemma mem_conjugates_of_self {a : α} : a ∈ conjugates_of a := is_conj.refl _
lemma is_conj.conjugates_of_eq {a b : α} (ab : is_conj a b) :
conjugates_of a = conjugates_of b :=
set.ext (λ g, ⟨λ ag, (ab.symm).trans ag, λ bg, ab.trans bg⟩)
lemma is_conj_iff_conjugates_of_eq {a b : α} :
is_conj a b ↔ conjugates_of a = conjugates_of b :=
⟨is_conj.conjugates_of_eq, λ h, begin
have ha := mem_conjugates_of_self,
rwa ← h at ha,
end⟩
end monoid
namespace conj_classes
variables [monoid α]
local attribute [instance] is_conj.setoid
/-- Given a conjugacy class `a`, `carrier a` is the set it represents. -/
def carrier : conj_classes α → set α :=
quotient.lift conjugates_of (λ (a : α) b ab, is_conj.conjugates_of_eq ab)
lemma mem_carrier_mk {a : α} : a ∈ carrier (conj_classes.mk a) := is_conj.refl _
lemma mem_carrier_iff_mk_eq {a : α} {b : conj_classes α} :
a ∈ carrier b ↔ conj_classes.mk a = b :=
begin
revert b,
rw forall_is_conj,
intro b,
rw [carrier, eq_comm, mk_eq_mk_iff_is_conj, ← quotient_mk_eq_mk, quotient.lift_mk],
refl,
end
lemma carrier_eq_preimage_mk {a : conj_classes α} :
a.carrier = conj_classes.mk ⁻¹' {a} :=
set.ext (λ x, mem_carrier_iff_mk_eq)
end conj_classes
|
open import Data.Product using ( ∃ ; _×_ ; _,_ ; proj₁ ; proj₂ )
open import Relation.Unary using ( _∈_ )
open import Web.Semantic.DL.ABox using ( ABox )
open import Web.Semantic.DL.ABox.Model using ( _⊨a_ ; _⊨b_ ; ⊨a-resp-≲ ; ⊨b-resp-≲ )
open import Web.Semantic.DL.ABox.Interp using ( Interp ; ⌊_⌋ ; _*_ )
open import Web.Semantic.DL.ABox.Interp.Morphism using ( _≲_ ; ≲-refl )
open import Web.Semantic.DL.KB using ( _,_ )
open import Web.Semantic.DL.KB.Model using ( _⊨_ )
open import Web.Semantic.DL.Integrity using ( Initial ; _⊕_⊨_ ; extension ; ext-init ; ext-⊨ ; ext✓ ; init-≲ ; init-⊨ ; init-med ; med-≲ )
open import Web.Semantic.DL.Signature using ( Signature )
open import Web.Semantic.DL.TBox using ( TBox ; _,_ )
open import Web.Semantic.DL.TBox.Model using ( _⊨t_ )
open import Web.Semantic.DL.Category.Object using ( Object ; _,_ ; IN ; iface )
open import Web.Semantic.Util using ( _⊕_⊕_ ; inode ; enode )
module Web.Semantic.DL.Category.Morphism {Σ : Signature} {S T : TBox Σ} where
infixr 4 _,_
-- A morphism A ⇒ B is an abox F such that for every I ⊨ S , T , A
-- there is a J which is the initial extension of I satisfying (S , F),
-- and moreover J satisfies (T , B).
data _⇒_w/_ (A B : Object S T) (V : Set) : Set₁ where
_,_ : (F : ABox Σ (IN A ⊕ V ⊕ IN B)) →
(∀ I → (I ⊨ (S , T) , iface A) → (I ⊕ (S , F) ⊨ (T , iface B))) →
(A ⇒ B w/ V)
data _⇒_ (A B : Object S T) : Set₁ where
_,_ : ∀ V → (A ⇒ B w/ V) → (A ⇒ B)
BN : ∀ {A B} → (F : A ⇒ B) → Set
BN (V , F,F✓) = V
impl : ∀ {A B} → (F : A ⇒ B) → ABox Σ (IN A ⊕ BN F ⊕ IN B)
impl (V , F , F✓) = F
impl✓ : ∀ {A B} → (F : A ⇒ B) → ∀ I → (I ⊨ (S , T) , iface A) → (I ⊕ (S , impl F) ⊨ (T , iface B))
impl✓ (V , F , F✓) = F✓
apply : ∀ {A B} (F : A ⇒ B) I → (I ⊨ (S , T) , iface A) →
Interp Σ (IN A ⊕ BN F ⊕ IN B)
apply F I I⊨STA = extension (impl✓ F I I⊨STA)
apply-init : ∀ {A B} (F : A ⇒ B) I I⊨STA →
(apply F I I⊨STA ∈ Initial I (S , impl F))
apply-init F I I⊨STA = ext-init (impl✓ F I I⊨STA)
apply-⊨ : ∀ {A B} (F : A ⇒ B) I I⊨STA →
(enode * (apply F I I⊨STA) ⊨ (T , iface B))
apply-⊨ F I I⊨STA = ext-⊨ (impl✓ F I I⊨STA)
apply-≲ : ∀ {A B} (F : A ⇒ B) I I⊨STA → (I ⊨a impl F) →
(apply F (inode * I) I⊨STA ≲ I)
apply-≲ F I ((I⊨S , I⊨T) , I⊨A) I⊨F =
med-≲ (init-med
(apply-init F (inode * I) ((I⊨S , I⊨T) , I⊨A))
I
(≲-refl (inode * I))
(I⊨S , I⊨F))
apply✓ : ∀ {A B} (F : A ⇒ B) I I⊨STA →
(enode * apply F I I⊨STA ⊨ (S , T) , iface B)
apply✓ F I I⊨STA = ext✓ (impl✓ F I I⊨STA)
-- Morphisms F and G are equivalent whenever
-- in any interpretation I ⊨ S,T
-- we have I ⊨ F iff I ⊨ G.
infix 2 _⊑_ _⊑′_ _≣_
_⊑_ : ∀ {A B : Object S T} → (A ⇒ B) → (A ⇒ B) → Set₁
_⊑_ {A} F G =
∀ I → (inode * I ⊨ (S , T) , iface A) → (I ⊨a impl F) → (I ⊨b impl G)
data _≣_ {A B : Object S T} (F G : A ⇒ B) : Set₁ where
_,_ : (F ⊑ G) → (G ⊑ F) → (F ≣ G)
-- An alternative characterization, which may be easier
-- to work with.
_⊑′_ : ∀ {A B : Object S T} → (A ⇒ B) → (A ⇒ B) → Set₁
F ⊑′ G = ∀ I I⊨STA → (apply F I I⊨STA) ⊨b (impl G)
⊑′-impl-⊑ : ∀ {A B : Object S T} → (F G : A ⇒ B) → (F ⊑′ G) → (F ⊑ G)
⊑′-impl-⊑ F G F⊑′G I I⊨STA I⊨F =
⊨b-resp-≲ (apply-≲ F I I⊨STA I⊨F) (impl G) (F⊑′G (inode * I) I⊨STA)
⊑-impl-⊑′ : ∀ {A B : Object S T} → (F G : A ⇒ B) → (F ⊑ G) → (F ⊑′ G)
⊑-impl-⊑′ {A} {B} F G F⊑G I (I⊨ST , I⊨A) = J⊨G where
J : Interp Σ (IN A ⊕ BN F ⊕ IN B)
J = apply F I (I⊨ST , I⊨A)
J⊨S : ⌊ J ⌋ ⊨t S
J⊨S = proj₁ (init-⊨ (apply-init F I (I⊨ST , I⊨A)))
J⊨T : ⌊ J ⌋ ⊨t T
J⊨T = proj₁ (apply-⊨ F I (I⊨ST , I⊨A))
J⊨A : inode * J ⊨a iface A
J⊨A = ⊨a-resp-≲ (init-≲ (apply-init F I (I⊨ST , I⊨A))) (iface A) I⊨A
J⊨F : J ⊨a impl F
J⊨F = proj₂ (init-⊨ (apply-init F I (I⊨ST , I⊨A)))
J⊨G : J ⊨b impl G
J⊨G = F⊑G J ((J⊨S , J⊨T) , J⊨A) J⊨F
|
module Prelude.Float where
open import Prelude.String
postulate
Float : Set
floatToString : Float -> String
stringToFloat : String -> Float
{-# BUILTIN FLOAT Float #-}
{-# COMPILED_EPIC floatToString (f : Float) -> String = frString(floatToStr(f)) #-}
{-# COMPILED_EPIC stringToFloat (s : Any) -> Float = strToFloat(mkString(s)) #-} |
Furtado embarked on a world concert tour, the Get Loose Tour, on 16 February 2007 in the UK, in support of the album; the tour included thirty-one dates in Europe and Canada, with additional shows in the US, Japan, Australia and Latin America. Furtado described the show as a "full sensory experience" with "a beginning, middle and end ... [it] takes you on a journey", also stressing the importance of crowd involvement and "spontaneity and rawness, because those are my roots, you know? I started by doing club shows, and that's the energy I love, the raw club energy of just feeling like you're rocking out." Though Furtado said choreographed dance routines were to be included in the show, she described it as "music-based ... Everything else is just to keep it sophisticated and sensual and fun." Furtado said she hoped to have Chris Martin, Juanes, Justin Timberlake, Timbaland and Calle 13 guest on the tour, and to have a "revolving door" of opening acts, with Latin musicians opening in the US.
|
module ReadWrite
( -- * Read
readMolV2000
-- * Write
, writeXYZ
) where
import Prelude hiding (readFile)
import Control.Lens
import Control.Monad.State
import Numeric.LinearAlgebra.Data
import System.Directory
import System.IO
import System.IO.Unsafe
import Text.Printf (hPrintf)
import Types
-- * Read
-- | Read a molecule in *.mol format (V2000)
readMolV2000 :: FilePath -> (Molecule, [Bond])
readMolV2000 inf =
let txt = (lines . unsafePerformIO . readFile) inf
counts_line = words (txt !! 3)
count_atoms = read (counts_line !! 0)
count_bonds = read (counts_line !! 1)
atoms_lines = (take count_atoms . drop 4) txt
bonds_lines = (take count_bonds . drop (4 + count_atoms)) txt
in (foldr addAtom molecule atoms_lines, foldr addBond [] bonds_lines)
where
addAtom a = over atoms ((:) (readline' a))
addBond b = (:) (readline'' b)
readline' l =
execState
(do let w = words l
acoordin . x .= read (w !! 0)
acoordin . y .= read (w !! 1)
acoordin . z .= read (w !! 2)
aelement .= w !! 3
avdwrad .= vdwr (w !! 3))
atom
readline'' l =
execState
(do let w = words l
bfid .= read (w !! 0) - 1
bsid .= read (w !! 1) - 1
btype .= read (w !! 2)
bster .= read (w !! 3)
btop .= read (w !! 4))
bond
-- | Get VDW radius
vdwr :: Element -> Double
vdwr a =
case a of
"H" -> 0.500 -- 0.5 -- 1.000
"O" -> 0.650 -- 0.5 -- 1.300
"N" -> 0.700 -- 0.5 -- 1.400
"C" -> 0.750 -- 0.5 -- 1.500
"S" -> 0.950 -- 0.5 -- 1.900
"Br"-> 0.950
_ -> error $ "vdwr not found for: " ++ show a
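-- | Illustrative helper (ours, not part of the original module, and not exported):
-- read a V2000 file and re-emit its coordinates in *.xyz format. The file paths
-- are supplied by the caller; the comment string is a placeholder.
convertToXYZ :: FilePath -> FilePath -> IO ()
convertToXYZ inf ouf =
  let (mol, _bonds) = readMolV2000 inf
  in writeXYZ ouf "converted from V2000" mol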
-- * Write
-- | Write molecule in *.xyz format
writeXYZ :: FilePath -> String -> Molecule -> IO ()
writeXYZ ouf comment molecule = do
(tmp_name, tmp_handle) <- openTempFile "." "temp"
hPrint tmp_handle (views atoms length molecule)
hPutStrLn tmp_handle comment
mapM_ (writeData tmp_handle) (view atoms molecule)
hClose tmp_handle
renameFile tmp_name ouf
where
writeData hdl atom = do
let e = view aelement atom
(Point x y z) = view acoordin atom
hPrintf hdl "%s\t%8.6f\t%8.6f\t%8.6f\n" e x y z |
```
import igraph as ig
import numpy as np
from sympy.solvers import nsolve
from sympy import *
```
```
import scipy as sp
from scipy import stats
```
```
from scipy.stats import rankdata
```
```
from math import radians, cos, sin, asin, sqrt
def haversine(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
r = 6371 # Radius of earth in kilometers. Use 3956 for miles
return c * r
```
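A quick, illustrative sanity check of the helper above (ours, not part of the original analysis); the coordinates are approximate and only used here:
```
# Rough check of haversine (illustrative; approximate coordinates assumed).
# Klagenfurt (~14.31E, 46.62N) to Villach (~13.85E, 46.61N) is roughly 35 km.
print(haversine(14.31, 46.62, 13.85, 46.61))
```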
```
from shapely.geometry import Point,MultiPoint,LineString,Polygon
```
```
import powerlaw as pl
%matplotlib inline
import matplotlib.pyplot as plt
```
```
from scipy.spatial import distance
def dist(v1,v2):
dist = distance.euclidean((v1[0],v1[1]),(v2[0],v2[1]))
return dist
```
```
from scipy import special
from scipy import stats
import pandas as pd
```
```
aggragate = pd.read_csv('final_table_ca.csv')
```
```
x_coord = np.array((aggragate['Lon'].tolist(),aggragate['Lat'].tolist())).T
y_coord = np.array((aggragate['Lon'].tolist(),aggragate['Lat'].tolist())).T
```
```
population = ((aggragate['pop'].values).reshape(111,1)).tolist()
```
```
coords = np.concatenate([x_coord,y_coord],axis=1)
```
```
n = 111
alldist = []
A = np.zeros((n,n))
A_binom = np.zeros((n,n))
count=0
for i in range(0,n):
for j in range(i+1,n):
m = (5*10**-7)*(population[i][0]*population[j][0])/haversine(x_coord[i][0], x_coord[i][1], y_coord[j][0], y_coord[j][1])**2#(dist(coords[i],coords[j]))**2
A[i][j] = m
A[j][i] = m
h = (np.random.binomial(111, 0.01503695919007753, 1))/110
A_binom[i][j] = h
A_binom[j][i] = h
```
```
Dt = np.sum(A,axis=1).tolist()
DD = np.diag(Dt)
L = A - DD
Dt_binom = np.sum(A_binom,axis=1).tolist()
DD_binom = np.diag(Dt_binom)
L_binom = A_binom - DD_binom
```
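Since L is defined as A minus the diagonal matrix of its row sums, every row of L sums to zero up to floating-point error; the short check below (ours) makes that explicit for both coupling matrices:
```
# Sanity check (illustrative): rows of L and L_binom sum to ~0, i.e. they follow
# the graph-Laplacian convention (up to sign) used in the dynamics below.
print(np.allclose(np.sum(L, axis=1), 0.0), np.allclose(np.sum(L_binom, axis=1), 0.0))
```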
```
def bins_counts(array,a,b,c):
count, bins = np.histogram(array, bins=np.logspace(a,b,c),density=1)
t =[]
for i in range(len(count)):
t.append((bins[i + 1] + bins[i])/float(2))
return t,count
```
```
d_frac = aggragate['d_frac'].tolist()
```
```
#np.random.shuffle(d_frac)
x0_cf = []
y0_cf = []
c=[]#### status parameter
a_12 = []
a_21 = []
for i in range(111):
if d_frac[i] > 0.5:
x0_cf.append(1.2)
y0_cf.append(0.4)
c.append(0.5)
a_12.append(21)
a_21.append(0.0)
if d_frac[i] <= 0.5:
x0_cf.append(0.4)
y0_cf.append(1.2)
c.append(-0.5)
a_12.append(0.0)
a_21.append(21)
aggragate['c'] = c
aggragate['x0'] = x0_cf
aggragate['y0'] = y0_cf
aggragate['a12'] = a_12
aggragate['a21'] = a_21
```
```
cc = (aggragate['c'].values).reshape(111,1)
a_12 = (aggragate['a12'].values).reshape(111,1)
a_21 = (aggragate['a21'].values).reshape(111,1)
```
```
deltat=10.**(-3.)
def fun(e,f):
l1 = np.matmul(L,e)
l5 = np.matmul(L,f)
l3 = np.matmul(L,(e*f))
return (e + deltat*(e*(1-e) + cc*e*f +l1*d1 + l3*a_12), f + deltat*(f*(1-f) - cc*e*f + l5*d2 + l3*a_21))
```
```
d1 = 0.01
d2 = 0.01
```
```
t = 140000  # total number of time steps
tiempo=np.linspace(0,t,t+1)
```
```
evolution_2x = []
evolution_2y = []
for i in range(5):
up = []
vp = []
uavg=[]
x_0 = 1.2#aggragate['x0'].tolist()
y_0 = 0.4#aggragate['y0'].tolist()
for n in range((111)):
up.append(x_0 + 0.1*x_0*(2.*np.random.rand()-1))
vp.append(y_0 + 0.1*y_0*(2.*np.random.rand()-1))
upp = np.array([up]).T
vpp = np.array([vp]).T
ui = upp
vi = vpp
#uavg= []
for j in tiempo:
u =(fun(ui,vi))[0]
v=(fun(ui,vi))[1]
ui = u
vi = v
uavg.append(np.average(ui))
evolution_2x.append(ui)
evolution_2y.append(vi)
print(i)
```
```
aggragate['xi_sim'] = np.mean(evolution_2x,axis=0)
aggragate['yi_sim'] = np.mean(evolution_2y,axis=0)
```
```
new=pd.DataFrame()
new['xi_sim'] = (aggragate.groupby('districts').apply(lambda x: np.average(x['xi_sim']/(x['xi_sim']+x['yi_sim']), weights=x['pop']))).tolist()
new['yi_sim'] = (aggragate.groupby('districts').apply(lambda x: np.average(x['yi_sim']/(x['xi_sim']+x['yi_sim']), weights=x['pop']))).tolist()
```
```
sp.stats.pearsonr(new['yi_fix_emp'].tolist(),new['s_frac'].tolist())
```
```
from matplotlib.font_manager import FontProperties  # missing import for FontProperties below
hfont = {'fontname': 'DejaVu Sans'}  # assumed: hfont is not defined in the visible cells
fig, axes = plt.subplots(1,2, figsize=(2*6.4, 4.8))
font0 = FontProperties()
font1 = font0.copy()
font1.set_weight('bold')
font1.set_size('large')
font1.set_family('sans-serif')
axes[0].text(0.5, 1.45,
'German',
verticalalignment='center', horizontalalignment='center',
color='black',fontproperties=font1,fontsize=20,transform=axes[0].transAxes, **hfont)
ka_agg_1.plot(ax=axes[0], column='xi_e_s_1', cmap='Oranges', edgecolor='black',vmin=0,vmax=1)
axes[0].set_yticklabels([])
axes[0].set_xticklabels([])
axes[0].set_yticks([])
axes[0].set_xticks([])
axes[0].spines['right'].set_visible(False)
axes[0].spines['top'].set_visible(False)
axes[0].spines['bottom'].set_visible(False)
axes[0].spines['left'].set_visible(False)
axes[0].text(0.1,0.035,r'$\rho^G_p = 0.89$',fontsize=20,transform=axes[0].transAxes, **hfont)
axes[0].text(0.05, 0.95, '(a)',
verticalalignment='center', horizontalalignment='center',
color='black',fontproperties=font1,fontsize=20,transform=axes[0].transAxes, **hfont)
#########
axes[1].text(0.5, 1.45,
'Slovenian',
verticalalignment='center', horizontalalignment='center',
color='black',fontproperties=font1,fontsize=20,transform=axes[1].transAxes, **hfont)
ka_agg_1.plot(ax=axes[1], column='yi_e_s_1', cmap='Purples', edgecolor='black',vmin=0,vmax=1)
axes[1].set_yticklabels([])
axes[1].set_xticklabels([])
axes[1].set_yticks([])
axes[1].set_xticks([])
axes[1].spines['right'].set_visible(False)
axes[1].spines['top'].set_visible(False)
axes[1].spines['bottom'].set_visible(False)
axes[1].spines['left'].set_visible(False)
axes[1].text(0.1,0.035,r'$\rho^S_p = 0.89$',fontsize=20,transform=axes[1].transAxes, **hfont)
axes[1].text(0.05, 0.95, '(b)',
verticalalignment='center', horizontalalignment='center',
color='black',fontproperties=font1,fontsize=20,transform=axes[1].transAxes, **hfont)
vmin = 0
vmax = 1
fig.tight_layout()
sm = plt.cm.ScalarMappable(cmap='Oranges', norm=plt.Normalize(vmin=vmin, vmax=vmax))
sm._A = []
sm_1 = plt.cm.ScalarMappable(cmap='Purples', norm=plt.Normalize(vmin=vmin, vmax=vmax))
sm_1._A = []
cbaxes = fig.add_axes([0.4735, 0.04, 0.012, 0.9])
cbar = fig.colorbar(sm,cax=cbaxes,orientation='vertical',ticks=[0, 0.5, 1])
cbar.ax.tick_params(labelsize=20)
cbaxes = fig.add_axes([.97, 0.04, 0.012, 0.9])
cbar = fig.colorbar(sm_1,cax=cbaxes,orientation='vertical',ticks=[0, 0.5, 1])
cbar.ax.tick_params(labelsize=20)
plt.savefig('carinthia_extra_map_1.pdf',bbox_inches='tight')
```
|
The norm of a natural number is the natural number itself. |
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CommMonoidWithZero α
p : α
hp : Prime p
s✝ : Multiset α
a : α
s : Multiset α
ih : p ∣ Multiset.prod s → ∃ a, a ∈ s ∧ p ∣ a
h : p ∣ Multiset.prod (a ::ₘ s)
⊢ p ∣ a * Multiset.prod s
[PROOFSTEP]
simpa using h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CommMonoidWithZero α
p : α
hp : Prime p
s : Multiset β
f : β → α
h : p ∣ Multiset.prod (Multiset.map f s)
⊢ ∃ a, a ∈ s ∧ p ∣ f a
[PROOFSTEP]
simpa only [exists_prop, Multiset.mem_map, exists_exists_and_eq_and] using hp.exists_mem_multiset_dvd h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CancelCommMonoidWithZero α
p : α
hp : Prime p
s : Multiset α
⊢ (∀ (r : α), r ∈ 0 → Prime r) → p ∣ Multiset.prod 0 → ∃ q, q ∈ 0 ∧ p ~ᵤ q
[PROOFSTEP]
simp [mt isUnit_iff_dvd_one.2 hp.not_unit]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CancelCommMonoidWithZero α
p : α
hp : Prime p
s✝ : Multiset α
a : α
s : Multiset α
ih : (∀ (r : α), r ∈ s → Prime r) → p ∣ Multiset.prod s → ∃ q, q ∈ s ∧ p ~ᵤ q
hs : ∀ (r : α), r ∈ a ::ₘ s → Prime r
hps : p ∣ Multiset.prod (a ::ₘ s)
⊢ ∃ q, q ∈ a ::ₘ s ∧ p ~ᵤ q
[PROOFSTEP]
rw [Multiset.prod_cons] at hps
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CancelCommMonoidWithZero α
p : α
hp : Prime p
s✝ : Multiset α
a : α
s : Multiset α
ih : (∀ (r : α), r ∈ s → Prime r) → p ∣ Multiset.prod s → ∃ q, q ∈ s ∧ p ~ᵤ q
hs : ∀ (r : α), r ∈ a ::ₘ s → Prime r
hps : p ∣ a * Multiset.prod s
⊢ ∃ q, q ∈ a ::ₘ s ∧ p ~ᵤ q
[PROOFSTEP]
cases' hp.dvd_or_dvd hps with h h
[GOAL]
case inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CancelCommMonoidWithZero α
p : α
hp : Prime p
s✝ : Multiset α
a : α
s : Multiset α
ih : (∀ (r : α), r ∈ s → Prime r) → p ∣ Multiset.prod s → ∃ q, q ∈ s ∧ p ~ᵤ q
hs : ∀ (r : α), r ∈ a ::ₘ s → Prime r
hps : p ∣ a * Multiset.prod s
h : p ∣ a
⊢ ∃ q, q ∈ a ::ₘ s ∧ p ~ᵤ q
[PROOFSTEP]
have hap := hs a (Multiset.mem_cons.2 (Or.inl rfl))
[GOAL]
case inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CancelCommMonoidWithZero α
p : α
hp : Prime p
s✝ : Multiset α
a : α
s : Multiset α
ih : (∀ (r : α), r ∈ s → Prime r) → p ∣ Multiset.prod s → ∃ q, q ∈ s ∧ p ~ᵤ q
hs : ∀ (r : α), r ∈ a ::ₘ s → Prime r
hps : p ∣ a * Multiset.prod s
h : p ∣ a
hap : Prime a
⊢ ∃ q, q ∈ a ::ₘ s ∧ p ~ᵤ q
[PROOFSTEP]
exact ⟨a, Multiset.mem_cons_self a _, hp.associated_of_dvd hap h⟩
[GOAL]
case inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CancelCommMonoidWithZero α
p : α
hp : Prime p
s✝ : Multiset α
a : α
s : Multiset α
ih : (∀ (r : α), r ∈ s → Prime r) → p ∣ Multiset.prod s → ∃ q, q ∈ s ∧ p ~ᵤ q
hs : ∀ (r : α), r ∈ a ::ₘ s → Prime r
hps : p ∣ a * Multiset.prod s
h : p ∣ Multiset.prod s
⊢ ∃ q, q ∈ a ::ₘ s ∧ p ~ᵤ q
[PROOFSTEP]
rcases ih (fun r hr => hs _ (Multiset.mem_cons.2 (Or.inr hr))) h with ⟨q, hq₁, hq₂⟩
[GOAL]
case inr.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CancelCommMonoidWithZero α
p : α
hp : Prime p
s✝ : Multiset α
a : α
s : Multiset α
ih : (∀ (r : α), r ∈ s → Prime r) → p ∣ Multiset.prod s → ∃ q, q ∈ s ∧ p ~ᵤ q
hs : ∀ (r : α), r ∈ a ::ₘ s → Prime r
hps : p ∣ a * Multiset.prod s
h : p ∣ Multiset.prod s
q : α
hq₁ : q ∈ s
hq₂ : p ~ᵤ q
⊢ ∃ q, q ∈ a ::ₘ s ∧ p ~ᵤ q
[PROOFSTEP]
exact ⟨q, Multiset.mem_cons.2 (Or.inr hq₁), hq₂⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : (a : α) → DecidablePred (Associated a)
s : Multiset α
n : α
h : ∀ (a : α), a ∈ s → Prime a
div : ∀ (a : α), a ∈ s → a ∣ n
uniq : ∀ (a : α), countp (Associated a) s ≤ 1
⊢ prod s ∣ n
[PROOFSTEP]
induction' s using Multiset.induction_on with a s induct n primes divs generalizing n
[GOAL]
case empty
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : (a : α) → DecidablePred (Associated a)
s : Multiset α
n✝ : α
h✝ : ∀ (a : α), a ∈ s → Prime a
div✝ : ∀ (a : α), a ∈ s → a ∣ n✝
uniq✝ : ∀ (a : α), countp (Associated a) s ≤ 1
n : α
h : ∀ (a : α), a ∈ 0 → Prime a
div : ∀ (a : α), a ∈ 0 → a ∣ n
uniq : ∀ (a : α), countp (Associated a) 0 ≤ 1
⊢ prod 0 ∣ n
[PROOFSTEP]
simp only [Multiset.prod_zero, one_dvd]
[GOAL]
case cons
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : (a : α) → DecidablePred (Associated a)
s✝ : Multiset α
n✝ : α
h✝ : ∀ (a : α), a ∈ s✝ → Prime a
div✝ : ∀ (a : α), a ∈ s✝ → a ∣ n✝
uniq✝ : ∀ (a : α), countp (Associated a) s✝ ≤ 1
a : α
s : Multiset α
induct :
∀ (n : α),
(∀ (a : α), a ∈ s → Prime a) → (∀ (a : α), a ∈ s → a ∣ n) → (∀ (a : α), countp (Associated a) s ≤ 1) → prod s ∣ n
n : α
h : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → Prime a_1
div : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → a_1 ∣ n
uniq : ∀ (a_1 : α), countp (Associated a_1) (a ::ₘ s) ≤ 1
⊢ prod (a ::ₘ s) ∣ n
[PROOFSTEP]
rw [Multiset.prod_cons]
[GOAL]
case cons
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : (a : α) → DecidablePred (Associated a)
s✝ : Multiset α
n✝ : α
h✝ : ∀ (a : α), a ∈ s✝ → Prime a
div✝ : ∀ (a : α), a ∈ s✝ → a ∣ n✝
uniq✝ : ∀ (a : α), countp (Associated a) s✝ ≤ 1
a : α
s : Multiset α
induct :
∀ (n : α),
(∀ (a : α), a ∈ s → Prime a) → (∀ (a : α), a ∈ s → a ∣ n) → (∀ (a : α), countp (Associated a) s ≤ 1) → prod s ∣ n
n : α
h : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → Prime a_1
div : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → a_1 ∣ n
uniq : ∀ (a_1 : α), countp (Associated a_1) (a ::ₘ s) ≤ 1
⊢ a * prod s ∣ n
[PROOFSTEP]
obtain ⟨k, rfl⟩ : a ∣ n := div a (Multiset.mem_cons_self a s)
[GOAL]
case cons.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : (a : α) → DecidablePred (Associated a)
s✝ : Multiset α
n : α
h✝ : ∀ (a : α), a ∈ s✝ → Prime a
div✝ : ∀ (a : α), a ∈ s✝ → a ∣ n
uniq✝ : ∀ (a : α), countp (Associated a) s✝ ≤ 1
a : α
s : Multiset α
induct :
∀ (n : α),
(∀ (a : α), a ∈ s → Prime a) → (∀ (a : α), a ∈ s → a ∣ n) → (∀ (a : α), countp (Associated a) s ≤ 1) → prod s ∣ n
h : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → Prime a_1
uniq : ∀ (a_1 : α), countp (Associated a_1) (a ::ₘ s) ≤ 1
k : α
div : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → a_1 ∣ a * k
⊢ a * prod s ∣ a * k
[PROOFSTEP]
apply mul_dvd_mul_left a
[GOAL]
case cons.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : (a : α) → DecidablePred (Associated a)
s✝ : Multiset α
n : α
h✝ : ∀ (a : α), a ∈ s✝ → Prime a
div✝ : ∀ (a : α), a ∈ s✝ → a ∣ n
uniq✝ : ∀ (a : α), countp (Associated a) s✝ ≤ 1
a : α
s : Multiset α
induct :
∀ (n : α),
(∀ (a : α), a ∈ s → Prime a) → (∀ (a : α), a ∈ s → a ∣ n) → (∀ (a : α), countp (Associated a) s ≤ 1) → prod s ∣ n
h : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → Prime a_1
uniq : ∀ (a_1 : α), countp (Associated a_1) (a ::ₘ s) ≤ 1
k : α
div : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → a_1 ∣ a * k
⊢ prod s ∣ k
[PROOFSTEP]
refine
induct _ (fun a ha => h a (Multiset.mem_cons_of_mem ha)) (fun b b_in_s => ?_) fun a =>
(Multiset.countp_le_of_le _ (Multiset.le_cons_self _ _)).trans (uniq a)
[GOAL]
case cons.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : (a : α) → DecidablePred (Associated a)
s✝ : Multiset α
n : α
h✝ : ∀ (a : α), a ∈ s✝ → Prime a
div✝ : ∀ (a : α), a ∈ s✝ → a ∣ n
uniq✝ : ∀ (a : α), countp (Associated a) s✝ ≤ 1
a : α
s : Multiset α
induct :
∀ (n : α),
(∀ (a : α), a ∈ s → Prime a) → (∀ (a : α), a ∈ s → a ∣ n) → (∀ (a : α), countp (Associated a) s ≤ 1) → prod s ∣ n
h : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → Prime a_1
uniq : ∀ (a_1 : α), countp (Associated a_1) (a ::ₘ s) ≤ 1
k : α
div : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → a_1 ∣ a * k
b : α
b_in_s : b ∈ s
⊢ b ∣ k
[PROOFSTEP]
have b_div_n := div b (Multiset.mem_cons_of_mem b_in_s)
[GOAL]
case cons.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : (a : α) → DecidablePred (Associated a)
s✝ : Multiset α
n : α
h✝ : ∀ (a : α), a ∈ s✝ → Prime a
div✝ : ∀ (a : α), a ∈ s✝ → a ∣ n
uniq✝ : ∀ (a : α), countp (Associated a) s✝ ≤ 1
a : α
s : Multiset α
induct :
∀ (n : α),
(∀ (a : α), a ∈ s → Prime a) → (∀ (a : α), a ∈ s → a ∣ n) → (∀ (a : α), countp (Associated a) s ≤ 1) → prod s ∣ n
h : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → Prime a_1
uniq : ∀ (a_1 : α), countp (Associated a_1) (a ::ₘ s) ≤ 1
k : α
div : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → a_1 ∣ a * k
b : α
b_in_s : b ∈ s
b_div_n : b ∣ a * k
⊢ b ∣ k
[PROOFSTEP]
have a_prime := h a (Multiset.mem_cons_self a s)
[GOAL]
case cons.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : (a : α) → DecidablePred (Associated a)
s✝ : Multiset α
n : α
h✝ : ∀ (a : α), a ∈ s✝ → Prime a
div✝ : ∀ (a : α), a ∈ s✝ → a ∣ n
uniq✝ : ∀ (a : α), countp (Associated a) s✝ ≤ 1
a : α
s : Multiset α
induct :
∀ (n : α),
(∀ (a : α), a ∈ s → Prime a) → (∀ (a : α), a ∈ s → a ∣ n) → (∀ (a : α), countp (Associated a) s ≤ 1) → prod s ∣ n
h : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → Prime a_1
uniq : ∀ (a_1 : α), countp (Associated a_1) (a ::ₘ s) ≤ 1
k : α
div : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → a_1 ∣ a * k
b : α
b_in_s : b ∈ s
b_div_n : b ∣ a * k
a_prime : Prime a
⊢ b ∣ k
[PROOFSTEP]
have b_prime := h b (Multiset.mem_cons_of_mem b_in_s)
[GOAL]
case cons.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : (a : α) → DecidablePred (Associated a)
s✝ : Multiset α
n : α
h✝ : ∀ (a : α), a ∈ s✝ → Prime a
div✝ : ∀ (a : α), a ∈ s✝ → a ∣ n
uniq✝ : ∀ (a : α), countp (Associated a) s✝ ≤ 1
a : α
s : Multiset α
induct :
∀ (n : α),
(∀ (a : α), a ∈ s → Prime a) → (∀ (a : α), a ∈ s → a ∣ n) → (∀ (a : α), countp (Associated a) s ≤ 1) → prod s ∣ n
h : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → Prime a_1
uniq : ∀ (a_1 : α), countp (Associated a_1) (a ::ₘ s) ≤ 1
k : α
div : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → a_1 ∣ a * k
b : α
b_in_s : b ∈ s
b_div_n : b ∣ a * k
a_prime : Prime a
b_prime : Prime b
⊢ b ∣ k
[PROOFSTEP]
refine' (b_prime.dvd_or_dvd b_div_n).resolve_left fun b_div_a => _
[GOAL]
case cons.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : (a : α) → DecidablePred (Associated a)
s✝ : Multiset α
n : α
h✝ : ∀ (a : α), a ∈ s✝ → Prime a
div✝ : ∀ (a : α), a ∈ s✝ → a ∣ n
uniq✝ : ∀ (a : α), countp (Associated a) s✝ ≤ 1
a : α
s : Multiset α
induct :
∀ (n : α),
(∀ (a : α), a ∈ s → Prime a) → (∀ (a : α), a ∈ s → a ∣ n) → (∀ (a : α), countp (Associated a) s ≤ 1) → prod s ∣ n
h : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → Prime a_1
uniq : ∀ (a_1 : α), countp (Associated a_1) (a ::ₘ s) ≤ 1
k : α
div : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → a_1 ∣ a * k
b : α
b_in_s : b ∈ s
b_div_n : b ∣ a * k
a_prime : Prime a
b_prime : Prime b
b_div_a : b ∣ a
⊢ False
[PROOFSTEP]
have assoc := b_prime.associated_of_dvd a_prime b_div_a
[GOAL]
case cons.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : (a : α) → DecidablePred (Associated a)
s✝ : Multiset α
n : α
h✝ : ∀ (a : α), a ∈ s✝ → Prime a
div✝ : ∀ (a : α), a ∈ s✝ → a ∣ n
uniq✝ : ∀ (a : α), countp (Associated a) s✝ ≤ 1
a : α
s : Multiset α
induct :
∀ (n : α),
(∀ (a : α), a ∈ s → Prime a) → (∀ (a : α), a ∈ s → a ∣ n) → (∀ (a : α), countp (Associated a) s ≤ 1) → prod s ∣ n
h : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → Prime a_1
uniq : ∀ (a_1 : α), countp (Associated a_1) (a ::ₘ s) ≤ 1
k : α
div : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → a_1 ∣ a * k
b : α
b_in_s : b ∈ s
b_div_n : b ∣ a * k
a_prime : Prime a
b_prime : Prime b
b_div_a : b ∣ a
assoc : b ~ᵤ a
⊢ False
[PROOFSTEP]
have := uniq a
[GOAL]
case cons.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : (a : α) → DecidablePred (Associated a)
s✝ : Multiset α
n : α
h✝ : ∀ (a : α), a ∈ s✝ → Prime a
div✝ : ∀ (a : α), a ∈ s✝ → a ∣ n
uniq✝ : ∀ (a : α), countp (Associated a) s✝ ≤ 1
a : α
s : Multiset α
induct :
∀ (n : α),
(∀ (a : α), a ∈ s → Prime a) → (∀ (a : α), a ∈ s → a ∣ n) → (∀ (a : α), countp (Associated a) s ≤ 1) → prod s ∣ n
h : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → Prime a_1
uniq : ∀ (a_1 : α), countp (Associated a_1) (a ::ₘ s) ≤ 1
k : α
div : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → a_1 ∣ a * k
b : α
b_in_s : b ∈ s
b_div_n : b ∣ a * k
a_prime : Prime a
b_prime : Prime b
b_div_a : b ∣ a
assoc : b ~ᵤ a
this : countp (Associated a) (a ::ₘ s) ≤ 1
⊢ False
[PROOFSTEP]
rw [Multiset.countp_cons_of_pos _ (Associated.refl _), Nat.succ_le_succ_iff, ← not_lt, Multiset.countp_pos] at this
[GOAL]
case cons.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : (a : α) → DecidablePred (Associated a)
s✝ : Multiset α
n : α
h✝ : ∀ (a : α), a ∈ s✝ → Prime a
div✝ : ∀ (a : α), a ∈ s✝ → a ∣ n
uniq✝ : ∀ (a : α), countp (Associated a) s✝ ≤ 1
a : α
s : Multiset α
induct :
∀ (n : α),
(∀ (a : α), a ∈ s → Prime a) → (∀ (a : α), a ∈ s → a ∣ n) → (∀ (a : α), countp (Associated a) s ≤ 1) → prod s ∣ n
h : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → Prime a_1
uniq : ∀ (a_1 : α), countp (Associated a_1) (a ::ₘ s) ≤ 1
k : α
div : ∀ (a_1 : α), a_1 ∈ a ::ₘ s → a_1 ∣ a * k
b : α
b_in_s : b ∈ s
b_div_n : b ∣ a * k
a_prime : Prime a
b_prime : Prime b
b_div_a : b ∣ a
assoc : b ~ᵤ a
this : ¬∃ a_1, a_1 ∈ s ∧ a ~ᵤ a_1
⊢ False
[PROOFSTEP]
exact this ⟨b, b_in_s, assoc.symm⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : Unique αˣ
s : Finset α
n : α
h : ∀ (a : α), a ∈ s → Prime a
div : ∀ (a : α), a ∈ s → a ∣ n
⊢ ∏ p in s, p ∣ n
[PROOFSTEP]
classical exact
Multiset.prod_primes_dvd n (by simpa only [Multiset.map_id', Finset.mem_def] using h)
(by simpa only [Multiset.map_id', Finset.mem_def] using div)
(by
      -- Porting note: was
-- `simp only [Multiset.map_id', associated_eq_eq, Multiset.countp_eq_card_filter, ←
-- Multiset.count_eq_card_filter_eq, ← Multiset.nodup_iff_count_le_one, s.nodup]`
intro a
simp only [Multiset.map_id', associated_eq_eq, Multiset.countp_eq_card_filter]
change Multiset.card (Multiset.filter (fun b => a = b) s.val) ≤ 1
apply le_of_eq_of_le (Multiset.count_eq_card_filter_eq _ _).symm
apply Multiset.nodup_iff_count_le_one.mp
exact s.nodup)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : Unique αˣ
s : Finset α
n : α
h : ∀ (a : α), a ∈ s → Prime a
div : ∀ (a : α), a ∈ s → a ∣ n
⊢ ∏ p in s, p ∣ n
[PROOFSTEP]
exact
Multiset.prod_primes_dvd n (by simpa only [Multiset.map_id', Finset.mem_def] using h)
(by simpa only [Multiset.map_id', Finset.mem_def] using div)
(by
      -- Porting note: was
-- `simp only [Multiset.map_id', associated_eq_eq, Multiset.countp_eq_card_filter, ←
-- Multiset.count_eq_card_filter_eq, ← Multiset.nodup_iff_count_le_one, s.nodup]`
intro a
simp only [Multiset.map_id', associated_eq_eq, Multiset.countp_eq_card_filter]
change Multiset.card (Multiset.filter (fun b => a = b) s.val) ≤ 1
apply le_of_eq_of_le (Multiset.count_eq_card_filter_eq _ _).symm
apply Multiset.nodup_iff_count_le_one.mp
exact s.nodup)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : Unique αˣ
s : Finset α
n : α
h : ∀ (a : α), a ∈ s → Prime a
div : ∀ (a : α), a ∈ s → a ∣ n
⊢ ∀ (a : α), a ∈ Multiset.map (fun p => p) s.val → Prime a
[PROOFSTEP]
simpa only [Multiset.map_id', Finset.mem_def] using h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : Unique αˣ
s : Finset α
n : α
h : ∀ (a : α), a ∈ s → Prime a
div : ∀ (a : α), a ∈ s → a ∣ n
⊢ ∀ (a : α), a ∈ Multiset.map (fun p => p) s.val → a ∣ n
[PROOFSTEP]
simpa only [Multiset.map_id', Finset.mem_def] using div
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : Unique αˣ
s : Finset α
n : α
h : ∀ (a : α), a ∈ s → Prime a
div : ∀ (a : α), a ∈ s → a ∣ n
⊢ ∀ (a : α), Multiset.countp (Associated a) (Multiset.map (fun p => p) s.val) ≤ 1
[PROOFSTEP]
intro a
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : Unique αˣ
s : Finset α
n : α
h : ∀ (a : α), a ∈ s → Prime a
div : ∀ (a : α), a ∈ s → a ∣ n
a : α
⊢ Multiset.countp (Associated a) (Multiset.map (fun p => p) s.val) ≤ 1
[PROOFSTEP]
simp only [Multiset.map_id', associated_eq_eq, Multiset.countp_eq_card_filter]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : Unique αˣ
s : Finset α
n : α
h : ∀ (a : α), a ∈ s → Prime a
div : ∀ (a : α), a ∈ s → a ∣ n
a : α
⊢ ↑Multiset.card (Multiset.filter (Eq a) s.val) ≤ 1
[PROOFSTEP]
change Multiset.card (Multiset.filter (fun b => a = b) s.val) ≤ 1
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : Unique αˣ
s : Finset α
n : α
h : ∀ (a : α), a ∈ s → Prime a
div : ∀ (a : α), a ∈ s → a ∣ n
a : α
⊢ ↑Multiset.card (Multiset.filter (fun b => a = b) s.val) ≤ 1
[PROOFSTEP]
apply le_of_eq_of_le (Multiset.count_eq_card_filter_eq _ _).symm
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : Unique αˣ
s : Finset α
n : α
h : ∀ (a : α), a ∈ s → Prime a
div : ∀ (a : α), a ∈ s → a ∣ n
a : α
⊢ Multiset.count a s.val ≤ 1
[PROOFSTEP]
apply Multiset.nodup_iff_count_le_one.mp
[GOAL]
case a
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : CancelCommMonoidWithZero α
inst✝ : Unique αˣ
s : Finset α
n : α
h : ∀ (a : α), a ∈ s → Prime a
div : ∀ (a : α), a ∈ s → a ∣ n
a : α
⊢ Multiset.Nodup s.val
[PROOFSTEP]
exact s.nodup
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CommMonoid α
p : Multiset α
⊢ Multiset.prod (Multiset.map Associates.mk 0) = Associates.mk (Multiset.prod 0)
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CommMonoid α
p : Multiset α
a : α
s : Multiset α
ih : Multiset.prod (Multiset.map Associates.mk s) = Associates.mk (Multiset.prod s)
⊢ Multiset.prod (Multiset.map Associates.mk (a ::ₘ s)) = Associates.mk (Multiset.prod (a ::ₘ s))
[PROOFSTEP]
simp [ih, Associates.mk_mul_mk]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CommMonoid α
p : Finset β
f : β → α
⊢ ∏ i in p, Associates.mk (f i) = Associates.mk (∏ i in p, f i)
[PROOFSTEP]
have : (fun i => Associates.mk (f i)) = Associates.mk ∘ f := funext <| fun x => Function.comp_apply
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CommMonoid α
p : Finset β
f : β → α
this : (fun i => Associates.mk (f i)) = Associates.mk ∘ f
⊢ ∏ i in p, Associates.mk (f i) = Associates.mk (∏ i in p, f i)
[PROOFSTEP]
rw [Finset.prod_eq_multiset_prod, this, ← Multiset.map_map, prod_mk, ← Finset.prod_eq_multiset_prod]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CommMonoid α
p q : Multiset α
⊢ Multiset.Rel Associated p q ↔ Multiset.map Associates.mk p = Multiset.map Associates.mk q
[PROOFSTEP]
rw [← Multiset.rel_eq, Multiset.rel_map]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CommMonoid α
p q : Multiset α
⊢ Multiset.Rel Associated p q ↔ Multiset.Rel (fun a b => Associates.mk a = Associates.mk b) p q
[PROOFSTEP]
simp only [mk_eq_mk_iff_associated]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CommMonoid α
p : Multiset (Associates α)
⊢ Multiset.prod 0 = 1 ↔ ∀ (a : Associates α), a ∈ 0 → a = 1
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CommMonoid α
p : Multiset (Associates α)
⊢ ∀ ⦃a : Associates α⦄ {s : Multiset (Associates α)},
(Multiset.prod s = 1 ↔ ∀ (a : Associates α), a ∈ s → a = 1) →
(Multiset.prod (a ::ₘ s) = 1 ↔ ∀ (a_2 : Associates α), a_2 ∈ a ::ₘ s → a_2 = 1)
[PROOFSTEP]
simp (config := { contextual := true }) [mul_eq_one_iff, or_imp, forall_and]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CommMonoid α
p q : Multiset (Associates α)
h : p ≤ q
⊢ Multiset.prod p ≤ Multiset.prod q
[PROOFSTEP]
haveI := Classical.decEq (Associates α)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CommMonoid α
p q : Multiset (Associates α)
h : p ≤ q
this : DecidableEq (Associates α)
⊢ Multiset.prod p ≤ Multiset.prod q
[PROOFSTEP]
haveI := Classical.decEq α
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CommMonoid α
p q : Multiset (Associates α)
h : p ≤ q
this✝ : DecidableEq (Associates α)
this : DecidableEq α
⊢ Multiset.prod p ≤ Multiset.prod q
[PROOFSTEP]
suffices p.prod ≤ (p + (q - p)).prod by rwa [add_tsub_cancel_of_le h] at this
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CommMonoid α
p q : Multiset (Associates α)
h : p ≤ q
this✝¹ : DecidableEq (Associates α)
this✝ : DecidableEq α
this : Multiset.prod p ≤ Multiset.prod (p + (q - p))
⊢ Multiset.prod p ≤ Multiset.prod q
[PROOFSTEP]
rwa [add_tsub_cancel_of_le h] at this
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CommMonoid α
p q : Multiset (Associates α)
h : p ≤ q
this✝ : DecidableEq (Associates α)
this : DecidableEq α
⊢ Multiset.prod p ≤ Multiset.prod (p + (q - p))
[PROOFSTEP]
suffices p.prod * 1 ≤ p.prod * (q - p).prod by simpa
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CommMonoid α
p q : Multiset (Associates α)
h : p ≤ q
this✝¹ : DecidableEq (Associates α)
this✝ : DecidableEq α
this : Multiset.prod p * 1 ≤ Multiset.prod p * Multiset.prod (q - p)
⊢ Multiset.prod p ≤ Multiset.prod (p + (q - p))
[PROOFSTEP]
simpa
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CommMonoid α
p q : Multiset (Associates α)
h : p ≤ q
this✝ : DecidableEq (Associates α)
this : DecidableEq α
⊢ Multiset.prod p * 1 ≤ Multiset.prod p * Multiset.prod (q - p)
[PROOFSTEP]
exact mul_mono (le_refl p.prod) one_le
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : CancelCommMonoidWithZero α
s✝ : Multiset (Associates α)
p : Associates α
hp : Prime p
a : Associates α
s : Multiset (Associates α)
ih : p ≤ Multiset.prod s → ∃ a, a ∈ s ∧ p ≤ a
h : p ≤ Multiset.prod (a ::ₘ s)
⊢ p ≤ a * Multiset.prod s
[PROOFSTEP]
simpa using h
|
using ..ChemistryFeaturization.AbstractType: AbstractFeatureDescriptor
export AbstractAtomFeatureDescriptor,
AbstractPairFeatureDescriptor, AbstractEnvironmentFeatureDescriptor
abstract type AbstractAtomFeatureDescriptor <: AbstractFeatureDescriptor end
abstract type AbstractPairFeatureDescriptor <: AbstractFeatureDescriptor end
abstract type AbstractEnvironmentFeatureDescriptor <: AbstractFeatureDescriptor end
|
import set_theory.cardinal
import linear_algebra.dimension
universes u v
namespace finsupp
open finset
--finsupp or fintype
def equiv_fun {α : Type u} {β : Type v} [decidable_eq α] [has_zero α] [h : fintype β] :
(β →₀ α) ≃ (β → α) :=
{ to_fun := finsupp.to_fun,
inv_fun := λ f, finsupp.mk (finset.filter (λ a, f a ≠ 0) h.elems) f
(assume a, by rw[mem_filter]; exact and_iff_right (fintype.complete a)),
left_inv := λ f, finsupp.ext (λ _, rfl),
right_inv := λ f, rfl }
variables {α : Type u} {β : Type v}
variables [decidable_eq α]
variables [decidable_eq β] [add_comm_group β]
variable (s : finset α)
--with map_domain
lemma map_domain_apply {α₁ α₂ : Type*} [decidable_eq α₁] [decidable_eq α₂]
(v : α₁ → α₂) (f : α₁ →₀ β) (h : function.injective v) {a : α₁} :
(map_domain v f) (v a) = f a := show (f.sum $ λ x, single (v x)) (v a) = f a,
begin
rw[←sum_single f],
simp,
apply sum_congr, refl,
intros,
simp[single_apply, function.injective.eq_iff h],
congr
end
--subtype_domain
lemma subtype_domain_left_inv (p : α → Prop) [d : decidable_pred p] (f : α →₀ β) (h : ∀ a ∈ f.support, p a) :
map_domain subtype.val (subtype_domain p f) = f :=
finsupp.ext $ λ a, match d a with
| is_true (hp : p a) := by rw[←subtype.coe_mk _ hp];
exact map_domain_apply _ _ subtype.val_injective
| is_false (hp : ¬p a) :=
have a ∉ f.support, from mt (h a) hp,
have h0 : f a = 0, from of_not_not $ mt ((f.mem_support_to_fun a).mpr) this,
begin
rw[h0],
apply (not_mem_support_iff).mp,
apply mt (mem_of_subset map_domain_support),
simp,
assume x _ hfx hxa,
exact absurd h0 (hxa ▸ hfx)
end
end
--subtype domain
lemma subtype_domain_right_inv (p : α → Prop) [decidable_pred p] (f : subtype p →₀ β) :
subtype_domain p (map_domain subtype.val f) = f :=
finsupp.ext $ λ a, map_domain_apply _ _ (subtype.val_injective)
--lc ?
def equiv_lc [ring α] [module α β] {s : set β} [decidable_pred s] :
(s →₀ α) ≃ lc.supported s :=
{ to_fun := λ f, ⟨map_domain subtype.val f,
assume a h,
have h0 : a ∈ image _ _, from mem_of_subset map_domain_support h,
let ⟨ap, _, hs⟩ := mem_image.mp h0 in hs ▸ ap.property⟩,
inv_fun := (finsupp.subtype_domain s) ∘ subtype.val,
left_inv := subtype_domain_right_inv _,
right_inv := λ f, subtype.eq $ subtype_domain_left_inv _ f.val f.property }
end finsupp
namespace module
variables {α : Type u} {β : Type v}
variables [ring α] [decidable_eq α]
variables [add_comm_group β] [module α β] [decidable_eq β]
variables {b : set β}
include α β
--basis.lean
noncomputable def equiv_finsupp_basis [decidable_pred b] (h : is_basis b) : β ≃ (b →₀ α) :=
calc β ≃ lc.supported b : (module_equiv_lc h).to_equiv
... ≃ (b →₀ α) : equiv.symm finsupp.equiv_lc
--basis.lean
noncomputable def equiv_fun_basis [decidable_pred b] [fintype b] (h : is_basis b) : β ≃ (b → α) :=
calc β ≃ (b →₀ α) : equiv_finsupp_basis h
... ≃ (b → α) : finsupp.equiv_fun
end module
namespace vector_space
open fintype module
variables (α : Type u) (β : Type v)
variables [discrete_field α] [fintype α]
variables [add_comm_group β] [fintype β]
variables [vector_space α β]
--vector_space
lemma card_fin [deβ : decidable_eq β] : ∃ n : ℕ, card β = (card α) ^ n :=
let ⟨b, hb⟩ := exists_is_basis β in
begin
haveI : fintype b := set.finite.fintype (set.finite.of_fintype b),
haveI : decidable_pred b := set.decidable_mem_of_fintype b,
exact ⟨card b,
calc card β = card (b → α) : card_congr (equiv_fun_basis hb)
... = card α ^ card b : card_fun⟩
end
end vector_space
|
Formal statement is: lemma tendsto_inverse_0_at_top: "LIM x F. f x :> at_top \<Longrightarrow> ((\<lambda>x. inverse (f x) :: real) \<longlongrightarrow> 0) F" Informal statement is: If $f(x)$ tends to infinity, then $1/f(x)$ tends to zero. |
lemma lim_infinity_imp_sequentially: "(f \<longlongrightarrow> l) at_infinity \<Longrightarrow> ((\<lambda>n. f(n)) \<longlongrightarrow> l) sequentially" |
[STATEMENT]
lemma lset_conv_lnth: "lset xs = {lnth xs n|n. enat n < llength xs}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lset xs = {lnth xs n |n. enat n < llength xs}
[PROOF STEP]
by(auto simp add: in_lset_conv_lnth) |
section \<open> Non-SI Units Accepted for SI use \<close>
theory SI_Accepted
imports SI_Derived
begin
definition [si_def, si_eq]: "minute = 60 *\<^sub>Q second"
definition [si_def, si_eq]: "hour = 60 *\<^sub>Q minute"
definition [si_def, si_eq]: "day = 24 *\<^sub>Q hour"
definition [si_def, si_eq]: "astronomical_unit = 149597870700 *\<^sub>Q metre"
definition degree :: "'a::real_field[L/L]" where
[si_def, si_eq]: "degree = (2\<cdot>(of_real pi) / 180) *\<^sub>Q radian"
abbreviation degrees ("_\<degree>" [999] 999) where "n\<degree> \<equiv> n *\<^sub>Q degree"
definition [si_def, si_eq]: "litre = 1/1000 *\<^sub>Q metre\<^sup>\<three>"
definition [si_def, si_eq]: "tonne = 10^3 *\<^sub>Q kilogram"
definition [si_def, si_eq]: "dalton = 1.66053906660 * (1 / 10^27) *\<^sub>Q kilogram"
subsection \<open> Example Unit Equations \<close>
lemma "1 *\<^sub>Q hour = 3600 *\<^sub>Q second"
by (si_simp)
lemma "watt \<^bold>\<cdot> hour \<cong>\<^sub>Q 3600 *\<^sub>Q joule" by (si_calc)
lemma "25 *\<^sub>Q metre \<^bold>/ second = 90 *\<^sub>Q (kilo *\<^sub>Q metre) \<^bold>/ hour"
by (si_calc)
end |
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.solinas64_2e416m2e208m1_8limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition carry :
{ carry : feBW_loose -> feBW_tight
| forall a, phiBW_tight (carry a) = (phiBW_loose a) }.
Proof.
Set Ltac Profiling.
Time synthesize_carry ().
Show Ltac Profile.
Time Defined.
Print Assumptions carry.
|
function p = jaccard_similarity(A,B)
%% jaccard similarity
% This function calculates the Jaccard similarity index of the input arrays A
% and B. The index is (number of entries in both sets) / (number of entries in
% either set) * 100; the higher the percentage, the more similar the two arrays.
% Each input array is first reduced to its unique entries, and the number of
% entries common to the two reduced arrays is then counted by comparing them.
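% Worked example (illustrative): A = [1 2 2 3] and B = [2 3 4] reduce to the
% unique entries {1,2,3} and {2,3,4}; common_number = 2,
% total_number = 3 + 3 - 2 = 4, so p = (2/4)*100 = 50.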
modified_A = unique(A);
modified_B = unique(B);
length_mA = length(modified_A);
length_mB = length(modified_B);
common_number = 0; %initialize the number of common entries
if length_mA <= length_mB
X = modified_A;
Y = modified_B;
else
X = modified_B;
Y = modified_A;
end
for i = 1:length(X)
for j = 1:length(Y)
if X(i) == Y(j)
common_number = common_number + 1;
end
end
end
total_number = length_mA + length_mB - common_number;
p = (common_number/total_number)*100;
end
|
Should I stop seeing my counsellor?
Hi Sam. I'm in a bit of a dilemma. So basically I'm going through a tough time, and I'm especially worried about my Dad, who suffers from a mental illness. I have my mum at home, but I see a counsellor at school, and when I talk about things and my dad it makes me feel good to get it off my chest and tell someone how I feel. But the thing is she is a bit judgmental. She always focuses on how my Dad is feeling and not my feelings, and she talks about him, but she has never even met him so she doesn't really know much. Some of the things that she says make me feel like she is judging him and it makes me really uncomfortable. The school recommends that I go and see her regularly, and I do need someone to talk to, but I don't think she is right for me. Do you have any advice?
It can be hard when we feel family members are being judged by anyone, especially someone who is meant to be helping us. Finding someone to talk to who can make us feel comfortable to share our issues with can really make a difference.
Counselling sessions are for you to talk about the things that concern you and should not focus on others. Your feelings are important and should be the main focus of each session.
Sometimes it can be hard to connect with a counsellor and this may be one of those times. It is okay to tell her how you feel and give her a chance to repair the relationship between you. Most counsellors will openly accept this feedback and try to make things work for you.
If you find you still cannot work with her, then try talking to the school about how you feel. You have the right to get things off your chest and to feel heard. Not getting on well with this counsellor doesn't mean you should stop counselling entirely.
One way that may make you feel comfortable is to talk to your doctor about counselling, as most surgeries have a counsellor for patients to use. Your doctor can refer you for talking therapy that is free on the NHS, although this will only be available if the doctor thinks it will help.
Often talking to people of the same age can help. The ChildLine message boards have posts from people who are experiencing similar things. You may even want to discuss your feelings with a ChildLine counsellor. |
Numerous artists worked on the series as well, such as John Ridgway (the original series artist), Simon Bisley, Mark Buckingham, Richard Corben, Steve Dillon, Marcelo <unk>, Jock, David Lloyd, Leonardo Manco, and Sean Phillips. Cover artists included Dave McKean (who designed the first run of the series' covers), Tim Bradstreet (who designed the most), Glenn Fabry, Kent Williams, David Lloyd, and Sean Phillips.
|
"""
Package functions for interacting with Travis.
$(EXPORTS)
"""
module Travis
using Compat, DocStringExtensions
import Compat.Pkg
export genkeys
import Compat.LibGit2.GITHUB_REGEX
"""
$(SIGNATURES)
Generate ssh keys for package `package` to automatically deploy docs from Travis to GitHub
pages. `package` can be either the name of a package or a path. Providing a path allows keys
to be generated for non-packages or packages that are not found in the Julia `LOAD_PATH`.
Use the `remote` keyword to specify the user and repository values.
This function requires the following command lines programs to be installed:
- `which`
- `git`
- `travis`
- `ssh-keygen`
# Examples
```jlcon
julia> using Documenter
julia> Travis.genkeys("MyPackageName")
[ ... output ... ]
julia> Travis.genkeys("MyPackageName", remote="organization")
[ ... output ... ]
julia> Travis.genkeys("/path/to/target/directory")
[ ... output ... ]
```
"""
function genkeys(package; remote="origin")
# Error checking. Do the required programs exist?
success(`which which`) || error("'which' not found.")
success(`which git`) || error("'git' not found.")
success(`which ssh-keygen`) || error("'ssh-keygen' not found.")
directory = "docs"
filename = ".documenter"
path = isdir(package) ? package : Pkg.dir(package, directory)
isdir(path) || error("`$path` not found. Provide a package name or directory.")
cd(path) do
# Check for old '$filename.enc' and terminate.
isfile("$filename.enc") &&
error("$package already has an ssh key. Remove it and try again.")
# Are we in a git repo?
        success(`git status`) || error("'Travis.genkeys' only works with git repositories.")
# Find the GitHub repo org and name.
user, repo =
let r = readchomp(`git config --get remote.$remote.url`)
m = match(GITHUB_REGEX, r)
m === nothing && error("no remote repo named '$remote' found.")
m[2], m[3]
end
# Generate the ssh key pair.
        success(`ssh-keygen -N "" -f $filename`) || error("failed to generate ssh key pair.")
# Prompt user to add public key to github then remove the public key.
let url = "https://github.com/$user/$repo/settings/keys"
Compat.@info("add the public key below to $url with read/write access:")
println("\n", read("$filename.pub", String))
rm("$filename.pub")
end
# Base64 encode the private key and prompt user to add it to travis. The key is
# *not* encoded for the sake of security, but instead to make it easier to
# copy/paste it over to travis without having to worry about whitespace.
let url = "https://travis-ci.org/$user/$repo/settings"
Compat.@info("add a secure environment variable named 'DOCUMENTER_KEY' to $url with value:")
println("\n", base64encode(read(".documenter", String)), "\n")
rm(filename)
end
end
end
end # module
|
import pickle
import tensorflow as tf
import numpy as np
import tensorflow_addons as tf_ad
from program.utils.write_output_file import format_result
from program.data_process.data_preprocessor import GeneralDataPreprocessor
from program.abstracts.abstract_ner_predictor import NerPredictor
from transformers import BertTokenizer, TFBertModel
from dataclasses import dataclass
@dataclass
class BertBilstmCrfPredictor(NerPredictor):
def __post_init__(self):
bert_model_name = [
"hfl/chinese-bert-wwm",
"hfl/chinese-bert-wwm-ext",
"hfl/chinese-roberta-wwm-ext",
"chinese-roberta-wwm-ext-large",
]
self.tokenizer = BertTokenizer.from_pretrained(bert_model_name[0])
self.bert_model = TFBertModel.from_pretrained(bert_model_name[0], from_pt=True)
test_X_path = self.model_data_path + "test_X.pkl"
test_mapping_path = self.model_data_path + "test_mapping.pkl"
id2tag_path = self.model_data_path + "id2tag.pkl"
        # store on self: predict() iterates self.test_X below
        self.test_X, self.test_mapping = GeneralDataPreprocessor.loadTestArrays(
test_X_path, test_mapping_path
)
with open(id2tag_path, "rb") as f:
self.id2tag = pickle.load(f)
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
ckpt.restore(tf.train.latest_checkpoint(self.checkpoint_path))
def predict_sentence(self, sentence):
# dataset = encode sentence
# = [[1445 33 1878 826 1949 1510 112]]
dataset = self.tokenizer(
sentence,
add_special_tokens=False,
return_token_type_ids=False,
is_split_into_words=True,
padding=True,
)
dataset = tf.data.Dataset.from_tensors(dict(dataset)).batch(1)
# logits = (1, 7, 28) = (sentence, words, predict_distrib)
# text_lens = [7]
logits, text_lens = self.model.predict(dataset)
paths = []
logits = logits.squeeze()[np.newaxis, :]
text_lens = [sum(text_lens)]
for logit, text_len in zip(logits, text_lens):
viterbi_path, _ = tf_ad.text.viterbi_decode(
logit[:text_len], self.model.transition_params
)
paths.append(viterbi_path)
# paths[0] = tag in sentence
# = [18, 19, 19, 1, 26, 27, 1]
# result = ['B-name', 'I-name', 'I-name', 'O', 'B-time', 'I-time', 'O']
result = [self.id2tag[id] for id in paths[0]]
# entities_result =
# [{'begin': 0, 'end': 3, 'words': '賈伯斯', 'type': 'name'},
# {'begin': 4, 'end': 6, 'words': '七號', 'type': 'time'}]
entities_result = format_result(list(sentence), result)
return entities_result
def predict(self):
# restore model
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
ckpt.restore(tf.train.latest_checkpoint(self.checkpoint_path))
article_id = 0
counter = 0
results = []
result = []
for testset in self.test_X:
prediction = self.predict_sentence(testset)
# predict_pos + counter
if prediction:
for pred in prediction:
pred["begin"] += counter
pred["end"] += counter
result.append(pred)
counter += len(testset)
if counter == self.test_mapping[article_id]:
results.append(result)
article_id += 1
counter = 0
result = []
self.results = results
    def output(self):
        # Note: unlike predict(), this method expects self.prediction to hold raw
        # per-batch tag-id sequences (including the [CLS]/[SEP] positions) and
        # self.test_mapping to hold the article texts themselves.
        output = []
article_id = 0
start_batch = 0
end_batch = 0
for article in self.test_mapping:
start_batch = end_batch
end_batch += (len(article) // self.max_sentence_length) + 1
pos_counter = 0
entity_type = None
start_pos = None
end_pos = None
for preds in self.prediction[start_batch:end_batch]:
# get rid of [CLS], [SEP] in common batches
# exceptions only occur in last batches, no matters
preds = preds[1:-1]
for i, pred in enumerate(preds):
if self.id2tag[pred][0] == "B":
start_pos = pos_counter
entity_type = self.id2tag[pred][2:] # remove "B-"
elif self.id2tag[pred][0] == "I":
end_pos = pos_counter
elif (
self.id2tag[pred][0] == "O" or i + 1 == self.max_sentence_length
):
if entity_type:
entity_name = article[start_pos : (end_pos + 1)]
output.append(
(
article_id,
start_pos,
end_pos,
entity_name,
entity_type,
)
)
entity_type = None
                    pos_counter += 1
            # advance to the next article once all of its batches are processed
            article_id += 1
        return output
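
# Hypothetical usage sketch (not part of the original file). The constructor
# fields used here (model, optimizer, checkpoint_path, model_data_path,
# max_sentence_length) are assumptions about what the NerPredictor base
# dataclass provides; adjust the names to match the actual base class.
#
#   predictor = BertBilstmCrfPredictor(
#       model=bilstm_crf_model,               # exposes predict() and transition_params
#       optimizer=tf.keras.optimizers.Adam(),
#       checkpoint_path="checkpoints/",
#       model_data_path="model_data/",
#       max_sentence_length=128,
#   )
#   entities = predictor.predict_sentence(list("賈伯斯在七號發布財報"))
#   # -> e.g. [{'begin': 0, 'end': 3, 'words': '賈伯斯', 'type': 'name'}, ...]
#   predictor.predict()          # tags the whole test set loaded in __post_init__
#   print(predictor.results[0])  # entity dicts for the first article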
|
#! -*- coding:utf-8 -*-
import json
from tqdm import tqdm
import codecs
import numpy as np
RANDOM_SEED = 2019
rel_set = set()
train_data = []
with open('train.json') as f:
for l in tqdm(f):
a = json.loads(l)
if not a['relations']:
continue
line = {
'text': a['sentext'].lstrip('\"').strip('\r\n').rstrip('\"'),
'triple_list': [(i['em1'], i['rtext'], i['em2']) for i in a['relations'] if i['rtext'] != 'None']
}
if not line['triple_list']:
continue
train_data.append(line)
for rm in a['relations']:
if rm['rtext'] != 'None':
rel_set.add(rm['rtext'])
id2rel = {i:j for i,j in enumerate(sorted(rel_set))}
rel2id = {j:i for i,j in id2rel.items()}
with codecs.open('rel2id.json', 'w', encoding='utf-8') as f:
json.dump([id2rel, rel2id], f, indent=4, ensure_ascii=False)
train_len = len(train_data)
random_order = list(range(train_len))
np.random.seed(RANDOM_SEED)
np.random.shuffle(random_order)
dev_data = [train_data[i] for i in random_order[:int(0.005 * train_len)]]
train_data = [train_data[i] for i in random_order[int(0.005 * train_len):]]
with codecs.open('train_triples.json', 'w', encoding='utf-8') as f:
json.dump(train_data, f, indent=4, ensure_ascii=False)
with codecs.open('dev_triples.json', 'w', encoding='utf-8') as f:
json.dump(dev_data, f, indent=4, ensure_ascii=False)
test_data = []
with open('test.json') as f:
for l in tqdm(f):
a = json.loads(l)
if not a['relations']:
continue
line = {
'text': a['sentext'].lstrip('\"').strip('\r\n').rstrip('\"'),
'triple_list': [(i['em1'], i['rtext'], i['em2']) for i in a['relations'] if i['rtext'] != 'None']
}
if not line['triple_list']:
continue
test_data.append(line)
with codecs.open('test_triples.json', 'w', encoding='utf-8') as f:
json.dump(test_data, f, indent=4, ensure_ascii=False)
|
#include <numpy_eigen/boost_python_headers.hpp>
#include <boost/python/suite/indexing/vector_indexing_suite.hpp>
using namespace boost::python;
//typedef UniformCubicBSpline<Eigen::Dynamic> UniformCubicBSplineX;
void import_rotational_kinematics_python();
void export_rotations();
void export_transformations();
void export_quaternion_algebra();
void export_homogeneous_coordinates();
void exportTransformation();
void exportHomogeneousPoint();
void exportTimestampCorrectors();
void exportPropertyTree();
void exportPropertyTreeLoader();
void exportEigen();
void exportUncertainVector();
void exportMatrixArchive();
void exportLogging();
void exportTiming();
void exportNsecTime();
void exportRandom();
void export_eigen_property_tree();
void export_kinematics_property_tree();
void exportValueStoreRef();
void exportKeyValueStorePair();
void exportExtendibleValueStoreRef();
void exportExtendibleKeyValueStorePair();
BOOST_PYTHON_MODULE(libsm_python)
{
import_rotational_kinematics_python();
export_rotations();
export_transformations();
export_quaternion_algebra();
export_homogeneous_coordinates();
exportTransformation();
exportHomogeneousPoint();
exportTimestampCorrectors();
exportPropertyTree();
exportPropertyTreeLoader();
exportEigen();
exportUncertainVector();
exportMatrixArchive();
exportLogging();
exportTiming();
exportNsecTime();
exportRandom();
export_eigen_property_tree();
export_kinematics_property_tree();
exportValueStoreRef();
exportKeyValueStorePair();
exportExtendibleValueStoreRef();
exportExtendibleKeyValueStorePair();
}
|
% ------------------------------------------------------------------------------
% Your readers must be able to understand at a glance which data set belongs to which research question or hypothesis.
% - Describe your data objectively
% - Use graphs and tables to illustrate your data.
% - Refer to your research question with each result
% - Rank your results in order of importance
% - Confirm or reject your hypotheses
% ------------------------------------------------------------------------------
\opt{never}{\addbibresource{03-tail/bibliography.bib}} % to make citation found in most IDE
\chapter{Validation}
\label{chap:validation}
% -- Your text goes here --
\lipsum[25]
\minitoc
\newpage
% -----------------------------------------------------------------------------
\section{Section 1}
% -- Your text goes here --
\lipsum[26-27]
% -----------------------------------------------------------------------------
\section{Section 2}
% -- Your text goes here --
\lipsum[28-29]
% -----------------------------------------------------------------------------
\section{Discussion}
% -- Your text goes here --
\lipsum[30-31] |
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
! This file was ported from Lean 3 source module computability.turing_machine
! leanprover-community/mathlib commit 4c19a16e4b705bf135cf9a80ac18fcc99c438514
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Data.Fintype.Option
import Mathlib.Data.Fintype.Prod
import Mathlib.Data.Fintype.Pi
import Mathlib.Data.Vector.Basic
import Mathlib.Data.PFun
import Mathlib.Logic.Function.Iterate
import Mathlib.Order.Basic
import Mathlib.Tactic.ApplyFun
import Mathlib.Tactic.WLOG
import Mathlib.Tactic.RSuffices
/-!
# Turing machines
This file defines a sequence of simple machine languages, starting with Turing machines and working
up to more complex languages based on Wang B-machines.
## Naming conventions
Each model of computation in this file shares a naming convention for the elements of a model of
computation. These are the parameters for the language:
* `Γ` is the alphabet on the tape.
* `Λ` is the set of labels, or internal machine states.
* `σ` is the type of internal memory, not on the tape. This does not exist in the TM0 model, and
later models achieve this by mixing it into `Λ`.
* `K` is used in the TM2 model, which has multiple stacks, and denotes the number of such stacks.
All of these variables denote "essentially finite" types, but for technical reasons it is
convenient to allow them to be infinite anyway. When using an infinite type, we will be interested
to prove that only finitely many values of the type are ever interacted with.
Given these parameters, there are a few common structures for the model that arise:
* `Stmt` is the set of all actions that can be performed in one step. For the TM0 model this set is
finite, and for later models it is an infinite inductive type representing "possible program
texts".
* `Cfg` is the set of instantaneous configurations, that is, the state of the machine together with
its environment.
* `Machine` is the set of all machines in the model. Usually this is approximately a function
`Λ → Stmt`, although different models have different ways of halting and other actions.
* `step : Cfg → Option Cfg` is the function that describes how the state evolves over one step.
If `step c = none`, then `c` is a terminal state, and the result of the computation is read off
from `c`. Because of the type of `step`, these models are all deterministic by construction.
* `init : Input → Cfg` sets up the initial state. The type `Input` depends on the model;
in most cases it is `List Γ`.
* `eval : Machine → Input → Part Output`, given a machine `M` and input `i`, starts from
`init i`, runs `step` until it reaches an output, and then applies a function `Cfg → Output` to
the final state to obtain the result. The type `Output` depends on the model.
* `Supports : Machine → Finset Λ → Prop` asserts that a machine `M` starts in `S : Finset Λ`, and
can only ever jump to other states inside `S`. This implies that the behavior of `M` on any input
cannot depend on its values outside `S`. We use this to allow `Λ` to be an infinite set when
convenient, and prove that only finitely many of these states are actually accessible. This
formalizes "essentially finite" mentioned above.
-/
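
/- For orientation (an informal sketch, not one of the definitions in this file):
in the TM0 model below, `Stmt` is roughly "move the head or write a symbol", a
`Machine` is roughly a function `Λ → Γ → Option (Λ × Stmt)` where `none` means
"halt", a `Cfg` pairs the current label in `Λ` with a `Tape Γ`, and `step` applies
the machine to the current label and the symbol under the head. -/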
open Relation
open Nat (iterate)
open Function (update iterate_succ iterate_succ_apply iterate_succ' iterate_succ_apply'
iterate_zero_apply)
namespace Turing
/-- The `BlankExtends` partial order holds of `l₁` and `l₂` if `l₂` is obtained by adding
blanks (`default : Γ`) to the end of `l₁`. -/
def BlankExtends {Γ} [Inhabited Γ] (l₁ l₂ : List Γ) : Prop :=
∃ n, l₂ = l₁ ++ List.replicate n default
#align turing.blank_extends Turing.BlankExtends
@[refl]
theorem BlankExtends.refl {Γ} [Inhabited Γ] (l : List Γ) : BlankExtends l l :=
⟨0, by simp⟩
#align turing.blank_extends.refl Turing.BlankExtends.refl
@[trans]
theorem BlankExtends.trans {Γ} [Inhabited Γ] {l₁ l₂ l₃ : List Γ} :
BlankExtends l₁ l₂ → BlankExtends l₂ l₃ → BlankExtends l₁ l₃ := by
rintro ⟨i, rfl⟩ ⟨j, rfl⟩
exact ⟨i + j, by simp [List.replicate_add]⟩
#align turing.blank_extends.trans Turing.BlankExtends.trans
theorem BlankExtends.below_of_le {Γ} [Inhabited Γ] {l l₁ l₂ : List Γ} :
BlankExtends l l₁ → BlankExtends l l₂ → l₁.length ≤ l₂.length → BlankExtends l₁ l₂ := by
rintro ⟨i, rfl⟩ ⟨j, rfl⟩ h; use j - i
simp only [List.length_append, add_le_add_iff_left, List.length_replicate] at h
simp only [← List.replicate_add, add_tsub_cancel_of_le h, List.append_assoc]
#align turing.blank_extends.below_of_le Turing.BlankExtends.below_of_le
/-- Any two extensions by blank `l₁,l₂` of `l` have a common join (which can be taken to be the
longer of `l₁` and `l₂`). -/
def BlankExtends.above {Γ} [Inhabited Γ] {l l₁ l₂ : List Γ} (h₁ : BlankExtends l l₁)
(h₂ : BlankExtends l l₂) : { l' // BlankExtends l₁ l' ∧ BlankExtends l₂ l' } :=
if h : l₁.length ≤ l₂.length then ⟨l₂, h₁.below_of_le h₂ h, BlankExtends.refl _⟩
else ⟨l₁, BlankExtends.refl _, h₂.below_of_le h₁ (le_of_not_ge h)⟩
#align turing.blank_extends.above Turing.BlankExtends.above
theorem BlankExtends.above_of_le {Γ} [Inhabited Γ] {l l₁ l₂ : List Γ} :
BlankExtends l₁ l → BlankExtends l₂ l → l₁.length ≤ l₂.length → BlankExtends l₁ l₂ := by
rintro ⟨i, rfl⟩ ⟨j, e⟩ h; use i - j
refine' List.append_right_cancel (e.symm.trans _)
rw [List.append_assoc, ← List.replicate_add, tsub_add_cancel_of_le]
apply_fun List.length at e
simp only [List.length_append, List.length_replicate] at e
rwa [← add_le_add_iff_left, e, add_le_add_iff_right]
#align turing.blank_extends.above_of_le Turing.BlankExtends.above_of_le
/-- `BlankRel` is the symmetric closure of `BlankExtends`, turning it into an equivalence
relation. Two lists are related by `BlankRel` if one extends the other by blanks. -/
def BlankRel {Γ} [Inhabited Γ] (l₁ l₂ : List Γ) : Prop :=
BlankExtends l₁ l₂ ∨ BlankExtends l₂ l₁
#align turing.blank_rel Turing.BlankRel
@[refl]
theorem BlankRel.refl {Γ} [Inhabited Γ] (l : List Γ) : BlankRel l l :=
Or.inl (BlankExtends.refl _)
#align turing.blank_rel.refl Turing.BlankRel.refl
@[symm]
theorem BlankRel.symm {Γ} [Inhabited Γ] {l₁ l₂ : List Γ} : BlankRel l₁ l₂ → BlankRel l₂ l₁ :=
Or.symm
#align turing.blank_rel.symm Turing.BlankRel.symm
@[trans]
theorem BlankRel.trans {Γ} [Inhabited Γ] {l₁ l₂ l₃ : List Γ} :
BlankRel l₁ l₂ → BlankRel l₂ l₃ → BlankRel l₁ l₃ := by
rintro (h₁ | h₁) (h₂ | h₂)
· exact Or.inl (h₁.trans h₂)
· cases' le_total l₁.length l₃.length with h h
· exact Or.inl (h₁.above_of_le h₂ h)
· exact Or.inr (h₂.above_of_le h₁ h)
· cases' le_total l₁.length l₃.length with h h
· exact Or.inl (h₁.below_of_le h₂ h)
· exact Or.inr (h₂.below_of_le h₁ h)
· exact Or.inr (h₂.trans h₁)
#align turing.blank_rel.trans Turing.BlankRel.trans
/-- Given two `BlankRel` lists, there exists (constructively) a common join. -/
def BlankRel.above {Γ} [Inhabited Γ] {l₁ l₂ : List Γ} (h : BlankRel l₁ l₂) :
{ l // BlankExtends l₁ l ∧ BlankExtends l₂ l } := by
refine'
if hl : l₁.length ≤ l₂.length then ⟨l₂, Or.elim h id fun h' ↦ _, BlankExtends.refl _⟩
else ⟨l₁, BlankExtends.refl _, Or.elim h (fun h' ↦ _) id⟩
exact (BlankExtends.refl _).above_of_le h' hl
exact (BlankExtends.refl _).above_of_le h' (le_of_not_ge hl)
#align turing.blank_rel.above Turing.BlankRel.above
/-- Given two `BlankRel` lists, there exists (constructively) a common meet. -/
def BlankRel.below {Γ} [Inhabited Γ] {l₁ l₂ : List Γ} (h : BlankRel l₁ l₂) :
{ l // BlankExtends l l₁ ∧ BlankExtends l l₂ } := by
refine'
if hl : l₁.length ≤ l₂.length then ⟨l₁, BlankExtends.refl _, Or.elim h id fun h' ↦ _⟩
else ⟨l₂, Or.elim h (fun h' ↦ _) id, BlankExtends.refl _⟩
exact (BlankExtends.refl _).above_of_le h' hl
exact (BlankExtends.refl _).above_of_le h' (le_of_not_ge hl)
#align turing.blank_rel.below Turing.BlankRel.below
theorem BlankRel.equivalence (Γ) [Inhabited Γ] : Equivalence (@BlankRel Γ _) :=
⟨BlankRel.refl, @BlankRel.symm _ _, @BlankRel.trans _ _⟩
#align turing.blank_rel.equivalence Turing.BlankRel.equivalence
/-- Construct a setoid instance for `BlankRel`. -/
def BlankRel.setoid (Γ) [Inhabited Γ] : Setoid (List Γ) :=
⟨_, BlankRel.equivalence _⟩
#align turing.blank_rel.setoid Turing.BlankRel.setoid
/-- A `ListBlank Γ` is a quotient of `List Γ` by extension by blanks at the end. This is used to
represent half-tapes of a Turing machine, so that we can pretend that the list continues
infinitely with blanks. -/
def ListBlank (Γ) [Inhabited Γ] :=
Quotient (BlankRel.setoid Γ)
#align turing.list_blank Turing.ListBlank
instance ListBlank.inhabited {Γ} [Inhabited Γ] : Inhabited (ListBlank Γ) :=
⟨Quotient.mk'' []⟩
#align turing.list_blank.inhabited Turing.ListBlank.inhabited
instance ListBlank.hasEmptyc {Γ} [Inhabited Γ] : EmptyCollection (ListBlank Γ) :=
⟨Quotient.mk'' []⟩
#align turing.list_blank.has_emptyc Turing.ListBlank.hasEmptyc
/-- A modified version of `Quotient.liftOn'` specialized for `ListBlank`, with the stronger
precondition `BlankExtends` instead of `BlankRel`. -/
@[reducible] -- Porting note: Removed `@[elab_as_elim]`
protected def ListBlank.liftOn {Γ} [Inhabited Γ] {α} (l : ListBlank Γ) (f : List Γ → α)
(H : ∀ a b, BlankExtends a b → f a = f b) : α :=
l.liftOn' f <| by rintro a b (h | h) <;> [exact H _ _ h, exact (H _ _ h).symm]
#align turing.list_blank.lift_on Turing.ListBlank.liftOn
/-- The quotient map turning a `List` into a `ListBlank`. -/
def ListBlank.mk {Γ} [Inhabited Γ] : List Γ → ListBlank Γ :=
Quotient.mk''
#align turing.list_blank.mk Turing.ListBlank.mk
@[elab_as_elim]
protected theorem ListBlank.induction_on {Γ} [Inhabited Γ] {p : ListBlank Γ → Prop}
(q : ListBlank Γ) (h : ∀ a, p (ListBlank.mk a)) : p q :=
Quotient.inductionOn' q h
#align turing.list_blank.induction_on Turing.ListBlank.induction_on
/-- The head of a `ListBlank` is well defined. -/
def ListBlank.head {Γ} [Inhabited Γ] (l : ListBlank Γ) : Γ := by
apply l.liftOn List.headI
rintro a _ ⟨i, rfl⟩
cases a
· cases i <;> rfl
rfl
#align turing.list_blank.head Turing.ListBlank.head
@[simp]
theorem ListBlank.head_mk {Γ} [Inhabited Γ] (l : List Γ) :
ListBlank.head (ListBlank.mk l) = l.headI :=
rfl
#align turing.list_blank.head_mk Turing.ListBlank.head_mk
/-- The tail of a `ListBlank` is well defined (up to the tail of blanks). -/
def ListBlank.tail {Γ} [Inhabited Γ] (l : ListBlank Γ) : ListBlank Γ := by
apply l.liftOn (fun l ↦ ListBlank.mk l.tail)
rintro a _ ⟨i, rfl⟩
refine' Quotient.sound' (Or.inl _)
cases a
· cases' i with i <;> [exact ⟨0, rfl⟩, exact ⟨i, rfl⟩]
exact ⟨i, rfl⟩
#align turing.list_blank.tail Turing.ListBlank.tail
@[simp]
theorem ListBlank.tail_mk {Γ} [Inhabited Γ] (l : List Γ) :
ListBlank.tail (ListBlank.mk l) = ListBlank.mk l.tail :=
rfl
#align turing.list_blank.tail_mk Turing.ListBlank.tail_mk
/-- We can cons an element onto a `ListBlank`. -/
def ListBlank.cons {Γ} [Inhabited Γ] (a : Γ) (l : ListBlank Γ) : ListBlank Γ := by
apply l.liftOn (fun l ↦ ListBlank.mk (List.cons a l))
rintro _ _ ⟨i, rfl⟩
exact Quotient.sound' (Or.inl ⟨i, rfl⟩)
#align turing.list_blank.cons Turing.ListBlank.cons
@[simp]
theorem ListBlank.cons_mk {Γ} [Inhabited Γ] (a : Γ) (l : List Γ) :
ListBlank.cons a (ListBlank.mk l) = ListBlank.mk (a :: l) :=
rfl
#align turing.list_blank.cons_mk Turing.ListBlank.cons_mk
@[simp]
theorem ListBlank.head_cons {Γ} [Inhabited Γ] (a : Γ) : ∀ l : ListBlank Γ, (l.cons a).head = a :=
Quotient.ind' fun _ ↦ rfl
#align turing.list_blank.head_cons Turing.ListBlank.head_cons
@[simp]
theorem ListBlank.tail_cons {Γ} [Inhabited Γ] (a : Γ) : ∀ l : ListBlank Γ, (l.cons a).tail = l :=
Quotient.ind' fun _ ↦ rfl
#align turing.list_blank.tail_cons Turing.ListBlank.tail_cons
/-- The `cons` and `head`/`tail` functions are mutually inverse, unlike in the case of `List` where
this only holds for nonempty lists. -/
@[simp]
theorem ListBlank.cons_head_tail {Γ} [Inhabited Γ] : ∀ l : ListBlank Γ, l.tail.cons l.head = l := by
apply Quotient.ind'
refine' fun l ↦ Quotient.sound' (Or.inr _)
cases l
· exact ⟨1, rfl⟩
· rfl
#align turing.list_blank.cons_head_tail Turing.ListBlank.cons_head_tail
/-- The `cons` and `head`/`tail` functions are mutually inverse, unlike in the case of `List` where
this only holds for nonempty lists. -/
theorem ListBlank.exists_cons {Γ} [Inhabited Γ] (l : ListBlank Γ) :
∃ a l', l = ListBlank.cons a l' :=
⟨_, _, (ListBlank.cons_head_tail _).symm⟩
#align turing.list_blank.exists_cons Turing.ListBlank.exists_cons
/-- The n-th element of a `ListBlank` is well defined for all `n : ℕ`, unlike in a `List`. -/
def ListBlank.nth {Γ} [Inhabited Γ] (l : ListBlank Γ) (n : ℕ) : Γ := by
apply l.liftOn (fun l ↦ List.getI l n)
rintro l _ ⟨i, rfl⟩
cases' lt_or_le n _ with h h
· rw [List.getI_append _ _ _ h]
rw [List.getI_eq_default _ h]
cases' le_or_lt _ n with h₂ h₂
· rw [List.getI_eq_default _ h₂]
rw [List.getI_eq_get _ h₂, List.get_append_right' h, List.get_replicate]
#align turing.list_blank.nth Turing.ListBlank.nth
@[simp]
theorem ListBlank.nth_mk {Γ} [Inhabited Γ] (l : List Γ) (n : ℕ) :
(ListBlank.mk l).nth n = l.getI n :=
rfl
#align turing.list_blank.nth_mk Turing.ListBlank.nth_mk
@[simp]
theorem ListBlank.nth_zero {Γ} [Inhabited Γ] (l : ListBlank Γ) : l.nth 0 = l.head := by
conv => lhs; rw [← ListBlank.cons_head_tail l]
exact Quotient.inductionOn' l.tail fun l ↦ rfl
#align turing.list_blank.nth_zero Turing.ListBlank.nth_zero
@[simp]
theorem ListBlank.nth_succ {Γ} [Inhabited Γ] (l : ListBlank Γ) (n : ℕ) :
l.nth (n + 1) = l.tail.nth n := by
conv => lhs; rw [← ListBlank.cons_head_tail l]
exact Quotient.inductionOn' l.tail fun l ↦ rfl
#align turing.list_blank.nth_succ Turing.ListBlank.nth_succ
@[ext]
theorem ListBlank.ext {Γ} [i : Inhabited Γ] {L₁ L₂ : ListBlank Γ} :
(∀ i, L₁.nth i = L₂.nth i) → L₁ = L₂ := by
refine' ListBlank.induction_on L₁ fun l₁ ↦ ListBlank.induction_on L₂ fun l₂ H ↦ _
wlog h : l₁.length ≤ l₂.length
· cases le_total l₁.length l₂.length <;> [skip, symm] <;> apply this <;> try assumption
intro
rw [H]
refine' Quotient.sound' (Or.inl ⟨l₂.length - l₁.length, _⟩)
refine' List.ext_get _ fun i h h₂ ↦ Eq.symm _
· simp only [add_tsub_cancel_of_le h, List.length_append, List.length_replicate]
simp only [ListBlank.nth_mk] at H
cases' lt_or_le i l₁.length with h' h'
· simp only [List.get_append _ h', List.get?_eq_get h, List.get?_eq_get h',
← List.getI_eq_get _ h, ← List.getI_eq_get _ h', H]
· simp only [List.get_append_right' h', List.get_replicate, List.get?_eq_get h,
List.get?_len_le h', ← List.getI_eq_default _ h', H, List.getI_eq_get _ h]
#align turing.list_blank.ext Turing.ListBlank.ext
/-- Apply a function to a value stored at the nth position of the list. -/
@[simp]
def ListBlank.modifyNth {Γ} [Inhabited Γ] (f : Γ → Γ) : ℕ → ListBlank Γ → ListBlank Γ
| 0, L => L.tail.cons (f L.head)
| n + 1, L => (L.tail.modifyNth f n).cons L.head
#align turing.list_blank.modify_nth Turing.ListBlank.modifyNth
theorem ListBlank.nth_modifyNth {Γ} [Inhabited Γ] (f : Γ → Γ) (n i) (L : ListBlank Γ) :
(L.modifyNth f n).nth i = if i = n then f (L.nth i) else L.nth i := by
induction' n with n IH generalizing i L
· cases i <;> simp only [ListBlank.nth_zero, if_true, ListBlank.head_cons, ListBlank.modifyNth,
ListBlank.nth_succ, if_false, ListBlank.tail_cons, Nat.zero_eq]
· cases i
· rw [if_neg (Nat.succ_ne_zero _).symm]
simp only [ListBlank.nth_zero, ListBlank.head_cons, ListBlank.modifyNth, Nat.zero_eq]
· simp only [IH, ListBlank.modifyNth, ListBlank.nth_succ, ListBlank.tail_cons, Nat.succ.injEq]
#align turing.list_blank.nth_modify_nth Turing.ListBlank.nth_modifyNth
/-- A pointed map of `Inhabited` types is a map that sends one default value to the other. -/
structure PointedMap.{u, v} (Γ : Type u) (Γ' : Type v) [Inhabited Γ] [Inhabited Γ'] :
Type max u v where
f : Γ → Γ'
map_pt' : f default = default
#align turing.pointed_map Turing.PointedMap
instance {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] : Inhabited (PointedMap Γ Γ') :=
⟨⟨default, rfl⟩⟩
instance {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] : CoeFun (PointedMap Γ Γ') fun _ ↦ Γ → Γ' :=
⟨PointedMap.f⟩
-- @[simp] -- Porting note: dsimp can prove this
theorem PointedMap.mk_val {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] (f : Γ → Γ') (pt) :
(PointedMap.mk f pt : Γ → Γ') = f :=
rfl
#align turing.pointed_map.mk_val Turing.PointedMap.mk_val
@[simp]
theorem PointedMap.map_pt {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] (f : PointedMap Γ Γ') :
f default = default :=
PointedMap.map_pt' _
#align turing.pointed_map.map_pt Turing.PointedMap.map_pt
@[simp]
theorem PointedMap.headI_map {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] (f : PointedMap Γ Γ')
(l : List Γ) : (l.map f).headI = f l.headI := by
cases l <;> [exact (PointedMap.map_pt f).symm, rfl]
#align turing.pointed_map.head_map Turing.PointedMap.headI_map
/-- The `map` function on lists is well defined on `ListBlank`s provided that the map is
pointed. -/
def ListBlank.map {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] (f : PointedMap Γ Γ') (l : ListBlank Γ) :
ListBlank Γ' := by
apply l.liftOn (fun l ↦ ListBlank.mk (List.map f l))
rintro l _ ⟨i, rfl⟩; refine' Quotient.sound' (Or.inl ⟨i, _⟩)
simp only [PointedMap.map_pt, List.map_append, List.map_replicate]
#align turing.list_blank.map Turing.ListBlank.map
@[simp]
theorem ListBlank.map_mk {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] (f : PointedMap Γ Γ') (l : List Γ) :
(ListBlank.mk l).map f = ListBlank.mk (l.map f) :=
rfl
#align turing.list_blank.map_mk Turing.ListBlank.map_mk
@[simp]
theorem ListBlank.head_map {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] (f : PointedMap Γ Γ')
(l : ListBlank Γ) : (l.map f).head = f l.head := by
conv => lhs; rw [← ListBlank.cons_head_tail l]
exact Quotient.inductionOn' l fun a ↦ rfl
#align turing.list_blank.head_map Turing.ListBlank.head_map
@[simp]
theorem ListBlank.tail_map {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] (f : PointedMap Γ Γ')
(l : ListBlank Γ) : (l.map f).tail = l.tail.map f := by
conv => lhs; rw [← ListBlank.cons_head_tail l]
exact Quotient.inductionOn' l fun a ↦ rfl
#align turing.list_blank.tail_map Turing.ListBlank.tail_map
@[simp]
theorem ListBlank.map_cons {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] (f : PointedMap Γ Γ')
(l : ListBlank Γ) (a : Γ) : (l.cons a).map f = (l.map f).cons (f a) := by
refine' (ListBlank.cons_head_tail _).symm.trans _
simp only [ListBlank.head_map, ListBlank.head_cons, ListBlank.tail_map, ListBlank.tail_cons]
#align turing.list_blank.map_cons Turing.ListBlank.map_cons
@[simp]
theorem ListBlank.nth_map {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] (f : PointedMap Γ Γ')
(l : ListBlank Γ) (n : ℕ) : (l.map f).nth n = f (l.nth n) := by
refine' l.inductionOn fun l ↦ _
-- Porting note: Added `suffices` to get `simp` to work.
suffices ((mk l).map f).nth n = f ((mk l).nth n) by exact this
simp only [List.get?_map, ListBlank.map_mk, ListBlank.nth_mk, List.getI_eq_iget_get?]
cases l.get? n
· exact f.2.symm
· rfl
#align turing.list_blank.nth_map Turing.ListBlank.nth_map
/-- The `i`-th projection as a pointed map. -/
def proj {ι : Type _} {Γ : ι → Type _} [∀ i, Inhabited (Γ i)] (i : ι) :
PointedMap (∀ i, Γ i) (Γ i) :=
⟨fun a ↦ a i, rfl⟩
#align turing.proj Turing.proj
theorem proj_map_nth {ι : Type _} {Γ : ι → Type _} [∀ i, Inhabited (Γ i)] (i : ι) (L n) :
(ListBlank.map (@proj ι Γ _ i) L).nth n = L.nth n i := by
rw [ListBlank.nth_map]; rfl
#align turing.proj_map_nth Turing.proj_map_nth
theorem ListBlank.map_modifyNth {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] (F : PointedMap Γ Γ')
(f : Γ → Γ) (f' : Γ' → Γ') (H : ∀ x, F (f x) = f' (F x)) (n) (L : ListBlank Γ) :
(L.modifyNth f n).map F = (L.map F).modifyNth f' n := by
induction' n with n IH generalizing L <;>
simp only [*, ListBlank.head_map, ListBlank.modifyNth, ListBlank.map_cons, ListBlank.tail_map]
#align turing.list_blank.map_modify_nth Turing.ListBlank.map_modifyNth
/-- Append a list on the left side of a `ListBlank`. -/
@[simp]
def ListBlank.append {Γ} [Inhabited Γ] : List Γ → ListBlank Γ → ListBlank Γ
| [], L => L
| a :: l, L => ListBlank.cons a (ListBlank.append l L)
#align turing.list_blank.append Turing.ListBlank.append
@[simp]
theorem ListBlank.append_mk {Γ} [Inhabited Γ] (l₁ l₂ : List Γ) :
ListBlank.append l₁ (ListBlank.mk l₂) = ListBlank.mk (l₁ ++ l₂) := by
induction l₁ <;>
simp only [*, ListBlank.append, List.nil_append, List.cons_append, ListBlank.cons_mk]
#align turing.list_blank.append_mk Turing.ListBlank.append_mk
theorem ListBlank.append_assoc {Γ} [Inhabited Γ] (l₁ l₂ : List Γ) (l₃ : ListBlank Γ) :
ListBlank.append (l₁ ++ l₂) l₃ = ListBlank.append l₁ (ListBlank.append l₂ l₃) := by
refine' l₃.inductionOn fun l ↦ _
-- Porting note: Added `suffices` to get `simp` to work.
suffices append (l₁ ++ l₂) (mk l) = append l₁ (append l₂ (mk l)) by exact this
simp only [ListBlank.append_mk, List.append_assoc]
#align turing.list_blank.append_assoc Turing.ListBlank.append_assoc
/-- The `bind` function on lists is well defined on `ListBlank`s provided that the default element
is sent to a sequence of default elements. -/
def ListBlank.bind {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] (l : ListBlank Γ) (f : Γ → List Γ')
(hf : ∃ n, f default = List.replicate n default) : ListBlank Γ' := by
apply l.liftOn (fun l ↦ ListBlank.mk (List.bind l f))
rintro l _ ⟨i, rfl⟩; cases' hf with n e; refine' Quotient.sound' (Or.inl ⟨i * n, _⟩)
rw [List.bind_append, mul_comm]; congr
induction' i with i IH; rfl
simp only [IH, e, List.replicate_add, Nat.mul_succ, add_comm, List.replicate_succ, List.cons_bind]
#align turing.list_blank.bind Turing.ListBlank.bind
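-- For instance, duplicating every cell, `fun a ↦ [a, a]`, satisfies the side condition with
-- witness `⟨2, rfl⟩`, since `[default, default] = List.replicate 2 default`.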
@[simp]
theorem ListBlank.bind_mk {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] (l : List Γ) (f : Γ → List Γ') (hf) :
(ListBlank.mk l).bind f hf = ListBlank.mk (l.bind f) :=
rfl
#align turing.list_blank.bind_mk Turing.ListBlank.bind_mk
@[simp]
theorem ListBlank.cons_bind {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] (a : Γ) (l : ListBlank Γ)
(f : Γ → List Γ') (hf) : (l.cons a).bind f hf = (l.bind f hf).append (f a) := by
refine' l.inductionOn fun l ↦ _
-- Porting note: Added `suffices` to get `simp` to work.
suffices ((mk l).cons a).bind f hf = ((mk l).bind f hf).append (f a) by exact this
simp only [ListBlank.append_mk, ListBlank.bind_mk, ListBlank.cons_mk, List.cons_bind]
#align turing.list_blank.cons_bind Turing.ListBlank.cons_bind
/-- The tape of a Turing machine is composed of a head element (which we imagine to be the
current position of the head), together with two `ListBlank`s denoting the portions of the tape
going off to the left and right. When the Turing machine moves right, an element is pulled from the
right side and becomes the new head, while the head element is consed onto the left side. -/
structure Tape (Γ : Type _) [Inhabited Γ] where
head : Γ
left : ListBlank Γ
right : ListBlank Γ
#align turing.tape Turing.Tape
instance Tape.inhabited {Γ} [Inhabited Γ] : Inhabited (Tape Γ) :=
⟨by constructor <;> apply default⟩
#align turing.tape.inhabited Turing.Tape.inhabited
/-- A direction for the Turing machine `move` command, either
left or right. -/
inductive Dir
| left
| right
deriving DecidableEq, Inhabited
#align turing.dir Turing.Dir
/-- The "inclusive" left side of the tape, including both `left` and `head`. -/
def Tape.left₀ {Γ} [Inhabited Γ] (T : Tape Γ) : ListBlank Γ :=
T.left.cons T.head
#align turing.tape.left₀ Turing.Tape.left₀
/-- The "inclusive" right side of the tape, including both `right` and `head`. -/
def Tape.right₀ {Γ} [Inhabited Γ] (T : Tape Γ) : ListBlank Γ :=
T.right.cons T.head
#align turing.tape.right₀ Turing.Tape.right₀
/-- Move the tape in response to a motion of the Turing machine. Note that `T.move Dir.left` makes
`T.left` smaller; the Turing machine is moving left and the tape is moving right. -/
def Tape.move {Γ} [Inhabited Γ] : Dir → Tape Γ → Tape Γ
| Dir.left, ⟨a, L, R⟩ => ⟨L.head, L.tail, R.cons a⟩
| Dir.right, ⟨a, L, R⟩ => ⟨R.head, L.cons a, R.tail⟩
#align turing.tape.move Turing.Tape.move
@[simp]
theorem Tape.move_left_right {Γ} [Inhabited Γ] (T : Tape Γ) :
(T.move Dir.left).move Dir.right = T := by
cases T; simp [Tape.move]
#align turing.tape.move_left_right Turing.Tape.move_left_right
@[simp]
theorem Tape.move_right_left {Γ} [Inhabited Γ] (T : Tape Γ) :
(T.move Dir.right).move Dir.left = T := by
cases T; simp [Tape.move]
#align turing.tape.move_right_left Turing.Tape.move_right_left
/-- Construct a tape from a left side and an inclusive right side. -/
def Tape.mk' {Γ} [Inhabited Γ] (L R : ListBlank Γ) : Tape Γ :=
⟨R.head, L, R.tail⟩
#align turing.tape.mk' Turing.Tape.mk'
@[simp]
theorem Tape.mk'_left {Γ} [Inhabited Γ] (L R : ListBlank Γ) : (Tape.mk' L R).left = L :=
rfl
#align turing.tape.mk'_left Turing.Tape.mk'_left
@[simp]
theorem Tape.mk'_head {Γ} [Inhabited Γ] (L R : ListBlank Γ) : (Tape.mk' L R).head = R.head :=
rfl
#align turing.tape.mk'_head Turing.Tape.mk'_head
@[simp]
theorem Tape.mk'_right {Γ} [Inhabited Γ] (L R : ListBlank Γ) : (Tape.mk' L R).right = R.tail :=
rfl
#align turing.tape.mk'_right Turing.Tape.mk'_right
@[simp]
theorem Tape.mk'_right₀ {Γ} [Inhabited Γ] (L R : ListBlank Γ) : (Tape.mk' L R).right₀ = R :=
ListBlank.cons_head_tail _
#align turing.tape.mk'_right₀ Turing.Tape.mk'_right₀
@[simp]
theorem Tape.mk'_left_right₀ {Γ} [Inhabited Γ] (T : Tape Γ) : Tape.mk' T.left T.right₀ = T := by
cases T
simp only [Tape.right₀, Tape.mk', ListBlank.head_cons, ListBlank.tail_cons, eq_self_iff_true,
and_self_iff]
#align turing.tape.mk'_left_right₀ Turing.Tape.mk'_left_right₀
theorem Tape.exists_mk' {Γ} [Inhabited Γ] (T : Tape Γ) : ∃ L R, T = Tape.mk' L R :=
⟨_, _, (Tape.mk'_left_right₀ _).symm⟩
#align turing.tape.exists_mk' Turing.Tape.exists_mk'
@[simp]
theorem Tape.move_left_mk' {Γ} [Inhabited Γ] (L R : ListBlank Γ) :
(Tape.mk' L R).move Dir.left = Tape.mk' L.tail (R.cons L.head) := by
simp only [Tape.move, Tape.mk', ListBlank.head_cons, eq_self_iff_true, ListBlank.cons_head_tail,
and_self_iff, ListBlank.tail_cons]
#align turing.tape.move_left_mk' Turing.Tape.move_left_mk'
@[simp]
theorem Tape.move_right_mk' {Γ} [Inhabited Γ] (L R : ListBlank Γ) :
(Tape.mk' L R).move Dir.right = Tape.mk' (L.cons R.head) R.tail := by
simp only [Tape.move, Tape.mk', ListBlank.head_cons, eq_self_iff_true, ListBlank.cons_head_tail,
and_self_iff, ListBlank.tail_cons]
#align turing.tape.move_right_mk' Turing.Tape.move_right_mk'
/-- Construct a tape from a left side and an inclusive right side, both given as lists. -/
def Tape.mk₂ {Γ} [Inhabited Γ] (L R : List Γ) : Tape Γ :=
Tape.mk' (ListBlank.mk L) (ListBlank.mk R)
#align turing.tape.mk₂ Turing.Tape.mk₂
/-- Construct a tape from a list, with the head of the list at the TM head and the rest going
to the right. -/
def Tape.mk₁ {Γ} [Inhabited Γ] (l : List Γ) : Tape Γ :=
Tape.mk₂ [] l
#align turing.tape.mk₁ Turing.Tape.mk₁
/-- The `nth` function of a tape is indexed by an integer, with index `0` being the head, negative
indices on the left and positive indices on the right. (Picture a number line.) -/
def Tape.nth {Γ} [Inhabited Γ] (T : Tape Γ) : ℤ → Γ
| 0 => T.head
| (n + 1 : ℕ) => T.right.nth n
| -(n + 1 : ℕ) => T.left.nth n
#align turing.tape.nth Turing.Tape.nth
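-- For instance, on the tape built by `Tape.mk₁ [a, b]` the head (index `0`) reads `a`, index `1`
-- reads `b`, and every other index reads a blank. A small check of the positive direction, which
-- should hold by `rfl`:
example : (Tape.mk₁ [true, false]).nth 1 = false := rfl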
@[simp]
theorem Tape.nth_zero {Γ} [Inhabited Γ] (T : Tape Γ) : T.nth 0 = T.1 :=
rfl
#align turing.tape.nth_zero Turing.Tape.nth_zero
theorem Tape.right₀_nth {Γ} [Inhabited Γ] (T : Tape Γ) (n : ℕ) : T.right₀.nth n = T.nth n := by
cases n <;> simp only [Tape.nth, Tape.right₀, Int.ofNat_zero, ListBlank.nth_zero,
ListBlank.nth_succ, ListBlank.head_cons, ListBlank.tail_cons, Nat.zero_eq]
#align turing.tape.right₀_nth Turing.Tape.right₀_nth
@[simp]
theorem Tape.mk'_nth_nat {Γ} [Inhabited Γ] (L R : ListBlank Γ) (n : ℕ) :
(Tape.mk' L R).nth n = R.nth n := by
rw [← Tape.right₀_nth, Tape.mk'_right₀]
#align turing.tape.mk'_nth_nat Turing.Tape.mk'_nth_nat
@[simp]
theorem Tape.move_left_nth {Γ} [Inhabited Γ] :
∀ (T : Tape Γ) (i : ℤ), (T.move Dir.left).nth i = T.nth (i - 1)
| ⟨_, L, _⟩, -(n + 1 : ℕ) => (ListBlank.nth_succ _ _).symm
| ⟨_, L, _⟩, 0 => (ListBlank.nth_zero _).symm
| ⟨a, L, R⟩, 1 => (ListBlank.nth_zero _).trans (ListBlank.head_cons _ _)
| ⟨a, L, R⟩, (n + 1 : ℕ) + 1 => by
rw [add_sub_cancel]
change (R.cons a).nth (n + 1) = R.nth n
rw [ListBlank.nth_succ, ListBlank.tail_cons]
#align turing.tape.move_left_nth Turing.Tape.move_left_nth
@[simp]
theorem Tape.move_right_nth {Γ} [Inhabited Γ] (T : Tape Γ) (i : ℤ) :
(T.move Dir.right).nth i = T.nth (i + 1) := by
conv => rhs; rw [← T.move_right_left]
rw [Tape.move_left_nth, add_sub_cancel]
#align turing.tape.move_right_nth Turing.Tape.move_right_nth
@[simp]
theorem Tape.move_right_n_head {Γ} [Inhabited Γ] (T : Tape Γ) (i : ℕ) :
((Tape.move Dir.right^[i]) T).head = T.nth i := by
induction i generalizing T
· rfl
· simp only [*, Tape.move_right_nth, Int.ofNat_succ, iterate_succ, Function.comp_apply]
#align turing.tape.move_right_n_head Turing.Tape.move_right_n_head
/-- Replace the current value of the head on the tape. -/
def Tape.write {Γ} [Inhabited Γ] (b : Γ) (T : Tape Γ) : Tape Γ :=
{ T with head := b }
#align turing.tape.write Turing.Tape.write
@[simp]
theorem Tape.write_self {Γ} [Inhabited Γ] : ∀ T : Tape Γ, T.write T.1 = T := by
rintro ⟨⟩; rfl
#align turing.tape.write_self Turing.Tape.write_self
@[simp]
theorem Tape.write_nth {Γ} [Inhabited Γ] (b : Γ) :
∀ (T : Tape Γ) {i : ℤ}, (T.write b).nth i = if i = 0 then b else T.nth i
| _, 0 => rfl
| _, (_ + 1 : ℕ) => rfl
| _, -(_ + 1 : ℕ) => rfl
#align turing.tape.write_nth Turing.Tape.write_nth
@[simp]
theorem Tape.write_mk' {Γ} [Inhabited Γ] (a b : Γ) (L R : ListBlank Γ) :
(Tape.mk' L (R.cons a)).write b = Tape.mk' L (R.cons b) := by
simp only [Tape.write, Tape.mk', ListBlank.head_cons, ListBlank.tail_cons, eq_self_iff_true,
and_self_iff]
#align turing.tape.write_mk' Turing.Tape.write_mk'
/-- Apply a pointed map to a tape to change the alphabet. -/
def Tape.map {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] (f : PointedMap Γ Γ') (T : Tape Γ) : Tape Γ' :=
⟨f T.1, T.2.map f, T.3.map f⟩
#align turing.tape.map Turing.Tape.map
@[simp]
theorem Tape.map_fst {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] (f : PointedMap Γ Γ') :
∀ T : Tape Γ, (T.map f).1 = f T.1 := by
rintro ⟨⟩; rfl
#align turing.tape.map_fst Turing.Tape.map_fst
@[simp]
theorem Tape.map_write {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] (f : PointedMap Γ Γ') (b : Γ) :
∀ T : Tape Γ, (T.write b).map f = (T.map f).write (f b) := by
rintro ⟨⟩; rfl
#align turing.tape.map_write Turing.Tape.map_write
-- Porting note: `simpNF` complains that the LHS does not simplify when using the simp lemma on
-- itself, but it does indeed.
@[simp, nolint simpNF]
theorem Tape.write_move_right_n {Γ} [Inhabited Γ] (f : Γ → Γ) (L R : ListBlank Γ) (n : ℕ) :
((Tape.move Dir.right^[n]) (Tape.mk' L R)).write (f (R.nth n)) =
(Tape.move Dir.right^[n]) (Tape.mk' L (R.modifyNth f n)) := by
induction' n with n IH generalizing L R
· simp only [ListBlank.nth_zero, ListBlank.modifyNth, iterate_zero_apply, Nat.zero_eq]
rw [← Tape.write_mk', ListBlank.cons_head_tail]
simp only [ListBlank.head_cons, ListBlank.nth_succ, ListBlank.modifyNth, Tape.move_right_mk',
ListBlank.tail_cons, iterate_succ_apply, IH]
#align turing.tape.write_move_right_n Turing.Tape.write_move_right_n
theorem Tape.map_move {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] (f : PointedMap Γ Γ') (T : Tape Γ) (d) :
(T.move d).map f = (T.map f).move d := by
cases T
cases d <;> simp only [Tape.move, Tape.map, ListBlank.head_map, eq_self_iff_true,
ListBlank.map_cons, and_self_iff, ListBlank.tail_map]
#align turing.tape.map_move Turing.Tape.map_move
theorem Tape.map_mk' {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] (f : PointedMap Γ Γ') (L R : ListBlank Γ) :
(Tape.mk' L R).map f = Tape.mk' (L.map f) (R.map f) := by
simp only [Tape.mk', Tape.map, ListBlank.head_map, eq_self_iff_true, and_self_iff,
ListBlank.tail_map]
#align turing.tape.map_mk' Turing.Tape.map_mk'
theorem Tape.map_mk₂ {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] (f : PointedMap Γ Γ') (L R : List Γ) :
(Tape.mk₂ L R).map f = Tape.mk₂ (L.map f) (R.map f) := by
simp only [Tape.mk₂, Tape.map_mk', ListBlank.map_mk]
#align turing.tape.map_mk₂ Turing.Tape.map_mk₂
theorem Tape.map_mk₁ {Γ Γ'} [Inhabited Γ] [Inhabited Γ'] (f : PointedMap Γ Γ') (l : List Γ) :
(Tape.mk₁ l).map f = Tape.mk₁ (l.map f) :=
Tape.map_mk₂ _ _ _
#align turing.tape.map_mk₁ Turing.Tape.map_mk₁
/-- Run a state transition function `σ → Option σ` "to completion". The return value is the last
state returned before a `none` result. If the state transition function always returns `some`,
then the computation diverges, returning `Part.none`. -/
-- Porting note: Added noncomputable, because `PFun.fix` is noncomputable.
noncomputable def eval {σ} (f : σ → Option σ) : σ → Part σ :=
PFun.fix fun s ↦ Part.some <| (f s).elim (Sum.inl s) Sum.inr
#align turing.eval Turing.eval
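-- In the `PFun.fix` encoding above, `Sum.inl s` means "halt and output `s`" (produced when
-- `f s = none`), while `Sum.inr s'` means "continue from `s'`" (produced when `f s = some s'`).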
/-- The reflexive transitive closure of a state transition function. `Reaches f a b` means
there is a finite sequence of steps `f a = some a₁`, `f a₁ = some a₂`, ... such that `aₙ = b`.
This relation permits zero steps of the state transition function. -/
def Reaches {σ} (f : σ → Option σ) : σ → σ → Prop :=
ReflTransGen fun a b ↦ b ∈ f a
#align turing.reaches Turing.Reaches
/-- The transitive closure of a state transition function. `Reaches₁ f a b` means there is a
nonempty finite sequence of steps `f a = some a₁`, `f a₁ = some a₂`, ... such that `aₙ = b`.
This relation does not permit zero steps of the state transition function. -/
def Reaches₁ {σ} (f : σ → Option σ) : σ → σ → Prop :=
TransGen fun a b ↦ b ∈ f a
#align turing.reaches₁ Turing.Reaches₁
theorem reaches₁_eq {σ} {f : σ → Option σ} {a b c} (h : f a = f b) :
Reaches₁ f a c ↔ Reaches₁ f b c :=
TransGen.head'_iff.trans (TransGen.head'_iff.trans <| by rw [h]).symm
#align turing.reaches₁_eq Turing.reaches₁_eq
theorem reaches_total {σ} {f : σ → Option σ} {a b c} (hab : Reaches f a b) (hac : Reaches f a c) :
Reaches f b c ∨ Reaches f c b :=
ReflTransGen.total_of_right_unique (fun _ _ _ ↦ Option.mem_unique) hab hac
#align turing.reaches_total Turing.reaches_total
theorem reaches₁_fwd {σ} {f : σ → Option σ} {a b c} (h₁ : Reaches₁ f a c) (h₂ : b ∈ f a) :
Reaches f b c := by
rcases TransGen.head'_iff.1 h₁ with ⟨b', hab, hbc⟩
cases Option.mem_unique hab h₂; exact hbc
#align turing.reaches₁_fwd Turing.reaches₁_fwd
/-- A variation on `Reaches`. `Reaches₀ f a b` holds if whenever `Reaches₁ f b c` then
`Reaches₁ f a c`. This is a weaker property than `Reaches` and is useful for replacing states with
equivalent states without taking a step. -/
def Reaches₀ {σ} (f : σ → Option σ) (a b : σ) : Prop :=
∀ c, Reaches₁ f b c → Reaches₁ f a c
#align turing.reaches₀ Turing.Reaches₀
theorem Reaches₀.trans {σ} {f : σ → Option σ} {a b c : σ} (h₁ : Reaches₀ f a b)
(h₂ : Reaches₀ f b c) : Reaches₀ f a c
| _, h₃ => h₁ _ (h₂ _ h₃)
#align turing.reaches₀.trans Turing.Reaches₀.trans
@[refl]
theorem Reaches₀.refl {σ} {f : σ → Option σ} (a : σ) : Reaches₀ f a a
| _, h => h
#align turing.reaches₀.refl Turing.Reaches₀.refl
theorem Reaches₀.single {σ} {f : σ → Option σ} {a b : σ} (h : b ∈ f a) : Reaches₀ f a b
| _, h₂ => h₂.head h
#align turing.reaches₀.single Turing.Reaches₀.single
theorem Reaches₀.head {σ} {f : σ → Option σ} {a b c : σ} (h : b ∈ f a) (h₂ : Reaches₀ f b c) :
Reaches₀ f a c :=
(Reaches₀.single h).trans h₂
#align turing.reaches₀.head Turing.Reaches₀.head
theorem Reaches₀.tail {σ} {f : σ → Option σ} {a b c : σ} (h₁ : Reaches₀ f a b) (h : c ∈ f b) :
Reaches₀ f a c :=
h₁.trans (Reaches₀.single h)
#align turing.reaches₀.tail Turing.Reaches₀.tail
theorem reaches₀_eq {σ} {f : σ → Option σ} {a b} (e : f a = f b) : Reaches₀ f a b
| _, h => (reaches₁_eq e).2 h
#align turing.reaches₀_eq Turing.reaches₀_eq
theorem Reaches₁.to₀ {σ} {f : σ → Option σ} {a b : σ} (h : Reaches₁ f a b) : Reaches₀ f a b
| _, h₂ => h.trans h₂
#align turing.reaches₁.to₀ Turing.Reaches₁.to₀
theorem Reaches.to₀ {σ} {f : σ → Option σ} {a b : σ} (h : Reaches f a b) : Reaches₀ f a b
| _, h₂ => h₂.trans_right h
#align turing.reaches.to₀ Turing.Reaches.to₀
theorem Reaches₀.tail' {σ} {f : σ → Option σ} {a b c : σ} (h : Reaches₀ f a b) (h₂ : c ∈ f b) :
Reaches₁ f a c :=
h _ (TransGen.single h₂)
#align turing.reaches₀.tail' Turing.Reaches₀.tail'
/-- (co-)Induction principle for `eval`. If a property `C` holds of any point `a` evaluating to `b`
which is either terminal (meaning `a = b`) or where the next point also satisfies `C`, then it
holds of any point where `eval f a` evaluates to `b`. This formalizes the notion that if
`eval f a` evaluates to `b` then it reaches terminal state `b` in finitely many steps. -/
-- Porting note: Added noncomputable
@[elab_as_elim]
noncomputable def evalInduction {σ} {f : σ → Option σ} {b : σ} {C : σ → Sort _} {a : σ}
(h : b ∈ eval f a) (H : ∀ a, b ∈ eval f a → (∀ a', f a = some a' → C a') → C a) : C a :=
PFun.fixInduction h fun a' ha' h' ↦
H _ ha' fun b' e ↦ h' _ <| Part.mem_some_iff.2 <| by rw [e]; rfl
#align turing.eval_induction Turing.evalInduction
theorem mem_eval {σ} {f : σ → Option σ} {a b} : b ∈ eval f a ↔ Reaches f a b ∧ f b = none := by
refine' ⟨fun h ↦ _, fun ⟨h₁, h₂⟩ ↦ _⟩
· -- Porting note: Explicitly specify `c`.
refine' @evalInduction _ _ _ (fun a ↦ Reaches f a b ∧ f b = none) _ h fun a h IH ↦ _
cases' e : f a with a'
· rw [Part.mem_unique h
(PFun.mem_fix_iff.2 <| Or.inl <| Part.mem_some_iff.2 <| by rw [e] <;> rfl)]
exact ⟨ReflTransGen.refl, e⟩
· rcases PFun.mem_fix_iff.1 h with (h | ⟨_, h, _⟩) <;> rw [e] at h <;>
cases Part.mem_some_iff.1 h
cases' IH a' e with h₁ h₂
exact ⟨ReflTransGen.head e h₁, h₂⟩
· refine' ReflTransGen.head_induction_on h₁ _ fun h _ IH ↦ _
· refine' PFun.mem_fix_iff.2 (Or.inl _)
rw [h₂]
apply Part.mem_some
· refine' PFun.mem_fix_iff.2 (Or.inr ⟨_, _, IH⟩)
rw [h]
apply Part.mem_some
#align turing.mem_eval Turing.mem_eval
theorem eval_maximal₁ {σ} {f : σ → Option σ} {a b} (h : b ∈ eval f a) (c) : ¬Reaches₁ f b c
| bc => by
let ⟨_, b0⟩ := mem_eval.1 h
let ⟨b', h', _⟩ := TransGen.head'_iff.1 bc
cases b0.symm.trans h'
#align turing.eval_maximal₁ Turing.eval_maximal₁
theorem eval_maximal {σ} {f : σ → Option σ} {a b} (h : b ∈ eval f a) {c} : Reaches f b c ↔ c = b :=
let ⟨_, b0⟩ := mem_eval.1 h
reflTransGen_iff_eq fun b' h' ↦ by cases b0.symm.trans h'
#align turing.eval_maximal Turing.eval_maximal
theorem reaches_eval {σ} {f : σ → Option σ} {a b} (ab : Reaches f a b) : eval f a = eval f b := by
refine' Part.ext fun _ ↦ ⟨fun h ↦ _, fun h ↦ _⟩
· have ⟨ac, c0⟩ := mem_eval.1 h
exact mem_eval.2 ⟨(or_iff_left_of_imp fun cb ↦ (eval_maximal h).1 cb ▸ ReflTransGen.refl).1
(reaches_total ab ac), c0⟩
· have ⟨bc, c0⟩ := mem_eval.1 h
exact mem_eval.2 ⟨ab.trans bc, c0⟩
#align turing.reaches_eval Turing.reaches_eval
/-- Given a relation `tr : σ₁ → σ₂ → Prop` between state spaces, and state transition functions
`f₁ : σ₁ → Option σ₁` and `f₂ : σ₂ → Option σ₂`, `Respects f₁ f₂ tr` means that if `tr a₁ a₂` holds
initially and `f₁` takes a step from `a₁` to `b₁`, then `f₂` takes one or more steps from `a₂` to
reach a state `b₂` satisfying `tr b₁ b₂`; and if `f₁` halts at `a₁` then `f₂` halts at `a₂`.
Such a relation `tr` is also known as a refinement. -/
def Respects {σ₁ σ₂} (f₁ : σ₁ → Option σ₁) (f₂ : σ₂ → Option σ₂) (tr : σ₁ → σ₂ → Prop) :=
∀ ⦃a₁ a₂⦄, tr a₁ a₂ → (match f₁ a₁ with
| some b₁ => ∃ b₂, tr b₁ b₂ ∧ Reaches₁ f₂ a₂ b₂
| none => f₂ a₂ = none : Prop)
#align turing.respects Turing.Respects
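-- Pictorially, `Respects f₁ f₂ tr` says that a single step of `f₁` is simulated by one or more
-- steps of `f₂`:
--
--   a₁ ─f₁→ b₁
--   |       |
--   tr      tr
--   |       |
--   a₂ ─f₂⁺→ b₂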
theorem tr_reaches₁ {σ₁ σ₂ f₁ f₂} {tr : σ₁ → σ₂ → Prop} (H : Respects f₁ f₂ tr) {a₁ a₂}
(aa : tr a₁ a₂) {b₁} (ab : Reaches₁ f₁ a₁ b₁) : ∃ b₂, tr b₁ b₂ ∧ Reaches₁ f₂ a₂ b₂ := by
induction' ab with c₁ ac c₁ d₁ _ cd IH
· have := H aa
rwa [show f₁ a₁ = _ from ac] at this
· rcases IH with ⟨c₂, cc, ac₂⟩
have := H cc
rw [show f₁ c₁ = _ from cd] at this
rcases this with ⟨d₂, dd, cd₂⟩
exact ⟨_, dd, ac₂.trans cd₂⟩
#align turing.tr_reaches₁ Turing.tr_reaches₁
theorem tr_reaches {σ₁ σ₂ f₁ f₂} {tr : σ₁ → σ₂ → Prop} (H : Respects f₁ f₂ tr) {a₁ a₂}
(aa : tr a₁ a₂) {b₁} (ab : Reaches f₁ a₁ b₁) : ∃ b₂, tr b₁ b₂ ∧ Reaches f₂ a₂ b₂ := by
rcases reflTransGen_iff_eq_or_transGen.1 ab with (rfl | ab)
· exact ⟨_, aa, ReflTransGen.refl⟩
· have ⟨b₂, bb, h⟩ := tr_reaches₁ H aa ab
exact ⟨b₂, bb, h.to_reflTransGen⟩
#align turing.tr_reaches Turing.tr_reaches
theorem tr_reaches_rev {σ₁ σ₂ f₁ f₂} {tr : σ₁ → σ₂ → Prop} (H : Respects f₁ f₂ tr) {a₁ a₂}
(aa : tr a₁ a₂) {b₂} (ab : Reaches f₂ a₂ b₂) :
∃ c₁ c₂, Reaches f₂ b₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ := by
induction' ab with c₂ d₂ _ cd IH
· exact ⟨_, _, ReflTransGen.refl, aa, ReflTransGen.refl⟩
· rcases IH with ⟨e₁, e₂, ce, ee, ae⟩
rcases ReflTransGen.cases_head ce with (rfl | ⟨d', cd', de⟩)
· have := H ee
revert this
cases' eg : f₁ e₁ with g₁ <;> simp only [Respects, and_imp, exists_imp]
· intro c0
cases cd.symm.trans c0
· intro g₂ gg cg
rcases TransGen.head'_iff.1 cg with ⟨d', cd', dg⟩
cases Option.mem_unique cd cd'
exact ⟨_, _, dg, gg, ae.tail eg⟩
· cases Option.mem_unique cd cd'
exact ⟨_, _, de, ee, ae⟩
#align turing.tr_reaches_rev Turing.tr_reaches_rev
theorem tr_eval {σ₁ σ₂ f₁ f₂} {tr : σ₁ → σ₂ → Prop} (H : Respects f₁ f₂ tr) {a₁ b₁ a₂}
(aa : tr a₁ a₂) (ab : b₁ ∈ eval f₁ a₁) : ∃ b₂, tr b₁ b₂ ∧ b₂ ∈ eval f₂ a₂ := by
cases' mem_eval.1 ab with ab b0
rcases tr_reaches H aa ab with ⟨b₂, bb, ab⟩
refine' ⟨_, bb, mem_eval.2 ⟨ab, _⟩⟩
have := H bb; rwa [b0] at this
#align turing.tr_eval Turing.tr_eval
theorem tr_eval_rev {σ₁ σ₂ f₁ f₂} {tr : σ₁ → σ₂ → Prop} (H : Respects f₁ f₂ tr) {a₁ b₂ a₂}
(aa : tr a₁ a₂) (ab : b₂ ∈ eval f₂ a₂) : ∃ b₁, tr b₁ b₂ ∧ b₁ ∈ eval f₁ a₁ := by
cases' mem_eval.1 ab with ab b0
rcases tr_reaches_rev H aa ab with ⟨c₁, c₂, bc, cc, ac⟩
cases (reflTransGen_iff_eq (Option.eq_none_iff_forall_not_mem.1 b0)).1 bc
refine' ⟨_, cc, mem_eval.2 ⟨ac, _⟩⟩
have := H cc
cases' hfc : f₁ c₁ with d₁
· rfl
rw [hfc] at this
rcases this with ⟨d₂, _, bd⟩
rcases TransGen.head'_iff.1 bd with ⟨e, h, _⟩
cases b0.symm.trans h
#align turing.tr_eval_rev Turing.tr_eval_rev
theorem tr_eval_dom {σ₁ σ₂ f₁ f₂} {tr : σ₁ → σ₂ → Prop} (H : Respects f₁ f₂ tr) {a₁ a₂}
(aa : tr a₁ a₂) : (eval f₂ a₂).Dom ↔ (eval f₁ a₁).Dom :=
⟨fun h ↦
let ⟨_, _, h, _⟩ := tr_eval_rev H aa ⟨h, rfl⟩
h,
fun h ↦
let ⟨_, _, h, _⟩ := tr_eval H aa ⟨h, rfl⟩
h⟩
#align turing.tr_eval_dom Turing.tr_eval_dom
/-- A simpler version of `Respects` when the relation `tr` between states is given by a function. -/
def FRespects {σ₁ σ₂} (f₂ : σ₂ → Option σ₂) (tr : σ₁ → σ₂) (a₂ : σ₂) : Option σ₁ → Prop
| some b₁ => Reaches₁ f₂ a₂ (tr b₁)
| none => f₂ a₂ = none
#align turing.frespects Turing.FRespects
theorem frespects_eq {σ₁ σ₂} {f₂ : σ₂ → Option σ₂} {tr : σ₁ → σ₂} {a₂ b₂} (h : f₂ a₂ = f₂ b₂) :
∀ {b₁}, FRespects f₂ tr a₂ b₁ ↔ FRespects f₂ tr b₂ b₁
| some b₁ => reaches₁_eq h
| none => by unfold FRespects; rw [h]
#align turing.frespects_eq Turing.frespects_eq
theorem fun_respects {σ₁ σ₂ f₁ f₂} {tr : σ₁ → σ₂} :
(Respects f₁ f₂ fun a b ↦ tr a = b) ↔ ∀ ⦃a₁⦄, FRespects f₂ tr (tr a₁) (f₁ a₁) :=
forall_congr' fun a₁ ↦ by
cases f₁ a₁ <;> simp only [FRespects, Respects, exists_eq_left', forall_eq']
#align turing.fun_respects Turing.fun_respects
theorem tr_eval' {σ₁ σ₂} (f₁ : σ₁ → Option σ₁) (f₂ : σ₂ → Option σ₂) (tr : σ₁ → σ₂)
(H : Respects f₁ f₂ fun a b ↦ tr a = b) (a₁) : eval f₂ (tr a₁) = tr <$> eval f₁ a₁ :=
Part.ext fun b₂ ↦
⟨fun h ↦
let ⟨b₁, bb, hb⟩ := tr_eval_rev H rfl h
(Part.mem_map_iff _).2 ⟨b₁, hb, bb⟩,
fun h ↦ by
rcases(Part.mem_map_iff _).1 h with ⟨b₁, ab, bb⟩
rcases tr_eval H rfl ab with ⟨_, rfl, h⟩
rwa [bb] at h⟩
#align turing.tr_eval' Turing.tr_eval'
/-!
## The TM0 model
A TM0 Turing machine is essentially a Post-Turing machine, adapted for type theory.
A Post-Turing machine with symbol type `Γ` and label type `Λ` is a function
`Λ → Γ → Option (Λ × Stmt)`, where a `Stmt` can be either `move left`, `move right` or `write a`
for `a : Γ`. The machine works over a "tape", a doubly-infinite sequence of elements of `Γ`, and
an instantaneous configuration, `Cfg`, is a label `q : Λ` indicating the current internal state of
the machine, and a `Tape Γ` (which is essentially `ℤ →₀ Γ`). The evolution is described by the
`step` function:
* If `M q T.head = none`, then the machine halts.
* If `M q T.head = some (q', s)`, then the machine performs action `s : Stmt` and then transitions
to state `q'`.
The initial state takes a `List Γ` and produces a `Tape Γ` where the head of the list is the head
of the tape and the rest of the list extends to the right, with the left side all blank. The output
of the final state is the portion of the tape at or to the right of the final head position. (This
is actually a `ListBlank Γ`, not a `List Γ`, because we don't know, at this level of generality,
where the output ends. If equality to `default : Γ` is decidable we can trim the list
to remove the infinite tail of blanks.)
-/
namespace TM0
-- "TM0"
set_option linter.uppercaseLean3 false
section
variable (Γ : Type _) [Inhabited Γ]
-- type of tape symbols
variable (Λ : Type _) [Inhabited Λ]
-- type of "labels" or TM states
/-- A Turing machine "statement" is just a command to either move
left or right, or write a symbol on the tape. -/
inductive Stmt
| move : Dir → Stmt
| write : Γ → Stmt
#align turing.TM0.stmt Turing.TM0.Stmt
local notation "Stmt₀" => Stmt Γ -- Porting note: Added this to clean up types.
instance Stmt.inhabited : Inhabited Stmt₀ :=
⟨Stmt.write default⟩
#align turing.TM0.stmt.inhabited Turing.TM0.Stmt.inhabited
/-- A Post-Turing machine with symbol type `Γ` and label type `Λ`
is a function which, given the current state `q : Λ` and
the tape head `a : Γ`, either halts (returns `none`) or returns
a new state `q' : Λ` and a `Stmt` describing what to do,
either a move left or right, or a write command.
Both `Λ` and `Γ` are required to be inhabited; the default value
for `Γ` is the "blank" tape value, and the default value of `Λ` is
the initial state. -/
@[nolint unusedArguments] -- the unused `Inhabited` hypotheses are deliberate; see the docstring above
def Machine [Inhabited Λ] :=
Λ → Γ → Option (Λ × Stmt₀)
#align turing.TM0.machine Turing.TM0.Machine
local notation "Machine₀" => Machine Γ Λ -- Porting note: Added this to clean up types.
instance Machine.inhabited : Inhabited Machine₀ := by
unfold Machine; infer_instance
#align turing.TM0.machine.inhabited Turing.TM0.Machine.inhabited
/-- The configuration state of a Turing machine during operation
consists of a label (machine state), and a tape, represented in
the form `(a, L, R)` meaning the tape looks like `L.rev ++ [a] ++ R`
with the machine currently reading the `a`. The lists are
automatically extended with blanks as the machine moves around. -/
structure Cfg where
q : Λ
Tape : Tape Γ
#align turing.TM0.cfg Turing.TM0.Cfg
local notation "Cfg₀" => Cfg Γ Λ -- Porting note: Added this to clean up types.
instance Cfg.inhabited : Inhabited Cfg₀ :=
⟨⟨default, default⟩⟩
#align turing.TM0.cfg.inhabited Turing.TM0.Cfg.inhabited
variable {Γ Λ}
/-- Execution semantics of the Turing machine. -/
def step (M : Machine₀) : Cfg₀ → Option Cfg₀ :=
fun ⟨q, T⟩ ↦ (M q T.1).map fun ⟨q', a⟩ ↦ ⟨q', match a with
| Stmt.move d => T.move d
| Stmt.write a => T.write a⟩
#align turing.TM0.step Turing.TM0.step
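-- As a degenerate example (an illustration, not part of the original file): the machine that in
-- every state just moves right and stays in the same state never returns `none`, so `step`
-- always succeeds and `eval` (defined below) diverges on every input.
example : Machine₀ := fun q _ ↦ some (q, Stmt.move Dir.right)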
/-- The statement `Reaches M s₁ s₂` means that `s₂` is obtained
from `s₁` after a finite number of steps of the machine. -/
def Reaches (M : Machine₀) : Cfg₀ → Cfg₀ → Prop :=
ReflTransGen fun a b ↦ b ∈ step M a
#align turing.TM0.reaches Turing.TM0.Reaches
/-- The initial configuration. -/
def init (l : List Γ) : Cfg₀ :=
⟨default, Tape.mk₁ l⟩
#align turing.TM0.init Turing.TM0.init
/-- Evaluate a Turing machine on initial input to a final state,
if it terminates. -/
-- Porting note: Added noncomputable
noncomputable def eval (M : Machine₀) (l : List Γ) : Part (ListBlank Γ) :=
(Turing.eval (step M) (init l)).map fun c ↦ c.Tape.right₀
#align turing.TM0.eval Turing.TM0.eval
/-- The raw definition of a Turing machine does not require that
`Γ` and `Λ` are finite, and in practice we will be interested
in the infinite `Λ` case. We recover instead a notion of
"effectively finite" Turing machines, which only make use of a
finite subset of their states. We say that a set `S ⊆ Λ`
supports a Turing machine `M` if `S` is closed under the
transition function and contains the initial state. -/
def Supports (M : Machine₀) (S : Set Λ) :=
default ∈ S ∧ ∀ {q a q' s}, (q', s) ∈ M q a → q ∈ S → q' ∈ S
#align turing.TM0.supports Turing.TM0.Supports
theorem step_supports (M : Machine₀) {S : Set Λ} (ss : Supports M S) :
∀ {c c' : Cfg₀}, c' ∈ step M c → c.q ∈ S → c'.q ∈ S := by
intro ⟨q, T⟩ c' h₁ h₂
rcases Option.map_eq_some'.1 h₁ with ⟨⟨q', a⟩, h, rfl⟩
exact ss.2 h h₂
#align turing.TM0.step_supports Turing.TM0.step_supports
theorem univ_supports (M : Machine₀) : Supports M Set.univ := by
constructor <;> intros <;> apply Set.mem_univ
#align turing.TM0.univ_supports Turing.TM0.univ_supports
end
section
variable {Γ : Type _} [Inhabited Γ]
variable {Γ' : Type _} [Inhabited Γ']
variable {Λ : Type _} [Inhabited Λ]
variable {Λ' : Type _} [Inhabited Λ']
/-- Map a TM statement across a function. This does nothing to move statements and maps the write
values. -/
def Stmt.map (f : PointedMap Γ Γ') : Stmt Γ → Stmt Γ'
| Stmt.move d => Stmt.move d
| Stmt.write a => Stmt.write (f a)
#align turing.TM0.stmt.map Turing.TM0.Stmt.map
/-- Map a configuration across a function, given `f : Γ → Γ'` a map of the alphabets and
`g : Λ → Λ'` a map of the machine states. -/
def Cfg.map (f : PointedMap Γ Γ') (g : Λ → Λ') : Cfg Γ Λ → Cfg Γ' Λ'
| ⟨q, T⟩ => ⟨g q, T.map f⟩
#align turing.TM0.cfg.map Turing.TM0.Cfg.map
variable (M : Machine Γ Λ) (f₁ : PointedMap Γ Γ') (f₂ : PointedMap Γ' Γ) (g₁ : Λ → Λ') (g₂ : Λ' → Λ)
/-- Because the state transition function uses the alphabet and machine states in both the input
and output, to map a machine from one alphabet and machine state space to another we need functions
in both directions, essentially an `Equiv` without the laws. -/
def Machine.map : Machine Γ' Λ'
| q, l => (M (g₂ q) (f₂ l)).map (Prod.map g₁ (Stmt.map f₁))
#align turing.TM0.machine.map Turing.TM0.Machine.map
theorem Machine.map_step {S : Set Λ} (f₂₁ : Function.RightInverse f₁ f₂)
(g₂₁ : ∀ q ∈ S, g₂ (g₁ q) = q) :
∀ c : Cfg Γ Λ,
c.q ∈ S → (step M c).map (Cfg.map f₁ g₁) = step (M.map f₁ f₂ g₁ g₂) (Cfg.map f₁ g₁ c)
| ⟨q, T⟩, h => by
unfold step Machine.map Cfg.map
simp only [Turing.Tape.map_fst, g₂₁ q h, f₂₁ _]
rcases M q T.1 with (_ | ⟨q', d | a⟩); · rfl
· simp only [step, Cfg.map, Option.map_some', Tape.map_move f₁]
rfl
· simp only [step, Cfg.map, Option.map_some', Tape.map_write]
rfl
#align turing.TM0.machine.map_step Turing.TM0.Machine.map_step
theorem map_init (g₁ : PointedMap Λ Λ') (l : List Γ) : (init l).map f₁ g₁ = init (l.map f₁) :=
congr (congr_arg Cfg.mk g₁.map_pt) (Tape.map_mk₁ _ _)
#align turing.TM0.map_init Turing.TM0.map_init
theorem Machine.map_respects (g₁ : PointedMap Λ Λ') (g₂ : Λ' → Λ) {S} (ss : Supports M S)
(f₂₁ : Function.RightInverse f₁ f₂) (g₂₁ : ∀ q ∈ S, g₂ (g₁ q) = q) :
Respects (step M) (step (M.map f₁ f₂ g₁ g₂)) fun a b ↦ a.q ∈ S ∧ Cfg.map f₁ g₁ a = b := by
intro c _ ⟨cs, rfl⟩
cases e : step M c
· rw [← M.map_step f₁ f₂ g₁ g₂ f₂₁ g₂₁ _ cs, e]
rfl
· refine' ⟨_, ⟨step_supports M ss e cs, rfl⟩, TransGen.single _⟩
rw [← M.map_step f₁ f₂ g₁ g₂ f₂₁ g₂₁ _ cs, e]
rfl
#align turing.TM0.machine.map_respects Turing.TM0.Machine.map_respects
end
end TM0
/-!
## The TM1 model
The TM1 model is a simplification and extension of TM0 (Post-Turing model) in the direction of
Wang B-machines. The machine's internal state is extended with a (finite) store `σ` of variables
that may be accessed and updated at any time.
A machine is given by a `Λ`-indexed set of procedures or functions. Each function has a body which
is a `Stmt`. Most of the regular commands are allowed to use the current value `a` of the local
variables and the value `T.head` on the tape to calculate what to write or how to change local
state, but the statements themselves have a fixed structure. The `Stmt`s can be as follows:
* `move d q`: move left or right, and then do `q`
* `write (f : Γ → σ → Γ) q`: write `f T.head a` to the tape, then do `q`
* `load (f : Γ → σ → σ) q`: change the internal state to `f T.head a`
* `branch (f : Γ → σ → Bool) qtrue qfalse`: if `f T.head a` is true, do `qtrue`, else do `qfalse`
* `goto (f : Γ → σ → Λ)`: go to label `f T.head a`
* `halt`: transition to the halting state, which halts on the following step
Note that here most statements do not have labels; `goto` commands can only go to a new function.
Only the `goto` and `halt` statements actually take a step; the rest is done by recursion on
statements and so takes 0 steps. (There is a uniform bound on how many statements can be executed
before the next `goto`, so this is an `O(1)` speedup with the constant depending on the machine.)
The `halt` command has a one-step stutter before actually halting, so that any changes made before
the halt have a chance to be "committed", since the `eval` relation uses the final configuration
before the halt as the output, and `move` and `write` etc. take 0 steps in this model.
-/
namespace TM1
-- "TM1"
set_option linter.uppercaseLean3 false
section
variable (Γ : Type _) [Inhabited Γ]
-- Type of tape symbols
variable (Λ : Type _)
-- Type of function labels
variable (σ : Type _)
-- Type of variable settings
/-- The TM1 model is a simplification and extension of TM0
(Post-Turing model) in the direction of Wang B-machines. The machine's
internal state is extended with a (finite) store `σ` of variables
that may be accessed and updated at any time.
A machine is given by a `Λ`-indexed set of procedures or functions.
Each function has a body which is a `Stmt`, which can either be a
`move` or `write` command, a `branch` (if statement based on the
current tape value), a `load` (set the variable value),
a `goto` (call another function), or `halt`. Note that here
most statements do not have labels; `goto` commands can only
go to a new function. All commands have access to the variable value
and current tape value. -/
inductive Stmt
| move : Dir → Stmt → Stmt
| write : (Γ → σ → Γ) → Stmt → Stmt
| load : (Γ → σ → σ) → Stmt → Stmt
| branch : (Γ → σ → Bool) → Stmt → Stmt → Stmt
| goto : (Γ → σ → Λ) → Stmt
| halt : Stmt
#align turing.TM1.stmt Turing.TM1.Stmt
local notation "Stmt₁" => Stmt Γ Λ σ -- Porting note: Added this to clean up types.
open Stmt
instance Stmt.inhabited : Inhabited Stmt₁ :=
⟨halt⟩
#align turing.TM1.stmt.inhabited Turing.TM1.Stmt.inhabited
/-- The configuration of a TM1 machine is given by the currently
evaluating statement, the variable store value, and the tape. -/
structure Cfg where
l : Option Λ
var : σ
Tape : Tape Γ
#align turing.TM1.cfg Turing.TM1.Cfg
local notation "Cfg₁" => Cfg Γ Λ σ -- Porting note: Added this to clean up types.
instance Cfg.inhabited [Inhabited σ] : Inhabited Cfg₁ :=
⟨⟨default, default, default⟩⟩
#align turing.TM1.cfg.inhabited Turing.TM1.Cfg.inhabited
variable {Γ Λ σ}
/-- The semantics of TM1 evaluation. -/
def stepAux : Stmt₁ → σ → Tape Γ → Cfg₁
| move d q, v, T => stepAux q v (T.move d)
| write a q, v, T => stepAux q v (T.write (a T.1 v))
| load s q, v, T => stepAux q (s T.1 v) T
| branch p q₁ q₂, v, T => cond (p T.1 v) (stepAux q₁ v T) (stepAux q₂ v T)
| goto l, v, T => ⟨some (l T.1 v), v, T⟩
| halt, v, T => ⟨none, v, T⟩
#align turing.TM1.step_aux Turing.TM1.stepAux
/-- The state transition function. -/
def step (M : Λ → Stmt₁) : Cfg₁ → Option Cfg₁
| ⟨none, _, _⟩ => none
| ⟨some l, v, T⟩ => some (stepAux (M l) v T)
#align turing.TM1.step Turing.TM1.step
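-- A tiny example program (an illustration, not part of the original file): blank the current
-- cell, move right, and halt. Since `write` and `move` are handled recursively by `stepAux`,
-- all of this happens in a single application of `step`, ending in the `none` (halted) label.
example : Stmt₁ := Stmt.write (fun _ _ ↦ default) (Stmt.move Dir.right Stmt.halt)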
/-- A set `S` of labels supports the statement `q` if all the `goto`
statements in `q` refer only to other functions in `S`. -/
def SupportsStmt (S : Finset Λ) : Stmt₁ → Prop
| move _ q => SupportsStmt S q
| write _ q => SupportsStmt S q
| load _ q => SupportsStmt S q
| branch _ q₁ q₂ => SupportsStmt S q₁ ∧ SupportsStmt S q₂
| goto l => ∀ a v, l a v ∈ S
| halt => True
#align turing.TM1.supports_stmt Turing.TM1.SupportsStmt
open Classical
/-- The subterm closure of a statement. -/
noncomputable def stmts₁ : Stmt₁ → Finset Stmt₁
| Q@(move _ q) => insert Q (stmts₁ q)
| Q@(write _ q) => insert Q (stmts₁ q)
| Q@(load _ q) => insert Q (stmts₁ q)
| Q@(branch _ q₁ q₂) => insert Q (stmts₁ q₁ ∪ stmts₁ q₂)
| Q => {Q}
#align turing.TM1.stmts₁ Turing.TM1.stmts₁
theorem stmts₁_self {q : Stmt₁} : q ∈ stmts₁ q := by
cases q <;> simp only [stmts₁, Finset.mem_insert_self, Finset.mem_singleton_self]
#align turing.TM1.stmts₁_self Turing.TM1.stmts₁_self
theorem stmts₁_trans {q₁ q₂ : Stmt₁} : q₁ ∈ stmts₁ q₂ → stmts₁ q₁ ⊆ stmts₁ q₂ := by
intro h₁₂ q₀ h₀₁
induction' q₂ with _ q IH _ q IH _ q IH <;> simp only [stmts₁] at h₁₂⊢ <;>
simp only [Finset.mem_insert, Finset.mem_union, Finset.mem_singleton] at h₁₂
iterate 3
rcases h₁₂ with (rfl | h₁₂)
· unfold stmts₁ at h₀₁
exact h₀₁
· exact Finset.mem_insert_of_mem (IH h₁₂)
case branch p q₁ q₂ IH₁ IH₂ =>
rcases h₁₂ with (rfl | h₁₂ | h₁₂)
· unfold stmts₁ at h₀₁
exact h₀₁
· exact Finset.mem_insert_of_mem (Finset.mem_union_left _ <| IH₁ h₁₂)
· exact Finset.mem_insert_of_mem (Finset.mem_union_right _ <| IH₂ h₁₂)
case goto l => subst h₁₂; exact h₀₁
case halt => subst h₁₂; exact h₀₁
#align turing.TM1.stmts₁_trans Turing.TM1.stmts₁_trans
/-- The set of all statements in a Turing machine, plus one extra value `none` representing the
halt state. This is used in the TM1 to TM0 reduction. -/
noncomputable def stmts (M : Λ → Stmt₁) (S : Finset Λ) : Finset (Option Stmt₁) :=
Finset.insertNone (S.bunionᵢ fun q ↦ stmts₁ (M q))
#align turing.TM1.stmts Turing.TM1.stmts
theorem stmts_trans {M : Λ → Stmt₁} {S : Finset Λ} {q₁ q₂ : Stmt₁} (h₁ : q₁ ∈ stmts₁ q₂) :
some q₂ ∈ stmts M S → some q₁ ∈ stmts M S := by
simp only [stmts, Finset.mem_insertNone, Finset.mem_bunionᵢ, Option.mem_def, Option.some.injEq,
forall_eq', exists_imp, and_imp]
exact fun l ls h₂ ↦ ⟨_, ls, stmts₁_trans h₂ h₁⟩
#align turing.TM1.stmts_trans Turing.TM1.stmts_trans
variable [Inhabited Λ]
/-- A set `S` of labels supports machine `M` if all the `goto`
statements in the functions in `S` refer only to other functions
in `S`. -/
def Supports (M : Λ → Stmt₁) (S : Finset Λ) :=
default ∈ S ∧ ∀ q ∈ S, SupportsStmt S (M q)
#align turing.TM1.supports Turing.TM1.Supports
theorem stmts_supportsStmt {M : Λ → Stmt₁} {S : Finset Λ} {q : Stmt₁} (ss : Supports M S) :
some q ∈ stmts M S → SupportsStmt S q := by
simp only [stmts, Finset.mem_insertNone, Finset.mem_bunionᵢ, Option.mem_def, Option.some.injEq,
forall_eq', exists_imp, and_imp]
exact fun l ls h ↦ stmts₁_supportsStmt_mono h (ss.2 _ ls)
#align turing.TM1.stmts_supports_stmt Turing.TM1.stmts_supportsStmt
theorem step_supports (M : Λ → Stmt₁) {S : Finset Λ} (ss : Supports M S) :
∀ {c c' : Cfg₁}, c' ∈ step M c → c.l ∈ Finset.insertNone S → c'.l ∈ Finset.insertNone S
| ⟨some l₁, v, T⟩, c', h₁, h₂ => by
replace h₂ := ss.2 _ (Finset.some_mem_insertNone.1 h₂)
simp only [step, Option.mem_def, Option.some.injEq] at h₁; subst c'
revert h₂; induction' M l₁ with _ q IH _ q IH _ q IH generalizing v T <;> intro hs
iterate 3 exact IH _ _ hs
case branch p q₁' q₂' IH₁ IH₂ =>
unfold stepAux; cases p T.1 v
· exact IH₂ _ _ hs.2
· exact IH₁ _ _ hs.1
case goto => exact Finset.some_mem_insertNone.2 (hs _ _)
case halt => apply Multiset.mem_cons_self
#align turing.TM1.step_supports Turing.TM1.step_supports
variable [Inhabited σ]
/-- The initial state, given a finite input that is placed on the tape starting at the TM head and
going to the right. -/
def init (l : List Γ) : Cfg₁ :=
⟨some default, default, Tape.mk₁ l⟩
#align turing.TM1.init Turing.TM1.init
/-- Evaluate a TM to completion, resulting in an output list on the tape (with an indeterminate
number of blanks on the end). -/
-- Porting note: Added noncomputable
noncomputable def eval (M : Λ → Stmt₁) (l : List Γ) : Part (ListBlank Γ) :=
(Turing.eval (step M) (init l)).map fun c ↦ c.Tape.right₀
#align turing.TM1.eval Turing.TM1.eval
end
end TM1
/-!
## TM1 emulator in TM0
To prove that TM1 computable functions are TM0 computable, we need to reduce each TM1 program to a
TM0 program. So suppose a TM1 program is given. We take the following:
* The alphabet `Γ` is the same for both TM1 and TM0
* The set of states `Λ'` is defined to be `Option Stmt₁ × σ`, that is, a TM1 statement or `none`
representing halt, and the possible settings of the internal variables.
Note that this is an infinite set, because `Stmt₁` is infinite. This is okay because we assume
that from the initial TM1 state, only finitely many other labels are reachable, and there are
only finitely many statements that appear in all of these functions.
Even though `Stmt₁` contains a statement called `halt`, we must separate it from `none`
(`some halt` steps to `none` and `none` actually halts) because there is a one-step stutter in the
TM1 semantics.
-/
namespace TM1to0
-- "TM1to0"
set_option linter.uppercaseLean3 false
section
variable {Γ : Type _} [Inhabited Γ]
variable {Λ : Type _} [Inhabited Λ]
variable {σ : Type _} [Inhabited σ]
local notation "Stmt₁" => TM1.Stmt Γ Λ σ
local notation "Cfg₁" => TM1.Cfg Γ Λ σ
local notation "Stmt₀" => TM0.Stmt Γ
variable (M : Λ → TM1.Stmt Γ Λ σ) -- Porting note: Unfolded `Stmt₁`.
-- Porting note: `Inhabited`s are not necessary, but `M` is necessary.
set_option linter.unusedVariables false in
/-- The base machine state space is a pair of an `Option Stmt₁` representing the current program
to be executed, or `none` for the halt state, and a `σ` which is the local state (stored in the TM,
not the tape). Because there are an infinite number of programs, this state space is infinite, but
for a finitely supported TM1 machine and a finite type `σ`, only finitely many of these states are
reachable. -/
@[nolint unusedArguments] -- We need the M assumption
def Λ' (M : Λ → TM1.Stmt Γ Λ σ) :=
Option Stmt₁ × σ
#align turing.TM1to0.Λ' Turing.TM1to0.Λ'
local notation "Λ'₁₀" => Λ' M -- Porting note: Added this to clean up types.
instance : Inhabited Λ'₁₀ :=
⟨(some (M default), default)⟩
open TM0.Stmt
/-- The core TM1 → TM0 translation function. Here `s` is the current value on the tape, and the
`Stmt₁` is the TM1 statement to translate, with local state `v : σ`. We evaluate all regular
instructions recursively until we reach either a `move` or `write` command, or a `goto` or `halt`;
in the latter two cases we emit a dummy `write s` step and transition to the new target location
(or to the halt state). -/
def trAux (s : Γ) : Stmt₁ → σ → Λ'₁₀ × Stmt₀
| TM1.Stmt.move d q, v => ((some q, v), move d)
| TM1.Stmt.write a q, v => ((some q, v), write (a s v))
| TM1.Stmt.load a q, v => trAux s q (a s v)
| TM1.Stmt.branch p q₁ q₂, v => cond (p s v) (trAux s q₁ v) (trAux s q₂ v)
| TM1.Stmt.goto l, v => ((some (M (l s v)), v), write s)
| TM1.Stmt.halt, v => ((none, v), write s)
#align turing.TM1to0.tr_aux Turing.TM1to0.trAux
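-- For instance, the first equation above says `trAux s (TM1.Stmt.move d q) v` is
-- `((some q, v), move d)`: the TM0 machine performs the move now and remembers the remainder of
-- the program `q`, together with the unchanged local state `v`, in its label.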
local notation "Cfg₁₀" => TM0.Cfg Γ Λ'₁₀
/-- The translated TM0 machine (given the TM1 machine input). -/
def tr : TM0.Machine Γ Λ'₁₀
| (none, _), _ => none
| (some q, v), s => some (trAux M s q v)
#align turing.TM1to0.tr Turing.TM1to0.tr
/-- Translate configurations from TM1 to TM0. -/
def trCfg : Cfg₁ → Cfg₁₀
| ⟨l, v, T⟩ => ⟨(l.map M, v), T⟩
#align turing.TM1to0.tr_cfg Turing.TM1to0.trCfg
theorem tr_respects :
Respects (TM1.step M) (TM0.step (tr M)) fun (c₁ : Cfg₁) (c₂ : Cfg₁₀) ↦ trCfg M c₁ = c₂ :=
fun_respects.2 fun ⟨l₁, v, T⟩ ↦ by
cases' l₁ with l₁; · exact rfl
simp only [trCfg, TM1.step, FRespects, Option.map]
induction' M l₁ with _ q IH _ q IH _ q IH generalizing v T
case move d q IH => exact TransGen.head rfl (IH _ _)
case write a q IH => exact TransGen.head rfl (IH _ _)
case load a q IH => exact (reaches₁_eq (by rfl)).2 (IH _ _)
case branch p q₁ q₂ IH₁ IH₂ =>
unfold TM1.stepAux; cases e : p T.1 v
· exact (reaches₁_eq (by simp only [TM0.step, tr, trAux, e]; rfl)).2 (IH₂ _ _)
· exact (reaches₁_eq (by simp only [TM0.step, tr, trAux, e]; rfl)).2 (IH₁ _ _)
iterate 2
exact TransGen.single (congr_arg some (congr (congr_arg TM0.Cfg.mk rfl) (Tape.write_self T)))
#align turing.TM1to0.tr_respects Turing.TM1to0.tr_respects
theorem tr_eval (l : List Γ) : TM0.eval (tr M) l = TM1.eval M l :=
(congr_arg _ (tr_eval' _ _ _ (tr_respects M) ⟨some _, _, _⟩)).trans
(by
rw [Part.map_eq_map, Part.map_map, TM1.eval]
congr with ⟨⟩)
#align turing.TM1to0.tr_eval Turing.TM1to0.tr_eval
variable [Fintype σ]
/-- Given a finite set of accessible `Λ` machine states, there is a finite set of accessible
machine states in the target (even though the type `Λ'` is infinite). -/
-- Porting note: Unfolded `×ˢ` to `Finset.product`.
noncomputable def trStmts (S : Finset Λ) : Finset Λ'₁₀ :=
Finset.product (TM1.stmts M S) Finset.univ
#align turing.TM1to0.tr_stmts Turing.TM1to0.trStmts
open Classical
attribute [local simp] TM1.stmts₁_self
theorem tr_supports {S : Finset Λ} (ss : TM1.Supports M S) :
TM0.Supports (tr M) ↑(trStmts M S) := by
constructor
· apply Finset.mem_product.2
constructor
· simp only [default, TM1.stmts, Finset.mem_insertNone, Option.mem_def, Option.some_inj,
forall_eq', Finset.mem_bunionᵢ]
exact ⟨_, ss.1, TM1.stmts₁_self⟩
· apply Finset.mem_univ
· intro q a q' s h₁ h₂
rcases q with ⟨_ | q, v⟩; · cases h₁
cases' q' with q' v'
simp only [trStmts, Finset.mem_coe] at h₂⊢
rw [Finset.mem_product] at h₂⊢
simp only [Finset.mem_univ, and_true_iff] at h₂⊢
cases q'; · exact Multiset.mem_cons_self _ _
simp only [tr, Option.mem_def] at h₁
have := TM1.stmts_supportsStmt ss h₂
revert this; induction q generalizing v <;> intro hs
case move d q =>
cases h₁; refine' TM1.stmts_trans _ h₂
unfold TM1.stmts₁
exact Finset.mem_insert_of_mem TM1.stmts₁_self
case write b q =>
cases h₁; refine' TM1.stmts_trans _ h₂
unfold TM1.stmts₁
exact Finset.mem_insert_of_mem TM1.stmts₁_self
case load b q IH =>
refine' IH _ (TM1.stmts_trans _ h₂) h₁ hs
unfold TM1.stmts₁
exact Finset.mem_insert_of_mem TM1.stmts₁_self
case branch p q₁ q₂ IH₁ IH₂ =>
cases h : p a v <;> rw [trAux, h] at h₁
· refine' IH₂ _ (TM1.stmts_trans _ h₂) h₁ hs.2
unfold TM1.stmts₁
exact Finset.mem_insert_of_mem (Finset.mem_union_right _ TM1.stmts₁_self)
· refine' IH₁ _ (TM1.stmts_trans _ h₂) h₁ hs.1
unfold TM1.stmts₁
exact Finset.mem_insert_of_mem (Finset.mem_union_left _ TM1.stmts₁_self)
case goto l =>
cases h₁
exact Finset.some_mem_insertNone.2 (Finset.mem_bunionᵢ.2 ⟨_, hs _ _, TM1.stmts₁_self⟩)
case halt => cases h₁
#align turing.TM1to0.tr_supports Turing.TM1to0.tr_supports
end
end TM1to0
/-!
## TM1(Γ) emulator in TM1(Bool)
The most parsimonious Turing machine model that is still Turing complete is `TM0` with `Γ = Bool`.
Because our construction in the previous section reducing `TM1` to `TM0` doesn't change the
alphabet, we can do the alphabet reduction on `TM1` instead of `TM0` directly.
The basic idea is to use a bijection between `Γ` and a subset of `Vector Bool n`, where `n` is a
fixed constant. Each tape element is represented as a block of `n` bools. Whenever the machine
wants to read a symbol from the tape, it traverses over the block, performing `n` `branch`
instructions to reach any of the `2^n` results.
For the `write` instruction, we have to use a `goto`, because the block of bits to be written
depends on the symbol read and on the local state, and a TM1 statement has a fixed structure that
cannot depend on such data. So instead we jump to a label computed from the read value and the
local state; the code at that label performs the writing and returns to normal execution.
Emulation overhead is `O(1)`. If not for the above `write` behavior it would be 1-1 because we are
exploiting the 0-step behavior of regular commands to avoid taking steps, but there are
nevertheless a bounded number of `write` calls between `goto` statements because TM1 statements are
finitely long.
-/
namespace TM1to1
-- "TM1to1"
set_option linter.uppercaseLean3 false
open TM1
section
variable {Γ : Type _} [Inhabited Γ]
theorem exists_enc_dec [Fintype Γ] : ∃ (n : ℕ) (enc : Γ → Vector Bool n) (dec : Vector Bool n → Γ),
enc default = Vector.replicate n false ∧ ∀ a, dec (enc a) = a := by
letI := Classical.decEq Γ
let n := Fintype.card Γ
obtain ⟨F⟩ := Fintype.truncEquivFin Γ
let G : Fin n ↪ Fin n → Bool :=
⟨fun a b ↦ a = b, fun a b h ↦
Bool.of_decide_true <| (congr_fun h b).trans <| Bool.decide_true rfl⟩
let H := (F.toEmbedding.trans G).trans (Equiv.vectorEquivFin _ _).symm.toEmbedding
classical
let enc := H.setValue default (Vector.replicate n false)
exact ⟨_, enc, Function.invFun enc, H.setValue_eq _ _, Function.leftInverse_invFun enc.2⟩
#align turing.TM1to1.exists_enc_dec Turing.TM1to1.exists_enc_dec
variable {Λ : Type _} [Inhabited Λ]
variable {σ : Type _} [Inhabited σ]
local notation "Stmt₁" => Stmt Γ Λ σ
local notation "Cfg₁" => Cfg Γ Λ σ
/-- The labels of the translated machine: either a normal TM1 label, or an intermediate `write`
state carrying the symbol to write and the statement to resume afterwards. -/
inductive Λ'
| normal : Λ → Λ'
| write : Γ → Stmt₁ → Λ'
#align turing.TM1to1.Λ' Turing.TM1to1.Λ'
local notation "Λ'₁" => @Λ' Γ Λ σ -- Porting note: Added this to clean up types.
instance : Inhabited Λ'₁ :=
⟨Λ'.normal default⟩
local notation "Stmt'₁" => Stmt Bool Λ'₁ σ
local notation "Cfg'₁" => Cfg Bool Λ'₁ σ
/-- Read a vector of length `n` from the tape. -/
def readAux : ∀ n, (Vector Bool n → Stmt'₁) → Stmt'₁
| 0, f => f Vector.nil
| i + 1, f =>
Stmt.branch (fun a _ ↦ a) (Stmt.move Dir.right <| readAux i fun v ↦ f (true ::ᵥ v))
(Stmt.move Dir.right <| readAux i fun v ↦ f (false ::ᵥ v))
#align turing.TM1to1.read_aux Turing.TM1to1.readAux
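-- Note that `readAux i f` leaves the head `i` cells to the right of where it started;
-- `read` below moves back `n` cells to the left to restore the head position.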
variable {n : ℕ} (enc : Γ → Vector Bool n) (dec : Vector Bool n → Γ)
/-- A move left or right corresponds to `n` moves across the super-cell. -/
def move (d : Dir) (q : Stmt'₁) : Stmt'₁ :=
(Stmt.move d^[n]) q
#align turing.TM1to1.move Turing.TM1to1.move
local notation "moveₙ" => @move Γ Λ σ n -- Porting note: Added this to clean up types.
/-- To read a symbol from the tape, we use `readAux` to traverse the symbol,
then return to the original position with `n` moves to the left. -/
def read (f : Γ → Stmt'₁) : Stmt'₁ :=
readAux n fun v ↦ moveₙ Dir.left <| f (dec v)
#align turing.TM1to1.read Turing.TM1to1.read
/-- Write a list of bools on the tape. -/
def write : List Bool → Stmt'₁ → Stmt'₁
| [], q => q
| a :: l, q => (Stmt.write fun _ _ ↦ a) <| Stmt.move Dir.right <| write l q
#align turing.TM1to1.write Turing.TM1to1.write
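-- Dually, `write l q` leaves the head `l.length` cells to the right of where it started; the
-- translation `tr` below follows it with `n` left moves to return to the start of the block.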
/-- Translate a normal instruction. For the `write` command, we use a `goto` indirection so that
we can access the current value of the tape. -/
def trNormal : Stmt₁ → Stmt'₁
| Stmt.move d q => moveₙ d <| trNormal q
| Stmt.write f q => read dec fun a ↦ Stmt.goto fun _ s ↦ Λ'.write (f a s) q
| Stmt.load f q => read dec fun a ↦ (Stmt.load fun _ s ↦ f a s) <| trNormal q
| Stmt.branch p q₁ q₂ =>
read dec fun a ↦ Stmt.branch (fun _ s ↦ p a s) (trNormal q₁) (trNormal q₂)
| Stmt.goto l => read dec fun a ↦ Stmt.goto fun _ s ↦ Λ'.normal (l a s)
| Stmt.halt => Stmt.halt
#align turing.TM1to1.tr_normal Turing.TM1to1.trNormal
theorem stepAux_move (d : Dir) (q : Stmt'₁) (v : σ) (T : Tape Bool) :
stepAux (moveₙ d q) v T = stepAux q v ((Tape.move d^[n]) T) := by
suffices : ∀ i, stepAux ((Stmt.move d^[i]) q) v T = stepAux q v ((Tape.move d^[i]) T)
exact this n
intro i; induction' i with i IH generalizing T; · rfl
rw [iterate_succ', iterate_succ]
simp only [stepAux, Function.comp_apply]
rw [IH]
#align turing.TM1to1.step_aux_move Turing.TM1to1.stepAux_move
theorem supportsStmt_move {S : Finset Λ'₁} {d : Dir} {q : Stmt'₁} :
SupportsStmt S (moveₙ d q) = SupportsStmt S q := by
suffices ∀ {i}, SupportsStmt S ((Stmt.move d^[i]) q) = _ from this
intro i; induction i generalizing q <;> simp only [*, iterate]; rfl
#align turing.TM1to1.supports_stmt_move Turing.TM1to1.supportsStmt_move
theorem supportsStmt_write {S : Finset Λ'₁} {l : List Bool} {q : Stmt'₁} :
SupportsStmt S (write l q) = SupportsStmt S q := by
induction' l with _ l IH <;> simp only [write, SupportsStmt, *]
#align turing.TM1to1.supports_stmt_write Turing.TM1to1.supportsStmt_write
theorem supportsStmt_read {S : Finset Λ'₁} :
∀ {f : Γ → Stmt'₁}, (∀ a, SupportsStmt S (f a)) → SupportsStmt S (read dec f) :=
suffices
∀ (i) (f : Vector Bool i → Stmt'₁), (∀ v, SupportsStmt S (f v)) → SupportsStmt S (readAux i f)
from fun hf ↦ this n _ (by intro; simp only [supportsStmt_move, hf])
fun i f hf ↦ by
induction' i with i IH; · exact hf _
constructor <;> apply IH <;> intro <;> apply hf
#align turing.TM1to1.supports_stmt_read Turing.TM1to1.supportsStmt_read
variable (enc0 : enc default = Vector.replicate n false)
section
variable {enc}
/-- The low level tape corresponding to the given tape over alphabet `Γ`. -/
def trTape' (L R : ListBlank Γ) : Tape Bool := by
refine'
Tape.mk' (L.bind (fun x ↦ (enc x).toList.reverse) ⟨n, _⟩)
(R.bind (fun x ↦ (enc x).toList) ⟨n, _⟩) <;>
simp only [enc0, Vector.replicate, List.reverse_replicate, Bool.default_bool, Vector.toList_mk]
#align turing.TM1to1.tr_tape' Turing.TM1to1.trTape'
/-- The low level tape corresponding to the given tape over alphabet `Γ`. -/
def trTape (T : Tape Γ) : Tape Bool :=
trTape' enc0 T.left T.right₀
#align turing.TM1to1.tr_tape Turing.TM1to1.trTape
theorem trTape_mk' (L R : ListBlank Γ) : trTape enc0 (Tape.mk' L R) = trTape' enc0 L R := by
simp only [trTape, Tape.mk'_left, Tape.mk'_right₀]
#align turing.TM1to1.tr_tape_mk' Turing.TM1to1.trTape_mk'
end
variable (M : Λ → TM1.Stmt Γ Λ σ) -- Porting note: Unfolded `Stmt₁`.
/-- The top level program. -/
def tr : Λ'₁ → Stmt'₁
| Λ'.normal l => trNormal dec (M l)
| Λ'.write a q => write (enc a).toList <| moveₙ Dir.left <| trNormal dec q
#align turing.TM1to1.tr Turing.TM1to1.tr
/-- The machine configuration translation. -/
def trCfg : Cfg₁ → Cfg'₁
| ⟨l, v, T⟩ => ⟨l.map Λ'.normal, v, trTape enc0 T⟩
#align turing.TM1to1.tr_cfg Turing.TM1to1.trCfg
variable {enc}
theorem trTape'_move_left (L R : ListBlank Γ) :
(Tape.move Dir.left^[n]) (trTape' enc0 L R) = trTape' enc0 L.tail (R.cons L.head) := by
obtain ⟨a, L, rfl⟩ := L.exists_cons
simp only [trTape', ListBlank.cons_bind, ListBlank.head_cons, ListBlank.tail_cons]
suffices ∀ {L' R' l₁ l₂} (_ : Vector.toList (enc a) = List.reverseAux l₁ l₂),
(Tape.move Dir.left^[l₁.length])
(Tape.mk' (ListBlank.append l₁ L') (ListBlank.append l₂ R')) =
Tape.mk' L' (ListBlank.append (Vector.toList (enc a)) R') by
simpa only [List.length_reverse, Vector.toList_length] using this (List.reverse_reverse _).symm
intro _ _ l₁ l₂ e
induction' l₁ with b l₁ IH generalizing l₂
· cases e
rfl
simp only [List.length, List.cons_append, iterate_succ_apply]
convert IH e
simp only [ListBlank.tail_cons, ListBlank.append, Tape.move_left_mk', ListBlank.head_cons]
#align turing.TM1to1.tr_tape'_move_left Turing.TM1to1.trTape'_move_left
theorem trTape'_move_right (L R : ListBlank Γ) :
(Tape.move Dir.right^[n]) (trTape' enc0 L R) = trTape' enc0 (L.cons R.head) R.tail := by
suffices ∀ i L, (Tape.move Dir.right^[i]) ((Tape.move Dir.left^[i]) L) = L by
refine' (Eq.symm _).trans (this n _)
simp only [trTape'_move_left, ListBlank.cons_head_tail, ListBlank.head_cons,
ListBlank.tail_cons]
intro i _
induction' i with i IH
· rfl
rw [iterate_succ_apply, iterate_succ_apply', Tape.move_left_right, IH]
#align turing.TM1to1.tr_tape'_move_right Turing.TM1to1.trTape'_move_right
theorem stepAux_write (q : Stmt'₁) (v : σ) (a b : Γ) (L R : ListBlank Γ) :
stepAux (write (enc a).toList q) v (trTape' enc0 L (ListBlank.cons b R)) =
stepAux q v (trTape' enc0 (ListBlank.cons a L) R) := by
simp only [trTape', ListBlank.cons_bind]
suffices ∀ {L' R'} (l₁ l₂ l₂' : List Bool) (_ : l₂'.length = l₂.length),
stepAux (write l₂ q) v (Tape.mk' (ListBlank.append l₁ L') (ListBlank.append l₂' R')) =
stepAux q v (Tape.mk' (L'.append (List.reverseAux l₂ l₁)) R') by
refine' this [] _ _ ((enc b).2.trans (enc a).2.symm)
clear a b L R
intro L' R' l₁ l₂ l₂' e
induction' l₂ with a l₂ IH generalizing l₁ l₂'
· cases List.length_eq_zero.1 e
rfl
cases' l₂' with b l₂' <;> simp only [List.length_nil, List.length_cons, Nat.succ_inj'] at e
rw [List.reverseAux, ← IH (a :: l₁) l₂' e]
simp only [stepAux, ListBlank.append, Tape.write_mk', Tape.move_right_mk', ListBlank.head_cons,
ListBlank.tail_cons]
#align turing.TM1to1.step_aux_write Turing.TM1to1.stepAux_write
variable (encdec : ∀ a, dec (enc a) = a)
theorem stepAux_read (f : Γ → Stmt'₁) (v : σ) (L R : ListBlank Γ) :
stepAux (read dec f) v (trTape' enc0 L R) = stepAux (f R.head) v (trTape' enc0 L R) := by
suffices ∀ f, stepAux (readAux n f) v (trTape' enc0 L R) =
stepAux (f (enc R.head)) v (trTape' enc0 (L.cons R.head) R.tail) by
rw [read, this, stepAux_move, encdec, trTape'_move_left enc0]
simp only [ListBlank.head_cons, ListBlank.cons_head_tail, ListBlank.tail_cons]
obtain ⟨a, R, rfl⟩ := R.exists_cons
simp only [ListBlank.head_cons, ListBlank.tail_cons, trTape', ListBlank.cons_bind,
ListBlank.append_assoc]
suffices ∀ i f L' R' l₁ l₂ h,
stepAux (readAux i f) v (Tape.mk' (ListBlank.append l₁ L') (ListBlank.append l₂ R')) =
stepAux (f ⟨l₂, h⟩) v (Tape.mk' (ListBlank.append (l₂.reverseAux l₁) L') R') by
intro f
-- Porting note: Here was `change`.
exact this n f (L.bind (fun x => (enc x).1.reverse) _)
(R.bind (fun x => (enc x).1) _) [] _ (enc a).2
clear f L a R
intro i f L' R' l₁ l₂ _
subst i
induction' l₂ with a l₂ IH generalizing l₁
· rfl
trans
stepAux (readAux l₂.length fun v ↦ f (a ::ᵥ v)) v
(Tape.mk' ((L'.append l₁).cons a) (R'.append l₂))
· dsimp [readAux, stepAux]
simp
cases a <;> rfl
rw [← ListBlank.append, IH]
rfl
#align turing.TM1to1.step_aux_read Turing.TM1to1.stepAux_read
theorem tr_respects : Respects (step M) (step (tr enc dec M)) fun c₁ c₂ ↦ trCfg enc enc0 c₁ = c₂ :=
fun_respects.2 fun ⟨l₁, v, T⟩ ↦ by
obtain ⟨L, R, rfl⟩ := T.exists_mk'
cases' l₁ with l₁
· exact rfl
suffices ∀ q R, Reaches (step (tr enc dec M)) (stepAux (trNormal dec q) v (trTape' enc0 L R))
(trCfg enc enc0 (stepAux q v (Tape.mk' L R))) by
refine' TransGen.head' rfl _
rw [trTape_mk']
exact this _ R
clear R l₁
intro q R
induction' q generalizing v L R
case move d q IH =>
cases d <;>
simp only [trNormal, iterate, stepAux_move, stepAux, ListBlank.head_cons,
Tape.move_left_mk', ListBlank.cons_head_tail, ListBlank.tail_cons,
trTape'_move_left enc0, trTape'_move_right enc0] <;>
apply IH
case write f q IH =>
simp only [trNormal, stepAux_read dec enc0 encdec, stepAux]
refine' ReflTransGen.head rfl _
obtain ⟨a, R, rfl⟩ := R.exists_cons
rw [tr, Tape.mk'_head, stepAux_write, ListBlank.head_cons, stepAux_move,
trTape'_move_left enc0, ListBlank.head_cons, ListBlank.tail_cons, Tape.write_mk']
apply IH
case load a q IH =>
simp only [trNormal, stepAux_read dec enc0 encdec]
apply IH
case branch p q₁ q₂ IH₁ IH₂ =>
simp only [trNormal, stepAux_read dec enc0 encdec, stepAux]
cases p R.head v <;> [apply IH₂, apply IH₁]
case goto l =>
simp only [trNormal, stepAux_read dec enc0 encdec, stepAux, trCfg, trTape_mk']
apply ReflTransGen.refl
case halt =>
simp only [trNormal, stepAux, trCfg, stepAux_move, trTape'_move_left enc0,
trTape'_move_right enc0, trTape_mk']
apply ReflTransGen.refl
#align turing.TM1to1.tr_respects Turing.TM1to1.tr_respects
open Classical
variable [Fintype Γ]
/-- The set of accessible `Λ'.write` machine states. -/
noncomputable def writes : Stmt₁ → Finset Λ'₁
| Stmt.move _ q => writes q
| Stmt.write _ q => (Finset.univ.image fun a ↦ Λ'.write a q) ∪ writes q
| Stmt.load _ q => writes q
| Stmt.branch _ q₁ q₂ => writes q₁ ∪ writes q₂
| Stmt.goto _ => ∅
| Stmt.halt => ∅
#align turing.TM1to1.writes Turing.TM1to1.writes
/-- The set of accessible machine states, assuming that the input machine is supported on `S`,
consists of the normal states embedded from `S`, plus all write states accessible from these states. -/
noncomputable def trSupp (S : Finset Λ) : Finset Λ'₁ :=
S.bunionᵢ fun l ↦ insert (Λ'.normal l) (writes (M l))
#align turing.TM1to1.tr_supp Turing.TM1to1.trSupp
theorem tr_supports {S : Finset Λ} (ss : Supports M S) : Supports (tr enc dec M) (trSupp M S) :=
⟨Finset.mem_bunionᵢ.2 ⟨_, ss.1, Finset.mem_insert_self _ _⟩, fun q h ↦ by
suffices ∀ q, SupportsStmt S q → (∀ q' ∈ writes q, q' ∈ trSupp M S) →
SupportsStmt (trSupp M S) (trNormal dec q) ∧
∀ q' ∈ writes q, SupportsStmt (trSupp M S) (tr enc dec M q') by
rcases Finset.mem_bunionᵢ.1 h with ⟨l, hl, h⟩
have :=
this _ (ss.2 _ hl) fun q' hq ↦ Finset.mem_bunionᵢ.2 ⟨_, hl, Finset.mem_insert_of_mem hq⟩
rcases Finset.mem_insert.1 h with (rfl | h)
exacts[this.1, this.2 _ h]
intro q hs hw
induction q
case move d q IH =>
unfold writes at hw⊢
replace IH := IH hs hw; refine' ⟨_, IH.2⟩
cases d <;> simp only [trNormal, iterate, supportsStmt_move, IH]
case write f q IH =>
unfold writes at hw⊢
simp only [Finset.mem_image, Finset.mem_union, Finset.mem_univ, exists_prop, true_and_iff]
at hw⊢
replace IH := IH hs fun q hq ↦ hw q (Or.inr hq)
refine' ⟨supportsStmt_read _ fun a _ s ↦ hw _ (Or.inl ⟨_, rfl⟩), fun q' hq ↦ _⟩
rcases hq with (⟨a, q₂, rfl⟩ | hq)
· simp only [tr, supportsStmt_write, supportsStmt_move, IH.1]
· exact IH.2 _ hq
case load a q IH =>
unfold writes at hw⊢
replace IH := IH hs hw
refine' ⟨supportsStmt_read _ fun _ ↦ IH.1, IH.2⟩
case branch p q₁ q₂ IH₁ IH₂ =>
unfold writes at hw⊢
simp only [Finset.mem_union] at hw⊢
replace IH₁ := IH₁ hs.1 fun q hq ↦ hw q (Or.inl hq)
replace IH₂ := IH₂ hs.2 fun q hq ↦ hw q (Or.inr hq)
exact ⟨supportsStmt_read _ fun _ ↦ ⟨IH₁.1, IH₂.1⟩, fun q ↦ Or.rec (IH₁.2 _) (IH₂.2 _)⟩
case goto l =>
simp only [writes, Finset.not_mem_empty]; refine' ⟨_, fun _ ↦ False.elim⟩
refine' supportsStmt_read _ fun a _ s ↦ _
exact Finset.mem_bunionᵢ.2 ⟨_, hs _ _, Finset.mem_insert_self _ _⟩
case halt =>
simp only [writes, Finset.not_mem_empty]; refine' ⟨_, fun _ ↦ False.elim⟩
simp only [SupportsStmt, supportsStmt_move, trNormal]⟩
#align turing.TM1to1.tr_supports Turing.TM1to1.tr_supports
end
end TM1to1
/-!
## TM0 emulator in TM1
To establish that TM0 and TM1 are equivalent computational models, we must also have a TM0 emulator
in TM1. The main complication here is that TM0 allows an action to depend on the value at the head
and local state, while TM1 doesn't (in order to have more programming language-like semantics).
So we use a computed `goto` to go to a state that performs the desired action and then returns to
normal execution.
One issue with this is that the `halt` instruction is supposed to halt immediately, not take a step
to a halting state. To resolve this we do a check for `halt` first, then `goto` (with an
unreachable branch).
-/
namespace TM0to1
-- "TM0to1"
set_option linter.uppercaseLean3 false
section
variable {Γ : Type _} [Inhabited Γ]
variable {Λ : Type _} [Inhabited Λ]
/-- The machine states for a TM1 emulating a TM0 machine. States of the TM0 machine are embedded
as `normal q` states, but the actual operation is split into two parts, a jump to `act s q`
followed by the action and a jump to the next `normal` state. -/
inductive Λ'
| normal : Λ → Λ'
| act : TM0.Stmt Γ → Λ → Λ'
#align turing.TM0to1.Λ' Turing.TM0to1.Λ'
local notation "Λ'₁" => @Λ' Γ Λ -- Porting note: Added this to clean up types.
instance : Inhabited Λ'₁ :=
⟨Λ'.normal default⟩
local notation "Cfg₀" => TM0.Cfg Γ Λ
local notation "Stmt₁" => TM1.Stmt Γ Λ'₁ Unit
local notation "Cfg₁" => TM1.Cfg Γ Λ'₁ Unit
variable (M : TM0.Machine Γ Λ)
open TM1.Stmt
/-- The program. -/
def tr : Λ'₁ → Stmt₁
| Λ'.normal q =>
branch (fun a _ ↦ (M q a).isNone) halt <|
goto fun a _ ↦ match M q a with
| none => default -- unreachable
| some (q', s) => Λ'.act s q'
| Λ'.act (TM0.Stmt.move d) q => move d <| goto fun _ _ ↦ Λ'.normal q
| Λ'.act (TM0.Stmt.write a) q => (write fun _ _ ↦ a) <| goto fun _ _ ↦ Λ'.normal q
#align turing.TM0to1.tr Turing.TM0to1.tr
/-- The configuration translation. -/
def trCfg : Cfg₀ → Cfg₁
| ⟨q, T⟩ => ⟨cond (M q T.1).isSome (some (Λ'.normal q)) none, (), T⟩
#align turing.TM0to1.tr_cfg Turing.TM0to1.trCfg
theorem tr_respects : Respects (TM0.step M) (TM1.step (tr M)) fun a b ↦ trCfg M a = b :=
fun_respects.2 fun ⟨q, T⟩ ↦ by
cases' e : M q T.1 with val
· simp only [TM0.step, trCfg, e]; exact Eq.refl none
cases' val with q' s
simp only [FRespects, TM0.step, trCfg, e, Option.isSome, cond, Option.map_some']
revert e -- Porting note: Added this so that `e` doesn't get into the `match`.
have : TM1.step (tr M) ⟨some (Λ'.act s q'), (), T⟩ = some ⟨some (Λ'.normal q'), (), match s with
| TM0.Stmt.move d => T.move d
| TM0.Stmt.write a => T.write a⟩ := by
cases' s with d a <;> rfl
intro e
refine' TransGen.head _ (TransGen.head' this _)
· simp only [TM1.step, TM1.stepAux]
rw [e]
rfl
cases e' : M q' _
· apply ReflTransGen.single
simp only [TM1.step, TM1.stepAux]
rw [e']
rfl
· rfl
#align turing.TM0to1.tr_respects Turing.TM0to1.tr_respects
end
end TM0to1
/-!
## The TM2 model
The TM2 model removes the tape entirely from the TM1 model, replacing it with an arbitrary (finite)
collection of stacks, each with elements of different types (the alphabet of stack `k : K` is
`Γ k`). The statements are:
* `push k (f : σ → Γ k) q` puts `f a` on the `k`-th stack, then does `q`.
* `pop k (f : σ → Option (Γ k) → σ) q` changes the state to `f a (S k).head`, where `S k` is the
value of the `k`-th stack, and removes this element from the stack, then does `q`.
* `peek k (f : σ → Option (Γ k) → σ) q` changes the state to `f a (S k).head`, where `S k` is the
value of the `k`-th stack, then does `q`.
* `load (f : σ → σ) q` reads nothing but applies `f` to the internal state, then does `q`.
* `branch (f : σ → Bool) qtrue qfalse` does `qtrue` or `qfalse` according to `f a`.
* `goto (f : σ → Λ)` jumps to label `f a`.
* `halt` halts on the next step.
The configuration is a tuple `(l, var, stk)` where `l : Option Λ` is the current label to run or
`none` for the halting state, `var : σ` is the (finite) internal state, and `stk : ∀ k, List (Γ k)`
is the collection of stacks. (Note that unlike the `TM0` and `TM1` models, these are not
`ListBlank`s, they have definite ends that can be detected by the `pop` command.)
Given a designated stack `k` and a value `L : List (Γ k)`, the initial configuration has all the
stacks empty except the designated "input" stack; in `eval` this designated stack also functions
as the output stack.
-/
namespace TM2
-- "TM2"
set_option linter.uppercaseLean3 false
section
variable {K : Type _} [DecidableEq K]
-- Index type of stacks
variable (Γ : K → Type _)
-- Type of stack elements
variable (Λ : Type _)
-- Type of function labels
variable (σ : Type _)
-- Type of variable settings
/-- The TM2 model removes the tape entirely from the TM1 model,
replacing it with an arbitrary (finite) collection of stacks.
The operation `push` puts an element on one of the stacks,
and `pop` removes an element from a stack (and modifying the
internal state based on the result). `peek` modifies the
internal state but does not remove an element. -/
inductive Stmt
| push : ∀ k, (σ → Γ k) → Stmt → Stmt
| peek : ∀ k, (σ → Option (Γ k) → σ) → Stmt → Stmt
| pop : ∀ k, (σ → Option (Γ k) → σ) → Stmt → Stmt
| load : (σ → σ) → Stmt → Stmt
| branch : (σ → Bool) → Stmt → Stmt → Stmt
| goto : (σ → Λ) → Stmt
| halt : Stmt
#align turing.TM2.stmt Turing.TM2.Stmt
local notation "Stmt₂" => Stmt Γ Λ σ -- Porting note: Added this to clean up types.
open Stmt
instance Stmt.inhabited : Inhabited Stmt₂ :=
⟨halt⟩
#align turing.TM2.stmt.inhabited Turing.TM2.Stmt.inhabited
/-- A configuration in the TM2 model is a label (or `none` for the halt state), the state of
local variables, and the stacks. (Note that the stacks are not `ListBlank`s, they have a definite
size.) -/
structure Cfg where
l : Option Λ
var : σ
stk : ∀ k, List (Γ k)
#align turing.TM2.cfg Turing.TM2.Cfg
local notation "Cfg₂" => Cfg Γ Λ σ -- Porting note: Added this to clean up types.
instance Cfg.inhabited [Inhabited σ] : Inhabited Cfg₂ :=
⟨⟨default, default, default⟩⟩
#align turing.TM2.cfg.inhabited Turing.TM2.Cfg.inhabited
variable {Γ Λ σ}
/-- The step function for the TM2 model. -/
@[simp]
def stepAux : Stmt₂ → σ → (∀ k, List (Γ k)) → Cfg₂
| push k f q, v, S => stepAux q v (update S k (f v :: S k))
| peek k f q, v, S => stepAux q (f v (S k).head?) S
| pop k f q, v, S => stepAux q (f v (S k).head?) (update S k (S k).tail)
| load a q, v, S => stepAux q (a v) S
| branch f q₁ q₂, v, S => cond (f v) (stepAux q₁ v S) (stepAux q₂ v S)
| goto f, v, S => ⟨some (f v), v, S⟩
| halt, v, S => ⟨none, v, S⟩
#align turing.TM2.step_aux Turing.TM2.stepAux
/-- The step function for the TM2 model. -/
@[simp]
def step (M : Λ → Stmt₂) : Cfg₂ → Option Cfg₂
| ⟨none, _, _⟩ => none
| ⟨some l, v, S⟩ => some (stepAux (M l) v S)
#align turing.TM2.step Turing.TM2.step
/-- The (reflexive) reachability relation for the TM2 model. -/
def Reaches (M : Λ → Stmt₂) : Cfg₂ → Cfg₂ → Prop :=
ReflTransGen fun a b ↦ b ∈ step M a
#align turing.TM2.reaches Turing.TM2.Reaches
/-- Given a set `S` of states, `SupportsStmt S q` means that `q` only jumps to states in `S`. -/
def SupportsStmt (S : Finset Λ) : Stmt₂ → Prop
| push _ _ q => SupportsStmt S q
| peek _ _ q => SupportsStmt S q
| pop _ _ q => SupportsStmt S q
| load _ q => SupportsStmt S q
| branch _ q₁ q₂ => SupportsStmt S q₁ ∧ SupportsStmt S q₂
| goto l => ∀ v, l v ∈ S
| halt => True
#align turing.TM2.supports_stmt Turing.TM2.SupportsStmt
open Classical
/-- The set of subtree statements in a statement. -/
noncomputable def stmts₁ : Stmt₂ → Finset Stmt₂
| Q@(push _ _ q) => insert Q (stmts₁ q)
| Q@(peek _ _ q) => insert Q (stmts₁ q)
| Q@(pop _ _ q) => insert Q (stmts₁ q)
| Q@(load _ q) => insert Q (stmts₁ q)
| Q@(branch _ q₁ q₂) => insert Q (stmts₁ q₁ ∪ stmts₁ q₂)
| Q@(goto _) => {Q}
| Q@halt => {Q}
#align turing.TM2.stmts₁ Turing.TM2.stmts₁
theorem stmts₁_self {q : Stmt₂} : q ∈ stmts₁ q := by
cases q <;> simp only [Finset.mem_insert_self, Finset.mem_singleton_self, stmts₁]
#align turing.TM2.stmts₁_self Turing.TM2.stmts₁_self
theorem stmts₁_trans {q₁ q₂ : Stmt₂} : q₁ ∈ stmts₁ q₂ → stmts₁ q₁ ⊆ stmts₁ q₂ := by
intro h₁₂ q₀ h₀₁
induction' q₂ with _ _ q IH _ _ q IH _ _ q IH _ q IH <;> simp only [stmts₁] at h₁₂⊢ <;>
simp only [Finset.mem_insert, Finset.mem_singleton, Finset.mem_union] at h₁₂
iterate 4
rcases h₁₂ with (rfl | h₁₂)
· unfold stmts₁ at h₀₁
exact h₀₁
· exact Finset.mem_insert_of_mem (IH h₁₂)
case branch f q₁ q₂ IH₁ IH₂ =>
rcases h₁₂ with (rfl | h₁₂ | h₁₂)
· unfold stmts₁ at h₀₁
exact h₀₁
· exact Finset.mem_insert_of_mem (Finset.mem_union_left _ (IH₁ h₁₂))
· exact Finset.mem_insert_of_mem (Finset.mem_union_right _ (IH₂ h₁₂))
case goto l => subst h₁₂; exact h₀₁
case halt => subst h₁₂; exact h₀₁
#align turing.TM2.stmts₁_trans Turing.TM2.stmts₁_trans
theorem stmts₁_supportsStmt_mono {S : Finset Λ} {q₁ q₂ : Stmt₂} (h : q₁ ∈ stmts₁ q₂)
(hs : SupportsStmt S q₂) : SupportsStmt S q₁ := by
induction' q₂ with _ _ q IH _ _ q IH _ _ q IH _ q IH <;>
simp only [stmts₁, SupportsStmt, Finset.mem_insert, Finset.mem_union, Finset.mem_singleton]
at h hs
iterate 4 rcases h with (rfl | h) <;> [exact hs, exact IH h hs]
case branch f q₁ q₂ IH₁ IH₂ => rcases h with (rfl | h | h); exacts[hs, IH₁ h hs.1, IH₂ h hs.2]
case goto l => subst h; exact hs
case halt => subst h; trivial
#align turing.TM2.stmts₁_supports_stmt_mono Turing.TM2.stmts₁_supportsStmt_mono
/-- The set of statements accessible from initial set `S` of labels. -/
noncomputable def stmts (M : Λ → Stmt₂) (S : Finset Λ) : Finset (Option Stmt₂) :=
Finset.insertNone (S.bunionᵢ fun q ↦ stmts₁ (M q))
#align turing.TM2.stmts Turing.TM2.stmts
theorem stmts_trans {M : Λ → Stmt₂} {S : Finset Λ} {q₁ q₂ : Stmt₂} (h₁ : q₁ ∈ stmts₁ q₂) :
some q₂ ∈ stmts M S → some q₁ ∈ stmts M S := by
simp only [stmts, Finset.mem_insertNone, Finset.mem_bunionᵢ, Option.mem_def, Option.some.injEq,
forall_eq', exists_imp, and_imp]
exact fun l ls h₂ ↦ ⟨_, ls, stmts₁_trans h₂ h₁⟩
#align turing.TM2.stmts_trans Turing.TM2.stmts_trans
variable [Inhabited Λ]
/-- Given a TM2 machine `M` and a set `S` of states, `Supports M S` means that all states in
`S` jump only to other states in `S`. -/
def Supports (M : Λ → Stmt₂) (S : Finset Λ) :=
default ∈ S ∧ ∀ q ∈ S, SupportsStmt S (M q)
#align turing.TM2.supports Turing.TM2.Supports
theorem stmts_supportsStmt {M : Λ → Stmt₂} {S : Finset Λ} {q : Stmt₂} (ss : Supports M S) :
some q ∈ stmts M S → SupportsStmt S q := by
simp only [stmts, Finset.mem_insertNone, Finset.mem_bunionᵢ, Option.mem_def, Option.some.injEq,
forall_eq', exists_imp, and_imp]
exact fun l ls h ↦ stmts₁_supportsStmt_mono h (ss.2 _ ls)
#align turing.TM2.stmts_supports_stmt Turing.TM2.stmts_supportsStmt
theorem step_supports (M : Λ → Stmt₂) {S : Finset Λ} (ss : Supports M S) :
∀ {c c' : Cfg₂}, c' ∈ step M c → c.l ∈ Finset.insertNone S → c'.l ∈ Finset.insertNone S
| ⟨some l₁, v, T⟩, c', h₁, h₂ => by
replace h₂ := ss.2 _ (Finset.some_mem_insertNone.1 h₂)
simp only [step, Option.mem_def, Option.some.injEq] at h₁; subst c'
revert h₂; induction' M l₁ with _ _ q IH _ _ q IH _ _ q IH _ q IH generalizing v T <;> intro hs
iterate 4 exact IH _ _ hs
case branch p q₁' q₂' IH₁ IH₂ =>
unfold stepAux; cases p v
· exact IH₂ _ _ hs.2
· exact IH₁ _ _ hs.1
case goto => exact Finset.some_mem_insertNone.2 (hs _)
case halt => apply Multiset.mem_cons_self
#align turing.TM2.step_supports Turing.TM2.step_supports
variable [Inhabited σ]
/-- The initial state of the TM2 model. The input is provided on a designated stack. -/
def init (k : K) (L : List (Γ k)) : Cfg₂ :=
⟨some default, default, update (fun _ ↦ []) k L⟩
#align turing.TM2.init Turing.TM2.init
/-- Evaluates a TM2 program to completion, with the output on the same stack as the input. -/
-- Porting note: Added noncomputable
noncomputable def eval (M : Λ → Stmt₂) (k : K) (L : List (Γ k)) : Part (List (Γ k)) :=
(Turing.eval (step M) (init k L)).map fun c ↦ c.stk k
#align turing.TM2.eval Turing.TM2.eval
end
end TM2
/-!
## TM2 emulator in TM1
To prove that TM2 computable functions are TM1 computable, we need to reduce each TM2 program to a
TM1 program. So suppose a TM2 program is given. This program has to maintain a whole collection of
stacks, but we have only one tape, so we must "multiplex" them all together. Pictorially, if stack
1 contains `[a, b]` and stack 2 contains `[c, d, e, f]` then the tape looks like this:
```
bottom: ... | _ | T | _ | _ | _ | _ | ...
stack 1: ... | _ | b | a | _ | _ | _ | ...
stack 2: ... | _ | f | e | d | c | _ | ...
```
where a tape element is a vertical slice through the diagram. Here the alphabet is
`Γ' := Bool × ∀ k, Option (Γ k)`, where:
* `bottom : Bool` is marked only in one place, the initial position of the TM, and represents the
tail of all stacks. It is never modified.
* `stk k : Option (Γ k)` is the value of the `k`-th stack, if in range, otherwise `none` (which is
the blank value). Note that the head of the stack is at the far end; this is so that push and pop
don't have to do any shifting.
In "resting" position, the TM is sitting at the position marked `bottom`. For non-stack actions,
it operates in place, but for the stack actions `push`, `peek`, and `pop`, it must shuttle to the
end of the appropriate stack, make its changes, and then return to the bottom. So the states are:
* `normal (l : Λ)`: waiting at `bottom` to execute function `l`
* `go k (s : StAct k) (q : Stmt₂)`: travelling to the right to get to the end of stack `k` in
order to perform stack action `s`, and later continue with executing `q`
* `ret (q : Stmt₂)`: travelling to the left after having performed a stack action, and executing
`q` once we arrive
Because of the shuttling, emulation overhead is `O(n)`, where `n` is the current maximum of the
length of all stacks. Therefore a program that takes `k` steps to run in TM2 takes `O((m+k)k)`
steps to run when emulated in TM1, where `m` is the length of the input.
-/
namespace TM2to1
-- "TM2to1"
set_option linter.uppercaseLean3 false
-- A displaced lemma proved in unnecessary generality
theorem stk_nth_val {K : Type _} {Γ : K → Type _} {L : ListBlank (∀ k, Option (Γ k))} {k S} (n)
(hL : ListBlank.map (proj k) L = ListBlank.mk (List.map some S).reverse) :
L.nth n k = S.reverse.get? n := by
rw [← proj_map_nth, hL, ← List.map_reverse, ListBlank.nth_mk, List.getI_eq_iget_get?,
List.get?_map]
cases S.reverse.get? n <;> rfl
#align turing.TM2to1.stk_nth_val Turing.TM2to1.stk_nth_val
section
variable {K : Type _} [DecidableEq K]
variable {Γ : K → Type _}
variable {Λ : Type _} [Inhabited Λ]
variable {σ : Type _} [Inhabited σ]
local notation "Stmt₂" => TM2.Stmt Γ Λ σ
local notation "Cfg₂" => TM2.Cfg Γ Λ σ
-- Porting note: `DecidableEq K` is not necessary.
/-- The alphabet of the TM2 simulator on TM1 is a marker for the stack bottom,
plus a vector of stack elements for each stack, or none if the stack does not extend this far. -/
def Γ' :=
Bool × ∀ k, Option (Γ k)
#align turing.TM2to1.Γ' Turing.TM2to1.Γ'
local notation "Γ'₂₁" => @Γ' K Γ -- Porting note: Added this to clean up types.
instance Γ'.inhabited : Inhabited Γ'₂₁ :=
⟨⟨false, fun _ ↦ none⟩⟩
#align turing.TM2to1.Γ'.inhabited Turing.TM2to1.Γ'.inhabited
instance Γ'.fintype [Fintype K] [∀ k, Fintype (Γ k)] : Fintype Γ'₂₁ :=
instFintypeProd _ _
#align turing.TM2to1.Γ'.fintype Turing.TM2to1.Γ'.fintype
/-- The bottom marker is fixed throughout the calculation, so we use the `addBottom` function
to express the program state in terms of a tape with only the stacks themselves. -/
def addBottom (L : ListBlank (∀ k, Option (Γ k))) : ListBlank Γ'₂₁ :=
ListBlank.cons (true, L.head) (L.tail.map ⟨Prod.mk false, rfl⟩)
#align turing.TM2to1.add_bottom Turing.TM2to1.addBottom
theorem addBottom_map (L : ListBlank (∀ k, Option (Γ k))) :
(addBottom L).map ⟨Prod.snd, by rfl⟩ = L := by
simp only [addBottom, ListBlank.map_cons]
convert ListBlank.cons_head_tail L
generalize ListBlank.tail L = L'
refine' L'.induction_on fun l ↦ _; simp
#align turing.TM2to1.add_bottom_map Turing.TM2to1.addBottom_map
theorem addBottom_modifyNth (f : (∀ k, Option (Γ k)) → ∀ k, Option (Γ k))
(L : ListBlank (∀ k, Option (Γ k))) (n : ℕ) :
(addBottom L).modifyNth (fun a ↦ (a.1, f a.2)) n = addBottom (L.modifyNth f n) := by
cases n <;>
simp only [addBottom, ListBlank.head_cons, ListBlank.modifyNth, ListBlank.tail_cons]
congr ; symm; apply ListBlank.map_modifyNth; intro ; rfl
#align turing.TM2to1.add_bottom_modify_nth Turing.TM2to1.addBottom_modifyNth
theorem addBottom_nth_snd (L : ListBlank (∀ k, Option (Γ k))) (n : ℕ) :
((addBottom L).nth n).2 = L.nth n := by
conv => rhs; rw [← addBottom_map L, ListBlank.nth_map]
#align turing.TM2to1.add_bottom_nth_snd Turing.TM2to1.addBottom_nth_snd
theorem addBottom_nth_succ_fst (L : ListBlank (∀ k, Option (Γ k))) (n : ℕ) :
((addBottom L).nth (n + 1)).1 = false := by
rw [ListBlank.nth_succ, addBottom, ListBlank.tail_cons, ListBlank.nth_map]
#align turing.TM2to1.add_bottom_nth_succ_fst Turing.TM2to1.addBottom_nth_succ_fst
theorem addBottom_head_fst (L : ListBlank (∀ k, Option (Γ k))) : (addBottom L).head.1 = true := by
rw [addBottom, ListBlank.head_cons]
#align turing.TM2to1.add_bottom_head_fst Turing.TM2to1.addBottom_head_fst
/-- A stack action is a command that interacts with the top of a stack. Our default position
is at the bottom of all the stacks, so we have to hold on to this action while going to the end
to modify the stack. -/
inductive StAct (k : K)
| push : (σ → Γ k) → StAct k
| peek : (σ → Option (Γ k) → σ) → StAct k
| pop : (σ → Option (Γ k) → σ) → StAct k
#align turing.TM2to1.st_act Turing.TM2to1.StAct
local notation "StAct₂" => @StAct K Γ σ -- Porting note: Added this to clean up types.
instance StAct.inhabited {k : K} : Inhabited (StAct₂ k) :=
⟨StAct.peek fun s _ ↦ s⟩
#align turing.TM2to1.st_act.inhabited Turing.TM2to1.StAct.inhabited
section
open StAct
-- Porting note: `Inhabited Γ` is not necessary.
/-- The TM2 statement corresponding to a stack action. -/
def stRun {k : K} : StAct₂ k → Stmt₂ → Stmt₂
| push f => TM2.Stmt.push k f
| peek f => TM2.Stmt.peek k f
| pop f => TM2.Stmt.pop k f
#align turing.TM2to1.st_run Turing.TM2to1.stRun
/-- The effect of a stack action on the local variables, given the value of the stack. -/
def stVar {k : K} (v : σ) (l : List (Γ k)) : StAct₂ k → σ
| push _ => v
| peek f => f v l.head?
| pop f => f v l.head?
#align turing.TM2to1.st_var Turing.TM2to1.stVar
/-- The effect of a stack action on the stack. -/
def stWrite {k : K} (v : σ) (l : List (Γ k)) : StAct₂ k → List (Γ k)
| push f => f v :: l
| peek _ => l
| pop _ => l.tail
#align turing.TM2to1.st_write Turing.TM2to1.stWrite
/-- We have partitioned the TM2 statements into "stack actions", which require going to the end
of the stack, and all other actions, which do not. This is a modified recursor which lumps the
stack actions into one. -/
@[elab_as_elim]
def stmtStRec.{l} {C : Stmt₂ → Sort l} (H₁ : ∀ (k) (s : StAct₂ k) (q) (_ : C q), C (stRun s q))
(H₂ : ∀ (a q) (_ : C q), C (TM2.Stmt.load a q))
(H₃ : ∀ (p q₁ q₂) (_ : C q₁) (_ : C q₂), C (TM2.Stmt.branch p q₁ q₂))
(H₄ : ∀ l, C (TM2.Stmt.goto l)) (H₅ : C TM2.Stmt.halt) : ∀ n, C n
| TM2.Stmt.push _ f q => H₁ _ (push f) _ (stmtStRec H₁ H₂ H₃ H₄ H₅ q)
| TM2.Stmt.peek _ f q => H₁ _ (peek f) _ (stmtStRec H₁ H₂ H₃ H₄ H₅ q)
| TM2.Stmt.pop _ f q => H₁ _ (pop f) _ (stmtStRec H₁ H₂ H₃ H₄ H₅ q)
| TM2.Stmt.load _ q => H₂ _ _ (stmtStRec H₁ H₂ H₃ H₄ H₅ q)
| TM2.Stmt.branch _ q₁ q₂ => H₃ _ _ _ (stmtStRec H₁ H₂ H₃ H₄ H₅ q₁) (stmtStRec H₁ H₂ H₃ H₄ H₅ q₂)
| TM2.Stmt.goto _ => H₄ _
| TM2.Stmt.halt => H₅
#align turing.TM2to1.stmt_st_rec Turing.TM2to1.stmtStRec
theorem supports_run (S : Finset Λ) {k : K} (s : StAct₂ k) (q : Stmt₂) :
TM2.SupportsStmt S (stRun s q) ↔ TM2.SupportsStmt S q := by
cases s <;> rfl
#align turing.TM2to1.supports_run Turing.TM2to1.supports_run
end
/-- The machine states of the TM2 emulator. We can either be in a normal state when waiting for the
next TM2 action, or we can be in the "go" and "return" states to go to the top of the stack and
return to the bottom, respectively. -/
inductive Λ'
| normal : Λ → Λ'
| go (k : K) : StAct₂ k → Stmt₂ → Λ'
| ret : Stmt₂ → Λ'
#align turing.TM2to1.Λ' Turing.TM2to1.Λ'
local notation "Λ'₂₁" => @Λ' K Γ Λ σ -- Porting note: Added this to clean up types.
open Λ'
instance Λ'.inhabited : Inhabited Λ'₂₁ :=
⟨normal default⟩
#align turing.TM2to1.Λ'.inhabited Turing.TM2to1.Λ'.inhabited
local notation "Stmt₂₁" => TM1.Stmt Γ'₂₁ Λ'₂₁ σ
local notation "Cfg₂₁" => TM1.Cfg Γ'₂₁ Λ'₂₁ σ
open TM1.Stmt
/-- The program corresponding to state transitions at the end of a stack. Here we start out just
after the top of the stack, and should end just after the new top of the stack. -/
def trStAct {k : K} (q : Stmt₂₁) : StAct₂ k → Stmt₂₁
| StAct.push f => (write fun a s ↦ (a.1, update a.2 k <| some <| f s)) <| move Dir.right q
| StAct.peek f => move Dir.left <| (load fun a s ↦ f s (a.2 k)) <| move Dir.right q
| StAct.pop f =>
branch (fun a _ ↦ a.1) (load (fun _ s ↦ f s none) q)
(move Dir.left <|
(load fun a s ↦ f s (a.2 k)) <| write (fun a _ ↦ (a.1, update a.2 k none)) q)
#align turing.TM2to1.tr_st_act Turing.TM2to1.trStAct
/-- The initial state for the TM2 emulator, given an initial TM2 state. All stacks start out empty
except for the input stack, and the stack bottom mark is set at the head. -/
def trInit (k : K) (L : List (Γ k)) : List Γ'₂₁ :=
let L' : List Γ'₂₁ := L.reverse.map fun a ↦ (false, update (fun _ ↦ none) k (some a))
(true, L'.headI.2) :: L'.tail
#align turing.TM2to1.tr_init Turing.TM2to1.trInit
theorem step_run {k : K} (q : Stmt₂) (v : σ) (S : ∀ k, List (Γ k)) : ∀ s : StAct₂ k,
TM2.stepAux (stRun s q) v S = TM2.stepAux q (stVar v (S k) s) (update S k (stWrite v (S k) s))
| StAct.push f => rfl
| StAct.peek f => by unfold stWrite; rw [Function.update_eq_self]; rfl
| StAct.pop f => rfl
#align turing.TM2to1.step_run Turing.TM2to1.step_run
/-- The translation of TM2 statements to TM1 statements. Regular actions have direct equivalents,
but stack actions are deferred by going to the corresponding `go` state, so that we can find the
appropriate stack top. -/
def trNormal : Stmt₂ → Stmt₂₁
| TM2.Stmt.push k f q => goto fun _ _ ↦ go k (StAct.push f) q
| TM2.Stmt.peek k f q => goto fun _ _ ↦ go k (StAct.peek f) q
| TM2.Stmt.pop k f q => goto fun _ _ ↦ go k (StAct.pop f) q
| TM2.Stmt.load a q => load (fun _ ↦ a) (trNormal q)
| TM2.Stmt.branch f q₁ q₂ => branch (fun _ ↦ f) (trNormal q₁) (trNormal q₂)
| TM2.Stmt.goto l => goto fun _ s ↦ normal (l s)
| TM2.Stmt.halt => halt
#align turing.TM2to1.tr_normal Turing.TM2to1.trNormal
theorem trNormal_run {k : K} (s : StAct₂ k) (q : Stmt₂) :
trNormal (stRun s q) = goto fun _ _ ↦ go k s q := by
cases s <;> rfl
#align turing.TM2to1.tr_normal_run Turing.TM2to1.trNormal_run
open Classical
/-- The set of machine states accessible from an initial TM2 statement. -/
noncomputable def trStmts₁ : Stmt₂ → Finset Λ'₂₁
| TM2.Stmt.push k f q => {go k (StAct.push f) q, ret q} ∪ trStmts₁ q
| TM2.Stmt.peek k f q => {go k (StAct.peek f) q, ret q} ∪ trStmts₁ q
| TM2.Stmt.pop k f q => {go k (StAct.pop f) q, ret q} ∪ trStmts₁ q
| TM2.Stmt.load _ q => trStmts₁ q
| TM2.Stmt.branch _ q₁ q₂ => trStmts₁ q₁ ∪ trStmts₁ q₂
| _ => ∅
#align turing.TM2to1.tr_stmts₁ Turing.TM2to1.trStmts₁
theorem trStmts₁_run {k : K} {s : StAct₂ k} {q : Stmt₂} :
trStmts₁ (stRun s q) = {go k s q, ret q} ∪ trStmts₁ q := by
cases s <;> simp only [trStmts₁]
#align turing.TM2to1.tr_stmts₁_run Turing.TM2to1.trStmts₁_run
theorem tr_respects_aux₂ {k : K} {q : Stmt₂₁} {v : σ} {S : ∀ k, List (Γ k)}
{L : ListBlank (∀ k, Option (Γ k))}
(hL : ∀ k, L.map (proj k) = ListBlank.mk ((S k).map some).reverse) (o : StAct₂ k) :
let v' := stVar v (S k) o
let Sk' := stWrite v (S k) o
let S' := update S k Sk'
∃ L' : ListBlank (∀ k, Option (Γ k)),
(∀ k, L'.map (proj k) = ListBlank.mk ((S' k).map some).reverse) ∧
TM1.stepAux (trStAct q o) v
((Tape.move Dir.right^[(S k).length]) (Tape.mk' ∅ (addBottom L))) =
TM1.stepAux q v' ((Tape.move Dir.right^[(S' k).length]) (Tape.mk' ∅ (addBottom L'))) := by
dsimp only; simp; cases o <;> simp only [stWrite, stVar, trStAct, TM1.stepAux]
case push f =>
have := Tape.write_move_right_n fun a : Γ' ↦ (a.1, update a.2 k (some (f v)))
dsimp only at this
refine'
⟨_, fun k' ↦ _, by
-- Porting note: `rw [...]` to `erw [...]; rfl`.
erw [Tape.move_right_n_head, List.length, Tape.mk'_nth_nat, this,
addBottom_modifyNth fun a ↦ update a k (some (f v)), Nat.add_one, iterate_succ']
rfl⟩
refine' ListBlank.ext fun i ↦ _
rw [ListBlank.nth_map, ListBlank.nth_modifyNth, proj, PointedMap.mk_val]
by_cases h' : k' = k
· subst k'
split_ifs with h
<;> simp only [List.reverse_cons, Function.update_same, ListBlank.nth_mk, List.map]
-- Porting note: `le_refl` is required.
· rw [List.getI_eq_get, List.get_append_right'] <;>
simp only [h, List.get_singleton, List.length_map, List.length_reverse, Nat.succ_pos',
List.length_append, lt_add_iff_pos_right, List.length, le_refl]
rw [← proj_map_nth, hL, ListBlank.nth_mk]
cases' lt_or_gt_of_ne h with h h
· rw [List.getI_append]
simpa only [List.length_map, List.length_reverse] using h
· rw [gt_iff_lt] at h
rw [List.getI_eq_default, List.getI_eq_default] <;>
simp only [Nat.add_one_le_iff, h, List.length, le_of_lt, List.length_reverse,
List.length_append, List.length_map]
· split_ifs <;> rw [Function.update_noteq h', ← proj_map_nth, hL]
rw [Function.update_noteq h']
case peek f =>
rw [Function.update_eq_self]
use L, hL; rw [Tape.move_left_right]; congr
cases e : S k; · rfl
rw [List.length_cons, iterate_succ', Function.comp, Tape.move_right_left,
Tape.move_right_n_head, Tape.mk'_nth_nat, addBottom_nth_snd, stk_nth_val _ (hL k), e,
List.reverse_cons, ← List.length_reverse, List.get?_concat_length]
rfl
case pop f =>
cases' e : S k with hd tl
· simp only [Tape.mk'_head, ListBlank.head_cons, Tape.move_left_mk', List.length,
Tape.write_mk', List.head?, iterate_zero_apply, List.tail_nil]
rw [← e, Function.update_eq_self]
exact ⟨L, hL, by rw [addBottom_head_fst, cond]⟩
· refine'
⟨_, fun k' ↦ _, by
erw [List.length_cons, Tape.move_right_n_head, Tape.mk'_nth_nat, addBottom_nth_succ_fst,
cond, iterate_succ', Function.comp, Tape.move_right_left, Tape.move_right_n_head,
Tape.mk'_nth_nat, Tape.write_move_right_n fun a : Γ' ↦ (a.1, update a.2 k none),
addBottom_modifyNth fun a ↦ update a k none, addBottom_nth_snd,
stk_nth_val _ (hL k), e,
show (List.cons hd tl).reverse.get? tl.length = some hd by
rw [List.reverse_cons, ← List.length_reverse, List.get?_concat_length],
List.head?, List.tail]⟩
refine' ListBlank.ext fun i ↦ _
rw [ListBlank.nth_map, ListBlank.nth_modifyNth, proj, PointedMap.mk_val]
by_cases h' : k' = k
· subst k'
split_ifs with h <;> simp only [Function.update_same, ListBlank.nth_mk, List.tail]
· rw [List.getI_eq_default]
· rfl
rw [h, List.length_reverse, List.length_map]
rw [← proj_map_nth, hL, ListBlank.nth_mk, e, List.map, List.reverse_cons]
cases' lt_or_gt_of_ne h with h h
· rw [List.getI_append]
simpa only [List.length_map, List.length_reverse] using h
· rw [gt_iff_lt] at h
rw [List.getI_eq_default, List.getI_eq_default] <;>
simp only [Nat.add_one_le_iff, h, List.length, le_of_lt, List.length_reverse,
List.length_append, List.length_map]
· split_ifs <;> rw [Function.update_noteq h', ← proj_map_nth, hL]
rw [Function.update_noteq h']
#align turing.TM2to1.tr_respects_aux₂ Turing.TM2to1.tr_respects_aux₂
variable (M : Λ → TM2.Stmt Γ Λ σ) -- Porting note: Unfolded `Stmt₂`.
/-- The TM2 emulator machine states written as a TM1 program.
This handles the `go` and `ret` states, which shuttle to and from a stack top. -/
def tr : Λ'₂₁ → Stmt₂₁
| normal q => trNormal (M q)
| go k s q =>
branch (fun a _ ↦ (a.2 k).isNone) (trStAct (goto fun _ _ ↦ ret q) s)
(move Dir.right <| goto fun _ _ ↦ go k s q)
| ret q => branch (fun a _ ↦ a.1) (trNormal q) (move Dir.left <| goto fun _ _ ↦ ret q)
#align turing.TM2to1.tr Turing.TM2to1.tr
-- Porting note: unknown attribute
-- attribute [local pp_using_anonymous_constructor] Turing.TM1.Cfg
/-- The relation between TM2 configurations and TM1 configurations of the TM2 emulator. -/
inductive TrCfg : Cfg₂ → Cfg₂₁ → Prop
| mk {q : Option Λ} {v : σ} {S : ∀ k, List (Γ k)} (L : ListBlank (∀ k, Option (Γ k))) :
(∀ k, L.map (proj k) = ListBlank.mk ((S k).map some).reverse) →
TrCfg ⟨q, v, S⟩ ⟨q.map normal, v, Tape.mk' ∅ (addBottom L)⟩
#align turing.TM2to1.tr_cfg Turing.TM2to1.TrCfg
theorem tr_respects_aux₁ {k} (o q v) {S : List (Γ k)} {L : ListBlank (∀ k, Option (Γ k))}
(hL : L.map (proj k) = ListBlank.mk (S.map some).reverse) (n) (H : n ≤ S.length) :
Reaches₀ (TM1.step (tr M)) ⟨some (go k o q), v, Tape.mk' ∅ (addBottom L)⟩
⟨some (go k o q), v, (Tape.move Dir.right^[n]) (Tape.mk' ∅ (addBottom L))⟩ := by
induction' n with n IH; · rfl
apply (IH (le_of_lt H)).tail
rw [iterate_succ_apply'];
simp only [TM1.step, TM1.stepAux, tr, Tape.mk'_nth_nat, Tape.move_right_n_head,
addBottom_nth_snd, Option.mem_def]
rw [stk_nth_val _ hL, List.get?_eq_get]; rfl; rwa [List.length_reverse]
#align turing.TM2to1.tr_respects_aux₁ Turing.TM2to1.tr_respects_aux₁
theorem tr_respects_aux₃ {q v} {L : ListBlank (∀ k, Option (Γ k))} (n) : Reaches₀ (TM1.step (tr M))
⟨some (ret q), v, (Tape.move Dir.right^[n]) (Tape.mk' ∅ (addBottom L))⟩
⟨some (ret q), v, Tape.mk' ∅ (addBottom L)⟩ := by
induction' n with n IH; · rfl
refine' Reaches₀.head _ IH
simp only [Option.mem_def, TM1.step]
rw [Option.some_inj, tr, TM1.stepAux, Tape.move_right_n_head, Tape.mk'_nth_nat,
addBottom_nth_succ_fst, TM1.stepAux, iterate_succ', Function.comp_apply, Tape.move_right_left]
rfl
#align turing.TM2to1.tr_respects_aux₃ Turing.TM2to1.tr_respects_aux₃
theorem tr_respects_aux {q v T k} {S : ∀ k, List (Γ k)}
(hT : ∀ k, ListBlank.map (proj k) T = ListBlank.mk ((S k).map some).reverse) (o : StAct₂ k)
(IH : ∀ {v : σ} {S : ∀ k : K, List (Γ k)} {T : ListBlank (∀ k, Option (Γ k))},
(∀ k, ListBlank.map (proj k) T = ListBlank.mk ((S k).map some).reverse) →
∃ b, TrCfg (TM2.stepAux q v S) b ∧
Reaches (TM1.step (tr M)) (TM1.stepAux (trNormal q) v (Tape.mk' ∅ (addBottom T))) b) :
∃ b, TrCfg (TM2.stepAux (stRun o q) v S) b ∧ Reaches (TM1.step (tr M))
(TM1.stepAux (trNormal (stRun o q)) v (Tape.mk' ∅ (addBottom T))) b := by
simp only [trNormal_run, step_run]
have hgo := tr_respects_aux₁ M o q v (hT k) _ le_rfl
obtain ⟨T', hT', hrun⟩ := tr_respects_aux₂ hT o
have := hgo.tail' rfl
rw [tr, TM1.stepAux, Tape.move_right_n_head, Tape.mk'_nth_nat, addBottom_nth_snd,
stk_nth_val _ (hT k), List.get?_len_le (le_of_eq (List.length_reverse _)), Option.isNone, cond,
hrun, TM1.stepAux] at this
obtain ⟨c, gc, rc⟩ := IH hT'
refine' ⟨c, gc, (this.to₀.trans (tr_respects_aux₃ M _) c (TransGen.head' rfl _)).to_reflTransGen⟩
rw [tr, TM1.stepAux, Tape.mk'_head, addBottom_head_fst]
exact rc
#align turing.TM2to1.tr_respects_aux Turing.TM2to1.tr_respects_aux
attribute [local simp] Respects TM2.step TM2.stepAux trNormal
theorem tr_respects : Respects (TM2.step M) (TM1.step (tr M)) TrCfg := by
-- Porting note: `simp only`s are required for beta reductions.
intro c₁ c₂ h
cases' h with l v S L hT
cases' l with l; · constructor
simp only [TM2.step, Respects, Option.map_some']
rsuffices ⟨b, c, r⟩ : ∃ b, _ ∧ Reaches (TM1.step (tr M)) _ _
· exact ⟨b, c, TransGen.head' rfl r⟩
simp only [tr]
-- Porting note: `refine'` failed because of implicit lambda, so `induction` is used.
generalize M l = N
induction N using stmtStRec generalizing v S L hT with
| H₁ k s q IH => exact tr_respects_aux M hT s @IH
| H₂ a _ IH => exact IH _ hT
| H₃ p q₁ q₂ IH₁ IH₂ =>
unfold TM2.stepAux trNormal TM1.stepAux
simp only []
cases p v <;> [exact IH₂ _ hT, exact IH₁ _ hT]
| H₄ => exact ⟨_, ⟨_, hT⟩, ReflTransGen.refl⟩
| H₅ => exact ⟨_, ⟨_, hT⟩, ReflTransGen.refl⟩
#align turing.TM2to1.tr_respects Turing.TM2to1.tr_respects
theorem trCfg_init (k) (L : List (Γ k)) : TrCfg (TM2.init k L) (TM1.init (trInit k L) : Cfg₂₁) := by
rw [(_ : TM1.init _ = _)]
· refine' ⟨ListBlank.mk (L.reverse.map fun a ↦ update default k (some a)), fun k' ↦ _⟩
simp only [TM2.Cfg.stk, TM2.init]
refine' ListBlank.ext fun i ↦ _
rw [ListBlank.map_mk, ListBlank.nth_mk, List.getI_eq_iget_get?, List.map_map]
have : ((proj k').f ∘ fun a => update (β := fun k => Option (Γ k)) default k (some a))
= fun a => (proj k').f (update (β := fun k => Option (Γ k)) default k (some a)) := rfl
rw [this, List.get?_map, proj, PointedMap.mk_val]
simp only []
by_cases h : k' = k
· subst k'
simp only [Function.update_same]
rw [ListBlank.nth_mk, List.getI_eq_iget_get?, ← List.map_reverse, List.get?_map]
· simp only [Function.update_noteq h]
rw [ListBlank.nth_mk, List.getI_eq_iget_get?, List.map, List.reverse_nil]
cases L.reverse.get? i <;> rfl
· rw [trInit, TM1.init]
dsimp only
congr <;> cases L.reverse <;> try rfl
simp only [List.map_map, List.tail_cons, List.map]
rfl
#align turing.TM2to1.tr_cfg_init Turing.TM2to1.trCfg_init
theorem tr_eval_dom (k) (L : List (Γ k)) :
(TM1.eval (tr M) (trInit k L)).Dom ↔ (TM2.eval M k L).Dom :=
Turing.tr_eval_dom (tr_respects M) (trCfg_init k L)
#align turing.TM2to1.tr_eval_dom Turing.TM2to1.tr_eval_dom
theorem tr_eval (k) (L : List (Γ k)) {L₁ L₂} (H₁ : L₁ ∈ TM1.eval (tr M) (trInit k L))
(H₂ : L₂ ∈ TM2.eval M k L) :
∃ (S : ∀ k, List (Γ k))(L' : ListBlank (∀ k, Option (Γ k))),
addBottom L' = L₁ ∧
(∀ k, L'.map (proj k) = ListBlank.mk ((S k).map some).reverse) ∧ S k = L₂ := by
obtain ⟨c₁, h₁, rfl⟩ := (Part.mem_map_iff _).1 H₁
obtain ⟨c₂, h₂, rfl⟩ := (Part.mem_map_iff _).1 H₂
obtain ⟨_, ⟨L', hT⟩, h₃⟩ := Turing.tr_eval (tr_respects M) (trCfg_init k L) h₂
cases Part.mem_unique h₁ h₃
exact ⟨_, L', by simp only [Tape.mk'_right₀], hT, rfl⟩
#align turing.TM2to1.tr_eval Turing.TM2to1.tr_eval
/-- The support of a set of TM2 states in the TM2 emulator. -/
noncomputable def trSupp (S : Finset Λ) : Finset Λ'₂₁ :=
S.bunionᵢ fun l ↦ insert (normal l) (trStmts₁ (M l))
#align turing.TM2to1.tr_supp Turing.TM2to1.trSupp
theorem tr_supports {S} (ss : TM2.Supports M S) : TM1.Supports (tr M) (trSupp M S) :=
⟨Finset.mem_bunionᵢ.2 ⟨_, ss.1, Finset.mem_insert.2 <| Or.inl rfl⟩, fun l' h ↦ by
suffices ∀ (q) (_ : TM2.SupportsStmt S q) (_ : ∀ x ∈ trStmts₁ q, x ∈ trSupp M S),
TM1.SupportsStmt (trSupp M S) (trNormal q) ∧
∀ l' ∈ trStmts₁ q, TM1.SupportsStmt (trSupp M S) (tr M l') by
rcases Finset.mem_bunionᵢ.1 h with ⟨l, lS, h⟩
have :=
this _ (ss.2 l lS) fun x hx ↦ Finset.mem_bunionᵢ.2 ⟨_, lS, Finset.mem_insert_of_mem hx⟩
rcases Finset.mem_insert.1 h with (rfl | h) <;> [exact this.1, exact this.2 _ h]
clear h l'
refine' stmtStRec _ _ _ _ _
· intro _ s _ IH ss' sub -- stack op
rw [TM2to1.supports_run] at ss'
simp only [TM2to1.trStmts₁_run, Finset.mem_union, Finset.mem_insert, Finset.mem_singleton]
at sub
have hgo := sub _ (Or.inl <| Or.inl rfl)
have hret := sub _ (Or.inl <| Or.inr rfl)
cases' IH ss' fun x hx ↦ sub x <| Or.inr hx with IH₁ IH₂
refine' ⟨by simp only [trNormal_run, TM1.SupportsStmt]; intros; exact hgo, fun l h ↦ _⟩
rw [trStmts₁_run] at h
simp only [TM2to1.trStmts₁_run, Finset.mem_union, Finset.mem_insert, Finset.mem_singleton]
at h
rcases h with (⟨rfl | rfl⟩ | h)
· cases s
· exact ⟨fun _ _ ↦ hret, fun _ _ ↦ hgo⟩
· exact ⟨fun _ _ ↦ hret, fun _ _ ↦ hgo⟩
· exact ⟨⟨fun _ _ ↦ hret, fun _ _ ↦ hret⟩, fun _ _ ↦ hgo⟩
· unfold TM1.SupportsStmt TM2to1.tr
exact ⟨IH₁, fun _ _ ↦ hret⟩
· exact IH₂ _ h
· intro _ _ IH ss' sub -- load
unfold TM2to1.trStmts₁ at ss' sub⊢
exact IH ss' sub
· intro _ _ _ IH₁ IH₂ ss' sub -- branch
unfold TM2to1.trStmts₁ at sub
cases' IH₁ ss'.1 fun x hx ↦ sub x <| Finset.mem_union_left _ hx with IH₁₁ IH₁₂
cases' IH₂ ss'.2 fun x hx ↦ sub x <| Finset.mem_union_right _ hx with IH₂₁ IH₂₂
refine' ⟨⟨IH₁₁, IH₂₁⟩, fun l h ↦ _⟩
rw [trStmts₁] at h
rcases Finset.mem_union.1 h with (h | h) <;> [exact IH₁₂ _ h, exact IH₂₂ _ h]
· intro _ ss' _ -- goto
simp only [trStmts₁, Finset.not_mem_empty]; refine' ⟨_, fun _ ↦ False.elim⟩
exact fun _ v ↦ Finset.mem_bunionᵢ.2 ⟨_, ss' v, Finset.mem_insert_self _ _⟩
· intro _ _ -- halt
simp only [trStmts₁, Finset.not_mem_empty]
exact ⟨trivial, fun _ ↦ False.elim⟩⟩
#align turing.TM2to1.tr_supports Turing.TM2to1.tr_supports
end
end TM2to1
end Turing
|
module Data.Universe
%default total
public export
interface DecEq t => Universe t where
typeOf : t -> Type
show : (b : t) -> Show (typeOf b)
eq : (b : t) -> Eq (typeOf b)
|
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
#pragma once
#include "Data\Pose.h"
#include "Data\Data.h"
#include <gsl\gsl>
namespace mage
{
/**
* Converts the contents of a CVMat into the mage::Matrix data container by copying the values
*/
Matrix ToMageMat(const cv::Matx34f& cvMat);
Matrix ToMageMat(const cv::Matx44f& cvMat);
Position ToMagePos(const cv::Point3f& cvPt);
Direction ToMageDir(const cv::Point3f& cvDir);
cv::Vec3f FromMageDir(const Direction& mageDir);
Matrix ToMageMat(const std::array<float, 4 * 4>& f);
Direction ToMageDir(const std::array<float, 3>& dir);
mage::Pose MageMatrixToMagePose(const mage::Matrix& viewMatrix);
void ConvertBGRToNV12(const cv::Mat& srcImg, gsl::span<uint8_t> destBuffer);
bool IsIdentity(const Matrix& m);
inline mage::Matrix CreateIdentityMageMatrix()
{
return{ 1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f };
};
// Interpolates values between min and max to the range defined from a to b,
// [a:b] can be either an increasing or decreasing range, min is always mapped to a, and max to b.
unsigned char LinearInterpolationToChar(float value, float min, float max, unsigned char a, unsigned char b);
// Construct external normal (z positive) from the X,Y components of the internal normal (z negative).
// Synthetic normals are facing the camera (external),
// generated normals are not (internal), this function is used to make the conversion,
// by changing the sign of the coordinates.
cv::Vec3f ConstructExternalNormalFromXY(float normalX, float normalY);
}
|
-- Andreas, 2019-10-21, issue #4148, reported by omelkonian
{-# OPTIONS -v impossible:100 #-}
postulate
A : Set
module M (I : Set) where
postulate
P : I → Set
record R (i : I) : Set where
constructor mk
field
f : P i
open module N = M A
data D : ∀ {i} → R i → Set where
c : ∀ {i} {t : P i} → D (mk t)
test : ∀ {i} {t : R i} → D t → Set₁
test c = Set
-- WAS: internal error in a sanity check in etaExpandRecord'_
-- Should succeed
|
! { dg-do compile }
! Tests patch for problem that was found whilst investigating
! PR24158. The call to foo would cause an ICE because the
! actual argument was of a type that was not defined. The USE
! GLOBAL was commented out, following the fix for PR29364.
!
! Contributed by Paul Thomas <[email protected]>
!
module global
type :: t2
type(t3), pointer :: d ! { dg-error "has not been declared" }
end type t2
end module global
program snafu
! use global
implicit type (t3) (z)
call foo (zin) ! { dg-error "defined|Type mismatch" }
contains
subroutine foo (z)
type :: t3
integer :: i
end type t3
type(t3) :: z
z%i = 1
end subroutine foo
end program snafu
! { dg-final { cleanup-modules "global" } }
|
The left distributive law for scalar multiplication holds. |
#ifndef TOPICH
#define TOPICH
#include <gsl/gsl_vector.h>
#include <math.h>
#include "utils.h"
#include "typedefs.h"
#include "hyperparameter.h"
#define MH_ETA_STDEV 0.005
#define MH_GAM_STDEV 0.005
/* === topic methods === */
/*
* update the count of a word in a topic
*
*/
void topic_update_word(topic* t, int w, double update);
/*
* update the document count in a topic
*
*/
void topic_update_doc_cnt(topic* t, double update);
/*
* write log probability in a file
*
*/
void topic_write_log_prob(topic* t, FILE* f);
/* === tree methods === */
/*
* allocate a new tree with a certain depth
*
*/
tree* tree_new(int depth,
int nwords,
gsl_vector* eta,
gsl_vector* gam,
double scaling_shape,
double scaling_scale);
/*
* add children to a node down to the depth of the tree;
* return the leaf of that path
*
*/
topic* tree_fill(topic* t);
/*
* add a child to a topic
*
*/
topic* topic_add_child(topic* t);
/*
* make a new topic
*
*/
topic* topic_new(int nwords, int level, topic* parent, tree* tr);
/*
* given a leaf with 0 instances, delete the leaf and all ancestors
* which also have 0 instances. (!!! use asserts here)
*
*/
void tree_prune(topic* t);
/*
* delete a node from the tree
*
*/
void delete_node(topic* t);
/*
* write a tree to file
*
*/
void tree_write_log_prob(tree* tree, FILE* f);
/*
* sample a document path from a tree
*
*/
void populate_prob_dfs(topic* node, doc* d, double* logsum, double* pprob, int root_level);
void tree_sample_doc_path(tree* tr, doc* d, short do_remove, int root_level);
/*
* sample a new path in the tree for a document
*
*/
void tree_sample_path_for_doc(tree* t, doc* d);
/*
* update the tree from an entire document
*
*/
void tree_update_from_doc(doc* d, double update, int root_level);
/*
 * sample a leaf from the tree with populated probabilities
*
*/
topic* tree_sample_path(topic* node, double logsum);
topic* tree_sample_dfs(double r, topic* node, double* sum, double logsum);
void tree_add_doc_to_path(topic* node, doc* d, int root_level);
void tree_remove_doc_from_path(tree* tr, doc* d, int root_level);
/*
* write a tree to a file
*
*/
void write_tree(tree* tf, FILE* file);
void write_tree_levels(tree* tr, FILE* file);
void write_tree_level_dfs(topic* t, FILE* f);
void write_tree_topics_dfs(topic* t, FILE* f);
/*
* scores
*
*/
double gamma_score(topic* t);
double gamma_score_PY(topic* t, double gam_add);
double eta_score(topic* t);
double log_gamma_ratio(doc* d, topic* t, int level);
double log_gamma_ratio_new(doc* d, int level, double eta, int nterms);
void tree_mh_update_eta(tree* tr);
void dfs_sample_scaling(topic* t);
/*
* copying a tree
*
*/
void copy_topic(const topic* src, topic* dest);
void copy_tree_dfs(const topic* src, topic* dest);
tree * copy_tree(const tree* tr);
void free_tree(tree * tr);
void free_tree_dfs(topic * t);
int ntopics_in_tree(tree * tr);
int ntopics_in_tree_dfs(topic * t);
#endif
|
Formal statement is: lemma pseudo_mod_impl[code]: "pseudo_mod f g = poly_of_list (pseudo_mod_list (coeffs f) (coeffs g))" Informal statement is: The pseudo remainder of two polynomials can be computed by applying the list-based pseudo remainder algorithm to their coefficient lists and converting the resulting list back into a polynomial.
In the Sermon on the Mount, Jesus recalls the commandment, "You shall not kill" and then adds to it the proscriptions against anger, hatred and vengeance. Going further, Christ asks his disciples to love their enemies. The Catechism asserts that "it is legitimate to insist on respect for one's own right to life." Kreeft says, "self-defense is legitimate for the same reason suicide is not: because one's own life is a gift from God, a treasure we are responsible for preserving and defending." The Catechism teaches that "someone who defends his life is not guilty of murder even if he is forced to deal his aggressor a lethal blow." Legitimate defense can be not only a right but a grave duty for one who is responsible for the lives of others. The defense of the common good requires that an unjust aggressor be rendered unable to cause harm. For this reason, those who legitimately hold authority also have the right to use arms to repel aggressors against the civil community entrusted to their responsibility.
|
\lab{Profiling and Optimizing Python Code}{Profiling}
\objective{Identify which portions of the code are most time consuming using a
profiler. Optimize Python code using good coding practices and just-in-time compilation with Numba.}
\label{lab:ProfilingCode}
The best code goes through multiple drafts.
In a first draft, you should focus on writing code that does what it is supposed
to and is easy to read. After writing a first draft, you may find that your code
does not run as quickly as you need it to. Then it is time to \emph{optimize}
the most time consuming parts of your code so that they run as quickly as possible.
In this lab we will optimize the function \li{qr1()} that computes the QR
decomposition of a matrix via the modified Gram-Schmidt algorithm
(see Lab \ref{lab:QRdecomp}).
\begin{lstlisting}
import numpy as np
from scipy import linalg as la
def qr1(A):
ncols = A.shape[1]
Q = A.copy()
R = np.zeros((ncols, ncols))
for i in range(ncols):
R[i, i] = la.norm(Q[:, i])
Q[:, i] = Q[:, i]/la.norm(Q[:, i])
for j in range(i+1, ncols):
R[i, j] = Q[:, j].dot(Q[:, i])
Q[:,j] = Q[:,j]-Q[:, j].dot(Q[:, i])*Q[:,i]
return Q, R
\end{lstlisting}
\section*{What to Optimize}
Python provides a \emph{profiler} that can identify where code spends most of
its runtime. The output of the profiler will tell you where to begin your
optimization efforts.
In IPython\footnote{If you are not using IPython, you will need to use the
\li{cProfile} module documented here: \url{https://docs.python.org/2/library/profile.html}.},
you can profile a function with \li{\%prun}.
Here we profile \li{qr1()} on a random $300 \times 300$ array.
\begin{lstlisting}
In [1]: A = np.random.rand(300, 300)
In [2]: %prun qr1(A)
\end{lstlisting}
On the author's computer, we get the following output.
{\scriptsize
\begin{verbatim}
97206 function calls in 1.343 seconds
Ordered by: internal time
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.998 0.998 1.342 1.342 profiling_hw.py:4(qr1)
89700 0.319 0.000 0.319 0.000 {method 'dot' of 'numpy.ndarray' objects}
600 0.006 0.000 0.012 0.000 function_base.py:526(asarray_chkfinite)
600 0.006 0.000 0.009 0.000 linalg.py:1840(norm)
1200 0.005 0.000 0.005 0.000 {method 'any' of 'numpy.ndarray' objects}
600 0.002 0.000 0.002 0.000 {method 'reduce' of 'numpy.ufunc' objects}
1200 0.001 0.000 0.001 0.000 {numpy.core.multiarray.array}
1200 0.001 0.000 0.002 0.000 numeric.py:167(asarray)
1 0.001 0.001 0.001 0.001 {method 'copy' of 'numpy.ndarray' objects}
600 0.001 0.000 0.022 0.000 misc.py:7(norm)
301 0.001 0.000 0.001 0.000 {range}
1 0.001 0.001 0.001 0.001 {numpy.core.multiarray.zeros}
600 0.001 0.000 0.001 0.000 {method 'ravel' of 'numpy.ndarray' objects}
600 0.000 0.000 0.000 0.000 {method 'conj' of 'numpy.ndarray' objects}
1 0.000 0.000 1.343 1.343 <string>:1(<module>)
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
\end{verbatim}
}
The first line of the output tells us that executing \li{qr1(A)} results in
almost 100,000 function calls. Then we see a table listing these functions along
with data telling us how much time each takes. Here, \li{ncalls} is the number
of calls to the function, \li{tottime} is the time spent in the function itself
(not counting calls to other functions), and \li{cumtime} is the time spent in the
function including calls to other functions.
For example, the first line of the table is the function \li{qr1(A)} itself.
This function was called once, it took 1.342s to run, and 0.344s of that was
spent in calls to other functions. Of that 0.344s, there were 0.319s spent on
89,700 calls to \li{np.dot()}.
With this output, we see that most time is spent in multiplying matrices.
Since we cannot write a faster method to do this multiplication, we may want to
try to reduce the number of matrix multiplications we perform.
\section*{How to Optimize}
Once you have identified those parts of your code that take the most time,
how do you make them run faster?
Here are some of the techniques we will address in this lab:
\begin{itemize}
\item Avoid recomputing values
\item Avoid nested loops
\item Use existing functions instead of writing your own
\item Use generators when possible
\item Avoid excessive function calls
\item Write Pythonic code
\item Compiling Using Numba
\item Use a more efficient algorithm
\end{itemize}
You should always use the profiling and timing functions to help you decide
when an optimization is actually useful.
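In IPython, the \li{\%timeit} magic is a convenient companion to \li{\%prun} for these checks:
it runs a statement several times and reports an average runtime. For example (the measured
times will vary by machine and are omitted here):
\begin{lstlisting}
In [3]: %timeit qr1(A)
\end{lstlisting}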
\begin{problem}
In this lab, we will perform many comparisons between the runtimes of various
functions. To help with these comparisons, implement the following function:
\begin{lstlisting}
def compare_timings(f, g, *args):
"""Compares the timings of 'f' and 'g' with arguments '*args'.
Inputs:
f (callable): first function to compare.
g (callable): second function to compare.
*args (any type): arguments to use when callings functions
'f' and 'g'
Returns:
comparison (string): The comparison of the runtimes of functions
'f' and 'g' in the following format :
Timing for <f>: <time>
Timing for <g>: <time>
where the values inside <> vary depending on the inputs.
"""
\end{lstlisting}
Hint: You can gain access to the name of many functions by using their
\li{func_name} attribute. However, this attribute does not exist for all functions
we will be interested in timing. Therefore, even though it is not as clean, use
\li{str(f)} to print a string representation of f.
\end{problem}
\subsection*{Avoid Recomputing Values}
In our function \li{qr1()}, we can avoid recomputing \li{R[i,i]} in the outer
loop and \li{R[i,j]} in the inner loop.
The rewritten function is as follows:
\begin{lstlisting}
def qr2(A):
ncols = A.shape[1]
Q = A.copy()
R = np.zeros((ncols, ncols))
for i in range(ncols):
R[i, i] = la.norm(Q[:, i])
Q[:, i] = Q[:, i]/R[i, i] # this line changed
for j in range(i+1, ncols):
R[i, j] = Q[:, j].dot(Q[:, i])
Q[:,j] = Q[:,j]-R[i, j]*Q[:,i] # this line changed
return Q, R
\end{lstlisting}
Profiling \li{qr2()} on a $300 \times 300$ matrix produces the following output.
{\scriptsize
\begin{verbatim}
48756 function calls in 1.047 seconds
Ordered by: internal time
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.863 0.863 1.047 1.047 profiling_hw.py:16(qr2)
44850 0.171 0.000 0.171 0.000 {method 'dot' of 'numpy.ndarray' objects}
300 0.003 0.000 0.006 0.000 function_base.py:526(asarray_chkfinite)
300 0.003 0.000 0.005 0.000 linalg.py:1840(norm)
600 0.002 0.000 0.002 0.000 {method 'any' of 'numpy.ndarray' objects}
300 0.001 0.000 0.001 0.000 {method 'reduce' of 'numpy.ufunc' objects}
301 0.001 0.000 0.001 0.000 {range}
600 0.001 0.000 0.001 0.000 {numpy.core.multiarray.array}
600 0.001 0.000 0.001 0.000 numeric.py:167(asarray)
300 0.000 0.000 0.012 0.000 misc.py:7(norm)
1 0.000 0.000 0.000 0.000 {method 'copy' of 'numpy.ndarray' objects}
300 0.000 0.000 0.000 0.000 {method 'ravel' of 'numpy.ndarray' objects}
1 0.000 0.000 1.047 1.047 <string>:1(<module>)
300 0.000 0.000 0.000 0.000 {method 'conj' of 'numpy.ndarray' objects}
1 0.000 0.000 0.000 0.000 {numpy.core.multiarray.zeros}
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
\end{verbatim}
}
Our optimization reduced almost every kind of function call by half, and reduced the total run time by 0.295s.
Some less obvious ways to eliminate excess computations include moving computations out of loops, not copying large data structures, and simplifying mathematical expressions.
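For instance, the sketch below (using a hypothetical 1-D array \li{samples}) moves a loop-invariant computation out of a loop.
\begin{lstlisting}
# Slow: np.mean(samples) is recomputed on every pass through the loop.
total = 0.
for x in samples:
    total += (x - np.mean(samples))**2

# Faster: compute the loop-invariant value once, before the loop.
mu = np.mean(samples)
total = 0.
for x in samples:
    total += (x - mu)**2
\end{lstlisting}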
\subsection*{Avoid Nested Loops}
For many algorithms, the time complexity is determined by the loops. Nested loops quickly increase the time complexity.
The best way to avoid nested loops is to use NumPy array operations instead of iterating through arrays.
If you must use nested loops, focus your optimization efforts on the innermost loop, which gets called the most times.
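As a small sketch (using a hypothetical 2-D array \li{B}), the nested loops below can be replaced with a single NumPy call.
\begin{lstlisting}
# Nested loops (slow): accumulate each row sum element by element.
rowsums = np.zeros(B.shape[0])
for i in xrange(B.shape[0]):
    for j in xrange(B.shape[1]):
        rowsums[i] += B[i, j]

# One array operation (fast): let NumPy do the summing.
rowsums = B.sum(axis=1)
\end{lstlisting}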
\begin{problem}
The code below is an inefficient implementation of the LU
algorithm. Write a function \li{LU_opt()} that is an optimized
version of \li{LU()}. Look for ways to avoid recomputing values
and avoid nested loops by using array slicing instead.
Print a comparison of the timing of the
original function and your optimized function using your
\li{compare_timings} function.
\begin{lstlisting}
def LU(A):
"""Returns the LU decomposition of a square matrix."""
n = A.shape[0]
U = np.array(np.copy(A), dtype=float)
L = np.eye(n)
for i in range(1,n):
for j in range(i):
L[i,j] = U[i,j]/U[j,j]
for k in range(j,n):
U[i,k] -= L[i,j] * U[j,k]
return L,U
\end{lstlisting}
\end{problem}
\subsection*{Use Existing Functions Instead of Writing Your Own}
If there is an intuitive operation you would like to perform on an array, chances are that NumPy or another library already has a function that does it.
Python and NumPy functions have already been optimized, and are usually many times faster than the equivalent you might write.
We saw an example of this in Lab \ref{lab:NumPyArrays} where we compared NumPy array multiplication with our own matrix multiplication implemented in Python.
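As another small illustration (with a hypothetical 1-D array \li{X}), compare a hand-written loop with the method NumPy already provides.
\begin{lstlisting}
# Hand-written maximum:
biggest = X[0]
for x in X:
    if x > biggest:
        biggest = x

# Existing NumPy method, shorter and typically much faster:
biggest = X.max()
\end{lstlisting}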
\begin{problem} \label{prob:add}
Without using any builtin functions, implement the following function:
\begin{lstlisting}
def mysum(X):
""" Return the sum of the elements of X.
Inputs:
X (array) - a 1-D array
"""
\end{lstlisting}
Perform a comparison of \li{mysum} to Python's builtin \li{<<sum>>} function and
NumPy's \li{<<np.sum>>} using your \li{compare_timings} function.
\end{problem}
\subsection*{Use Generators When Possible}
When you are iterating through a list, you can often replace the list with a \emph{generator}.
Instead of storing the entire list in memory, a generator computes each item as it is needed.
For example, the code
\begin{lstlisting}
>>> for i in range(100):
... print i
\end{lstlisting}
stores the numbers 0 to 99 in memory, looks up each one in turn, and prints it.
On the other hand, the code
\begin{lstlisting}
>>> for i in xrange(100):
... print i
\end{lstlisting}
uses a generator instead of a list.
This code computes the first number in the specified range (which is 0), and prints it.
Then it computes the next number (which is 1) and prints that.
In our example, replacing each \li{range} with \li{xrange} does not speed up \li{qr2()} by a noticeable amount.
Though the example below is contrived, it demonstrates the benefits of using generators.
\begin{lstlisting}
# both these functions will return the first iterate of a loop of length 10^8.
def list_iter():
for i in range(10**8):
return i
def generator_iter():
for i in xrange(10**8):
return i
>>> compare_timings(list_iter,generator_iter)
Timing for <function list_iter at 0x7f3deb5a4488>: 1.93316888809
Timing for <function generator_iter at 0x7f3deb5a4500>: 1.19209289551e-05
\end{lstlisting}
It is also possible to write your own generators.
Say we have a function that builds and returns a list, and that we want to iterate through this list later in our code. In situations like these, it is worth considering turning the function into a generator instead of returning the whole list. The benefits of this approach mirror the benefits of using \li{xrange} instead of \li{range}. The only adjustment needed is to change the \li{return} statement to a \li{yield} statement. Here is a quick example:
\begin{lstlisting}
def return_squares(n):
squares = []
for i in xrange(1,n+1):
squares.append(i**2)
return squares
def yield_squares(n):
for i in xrange(1,n+1):
yield i**2
\end{lstlisting}
When yield is called, the single value is returned and all the local variables for the function are stored away until the next iteration. To iterate step-by-step through a generator, use the generator's \li{<<next>>} method.
\begin{lstlisting}
>>> squares = yield_squares(3)
>>> squares.next()
1
>>> squares.next()
4
>>> squares.next()
9
\end{lstlisting}
We can also easily iterate through a generator using a for loop.
\begin{lstlisting}
>>> for s in squares:
... print s,
...
1 4 9
\end{lstlisting}
\begin{problem}
Write a generator that yields the first \li{n} Fibonacci numbers.
\end{problem}
If you are interested in learning more about writing your own generators, see \url{https://docs.python.org/2/tutorial/classes.html#generators} and \url{https://wiki.python.org/moin/Generators}.
\subsection*{Avoid Excessive Function Calls}
Function calls take time.
Moreover, looking up methods associated with objects takes time.
Removing ``dots'' can significantly speed up execution time.
For example, we could rewrite our function to reduce the number of times we need to look up the function \li{la.norm()}.
\begin{lstlisting}
def qr2(A):
norm = la.norm # this reduces the number of function look ups.
ncols = A.shape[1]
Q = A.copy()
R = np.zeros((ncols, ncols))
for i in range(ncols):
R[i, i] = norm(Q[:, i])
Q[:, i] = Q[:, i]/R[i, i]
for j in range(i+1, ncols):
R[i, j] = Q[:, j].dot(Q[:, i])
Q[:,j] = Q[:,j]-R[i, j]*Q[:,i]
return Q, R
\end{lstlisting}
Once again, an analysis with \li{\%prun} reveals that this optimization does not help significantly in this case.
\subsection*{Write Pythonic Code}
Several special features of Python allow you to write fast code easily.
First, list comprehensions are much faster than for loops. These are particularly useful when building lists inside a loop.
For example, replace
\begin{lstlisting}
>>> mylist = []
>>> for i in xrange(100):
... mylist.append(math.sqrt(i))
\end{lstlisting}
with
\begin{lstlisting}
>>> mylist = [math.sqrt(i) for i in xrange(100)]
\end{lstlisting}
We can accomplish the same thing using the \li{map()} function, which is even faster.
\begin{lstlisting}
>>> mylist = map(math.sqrt, xrange(100))
\end{lstlisting}
The analog of a list comprehension also exists for generators, dictionaries, and sets.
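For example, the square-root computation above can be written as a generator expression, and braces give dictionary and set comprehensions.
\begin{lstlisting}
>>> gen = (math.sqrt(i) for i in xrange(100))        # generator expression
>>> table = {i: math.sqrt(i) for i in xrange(100)}   # dictionary comprehension
>>> roots = {math.sqrt(i) for i in xrange(100)}      # set comprehension
\end{lstlisting}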
Second, swap values with a single assignment.
\begin{lstlisting}
>>> a, b = 1, 2
>>> a, b = b, a
>>> print a, b
2 1
\end{lstlisting}
Third, many non-Boolean objects in Python have truth values.
For example, numbers are \li{False} when equal to zero and \li{True} otherwise.
Similarly, lists and strings are \li{False} when they are empty and \li{True} otherwise.
So when \li{a} is a number, instead of
\begin{lstlisting}
>>> if a != 0:
\end{lstlisting}
use
\begin{lstlisting}
>>> if a:
\end{lstlisting}
Lastly, it is more efficient to iterate through lists by iterating over the elements instead of iterating over the indices.
\begin{lstlisting}
# Bad
for i in xrange(len(my_list)):
print my_list[i],
# Good
for x in my_list:
print x,
\end{lstlisting}
However, there are situations where you will need to know the indices of the elements over which you are iterating. In these situations, use \li{enumerate}.
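For example, \li{enumerate} yields each index together with the corresponding element.
\begin{lstlisting}
>>> for i, x in enumerate(my_list):
...     print i, x
\end{lstlisting}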
\begin{problem}
Using \li{\%prun}, find out which portions of the code below require the most runtime. Then, rewrite the function using some of the optimization techniques we have discussed thus far.
\begin{lstlisting}
def foo(n):
my_list = []
for i in range(n):
num = np.random.randint(-9,9)
my_list.append(num)
evens = 0
for j in range(n):
if j%2 == 0:
evens += my_list[j]
return my_list, evens
\end{lstlisting}
Hint: If you are unsure where to begin optimizing, walk through the code line by line to determine what the code is accomplishing. Then, write your own function to perform the same task in a more efficient way using the optimization techniques we have discussed.
\end{problem}
\subsection*{Compiling Using Numba}
Though it is much easier to write simple, readable code in Python, Python code generally runs much slower than the equivalent code in a compiled language such as C.
Numba is a tool that you can use to optimize your code. Numba uses \emph{just-in-time} (JIT) compilation.
This means that the code is compiled right before it is executed. We will discuss this process a bit later in this section.
The API for using Numba is incredibly simple. All one has to do is import Numba and add the \li{@jit} function decorator to your function. The following code would be a Numba equivalent to Problem \ref{prob:add}.
\begin{lstlisting}
from numba import jit
@jit
def numba_sum(A):
total = 0
for x in A:
total += x
return total
\end{lstlisting}
Though this code looks very simple, a lot is going on behind the scenes. Without getting into too many details, one of the reasons compiled languages like C are so much faster than Python is that they have explicitly defined datatypes. The main strategy used by Numba is to speed up the Python code by assigning datatypes to all the variables. Rather than requiring us to define the datatypes explicitly as we would need to in any compiled language, Numba attempts to \emph{infer} the correct datatypes based on the datatypes of the input.
In the code above, for example, say that our array \li{A} was an array of integers. Though we have not explicitly defined a datatype for the variable \li{total}, Numba will infer that the datatype for total should also be an integer.
Once all the datatypes have been inferred and assigned, the code is translated to machine code by the LLVM library. Numba will then cache this compiled version of our code. This means that we can bypass this whole inference and compilation process the next time we run our function.
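One practical consequence is that the first call to a jitted function includes the compilation time, so benchmark a later call. A rough sketch using IPython's \li{\%timeit} magic (the test array here is made up for illustration):
\begin{lstlisting}
>>> A = np.random.rand(10**6)
>>> %timeit -n 1 -r 1 numba_sum(A)   # first call: includes compilation
>>> %timeit numba_sum(A)             # later calls: compiled code only
\end{lstlisting}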
\subsubsection*{More Control Within Numba}
Though the inference engine within Numba does a good job, it's not always perfect. There are times that Numba is unable to infer all the datatypes correctly.
If you add the keyword argument \li{nopython=True} to the \li{jit} decorator, an error will be raised if Numba is unable to convert everything to explicit datatypes.
If your function is running slower than you would expect, you can find out what is going on under the hood by calling the \li{inspect_types()} method of the function. Using this, you can see if all the datatypes are being assigned as you would expect.
\begin{lstlisting}
# Due to the length of the output, we will leave it out of the lab text.
>>> numba_sum.inspect_types()
\end{lstlisting}
If you would like to have more control, you may specify datatypes explicitly as demonstrated in the code below.
In this example, we will assume that the input will be doubles. Note that it is necessary to import the desired datatype from the Numba module.
\begin{lstlisting}
from numba import double
# The values inside 'dict' will be specific to your function.
@jit(nopython=True, locals=dict(A=double[:], total=double))
def numba_sum(A):
total = 0
for i in xrange(len(A)):
total += A[i]
return total
\end{lstlisting}
Notice that the jit function decorator is the only thing that changed. Note also that this means that we will not be allowed to pass an array of integers to this function. If we had not specified datatypes, the inference engine would allow us to pass arrays of any numerical datatype. In the case that our function sees a datatype that it has not seen before, the inference and compilation process would have to be repeated. As before, the new version will also be cached.
\begin{problem}
% TODO Change this problem to something more compelling.
The code below defines a Python function which takes a matrix to the $n$th power.
\begin{lstlisting}
def pymatpow(X, power):
""" Return X^{power}.
Inputs:
X (array) - A square 2-D NumPy array
power (int) - The power to which we are taking the matrix X.
Returns:
prod (array) - X^{power}
"""
prod = X.copy()
temparr = np.empty_like(X[0])
size = X.shape[0]
for n in xrange(1, power):
for i in xrange(size):
for j in xrange(size):
tot = 0.
for k in xrange(size):
tot += prod[i,k] * X[k,j]
temparr[j] = tot
prod[i] = temparr
return prod
\end{lstlisting}
\begin{enumerate}
\item Create a function \li{numba_matpow} that is the compiled version of \li{pymatpow} using Numba.
\item Write a function \li{numpy_matpow} that performs the same task as \li{pymatpow} but uses \li{np.dot()}. Compile this function using Numba.
\item Compare the speed of \li{pymatpow}, \li{numba_matpow} and the \li{numpy_matpow} function. Remember to time \li{numba_matpow} and \li{numpy_matpow} on the second pass so the compilation process is not part of your timing. Perform your comparisons using your \li{compare_timings} function.
\end{enumerate}
NumPy takes products of matrices by calling BLAS and LAPACK, which are heavily optimized linear algebra libraries written in C, assembly, and Fortran.
\end{problem}
\begin{warn}
NumPy's array methods are often faster than a Numba equivalent you could code yourself.
If you are unsure which method is fastest, time them.
\end{warn}
\subsection*{Use a More Efficient Algorithm}
The optimizations discussed thus far will speed up your code at most by a constant factor.
They will not change the complexity of your code.
In order to reduce the complexity (say from $O(n^2)$ to $O(n \log(n))$), you typically need to change your algorithm.
We will address the benefits of using more efficient algorithms in Problem \ref{prob:tridiag}.
A good algorithm written with a slow language (like Python) is faster than a bad algorithm written in a fast language (like C).
Hence, focus on writing fast algorithms with good Python code, and only use Numba when and where it is necessary. In other words, Numba will not always save you from a poor algorithm design.
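As a small illustration (the lists \li{items} and \li{queries} here are hypothetical), changing the data structure changes the complexity of repeated membership tests.
\begin{lstlisting}
# O(n) per query: each 'in' test scans the whole list.
hits = sum(1 for q in queries if q in items)

# O(1) per query on average: convert to a set once, then test.
item_set = set(items)
hits = sum(1 for q in queries if q in item_set)
\end{lstlisting}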
\begin{comment}
\begin{problem}
Optimize the following function using techniques described in this lab:
\begin{lstlisting}
# TODO: COME UP WITH SOME ALGORITHM TO DO HERE!!
\end{lstlisting}
It should also include a list of changes, the reasoning behind the changes, and the effect of the changes on runtime. On the author's computer, computing the LU-decomposition on a 1000x1000 matrix took over 2 and a half minutes. The optimized version took a little over a second.
Hint: The best way to approach this problem is to analyze what each piece of code is actually doing. Then, determine if there is a more efficient way to accomplish the same task. Specifically, look for ways to use array operations instead of for loops, ways to replace blocks of code with built-in Python functions, and ways to avoid recomputing values.
\end{problem}
\end{comment}
The correct choice of algorithm is more important than a fast implementation.
For example, suppose you wish to solve the following tridiagonal system.
\[\begin{bmatrix}
b_1 & c_1 & 0 & 0 & \cdots & \cdots & 0 \\
a_2 & b_2 & c_2 & 0 & \cdots & \cdots & 0 \\
0 & a_3 & b_3 & c_3 & \cdots & \cdots & 0 \\
\vdots & \vdots & \vdots & \vdots & \ddots & \ddots & \vdots \\
\vdots & \vdots & \vdots & \vdots & \ddots & \ddots & c_{n-1} \\
0 & 0 & 0 & 0 & \cdots & a_n & b_n
\end{bmatrix}
\begin{bmatrix}
x_1\\
x_2\\
x_3\\
\vdots\\
\vdots\\
x_n
\end{bmatrix}
=
\begin{bmatrix}
d_1\\
d_2\\
d_3\\
\vdots\\
\vdots\\
d_n
\end{bmatrix}\]
One way to do this is with the general \li{solve} function in SciPy's \li{linalg} module.
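Assuming \li{la} refers to \li{scipy.linalg} as in the earlier examples, this is a single call.
\begin{lstlisting}
>>> x = la.solve(A, d)    # generic dense solver
\end{lstlisting}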
Alternatively, you could use an algorithm optimized for tridiagonal matrices.
The code below implements one such algorithm in Python. This is called the Thomas algorithm.
\begin{lstlisting}
def pytridiag(a,b,c,d):
"""Solve the tridiagonal system Ax = d where A has diagonals a, b, and c.
Inputs:
a, b, c, d (array) - All 1-D NumPy arrays of equal length.
Returns:
x (array) - solution to the tridiagonal system.
"""
n = len(a)
# Make copies so the original arrays remain unchanged
aa = np.copy(a)
bb = np.copy(b)
cc = np.copy(c)
dd = np.copy(d)
# Forward sweep
for i in xrange(1, n):
temp = aa[i]/bb[i-1]
bb[i] = bb[i] - temp*cc[i-1]
dd[i] = dd[i] - temp*dd[i-1]
# Back substitution
x = np.zeros_like(a)
x[-1] = dd[-1]/bb[-1]
for i in xrange(n-2, -1, -1):
x[i] = (dd[i]-cc[i]*x[i+1])/bb[i]
return x
\end{lstlisting}
\begin{problem} \label{prob:tridiag}
\leavevmode
\begin{enumerate}
\item Write a function \li{numba_tridiag} that is a compiled version of \li{pytridiag}.
\item Compare the speed of your new function with \li{pytridiag} and \li{scipy.linalg.solve}.
When comparing \li{numba_tridiag} and \li{pytridiag}, use a $1000000 \times 1000000$ system.
When comparing \li{numba_tridiag} and the SciPy algorithm, use a $1000 \times 1000$ system. You may use the code below to generate the arrays \li{a}, \li{b}, and \li{c}, along with the corresponding tridiagonal matrix \li{A}.
\end{enumerate}
\begin{lstlisting}
def init_tridiag(n):
"""Initializes a random nxn tridiagonal matrix A.
Inputs:
n (int) : size of array
Returns:
a (1-D array) : (-1)-th diagonal of A
b (1-D array) : main diagonal of A
c (1-D array) : (1)-th diagonal of A
A (2-D array) : nxn tridiagonal matrix defined by a,b,c.
"""
a = np.random.random_integers(-9,9,n).astype("float")
b = np.random.random_integers(-9,9,n).astype("float")
c = np.random.random_integers(-9,9,n).astype("float")
# Check for and change 0 values in arrays
a[a==0] = 1
b[b==0] = 1
c[c==0] = 1
# Create tridiagonal matrix A from a,b,c
A = np.zeros((b.size,b.size))
np.fill_diagonal(A,b)
np.fill_diagonal(A[1:,:-1],a[1:])
np.fill_diagonal(A[:-1,1:],c)
return a,b,c,A
\end{lstlisting}
Note that an efficient tridiagonal matrix solver is implemented by \li{scipy.sparse.linalg.spsolve()}.
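A possible sketch of using the sparse solver (the construction of the sparse matrix and the right-hand side here are only illustrative):
\begin{lstlisting}
from scipy import sparse
from scipy.sparse import linalg as spla
a, b, c, A = init_tridiag(1000)
d = np.random.rand(1000)
S = sparse.diags([a[1:], b, c[:-1]], offsets=[-1, 0, 1], format="csr")
x = spla.spsolve(S, d)
\end{lstlisting}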
\end{problem}
\section*{When to Stop Optimizing}
You don't need to apply every possible optimization to your code.
When your code runs acceptably fast, stop optimizing. There is no need to spend valuable time on optimizations once the speed is sufficient.
Moreover, remember not to prematurely optimize your functions. Make sure the function does exactly what you want it to before worrying about any kind of optimization.
% TODO: get these lab references correct and replace.
\begin{problem}
Optimize a function you wrote in a previous lab using the techniques discussed in this lab. Consider optimizing one of the following:
\begin{enumerate}
\item Householder triangularization or Hessenburg decomposition % (Lab \ref{lab:QRdecomp}).
\item Givens triangularization % (Lab \ref{lab:qr-applications})
\item Image Segmentation % (Lab \ref{lab:ImgSeg_eigenvalues})
\item Eigenvalue Solvers % (Lab \ref{lab:EigSolve})
\end{enumerate}
Compare the timings of the function before and after optimization using your \li{compare_timings} function. Write a short paragraph describing what you did to optimize your function.
\end{problem}
|
using IndexFunArrays, ROIViews
# a random position within the size limit sz
randpos(sz, dims) = Tuple((d <= length(sz)) ? rand(1:sz[d]) : 1 for d in 1:dims)
@testset "ROIView" begin
for d in 1:5
sz = Tuple(9 .+ ones(Int,d));
data = idx(sz,offset=CtrCorner);
N = 3
for s = 1:d # smaller ROI dimensions. s is ROI dimensionality
ROIpos = Tuple(Tuple(rand(1:10, s)) for n in 1:N)
ROIsize = Tuple(rand(1:10,s))
pad_val = 0 .* data[1];
rois = ROIView(data, ROIpos, ROIsize, pad_val=pad_val);
for p = 1:N # select a ROI number
p_roi = randpos(ROIsize,d) # position in the ROI data
@test size(rois) == (expand_size(ROIsize,size(data))...,N)
roi_val = rois[p_roi...,p]
pos = p_roi .+ Tuple((n<=s) ? ROIpos[p][n] .- ROIsize[n].÷2 : 0 for n=1:d)
if Base.checkbounds(Bool, data, pos...)
data_val = data[pos...]
@test roi_val .+ 1 == pos
else
data_val = pad_val
end
@test data_val == roi_val
end
end
end
end
|
/-
Copyright (c) 2018 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Johannes Hölzl, Rémy Degenne
-/
import order.filter.cofinite
/-!
# liminfs and limsups of functions and filters
Defines the Liminf/Limsup of a function taking values in a conditionally complete lattice, with
respect to an arbitrary filter.
We define `f.Limsup` (`f.Liminf`) where `f` is a filter taking values in a conditionally complete
lattice. `f.Limsup` is the smallest element `a` such that, eventually, `u ≤ a` (and vice versa for
`f.Liminf`). To work with the Limsup along a function `u` use `(f.map u).Limsup`.
Usually, one defines the Limsup as `Inf (Sup s)` where the Inf is taken over all sets in the filter.
For instance, in ℕ along a function `u`, this is `Inf_n (Sup_{k ≥ n} u k)` (and the latter quantity
decreases with `n`, so this is in fact a limit). There is however a difficulty: it may well happen
that `u` is not bounded on the whole space, only eventually (think of `Limsup (λx, 1/x)` on ℝ). Then
there is no guarantee that the quantity above really decreases (the value of the `Sup` beforehand is
not really well defined, as one can not use ∞), so that the Inf could be anything. So one can not
use this `Inf Sup ...` definition in conditionally complete lattices, and one has to use a less
tractable definition.
In conditionally complete lattices, the definition is only useful for filters which are eventually
bounded above (otherwise, the Limsup would morally be +∞, which does not belong to the space) and
which are frequently bounded below (otherwise, the Limsup would morally be -∞, which is not in the
space either). We start with definitions of these concepts for arbitrary filters, before turning to
the definitions of Limsup and Liminf.
In complete lattices, however, it coincides with the `Inf Sup` definition.
-/
open filter set
open_locale filter
variables {α β γ ι : Type*}
namespace filter
section relation
/-- `f.is_bounded (≺)`: the filter `f` is eventually bounded w.r.t. the relation `≺`, i.e.
eventually, it is bounded by some uniform bound.
`r` will be usually instantiated with `≤` or `≥`. -/
def is_bounded (r : α → α → Prop) (f : filter α) := ∃ b, ∀ᶠ x in f, r x b
/-- `f.is_bounded_under (≺) u`: the image of the filter `f` under `u` is eventually bounded w.r.t.
the relation `≺`, i.e. eventually, it is bounded by some uniform bound. -/
def is_bounded_under (r : α → α → Prop) (f : filter β) (u : β → α) := (f.map u).is_bounded r
variables {r : α → α → Prop} {f g : filter α}
/-- `f` is eventually bounded if and only if, there exists an admissible set on which it is
bounded. -/
lemma is_bounded_iff : f.is_bounded r ↔ (∃s∈f.sets, ∃b, s ⊆ {x | r x b}) :=
iff.intro
(assume ⟨b, hb⟩, ⟨{a | r a b}, hb, b, subset.refl _⟩)
(assume ⟨s, hs, b, hb⟩, ⟨b, mem_of_superset hs hb⟩)
/-- A bounded function `u` is in particular eventually bounded. -/
lemma is_bounded_under_of {f : filter β} {u : β → α} :
(∃b, ∀x, r (u x) b) → f.is_bounded_under r u
| ⟨b, hb⟩ := ⟨b, show ∀ᶠ x in f, r (u x) b, from eventually_of_forall hb⟩
lemma is_bounded_bot : is_bounded r ⊥ ↔ nonempty α :=
by simp [is_bounded, exists_true_iff_nonempty]
lemma is_bounded_top : is_bounded r ⊤ ↔ (∃t, ∀x, r x t) :=
by simp [is_bounded, eq_univ_iff_forall]
lemma is_bounded_principal (s : set α) : is_bounded r (𝓟 s) ↔ (∃t, ∀x∈s, r x t) :=
by simp [is_bounded, subset_def]
lemma is_bounded_sup [is_trans α r] (hr : ∀b₁ b₂, ∃b, r b₁ b ∧ r b₂ b) :
is_bounded r f → is_bounded r g → is_bounded r (f ⊔ g)
| ⟨b₁, h₁⟩ ⟨b₂, h₂⟩ := let ⟨b, rb₁b, rb₂b⟩ := hr b₁ b₂ in
⟨b, eventually_sup.mpr ⟨h₁.mono (λ x h, trans h rb₁b), h₂.mono (λ x h, trans h rb₂b)⟩⟩
lemma is_bounded.mono (h : f ≤ g) : is_bounded r g → is_bounded r f
| ⟨b, hb⟩ := ⟨b, h hb⟩
lemma is_bounded_under.mono {f g : filter β} {u : β → α} (h : f ≤ g) :
g.is_bounded_under r u → f.is_bounded_under r u :=
λ hg, hg.mono (map_mono h)
lemma is_bounded_under.mono_le [preorder β] {l : filter α} {u v : α → β}
(hu : is_bounded_under (≤) l u) (hv : v ≤ᶠ[l] u) : is_bounded_under (≤) l v :=
hu.imp $ λ b hb, (eventually_map.1 hb).mp $ hv.mono $ λ x, le_trans
lemma is_bounded_under.mono_ge [preorder β] {l : filter α} {u v : α → β}
(hu : is_bounded_under (≥) l u) (hv : u ≤ᶠ[l] v) : is_bounded_under (≥) l v :=
@is_bounded_under.mono_le α βᵒᵈ _ _ _ _ hu hv
lemma is_bounded.is_bounded_under {q : β → β → Prop} {u : α → β}
(hf : ∀a₀ a₁, r a₀ a₁ → q (u a₀) (u a₁)) : f.is_bounded r → f.is_bounded_under q u
| ⟨b, h⟩ := ⟨u b, show ∀ᶠ x in f, q (u x) (u b), from h.mono (λ x, hf x b)⟩
lemma not_is_bounded_under_of_tendsto_at_top [preorder β] [no_max_order β] {f : α → β}
{l : filter α} [l.ne_bot] (hf : tendsto f l at_top) :
¬ is_bounded_under (≤) l f :=
begin
rintro ⟨b, hb⟩,
rw eventually_map at hb,
obtain ⟨b', h⟩ := exists_gt b,
have hb' := (tendsto_at_top.mp hf) b',
have : {x : α | f x ≤ b} ∩ {x : α | b' ≤ f x} = ∅ :=
eq_empty_of_subset_empty (λ x hx, (not_le_of_lt h) (le_trans hx.2 hx.1)),
exact (nonempty_of_mem (hb.and hb')).ne_empty this
end
lemma not_is_bounded_under_of_tendsto_at_bot [preorder β] [no_min_order β] {f : α → β}
{l : filter α} [l.ne_bot](hf : tendsto f l at_bot) :
¬ is_bounded_under (≥) l f :=
@not_is_bounded_under_of_tendsto_at_top α βᵒᵈ _ _ _ _ _ hf
lemma is_bounded_under.bdd_above_range_of_cofinite [semilattice_sup β] {f : α → β}
(hf : is_bounded_under (≤) cofinite f) : bdd_above (range f) :=
begin
rcases hf with ⟨b, hb⟩,
haveI : nonempty β := ⟨b⟩,
rw [← image_univ, ← union_compl_self {x | f x ≤ b}, image_union, bdd_above_union],
exact ⟨⟨b, ball_image_iff.2 $ λ x, id⟩, (hb.image f).bdd_above⟩
end
lemma is_bounded_under.bdd_below_range_of_cofinite [semilattice_inf β] {f : α → β}
(hf : is_bounded_under (≥) cofinite f) : bdd_below (range f) :=
@is_bounded_under.bdd_above_range_of_cofinite α βᵒᵈ _ _ hf
lemma is_bounded_under.bdd_above_range [semilattice_sup β] {f : ℕ → β}
(hf : is_bounded_under (≤) at_top f) : bdd_above (range f) :=
by { rw ← nat.cofinite_eq_at_top at hf, exact hf.bdd_above_range_of_cofinite }
lemma is_bounded_under.bdd_below_range [semilattice_inf β] {f : ℕ → β}
(hf : is_bounded_under (≥) at_top f) : bdd_below (range f) :=
@is_bounded_under.bdd_above_range βᵒᵈ _ _ hf
/-- `is_cobounded (≺) f` states that the filter `f` does not tend to infinity w.r.t. `≺`. This is
also called frequently bounded. Will be usually instantiated with `≤` or `≥`.
There is a subtlety in this definition: we want `f.is_cobounded` to hold for any `f` in the case of
complete lattices. This will be relevant to deduce theorems on complete lattices from their
versions on conditionally complete lattices with additional assumptions. We have to be careful in
the edge case of the trivial filter containing the empty set: the other natural definition
`¬ ∀ a, ∀ᶠ n in f, a ≤ n`
would not work as well in this case.
-/
def is_cobounded (r : α → α → Prop) (f : filter α) := ∃b, ∀a, (∀ᶠ x in f, r x a) → r b a
/-- `is_cobounded_under (≺) f u` states that the image of the filter `f` under the map `u` does not
tend to infinity w.r.t. `≺`. This is also called frequently bounded. Will be usually instantiated
with `≤` or `≥`. -/
def is_cobounded_under (r : α → α → Prop) (f : filter β) (u : β → α) := (f.map u).is_cobounded r
/-- To check that a filter is frequently bounded, it suffices to have a witness
which bounds `f` at some point for every admissible set.
This is only an implication, as the other direction is wrong for the trivial filter.-/
lemma is_cobounded.mk [is_trans α r] (a : α) (h : ∀s∈f, ∃x∈s, r a x) : f.is_cobounded r :=
⟨a, assume y s, let ⟨x, h₁, h₂⟩ := h _ s in trans h₂ h₁⟩
/-- A filter which is eventually bounded is in particular frequently bounded (in the opposite
direction). At least if the filter is not trivial. -/
lemma is_bounded.is_cobounded_flip [is_trans α r] [ne_bot f] :
f.is_bounded r → f.is_cobounded (flip r)
| ⟨a, ha⟩ := ⟨a, assume b hb,
let ⟨x, rxa, rbx⟩ := (ha.and hb).exists in
show r b a, from trans rbx rxa⟩
lemma is_bounded.is_cobounded_ge [preorder α] [ne_bot f] (h : f.is_bounded (≤)) :
f.is_cobounded (≥) :=
h.is_cobounded_flip
lemma is_bounded.is_cobounded_le [preorder α] [ne_bot f] (h : f.is_bounded (≥)) :
f.is_cobounded (≤) :=
h.is_cobounded_flip
lemma is_cobounded_bot : is_cobounded r ⊥ ↔ (∃b, ∀x, r b x) :=
by simp [is_cobounded]
lemma is_cobounded_top : is_cobounded r ⊤ ↔ nonempty α :=
by simp [is_cobounded, eq_univ_iff_forall, exists_true_iff_nonempty] {contextual := tt}
lemma is_cobounded_principal (s : set α) :
(𝓟 s).is_cobounded r ↔ (∃b, ∀a, (∀x∈s, r x a) → r b a) :=
by simp [is_cobounded, subset_def]
lemma is_cobounded.mono (h : f ≤ g) : f.is_cobounded r → g.is_cobounded r
| ⟨b, hb⟩ := ⟨b, assume a ha, hb a (h ha)⟩
end relation
lemma is_cobounded_le_of_bot [preorder α] [order_bot α] {f : filter α} : f.is_cobounded (≤) :=
⟨⊥, assume a h, bot_le⟩
lemma is_cobounded_ge_of_top [preorder α] [order_top α] {f : filter α} : f.is_cobounded (≥) :=
⟨⊤, assume a h, le_top⟩
lemma is_bounded_le_of_top [preorder α] [order_top α] {f : filter α} : f.is_bounded (≤) :=
⟨⊤, eventually_of_forall $ λ _, le_top⟩
lemma is_bounded_ge_of_bot [preorder α] [order_bot α] {f : filter α} : f.is_bounded (≥) :=
⟨⊥, eventually_of_forall $ λ _, bot_le⟩
@[simp] lemma _root_.order_iso.is_bounded_under_le_comp [preorder α] [preorder β] (e : α ≃o β)
{l : filter γ} {u : γ → α} :
is_bounded_under (≤) l (λ x, e (u x)) ↔ is_bounded_under (≤) l u :=
e.surjective.exists.trans $ exists_congr $ λ a, by simp only [eventually_map, e.le_iff_le]
@[simp] lemma _root_.order_iso.is_bounded_under_ge_comp [preorder α] [preorder β] (e : α ≃o β)
{l : filter γ} {u : γ → α} :
is_bounded_under (≥) l (λ x, e (u x)) ↔ is_bounded_under (≥) l u :=
e.dual.is_bounded_under_le_comp
@[simp, to_additive]
lemma is_bounded_under_le_inv [ordered_comm_group α] {l : filter β} {u : β → α} :
is_bounded_under (≤) l (λ x, (u x)⁻¹) ↔ is_bounded_under (≥) l u :=
(order_iso.inv α).is_bounded_under_ge_comp
@[simp, to_additive]
lemma is_bounded_under_ge_inv [ordered_comm_group α] {l : filter β} {u : β → α} :
is_bounded_under (≥) l (λ x, (u x)⁻¹) ↔ is_bounded_under (≤) l u :=
(order_iso.inv α).is_bounded_under_le_comp
lemma is_bounded_under.sup [semilattice_sup α] {f : filter β} {u v : β → α} :
f.is_bounded_under (≤) u → f.is_bounded_under (≤) v → f.is_bounded_under (≤) (λa, u a ⊔ v a)
| ⟨bu, (hu : ∀ᶠ x in f, u x ≤ bu)⟩ ⟨bv, (hv : ∀ᶠ x in f, v x ≤ bv)⟩ :=
⟨bu ⊔ bv, show ∀ᶠ x in f, u x ⊔ v x ≤ bu ⊔ bv,
by filter_upwards [hu, hv] with _ using sup_le_sup⟩
@[simp] lemma is_bounded_under_le_sup [semilattice_sup α] {f : filter β} {u v : β → α} :
f.is_bounded_under (≤) (λ a, u a ⊔ v a) ↔ f.is_bounded_under (≤) u ∧ f.is_bounded_under (≤) v :=
⟨λ h, ⟨h.mono_le $ eventually_of_forall $ λ _, le_sup_left,
h.mono_le $ eventually_of_forall $ λ _, le_sup_right⟩, λ h, h.1.sup h.2⟩
lemma is_bounded_under.inf [semilattice_inf α] {f : filter β} {u v : β → α} :
f.is_bounded_under (≥) u → f.is_bounded_under (≥) v → f.is_bounded_under (≥) (λa, u a ⊓ v a) :=
@is_bounded_under.sup αᵒᵈ β _ _ _ _
@[simp] lemma is_bounded_under_ge_inf [semilattice_inf α] {f : filter β} {u v : β → α} :
f.is_bounded_under (≥) (λ a, u a ⊓ v a) ↔ f.is_bounded_under (≥) u ∧ f.is_bounded_under (≥) v :=
@is_bounded_under_le_sup αᵒᵈ _ _ _ _ _
lemma is_bounded_under_le_abs [linear_ordered_add_comm_group α] {f : filter β} {u : β → α} :
f.is_bounded_under (≤) (λ a, |u a|) ↔ f.is_bounded_under (≤) u ∧ f.is_bounded_under (≥) u :=
is_bounded_under_le_sup.trans $ and_congr iff.rfl is_bounded_under_le_neg
/-- Filters are automatically bounded or cobounded in complete lattices. To use the same statements
in complete and conditionally complete lattices but let automation fill automatically the
boundedness proofs in complete lattices, we use the tactic `is_bounded_default` in the statements,
in the form `(hf : f.is_bounded (≥) . is_bounded_default)`. -/
meta def is_bounded_default : tactic unit :=
tactic.applyc ``is_cobounded_le_of_bot <|>
tactic.applyc ``is_cobounded_ge_of_top <|>
tactic.applyc ``is_bounded_le_of_top <|>
tactic.applyc ``is_bounded_ge_of_bot
section conditionally_complete_lattice
variables [conditionally_complete_lattice α]
/-- The `Limsup` of a filter `f` is the infimum of the `a` such that, eventually for `f`,
holds `x ≤ a`. -/
def Limsup (f : filter α) : α := Inf { a | ∀ᶠ n in f, n ≤ a }
/-- The `Liminf` of a filter `f` is the supremum of the `a` such that, eventually for `f`,
holds `x ≥ a`. -/
def Liminf (f : filter α) : α := Sup { a | ∀ᶠ n in f, a ≤ n }
/-- The `limsup` of a function `u` along a filter `f` is the infimum of the `a` such that,
eventually for `f`, holds `u x ≤ a`. -/
def limsup (f : filter β) (u : β → α) : α := (f.map u).Limsup
/-- The `liminf` of a function `u` along a filter `f` is the supremum of the `a` such that,
eventually for `f`, holds `u x ≥ a`. -/
def liminf (f : filter β) (u : β → α) : α := (f.map u).Liminf
section
variables {f : filter β} {u : β → α}
theorem limsup_eq : f.limsup u = Inf { a | ∀ᶠ n in f, u n ≤ a } := rfl
theorem liminf_eq : f.liminf u = Sup { a | ∀ᶠ n in f, a ≤ u n } := rfl
end
theorem Limsup_le_of_le {f : filter α} {a}
(hf : f.is_cobounded (≤) . is_bounded_default) (h : ∀ᶠ n in f, n ≤ a) : f.Limsup ≤ a :=
cInf_le hf h
theorem le_Liminf_of_le {f : filter α} {a}
(hf : f.is_cobounded (≥) . is_bounded_default) (h : ∀ᶠ n in f, a ≤ n) : a ≤ f.Liminf :=
le_cSup hf h
theorem le_Limsup_of_le {f : filter α} {a}
(hf : f.is_bounded (≤) . is_bounded_default) (h : ∀ b, (∀ᶠ n in f, n ≤ b) → a ≤ b) :
a ≤ f.Limsup :=
le_cInf hf h
theorem Liminf_le_of_le {f : filter α} {a}
(hf : f.is_bounded (≥) . is_bounded_default) (h : ∀ b, (∀ᶠ n in f, b ≤ n) → b ≤ a) :
f.Liminf ≤ a :=
cSup_le hf h
theorem Liminf_le_Limsup {f : filter α} [ne_bot f]
(h₁ : f.is_bounded (≤) . is_bounded_default) (h₂ : f.is_bounded (≥) . is_bounded_default) :
f.Liminf ≤ f.Limsup :=
Liminf_le_of_le h₂ $ assume a₀ ha₀, le_Limsup_of_le h₁ $ assume a₁ ha₁,
show a₀ ≤ a₁, from let ⟨b, hb₀, hb₁⟩ := (ha₀.and ha₁).exists in le_trans hb₀ hb₁
lemma Liminf_le_Liminf {f g : filter α}
(hf : f.is_bounded (≥) . is_bounded_default) (hg : g.is_cobounded (≥) . is_bounded_default)
(h : ∀ a, (∀ᶠ n in f, a ≤ n) → ∀ᶠ n in g, a ≤ n) : f.Liminf ≤ g.Liminf :=
cSup_le_cSup hg hf h
lemma Limsup_le_Limsup {f g : filter α}
(hf : f.is_cobounded (≤) . is_bounded_default) (hg : g.is_bounded (≤) . is_bounded_default)
(h : ∀ a, (∀ᶠ n in g, n ≤ a) → ∀ᶠ n in f, n ≤ a) : f.Limsup ≤ g.Limsup :=
cInf_le_cInf hf hg h
lemma Limsup_le_Limsup_of_le {f g : filter α} (h : f ≤ g)
(hf : f.is_cobounded (≤) . is_bounded_default) (hg : g.is_bounded (≤) . is_bounded_default) :
f.Limsup ≤ g.Limsup :=
Limsup_le_Limsup hf hg (assume a ha, h ha)
lemma Liminf_le_Liminf_of_le {f g : filter α} (h : g ≤ f)
(hf : f.is_bounded (≥) . is_bounded_default) (hg : g.is_cobounded (≥) . is_bounded_default) :
f.Liminf ≤ g.Liminf :=
Liminf_le_Liminf hf hg (assume a ha, h ha)
lemma limsup_le_limsup {α : Type*} [conditionally_complete_lattice β] {f : filter α} {u v : α → β}
(h : u ≤ᶠ[f] v)
(hu : f.is_cobounded_under (≤) u . is_bounded_default)
(hv : f.is_bounded_under (≤) v . is_bounded_default) :
f.limsup u ≤ f.limsup v :=
Limsup_le_Limsup hu hv $ assume b, h.trans
lemma liminf_le_liminf {α : Type*} [conditionally_complete_lattice β] {f : filter α} {u v : α → β}
(h : ∀ᶠ a in f, u a ≤ v a)
(hu : f.is_bounded_under (≥) u . is_bounded_default)
(hv : f.is_cobounded_under (≥) v . is_bounded_default) :
f.liminf u ≤ f.liminf v :=
@limsup_le_limsup βᵒᵈ α _ _ _ _ h hv hu
lemma limsup_le_limsup_of_le {α β} [conditionally_complete_lattice β] {f g : filter α} (h : f ≤ g)
{u : α → β} (hf : f.is_cobounded_under (≤) u . is_bounded_default)
(hg : g.is_bounded_under (≤) u . is_bounded_default) :
f.limsup u ≤ g.limsup u :=
Limsup_le_Limsup_of_le (map_mono h) hf hg
lemma liminf_le_liminf_of_le {α β} [conditionally_complete_lattice β] {f g : filter α} (h : g ≤ f)
{u : α → β} (hf : f.is_bounded_under (≥) u . is_bounded_default)
(hg : g.is_cobounded_under (≥) u . is_bounded_default) :
f.liminf u ≤ g.liminf u :=
Liminf_le_Liminf_of_le (map_mono h) hf hg
theorem Limsup_principal {s : set α} (h : bdd_above s) (hs : s.nonempty) :
(𝓟 s).Limsup = Sup s :=
by simp [Limsup]; exact cInf_upper_bounds_eq_cSup h hs
theorem Liminf_principal {s : set α} (h : bdd_below s) (hs : s.nonempty) :
(𝓟 s).Liminf = Inf s :=
@Limsup_principal αᵒᵈ _ s h hs
lemma limsup_congr {α : Type*} [conditionally_complete_lattice β] {f : filter α} {u v : α → β}
(h : ∀ᶠ a in f, u a = v a) : limsup f u = limsup f v :=
begin
rw limsup_eq,
congr' with b,
exact eventually_congr (h.mono $ λ x hx, by simp [hx])
end
lemma liminf_congr {α : Type*} [conditionally_complete_lattice β] {f : filter α} {u v : α → β}
(h : ∀ᶠ a in f, u a = v a) : liminf f u = liminf f v :=
@limsup_congr βᵒᵈ _ _ _ _ _ h
lemma limsup_const {α : Type*} [conditionally_complete_lattice β] {f : filter α} [ne_bot f]
(b : β) : limsup f (λ x, b) = b :=
by simpa only [limsup_eq, eventually_const] using cInf_Ici
lemma liminf_const {α : Type*} [conditionally_complete_lattice β] {f : filter α} [ne_bot f]
(b : β) : liminf f (λ x, b) = b :=
@limsup_const βᵒᵈ α _ f _ b
lemma liminf_le_limsup {f : filter β} [ne_bot f] {u : β → α}
(h : f.is_bounded_under (≤) u . is_bounded_default)
(h' : f.is_bounded_under (≥) u . is_bounded_default) :
liminf f u ≤ limsup f u :=
Liminf_le_Limsup h h'
end conditionally_complete_lattice
section complete_lattice
variables [complete_lattice α]
@[simp] theorem Limsup_bot : (⊥ : filter α).Limsup = ⊥ :=
bot_unique $ Inf_le $ by simp
@[simp] theorem Liminf_bot : (⊥ : filter α).Liminf = ⊤ :=
top_unique $ le_Sup $ by simp
@[simp] theorem Limsup_top : (⊤ : filter α).Limsup = ⊤ :=
top_unique $ le_Inf $
by simp [eq_univ_iff_forall]; exact assume b hb, (top_unique $ hb _)
@[simp] theorem Liminf_top : (⊤ : filter α).Liminf = ⊥ :=
bot_unique $ Sup_le $
by simp [eq_univ_iff_forall]; exact assume b hb, (bot_unique $ hb _)
/-- Same as limsup_const applied to `⊥` but without the `ne_bot f` assumption -/
lemma limsup_const_bot {f : filter β} : limsup f (λ x : β, (⊥ : α)) = (⊥ : α) :=
begin
rw [limsup_eq, eq_bot_iff],
exact Inf_le (eventually_of_forall (λ x, le_rfl)),
end
/-- Same as limsup_const applied to `⊤` but without the `ne_bot f` assumption -/
lemma liminf_const_top {f : filter β} : liminf f (λ x : β, (⊤ : α)) = (⊤ : α) :=
@limsup_const_bot αᵒᵈ β _ _
theorem has_basis.Limsup_eq_infi_Sup {ι} {p : ι → Prop} {s} {f : filter α} (h : f.has_basis p s) :
f.Limsup = ⨅ i (hi : p i), Sup (s i) :=
le_antisymm
(le_infi₂ $ λ i hi, Inf_le $ h.eventually_iff.2 ⟨i, hi, λ x, le_Sup⟩)
(le_Inf $ assume a ha, let ⟨i, hi, ha⟩ := h.eventually_iff.1 ha in
infi₂_le_of_le _ hi $ Sup_le ha)
theorem has_basis.Liminf_eq_supr_Inf {p : ι → Prop} {s : ι → set α} {f : filter α}
(h : f.has_basis p s) : f.Liminf = ⨆ i (hi : p i), Inf (s i) :=
@has_basis.Limsup_eq_infi_Sup αᵒᵈ _ _ _ _ _ h
theorem Limsup_eq_infi_Sup {f : filter α} : f.Limsup = ⨅ s ∈ f, Sup s :=
f.basis_sets.Limsup_eq_infi_Sup
theorem Liminf_eq_supr_Inf {f : filter α} : f.Liminf = ⨆ s ∈ f, Inf s :=
@Limsup_eq_infi_Sup αᵒᵈ _ _
/-- In a complete lattice, the limsup of a function is the infimum over sets `s` in the filter
of the supremum of the function over `s` -/
theorem limsup_eq_infi_supr {f : filter β} {u : β → α} : f.limsup u = ⨅ s ∈ f, ⨆ a ∈ s, u a :=
(f.basis_sets.map u).Limsup_eq_infi_Sup.trans $
by simp only [Sup_image, id]
lemma limsup_eq_infi_supr_of_nat {u : ℕ → α} : limsup at_top u = ⨅ n : ℕ, ⨆ i ≥ n, u i :=
(at_top_basis.map u).Limsup_eq_infi_Sup.trans $
by simp only [Sup_image, infi_const]; refl
lemma limsup_eq_infi_supr_of_nat' {u : ℕ → α} : limsup at_top u = ⨅ n : ℕ, ⨆ i : ℕ, u (i + n) :=
by simp only [limsup_eq_infi_supr_of_nat, supr_ge_eq_supr_nat_add]
theorem has_basis.limsup_eq_infi_supr {p : ι → Prop} {s : ι → set β} {f : filter β} {u : β → α}
(h : f.has_basis p s) : f.limsup u = ⨅ i (hi : p i), ⨆ a ∈ s i, u a :=
(h.map u).Limsup_eq_infi_Sup.trans $ by simp only [Sup_image, id]
/-- In a complete lattice, the liminf of a function is the infimum over sets `s` in the filter
of the supremum of the function over `s` -/
theorem liminf_eq_supr_infi {f : filter β} {u : β → α} : f.liminf u = ⨆ s ∈ f, ⨅ a ∈ s, u a :=
@limsup_eq_infi_supr αᵒᵈ β _ _ _
lemma liminf_eq_supr_infi_of_nat {u : ℕ → α} : liminf at_top u = ⨆ n : ℕ, ⨅ i ≥ n, u i :=
@limsup_eq_infi_supr_of_nat αᵒᵈ _ u
lemma liminf_eq_supr_infi_of_nat' {u : ℕ → α} : liminf at_top u = ⨆ n : ℕ, ⨅ i : ℕ, u (i + n) :=
@limsup_eq_infi_supr_of_nat' αᵒᵈ _ _
theorem has_basis.liminf_eq_supr_infi {p : ι → Prop} {s : ι → set β} {f : filter β} {u : β → α}
(h : f.has_basis p s) : f.liminf u = ⨆ i (hi : p i), ⨅ a ∈ s i, u a :=
@has_basis.limsup_eq_infi_supr αᵒᵈ _ _ _ _ _ _ _ h
@[simp] lemma liminf_nat_add (f : ℕ → α) (k : ℕ) :
at_top.liminf (λ i, f (i + k)) = at_top.liminf f :=
by { simp_rw liminf_eq_supr_infi_of_nat, exact supr_infi_ge_nat_add f k }
@[simp] lemma limsup_nat_add (f : ℕ → α) (k : ℕ) :
at_top.limsup (λ i, f (i + k)) = at_top.limsup f :=
@liminf_nat_add αᵒᵈ _ f k
lemma liminf_le_of_frequently_le' {α β} [complete_lattice β]
{f : filter α} {u : α → β} {x : β} (h : ∃ᶠ a in f, u a ≤ x) :
f.liminf u ≤ x :=
begin
rw liminf_eq,
refine Sup_le (λ b hb, _),
have hbx : ∃ᶠ a in f, b ≤ x,
{ revert h,
rw [←not_imp_not, not_frequently, not_frequently],
exact λ h, hb.mp (h.mono (λ a hbx hba hax, hbx (hba.trans hax))), },
exact hbx.exists.some_spec,
end
lemma le_limsup_of_frequently_le' {α β} [complete_lattice β]
{f : filter α} {u : α → β} {x : β} (h : ∃ᶠ a in f, x ≤ u a) :
x ≤ f.limsup u :=
@liminf_le_of_frequently_le' _ βᵒᵈ _ _ _ _ h
end complete_lattice
section conditionally_complete_linear_order
lemma eventually_lt_of_lt_liminf {f : filter α} [conditionally_complete_linear_order β]
{u : α → β} {b : β} (h : b < liminf f u) (hu : f.is_bounded_under (≥) u . is_bounded_default) :
∀ᶠ a in f, b < u a :=
begin
obtain ⟨c, hc, hbc⟩ : ∃ (c : β) (hc : c ∈ {c : β | ∀ᶠ (n : α) in f, c ≤ u n}), b < c :=
exists_lt_of_lt_cSup hu h,
exact hc.mono (λ x hx, lt_of_lt_of_le hbc hx)
end
lemma eventually_lt_of_limsup_lt {f : filter α} [conditionally_complete_linear_order β]
{u : α → β} {b : β} (h : limsup f u < b) (hu : f.is_bounded_under (≤) u . is_bounded_default) :
∀ᶠ a in f, u a < b :=
@eventually_lt_of_lt_liminf _ βᵒᵈ _ _ _ _ h hu
lemma liminf_le_of_frequently_le {α β} [conditionally_complete_linear_order β] {f : filter α}
{u : α → β} {b : β} (hu_le : ∃ᶠ x in f, u x ≤ b)
(hu : f.is_bounded_under (≥) u . is_bounded_default) :
f.liminf u ≤ b :=
@le_limsup_of_frequently_le _ βᵒᵈ _ f u b hu_le hu
lemma frequently_lt_of_lt_limsup {α β} [conditionally_complete_linear_order β] {f : filter α}
{u : α → β} {b : β}
(hu : f.is_cobounded_under (≤) u . is_bounded_default) (h : b < f.limsup u) :
∃ᶠ x in f, b < u x :=
begin
contrapose! h,
apply Limsup_le_of_le hu,
simpa using h,
end
lemma frequently_lt_of_liminf_lt {α β} [conditionally_complete_linear_order β] {f : filter α}
{u : α → β} {b : β}
(hu : f.is_cobounded_under (≥) u . is_bounded_default) (h : f.liminf u < b) :
∃ᶠ x in f, u x < b :=
@frequently_lt_of_lt_limsup _ βᵒᵈ _ f u b hu h
end conditionally_complete_linear_order
end filter
section order
open filter
lemma monotone.is_bounded_under_le_comp [nonempty β] [linear_order β] [preorder γ]
[no_max_order γ] {g : β → γ} {f : α → β} {l : filter α} (hg : monotone g)
(hg' : tendsto g at_top at_top) :
is_bounded_under (≤) l (g ∘ f) ↔ is_bounded_under (≤) l f :=
begin
refine ⟨_, λ h, h.is_bounded_under hg⟩,
rintro ⟨c, hc⟩, rw eventually_map at hc,
obtain ⟨b, hb⟩ : ∃ b, ∀ a ≥ b, c < g a := eventually_at_top.1 (hg'.eventually_gt_at_top c),
exact ⟨b, hc.mono $ λ x hx, not_lt.1 (λ h, (hb _ h.le).not_le hx)⟩
end
lemma monotone.is_bounded_under_ge_comp [nonempty β] [linear_order β] [preorder γ]
[no_min_order γ] {g : β → γ} {f : α → β} {l : filter α} (hg : monotone g)
(hg' : tendsto g at_bot at_bot) :
is_bounded_under (≥) l (g ∘ f) ↔ is_bounded_under (≥) l f :=
hg.dual.is_bounded_under_le_comp hg'
lemma antitone.is_bounded_under_le_comp [nonempty β] [linear_order β] [preorder γ]
[no_max_order γ] {g : β → γ} {f : α → β} {l : filter α} (hg : antitone g)
(hg' : tendsto g at_bot at_top) :
is_bounded_under (≤) l (g ∘ f) ↔ is_bounded_under (≥) l f :=
hg.dual_right.is_bounded_under_ge_comp hg'
lemma antitone.is_bounded_under_ge_comp [nonempty β] [linear_order β] [preorder γ]
[no_min_order γ] {g : β → γ} {f : α → β} {l : filter α} (hg : antitone g)
(hg' : tendsto g at_top at_bot) :
is_bounded_under (≥) l (g ∘ f) ↔ is_bounded_under (≤) l f :=
hg.dual_right.is_bounded_under_le_comp hg'
lemma galois_connection.l_limsup_le [conditionally_complete_lattice β]
[conditionally_complete_lattice γ] {f : filter α} {v : α → β}
{l : β → γ} {u : γ → β} (gc : galois_connection l u)
(hlv : f.is_bounded_under (≤) (λ x, l (v x)) . is_bounded_default)
(hv_co : f.is_cobounded_under (≤) v . is_bounded_default) :
l (f.limsup v) ≤ f.limsup (λ x, l (v x)) :=
begin
refine le_Limsup_of_le hlv (λ c hc, _),
rw filter.eventually_map at hc,
simp_rw (gc _ _) at hc ⊢,
exact Limsup_le_of_le hv_co hc,
end
lemma order_iso.limsup_apply {γ} [conditionally_complete_lattice β]
[conditionally_complete_lattice γ] {f : filter α} {u : α → β} (g : β ≃o γ)
(hu : f.is_bounded_under (≤) u . is_bounded_default)
(hu_co : f.is_cobounded_under (≤) u . is_bounded_default)
(hgu : f.is_bounded_under (≤) (λ x, g (u x)) . is_bounded_default)
(hgu_co : f.is_cobounded_under (≤) (λ x, g (u x)) . is_bounded_default) :
g (f.limsup u) = f.limsup (λ x, g (u x)) :=
begin
refine le_antisymm (g.to_galois_connection.l_limsup_le hgu hu_co) _,
rw [←(g.symm.symm_apply_apply (f.limsup (λ (x : α), g (u x)))), g.symm_symm],
refine g.monotone _,
have hf : u = λ i, g.symm (g (u i)), from funext (λ i, (g.symm_apply_apply (u i)).symm),
nth_rewrite 0 hf,
refine g.symm.to_galois_connection.l_limsup_le _ hgu_co,
simp_rw g.symm_apply_apply,
exact hu,
end
lemma order_iso.liminf_apply {γ} [conditionally_complete_lattice β]
[conditionally_complete_lattice γ] {f : filter α} {u : α → β} (g : β ≃o γ)
(hu : f.is_bounded_under (≥) u . is_bounded_default)
(hu_co : f.is_cobounded_under (≥) u . is_bounded_default)
(hgu : f.is_bounded_under (≥) (λ x, g (u x)) . is_bounded_default)
(hgu_co : f.is_cobounded_under (≥) (λ x, g (u x)) . is_bounded_default) :
g (f.liminf u) = f.liminf (λ x, g (u x)) :=
@order_iso.limsup_apply α βᵒᵈ γᵒᵈ _ _ f u g.dual hu hu_co hgu hgu_co
end order
|
function score = IGDp(Population,optimum)
% <min> <multi/many> <real/integer/label/binary/permutation> <large/none> <constrained/none> <expensive/none> <multimodal/none> <sparse/none> <dynamic/none>
% Inverted generational distance plus (IGD+)
%------------------------------- Reference --------------------------------
% H. Ishibuchi, H. Masuda, Y. Tanigaki, and Y. Nojima. Modified distance
% calculation in generational distance and inverted generational distance,
% Proceedings of the International Conference on Evolutionary
% Multi-Criterion Optimization, 2015, 110-125.
%------------------------------- Copyright --------------------------------
% Copyright (c) 2023 BIMK Group. You are free to use the PlatEMO for
% research purposes. All publications which use this platform or any code
% in the platform should acknowledge the use of "PlatEMO" and reference "Ye
% Tian, Ran Cheng, Xingyi Zhang, and Yaochu Jin, PlatEMO: A MATLAB platform
% for evolutionary multi-objective optimization [educational forum], IEEE
% Computational Intelligence Magazine, 2017, 12(4): 73-87".
%--------------------------------------------------------------------------
PopObj = Population.best.objs;
if size(PopObj,2) ~= size(optimum,2)
score = nan;
else
[Nr,M] = size(optimum);
[N,~] = size(PopObj);
delta = zeros(Nr,1);
for i = 1 : Nr
delta(i) = min(sqrt(sum(max(PopObj - repmat(optimum(i,:),N,1),zeros(N,M)).^2,2)));
end
score = mean(delta);
end
end
module Control.Linear.Network
-- An experimental linear type based API to sockets
import Control.Linear.LIO
import public Network.Socket.Data
import Network.Socket
public export
data SocketState = Ready | Bound | Listening | Open | Closed
export
data Socket : SocketState -> Type where
MkSocket : Socket.Data.Socket -> Socket st
export
newSocket : LinearIO io
=> (fam : SocketFamily)
-> (ty : SocketType)
-> (pnum : ProtocolNumber)
-> (success : (1 _ : Socket Ready) -> L io ())
-> (fail : SocketError -> L io ())
-> L io ()
newSocket fam ty pnum success fail
= do Right rawsock <- socket fam ty pnum
| Left err => fail err
success (MkSocket rawsock)
export
close : LinearIO io => (1 _ : Socket st) -> L io {use=1} (Socket Closed)
close (MkSocket sock)
= do Socket.close sock
pure1 (MkSocket sock)
export
done : LinearIO io => (1 _ : Socket Closed) -> L io ()
done (MkSocket sock) = pure ()
export
bind : LinearIO io =>
(1 _ : Socket Ready) ->
(addr : Maybe SocketAddress) ->
(port : Port) ->
L io {use=1} (Res Bool (\res => Socket (case res of
False => Closed
True => Bound)))
bind (MkSocket sock) addr port
= do ok <- Socket.bind sock addr port
pure1 $ ok == 0 # MkSocket sock
export
connect : LinearIO io =>
(sock : Socket) ->
(addr : SocketAddress) ->
(port : Port) ->
L io {use=1} (Res Bool (\res => Socket (case res of
False => Closed
True => Open)))
connect sock addr port
= do ok <- Socket.connect sock addr port
pure1 $ ok == 0 # MkSocket sock
export
listen : LinearIO io =>
(1 _ : Socket Bound) ->
L io {use=1} (Res Bool (\res => Socket (case res of
False => Closed
True => Listening)))
listen (MkSocket sock)
= do ok <- Socket.listen sock
pure1 $ ok == 0 # MkSocket sock
export
accept : LinearIO io =>
(1 _ : Socket Listening) ->
L io {use=1} (Res Bool (\case
False => Socket Listening
True => (Socket Listening, Socket Open)))
accept (MkSocket sock)
= do Right (sock', sockaddr) <- Socket.accept sock
| Left err => pure1 (False # MkSocket sock)
pure1 (True # (MkSocket sock, MkSocket sock'))
export
send : LinearIO io =>
(1 _ : Socket Open) ->
(msg : String) ->
L io {use=1} (Res Bool (\res => Socket (case res of
False => Closed
True => Open)))
send (MkSocket sock) msg
= do Right c <- Socket.send sock msg
| Left err => pure1 (False # MkSocket sock)
pure1 (True # MkSocket sock)
export
recv : LinearIO io =>
(1 _ : Socket Open) ->
(len : ByteLength) ->
L io {use=1} (Res (Maybe (String, ResultCode))
(\res => Socket (case res of
Nothing => Closed
Just msg => Open)))
recv (MkSocket sock) len
= do Right msg <- Socket.recv sock len
| Left err => pure1 (Nothing # MkSocket sock)
pure1 (Just msg # MkSocket sock)
export
recvAll : LinearIO io =>
(1 _ : Socket Open) ->
L io {use=1} (Res (Maybe String)
(\res => Socket (case res of
Nothing => Closed
Just msg => Open)))
recvAll (MkSocket sock)
= do Right msg <- Socket.recvAll sock
| Left err => pure1 (Nothing # MkSocket sock)
pure1 (Just msg # MkSocket sock)
|
Require Import SpecDeps.
Require Import RData.
Require Import EventReplay.
Require Import MoverTypes.
Require Import Constants.
Require Import CommonLib.
Require Import AbsAccessor.Spec.
Local Open Scope Z_scope.
Section Spec.
Definition handle_ptimer_sysreg_write_spec (rec: Pointer) (esr: Z64) (adt: RData) : option RData :=
match esr with
| VZ64 esr =>
rely is_int64 esr;
rely (peq (base rec) buffer_loc);
rely (offset rec =? SLOT_REC);
when gidx == (buffer (priv adt)) @ (offset rec);
rely is_gidx gidx;
let gn := (gs (share adt)) @ gidx in
rely (g_tag (ginfo gn) =? GRANULE_STATE_REC);
rely (ref_accessible gn CPU_ID);
let rt := __ESR_EL2_SYSREG_ISS_RT esr in
rely is_int rt;
let val := get_reg rt (g_regs (grec gn)) in
rely is_int64 val;
rely is_int (t_masked (g_ptimer (grec gn)));
rely is_int64 (r_cnthctl_el2 (g_regs (grec gn)));
let cnth' := Z.lor (r_cnthctl_el2 (g_regs (grec gn))) CNTHCTL_EL2_EL1PTEN in
let ec := Z.land esr ESR_EL2_SYSREG_MASK in
if ec =? ESR_EL2_SYSREG_TIMER_CNTP_TVAL_EL0 then
let cntp_ctl := r_cntp_ctl_el0 (cpu_regs (priv adt)) in
rely is_int64 cntp_ctl;
match t_masked (g_ptimer (grec gn)) =? 0, __timer_condition_met cntp_ctl with
| true, true =>
Some adt {priv: (priv adt) {cpu_regs: (cpu_regs (priv adt)) {r_cntp_tval_el0: val}}}
| _, _ =>
let r' := (grec gn) {g_ptimer: (g_ptimer (grec gn)) {t_asserted: 0}}
{g_regs: (g_regs (grec gn)) {r_cnthctl_el2: cnth'}} in
Some adt {priv: (priv adt) {cpu_regs: (cpu_regs (priv adt)) {r_cntp_tval_el0: val} {r_cnthctl_el2: cnth'}}}
{share: (share adt) {gs: (gs (share adt)) # gidx == (gn {grec: r'})}}
end
else
if ec =? ESR_EL2_SYSREG_TIMER_CNTP_CTL_EL0 then
let masked := Z.land val CNTx_CTL_IMASK in
let cntp_ctl := Z.lor val CNTx_CTL_IMASK in
match masked =? 0, __timer_condition_met cntp_ctl with
| true, true =>
let r' := (grec gn) {g_ptimer: (g_ptimer (grec gn)) {t_masked: Z.land val CNTx_CTL_IMASK}} in
Some adt {priv: (priv adt) {cpu_regs: (cpu_regs (priv adt)) {r_cntp_ctl_el0: (Z.lor val CNTx_CTL_IMASK)}}}
{share: (share adt) {gs: (gs (share adt)) # gidx == (gn {grec: r'})}}
| _, _=>
let cnth' := Z.lor (r_cnthctl_el2 (g_regs (grec gn))) CNTHCTL_EL2_EL1PTEN in
let r' := (grec gn) {g_ptimer: (g_ptimer (grec gn)) {t_asserted: 0} {t_masked: Z.land val CNTx_CTL_IMASK}}
{g_regs: (g_regs (grec gn)) {r_cnthctl_el2: cnth'}} in
Some adt {priv: (priv adt) {cpu_regs: (cpu_regs (priv adt)) {r_cntp_ctl_el0: (Z.lor val CNTx_CTL_IMASK)}
{r_cnthctl_el2: cnth'}}}
{share: (share adt) {gs: (gs (share adt)) # gidx == (gn {grec: r'})}}
end
else
if ec =? ESR_EL2_SYSREG_TIMER_CNTP_CVAL_EL0 then
let cntp_ctl := r_cntp_ctl_el0 (cpu_regs (priv adt)) in
rely is_int64 cntp_ctl;
match t_masked (g_ptimer (grec gn)) =? 0, __timer_condition_met cntp_ctl with
| true, true =>
Some adt {priv: (priv adt) {cpu_regs: (cpu_regs (priv adt)) {r_cntp_cval_el0: val}}}
| _, _ =>
let r' := (grec gn) {g_ptimer: (g_ptimer (grec gn)) {t_asserted: 0}}
{g_regs: (g_regs (grec gn)) {r_cnthctl_el2: cnth'}} in
Some adt {priv: (priv adt) {cpu_regs: (cpu_regs (priv adt)) {r_cntp_cval_el0: val} {r_cnthctl_el2: cnth'}}}
{share: (share adt) {gs: (gs (share adt)) # gidx == (gn {grec: r'})}}
end
else
let cntp_ctl := r_cntp_ctl_el0 (cpu_regs (priv adt)) in
rely is_int64 cntp_ctl;
match t_masked (g_ptimer (grec gn)) =? 0, __timer_condition_met cntp_ctl with
| true, true =>
Some adt
| _, _ =>
let r' := (grec gn) {g_ptimer: (g_ptimer (grec gn)) {t_asserted: 0}}
{g_regs: (g_regs (grec gn)) {r_cnthctl_el2: cnth'}} in
Some adt {priv: (priv adt) {cpu_regs: (cpu_regs (priv adt)) {r_cnthctl_el2: cnth'}}}
{share: (share adt) {gs: (gs (share adt)) # gidx == (gn {grec: r'})}}
end
end.
End Spec.
|
function [oscilatior_corr_ifgs,oscilatior_corr_velocity] = env_oscilator_corr(envisat_flag,forced_sm_flag)
% This function performs oscillator drift correction for Envisat interferograms based on
% Petar Marinkovic's presentation at ESA Living Planet 2013 in Edinburgh.
% Approximation formula:
% dR/year = c/2 * (slantRangeTime_FAR - slantRangeTime_NEAR) * corrPerYear
% 'Apparent displacement' correction:
% dR/year ~ (7.8m * 5000)*3.87e-7 ~ 0.01482m
% Correction uses the pixel information in range and in adition for
% interferograms the temporal baseline information
%
% OPTIONAL INPUT:
% envisat_flag 'y' when envisat or 'n'. When empty, a
% search is performed in the master.res file
%
% OUTPUTS:
% oscilatior_corr_velocity Correction in mm for the velocity
% oscilatior_corr_ifgs Correction for individual interferograms in rad
%
% Correction is defined such that:
% Corrected interferogram/velocity = original interferogram/velocity - correction
%
% P. Marinkovic Envisat oscillator drift correction coded by David Bekaert -- University of Leeds 2014
%
% cite as:
% P. Marinkovic (PPO.labs) and Y. Larsen (NORUT)
% Consequences of Long-Term ASAR Local Oscillator Frequency Decay - an Empirical Study of 10 Years of Data
% ESA Living Planet Symposium (2013)
%
% Modifications
% 04/2014 DB Put non-envisat fix to zeros
% 04/2014 DB Allow forced SM oscillator drift computation
% 06/2014 DB Fixed error for SM computation and added extra envisat check.
% 06/2014 DB Fix in case not envisat and forced SM.
% 11/2014 DB Fix to make windows and linux compatible
% 03/2015 DB Clean script output
% 01/2017 DB Bug fix for non-envisat SM case. n_ifg was not n_image
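%
% Illustrative worked example (hypothetical numbers, not taken from any
% dataset): a pixel at range index 3000 gets a velocity correction of
%   7.8 * 3000 * 3.87e-7 * 1000 ~ 9.06 mm/yr,
% and for an interferogram with a 2-year temporal baseline and an assumed
% Envisat wavelength lambda ~ 0.0563 m this corresponds to a phase
% correction of about -4*pi/lambda * 9.06e-3 * 2 ~ -4.0 rad, matching the
% expression used for oscilatior_corr_ifgs below.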
if nargin<1 || isempty(envisat_flag)
% checking if this is envisat or not
platform=getparm('platform');
if isempty(platform)
if exist('master.res','file')==2
master_file = 'master.res';
elseif exist('../master.res','file')==2
master_file = '../master.res';
else
master_file = [];
end
if ~isempty(master_file)
% make windows and linux compatible
a = fileread(master_file);
ix = strfind(a,'ASAR');
if ~isempty(ix)
platform='ENVISAT';
end
else
fprintf('Could not check if this is Envisat \n')
end
end
if strcmpi(platform,'ENVISAT')
envisat_flag = 'y';
fprintf('This is Envisat, oscillator drift is being removed... \n')
else
envisat_flag = 'n';
end
end
small_baseline_flag = getparm('small_baseline_flag');
if nargin<2
forced_sm_flag=0;
end
if forced_sm_flag==1
small_baseline_flag='n';
end
if strcmp(envisat_flag,'y')
load psver
% use the ps2.mat data
ps = load(['ps' num2str(psver) '.mat']);
lambda = getparm('lambda');
% velocity map correction:
envisat_resolution = 7.8; % ground range resolution [m]
Oscilator_drift_corr_year = 3.87e-7; % drift correction in range [1/year]
% velocity correction in mm
oscilatior_corr_velocity = (envisat_resolution*ps.ij(:,3))*Oscilator_drift_corr_year*1000;
% interferogram
if strcmp(small_baseline_flag,'y')
n_ifg = ps.n_ifg;
delta_year = (ps.ifgday(:,2)-ps.ifgday(:,1))./365.25;
else
n_ifg = ps.n_image;
delta_year = (ps.day-ps.master_day)./365.25;
end
oscilatior_corr_ifgs = -4.*pi./lambda.*repmat(oscilatior_corr_velocity,1,n_ifg)./1000.*repmat(delta_year',ps.n_ps,1);
else
load psver
ps = load(['ps' num2str(psver) '.mat']);
if strcmp(small_baseline_flag,'y')
oscilatior_corr_ifgs = zeros([ps.n_ps ps.n_ifg]);
else
n_ifg = ps.n_image; % bug fix DB
oscilatior_corr_ifgs = zeros([ps.n_ps n_ifg]);
end
oscilatior_corr_velocity = zeros([ps.n_ps 1]);
end |
Require Import Kami.AllNotations.
Require Import Kami.Utila.
Require Import StdLibKami.FreeList.Ifc.
Section Impl.
Context {ifcParams : Ifc.Params}.
Local Definition arrayRegName := (name ++ ".data")%string.
Local Definition Tag := Bit lgSize.
Local Open Scope kami_expr.
Local Open Scope kami_action.
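(* The free list is represented as a Boolean array of length size stored in
   the register arrayRegName: an entry is true when the corresponding tag is
   currently allocated and false when it is free. initialize clears every
   entry, nextToAlloc returns the first free index (if any), alloc marks a
   tag as taken and reports whether it was previously free, and free releases
   a tag. *)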
Local Definition initialize ty: ActionT ty Void :=
Write arrayRegName: Array size Bool <- BuildArray (fun _ => $$false);
Retv.
Local Definition nextToAlloc ty: ActionT ty (Maybe Tag) :=
Read freeArray: Array size Bool <- arrayRegName;
Ret (fold_left
(fun (tag : Maybe Tag @# ty) (index : nat)
=> (IF tag @% "valid"
then tag
else STRUCT {
"valid" ::= !(#freeArray@[$index : Tag @# ty]);
"data" ::= ($index : Tag @# ty)
}))
(seq 0 size)
Invalid).
Local Definition alloc ty (tag: ty Tag): ActionT ty Bool :=
Read freeArray: Array size Bool <- arrayRegName;
LET res: Bool <- #freeArray@[#tag];
Write arrayRegName: Array size Bool <- #freeArray@[#tag <- $$true];
Ret !#res.
Local Definition free ty (tag: ty Tag): ActionT ty Void :=
Read freeArray: Array size Bool <- arrayRegName;
Write arrayRegName: Array size Bool <- #freeArray@[#tag <- $$false];
Retv.
Local Definition regs: list RegInitT := makeModule_regs ( Register arrayRegName: Array size Bool <- Default )%kami.
Definition impl: Ifc :=
{|
Ifc.regs := regs;
Ifc.regFiles := nil;
Ifc.initialize := initialize;
Ifc.nextToAlloc := nextToAlloc;
Ifc.alloc := alloc;
Ifc.free := free
|}.
Local Close Scope kami_action.
Local Close Scope kami_expr.
End Impl.
|
-- @@stderr --
dtrace: failed to compile script test/unittest/actions/printf/err.D_SYNTAX.double_prec.d: [D_SYNTAX] line 18: format conversion #1 has more than one '.' specified
|
[GOAL]
𝕜 : Type u
inst✝² : NontriviallyNormedField 𝕜
E : Type v
inst✝¹ : NormedAddCommGroup E
inst✝ : NormedSpace 𝕜 E
f : 𝕜 → E
⊢ support (deriv f) ⊆ tsupport f
[PROOFSTEP]
intro x
[GOAL]
𝕜 : Type u
inst✝² : NontriviallyNormedField 𝕜
E : Type v
inst✝¹ : NormedAddCommGroup E
inst✝ : NormedSpace 𝕜 E
f : 𝕜 → E
x : 𝕜
⊢ x ∈ support (deriv f) → x ∈ tsupport f
[PROOFSTEP]
rw [← not_imp_not]
[GOAL]
𝕜 : Type u
inst✝² : NontriviallyNormedField 𝕜
E : Type v
inst✝¹ : NormedAddCommGroup E
inst✝ : NormedSpace 𝕜 E
f : 𝕜 → E
x : 𝕜
⊢ ¬x ∈ tsupport f → ¬x ∈ support (deriv f)
[PROOFSTEP]
intro h2x
[GOAL]
𝕜 : Type u
inst✝² : NontriviallyNormedField 𝕜
E : Type v
inst✝¹ : NormedAddCommGroup E
inst✝ : NormedSpace 𝕜 E
f : 𝕜 → E
x : 𝕜
h2x : ¬x ∈ tsupport f
⊢ ¬x ∈ support (deriv f)
[PROOFSTEP]
rw [not_mem_tsupport_iff_eventuallyEq] at h2x
[GOAL]
𝕜 : Type u
inst✝² : NontriviallyNormedField 𝕜
E : Type v
inst✝¹ : NormedAddCommGroup E
inst✝ : NormedSpace 𝕜 E
f : 𝕜 → E
x : 𝕜
h2x : f =ᶠ[nhds x] 0
⊢ ¬x ∈ support (deriv f)
[PROOFSTEP]
exact nmem_support.mpr (h2x.deriv_eq.trans (deriv_const x 0))
|
function[pixAcc, meanClassPixAcc] = evaluatePixAccHierarchy_preload(imageList, probs, ~, overlapListCell, superPixelLabelHistosCell, varargin)
% [pixAcc, meanClassPixAcc] = evaluatePixAccHierarchy_preload(imageList, probs, ~, varargin)
%
% Same as evaluatePixAccPaint, but much faster by taking into account the SS hierarchy.
% To use this hierarchy, run reconstructSelSearchHierarchyFromFz().
%
% Copyright by Holger Caesar, 2015
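%
% Illustrative call (variable names are placeholders for the caller's own
% data, mirroring the signature above):
%   [pixAcc, meanClassPixAcc] = evaluatePixAccHierarchy_preload(imageList, ...
%       probs, [], overlapListCell, superPixelLabelHistosCell, 'printStatus', false);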
% Parse input
p = inputParser;
addParameter(p, 'printStatus', true);
parse(p, varargin{:});
printStatus = p.Results.printStatus;
% Init
labelCount = size(probs{1}, 2);
assert(labelCount > 1);
pixCorrectHisto = zeros(labelCount, 1);
pixTotalHisto = zeros(labelCount, 1);
imageCount = numel(imageList);
for imageIdx = 1 : imageCount,
if printStatus,
printProgress('Evaluating pixel accuracy for image', imageIdx, imageCount);
end;
% Skip images without ground-truth
if isempty(probs{imageIdx}),
continue;
end;
% Precompute maximum over labels
[maxProbs, maxInds] = max(probs{imageIdx}, [], 2);
% Compute maximum over regions (that contain a superpixel) and count pixels
[pixCorrectHisto, pixTotalHisto] = evaluatePixAccHierarchy_loop(maxProbs, maxInds, full(overlapListCell{imageIdx}), superPixelLabelHistosCell{imageIdx}, pixCorrectHisto, pixTotalHisto);
end;
% Compute overall accuracies
pixAcc = sum(pixCorrectHisto) / sum(pixTotalHisto);
classPixAcc = pixCorrectHisto ./ pixTotalHisto;
meanClassPixAcc = nanmean(classPixAcc); |
(**
CoLoR, a Coq library on rewriting and termination.
See the COPYRIGHTS and LICENSE files.
- Frederic Blanqui, 2005-06-17
general results on booleans
*)
Set Implicit Arguments.
From Coq Require Import Arith Lia.
From Coq Require Export Bool.
From Coq Require Setoid.
From CoLoR Require Import LogicUtil.
Arguments orb_false_elim [b1 b2] _.
Arguments orb_true_elim [b1 b2] _.
#[export] Hint Rewrite negb_orb negb_andb negb_involutive eqb_negb1 eqb_negb2
orb_true_r orb_true_l orb_false_r orb_false_l orb_negb_r orb_assoc
andb_false_r andb_false_l andb_true_r andb_true_l andb_negb_r andb_assoc
absoption_andb absoption_orb
xorb_false_r xorb_false_l xorb_nilpotent xorb_assoc_reverse
: bool.
Ltac bool := autorewrite with bool.
(***********************************************************************)
(** equality *)
Lemma false_not_true : forall b, b = false <-> ~(b = true).
Proof. destruct b; intuition. Qed.
Lemma beq_true : forall b c, b = c <-> (b = true <-> c = true).
Proof.
split; intro h. subst. tauto. destruct c.
tauto. rewrite false_not_true. intuition.
Qed.
(***********************************************************************)
(** implication *)
Lemma implb1 : forall b, implb b b = true.
Proof. induction b; refl. Qed.
Lemma implb2 : forall b, implb b true = true.
Proof. induction b; refl. Qed.
(***********************************************************************)
(** conjunction *)
Lemma andb_elim : forall b c, b && c = true -> b = true /\ c = true.
Proof. destruct b; destruct c; intuition. Qed.
Arguments andb_elim [b c] _.
Lemma andb_eliml : forall b c, b && c = true -> b = true.
Proof. destruct b; destruct c; intuition. Qed.
Arguments andb_eliml [b c] _.
Lemma andb_elimr : forall b c, b && c = true -> c = true.
Proof. destruct b; destruct c; intuition. Qed.
Arguments andb_elimr [b c] _.
Lemma andb_intro : forall b c, b = true -> c = true -> b && c = true.
Proof. intros. subst b. subst c. refl. Qed.
Lemma andb_eq : forall b c, b && c = true <-> b = true /\ c = true.
Proof. split. intro. apply andb_elim. hyp. intuition. Qed.
Lemma andb_eq_false : forall b c, b && c = false <-> b = false \/ c = false.
Proof. destruct b; destruct c; bool; intuition. Qed.
(***********************************************************************)
(** negation *)
Definition neg (A : Type) (f : A->A->bool) x y := negb (f x y).
Lemma negb_lr : forall b c, negb b = c <-> b = negb c.
Proof. destruct b; destruct c; intuition. Qed.
(***********************************************************************)
(** disjunction *)
Lemma orb_intror : forall b c, c = true -> b || c = true.
Proof. intros. subst. bool. refl. Qed.
Lemma orb_introl : forall b c, c = true -> b || c = true.
Proof. intros. subst. bool. refl. Qed.
Lemma orb_eq : forall b c, b || c = true <-> b = true \/ c = true.
Proof. intuition. destruct b; auto. Qed.
(***********************************************************************)
(** equality *)
Lemma eqb_equiv : forall b b', b = b' <-> (b = true <-> b' = true).
Proof.
intros b b'. split; intro H. subst b'. refl.
destruct b. sym. rewrite <- H. refl.
destruct b'. rewrite H. refl. refl.
Qed.
(***********************************************************************)
(** decidability *)
Section dec.
Variables (A : Type) (P : A -> Prop)
(f : A -> bool) (f_ok : forall x, f x = true <-> P x).
Lemma ko : forall x, f x = false <-> ~P x.
Proof. intro x. rewrite <- f_ok. destruct (f x); intuition; discr. Qed.
Lemma dec : forall x, {P x}+{~P x}.
Proof.
intro x. case_eq (f x); intros.
left. rewrite <- f_ok. hyp. right. rewrite <- ko. hyp.
Defined.
End dec.
Arguments ko [A P f] _ x.
Arguments dec [A P f] _ x.
(***********************************************************************)
(** correspondance between boolean functions and logical connectors *)
Section bool_ok.
Variables (A : Type) (P Q : A->Prop) (bP bQ : A-> bool)
(bP_ok : forall x, bP x = true <-> P x)
(bQ_ok : forall x, bQ x = true <-> Q x).
Lemma negb_ok : forall x, negb (bP x) = true <-> ~P x.
Proof. intro. rewrite <- (ko bP_ok). destruct (bP x); simpl; intuition. Qed.
Lemma andb_ok : forall x, bP x && bQ x = true <-> P x /\ Q x.
Proof. intro. rewrite andb_eq, bP_ok, bQ_ok. refl. Qed.
Lemma orb_ok : forall x, bP x || bQ x = true <-> P x \/ Q x.
Proof. intro. rewrite orb_eq, bP_ok, bQ_ok. refl. Qed.
Lemma implb_ok : forall x, implb (bP x) (bQ x) = true <-> (P x -> Q x).
Proof.
intro x. unfold implb. case_eq (bP x).
rewrite bP_ok, bQ_ok. tauto.
rewrite (ko bP_ok). tauto.
Qed.
End bool_ok.
(***********************************************************************)
(** checking a property (P i) for all i<n *)
Section bforall_lt.
Variables (P : nat->Prop) (bP : nat->bool)
(bP_ok : forall x, bP x = true <-> P x).
Definition forall_lt n := forall i, i < n -> P i.
Fixpoint bforall_lt_aux b n := b &&
match n with
| 0 => true
| S n' => bforall_lt_aux (bP n') n'
end.
Lemma bforall_lt_aux_ok : forall n b,
bforall_lt_aux b n = true <-> b = true /\ forall_lt n.
Proof.
unfold forall_lt. induction n; simpl; intros. bool. fo. lia.
rewrite andb_eq, IHn, bP_ok. intuition.
destruct (Nat.eq_dec i n). subst. hyp. apply H2. lia.
Qed.
Definition bforall_lt := bforall_lt_aux true.
Lemma bforall_lt_ok : forall n, bforall_lt n = true <-> forall_lt n.
Proof. intro. unfold bforall_lt. rewrite bforall_lt_aux_ok. tauto. Qed.
End bforall_lt.
|
(* Title: FOLP/IFOLP.thy
Author: Martin D Coen, Cambridge University Computer Laboratory
Copyright 1992 University of Cambridge
*)
section {* Intuitionistic First-Order Logic with Proofs *}
theory IFOLP
imports Pure
begin
ML_file "~~/src/Tools/misc_legacy.ML"
setup Pure_Thy.old_appl_syntax_setup
class "term"
default_sort "term"
typedecl p
typedecl o
consts
(*** Judgements ***)
Proof :: "[o,p]=>prop"
EqProof :: "[p,p,o]=>prop" ("(3_ /= _ :/ _)" [10,10,10] 5)
(*** Logical Connectives -- Type Formers ***)
eq :: "['a,'a] => o" (infixl "=" 50)
True :: "o"
False :: "o"
Not :: "o => o" ("~ _" [40] 40)
conj :: "[o,o] => o" (infixr "&" 35)
disj :: "[o,o] => o" (infixr "|" 30)
imp :: "[o,o] => o" (infixr "-->" 25)
iff :: "[o,o] => o" (infixr "<->" 25)
(*Quantifiers*)
All :: "('a => o) => o" (binder "ALL " 10)
Ex :: "('a => o) => o" (binder "EX " 10)
Ex1 :: "('a => o) => o" (binder "EX! " 10)
(*Rewriting gadgets*)
NORM :: "o => o"
norm :: "'a => 'a"
(*** Proof Term Formers: precedence must exceed 50 ***)
tt :: "p"
contr :: "p=>p"
fst :: "p=>p"
snd :: "p=>p"
pair :: "[p,p]=>p" ("(1<_,/_>)")
split :: "[p, [p,p]=>p] =>p"
inl :: "p=>p"
inr :: "p=>p"
when :: "[p, p=>p, p=>p]=>p"
lambda :: "(p => p) => p" (binder "lam " 55)
App :: "[p,p]=>p" (infixl "`" 60)
alll :: "['a=>p]=>p" (binder "all " 55)
app :: "[p,'a]=>p" (infixl "^" 55)
exists :: "['a,p]=>p" ("(1[_,/_])")
xsplit :: "[p,['a,p]=>p]=>p"
ideq :: "'a=>p"
idpeel :: "[p,'a=>p]=>p"
nrm :: p
NRM :: p
syntax "_Proof" :: "[p,o]=>prop" ("(_ /: _)" [51, 10] 5)
parse_translation {*
let fun proof_tr [p, P] = Const (@{const_syntax Proof}, dummyT) $ P $ p
in [(@{syntax_const "_Proof"}, K proof_tr)] end
*}
(*show_proofs = true displays the proof terms -- they are ENORMOUS*)
ML {* val show_proofs = Attrib.setup_config_bool @{binding show_proofs} (K false) *}
print_translation {*
let
fun proof_tr' ctxt [P, p] =
if Config.get ctxt show_proofs then Const (@{syntax_const "_Proof"}, dummyT) $ p $ P
else P
in [(@{const_syntax Proof}, proof_tr')] end
*}
(**** Propositional logic ****)
(*Equality*)
(* Like Intensional Equality in MLTT - but proofs distinct from terms *)
axiomatization where
ieqI: "ideq(a) : a=a" and
ieqE: "[| p : a=b; !!x. f(x) : P(x,x) |] ==> idpeel(p,f) : P(a,b)"
(* Truth and Falsity *)
axiomatization where
TrueI: "tt : True" and
FalseE: "a:False ==> contr(a):P"
(* Conjunction *)
axiomatization where
conjI: "[| a:P; b:Q |] ==> <a,b> : P&Q" and
conjunct1: "p:P&Q ==> fst(p):P" and
conjunct2: "p:P&Q ==> snd(p):Q"
(* Disjunction *)
axiomatization where
disjI1: "a:P ==> inl(a):P|Q" and
disjI2: "b:Q ==> inr(b):P|Q" and
disjE: "[| a:P|Q; !!x. x:P ==> f(x):R; !!x. x:Q ==> g(x):R
|] ==> when(a,f,g):R"
(* Implication *)
axiomatization where
impI: "\<And>P Q f. (!!x. x:P ==> f(x):Q) ==> lam x. f(x):P-->Q" and
mp: "\<And>P Q f. [| f:P-->Q; a:P |] ==> f`a:Q"
(*Quantifiers*)
axiomatization where
allI: "\<And>P. (!!x. f(x) : P(x)) ==> all x. f(x) : ALL x. P(x)" and
spec: "\<And>P f. (f:ALL x. P(x)) ==> f^x : P(x)"
axiomatization where
exI: "p : P(x) ==> [x,p] : EX x. P(x)" and
exE: "[| p: EX x. P(x); !!x u. u:P(x) ==> f(x,u) : R |] ==> xsplit(p,f):R"
(**** Equality between proofs ****)
axiomatization where
prefl: "a : P ==> a = a : P" and
psym: "a = b : P ==> b = a : P" and
ptrans: "[| a = b : P; b = c : P |] ==> a = c : P"
axiomatization where
idpeelB: "[| !!x. f(x) : P(x,x) |] ==> idpeel(ideq(a),f) = f(a) : P(a,a)"
axiomatization where
fstB: "a:P ==> fst(<a,b>) = a : P" and
sndB: "b:Q ==> snd(<a,b>) = b : Q" and
pairEC: "p:P&Q ==> p = <fst(p),snd(p)> : P&Q"
axiomatization where
whenBinl: "[| a:P; !!x. x:P ==> f(x) : Q |] ==> when(inl(a),f,g) = f(a) : Q" and
whenBinr: "[| b:P; !!x. x:P ==> g(x) : Q |] ==> when(inr(b),f,g) = g(b) : Q" and
plusEC: "a:P|Q ==> when(a,%x. inl(x),%y. inr(y)) = a : P|Q"
axiomatization where
applyB: "[| a:P; !!x. x:P ==> b(x) : Q |] ==> (lam x. b(x)) ` a = b(a) : Q" and
funEC: "f:P ==> f = lam x. f`x : P"
axiomatization where
specB: "[| !!x. f(x) : P(x) |] ==> (all x. f(x)) ^ a = f(a) : P(a)"
(**** Definitions ****)
defs
not_def: "~P == P-->False"
iff_def: "P<->Q == (P-->Q) & (Q-->P)"
(*Unique existence*)
ex1_def: "EX! x. P(x) == EX x. P(x) & (ALL y. P(y) --> y=x)"
(*Rewriting -- special constants to flag normalized terms and formulae*)
axiomatization where
norm_eq: "nrm : norm(x) = x" and
NORM_iff: "NRM : NORM(P) <-> P"
(*** Sequent-style elimination rules for & --> and ALL ***)
schematic_lemma conjE:
assumes "p:P&Q"
and "!!x y.[| x:P; y:Q |] ==> f(x,y):R"
shows "?a:R"
apply (rule assms(2))
apply (rule conjunct1 [OF assms(1)])
apply (rule conjunct2 [OF assms(1)])
done
schematic_lemma impE:
assumes "p:P-->Q"
and "q:P"
and "!!x. x:Q ==> r(x):R"
shows "?p:R"
apply (rule assms mp)+
done
schematic_lemma allE:
assumes "p:ALL x. P(x)"
and "!!y. y:P(x) ==> q(y):R"
shows "?p:R"
apply (rule assms spec)+
done
(*Duplicates the quantifier; for use with eresolve_tac*)
schematic_lemma all_dupE:
assumes "p:ALL x. P(x)"
and "!!y z.[| y:P(x); z:ALL x. P(x) |] ==> q(y,z):R"
shows "?p:R"
apply (rule assms spec)+
done
(*** Negation rules, which translate between ~P and P-->False ***)
schematic_lemma notI:
assumes "!!x. x:P ==> q(x):False"
shows "?p:~P"
unfolding not_def
apply (assumption | rule assms impI)+
done
schematic_lemma notE: "p:~P \<Longrightarrow> q:P \<Longrightarrow> ?p:R"
unfolding not_def
apply (drule (1) mp)
apply (erule FalseE)
done
(*This is useful with the special implication rules for each kind of P. *)
schematic_lemma not_to_imp:
assumes "p:~P"
and "!!x. x:(P-->False) ==> q(x):Q"
shows "?p:Q"
apply (assumption | rule assms impI notE)+
done
(* For substitution into an assumption P, reduce Q to P-->Q, substitute into
this implication, then apply impI to move P back into the assumptions.*)
schematic_lemma rev_mp: "[| p:P; q:P --> Q |] ==> ?p:Q"
apply (assumption | rule mp)+
done
(*Contrapositive of an inference rule*)
schematic_lemma contrapos:
assumes major: "p:~Q"
and minor: "!!y. y:P==>q(y):Q"
shows "?a:~P"
apply (rule major [THEN notE, THEN notI])
apply (erule minor)
done
(** Unique assumption tactic.
Ignores proof objects.
Fails unless one assumption is equal and exactly one is unifiable
**)
ML {*
local
fun discard_proof (Const (@{const_name Proof}, _) $ P $ _) = P;
in
fun uniq_assume_tac ctxt =
SUBGOAL
(fn (prem,i) =>
let val hyps = map discard_proof (Logic.strip_assums_hyp prem)
and concl = discard_proof (Logic.strip_assums_concl prem)
in
if exists (fn hyp => hyp aconv concl) hyps
then case distinct (op =) (filter (fn hyp => Term.could_unify (hyp, concl)) hyps) of
[_] => assume_tac ctxt i
| _ => no_tac
else no_tac
end);
end;
*}
(*** Modus Ponens Tactics ***)
(*Finds P-->Q and P in the assumptions, replaces implication by Q *)
ML {*
fun mp_tac ctxt i =
eresolve_tac [@{thm notE}, make_elim @{thm mp}] i THEN assume_tac ctxt i
*}
(*Like mp_tac but instantiates no variables*)
ML {*
fun int_uniq_mp_tac ctxt i =
eresolve_tac [@{thm notE}, @{thm impE}] i THEN uniq_assume_tac ctxt i
*}
(*** If-and-only-if ***)
schematic_lemma iffI:
assumes "!!x. x:P ==> q(x):Q"
and "!!x. x:Q ==> r(x):P"
shows "?p:P<->Q"
unfolding iff_def
apply (assumption | rule assms conjI impI)+
done
schematic_lemma iffE:
assumes "p:P <-> Q"
and "!!x y.[| x:P-->Q; y:Q-->P |] ==> q(x,y):R"
shows "?p:R"
apply (rule conjE)
apply (rule assms(1) [unfolded iff_def])
apply (rule assms(2))
apply assumption+
done
(* Destruct rules for <-> similar to Modus Ponens *)
schematic_lemma iffD1: "[| p:P <-> Q; q:P |] ==> ?p:Q"
unfolding iff_def
apply (rule conjunct1 [THEN mp], assumption+)
done
schematic_lemma iffD2: "[| p:P <-> Q; q:Q |] ==> ?p:P"
unfolding iff_def
apply (rule conjunct2 [THEN mp], assumption+)
done
schematic_lemma iff_refl: "?p:P <-> P"
apply (rule iffI)
apply assumption+
done
schematic_lemma iff_sym: "p:Q <-> P ==> ?p:P <-> Q"
apply (erule iffE)
apply (rule iffI)
apply (erule (1) mp)+
done
schematic_lemma iff_trans: "[| p:P <-> Q; q:Q<-> R |] ==> ?p:P <-> R"
apply (rule iffI)
apply (assumption | erule iffE | erule (1) impE)+
done
(*** Unique existence. NOTE THAT the following 2 quantifications
EX!x such that [EX!y such that P(x,y)] (sequential)
EX!x,y such that P(x,y) (simultaneous)
do NOT mean the same thing. The parser treats EX!x y.P(x,y) as sequential.
***)
schematic_lemma ex1I:
assumes "p:P(a)"
and "!!x u. u:P(x) ==> f(u) : x=a"
shows "?p:EX! x. P(x)"
unfolding ex1_def
apply (assumption | rule assms exI conjI allI impI)+
done
schematic_lemma ex1E:
assumes "p:EX! x. P(x)"
and "!!x u v. [| u:P(x); v:ALL y. P(y) --> y=x |] ==> f(x,u,v):R"
shows "?a : R"
apply (insert assms(1) [unfolded ex1_def])
apply (erule exE conjE | assumption | rule assms(1))+
apply (erule assms(2), assumption)
done
(*** <-> congruence rules for simplification ***)
(*Use iffE on a premise. For conj_cong, imp_cong, all_cong, ex_cong*)
ML {*
fun iff_tac prems i =
resolve_tac (prems RL [@{thm iffE}]) i THEN
REPEAT1 (eresolve_tac [asm_rl, @{thm mp}] i)
*}
schematic_lemma conj_cong:
assumes "p:P <-> P'"
and "!!x. x:P' ==> q(x):Q <-> Q'"
shows "?p:(P&Q) <-> (P'&Q')"
apply (insert assms(1))
apply (assumption | rule iffI conjI |
erule iffE conjE mp | tactic {* iff_tac @{thms assms} 1 *})+
done
schematic_lemma disj_cong:
"[| p:P <-> P'; q:Q <-> Q' |] ==> ?p:(P|Q) <-> (P'|Q')"
apply (erule iffE disjE disjI1 disjI2 | assumption | rule iffI | tactic {* mp_tac @{context} 1 *})+
done
schematic_lemma imp_cong:
assumes "p:P <-> P'"
and "!!x. x:P' ==> q(x):Q <-> Q'"
shows "?p:(P-->Q) <-> (P'-->Q')"
apply (insert assms(1))
apply (assumption | rule iffI impI | erule iffE | tactic {* mp_tac @{context} 1 *} |
tactic {* iff_tac @{thms assms} 1 *})+
done
schematic_lemma iff_cong:
"[| p:P <-> P'; q:Q <-> Q' |] ==> ?p:(P<->Q) <-> (P'<->Q')"
apply (erule iffE | assumption | rule iffI | tactic {* mp_tac @{context} 1 *})+
done
schematic_lemma not_cong:
"p:P <-> P' ==> ?p:~P <-> ~P'"
apply (assumption | rule iffI notI | tactic {* mp_tac @{context} 1 *} | erule iffE notE)+
done
schematic_lemma all_cong:
assumes "!!x. f(x):P(x) <-> Q(x)"
shows "?p:(ALL x. P(x)) <-> (ALL x. Q(x))"
apply (assumption | rule iffI allI | tactic {* mp_tac @{context} 1 *} | erule allE |
tactic {* iff_tac @{thms assms} 1 *})+
done
schematic_lemma ex_cong:
assumes "!!x. f(x):P(x) <-> Q(x)"
shows "?p:(EX x. P(x)) <-> (EX x. Q(x))"
apply (erule exE | assumption | rule iffI exI | tactic {* mp_tac @{context} 1 *} |
tactic {* iff_tac @{thms assms} 1 *})+
done
(*NOT PROVED
ML_Thms.bind_thm ("ex1_cong", prove_goal (the_context ())
"(!!x.f(x):P(x) <-> Q(x)) ==> ?p:(EX! x.P(x)) <-> (EX! x.Q(x))"
(fn prems =>
[ (REPEAT (eresolve_tac [ex1E, spec RS mp] 1 ORELSE ares_tac [iffI,ex1I] 1
ORELSE mp_tac 1
ORELSE iff_tac prems 1)) ]))
*)
(*** Equality rules ***)
lemmas refl = ieqI
schematic_lemma subst:
assumes prem1: "p:a=b"
and prem2: "q:P(a)"
shows "?p : P(b)"
apply (rule prem2 [THEN rev_mp])
apply (rule prem1 [THEN ieqE])
apply (rule impI)
apply assumption
done
schematic_lemma sym: "q:a=b ==> ?c:b=a"
apply (erule subst)
apply (rule refl)
done
schematic_lemma trans: "[| p:a=b; q:b=c |] ==> ?d:a=c"
apply (erule (1) subst)
done
(** ~ b=a ==> ~ a=b **)
schematic_lemma not_sym: "p:~ b=a ==> ?q:~ a=b"
apply (erule contrapos)
apply (erule sym)
done
schematic_lemma ssubst: "p:b=a \<Longrightarrow> q:P(a) \<Longrightarrow> ?p:P(b)"
apply (drule sym)
apply (erule subst)
apply assumption
done
(*A special case of ex1E that would otherwise need quantifier expansion*)
schematic_lemma ex1_equalsE: "[| p:EX! x. P(x); q:P(a); r:P(b) |] ==> ?d:a=b"
apply (erule ex1E)
apply (rule trans)
apply (rule_tac [2] sym)
apply (assumption | erule spec [THEN mp])+
done
(** Polymorphic congruence rules **)
schematic_lemma subst_context: "[| p:a=b |] ==> ?d:t(a)=t(b)"
apply (erule ssubst)
apply (rule refl)
done
schematic_lemma subst_context2: "[| p:a=b; q:c=d |] ==> ?p:t(a,c)=t(b,d)"
apply (erule ssubst)+
apply (rule refl)
done
schematic_lemma subst_context3: "[| p:a=b; q:c=d; r:e=f |] ==> ?p:t(a,c,e)=t(b,d,f)"
apply (erule ssubst)+
apply (rule refl)
done
(*Useful with eresolve_tac for proving equalities from known equalities.
a = b
| |
c = d *)
schematic_lemma box_equals: "[| p:a=b; q:a=c; r:b=d |] ==> ?p:c=d"
apply (rule trans)
apply (rule trans)
apply (rule sym)
apply assumption+
done
(*Dual of box_equals: for proving equalities backwards*)
schematic_lemma simp_equals: "[| p:a=c; q:b=d; r:c=d |] ==> ?p:a=b"
apply (rule trans)
apply (rule trans)
apply (assumption | rule sym)+
done
(** Congruence rules for predicate letters **)
schematic_lemma pred1_cong: "p:a=a' ==> ?p:P(a) <-> P(a')"
apply (rule iffI)
apply (tactic {* DEPTH_SOLVE (atac 1 ORELSE eresolve_tac [@{thm subst}, @{thm ssubst}] 1) *})
done
schematic_lemma pred2_cong: "[| p:a=a'; q:b=b' |] ==> ?p:P(a,b) <-> P(a',b')"
apply (rule iffI)
apply (tactic {* DEPTH_SOLVE (atac 1 ORELSE eresolve_tac [@{thm subst}, @{thm ssubst}] 1) *})
done
schematic_lemma pred3_cong: "[| p:a=a'; q:b=b'; r:c=c' |] ==> ?p:P(a,b,c) <-> P(a',b',c')"
apply (rule iffI)
apply (tactic {* DEPTH_SOLVE (atac 1 ORELSE eresolve_tac [@{thm subst}, @{thm ssubst}] 1) *})
done
lemmas pred_congs = pred1_cong pred2_cong pred3_cong
(*special case for the equality predicate!*)
lemmas eq_cong = pred2_cong [where P = "op ="]
(*** Simplifications of assumed implications.
Roy Dyckhoff has proved that conj_impE, disj_impE, and imp_impE
used with mp_tac (restricted to atomic formulae) is COMPLETE for
intuitionistic propositional logic. See
R. Dyckhoff, Contraction-free sequent calculi for intuitionistic logic
(preprint, University of St Andrews, 1991) ***)
schematic_lemma conj_impE:
assumes major: "p:(P&Q)-->S"
and minor: "!!x. x:P-->(Q-->S) ==> q(x):R"
shows "?p:R"
apply (assumption | rule conjI impI major [THEN mp] minor)+
done
schematic_lemma disj_impE:
assumes major: "p:(P|Q)-->S"
and minor: "!!x y.[| x:P-->S; y:Q-->S |] ==> q(x,y):R"
shows "?p:R"
apply (tactic {* DEPTH_SOLVE (atac 1 ORELSE
resolve_tac [@{thm disjI1}, @{thm disjI2}, @{thm impI},
@{thm major} RS @{thm mp}, @{thm minor}] 1) *})
done
(*Simplifies the implication. Classical version is stronger.
Still UNSAFE since Q must be provable -- backtracking needed. *)
schematic_lemma imp_impE:
assumes major: "p:(P-->Q)-->S"
and r1: "!!x y.[| x:P; y:Q-->S |] ==> q(x,y):Q"
and r2: "!!x. x:S ==> r(x):R"
shows "?p:R"
apply (assumption | rule impI major [THEN mp] r1 r2)+
done
(*Simplifies the implication. Classical version is stronger.
Still UNSAFE since ~P must be provable -- backtracking needed. *)
schematic_lemma not_impE:
assumes major: "p:~P --> S"
and r1: "!!y. y:P ==> q(y):False"
and r2: "!!y. y:S ==> r(y):R"
shows "?p:R"
apply (assumption | rule notI impI major [THEN mp] r1 r2)+
done
(*Simplifies the implication. UNSAFE. *)
schematic_lemma iff_impE:
assumes major: "p:(P<->Q)-->S"
and r1: "!!x y.[| x:P; y:Q-->S |] ==> q(x,y):Q"
and r2: "!!x y.[| x:Q; y:P-->S |] ==> r(x,y):P"
and r3: "!!x. x:S ==> s(x):R"
shows "?p:R"
apply (assumption | rule iffI impI major [THEN mp] r1 r2 r3)+
done
(*What if (ALL x.~~P(x)) --> ~~(ALL x.P(x)) is an assumption? UNSAFE*)
schematic_lemma all_impE:
assumes major: "p:(ALL x. P(x))-->S"
and r1: "!!x. q:P(x)"
and r2: "!!y. y:S ==> r(y):R"
shows "?p:R"
apply (assumption | rule allI impI major [THEN mp] r1 r2)+
done
(*Unsafe: (EX x.P(x))-->S is equivalent to ALL x.P(x)-->S. *)
schematic_lemma ex_impE:
assumes major: "p:(EX x. P(x))-->S"
and r: "!!y. y:P(a)-->S ==> q(y):R"
shows "?p:R"
apply (assumption | rule exI impI major [THEN mp] r)+
done
schematic_lemma rev_cut_eq:
assumes "p:a=b"
and "!!x. x:a=b ==> f(x):R"
shows "?p:R"
apply (rule assms)+
done
lemma thin_refl: "!!X. [|p:x=x; PROP W|] ==> PROP W" .
ML_file "hypsubst.ML"
ML {*
structure Hypsubst = Hypsubst
(
(*Take apart an equality judgement; otherwise raise Match!*)
fun dest_eq (Const (@{const_name Proof}, _) $
(Const (@{const_name eq}, _) $ t $ u) $ _) = (t, u);
val imp_intr = @{thm impI}
(*etac rev_cut_eq moves an equality to be the last premise. *)
val rev_cut_eq = @{thm rev_cut_eq}
val rev_mp = @{thm rev_mp}
val subst = @{thm subst}
val sym = @{thm sym}
val thin_refl = @{thm thin_refl}
);
open Hypsubst;
*}
ML_file "intprover.ML"
(*** Rewrite rules ***)
schematic_lemma conj_rews:
"?p1 : P & True <-> P"
"?p2 : True & P <-> P"
"?p3 : P & False <-> False"
"?p4 : False & P <-> False"
"?p5 : P & P <-> P"
"?p6 : P & ~P <-> False"
"?p7 : ~P & P <-> False"
"?p8 : (P & Q) & R <-> P & (Q & R)"
apply (tactic {* fn st => IntPr.fast_tac @{context} 1 st *})+
done
schematic_lemma disj_rews:
"?p1 : P | True <-> True"
"?p2 : True | P <-> True"
"?p3 : P | False <-> P"
"?p4 : False | P <-> P"
"?p5 : P | P <-> P"
"?p6 : (P | Q) | R <-> P | (Q | R)"
apply (tactic {* IntPr.fast_tac @{context} 1 *})+
done
schematic_lemma not_rews:
"?p1 : ~ False <-> True"
"?p2 : ~ True <-> False"
apply (tactic {* IntPr.fast_tac @{context} 1 *})+
done
schematic_lemma imp_rews:
"?p1 : (P --> False) <-> ~P"
"?p2 : (P --> True) <-> True"
"?p3 : (False --> P) <-> True"
"?p4 : (True --> P) <-> P"
"?p5 : (P --> P) <-> True"
"?p6 : (P --> ~P) <-> ~P"
apply (tactic {* IntPr.fast_tac @{context} 1 *})+
done
schematic_lemma iff_rews:
"?p1 : (True <-> P) <-> P"
"?p2 : (P <-> True) <-> P"
"?p3 : (P <-> P) <-> True"
"?p4 : (False <-> P) <-> ~P"
"?p5 : (P <-> False) <-> ~P"
apply (tactic {* IntPr.fast_tac @{context} 1 *})+
done
schematic_lemma quant_rews:
"?p1 : (ALL x. P) <-> P"
"?p2 : (EX x. P) <-> P"
apply (tactic {* IntPr.fast_tac @{context} 1 *})+
done
(*These are NOT supplied by default!*)
schematic_lemma distrib_rews1:
"?p1 : ~(P|Q) <-> ~P & ~Q"
"?p2 : P & (Q | R) <-> P&Q | P&R"
"?p3 : (Q | R) & P <-> Q&P | R&P"
"?p4 : (P | Q --> R) <-> (P --> R) & (Q --> R)"
apply (tactic {* IntPr.fast_tac @{context} 1 *})+
done
schematic_lemma distrib_rews2:
"?p1 : ~(EX x. NORM(P(x))) <-> (ALL x. ~NORM(P(x)))"
"?p2 : ((EX x. NORM(P(x))) --> Q) <-> (ALL x. NORM(P(x)) --> Q)"
"?p3 : (EX x. NORM(P(x))) & NORM(Q) <-> (EX x. NORM(P(x)) & NORM(Q))"
"?p4 : NORM(Q) & (EX x. NORM(P(x))) <-> (EX x. NORM(Q) & NORM(P(x)))"
apply (tactic {* IntPr.fast_tac @{context} 1 *})+
done
lemmas distrib_rews = distrib_rews1 distrib_rews2
schematic_lemma P_Imp_P_iff_T: "p:P ==> ?p:(P <-> True)"
apply (tactic {* IntPr.fast_tac @{context} 1 *})
done
schematic_lemma not_P_imp_P_iff_F: "p:~P ==> ?p:(P <-> False)"
apply (tactic {* IntPr.fast_tac @{context} 1 *})
done
end
|
import .size --.single
open set
-- Embeddings and subtypes
open_locale classical
noncomputable theory
mk_simp_attribute coe_up "upwards coercion simp lemmas"
universes u v w
instance coe_set_from_subtype {β : Type*} {S : set β} : has_coe (set S) (set β) := ⟨λ X, coe '' X⟩
/-- the intersection X ∩ S, viewed as a (set S) -/
def inter_subtype {β : Type*} (S X : set β) : (set S) := coe ⁻¹' X
variables {α : Type*} [fintype α] {S : set α}
@[coe_up] lemma subtype_coe_singleton (e : S) :
(({(e : S)} : set S) : set α) = {(e : α)} :=
image_singleton
@[coe_up] lemma subtype_coe_size (X : set S) : size X = size (X : set α) :=
(size_subtype_image X).symm
@[coe_up] lemma subtype_coe_subset {X Y : set S} :
(X ⊆ Y) ↔ ((X: set α) ⊆ (Y: set α)) :=
(image_subset_image_iff subtype.coe_injective).symm
@[coe_up] lemma subtype_set_coe_inj {X Y : set S} :
((X: set α) = (Y: set α)) ↔ (X = Y) :=
image_eq_image subtype.coe_injective
@[coe_up] lemma subtype_coe_ssubset {X Y : set S} :
(X ⊂ Y) ↔ ((X : set α) ⊂ (Y : set α)) :=
by simp_rw [ssubset_iff_subset_not_supset, subtype_coe_subset]
@[coe_up] lemma subtype_coe_union {X Y : set S} :
(((X ∪ Y) : set S) : set α) = (X ∪ Y ) :=
image_union subtype.val X Y
@[coe_up] lemma subtype_coe_inter {X Y : set S} :
(((X ∩ Y) : set S) : set α) = (X ∩ Y) :=
(image_inter subtype.coe_injective).symm
lemma subtype_coe_diff {X Y : set S} :
((X \ Y : set S) : set α) = X \ Y :=
image_diff (subtype.coe_injective) X Y
@[coe_up] lemma coe_univ :
((univ : set S) : set α) = S :=
by tidy
@[coe_up] lemma coe_empty :
((∅ : set S) : set α) = ∅ :=
by tidy
@[coe_up] lemma coe_set_is_subset (X : set S) :
(X : set α) ⊆ S :=
by tidy
@[coe_up] lemma subtype_coe_compl {X : set S} :
(((Xᶜ : set S)) : set α) = S \ (X : set α) :=
by rw [compl_eq_univ_diff, subtype_coe_diff, coe_univ]
@[coe_up] lemma coNE_inter_subtype (X : set α) :
((inter_subtype S X) : set α) = X ∩ S :=
begin
ext x, simp only [inter_subtype, set.mem_image, set.mem_inter_eq],
refine ⟨λ h, _, λ h, _⟩,
{ rcases h with ⟨⟨y,hy⟩,h,rfl⟩, simp only [subtype.coe_mk], exact ⟨h,hy⟩},
exact ⟨⟨x,h.2⟩,⟨h.1,by simp⟩⟩,
end
@[coe_up] lemma sizNE_inter_subtype (X : set α) :
size (inter_subtype S X) = size (X ∩ S) :=
by rw [subtype_coe_size, coNE_inter_subtype]
@[coe_up] lemma inter_subtype_eq_iff (X Y : set α) :
inter_subtype S X = inter_subtype S Y ↔ (X ∩ S = Y ∩ S) :=
by rw [←subtype_set_coe_inj, coNE_inter_subtype, coNE_inter_subtype]
@[simp] lemma function.embedding.image_trans {α β C : Type*} (e₁ : α ↪ β) (e₂ : β ↪ C) (X : set α) :
(e₁.trans e₂) '' X = e₂ '' (e₁ '' X) :=
by {unfold function.embedding.trans, rw ← image_comp, refl, }
@[simp] lemma equiv.image_trans {α β γ : Type*} (e₁ : α ≃ β) (e₂ : β ≃ γ) (X : set α) :
(e₁.trans e₂) '' X = e₂ '' (e₁ '' X) :=
by {unfold equiv.trans, rw ← image_comp, refl, } |
Another amazing couple - we love them - they are hilarious, outgoing, fun, go-getters, laugh ALL the time and are as sweet as can be. We have been looking forward to this wedding forever - and again we feel like we became friends, and I want to always stay in touch!!! As I went through their ceremony I couldn't help but smile at how much Hayley laughed - the pastor was hilarious too!
For their engagement photos - click here: Austen and Hayley Canyon Lake - they are adorable I know.... wait till you see their wedding photos below. I love seeing people casual and then dressed up!
Their PASS site is: Austen and Hayley. Pics can be downloaded if you see ones you want! There is also an inexpensive print option using a high-quality professional printing company.
Austen and Hayley are from the same town in Washington. When they were 18 they were supposed to be set up on a blind date, but for some reason, it never happened. They ran into each other a few years later in 2009, hit it off, and really never looked back. He had plans of moving to Southern California in January of 2010, and as he packed his things (3 months into their relationship) they knew it wasn't the end. After many visits and a lot of thought, Hayley followed him 5 short months later. As Hayley writes: "Together, we have carved our own path and are accomplishing our dreams one day at a time."
Venue: La Valencia Hotel, La Jolla (I know right??? amazing venue) - the day was perfect - the sun didn't come out too much, but that actually made it so nice for photos. The water was pretty crazy that day - I am always amazed how much the tide changes. The sea lions were out in full force!! The whole thing was amazing for their guests. Most people made a weekend out of it, and who wouldn't? San Diego is just an amazing place.
(*************************************************************)
(* Copyright Dominique Larchey-Wendling [*] *)
(* *)
(* [*] Affiliation LORIA -- CNRS *)
(*************************************************************)
(* This file is distributed under the terms of the *)
(* CeCILL v2 FREE SOFTWARE LICENSE AGREEMENT *)
(*************************************************************)
(* ** Object-level encoding of bounded universal quantification I *)
Require Import Arith Lia List Bool Setoid.
From Undecidability.Shared.Libs.DLW.Utils
Require Import utils_tac gcd prime binomial sums bool_nat rel_iter.
From Undecidability.H10.ArithLibs
Require Import luca.
From Undecidability.H10.Matija
Require Import cipher.
From Undecidability.H10.Dio
Require Import dio_logic dio_expo.
Set Implicit Arguments.
Set Default Proof Using "Type".
Local Infix "≲" := binary_le (at level 70, no associativity).
Local Notation power := (mscal mult 1).
Local Notation "∑" := (msum plus 0).
Local Infix "⇣" := nat_meet (at level 40, left associativity).
Local Infix "⇡" := nat_join (at level 50, left associativity).
(* This the Diophantine encoding of binomial coefficents *)
Section dio_fun_binomial.
Let plus_cancel_l : forall a b c, a + b = a + c -> b = c.
Proof. intros; lia. Qed.
Hint Resolve Nat.mul_add_distr_r : core.
(* We use this characterization with Newton's binomial expansion
(1+q)^n = ∑ binomial(n,i).q^i when q > 2^n
*)
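(* Illustrative instance (not used in the proofs below): for n = 2 one takes
   q = power (1+n) 2 = 8, and (1+q)^n = 9^2 = 81 = 1*8^2 + 2*8 + 1, so the
   base-q digits of (1+q)^n are 1, 2, 1 = binomial 2 0, binomial 2 1,
   binomial 2 2; is_digit then reads off the wanted coefficient. *)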
Let is_binomial_eq b n k :
b = binomial n k
<-> exists q, q = power (1+n) 2
/\ is_digit (power n (1+q)) q k b.
Proof.
split.
+ intros ?; subst.
set (q := power (1+n) 2).
assert (Hq : q <> 0).
{ unfold q; generalize (@power_ge_1 (S n) 2); intros; simpl; lia. }
set (c := power n (1+q)).
exists q; split; auto; split.
* apply binomial_lt_power.
* fold c.
destruct (le_lt_dec k n) as [ Hk | Hk ].
- exists (∑ (n-k) (fun i => binomial n (S k+i) * power i q)),
(∑ k (fun i => binomial n i * power i q)); split; auto.
2: { apply sum_power_lt; auto; intros; apply binomial_lt_power. }
rewrite Nat.mul_add_distr_r, <- mult_assoc, <- power_S.
rewrite <- sum_0n_distr_r with (1 := Nat_plus_monoid) (3 := Nat_mult_monoid); auto.
rewrite <- plus_assoc, (plus_comm _ (∑ _ _)).
rewrite <- msum_plus1 with (f := fun i => binomial n i * power i q); auto.
rewrite plus_comm.
unfold c.
rewrite Newton_nat_S.
replace (S n) with (S k + (n-k)) by lia.
rewrite msum_plus; auto; f_equal; apply msum_ext.
intros; rewrite power_plus; ring.
- exists 0, c.
rewrite binomial_gt; auto.
rewrite Nat.mul_0_l; split; auto.
unfold c.
apply lt_le_trans with (power (S n) q).
++ rewrite Newton_nat_S.
apply sum_power_lt; auto.
intros; apply binomial_lt_power.
++ apply power_mono; lia.
+ intros (q & H1 & H3).
assert (Hq : q <> 0).
{ rewrite H1; generalize (@power_ge_1 (S n) 2); intros; simpl; lia. }
rewrite Newton_nat_S in H3.
apply is_digit_fun with (1 := H3).
destruct (le_lt_dec k n) as [ Hk | Hk ].
* red; split.
- subst; apply binomial_lt_power.
- exists (∑ (n-k) (fun i => binomial n (S k+i) * power i q)),
(∑ k (fun i => binomial n i * power i q)); split.
2: { apply sum_power_lt; auto; intros; subst; apply binomial_lt_power. }
rewrite Nat.mul_add_distr_r, <- mult_assoc, <- power_S.
rewrite <- sum_0n_distr_r with (1 := Nat_plus_monoid) (3 := Nat_mult_monoid); auto.
rewrite <- plus_assoc, (plus_comm _ (∑ _ _)).
rewrite <- msum_plus1 with (f := fun i => binomial n i * power i q); auto.
rewrite plus_comm.
replace (S n) with (S k + (n-k)) by lia.
rewrite msum_plus; auto; f_equal.
apply msum_ext.
intros; rewrite power_plus; ring.
* rewrite binomial_gt; auto.
rewrite <- Newton_nat_S.
split; try lia.
exists 0, (power n (1+q)); split; auto.
apply lt_le_trans with (power (S n) q).
- rewrite Newton_nat_S.
apply sum_power_lt; auto.
subst; intros; apply binomial_lt_power.
- apply power_mono; lia.
Qed.
Lemma dio_fun_binomial n k : 𝔻F n -> 𝔻F k -> 𝔻F (fun ν => binomial (n ν) (k ν)).
Proof.
dio by lemma (fun ν => is_binomial_eq (ν 0) (n ν⭳) (k ν⭳)).
Defined.
End dio_fun_binomial.
#[export] Hint Resolve dio_fun_binomial : dio_fun_db.
Local Fact dio_fun_binomial_example : 𝔻F (fun ν => binomial (ν 0) (ν 1)).
Proof. dio auto. Defined.
(* Check dio_fun_binomial_example. *)
(* Eval compute in df_size_Z (proj1_sig dio_fun_binomial_example). *)
(* This result comes from Lucas' theorem *)
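(* Sanity check of the statement on small values (illustrative only): with
   m = 5 (binary 101) and n = 1 (binary 001) every bit of n is also set in m
   and binomial 5 1 = 5 is odd, whereas for n = 2 (binary 010) some bit of n
   is not set in m and binomial 5 2 = 10 is even. *)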
Theorem binary_le_binomial n m : n ≲ m <-> rem (binomial m n) 2 = 1.
Proof.
split.
+ induction 1 as [ n | n m H1 H2 IH2 ].
* rewrite binomial_n0, rem_lt; lia.
* rewrite lucas_lemma with (1 := prime_2) (2 := div_rem_spec1 m 2) (4 := div_rem_spec1 n 2);
try (apply div_rem_spec2; lia).
rewrite mult_comm, <- rem_mult_rem, IH2, Nat.mul_1_r.
revert H1.
generalize (rem_2_is_0_or_1 m) (rem_2_is_0_or_1 n).
intros [ G1 | G1 ] [ G2 | G2 ]; rewrite G1, G2; intros; try lia.
++ rewrite binomial_n0, rem_lt; lia.
++ rewrite binomial_n0, rem_lt; lia.
++ rewrite binomial_n1, rem_lt; lia.
+ induction on n m as IH with measure m.
destruct (eq_nat_dec m 0) as [ Hm | Hm ].
* destruct n; try (intros; constructor; fail).
subst; rewrite binomial_gt, rem_lt; lia.
* generalize (div_rem_spec1 m 2) (div_rem_spec1 n 2); intros H1 H2.
rewrite lucas_lemma with (1 := prime_2) (2 := H1) (4 := H2); auto;
try (apply div_rem_spec2; lia).
rewrite rem_2_mult; intros (H3 & H4).
apply IH in H3; try lia.
constructor 2; auto.
revert H4.
generalize (rem_2_is_0_or_1 m) (rem_2_is_0_or_1 n).
intros [ G1 | G1 ] [ G2 | G2 ]; rewrite G1, G2; intros; try lia.
rewrite binomial_gt, rem_lt in H4; lia.
Qed.
Theorem dio_rel_binary_le x y : 𝔻F x -> 𝔻F y -> 𝔻R (fun v => x v ≲ y v).
Proof.
dio by lemma (fun v => binary_le_binomial (x v) (y v)).
Defined.
#[export] Hint Resolve dio_rel_binary_le : dio_rel_db.
Theorem dio_fun_nat_meet a b : 𝔻F a -> 𝔻F b -> 𝔻F (fun ν => a ν ⇣ b ν).
Proof.
dio by lemma (fun v => nat_meet_dio (v 0) (a v⭳) (b v⭳)).
Defined.
#[export] Hint Resolve dio_fun_nat_meet : dio_fun_db.
|
/-
Copyright (c) 2021 Anne Baanen. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Anne Baanen
-/
import data.zmod.basic
import group_theory.group_action.quotient
import ring_theory.int.basic
import ring_theory.ideal.quotient_operations
/-!
# `zmod n` and quotient groups / rings
This file relates `zmod n` to the quotient group
`quotient_add_group.quotient (add_subgroup.zmultiples n)` and to the quotient ring
`(ideal.span {n}).quotient`.
## Main definitions
- `zmod.quotient_zmultiples_nat_equiv_zmod` and `zmod.quotient_zmultiples_equiv_zmod`:
`zmod n` is the group quotient of `ℤ` by `n ℤ := add_subgroup.zmultiples (n)`,
(where `n : ℕ` and `n : ℤ` respectively)
- `zmod.quotient_span_nat_equiv_zmod` and `zmod.quotient_span_equiv_zmod`:
`zmod n` is the ring quotient of `ℤ` by `n ℤ : ideal.span {n}`
(where `n : ℕ` and `n : ℤ` respectively)
- `zmod.lift n f` is the map from `zmod n` induced by `f : ℤ →+ A` that maps `n` to `0`.
## Tags
zmod, quotient group, quotient ring, ideal quotient
-/
open quotient_add_group
open zmod
variables (n : ℕ) {A R : Type*} [add_group A] [ring R]
namespace int
/-- `ℤ` modulo multiples of `n : ℕ` is `zmod n`. -/
def quotient_zmultiples_nat_equiv_zmod :
ℤ ⧸ add_subgroup.zmultiples (n : ℤ) ≃+ zmod n :=
(quotient_add_equiv_of_eq (zmod.ker_int_cast_add_hom _)).symm.trans $
quotient_ker_equiv_of_right_inverse (int.cast_add_hom (zmod n)) coe int_cast_zmod_cast
/-- `ℤ` modulo multiples of `a : ℤ` is `zmod a.nat_abs`. -/
def quotient_zmultiples_equiv_zmod (a : ℤ) :
ℤ ⧸ add_subgroup.zmultiples a ≃+ zmod a.nat_abs :=
(quotient_add_equiv_of_eq (zmultiples_nat_abs a)).symm.trans
(quotient_zmultiples_nat_equiv_zmod a.nat_abs)
/-- `ℤ` modulo the ideal generated by `n : ℕ` is `zmod n`. -/
def quotient_span_nat_equiv_zmod :
ℤ ⧸ ideal.span {↑n} ≃+* zmod n :=
(ideal.quot_equiv_of_eq (zmod.ker_int_cast_ring_hom _)).symm.trans $
ring_hom.quotient_ker_equiv_of_right_inverse $
show function.right_inverse coe (int.cast_ring_hom (zmod n)),
from int_cast_zmod_cast
/-- `ℤ` modulo the ideal generated by `a : ℤ` is `zmod a.nat_abs`. -/
def quotient_span_equiv_zmod (a : ℤ) :
ℤ ⧸ ideal.span ({a} : set ℤ) ≃+* zmod a.nat_abs :=
(ideal.quot_equiv_of_eq (span_nat_abs a)).symm.trans
(quotient_span_nat_equiv_zmod a.nat_abs)
end int
namespace add_action
open add_subgroup add_monoid_hom add_equiv function
variables {α β : Type*} [add_group α] (a : α) [add_action α β] (b : β)
/-- The quotient `(ℤ ∙ a) ⧸ (stabilizer b)` is cyclic of order `minimal_period ((+ᵥ) a) b`. -/
noncomputable def zmultiples_quotient_stabilizer_equiv :
zmultiples a ⧸ stabilizer (zmultiples a) b ≃+ zmod (minimal_period ((+ᵥ) a) b) :=
(of_bijective (map _ (stabilizer (zmultiples a) b)
(zmultiples_hom (zmultiples a) ⟨a, mem_zmultiples a⟩) (by
{ rw [zmultiples_le, mem_comap, mem_stabilizer_iff,
zmultiples_hom_apply, coe_nat_zsmul, ←vadd_iterate],
exact is_periodic_pt_minimal_period ((+ᵥ) a) b })) ⟨by
{ rw [←ker_eq_bot_iff, eq_bot_iff],
refine λ q, induction_on' q (λ n hn, _),
rw [mem_bot, eq_zero_iff, int.mem_zmultiples_iff, ←zsmul_vadd_eq_iff_minimal_period_dvd],
exact (eq_zero_iff _).mp hn },
λ q, induction_on' q (λ ⟨_, n, rfl⟩, ⟨n, rfl⟩)⟩).symm.trans
(int.quotient_zmultiples_nat_equiv_zmod (minimal_period ((+ᵥ) a) b))
lemma zmultiples_quotient_stabilizer_equiv_symm_apply (n : zmod (minimal_period ((+ᵥ) a) b)) :
(zmultiples_quotient_stabilizer_equiv a b).symm n =
(n : ℤ) • (⟨a, mem_zmultiples a⟩ : zmultiples a) :=
rfl
end add_action
namespace mul_action
open add_action subgroup add_subgroup function
variables {α β : Type*} [group α] (a : α) [mul_action α β] (b : β)
local attribute [semireducible] mul_opposite
/-- The quotient `(a ^ ℤ) ⧸ (stabilizer b)` is cyclic of order `minimal_period ((•) a) b`. -/
noncomputable def zpowers_quotient_stabilizer_equiv :
zpowers a ⧸ stabilizer (zpowers a) b ≃* multiplicative (zmod (minimal_period ((•) a) b)) :=
let f := zmultiples_quotient_stabilizer_equiv (additive.of_mul a) b in
⟨f.to_fun, f.inv_fun, f.left_inv, f.right_inv, f.map_add'⟩
lemma zpowers_quotient_stabilizer_equiv_symm_apply (n : zmod (minimal_period ((•) a) b)) :
(zpowers_quotient_stabilizer_equiv a b).symm n = (⟨a, mem_zpowers a⟩ : zpowers a) ^ (n : ℤ) :=
rfl
/-- The orbit `(a ^ ℤ) • b` is a cycle of order `minimal_period ((•) a) b`. -/
noncomputable def orbit_zpowers_equiv : orbit (zpowers a) b ≃ zmod (minimal_period ((•) a) b) :=
(orbit_equiv_quotient_stabilizer _ b).trans (zpowers_quotient_stabilizer_equiv a b).to_equiv
/-- The orbit `(ℤ • a) +ᵥ b` is a cycle of order `minimal_period ((+ᵥ) a) b`. -/
noncomputable def _root_.add_action.orbit_zmultiples_equiv
{α β : Type*} [add_group α] (a : α) [add_action α β] (b : β) :
add_action.orbit (zmultiples a) b ≃ zmod (minimal_period ((+ᵥ) a) b) :=
(add_action.orbit_equiv_quotient_stabilizer (zmultiples a) b).trans
(zmultiples_quotient_stabilizer_equiv a b).to_equiv
attribute [to_additive orbit_zmultiples_equiv] orbit_zpowers_equiv
@[to_additive orbit_zmultiples_equiv_symm_apply]
lemma orbit_zpowers_equiv_symm_apply (k : zmod (minimal_period ((•) a) b)) :
(orbit_zpowers_equiv a b).symm k =
(⟨a, mem_zpowers a⟩ : zpowers a) ^ (k : ℤ) • ⟨b, mem_orbit_self b⟩ :=
rfl
lemma orbit_zpowers_equiv_symm_apply' (k : ℤ) :
(orbit_zpowers_equiv a b).symm k =
(⟨a, mem_zpowers a⟩ : zpowers a) ^ k • ⟨b, mem_orbit_self b⟩ :=
begin
rw [orbit_zpowers_equiv_symm_apply, zmod.coe_int_cast],
exact subtype.ext (zpow_smul_mod_minimal_period _ _ k),
end
lemma _root_.add_action.orbit_zmultiples_equiv_symm_apply'
{α β : Type*} [add_group α] (a : α) [add_action α β] (b : β) (k : ℤ) :
(add_action.orbit_zmultiples_equiv a b).symm k =
(k • (⟨a, mem_zmultiples a⟩ : zmultiples a)) +ᵥ ⟨b, add_action.mem_orbit_self b⟩ :=
begin
rw [add_action.orbit_zmultiples_equiv_symm_apply, zmod.coe_int_cast],
exact subtype.ext (zsmul_vadd_mod_minimal_period _ _ k),
end
attribute [to_additive orbit_zmultiples_equiv_symm_apply'] orbit_zpowers_equiv_symm_apply'
@[to_additive] lemma minimal_period_eq_card [fintype (orbit (zpowers a) b)] :
minimal_period ((•) a) b = fintype.card (orbit (zpowers a) b) :=
by rw [←fintype.of_equiv_card (orbit_zpowers_equiv a b), zmod.card]
@[to_additive] instance minimal_period_pos [finite $ orbit (zpowers a) b] :
ne_zero $ minimal_period ((•) a) b :=
⟨begin
casesI nonempty_fintype (orbit (zpowers a) b),
haveI : nonempty (orbit (zpowers a) b) := (orbit_nonempty b).to_subtype,
rw minimal_period_eq_card,
exact fintype.card_ne_zero,
end⟩
end mul_action
section group
open subgroup
variables {α : Type*} [group α] (a : α)
/-- See also `order_eq_card_zpowers`. -/
@[to_additive add_order_eq_card_zmultiples' "See also `add_order_eq_card_zmultiples`."]
lemma order_eq_card_zpowers' : order_of a = nat.card (zpowers a) :=
begin
have := nat.card_congr (mul_action.orbit_zpowers_equiv a (1 : α)),
rwa [nat.card_zmod, orbit_subgroup_one_eq_self, eq_comm] at this,
end
variables {a}
@[to_additive is_of_fin_add_order.finite_zmultiples]
lemma is_of_fin_order.finite_zpowers (h : is_of_fin_order a) : finite $ zpowers a :=
begin
rw [← order_of_pos_iff, order_eq_card_zpowers'] at h,
exact nat.finite_of_card_ne_zero h.ne.symm,
end
end group
|
SUBROUTINE LC_COUN ( stcn, cnflag, iret )
C************************************************************************
C* LC_COUN *
C* *
C* This subroutine checks STCN to see if it is a country abbreviation. *
C* The following countries are currently recognized: *
C* *
C* US United States CN Canada *
C* MX Mexico BW Bangladesh *
C* AU Australia CI China *
C* *
C* Countries whose abbreviations will conflict with US state names *
C* should not be added to this list. *
C* *
C* LC_COUN ( STCN, CNFLAG, IRET ) *
C* *
C* Input parameters: *
C* STCN CHAR* State / country abbreviation *
C* *
C* Output parameters: *
C* CNFLAG LOGICAL Country flag *
C* IRET INTEGER Return code *
C* 0 = normal return *
C** *
C* Log: *
C* I. Graffman/RDS 7/87 *
C* M. desJardins/GSFC 6/88 Cleaned up *
C************************************************************************
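C*
C* Example (illustrative): CALL LC_COUN ( 'MX', cnflag, iret ) sets
C* CNFLAG = .true., while CALL LC_COUN ( 'TX', cnflag, iret ) leaves
C* CNFLAG = .false. since TX is a US state rather than a country.
C*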
PARAMETER ( NCOUN = 6 )
CHARACTER*(*) stcn
LOGICAL cnflag
C*
CHARACTER cnlist (NCOUN)*12
DATA cnlist / 'US', 'CN', 'MX', 'BW', 'CI', 'AU' /
C------------------------------------------------------------------------
iret = 0
cnflag = .false.
icn = 0
C
C* Check to see if this is a country.
C
DO i = 1, NCOUN
IF ( stcn .eq. cnlist (i) ) cnflag = .true.
END DO
C*
RETURN
END
|
State Before: ι : Type u_3
R : Type ?u.348167
A : Type u_1
σ : Type u_2
inst✝⁵ : Semiring A
inst✝⁴ : DecidableEq ι
inst✝³ : CanonicallyOrderedAddMonoid ι
inst✝² : SetLike σ A
inst✝¹ : AddSubmonoidClass σ A
𝒜 : ι → σ
inst✝ : GradedRing 𝒜
a b : A
n i : ι
b_mem : b ∈ 𝒜 i
h : ¬i ≤ n
⊢ ↑(↑(↑(decompose 𝒜) (a * b)) n) = 0 State After: case intro
ι : Type u_3
R : Type ?u.348167
A : Type u_1
σ : Type u_2
inst✝⁵ : Semiring A
inst✝⁴ : DecidableEq ι
inst✝³ : CanonicallyOrderedAddMonoid ι
inst✝² : SetLike σ A
inst✝¹ : AddSubmonoidClass σ A
𝒜 : ι → σ
inst✝ : GradedRing 𝒜
a : A
n i : ι
h : ¬i ≤ n
b : { x // x ∈ 𝒜 i }
⊢ ↑(↑(↑(decompose 𝒜) (a * ↑b)) n) = 0 Tactic: lift b to 𝒜 i using b_mem State Before: case intro
ι : Type u_3
R : Type ?u.348167
A : Type u_1
σ : Type u_2
inst✝⁵ : Semiring A
inst✝⁴ : DecidableEq ι
inst✝³ : CanonicallyOrderedAddMonoid ι
inst✝² : SetLike σ A
inst✝¹ : AddSubmonoidClass σ A
𝒜 : ι → σ
inst✝ : GradedRing 𝒜
a : A
n i : ι
h : ¬i ≤ n
b : { x // x ∈ 𝒜 i }
⊢ ↑(↑(↑(decompose 𝒜) (a * ↑b)) n) = 0 State After: no goals Tactic: rwa [decompose_mul, decompose_coe, coe_mul_of_apply_of_not_le] |
State Before: K : Type u_1
inst✝¹ : Field K
inst✝ : NeZero 2
a b c x : K
ha : a ≠ 0
h : ∃ s, discrim a b c = s * s
⊢ ∃ x, a * x * x + b * x + c = 0 State After: case intro
K : Type u_1
inst✝¹ : Field K
inst✝ : NeZero 2
a b c x : K
ha : a ≠ 0
s : K
hs : discrim a b c = s * s
⊢ ∃ x, a * x * x + b * x + c = 0 Tactic: rcases h with ⟨s, hs⟩ State Before: case intro
K : Type u_1
inst✝¹ : Field K
inst✝ : NeZero 2
a b c x : K
ha : a ≠ 0
s : K
hs : discrim a b c = s * s
⊢ ∃ x, a * x * x + b * x + c = 0 State After: case intro
K : Type u_1
inst✝¹ : Field K
inst✝ : NeZero 2
a b c x : K
ha : a ≠ 0
s : K
hs : discrim a b c = s * s
⊢ a * ((-b + s) / (2 * a)) * ((-b + s) / (2 * a)) + b * ((-b + s) / (2 * a)) + c = 0 Tactic: use (-b + s) / (2 * a) State Before: case intro
K : Type u_1
inst✝¹ : Field K
inst✝ : NeZero 2
a b c x : K
ha : a ≠ 0
s : K
hs : discrim a b c = s * s
⊢ a * ((-b + s) / (2 * a)) * ((-b + s) / (2 * a)) + b * ((-b + s) / (2 * a)) + c = 0 State After: case intro
K : Type u_1
inst✝¹ : Field K
inst✝ : NeZero 2
a b c x : K
ha : a ≠ 0
s : K
hs : discrim a b c = s * s
⊢ (-b + s) / (2 * a) = (-b + s) / (2 * a) ∨ (-b + s) / (2 * a) = (-b - s) / (2 * a) Tactic: rw [quadratic_eq_zero_iff ha hs] State Before: case intro
K : Type u_1
inst✝¹ : Field K
inst✝ : NeZero 2
a b c x : K
ha : a ≠ 0
s : K
hs : discrim a b c = s * s
⊢ (-b + s) / (2 * a) = (-b + s) / (2 * a) ∨ (-b + s) / (2 * a) = (-b - s) / (2 * a) State After: no goals Tactic: simp |
module Logic.Leibniz where
-- Leibniz equality
_≡_ : {A : Set} -> A -> A -> Set1
x ≡ y = (P : _ -> Set) -> P x -> P y
≡-refl : {A : Set}(x : A) -> x ≡ x
≡-refl x P px = px
≡-sym : {A : Set}(x y : A) -> x ≡ y -> y ≡ x
≡-sym x y xy P py = xy (\z -> P z -> P x) (\px -> px) py
≡-trans : {A : Set}(x y z : A) -> x ≡ y -> y ≡ z -> x ≡ z
≡-trans x y z xy yz P px = yz P (xy P px)
≡-subst : {A : Set}(P : A -> Set)(x y : A) -> x ≡ y -> P x -> P y
≡-subst P _ _ xy = xy P
|
(* Title: HOL/MicroJava/J/JListExample.thy
Author: Stefan Berghofer
*)
section \<open>Example for generating executable code from Java semantics\<close>
theory JListExample
imports Eval
begin
declare [[syntax_ambiguity_warning = false]]
consts
list_nam :: cnam
append_name :: mname
axiomatization val_nam next_nam l_nam l1_nam l2_nam l3_nam l4_nam :: vnam
where distinct_fields: "val_nam \<noteq> next_nam"
and distinct_vars1: "l_nam \<noteq> l1_nam"
and distinct_vars2: "l_nam \<noteq> l2_nam"
and distinct_vars3: "l_nam \<noteq> l3_nam"
and distinct_vars4: "l_nam \<noteq> l4_nam"
and distinct_vars5: "l1_nam \<noteq> l2_nam"
and distinct_vars6: "l1_nam \<noteq> l3_nam"
and distinct_vars7: "l1_nam \<noteq> l4_nam"
and distinct_vars8: "l2_nam \<noteq> l3_nam"
and distinct_vars9: "l2_nam \<noteq> l4_nam"
and distinct_vars10: "l3_nam \<noteq> l4_nam"
lemmas distinct_vars =
distinct_vars1
distinct_vars2
distinct_vars3
distinct_vars4
distinct_vars5
distinct_vars6
distinct_vars7
distinct_vars8
distinct_vars9
distinct_vars10
definition list_name :: cname where
"list_name = Cname list_nam"
definition val_name :: vname where
"val_name == VName val_nam"
definition next_name :: vname where
"next_name == VName next_nam"
definition l_name :: vname where
"l_name == VName l_nam"
definition l1_name :: vname where
"l1_name == VName l1_nam"
definition l2_name :: vname where
"l2_name == VName l2_nam"
definition l3_name :: vname where
"l3_name == VName l3_nam"
definition l4_name :: vname where
"l4_name == VName l4_nam"
definition list_class :: "java_mb class" where
"list_class ==
(Object,
[(val_name, PrimT Integer), (next_name, RefT (ClassT list_name))],
[((append_name, [RefT (ClassT list_name)]), PrimT Void,
([l_name], [],
If(BinOp Eq ({list_name}(LAcc This)..next_name) (Lit Null))
Expr ({list_name}(LAcc This)..next_name:=LAcc l_name)
Else
Expr ({list_name}({list_name}(LAcc This)..next_name)..
append_name({[RefT (ClassT list_name)]}[LAcc l_name])),
Lit Unit))])"
definition example_prg :: "java_mb prog" where
"example_prg == [ObjectC, (list_name, list_class)]"
code_datatype list_nam
lemma equal_cnam_code [code]:
"HOL.equal list_nam list_nam \<longleftrightarrow> True"
by(simp add: equal_cnam_def)
code_datatype append_name
lemma equal_mname_code [code]:
"HOL.equal append_name append_name \<longleftrightarrow> True"
by(simp add: equal_mname_def)
code_datatype val_nam next_nam l_nam l1_nam l2_nam l3_nam l4_nam
lemma equal_vnam_code [code]:
"HOL.equal val_nam val_nam \<longleftrightarrow> True"
"HOL.equal next_nam next_nam \<longleftrightarrow> True"
"HOL.equal l_nam l_nam \<longleftrightarrow> True"
"HOL.equal l1_nam l1_nam \<longleftrightarrow> True"
"HOL.equal l2_nam l2_nam \<longleftrightarrow> True"
"HOL.equal l3_nam l3_nam \<longleftrightarrow> True"
"HOL.equal l4_nam l4_nam \<longleftrightarrow> True"
"HOL.equal val_nam next_nam \<longleftrightarrow> False"
"HOL.equal next_nam val_nam \<longleftrightarrow> False"
"HOL.equal l_nam l1_nam \<longleftrightarrow> False"
"HOL.equal l_nam l2_nam \<longleftrightarrow> False"
"HOL.equal l_nam l3_nam \<longleftrightarrow> False"
"HOL.equal l_nam l4_nam \<longleftrightarrow> False"
"HOL.equal l1_nam l_nam \<longleftrightarrow> False"
"HOL.equal l1_nam l2_nam \<longleftrightarrow> False"
"HOL.equal l1_nam l3_nam \<longleftrightarrow> False"
"HOL.equal l1_nam l4_nam \<longleftrightarrow> False"
"HOL.equal l2_nam l_nam \<longleftrightarrow> False"
"HOL.equal l2_nam l1_nam \<longleftrightarrow> False"
"HOL.equal l2_nam l3_nam \<longleftrightarrow> False"
"HOL.equal l2_nam l4_nam \<longleftrightarrow> False"
"HOL.equal l3_nam l_nam \<longleftrightarrow> False"
"HOL.equal l3_nam l1_nam \<longleftrightarrow> False"
"HOL.equal l3_nam l2_nam \<longleftrightarrow> False"
"HOL.equal l3_nam l4_nam \<longleftrightarrow> False"
"HOL.equal l4_nam l_nam \<longleftrightarrow> False"
"HOL.equal l4_nam l1_nam \<longleftrightarrow> False"
"HOL.equal l4_nam l2_nam \<longleftrightarrow> False"
"HOL.equal l4_nam l3_nam \<longleftrightarrow> False"
by(simp_all add: distinct_fields distinct_fields[symmetric] distinct_vars distinct_vars[symmetric] equal_vnam_def)
axiomatization where
nat_to_loc'_inject: "nat_to_loc' l = nat_to_loc' l' \<longleftrightarrow> l = l'"
lemma equal_loc'_code [code]:
"HOL.equal (nat_to_loc' l) (nat_to_loc' l') \<longleftrightarrow> l = l'"
by(simp add: equal_loc'_def nat_to_loc'_inject)
definition undefined_cname :: cname
where [code del]: "undefined_cname = undefined"
declare undefined_cname_def[symmetric, code_unfold]
code_datatype Object Xcpt Cname undefined_cname
definition undefined_val :: val
where [code del]: "undefined_val = undefined"
declare undefined_val_def[symmetric, code_unfold]
code_datatype Unit Null Bool Intg Addr undefined_val
definition E where
"E = Expr (l1_name::=NewC list_name);;
Expr ({list_name}(LAcc l1_name)..val_name:=Lit (Intg 1));;
Expr (l2_name::=NewC list_name);;
Expr ({list_name}(LAcc l2_name)..val_name:=Lit (Intg 2));;
Expr (l3_name::=NewC list_name);;
Expr ({list_name}(LAcc l3_name)..val_name:=Lit (Intg 3));;
Expr (l4_name::=NewC list_name);;
Expr ({list_name}(LAcc l4_name)..val_name:=Lit (Intg 4));;
Expr ({list_name}(LAcc l1_name)..
append_name({[RefT (ClassT list_name)]}[LAcc l2_name]));;
Expr ({list_name}(LAcc l1_name)..
append_name({[RefT (ClassT list_name)]}[LAcc l3_name]));;
Expr ({list_name}(LAcc l1_name)..
append_name({[RefT (ClassT list_name)]}[LAcc l4_name]))"
definition test where
"test = Predicate.Pred (\<lambda>s. example_prg\<turnstile>Norm (Map.empty, Map.empty) -E-> s)"
lemma test_code [code]:
"test = exec_i_i_i_o example_prg (Norm (Map.empty, Map.empty)) E"
by(auto intro: exec_i_i_i_oI intro!: pred_eqI elim: exec_i_i_i_oE simp add: test_def)
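(* Editorial note: the ML block below runs the generated code for the big-step semantics
   on E and inspects the resulting heap.  Under the allocation order of E, locations
   0..3 should hold the four list cells with val fields Intg 1 .. Intg 4, and the next
   fields should form the chain built by the three append calls. *)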
ML_val \<open>
val SOME ((_, (heap, locs)), _) = Predicate.yield @{code test};
locs @{code l1_name};
locs @{code l2_name};
locs @{code l3_name};
locs @{code l4_name};
fun list_fields n =
@{code snd} (@{code the} (heap (@{code Loc} (@{code "nat_to_loc'"} n))));
fun val_field n =
list_fields n (@{code val_name}, @{code "list_name"});
fun next_field n =
list_fields n (@{code next_name}, @{code "list_name"});
val Suc = @{code Suc};
val_field @{code "0 :: nat"};
next_field @{code "0 :: nat"};
val_field @{code "1 :: nat"};
next_field @{code "1 :: nat"};
val_field (Suc (Suc @{code "0 :: nat"}));
next_field (Suc (Suc @{code "0 :: nat"}));
val_field (Suc (Suc (Suc @{code "0 :: nat"})));
next_field (Suc (Suc (Suc @{code "0 :: nat"})));
\<close>
end
|
{-# LANGUAGE RecordWildCards #-}
module Main where
import Control.Monad.Trans.Except
import System.IO
import Data.Word
import Data.Complex
import Foreign.C.Types
import Data.Monoid
import Control.Error.Util
import Foreign.Storable.Complex
import Options.Applicative
import Pipes as P
import Pipes.Prelude as P
import Data.Vector.Storable as VS hiding ((++))
import SDR.Util
import SDR.RTLSDRStream
import SDR.ArgUtils
import SDR.Serialize as S
data Options = Options {
fileName :: FilePath,
frequency :: Word32,
sampleRate :: Word32,
size :: Maybe Int
}
optParser :: Parser Options
optParser = Options
<$> strOption (
long "output "
<> short 'o'
<> metavar "FILENAME"
<> help "Output filename"
)
<*> option (fmap fromIntegral parseSize) (
long "frequency"
<> short 'f'
<> metavar "FREQUENCY"
<> help "Frequency to tune to"
)
<*> option (fmap fromIntegral parseSize) (
long "samplerate"
<> short 'r'
<> metavar "RATE"
<> help "Sample rate"
)
<*> optional (option (fmap fromIntegral parseSize) (
long "size"
<> short 's'
<> metavar "SIZE"
<> help "Size of output file. If omitted, samples will be recorded until the program is killed"
))
opt :: ParserInfo Options
opt = info (helper <*> optParser) (fullDesc <> progDesc "Record IQ samples from an RTL2832 based device" <> header "RTLSDR Record")
doIt Options{..} = do
str <- sdrStream (defaultRTLSDRParams frequency sampleRate) 1 16384
lift $ withFile fileName WriteMode $ \handle ->
runEffect $ str >-> maybe P.cat P.take size >-> P.map (interleavedIQUnsigned256ToFloat :: VS.Vector CUChar -> VS.Vector (Complex CFloat)) >-> S.toHandle handle
main = execParser opt >>= exceptT putStrLn return . doIt
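-- Illustrative usage (hypothetical executable name; flags follow the parser above):
--   rtlsdr-record -o capture.iq -f 100000000 -r 1280000
-- tunes to 100 MHz at 1.28 MS/s and writes complex float samples to capture.iq until
-- the program is killed, since no --size is given.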
|
Set Warnings "-notation-overridden".
Require Import Category.Lib.
Require Export Category.Theory.Functor.
Require Export Category.Construction.Opposite.
Generalizable All Variables.
Set Primitive Projections.
Set Universe Polymorphism.
Unset Transparent Obligations.
Definition Opposite_Functor `(F : C ⟶ D) : C^op ⟶ D^op :=
@Build_Functor (C^op) (D^op) F
(λ (x y : C ^op) (f : x ~{ C ^op }~> y), @fmap C D F y x f)
(λ (x y : C ^op) (f g : x ~{ C ^op }~> y), @fmap_respects _ _ F y x f g)
(λ x : C ^op, fmap_id)
(λ (x y z : C ^op) (f : y ~{ C ^op }~> z)
(g : x ~{ C ^op }~> y), @fmap_comp _ _ F _ _ _ g f).
Notation "F ^op" := (@Opposite_Functor _ _ F)
(at level 7, format "F ^op") : functor_scope.
Corollary Opposite_Functor_invol `{F : C ⟶ D} : (F^op)^op = F.
Proof. reflexivity. Qed.
Definition contramap `{F : C^op ⟶ D} `(f : x ~{C}~> y) :
F y ~{D}~> F x := fmap (op f).
|
(* Title: HOL/Auth/n_flash_lemma_on_inv__89.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_flash Protocol Case Study*}
theory n_flash_lemma_on_inv__89 imports n_flash_base
begin
section{*All lemmas on causal relation between inv__89 and some rule r*}
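(* Editorial note: every lemma below follows the same schema.  The rule parameters and
   the invariant parameter p__Inv4 are obtained from the existential assumptions, a case
   split is made on whether they coincide, and each case discharges one of the disjuncts
   ?P1, ?P2 or ?P3 of invHoldForRule, mostly by auto.  Rules that cannot affect inv__89
   are dispatched uniformly with noEffectOnRule. *)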
lemma n_PI_Remote_GetVsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_Get src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_Get src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_PI_Remote_GetXVsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_GetX src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_GetX src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_NakVsinv__89:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Nak dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Nak dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__0Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__1Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__2Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Get__part__0Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Get__part__1Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''HomeUniMsg'') ''Cmd'')) (Const UNI_Get)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Put_HeadVsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_PutVsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Put_DirtyVsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_NakVsinv__89:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_Nak_HomeVsinv__89:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Nak_Home dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Get_Nak_Home dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_PutVsinv__89:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_Put_HomeVsinv__89:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Put_Home dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Get_Put_Home dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__0Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__1Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__2Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_GetX__part__0Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_GetX__part__1Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_1Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_2Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_3Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_4Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_5Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_6Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7__part__0Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7__part__1Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__0Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__1Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_HomeVsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_Home_NODE_GetVsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8Vsinv__89:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_NODE_GetVsinv__89:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_9__part__0Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_9__part__1Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_10_HomeVsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_10Vsinv__89:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_11Vsinv__89:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_NakVsinv__89:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_Nak_HomeVsinv__89:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_Nak_Home dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_GetX_Nak_Home dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_PutXVsinv__89:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_PutX_HomeVsinv__89:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_PutX_Home dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_GetX_PutX_Home dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_PutVsinv__89:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Put dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Put dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_PutXVsinv__89:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_PutX dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_PutX dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_PI_Local_Get_GetVsinv__89:
assumes a1: "(r=n_PI_Local_Get_Get )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "?P3 s"
apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const false)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))))" in exI, auto) done
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_PI_Local_GetX_GetX__part__0Vsinv__89:
assumes a1: "(r=n_PI_Local_GetX_GetX__part__0 )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_PI_Local_GetX_GetX__part__1Vsinv__89:
assumes a1: "(r=n_PI_Local_GetX_GetX__part__1 )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_Nak_HomeVsinv__89:
assumes a1: "(r=n_NI_Nak_Home )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_Local_PutVsinv__89:
assumes a1: "(r=n_NI_Local_Put )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_Local_PutXAcksDoneVsinv__89:
assumes a1: "(r=n_NI_Local_PutXAcksDone )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__89 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_PI_Local_GetX_PutX__part__0Vsinv__89:
assumes a1: "r=n_PI_Local_GetX_PutX__part__0 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_WbVsinv__89:
assumes a1: "r=n_NI_Wb " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_StoreVsinv__89:
assumes a1: "\<exists> src data. src\<le>N\<and>data\<le>N\<and>r=n_Store src data" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_3Vsinv__89:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_3 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_1Vsinv__89:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_1 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Remote_ReplaceVsinv__89:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_PI_Remote_Replace src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_Store_HomeVsinv__89:
assumes a1: "\<exists> data. data\<le>N\<and>r=n_Store_Home data" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_ReplaceVsinv__89:
assumes a1: "r=n_PI_Local_Replace " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_existsVsinv__89:
assumes a1: "\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_InvAck_exists src pp" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Remote_PutXVsinv__89:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_PI_Remote_PutX dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvVsinv__89:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Inv dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_PutXVsinv__89:
assumes a1: "r=n_PI_Local_PutX " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_Get_PutVsinv__89:
assumes a1: "r=n_PI_Local_Get_Put " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_ShWbVsinv__89:
assumes a1: "r=n_NI_ShWb N " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX_HeadVld__part__0Vsinv__89:
assumes a1: "r=n_PI_Local_GetX_PutX_HeadVld__part__0 N " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_ReplaceVsinv__89:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Replace src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX__part__1Vsinv__89:
assumes a1: "r=n_PI_Local_GetX_PutX__part__1 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_exists_HomeVsinv__89:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_exists_Home src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Replace_HomeVsinv__89:
assumes a1: "r=n_NI_Replace_Home " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Nak_ClearVsinv__89:
assumes a1: "r=n_NI_Nak_Clear " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_2Vsinv__89:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_2 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX_HeadVld__part__1Vsinv__89:
assumes a1: "r=n_PI_Local_GetX_PutX_HeadVld__part__1 N " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_FAckVsinv__89:
assumes a1: "r=n_NI_FAck " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__89 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
[STATEMENT]
lemma agtt_lang_GTT_trancl_eps_free_conv:
assumes "is_gtt_eps_free \<G>"
shows "agtt_lang (GTT_trancl_eps_free \<G>) = agtt_lang (GTT_trancl \<G>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. agtt_lang (GTT_trancl_eps_free \<G>) = agtt_lang (GTT_trancl \<G>)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. agtt_lang (GTT_trancl_eps_free \<G>) = agtt_lang (GTT_trancl \<G>)
[PROOF STEP]
have "(eps (fst (GTT_trancl \<G>)))|\<^sup>+| = eps (fst (GTT_trancl \<G>))"
"(eps (snd (GTT_trancl \<G>)))|\<^sup>+| = eps (snd (GTT_trancl \<G>))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (eps (fst (GTT_trancl \<G>)))|\<^sup>+| = eps (fst (GTT_trancl \<G>)) &&& (eps (snd (GTT_trancl \<G>)))|\<^sup>+| = eps (snd (GTT_trancl \<G>))
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
is_gtt_eps_free \<G>
goal (1 subgoal):
1. (eps (fst (GTT_trancl \<G>)))|\<^sup>+| = eps (fst (GTT_trancl \<G>)) &&& (eps (snd (GTT_trancl \<G>)))|\<^sup>+| = eps (snd (GTT_trancl \<G>))
[PROOF STEP]
by (auto simp: GTT_trancl_def Let_def is_gtt_eps_free_def \<Delta>_trancl_inv)
[PROOF STATE]
proof (state)
this:
(eps (fst (GTT_trancl \<G>)))|\<^sup>+| = eps (fst (GTT_trancl \<G>))
(eps (snd (GTT_trancl \<G>)))|\<^sup>+| = eps (snd (GTT_trancl \<G>))
goal (1 subgoal):
1. agtt_lang (GTT_trancl_eps_free \<G>) = agtt_lang (GTT_trancl \<G>)
[PROOF STEP]
from ftrancl_eps_free_ta_derI[OF this(1)] ftrancl_eps_free_ta_derI[OF this(2)]
[PROOF STATE]
proof (chain)
picking this:
ta_der (ftrancl_eps_free_closures (fst (GTT_trancl \<G>))) (term_of_gterm ?t) = ta_der (fst (GTT_trancl \<G>)) (term_of_gterm ?t)
ta_der (ftrancl_eps_free_closures (snd (GTT_trancl \<G>))) (term_of_gterm ?t) = ta_der (snd (GTT_trancl \<G>)) (term_of_gterm ?t)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
ta_der (ftrancl_eps_free_closures (fst (GTT_trancl \<G>))) (term_of_gterm ?t) = ta_der (fst (GTT_trancl \<G>)) (term_of_gterm ?t)
ta_der (ftrancl_eps_free_closures (snd (GTT_trancl \<G>))) (term_of_gterm ?t) = ta_der (snd (GTT_trancl \<G>)) (term_of_gterm ?t)
goal (1 subgoal):
1. agtt_lang (GTT_trancl_eps_free \<G>) = agtt_lang (GTT_trancl \<G>)
[PROOF STEP]
by (auto simp: case_prod_beta GTT_trancl_eps_free_def intro!: agtt_lang_derI)
[PROOF STATE]
proof (state)
this:
agtt_lang (GTT_trancl_eps_free \<G>) = agtt_lang (GTT_trancl \<G>)
goal:
No subgoals!
[PROOF STEP]
qed |
/-
Copyright (c) 2017 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import algebra.group.defs
import data.fun_like.basic
import logic.embedding
import logic.equiv.set
import order.rel_classes
/-!
# Relation homomorphisms, embeddings, isomorphisms
This file defines relation homomorphisms, embeddings, isomorphisms and order embeddings and
isomorphisms.
## Main declarations
* `rel_hom`: Relation homomorphism. A `rel_hom r s` is a function `f : α → β` such that
`r a b → s (f a) (f b)`.
* `rel_embedding`: Relation embedding. A `rel_embedding r s` is an embedding `f : α ↪ β` such that
`r a b ↔ s (f a) (f b)`.
* `rel_iso`: Relation isomorphism. A `rel_iso r s` is an equivalence `f : α ≃ β` such that
`r a b ↔ s (f a) (f b)`.
* `sum_lex_congr`, `prod_lex_congr`: Creates a relation homomorphism between two `sum_lex` or two
`prod_lex` from relation homomorphisms between their arguments.
## Notation
* `→r`: `rel_hom`
* `↪r`: `rel_embedding`
* `≃r`: `rel_iso`
-/
open function
universes u v w
variables {α β γ : Type*} {r : α → α → Prop} {s : β → β → Prop} {t : γ → γ → Prop}
/-- A relation homomorphism with respect to a given pair of relations `r` and `s`
is a function `f : α → β` such that `r a b → s (f a) (f b)`. -/
@[nolint has_inhabited_instance]
structure rel_hom {α β : Type*} (r : α → α → Prop) (s : β → β → Prop) :=
(to_fun : α → β)
(map_rel' : ∀ {a b}, r a b → s (to_fun a) (to_fun b))
infix ` →r `:25 := rel_hom
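/- A quick illustration of the definition above (a hedged sketch): `nat.succ` strictly
preserves `<` on `ℕ` by the core lemma `nat.succ_lt_succ`, so it yields a relation
homomorphism from `(<)` to `(<)`. -/
example : ((<) : ℕ → ℕ → Prop) →r ((<) : ℕ → ℕ → Prop) :=
⟨nat.succ, λ a b h, nat.succ_lt_succ h⟩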
/-- `rel_hom_class F r s` asserts that `F` is a type of functions such that all `f : F`
satisfy `r a b → s (f a) (f b)`.
The relations `r` and `s` are `out_param`s since figuring them out from a goal is a higher-order
matching problem that Lean usually can't do unaided.
-/
class rel_hom_class (F : Type*) {α β : out_param $ Type*}
(r : out_param $ α → α → Prop) (s : out_param $ β → β → Prop)
extends fun_like F α (λ _, β) :=
(map_rel : ∀ (f : F) {a b}, r a b → s (f a) (f b))
export rel_hom_class (map_rel)
-- The free parameters `r` and `s` are `out_param`s so this is not dangerous.
attribute [nolint dangerous_instance] rel_hom_class.to_fun_like
namespace rel_hom_class
variables {F : Type*}
lemma map_inf [semilattice_inf α] [linear_order β]
[rel_hom_class F ((<) : β → β → Prop) ((<) : α → α → Prop)]
(a : F) (m n : β) : a (m ⊓ n) = a m ⊓ a n :=
(strict_mono.monotone $ λ x y, map_rel a).map_inf m n
lemma map_sup [semilattice_sup α] [linear_order β]
[rel_hom_class F ((>) : β → β → Prop) ((>) : α → α → Prop)]
(a : F) (m n : β) : a (m ⊔ n) = a m ⊔ a n :=
@map_inf (order_dual α) (order_dual β) _ _ _ _ _ _ _
protected theorem is_irrefl [rel_hom_class F r s] (f : F) : ∀ [is_irrefl β s], is_irrefl α r
| ⟨H⟩ := ⟨λ a h, H _ (map_rel f h)⟩
protected theorem is_asymm [rel_hom_class F r s] (f : F) : ∀ [is_asymm β s], is_asymm α r
| ⟨H⟩ := ⟨λ a b h₁ h₂, H _ _ (map_rel f h₁) (map_rel f h₂)⟩
protected theorem acc [rel_hom_class F r s] (f : F) (a : α) : acc s (f a) → acc r a :=
begin
generalize h : f a = b, intro ac,
induction ac with _ H IH generalizing a, subst h,
exact ⟨_, λ a' h, IH (f a') (map_rel f h) _ rfl⟩
end
protected theorem well_founded [rel_hom_class F r s] (f : F) :
∀ (h : well_founded s), well_founded r
| ⟨H⟩ := ⟨λ a, rel_hom_class.acc f _ (H _)⟩
end rel_hom_class
namespace rel_hom
instance : rel_hom_class (r →r s) r s :=
{ coe := λ o, o.to_fun,
coe_injective' := λ f g h, by { cases f, cases g, congr' },
map_rel := map_rel' }
/-- Auxiliary instance if `rel_hom_class.to_fun_like.to_has_coe_to_fun` isn't found -/
instance : has_coe_to_fun (r →r s) (λ _, α → β) := ⟨λ o, o.to_fun⟩
initialize_simps_projections rel_hom (to_fun → apply)
protected theorem map_rel (f : r →r s) : ∀ {a b}, r a b → s (f a) (f b) := f.map_rel'
@[simp] theorem coe_fn_mk (f : α → β) (o) :
(@rel_hom.mk _ _ r s f o : α → β) = f := rfl
@[simp] theorem coe_fn_to_fun (f : r →r s) : (f.to_fun : α → β) = f := rfl
/-- The map `coe_fn : (r →r s) → (α → β)` is injective. -/
theorem coe_fn_injective : @function.injective (r →r s) (α → β) coe_fn :=
fun_like.coe_injective
@[ext] theorem ext ⦃f g : r →r s⦄ (h : ∀ x, f x = g x) : f = g :=
fun_like.ext f g h
theorem ext_iff {f g : r →r s} : f = g ↔ ∀ x, f x = g x :=
fun_like.ext_iff
/-- Identity map is a relation homomorphism. -/
@[refl, simps] protected def id (r : α → α → Prop) : r →r r :=
⟨λ x, x, λ a b x, x⟩
/-- Composition of two relation homomorphisms is a relation homomorphism. -/
@[trans, simps] protected def comp (g : s →r t) (f : r →r s) : r →r t :=
⟨λ x, g (f x), λ a b h, g.2 (f.2 h)⟩
/-- A relation homomorphism is also a relation homomorphism between dual relations. -/
protected def swap (f : r →r s) : swap r →r swap s :=
⟨f, λ a b, f.map_rel⟩
/-- A function is a relation homomorphism from the preimage relation of `s` to `s`. -/
def preimage (f : α → β) (s : β → β → Prop) : f ⁻¹'o s →r s := ⟨f, λ a b, id⟩
end rel_hom
/-- An increasing function is injective -/
lemma injective_of_increasing (r : α → α → Prop) (s : β → β → Prop) [is_trichotomous α r]
[is_irrefl β s] (f : α → β) (hf : ∀ {x y}, r x y → s (f x) (f y)) : injective f :=
begin
intros x y hxy,
rcases trichotomous_of r x y with h | h | h,
have := hf h, rw hxy at this, exfalso, exact irrefl_of s (f y) this,
exact h,
have := hf h, rw hxy at this, exfalso, exact irrefl_of s (f y) this
end
/-- An increasing relation homomorphism is injective -/
lemma rel_hom.injective_of_increasing [is_trichotomous α r]
[is_irrefl β s] (f : r →r s) : injective f :=
injective_of_increasing r s f (λ x y, f.map_rel)
-- TODO: define a `rel_iff_class` so we don't have to do all the `convert` trickery?
theorem surjective.well_founded_iff {f : α → β} (hf : surjective f)
(o : ∀ {a b}, r a b ↔ s (f a) (f b)) : well_founded r ↔ well_founded s :=
iff.intro (begin
refine rel_hom_class.well_founded (rel_hom.mk _ _ : s →r r),
{ exact classical.some hf.has_right_inverse },
intros a b h, apply o.2, convert h,
iterate 2 { apply classical.some_spec hf.has_right_inverse },
end) (rel_hom_class.well_founded (⟨f, λ _ _, o.1⟩ : r →r s))
/-- A relation embedding with respect to a given pair of relations `r` and `s`
is an embedding `f : α ↪ β` such that `r a b ↔ s (f a) (f b)`. -/
structure rel_embedding {α β : Type*} (r : α → α → Prop) (s : β → β → Prop) extends α ↪ β :=
(map_rel_iff' : ∀ {a b}, s (to_embedding a) (to_embedding b) ↔ r a b)
infix ` ↪r `:25 := rel_embedding
/-- The induced relation on a subtype is an embedding under the natural inclusion. -/
definition subtype.rel_embedding {X : Type*} (r : X → X → Prop) (p : X → Prop) :
((subtype.val : subtype p → X) ⁻¹'o r) ↪r r :=
⟨embedding.subtype p, λ x y, iff.rfl⟩
theorem preimage_equivalence {α β} (f : α → β) {s : β → β → Prop}
(hs : equivalence s) : equivalence (f ⁻¹'o s) :=
⟨λ a, hs.1 _, λ a b h, hs.2.1 h, λ a b c h₁ h₂, hs.2.2 h₁ h₂⟩
namespace rel_embedding
/-- A relation embedding is also a relation homomorphism -/
def to_rel_hom (f : r ↪r s) : (r →r s) :=
{ to_fun := f.to_embedding.to_fun,
map_rel' := λ x y, (map_rel_iff' f).mpr }
instance : has_coe (r ↪r s) (r →r s) := ⟨to_rel_hom⟩
-- see Note [function coercion]
instance : has_coe_to_fun (r ↪r s) (λ _, α → β) := ⟨λ o, o.to_embedding⟩
-- TODO: define and instantiate a `rel_embedding_class` when `embedding_like` is defined
instance : rel_hom_class (r ↪r s) r s :=
{ coe := coe_fn,
coe_injective' := λ f g h, by { rcases f with ⟨⟨⟩⟩, rcases g with ⟨⟨⟩⟩, congr' },
map_rel := λ f a b, iff.mpr (map_rel_iff' f) }
/-- See Note [custom simps projection]. We need to specify this projection explicitly in this case,
because it is a composition of multiple projections. -/
def simps.apply (h : r ↪r s) : α → β := h
initialize_simps_projections rel_embedding (to_embedding_to_fun → apply, -to_embedding)
@[simp] lemma to_rel_hom_eq_coe (f : r ↪r s) : f.to_rel_hom = f := rfl
@[simp] lemma coe_coe_fn (f : r ↪r s) : ((f : r →r s) : α → β) = f := rfl
theorem injective (f : r ↪r s) : injective f := f.inj'
theorem map_rel_iff (f : r ↪r s) : ∀ {a b}, s (f a) (f b) ↔ r a b := f.map_rel_iff'
@[simp] theorem coe_fn_mk (f : α ↪ β) (o) :
(@rel_embedding.mk _ _ r s f o : α → β) = f := rfl
@[simp] theorem coe_fn_to_embedding (f : r ↪r s) : (f.to_embedding : α → β) = f := rfl
/-- The map `coe_fn : (r ↪r s) → (α → β)` is injective. -/
theorem coe_fn_injective : @function.injective (r ↪r s) (α → β) coe_fn := fun_like.coe_injective
@[ext] theorem ext ⦃f g : r ↪r s⦄ (h : ∀ x, f x = g x) : f = g := fun_like.ext _ _ h
theorem ext_iff {f g : r ↪r s} : f = g ↔ ∀ x, f x = g x := fun_like.ext_iff
/-- Identity map is a relation embedding. -/
@[refl, simps] protected def refl (r : α → α → Prop) : r ↪r r :=
⟨embedding.refl _, λ a b, iff.rfl⟩
/-- Composition of two relation embeddings is a relation embedding. -/
@[trans] protected def trans (f : r ↪r s) (g : s ↪r t) : r ↪r t :=
⟨f.1.trans g.1, λ a b, by simp [f.map_rel_iff, g.map_rel_iff]⟩
instance (r : α → α → Prop) : inhabited (r ↪r r) := ⟨rel_embedding.refl _⟩
theorem trans_apply (f : r ↪r s) (g : s ↪r t) (a : α) : (f.trans g) a = g (f a) := rfl
@[simp] theorem coe_trans (f : r ↪r s) (g : s ↪r t) : ⇑(f.trans g) = g ∘ f := rfl
/-- A relation embedding is also a relation embedding between dual relations. -/
protected def swap (f : r ↪r s) : swap r ↪r swap s :=
⟨f.to_embedding, λ a b, f.map_rel_iff⟩
/-- An embedding `f : α ↪ β` yields a relation embedding from the
preimage relation of `s` to `s`. -/
def preimage (f : α ↪ β) (s : β → β → Prop) : f ⁻¹'o s ↪r s := ⟨f, λ a b, iff.rfl⟩
theorem eq_preimage (f : r ↪r s) : r = f ⁻¹'o s :=
by { ext a b, exact f.map_rel_iff.symm }
protected theorem is_irrefl (f : r ↪r s) [is_irrefl β s] : is_irrefl α r :=
⟨λ a, mt f.map_rel_iff.2 (irrefl (f a))⟩
protected theorem is_refl (f : r ↪r s) [is_refl β s] : is_refl α r :=
⟨λ a, f.map_rel_iff.1 $ refl _⟩
protected theorem is_symm (f : r ↪r s) [is_symm β s] : is_symm α r :=
⟨λ a b, imp_imp_imp f.map_rel_iff.2 f.map_rel_iff.1 symm⟩
protected theorem is_asymm (f : r ↪r s) [is_asymm β s] : is_asymm α r :=
⟨λ a b h₁ h₂, asymm (f.map_rel_iff.2 h₁) (f.map_rel_iff.2 h₂)⟩
protected theorem is_antisymm : ∀ (f : r ↪r s) [is_antisymm β s], is_antisymm α r
| ⟨f, o⟩ ⟨H⟩ := ⟨λ a b h₁ h₂, f.inj' (H _ _ (o.2 h₁) (o.2 h₂))⟩
protected theorem is_trans : ∀ (f : r ↪r s) [is_trans β s], is_trans α r
| ⟨f, o⟩ ⟨H⟩ := ⟨λ a b c h₁ h₂, o.1 (H _ _ _ (o.2 h₁) (o.2 h₂))⟩
protected theorem is_total : ∀ (f : r ↪r s) [is_total β s], is_total α r
| ⟨f, o⟩ ⟨H⟩ := ⟨λ a b, (or_congr o o).1 (H _ _)⟩
protected theorem is_preorder : ∀ (f : r ↪r s) [is_preorder β s], is_preorder α r
| f H := by exactI {..f.is_refl, ..f.is_trans}
protected theorem is_partial_order : ∀ (f : r ↪r s) [is_partial_order β s], is_partial_order α r
| f H := by exactI {..f.is_preorder, ..f.is_antisymm}
protected theorem is_linear_order : ∀ (f : r ↪r s) [is_linear_order β s], is_linear_order α r
| f H := by exactI {..f.is_partial_order, ..f.is_total}
protected theorem is_strict_order : ∀ (f : r ↪r s) [is_strict_order β s], is_strict_order α r
| f H := by exactI {..f.is_irrefl, ..f.is_trans}
protected theorem is_trichotomous : ∀ (f : r ↪r s) [is_trichotomous β s], is_trichotomous α r
| ⟨f, o⟩ ⟨H⟩ := ⟨λ a b, (or_congr o (or_congr f.inj'.eq_iff o)).1 (H _ _)⟩
protected theorem is_strict_total_order' :
∀ (f : r ↪r s) [is_strict_total_order' β s], is_strict_total_order' α r
| f H := by exactI {..f.is_trichotomous, ..f.is_strict_order}
protected theorem acc (f : r ↪r s) (a : α) : acc s (f a) → acc r a :=
begin
generalize h : f a = b, intro ac,
induction ac with _ H IH generalizing a, subst h,
exact ⟨_, λ a' h, IH (f a') (f.map_rel_iff.2 h) _ rfl⟩
end
protected theorem well_founded : ∀ (f : r ↪r s) (h : well_founded s), well_founded r
| f ⟨H⟩ := ⟨λ a, f.acc _ (H _)⟩
protected theorem is_well_order : ∀ (f : r ↪r s) [is_well_order β s], is_well_order α r
| f H := by exactI {wf := f.well_founded H.wf, ..f.is_strict_total_order'}
/--
To define a relation embedding from an antisymmetric relation `r` to a reflexive relation `s`, it
suffices to give a function together with a proof that it satisfies `s (f a) (f b) ↔ r a b`.
-/
def of_map_rel_iff (f : α → β) [is_antisymm α r] [is_refl β s]
(hf : ∀ a b, s (f a) (f b) ↔ r a b) : r ↪r s :=
{ to_fun := f,
inj' := λ x y h, antisymm ((hf _ _).1 (h ▸ refl _)) ((hf _ _).1 (h ▸ refl _)),
map_rel_iff' := hf }
@[simp]
lemma of_map_rel_iff_coe (f : α → β) [is_antisymm α r] [is_refl β s]
(hf : ∀ a b, s (f a) (f b) ↔ r a b) :
⇑(of_map_rel_iff f hf : r ↪r s) = f :=
rfl
/-- It suffices to prove `f` is monotone between strict relations
to show it is a relation embedding. -/
def of_monotone [is_trichotomous α r] [is_asymm β s] (f : α → β)
(H : ∀ a b, r a b → s (f a) (f b)) : r ↪r s :=
begin
haveI := @is_asymm.is_irrefl β s _,
refine ⟨⟨f, λ a b e, _⟩, λ a b, ⟨λ h, _, H _ _⟩⟩,
{ refine ((@trichotomous _ r _ a b).resolve_left _).resolve_right _;
exact λ h, @irrefl _ s _ _ (by simpa [e] using H _ _ h) },
{ refine (@trichotomous _ r _ a b).resolve_right (or.rec (λ e, _) (λ h', _)),
{ subst e, exact irrefl _ h },
{ exact asymm (H _ _ h') h } }
end
@[simp] theorem of_monotone_coe [is_trichotomous α r] [is_asymm β s] (f : α → β) (H) :
(@of_monotone _ _ r s _ _ f H : α → β) = f := rfl
end rel_embedding
/-- A relation isomorphism is an equivalence that is also a relation embedding. -/
structure rel_iso {α β : Type*} (r : α → α → Prop) (s : β → β → Prop) extends α ≃ β :=
(map_rel_iff' : ∀ {a b}, s (to_equiv a) (to_equiv b) ↔ r a b)
infix ` ≃r `:25 := rel_iso
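/- A quick illustration (a hedged sketch): the identity equivalence on `ℕ` is a relation
isomorphism from `(<)` to itself; `rel_iso.refl` below generalizes this construction. -/
example : ((<) : ℕ → ℕ → Prop) ≃r ((<) : ℕ → ℕ → Prop) :=
⟨equiv.refl ℕ, λ a b, iff.rfl⟩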
namespace rel_iso
/-- Convert an `rel_iso` to an `rel_embedding`. This function is also available as a coercion
but often it is easier to write `f.to_rel_embedding` than to write explicitly `r` and `s`
in the target type. -/
def to_rel_embedding (f : r ≃r s) : r ↪r s :=
⟨f.to_equiv.to_embedding, f.map_rel_iff'⟩
theorem to_equiv_injective : injective (to_equiv : (r ≃r s) → α ≃ β)
| ⟨e₁, o₁⟩ ⟨e₂, o₂⟩ h := by { congr, exact h }
instance : has_coe (r ≃r s) (r ↪r s) := ⟨to_rel_embedding⟩
-- see Note [function coercion]
instance : has_coe_to_fun (r ≃r s) (λ _, α → β) := ⟨λ f, f⟩
-- TODO: define and instantiate a `rel_iso_class` when `equiv_like` is defined
instance : rel_hom_class (r ≃r s) r s :=
{ coe := coe_fn,
coe_injective' := equiv.coe_fn_injective.comp to_equiv_injective,
map_rel := λ f a b, iff.mpr (map_rel_iff' f) }
@[simp] lemma to_rel_embedding_eq_coe (f : r ≃r s) : f.to_rel_embedding = f := rfl
@[simp] lemma coe_coe_fn (f : r ≃r s) : ((f : r ↪r s) : α → β) = f := rfl
theorem map_rel_iff (f : r ≃r s) : ∀ {a b}, s (f a) (f b) ↔ r a b := f.map_rel_iff'
@[simp] theorem coe_fn_mk (f : α ≃ β) (o : ∀ ⦃a b⦄, s (f a) (f b) ↔ r a b) :
(rel_iso.mk f o : α → β) = f := rfl
@[simp] theorem coe_fn_to_equiv (f : r ≃r s) : (f.to_equiv : α → β) = f := rfl
/-- The map `coe_fn : (r ≃r s) → (α → β)` is injective. Lean fails to parse
`function.injective (λ e : r ≃r s, (e : α → β))`, so we use a trick to say the same. -/
theorem coe_fn_injective : @function.injective (r ≃r s) (α → β) coe_fn := fun_like.coe_injective
@[ext] theorem ext ⦃f g : r ≃r s⦄ (h : ∀ x, f x = g x) : f = g := fun_like.ext f g h
theorem ext_iff {f g : r ≃r s} : f = g ↔ ∀ x, f x = g x := fun_like.ext_iff
/-- Inverse map of a relation isomorphism is a relation isomorphism. -/
@[symm] protected def symm (f : r ≃r s) : s ≃r r :=
⟨f.to_equiv.symm, λ a b, by erw [← f.map_rel_iff, f.1.apply_symm_apply, f.1.apply_symm_apply]⟩
/-- See Note [custom simps projection]. We need to specify this projection explicitly in this case,
because it is a composition of multiple projections. -/
def simps.apply (h : r ≃r s) : α → β := h
/-- See Note [custom simps projection]. -/
def simps.symm_apply (h : r ≃r s) : β → α := h.symm
initialize_simps_projections rel_iso
(to_equiv_to_fun → apply, to_equiv_inv_fun → symm_apply, -to_equiv)
/-- Identity map is a relation isomorphism. -/
@[refl, simps apply] protected def refl (r : α → α → Prop) : r ≃r r :=
⟨equiv.refl _, λ a b, iff.rfl⟩
/-- Composition of two relation isomorphisms is a relation isomorphism. -/
@[trans, simps apply] protected def trans (f₁ : r ≃r s) (f₂ : s ≃r t) : r ≃r t :=
⟨f₁.to_equiv.trans f₂.to_equiv, λ a b, f₂.map_rel_iff.trans f₁.map_rel_iff⟩
instance (r : α → α → Prop) : inhabited (r ≃r r) := ⟨rel_iso.refl _⟩
@[simp] lemma default_def (r : α → α → Prop) : default = rel_iso.refl r := rfl
/-- A relation isomorphism is also a relation isomorphism between dual relations. -/
protected def swap (f : r ≃r s) : (swap r) ≃r (swap s) :=
⟨f.to_equiv, λ _ _, f.map_rel_iff⟩
@[simp] theorem coe_fn_symm_mk (f o) : ((@rel_iso.mk _ _ r s f o).symm : β → α) = f.symm :=
rfl
@[simp] theorem apply_symm_apply (e : r ≃r s) (x : β) : e (e.symm x) = x :=
e.to_equiv.apply_symm_apply x
@[simp] theorem symm_apply_apply (e : r ≃r s) (x : α) : e.symm (e x) = x :=
e.to_equiv.symm_apply_apply x
theorem rel_symm_apply (e : r ≃r s) {x y} : r x (e.symm y) ↔ s (e x) y :=
by rw [← e.map_rel_iff, e.apply_symm_apply]
theorem symm_apply_rel (e : r ≃r s) {x y} : r (e.symm x) y ↔ s x (e y) :=
by rw [← e.map_rel_iff, e.apply_symm_apply]
protected lemma bijective (e : r ≃r s) : bijective e := e.to_equiv.bijective
protected lemma injective (e : r ≃r s) : injective e := e.to_equiv.injective
protected lemma surjective (e : r ≃r s) : surjective e := e.to_equiv.surjective
@[simp] lemma range_eq (e : r ≃r s) : set.range e = set.univ := e.surjective.range_eq
@[simp] lemma eq_iff_eq (f : r ≃r s) {a b} : f a = f b ↔ a = b :=
f.injective.eq_iff
/-- Any equivalence lifts to a relation isomorphism between `s` and its preimage. -/
protected def preimage (f : α ≃ β) (s : β → β → Prop) : f ⁻¹'o s ≃r s := ⟨f, λ a b, iff.rfl⟩
/-- A surjective relation embedding is a relation isomorphism. -/
@[simps apply]
noncomputable def of_surjective (f : r ↪r s) (H : surjective f) : r ≃r s :=
⟨equiv.of_bijective f ⟨f.injective, H⟩, λ a b, f.map_rel_iff⟩
/--
Given relation isomorphisms `r₁ ≃r s₁` and `r₂ ≃r s₂`, construct a relation isomorphism for the
lexicographic orders on the sum.
-/
def sum_lex_congr {α₁ α₂ β₁ β₂ r₁ r₂ s₁ s₂}
(e₁ : @rel_iso α₁ β₁ r₁ s₁) (e₂ : @rel_iso α₂ β₂ r₂ s₂) :
sum.lex r₁ r₂ ≃r sum.lex s₁ s₂ :=
⟨equiv.sum_congr e₁.to_equiv e₂.to_equiv, λ a b,
by cases e₁ with f hf; cases e₂ with g hg;
cases a; cases b; simp [hf, hg]⟩
/--
Given relation isomorphisms `r₁ ≃r s₁` and `r₂ ≃r s₂`, construct a relation isomorphism for the
lexicographic orders on the product.
-/
def prod_lex_congr {α₁ α₂ β₁ β₂ r₁ r₂ s₁ s₂}
(e₁ : @rel_iso α₁ β₁ r₁ s₁) (e₂ : @rel_iso α₂ β₂ r₂ s₂) :
prod.lex r₁ r₂ ≃r prod.lex s₁ s₂ :=
⟨equiv.prod_congr e₁.to_equiv e₂.to_equiv,
λ a b, by simp [prod.lex_def, e₁.map_rel_iff, e₂.map_rel_iff]⟩
instance : group (r ≃r r) :=
{ one := rel_iso.refl r,
mul := λ f₁ f₂, f₂.trans f₁,
inv := rel_iso.symm,
mul_assoc := λ f₁ f₂ f₃, rfl,
one_mul := λ f, ext $ λ _, rfl,
mul_one := λ f, ext $ λ _, rfl,
mul_left_inv := λ f, ext f.symm_apply_apply }
@[simp] lemma coe_one : ⇑(1 : r ≃r r) = id := rfl
@[simp] lemma coe_mul (e₁ e₂ : r ≃r r) : ⇑(e₁ * e₂) = e₁ ∘ e₂ := rfl
lemma mul_apply (e₁ e₂ : r ≃r r) (x : α) : (e₁ * e₂) x = e₁ (e₂ x) := rfl
@[simp] lemma inv_apply_self (e : r ≃r r) (x) : e⁻¹ (e x) = x := e.symm_apply_apply x
@[simp] lemma apply_inv_self (e : r ≃r r) (x) : e (e⁻¹ x) = x := e.apply_symm_apply x
end rel_iso
/-- `subrel r p` is the inherited relation on a subset. -/
def subrel (r : α → α → Prop) (p : set α) : p → p → Prop :=
(coe : p → α) ⁻¹'o r
@[simp] theorem subrel_val (r : α → α → Prop) (p : set α)
{a b} : subrel r p a b ↔ r a.1 b.1 := iff.rfl
namespace subrel
/-- The relation embedding from the inherited relation on a subset. -/
protected def rel_embedding (r : α → α → Prop) (p : set α) :
subrel r p ↪r r := ⟨embedding.subtype _, λ a b, iff.rfl⟩
@[simp] theorem rel_embedding_apply (r : α → α → Prop) (p a) :
subrel.rel_embedding r p a = a.1 := rfl
instance (r : α → α → Prop) [is_well_order α r] (p : set α) : is_well_order p (subrel r p) :=
rel_embedding.is_well_order (subrel.rel_embedding r p)
instance (r : α → α → Prop) [is_refl α r] (p : set α) : is_refl p (subrel r p) :=
⟨λ x, @is_refl.refl α r _ x⟩
instance (r : α → α → Prop) [is_symm α r] (p : set α) : is_symm p (subrel r p) :=
⟨λ x y, @is_symm.symm α r _ x y⟩
instance (r : α → α → Prop) [is_trans α r] (p : set α) : is_trans p (subrel r p) :=
⟨λ x y z, @is_trans.trans α r _ x y z⟩
instance (r : α → α → Prop) [is_irrefl α r] (p : set α) : is_irrefl p (subrel r p) :=
⟨λ x, @is_irrefl.irrefl α r _ x⟩
end subrel
/-- Restrict the codomain of a relation embedding. -/
def rel_embedding.cod_restrict (p : set β) (f : r ↪r s) (H : ∀ a, f a ∈ p) : r ↪r subrel s p :=
⟨f.to_embedding.cod_restrict p H, f.map_rel_iff'⟩
@[simp] theorem rel_embedding.cod_restrict_apply (p) (f : r ↪r s) (H a) :
rel_embedding.cod_restrict p f H a = ⟨f a, H a⟩ := rfl
|
lemma smult_diff_right: "smult a (p - q) = smult a p - smult a q" for a :: "'a::comm_ring" |
-- Proof that consistent negative axioms do not jeopardize canonicity.
-- https://www.cs.bham.ac.uk/~mhe/papers/negative-axioms.pdf
{-# OPTIONS --without-K --safe #-}
module Application.NegativeAxioms.Canonicity where
open import Definition.Untyped as U
open import Definition.Typed
open import Definition.Typed.Properties
open import Definition.Typed.Weakening as T
open import Definition.Typed.Consequences.Inequality
open import Definition.Typed.Consequences.Injectivity
open import Definition.Typed.Consequences.Substitution
open import Definition.Typed.Consequences.Syntactic
open import Definition.Conversion.Consequences.Completeness
open import Definition.Conversion.FullReduction
open import Tools.Empty
open import Tools.Fin
open import Tools.Nat
open import Tools.Product
open import Tools.Sum using (_⊎_; inj₁; inj₂)
open import Tools.Unit
-- Preliminaries
---------------------------------------------------------------------------
private
Ty = Term
Cxt = Con Ty
variable
m m' : Nat
x : Fin m
ρ : Wk m m'
σ : Subst m m'
Γ Δ : Con Term m
A B C : Term m
t u : Term m
-- Numerals
data Numeral {m : Nat} : Term m → Set where
zeroₙ : Numeral zero
sucₙ : Numeral t → Numeral (suc t)
-- Negative types
---------------------------------------------------------------------------
-- A type is negative if all of its branches end in ⊥.
-- The prime example is negation ¬A.
data NegativeType (Γ : Cxt m) : Ty m → Set where
empty : NegativeType Γ Empty
pi : Γ ⊢ A
→ NegativeType (Γ ∙ A) B
→ NegativeType Γ (Π A ▹ B)
sigma : Γ ⊢ A
→ NegativeType Γ A
→ NegativeType (Γ ∙ A) B
→ NegativeType Γ (Σ A ▹ B)
conv : NegativeType Γ A
→ Γ ⊢ A ≡ B
→ NegativeType Γ B
-- Lemma: Negative types are closed under weakening.
wkNeg : ρ ∷ Δ ⊆ Γ → ⊢ Δ → NegativeType Γ A → NegativeType Δ (U.wk ρ A)
wkNeg w ⊢Δ empty
= empty
wkNeg w ⊢Δ (pi dA nB)
= pi dA' (wkNeg (lift w) (⊢Δ ∙ dA') nB)
where dA' = T.wk w ⊢Δ dA
wkNeg w ⊢Δ (sigma dA nA nB)
= sigma dA' (wkNeg w ⊢Δ nA) (wkNeg (lift w) (⊢Δ ∙ dA') nB)
where dA' = T.wk w ⊢Δ dA
wkNeg w ⊢Δ (conv n c)
= conv (wkNeg w ⊢Δ n) (wkEq w ⊢Δ c)
-- Lemma: Negative types are closed under parallel substitution.
subNeg : NegativeType Γ A → Δ ⊢ˢ σ ∷ Γ → ⊢ Δ → NegativeType Δ (subst σ A)
subNeg empty _ _ = empty
subNeg (pi ⊢A n) s ⊢Δ
= pi ⊢σA (subNeg n (liftSubst′ (wf ⊢A) ⊢Δ ⊢A s) (⊢Δ ∙ ⊢σA))
where ⊢σA = substitution ⊢A s ⊢Δ
subNeg (sigma ⊢A nA nB) s ⊢Δ
= sigma ⊢σA (subNeg nA s ⊢Δ) (subNeg nB (liftSubst′ (wf ⊢A) ⊢Δ ⊢A s) (⊢Δ ∙ ⊢σA))
where ⊢σA = substitution ⊢A s ⊢Δ
subNeg (conv n c) s ⊢Δ = conv (subNeg n s ⊢Δ) (substitutionEq c (substRefl s) ⊢Δ)
-- Corollary: Negative types are closed under single substitution.
subNeg1 : NegativeType (Γ ∙ A) B → Γ ⊢ t ∷ A → NegativeType Γ (B [ t ])
subNeg1 n ⊢t = subNeg n (singleSubst ⊢t) (wfTerm ⊢t)
-- Lemma: The first component of a negative Σ-type is negative.
fstNeg : NegativeType Γ C → Γ ⊢ C ≡ Σ A ▹ B → NegativeType Γ A
fstNeg empty c = ⊥-elim (Empty≢Σⱼ c)
fstNeg (pi _ _) c = ⊥-elim (Π≢Σ c)
fstNeg (sigma _ nA _) c = conv nA (proj₁ (Σ-injectivity c))
fstNeg (conv n c) c' = fstNeg n (trans c c')
-- Lemma: Any instance of the second component of a negative Σ-type is negative.
sndNeg : NegativeType Γ C → Γ ⊢ C ≡ Σ A ▹ B → Γ ⊢ t ∷ A → NegativeType Γ (B [ t ])
sndNeg empty c = ⊥-elim (Empty≢Σⱼ c)
sndNeg (pi _ _) c = ⊥-elim (Π≢Σ c)
sndNeg (sigma _ _ nB) c ⊢t = let (cA , cB) = Σ-injectivity c in
subNeg (conv nB cB) (singleSubst (conv ⊢t (sym cA))) (wfTerm ⊢t)
sndNeg (conv n c) c' = sndNeg n (trans c c')
-- Lemma: Any instance of the codomain of a negative Π-type is negative.
appNeg : NegativeType Γ C → Γ ⊢ C ≡ Π A ▹ B → Γ ⊢ t ∷ A → NegativeType Γ (B [ t ])
appNeg empty c = ⊥-elim (Empty≢Πⱼ c)
appNeg (sigma _ _ _) c = ⊥-elim (Π≢Σ (sym c))
appNeg (pi _ nB) c ⊢t = let (cA , cB) = injectivity c in
subNeg (conv nB cB) (singleSubst (conv ⊢t (sym cA))) (wfTerm ⊢t)
appNeg (conv n c) c' = appNeg n (trans c c')
-- Lemma: The type ℕ is not negative.
¬negℕ : NegativeType Γ C → Γ ⊢ C ≡ ℕ → ⊥
¬negℕ empty c = ℕ≢Emptyⱼ (sym c)
¬negℕ (pi _ _) c = ℕ≢Π (sym c)
¬negℕ (sigma _ _ _) c = ℕ≢Σ (sym c)
¬negℕ (conv n c) c' = ¬negℕ n (trans c c')
-- Negative contexts
---------------------------------------------------------------------------
-- A context is negative if all of its type entries are negative.
data NegativeContext : Con Ty m → Set where
ε : NegativeContext ε
_∙_ : NegativeContext Γ → NegativeType Γ A → NegativeContext (Γ ∙ A)
-- Lemma: Any entry in a negative context is a negative type (needs weakening).
lookupNegative : ⊢ Γ → NegativeContext Γ → (x ∷ A ∈ Γ) → NegativeType Γ A
lookupNegative ⊢Γ∙A (nΓ ∙ nA) here
= wkNeg (step id) ⊢Γ∙A nA
lookupNegative ⊢Γ∙A@(⊢Γ ∙ Γ⊢A) (nΓ ∙ nA) (there h)
= wkNeg (step id) ⊢Γ∙A (lookupNegative ⊢Γ nΓ h)
-- Main results
---------------------------------------------------------------------------
-- We assume a negative, consistent context.
module Main (nΓ : NegativeContext Γ) (consistent : ∀{t} → Γ ⊢ t ∷ Empty → ⊥) where
-- Lemma: A neutral has negative type in a consistent negative context.
neNeg : (d : Γ ⊢ u ∷ A) (n : NfNeutral u) → NegativeType Γ A
neNeg (var ⊢Γ h ) (var _ ) = lookupNegative ⊢Γ nΓ h
neNeg (d ∘ⱼ ⊢t ) (∘ₙ n _ ) = appNeg (neNeg d n) (refl (syntacticTerm d)) ⊢t
neNeg (fstⱼ ⊢A A⊢B d ) (fstₙ n ) = fstNeg (neNeg d n) (refl (Σⱼ ⊢A ▹ A⊢B))
neNeg (sndⱼ ⊢A A⊢B d ) (sndₙ n ) = sndNeg (neNeg d n) (refl (Σⱼ ⊢A ▹ A⊢B)) (fstⱼ ⊢A A⊢B d)
neNeg (natrecⱼ _ _ _ d) (natrecₙ _ _ _ n ) = ⊥-elim (¬negℕ (neNeg d n) ⊢ℕ) where ⊢ℕ = refl (ℕⱼ (wfTerm d))
neNeg (Emptyrecⱼ _ d ) (Emptyrecₙ _ _ ) = ⊥-elim (consistent d)
neNeg (conv d c ) n = conv (neNeg d n) c
-- Lemma: A normal form of type ℕ is a numeral in a consistent negative context.
nfN : (d : Γ ⊢ u ∷ A)
→ (n : Nf u)
→ (c : Γ ⊢ A ≡ ℕ)
→ Numeral u
-- Case: neutrals. The type cannot be ℕ since it must be negative.
nfN d (ne n) c = ⊥-elim (¬negℕ (neNeg d n) c)
-- Case: numerals.
nfN (zeroⱼ x) zeroₙ c = zeroₙ
nfN (sucⱼ d) (sucₙ n) c = sucₙ (nfN d n c)
-- Case: conversion.
nfN (conv d c) n c' = nfN d n (trans c c')
-- Impossible cases: type is not ℕ.
-- * Canonical types
nfN (Πⱼ _ ▹ _) (Πₙ _ _) c = ⊥-elim (U≢ℕ c)
nfN (Σⱼ _ ▹ _) (Σₙ _ _) c = ⊥-elim (U≢ℕ c)
nfN (ℕⱼ _) ℕₙ c = ⊥-elim (U≢ℕ c)
nfN (Emptyⱼ _) Emptyₙ c = ⊥-elim (U≢ℕ c)
nfN (Unitⱼ _) Unitₙ c = ⊥-elim (U≢ℕ c)
-- * Canonical forms
nfN (lamⱼ _ _) (lamₙ _) c = ⊥-elim (ℕ≢Π (sym c))
nfN (prodⱼ _ _ _ _) (prodₙ _ _) c = ⊥-elim (ℕ≢Σ (sym c))
nfN (starⱼ _) starₙ c = ⊥-elim (ℕ≢Unitⱼ (sym c))
-- q.e.d
-- Canonicity theorem: Any well-typed term Γ ⊢ t : ℕ is convertible to a numeral.
thm : (⊢t : Γ ⊢ t ∷ ℕ) → ∃ λ u → Numeral u × Γ ⊢ t ≡ u ∷ ℕ
thm ⊢t with fullRedTerm (completeEqTerm (refl ⊢t))
... | u , nf , eq = u , nfN (proj₂ (proj₂ (syntacticEqTerm eq))) nf (refl (ℕⱼ (wfTerm ⊢t))) , eq
-- Q.E.D. 2021-05-27
|
Hellblazer boosted the popularity and image of the occult detective fiction genre and shaped it into its modern form. Many modern examples of the genre, such as Hellboy, Supernatural, Grimm, The Originals, and The Dresden Files, have been influenced by it, and many imitators of both the series and its character flourished, such as Criminal Macabre, Gravel, Planetary, and others. Its elements and style have been used countless times in other works, and many analogues of the cynical John Constantine have appeared.
|
Formal statement is: lemma real_polynomial_function_separable: fixes x :: "'a::euclidean_space" assumes "x \<noteq> y" shows "\<exists>f. real_polynomial_function f \<and> f x \<noteq> f y" Informal statement is: If $x$ and $y$ are distinct points in $\mathbb{R}^n$, then there exists a polynomial function $f$ such that $f(x) \neq f(y)$. |
theory Nielson_VCGi
imports Nielson_Hoare "Vars"
begin
subsection "Optimized Verification Condition Generator"
text\<open>Annotated commands: commands where loops are annotated with invariants.\<close>
datatype acom =
Askip ("SKIP") |
Aassign vname aexp ("(_ ::= _)" [1000, 61] 61) |
Aseq acom acom ("_;;/ _" [60, 61] 60) |
Aif bexp acom acom ("(IF _/ THEN _/ ELSE _)" [0, 0, 61] 61) |
Aconseq "assn2*(vname set)" "assn2*(vname set)" "tbd * (vname set)" acom
("({_'/_'/_}/ CONSEQ _)" [0, 0, 0, 61] 61)|
Awhile "(assn2*(vname set))*((state\<Rightarrow>state)*(tbd*((vname set*(vname \<Rightarrow> vname set)))))" bexp acom ("({_}/ WHILE _/ DO _)" [0, 0, 61] 61)
notation com.SKIP ("SKIP")
text\<open>Strip annotations:\<close>
fun strip :: "acom \<Rightarrow> com" where
"strip SKIP = SKIP" |
"strip (x ::= a) = (x ::= a)" |
"strip (C\<^sub>1;; C\<^sub>2) = (strip C\<^sub>1;; strip C\<^sub>2)" |
"strip (IF b THEN C\<^sub>1 ELSE C\<^sub>2) = (IF b THEN strip C\<^sub>1 ELSE strip C\<^sub>2)" |
"strip ({_/_/_} CONSEQ C) = strip C" |
"strip ({_} WHILE b DO C) = (WHILE b DO strip C)"
text "support of an expression"
definition supportE :: "((char list \<Rightarrow> nat) \<Rightarrow> (char list \<Rightarrow> int) \<Rightarrow> nat) \<Rightarrow> string set" where
"supportE P = {x. \<exists>l1 l2 s. (\<forall>y. y \<noteq> x \<longrightarrow> l1 y = l2 y) \<and> P l1 s \<noteq> P l2 s}"
lemma expr_lupd: "x \<notin> supportE Q \<Longrightarrow> Q (l(x:=n)) = Q l"
by(simp add: supportE_def fun_upd_other fun_eq_iff)
(metis (no_types, lifting) fun_upd_def)
fun varacom :: "acom \<Rightarrow> lvname set" where
"varacom (C\<^sub>1;; C\<^sub>2)= varacom C\<^sub>1 \<union> varacom C\<^sub>2"
| "varacom (IF b THEN C\<^sub>1 ELSE C\<^sub>2)= varacom C\<^sub>1 \<union> varacom C\<^sub>2"
| "varacom ({(P,_)/(Qannot,_)/_} CONSEQ C)= support P \<union> varacom C \<union> support Qannot"
| "varacom ({((I,_),(S,(E,Es)))} WHILE b DO C) = support I \<union> varacom C "
| "varacom _ = {}"
fun varnewacom :: "acom \<Rightarrow> lvname set" where
"varnewacom (C\<^sub>1;; C\<^sub>2)= varnewacom C\<^sub>1 \<union> varnewacom C\<^sub>2"
| "varnewacom (IF b THEN C\<^sub>1 ELSE C\<^sub>2)= varnewacom C\<^sub>1 \<union> varnewacom C\<^sub>2"
| "varnewacom ({_/_/_} CONSEQ C)= varnewacom C"
| "varnewacom ({(I,(S,(E,Es)))} WHILE b DO C) = varnewacom C"
| "varnewacom _ = {}"
lemma finite_varnewacom: "finite (varnewacom C)"
by (induct C) (auto)
fun wf :: "acom \<Rightarrow> lvname set \<Rightarrow> bool" where
"wf SKIP _ = True" |
"wf (x ::= a) _ = True" |
"wf (C\<^sub>1;; C\<^sub>2) S = (wf C\<^sub>1 (S \<union> varnewacom C\<^sub>2) \<and> wf C\<^sub>2 S)" |
"wf (IF b THEN C\<^sub>1 ELSE C\<^sub>2) S = (wf C\<^sub>1 S \<and> wf C\<^sub>2 S)" |
"wf ({_/(Qannot,_)/_} CONSEQ C) S = (finite (support Qannot) \<and> wf C S)" |
"wf ({(_,(_,(_,Es)))} WHILE b DO C) S = ( wf C S)"
text\<open>Weakest precondition from annotated commands:\<close>
fun preT :: "acom \<Rightarrow> tbd \<Rightarrow> tbd" where
"preT SKIP e = e" |
"preT (x ::= a) e = (\<lambda>s. e(s(x := aval a s)))" |
"preT (C\<^sub>1;; C\<^sub>2) e = preT C\<^sub>1 (preT C\<^sub>2 e)" |
"preT ({_/_/_} CONSEQ C) e = preT C e" |
"preT (IF b THEN C\<^sub>1 ELSE C\<^sub>2) e =
(\<lambda>s. if bval b s then preT C\<^sub>1 e s else preT C\<^sub>2 e s)" |
"preT ({(_,(S,_))} WHILE b DO C) e = e o S"
lemma preT_constant: "preT C (%_. a) = (%_. a)"
by(induct C, auto)
lemma preT_linear: "preT C (%s. k * e s) = (%s. k * preT C e s)"
by (induct C arbitrary: e, auto)
fun postQ :: "acom \<Rightarrow> state \<Rightarrow> state" where (* seems to be forward?! *)
"postQ SKIP s = s" |
"postQ (x ::= a) s = s(x := aval a s)" |
"postQ (C\<^sub>1;; C\<^sub>2) s = postQ C\<^sub>2 (postQ C\<^sub>1 s)" |
"postQ ({_/_/_} CONSEQ C) s = postQ C s" |
"postQ (IF b THEN C\<^sub>1 ELSE C\<^sub>2) s =
(if bval b s then postQ C\<^sub>1 s else postQ C\<^sub>2 s)" |
"postQ ({(_,(S,_))} WHILE b DO C) s = S s"
(* Function that, given a command C and a set of variables LV,
   gives a set of variables S that the final values of LV depend on,
   i.e. if s1 = s2 on S then \<forall>x\<in>LV. postQ C s1 x = postQ C s2 x
 *)
fun fune :: "acom \<Rightarrow> vname set \<Rightarrow> vname set" where
"fune SKIP LV = LV" |
"fune (x ::= a) LV = LV \<union> vars a" |
"fune (C\<^sub>1;; C\<^sub>2) LV = fune C\<^sub>1 (fune C\<^sub>2 LV)" |
"fune ({_/_/_} CONSEQ C) LV = fune C LV" |
"fune (IF b THEN C\<^sub>1 ELSE C\<^sub>2) LV = vars b \<union> fune C\<^sub>1 LV \<union> fune C\<^sub>2 LV" |
"fune ({(_,(S,(E,Es,SS)))} WHILE b DO C) LV = (\<Union>x\<in>LV. SS x)"
lemma fune_mono: "A \<subseteq> B \<Longrightarrow> fune C A \<subseteq> fune C B"
proof(induct C arbitrary: A B)
case (Awhile x1 x2 C)
obtain a b c d e f where a: "x1 = (a,b,c,d,e)" using prod_cases5 by blast
from Awhile show ?case unfolding a by(auto)
qed (auto simp add: le_supI1 le_supI2)
lemma TQ: "preT C e s = e (postQ C s)"
apply(induct C arbitrary: e s) by (auto)
(* Given a state, how often will a while loop be iterated? *)
function (domintros) times :: "state \<Rightarrow> bexp \<Rightarrow> acom \<Rightarrow> nat" where
"times s b C = (if bval b s then Suc (times (postQ C s) b C) else 0)"
apply(auto) done
lemma assumes I: "I z s" and
i: "\<And>s z. I (Suc z) s \<Longrightarrow> bval b s \<and> I z (postQ C s)"
and ii: "\<And>s. I 0 s \<Longrightarrow> ~ bval b s"
shows times_z: "times s b C = z"
proof -
have "I z s \<Longrightarrow> times_dom (s, b, C) \<and> times s b C = z"
proof(induct z arbitrary: s)
case 0
have A: "times_dom (s, b, C)"
apply(rule times.domintros)
apply(simp add: ii[OF 0] ) done
have B: "times s b C = 0"
using times.psimps[OF A] by(simp add: ii[OF 0])
show ?case using A B by simp
next
case (Suc z)
from i[OF Suc(2)] have bv: "bval b s"
and g: "I z (postQ C s)" by simp_all
from Suc(1)[OF g] have p1: "times_dom (postQ C s, b, C)"
and p2: "times (postQ C s) b C = z" by simp_all
have A: "times_dom (s, b, C)"
apply(rule times.domintros) apply(rule p1) done
have B: "times s b C = Suc z"
using times.psimps[OF A] bv p2 by simp
show ?case using A B by simp
qed
then show "times s b C = z" using I by simp
qed
fun postQz :: "acom \<Rightarrow> state \<Rightarrow> nat \<Rightarrow> state" where
"postQz C s 0 = s" |
"postQz C s (Suc n) = (postQz C (postQ C s) n)"
fun preTz :: "acom \<Rightarrow> tbd \<Rightarrow> nat \<Rightarrow> tbd" where
"preTz C e 0 = e" |
"preTz C e (Suc n) = preT C (preTz C e n)"
lemma TzQ: "preTz C e n s = e (postQz C s n)"
by (induct n arbitrary: s, simp_all add: TQ)
text\<open>Weakest precondition from annotated commands:\<close>
(* If the annotated command contains no loops,
   then the weakest precondition is just the postcondition transformed by the commands;
   otherwise, the weakest precondition is essentially the annotated invariant
   of the first while loop, transformed by the commands preceding the loop. *)
fun pre :: "acom \<Rightarrow> assn2 \<Rightarrow> assn2" where
"pre SKIP Q = Q" |
"pre (x ::= a) Q = (\<lambda>l s. Q l (s(x := aval a s)))" |
"pre (C\<^sub>1;; C\<^sub>2) Q = pre C\<^sub>1 (pre C\<^sub>2 Q)" |
"pre ({(P',Ps)/_/_} CONSEQ C) Q = P'" |
"pre (IF b THEN C\<^sub>1 ELSE C\<^sub>2) Q =
(\<lambda>l s. if bval b s then pre C\<^sub>1 Q l s else pre C\<^sub>2 Q l s)" |
"pre ({((I,Is),(S,(E,Es,SS)))} WHILE b DO C) Q = I"
fun qdeps :: "acom \<Rightarrow> vname set \<Rightarrow> vname set" where
"qdeps SKIP LV = LV" |
"qdeps (x ::= a) LV = LV \<union> vars a" |
"qdeps (C\<^sub>1;; C\<^sub>2) LV = qdeps C\<^sub>1 (qdeps C\<^sub>2 LV)" |
"qdeps ({(P',Ps)/_/_} CONSEQ C) _ = Ps" | (* the variables P' depends on *)
"qdeps (IF b THEN C\<^sub>1 ELSE C\<^sub>2) LV = vars b \<union> qdeps C\<^sub>1 LV \<union> qdeps C\<^sub>2 LV" |
"qdeps ({((I,Is),(S,(E,x,Es)))} WHILE b DO C) _ = Is" (* the variables I depends on *)
lemma qdeps_mono: "A \<subseteq> B \<Longrightarrow> qdeps C A \<subseteq> qdeps C B"
by (induct C arbitrary: A B, auto simp: le_supI1 le_supI2)
lemma supportE_if: "supportE (\<lambda>l s. if b s then A l s else B l s)
\<subseteq> supportE A \<union> supportE B"
unfolding supportE_def apply(auto)
by metis+
lemma supportE_preT: "supportE (%l. preT C (e l)) \<subseteq> supportE e"
proof(induct C arbitrary: e)
case (Aif b C1 C2 e)
show ?case
apply(simp)
apply(rule subset_trans[OF supportE_if])
using Aif by fast
next
case (Awhile A y C e)
obtain I S E x where A: "A= (I,S,E,x)" using prod_cases4 by blast
show ?case using A apply(simp) unfolding supportE_def
by blast
next
case (Aseq)
then show ?case by force
qed (simp_all add: supportE_def, blast)
lemma supportE_twicepreT: "supportE (%l. preT C1 (preT C2 (e l))) \<subseteq> supportE e"
by (rule subset_trans[OF supportE_preT supportE_preT])
lemma supportE_preTz: "supportE (%l. preTz C (e l) n) \<subseteq> supportE e"
proof (induct n)
case (Suc n)
show ?case
apply(simp)
apply(rule subset_trans[OF supportE_preT])
by fact
qed simp
lemma supportE_preTz_Un: (* like in support_wpw_Un *)
"supportE (\<lambda>l. preTz C (e l) (l x)) \<subseteq> insert x (UN n. supportE (\<lambda>l. preTz C (e l) n))"
apply(auto simp add: supportE_def subset_iff)
apply metis
done
lemma support_eq: "support (\<lambda>l s. l x = E l s) \<subseteq> supportE E \<union> {x}"
unfolding support_def supportE_def
apply(auto)
apply blast
by metis
lemma support_impl_in: "G e \<longrightarrow> support (\<lambda>l s. H e l s) \<subseteq> T
\<Longrightarrow> support (\<lambda>l s. G e \<longrightarrow> H e l s) \<subseteq> T"
unfolding support_def apply(auto)
apply blast+ done
lemma support_supportE: "\<And>P e. support (\<lambda>l s. P (e l) s) \<subseteq> supportE e"
unfolding support_def supportE_def
apply(rule subsetI)
apply(simp)
proof (clarify, goal_cases)
case (1 P e x l1 l2 s)
have P: "\<forall>s. e l1 s = e l2 s \<Longrightarrow> e l1 = e l2" by fast
show "\<exists>l1 l2. (\<forall>y. y \<noteq> x \<longrightarrow> l1 y = l2 y) \<and> (\<exists>s. e l1 s \<noteq> e l2 s)"
apply(rule exI[where x=l1])
apply(rule exI[where x=l2])
apply(safe)
using 1 apply blast
apply(rule ccontr)
apply(simp)
using 1(2) P by force
qed
lemma support_pre: "support (pre C Q) \<subseteq> support Q \<union> varacom C"
proof (induct C arbitrary: Q)
case (Awhile A b C Q)
obtain I2 S E Es SS where A: "A= (I2,(S,(E,Es,SS)))" using prod_cases5 by blast
obtain I Is where "I2=(I,Is)" by fastforce
note A=this A
have support_inv: "\<And>P. support (\<lambda>l s. P s) = {}"
unfolding support_def by blast
show ?case unfolding A by(auto)
next
case (Aseq C1 C2)
then show ?case by(auto)
next
case (Aif x C1 C2 Q)
have s1: "support (\<lambda>l s. bval x s \<longrightarrow> pre C1 Q l s) \<subseteq> support Q \<union> varacom C1"
apply(rule subset_trans[OF support_impl]) by(rule Aif)
have s2: "support (\<lambda>l s. ~ bval x s \<longrightarrow> pre C2 Q l s) \<subseteq> support Q \<union> varacom C2"
apply(rule subset_trans[OF support_impl]) by(rule Aif)
show ?case apply(simp)
apply(rule subset_trans[OF support_and])
using s1 s2 by blast
next
case (Aconseq x1 x2 x3 C)
obtain a b c d e f where "x1=(a,b)" "x2=(c,d)" "x3=(e,f)" by force
with Aconseq show ?case by auto
qed (auto simp add: support_def)
lemma finite_support_pre: "finite (support Q) \<Longrightarrow> finite (varacom C) \<Longrightarrow> finite (support (pre C Q))"
using finite_subset support_pre finite_UnI by metis
fun time :: "acom \<Rightarrow> tbd" where
"time SKIP = (%s. Suc 0)" |
"time (x ::= a) = (%s. Suc 0)" |
"time (C\<^sub>1;; C\<^sub>2) = (%s. time C\<^sub>1 s + preT C\<^sub>1 (time C\<^sub>2) s)" |
"time ({_/_/(e,es)} CONSEQ C) = e" |
"time (IF b THEN C\<^sub>1 ELSE C\<^sub>2) =
(\<lambda>s. if bval b s then 1 + time C\<^sub>1 s else 1 + time C\<^sub>2 s)" |
"time ({(_,(E',(E,x)))} WHILE b DO C) = E"
(* the set of variables the running time depends on, i.e. a set such that if s1 and s2
   agree on it, then time C s1 = time C s2 *)
fun kdeps :: "acom \<Rightarrow> vname set" where
"kdeps SKIP = {}" |
"kdeps (x ::= a) = {}" |
"kdeps (C\<^sub>1;; C\<^sub>2) = kdeps C\<^sub>1 \<union> fune C\<^sub>1 (kdeps C\<^sub>2)" |
"kdeps (IF b THEN C\<^sub>1 ELSE C\<^sub>2) = vars b \<union> kdeps C\<^sub>1 \<union> kdeps C\<^sub>2" |
"kdeps ({(_,(E',(E,Es,SS)))} WHILE b DO C) = Es" |
"kdeps ({_/_/(e,es)} CONSEQ C) = es"
lemma supportE_single: "supportE (\<lambda>l s. P) = {}"
unfolding supportE_def by blast
lemma supportE_plus: "supportE (\<lambda>l s. e1 l s + e2 l s) \<subseteq> supportE e1 \<union> supportE e2"
unfolding supportE_def apply(auto)
by metis
lemma supportE_Suc: "supportE (\<lambda>l s. Suc (e1 l s)) = supportE e1"
unfolding supportE_def by (auto)
lemma supportE_single2: "supportE (\<lambda>l . P) = {}"
unfolding supportE_def by blast
lemma supportE_time: "supportE (\<lambda>l. time C) = {}"
using supportE_single2 by simp
lemma "\<And>s. (\<forall>l. I (l(x:=0)) s) = (\<forall>l. l x = 0 \<longrightarrow> I l s)"
apply(auto)
by (metis fun_upd_triv)
lemma "\<And>s. (\<forall>l. I (l(x:=Suc (l x))) s) = (\<forall>l. (\<exists>n. l x = Suc n) \<longrightarrow> I l s)"
apply(auto)
proof (goal_cases)
case (1 s l n)
then have "\<And>l. I (l(x := Suc (l x))) s" by simp
from this[where l="l(x:=n)"]
have "I ((l(x:=n))(x := Suc ((l(x:=n)) x))) s" by simp
then show ?case using 1(2) apply(simp)
by (metis fun_upd_triv)
qed
text\<open>Verification condition:\<close>
definition funStar where "funStar f = (%x. {y. (x,y)\<in>{(x,y). y\<in>f x}\<^sup>*})"
lemma funStart_prop1: "x \<in> (funStar f) x" unfolding funStar_def by auto
lemma funStart_prop2: "f x \<subseteq> (funStar f) x" unfolding funStar_def by auto
fun vc :: "acom \<Rightarrow> assn2 \<Rightarrow> vname set \<Rightarrow> vname set \<Rightarrow> bool" where
"vc SKIP Q _ _ = True" |
"vc (x ::= a) Q _ _ = True" |
"vc (C\<^sub>1 ;; C\<^sub>2) Q LVQ LVE = ((vc C\<^sub>1 (pre C\<^sub>2 Q) (qdeps C\<^sub>2 LVQ) (fune C\<^sub>2 LVE \<union> kdeps C\<^sub>2)) \<and> (vc C\<^sub>2 Q LVQ LVE) )" |
"vc (IF b THEN C\<^sub>1 ELSE C\<^sub>2) Q LVQ LVE = (vc C\<^sub>1 Q LVQ LVE \<and> vc C\<^sub>2 Q LVQ LVE)" |
"vc ({(P',Ps)/(Q,Qs)/(e',es)} CONSEQ C) Q' LVQ LVE = (vc C Q Qs LVE \<comment> \<open>evtl \<open>LV\<close> weglassen - glaub eher nicht\<close>
\<and> (\<forall>s1 s2 l. (\<forall>x\<in>Ps. s1 x=s2 x) \<longrightarrow> P' l s1 = P' l s2) \<comment> \<open>annotation \<open>Ps\<close> (the set of variables \<open>P'\<close> depends on) is correct\<close>
\<and> (\<forall>s1 s2 l. (\<forall>x\<in>Qs. s1 x=s2 x) \<longrightarrow> Q l s1 = Q l s2) \<comment> \<open>annotation \<open>Qs\<close> (the set of variables \<open>Q\<close> depends on) is correct\<close>
\<and> (\<forall>s1 s2. (\<forall>x\<in>es. s1 x=s2 x) \<longrightarrow> e' s1 = e' s2) \<comment> \<open>annotation \<open>es\<close> (the set of variables \<open>e'\<close> depends on) is correct\<close>
\<and> (\<exists>k>0. (\<forall>l s. P' l s \<longrightarrow> time C s \<le> k * e' s \<and> (\<forall>t. \<exists>l'. (pre C Q) l' s \<and> ( Q l' t \<longrightarrow> Q' l t) ))))" |
"vc ({((I,Is),(S,(E,es,SS)))} WHILE b DO C) Q LVQ LVE = ((\<forall>s1 s2 l. (\<forall>x\<in>Is. s1 x = s2 x) \<longrightarrow> I l s1 = I l s2) \<comment> \<open>annotation \<open>Is\<close> is correct\<close>
\<and> (\<forall>y\<in>LVE \<union> LVQ. (let Ss=SS y in (\<forall>s1 s2. (\<forall>x\<in>Ss. s1 x = s2 x) \<longrightarrow> (S s1) y = (S s2) y))) \<comment> \<open>annotation \<open>SS\<close> is correct, for only one step\<close>
\<and> (\<forall>s1 s2. (\<forall>x\<in>es. s1 x=s2 x) \<longrightarrow> E s1 = E s2) \<comment> \<open>annotation \<open>es\<close> (the set of variables \<open>E\<close> depends on) is correct\<close>
\<and> (\<forall>l s. (I l s \<and> bval b s \<longrightarrow> pre C I l s \<and> E s \<ge> 1 + preT C E s + time C s
\<and> (\<forall>v\<in>(\<Union>y\<in>LVE \<union> LVQ. (funStar SS) y). (S s) v = (S (postQ C s)) v) ) \<and>
(I l s \<and> \<not> bval b s \<longrightarrow> Q l s \<and> E s \<ge> 1 \<and> (\<forall>v\<in>(\<Union>y\<in>LVE \<union> LVQ. (funStar SS) y). (S s) v = s v)) ) \<and>
vc C I Is (es \<union> (\<Union>y\<in>LVE. (funStar SS) y)))"
subsubsection \<open>Soundness:\<close>
abbreviation "preSet U C l s == (Ball U (%u. case u of (x,e,v) \<Rightarrow> l x = preT C e s))"
abbreviation "postSet U l s == (Ball U (%u. case u of (x,e,v) \<Rightarrow> l x = e s))"
fun ListUpdate where
"ListUpdate f [] l = f"
| "ListUpdate f ((x,e,v)#xs) q = (ListUpdate f xs q)(x:=q e x)"
lemma allg:
assumes U2: "\<And>l s n x. x\<in> fst ` upds \<Longrightarrow> A (l(x := n)) = A l"
shows
"fst ` set xs \<subseteq> fst ` upds \<Longrightarrow> A (ListUpdate l'' xs q) = A l''"
proof (induct xs)
case (Cons a xs)
obtain x e v where axe: "a = (x,e,v)"
using prod_cases3 by blast
have "A (ListUpdate l'' (a # xs) q)
= A ((ListUpdate l'' xs q)(x := q e x)) " unfolding axe by(simp)
also have
"\<dots> = A (ListUpdate l'' xs q) "
apply(rule U2)
using Cons axe by force
also have "\<dots> = A l'' "
using Cons by force
finally show ?case .
qed simp
fun ListUpdateE where
"ListUpdateE f [] = f"
| "ListUpdateE f ((x,e,v)#xs) = (ListUpdateE f xs )(x:=e)"
lemma ListUpdate_E: "ListUpdateE f xs = ListUpdate f xs (%e x. e)"
apply(induct xs) apply(simp_all)
subgoal for a xs apply(cases a) apply(simp) done
done
lemma allg_E: fixes A::assn2
assumes
" (\<And>l s n x. x \<in> fst ` upds \<Longrightarrow> A (l(x := n)) = A l)" "fst ` set xs \<subseteq> fst ` upds"
shows "A (ListUpdateE f xs) = A f"
proof -
have " A (ListUpdate f xs (%e x. e)) = A f"
apply(rule allg)
apply fact+ done
then show ?thesis by(simp only: ListUpdate_E)
qed
lemma ListUpdateE_updates: "distinct (map fst xs) \<Longrightarrow> x \<in> set xs \<Longrightarrow> ListUpdateE l'' xs (fst x) = fst (snd x)"
proof (induct xs)
case Nil
then show ?case apply(simp) done
next
case (Cons a xs)
show ?case
proof (cases "fst a = fst x")
case True
then obtain y e v where a: "a=(y,e,v)"
using prod_cases3 by blast
with True have fstx: "fst x=y" by simp
from Cons(2,3) fstx a have a2: "x=a"
by force
show ?thesis unfolding a2 a by(simp)
next
case False
with Cons(3) have A: "x\<in>set xs" by auto
then obtain y e v where a: "a=(y,e,v)"
using prod_cases3 by blast
from Cons(2) have B: "distinct (map fst xs)" by simp
from Cons(1)[OF B A] False
show ?thesis unfolding a by(simp)
qed
qed
lemma ListUpdate_updates: "x \<in> fst ` (set xs) \<Longrightarrow> ListUpdate l'' xs (%e. l) x = l x"
proof(induct xs)
case Nil
then show ?case by(simp)
next
case (Cons a xs)
obtain q p v where axe: "a = (p,q,v)"
using prod_cases3 by blast
from Cons show ?case unfolding axe
apply(cases "x=p")
by(simp_all)
qed
abbreviation "lesvars xs == fst ` (set xs)"
fun preList where
"preList [] C l s = True"
| "preList ((x,(e,v))#xs) C l s = (l x = preT C e s \<and> preList xs C l s)"
lemma preList_Seq: "preList upds (C1;; C2) l s = preList (map (\<lambda>(x, e, v). (x, preT C2 e, fune C2 v)) upds) C1 l s"
proof (induct upds)
case Nil
then show ?case by simp
next
case (Cons a xs)
obtain y e v where a: "a=(y,(e,v))"
using prod_cases3 by blast
from Cons show ?case unfolding a by (simp)
qed
lemma preSetpreList: "preList xs C l s \<Longrightarrow> preSet (set xs) C l s"
proof (induct xs)
case (Cons a xs)
obtain y e v where a: "a=(y,(e,v))"
using prod_cases3 by blast
from Cons show ?case unfolding a
by(simp)
qed simp
(* Surprising at first, but it makes sense: if the clauses on the left side are
   contradictory, then so are the ones on the right side. *)
lemma preSetpreList_eq: "preList xs C l s = preSet (set xs) C l s"
proof (induct xs)
case (Cons a xs)
obtain y e v where a: "a=(y,(e,v))"
using prod_cases3 by blast
from Cons show ?case unfolding a
by(simp)
qed simp
fun postList where
"postList [] l s = True"
| "postList ((x,e,v)#xs) l s = (l x = e s \<and> postList xs l s)"
lemma "postList xs l s = (foldr (\<lambda>(x,e,v) acc l s. l x = e s \<and> acc l s) xs (%l s. True)) l s"
apply(induct xs) apply(simp) by (auto)
lemma support_postList: "support (postList xs) \<subseteq> lesvars xs"
proof (induct xs)
case (Cons a xs)
obtain y e v where a: "a=(y,(e,v))"
using prod_cases3 by blast
from Cons show ?case unfolding a
apply(simp) apply(rule subset_trans[OF support_and])
apply(rule Un_least)
subgoal apply(rule subset_trans[OF support_eq])
using supportE_twicepreT subset_trans supportE_single2 by simp
subgoal by(auto)
done
qed simp
lemma postList_preList: "postList (map (\<lambda>(x, e, v). (x, preT C2 e, fune C2 v)) upds) l s = preList upds C2 l s"
proof (induct upds)
case (Cons a xs)
obtain y e v where a: "a=(y,(e,v))"
using prod_cases3 by blast
from Cons show ?case unfolding a
by(simp)
qed simp
lemma postSetpostList: "postList xs l s \<Longrightarrow> postSet (set xs) l s"
proof (induct xs)
case (Cons a xs)
obtain y e v where a: "a=(y,(e,v))"
using prod_cases3 by blast
from Cons show ?case unfolding a
by(simp)
qed simp
lemma postListpostSet: "postSet (set xs) l s \<Longrightarrow> postList xs l s"
proof (induct xs)
case (Cons a xs)
obtain y e v where a: "a=(y,(e,v))"
using prod_cases3 by blast
from Cons show ?case unfolding a
by(simp)
qed simp
lemma postListpostSet2: " postList xs l s = postSet (set xs) l s "
using postListpostSet postSetpostList by metis
lemma ListAskip: "preList xs Askip l s = postList xs l s"
apply(induct xs)
apply(simp) by force
lemma SetAskip: "preSet U Askip l s = postSet U l s"
by simp
lemma ListAassign: "preList upds (Aassign x1 x2) l s = postList upds l (s[x2/x1])"
apply(induct upds)
apply(simp) by force
lemma SetAassign: "preSet U (Aassign x1 x2) l s = postSet U l (s[x2/x1])"
by simp
lemma ListAconseq: "preList upds (Aconseq x1 x2 x3 C) l s = preList upds C l s"
apply(induct upds)
apply(simp) by force
lemma SetAconseq: "preSet U (Aconseq x1 x2 x3 C) l s = preSet U C l s"
by simp
lemma ListAif1: "bval b s \<Longrightarrow> preList upds (IF b THEN C1 ELSE C2) l s = preList upds C1 l s"
apply(induct upds)
apply(simp) by force
lemma SetAif1: "bval b s \<Longrightarrow> preSet upds (IF b THEN C1 ELSE C2) l s = preSet upds C1 l s"
apply(simp) done
lemma ListAif2: "~ bval b s \<Longrightarrow> preList upds (IF b THEN C1 ELSE C2) l s = preList upds C2 l s"
apply(induct upds)
apply(simp) by force
lemma SetAif2: "~ bval b s \<Longrightarrow> preSet upds (IF b THEN C1 ELSE C2) l s = preSet upds C2 l s"
apply(simp) done
definition K where "K C LVQ Q == (\<forall>l s1 s2. s1 = s2 on qdeps C LVQ \<longrightarrow> pre C Q l s1 = pre C Q l s2)"
definition K2 where "K2 C e Es Q == (\<forall>s1 s2. s1 = s2 on fune C Es \<longrightarrow> preT C e s1 = preT C e s2)"
definition K3 where "K3 upds C Q = (\<forall>(a,b,c)\<in>set upds. K2 C b c Q)"
definition K4 where "K4 upds LV C Q = (K C LV Q \<and> K3 upds C Q \<and> (\<forall>s1 s2. s1 = s2 on kdeps C \<longrightarrow> time C s1 = time C s2))"
lemma k4If: "K4 upds LVQ C1 Q \<Longrightarrow> K4 upds LVQ C2 Q \<Longrightarrow> K4 upds LVQ (IF b THEN C1 ELSE C2) Q"
proof -
have fl: "\<And>A B s1 s2. A \<subseteq> B \<Longrightarrow> s1 = s2 on B \<Longrightarrow> s1 = s2 on A" by auto
assume "K4 upds LVQ C1 Q" "K4 upds LVQ C2 Q"
then show "K4 upds LVQ (IF b THEN C1 ELSE C2) Q"
unfolding K4_def K_def K3_def K2_def using bval_eq_if_eq_on_vars fl apply auto
apply blast+ done
qed
subsubsection "Soundness"
lemma vc_sound: "vc C Q LVQ LVE \<Longrightarrow> finite (support Q)
\<Longrightarrow> fst ` (set upds) \<inter> varacom C = {} \<Longrightarrow> distinct (map fst upds)
\<Longrightarrow> finite (varacom C)
\<Longrightarrow> (\<forall>l s1 s2. s1 = s2 on LVQ \<longrightarrow> Q l s1 = Q l s2)
\<Longrightarrow> (\<forall>l s1 s2. s1 = s2 on LVE \<longrightarrow> postList upds l s1 = postList upds l s2)
\<Longrightarrow> (\<forall>(a,b,c)\<in>set upds. (\<forall>s1 s2. s1 = s2 on c \<longrightarrow> b s1 = b s2)) \<comment> \<open>\<open>c\<close> really is the set of variables that \<open>b\<close> depends on\<close>
\<Longrightarrow> (\<Union>(a,b,c)\<in>set upds. c) \<subseteq> LVE \<comment> \<open>\<open>LVE\<close> contains all the variables that the expressions in \<open>upds\<close> depend on\<close>
\<Longrightarrow> \<turnstile>\<^sub>1 {%l s. pre C Q l s \<and> preList upds C l s} strip C { time C \<Down> %l s. Q l s \<and> postList upds l s}
\<and> ((\<forall>l s. pre C Q l s \<longrightarrow> Q l (postQ C s)) \<and> K4 upds LVQ C Q)"
proof(induction C arbitrary: Q upds LVE LVQ)
case (Askip Q upds)
then show ?case unfolding K4_def K_def K3_def K2_def
apply(auto)
apply(rule weaken_post[where Q="%l s. Q l s \<and> preList upds Askip l s"])
apply(simp add: Skip) using ListAskip
by fast
next
case (Aassign x1 x2 Q upds)
then show ?case unfolding K_def apply(safe) apply(auto simp add: Assign)[1]
apply(rule weaken_post[where Q="%l s. Q l s \<and> postList upds l s"])
apply(simp only: ListAassign)
apply(rule Assign) apply simp
apply(simp only: postQ.simps pre.simps) apply(auto)
unfolding K4_def K2_def K3_def K_def by (auto)
next
case (Aif b C1 C2 Q upds )
from Aif(3) have 1: "vc C1 Q LVQ LVE" and 2: "vc C2 Q LVQ LVE" by auto
have T: "\<And>l s. pre C1 Q l s \<Longrightarrow> bval b s \<Longrightarrow> Q l (postQ C1 s)"
and kT: "K4 upds LVQ C1 Q"
using Aif(1)[OF 1 Aif(4) _ Aif(6)] Aif(5-11) by auto
have F: "\<And>l s. pre C2 Q l s \<Longrightarrow> \<not> bval b s \<Longrightarrow> Q l (postQ C2 s)"
and kF: "K4 upds LVQ C2 Q"
using Aif(2)[OF 2 Aif(4) _ Aif(6)] Aif(5-11) by auto
show ?case apply(safe)
subgoal
apply(simp)
apply(rule If2[where e="\<lambda>a. if bval b a then time C1 a else time C2 a"])
subgoal
apply(simp cong: rev_conj_cong)
apply(rule ub_cost[where e'="time C1"])
apply(simp) apply(auto)[1]
apply(rule strengthen_pre[where P="%l s. pre C1 Q l s \<and> preList upds C1 l s"])
using ListAif1
apply fast
apply(rule Aif(1)[THEN conjunct1])
using Aif
apply(auto)
done
subgoal
apply(simp cong: rev_conj_cong)
apply(rule ub_cost[where e'="time C2"]) (* k=1 and *)
apply(simp) apply(auto)[1]
apply(rule strengthen_pre[where P="%l s. pre C2 Q l s \<and> preList upds C2 l s"])
using ListAif2
apply fast
apply(rule Aif(2)[THEN conjunct1])
using Aif
apply(auto)
done
by simp
using T F kT kF by (auto intro: k4If)
next
case (Aconseq P'2 Qannot2 eannot2 C Q upds)
obtain P' Ps where [simp]: "P'2 = (P',Ps)" by fastforce
obtain Qannot Q's where [simp]: "Qannot2 = (Qannot,Q's)" by fastforce
obtain eannot es where [simp]: "eannot2 = (eannot,es)" by fastforce
have ih0: "finite (support Qannot)" using Aconseq(3,6) by simp
from \<open>vc ({P'2/Qannot2/eannot2} CONSEQ C) Q LVQ LVE\<close>
obtain k where k0: "k>0" and ih1: "vc C Qannot Q's LVE"
and ih2: " (\<forall>l s. P' l s \<longrightarrow> time C s \<le> k * eannot s \<and> (\<forall>t. \<exists>l'. pre C Qannot l' s \<and> (Qannot l' t \<longrightarrow> Q l t)))"
and pc: "(\<forall>s1 s2 l. (\<forall>x\<in>Ps. s1 x=s2 x) \<longrightarrow> P' l s1 = P' l s2)"
and qc: "(\<forall>s1 s2 l. (\<forall>x\<in>Q's. s1 x=s2 x) \<longrightarrow> Qannot l s1 = Qannot l s2)"
and ec: "(\<forall>s1 s2. (\<forall>x\<in>es. s1 x=s2 x) \<longrightarrow> eannot s1 = eannot s2)"
by auto
have k: "\<turnstile>\<^sub>1 {\<lambda>l s. pre C Qannot l s \<and> preList upds C l s} strip C { time C \<Down> \<lambda>l s. Qannot l s \<and> postList upds l s}
\<and> ((\<forall>l s. pre C Qannot l s \<longrightarrow> Qannot l (postQ C s)) \<and> K4 upds Q's C Qannot)"
apply(rule Aconseq(1)) using Aconseq(2-10) by auto
note ih=k[THEN conjunct1] and ihsnd=k[THEN conjunct2]
show ?case apply(simp, safe)
apply(rule conseq[where e="time C" and P="\<lambda>l s. pre C Qannot l s \<and> preList upds C l s" and Q="%l s. Qannot l s \<and> postList upds l s"])
prefer 2
apply(rule ih)
subgoal apply(rule exI[where x=k])
proof (safe, goal_cases)
case (1)
with k0 show ?case by auto
next
case (2 l s)
then show ?case using ih2 by simp
next
case (3 l s t)
have finupds: "finite (set upds)" by simp
{
fix l s n x
assume "x \<in> fst ` (set upds)"
then have "x \<notin> support (pre C Qannot)" using Aconseq(4) support_pre by auto
from assn2_lupd[OF this] have "pre C Qannot (l(x := n)) = pre C Qannot l" .
} note U2=this
{
fix l s n x
assume "x \<in> fst ` (set upds)"
then have "x \<notin> support Qannot" using Aconseq(4) by auto
from assn2_lupd[OF this] have "Qannot (l(x := n)) = Qannot l" .
} note K2=this
from ih2 3(1) have *: "(\<exists>l'. pre C Qannot l' s \<and> (Qannot l' t \<longrightarrow> Q l t))" by simp
obtain l' where i': "pre C Qannot l' s" and ii': "(Qannot l' t \<longrightarrow> Q l t)"
and lxlx: "\<And>x. x\<in> fst ` (set upds) \<Longrightarrow> l' x = l x"
proof (goal_cases)
case 1
from * obtain l'' where i': "pre C Qannot l'' s" and ii': "(Qannot l'' t \<longrightarrow> Q l t)"
by blast
note allg=allg[where q="%e x. l x"]
have "pre C Qannot (ListUpdate l'' upds (\<lambda>e. l)) = pre C Qannot l'' "
apply(rule allg[where ?upds="set upds"]) apply(rule U2) apply fast by fast
with i' have U: "pre C Qannot (ListUpdate l'' upds (\<lambda>e. l)) s" by simp
have "Qannot (ListUpdate l'' upds (\<lambda>e. l)) = Qannot l''"
apply(rule allg[where ?upds="set upds"]) apply(rule K2) apply fast by fast
then have K: "(%l' s. Qannot l' t \<longrightarrow> Q l t) (ListUpdate l'' upds (\<lambda>e. l)) s = (%l' s. Qannot l' t \<longrightarrow> Q l t) l'' s"
by simp
with ii' have K: "(Qannot (ListUpdate l'' upds (\<lambda>e. l)) t \<longrightarrow> Q l t)" by simp
{
fix x
assume as: "x \<in> fst ` (set upds)"
have "ListUpdate l'' upds (\<lambda>e. l) x = l x"
apply(rule ListUpdate_updates)
using as by fast
} note kla=this
show "thesis"
apply(rule 1)
apply(fact U)
apply(fact K)
apply(fact kla)
done
qed
let ?upds' = "set (map (%(x,e,v). (x,preT C e s,fune C v)) upds)"
have "finite ?upds'" by simp
define xs where "xs = map (%(x,e,v). (x,preT C e s,fune C v)) upds"
then have "set xs= ?upds'" by simp
have "pre C Qannot (ListUpdateE l' xs) = pre C Qannot l' "
apply(rule allg_E[where ?upds="?upds'"]) apply(rule U2)
apply force unfolding xs_def by simp
with i' have U: "pre C Qannot (ListUpdateE l' xs ) s" by simp
have "Qannot (ListUpdateE l' xs) = Qannot l' "
apply(rule allg_E[where ?upds="?upds'"]) apply(rule K2) apply force unfolding xs_def by auto
then have K: "(%l' s. Qannot l' t \<longrightarrow> Q l t) (ListUpdateE l' xs) s = (%l' s. Qannot l' t \<longrightarrow> Q l t) l' s"
by simp
with ii' have K: "(Qannot (ListUpdateE l' xs) t \<longrightarrow> Q l t)" by simp
have xs_upds: "map fst xs = map fst upds"
unfolding xs_def by auto
have grr: "\<And>x. x \<in> ?upds' \<Longrightarrow> ListUpdateE l' xs (fst x) = fst (snd x)" apply(rule ListUpdateE_updates)
apply(simp only: xs_upds) using Aconseq(5) apply simp
unfolding xs_def apply(simp) done
show ?case
apply(rule exI[where x="ListUpdateE l' xs"])
apply(safe)
subgoal by fact
subgoal apply(rule preListpreSet) proof (safe,goal_cases)
case (1 x e v)
then have "(x, preT C e s, fune C v) \<in> ?upds'"
by force
from grr[OF this, simplified]
show ?case .
qed
subgoal using K apply(simp) done (* Qannot must be independent of x *)
subgoal apply(rule postListpostSet)
proof (safe, goal_cases)
case (1 x e v)
with lxlx[of x] have fF: "l x = l' x"
by force
from postSetpostList[OF 1(2)] have g: "postSet (set upds) (ListUpdateE l' xs) t" .
with 1(3) have A: "(ListUpdateE l' xs) x = e t"
by fast
from 1(3) grr[of "(x,preT C e s, fune C v)"] have B: "ListUpdateE l' xs x = fst (snd (x, preT C e s, fune C v))"
by force
from A B have X: "e t = preT C e s" by fastforce
from preSetpreList[OF 3(2)] have "preSet (set upds) ({P'2/Qannot2/eannot2} CONSEQ C) l s" apply(simp) done
with 1(3) have Y: "l x = preT C e s" apply(simp) by fast
from X Y show ?case by simp
qed
done
qed
subgoal using ihsnd ih2 by blast
subgoal using ihsnd[THEN conjunct2] pc unfolding K4_def K_def apply(auto)
unfolding K3_def K2_def using ec by auto
done
next
case (Aseq C1 C2 Q upds)
let ?P = "(\<lambda>l s. pre C1 (pre C2 Q) l s \<and> preList upds (C1;;C2) l s )"
let ?P' = "support Q \<union> varacom C1 \<union> varacom C2 \<union> lesvars upds"
have finite_varacom: "finite (varacom (C1;; C2))" by fact
have finite_varacomC2: "finite (varacom C2)"
apply(rule finite_subset[OF _ finite_varacom]) by simp
let ?y = "SOME x. x \<notin> ?P'"
have sup_L: "support (preList upds (C1;;C2)) \<subseteq> lesvars upds"
apply(rule support_preList) done
have sup_B: "support ?P \<subseteq> ?P'"
apply(rule subset_trans[OF support_and]) using support_pre sup_L by blast
have fP': "finite (?P')" using finite_varacom Aseq(3,4,5) apply simp done
hence "\<exists>x. x \<notin> ?P'" using infinite_UNIV_listI
using ex_new_if_finite by metis
hence ynP': "?y \<notin> ?P'" by (rule someI_ex)
hence ysupPreC2Q: "?y \<notin> support (pre C2 Q)" and ysupC1: "?y \<notin> varacom C1" using support_pre by auto
from Aseq(5) have "lesvars upds \<inter> varacom C2 = {}" by auto
from Aseq show ?case apply(auto)
proof (rule Seq, goal_cases)
case 2
show "\<turnstile>\<^sub>1 {(%l s. pre C2 Q l s \<and> preList upds C2 l s )} strip C2 { time C2 \<Down> (%l s. Q l s \<and> postList upds l s)}"
apply(rule weaken_post[where Q="(%l s. Q l s \<and> postList upds l s)"])
apply(rule 2(2)[THEN conjunct1])
apply fact
apply (fact)+ using 2(8) by simp
next
case 3
fix s
show "time C1 s + preT C1 (time C2) s \<le> time C1 s + preT C1 (time C2) s"
by simp
next
case 1
from ynP' have yC1: "?y \<notin> varacom C1" by blast
have xC1: "lesvars upds \<inter> varacom C1 = {}" using Aseq(5) by auto
from finite_support_pre[OF Aseq(4) finite_varacomC2]
have G: "finite (support (pre C2 Q))" .
let ?upds = "map (\<lambda>a. case a of (x,e,v) \<Rightarrow> (x, preT C2 e, fune C2 v)) upds"
let ?upds' = "(?y,time C2, kdeps C2)#?upds"
{
have A: " lesvars ?upds' = {?y} \<union> lesvars upds" apply simp
by force
from Aseq(5) have 2: "lesvars upds \<inter> varacom C1 = {}" by auto
have " lesvars ?upds' \<inter> varacom C1 = {}"
unfolding A using ysupC1 2 by blast
} note klar=this
have t: "fst \<circ> (\<lambda>(x, e, v). (x, preT C2 e, fune C2 v)) = fst" by auto
{
fix a b c X
assume "a \<notin> lesvars X" "(a,b,c) \<in> set X"
then have "False" by force
} note helper=this
have dmap: "distinct (map fst ?upds')"
apply(auto simp add: t)
subgoal for e apply(rule helper[of ?y upds e]) using ynP' by auto
subgoal by fact
done
note bla1=1(1)[where Q="pre C2 Q" and upds="?upds'", OF 1(10) G klar dmap]
note bla=1(2)[OF 1(11,3), THEN conjunct2, THEN conjunct2]
from 1(4) have kal: "lesvars upds \<inter> varacom C2 = {}" by auto
from bla[OF kal Aseq.prems(4,6,7,8,9)] have bla4: "K4 upds LVQ C2 Q" by auto
then have bla: "K C2 LVQ Q" unfolding K4_def by auto
have A:
"\<turnstile>\<^sub>1 {\<lambda>l s. pre C1 (pre C2 Q) l s \<and> preList ?upds' C1 l s}
strip C1
{ time C1 \<Down> \<lambda>l s. pre C2 Q l s \<and> postList ?upds' l s} \<and>
(\<forall>l s. pre C1 (pre C2 Q) l s \<longrightarrow> pre C2 Q l (postQ C1 s)) \<and> K4 ?upds' (qdeps C2 LVQ) C1 (pre C2 Q)"
apply(rule 1(1)[where Q="pre C2 Q" and upds="?upds'", OF 1(10) G klar dmap])
proof (goal_cases)
case 1
then show ?case using bla unfolding K_def by auto
next
case 2
show ?case apply(rule,rule,rule,rule) proof (goal_cases)
case (1 l s1 s2)
then show ?case using bla4 using Aseq.prems(9) unfolding K4_def K3_def K2_def
apply(simp)
proof (goal_cases)
case 1
then have t: "time C2 s1 = time C2 s2" by auto
have post: "postList (map (\<lambda>(x, e, v). (x, preT C2 e, fune C2 v)) upds) l s1 = postList (map (\<lambda>(x, e, v). (x, preT C2 e, fune C2 v)) upds) l s2" (is "?IH upds")
using 1
proof (induct upds)
case (Cons a upds)
then have IH: "?IH upds" by auto
obtain x e v where a: "a = (x,e,v)" using prod_cases3 by blast
from Cons(4) have "v \<subseteq> LVE" unfolding a by auto
with Cons(2) have s12v: "s1 = s2 on fune C2 v" unfolding a using fune_mono by blast
with Cons(3) IH a show ?case by auto
qed auto
from post t show ?case by auto
qed
qed
next
case 3
then show ?case using bla4 unfolding K4_def K3_def K2_def by(auto)
next
case 4
then show ?case apply(auto)
proof (goal_cases)
case (1 x a aa b)
with Aseq.prems(9) have "b \<subseteq> LVE" by auto
with fune_mono have "fune C2 b \<subseteq> fune C2 LVE" by auto
with 1 show ?case by blast
qed
qed
show " \<turnstile>\<^sub>1 {\<lambda>l s. (pre C1 (pre C2 Q) l s \<and> preList upds (C1;; C2) l s) \<and> l ?y = preT C1 (time C2) s} strip C1
{ time C1 \<Down> \<lambda>l s. (pre C2 Q l s \<and> preList upds C2 l s) \<and> time C2 s \<le> l ?y}"
apply(rule conseq_old)
prefer 2
apply(rule A[THEN conjunct1])
apply(auto simp: preList_Seq postList_preList) done
from A[THEN conjunct2, THEN conjunct2] have A1: "K C1 (qdeps C2 LVQ) (pre C2 Q)"
and A2: "K3 ?upds' C1 (pre C2 Q)" and A3: "(\<forall>s1 s2. s1 = s2 on kdeps C1 \<longrightarrow> time C1 s1 = time C1 s2)" unfolding K4_def by auto
from bla4 have B1: "K C2 LVQ Q" and B2: "K3 upds C2 Q" and B3: "(\<forall>s1 s2. s1 = s2 on kdeps C2 \<longrightarrow> time C2 s1 = time C2 s2)" unfolding K4_def by auto
show "K4 upds LVQ (C1;; C2) Q "
unfolding K4_def apply(safe)
subgoal using A1 B1 unfolding K_def by(simp)
subgoal using A2 B2 unfolding K3_def K2_def apply(auto) done
subgoal for s1 s2 using A3 B3 apply auto
proof (goal_cases)
case 1
then have t: "time C1 s1 = time C1 s2" by auto
from A2 have "\<forall>s1 s2. s1 = s2 on fune C1 (kdeps C2) \<longrightarrow> preT C1 (time C2) s1 = preT C1 (time C2) s2" unfolding K3_def K2_def by auto
then have p: "preT C1 (time C2) s1 = preT C1 (time C2) s2"
using 1(1) by simp
from t p show ?case by auto
qed
done
next
from ynP' sup_B show "?y \<notin> support ?P" by blast
have F: "support (preList upds C2) \<subseteq> lesvars upds"
apply(rule support_preList) done
have "support (\<lambda>l s. pre C2 Q l s \<and> preList upds C2 l s) \<subseteq> ?P'"
apply(rule subset_trans[OF support_and]) using F support_pre by blast
with ynP'
show "?y \<notin> support (\<lambda>l s. pre C2 Q l s \<and> preList upds C2 l s)" by blast
next
case (6 l s)
note bla=6(2)[OF 6(11,3), THEN conjunct2, THEN conjunct2]
from 6(4) have kal: "lesvars upds \<inter> varacom C2 = {}" by auto
from bla[OF kal Aseq.prems(4,6,7,8,9)] have bla4: "K4 upds LVQ C2 Q" by auto
then have bla: "K C2 LVQ Q" unfolding K4_def by auto
have 11: "finite (support (pre C2 Q )) "
apply(rule finite_subset[OF support_pre])
using 6(3,4,10) finite_varacomC2 by blast
have A: "\<forall>l s. pre C1 (pre C2 Q ) l s \<longrightarrow> pre C2 Q l (postQ C1 s)"
apply(rule 6(1)[where upds="[]", THEN conjunct2, THEN conjunct1])
apply(fact)+ apply(auto) using bla unfolding K_def apply blast+ done
have B: "(\<forall>l s. pre C2 Q l s \<longrightarrow> Q l (postQ C2 s))"
apply(rule 6(2)[where upds="[]", THEN conjunct2, THEN conjunct1])
apply(fact)+ apply auto using Aseq.prems(6) by auto
from A B 6 show ?case by simp
qed
next
case (Awhile A b C Q upds)
obtain I2 S E Es SS where aha[simp]: "A = (I2,(S,(E,Es,SS)))" using prod_cases5 by blast
obtain I Is where aha2: "I2 = (I, Is)"
by fastforce
let ?LV ="(\<Union>y\<in>LVE \<union> LVQ. (funStar SS) y)"
have LVE_LVE: "LVE \<subseteq> (\<Union>y\<in>LVE. (funStar SS) y)" using funStart_prop1 by fast
have LV_LV: "LVE \<union> LVQ \<subseteq> ?LV" using funStart_prop1 by fast
have LV_LV2: "(\<Union>y\<in>LVE \<union> LVQ. SS y) \<subseteq> ?LV" using funStart_prop2 by fast
have LVE_LV2: "(\<Union>y\<in>LVE. SS y) \<subseteq> (\<Union>y\<in>LVE. (funStar SS) y)" using funStart_prop2 by fast
note aha = aha2 aha
with aha aha2 \<open>vc (Awhile A b C) Q LVQ LVE\<close> have "vc (Awhile ((I,Is),S,E,Es,SS) b C) Q LVQ LVE" apply auto apply fast+ done
then
have vc: "vc C I Is (Es \<union> (\<Union>y\<in>LVE. (funStar SS) y))"
and IQ: "\<forall>l s. (I l s \<and> bval b s \<longrightarrow> pre C I l s \<and> 1 + preT C E s + time C s \<le> E s \<and> S s = S (postQ C s) on ?LV)" and
pre: "\<forall>l s. (I l s \<and> \<not> bval b s \<longrightarrow> Q l s \<and> 1 \<le> E s \<and> S s = s on ?LV)"
and Is: "(\<forall>s1 s2 l. s1 = s2 on Is \<longrightarrow> I l s1 = I l s2)"
and Ss: "(\<forall>y\<in>LVE \<union> LVQ. let Ss = SS y in \<forall>s1 s2. s1 = s2 on Ss \<longrightarrow> S s1 y = S s2 y)"
and Es: "(\<forall>s1 s2. s1 = s2 on Es \<longrightarrow> E s1 = E s2)" apply simp_all apply auto apply fast+ done
then have pre2: "\<And>l s. I l s \<Longrightarrow> \<not> bval b s \<Longrightarrow> Q l s \<and> 1 \<le> E s \<and> S s = s on ?LV"
and IQ2: "\<And>l s. (I l s \<Longrightarrow> bval b s \<Longrightarrow> pre C I l s \<and> 1 + preT C E s + time C s \<le> E s \<and> S s = S (postQ C s) on ?LV)"
and Ss2: "\<And>y s1 s2. s1 = s2 on (\<Union>y\<in>LVE. SS y) \<Longrightarrow> S s1 = S s2 on LVE"
by auto
from Ss have Ssc: "\<And>c s1 s2. c \<subseteq> LVE \<Longrightarrow> s1 = s2 on (\<Union>y\<in>c. SS y) \<Longrightarrow> S s1 = S s2 on c"
by auto
from IQ have IQ_in: "\<And>l s. I l s \<Longrightarrow> bval b s \<Longrightarrow> S s = S (postQ C s) on ?LV" by auto
have inv_impl: "\<And>l s. I l s \<Longrightarrow> bval b s \<Longrightarrow> pre C I l s" using IQ by auto
have yC: "lesvars upds \<inter> varacom C = {}" using Awhile(4) aha by auto
let ?upds = "map (%(x,e,v). (x, %s. e (S s), \<Union>x\<in>v. SS x)) upds"
let ?INV = "%l s. I l s \<and> postList ?upds l s"
have "lesvars upds \<inter> support I = {}" using Awhile(4) unfolding aha by auto
let ?P="lesvars upds \<union> varacom ({A} WHILE b DO C) "
let ?z="SOME z::lvname. z \<notin> ?P"
have "finite ?P" apply(auto simp del: aha) by (fact Awhile(6))
hence "\<exists>z. z\<notin>?P" using infinite_UNIV_listI
using ex_new_if_finite by metis
hence znP: "?z \<notin> ?P" by (rule someI_ex)
from znP have
zny: "?z \<notin> lesvars upds"
and zI: "?z \<notin> support I"
and blb: "?z \<notin> varacom C" by (simp_all add: aha)
from Awhile(4,6) have 23: "finite (varacom C)"
and 26: "finite (support I)" by (auto simp add: finite_subset aha)
have "\<forall>l s. pre C I l s \<longrightarrow> I l (postQ C s)"
apply(rule Awhile(1)[THEN conjunct2, THEN conjunct1])
apply(fact)+ subgoal using Is apply auto done
subgoal using Awhile(8) LVE_LVE by (metis subsetD sup.cobounded2)
apply fact using Awhile(10) LVE_LVE by blast
hence step: "\<And>l s. pre C I l s \<Longrightarrow> I l (postQ C s)" by simp
have fua: "lesvars ?upds = lesvars upds"
by force
let ?upds' = "(?z,E,Es) # ?upds"
show ?case
proof (safe, goal_cases)
case (2 l s)
from 2 have A: "I l s" unfolding aha by(simp)
then have I: "I l s" by simp
{ fix n
have "E s = n \<Longrightarrow> I l s \<Longrightarrow> Q l (postQ ({A} WHILE b DO C) s)"
proof (induct n arbitrary: s l rule: less_induct)
case (less n)
then show ?case
proof (cases "bval b s")
case True
with less IQ2 have "pre C I l s" and S: "S s = S (postQ C s) on ?LV" and t: "1 + preT C E s + time C s \<le> E s" by auto
with step have I': "I l (postQ C s)" and "1 + E (postQ C s) + time C s \<le> E s" using TQ by auto
with less have "E (postQ C s) < n" by auto
with less(1) I' have "Q l (postQ ({A} WHILE b DO C) (postQ C s))" by auto
with step show ?thesis using S apply simp using Awhile(7)
by (metis (no_types, lifting) LV_LV SUP_union contra_subsetD sup.boundedE)
next
case False
with pre2 less(3) have "Q l s" "S s = s on ?LV" by auto
then show ?thesis apply simp using Awhile(7)
by (metis (no_types, lifting) LV_LV SUP_union contra_subsetD sup.boundedE)
qed
qed
}
with I show "Q l (postQ ({A} WHILE b DO C) s)" by simp
next
case 1
have g: "\<And>e. e \<circ> S = (%s. e (S s)) " by auto
have "lesvars ?upds' \<inter> varacom C = {}"
using yC blb by(auto)
have z: "(fst \<circ> (\<lambda>(x, e, v). (x, \<lambda>s. e (S s), \<Union>x\<in>v. SS x))) = fst" by(auto)
have "distinct (map fst ?upds')"
using Awhile(5) zny by (auto simp add: z)
have klae: "\<And>s1 s2 A B. B \<subseteq> A \<Longrightarrow> s1 = s2 on A \<Longrightarrow> s1 = s2 on B" by auto
from Awhile(8) Awhile(9) have gl: "\<And>a b c s1 s2. (a,b,c) \<in> set upds \<Longrightarrow> s1 = s2 on c \<Longrightarrow> b s1 = b s2"
by fast
have CombALL: " \<turnstile>\<^sub>1 {\<lambda>l s. pre C I l s \<and> preList ?upds' C l s}
strip C
{ time C \<Down> \<lambda>l s. I l s \<and> postList ?upds' l s} \<and>
(\<forall>l s. pre C I l s \<longrightarrow> I l (postQ C s)) \<and> K4 ((SOME z. z \<notin> lesvars upds \<union> varacom ({A} WHILE b DO C), E, Es) # map (\<lambda>(x, e, v). (x, \<lambda>s. e (S s), \<Union>x\<in>v. SS x)) upds) Is C I "
apply(rule Awhile.IH[where upds="?upds'" ] )
apply (fact)+
subgoal apply safe using Is apply blast
using Is apply blast done
subgoal
using Is Es apply auto
apply(simp_all add: postListpostSet2, safe)
proof (goal_cases)
case (1 l s1 s2 x e v)
from 1(5,6) have i: "l x = e (S s1)" by auto
from Awhile(10) 1(6) have vLC: "v \<subseteq> LVE" by auto
have st: "(\<Union>y\<in>v. SS y) \<subseteq> (\<Union>y\<in>LVE. SS y)" using vLC by blast
also have "\<dots> \<subseteq> (\<Union>y\<in>LVE. funStar SS y)" using LVE_LV2 by blast
finally have st: "(\<Union>y\<in>v. SS y) \<subseteq> Es \<union> (\<Union>y\<in>LVE. funStar SS y)" by blast
have ii: "e (S s1) = e (S s2)"
apply(rule gl)
apply fact
apply(rule Ssc)
apply fact
using st 1(3) by blast
from i ii show ?case by simp
next
case (2 l s1 s2 x e v)
from 2(5,6) have i: "l x = e (S s2)" by auto
from Awhile(10) 2(6) have vLC: "v \<subseteq> LVE" by auto
have st: "(\<Union>y\<in>v. SS y) \<subseteq> (\<Union>y\<in>LVE. SS y)" using vLC by blast
also have "\<dots> \<subseteq> (\<Union>y\<in>LVE. funStar SS y)" using LVE_LV2 by blast
finally have st: "(\<Union>y\<in>v. SS y) \<subseteq> Es \<union> (\<Union>y\<in>LVE. funStar SS y)" by blast
have ii: "e (S s1) = e (S s2)"
apply(rule gl)
apply fact
apply(rule Ssc)
apply fact
using st 2(3) by blast
from i ii show ?case by simp
qed apply(auto)
subgoal using Es by auto
subgoal apply(rule gl) apply(simp) using Ss Awhile(10) by fastforce
subgoal using Awhile(10) LVE_LV2 by blast
done
from this[THEN conjunct2, THEN conjunct2] have
K: "K C Is I" and K3: "K3 ?upds' C I" and Kt: "\<forall>s1 s2. s1 = s2 on kdeps C \<longrightarrow> time C s1 = time C s2" unfolding K4_def by auto
show "K4 upds LVQ ({A} WHILE b DO C) Q"
unfolding K4_def apply safe
subgoal using K unfolding K_def aha using Is by auto
subgoal using K3 unfolding K3_def K2_def aha apply auto
subgoal for x e v apply (rule gl) apply simp apply(rule Ssc) using Awhile(10)
apply fast apply blast done done
subgoal using Kt Es unfolding aha by auto
done
show ?case
apply(simp add: aha)
apply(rule conseq_old[where P="?INV" and e'=E and Q="\<lambda>l s. ?INV l s \<and> ~ bval b s"])
defer
proof (goal_cases)
case 3
show ?case apply(rule exI[where x=1]) apply(auto)[1] apply(simp only: postList_preList[symmetric] ) apply (auto)[1]
by(simp only: g)
next
case 2 (* post condition is satisfied after exiting the loop *)
show ?case
proof (safe, goal_cases)
case (1 l s)
then show ?case using pre by auto
next
case (2 l s)
from Awhile(8) have Aw7: "\<And>l s1 s2. s1 = s2 on LVE \<Longrightarrow> postList upds l s1 = postList upds l s2" by auto
have "postList (map (\<lambda>(x, e, v). (x, \<lambda>s. e (S s), \<Union>x\<in>v. SS x)) upds) l s =
postList upds l (S s)" apply(induct upds) apply auto done
also have "\<dots> = postList upds l s" using Aw7[of "S s" s "l"] pre2 2 LV_LV
by fast
finally show ?case using 2(3) by simp
qed
next
case 1
show ?case
proof(rule While, goal_cases)
case 1
note Comb=CombALL[THEN conjunct1]
show "\<turnstile>\<^sub>1 {\<lambda>l s. (I l s \<and> postList ?upds l s) \<and> bval b s \<and> preT C E s = l ?z}
strip C { time C \<Down> \<lambda>l s. (I l s \<and> postList ?upds l s) \<and> E s \<le> l ?z}"
apply(rule conseq_old)
apply(rule exI[where x=1]) apply(simp)
prefer 2
proof (rule Comb, safe, goal_cases)
case (2 l s)
from IQ_in[OF 2(1)] gl Awhile(10,9)
have y: "postList ?upds l s =
preList ?upds C l s" (is "?IH upds")
proof (induct upds)
case (Cons a upds')
obtain y e v where axe: "a = (y,e,v)" using prod_cases3 by blast
have IH: "?IH upds'" apply(rule Cons(1))
using Cons(2-5) by auto
from Cons(3) axe have ke: "\<And>s1 s2. s1 = s2 on v \<Longrightarrow> e s1 = e s2"
by fastforce
have vLC: "v \<subseteq> LVE" using axe Cons(4) by simp
have step: "e (S s) = e (S (postQ C s))" apply(rule ke) using Cons(2) using vLC LV_LV 2(3)
by blast
show ?case unfolding axe using IH step apply(simp)
apply(simp only: TQ) done
qed simp
from 2 show ?case by(simp add: y)
qed (auto simp: inv_impl)
next
show "\<forall>l s. bval b s \<and> I l s \<and> postList ?upds l s \<longrightarrow> 1 + preT C E s + time C s \<le> E s"
proof (clarify, goal_cases)
case (1 l s)
thus ?case
using 1 IQ by auto
qed
next
show "\<forall>l s. ~bval b s \<and> I l s \<and> postList ?upds l s \<longrightarrow> 1 \<le> E s"
proof (clarify, goal_cases)
case (1 l s)
with pre show ?case by auto
qed
next
have pff: "?z \<notin> lesvars ?upds" apply(simp only: fua) by fact
have "support (\<lambda>l s. I l s \<and> postList ?upds l s) \<subseteq> support I \<union> support (postList ?upds)"
by(rule support_and)
also
have "support (postList ?upds)
\<subseteq> lesvars ?upds"
apply(rule support_postList) done
finally
have "support (\<lambda>l s. I l s \<and> postList ?upds l s) \<subseteq> support I \<union> lesvars ?upds"
by blast
thus "?z \<notin> support (\<lambda>l s. I l s \<and> postList ?upds l s)"
apply(rule contra_subsetD)
using zI pff by(simp)
qed
qed
qed
qed
corollary vc_sound':
assumes "vc C Q Qset {}"
"finite (support Q)" "finite (varacom C)"
"\<forall>l s. P l s \<longrightarrow> pre C Q l s"
"\<And>s1 s2 l. s1 = s2 on Qset \<Longrightarrow> Q l s1 = Q l s2"
shows "\<turnstile>\<^sub>1 {P} strip C {time C \<Down> Q}"
proof -
show ?thesis
apply(rule conseq_old)
prefer 2 apply(rule vc_sound[where upds="[]", OF assms(1), simplified, OF assms(2-3), THEN conjunct1])
using assms(4,5) apply auto
done
qed
corollary vc_sound'':
assumes "vc C Q Qset {}"
"finite (support Q)" "finite (varacom C)"
" (\<exists>k>0. \<forall>l s. P l s \<longrightarrow> pre C Q l s \<and> time C s \<le> k * e s)"
"\<And>s1 s2 l. s1 = s2 on Qset \<Longrightarrow> Q l s1 = Q l s2"
shows "\<turnstile>\<^sub>1 {P} strip C {e \<Down> Q}"
proof -
show ?thesis
apply(rule conseq_old)
prefer 2 apply(rule vc_sound[where upds="[]", OF assms(1), simplified, OF assms(2-3), THEN conjunct1])
using assms(4,5) apply auto
done
qed
end
|
SUBROUTINE TIPE (X,Y,XYD,CHR,N,OPT)
C
C (X,Y) = STARTING OR ENDING POINT OF THE LINE TO BE TYPED (ALWAYS
C LEFT-TO-RIGHT OR TOP-TO-BOTTOM).
C XYD = +/-1 IF X = STARTING OR ENDING POINT OF THE LINE.
C = +/-2 IF Y = STARTING OR ENDING POINT OF THE LINE.
C CHR = CHARACTERS TO BE TYPED.
C N = NUMBER OF CHARACTERS.
C OPT = -1 TO INITIATE THE TYPING MODE.
C = +1 TO TERMINATE THE TYPING MODE.
C = 0 TO TYPE A LINE.
C
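C EXAMPLE CALLS (HYPOTHETICAL USAGE SKETCH, NOT FROM THE ORIGINAL SOURCE.
C ITEXT IS AN ASSUMED INTEGER ARRAY HOLDING 12 CHARACTER CODES) --
C CALL TIPE (0.,0.,1,ITEXT,12,-1) INITIATE THE TYPING MODE
C CALL TIPE (1.5,2.0,1,ITEXT,12,0) TYPE THE 12 CHARACTERS LEFT-TO-RIGHT
C CALL TIPE (0.,0.,1,ITEXT,12,+1) TERMINATE THE TYPING MODE
C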
INTEGER XYD,CHR(1),OPT,PLOTER,CHAR,C(80),BLANK,LSTCHR,
1 CHARX,D
REAL XY(2,2)
COMMON /PLTDAT/ MODEL,PLOTER,SKPPLT(18),SKPA(3),CNTCHR(2)
COMMON /CHAR94/ CHAR(60)
DATA BLANK , LSTCHR / 48,47 /
C
IF (OPT .NE. 0) GO TO 150
C
C OPT = 0.
C
D = MAX0(IABS(XYD),1)
S = CNTCHR(D)
IF (XYD.EQ.-1 .OR. XYD.EQ.2) S = -S
XY(1,1) = X
XY(2,1) = Y
XY(1,2) = XY(1,1)
XY(2,2) = XY(2,1)
C
C PRINT A MAXIMUM OF 80 CHARACTERS AT A TIME.
C
DO 130 J = 1,N,80
IF (XYD .LT. 0) GO TO 105
L1 = J
L2 = L1 + 79
IF (L2 .GT. N) L2 = N
GO TO 106
105 L2 = N - J + 1
L1 = L2 - 79
IF (L1 .LE. 0) L1 = 1
C
106 NC = 0
DO 120 L = L1,L2
CHARX = CHR(L)
DO 110 I = 1,LSTCHR
IF (CHARX .EQ. CHAR(I)) GO TO 111
110 CONTINUE
I = BLANK
111 NC = NC + 1
C(NC) = I
120 CONTINUE
C
C TYPE THE -NC- CHARACTERS JUST PROCESSED.
C
XY(D,2) = XY(D,1) + S*FLOAT(L1-1)
CALL TYPE10 (XY(1,2),XY(2,2),XYD,C,NC,0)
GO TO 130
130 CONTINUE
GO TO 200
C
C OPT = +/-1
C
150 CALL TYPE10 (0,0,0,0,0,OPT)
GO TO 200
200 RETURN
END
|
using Polylogarithms
using SpecialFunctions
using Test
using DataFrames, CSV
import Base.MathConstants: π, pi, ℯ, e, γ, eulergamma, catalan, φ, golden
include("test_defs.jl")
include("../src/gamma_derivatives.jl")
@testset "Derivatives of the gamma function at 1.0" begin
@testset " throws errors" begin
@test_throws DomainError D(2, -1, 1.0)
@test_throws DomainError D(-1, 2, 1.0)
# @test_throws MethodError stieltjes(1.5)
end
@testset " types" begin
# @test typeof(stieltjes(1)) == Float64
end
@testset " values" begin
@test g1(0) ≈ 1.0
@test g2(0) ≈ 1.0
@test g1(1) ≈ -γ
@test g2(1) ≈ -γ
for n=2:5
@test g1(n) ≈ g2(n)
end
end
end
|
State Before: α : Type u_1
m : MeasurableSpace α
μ ν : Measure α
s : Set α
h : μ ≤ ν
⊢ μ ≤ 0 + ν State After: no goals Tactic: rwa [zero_add] |
Recently I had the pleasure of remixing a tune by a couple of my favorite artists, Pitch Black. They massively inspired me to become an electronic musician, and have since helped me in various aspects of my career, from music production to handling myself in a strange new country. They are both fantastic blokes who have kept NZ on the true live electronica map for over 10 years, and they are still going strong.
As well as releasing the tune, I have recorded the entire production process from start to finish, and am releasing 6 hours of high-definition video footage to show people exactly how I did everything. The first two hours are available now to everyone. I will be releasing one hour per week to the public until it is finished. |
module Main
%default total
-- Data type
data BF : Char -> Type where
Next : BF '>'
Prev : BF '<'
Increment : BF '+'
Decrement : BF '-'
Output : BF '.'
Accept : BF ','
Forward : BF '['
Backward : BF ']'
-- Interpreter
data Tape a = MkTape (Stream a) a (Stream a)
right : Tape a -> Tape a
right (MkTape ls c (r :: rs)) = MkTape (c :: ls) r rs
left : Tape a -> Tape a
left (MkTape (l :: ls) c rs) = MkTape ls l (c :: rs)
emptyTape : Tape Int
emptyTape = MkTape (repeat 0) 0 (repeat 0)
Instruction : Type
Instruction = Maybe (Sigma Char BF)
mutual
partial
nextInstruction : Tape Int -> Tape Instruction -> IO ()
nextInstruction dat ins = bf' dat (right ins)
partial
runInstruction : Tape Int -> Tape Instruction -> BF c -> IO ()
runInstruction dat ins Next =
nextInstruction (right dat) ins
runInstruction dat ins Prev =
nextInstruction (left dat) ins
runInstruction (MkTape ls c rs) ins Increment =
nextInstruction (MkTape ls (succ c) rs) ins
runInstruction (MkTape ls c rs) ins Decrement =
nextInstruction (MkTape ls (pred c) rs) ins
runInstruction dat@(MkTape _ c _) ins Output = do
putChar (chr c)
nextInstruction dat ins
runInstruction (MkTape ls _ rs) ins Accept = do
c <- getChar
nextInstruction (MkTape ls (ord c) rs) ins
runInstruction dat@(MkTape _ n _) ins Forward = do
if n == 0
then loopRight Z dat ins
else nextInstruction dat ins
runInstruction dat@(MkTape _ n _) ins Backward = do
if n /= 0
then loopLeft Z dat ins
else nextInstruction dat ins
runInstruction dat ins _ =
nextInstruction dat ins
partial
loopRight' : Nat -> Tape Int -> Tape Instruction -> BF c -> IO ()
loopRight' (S Z) dat ins Backward =
nextInstruction dat ins
loopRight' n dat ins Backward =
loopRight (pred n) dat (right ins)
loopRight' n dat ins Forward =
loopRight (succ n) dat (right ins)
loopRight' n dat ins _ =
loopRight n dat (right ins)
partial
loopRight : Nat -> Tape Int -> Tape Instruction -> IO ()
loopRight n dat ins@(MkTape _ (Just p) _) =
loopRight' n dat ins (getProof p)
loopRight n dat ins =
loopRight n dat (right ins)
partial
loopLeft' : Nat -> Tape Int -> Tape Instruction -> BF c -> IO ()
loopLeft' (S Z) dat ins Forward =
nextInstruction dat ins
loopLeft' n dat ins Forward =
loopLeft (pred n) dat (left ins)
loopLeft' n dat ins Backward =
loopLeft (succ n) dat (left ins)
loopLeft' n dat ins _ =
loopLeft n dat (left ins)
partial
loopLeft : Nat -> Tape Int -> Tape Instruction -> IO ()
loopLeft n dat ins@(MkTape _ (Just p) _) =
loopLeft' n dat ins (getProof p)
loopLeft n dat ins =
loopLeft n dat (left ins)
partial
bf' : Tape Int -> Tape Instruction -> IO ()
bf' _ (MkTape _ Nothing _) = return ()
bf' dat ins@(MkTape _ (Just p) _) = runInstruction dat ins (getProof p)
-- Literals
data Every : List a -> Type where
partial
bf : (s : String) -> { auto p : Every (unpack s) } -> IO ()
partial
main : IO ()
main = do
bf"++++++++[>++++[>++>+++>+++>+<<<<-]>+>+>->>+[<]<-]>>.>---.+++++++..+++.>>.<-."
|
import .definitions
import .nat_extra
import .decidable_relations
def plist : list ℕ → bool
| [] := tt
| (h :: t) := nt.irreducible h && plist t
namespace list
/-
Basic lemmas concerning products and prime lists
-/
lemma product_concat (l1 l2 : list ℕ) : product (l1 ++ l2) = (product l1) * (product l2) :=
begin
induction l1,
{ by simp [product] },
{ by simp [product, ih_1] }
end
lemma plist_concat (l1 l2 : list ℕ) : plist l1 = tt → plist l2 = tt → plist (l1 ++ l2) = tt :=
begin
induction l1,
{ intros, by simp [a_1] },
{ intros, simp [plist] at a_2, simp [plist], apply and.intro,
{ exact a_2.right },
{ exact ih_1 a_2.left a_3 }
}
end
lemma lmax_head {x y : ℕ} {t : list ℕ} (h : sorted (x :: y :: t) = tt) : y ≤ x :=
begin
simp [sorted] at h,
have : max x (lmax (y :: t)) = x, exact nat.eq_eq h.right.right,
have hx : max y (lmax t) ≤ x, by rw [←this]; exact le_max_right x (lmax (y :: t)),
have hy : y ≤ max y (lmax t), by simp [le_max_left],
exact nat.le_trans hy hx
end
lemma plist_prod_nonzero {l : list ℕ} (h : plist l = tt) : product l ≠ 0 :=
begin
induction l with p t ih,
{ simp [product] },
{ simp [product],
have ht : plist t = tt, by simp [plist] at h; exact h.left,
by_cases p = 0 with hp,
{ simp [plist] at h,
have : nt.irreducible p, from of_to_bool_true h.right,
exact absurd hp this.left },
{ have hpg0 : product t ≠ 0, from ih ht,
--this was incredibly annoying; any ideas how we could make this easier?
have hp : nat.succ (nat.pred p) = p, from nat.succ_pred_eq_of_pos (nat.pos_of_ne_zero hp),
have ht : nat.succ (nat.pred (product t)) = product t, from nat.succ_pred_eq_of_pos (nat.pos_of_ne_zero hpg0),
have : p*(product t) = 1 + (nat.pred p) + (nat.pred (product t)) + (nat.pred p)*(nat.pred (product t)),
by rw [←hp,←ht]; apply nat.foil,
have : p*(product t) = nat.succ((nat.pred p) + (nat.pred (product t)) + (nat.pred p)*(nat.pred (product t))),
by rw [←nat.one_add]; simp [this],
rw [this], exact nat.succ_ne_zero ((nat.pred p) + (nat.pred (product t)) + (nat.pred p)*(nat.pred (product t)))
}
}
end
lemma plist_prod_one {l : list ℕ} (h : plist l = tt) : product l = 1 → l = [] :=
begin
cases l with p t,
{ simp },
{ intro, simp [product] at a,
have : p = 1, from (nat.unique_unit a).left,
simp [plist, nt.computable_irreducible] at h,
have : p ≠ 1, from (of_to_bool_true h.right).right.left,
contradiction
}
end
/-
We'll need some machinery about permutations to talk about unique prime lists.
This stuff is ripped from mathlib.
-/
variable {α : Type}
open perm
@[refl] protected theorem perm.refl : ∀ (l : list α), l ~ l
| [] := perm.nil
| (x::xs) := skip x (perm.refl xs)
@[symm] protected theorem perm.symm {l₁ l₂ : list α} (p : l₁ ~ l₂) : l₂ ~ l₁ :=
perm.rec_on p
perm.nil
(λ x l₁ l₂ p₁ r₁, skip x r₁)
(λ x y l, swap y x l)
(λ l₁ l₂ l₃ p₁ p₂ r₁ r₂, trans r₂ r₁)
attribute [trans] perm.trans
/-
Interactions of permutations with our other list definitions
-/
lemma perm_product {l₁ l₂ : list ℕ} (h : l₁ ~ l₂) : product l₁ = product l₂ :=
begin
induction h,
{ simp },
{ simp [product,ih_1] },
{ simp [product] },
{ simp [ih_1,ih_2] }
end
lemma perm_lmax {l₁ l₂ : list ℕ} (h : l₁ ~ l₂) : lmax l₁ = lmax l₂ :=
begin
induction h,
{ simp },
{ simp [lmax,ih_1] },
{ simp [lmax,max_left_comm] },
{ simp [ih_1,ih_2] }
end
lemma perm_plist {l₁ l₂ : list ℕ} (h : l₁ ~ l₂) (hp : plist l₁ = tt) : plist l₂ = tt :=
begin
-- is there a way to clean up this induction re: term introduction?
-- check out perm_induction_on in perm.lean in mathlib?
induction h with x l₁ l₂ l₃ ih x y l l₁ l₂,
{ exact hp },
{ have : plist l₁ = tt, by simp [plist] at hp; exact hp.left,
have hp2 : plist l₂ = tt, by simp [this] at ih; exact ih,
have : nt.irreducible x, by simp [plist] at hp; exact of_to_bool_true hp.right,
simp [plist], exact and.intro hp2 (to_bool_true this)
},
{ have hpx : to_bool (nt.irreducible x) = tt, by simp [plist] at hp; exact hp.right.left,
have hpy : to_bool (nt.irreducible y) = tt, by simp [plist,plist] at hp; exact hp.right.right,
have hpl : plist l = tt, by simp [plist, plist] at hp; exact hp.left,
have hpyl : plist (y :: l) = tt, by simp [plist]; exact and.intro hpl hpy,
simp [plist], exact and.intro hpl (and.intro hpx hpy)
},
{ apply ih_2, apply ih_1, assumption }
end
/-
We need to prove the existence of sorted lists. To do this, we introduce a
sorting algorithm and show that it spits out a sorted permutation.
-/
lemma perm_ordered_insert (x : ℕ) (l : list ℕ) : ordered_insert x l ~ x :: l :=
begin
induction l with y l ih,
{ simp [ordered_insert] },
{ by_cases y ≤ x with h,
{ simp [ordered_insert, h] },
{ simp [ordered_insert, h],
have hxy : y :: x :: l ~ x :: y :: l, by simp [perm.swap],
suffices : y :: ordered_insert x l ~ y :: x :: l,
{ transitivity, apply this, apply hxy },
{ apply perm.skip, simp [ih] }
}
}
end
lemma perm_insertion_sort (l : list ℕ) : insertion_sort l ~ l :=
begin
induction l with y l ih,
{ simp [insertion_sort] },
{ simp [insertion_sort],
have h : ordered_insert y (insertion_sort l) ~ y :: insertion_sort l,
by apply perm_ordered_insert,
transitivity, assumption, apply perm.skip, apply ih
}
end
lemma sorted_singleton (x : ℕ) : sorted [x] = tt :=
begin
simp [sorted, lmax, max],
by_cases x ≤ 0,
{ simp [nat.eq_zero_of_le_zero h, nat.eq] },
{ simp [h, nat.eq] }
end
lemma sorted_head (x : ℕ) (l : list ℕ)
: sorted (x :: l) = tt ↔ lmax l ≤ x ∧ sorted l = tt :=
begin
split,
{ intro h, simp [sorted] at h,
have hx : max x (lmax l) = x, from nat.eq_eq h.right,
split,
{ rw [←hx], simp [le_max_right] },
{ exact h.left }
},
{ intro h, simp [sorted],
have : max x (lmax l) = x, by simp [max_eq_left h.left],
apply and.intro,
{ exact h.right },
{ exact nat.eq_of_eq this }
}
end
lemma sorted_ordered_insert {x : ℕ} {l : list ℕ}
: sorted l = tt → sorted (ordered_insert x l) = tt :=
begin
intro h, induction l with y l ih,
{ simp [sorted_singleton] },
{ by_cases y ≤ x with hxy,
{ simp [hxy], simp [sorted],
apply and.intro,
{ simp [sorted] at h, exact h.left },
{ simp [sorted] at h, apply and.intro,
{ exact h.right },
{ have : lmax (y :: l) ≤ x,
begin
simp [lmax],
have : max y (lmax l) = y, from nat.eq_eq h.right,
rw [←this] at hxy, exact hxy
end,
have : max x (lmax (y :: l)) = x, from max_eq_left this,
exact nat.eq_of_eq this
}
}
},
{ have : x ≤ y ∨ y ≤ x, from le_total x y,
have hyx : x ≤ y, by simp [hxy] at this; assumption,
have hyl : lmax l ≤ y, by simp [sorted_head] at h; exact h.right,
have : ordered_insert x l ~ x :: l, by simp [perm_ordered_insert],
have hyxl : lmax (ordered_insert x l) ≤ y, by
simp [perm_lmax this, lmax]; simp [max_le hyx hyl],
have : sorted l = tt, by simp [sorted] at h; exact h.left,
have hxl : sorted (ordered_insert x l) = tt, from ih this,
simp [hxy,sorted_head], apply and.intro,
{ exact hxl },
{ exact hyxl }
}
}
end
lemma sorted_insertion_sort (l : list ℕ) : (sorted $ insertion_sort l) = tt :=
begin
induction l with x l ih,
{ simp [insertion_sort, sorted] },
{ simp [sorted_ordered_insert ih] }
end
/-
Sorting a sorted list is idempotent: idem_insertion_sort.
We conceptually might want the following lemma:
lemma perm_sorted {l₁ l₂ : list ℕ} (h : l₁ ~ l₂) (hs1 : sorted l₁) (hs2 : sorted l₂) : l₁ = l₂
But this seems very hard to prove given our definition of perm_sorted. So
instead we'll get the needed result from less general principles.
-/
lemma idem_insertion_sort {l : list ℕ} (h : sorted l = tt) : insertion_sort l = l :=
--perm_sorted (perm_insertion_sort l) (sorted_insertion_sort l) h
begin
induction l with x t ih,
{ simp },
{ have : sorted t = tt, by simp [sorted] at h; exact h.left,
simp [insertion_sort], rw [ih this],
induction t, { simp }, { simp [ordered_insert, list.lmax_head h] }
}
end
end list |
#include <boost/multi_array.hpp>
#include <iostream>

int main()
{
  // A one-dimensional multi_array holding six characters.
  boost::multi_array<char, 1> a{boost::extents[6]};

  // Fill it with the NUL-terminated string "Boost".
  a[0] = 'B';
  a[1] = 'o';
  a[2] = 'o';
  a[3] = 's';
  a[4] = 't';
  a[5] = '\0';

  // origin() returns a pointer to the first element of the underlying
  // contiguous storage, so the characters print as the C string "Boost".
  std::cout << a.origin() << '\n';
}
-- --------------------------------------------------------------- [ Nonce.idr ]
-- Module : Nonce
-- Description : Types for Nonces
-- Copyright : (c) Jan de Muijnck-Hughes
-- License : see LICENSE
-- --------------------------------------------------------------------- [ EOH ]
module Crypto.Nonce
import Crypto.Common
data TyNonce = Zero | Random
||| Nonces for guaranteeing freshness.
|||
||| @ s The setting.
||| @ tyN The nonce type.
||| @ l The length.
data Nonce : (s : Setting) -> (tyN : TyNonce) -> (l : Nat) -> Type where
||| The all zero nonce for asymmetric encryption.
MkZeroPkNonce : Vect n a -> Nonce Asymm Zero n
||| A randomly generated nonce for asymmetric encryption.
MkRndPkNonce : Vect n a -> Nonce Asymm Random n
||| The all zero nonce for symmetric encryption.
MkZeroSkNonce : Vect n a -> Nonce Symm Zero n
||| A randomly generated nonce for symmetric encryption.
MkRndSkNonce : Vect n a -> Nonce Symm Random n
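-- A minimal usage sketch (illustrative only, not from the original module; a
-- real random nonce would be filled from a CSPRNG rather than written literally):
--
--   zeroNonce : Nonce Symm Zero 3
--   zeroNonce = MkZeroSkNonce [0, 0, 0]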
-- --------------------------------------------------------------------- [ EOF ]
|
[STATEMENT]
lemma weakBisimSubstExtBang:
fixes \<Psi> :: 'b
and P :: "('a, 'b, 'c) psi"
assumes "guarded P"
shows "\<Psi> \<rhd> !P \<approx>\<^sub>s P \<parallel> !P"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Psi> \<rhd> !P \<approx>\<^sub>s P \<parallel> !P
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
guarded P
goal (1 subgoal):
1. \<Psi> \<rhd> !P \<approx>\<^sub>s P \<parallel> !P
[PROOF STEP]
by(metis strongBisimSubstWeakBisimSubst bisimSubstExtBang) |
[STATEMENT]
lemma enat_min_eq_0_iff:
fixes a b :: enat
shows "min a b = 0 \<longleftrightarrow> a = 0 \<or> b = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (min a b = 0) = (a = 0 \<or> b = 0)
[PROOF STEP]
by(auto simp add: min_def) |
\section{Score functions}
|
theory SafeProof
imports ProcLangX
begin
type_synonym state = "string \<Rightarrow> p_exp option"
definition invalid_state where
"invalid_state e S = (\<exists> a. a \<in> free_vars e \<and> a \<notin> dom S)"
definition valid_perm_env where
"valid_perm_env r_s S = (use_env_vars r_s \<subseteq> dom S)"
|
theory T158
imports Main
begin
lemma "(
(\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) &
(\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(join(x, y), z) = join(over(x, z), over(y, z))) &
(\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) &
(\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) &
(\<forall> x::nat. invo(invo(x)) = x)
) \<longrightarrow>
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(meet(x, y), z) = join(undr(x, z), undr(y, z)))
"
nitpick[card nat=4,timeout=86400]
oops
end |
{-# OPTIONS --sized-types #-}
open import FRP.JS.Array using ( ⟨⟩ ; ⟨_ ; _,_ ; _⟩ )
open import FRP.JS.Bool using ( Bool ; true ; false ; not )
open import FRP.JS.JSON using ( JSON ; float ; bool ; string ; object ; array ; null ; parse ; _≟_ )
open import FRP.JS.Maybe using ( Maybe ; just ; nothing ; _≟[_]_ )
open import FRP.JS.Object using ( ⟪_ ; _↦_⟫ ; _↦_,_ )
open import FRP.JS.QUnit using ( TestSuite ; test ; ok ; ok! ; _,_ )
module FRP.JS.Test.JSON where
⟨1⟩ = array (⟨ float 1.0 ⟩)
⟨n⟩ = array (⟨ null ⟩)
⟨1,n⟩ = array (⟨ float 1.0 , null ⟩)
⟨n,1⟩ = array (⟨ null , float 1.0 ⟩)
⟨⟨1⟩⟩ = array (⟨ ⟨1⟩ ⟩)
⟪a↦1⟫ = object (⟪ "a" ↦ float 1.0 ⟫)
⟪b↦n⟫ = object (⟪ "b" ↦ null ⟫)
⟪a↦1,b↦n⟫ = object (⟪ "a" ↦ float 1.0 , "b" ↦ null ⟫)
⟪a↦⟪a↦1⟫⟫ = object (⟪ "a" ↦ ⟪a↦1⟫ ⟫)
⟪a↦⟨1⟩⟫ = object (⟪ "a" ↦ ⟨1⟩ ⟫)
⟨⟪a↦1⟫⟩ = array (⟨ ⟪a↦1⟫ ⟩)
_≟?_ : Maybe JSON → Maybe JSON → Bool
j ≟? k = j ≟[ _≟_ ] k
tests : TestSuite
tests =
( test "≟"
( ok "n ≟ n" (null ≟ null)
, ok "a ≟ a" (string "a" ≟ string "a")
, ok "t ≟ t" (bool true ≟ bool true)
, ok "f ≟ f" (bool false ≟ bool false)
, ok "1 ≟ 1" (float 1.0 ≟ float 1.0)
, ok "2 ≟ 2" (float 2.0 ≟ float 2.0)
, ok "⟨1⟩ ≟ ⟨1⟩" (⟨1⟩ ≟ ⟨1⟩)
, ok "⟨n⟩ ≟ ⟨n⟩" (⟨n⟩ ≟ ⟨n⟩)
, ok "⟨1,n⟩ ≟ ⟨1,n⟩" (⟨1,n⟩ ≟ ⟨1,n⟩)
, ok "⟪a↦1⟫ ≟ ⟪a↦1⟫" (⟪a↦1⟫ ≟ ⟪a↦1⟫)
, ok "⟪b↦n⟫ ≟ ⟪b↦n⟫" (⟪b↦n⟫ ≟ ⟪b↦n⟫)
, ok "⟪a↦1,b↦n⟫ ≟ ⟪a↦1,b↦n⟫" (⟪a↦1,b↦n⟫ ≟ ⟪a↦1,b↦n⟫)
, ok "⟪a↦⟪a↦1⟫⟫ ≟ ⟪a↦⟪a↦1⟫⟫" (⟪a↦⟪a↦1⟫⟫ ≟ ⟪a↦⟪a↦1⟫⟫)
, ok "⟪a↦⟨1⟩⟫ ≟ ⟪a↦⟨1⟩⟫" (⟪a↦⟨1⟩⟫ ≟ ⟪a↦⟨1⟩⟫)
, ok "⟨⟪a↦1⟫⟩ ≟ ⟨⟪a↦1⟫⟩" (⟨⟪a↦1⟫⟩ ≟ ⟨⟪a↦1⟫⟩)
, ok "n ≟ a" (not (null ≟ string "a"))
, ok "n ≟ 'n'" (not (null ≟ string "null"))
, ok "n ≟ f" (not (null ≟ bool false))
, ok "n ≟ 0" (not (null ≟ float 0.0))
, ok "a ≟ b" (not (string "a" ≟ string "b"))
, ok "a ≟ A" (not (string "a" ≟ string "A"))
, ok "t ≟ 't'" (not (string "true" ≟ bool true))
, ok "1 ≟ '1'" (not (string "1.0" ≟ float 1.0))
, ok "t ≟ f" (not (bool true ≟ bool false))
, ok "f ≟ 0" (not (bool false ≟ float 0.0))
, ok "1 ≟ 2" (not (float 1.0 ≟ float 2.0))
, ok "⟨1⟩ ≟ ⟨n⟩" (not (⟨1⟩ ≟ ⟨n⟩))
, ok "⟨1⟩ ≟ ⟨1,n⟩" (not (⟨1⟩ ≟ ⟨1,n⟩))
, ok "⟨1,n⟩ ≟ ⟨n,1⟩" (not (⟨1,n⟩ ≟ ⟨n,1⟩))
, ok "⟪a↦1⟫ ≟ ⟪b↦n⟫" (not (⟪a↦1⟫ ≟ ⟪b↦n⟫))
, ok "⟪a↦1⟫ ≟ ⟪a↦1,b↦n⟫" (not (⟪a↦1⟫ ≟ ⟪a↦1,b↦n⟫))
, ok "⟪a↦⟪a↦1⟫⟫ ≟ ⟪a↦1⟫" (not (⟪a↦⟪a↦1⟫⟫ ≟ ⟪a↦1⟫))
, ok "⟪a↦⟨1⟩⟫ ≟ ⟪a↦1⟫" (not (⟪a↦⟨1⟩⟫ ≟ ⟪a↦1⟫))
, ok "⟨⟪a↦1⟫⟩ ≟ ⟪a↦1⟫" (not (⟨⟪a↦1⟫⟩ ≟ ⟪a↦1⟫)) )
, test "parse"
( ok! "parse n" (parse "null" ≟? just null)
, ok! "parse a" (parse "\"a\"" ≟? just (string "a"))
, ok! "parse a" (parse "true" ≟? just (bool true))
, ok! "parse a" (parse "false" ≟? just (bool false))
, ok! "parse a" (parse "1" ≟? just (float 1.0))
, ok! "parse a" (parse "1.0" ≟? just (float 1.0))
, ok! "parse ⟨1⟩" (parse "[1]" ≟? just ⟨1⟩)
, ok! "parse ⟨n⟩" (parse "[null]" ≟? just ⟨n⟩)
, ok! "parse ⟨1,n⟩" (parse "[1,null]" ≟? just ⟨1,n⟩)
, ok! "parse ⟨n,1⟩" (parse "[null,1]" ≟? just ⟨n,1⟩)
, ok! "parse ⟨⟨1⟩⟩" (parse "[[1]]" ≟? just ⟨⟨1⟩⟩)
, ok! "parse ⟪a↦1⟫" (parse "{\"a\":1}" ≟? just ⟪a↦1⟫)
, ok! "parse ⟪b↦n⟫" (parse "{\"b\":null}" ≟? just ⟪b↦n⟫)
, ok! "parse ⟪a↦1,b↦n⟫" (parse "{\"a\":1,\"b\":null}" ≟? just ⟪a↦1,b↦n⟫)
, ok! "parse ⟪a↦⟪a↦1⟫⟫" (parse "{\"a\":{\"a\":1}}" ≟? just ⟪a↦⟪a↦1⟫⟫)
, ok! "parse ⟪a↦⟨1⟩⟫" (parse "{\"a\":[1]}" ≟? just ⟪a↦⟨1⟩⟫)
, ok! "parse ⟨⟪a↦1⟫⟩" (parse "[{\"a\":1}]" ≟? just ⟨⟪a↦1⟫⟩)
, ok! "parse ⟨⟪a↦1⟫⟩" (parse "][" ≟? nothing ) ) ) |
Formal statement is: lemma limitin_atin: "limitin Y f y (atin X x) \<longleftrightarrow> y \<in> topspace Y \<and> (x \<in> topspace X \<longrightarrow> (\<forall>V. openin Y V \<and> y \<in> V \<longrightarrow> (\<exists>U. openin X U \<and> x \<in> U \<and> f ` (U - {x}) \<subseteq> V)))" Informal statement is: The function $f$ has limit $y$ at $x$ (within the topological space $X$, with values in the topological space $Y$) if and only if $y$ lies in the topological space $Y$ and, whenever $x$ lies in the topological space $X$, for every open set $V$ containing $y$ there exists an open set $U$ containing $x$ such that $f(U - \{x\}) \subseteq V$. |
State Before: V : Type u
G : SimpleGraph V
K L L' M : Set V
C : ComponentCompl G L
h : K ⊆ L
⊢ ↑C ⊆ ↑(hom h C) State After: case intro
V : Type u
G : SimpleGraph V
K L L' M : Set V
h : K ⊆ L
c : V
cL : ¬c ∈ L
⊢ c ∈ ↑(hom h (componentComplMk G cL)) Tactic: rintro c ⟨cL, rfl⟩ State Before: case intro
V : Type u
G : SimpleGraph V
K L L' M : Set V
h : K ⊆ L
c : V
cL : ¬c ∈ L
⊢ c ∈ ↑(hom h (componentComplMk G cL)) State After: no goals Tactic: exact ⟨fun h' => cL (h h'), rfl⟩ |
import Base: size, length, iterate, getindex, setindex!, lastindex
using Base: @propagate_inbounds
import Adapt
using OffsetArrays
import Oceananigans.Architectures: architecture
import Oceananigans.Utils: datatuple
import Oceananigans.Grids: total_size, topology
using Oceananigans.Architectures
using Oceananigans.Grids
using Oceananigans.Utils
@hascuda using CuArrays
"""
AbstractField{X, Y, Z, A, G}
Abstract supertype for fields located at `(X, Y, Z)` with data stored in a container
of type `A`. The field is defined on a grid `G`.
"""
abstract type AbstractField{X, Y, Z, A, G} end
"""
Field{X, Y, Z, A, G, B} <: AbstractField{X, Y, Z, A, G}
A field defined at the location (`X`, `Y`, `Z`), each of which can be either `Cell`
or `Face`, and with data stored in a container of type `A` (typically an array).
The field is defined on a grid `G` and has field boundary conditions `B`.
"""
struct Field{X, Y, Z, A, G, B} <: AbstractField{X, Y, Z, A, G}
data :: A
grid :: G
boundary_conditions :: B
function Field{X, Y, Z}(data, grid, bcs) where {X, Y, Z}
Tx, Ty, Tz = total_size((X, Y, Z), grid)
if size(data) != (Tx, Ty, Tz)
e = "Cannot construct field at ($X, $Y, $Z) with size(data)=$(size(data)). " *
"`data` must have size ($Tx, $Ty, $Tz)."
throw(ArgumentError(e))
end
return new{X, Y, Z, typeof(data), typeof(grid), typeof(bcs)}(data, grid, bcs)
end
end
"""
Field(L::Tuple, arch, grid, bcs, [data=zeros(arch, grid)])
Construct a `Field` on some architecture `arch` and a `grid` with some `data`.
The field's location is defined by a tuple `L` of length 3 whose elements are
`Cell` or `Face` and has field boundary conditions `bcs`.
"""
function Field(L::Tuple, arch, grid, bcs,
data=zeros(eltype(grid), arch, grid, (typeof(L[1]), typeof(L[2]), typeof(L[3]))))
return Field{typeof(L[1]), typeof(L[2]), typeof(L[3])}(data, grid, bcs)
end
function Field(L::NTuple{3, DataType}, arch, grid, bcs,
data=zeros(eltype(grid), arch, grid, (L[1], L[2], L[3])))
return Field{L[1], L[2], L[3]}(data, grid, bcs)
end
"""
Field(X, Y, Z, arch, grid, bcs, [data=zeros(arch, grid)])
Construct a `Field` on some architecture `arch` and a `grid` with some `data`.
The field's location is defined by `X`, `Y`, `Z` where each is either `Cell` or `Face`
and has field boundary conditions `bcs`.
"""
Field(X, Y, Z, arch, grid, bcs, data=zeros(eltype(grid), arch, grid, (X, Y, Z))) =
Field((X, Y, Z), arch, grid, bcs, data)
"""
CellField([FT=eltype(grid)], arch::AbstractArchitecture, grid, bcs=TracerBoundaryConditions(grid),
data=zeros(FT, arch, grid, (Cell, Cell, Cell)))
Return a `Field{Cell, Cell, Cell}` on architecture `arch` and `grid` containing `data`
with field boundary conditions `bcs`.
"""
function CellField(arch::AbstractArchitecture, grid,
bcs=TracerBoundaryConditions(grid),
data=zeros(eltype(grid), arch, grid, (Cell, Cell, Cell)))
return Field(Cell, Cell, Cell, arch, grid, bcs, data)
end
"""
XFaceField([FT=eltype(grid)], arch::AbstractArchitecture, grid, bcs=UVelocityBoundaryConditions(grid),
data=zeros(FT, arch, grid, (Face, Cell, Cell)))
Return a `Field{Face, Cell, Cell}` on architecture `arch` and `grid` containing `data`
with field boundary conditions `bcs`.
"""
function XFaceField(arch::AbstractArchitecture, grid,
bcs=UVelocityBoundaryConditions(grid),
data=zeros(eltype(grid), arch, grid, (Face, Cell, Cell)))
return Field(Face, Cell, Cell, arch, grid, bcs, data)
end
"""
YFaceField([FT=eltype(grid)], arch::AbstractArchitecture, grid, bcs=VVelocityBoundaryConditions(grid),
data=zeros(FT, arch, grid, (Cell, Face, Cell)))
Return a `Field{Cell, Face, Cell}` on architecture `arch` and `grid` containing `data`
with field boundary conditions `bcs`.
"""
function YFaceField(arch::AbstractArchitecture, grid,
bcs=VVelocityBoundaryConditions(grid),
data=zeros(eltype(grid), arch, grid, (Cell, Face, Cell)))
return Field(Cell, Face, Cell, arch, grid, bcs, data)
end
"""
ZFaceField([FT=eltype(grid)], arch::AbstractArchitecture, grid, bcs=WVelocityBoundaryConditions(grid),
data=zeros(FT, arch, grid, (Cell, Cell, Face)))
Return a `Field{Cell, Cell, Face}` on architecture `arch` and `grid` containing `data`
with field boundary conditions `bcs`.
"""
function ZFaceField(arch::AbstractArchitecture, grid,
bcs=WVelocityBoundaryConditions(grid),
data=zeros(eltype(grid), arch, grid, (Cell, Cell, Face)))
return Field(Cell, Cell, Face, arch, grid, bcs, data)
end
CellField(FT::DataType, arch, grid, bcs=TracerBoundaryConditions(grid)) =
CellField(arch, grid, bcs, zeros(FT, arch, grid, (Cell, Cell, Cell)))
XFaceField(FT::DataType, arch, grid, bcs=UVelocityBoundaryConditions(grid)) =
XFaceField(arch, grid, bcs, zeros(FT, arch, grid, (Face, Cell, Cell)))
YFaceField(FT::DataType, arch, grid, bcs=VVelocityBoundaryConditions(grid)) =
YFaceField(arch, grid, bcs, zeros(FT, arch, grid, (Cell, Face, Cell)))
ZFaceField(FT::DataType, arch, grid, bcs=WVelocityBoundaryConditions(grid)) =
ZFaceField(arch, grid, bcs, zeros(FT, arch, grid, (Cell, Cell, Face)))
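# A minimal construction sketch (the grid constructor name and keyword arguments
# below are illustrative assumptions, not taken from this file; substitute the grid
# type provided by the version of Oceananigans in use):
#
#   grid = RegularCartesianGrid(size=(16, 16, 16), extent=(1, 1, 1))
#   T = CellField(CPU(), grid)            # tracer at (Cell, Cell, Cell)
#   u = XFaceField(Float32, CPU(), grid)  # u-velocity at (Face, Cell, Cell)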
#####
##### Functions for querying fields
#####
location(a) = nothing
"Returns the location `(X, Y, Z)` of an `AbstractField{X, Y, Z}`."
location(::AbstractField{X, Y, Z}) where {X, Y, Z} = (X, Y, Z) # note no instantiation
x_location(::AbstractField{X}) where X = X
y_location(::AbstractField{X, Y}) where {X, Y} = Y
z_location(::AbstractField{X, Y, Z}) where {X, Y, Z} = Z
"Returns the architecture where the field data `f.data` is stored."
architecture(f::Field) = architecture(f.data)
architecture(o::OffsetArray) = architecture(o.parent)
"Returns the length of a field's `data`."
@inline length(f::Field) = length(f.data)
"Returns the topology of a fields' `grid`."
@inline topology(f, args...) = topology(f.grid, args...)
"""
Returns the length of a field located at `Cell` centers along a grid
dimension of length `N` and with halo points `H`.
"""
dimension_length(loc, topo, N, H=0) = N + 2H
"""
Returns the length of a field located at cell `Face`s along a grid
dimension of length `N` and with halo points `H`.
"""
dimension_length(::Type{Face}, ::Bounded, N, H=0) = N + 1 + 2H
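# For example, a Bounded dimension with N = 8 interior points and H = 2 halo points
# has dimension_length = 8 + 2*2 = 12 data points at `Cell` centers and
# 8 + 1 + 2*2 = 13 at cell `Face`s.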
"""
size(loc, grid)
Returns the size of a field at `loc` on `grid`.
This is a 3-tuple of integers corresponding to the number of interior nodes
of `f` along `x, y, z`.
"""
@inline size(loc, grid) = (dimension_length(loc[1], topology(grid, 1), grid.Nx),
dimension_length(loc[2], topology(grid, 2), grid.Ny),
dimension_length(loc[3], topology(grid, 3), grid.Nz))
"""
size(f::AbstractField{X, Y, Z}) where {X, Y, Z}
Returns the size of an `AbstractField{X, Y, Z}` located at `X, Y, Z`.
This is a 3-tuple of integers corresponding to the number of interior nodes
of `f` along `x, y, z`.
"""
@inline size(f::AbstractField) = size(location(f), f.grid)
"""
total_size(loc, grid)
Returns the "total" size of a field at `loc` on `grid`.
This is a 3-tuple of integers corresponding to the number of grid points
contained by `f` along `x, y, z`.
"""
@inline total_size(loc, grid) = (dimension_length(loc[1], topology(grid, 1), grid.Nx, grid.Hx),
dimension_length(loc[2], topology(grid, 2), grid.Ny, grid.Hy),
dimension_length(loc[3], topology(grid, 3), grid.Nz, grid.Hz))
@inline total_size(f::AbstractField) = total_size(location(f), f.grid)
@propagate_inbounds getindex(f::Field, inds...) = @inbounds getindex(f.data, inds...)
@propagate_inbounds setindex!(f::Field, v, inds...) = @inbounds setindex!(f.data, v, inds...)
@inline lastindex(f::Field) = lastindex(f.data)
@inline lastindex(f::Field, dim) = lastindex(f.data, dim)
"Returns `f.data` for `f::Field` or `f` for `f::AbstractArray."
@inline data(a) = a # fallback
@inline data(f::Field) = f.data
# Endpoint for recursive `datatuple` function:
@inline datatuple(obj::AbstractField) = data(obj)
"Returns `f.data.parent` for `f::Field`."
@inline Base.parent(f::Field) = f.data.parent
@inline interior_indices(loc, topo, N) = 1:N
@inline interior_indices(::Type{Face}, ::Bounded, N) = 1:N+1
@inline interior_parent_indices(loc, topo, N, H) = 1+H:N+H
@inline interior_parent_indices(::Type{Face}, ::Bounded, N, H) = 1+H:N+1+H
"Returns a view of `f` that excludes halo points."
@inline interior(f::Field{X, Y, Z}) where {X, Y, Z} = view(f.data, interior_indices(X, topology(f, 1), f.grid.Nx),
interior_indices(Y, topology(f, 2), f.grid.Ny),
interior_indices(Z, topology(f, 3), f.grid.Nz))
"Returns a reference (not a view) to the interior points of `field.data.parent.`"
@inline interiorparent(f::Field{X, Y, Z}) where {X, Y, Z} =
@inbounds f.data.parent[interior_parent_indices(X, topology(f, 1), f.grid.Nx, f.grid.Hx),
interior_parent_indices(Y, topology(f, 2), f.grid.Ny, f.grid.Hy),
interior_parent_indices(Z, topology(f, 3), f.grid.Nz, f.grid.Hz)]
iterate(f::Field, state=1) = iterate(f.data, state)
@inline xnode(::Type{Cell}, i, grid) = @inbounds grid.xC[i]
@inline xnode(::Type{Face}, i, grid) = @inbounds grid.xF[i]
@inline ynode(::Type{Cell}, j, grid) = @inbounds grid.yC[j]
@inline ynode(::Type{Face}, j, grid) = @inbounds grid.yF[j]
@inline znode(::Type{Cell}, k, grid) = @inbounds grid.zC[k]
@inline znode(::Type{Face}, k, grid) = @inbounds grid.zF[k]
@inline xnode(i, ψ::Field{X, Y, Z}) where {X, Y, Z} = xnode(X, i, ψ.grid)
@inline ynode(j, ψ::Field{X, Y, Z}) where {X, Y, Z} = ynode(Y, j, ψ.grid)
@inline znode(k, ψ::Field{X, Y, Z}) where {X, Y, Z} = znode(Z, k, ψ.grid)
# Dispatch insanity
xnodes(::Type{Cell}, topo, grid) = reshape(grid.xC, grid.Nx, 1, 1)
ynodes(::Type{Cell}, topo, grid) = reshape(grid.yC, 1, grid.Ny, 1)
znodes(::Type{Cell}, topo, grid) = reshape(grid.zC, 1, 1, grid.Nz)
xnodes(::Type{Face}, topo, grid) = reshape(grid.xF[1:end-1], grid.Nx, 1, 1)
ynodes(::Type{Face}, topo, grid) = reshape(grid.yF[1:end-1], 1, grid.Ny, 1)
znodes(::Type{Face}, topo, grid) = reshape(grid.zF[1:end-1], 1, 1, grid.Nz)
xnodes(::Type{Face}, ::Bounded, grid) = reshape(grid.xF, grid.Nx+1, 1, 1)
ynodes(::Type{Face}, ::Bounded, grid) = reshape(grid.yF, 1, grid.Ny+1, 1)
znodes(::Type{Face}, ::Bounded, grid) = reshape(grid.zF, 1, 1, grid.Nz+1)
xnodes(ψ::AbstractField) = xnodes(x_location(ψ), topology(ψ, 1), ψ.grid)
ynodes(ψ::AbstractField) = ynodes(y_location(ψ), topology(ψ, 2), ψ.grid)
znodes(ψ::AbstractField) = znodes(z_location(ψ), topology(ψ, 3), ψ.grid)
nodes(ψ::AbstractField) = (xnodes(ψ), ynodes(ψ), znodes(ψ))
#####
##### Creating offset arrays for field data by dispatching on architecture.
#####
"""
Return a range of indices for a field located at `Cell` centers
along a grid dimension of length `N` and with halo points `H`.
"""
offset_indices(loc, topo, N, H=0) = 1 - H : N + H
"""
Return a range of indices for a field located at cell `Face`s
along a grid dimension of length `N` and with halo points `H`.
"""
offset_indices(::Type{Face}, ::Bounded, N, H=0) = 1 - H : N + H + 1
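# For example, with N = 4 and H = 1 at `Cell` centers, `offset_indices` gives the
# range 0:5, so indices 1:4 address interior points and 0 and 5 address the halo points.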
"""
OffsetArray(underlying_data, grid, loc)
Returns an `OffsetArray` that maps to `underlying_data` in memory,
with offset indices appropriate for the `data` of a field on
a `grid` of `size(grid)` and located at `loc`.
"""
function OffsetArray(underlying_data, grid, loc)
ii = offset_indices(loc[1], topology(grid, 1), grid.Nx, grid.Hx)
jj = offset_indices(loc[2], topology(grid, 2), grid.Ny, grid.Hy)
kk = offset_indices(loc[3], topology(grid, 3), grid.Nz, grid.Hz)
return OffsetArray(underlying_data, ii, jj, kk)
end
"""
zeros([FT=Float64], ::CPU, grid, loc)
Returns an `OffsetArray` of zeros of float type `FT`, with
parent data in CPU memory and indices corresponding to a field on a
`grid` of `size(grid)` and located at `loc`.
"""
function Base.zeros(FT, ::CPU, grid, loc=(Cell, Cell, Cell))
underlying_data = zeros(FT, dimension_length(loc[1], topology(grid, 1), grid.Nx, grid.Hx),
dimension_length(loc[2], topology(grid, 2), grid.Ny, grid.Hy),
dimension_length(loc[3], topology(grid, 3), grid.Nz, grid.Hz))
return OffsetArray(underlying_data, grid, loc)
end
"""
zeros([FT=Float64], ::GPU, grid, loc)
Returns an `OffsetArray` of zeros of float type `FT`, with
parent data in GPU memory and indices corresponding to a field on a `grid`
of `size(grid)` and located at `loc`.
"""
function Base.zeros(FT, ::GPU, grid, loc=(Cell, Cell, Cell))
underlying_data = CuArray{FT}(undef, dimension_length(loc[1], topology(grid, 1), grid.Nx, grid.Hx),
dimension_length(loc[2], topology(grid, 2), grid.Ny, grid.Hy),
dimension_length(loc[3], topology(grid, 3), grid.Nz, grid.Hz))
underlying_data .= 0 # Ensure data is initially 0.
return OffsetArray(underlying_data, grid, loc)
end
# Default to type of Grid
Base.zeros(arch, grid, loc=(Cell, Cell, Cell)) = zeros(eltype(grid), arch, grid, loc)
Base.zeros(FT, ::CPU, grid, Nx, Ny, Nz) = zeros(FT, Nx, Ny, Nz)
Base.zeros(FT, ::GPU, grid, Nx, Ny, Nz) = zeros(FT, Nx, Ny, Nz) |> CuArray
Base.zeros(arch, grid, args...) = zeros(eltype(grid), arch, grid, args...)
|
[STATEMENT]
lemma map_of_map_keys:
"set xs = dom m \<Longrightarrow> map_of (map (\<lambda>k. (k, the (m k))) xs) = m"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set xs = dom m \<Longrightarrow> map_of (map (\<lambda>k. (k, the (m k))) xs) = m
[PROOF STEP]
by (rule ext) (auto simp add: map_of_map_restrict restrict_map_def) |
The following figures give statistical information about remittance outflows from Switzerland during the period 2000 to 2010. Statistics on other countries are covered in the section "Remittances - more countries".
Remittance outflows from Switzerland to other countries were about $10,705 million (2006), $12,248 million (2007), $14,459 million (2008), $14,906 million (2009), and $16,878 million (2010). |